Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.17 commit in: /
Date: Thu, 12 May 2022 11:27:32 +0000 (UTC)
Message-Id: 1652354828.33ae7af26e3ce63605bf43c87d215a1d710d852d.mpagano@gentoo
1 commit: 33ae7af26e3ce63605bf43c87d215a1d710d852d
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu May 12 11:27:08 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu May 12 11:27:08 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=33ae7af2
7
8 Linux patch 5.17.7
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1006_linux-5.17.7.patch | 4888 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4892 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 91016f55..cf45e5d3 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -67,6 +67,10 @@ Patch: 1005_linux-5.17.6.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.17.6
23
24 +Patch: 1006_linux-5.17.7.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.17.7
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1006_linux-5.17.7.patch b/1006_linux-5.17.7.patch
33 new file mode 100644
34 index 00000000..ed7d05cf
35 --- /dev/null
36 +++ b/1006_linux-5.17.7.patch
37 @@ -0,0 +1,4888 @@
38 +diff --git a/Documentation/devicetree/bindings/pci/apple,pcie.yaml b/Documentation/devicetree/bindings/pci/apple,pcie.yaml
39 +index 7f01e15fc81c2..daf602ac0d0fd 100644
40 +--- a/Documentation/devicetree/bindings/pci/apple,pcie.yaml
41 ++++ b/Documentation/devicetree/bindings/pci/apple,pcie.yaml
42 +@@ -142,7 +142,6 @@ examples:
43 + device_type = "pci";
44 + reg = <0x0 0x0 0x0 0x0 0x0>;
45 + reset-gpios = <&pinctrl_ap 152 0>;
46 +- max-link-speed = <2>;
47 +
48 + #address-cells = <3>;
49 + #size-cells = <2>;
50 +@@ -153,7 +152,6 @@ examples:
51 + device_type = "pci";
52 + reg = <0x800 0x0 0x0 0x0 0x0>;
53 + reset-gpios = <&pinctrl_ap 153 0>;
54 +- max-link-speed = <2>;
55 +
56 + #address-cells = <3>;
57 + #size-cells = <2>;
58 +@@ -164,7 +162,6 @@ examples:
59 + device_type = "pci";
60 + reg = <0x1000 0x0 0x0 0x0 0x0>;
61 + reset-gpios = <&pinctrl_ap 33 0>;
62 +- max-link-speed = <1>;
63 +
64 + #address-cells = <3>;
65 + #size-cells = <2>;
66 +diff --git a/Makefile b/Makefile
67 +index 7ef8dd5ab6f28..ce65b393a2b49 100644
68 +--- a/Makefile
69 ++++ b/Makefile
70 +@@ -1,7 +1,7 @@
71 + # SPDX-License-Identifier: GPL-2.0
72 + VERSION = 5
73 + PATCHLEVEL = 17
74 +-SUBLEVEL = 6
75 ++SUBLEVEL = 7
76 + EXTRAVERSION =
77 + NAME = Superb Owl
78 +
79 +diff --git a/arch/mips/include/asm/timex.h b/arch/mips/include/asm/timex.h
80 +index b05bb70a2e46f..8026baf46e729 100644
81 +--- a/arch/mips/include/asm/timex.h
82 ++++ b/arch/mips/include/asm/timex.h
83 +@@ -40,9 +40,9 @@
84 + typedef unsigned int cycles_t;
85 +
86 + /*
87 +- * On R4000/R4400 before version 5.0 an erratum exists such that if the
88 +- * cycle counter is read in the exact moment that it is matching the
89 +- * compare register, no interrupt will be generated.
90 ++ * On R4000/R4400 an erratum exists such that if the cycle counter is
91 ++ * read in the exact moment that it is matching the compare register,
92 ++ * no interrupt will be generated.
93 + *
94 + * There is a suggested workaround and also the erratum can't strike if
95 + * the compare interrupt isn't being used as the clock source device.
96 +@@ -63,7 +63,7 @@ static inline int can_use_mips_counter(unsigned int prid)
97 + if (!__builtin_constant_p(cpu_has_counter))
98 + asm volatile("" : "=m" (cpu_data[0].options));
99 + if (likely(cpu_has_counter &&
100 +- prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0))))
101 ++ prid > (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15))))
102 + return 1;
103 + else
104 + return 0;
105 +diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
106 +index caa01457dce60..ed339d7979f3f 100644
107 +--- a/arch/mips/kernel/time.c
108 ++++ b/arch/mips/kernel/time.c
109 +@@ -141,15 +141,10 @@ static __init int cpu_has_mfc0_count_bug(void)
110 + case CPU_R4400MC:
111 + /*
112 + * The published errata for the R4400 up to 3.0 say the CPU
113 +- * has the mfc0 from count bug.
114 ++ * has the mfc0 from count bug. This seems the last version
115 ++ * produced.
116 + */
117 +- if ((current_cpu_data.processor_id & 0xff) <= 0x30)
118 +- return 1;
119 +-
120 +- /*
121 +- * we assume newer revisions are ok
122 +- */
123 +- return 0;
124 ++ return 1;
125 + }
126 +
127 + return 0;
128 +diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
129 +index 1b6129e7d776b..b861bbbc87178 100644
130 +--- a/arch/parisc/kernel/processor.c
131 ++++ b/arch/parisc/kernel/processor.c
132 +@@ -418,8 +418,7 @@ show_cpuinfo (struct seq_file *m, void *v)
133 + }
134 + seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);
135 +
136 +- seq_printf(m, "model\t\t: %s\n"
137 +- "model name\t: %s\n",
138 ++ seq_printf(m, "model\t\t: %s - %s\n",
139 + boot_cpu_data.pdc.sys_model_name,
140 + cpuinfo->dev ?
141 + cpuinfo->dev->name : "Unknown");
142 +diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
143 +index b91cb45ffd4e3..f005ddedb50e4 100644
144 +--- a/arch/parisc/kernel/setup.c
145 ++++ b/arch/parisc/kernel/setup.c
146 +@@ -161,6 +161,8 @@ void __init setup_arch(char **cmdline_p)
147 + #ifdef CONFIG_PA11
148 + dma_ops_init();
149 + #endif
150 ++
151 ++ clear_sched_clock_stable();
152 + }
153 +
154 + /*
155 +diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
156 +index 061119a56fbe8..d8e59a1000ab7 100644
157 +--- a/arch/parisc/kernel/time.c
158 ++++ b/arch/parisc/kernel/time.c
159 +@@ -249,13 +249,9 @@ void __init time_init(void)
160 + static int __init init_cr16_clocksource(void)
161 + {
162 + /*
163 +- * The cr16 interval timers are not syncronized across CPUs, even if
164 +- * they share the same socket.
165 ++ * The cr16 interval timers are not synchronized across CPUs.
166 + */
167 + if (num_online_cpus() > 1 && !running_on_qemu) {
168 +- /* mark sched_clock unstable */
169 +- clear_sched_clock_stable();
170 +-
171 + clocksource_cr16.name = "cr16_unstable";
172 + clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
173 + clocksource_cr16.rating = 0;
174 +diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
175 +index 0d588032d6e69..697a9aed4f77f 100644
176 +--- a/arch/riscv/mm/init.c
177 ++++ b/arch/riscv/mm/init.c
178 +@@ -206,8 +206,25 @@ static void __init setup_bootmem(void)
179 + * early_init_fdt_reserve_self() since __pa() does
180 + * not work for DTB pointers that are fixmap addresses
181 + */
182 +- if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
183 +- memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
184 ++ if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) {
185 ++ /*
186 ++ * In case the DTB is not located in a memory region we won't
187 ++ * be able to locate it later on via the linear mapping and
188 ++ * get a segfault when accessing it via __va(dtb_early_pa).
189 ++ * To avoid this situation copy DTB to a memory region.
190 ++ * Note that memblock_phys_alloc will also reserve DTB region.
191 ++ */
192 ++ if (!memblock_is_memory(dtb_early_pa)) {
193 ++ size_t fdt_size = fdt_totalsize(dtb_early_va);
194 ++ phys_addr_t new_dtb_early_pa = memblock_phys_alloc(fdt_size, PAGE_SIZE);
195 ++ void *new_dtb_early_va = early_memremap(new_dtb_early_pa, fdt_size);
196 ++
197 ++ memcpy(new_dtb_early_va, dtb_early_va, fdt_size);
198 ++ early_memunmap(new_dtb_early_va, fdt_size);
199 ++ _dtb_early_pa = new_dtb_early_pa;
200 ++ } else
201 ++ memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
202 ++ }
203 +
204 + early_init_fdt_scan_reserved_mem();
205 + dma_contiguous_reserve(dma32_phys_limit);
206 +diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
207 +index 8dea01ffc5c18..5290d64723086 100644
208 +--- a/arch/x86/kernel/fpu/core.c
209 ++++ b/arch/x86/kernel/fpu/core.c
210 +@@ -41,17 +41,7 @@ struct fpu_state_config fpu_user_cfg __ro_after_init;
211 + */
212 + struct fpstate init_fpstate __ro_after_init;
213 +
214 +-/*
215 +- * Track whether the kernel is using the FPU state
216 +- * currently.
217 +- *
218 +- * This flag is used:
219 +- *
220 +- * - by IRQ context code to potentially use the FPU
221 +- * if it's unused.
222 +- *
223 +- * - to debug kernel_fpu_begin()/end() correctness
224 +- */
225 ++/* Track in-kernel FPU usage */
226 + static DEFINE_PER_CPU(bool, in_kernel_fpu);
227 +
228 + /*
229 +@@ -59,42 +49,37 @@ static DEFINE_PER_CPU(bool, in_kernel_fpu);
230 + */
231 + DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
232 +
233 +-static bool kernel_fpu_disabled(void)
234 +-{
235 +- return this_cpu_read(in_kernel_fpu);
236 +-}
237 +-
238 +-static bool interrupted_kernel_fpu_idle(void)
239 +-{
240 +- return !kernel_fpu_disabled();
241 +-}
242 +-
243 +-/*
244 +- * Were we in user mode (or vm86 mode) when we were
245 +- * interrupted?
246 +- *
247 +- * Doing kernel_fpu_begin/end() is ok if we are running
248 +- * in an interrupt context from user mode - we'll just
249 +- * save the FPU state as required.
250 +- */
251 +-static bool interrupted_user_mode(void)
252 +-{
253 +- struct pt_regs *regs = get_irq_regs();
254 +- return regs && user_mode(regs);
255 +-}
256 +-
257 + /*
258 + * Can we use the FPU in kernel mode with the
259 + * whole "kernel_fpu_begin/end()" sequence?
260 +- *
261 +- * It's always ok in process context (ie "not interrupt")
262 +- * but it is sometimes ok even from an irq.
263 + */
264 + bool irq_fpu_usable(void)
265 + {
266 +- return !in_interrupt() ||
267 +- interrupted_user_mode() ||
268 +- interrupted_kernel_fpu_idle();
269 ++ if (WARN_ON_ONCE(in_nmi()))
270 ++ return false;
271 ++
272 ++ /* In kernel FPU usage already active? */
273 ++ if (this_cpu_read(in_kernel_fpu))
274 ++ return false;
275 ++
276 ++ /*
277 ++ * When not in NMI or hard interrupt context, FPU can be used in:
278 ++ *
279 ++ * - Task context except from within fpregs_lock()'ed critical
280 ++ * regions.
281 ++ *
282 ++ * - Soft interrupt processing context which cannot happen
283 ++ * while in a fpregs_lock()'ed critical region.
284 ++ */
285 ++ if (!in_hardirq())
286 ++ return true;
287 ++
288 ++ /*
289 ++ * In hard interrupt context it's safe when soft interrupts
290 ++ * are enabled, which means the interrupt did not hit in
291 ++ * a fpregs_lock()'ed critical region.
292 ++ */
293 ++ return !softirq_count();
294 + }
295 + EXPORT_SYMBOL(irq_fpu_usable);
296 +
297 +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
298 +index ed8a13ac4ab23..4c2a158bb6c4f 100644
299 +--- a/arch/x86/kernel/kvm.c
300 ++++ b/arch/x86/kernel/kvm.c
301 +@@ -69,6 +69,7 @@ static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __align
302 + DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
303 + static int has_steal_clock = 0;
304 +
305 ++static int has_guest_poll = 0;
306 + /*
307 + * No need for any "IO delay" on KVM
308 + */
309 +@@ -706,14 +707,26 @@ static int kvm_cpu_down_prepare(unsigned int cpu)
310 +
311 + static int kvm_suspend(void)
312 + {
313 ++ u64 val = 0;
314 ++
315 + kvm_guest_cpu_offline(false);
316 +
317 ++#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
318 ++ if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
319 ++ rdmsrl(MSR_KVM_POLL_CONTROL, val);
320 ++ has_guest_poll = !(val & 1);
321 ++#endif
322 + return 0;
323 + }
324 +
325 + static void kvm_resume(void)
326 + {
327 + kvm_cpu_online(raw_smp_processor_id());
328 ++
329 ++#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
330 ++ if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll)
331 ++ wrmsrl(MSR_KVM_POLL_CONTROL, 0);
332 ++#endif
333 + }
334 +
335 + static struct syscore_ops kvm_syscore_ops = {
336 +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
337 +index b8f8d268d0585..ee15db75fd624 100644
338 +--- a/arch/x86/kvm/cpuid.c
339 ++++ b/arch/x86/kvm/cpuid.c
340 +@@ -865,6 +865,11 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
341 + union cpuid10_eax eax;
342 + union cpuid10_edx edx;
343 +
344 ++ if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
345 ++ entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
346 ++ break;
347 ++ }
348 ++
349 + perf_get_x86_pmu_capability(&cap);
350 +
351 + /*
352 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
353 +index 2a10d0033c964..970d5c740b00b 100644
354 +--- a/arch/x86/kvm/lapic.c
355 ++++ b/arch/x86/kvm/lapic.c
356 +@@ -113,7 +113,8 @@ static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
357 +
358 + static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
359 + {
360 +- return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
361 ++ return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
362 ++ (kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
363 + }
364 +
365 + bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
366 +@@ -2125,10 +2126,9 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
367 + break;
368 +
369 + case APIC_SELF_IPI:
370 +- if (apic_x2apic_mode(apic)) {
371 +- kvm_lapic_reg_write(apic, APIC_ICR,
372 +- APIC_DEST_SELF | (val & APIC_VECTOR_MASK));
373 +- } else
374 ++ if (apic_x2apic_mode(apic))
375 ++ kvm_apic_send_ipi(apic, APIC_DEST_SELF | (val & APIC_VECTOR_MASK), 0);
376 ++ else
377 + ret = 1;
378 + break;
379 + default:
380 +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
381 +index 7f009ebb319ab..e7cd16e1e0a0b 100644
382 +--- a/arch/x86/kvm/mmu/mmu.c
383 ++++ b/arch/x86/kvm/mmu/mmu.c
384 +@@ -3239,6 +3239,8 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
385 + return;
386 +
387 + sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
388 ++ if (WARN_ON(!sp))
389 ++ return;
390 +
391 + if (is_tdp_mmu_page(sp))
392 + kvm_tdp_mmu_put_root(kvm, sp, false);
393 +diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
394 +index b5b0837df0d11..50108634835f4 100644
395 +--- a/arch/x86/kvm/svm/pmu.c
396 ++++ b/arch/x86/kvm/svm/pmu.c
397 +@@ -45,6 +45,22 @@ static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
398 + [7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
399 + };
400 +
401 ++/* duplicated from amd_f17h_perfmon_event_map. */
402 ++static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
403 ++ [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
404 ++ [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
405 ++ [2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
406 ++ [3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
407 ++ [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
408 ++ [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
409 ++ [6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
410 ++ [7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
411 ++};
412 ++
413 ++/* amd_pmc_perf_hw_id depends on these being the same size */
414 ++static_assert(ARRAY_SIZE(amd_event_mapping) ==
415 ++ ARRAY_SIZE(amd_f17h_event_mapping));
416 ++
417 + static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
418 + {
419 + struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
420 +@@ -140,6 +156,7 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
421 +
422 + static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
423 + {
424 ++ struct kvm_event_hw_type_mapping *event_mapping;
425 + u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
426 + u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
427 + int i;
428 +@@ -148,15 +165,20 @@ static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
429 + if (WARN_ON(pmc_is_fixed(pmc)))
430 + return PERF_COUNT_HW_MAX;
431 +
432 ++ if (guest_cpuid_family(pmc->vcpu) >= 0x17)
433 ++ event_mapping = amd_f17h_event_mapping;
434 ++ else
435 ++ event_mapping = amd_event_mapping;
436 ++
437 + for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
438 +- if (amd_event_mapping[i].eventsel == event_select
439 +- && amd_event_mapping[i].unit_mask == unit_mask)
440 ++ if (event_mapping[i].eventsel == event_select
441 ++ && event_mapping[i].unit_mask == unit_mask)
442 + break;
443 +
444 + if (i == ARRAY_SIZE(amd_event_mapping))
445 + return PERF_COUNT_HW_MAX;
446 +
447 +- return amd_event_mapping[i].event_type;
448 ++ return event_mapping[i].event_type;
449 + }
450 +
451 + /* check if a PMC is enabled by comparing it against global_ctrl bits. Because
452 +diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
453 +index e5cecd4ad2d44..76e6411d4dde1 100644
454 +--- a/arch/x86/kvm/svm/sev.c
455 ++++ b/arch/x86/kvm/svm/sev.c
456 +@@ -1590,24 +1590,51 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
457 + atomic_set_release(&src_sev->migration_in_progress, 0);
458 + }
459 +
460 ++/* vCPU mutex subclasses. */
461 ++enum sev_migration_role {
462 ++ SEV_MIGRATION_SOURCE = 0,
463 ++ SEV_MIGRATION_TARGET,
464 ++ SEV_NR_MIGRATION_ROLES,
465 ++};
466 +
467 +-static int sev_lock_vcpus_for_migration(struct kvm *kvm)
468 ++static int sev_lock_vcpus_for_migration(struct kvm *kvm,
469 ++ enum sev_migration_role role)
470 + {
471 + struct kvm_vcpu *vcpu;
472 + unsigned long i, j;
473 ++ bool first = true;
474 +
475 + kvm_for_each_vcpu(i, vcpu, kvm) {
476 +- if (mutex_lock_killable(&vcpu->mutex))
477 ++ if (mutex_lock_killable_nested(&vcpu->mutex, role))
478 + goto out_unlock;
479 ++
480 ++ if (first) {
481 ++ /*
482 ++ * Reset the role to one that avoids colliding with
483 ++ * the role used for the first vcpu mutex.
484 ++ */
485 ++ role = SEV_NR_MIGRATION_ROLES;
486 ++ first = false;
487 ++ } else {
488 ++ mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
489 ++ }
490 + }
491 +
492 + return 0;
493 +
494 + out_unlock:
495 ++
496 ++ first = true;
497 + kvm_for_each_vcpu(j, vcpu, kvm) {
498 + if (i == j)
499 + break;
500 +
501 ++ if (first)
502 ++ first = false;
503 ++ else
504 ++ mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
505 ++
506 ++
507 + mutex_unlock(&vcpu->mutex);
508 + }
509 + return -EINTR;
510 +@@ -1617,8 +1644,15 @@ static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
511 + {
512 + struct kvm_vcpu *vcpu;
513 + unsigned long i;
514 ++ bool first = true;
515 +
516 + kvm_for_each_vcpu(i, vcpu, kvm) {
517 ++ if (first)
518 ++ first = false;
519 ++ else
520 ++ mutex_acquire(&vcpu->mutex.dep_map,
521 ++ SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
522 ++
523 + mutex_unlock(&vcpu->mutex);
524 + }
525 + }
526 +@@ -1726,10 +1760,10 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
527 + charged = true;
528 + }
529 +
530 +- ret = sev_lock_vcpus_for_migration(kvm);
531 ++ ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
532 + if (ret)
533 + goto out_dst_cgroup;
534 +- ret = sev_lock_vcpus_for_migration(source_kvm);
535 ++ ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
536 + if (ret)
537 + goto out_dst_vcpu;
538 +
539 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
540 +index ef63cfd57029a..267d6dc4b8186 100644
541 +--- a/arch/x86/kvm/vmx/vmx.c
542 ++++ b/arch/x86/kvm/vmx/vmx.c
543 +@@ -5473,7 +5473,7 @@ static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu)
544 + struct vcpu_vmx *vmx = to_vmx(vcpu);
545 +
546 + return vmx->emulation_required && !vmx->rmode.vm86_active &&
547 +- vcpu->arch.exception.pending;
548 ++ (vcpu->arch.exception.pending || vcpu->arch.exception.injected);
549 + }
550 +
551 + static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
552 +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
553 +index c59265146e9c8..f1827257ef0e0 100644
554 +--- a/drivers/char/ipmi/ipmi_msghandler.c
555 ++++ b/drivers/char/ipmi/ipmi_msghandler.c
556 +@@ -3677,8 +3677,11 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
557 + void ipmi_unregister_smi(struct ipmi_smi *intf)
558 + {
559 + struct ipmi_smi_watcher *w;
560 +- int intf_num = intf->intf_num, index;
561 ++ int intf_num, index;
562 +
563 ++ if (!intf)
564 ++ return;
565 ++ intf_num = intf->intf_num;
566 + mutex_lock(&ipmi_interfaces_mutex);
567 + intf->intf_num = -1;
568 + intf->in_shutdown = true;
569 +@@ -4518,6 +4521,8 @@ return_unspecified:
570 + } else
571 + /* The message was sent, start the timer. */
572 + intf_start_seq_timer(intf, msg->msgid);
573 ++ requeue = 0;
574 ++ goto out;
575 + } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
576 + || (msg->rsp[1] != msg->data[1])) {
577 + /*
578 +diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
579 +index 64dedb3ef8ec4..5604a810fb3d2 100644
580 +--- a/drivers/char/ipmi/ipmi_si_intf.c
581 ++++ b/drivers/char/ipmi/ipmi_si_intf.c
582 +@@ -2220,10 +2220,7 @@ static void cleanup_one_si(struct smi_info *smi_info)
583 + return;
584 +
585 + list_del(&smi_info->link);
586 +-
587 +- if (smi_info->intf)
588 +- ipmi_unregister_smi(smi_info->intf);
589 +-
590 ++ ipmi_unregister_smi(smi_info->intf);
591 + kfree(smi_info);
592 + }
593 +
594 +diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
595 +index 54be88167c60b..f3b3953cac834 100644
596 +--- a/drivers/firewire/core-card.c
597 ++++ b/drivers/firewire/core-card.c
598 +@@ -668,6 +668,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
599 + void fw_core_remove_card(struct fw_card *card)
600 + {
601 + struct fw_card_driver dummy_driver = dummy_driver_template;
602 ++ unsigned long flags;
603 +
604 + card->driver->update_phy_reg(card, 4,
605 + PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
606 +@@ -682,7 +683,9 @@ void fw_core_remove_card(struct fw_card *card)
607 + dummy_driver.stop_iso = card->driver->stop_iso;
608 + card->driver = &dummy_driver;
609 +
610 ++ spin_lock_irqsave(&card->lock, flags);
611 + fw_destroy_nodes(card);
612 ++ spin_unlock_irqrestore(&card->lock, flags);
613 +
614 + /* Wait for all users, especially device workqueue jobs, to finish. */
615 + fw_card_put(card);
616 +diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
617 +index 9f89c17730b12..708e417200f46 100644
618 +--- a/drivers/firewire/core-cdev.c
619 ++++ b/drivers/firewire/core-cdev.c
620 +@@ -1500,6 +1500,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
621 + {
622 + struct outbound_phy_packet_event *e =
623 + container_of(packet, struct outbound_phy_packet_event, p);
624 ++ struct client *e_client;
625 +
626 + switch (status) {
627 + /* expected: */
628 +@@ -1516,9 +1517,10 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
629 + }
630 + e->phy_packet.data[0] = packet->timestamp;
631 +
632 ++ e_client = e->client;
633 + queue_event(e->client, &e->event, &e->phy_packet,
634 + sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
635 +- client_put(e->client);
636 ++ client_put(e_client);
637 + }
638 +
639 + static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
640 +diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
641 +index b63d55f5ebd33..f40c815343812 100644
642 +--- a/drivers/firewire/core-topology.c
643 ++++ b/drivers/firewire/core-topology.c
644 +@@ -375,16 +375,13 @@ static void report_found_node(struct fw_card *card,
645 + card->bm_retries = 0;
646 + }
647 +
648 ++/* Must be called with card->lock held */
649 + void fw_destroy_nodes(struct fw_card *card)
650 + {
651 +- unsigned long flags;
652 +-
653 +- spin_lock_irqsave(&card->lock, flags);
654 + card->color++;
655 + if (card->local_node != NULL)
656 + for_each_fw_node(card, card->local_node, report_lost_node);
657 + card->local_node = NULL;
658 +- spin_unlock_irqrestore(&card->lock, flags);
659 + }
660 +
661 + static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
662 +@@ -510,6 +507,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
663 + struct fw_node *local_node;
664 + unsigned long flags;
665 +
666 ++ spin_lock_irqsave(&card->lock, flags);
667 ++
668 + /*
669 + * If the selfID buffer is not the immediate successor of the
670 + * previously processed one, we cannot reliably compare the
671 +@@ -521,8 +520,6 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
672 + card->bm_retries = 0;
673 + }
674 +
675 +- spin_lock_irqsave(&card->lock, flags);
676 +-
677 + card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
678 + card->node_id = node_id;
679 + /*
680 +diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
681 +index ac487c96bb717..6c20815cc8d16 100644
682 +--- a/drivers/firewire/core-transaction.c
683 ++++ b/drivers/firewire/core-transaction.c
684 +@@ -73,24 +73,25 @@ static int try_cancel_split_timeout(struct fw_transaction *t)
685 + static int close_transaction(struct fw_transaction *transaction,
686 + struct fw_card *card, int rcode)
687 + {
688 +- struct fw_transaction *t;
689 ++ struct fw_transaction *t = NULL, *iter;
690 + unsigned long flags;
691 +
692 + spin_lock_irqsave(&card->lock, flags);
693 +- list_for_each_entry(t, &card->transaction_list, link) {
694 +- if (t == transaction) {
695 +- if (!try_cancel_split_timeout(t)) {
696 ++ list_for_each_entry(iter, &card->transaction_list, link) {
697 ++ if (iter == transaction) {
698 ++ if (!try_cancel_split_timeout(iter)) {
699 + spin_unlock_irqrestore(&card->lock, flags);
700 + goto timed_out;
701 + }
702 +- list_del_init(&t->link);
703 +- card->tlabel_mask &= ~(1ULL << t->tlabel);
704 ++ list_del_init(&iter->link);
705 ++ card->tlabel_mask &= ~(1ULL << iter->tlabel);
706 ++ t = iter;
707 + break;
708 + }
709 + }
710 + spin_unlock_irqrestore(&card->lock, flags);
711 +
712 +- if (&t->link != &card->transaction_list) {
713 ++ if (t) {
714 + t->callback(card, rcode, NULL, 0, t->callback_data);
715 + return 0;
716 + }
717 +@@ -935,7 +936,7 @@ EXPORT_SYMBOL(fw_core_handle_request);
718 +
719 + void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
720 + {
721 +- struct fw_transaction *t;
722 ++ struct fw_transaction *t = NULL, *iter;
723 + unsigned long flags;
724 + u32 *data;
725 + size_t data_length;
726 +@@ -947,20 +948,21 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
727 + rcode = HEADER_GET_RCODE(p->header[1]);
728 +
729 + spin_lock_irqsave(&card->lock, flags);
730 +- list_for_each_entry(t, &card->transaction_list, link) {
731 +- if (t->node_id == source && t->tlabel == tlabel) {
732 +- if (!try_cancel_split_timeout(t)) {
733 ++ list_for_each_entry(iter, &card->transaction_list, link) {
734 ++ if (iter->node_id == source && iter->tlabel == tlabel) {
735 ++ if (!try_cancel_split_timeout(iter)) {
736 + spin_unlock_irqrestore(&card->lock, flags);
737 + goto timed_out;
738 + }
739 +- list_del_init(&t->link);
740 +- card->tlabel_mask &= ~(1ULL << t->tlabel);
741 ++ list_del_init(&iter->link);
742 ++ card->tlabel_mask &= ~(1ULL << iter->tlabel);
743 ++ t = iter;
744 + break;
745 + }
746 + }
747 + spin_unlock_irqrestore(&card->lock, flags);
748 +
749 +- if (&t->link == &card->transaction_list) {
750 ++ if (!t) {
751 + timed_out:
752 + fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
753 + source, tlabel);
754 +diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
755 +index 85cd379fd3838..60051c0cabeaa 100644
756 +--- a/drivers/firewire/sbp2.c
757 ++++ b/drivers/firewire/sbp2.c
758 +@@ -408,7 +408,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
759 + void *payload, size_t length, void *callback_data)
760 + {
761 + struct sbp2_logical_unit *lu = callback_data;
762 +- struct sbp2_orb *orb;
763 ++ struct sbp2_orb *orb = NULL, *iter;
764 + struct sbp2_status status;
765 + unsigned long flags;
766 +
767 +@@ -433,17 +433,18 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
768 +
769 + /* Lookup the orb corresponding to this status write. */
770 + spin_lock_irqsave(&lu->tgt->lock, flags);
771 +- list_for_each_entry(orb, &lu->orb_list, link) {
772 ++ list_for_each_entry(iter, &lu->orb_list, link) {
773 + if (STATUS_GET_ORB_HIGH(status) == 0 &&
774 +- STATUS_GET_ORB_LOW(status) == orb->request_bus) {
775 +- orb->rcode = RCODE_COMPLETE;
776 +- list_del(&orb->link);
777 ++ STATUS_GET_ORB_LOW(status) == iter->request_bus) {
778 ++ iter->rcode = RCODE_COMPLETE;
779 ++ list_del(&iter->link);
780 ++ orb = iter;
781 + break;
782 + }
783 + }
784 + spin_unlock_irqrestore(&lu->tgt->lock, flags);
785 +
786 +- if (&orb->link != &lu->orb_list) {
787 ++ if (orb) {
788 + orb->callback(orb, &status);
789 + kref_put(&orb->kref, free_orb); /* orb callback reference */
790 + } else {
791 +diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
792 +index 4c1f9e1091b7f..a2c8dd329b31b 100644
793 +--- a/drivers/gpio/gpio-mvebu.c
794 ++++ b/drivers/gpio/gpio-mvebu.c
795 +@@ -871,13 +871,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
796 + mvpwm->chip.dev = dev;
797 + mvpwm->chip.ops = &mvebu_pwm_ops;
798 + mvpwm->chip.npwm = mvchip->chip.ngpio;
799 +- /*
800 +- * There may already be some PWM allocated, so we can't force
801 +- * mvpwm->chip.base to a fixed point like mvchip->chip.base.
802 +- * So, we let pwmchip_add() do the numbering and take the next free
803 +- * region.
804 +- */
805 +- mvpwm->chip.base = -1;
806 +
807 + spin_lock_init(&mvpwm->lock);
808 +
809 +diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
810 +index d2fe76f3f34fd..8726921a11294 100644
811 +--- a/drivers/gpio/gpio-pca953x.c
812 ++++ b/drivers/gpio/gpio-pca953x.c
813 +@@ -762,11 +762,11 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
814 + bitmap_xor(cur_stat, new_stat, old_stat, gc->ngpio);
815 + bitmap_and(trigger, cur_stat, chip->irq_mask, gc->ngpio);
816 +
817 ++ bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
818 ++
819 + if (bitmap_empty(trigger, gc->ngpio))
820 + return false;
821 +
822 +- bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
823 +-
824 + bitmap_and(cur_stat, chip->irq_trig_fall, old_stat, gc->ngpio);
825 + bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio);
826 + bitmap_or(new_stat, old_stat, cur_stat, gc->ngpio);
827 +diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c
828 +index 47455810bdb91..e6534ea1eaa7a 100644
829 +--- a/drivers/gpio/gpio-visconti.c
830 ++++ b/drivers/gpio/gpio-visconti.c
831 +@@ -130,7 +130,6 @@ static int visconti_gpio_probe(struct platform_device *pdev)
832 + struct gpio_irq_chip *girq;
833 + struct irq_domain *parent;
834 + struct device_node *irq_parent;
835 +- struct fwnode_handle *fwnode;
836 + int ret;
837 +
838 + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
839 +@@ -150,14 +149,12 @@ static int visconti_gpio_probe(struct platform_device *pdev)
840 + }
841 +
842 + parent = irq_find_host(irq_parent);
843 ++ of_node_put(irq_parent);
844 + if (!parent) {
845 + dev_err(dev, "No IRQ parent domain\n");
846 + return -ENODEV;
847 + }
848 +
849 +- fwnode = of_node_to_fwnode(irq_parent);
850 +- of_node_put(irq_parent);
851 +-
852 + ret = bgpio_init(&priv->gpio_chip, dev, 4,
853 + priv->base + GPIO_IDATA,
854 + priv->base + GPIO_OSET,
855 +@@ -180,7 +177,7 @@ static int visconti_gpio_probe(struct platform_device *pdev)
856 +
857 + girq = &priv->gpio_chip.irq;
858 + girq->chip = irq_chip;
859 +- girq->fwnode = fwnode;
860 ++ girq->fwnode = of_node_to_fwnode(dev->of_node);
861 + girq->parent_domain = parent;
862 + girq->child_to_parent_hwirq = visconti_gpio_child_to_parent_hwirq;
863 + girq->populate_parent_alloc_arg = visconti_gpio_populate_parent_fwspec;
864 +diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
865 +index 91dcf2c6cdd84..775a7dadf9a39 100644
866 +--- a/drivers/gpio/gpiolib-of.c
867 ++++ b/drivers/gpio/gpiolib-of.c
868 +@@ -912,7 +912,7 @@ static void of_gpiochip_init_valid_mask(struct gpio_chip *chip)
869 + i, &start);
870 + of_property_read_u32_index(np, "gpio-reserved-ranges",
871 + i + 1, &count);
872 +- if (start >= chip->ngpio || start + count >= chip->ngpio)
873 ++ if (start >= chip->ngpio || start + count > chip->ngpio)
874 + continue;
875 +
876 + bitmap_clear(chip->valid_mask, start, count);
877 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
878 +index 07bc0f5047130..5d065bf7c5146 100644
879 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
880 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
881 +@@ -24,6 +24,7 @@
882 + #include <linux/module.h>
883 +
884 + #include <drm/drm_drv.h>
885 ++#include <xen/xen.h>
886 +
887 + #include "amdgpu.h"
888 + #include "amdgpu_ras.h"
889 +@@ -708,7 +709,8 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
890 + adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
891 +
892 + if (!reg) {
893 +- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
894 ++ /* passthrough mode exclus sriov mod */
895 ++ if (is_virtual_machine() && !xen_initial_domain())
896 + adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
897 + }
898 +
899 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
900 +index 49d5271dcfdc8..bbe94e8729831 100644
901 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
902 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
903 +@@ -4634,7 +4634,7 @@ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video
904 + &dpcd_pattern_type.value,
905 + sizeof(dpcd_pattern_type));
906 +
907 +- channel_count = dpcd_test_mode.bits.channel_count + 1;
908 ++ channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT);
909 +
910 + // read pattern periods for requested channels when sawTooth pattern is requested
911 + if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH ||
912 +diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
913 +index af9c09c308601..1d7f82e6eafea 100644
914 +--- a/drivers/gpu/drm/msm/dp/dp_display.c
915 ++++ b/drivers/gpu/drm/msm/dp/dp_display.c
916 +@@ -551,12 +551,6 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
917 +
918 + mutex_unlock(&dp->event_mutex);
919 +
920 +- /*
921 +- * add fail safe mode outside event_mutex scope
922 +- * to avoid potiential circular lock with drm thread
923 +- */
924 +- dp_panel_add_fail_safe_mode(dp->dp_display.connector);
925 +-
926 + /* uevent will complete connection part */
927 + return 0;
928 + };
929 +diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
930 +index 26c3653c99ec9..26f4b6959c31d 100644
931 +--- a/drivers/gpu/drm/msm/dp/dp_panel.c
932 ++++ b/drivers/gpu/drm/msm/dp/dp_panel.c
933 +@@ -151,15 +151,6 @@ static int dp_panel_update_modes(struct drm_connector *connector,
934 + return rc;
935 + }
936 +
937 +-void dp_panel_add_fail_safe_mode(struct drm_connector *connector)
938 +-{
939 +- /* fail safe edid */
940 +- mutex_lock(&connector->dev->mode_config.mutex);
941 +- if (drm_add_modes_noedid(connector, 640, 480))
942 +- drm_set_preferred_mode(connector, 640, 480);
943 +- mutex_unlock(&connector->dev->mode_config.mutex);
944 +-}
945 +-
946 + int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
947 + struct drm_connector *connector)
948 + {
949 +@@ -215,8 +206,6 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
950 + rc = -ETIMEDOUT;
951 + goto end;
952 + }
953 +-
954 +- dp_panel_add_fail_safe_mode(connector);
955 + }
956 +
957 + if (panel->aux_cfg_update_done) {
958 +diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
959 +index 99739ea679a77..9023e5bb4b8b2 100644
960 +--- a/drivers/gpu/drm/msm/dp/dp_panel.h
961 ++++ b/drivers/gpu/drm/msm/dp/dp_panel.h
962 +@@ -59,7 +59,6 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel);
963 + int dp_panel_deinit(struct dp_panel *dp_panel);
964 + int dp_panel_timing_cfg(struct dp_panel *dp_panel);
965 + void dp_panel_dump_regs(struct dp_panel *dp_panel);
966 +-void dp_panel_add_fail_safe_mode(struct drm_connector *connector);
967 + int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
968 + struct drm_connector *connector);
969 + u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
970 +diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
971 +index fb6d14d213a18..c67cd037a93fd 100644
972 +--- a/drivers/hwmon/adt7470.c
973 ++++ b/drivers/hwmon/adt7470.c
974 +@@ -19,6 +19,7 @@
975 + #include <linux/log2.h>
976 + #include <linux/kthread.h>
977 + #include <linux/regmap.h>
978 ++#include <linux/sched.h>
979 + #include <linux/slab.h>
980 + #include <linux/util_macros.h>
981 +
982 +@@ -294,11 +295,10 @@ static int adt7470_update_thread(void *p)
983 + adt7470_read_temperatures(data);
984 + mutex_unlock(&data->lock);
985 +
986 +- set_current_state(TASK_INTERRUPTIBLE);
987 + if (kthread_should_stop())
988 + break;
989 +
990 +- schedule_timeout(msecs_to_jiffies(data->auto_update_interval));
991 ++ schedule_timeout_interruptible(msecs_to_jiffies(data->auto_update_interval));
992 + }
993 +
994 + return 0;
995 +diff --git a/drivers/hwmon/pmbus/delta-ahe50dc-fan.c b/drivers/hwmon/pmbus/delta-ahe50dc-fan.c
996 +index 40dffd9c4cbfc..f546f0c12497b 100644
997 +--- a/drivers/hwmon/pmbus/delta-ahe50dc-fan.c
998 ++++ b/drivers/hwmon/pmbus/delta-ahe50dc-fan.c
999 +@@ -14,6 +14,21 @@
1000 +
1001 + #define AHE50DC_PMBUS_READ_TEMP4 0xd0
1002 +
1003 ++static int ahe50dc_fan_write_byte(struct i2c_client *client, int page, u8 value)
1004 ++{
1005 ++ /*
1006 ++ * The CLEAR_FAULTS operation seems to sometimes (unpredictably, perhaps
1007 ++ * 5% of the time or so) trigger a problematic phenomenon in which the
1008 ++ * fan speeds surge momentarily and at least some (perhaps all?) of the
1009 ++ * system's power outputs experience a glitch.
1010 ++ *
1011 ++ * However, according to Delta it should be OK to simply not send any
1012 ++ * CLEAR_FAULTS commands (the device doesn't seem to be capable of
1013 ++ * reporting any faults anyway), so just blackhole them unconditionally.
1014 ++ */
1015 ++ return value == PMBUS_CLEAR_FAULTS ? -EOPNOTSUPP : -ENODATA;
1016 ++}
1017 ++
1018 + static int ahe50dc_fan_read_word_data(struct i2c_client *client, int page, int phase, int reg)
1019 + {
1020 + /* temp1 in (virtual) page 1 is remapped to mfr-specific temp4 */
1021 +@@ -68,6 +83,7 @@ static struct pmbus_driver_info ahe50dc_fan_info = {
1022 + PMBUS_HAVE_VIN | PMBUS_HAVE_FAN12 | PMBUS_HAVE_FAN34 |
1023 + PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_STATUS_FAN34 | PMBUS_PAGE_VIRTUAL,
1024 + .func[1] = PMBUS_HAVE_TEMP | PMBUS_PAGE_VIRTUAL,
1025 ++ .write_byte = ahe50dc_fan_write_byte,
1026 + .read_word_data = ahe50dc_fan_read_word_data,
1027 + };
1028 +
1029 +diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
1030 +index ca0bfaf2f6911..5f8f824d997f8 100644
1031 +--- a/drivers/hwmon/pmbus/pmbus_core.c
1032 ++++ b/drivers/hwmon/pmbus/pmbus_core.c
1033 +@@ -2326,6 +2326,9 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
1034 + data->has_status_word = true;
1035 + }
1036 +
1037 ++ /* Make sure PEC is disabled, will be enabled later if needed */
1038 ++ client->flags &= ~I2C_CLIENT_PEC;
1039 ++
1040 + /* Enable PEC if the controller and bus supports it */
1041 + if (!(data->flags & PMBUS_NO_CAPABILITY)) {
1042 + ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
1043 +diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
1044 +index 6dea0a49d1718..082a3ddb0fa3b 100644
1045 +--- a/drivers/infiniband/hw/irdma/cm.c
1046 ++++ b/drivers/infiniband/hw/irdma/cm.c
1047 +@@ -2305,10 +2305,8 @@ err:
1048 + return NULL;
1049 + }
1050 +
1051 +-static void irdma_cm_node_free_cb(struct rcu_head *rcu_head)
1052 ++static void irdma_destroy_connection(struct irdma_cm_node *cm_node)
1053 + {
1054 +- struct irdma_cm_node *cm_node =
1055 +- container_of(rcu_head, struct irdma_cm_node, rcu_head);
1056 + struct irdma_cm_core *cm_core = cm_node->cm_core;
1057 + struct irdma_qp *iwqp;
1058 + struct irdma_cm_info nfo;
1059 +@@ -2356,7 +2354,6 @@ static void irdma_cm_node_free_cb(struct rcu_head *rcu_head)
1060 + }
1061 +
1062 + cm_core->cm_free_ah(cm_node);
1063 +- kfree(cm_node);
1064 + }
1065 +
1066 + /**
1067 +@@ -2384,8 +2381,9 @@ void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node)
1068 +
1069 + spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1070 +
1071 +- /* wait for all list walkers to exit their grace period */
1072 +- call_rcu(&cm_node->rcu_head, irdma_cm_node_free_cb);
1073 ++ irdma_destroy_connection(cm_node);
1074 ++
1075 ++ kfree_rcu(cm_node, rcu_head);
1076 + }
1077 +
1078 + /**
1079 +@@ -3465,12 +3463,6 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
1080 + }
1081 +
1082 + cm_id = iwqp->cm_id;
1083 +- /* make sure we havent already closed this connection */
1084 +- if (!cm_id) {
1085 +- spin_unlock_irqrestore(&iwqp->lock, flags);
1086 +- return;
1087 +- }
1088 +-
1089 + original_hw_tcp_state = iwqp->hw_tcp_state;
1090 + original_ibqp_state = iwqp->ibqp_state;
1091 + last_ae = iwqp->last_aeq;
1092 +@@ -3492,11 +3484,11 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
1093 + disconn_status = -ECONNRESET;
1094 + }
1095 +
1096 +- if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
1097 +- original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
1098 +- last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
1099 +- last_ae == IRDMA_AE_BAD_CLOSE ||
1100 +- last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) {
1101 ++ if (original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
1102 ++ original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
1103 ++ last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
1104 ++ last_ae == IRDMA_AE_BAD_CLOSE ||
1105 ++ last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset || !cm_id) {
1106 + issue_close = 1;
1107 + iwqp->cm_id = NULL;
1108 + qp->term_flags = 0;
1109 +diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
1110 +index e81b74a518dd0..7f72a006367fe 100644
1111 +--- a/drivers/infiniband/hw/irdma/utils.c
1112 ++++ b/drivers/infiniband/hw/irdma/utils.c
1113 +@@ -258,18 +258,16 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event,
1114 + u32 local_ipaddr[4] = {};
1115 + bool ipv4 = true;
1116 +
1117 +- real_dev = rdma_vlan_dev_real_dev(netdev);
1118 +- if (!real_dev)
1119 +- real_dev = netdev;
1120 +-
1121 +- ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
1122 +- if (!ibdev)
1123 +- return NOTIFY_DONE;
1124 +-
1125 +- iwdev = to_iwdev(ibdev);
1126 +-
1127 + switch (event) {
1128 + case NETEVENT_NEIGH_UPDATE:
1129 ++ real_dev = rdma_vlan_dev_real_dev(netdev);
1130 ++ if (!real_dev)
1131 ++ real_dev = netdev;
1132 ++ ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
1133 ++ if (!ibdev)
1134 ++ return NOTIFY_DONE;
1135 ++
1136 ++ iwdev = to_iwdev(ibdev);
1137 + p = (__be32 *)neigh->primary_key;
1138 + if (neigh->tbl->family == AF_INET6) {
1139 + ipv4 = false;
1140 +@@ -290,13 +288,12 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event,
1141 + irdma_manage_arp_cache(iwdev->rf, neigh->ha,
1142 + local_ipaddr, ipv4,
1143 + IRDMA_ARP_DELETE);
1144 ++ ib_device_put(ibdev);
1145 + break;
1146 + default:
1147 + break;
1148 + }
1149 +
1150 +- ib_device_put(ibdev);
1151 +-
1152 + return NOTIFY_DONE;
1153 + }
1154 +
1155 +diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
1156 +index 1bf6404ec8340..c2aa713e0f4d7 100644
1157 +--- a/drivers/infiniband/hw/irdma/verbs.c
1158 ++++ b/drivers/infiniband/hw/irdma/verbs.c
1159 +@@ -1620,13 +1620,13 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1160 +
1161 + if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
1162 + if (dont_wait) {
1163 +- if (iwqp->cm_id && iwqp->hw_tcp_state) {
1164 ++ if (iwqp->hw_tcp_state) {
1165 + spin_lock_irqsave(&iwqp->lock, flags);
1166 + iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
1167 + iwqp->last_aeq = IRDMA_AE_RESET_SENT;
1168 + spin_unlock_irqrestore(&iwqp->lock, flags);
1169 +- irdma_cm_disconn(iwqp);
1170 + }
1171 ++ irdma_cm_disconn(iwqp);
1172 + } else {
1173 + int close_timer_started;
1174 +
1175 +diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
1176 +index 7acdd3c3a599d..17f34d584cd9e 100644
1177 +--- a/drivers/infiniband/sw/siw/siw_cm.c
1178 ++++ b/drivers/infiniband/sw/siw/siw_cm.c
1179 +@@ -968,14 +968,15 @@ static void siw_accept_newconn(struct siw_cep *cep)
1180 +
1181 + siw_cep_set_inuse(new_cep);
1182 + rv = siw_proc_mpareq(new_cep);
1183 +- siw_cep_set_free(new_cep);
1184 +-
1185 + if (rv != -EAGAIN) {
1186 + siw_cep_put(cep);
1187 + new_cep->listen_cep = NULL;
1188 +- if (rv)
1189 ++ if (rv) {
1190 ++ siw_cep_set_free(new_cep);
1191 + goto error;
1192 ++ }
1193 + }
1194 ++ siw_cep_set_free(new_cep);
1195 + }
1196 + return;
1197 +
1198 +diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
1199 +index 565ef55988112..68821f86b063c 100644
1200 +--- a/drivers/iommu/apple-dart.c
1201 ++++ b/drivers/iommu/apple-dart.c
1202 +@@ -782,6 +782,7 @@ static const struct iommu_ops apple_dart_iommu_ops = {
1203 + .get_resv_regions = apple_dart_get_resv_regions,
1204 + .put_resv_regions = generic_iommu_put_resv_regions,
1205 + .pgsize_bitmap = -1UL, /* Restricted during dart probe */
1206 ++ .owner = THIS_MODULE,
1207 + };
1208 +
1209 + static irqreturn_t apple_dart_irq(int irq, void *dev)
1210 +@@ -857,16 +858,15 @@ static int apple_dart_probe(struct platform_device *pdev)
1211 + dart->dev = dev;
1212 + spin_lock_init(&dart->lock);
1213 +
1214 +- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1215 ++ dart->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1216 ++ if (IS_ERR(dart->regs))
1217 ++ return PTR_ERR(dart->regs);
1218 ++
1219 + if (resource_size(res) < 0x4000) {
1220 + dev_err(dev, "MMIO region too small (%pr)\n", res);
1221 + return -EINVAL;
1222 + }
1223 +
1224 +- dart->regs = devm_ioremap_resource(dev, res);
1225 +- if (IS_ERR(dart->regs))
1226 +- return PTR_ERR(dart->regs);
1227 +-
1228 + dart->irq = platform_get_irq(pdev, 0);
1229 + if (dart->irq < 0)
1230 + return -ENODEV;
1231 +diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
1232 +index a737ba5f727e6..f9e9b4fb78bd5 100644
1233 +--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
1234 ++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
1235 +@@ -183,7 +183,14 @@ static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
1236 + {
1237 + struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
1238 + struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
1239 +- size_t size = end - start + 1;
1240 ++ size_t size;
1241 ++
1242 ++ /*
1243 ++ * The mm_types defines vm_end as the first byte after the end address,
1244 ++ * different from IOMMU subsystem using the last address of an address
1245 ++ * range. So do a simple translation here by calculating size correctly.
1246 ++ */
1247 ++ size = end - start;
1248 +
1249 + if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
1250 + arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
1251 +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
1252 +index 5b196cfe9ed23..ab22733003464 100644
1253 +--- a/drivers/iommu/intel/iommu.c
1254 ++++ b/drivers/iommu/intel/iommu.c
1255 +@@ -1717,7 +1717,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1256 + unsigned long pfn, unsigned int pages,
1257 + int ih, int map)
1258 + {
1259 +- unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1260 ++ unsigned int aligned_pages = __roundup_pow_of_two(pages);
1261 ++ unsigned int mask = ilog2(aligned_pages);
1262 + uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1263 + u16 did = domain->iommu_did[iommu->seq_id];
1264 +
1265 +@@ -1729,10 +1730,30 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1266 + if (domain_use_first_level(domain)) {
1267 + domain_flush_piotlb(iommu, domain, addr, pages, ih);
1268 + } else {
1269 ++ unsigned long bitmask = aligned_pages - 1;
1270 ++
1271 ++ /*
1272 ++ * PSI masks the low order bits of the base address. If the
1273 ++ * address isn't aligned to the mask, then compute a mask value
1274 ++ * needed to ensure the target range is flushed.
1275 ++ */
1276 ++ if (unlikely(bitmask & pfn)) {
1277 ++ unsigned long end_pfn = pfn + pages - 1, shared_bits;
1278 ++
1279 ++ /*
1280 ++ * Since end_pfn <= pfn + bitmask, the only way bits
1281 ++ * higher than bitmask can differ in pfn and end_pfn is
1282 ++ * by carrying. This means after masking out bitmask,
1283 ++ * high bits starting with the first set bit in
1284 ++ * shared_bits are all equal in both pfn and end_pfn.
1285 ++ */
1286 ++ shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
1287 ++ mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
1288 ++ }
1289 ++
1290 + /*
1291 + * Fallback to domain selective flush if no PSI support or
1292 +- * the size is too big. PSI requires page size to be 2 ^ x,
1293 +- * and the base address is naturally aligned to the size.
1294 ++ * the size is too big.
1295 + */
1296 + if (!cap_pgsel_inv(iommu->cap) ||
1297 + mask > cap_max_amask_val(iommu->cap))
1298 +diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
1299 +index 5b5d69b04fcc8..06e51f7241877 100644
1300 +--- a/drivers/iommu/intel/svm.c
1301 ++++ b/drivers/iommu/intel/svm.c
1302 +@@ -956,6 +956,10 @@ bad_req:
1303 + goto bad_req;
1304 + }
1305 +
1306 ++ /* Drop Stop Marker message. No need for a response. */
1307 ++ if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
1308 ++ goto prq_advance;
1309 ++
1310 + if (!svm || svm->pasid != req->pasid) {
1311 + /*
1312 + * It can't go away, because the driver is not permitted
1313 +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
1314 +index 43d1b9b2fa499..64f47ec9266a9 100644
1315 +--- a/drivers/mmc/core/mmc.c
1316 ++++ b/drivers/mmc/core/mmc.c
1317 +@@ -1389,13 +1389,17 @@ static int mmc_select_hs400es(struct mmc_card *card)
1318 + goto out_err;
1319 + }
1320 +
1321 ++ /*
1322 ++ * Bump to HS timing and frequency. Some cards don't handle
1323 ++ * SEND_STATUS reliably at the initial frequency.
1324 ++ */
1325 + mmc_set_timing(host, MMC_TIMING_MMC_HS);
1326 ++ mmc_set_bus_speed(card);
1327 ++
1328 + err = mmc_switch_status(card, true);
1329 + if (err)
1330 + goto out_err;
1331 +
1332 +- mmc_set_clock(host, card->ext_csd.hs_max_dtr);
1333 +-
1334 + /* Switch card to DDR with strobe bit */
1335 + val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
1336 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1337 +@@ -1453,7 +1457,7 @@ out_err:
1338 + static int mmc_select_hs200(struct mmc_card *card)
1339 + {
1340 + struct mmc_host *host = card->host;
1341 +- unsigned int old_timing, old_signal_voltage;
1342 ++ unsigned int old_timing, old_signal_voltage, old_clock;
1343 + int err = -EINVAL;
1344 + u8 val;
1345 +
1346 +@@ -1484,8 +1488,17 @@ static int mmc_select_hs200(struct mmc_card *card)
1347 + false, true, MMC_CMD_RETRIES);
1348 + if (err)
1349 + goto err;
1350 ++
1351 ++ /*
1352 ++ * Bump to HS timing and frequency. Some cards don't handle
1353 ++ * SEND_STATUS reliably at the initial frequency.
1354 ++ * NB: We can't move to full (HS200) speeds until after we've
1355 ++ * successfully switched over.
1356 ++ */
1357 + old_timing = host->ios.timing;
1358 ++ old_clock = host->ios.clock;
1359 + mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1360 ++ mmc_set_clock(card->host, card->ext_csd.hs_max_dtr);
1361 +
1362 + /*
1363 + * For HS200, CRC errors are not a reliable way to know the
1364 +@@ -1498,8 +1511,10 @@ static int mmc_select_hs200(struct mmc_card *card)
1365 + * mmc_select_timing() assumes timing has not changed if
1366 + * it is a switch error.
1367 + */
1368 +- if (err == -EBADMSG)
1369 ++ if (err == -EBADMSG) {
1370 ++ mmc_set_clock(host, old_clock);
1371 + mmc_set_timing(host, old_timing);
1372 ++ }
1373 + }
1374 + err:
1375 + if (err) {
1376 +diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
1377 +index f7c384db89bf3..e1580f78c6b2d 100644
1378 +--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
1379 ++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
1380 +@@ -38,10 +38,7 @@ struct realtek_pci_sdmmc {
1381 + bool double_clk;
1382 + bool eject;
1383 + bool initial_mode;
1384 +- int power_state;
1385 +-#define SDMMC_POWER_ON 1
1386 +-#define SDMMC_POWER_OFF 0
1387 +-
1388 ++ int prev_power_state;
1389 + int sg_count;
1390 + s32 cookie;
1391 + int cookie_sg_count;
1392 +@@ -905,7 +902,7 @@ static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
1393 + return err;
1394 + }
1395 +
1396 +-static int sd_power_on(struct realtek_pci_sdmmc *host)
1397 ++static int sd_power_on(struct realtek_pci_sdmmc *host, unsigned char power_mode)
1398 + {
1399 + struct rtsx_pcr *pcr = host->pcr;
1400 + struct mmc_host *mmc = host->mmc;
1401 +@@ -913,9 +910,14 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
1402 + u32 val;
1403 + u8 test_mode;
1404 +
1405 +- if (host->power_state == SDMMC_POWER_ON)
1406 ++ if (host->prev_power_state == MMC_POWER_ON)
1407 + return 0;
1408 +
1409 ++ if (host->prev_power_state == MMC_POWER_UP) {
1410 ++ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, 0);
1411 ++ goto finish;
1412 ++ }
1413 ++
1414 + msleep(100);
1415 +
1416 + rtsx_pci_init_cmd(pcr);
1417 +@@ -936,10 +938,15 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
1418 + if (err < 0)
1419 + return err;
1420 +
1421 ++ mdelay(1);
1422 ++
1423 + err = rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
1424 + if (err < 0)
1425 + return err;
1426 +
1427 ++ /* send at least 74 clocks */
1428 ++ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN);
1429 ++
1430 + if (PCI_PID(pcr) == PID_5261) {
1431 + /*
1432 + * If test mode is set switch to SD Express mandatorily,
1433 +@@ -964,7 +971,8 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
1434 + }
1435 + }
1436 +
1437 +- host->power_state = SDMMC_POWER_ON;
1438 ++finish:
1439 ++ host->prev_power_state = power_mode;
1440 + return 0;
1441 + }
1442 +
1443 +@@ -973,7 +981,7 @@ static int sd_power_off(struct realtek_pci_sdmmc *host)
1444 + struct rtsx_pcr *pcr = host->pcr;
1445 + int err;
1446 +
1447 +- host->power_state = SDMMC_POWER_OFF;
1448 ++ host->prev_power_state = MMC_POWER_OFF;
1449 +
1450 + rtsx_pci_init_cmd(pcr);
1451 +
1452 +@@ -999,7 +1007,7 @@ static int sd_set_power_mode(struct realtek_pci_sdmmc *host,
1453 + if (power_mode == MMC_POWER_OFF)
1454 + err = sd_power_off(host);
1455 + else
1456 +- err = sd_power_on(host);
1457 ++ err = sd_power_on(host, power_mode);
1458 +
1459 + return err;
1460 + }
1461 +@@ -1482,10 +1490,11 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
1462 +
1463 + host = mmc_priv(mmc);
1464 + host->pcr = pcr;
1465 ++ mmc->ios.power_delay_ms = 5;
1466 + host->mmc = mmc;
1467 + host->pdev = pdev;
1468 + host->cookie = -1;
1469 +- host->power_state = SDMMC_POWER_OFF;
1470 ++ host->prev_power_state = MMC_POWER_OFF;
1471 + INIT_WORK(&host->work, sd_request);
1472 + platform_set_drvdata(pdev, host);
1473 + pcr->slots[RTSX_SD_CARD].p_dev = pdev;
1474 +diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
1475 +index 50c71e0ba5e4e..ff9f5b63c337e 100644
1476 +--- a/drivers/mmc/host/sdhci-msm.c
1477 ++++ b/drivers/mmc/host/sdhci-msm.c
1478 +@@ -17,6 +17,7 @@
1479 + #include <linux/regulator/consumer.h>
1480 + #include <linux/interconnect.h>
1481 + #include <linux/pinctrl/consumer.h>
1482 ++#include <linux/reset.h>
1483 +
1484 + #include "sdhci-pltfm.h"
1485 + #include "cqhci.h"
1486 +@@ -2482,6 +2483,43 @@ static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
1487 + of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
1488 + }
1489 +
1490 ++static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host)
1491 ++{
1492 ++ struct reset_control *reset;
1493 ++ int ret = 0;
1494 ++
1495 ++ reset = reset_control_get_optional_exclusive(dev, NULL);
1496 ++ if (IS_ERR(reset))
1497 ++ return dev_err_probe(dev, PTR_ERR(reset),
1498 ++ "unable to acquire core_reset\n");
1499 ++
1500 ++ if (!reset)
1501 ++ return ret;
1502 ++
1503 ++ ret = reset_control_assert(reset);
1504 ++ if (ret) {
1505 ++ reset_control_put(reset);
1506 ++ return dev_err_probe(dev, ret, "core_reset assert failed\n");
1507 ++ }
1508 ++
1509 ++ /*
1510 ++ * The hardware requirement for delay between assert/deassert
1511 ++ * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
1512 ++ * ~125us (4/32768). To be on the safe side add 200us delay.
1513 ++ */
1514 ++ usleep_range(200, 210);
1515 ++
1516 ++ ret = reset_control_deassert(reset);
1517 ++ if (ret) {
1518 ++ reset_control_put(reset);
1519 ++ return dev_err_probe(dev, ret, "core_reset deassert failed\n");
1520 ++ }
1521 ++
1522 ++ usleep_range(200, 210);
1523 ++ reset_control_put(reset);
1524 ++
1525 ++ return ret;
1526 ++}
1527 +
1528 + static int sdhci_msm_probe(struct platform_device *pdev)
1529 + {
1530 +@@ -2529,6 +2567,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
1531 +
1532 + msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
1533 +
1534 ++ ret = sdhci_msm_gcc_reset(&pdev->dev, host);
1535 ++ if (ret)
1536 ++ goto pltfm_free;
1537 ++
1538 + /* Setup SDCC bus voter clock. */
1539 + msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
1540 + if (!IS_ERR(msm_host->bus_clk)) {
1541 +diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
1542 +index 2702736a1c57d..ce6cb8be654ef 100644
1543 +--- a/drivers/mmc/host/sunxi-mmc.c
1544 ++++ b/drivers/mmc/host/sunxi-mmc.c
1545 +@@ -377,8 +377,9 @@ static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host,
1546 + pdes[i].buf_addr_ptr1 =
1547 + cpu_to_le32(sg_dma_address(&data->sg[i]) >>
1548 + host->cfg->idma_des_shift);
1549 +- pdes[i].buf_addr_ptr2 = cpu_to_le32((u32)next_desc >>
1550 +- host->cfg->idma_des_shift);
1551 ++ pdes[i].buf_addr_ptr2 =
1552 ++ cpu_to_le32(next_desc >>
1553 ++ host->cfg->idma_des_shift);
1554 + }
1555 +
1556 + pdes[0].config |= cpu_to_le32(SDXC_IDMAC_DES0_FD);
1557 +diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
1558 +index d0c5a7a60dafb..5215bd9b2c80d 100644
1559 +--- a/drivers/net/can/grcan.c
1560 ++++ b/drivers/net/can/grcan.c
1561 +@@ -241,13 +241,14 @@ struct grcan_device_config {
1562 + .rxsize = GRCAN_DEFAULT_BUFFER_SIZE, \
1563 + }
1564 +
1565 +-#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 0x4100
1566 ++#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 4100
1567 + #define GRLIB_VERSION_MASK 0xffff
1568 +
1569 + /* GRCAN private data structure */
1570 + struct grcan_priv {
1571 + struct can_priv can; /* must be the first member */
1572 + struct net_device *dev;
1573 ++ struct device *ofdev_dev;
1574 + struct napi_struct napi;
1575 +
1576 + struct grcan_registers __iomem *regs; /* ioremap'ed registers */
1577 +@@ -921,7 +922,7 @@ static void grcan_free_dma_buffers(struct net_device *dev)
1578 + struct grcan_priv *priv = netdev_priv(dev);
1579 + struct grcan_dma *dma = &priv->dma;
1580 +
1581 +- dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf,
1582 ++ dma_free_coherent(priv->ofdev_dev, dma->base_size, dma->base_buf,
1583 + dma->base_handle);
1584 + memset(dma, 0, sizeof(*dma));
1585 + }
1586 +@@ -946,7 +947,7 @@ static int grcan_allocate_dma_buffers(struct net_device *dev,
1587 +
1588 + /* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */
1589 + dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT;
1590 +- dma->base_buf = dma_alloc_coherent(&dev->dev,
1591 ++ dma->base_buf = dma_alloc_coherent(priv->ofdev_dev,
1592 + dma->base_size,
1593 + &dma->base_handle,
1594 + GFP_KERNEL);
1595 +@@ -1102,8 +1103,10 @@ static int grcan_close(struct net_device *dev)
1596 +
1597 + priv->closing = true;
1598 + if (priv->need_txbug_workaround) {
1599 ++ spin_unlock_irqrestore(&priv->lock, flags);
1600 + del_timer_sync(&priv->hang_timer);
1601 + del_timer_sync(&priv->rr_timer);
1602 ++ spin_lock_irqsave(&priv->lock, flags);
1603 + }
1604 + netif_stop_queue(dev);
1605 + grcan_stop_hardware(dev);
1606 +@@ -1122,7 +1125,7 @@ static int grcan_close(struct net_device *dev)
1607 + return 0;
1608 + }
1609 +
1610 +-static int grcan_transmit_catch_up(struct net_device *dev, int budget)
1611 ++static void grcan_transmit_catch_up(struct net_device *dev)
1612 + {
1613 + struct grcan_priv *priv = netdev_priv(dev);
1614 + unsigned long flags;
1615 +@@ -1130,7 +1133,7 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget)
1616 +
1617 + spin_lock_irqsave(&priv->lock, flags);
1618 +
1619 +- work_done = catch_up_echo_skb(dev, budget, true);
1620 ++ work_done = catch_up_echo_skb(dev, -1, true);
1621 + if (work_done) {
1622 + if (!priv->resetting && !priv->closing &&
1623 + !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
1624 +@@ -1144,8 +1147,6 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget)
1625 + }
1626 +
1627 + spin_unlock_irqrestore(&priv->lock, flags);
1628 +-
1629 +- return work_done;
1630 + }
1631 +
1632 + static int grcan_receive(struct net_device *dev, int budget)
1633 +@@ -1227,19 +1228,13 @@ static int grcan_poll(struct napi_struct *napi, int budget)
1634 + struct net_device *dev = priv->dev;
1635 + struct grcan_registers __iomem *regs = priv->regs;
1636 + unsigned long flags;
1637 +- int tx_work_done, rx_work_done;
1638 +- int rx_budget = budget / 2;
1639 +- int tx_budget = budget - rx_budget;
1640 ++ int work_done;
1641 +
1642 +- /* Half of the budget for receiving messages */
1643 +- rx_work_done = grcan_receive(dev, rx_budget);
1644 ++ work_done = grcan_receive(dev, budget);
1645 +
1646 +- /* Half of the budget for transmitting messages as that can trigger echo
1647 +- * frames being received
1648 +- */
1649 +- tx_work_done = grcan_transmit_catch_up(dev, tx_budget);
1650 ++ grcan_transmit_catch_up(dev);
1651 +
1652 +- if (rx_work_done < rx_budget && tx_work_done < tx_budget) {
1653 ++ if (work_done < budget) {
1654 + napi_complete(napi);
1655 +
1656 + /* Guarantee no interference with a running reset that otherwise
1657 +@@ -1256,7 +1251,7 @@ static int grcan_poll(struct napi_struct *napi, int budget)
1658 + spin_unlock_irqrestore(&priv->lock, flags);
1659 + }
1660 +
1661 +- return rx_work_done + tx_work_done;
1662 ++ return work_done;
1663 + }
1664 +
1665 + /* Work tx bug by waiting while for the risky situation to clear. If that fails,
1666 +@@ -1587,6 +1582,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
1667 + memcpy(&priv->config, &grcan_module_config,
1668 + sizeof(struct grcan_device_config));
1669 + priv->dev = dev;
1670 ++ priv->ofdev_dev = &ofdev->dev;
1671 + priv->regs = base;
1672 + priv->can.bittiming_const = &grcan_bittiming_const;
1673 + priv->can.do_set_bittiming = grcan_set_bittiming;
1674 +@@ -1639,6 +1635,7 @@ exit_free_candev:
1675 + static int grcan_probe(struct platform_device *ofdev)
1676 + {
1677 + struct device_node *np = ofdev->dev.of_node;
1678 ++ struct device_node *sysid_parent;
1679 + u32 sysid, ambafreq;
1680 + int irq, err;
1681 + void __iomem *base;
1682 +@@ -1647,10 +1644,15 @@ static int grcan_probe(struct platform_device *ofdev)
1683 + /* Compare GRLIB version number with the first that does not
1684 + * have the tx bug (see start_xmit)
1685 + */
1686 +- err = of_property_read_u32(np, "systemid", &sysid);
1687 +- if (!err && ((sysid & GRLIB_VERSION_MASK)
1688 +- >= GRCAN_TXBUG_SAFE_GRLIB_VERSION))
1689 +- txbug = false;
1690 ++ sysid_parent = of_find_node_by_path("/ambapp0");
1691 ++ if (sysid_parent) {
1692 ++ of_node_get(sysid_parent);
1693 ++ err = of_property_read_u32(sysid_parent, "systemid", &sysid);
1694 ++ if (!err && ((sysid & GRLIB_VERSION_MASK) >=
1695 ++ GRCAN_TXBUG_SAFE_GRLIB_VERSION))
1696 ++ txbug = false;
1697 ++ of_node_put(sysid_parent);
1698 ++ }
1699 +
1700 + err = of_property_read_u32(np, "freq", &ambafreq);
1701 + if (err) {
1702 +diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
1703 +index a251bc55727ff..fcdd022b24986 100644
1704 +--- a/drivers/net/dsa/mt7530.c
1705 ++++ b/drivers/net/dsa/mt7530.c
1706 +@@ -2224,6 +2224,7 @@ mt7530_setup(struct dsa_switch *ds)
1707 + ret = of_get_phy_mode(mac_np, &interface);
1708 + if (ret && ret != -ENODEV) {
1709 + of_node_put(mac_np);
1710 ++ of_node_put(phy_node);
1711 + return ret;
1712 + }
1713 + id = of_mdio_parse_addr(ds->dev, phy_node);
1714 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1715 +index 6af0ae1d0c462..9167517de3d97 100644
1716 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1717 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1718 +@@ -2678,6 +2678,10 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
1719 + u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
1720 + struct bnxt_cp_ring_info *cpr2;
1721 +
1722 ++ /* No more budget for RX work */
1723 ++ if (budget && work_done >= budget && idx == BNXT_RX_HDL)
1724 ++ break;
1725 ++
1726 + cpr2 = cpr->cp_ring_arr[idx];
1727 + work_done += __bnxt_poll_work(bp, cpr2,
1728 + budget - work_done);
1729 +@@ -10938,7 +10942,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
1730 +
1731 + if (bp->flags & BNXT_FLAG_CHIP_P5)
1732 + return bnxt_rfs_supported(bp);
1733 +- if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
1734 ++ if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
1735 + return false;
1736 +
1737 + vnics = 1 + bp->rx_nr_rings;
1738 +@@ -13194,10 +13198,9 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
1739 + goto init_dflt_ring_err;
1740 +
1741 + bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
1742 +- if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
1743 +- bp->flags |= BNXT_FLAG_RFS;
1744 +- bp->dev->features |= NETIF_F_NTUPLE;
1745 +- }
1746 ++
1747 ++ bnxt_set_dflt_rfs(bp);
1748 ++
1749 + init_dflt_ring_err:
1750 + bnxt_ulp_irq_restart(bp, rc);
1751 + return rc;
1752 +diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
1753 +index f2f1ce81fd9cc..0ec65ec634df5 100644
1754 +--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
1755 ++++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
1756 +@@ -59,7 +59,7 @@ struct nicpf {
1757 +
1758 + /* MSI-X */
1759 + u8 num_vec;
1760 +- bool irq_allocated[NIC_PF_MSIX_VECTORS];
1761 ++ unsigned int irq_allocated[NIC_PF_MSIX_VECTORS];
1762 + char irq_name[NIC_PF_MSIX_VECTORS][20];
1763 + };
1764 +
1765 +@@ -1150,7 +1150,7 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
1766 + u64 intr;
1767 + u8 vf;
1768 +
1769 +- if (irq == pci_irq_vector(nic->pdev, NIC_PF_INTR_ID_MBOX0))
1770 ++ if (irq == nic->irq_allocated[NIC_PF_INTR_ID_MBOX0])
1771 + mbx = 0;
1772 + else
1773 + mbx = 1;
1774 +@@ -1176,14 +1176,14 @@ static void nic_free_all_interrupts(struct nicpf *nic)
1775 +
1776 + for (irq = 0; irq < nic->num_vec; irq++) {
1777 + if (nic->irq_allocated[irq])
1778 +- free_irq(pci_irq_vector(nic->pdev, irq), nic);
1779 +- nic->irq_allocated[irq] = false;
1780 ++ free_irq(nic->irq_allocated[irq], nic);
1781 ++ nic->irq_allocated[irq] = 0;
1782 + }
1783 + }
1784 +
1785 + static int nic_register_interrupts(struct nicpf *nic)
1786 + {
1787 +- int i, ret;
1788 ++ int i, ret, irq;
1789 + nic->num_vec = pci_msix_vec_count(nic->pdev);
1790 +
1791 + /* Enable MSI-X */
1792 +@@ -1201,13 +1201,13 @@ static int nic_register_interrupts(struct nicpf *nic)
1793 + sprintf(nic->irq_name[i],
1794 + "NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0));
1795 +
1796 +- ret = request_irq(pci_irq_vector(nic->pdev, i),
1797 +- nic_mbx_intr_handler, 0,
1798 ++ irq = pci_irq_vector(nic->pdev, i);
1799 ++ ret = request_irq(irq, nic_mbx_intr_handler, 0,
1800 + nic->irq_name[i], nic);
1801 + if (ret)
1802 + goto fail;
1803 +
1804 +- nic->irq_allocated[i] = true;
1805 ++ nic->irq_allocated[i] = irq;
1806 + }
1807 +
1808 + /* Enable mailbox interrupt */
1809 +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
1810 +index 2d9b06d7caadb..f7dc7d825f637 100644
1811 +--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
1812 ++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
1813 +@@ -771,7 +771,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
1814 + /* If we only have one page, still need to get shadown wqe when
1815 + * wqe rolling-over page
1816 + */
1817 +- if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) {
1818 ++ if (curr_pg != end_pg || end_prod_idx < *prod_idx) {
1819 + void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
1820 +
1821 + copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);
1822 +@@ -841,7 +841,10 @@ struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
1823 +
1824 + *cons_idx = curr_cons_idx;
1825 +
1826 +- if (curr_pg != end_pg) {
1827 ++ /* If we only have one page, still need to get shadown wqe when
1828 ++ * wqe rolling-over page
1829 ++ */
1830 ++ if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) {
1831 + void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
1832 +
1833 + copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
1834 +diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
1835 +index 32d83421226a2..5897940a418b6 100644
1836 +--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
1837 ++++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
1838 +@@ -26,6 +26,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
1839 + break;
1840 +
1841 + ss->regmap[i] = syscon_node_to_regmap(np);
1842 ++ of_node_put(np);
1843 + if (IS_ERR(ss->regmap[i]))
1844 + return PTR_ERR(ss->regmap[i]);
1845 + }
1846 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
1847 +index 538adab6878b5..c5b560a8b026e 100644
1848 +--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
1849 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
1850 +@@ -31,6 +31,7 @@ static const char *const mlx5_rsc_sgmt_name[] = {
1851 + struct mlx5_rsc_dump {
1852 + u32 pdn;
1853 + u32 mkey;
1854 ++ u32 number_of_menu_items;
1855 + u16 fw_segment_type[MLX5_SGMT_TYPE_NUM];
1856 + };
1857 +
1858 +@@ -50,21 +51,37 @@ static int mlx5_rsc_dump_sgmt_get_by_name(char *name)
1859 + return -EINVAL;
1860 + }
1861 +
1862 +-static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page)
1863 ++#define MLX5_RSC_DUMP_MENU_HEADER_SIZE (MLX5_ST_SZ_BYTES(resource_dump_info_segment) + \
1864 ++ MLX5_ST_SZ_BYTES(resource_dump_command_segment) + \
1865 ++ MLX5_ST_SZ_BYTES(resource_dump_menu_segment))
1866 ++
1867 ++static int mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page,
1868 ++ int read_size, int start_idx)
1869 + {
1870 + void *data = page_address(page);
1871 + enum mlx5_sgmt_type sgmt_idx;
1872 + int num_of_items;
1873 + char *sgmt_name;
1874 + void *member;
1875 ++ int size = 0;
1876 + void *menu;
1877 + int i;
1878 +
1879 +- menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
1880 +- num_of_items = MLX5_GET(resource_dump_menu_segment, menu, num_of_records);
1881 ++ if (!start_idx) {
1882 ++ menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
1883 ++ rsc_dump->number_of_menu_items = MLX5_GET(resource_dump_menu_segment, menu,
1884 ++ num_of_records);
1885 ++ size = MLX5_RSC_DUMP_MENU_HEADER_SIZE;
1886 ++ data += size;
1887 ++ }
1888 ++ num_of_items = rsc_dump->number_of_menu_items;
1889 ++
1890 ++ for (i = 0; start_idx + i < num_of_items; i++) {
1891 ++ size += MLX5_ST_SZ_BYTES(resource_dump_menu_record);
1892 ++ if (size >= read_size)
1893 ++ return start_idx + i;
1894 +
1895 +- for (i = 0; i < num_of_items; i++) {
1896 +- member = MLX5_ADDR_OF(resource_dump_menu_segment, menu, record[i]);
1897 ++ member = data + MLX5_ST_SZ_BYTES(resource_dump_menu_record) * i;
1898 + sgmt_name = MLX5_ADDR_OF(resource_dump_menu_record, member, segment_name);
1899 + sgmt_idx = mlx5_rsc_dump_sgmt_get_by_name(sgmt_name);
1900 + if (sgmt_idx == -EINVAL)
1901 +@@ -72,6 +89,7 @@ static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct
1902 + rsc_dump->fw_segment_type[sgmt_idx] = MLX5_GET(resource_dump_menu_record,
1903 + member, segment_type);
1904 + }
1905 ++ return 0;
1906 + }
1907 +
1908 + static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
1909 +@@ -168,6 +186,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
1910 + struct mlx5_rsc_dump_cmd *cmd = NULL;
1911 + struct mlx5_rsc_key key = {};
1912 + struct page *page;
1913 ++ int start_idx = 0;
1914 + int size;
1915 + int err;
1916 +
1917 +@@ -189,7 +208,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
1918 + if (err < 0)
1919 + goto destroy_cmd;
1920 +
1921 +- mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page);
1922 ++ start_idx = mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page, size, start_idx);
1923 +
1924 + } while (err > 0);
1925 +
1926 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
1927 +index 673f1c82d3815..c9d5d8d93994d 100644
1928 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
1929 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
1930 +@@ -309,8 +309,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
1931 + if (err)
1932 + return err;
1933 +
1934 +- err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz,
1935 +- xoff, &port_buffer, &update_buffer);
1936 ++ err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff,
1937 ++ port_buff_cell_sz, &port_buffer, &update_buffer);
1938 + if (err)
1939 + return err;
1940 + }
1941 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1942 +index 4a0d38d219edc..9028e9958c72d 100644
1943 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1944 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1945 +@@ -1739,6 +1739,8 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
1946 + static void
1947 + mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
1948 + {
1949 ++ struct mlx5e_priv *priv;
1950 ++
1951 + if (!refcount_dec_and_test(&ft->refcount))
1952 + return;
1953 +
1954 +@@ -1748,6 +1750,8 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
1955 + rhashtable_free_and_destroy(&ft->ct_entries_ht,
1956 + mlx5_tc_ct_flush_ft_entry,
1957 + ct_priv);
1958 ++ priv = netdev_priv(ct_priv->netdev);
1959 ++ flush_workqueue(priv->wq);
1960 + mlx5_tc_ct_free_pre_ct_tables(ft);
1961 + mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
1962 + kfree(ft);
1963 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
1964 +index 378fc8e3bd975..d87bbb0be7c86 100644
1965 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
1966 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
1967 +@@ -713,6 +713,7 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
1968 + struct net_device *filter_dev)
1969 + {
1970 + struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr;
1971 ++ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1972 + struct mlx5e_tc_int_port *int_port;
1973 + TC_TUN_ROUTE_ATTR_INIT(attr);
1974 + u16 vport_num;
1975 +@@ -747,7 +748,7 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
1976 + esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value,
1977 + misc_parameters.vxlan_vni);
1978 + esw_attr->rx_tun_attr->decap_vport = vport_num;
1979 +- } else if (netif_is_ovs_master(attr.route_dev)) {
1980 ++ } else if (netif_is_ovs_master(attr.route_dev) && mlx5e_tc_int_port_supported(esw)) {
1981 + int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
1982 + attr.route_dev->ifindex,
1983 + MLX5E_TC_INT_PORT_INGRESS);
1984 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
1985 +index a4c8d8d00d5a4..72e08559e0d05 100644
1986 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
1987 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
1988 +@@ -1198,6 +1198,16 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
1989 + if (err)
1990 + return err;
1991 +
1992 ++ if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) {
1993 ++ /*
1994 ++ * Align the driver state with the register state.
1995 ++ * Temporary state change is required to enable the app list reset.
1996 ++ */
1997 ++ priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP;
1998 ++ mlx5e_dcbnl_delete_app(priv);
1999 ++ priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
2000 ++ }
2001 ++
2002 + mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
2003 + priv->dcbx_dp.trust_state);
2004 +
2005 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2006 +index 7e5c00349ccf9..e0f45cef97c34 100644
2007 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2008 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2009 +@@ -2355,6 +2355,17 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
2010 + match.key->vlan_priority);
2011 +
2012 + *match_level = MLX5_MATCH_L2;
2013 ++
2014 ++ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) &&
2015 ++ match.mask->vlan_eth_type &&
2016 ++ MLX5_CAP_FLOWTABLE_TYPE(priv->mdev,
2017 ++ ft_field_support.outer_second_vid,
2018 ++ fs_type)) {
2019 ++ MLX5_SET(fte_match_set_misc, misc_c,
2020 ++ outer_second_cvlan_tag, 1);
2021 ++ spec->match_criteria_enable |=
2022 ++ MLX5_MATCH_MISC_PARAMETERS;
2023 ++ }
2024 + }
2025 + } else if (*match_level != MLX5_MATCH_NONE) {
2026 + /* cvlan_tag enabled in match criteria and
2027 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2028 +index e7e7b4b0dcdb5..cebfa8565c9d9 100644
2029 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2030 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2031 +@@ -139,7 +139,7 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
2032 + if (mlx5_esw_indir_table_decap_vport(attr))
2033 + vport = mlx5_esw_indir_table_decap_vport(attr);
2034 +
2035 +- if (esw_attr->int_port)
2036 ++ if (attr && !attr->chain && esw_attr->int_port)
2037 + metadata =
2038 + mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
2039 + else
2040 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
2041 +index 84dbe46d5ede6..862f5b7cb2106 100644
2042 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
2043 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
2044 +@@ -112,6 +112,28 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
2045 + }
2046 + }
2047 +
2048 ++static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
2049 ++{
2050 ++ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
2051 ++
2052 ++ del_timer_sync(&fw_reset->timer);
2053 ++}
2054 ++
2055 ++static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
2056 ++{
2057 ++ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
2058 ++
2059 ++ if (!test_and_clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) {
2060 ++ mlx5_core_warn(dev, "Reset request was already cleared\n");
2061 ++ return -EALREADY;
2062 ++ }
2063 ++
2064 ++ mlx5_stop_sync_reset_poll(dev);
2065 ++ if (poll_health)
2066 ++ mlx5_start_health_poll(dev);
2067 ++ return 0;
2068 ++}
2069 ++
2070 + static void mlx5_sync_reset_reload_work(struct work_struct *work)
2071 + {
2072 + struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
2073 +@@ -119,6 +141,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
2074 + struct mlx5_core_dev *dev = fw_reset->dev;
2075 + int err;
2076 +
2077 ++ mlx5_sync_reset_clear_reset_requested(dev, false);
2078 + mlx5_enter_error_state(dev, true);
2079 + mlx5_unload_one(dev);
2080 + err = mlx5_health_wait_pci_up(dev);
2081 +@@ -128,23 +151,6 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
2082 + mlx5_fw_reset_complete_reload(dev);
2083 + }
2084 +
2085 +-static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
2086 +-{
2087 +- struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
2088 +-
2089 +- del_timer_sync(&fw_reset->timer);
2090 +-}
2091 +-
2092 +-static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
2093 +-{
2094 +- struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
2095 +-
2096 +- mlx5_stop_sync_reset_poll(dev);
2097 +- clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
2098 +- if (poll_health)
2099 +- mlx5_start_health_poll(dev);
2100 +-}
2101 +-
2102 + #define MLX5_RESET_POLL_INTERVAL (HZ / 10)
2103 + static void poll_sync_reset(struct timer_list *t)
2104 + {
2105 +@@ -159,7 +165,6 @@ static void poll_sync_reset(struct timer_list *t)
2106 +
2107 + if (fatal_error) {
2108 + mlx5_core_warn(dev, "Got Device Reset\n");
2109 +- mlx5_sync_reset_clear_reset_requested(dev, false);
2110 + queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
2111 + return;
2112 + }
2113 +@@ -186,13 +191,17 @@ static int mlx5_fw_reset_set_reset_sync_nack(struct mlx5_core_dev *dev)
2114 + return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, 0, 2, false);
2115 + }
2116 +
2117 +-static void mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
2118 ++static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
2119 + {
2120 + struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
2121 +
2122 ++ if (test_and_set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) {
2123 ++ mlx5_core_warn(dev, "Reset request was already set\n");
2124 ++ return -EALREADY;
2125 ++ }
2126 + mlx5_stop_health_poll(dev, true);
2127 +- set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
2128 + mlx5_start_sync_reset_poll(dev);
2129 ++ return 0;
2130 + }
2131 +
2132 + static void mlx5_fw_live_patch_event(struct work_struct *work)
2133 +@@ -221,7 +230,9 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
2134 + err ? "Failed" : "Sent");
2135 + return;
2136 + }
2137 +- mlx5_sync_reset_set_reset_requested(dev);
2138 ++ if (mlx5_sync_reset_set_reset_requested(dev))
2139 ++ return;
2140 ++
2141 + err = mlx5_fw_reset_set_reset_sync_ack(dev);
2142 + if (err)
2143 + mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack Failed. Error code: %d\n", err);
2144 +@@ -319,7 +330,8 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
2145 + struct mlx5_core_dev *dev = fw_reset->dev;
2146 + int err;
2147 +
2148 +- mlx5_sync_reset_clear_reset_requested(dev, false);
2149 ++ if (mlx5_sync_reset_clear_reset_requested(dev, false))
2150 ++ return;
2151 +
2152 + mlx5_core_warn(dev, "Sync Reset now. Device is going to reset.\n");
2153 +
2154 +@@ -348,10 +360,8 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
2155 + reset_abort_work);
2156 + struct mlx5_core_dev *dev = fw_reset->dev;
2157 +
2158 +- if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
2159 ++ if (mlx5_sync_reset_clear_reset_requested(dev, true))
2160 + return;
2161 +-
2162 +- mlx5_sync_reset_clear_reset_requested(dev, true);
2163 + mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
2164 + }
2165 +
2166 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
2167 +index 626aa60b6099b..7da710951572d 100644
2168 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
2169 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
2170 +@@ -100,6 +100,14 @@ static void mlx5_lag_fib_event_flush(struct notifier_block *nb)
2171 + flush_workqueue(mp->wq);
2172 + }
2173 +
2174 ++static void mlx5_lag_fib_set(struct lag_mp *mp, struct fib_info *fi, u32 dst, int dst_len)
2175 ++{
2176 ++ mp->fib.mfi = fi;
2177 ++ mp->fib.priority = fi->fib_priority;
2178 ++ mp->fib.dst = dst;
2179 ++ mp->fib.dst_len = dst_len;
2180 ++}
2181 ++
2182 + struct mlx5_fib_event_work {
2183 + struct work_struct work;
2184 + struct mlx5_lag *ldev;
2185 +@@ -110,10 +118,10 @@ struct mlx5_fib_event_work {
2186 + };
2187 + };
2188 +
2189 +-static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
2190 +- unsigned long event,
2191 +- struct fib_info *fi)
2192 ++static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
2193 ++ struct fib_entry_notifier_info *fen_info)
2194 + {
2195 ++ struct fib_info *fi = fen_info->fi;
2196 + struct lag_mp *mp = &ldev->lag_mp;
2197 + struct fib_nh *fib_nh0, *fib_nh1;
2198 + unsigned int nhs;
2199 +@@ -121,13 +129,15 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
2200 + /* Handle delete event */
2201 + if (event == FIB_EVENT_ENTRY_DEL) {
2202 + /* stop track */
2203 +- if (mp->mfi == fi)
2204 +- mp->mfi = NULL;
2205 ++ if (mp->fib.mfi == fi)
2206 ++ mp->fib.mfi = NULL;
2207 + return;
2208 + }
2209 +
2210 + /* Handle multipath entry with lower priority value */
2211 +- if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority)
2212 ++ if (mp->fib.mfi && mp->fib.mfi != fi &&
2213 ++ (mp->fib.dst != fen_info->dst || mp->fib.dst_len != fen_info->dst_len) &&
2214 ++ fi->fib_priority >= mp->fib.priority)
2215 + return;
2216 +
2217 + /* Handle add/replace event */
2218 +@@ -143,9 +153,9 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
2219 +
2220 + i++;
2221 + mlx5_lag_set_port_affinity(ldev, i);
2222 ++ mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len);
2223 + }
2224 +
2225 +- mp->mfi = fi;
2226 + return;
2227 + }
2228 +
2229 +@@ -165,7 +175,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
2230 + }
2231 +
2232 + /* First time we see multipath route */
2233 +- if (!mp->mfi && !__mlx5_lag_is_active(ldev)) {
2234 ++ if (!mp->fib.mfi && !__mlx5_lag_is_active(ldev)) {
2235 + struct lag_tracker tracker;
2236 +
2237 + tracker = ldev->tracker;
2238 +@@ -173,7 +183,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
2239 + }
2240 +
2241 + mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY);
2242 +- mp->mfi = fi;
2243 ++ mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len);
2244 + }
2245 +
2246 + static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
2247 +@@ -184,7 +194,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
2248 + struct lag_mp *mp = &ldev->lag_mp;
2249 +
2250 + /* Check the nh event is related to the route */
2251 +- if (!mp->mfi || mp->mfi != fi)
2252 ++ if (!mp->fib.mfi || mp->fib.mfi != fi)
2253 + return;
2254 +
2255 + /* nh added/removed */
2256 +@@ -214,7 +224,7 @@ static void mlx5_lag_fib_update(struct work_struct *work)
2257 + case FIB_EVENT_ENTRY_REPLACE:
2258 + case FIB_EVENT_ENTRY_DEL:
2259 + mlx5_lag_fib_route_event(ldev, fib_work->event,
2260 +- fib_work->fen_info.fi);
2261 ++ &fib_work->fen_info);
2262 + fib_info_put(fib_work->fen_info.fi);
2263 + break;
2264 + case FIB_EVENT_NH_ADD:
2265 +@@ -313,7 +323,7 @@ void mlx5_lag_mp_reset(struct mlx5_lag *ldev)
2266 + /* Clear mfi, as it might become stale when a route delete event
2267 + * has been missed, see mlx5_lag_fib_route_event().
2268 + */
2269 +- ldev->lag_mp.mfi = NULL;
2270 ++ ldev->lag_mp.fib.mfi = NULL;
2271 + }
2272 +
2273 + int mlx5_lag_mp_init(struct mlx5_lag *ldev)
2274 +@@ -324,7 +334,7 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
2275 + /* always clear mfi, as it might become stale when a route delete event
2276 + * has been missed
2277 + */
2278 +- mp->mfi = NULL;
2279 ++ mp->fib.mfi = NULL;
2280 +
2281 + if (mp->fib_nb.notifier_call)
2282 + return 0;
2283 +@@ -354,5 +364,5 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
2284 + unregister_fib_notifier(&init_net, &mp->fib_nb);
2285 + destroy_workqueue(mp->wq);
2286 + mp->fib_nb.notifier_call = NULL;
2287 +- mp->mfi = NULL;
2288 ++ mp->fib.mfi = NULL;
2289 + }
2290 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
2291 +index 57af962cad298..056a066da604b 100644
2292 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
2293 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
2294 +@@ -15,7 +15,12 @@ enum mlx5_lag_port_affinity {
2295 +
2296 + struct lag_mp {
2297 + struct notifier_block fib_nb;
2298 +- struct fib_info *mfi; /* used in tracking fib events */
2299 ++ struct {
2300 ++ const void *mfi; /* used in tracking fib events */
2301 ++ u32 priority;
2302 ++ u32 dst;
2303 ++ int dst_len;
2304 ++ } fib;
2305 + struct workqueue_struct *wq;
2306 + };
2307 +
2308 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
2309 +index a6592f9c3c05f..5be322528279a 100644
2310 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
2311 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
2312 +@@ -505,7 +505,7 @@ static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
2313 + struct ttc_params ttc_params = {};
2314 +
2315 + mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
2316 +- port_sel->inner.ttc = mlx5_create_ttc_table(dev, &ttc_params);
2317 ++ port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
2318 + if (IS_ERR(port_sel->inner.ttc))
2319 + return PTR_ERR(port_sel->inner.ttc);
2320 +
2321 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
2322 +index b63dec24747ab..b78f2ba25c19b 100644
2323 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
2324 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
2325 +@@ -408,6 +408,8 @@ static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
2326 + for (tt = 0; tt < MLX5_NUM_TT; tt++) {
2327 + struct mlx5_ttc_rule *rule = &rules[tt];
2328 +
2329 ++ if (test_bit(tt, params->ignore_dests))
2330 ++ continue;
2331 + rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
2332 + &params->dests[tt],
2333 + ttc_rules[tt].etype,
2334 +diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
2335 +index 7a50ba00f8ae3..c854efdf1f25f 100644
2336 +--- a/drivers/net/ethernet/smsc/smsc911x.c
2337 ++++ b/drivers/net/ethernet/smsc/smsc911x.c
2338 +@@ -2431,7 +2431,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2339 + if (irq == -EPROBE_DEFER) {
2340 + retval = -EPROBE_DEFER;
2341 + goto out_0;
2342 +- } else if (irq <= 0) {
2343 ++ } else if (irq < 0) {
2344 + pr_warn("Could not allocate irq resource\n");
2345 + retval = -ENODEV;
2346 + goto out_0;
2347 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
2348 +index 8e8778cfbbadd..6f87e296a410f 100644
2349 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
2350 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
2351 +@@ -454,6 +454,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
2352 + plat->has_gmac4 = 1;
2353 + plat->force_sf_dma_mode = 0;
2354 + plat->tso_en = 1;
2355 ++ plat->sph_disable = 1;
2356 +
2357 + /* Multiplying factor to the clk_eee_i clock time
2358 + * period to make it closer to 100 ns. This value
2359 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
2360 +index 09644ab0d87a7..fda53b4b9406f 100644
2361 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
2362 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
2363 +@@ -916,6 +916,7 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
2364 +
2365 + ret = mdio_mux_init(priv->device, mdio_mux, mdio_mux_syscon_switch_fn,
2366 + &gmac->mux_handle, priv, priv->mii);
2367 ++ of_node_put(mdio_mux);
2368 + return ret;
2369 + }
2370 +
2371 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2372 +index 422e3225f476a..fb115273f5533 100644
2373 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2374 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2375 +@@ -7077,7 +7077,7 @@ int stmmac_dvr_probe(struct device *device,
2376 + dev_info(priv->device, "TSO feature enabled\n");
2377 + }
2378 +
2379 +- if (priv->dma_cap.sphen) {
2380 ++ if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
2381 + ndev->hw_features |= NETIF_F_GRO;
2382 + priv->sph_cap = true;
2383 + priv->sph = priv->sph_cap;
2384 +diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
2385 +index bd4b1528cf992..79e850fe4621c 100644
2386 +--- a/drivers/net/ethernet/ti/cpsw_new.c
2387 ++++ b/drivers/net/ethernet/ti/cpsw_new.c
2388 +@@ -1246,8 +1246,10 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
2389 + data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM,
2390 + sizeof(struct cpsw_slave_data),
2391 + GFP_KERNEL);
2392 +- if (!data->slave_data)
2393 ++ if (!data->slave_data) {
2394 ++ of_node_put(tmp_node);
2395 + return -ENOMEM;
2396 ++ }
2397 +
2398 + /* Populate all the child nodes here...
2399 + */
2400 +@@ -1341,6 +1343,7 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
2401 +
2402 + err_node_put:
2403 + of_node_put(port_np);
2404 ++ of_node_put(tmp_node);
2405 + return ret;
2406 + }
2407 +
2408 +diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
2409 +index 77fa2cb03acaa..08a670bf2cd19 100644
2410 +--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
2411 ++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
2412 +@@ -823,10 +823,10 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
2413 + static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
2414 + {
2415 + struct mii_bus *bus;
2416 +- int rc;
2417 + struct resource res;
2418 + struct device_node *np = of_get_parent(lp->phy_node);
2419 + struct device_node *npp;
2420 ++ int rc, ret;
2421 +
2422 + /* Don't register the MDIO bus if the phy_node or its parent node
2423 + * can't be found.
2424 +@@ -836,8 +836,14 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
2425 + return -ENODEV;
2426 + }
2427 + npp = of_get_parent(np);
2428 +-
2429 +- of_address_to_resource(npp, 0, &res);
2430 ++ ret = of_address_to_resource(npp, 0, &res);
2431 ++ of_node_put(npp);
2432 ++ if (ret) {
2433 ++ dev_err(dev, "%s resource error!\n",
2434 ++ dev->of_node->full_name);
2435 ++ of_node_put(np);
2436 ++ return ret;
2437 ++ }
2438 + if (lp->ndev->mem_start != res.start) {
2439 + struct phy_device *phydev;
2440 + phydev = of_phy_find_device(lp->phy_node);
2441 +@@ -846,6 +852,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
2442 + "MDIO of the phy is not registered yet\n");
2443 + else
2444 + put_device(&phydev->mdio.dev);
2445 ++ of_node_put(np);
2446 + return 0;
2447 + }
2448 +
2449 +@@ -858,6 +865,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
2450 + bus = mdiobus_alloc();
2451 + if (!bus) {
2452 + dev_err(dev, "Failed to allocate mdiobus\n");
2453 ++ of_node_put(np);
2454 + return -ENOMEM;
2455 + }
2456 +
2457 +@@ -870,6 +878,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
2458 + bus->parent = dev;
2459 +
2460 + rc = of_mdiobus_register(bus, np);
2461 ++ of_node_put(np);
2462 + if (rc) {
2463 + dev_err(dev, "Failed to register mdio bus.\n");
2464 + goto err_register;
2465 +diff --git a/drivers/net/mdio/mdio-mux-bcm6368.c b/drivers/net/mdio/mdio-mux-bcm6368.c
2466 +index 6dcbf987d61b5..8b444a8eb6b55 100644
2467 +--- a/drivers/net/mdio/mdio-mux-bcm6368.c
2468 ++++ b/drivers/net/mdio/mdio-mux-bcm6368.c
2469 +@@ -115,7 +115,7 @@ static int bcm6368_mdiomux_probe(struct platform_device *pdev)
2470 + md->mii_bus = devm_mdiobus_alloc(&pdev->dev);
2471 + if (!md->mii_bus) {
2472 + dev_err(&pdev->dev, "mdiomux bus alloc failed\n");
2473 +- return ENOMEM;
2474 ++ return -ENOMEM;
2475 + }
2476 +
2477 + bus = md->mii_bus;
2478 +diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
2479 +index 2fcf545012b16..1a5284de4341b 100644
2480 +--- a/drivers/nfc/nfcmrvl/main.c
2481 ++++ b/drivers/nfc/nfcmrvl/main.c
2482 +@@ -183,6 +183,7 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv)
2483 + {
2484 + struct nci_dev *ndev = priv->ndev;
2485 +
2486 ++ nci_unregister_device(ndev);
2487 + if (priv->ndev->nfc_dev->fw_download_in_progress)
2488 + nfcmrvl_fw_dnld_abort(priv);
2489 +
2490 +@@ -191,7 +192,6 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv)
2491 + if (gpio_is_valid(priv->config.reset_n_io))
2492 + gpio_free(priv->config.reset_n_io);
2493 +
2494 +- nci_unregister_device(ndev);
2495 + nci_free_device(ndev);
2496 + kfree(priv);
2497 + }
2498 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
2499 +index 15348be1a8aa5..5be382b19d9a7 100644
2500 +--- a/drivers/pci/controller/pci-aardvark.c
2501 ++++ b/drivers/pci/controller/pci-aardvark.c
2502 +@@ -38,10 +38,6 @@
2503 + #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
2504 + #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7)
2505 + #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8)
2506 +-#define PCIE_CORE_INT_A_ASSERT_ENABLE 1
2507 +-#define PCIE_CORE_INT_B_ASSERT_ENABLE 2
2508 +-#define PCIE_CORE_INT_C_ASSERT_ENABLE 3
2509 +-#define PCIE_CORE_INT_D_ASSERT_ENABLE 4
2510 + /* PIO registers base address and register offsets */
2511 + #define PIO_BASE_ADDR 0x4000
2512 + #define PIO_CTRL (PIO_BASE_ADDR + 0x0)
2513 +@@ -102,6 +98,10 @@
2514 + #define PCIE_MSG_PM_PME_MASK BIT(7)
2515 + #define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44)
2516 + #define PCIE_ISR0_MSI_INT_PENDING BIT(24)
2517 ++#define PCIE_ISR0_CORR_ERR BIT(11)
2518 ++#define PCIE_ISR0_NFAT_ERR BIT(12)
2519 ++#define PCIE_ISR0_FAT_ERR BIT(13)
2520 ++#define PCIE_ISR0_ERR_MASK GENMASK(13, 11)
2521 + #define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
2522 + #define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val))
2523 + #define PCIE_ISR0_ALL_MASK GENMASK(31, 0)
2524 +@@ -272,17 +272,16 @@ struct advk_pcie {
2525 + u32 actions;
2526 + } wins[OB_WIN_COUNT];
2527 + u8 wins_count;
2528 ++ int irq;
2529 ++ struct irq_domain *rp_irq_domain;
2530 + struct irq_domain *irq_domain;
2531 + struct irq_chip irq_chip;
2532 + raw_spinlock_t irq_lock;
2533 + struct irq_domain *msi_domain;
2534 + struct irq_domain *msi_inner_domain;
2535 +- struct irq_chip msi_bottom_irq_chip;
2536 +- struct irq_chip msi_irq_chip;
2537 +- struct msi_domain_info msi_domain_info;
2538 ++ raw_spinlock_t msi_irq_lock;
2539 + DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
2540 + struct mutex msi_used_lock;
2541 +- u16 msi_msg;
2542 + int link_gen;
2543 + struct pci_bridge_emul bridge;
2544 + struct gpio_desc *reset_gpio;
2545 +@@ -477,6 +476,7 @@ static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
2546 +
2547 + static void advk_pcie_setup_hw(struct advk_pcie *pcie)
2548 + {
2549 ++ phys_addr_t msi_addr;
2550 + u32 reg;
2551 + int i;
2552 +
2553 +@@ -565,6 +565,11 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
2554 + reg |= LANE_COUNT_1;
2555 + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
2556 +
2557 ++ /* Set MSI address */
2558 ++ msi_addr = virt_to_phys(pcie);
2559 ++ advk_writel(pcie, lower_32_bits(msi_addr), PCIE_MSI_ADDR_LOW_REG);
2560 ++ advk_writel(pcie, upper_32_bits(msi_addr), PCIE_MSI_ADDR_HIGH_REG);
2561 ++
2562 + /* Enable MSI */
2563 + reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
2564 + reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
2565 +@@ -576,15 +581,20 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
2566 + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
2567 + advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
2568 +
2569 +- /* Disable All ISR0/1 Sources */
2570 +- reg = PCIE_ISR0_ALL_MASK;
2571 ++ /* Disable All ISR0/1 and MSI Sources */
2572 ++ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
2573 ++ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
2574 ++ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
2575 ++
2576 ++ /* Unmask summary MSI interrupt */
2577 ++ reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
2578 + reg &= ~PCIE_ISR0_MSI_INT_PENDING;
2579 + advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);
2580 +
2581 +- advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
2582 +-
2583 +- /* Unmask all MSIs */
2584 +- advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
2585 ++ /* Unmask PME interrupt for processing of PME requester */
2586 ++ reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
2587 ++ reg &= ~PCIE_MSG_PM_PME_MASK;
2588 ++ advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);
2589 +
2590 + /* Enable summary interrupt for GIC SPI source */
2591 + reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
2592 +@@ -778,11 +788,15 @@ advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
2593 + case PCI_INTERRUPT_LINE: {
2594 + /*
2595 + * From the whole 32bit register we support reading from HW only
2596 +- * one bit: PCI_BRIDGE_CTL_BUS_RESET.
2597 ++ * two bits: PCI_BRIDGE_CTL_BUS_RESET and PCI_BRIDGE_CTL_SERR.
2598 + * Other bits are retrieved only from emulated config buffer.
2599 + */
2600 + __le32 *cfgspace = (__le32 *)&bridge->conf;
2601 + u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
2602 ++ if (advk_readl(pcie, PCIE_ISR0_MASK_REG) & PCIE_ISR0_ERR_MASK)
2603 ++ val &= ~(PCI_BRIDGE_CTL_SERR << 16);
2604 ++ else
2605 ++ val |= PCI_BRIDGE_CTL_SERR << 16;
2606 + if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
2607 + val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
2608 + else
2609 +@@ -808,6 +822,19 @@ advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
2610 + break;
2611 +
2612 + case PCI_INTERRUPT_LINE:
2613 ++ /*
2614 ++ * According to Figure 6-3: Pseudo Logic Diagram for Error
2615 ++ * Message Controls in PCIe base specification, SERR# Enable bit
2616 ++ * in Bridge Control register enable receiving of ERR_* messages
2617 ++ */
2618 ++ if (mask & (PCI_BRIDGE_CTL_SERR << 16)) {
2619 ++ u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
2620 ++ if (new & (PCI_BRIDGE_CTL_SERR << 16))
2621 ++ val &= ~PCIE_ISR0_ERR_MASK;
2622 ++ else
2623 ++ val |= PCIE_ISR0_ERR_MASK;
2624 ++ advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
2625 ++ }
2626 + if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
2627 + u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
2628 + if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
2629 +@@ -835,22 +862,11 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
2630 + *value = PCI_EXP_SLTSTA_PDS << 16;
2631 + return PCI_BRIDGE_EMUL_HANDLED;
2632 +
2633 +- case PCI_EXP_RTCTL: {
2634 +- u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
2635 +- *value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
2636 +- *value |= le16_to_cpu(bridge->pcie_conf.rootctl) & PCI_EXP_RTCTL_CRSSVE;
2637 +- *value |= PCI_EXP_RTCAP_CRSVIS << 16;
2638 +- return PCI_BRIDGE_EMUL_HANDLED;
2639 +- }
2640 +-
2641 +- case PCI_EXP_RTSTA: {
2642 +- u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG);
2643 +- u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG);
2644 +- *value = msglog >> 16;
2645 +- if (isr0 & PCIE_MSG_PM_PME_MASK)
2646 +- *value |= PCI_EXP_RTSTA_PME;
2647 +- return PCI_BRIDGE_EMUL_HANDLED;
2648 +- }
2649 ++ /*
2650 ++ * PCI_EXP_RTCTL and PCI_EXP_RTSTA are also supported, but do not need
2651 ++ * to be handled here, because their values are stored in emulated
2652 ++ * config space buffer, and we read them from there when needed.
2653 ++ */
2654 +
2655 + case PCI_EXP_LNKCAP: {
2656 + u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
2657 +@@ -905,19 +921,18 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
2658 + break;
2659 +
2660 + case PCI_EXP_RTCTL: {
2661 +- /* Only mask/unmask PME interrupt */
2662 +- u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) &
2663 +- ~PCIE_MSG_PM_PME_MASK;
2664 +- if ((new & PCI_EXP_RTCTL_PMEIE) == 0)
2665 +- val |= PCIE_MSG_PM_PME_MASK;
2666 +- advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
2667 ++ u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
2668 ++ /* Only emulation of PMEIE and CRSSVE bits is provided */
2669 ++ rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE;
2670 ++ bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
2671 + break;
2672 + }
2673 +
2674 +- case PCI_EXP_RTSTA:
2675 +- new = (new & PCI_EXP_RTSTA_PME) >> 9;
2676 +- advk_writel(pcie, new, PCIE_ISR0_REG);
2677 +- break;
2678 ++ /*
2679 ++ * PCI_EXP_RTSTA is also supported, but does not need to be handled
2680 ++ * here, because its value is stored in emulated config space buffer,
2681 ++ * and we write it there when needed.
2682 ++ */
2683 +
2684 + case PCI_EXP_DEVCTL:
2685 + case PCI_EXP_DEVCTL2:
2686 +@@ -961,7 +976,7 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
2687 + bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
2688 +
2689 + /* Support interrupt A for MSI feature */
2690 +- bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
2691 ++ bridge->conf.intpin = PCI_INTERRUPT_INTA;
2692 +
2693 + /* Aardvark HW provides PCIe Capability structure in version 2 */
2694 + bridge->pcie_conf.cap = cpu_to_le16(2);
2695 +@@ -983,8 +998,12 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
2696 + return false;
2697 +
2698 + /*
2699 +- * If the link goes down after we check for link-up, nothing bad
2700 +- * happens but the config access times out.
2701 ++ * If the link goes down after we check for link-up, we have a problem:
2702 ++ * if a PIO request is executed while link-down, the whole controller
2703 ++ * gets stuck in a non-functional state, and even after link comes up
2704 ++ * again, PIO requests won't work anymore, and a reset of the whole PCIe
2705 ++ * controller is needed. Therefore we need to prevent sending PIO
2706 ++ * requests while the link is down.
2707 + */
2708 + if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie))
2709 + return false;
2710 +@@ -1182,10 +1201,10 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
2711 + struct msi_msg *msg)
2712 + {
2713 + struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
2714 +- phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg);
2715 ++ phys_addr_t msi_addr = virt_to_phys(pcie);
2716 +
2717 +- msg->address_lo = lower_32_bits(msi_msg);
2718 +- msg->address_hi = upper_32_bits(msi_msg);
2719 ++ msg->address_lo = lower_32_bits(msi_addr);
2720 ++ msg->address_hi = upper_32_bits(msi_addr);
2721 + msg->data = data->hwirq;
2722 + }
2723 +
2724 +@@ -1195,6 +1214,54 @@ static int advk_msi_set_affinity(struct irq_data *irq_data,
2725 + return -EINVAL;
2726 + }
2727 +
2728 ++static void advk_msi_irq_mask(struct irq_data *d)
2729 ++{
2730 ++ struct advk_pcie *pcie = d->domain->host_data;
2731 ++ irq_hw_number_t hwirq = irqd_to_hwirq(d);
2732 ++ unsigned long flags;
2733 ++ u32 mask;
2734 ++
2735 ++ raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
2736 ++ mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
2737 ++ mask |= BIT(hwirq);
2738 ++ advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
2739 ++ raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
2740 ++}
2741 ++
2742 ++static void advk_msi_irq_unmask(struct irq_data *d)
2743 ++{
2744 ++ struct advk_pcie *pcie = d->domain->host_data;
2745 ++ irq_hw_number_t hwirq = irqd_to_hwirq(d);
2746 ++ unsigned long flags;
2747 ++ u32 mask;
2748 ++
2749 ++ raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
2750 ++ mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
2751 ++ mask &= ~BIT(hwirq);
2752 ++ advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
2753 ++ raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
2754 ++}
2755 ++
2756 ++static void advk_msi_top_irq_mask(struct irq_data *d)
2757 ++{
2758 ++ pci_msi_mask_irq(d);
2759 ++ irq_chip_mask_parent(d);
2760 ++}
2761 ++
2762 ++static void advk_msi_top_irq_unmask(struct irq_data *d)
2763 ++{
2764 ++ pci_msi_unmask_irq(d);
2765 ++ irq_chip_unmask_parent(d);
2766 ++}
2767 ++
2768 ++static struct irq_chip advk_msi_bottom_irq_chip = {
2769 ++ .name = "MSI",
2770 ++ .irq_compose_msi_msg = advk_msi_irq_compose_msi_msg,
2771 ++ .irq_set_affinity = advk_msi_set_affinity,
2772 ++ .irq_mask = advk_msi_irq_mask,
2773 ++ .irq_unmask = advk_msi_irq_unmask,
2774 ++};
2775 ++
2776 + static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
2777 + unsigned int virq,
2778 + unsigned int nr_irqs, void *args)
2779 +@@ -1211,7 +1278,7 @@ static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
2780 +
2781 + for (i = 0; i < nr_irqs; i++)
2782 + irq_domain_set_info(domain, virq + i, hwirq + i,
2783 +- &pcie->msi_bottom_irq_chip,
2784 ++ &advk_msi_bottom_irq_chip,
2785 + domain->host_data, handle_simple_irq,
2786 + NULL, NULL);
2787 +
2788 +@@ -1267,7 +1334,6 @@ static int advk_pcie_irq_map(struct irq_domain *h,
2789 + {
2790 + struct advk_pcie *pcie = h->host_data;
2791 +
2792 +- advk_pcie_irq_mask(irq_get_irq_data(virq));
2793 + irq_set_status_flags(virq, IRQ_LEVEL);
2794 + irq_set_chip_and_handler(virq, &pcie->irq_chip,
2795 + handle_level_irq);
2796 +@@ -1281,37 +1347,25 @@ static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
2797 + .xlate = irq_domain_xlate_onecell,
2798 + };
2799 +
2800 ++static struct irq_chip advk_msi_irq_chip = {
2801 ++ .name = "advk-MSI",
2802 ++ .irq_mask = advk_msi_top_irq_mask,
2803 ++ .irq_unmask = advk_msi_top_irq_unmask,
2804 ++};
2805 ++
2806 ++static struct msi_domain_info advk_msi_domain_info = {
2807 ++ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
2808 ++ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
2809 ++ .chip = &advk_msi_irq_chip,
2810 ++};
2811 ++
2812 + static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
2813 + {
2814 + struct device *dev = &pcie->pdev->dev;
2815 +- struct device_node *node = dev->of_node;
2816 +- struct irq_chip *bottom_ic, *msi_ic;
2817 +- struct msi_domain_info *msi_di;
2818 +- phys_addr_t msi_msg_phys;
2819 +
2820 ++ raw_spin_lock_init(&pcie->msi_irq_lock);
2821 + mutex_init(&pcie->msi_used_lock);
2822 +
2823 +- bottom_ic = &pcie->msi_bottom_irq_chip;
2824 +-
2825 +- bottom_ic->name = "MSI";
2826 +- bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg;
2827 +- bottom_ic->irq_set_affinity = advk_msi_set_affinity;
2828 +-
2829 +- msi_ic = &pcie->msi_irq_chip;
2830 +- msi_ic->name = "advk-MSI";
2831 +-
2832 +- msi_di = &pcie->msi_domain_info;
2833 +- msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
2834 +- MSI_FLAG_MULTI_PCI_MSI;
2835 +- msi_di->chip = msi_ic;
2836 +-
2837 +- msi_msg_phys = virt_to_phys(&pcie->msi_msg);
2838 +-
2839 +- advk_writel(pcie, lower_32_bits(msi_msg_phys),
2840 +- PCIE_MSI_ADDR_LOW_REG);
2841 +- advk_writel(pcie, upper_32_bits(msi_msg_phys),
2842 +- PCIE_MSI_ADDR_HIGH_REG);
2843 +-
2844 + pcie->msi_inner_domain =
2845 + irq_domain_add_linear(NULL, MSI_IRQ_NUM,
2846 + &advk_msi_domain_ops, pcie);
2847 +@@ -1319,8 +1373,9 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
2848 + return -ENOMEM;
2849 +
2850 + pcie->msi_domain =
2851 +- pci_msi_create_irq_domain(of_node_to_fwnode(node),
2852 +- msi_di, pcie->msi_inner_domain);
2853 ++ pci_msi_create_irq_domain(dev_fwnode(dev),
2854 ++ &advk_msi_domain_info,
2855 ++ pcie->msi_inner_domain);
2856 + if (!pcie->msi_domain) {
2857 + irq_domain_remove(pcie->msi_inner_domain);
2858 + return -ENOMEM;
2859 +@@ -1361,7 +1416,6 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
2860 + }
2861 +
2862 + irq_chip->irq_mask = advk_pcie_irq_mask;
2863 +- irq_chip->irq_mask_ack = advk_pcie_irq_mask;
2864 + irq_chip->irq_unmask = advk_pcie_irq_unmask;
2865 +
2866 + pcie->irq_domain =
2867 +@@ -1383,6 +1437,70 @@ static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
2868 + irq_domain_remove(pcie->irq_domain);
2869 + }
2870 +
2871 ++static struct irq_chip advk_rp_irq_chip = {
2872 ++ .name = "advk-RP",
2873 ++};
2874 ++
2875 ++static int advk_pcie_rp_irq_map(struct irq_domain *h,
2876 ++ unsigned int virq, irq_hw_number_t hwirq)
2877 ++{
2878 ++ struct advk_pcie *pcie = h->host_data;
2879 ++
2880 ++ irq_set_chip_and_handler(virq, &advk_rp_irq_chip, handle_simple_irq);
2881 ++ irq_set_chip_data(virq, pcie);
2882 ++
2883 ++ return 0;
2884 ++}
2885 ++
2886 ++static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = {
2887 ++ .map = advk_pcie_rp_irq_map,
2888 ++ .xlate = irq_domain_xlate_onecell,
2889 ++};
2890 ++
2891 ++static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie)
2892 ++{
2893 ++ pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1,
2894 ++ &advk_pcie_rp_irq_domain_ops,
2895 ++ pcie);
2896 ++ if (!pcie->rp_irq_domain) {
2897 ++ dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n");
2898 ++ return -ENOMEM;
2899 ++ }
2900 ++
2901 ++ return 0;
2902 ++}
2903 ++
2904 ++static void advk_pcie_remove_rp_irq_domain(struct advk_pcie *pcie)
2905 ++{
2906 ++ irq_domain_remove(pcie->rp_irq_domain);
2907 ++}
2908 ++
2909 ++static void advk_pcie_handle_pme(struct advk_pcie *pcie)
2910 ++{
2911 ++ u32 requester = advk_readl(pcie, PCIE_MSG_LOG_REG) >> 16;
2912 ++
2913 ++ advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG);
2914 ++
2915 ++ /*
2916 ++ * PCIE_MSG_LOG_REG contains the last inbound message, so store
2917 ++ * the requester ID only when PME was not asserted yet.
2918 ++ * Also do not trigger PME interrupt when PME is still asserted.
2919 ++ */
2920 ++ if (!(le32_to_cpu(pcie->bridge.pcie_conf.rootsta) & PCI_EXP_RTSTA_PME)) {
2921 ++ pcie->bridge.pcie_conf.rootsta = cpu_to_le32(requester | PCI_EXP_RTSTA_PME);
2922 ++
2923 ++ /*
2924 ++ * Trigger PME interrupt only if PMEIE bit in Root Control is set.
2925 ++ * Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, so use PCIe interrupt 0.
2926 ++ */
2927 ++ if (!(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_PMEIE))
2928 ++ return;
2929 ++
2930 ++ if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
2931 ++ dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n");
2932 ++ }
2933 ++}
2934 ++
2935 + static void advk_pcie_handle_msi(struct advk_pcie *pcie)
2936 + {
2937 + u32 msi_val, msi_mask, msi_status, msi_idx;
2938 +@@ -1418,6 +1536,22 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
2939 + isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
2940 + isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
2941 +
2942 ++ /* Process the PME interrupt first so as not to miss the PME requester ID */
2943 ++ if (isr0_status & PCIE_MSG_PM_PME_MASK)
2944 ++ advk_pcie_handle_pme(pcie);
2945 ++
2946 ++ /* Process ERR interrupt */
2947 ++ if (isr0_status & PCIE_ISR0_ERR_MASK) {
2948 ++ advk_writel(pcie, PCIE_ISR0_ERR_MASK, PCIE_ISR0_REG);
2949 ++
2950 ++ /*
2951 ++ * Aardvark HW returns zero for PCI_ERR_ROOT_AER_IRQ, so use
2952 ++ * PCIe interrupt 0
2953 ++ */
2954 ++ if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
2955 ++ dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n");
2956 ++ }
2957 ++
2958 + /* Process MSI interrupts */
2959 + if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
2960 + advk_pcie_handle_msi(pcie);
2961 +@@ -1430,28 +1564,50 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
2962 + advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
2963 + PCIE_ISR1_REG);
2964 +
2965 +- generic_handle_domain_irq(pcie->irq_domain, i);
2966 ++ if (generic_handle_domain_irq(pcie->irq_domain, i) == -EINVAL)
2967 ++ dev_err_ratelimited(&pcie->pdev->dev, "unexpected INT%c IRQ\n",
2968 ++ (char)i + 'A');
2969 + }
2970 + }
2971 +
2972 +-static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
2973 ++static void advk_pcie_irq_handler(struct irq_desc *desc)
2974 + {
2975 +- struct advk_pcie *pcie = arg;
2976 +- u32 status;
2977 ++ struct advk_pcie *pcie = irq_desc_get_handler_data(desc);
2978 ++ struct irq_chip *chip = irq_desc_get_chip(desc);
2979 ++ u32 val, mask, status;
2980 +
2981 +- status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
2982 +- if (!(status & PCIE_IRQ_CORE_INT))
2983 +- return IRQ_NONE;
2984 ++ chained_irq_enter(chip, desc);
2985 +
2986 +- advk_pcie_handle_int(pcie);
2987 ++ val = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
2988 ++ mask = advk_readl(pcie, HOST_CTRL_INT_MASK_REG);
2989 ++ status = val & ((~mask) & PCIE_IRQ_ALL_MASK);
2990 +
2991 +- /* Clear interrupt */
2992 +- advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
2993 ++ if (status & PCIE_IRQ_CORE_INT) {
2994 ++ advk_pcie_handle_int(pcie);
2995 +
2996 +- return IRQ_HANDLED;
2997 ++ /* Clear interrupt */
2998 ++ advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
2999 ++ }
3000 ++
3001 ++ chained_irq_exit(chip, desc);
3002 ++}
3003 ++
3004 ++static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
3005 ++{
3006 ++ struct advk_pcie *pcie = dev->bus->sysdata;
3007 ++
3008 ++ /*
3009 ++ * Emulated root bridge has its own emulated irq chip and irq domain.
3010 ++ * Argument pin is the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) and
3011 ++ * hwirq for irq_create_mapping() is indexed from zero.
3012 ++ */
3013 ++ if (pci_is_root_bus(dev->bus))
3014 ++ return irq_create_mapping(pcie->rp_irq_domain, pin - 1);
3015 ++ else
3016 ++ return of_irq_parse_and_map_pci(dev, slot, pin);
3017 + }
3018 +
3019 +-static void __maybe_unused advk_pcie_disable_phy(struct advk_pcie *pcie)
3020 ++static void advk_pcie_disable_phy(struct advk_pcie *pcie)
3021 + {
3022 + phy_power_off(pcie->phy);
3023 + phy_exit(pcie->phy);
3024 +@@ -1515,7 +1671,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
3025 + struct advk_pcie *pcie;
3026 + struct pci_host_bridge *bridge;
3027 + struct resource_entry *entry;
3028 +- int ret, irq;
3029 ++ int ret;
3030 +
3031 + bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
3032 + if (!bridge)
3033 +@@ -1601,17 +1757,9 @@ static int advk_pcie_probe(struct platform_device *pdev)
3034 + if (IS_ERR(pcie->base))
3035 + return PTR_ERR(pcie->base);
3036 +
3037 +- irq = platform_get_irq(pdev, 0);
3038 +- if (irq < 0)
3039 +- return irq;
3040 +-
3041 +- ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
3042 +- IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
3043 +- pcie);
3044 +- if (ret) {
3045 +- dev_err(dev, "Failed to register interrupt\n");
3046 +- return ret;
3047 +- }
3048 ++ pcie->irq = platform_get_irq(pdev, 0);
3049 ++ if (pcie->irq < 0)
3050 ++ return pcie->irq;
3051 +
3052 + pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
3053 + "reset-gpios", 0,
3054 +@@ -1660,11 +1808,24 @@ static int advk_pcie_probe(struct platform_device *pdev)
3055 + return ret;
3056 + }
3057 +
3058 ++ ret = advk_pcie_init_rp_irq_domain(pcie);
3059 ++ if (ret) {
3060 ++ dev_err(dev, "Failed to initialize irq\n");
3061 ++ advk_pcie_remove_msi_irq_domain(pcie);
3062 ++ advk_pcie_remove_irq_domain(pcie);
3063 ++ return ret;
3064 ++ }
3065 ++
3066 ++ irq_set_chained_handler_and_data(pcie->irq, advk_pcie_irq_handler, pcie);
3067 ++
3068 + bridge->sysdata = pcie;
3069 + bridge->ops = &advk_pcie_ops;
3070 ++ bridge->map_irq = advk_pcie_map_irq;
3071 +
3072 + ret = pci_host_probe(bridge);
3073 + if (ret < 0) {
3074 ++ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
3075 ++ advk_pcie_remove_rp_irq_domain(pcie);
3076 + advk_pcie_remove_msi_irq_domain(pcie);
3077 + advk_pcie_remove_irq_domain(pcie);
3078 + return ret;
3079 +@@ -1712,7 +1873,11 @@ static int advk_pcie_remove(struct platform_device *pdev)
3080 + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
3081 + advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
3082 +
3083 ++ /* Remove IRQ handler */
3084 ++ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
3085 ++
3086 + /* Remove IRQ domains */
3087 ++ advk_pcie_remove_rp_irq_domain(pcie);
3088 + advk_pcie_remove_msi_irq_domain(pcie);
3089 + advk_pcie_remove_irq_domain(pcie);
3090 +
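The pci-aardvark hunks above do two related things: the summary interrupt is converted from a shared handler registered with devm_request_irq() into a chained flow handler installed with irq_set_chained_handler_and_data(), and a small one-entry root-port IRQ domain is added so PME and ERR events can be delivered through generic_handle_domain_irq(). A minimal sketch of the chained-handler pattern, with hypothetical names (my_ctrl, my_chained_handler, a made-up status register offset) standing in for the driver specifics:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>

struct my_ctrl {
        void __iomem *base;
        struct irq_domain *domain;
        int irq;                        /* parent interrupt line */
};

/*
 * A chained handler runs in the context of the parent interrupt and must
 * bracket its work with chained_irq_enter()/chained_irq_exit(); unlike a
 * normal handler it does not return IRQ_HANDLED/IRQ_NONE.
 */
static void my_chained_handler(struct irq_desc *desc)
{
        struct my_ctrl *ctrl = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        u32 status;

        chained_irq_enter(chip, desc);

        status = readl(ctrl->base + 0x10);      /* hypothetical status register */
        while (status) {
                unsigned int hwirq = __ffs(status);

                /* generic_handle_domain_irq() returns -EINVAL if unmapped */
                if (generic_handle_domain_irq(ctrl->domain, hwirq))
                        pr_warn_ratelimited("spurious hwirq %u\n", hwirq);
                status &= ~BIT(hwirq);
        }

        chained_irq_exit(chip, desc);
}

static void my_ctrl_setup_irq(struct my_ctrl *ctrl)
{
        /* No request_irq(): the parent line is taken over completely. */
        irq_set_chained_handler_and_data(ctrl->irq, my_chained_handler, ctrl);
}

On teardown the handler is detached again with irq_set_chained_handler_and_data(irq, NULL, NULL), which is what the new error and remove paths above do.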
3091 +diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
3092 +index 8e87a31e329d0..ba6d787896606 100644
3093 +--- a/drivers/s390/block/dasd.c
3094 ++++ b/drivers/s390/block/dasd.c
3095 +@@ -1422,6 +1422,13 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
3096 + if (!cqr->lpm)
3097 + cqr->lpm = dasd_path_get_opm(device);
3098 + }
3099 ++ /*
3100 ++ * remember the number of formatted tracks to prevent double format on
3101 ++ * ESE devices
3102 ++ */
3103 ++ if (cqr->block)
3104 ++ cqr->trkcount = atomic_read(&cqr->block->trkcount);
3105 ++
3106 + if (cqr->cpmode == 1) {
3107 + rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
3108 + (long) cqr, cqr->lpm);
3109 +@@ -1639,6 +1646,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
3110 + unsigned long now;
3111 + int nrf_suppressed = 0;
3112 + int fp_suppressed = 0;
3113 ++ struct request *req;
3114 + u8 *sense = NULL;
3115 + int expires;
3116 +
3117 +@@ -1739,7 +1747,12 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
3118 + }
3119 +
3120 + if (dasd_ese_needs_format(cqr->block, irb)) {
3121 +- if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
3122 ++ req = dasd_get_callback_data(cqr);
3123 ++ if (!req) {
3124 ++ cqr->status = DASD_CQR_ERROR;
3125 ++ return;
3126 ++ }
3127 ++ if (rq_data_dir(req) == READ) {
3128 + device->discipline->ese_read(cqr, irb);
3129 + cqr->status = DASD_CQR_SUCCESS;
3130 + cqr->stopclk = now;
3131 +@@ -2765,8 +2778,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
3132 + * complete a request partially.
3133 + */
3134 + if (proc_bytes) {
3135 +- blk_update_request(req, BLK_STS_OK,
3136 +- blk_rq_bytes(req) - proc_bytes);
3137 ++ blk_update_request(req, BLK_STS_OK, proc_bytes);
3138 + blk_mq_requeue_request(req, true);
3139 + } else if (likely(!blk_should_fake_timeout(req->q))) {
3140 + blk_mq_complete_request(req);
3141 +diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
3142 +index 8410a25a65c13..e46461b4d8a75 100644
3143 +--- a/drivers/s390/block/dasd_eckd.c
3144 ++++ b/drivers/s390/block/dasd_eckd.c
3145 +@@ -3083,13 +3083,24 @@ static int dasd_eckd_format_device(struct dasd_device *base,
3146 + }
3147 +
3148 + static bool test_and_set_format_track(struct dasd_format_entry *to_format,
3149 +- struct dasd_block *block)
3150 ++ struct dasd_ccw_req *cqr)
3151 + {
3152 ++ struct dasd_block *block = cqr->block;
3153 + struct dasd_format_entry *format;
3154 + unsigned long flags;
3155 + bool rc = false;
3156 +
3157 + spin_lock_irqsave(&block->format_lock, flags);
3158 ++ if (cqr->trkcount != atomic_read(&block->trkcount)) {
3159 ++ /*
3160 ++ * The number of formatted tracks has changed after request
3161 ++ * start and we cannot tell if the current track was involved.
3162 ++ * To avoid data corruption, treat it as if the current track is
3163 ++ * involved.
3164 ++ */
3165 ++ rc = true;
3166 ++ goto out;
3167 ++ }
3168 + list_for_each_entry(format, &block->format_list, list) {
3169 + if (format->track == to_format->track) {
3170 + rc = true;
3171 +@@ -3109,6 +3120,7 @@ static void clear_format_track(struct dasd_format_entry *format,
3172 + unsigned long flags;
3173 +
3174 + spin_lock_irqsave(&block->format_lock, flags);
3175 ++ atomic_inc(&block->trkcount);
3176 + list_del_init(&format->list);
3177 + spin_unlock_irqrestore(&block->format_lock, flags);
3178 + }
3179 +@@ -3145,7 +3157,7 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
3180 + sector_t curr_trk;
3181 + int rc;
3182 +
3183 +- req = cqr->callback_data;
3184 ++ req = dasd_get_callback_data(cqr);
3185 + block = cqr->block;
3186 + base = block->base;
3187 + private = base->private;
3188 +@@ -3170,8 +3182,11 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
3189 + }
3190 + format->track = curr_trk;
3191 + /* test if track is already in formatting by another thread */
3192 +- if (test_and_set_format_track(format, block))
3193 ++ if (test_and_set_format_track(format, cqr)) {
3194 ++ /* this is no real error so do not count down retries */
3195 ++ cqr->retries++;
3196 + return ERR_PTR(-EEXIST);
3197 ++ }
3198 +
3199 + fdata.start_unit = curr_trk;
3200 + fdata.stop_unit = curr_trk;
3201 +@@ -3270,12 +3285,11 @@ static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
3202 + cqr->proc_bytes = blk_count * blksize;
3203 + return 0;
3204 + }
3205 +- if (dst && !skip_block) {
3206 +- dst += off;
3207 ++ if (dst && !skip_block)
3208 + memset(dst, 0, blksize);
3209 +- } else {
3210 ++ else
3211 + skip_block--;
3212 +- }
3213 ++ dst += blksize;
3214 + blk_count++;
3215 + }
3216 + }
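The dasd_eckd changes above pair a per-block atomic counter (block->trkcount, bumped in clear_format_track() whenever a track finishes formatting) with a snapshot taken in dasd_start_IO(); if the counter has moved by the time test_and_set_format_track() runs, the code can no longer tell whether the current track was among those formatted and conservatively reports it as busy. The same snapshot-and-compare guard in generic form, with hypothetical names:

#include <linux/atomic.h>
#include <linux/spinlock.h>

struct my_block {
        atomic_t gen;           /* bumped whenever tracked state changes */
        spinlock_t lock;        /* protects the state behind 'gen' */
};

struct my_req {
        unsigned int gen_snapshot;      /* taken when the request started */
};

static void my_start_req(struct my_req *req, struct my_block *blk)
{
        req->gen_snapshot = atomic_read(&blk->gen);
}

/*
 * Returns true if the state may have changed since the request started;
 * the caller must then assume the worst and retry or back off.
 */
static bool my_state_moved(struct my_req *req, struct my_block *blk)
{
        unsigned long flags;
        bool moved;

        spin_lock_irqsave(&blk->lock, flags);
        moved = req->gen_snapshot != atomic_read(&blk->gen);
        spin_unlock_irqrestore(&blk->lock, flags);

        return moved;
}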
3217 +diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
3218 +index 8b458010f88a1..6e7f1a4a28a03 100644
3219 +--- a/drivers/s390/block/dasd_int.h
3220 ++++ b/drivers/s390/block/dasd_int.h
3221 +@@ -188,6 +188,7 @@ struct dasd_ccw_req {
3222 + void (*callback)(struct dasd_ccw_req *, void *data);
3223 + void *callback_data;
3224 + unsigned int proc_bytes; /* bytes for partial completion */
3225 ++ unsigned int trkcount; /* count formatted tracks */
3226 + };
3227 +
3228 + /*
3229 +@@ -611,6 +612,7 @@ struct dasd_block {
3230 +
3231 + struct list_head format_list;
3232 + spinlock_t format_lock;
3233 ++ atomic_t trkcount;
3234 + };
3235 +
3236 + struct dasd_attention_data {
3237 +@@ -757,6 +759,18 @@ dasd_check_blocksize(int bsize)
3238 + return 0;
3239 + }
3240 +
3241 ++/*
3242 ++ * Return the callback data of the original request in case there are
3243 ++ * ERP requests built on top of it.
3244 ++ */
3245 ++static inline void *dasd_get_callback_data(struct dasd_ccw_req *cqr)
3246 ++{
3247 ++ while (cqr->refers)
3248 ++ cqr = cqr->refers;
3249 ++
3250 ++ return cqr->callback_data;
3251 ++}
3252 ++
3253 + /* externals in dasd.c */
3254 + #define DASD_PROFILE_OFF 0
3255 + #define DASD_PROFILE_ON 1
3256 +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
3257 +index 00f0f282e7a13..10a9369c9dea4 100644
3258 +--- a/drivers/video/fbdev/core/fbmem.c
3259 ++++ b/drivers/video/fbdev/core/fbmem.c
3260 +@@ -1438,7 +1438,10 @@ fb_release(struct inode *inode, struct file *file)
3261 + __acquires(&info->lock)
3262 + __releases(&info->lock)
3263 + {
3264 +- struct fb_info * const info = file->private_data;
3265 ++ struct fb_info * const info = file_fb_info(file);
3266 ++
3267 ++ if (!info)
3268 ++ return -ENODEV;
3269 +
3270 + lock_fb_info(info);
3271 + if (info->fbops->fb_release)
3272 +diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
3273 +index b3e46aabc3d86..5c0035316dd01 100644
3274 +--- a/fs/btrfs/btrfs_inode.h
3275 ++++ b/fs/btrfs/btrfs_inode.h
3276 +@@ -346,6 +346,17 @@ static inline bool btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
3277 + return ret;
3278 + }
3279 +
3280 ++/*
3281 ++ * Check if the inode has flags compatible with compression
3282 ++ */
3283 ++static inline bool btrfs_inode_can_compress(const struct btrfs_inode *inode)
3284 ++{
3285 ++ if (inode->flags & BTRFS_INODE_NODATACOW ||
3286 ++ inode->flags & BTRFS_INODE_NODATASUM)
3287 ++ return false;
3288 ++ return true;
3289 ++}
3290 ++
3291 + struct btrfs_dio_private {
3292 + struct inode *inode;
3293 +
3294 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
3295 +index ed986c70cbc5e..e5f13922a18fe 100644
3296 +--- a/fs/btrfs/disk-io.c
3297 ++++ b/fs/btrfs/disk-io.c
3298 +@@ -3569,6 +3569,17 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
3299 + if (sectorsize < PAGE_SIZE) {
3300 + struct btrfs_subpage_info *subpage_info;
3301 +
3302 ++ /*
3303 ++ * V1 space cache has some hardcoded PAGE_SIZE usage, and is
3304 ++ * going to be deprecated.
3305 ++ *
3306 ++ * Force to use v2 cache for subpage case.
3307 ++ */
3308 ++ btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
3309 ++ btrfs_set_and_info(fs_info, FREE_SPACE_TREE,
3310 ++ "forcing free space tree for sector size %u with page size %lu",
3311 ++ sectorsize, PAGE_SIZE);
3312 ++
3313 + btrfs_warn(fs_info,
3314 + "read-write for sector size %u with page size %lu is experimental",
3315 + sectorsize, PAGE_SIZE);
3316 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3317 +index ecd305649e129..10e205fbad6cf 100644
3318 +--- a/fs/btrfs/inode.c
3319 ++++ b/fs/btrfs/inode.c
3320 +@@ -485,17 +485,6 @@ static noinline int add_async_extent(struct async_chunk *cow,
3321 + return 0;
3322 + }
3323 +
3324 +-/*
3325 +- * Check if the inode has flags compatible with compression
3326 +- */
3327 +-static inline bool inode_can_compress(struct btrfs_inode *inode)
3328 +-{
3329 +- if (inode->flags & BTRFS_INODE_NODATACOW ||
3330 +- inode->flags & BTRFS_INODE_NODATASUM)
3331 +- return false;
3332 +- return true;
3333 +-}
3334 +-
3335 + /*
3336 + * Check if the inode needs to be submitted to compression, based on mount
3337 + * options, defragmentation, properties or heuristics.
3338 +@@ -505,7 +494,7 @@ static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
3339 + {
3340 + struct btrfs_fs_info *fs_info = inode->root->fs_info;
3341 +
3342 +- if (!inode_can_compress(inode)) {
3343 ++ if (!btrfs_inode_can_compress(inode)) {
3344 + WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
3345 + KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
3346 + btrfs_ino(inode));
3347 +@@ -2015,7 +2004,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
3348 + (zoned && btrfs_is_data_reloc_root(inode->root)));
3349 + ret = run_delalloc_nocow(inode, locked_page, start, end,
3350 + page_started, nr_written);
3351 +- } else if (!inode_can_compress(inode) ||
3352 ++ } else if (!btrfs_inode_can_compress(inode) ||
3353 + !inode_need_compress(inode, start, end)) {
3354 + if (zoned)
3355 + ret = run_delalloc_zoned(inode, locked_page, start, end,
3356 +diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
3357 +index 1a6d2d5b4b333..1b31481f9e72c 100644
3358 +--- a/fs/btrfs/props.c
3359 ++++ b/fs/btrfs/props.c
3360 +@@ -17,9 +17,11 @@ static DEFINE_HASHTABLE(prop_handlers_ht, BTRFS_PROP_HANDLERS_HT_BITS);
3361 + struct prop_handler {
3362 + struct hlist_node node;
3363 + const char *xattr_name;
3364 +- int (*validate)(const char *value, size_t len);
3365 ++ int (*validate)(const struct btrfs_inode *inode, const char *value,
3366 ++ size_t len);
3367 + int (*apply)(struct inode *inode, const char *value, size_t len);
3368 + const char *(*extract)(struct inode *inode);
3369 ++ bool (*ignore)(const struct btrfs_inode *inode);
3370 + int inheritable;
3371 + };
3372 +
3373 +@@ -55,7 +57,8 @@ find_prop_handler(const char *name,
3374 + return NULL;
3375 + }
3376 +
3377 +-int btrfs_validate_prop(const char *name, const char *value, size_t value_len)
3378 ++int btrfs_validate_prop(const struct btrfs_inode *inode, const char *name,
3379 ++ const char *value, size_t value_len)
3380 + {
3381 + const struct prop_handler *handler;
3382 +
3383 +@@ -69,7 +72,29 @@ int btrfs_validate_prop(const char *name, const char *value, size_t value_len)
3384 + if (value_len == 0)
3385 + return 0;
3386 +
3387 +- return handler->validate(value, value_len);
3388 ++ return handler->validate(inode, value, value_len);
3389 ++}
3390 ++
3391 ++/*
3392 ++ * Check if a property should be ignored (not set) for an inode.
3393 ++ *
3394 ++ * @inode: The target inode.
3395 ++ * @name: The property's name.
3396 ++ *
3397 ++ * The caller must be sure the given property name is valid, for example by
3398 ++ * having previously called btrfs_validate_prop().
3399 ++ *
3400 ++ * Returns: true if the property should be ignored for the given inode
3401 ++ * false if the property must not be ignored for the given inode
3402 ++ */
3403 ++bool btrfs_ignore_prop(const struct btrfs_inode *inode, const char *name)
3404 ++{
3405 ++ const struct prop_handler *handler;
3406 ++
3407 ++ handler = find_prop_handler(name, NULL);
3408 ++ ASSERT(handler != NULL);
3409 ++
3410 ++ return handler->ignore(inode);
3411 + }
3412 +
3413 + int btrfs_set_prop(struct btrfs_trans_handle *trans, struct inode *inode,
3414 +@@ -252,8 +277,12 @@ int btrfs_load_inode_props(struct inode *inode, struct btrfs_path *path)
3415 + return ret;
3416 + }
3417 +
3418 +-static int prop_compression_validate(const char *value, size_t len)
3419 ++static int prop_compression_validate(const struct btrfs_inode *inode,
3420 ++ const char *value, size_t len)
3421 + {
3422 ++ if (!btrfs_inode_can_compress(inode))
3423 ++ return -EINVAL;
3424 ++
3425 + if (!value)
3426 + return 0;
3427 +
3428 +@@ -310,6 +339,22 @@ static int prop_compression_apply(struct inode *inode, const char *value,
3429 + return 0;
3430 + }
3431 +
3432 ++static bool prop_compression_ignore(const struct btrfs_inode *inode)
3433 ++{
3434 ++ /*
3435 ++ * Compression only has effect for regular files, and for directories
3436 ++ * we set it just to propagate it to new files created inside them.
3437 ++ * Everything else (symlinks, devices, sockets, fifos) is pointless as
3438 ++ * it will do nothing, so don't waste metadata space on a compression
3439 ++ * xattr for anything that is neither a file nor a directory.
3440 ++ */
3441 ++ if (!S_ISREG(inode->vfs_inode.i_mode) &&
3442 ++ !S_ISDIR(inode->vfs_inode.i_mode))
3443 ++ return true;
3444 ++
3445 ++ return false;
3446 ++}
3447 ++
3448 + static const char *prop_compression_extract(struct inode *inode)
3449 + {
3450 + switch (BTRFS_I(inode)->prop_compress) {
3451 +@@ -330,6 +375,7 @@ static struct prop_handler prop_handlers[] = {
3452 + .validate = prop_compression_validate,
3453 + .apply = prop_compression_apply,
3454 + .extract = prop_compression_extract,
3455 ++ .ignore = prop_compression_ignore,
3456 + .inheritable = 1
3457 + },
3458 + };
3459 +@@ -356,6 +402,9 @@ static int inherit_props(struct btrfs_trans_handle *trans,
3460 + if (!h->inheritable)
3461 + continue;
3462 +
3463 ++ if (h->ignore(BTRFS_I(inode)))
3464 ++ continue;
3465 ++
3466 + value = h->extract(parent);
3467 + if (!value)
3468 + continue;
3469 +@@ -364,7 +413,7 @@ static int inherit_props(struct btrfs_trans_handle *trans,
3470 + * This is not strictly necessary as the property should be
3471 + * valid, but in case it isn't, don't propagate it further.
3472 + */
3473 +- ret = h->validate(value, strlen(value));
3474 ++ ret = h->validate(BTRFS_I(inode), value, strlen(value));
3475 + if (ret)
3476 + continue;
3477 +
3478 +diff --git a/fs/btrfs/props.h b/fs/btrfs/props.h
3479 +index 40b2c65b518c6..59bea741cfcf4 100644
3480 +--- a/fs/btrfs/props.h
3481 ++++ b/fs/btrfs/props.h
3482 +@@ -13,7 +13,9 @@ void __init btrfs_props_init(void);
3483 + int btrfs_set_prop(struct btrfs_trans_handle *trans, struct inode *inode,
3484 + const char *name, const char *value, size_t value_len,
3485 + int flags);
3486 +-int btrfs_validate_prop(const char *name, const char *value, size_t value_len);
3487 ++int btrfs_validate_prop(const struct btrfs_inode *inode, const char *name,
3488 ++ const char *value, size_t value_len);
3489 ++bool btrfs_ignore_prop(const struct btrfs_inode *inode, const char *name);
3490 +
3491 + int btrfs_load_inode_props(struct inode *inode, struct btrfs_path *path);
3492 +
3493 +diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
3494 +index beb7f72d50b86..11927d440f11a 100644
3495 +--- a/fs/btrfs/sysfs.c
3496 ++++ b/fs/btrfs/sysfs.c
3497 +@@ -919,6 +919,9 @@ static ssize_t btrfs_exclusive_operation_show(struct kobject *kobj,
3498 + case BTRFS_EXCLOP_BALANCE:
3499 + str = "balance\n";
3500 + break;
3501 ++ case BTRFS_EXCLOP_BALANCE_PAUSED:
3502 ++ str = "balance paused\n";
3503 ++ break;
3504 + case BTRFS_EXCLOP_DEV_ADD:
3505 + str = "device add\n";
3506 + break;
3507 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3508 +index 7a0bfa5bedb95..049ee19041c7b 100644
3509 +--- a/fs/btrfs/tree-log.c
3510 ++++ b/fs/btrfs/tree-log.c
3511 +@@ -5655,6 +5655,18 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3512 + mutex_lock(&inode->log_mutex);
3513 + }
3514 +
3515 ++ /*
3516 ++ * For symlinks, we must always log their content, which is stored in an
3517 ++ * inline extent, otherwise we could end up with an empty symlink after
3518 ++ * log replay, which is invalid on linux (symlink(2) returns -ENOENT if
3519 ++ * one attempts to create an empty symlink).
3520 ++ * We don't need to worry about flushing delalloc, because we create the
3521 ++ * inline extent when the symlink is created (we never have delalloc for
3522 ++ * symlinks).
3523 ++ */
3524 ++ if (S_ISLNK(inode->vfs_inode.i_mode))
3525 ++ inode_only = LOG_INODE_ALL;
3526 ++
3527 + /*
3528 + * This is for cases where logging a directory could result in losing a
3529 + * a file after replaying the log. For example, if we move a file from a
3530 +@@ -6015,7 +6027,7 @@ process_leaf:
3531 + }
3532 +
3533 + ctx->log_new_dentries = false;
3534 +- if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
3535 ++ if (type == BTRFS_FT_DIR)
3536 + log_mode = LOG_INODE_ALL;
3537 + ret = btrfs_log_inode(trans, BTRFS_I(di_inode),
3538 + log_mode, ctx);
3539 +diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
3540 +index 99abf41b89b92..85691dc2232fa 100644
3541 +--- a/fs/btrfs/xattr.c
3542 ++++ b/fs/btrfs/xattr.c
3543 +@@ -262,7 +262,8 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name,
3544 + inode_inc_iversion(inode);
3545 + inode->i_ctime = current_time(inode);
3546 + ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3547 +- BUG_ON(ret);
3548 ++ if (ret)
3549 ++ btrfs_abort_transaction(trans, ret);
3550 + out:
3551 + if (start_trans)
3552 + btrfs_end_transaction(trans);
3553 +@@ -403,10 +404,13 @@ static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler,
3554 + struct btrfs_root *root = BTRFS_I(inode)->root;
3555 +
3556 + name = xattr_full_name(handler, name);
3557 +- ret = btrfs_validate_prop(name, value, size);
3558 ++ ret = btrfs_validate_prop(BTRFS_I(inode), name, value, size);
3559 + if (ret)
3560 + return ret;
3561 +
3562 ++ if (btrfs_ignore_prop(BTRFS_I(inode), name))
3563 ++ return 0;
3564 ++
3565 + trans = btrfs_start_transaction(root, 2);
3566 + if (IS_ERR(trans))
3567 + return PTR_ERR(trans);
3568 +@@ -416,7 +420,8 @@ static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler,
3569 + inode_inc_iversion(inode);
3570 + inode->i_ctime = current_time(inode);
3571 + ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3572 +- BUG_ON(ret);
3573 ++ if (ret)
3574 ++ btrfs_abort_transaction(trans, ret);
3575 + }
3576 +
3577 + btrfs_end_transaction(trans);
3578 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3579 +index c36fa0d0d438b..3d307854c6504 100644
3580 +--- a/fs/nfs/nfs4proc.c
3581 ++++ b/fs/nfs/nfs4proc.c
3582 +@@ -363,6 +363,14 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
3583 + kunmap_atomic(start);
3584 + }
3585 +
3586 ++static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
3587 ++{
3588 ++ if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
3589 ++ fattr->pre_change_attr = version;
3590 ++ fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
3591 ++ }
3592 ++}
3593 ++
3594 + static void nfs4_test_and_free_stateid(struct nfs_server *server,
3595 + nfs4_stateid *stateid,
3596 + const struct cred *cred)
3597 +@@ -6556,7 +6564,9 @@ static void nfs4_delegreturn_release(void *calldata)
3598 + pnfs_roc_release(&data->lr.arg, &data->lr.res,
3599 + data->res.lr_ret);
3600 + if (inode) {
3601 +- nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
3602 ++ nfs4_fattr_set_prechange(&data->fattr,
3603 ++ inode_peek_iversion_raw(inode));
3604 ++ nfs_refresh_inode(inode, &data->fattr);
3605 + nfs_iput_and_deactive(inode);
3606 + }
3607 + kfree(calldata);
3608 +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
3609 +index 24eea1b05ca27..29917850f0794 100644
3610 +--- a/include/linux/stmmac.h
3611 ++++ b/include/linux/stmmac.h
3612 +@@ -270,5 +270,6 @@ struct plat_stmmacenet_data {
3613 + int msi_rx_base_vec;
3614 + int msi_tx_base_vec;
3615 + bool use_phy_wol;
3616 ++ bool sph_disable;
3617 + };
3618 + #endif
3619 +diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
3620 +index 99cbdf55a8bda..f09c60393e559 100644
3621 +--- a/kernel/irq/internals.h
3622 ++++ b/kernel/irq/internals.h
3623 +@@ -29,12 +29,14 @@ extern struct irqaction chained_action;
3624 + * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
3625 + * IRQTF_AFFINITY - irq thread is requested to adjust affinity
3626 + * IRQTF_FORCED_THREAD - irq action is force threaded
3627 ++ * IRQTF_READY - signals that irq thread is ready
3628 + */
3629 + enum {
3630 + IRQTF_RUNTHREAD,
3631 + IRQTF_WARNED,
3632 + IRQTF_AFFINITY,
3633 + IRQTF_FORCED_THREAD,
3634 ++ IRQTF_READY,
3635 + };
3636 +
3637 + /*
3638 +diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
3639 +index 2267e6527db3c..a4426a00b9edf 100644
3640 +--- a/kernel/irq/irqdesc.c
3641 ++++ b/kernel/irq/irqdesc.c
3642 +@@ -407,6 +407,7 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
3643 + lockdep_set_class(&desc->lock, &irq_desc_lock_class);
3644 + mutex_init(&desc->request_mutex);
3645 + init_rcu_head(&desc->rcu);
3646 ++ init_waitqueue_head(&desc->wait_for_threads);
3647 +
3648 + desc_set_defaults(irq, desc, node, affinity, owner);
3649 + irqd_set(&desc->irq_data, flags);
3650 +@@ -575,6 +576,7 @@ int __init early_irq_init(void)
3651 + raw_spin_lock_init(&desc[i].lock);
3652 + lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
3653 + mutex_init(&desc[i].request_mutex);
3654 ++ init_waitqueue_head(&desc[i].wait_for_threads);
3655 + desc_set_defaults(i, &desc[i], node, NULL, NULL);
3656 + }
3657 + return arch_early_irq_init();
3658 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
3659 +index f23ffd30385b1..8915fba0697a0 100644
3660 +--- a/kernel/irq/manage.c
3661 ++++ b/kernel/irq/manage.c
3662 +@@ -1248,6 +1248,31 @@ static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
3663 + raw_spin_unlock_irq(&desc->lock);
3664 + }
3665 +
3666 ++/*
3667 ++ * Internal function to notify that an interrupt thread is ready.
3668 ++ */
3669 ++static void irq_thread_set_ready(struct irq_desc *desc,
3670 ++ struct irqaction *action)
3671 ++{
3672 ++ set_bit(IRQTF_READY, &action->thread_flags);
3673 ++ wake_up(&desc->wait_for_threads);
3674 ++}
3675 ++
3676 ++/*
3677 ++ * Internal function to wake up an interrupt thread and wait until it is
3678 ++ * ready.
3679 ++ */
3680 ++static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
3681 ++ struct irqaction *action)
3682 ++{
3683 ++ if (!action || !action->thread)
3684 ++ return;
3685 ++
3686 ++ wake_up_process(action->thread);
3687 ++ wait_event(desc->wait_for_threads,
3688 ++ test_bit(IRQTF_READY, &action->thread_flags));
3689 ++}
3690 ++
3691 + /*
3692 + * Interrupt handler thread
3693 + */
3694 +@@ -1259,6 +1284,8 @@ static int irq_thread(void *data)
3695 + irqreturn_t (*handler_fn)(struct irq_desc *desc,
3696 + struct irqaction *action);
3697 +
3698 ++ irq_thread_set_ready(desc, action);
3699 ++
3700 + sched_set_fifo(current);
3701 +
3702 + if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
3703 +@@ -1683,8 +1710,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
3704 + }
3705 +
3706 + if (!shared) {
3707 +- init_waitqueue_head(&desc->wait_for_threads);
3708 +-
3709 + /* Setup the type (level, edge polarity) if configured: */
3710 + if (new->flags & IRQF_TRIGGER_MASK) {
3711 + ret = __irq_set_trigger(desc,
3712 +@@ -1780,14 +1805,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
3713 +
3714 + irq_setup_timings(desc, new);
3715 +
3716 +- /*
3717 +- * Strictly no need to wake it up, but hung_task complains
3718 +- * when no hard interrupt wakes the thread up.
3719 +- */
3720 +- if (new->thread)
3721 +- wake_up_process(new->thread);
3722 +- if (new->secondary)
3723 +- wake_up_process(new->secondary->thread);
3724 ++ wake_up_and_wait_for_irq_thread_ready(desc, new);
3725 ++ wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
3726 +
3727 + register_irq_proc(irq, desc);
3728 + new->dir = NULL;
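The manage.c change above replaces the unconditional wake_up_process() with a handshake: the interrupt thread sets IRQTF_READY and wakes desc->wait_for_threads as its first action, and __setup_irq() waits for that bit before the interrupt can be delivered, so a hard interrupt can no longer race with a thread that has not finished starting up (the wait queue is therefore initialized at descriptor allocation time in the irqdesc.c hunks). The same handshake reduced to a generic kthread example, with hypothetical names:

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_ready_wq);
static bool my_thread_ready;

static int my_thread_fn(void *data)
{
        /* Announce readiness before doing anything else. */
        WRITE_ONCE(my_thread_ready, true);
        wake_up(&my_ready_wq);

        while (!kthread_should_stop())
                msleep(100);

        return 0;
}

static struct task_struct *my_spawn_and_wait(void)
{
        struct task_struct *t = kthread_run(my_thread_fn, NULL, "my-thread");

        if (!IS_ERR(t))
                wait_event(my_ready_wq, READ_ONCE(my_thread_ready));

        return t;
}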
3729 +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
3730 +index dcdcb85121e40..3b1398fbddaf8 100644
3731 +--- a/kernel/time/timekeeping.c
3732 ++++ b/kernel/time/timekeeping.c
3733 +@@ -482,7 +482,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
3734 + * of the following timestamps. Callers need to be aware of that and
3735 + * deal with it.
3736 + */
3737 +-u64 ktime_get_mono_fast_ns(void)
3738 ++u64 notrace ktime_get_mono_fast_ns(void)
3739 + {
3740 + return __ktime_get_fast_ns(&tk_fast_mono);
3741 + }
3742 +@@ -494,7 +494,7 @@ EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
3743 + * Contrary to ktime_get_mono_fast_ns() this is always correct because the
3744 + * conversion factor is not affected by NTP/PTP correction.
3745 + */
3746 +-u64 ktime_get_raw_fast_ns(void)
3747 ++u64 notrace ktime_get_raw_fast_ns(void)
3748 + {
3749 + return __ktime_get_fast_ns(&tk_fast_raw);
3750 + }
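ktime_get_mono_fast_ns() and ktime_get_raw_fast_ns() are NMI-safe clock accessors meant to be usable from tracing code, so marking them notrace keeps the function tracer from instrumenting them and recursing when a trace event asks for a timestamp. The attribute is simply part of the definition; a hypothetical helper on such a path would follow the same rule:

#include <linux/timekeeping.h>

/* Anything callable from ftrace/NMI paths must itself be notrace. */
static u64 notrace my_trace_timestamp(void)
{
        return ktime_get_mono_fast_ns();
}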
3751 +diff --git a/net/can/isotp.c b/net/can/isotp.c
3752 +index 8c753dcefe7fc..26821487a0573 100644
3753 +--- a/net/can/isotp.c
3754 ++++ b/net/can/isotp.c
3755 +@@ -1146,6 +1146,11 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
3756 +
3757 + lock_sock(sk);
3758 +
3759 ++ if (so->bound) {
3760 ++ err = -EINVAL;
3761 ++ goto out;
3762 ++ }
3763 ++
3764 + /* do not register frame reception for functional addressing */
3765 + if (so->opt.flags & CAN_ISOTP_SF_BROADCAST)
3766 + do_rx_reg = 0;
3767 +@@ -1156,10 +1161,6 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
3768 + goto out;
3769 + }
3770 +
3771 +- if (so->bound && addr->can_ifindex == so->ifindex &&
3772 +- rx_id == so->rxid && tx_id == so->txid)
3773 +- goto out;
3774 +-
3775 + dev = dev_get_by_index(net, addr->can_ifindex);
3776 + if (!dev) {
3777 + err = -ENODEV;
3778 +@@ -1186,19 +1187,6 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
3779 +
3780 + dev_put(dev);
3781 +
3782 +- if (so->bound && do_rx_reg) {
3783 +- /* unregister old filter */
3784 +- if (so->ifindex) {
3785 +- dev = dev_get_by_index(net, so->ifindex);
3786 +- if (dev) {
3787 +- can_rx_unregister(net, dev, so->rxid,
3788 +- SINGLE_MASK(so->rxid),
3789 +- isotp_rcv, sk);
3790 +- dev_put(dev);
3791 +- }
3792 +- }
3793 +- }
3794 +-
3795 + /* switch to new settings */
3796 + so->ifindex = ifindex;
3797 + so->rxid = rx_id;
3798 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
3799 +index 2ad3c7b42d6d2..1d9e6d5e9a76c 100644
3800 +--- a/net/ipv4/igmp.c
3801 ++++ b/net/ipv4/igmp.c
3802 +@@ -2403,9 +2403,10 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
3803 + /* decrease mem now to avoid the memleak warning */
3804 + atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
3805 + &sk->sk_omem_alloc);
3806 +- kfree_rcu(psl, rcu);
3807 + }
3808 + rcu_assign_pointer(pmc->sflist, newpsl);
3809 ++ if (psl)
3810 ++ kfree_rcu(psl, rcu);
3811 + psl = newpsl;
3812 + }
3813 + rv = 1; /* > 0 for insert logic below if sl_count is 0 */
3814 +@@ -2507,11 +2508,13 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
3815 + /* decrease mem now to avoid the memleak warning */
3816 + atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
3817 + &sk->sk_omem_alloc);
3818 +- kfree_rcu(psl, rcu);
3819 +- } else
3820 ++ } else {
3821 + (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
3822 + 0, NULL, 0);
3823 ++ }
3824 + rcu_assign_pointer(pmc->sflist, newpsl);
3825 ++ if (psl)
3826 ++ kfree_rcu(psl, rcu);
3827 + pmc->sfmode = msf->imsf_fmode;
3828 + err = 0;
3829 + done:
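The igmp.c reordering above (and the matching mcast.c change that follows) enforces the usual RCU rule for replacing a published pointer: point pmc->sflist at the new source list first, and only then queue the old list for freeing with kfree_rcu(), because the grace period only protects readers that picked up the old pointer before it was replaced. The pattern in isolation, with hypothetical types (writers assumed to be serialized by the caller):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_list {
        int nr;
        struct rcu_head rcu;
};

struct my_obj {
        struct my_list __rcu *list;
};

static void my_replace_list(struct my_obj *obj, struct my_list *newl)
{
        struct my_list *old = rcu_dereference_protected(obj->list, 1);

        /* 1. Unpublish 'old' so new readers only ever see 'newl'. */
        rcu_assign_pointer(obj->list, newl);

        /* 2. Only now defer the free: the grace period covers readers
         *    that obtained 'old' before the assignment above.
         */
        if (old)
                kfree_rcu(old, rcu);
}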
3830 +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
3831 +index 909f937befd71..7f695c39d9a8c 100644
3832 +--- a/net/ipv6/mcast.c
3833 ++++ b/net/ipv6/mcast.c
3834 +@@ -460,10 +460,10 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
3835 + newpsl->sl_addr[i] = psl->sl_addr[i];
3836 + atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
3837 + &sk->sk_omem_alloc);
3838 +- kfree_rcu(psl, rcu);
3839 + }
3840 ++ rcu_assign_pointer(pmc->sflist, newpsl);
3841 ++ kfree_rcu(psl, rcu);
3842 + psl = newpsl;
3843 +- rcu_assign_pointer(pmc->sflist, psl);
3844 + }
3845 + rv = 1; /* > 0 for insert logic below if sl_count is 0 */
3846 + for (i = 0; i < psl->sl_count; i++) {
3847 +@@ -565,12 +565,12 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
3848 + psl->sl_count, psl->sl_addr, 0);
3849 + atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
3850 + &sk->sk_omem_alloc);
3851 +- kfree_rcu(psl, rcu);
3852 + } else {
3853 + ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
3854 + }
3855 +- mutex_unlock(&idev->mc_lock);
3856 + rcu_assign_pointer(pmc->sflist, newpsl);
3857 ++ mutex_unlock(&idev->mc_lock);
3858 ++ kfree_rcu(psl, rcu);
3859 + pmc->sfmode = gsf->gf_fmode;
3860 + err = 0;
3861 + done:
3862 +diff --git a/net/nfc/core.c b/net/nfc/core.c
3863 +index dc7a2404efdf9..5b286e1e0a6ff 100644
3864 +--- a/net/nfc/core.c
3865 ++++ b/net/nfc/core.c
3866 +@@ -38,7 +38,7 @@ int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name)
3867 +
3868 + device_lock(&dev->dev);
3869 +
3870 +- if (!device_is_registered(&dev->dev)) {
3871 ++ if (dev->shutting_down) {
3872 + rc = -ENODEV;
3873 + goto error;
3874 + }
3875 +@@ -94,7 +94,7 @@ int nfc_dev_up(struct nfc_dev *dev)
3876 +
3877 + device_lock(&dev->dev);
3878 +
3879 +- if (!device_is_registered(&dev->dev)) {
3880 ++ if (dev->shutting_down) {
3881 + rc = -ENODEV;
3882 + goto error;
3883 + }
3884 +@@ -142,7 +142,7 @@ int nfc_dev_down(struct nfc_dev *dev)
3885 +
3886 + device_lock(&dev->dev);
3887 +
3888 +- if (!device_is_registered(&dev->dev)) {
3889 ++ if (dev->shutting_down) {
3890 + rc = -ENODEV;
3891 + goto error;
3892 + }
3893 +@@ -207,7 +207,7 @@ int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols)
3894 +
3895 + device_lock(&dev->dev);
3896 +
3897 +- if (!device_is_registered(&dev->dev)) {
3898 ++ if (dev->shutting_down) {
3899 + rc = -ENODEV;
3900 + goto error;
3901 + }
3902 +@@ -246,7 +246,7 @@ int nfc_stop_poll(struct nfc_dev *dev)
3903 +
3904 + device_lock(&dev->dev);
3905 +
3906 +- if (!device_is_registered(&dev->dev)) {
3907 ++ if (dev->shutting_down) {
3908 + rc = -ENODEV;
3909 + goto error;
3910 + }
3911 +@@ -291,7 +291,7 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
3912 +
3913 + device_lock(&dev->dev);
3914 +
3915 +- if (!device_is_registered(&dev->dev)) {
3916 ++ if (dev->shutting_down) {
3917 + rc = -ENODEV;
3918 + goto error;
3919 + }
3920 +@@ -335,7 +335,7 @@ int nfc_dep_link_down(struct nfc_dev *dev)
3921 +
3922 + device_lock(&dev->dev);
3923 +
3924 +- if (!device_is_registered(&dev->dev)) {
3925 ++ if (dev->shutting_down) {
3926 + rc = -ENODEV;
3927 + goto error;
3928 + }
3929 +@@ -401,7 +401,7 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
3930 +
3931 + device_lock(&dev->dev);
3932 +
3933 +- if (!device_is_registered(&dev->dev)) {
3934 ++ if (dev->shutting_down) {
3935 + rc = -ENODEV;
3936 + goto error;
3937 + }
3938 +@@ -448,7 +448,7 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx, u8 mode)
3939 +
3940 + device_lock(&dev->dev);
3941 +
3942 +- if (!device_is_registered(&dev->dev)) {
3943 ++ if (dev->shutting_down) {
3944 + rc = -ENODEV;
3945 + goto error;
3946 + }
3947 +@@ -495,7 +495,7 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
3948 +
3949 + device_lock(&dev->dev);
3950 +
3951 +- if (!device_is_registered(&dev->dev)) {
3952 ++ if (dev->shutting_down) {
3953 + rc = -ENODEV;
3954 + kfree_skb(skb);
3955 + goto error;
3956 +@@ -552,7 +552,7 @@ int nfc_enable_se(struct nfc_dev *dev, u32 se_idx)
3957 +
3958 + device_lock(&dev->dev);
3959 +
3960 +- if (!device_is_registered(&dev->dev)) {
3961 ++ if (dev->shutting_down) {
3962 + rc = -ENODEV;
3963 + goto error;
3964 + }
3965 +@@ -601,7 +601,7 @@ int nfc_disable_se(struct nfc_dev *dev, u32 se_idx)
3966 +
3967 + device_lock(&dev->dev);
3968 +
3969 +- if (!device_is_registered(&dev->dev)) {
3970 ++ if (dev->shutting_down) {
3971 + rc = -ENODEV;
3972 + goto error;
3973 + }
3974 +@@ -1134,6 +1134,7 @@ int nfc_register_device(struct nfc_dev *dev)
3975 + dev->rfkill = NULL;
3976 + }
3977 + }
3978 ++ dev->shutting_down = false;
3979 + device_unlock(&dev->dev);
3980 +
3981 + rc = nfc_genl_device_added(dev);
3982 +@@ -1166,12 +1167,10 @@ void nfc_unregister_device(struct nfc_dev *dev)
3983 + rfkill_unregister(dev->rfkill);
3984 + rfkill_destroy(dev->rfkill);
3985 + }
3986 ++ dev->shutting_down = true;
3987 + device_unlock(&dev->dev);
3988 +
3989 + if (dev->ops->check_presence) {
3990 +- device_lock(&dev->dev);
3991 +- dev->shutting_down = true;
3992 +- device_unlock(&dev->dev);
3993 + del_timer_sync(&dev->check_pres_timer);
3994 + cancel_work_sync(&dev->check_pres_work);
3995 + }
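Across nfc/core.c the device_is_registered() checks are replaced by a dev->shutting_down flag that the NFC core itself manages under device_lock(): it is cleared at the end of nfc_register_device() and set at the start of nfc_unregister_device(), so every operation observes a single, core-controlled teardown state instead of a driver-core internal. The pattern, reduced to its essentials with hypothetical names:

#include <linux/device.h>
#include <linux/errno.h>

struct my_dev {
        struct device dev;
        bool shutting_down;     /* set once teardown has begun */
};

static int my_dev_op(struct my_dev *mdev)
{
        int rc = 0;

        device_lock(&mdev->dev);
        if (mdev->shutting_down) {      /* refuse new work during teardown */
                rc = -ENODEV;
                goto out;
        }
        /* ... perform the operation ... */
out:
        device_unlock(&mdev->dev);
        return rc;
}

static void my_dev_unregister(struct my_dev *mdev)
{
        device_lock(&mdev->dev);
        mdev->shutting_down = true;     /* later ops now fail with -ENODEV */
        device_unlock(&mdev->dev);

        /* ... cancel timers and work, then device_del()/put_device() ... */
}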
3996 +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
3997 +index f184b0db79d40..7c62417ccfd78 100644
3998 +--- a/net/nfc/netlink.c
3999 ++++ b/net/nfc/netlink.c
4000 +@@ -1244,7 +1244,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
4001 + struct sk_buff *msg;
4002 + void *hdr;
4003 +
4004 +- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
4005 ++ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
4006 + if (!msg)
4007 + return -ENOMEM;
4008 +
4009 +@@ -1260,7 +1260,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
4010 +
4011 + genlmsg_end(msg, hdr);
4012 +
4013 +- genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL);
4014 ++ genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC);
4015 +
4016 + return 0;
4017 +
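The switch from GFP_KERNEL to GFP_ATOMIC in nfc_genl_fw_download_done() indicates this notification can fire from a context that is not allowed to sleep; on such a path every allocation, including the netlink skb and the multicast send, has to use GFP_ATOMIC. A minimal sketch of an event function following that rule (names are hypothetical, payload construction elided):

#include <net/genetlink.h>

static int my_notify_event(const struct genl_family *family)
{
        struct sk_buff *msg;

        /* GFP_ATOMIC: this may run from a timer or under a spinlock. */
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!msg)
                return -ENOMEM;

        /* ... genlmsg_put(), nla_put_*() attributes, genlmsg_end() ... */

        genlmsg_multicast(family, msg, 0, 0, GFP_ATOMIC);
        return 0;
}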
4018 +diff --git a/net/rds/tcp.c b/net/rds/tcp.c
4019 +index 5327d130c4b56..2f638f8b7b1e7 100644
4020 +--- a/net/rds/tcp.c
4021 ++++ b/net/rds/tcp.c
4022 +@@ -495,6 +495,14 @@ void rds_tcp_tune(struct socket *sock)
4023 +
4024 + tcp_sock_set_nodelay(sock->sk);
4025 + lock_sock(sk);
4026 ++ /* TCP timer functions might access the net namespace even after
4027 ++ * the process which created this net namespace has terminated.
4028 ++ */
4029 ++ if (!sk->sk_net_refcnt) {
4030 ++ sk->sk_net_refcnt = 1;
4031 ++ get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
4032 ++ sock_inuse_add(net, 1);
4033 ++ }
4034 + if (rtn->sndbuf_size > 0) {
4035 + sk->sk_sndbuf = rtn->sndbuf_size;
4036 + sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
4037 +diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
4038 +index a4111408ffd0c..6a1611b0e3037 100644
4039 +--- a/net/rxrpc/local_object.c
4040 ++++ b/net/rxrpc/local_object.c
4041 +@@ -117,6 +117,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
4042 + local, srx->transport_type, srx->transport.family);
4043 +
4044 + udp_conf.family = srx->transport.family;
4045 ++ udp_conf.use_udp_checksums = true;
4046 + if (udp_conf.family == AF_INET) {
4047 + udp_conf.local_ip = srx->transport.sin.sin_addr;
4048 + udp_conf.local_udp_port = srx->transport.sin.sin_port;
4049 +@@ -124,6 +125,8 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
4050 + } else {
4051 + udp_conf.local_ip6 = srx->transport.sin6.sin6_addr;
4052 + udp_conf.local_udp_port = srx->transport.sin6.sin6_port;
4053 ++ udp_conf.use_udp6_tx_checksums = true;
4054 ++ udp_conf.use_udp6_rx_checksums = true;
4055 + #endif
4056 + }
4057 + ret = udp_sock_create(net, &udp_conf, &local->socket);
4058 +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
4059 +index 0222ad4523a9d..258ebc194ee2b 100644
4060 +--- a/net/sunrpc/clnt.c
4061 ++++ b/net/sunrpc/clnt.c
4062 +@@ -1065,10 +1065,13 @@ rpc_task_get_next_xprt(struct rpc_clnt *clnt)
4063 + static
4064 + void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
4065 + {
4066 +- if (task->tk_xprt &&
4067 +- !(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) &&
4068 +- (task->tk_flags & RPC_TASK_MOVEABLE)))
4069 +- return;
4070 ++ if (task->tk_xprt) {
4071 ++ if (!(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) &&
4072 ++ (task->tk_flags & RPC_TASK_MOVEABLE)))
4073 ++ return;
4074 ++ xprt_release(task);
4075 ++ xprt_put(task->tk_xprt);
4076 ++ }
4077 + if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN)
4078 + task->tk_xprt = rpc_task_get_first_xprt(clnt);
4079 + else
4080 +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
4081 +index 7aef2876beb38..eec9569af4c51 100644
4082 +--- a/net/sunrpc/xprtsock.c
4083 ++++ b/net/sunrpc/xprtsock.c
4084 +@@ -1967,6 +1967,9 @@ static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
4085 + struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
4086 + int ret;
4087 +
4088 ++ if (transport->file)
4089 ++ goto force_disconnect;
4090 ++
4091 + if (RPC_IS_ASYNC(task)) {
4092 + /*
4093 + * We want the AF_LOCAL connect to be resolved in the
4094 +@@ -1979,11 +1982,17 @@ static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
4095 + */
4096 + task->tk_rpc_status = -ENOTCONN;
4097 + rpc_exit(task, -ENOTCONN);
4098 +- return;
4099 ++ goto out_wake;
4100 + }
4101 + ret = xs_local_setup_socket(transport);
4102 + if (ret && !RPC_IS_SOFTCONN(task))
4103 + msleep_interruptible(15000);
4104 ++ return;
4105 ++force_disconnect:
4106 ++ xprt_force_disconnect(xprt);
4107 ++out_wake:
4108 ++ xprt_clear_connecting(xprt);
4109 ++ xprt_wake_pending_tasks(xprt, -ENOTCONN);
4110 + }
4111 +
4112 + #if IS_ENABLED(CONFIG_SUNRPC_SWAP)
4113 +@@ -2867,9 +2876,6 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
4114 + }
4115 + xprt_set_bound(xprt);
4116 + xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
4117 +- ret = ERR_PTR(xs_local_setup_socket(transport));
4118 +- if (ret)
4119 +- goto out_err;
4120 + break;
4121 + default:
4122 + ret = ERR_PTR(-EAFNOSUPPORT);
4123 +diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
4124 +index 626c0c34b0b66..3a53914277d35 100644
4125 +--- a/sound/firewire/fireworks/fireworks_hwdep.c
4126 ++++ b/sound/firewire/fireworks/fireworks_hwdep.c
4127 +@@ -34,6 +34,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
4128 + type = SNDRV_FIREWIRE_EVENT_EFW_RESPONSE;
4129 + if (copy_to_user(buf, &type, sizeof(type)))
4130 + return -EFAULT;
4131 ++ count += sizeof(type);
4132 + remained -= sizeof(type);
4133 + buf += sizeof(type);
4134 +
4135 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4136 +index c66d31d8a498c..51c54cf0f3127 100644
4137 +--- a/sound/pci/hda/patch_realtek.c
4138 ++++ b/sound/pci/hda/patch_realtek.c
4139 +@@ -8759,6 +8759,8 @@ static const struct hda_fixup alc269_fixups[] = {
4140 + [ALC287_FIXUP_CS35L41_I2C_2] = {
4141 + .type = HDA_FIXUP_FUNC,
4142 + .v.func = cs35l41_fixup_i2c_two,
4143 ++ .chained = true,
4144 ++ .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
4145 + },
4146 + [ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED] = {
4147 + .type = HDA_FIXUP_VERBS,
4148 +@@ -9191,6 +9193,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4149 + SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
4150 + SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
4151 + SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
4152 ++ SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
4153 + SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
4154 + SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
4155 + SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
4156 +diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
4157 +index 13009d08b09ac..c7493549a9a50 100644
4158 +--- a/sound/soc/codecs/da7219.c
4159 ++++ b/sound/soc/codecs/da7219.c
4160 +@@ -446,7 +446,7 @@ static int da7219_tonegen_freq_put(struct snd_kcontrol *kcontrol,
4161 + struct soc_mixer_control *mixer_ctrl =
4162 + (struct soc_mixer_control *) kcontrol->private_value;
4163 + unsigned int reg = mixer_ctrl->reg;
4164 +- __le16 val;
4165 ++ __le16 val_new, val_old;
4166 + int ret;
4167 +
4168 + /*
4169 +@@ -454,13 +454,19 @@ static int da7219_tonegen_freq_put(struct snd_kcontrol *kcontrol,
4170 + * Therefore we need to convert to little endian here to align with
4171 + * HW registers.
4172 + */
4173 +- val = cpu_to_le16(ucontrol->value.integer.value[0]);
4174 ++ val_new = cpu_to_le16(ucontrol->value.integer.value[0]);
4175 +
4176 + mutex_lock(&da7219->ctrl_lock);
4177 +- ret = regmap_raw_write(da7219->regmap, reg, &val, sizeof(val));
4178 ++ ret = regmap_raw_read(da7219->regmap, reg, &val_old, sizeof(val_old));
4179 ++ if (ret == 0 && (val_old != val_new))
4180 ++ ret = regmap_raw_write(da7219->regmap, reg,
4181 ++ &val_new, sizeof(val_new));
4182 + mutex_unlock(&da7219->ctrl_lock);
4183 +
4184 +- return ret;
4185 ++ if (ret < 0)
4186 ++ return ret;
4187 ++
4188 ++ return val_old != val_new;
4189 + }
4190 +
4191 +
4192 +diff --git a/sound/soc/codecs/rt9120.c b/sound/soc/codecs/rt9120.c
4193 +index 7aa1772a915f3..6e0d7cf0c8c92 100644
4194 +--- a/sound/soc/codecs/rt9120.c
4195 ++++ b/sound/soc/codecs/rt9120.c
4196 +@@ -341,7 +341,6 @@ static int rt9120_get_reg_size(unsigned int reg)
4197 + {
4198 + switch (reg) {
4199 + case 0x00:
4200 +- case 0x09:
4201 + case 0x20 ... 0x27:
4202 + return 2;
4203 + case 0x30 ... 0x3D:
4204 +diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
4205 +index e4018ba3b19a2..7878c7a58ff10 100644
4206 +--- a/sound/soc/codecs/wm8958-dsp2.c
4207 ++++ b/sound/soc/codecs/wm8958-dsp2.c
4208 +@@ -530,7 +530,7 @@ static int wm8958_mbc_put(struct snd_kcontrol *kcontrol,
4209 +
4210 + wm8958_dsp_apply(component, mbc, wm8994->mbc_ena[mbc]);
4211 +
4212 +- return 0;
4213 ++ return 1;
4214 + }
4215 +
4216 + #define WM8958_MBC_SWITCH(xname, xval) {\
4217 +@@ -656,7 +656,7 @@ static int wm8958_vss_put(struct snd_kcontrol *kcontrol,
4218 +
4219 + wm8958_dsp_apply(component, vss, wm8994->vss_ena[vss]);
4220 +
4221 +- return 0;
4222 ++ return 1;
4223 + }
4224 +
4225 +
4226 +@@ -730,7 +730,7 @@ static int wm8958_hpf_put(struct snd_kcontrol *kcontrol,
4227 +
4228 + wm8958_dsp_apply(component, hpf % 3, ucontrol->value.integer.value[0]);
4229 +
4230 +- return 0;
4231 ++ return 1;
4232 + }
4233 +
4234 + #define WM8958_HPF_SWITCH(xname, xval) {\
4235 +@@ -824,7 +824,7 @@ static int wm8958_enh_eq_put(struct snd_kcontrol *kcontrol,
4236 +
4237 + wm8958_dsp_apply(component, eq, ucontrol->value.integer.value[0]);
4238 +
4239 +- return 0;
4240 ++ return 1;
4241 + }
4242 +
4243 + #define WM8958_ENH_EQ_SWITCH(xname, xval) {\
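The wm8958, da7219 and meson mux hunks in this patch all fix the same contract: an ALSA .put() control callback should return 1 when it actually changed the value (so the core emits a control-change notification to userspace), 0 when nothing changed, and a negative errno on failure. A minimal put handler following that convention, with hypothetical driver state:

#include <sound/soc.h>

struct my_priv {
        unsigned int gain;
};

static int my_gain_put(struct snd_kcontrol *kcontrol,
                       struct snd_ctl_elem_value *ucontrol)
{
        struct snd_soc_component *component =
                snd_soc_kcontrol_component(kcontrol);
        struct my_priv *priv = snd_soc_component_get_drvdata(component);
        unsigned int val = ucontrol->value.integer.value[0];

        if (val > 255)
                return -EINVAL;         /* out of range */
        if (val == priv->gain)
                return 0;               /* unchanged: no notification */

        priv->gain = val;
        /* ... write the new value to the hardware ... */

        return 1;                       /* changed: notify listeners */
}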
4244 +diff --git a/sound/soc/meson/aiu-acodec-ctrl.c b/sound/soc/meson/aiu-acodec-ctrl.c
4245 +index 27a6d3259c50a..442c215936d97 100644
4246 +--- a/sound/soc/meson/aiu-acodec-ctrl.c
4247 ++++ b/sound/soc/meson/aiu-acodec-ctrl.c
4248 +@@ -58,7 +58,7 @@ static int aiu_acodec_ctrl_mux_put_enum(struct snd_kcontrol *kcontrol,
4249 +
4250 + snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
4251 +
4252 +- return 0;
4253 ++ return 1;
4254 + }
4255 +
4256 + static SOC_ENUM_SINGLE_DECL(aiu_acodec_ctrl_mux_enum, AIU_ACODEC_CTRL,
4257 +diff --git a/sound/soc/meson/aiu-codec-ctrl.c b/sound/soc/meson/aiu-codec-ctrl.c
4258 +index c3ea733fce91f..c966fc60dc733 100644
4259 +--- a/sound/soc/meson/aiu-codec-ctrl.c
4260 ++++ b/sound/soc/meson/aiu-codec-ctrl.c
4261 +@@ -57,7 +57,7 @@ static int aiu_codec_ctrl_mux_put_enum(struct snd_kcontrol *kcontrol,
4262 +
4263 + snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
4264 +
4265 +- return 0;
4266 ++ return 1;
4267 + }
4268 +
4269 + static SOC_ENUM_SINGLE_DECL(aiu_hdmi_ctrl_mux_enum, AIU_HDMI_CLK_DATA_CTRL,
4270 +diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c
4271 +index cbbaa55d92a66..2b77010c2c5ce 100644
4272 +--- a/sound/soc/meson/axg-card.c
4273 ++++ b/sound/soc/meson/axg-card.c
4274 +@@ -320,7 +320,6 @@ static int axg_card_add_link(struct snd_soc_card *card, struct device_node *np,
4275 +
4276 + dai_link->cpus = cpu;
4277 + dai_link->num_cpus = 1;
4278 +- dai_link->nonatomic = true;
4279 +
4280 + ret = meson_card_parse_dai(card, np, &dai_link->cpus->of_node,
4281 + &dai_link->cpus->dai_name);
4282 +diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
4283 +index 0c31934a96301..e076ced300257 100644
4284 +--- a/sound/soc/meson/axg-tdm-interface.c
4285 ++++ b/sound/soc/meson/axg-tdm-interface.c
4286 +@@ -351,29 +351,13 @@ static int axg_tdm_iface_hw_free(struct snd_pcm_substream *substream,
4287 + return 0;
4288 + }
4289 +
4290 +-static int axg_tdm_iface_trigger(struct snd_pcm_substream *substream,
4291 +- int cmd,
4292 ++static int axg_tdm_iface_prepare(struct snd_pcm_substream *substream,
4293 + struct snd_soc_dai *dai)
4294 + {
4295 +- struct axg_tdm_stream *ts =
4296 +- snd_soc_dai_get_dma_data(dai, substream);
4297 +-
4298 +- switch (cmd) {
4299 +- case SNDRV_PCM_TRIGGER_START:
4300 +- case SNDRV_PCM_TRIGGER_RESUME:
4301 +- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
4302 +- axg_tdm_stream_start(ts);
4303 +- break;
4304 +- case SNDRV_PCM_TRIGGER_SUSPEND:
4305 +- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
4306 +- case SNDRV_PCM_TRIGGER_STOP:
4307 +- axg_tdm_stream_stop(ts);
4308 +- break;
4309 +- default:
4310 +- return -EINVAL;
4311 +- }
4312 ++ struct axg_tdm_stream *ts = snd_soc_dai_get_dma_data(dai, substream);
4313 +
4314 +- return 0;
4315 ++ /* Force all attached formatters to update */
4316 ++ return axg_tdm_stream_reset(ts);
4317 + }
4318 +
4319 + static int axg_tdm_iface_remove_dai(struct snd_soc_dai *dai)
4320 +@@ -413,8 +397,8 @@ static const struct snd_soc_dai_ops axg_tdm_iface_ops = {
4321 + .set_fmt = axg_tdm_iface_set_fmt,
4322 + .startup = axg_tdm_iface_startup,
4323 + .hw_params = axg_tdm_iface_hw_params,
4324 ++ .prepare = axg_tdm_iface_prepare,
4325 + .hw_free = axg_tdm_iface_hw_free,
4326 +- .trigger = axg_tdm_iface_trigger,
4327 + };
4328 +
4329 + /* TDM Backend DAIs */
4330 +diff --git a/sound/soc/meson/g12a-tohdmitx.c b/sound/soc/meson/g12a-tohdmitx.c
4331 +index 9b2b59536ced0..6c99052feafd8 100644
4332 +--- a/sound/soc/meson/g12a-tohdmitx.c
4333 ++++ b/sound/soc/meson/g12a-tohdmitx.c
4334 +@@ -67,7 +67,7 @@ static int g12a_tohdmitx_i2s_mux_put_enum(struct snd_kcontrol *kcontrol,
4335 +
4336 + snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
4337 +
4338 +- return 0;
4339 ++ return 1;
4340 + }
4341 +
4342 + static SOC_ENUM_SINGLE_DECL(g12a_tohdmitx_i2s_mux_enum, TOHDMITX_CTRL0,
4343 +diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
4344 +index 359987bf76d1b..c54c8ca8d7156 100644
4345 +--- a/sound/soc/soc-generic-dmaengine-pcm.c
4346 ++++ b/sound/soc/soc-generic-dmaengine-pcm.c
4347 +@@ -86,10 +86,10 @@ static int dmaengine_pcm_hw_params(struct snd_soc_component *component,
4348 +
4349 + memset(&slave_config, 0, sizeof(slave_config));
4350 +
4351 +- if (pcm->config && pcm->config->prepare_slave_config)
4352 +- prepare_slave_config = pcm->config->prepare_slave_config;
4353 +- else
4354 ++ if (!pcm->config)
4355 + prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
4356 ++ else
4357 ++ prepare_slave_config = pcm->config->prepare_slave_config;
4358 +
4359 + if (prepare_slave_config) {
4360 + int ret = prepare_slave_config(substream, params, &slave_config);
4361 +diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
4362 +index a0ca58ba16273..58347eadd219b 100644
4363 +--- a/sound/soc/soc-ops.c
4364 ++++ b/sound/soc/soc-ops.c
4365 +@@ -461,7 +461,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
4366 + ret = err;
4367 + }
4368 + }
4369 +- return err;
4370 ++ return ret;
4371 + }
4372 + EXPORT_SYMBOL_GPL(snd_soc_put_volsw_sx);
4373 +
4374 +diff --git a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
4375 +index eaf8a04a7ca5f..10e54bcca7a93 100755
4376 +--- a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
4377 ++++ b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
4378 +@@ -190,7 +190,7 @@ setup_prepare()
4379 +
4380 + tc filter add dev $eth0 ingress chain $(IS2 0 0) pref 1 \
4381 + protocol ipv4 flower skip_sw ip_proto udp dst_port 5201 \
4382 +- action police rate 50mbit burst 64k \
4383 ++ action police rate 50mbit burst 64k conform-exceed drop/pipe \
4384 + action goto chain $(IS2 1 0)
4385 + }
4386 +
4387 +diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
4388 +index 8a470da7b71af..15a2875698b53 100644
4389 +--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
4390 ++++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
4391 +@@ -60,6 +60,21 @@
4392 + /* CPUID.0x8000_0001.EDX */
4393 + #define CPUID_GBPAGES (1ul << 26)
4394 +
4395 ++/* Page table bitfield declarations */
4396 ++#define PTE_PRESENT_MASK BIT_ULL(0)
4397 ++#define PTE_WRITABLE_MASK BIT_ULL(1)
4398 ++#define PTE_USER_MASK BIT_ULL(2)
4399 ++#define PTE_ACCESSED_MASK BIT_ULL(5)
4400 ++#define PTE_DIRTY_MASK BIT_ULL(6)
4401 ++#define PTE_LARGE_MASK BIT_ULL(7)
4402 ++#define PTE_GLOBAL_MASK BIT_ULL(8)
4403 ++#define PTE_NX_MASK BIT_ULL(63)
4404 ++
4405 ++#define PAGE_SHIFT 12
4406 ++
4407 ++#define PHYSICAL_PAGE_MASK GENMASK_ULL(51, 12)
4408 ++#define PTE_GET_PFN(pte) (((pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
4409 ++
4410 + /* General Registers in 64-Bit Mode */
4411 + struct gpr64_regs {
4412 + u64 rax;
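The selftest header above replaces the old C bitfield structs (deleted further down in lib/x86_64/processor.c) with plain bit masks, so page-table entries are built and inspected with ordinary mask arithmetic instead of bitfield accesses. A small illustration using only the macros defined in this hunk (the helper name is hypothetical; it assumes the selftest's processor.h is in scope):

/* Compose a present, writable leaf PTE for a physical address and read
 * the PFN back with PTE_GET_PFN(). */
static uint64_t make_leaf_pte(uint64_t paddr)
{
        return PTE_PRESENT_MASK | PTE_WRITABLE_MASK |
               (paddr & PHYSICAL_PAGE_MASK);
}

/* make_leaf_pte(0x1234000) yields a PTE whose PTE_GET_PFN() is 0x1234. */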
4413 +diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
4414 +index ba1fdc3dcf4a9..2c4a7563a4f8a 100644
4415 +--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
4416 ++++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
4417 +@@ -278,7 +278,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
4418 + else
4419 + guest_test_phys_mem = p->phys_offset;
4420 + #ifdef __s390x__
4421 +- alignment = max(0x100000, alignment);
4422 ++ alignment = max(0x100000UL, alignment);
4423 + #endif
4424 + guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);
4425 +
4426 +diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
4427 +index 9f000dfb55949..0dd442c260159 100644
4428 +--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
4429 ++++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
4430 +@@ -19,38 +19,6 @@
4431 +
4432 + vm_vaddr_t exception_handlers;
4433 +
4434 +-/* Virtual translation table structure declarations */
4435 +-struct pageUpperEntry {
4436 +- uint64_t present:1;
4437 +- uint64_t writable:1;
4438 +- uint64_t user:1;
4439 +- uint64_t write_through:1;
4440 +- uint64_t cache_disable:1;
4441 +- uint64_t accessed:1;
4442 +- uint64_t ignored_06:1;
4443 +- uint64_t page_size:1;
4444 +- uint64_t ignored_11_08:4;
4445 +- uint64_t pfn:40;
4446 +- uint64_t ignored_62_52:11;
4447 +- uint64_t execute_disable:1;
4448 +-};
4449 +-
4450 +-struct pageTableEntry {
4451 +- uint64_t present:1;
4452 +- uint64_t writable:1;
4453 +- uint64_t user:1;
4454 +- uint64_t write_through:1;
4455 +- uint64_t cache_disable:1;
4456 +- uint64_t accessed:1;
4457 +- uint64_t dirty:1;
4458 +- uint64_t reserved_07:1;
4459 +- uint64_t global:1;
4460 +- uint64_t ignored_11_09:3;
4461 +- uint64_t pfn:40;
4462 +- uint64_t ignored_62_52:11;
4463 +- uint64_t execute_disable:1;
4464 +-};
4465 +-
4466 + void regs_dump(FILE *stream, struct kvm_regs *regs,
4467 + uint8_t indent)
4468 + {
4469 +@@ -195,23 +163,21 @@ static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
4470 + return &page_table[index];
4471 + }
4472 +
4473 +-static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
4474 +- uint64_t pt_pfn,
4475 +- uint64_t vaddr,
4476 +- uint64_t paddr,
4477 +- int level,
4478 +- enum x86_page_size page_size)
4479 ++static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
4480 ++ uint64_t pt_pfn,
4481 ++ uint64_t vaddr,
4482 ++ uint64_t paddr,
4483 ++ int level,
4484 ++ enum x86_page_size page_size)
4485 + {
4486 +- struct pageUpperEntry *pte = virt_get_pte(vm, pt_pfn, vaddr, level);
4487 +-
4488 +- if (!pte->present) {
4489 +- pte->writable = true;
4490 +- pte->present = true;
4491 +- pte->page_size = (level == page_size);
4492 +- if (pte->page_size)
4493 +- pte->pfn = paddr >> vm->page_shift;
4494 ++ uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, level);
4495 ++
4496 ++ if (!(*pte & PTE_PRESENT_MASK)) {
4497 ++ *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
4498 ++ if (level == page_size)
4499 ++ *pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK);
4500 + else
4501 +- pte->pfn = vm_alloc_page_table(vm) >> vm->page_shift;
4502 ++ *pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
4503 + } else {
4504 + /*
4505 + * Entry already present. Assert that the caller doesn't want
4506 +@@ -221,7 +187,7 @@ static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
4507 + TEST_ASSERT(level != page_size,
4508 + "Cannot create hugepage at level: %u, vaddr: 0x%lx\n",
4509 + page_size, vaddr);
4510 +- TEST_ASSERT(!pte->page_size,
4511 ++ TEST_ASSERT(!(*pte & PTE_LARGE_MASK),
4512 + "Cannot create page table at level: %u, vaddr: 0x%lx\n",
4513 + level, vaddr);
4514 + }
4515 +@@ -232,8 +198,8 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
4516 + enum x86_page_size page_size)
4517 + {
4518 + const uint64_t pg_size = 1ull << ((page_size * 9) + 12);
4519 +- struct pageUpperEntry *pml4e, *pdpe, *pde;
4520 +- struct pageTableEntry *pte;
4521 ++ uint64_t *pml4e, *pdpe, *pde;
4522 ++ uint64_t *pte;
4523 +
4524 + TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K,
4525 + "Unknown or unsupported guest mode, mode: 0x%x", vm->mode);
4526 +@@ -257,24 +223,22 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
4527 + */
4528 + pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift,
4529 + vaddr, paddr, 3, page_size);
4530 +- if (pml4e->page_size)
4531 ++ if (*pml4e & PTE_LARGE_MASK)
4532 + return;
4533 +
4534 +- pdpe = virt_create_upper_pte(vm, pml4e->pfn, vaddr, paddr, 2, page_size);
4535 +- if (pdpe->page_size)
4536 ++ pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, 2, page_size);
4537 ++ if (*pdpe & PTE_LARGE_MASK)
4538 + return;
4539 +
4540 +- pde = virt_create_upper_pte(vm, pdpe->pfn, vaddr, paddr, 1, page_size);
4541 +- if (pde->page_size)
4542 ++ pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, 1, page_size);
4543 ++ if (*pde & PTE_LARGE_MASK)
4544 + return;
4545 +
4546 + /* Fill in page table entry. */
4547 +- pte = virt_get_pte(vm, pde->pfn, vaddr, 0);
4548 +- TEST_ASSERT(!pte->present,
4549 ++ pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, 0);
4550 ++ TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
4551 + "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
4552 +- pte->pfn = paddr >> vm->page_shift;
4553 +- pte->writable = true;
4554 +- pte->present = 1;
4555 ++ *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
4556 + }
4557 +
4558 + void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
4559 +@@ -282,12 +246,12 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
4560 + __virt_pg_map(vm, vaddr, paddr, X86_PAGE_SIZE_4K);
4561 + }
4562 +
4563 +-static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
4564 ++static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
4565 + uint64_t vaddr)
4566 + {
4567 + uint16_t index[4];
4568 +- struct pageUpperEntry *pml4e, *pdpe, *pde;
4569 +- struct pageTableEntry *pte;
4570 ++ uint64_t *pml4e, *pdpe, *pde;
4571 ++ uint64_t *pte;
4572 + struct kvm_cpuid_entry2 *entry;
4573 + struct kvm_sregs sregs;
4574 + int max_phy_addr;
4575 +@@ -329,30 +293,29 @@ static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vc
4576 + index[3] = (vaddr >> 39) & 0x1ffu;
4577 +
4578 + pml4e = addr_gpa2hva(vm, vm->pgd);
4579 +- TEST_ASSERT(pml4e[index[3]].present,
4580 ++ TEST_ASSERT(pml4e[index[3]] & PTE_PRESENT_MASK,
4581 + "Expected pml4e to be present for gva: 0x%08lx", vaddr);
4582 +- TEST_ASSERT((*(uint64_t*)(&pml4e[index[3]]) &
4583 +- (rsvd_mask | (1ull << 7))) == 0,
4584 ++ TEST_ASSERT((pml4e[index[3]] & (rsvd_mask | PTE_LARGE_MASK)) == 0,
4585 + "Unexpected reserved bits set.");
4586 +
4587 +- pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size);
4588 +- TEST_ASSERT(pdpe[index[2]].present,
4589 ++ pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
4590 ++ TEST_ASSERT(pdpe[index[2]] & PTE_PRESENT_MASK,
4591 + "Expected pdpe to be present for gva: 0x%08lx", vaddr);
4592 +- TEST_ASSERT(pdpe[index[2]].page_size == 0,
4593 ++ TEST_ASSERT(!(pdpe[index[2]] & PTE_LARGE_MASK),
4594 + "Expected pdpe to map a pde not a 1-GByte page.");
4595 +- TEST_ASSERT((*(uint64_t*)(&pdpe[index[2]]) & rsvd_mask) == 0,
4596 ++ TEST_ASSERT((pdpe[index[2]] & rsvd_mask) == 0,
4597 + "Unexpected reserved bits set.");
4598 +
4599 +- pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size);
4600 +- TEST_ASSERT(pde[index[1]].present,
4601 ++ pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
4602 ++ TEST_ASSERT(pde[index[1]] & PTE_PRESENT_MASK,
4603 + "Expected pde to be present for gva: 0x%08lx", vaddr);
4604 +- TEST_ASSERT(pde[index[1]].page_size == 0,
4605 ++ TEST_ASSERT(!(pde[index[1]] & PTE_LARGE_MASK),
4606 + "Expected pde to map a pte not a 2-MByte page.");
4607 +- TEST_ASSERT((*(uint64_t*)(&pde[index[1]]) & rsvd_mask) == 0,
4608 ++ TEST_ASSERT((pde[index[1]] & rsvd_mask) == 0,
4609 + "Unexpected reserved bits set.");
4610 +
4611 +- pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size);
4612 +- TEST_ASSERT(pte[index[0]].present,
4613 ++ pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
4614 ++ TEST_ASSERT(pte[index[0]] & PTE_PRESENT_MASK,
4615 + "Expected pte to be present for gva: 0x%08lx", vaddr);
4616 +
4617 + return &pte[index[0]];
4618 +@@ -360,7 +323,7 @@ static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vc
4619 +
4620 + uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr)
4621 + {
4622 +- struct pageTableEntry *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
4623 ++ uint64_t *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
4624 +
4625 + return *(uint64_t *)pte;
4626 + }
4627 +@@ -368,18 +331,17 @@ uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr)
4628 + void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr,
4629 + uint64_t pte)
4630 + {
4631 +- struct pageTableEntry *new_pte = _vm_get_page_table_entry(vm, vcpuid,
4632 +- vaddr);
4633 ++ uint64_t *new_pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
4634 +
4635 + *(uint64_t *)new_pte = pte;
4636 + }
4637 +
4638 + void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
4639 + {
4640 +- struct pageUpperEntry *pml4e, *pml4e_start;
4641 +- struct pageUpperEntry *pdpe, *pdpe_start;
4642 +- struct pageUpperEntry *pde, *pde_start;
4643 +- struct pageTableEntry *pte, *pte_start;
4644 ++ uint64_t *pml4e, *pml4e_start;
4645 ++ uint64_t *pdpe, *pdpe_start;
4646 ++ uint64_t *pde, *pde_start;
4647 ++ uint64_t *pte, *pte_start;
4648 +
4649 + if (!vm->pgd_created)
4650 + return;
4651 +@@ -389,58 +351,58 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
4652 + fprintf(stream, "%*s index hvaddr gpaddr "
4653 + "addr w exec dirty\n",
4654 + indent, "");
4655 +- pml4e_start = (struct pageUpperEntry *) addr_gpa2hva(vm, vm->pgd);
4656 ++ pml4e_start = (uint64_t *) addr_gpa2hva(vm, vm->pgd);
4657 + for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
4658 + pml4e = &pml4e_start[n1];
4659 +- if (!pml4e->present)
4660 ++ if (!(*pml4e & PTE_PRESENT_MASK))
4661 + continue;
4662 +- fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10lx %u "
4663 ++ fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u "
4664 + " %u\n",
4665 + indent, "",
4666 + pml4e - pml4e_start, pml4e,
4667 +- addr_hva2gpa(vm, pml4e), (uint64_t) pml4e->pfn,
4668 +- pml4e->writable, pml4e->execute_disable);
4669 ++ addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e),
4670 ++ !!(*pml4e & PTE_WRITABLE_MASK), !!(*pml4e & PTE_NX_MASK));
4671 +
4672 +- pdpe_start = addr_gpa2hva(vm, pml4e->pfn * vm->page_size);
4673 ++ pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
4674 + for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
4675 + pdpe = &pdpe_start[n2];
4676 +- if (!pdpe->present)
4677 ++ if (!(*pdpe & PTE_PRESENT_MASK))
4678 + continue;
4679 +- fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10lx "
4680 ++ fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10llx "
4681 + "%u %u\n",
4682 + indent, "",
4683 + pdpe - pdpe_start, pdpe,
4684 + addr_hva2gpa(vm, pdpe),
4685 +- (uint64_t) pdpe->pfn, pdpe->writable,
4686 +- pdpe->execute_disable);
4687 ++ PTE_GET_PFN(*pdpe), !!(*pdpe & PTE_WRITABLE_MASK),
4688 ++ !!(*pdpe & PTE_NX_MASK));
4689 +
4690 +- pde_start = addr_gpa2hva(vm, pdpe->pfn * vm->page_size);
4691 ++ pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
4692 + for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
4693 + pde = &pde_start[n3];
4694 +- if (!pde->present)
4695 ++ if (!(*pde & PTE_PRESENT_MASK))
4696 + continue;
4697 + fprintf(stream, "%*spde 0x%-3zx %p "
4698 +- "0x%-12lx 0x%-10lx %u %u\n",
4699 ++ "0x%-12lx 0x%-10llx %u %u\n",
4700 + indent, "", pde - pde_start, pde,
4701 + addr_hva2gpa(vm, pde),
4702 +- (uint64_t) pde->pfn, pde->writable,
4703 +- pde->execute_disable);
4704 ++ PTE_GET_PFN(*pde), !!(*pde & PTE_WRITABLE_MASK),
4705 ++ !!(*pde & PTE_NX_MASK));
4706 +
4707 +- pte_start = addr_gpa2hva(vm, pde->pfn * vm->page_size);
4708 ++ pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
4709 + for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
4710 + pte = &pte_start[n4];
4711 +- if (!pte->present)
4712 ++ if (!(*pte & PTE_PRESENT_MASK))
4713 + continue;
4714 + fprintf(stream, "%*spte 0x%-3zx %p "
4715 +- "0x%-12lx 0x%-10lx %u %u "
4716 ++ "0x%-12lx 0x%-10llx %u %u "
4717 + " %u 0x%-10lx\n",
4718 + indent, "",
4719 + pte - pte_start, pte,
4720 + addr_hva2gpa(vm, pte),
4721 +- (uint64_t) pte->pfn,
4722 +- pte->writable,
4723 +- pte->execute_disable,
4724 +- pte->dirty,
4725 ++ PTE_GET_PFN(*pte),
4726 ++ !!(*pte & PTE_WRITABLE_MASK),
4727 ++ !!(*pte & PTE_NX_MASK),
4728 ++ !!(*pte & PTE_DIRTY_MASK),
4729 + ((uint64_t) n1 << 27)
4730 + | ((uint64_t) n2 << 18)
4731 + | ((uint64_t) n3 << 9)
4732 +@@ -558,8 +520,8 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
4733 + vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
4734 + {
4735 + uint16_t index[4];
4736 +- struct pageUpperEntry *pml4e, *pdpe, *pde;
4737 +- struct pageTableEntry *pte;
4738 ++ uint64_t *pml4e, *pdpe, *pde;
4739 ++ uint64_t *pte;
4740 +
4741 + TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
4742 + "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
4743 +@@ -572,22 +534,22 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
4744 + if (!vm->pgd_created)
4745 + goto unmapped_gva;
4746 + pml4e = addr_gpa2hva(vm, vm->pgd);
4747 +- if (!pml4e[index[3]].present)
4748 ++ if (!(pml4e[index[3]] & PTE_PRESENT_MASK))
4749 + goto unmapped_gva;
4750 +
4751 +- pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size);
4752 +- if (!pdpe[index[2]].present)
4753 ++ pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
4754 ++ if (!(pdpe[index[2]] & PTE_PRESENT_MASK))
4755 + goto unmapped_gva;
4756 +
4757 +- pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size);
4758 +- if (!pde[index[1]].present)
4759 ++ pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
4760 ++ if (!(pde[index[1]] & PTE_PRESENT_MASK))
4761 + goto unmapped_gva;
4762 +
4763 +- pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size);
4764 +- if (!pte[index[0]].present)
4765 ++ pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
4766 ++ if (!(pte[index[0]] & PTE_PRESENT_MASK))
4767 + goto unmapped_gva;
4768 +
4769 +- return (pte[index[0]].pfn * vm->page_size) + (gva & 0xfffu);
4770 ++ return (PTE_GET_PFN(pte[index[0]]) * vm->page_size) + (gva & 0xfffu);
4771 +
4772 + unmapped_gva:
4773 + TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
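
The processor.c rewrite above drops the pageUpperEntry/pageTableEntry bitfield structs in favour of raw uint64_t entries plus the mask macros added to processor.h; C bitfield layout is implementation-defined, while explicit masks pin each flag to its architectural bit. A minimal sketch, outside the selftest framework, of how the old bitfield idiom maps onto the mask-based one (PTE_* values as in the header change earlier in this patch):

    #include <assert.h>
    #include <stdint.h>

    #define PTE_PRESENT_MASK   (1ULL << 0)
    #define PTE_WRITABLE_MASK  (1ULL << 1)
    #define PTE_NX_MASK        (1ULL << 63)

    int main(void)
    {
            uint64_t pte = 0;

            /* Equivalent of the old pte->present = 1; pte->writable = 1; */
            pte |= PTE_PRESENT_MASK | PTE_WRITABLE_MASK;

            /* Equivalent of the old pte->present / pte->execute_disable reads. */
            assert(pte & PTE_PRESENT_MASK);
            assert(!(pte & PTE_NX_MASK));
            return 0;
    }
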
4774 +diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
4775 +index a3402cd8d5b68..9ff22f28032dd 100755
4776 +--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
4777 ++++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
4778 +@@ -61,9 +61,12 @@ setup_prepare()
4779 +
4780 + vrf_prepare
4781 + mirror_gre_topo_create
4782 ++ # Avoid changing br1's PVID while it is operational as a L3 interface.
4783 ++ ip link set dev br1 down
4784 +
4785 + ip link set dev $swp3 master br1
4786 + bridge vlan add dev br1 vid 555 pvid untagged self
4787 ++ ip link set dev br1 up
4788 + ip address add dev br1 192.0.2.129/28
4789 + ip address add dev br1 2001:db8:2::1/64
4790 +
4791 +diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
4792 +index 59067f64b7753..2672ac0b6d1f3 100644
4793 +--- a/tools/testing/selftests/net/so_txtime.c
4794 ++++ b/tools/testing/selftests/net/so_txtime.c
4795 +@@ -421,7 +421,7 @@ static void usage(const char *progname)
4796 + "Options:\n"
4797 + " -4 only IPv4\n"
4798 + " -6 only IPv6\n"
4799 +- " -c <clock> monotonic (default) or tai\n"
4800 ++ " -c <clock> monotonic or tai (default)\n"
4801 + " -D <addr> destination IP address (server)\n"
4802 + " -S <addr> source IP address (client)\n"
4803 + " -r run rx mode\n"
4804 +@@ -475,7 +475,7 @@ static void parse_opts(int argc, char **argv)
4805 + cfg_rx = true;
4806 + break;
4807 + case 't':
4808 +- cfg_start_time_ns = strtol(optarg, NULL, 0);
4809 ++ cfg_start_time_ns = strtoll(optarg, NULL, 0);
4810 + break;
4811 + case 'm':
4812 + cfg_mark = strtol(optarg, NULL, 0);
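
The so_txtime change above matters mainly on 32-bit builds: cfg_start_time_ns is a 64-bit nanosecond value, and strtol() returns a long, which is only 32 bits there and clamps large inputs to LONG_MAX with ERANGE, while strtoll() always parses into at least 64 bits. A small illustration with a made-up timestamp value:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            const char *arg = "10000000000";           /* 10 seconds in nanoseconds */

            long      narrow = strtol(arg, NULL, 0);   /* clamps to LONG_MAX if long is 32-bit */
            long long wide   = strtoll(arg, NULL, 0);  /* full 64-bit range everywhere */

            printf("strtol  -> %ld\n", narrow);
            printf("strtoll -> %lld\n", wide);
            return 0;
    }
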
4813 +diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
4814 +index 9d126d7fabdb7..313bb0cbfb1eb 100644
4815 +--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
4816 ++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
4817 +@@ -955,7 +955,7 @@ TEST(ERRNO_valid)
4818 + ASSERT_EQ(0, ret);
4819 +
4820 + EXPECT_EQ(parent, syscall(__NR_getppid));
4821 +- EXPECT_EQ(-1, read(0, NULL, 0));
4822 ++ EXPECT_EQ(-1, read(-1, NULL, 0));
4823 + EXPECT_EQ(E2BIG, errno);
4824 + }
4825 +
4826 +@@ -974,7 +974,7 @@ TEST(ERRNO_zero)
4827 +
4828 + EXPECT_EQ(parent, syscall(__NR_getppid));
4829 + /* "errno" of 0 is ok. */
4830 +- EXPECT_EQ(0, read(0, NULL, 0));
4831 ++ EXPECT_EQ(0, read(-1, NULL, 0));
4832 + }
4833 +
4834 + /*
4835 +@@ -995,7 +995,7 @@ TEST(ERRNO_capped)
4836 + ASSERT_EQ(0, ret);
4837 +
4838 + EXPECT_EQ(parent, syscall(__NR_getppid));
4839 +- EXPECT_EQ(-1, read(0, NULL, 0));
4840 ++ EXPECT_EQ(-1, read(-1, NULL, 0));
4841 + EXPECT_EQ(4095, errno);
4842 + }
4843 +
4844 +@@ -1026,7 +1026,7 @@ TEST(ERRNO_order)
4845 + ASSERT_EQ(0, ret);
4846 +
4847 + EXPECT_EQ(parent, syscall(__NR_getppid));
4848 +- EXPECT_EQ(-1, read(0, NULL, 0));
4849 ++ EXPECT_EQ(-1, read(-1, NULL, 0));
4850 + EXPECT_EQ(12, errno);
4851 + }
4852 +
4853 +@@ -2623,7 +2623,7 @@ void *tsync_sibling(void *data)
4854 + ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
4855 + if (!ret)
4856 + return (void *)SIBLING_EXIT_NEWPRIVS;
4857 +- read(0, NULL, 0);
4858 ++ read(-1, NULL, 0);
4859 + return (void *)SIBLING_EXIT_UNKILLED;
4860 + }
4861 +
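
The seccomp changes above swap read(0, ...) for read(-1, ...) in tests that install SECCOMP_RET_ERRNO filters on the read() syscall. With the filter in place the call never reaches the kernel's fd lookup, so the descriptor value does not matter; with fd -1 the call also fails deterministically with EBADF if a filter is ever not applied, instead of depending on whatever stdin happens to be. A tiny unfiltered demonstration of that fallback behaviour:

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* No seccomp filter here: fd -1 fails with EBADF regardless of stdin. */
            ssize_t n = read(-1, NULL, 0);

            printf("ret=%zd errno=%d (EBADF=%d)\n", n, errno, EBADF);
            return 0;
    }
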
4862 +diff --git a/tools/testing/selftests/vm/mremap_test.c b/tools/testing/selftests/vm/mremap_test.c
4863 +index 58775dab3cc6c..5ef41640d657a 100644
4864 +--- a/tools/testing/selftests/vm/mremap_test.c
4865 ++++ b/tools/testing/selftests/vm/mremap_test.c
4866 +@@ -118,6 +118,59 @@ static unsigned long long get_mmap_min_addr(void)
4867 + return addr;
4868 + }
4869 +
4870 ++/*
4871 ++ * Returns false if the requested remap region overlaps with an
4872 ++ * existing mapping (e.g text, stack) else returns true.
4873 ++ */
4874 ++static bool is_remap_region_valid(void *addr, unsigned long long size)
4875 ++{
4876 ++ void *remap_addr = NULL;
4877 ++ bool ret = true;
4878 ++
4879 ++ /* Use MAP_FIXED_NOREPLACE flag to ensure region is not mapped */
4880 ++ remap_addr = mmap(addr, size, PROT_READ | PROT_WRITE,
4881 ++ MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
4882 ++ -1, 0);
4883 ++
4884 ++ if (remap_addr == MAP_FAILED) {
4885 ++ if (errno == EEXIST)
4886 ++ ret = false;
4887 ++ } else {
4888 ++ munmap(remap_addr, size);
4889 ++ }
4890 ++
4891 ++ return ret;
4892 ++}
4893 ++
4894 ++/* Returns mmap_min_addr sysctl tunable from procfs */
4895 ++static unsigned long long get_mmap_min_addr(void)
4896 ++{
4897 ++ FILE *fp;
4898 ++ int n_matched;
4899 ++ static unsigned long long addr;
4900 ++
4901 ++ if (addr)
4902 ++ return addr;
4903 ++
4904 ++ fp = fopen("/proc/sys/vm/mmap_min_addr", "r");
4905 ++ if (fp == NULL) {
4906 ++ ksft_print_msg("Failed to open /proc/sys/vm/mmap_min_addr: %s\n",
4907 ++ strerror(errno));
4908 ++ exit(KSFT_SKIP);
4909 ++ }
4910 ++
4911 ++ n_matched = fscanf(fp, "%llu", &addr);
4912 ++ if (n_matched != 1) {
4913 ++ ksft_print_msg("Failed to read /proc/sys/vm/mmap_min_addr: %s\n",
4914 ++ strerror(errno));
4915 ++ fclose(fp);
4916 ++ exit(KSFT_SKIP);
4917 ++ }
4918 ++
4919 ++ fclose(fp);
4920 ++ return addr;
4921 ++}
4922 ++
4923 + /*
4924 + * Returns the start address of the mapping on success, else returns
4925 + * NULL on failure.