From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.15 commit in: /
Date: Thu, 12 May 2022 11:28:42
Message-Id: 1652354902.59a8899d2ec7373d1d01b2cdc9cbeba8511a398f.mpagano@gentoo
1 commit: 59a8899d2ec7373d1d01b2cdc9cbeba8511a398f
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu May 12 11:28:22 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu May 12 11:28:22 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=59a8899d
7
8 Linux patch 5.15.39
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1038_linux-5.15.39.patch | 4667 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4671 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index da0799e5..f0426435 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -195,6 +195,10 @@ Patch: 1037_linux-5.15.38.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.15.38
23
24 +Patch: 1038_linux-5.15.39.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.15.39
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1038_linux-5.15.39.patch b/1038_linux-5.15.39.patch
33 new file mode 100644
34 index 00000000..d9bd272c
35 --- /dev/null
36 +++ b/1038_linux-5.15.39.patch
37 @@ -0,0 +1,4667 @@
38 +diff --git a/Makefile b/Makefile
39 +index 73b884c9baa40..ceb42be114388 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 15
46 +-SUBLEVEL = 38
47 ++SUBLEVEL = 39
48 + EXTRAVERSION =
49 + NAME = Trick or Treat
50 +
51 +diff --git a/arch/mips/include/asm/timex.h b/arch/mips/include/asm/timex.h
52 +index b05bb70a2e46f..8026baf46e729 100644
53 +--- a/arch/mips/include/asm/timex.h
54 ++++ b/arch/mips/include/asm/timex.h
55 +@@ -40,9 +40,9 @@
56 + typedef unsigned int cycles_t;
57 +
58 + /*
59 +- * On R4000/R4400 before version 5.0 an erratum exists such that if the
60 +- * cycle counter is read in the exact moment that it is matching the
61 +- * compare register, no interrupt will be generated.
62 ++ * On R4000/R4400 an erratum exists such that if the cycle counter is
63 ++ * read in the exact moment that it is matching the compare register,
64 ++ * no interrupt will be generated.
65 + *
66 + * There is a suggested workaround and also the erratum can't strike if
67 + * the compare interrupt isn't being used as the clock source device.
68 +@@ -63,7 +63,7 @@ static inline int can_use_mips_counter(unsigned int prid)
69 + if (!__builtin_constant_p(cpu_has_counter))
70 + asm volatile("" : "=m" (cpu_data[0].options));
71 + if (likely(cpu_has_counter &&
72 +- prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0))))
73 ++ prid > (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15))))
74 + return 1;
75 + else
76 + return 0;
77 +diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
78 +index caa01457dce60..ed339d7979f3f 100644
79 +--- a/arch/mips/kernel/time.c
80 ++++ b/arch/mips/kernel/time.c
81 +@@ -141,15 +141,10 @@ static __init int cpu_has_mfc0_count_bug(void)
82 + case CPU_R4400MC:
83 + /*
84 + * The published errata for the R4400 up to 3.0 say the CPU
85 +- * has the mfc0 from count bug.
86 ++ * has the mfc0 from count bug. This seems the last version
87 ++ * produced.
88 + */
89 +- if ((current_cpu_data.processor_id & 0xff) <= 0x30)
90 +- return 1;
91 +-
92 +- /*
93 +- * we assume newer revisions are ok
94 +- */
95 +- return 0;
96 ++ return 1;
97 + }
98 +
99 + return 0;
100 +diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
101 +index 1b6129e7d776b..b861bbbc87178 100644
102 +--- a/arch/parisc/kernel/processor.c
103 ++++ b/arch/parisc/kernel/processor.c
104 +@@ -418,8 +418,7 @@ show_cpuinfo (struct seq_file *m, void *v)
105 + }
106 + seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);
107 +
108 +- seq_printf(m, "model\t\t: %s\n"
109 +- "model name\t: %s\n",
110 ++ seq_printf(m, "model\t\t: %s - %s\n",
111 + boot_cpu_data.pdc.sys_model_name,
112 + cpuinfo->dev ?
113 + cpuinfo->dev->name : "Unknown");
114 +diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
115 +index cceb09855e03f..3fb86ee507dd5 100644
116 +--- a/arch/parisc/kernel/setup.c
117 ++++ b/arch/parisc/kernel/setup.c
118 +@@ -150,6 +150,8 @@ void __init setup_arch(char **cmdline_p)
119 + #ifdef CONFIG_PA11
120 + dma_ops_init();
121 + #endif
122 ++
123 ++ clear_sched_clock_stable();
124 + }
125 +
126 + /*
127 +diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
128 +index 061119a56fbe8..d8e59a1000ab7 100644
129 +--- a/arch/parisc/kernel/time.c
130 ++++ b/arch/parisc/kernel/time.c
131 +@@ -249,13 +249,9 @@ void __init time_init(void)
132 + static int __init init_cr16_clocksource(void)
133 + {
134 + /*
135 +- * The cr16 interval timers are not syncronized across CPUs, even if
136 +- * they share the same socket.
137 ++ * The cr16 interval timers are not synchronized across CPUs.
138 + */
139 + if (num_online_cpus() > 1 && !running_on_qemu) {
140 +- /* mark sched_clock unstable */
141 +- clear_sched_clock_stable();
142 +-
143 + clocksource_cr16.name = "cr16_unstable";
144 + clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
145 + clocksource_cr16.rating = 0;
146 +diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
147 +index 3de593b26850e..7f130ac3b9f9a 100644
148 +--- a/arch/riscv/mm/init.c
149 ++++ b/arch/riscv/mm/init.c
150 +@@ -218,8 +218,25 @@ static void __init setup_bootmem(void)
151 + * early_init_fdt_reserve_self() since __pa() does
152 + * not work for DTB pointers that are fixmap addresses
153 + */
154 +- if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
155 +- memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
156 ++ if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) {
157 ++ /*
158 ++ * In case the DTB is not located in a memory region we won't
159 ++ * be able to locate it later on via the linear mapping and
160 ++ * get a segfault when accessing it via __va(dtb_early_pa).
161 ++ * To avoid this situation copy DTB to a memory region.
162 ++ * Note that memblock_phys_alloc will also reserve DTB region.
163 ++ */
164 ++ if (!memblock_is_memory(dtb_early_pa)) {
165 ++ size_t fdt_size = fdt_totalsize(dtb_early_va);
166 ++ phys_addr_t new_dtb_early_pa = memblock_phys_alloc(fdt_size, PAGE_SIZE);
167 ++ void *new_dtb_early_va = early_memremap(new_dtb_early_pa, fdt_size);
168 ++
169 ++ memcpy(new_dtb_early_va, dtb_early_va, fdt_size);
170 ++ early_memunmap(new_dtb_early_va, fdt_size);
171 ++ _dtb_early_pa = new_dtb_early_pa;
172 ++ } else
173 ++ memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
174 ++ }
175 +
176 + early_init_fdt_scan_reserved_mem();
177 + dma_contiguous_reserve(dma32_phys_limit);
178 +diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
179 +index 7ada7bd03a327..759e1cef5e695 100644
180 +--- a/arch/x86/kernel/fpu/core.c
181 ++++ b/arch/x86/kernel/fpu/core.c
182 +@@ -25,17 +25,7 @@
183 + */
184 + union fpregs_state init_fpstate __ro_after_init;
185 +
186 +-/*
187 +- * Track whether the kernel is using the FPU state
188 +- * currently.
189 +- *
190 +- * This flag is used:
191 +- *
192 +- * - by IRQ context code to potentially use the FPU
193 +- * if it's unused.
194 +- *
195 +- * - to debug kernel_fpu_begin()/end() correctness
196 +- */
197 ++/* Track in-kernel FPU usage */
198 + static DEFINE_PER_CPU(bool, in_kernel_fpu);
199 +
200 + /*
201 +@@ -43,42 +33,37 @@ static DEFINE_PER_CPU(bool, in_kernel_fpu);
202 + */
203 + DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
204 +
205 +-static bool kernel_fpu_disabled(void)
206 +-{
207 +- return this_cpu_read(in_kernel_fpu);
208 +-}
209 +-
210 +-static bool interrupted_kernel_fpu_idle(void)
211 +-{
212 +- return !kernel_fpu_disabled();
213 +-}
214 +-
215 +-/*
216 +- * Were we in user mode (or vm86 mode) when we were
217 +- * interrupted?
218 +- *
219 +- * Doing kernel_fpu_begin/end() is ok if we are running
220 +- * in an interrupt context from user mode - we'll just
221 +- * save the FPU state as required.
222 +- */
223 +-static bool interrupted_user_mode(void)
224 +-{
225 +- struct pt_regs *regs = get_irq_regs();
226 +- return regs && user_mode(regs);
227 +-}
228 +-
229 + /*
230 + * Can we use the FPU in kernel mode with the
231 + * whole "kernel_fpu_begin/end()" sequence?
232 +- *
233 +- * It's always ok in process context (ie "not interrupt")
234 +- * but it is sometimes ok even from an irq.
235 + */
236 + bool irq_fpu_usable(void)
237 + {
238 +- return !in_interrupt() ||
239 +- interrupted_user_mode() ||
240 +- interrupted_kernel_fpu_idle();
241 ++ if (WARN_ON_ONCE(in_nmi()))
242 ++ return false;
243 ++
244 ++ /* In kernel FPU usage already active? */
245 ++ if (this_cpu_read(in_kernel_fpu))
246 ++ return false;
247 ++
248 ++ /*
249 ++ * When not in NMI or hard interrupt context, FPU can be used in:
250 ++ *
251 ++ * - Task context except from within fpregs_lock()'ed critical
252 ++ * regions.
253 ++ *
254 ++ * - Soft interrupt processing context which cannot happen
255 ++ * while in a fpregs_lock()'ed critical region.
256 ++ */
257 ++ if (!in_hardirq())
258 ++ return true;
259 ++
260 ++ /*
261 ++ * In hard interrupt context it's safe when soft interrupts
262 ++ * are enabled, which means the interrupt did not hit in
263 ++ * a fpregs_lock()'ed critical region.
264 ++ */
265 ++ return !softirq_count();
266 + }
267 + EXPORT_SYMBOL(irq_fpu_usable);
268 +
269 +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
270 +index bd7b65081eb0a..d36b58e705b6c 100644
271 +--- a/arch/x86/kernel/kvm.c
272 ++++ b/arch/x86/kernel/kvm.c
273 +@@ -66,6 +66,7 @@ static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __align
274 + DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
275 + static int has_steal_clock = 0;
276 +
277 ++static int has_guest_poll = 0;
278 + /*
279 + * No need for any "IO delay" on KVM
280 + */
281 +@@ -650,14 +651,26 @@ static int kvm_cpu_down_prepare(unsigned int cpu)
282 +
283 + static int kvm_suspend(void)
284 + {
285 ++ u64 val = 0;
286 ++
287 + kvm_guest_cpu_offline(false);
288 +
289 ++#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
290 ++ if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
291 ++ rdmsrl(MSR_KVM_POLL_CONTROL, val);
292 ++ has_guest_poll = !(val & 1);
293 ++#endif
294 + return 0;
295 + }
296 +
297 + static void kvm_resume(void)
298 + {
299 + kvm_cpu_online(raw_smp_processor_id());
300 ++
301 ++#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
302 ++ if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll)
303 ++ wrmsrl(MSR_KVM_POLL_CONTROL, 0);
304 ++#endif
305 + }
306 +
307 + static struct syscore_ops kvm_syscore_ops = {
308 +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
309 +index 5f1d4a5aa8716..b17c9b00669e5 100644
310 +--- a/arch/x86/kvm/cpuid.c
311 ++++ b/arch/x86/kvm/cpuid.c
312 +@@ -725,6 +725,11 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
313 + union cpuid10_eax eax;
314 + union cpuid10_edx edx;
315 +
316 ++ if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
317 ++ entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
318 ++ break;
319 ++ }
320 ++
321 + perf_get_x86_pmu_capability(&cap);
322 +
323 + /*
324 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
325 +index 4d92fb4fdf699..493d636e62316 100644
326 +--- a/arch/x86/kvm/lapic.c
327 ++++ b/arch/x86/kvm/lapic.c
328 +@@ -113,7 +113,8 @@ static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
329 +
330 + static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
331 + {
332 +- return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
333 ++ return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
334 ++ (kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
335 + }
336 +
337 + bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
338 +@@ -2125,10 +2126,9 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
339 + break;
340 +
341 + case APIC_SELF_IPI:
342 +- if (apic_x2apic_mode(apic)) {
343 +- kvm_lapic_reg_write(apic, APIC_ICR,
344 +- APIC_DEST_SELF | (val & APIC_VECTOR_MASK));
345 +- } else
346 ++ if (apic_x2apic_mode(apic))
347 ++ kvm_apic_send_ipi(apic, APIC_DEST_SELF | (val & APIC_VECTOR_MASK), 0);
348 ++ else
349 + ret = 1;
350 + break;
351 + default:
352 +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
353 +index 34e828badc511..806f9d42bcce8 100644
354 +--- a/arch/x86/kvm/mmu/mmu.c
355 ++++ b/arch/x86/kvm/mmu/mmu.c
356 +@@ -3314,6 +3314,8 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
357 + return;
358 +
359 + sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
360 ++ if (WARN_ON(!sp))
361 ++ return;
362 +
363 + if (is_tdp_mmu_page(sp))
364 + kvm_tdp_mmu_put_root(kvm, sp, false);
365 +diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
366 +index f337ce7e898e3..d35c94e13afb0 100644
367 +--- a/arch/x86/kvm/svm/pmu.c
368 ++++ b/arch/x86/kvm/svm/pmu.c
369 +@@ -44,6 +44,22 @@ static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
370 + [7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
371 + };
372 +
373 ++/* duplicated from amd_f17h_perfmon_event_map. */
374 ++static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
375 ++ [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
376 ++ [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
377 ++ [2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
378 ++ [3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
379 ++ [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
380 ++ [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
381 ++ [6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
382 ++ [7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
383 ++};
384 ++
385 ++/* amd_pmc_perf_hw_id depends on these being the same size */
386 ++static_assert(ARRAY_SIZE(amd_event_mapping) ==
387 ++ ARRAY_SIZE(amd_f17h_event_mapping));
388 ++
389 + static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
390 + {
391 + struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
392 +@@ -136,19 +152,25 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
393 +
394 + static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
395 + {
396 ++ struct kvm_event_hw_type_mapping *event_mapping;
397 + u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
398 + u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
399 + int i;
400 +
401 ++ if (guest_cpuid_family(pmc->vcpu) >= 0x17)
402 ++ event_mapping = amd_f17h_event_mapping;
403 ++ else
404 ++ event_mapping = amd_event_mapping;
405 ++
406 + for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
407 +- if (amd_event_mapping[i].eventsel == event_select
408 +- && amd_event_mapping[i].unit_mask == unit_mask)
409 ++ if (event_mapping[i].eventsel == event_select
410 ++ && event_mapping[i].unit_mask == unit_mask)
411 + break;
412 +
413 + if (i == ARRAY_SIZE(amd_event_mapping))
414 + return PERF_COUNT_HW_MAX;
415 +
416 +- return amd_event_mapping[i].event_type;
417 ++ return event_mapping[i].event_type;
418 + }
419 +
420 + /* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
421 +diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
422 +index 54be88167c60b..f3b3953cac834 100644
423 +--- a/drivers/firewire/core-card.c
424 ++++ b/drivers/firewire/core-card.c
425 +@@ -668,6 +668,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
426 + void fw_core_remove_card(struct fw_card *card)
427 + {
428 + struct fw_card_driver dummy_driver = dummy_driver_template;
429 ++ unsigned long flags;
430 +
431 + card->driver->update_phy_reg(card, 4,
432 + PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
433 +@@ -682,7 +683,9 @@ void fw_core_remove_card(struct fw_card *card)
434 + dummy_driver.stop_iso = card->driver->stop_iso;
435 + card->driver = &dummy_driver;
436 +
437 ++ spin_lock_irqsave(&card->lock, flags);
438 + fw_destroy_nodes(card);
439 ++ spin_unlock_irqrestore(&card->lock, flags);
440 +
441 + /* Wait for all users, especially device workqueue jobs, to finish. */
442 + fw_card_put(card);
443 +diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
444 +index fb6c651214f32..b0cc3f1e9bb00 100644
445 +--- a/drivers/firewire/core-cdev.c
446 ++++ b/drivers/firewire/core-cdev.c
447 +@@ -1480,6 +1480,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
448 + {
449 + struct outbound_phy_packet_event *e =
450 + container_of(packet, struct outbound_phy_packet_event, p);
451 ++ struct client *e_client;
452 +
453 + switch (status) {
454 + /* expected: */
455 +@@ -1496,9 +1497,10 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
456 + }
457 + e->phy_packet.data[0] = packet->timestamp;
458 +
459 ++ e_client = e->client;
460 + queue_event(e->client, &e->event, &e->phy_packet,
461 + sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
462 +- client_put(e->client);
463 ++ client_put(e_client);
464 + }
465 +
466 + static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
467 +diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
468 +index b63d55f5ebd33..f40c815343812 100644
469 +--- a/drivers/firewire/core-topology.c
470 ++++ b/drivers/firewire/core-topology.c
471 +@@ -375,16 +375,13 @@ static void report_found_node(struct fw_card *card,
472 + card->bm_retries = 0;
473 + }
474 +
475 ++/* Must be called with card->lock held */
476 + void fw_destroy_nodes(struct fw_card *card)
477 + {
478 +- unsigned long flags;
479 +-
480 +- spin_lock_irqsave(&card->lock, flags);
481 + card->color++;
482 + if (card->local_node != NULL)
483 + for_each_fw_node(card, card->local_node, report_lost_node);
484 + card->local_node = NULL;
485 +- spin_unlock_irqrestore(&card->lock, flags);
486 + }
487 +
488 + static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
489 +@@ -510,6 +507,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
490 + struct fw_node *local_node;
491 + unsigned long flags;
492 +
493 ++ spin_lock_irqsave(&card->lock, flags);
494 ++
495 + /*
496 + * If the selfID buffer is not the immediate successor of the
497 + * previously processed one, we cannot reliably compare the
498 +@@ -521,8 +520,6 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
499 + card->bm_retries = 0;
500 + }
501 +
502 +- spin_lock_irqsave(&card->lock, flags);
503 +-
504 + card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
505 + card->node_id = node_id;
506 + /*
507 +diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
508 +index ac487c96bb717..6c20815cc8d16 100644
509 +--- a/drivers/firewire/core-transaction.c
510 ++++ b/drivers/firewire/core-transaction.c
511 +@@ -73,24 +73,25 @@ static int try_cancel_split_timeout(struct fw_transaction *t)
512 + static int close_transaction(struct fw_transaction *transaction,
513 + struct fw_card *card, int rcode)
514 + {
515 +- struct fw_transaction *t;
516 ++ struct fw_transaction *t = NULL, *iter;
517 + unsigned long flags;
518 +
519 + spin_lock_irqsave(&card->lock, flags);
520 +- list_for_each_entry(t, &card->transaction_list, link) {
521 +- if (t == transaction) {
522 +- if (!try_cancel_split_timeout(t)) {
523 ++ list_for_each_entry(iter, &card->transaction_list, link) {
524 ++ if (iter == transaction) {
525 ++ if (!try_cancel_split_timeout(iter)) {
526 + spin_unlock_irqrestore(&card->lock, flags);
527 + goto timed_out;
528 + }
529 +- list_del_init(&t->link);
530 +- card->tlabel_mask &= ~(1ULL << t->tlabel);
531 ++ list_del_init(&iter->link);
532 ++ card->tlabel_mask &= ~(1ULL << iter->tlabel);
533 ++ t = iter;
534 + break;
535 + }
536 + }
537 + spin_unlock_irqrestore(&card->lock, flags);
538 +
539 +- if (&t->link != &card->transaction_list) {
540 ++ if (t) {
541 + t->callback(card, rcode, NULL, 0, t->callback_data);
542 + return 0;
543 + }
544 +@@ -935,7 +936,7 @@ EXPORT_SYMBOL(fw_core_handle_request);
545 +
546 + void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
547 + {
548 +- struct fw_transaction *t;
549 ++ struct fw_transaction *t = NULL, *iter;
550 + unsigned long flags;
551 + u32 *data;
552 + size_t data_length;
553 +@@ -947,20 +948,21 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
554 + rcode = HEADER_GET_RCODE(p->header[1]);
555 +
556 + spin_lock_irqsave(&card->lock, flags);
557 +- list_for_each_entry(t, &card->transaction_list, link) {
558 +- if (t->node_id == source && t->tlabel == tlabel) {
559 +- if (!try_cancel_split_timeout(t)) {
560 ++ list_for_each_entry(iter, &card->transaction_list, link) {
561 ++ if (iter->node_id == source && iter->tlabel == tlabel) {
562 ++ if (!try_cancel_split_timeout(iter)) {
563 + spin_unlock_irqrestore(&card->lock, flags);
564 + goto timed_out;
565 + }
566 +- list_del_init(&t->link);
567 +- card->tlabel_mask &= ~(1ULL << t->tlabel);
568 ++ list_del_init(&iter->link);
569 ++ card->tlabel_mask &= ~(1ULL << iter->tlabel);
570 ++ t = iter;
571 + break;
572 + }
573 + }
574 + spin_unlock_irqrestore(&card->lock, flags);
575 +
576 +- if (&t->link == &card->transaction_list) {
577 ++ if (!t) {
578 + timed_out:
579 + fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
580 + source, tlabel);
581 +diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
582 +index 4d5054211550b..2ceed9287435f 100644
583 +--- a/drivers/firewire/sbp2.c
584 ++++ b/drivers/firewire/sbp2.c
585 +@@ -408,7 +408,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
586 + void *payload, size_t length, void *callback_data)
587 + {
588 + struct sbp2_logical_unit *lu = callback_data;
589 +- struct sbp2_orb *orb;
590 ++ struct sbp2_orb *orb = NULL, *iter;
591 + struct sbp2_status status;
592 + unsigned long flags;
593 +
594 +@@ -433,17 +433,18 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
595 +
596 + /* Lookup the orb corresponding to this status write. */
597 + spin_lock_irqsave(&lu->tgt->lock, flags);
598 +- list_for_each_entry(orb, &lu->orb_list, link) {
599 ++ list_for_each_entry(iter, &lu->orb_list, link) {
600 + if (STATUS_GET_ORB_HIGH(status) == 0 &&
601 +- STATUS_GET_ORB_LOW(status) == orb->request_bus) {
602 +- orb->rcode = RCODE_COMPLETE;
603 +- list_del(&orb->link);
604 ++ STATUS_GET_ORB_LOW(status) == iter->request_bus) {
605 ++ iter->rcode = RCODE_COMPLETE;
606 ++ list_del(&iter->link);
607 ++ orb = iter;
608 + break;
609 + }
610 + }
611 + spin_unlock_irqrestore(&lu->tgt->lock, flags);
612 +
613 +- if (&orb->link != &lu->orb_list) {
614 ++ if (orb) {
615 + orb->callback(orb, &status);
616 + kref_put(&orb->kref, free_orb); /* orb callback reference */
617 + } else {
618 +diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
619 +index 8f429d9f36616..ad8822da7c27a 100644
620 +--- a/drivers/gpio/gpio-mvebu.c
621 ++++ b/drivers/gpio/gpio-mvebu.c
622 +@@ -871,13 +871,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
623 + mvpwm->chip.dev = dev;
624 + mvpwm->chip.ops = &mvebu_pwm_ops;
625 + mvpwm->chip.npwm = mvchip->chip.ngpio;
626 +- /*
627 +- * There may already be some PWM allocated, so we can't force
628 +- * mvpwm->chip.base to a fixed point like mvchip->chip.base.
629 +- * So, we let pwmchip_add() do the numbering and take the next free
630 +- * region.
631 +- */
632 +- mvpwm->chip.base = -1;
633 +
634 + spin_lock_init(&mvpwm->lock);
635 +
636 +diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
637 +index d2fe76f3f34fd..8726921a11294 100644
638 +--- a/drivers/gpio/gpio-pca953x.c
639 ++++ b/drivers/gpio/gpio-pca953x.c
640 +@@ -762,11 +762,11 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
641 + bitmap_xor(cur_stat, new_stat, old_stat, gc->ngpio);
642 + bitmap_and(trigger, cur_stat, chip->irq_mask, gc->ngpio);
643 +
644 ++ bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
645 ++
646 + if (bitmap_empty(trigger, gc->ngpio))
647 + return false;
648 +
649 +- bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
650 +-
651 + bitmap_and(cur_stat, chip->irq_trig_fall, old_stat, gc->ngpio);
652 + bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio);
653 + bitmap_or(new_stat, old_stat, cur_stat, gc->ngpio);
654 +diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c
655 +index 47455810bdb91..e6534ea1eaa7a 100644
656 +--- a/drivers/gpio/gpio-visconti.c
657 ++++ b/drivers/gpio/gpio-visconti.c
658 +@@ -130,7 +130,6 @@ static int visconti_gpio_probe(struct platform_device *pdev)
659 + struct gpio_irq_chip *girq;
660 + struct irq_domain *parent;
661 + struct device_node *irq_parent;
662 +- struct fwnode_handle *fwnode;
663 + int ret;
664 +
665 + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
666 +@@ -150,14 +149,12 @@ static int visconti_gpio_probe(struct platform_device *pdev)
667 + }
668 +
669 + parent = irq_find_host(irq_parent);
670 ++ of_node_put(irq_parent);
671 + if (!parent) {
672 + dev_err(dev, "No IRQ parent domain\n");
673 + return -ENODEV;
674 + }
675 +
676 +- fwnode = of_node_to_fwnode(irq_parent);
677 +- of_node_put(irq_parent);
678 +-
679 + ret = bgpio_init(&priv->gpio_chip, dev, 4,
680 + priv->base + GPIO_IDATA,
681 + priv->base + GPIO_OSET,
682 +@@ -180,7 +177,7 @@ static int visconti_gpio_probe(struct platform_device *pdev)
683 +
684 + girq = &priv->gpio_chip.irq;
685 + girq->chip = irq_chip;
686 +- girq->fwnode = fwnode;
687 ++ girq->fwnode = of_node_to_fwnode(dev->of_node);
688 + girq->parent_domain = parent;
689 + girq->child_to_parent_hwirq = visconti_gpio_child_to_parent_hwirq;
690 + girq->populate_parent_alloc_arg = visconti_gpio_populate_parent_fwspec;
691 +diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
692 +index 0ad288ab6262d..a5b34c2487679 100644
693 +--- a/drivers/gpio/gpiolib-of.c
694 ++++ b/drivers/gpio/gpiolib-of.c
695 +@@ -912,7 +912,7 @@ static void of_gpiochip_init_valid_mask(struct gpio_chip *chip)
696 + i, &start);
697 + of_property_read_u32_index(np, "gpio-reserved-ranges",
698 + i + 1, &count);
699 +- if (start >= chip->ngpio || start + count >= chip->ngpio)
700 ++ if (start >= chip->ngpio || start + count > chip->ngpio)
701 + continue;
702 +
703 + bitmap_clear(chip->valid_mask, start, count);
704 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
705 +index ec30d81586a79..348629ea0e153 100644
706 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
707 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
708 +@@ -1176,7 +1176,7 @@ static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
709 + return r;
710 + }
711 +
712 +- *val = amdgpu_bo_evict_vram(adev);
713 ++ *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
714 +
715 + pm_runtime_mark_last_busy(dev->dev);
716 + pm_runtime_put_autosuspend(dev->dev);
717 +@@ -1189,17 +1189,15 @@ static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
718 + {
719 + struct amdgpu_device *adev = (struct amdgpu_device *)data;
720 + struct drm_device *dev = adev_to_drm(adev);
721 +- struct ttm_resource_manager *man;
722 + int r;
723 +
724 + r = pm_runtime_get_sync(dev->dev);
725 + if (r < 0) {
726 +- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
727 ++ pm_runtime_put_autosuspend(dev->dev);
728 + return r;
729 + }
730 +
731 +- man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
732 +- *val = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
733 ++ *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT);
734 +
735 + pm_runtime_mark_last_busy(dev->dev);
736 + pm_runtime_put_autosuspend(dev->dev);
737 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
738 +index 2f2ae26a8068d..a926b5ebbfdf5 100644
739 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
740 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
741 +@@ -3928,6 +3928,25 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
742 +
743 + }
744 +
745 ++/**
746 ++ * amdgpu_device_evict_resources - evict device resources
747 ++ * @adev: amdgpu device object
748 ++ *
749 ++ * Evicts all ttm device resources(vram BOs, gart table) from the lru list
750 ++ * of the vram memory type. Mainly used for evicting device resources
751 ++ * at suspend time.
752 ++ *
753 ++ */
754 ++static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
755 ++{
756 ++ /* No need to evict vram on APUs for suspend to ram or s2idle */
757 ++ if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
758 ++ return;
759 ++
760 ++ if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
761 ++ DRM_WARN("evicting device resources failed\n");
762 ++
763 ++}
764 +
765 + /*
766 + * Suspend & resume.
767 +@@ -3968,17 +3987,16 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
768 + if (!adev->in_s0ix)
769 + amdgpu_amdkfd_suspend(adev, adev->in_runpm);
770 +
771 +- /* evict vram memory */
772 +- amdgpu_bo_evict_vram(adev);
773 ++ /* First evict vram memory */
774 ++ amdgpu_device_evict_resources(adev);
775 +
776 + amdgpu_fence_driver_hw_fini(adev);
777 +
778 + amdgpu_device_ip_suspend_phase2(adev);
779 +- /* evict remaining vram memory
780 +- * This second call to evict vram is to evict the gart page table
781 +- * using the CPU.
782 ++ /* This second call to evict device resources is to evict
783 ++ * the gart page table using the CPU.
784 + */
785 +- amdgpu_bo_evict_vram(adev);
786 ++ amdgpu_device_evict_resources(adev);
787 +
788 + return 0;
789 + }
790 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
791 +index b517b76e96a1d..2bd7b9fe6005f 100644
792 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
793 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
794 +@@ -2246,18 +2246,23 @@ static int amdgpu_pmops_suspend(struct device *dev)
795 + {
796 + struct drm_device *drm_dev = dev_get_drvdata(dev);
797 + struct amdgpu_device *adev = drm_to_adev(drm_dev);
798 +- int r;
799 +
800 + if (amdgpu_acpi_is_s0ix_active(adev))
801 + adev->in_s0ix = true;
802 +- adev->in_s3 = true;
803 +- r = amdgpu_device_suspend(drm_dev, true);
804 +- adev->in_s3 = false;
805 +- if (r)
806 +- return r;
807 ++ else
808 ++ adev->in_s3 = true;
809 ++ return amdgpu_device_suspend(drm_dev, true);
810 ++}
811 ++
812 ++static int amdgpu_pmops_suspend_noirq(struct device *dev)
813 ++{
814 ++ struct drm_device *drm_dev = dev_get_drvdata(dev);
815 ++ struct amdgpu_device *adev = drm_to_adev(drm_dev);
816 ++
817 + if (!adev->in_s0ix)
818 +- r = amdgpu_asic_reset(adev);
819 +- return r;
820 ++ return amdgpu_asic_reset(adev);
821 ++
822 ++ return 0;
823 + }
824 +
825 + static int amdgpu_pmops_resume(struct device *dev)
826 +@@ -2269,6 +2274,8 @@ static int amdgpu_pmops_resume(struct device *dev)
827 + r = amdgpu_device_resume(drm_dev, true);
828 + if (amdgpu_acpi_is_s0ix_active(adev))
829 + adev->in_s0ix = false;
830 ++ else
831 ++ adev->in_s3 = false;
832 + return r;
833 + }
834 +
835 +@@ -2492,6 +2499,7 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
836 + .prepare = amdgpu_pmops_prepare,
837 + .complete = amdgpu_pmops_complete,
838 + .suspend = amdgpu_pmops_suspend,
839 ++ .suspend_noirq = amdgpu_pmops_suspend_noirq,
840 + .resume = amdgpu_pmops_resume,
841 + .freeze = amdgpu_pmops_freeze,
842 + .thaw = amdgpu_pmops_thaw,
843 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
844 +index d62b770cc9dc9..41d0b3a7b65d8 100644
845 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
846 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
847 +@@ -1038,29 +1038,6 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
848 + }
849 + }
850 +
851 +-/**
852 +- * amdgpu_bo_evict_vram - evict VRAM buffers
853 +- * @adev: amdgpu device object
854 +- *
855 +- * Evicts all VRAM buffers on the lru list of the memory type.
856 +- * Mainly used for evicting vram at suspend time.
857 +- *
858 +- * Returns:
859 +- * 0 for success or a negative error code on failure.
860 +- */
861 +-int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
862 +-{
863 +- struct ttm_resource_manager *man;
864 +-
865 +- if (adev->in_s3 && (adev->flags & AMD_IS_APU)) {
866 +- /* No need to evict vram on APUs for suspend to ram */
867 +- return 0;
868 +- }
869 +-
870 +- man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
871 +- return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
872 +-}
873 +-
874 + static const char *amdgpu_vram_names[] = {
875 + "UNKNOWN",
876 + "GDDR1",
877 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
878 +index 9d6c001c15f89..d8ef8a53a562d 100644
879 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
880 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
881 +@@ -304,7 +304,6 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
882 + int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
883 + u64 min_offset, u64 max_offset);
884 + void amdgpu_bo_unpin(struct amdgpu_bo *bo);
885 +-int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
886 + int amdgpu_bo_init(struct amdgpu_device *adev);
887 + void amdgpu_bo_fini(struct amdgpu_device *adev);
888 + int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
889 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
890 +index 8132f66177c27..51c76d6322c94 100644
891 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
892 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
893 +@@ -2036,6 +2036,36 @@ error_free:
894 + return r;
895 + }
896 +
897 ++/**
898 ++ * amdgpu_ttm_evict_resources - evict memory buffers
899 ++ * @adev: amdgpu device object
900 ++ * @mem_type: evicted BO's memory type
901 ++ *
902 ++ * Evicts all @mem_type buffers on the lru list of the memory type.
903 ++ *
904 ++ * Returns:
905 ++ * 0 for success or a negative error code on failure.
906 ++ */
907 ++int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
908 ++{
909 ++ struct ttm_resource_manager *man;
910 ++
911 ++ switch (mem_type) {
912 ++ case TTM_PL_VRAM:
913 ++ case TTM_PL_TT:
914 ++ case AMDGPU_PL_GWS:
915 ++ case AMDGPU_PL_GDS:
916 ++ case AMDGPU_PL_OA:
917 ++ man = ttm_manager_type(&adev->mman.bdev, mem_type);
918 ++ break;
919 ++ default:
920 ++ DRM_ERROR("Trying to evict invalid memory type\n");
921 ++ return -EINVAL;
922 ++ }
923 ++
924 ++ return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
925 ++}
926 ++
927 + #if defined(CONFIG_DEBUG_FS)
928 +
929 + static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
930 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
931 +index 3205fd5200601..639c7b41e30b9 100644
932 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
933 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
934 +@@ -190,6 +190,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
935 + uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem);
936 + uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
937 + struct ttm_resource *mem);
938 ++int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type);
939 +
940 + void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
941 +
942 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
943 +index ca058fbcccd43..16787c675f35e 100644
944 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
945 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
946 +@@ -24,6 +24,7 @@
947 + #include <linux/module.h>
948 +
949 + #include <drm/drm_drv.h>
950 ++#include <xen/xen.h>
951 +
952 + #include "amdgpu.h"
953 + #include "amdgpu_ras.h"
954 +@@ -694,7 +695,8 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
955 + adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
956 +
957 + if (!reg) {
958 +- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
959 ++ /* passthrough mode exclus sriov mod */
960 ++ if (is_virtual_machine() && !xen_initial_domain())
961 + adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
962 + }
963 +
964 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
965 +index 605b96873d8cd..05f81d44aa6c2 100644
966 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
967 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
968 +@@ -3118,7 +3118,7 @@ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video
969 + &dpcd_pattern_type.value,
970 + sizeof(dpcd_pattern_type));
971 +
972 +- channel_count = dpcd_test_mode.bits.channel_count + 1;
973 ++ channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT);
974 +
975 + // read pattern periods for requested channels when sawTooth pattern is requested
976 + if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH ||
977 +diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
978 +index a133f7e154e70..d5198b435638c 100644
979 +--- a/drivers/gpu/drm/msm/dp/dp_display.c
980 ++++ b/drivers/gpu/drm/msm/dp/dp_display.c
981 +@@ -551,12 +551,6 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
982 +
983 + mutex_unlock(&dp->event_mutex);
984 +
985 +- /*
986 +- * add fail safe mode outside event_mutex scope
987 +- * to avoid potiential circular lock with drm thread
988 +- */
989 +- dp_panel_add_fail_safe_mode(dp->dp_display.connector);
990 +-
991 + /* uevent will complete connection part */
992 + return 0;
993 + };
994 +diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
995 +index 982f5e8c3546f..62b742e701d2c 100644
996 +--- a/drivers/gpu/drm/msm/dp/dp_panel.c
997 ++++ b/drivers/gpu/drm/msm/dp/dp_panel.c
998 +@@ -151,15 +151,6 @@ static int dp_panel_update_modes(struct drm_connector *connector,
999 + return rc;
1000 + }
1001 +
1002 +-void dp_panel_add_fail_safe_mode(struct drm_connector *connector)
1003 +-{
1004 +- /* fail safe edid */
1005 +- mutex_lock(&connector->dev->mode_config.mutex);
1006 +- if (drm_add_modes_noedid(connector, 640, 480))
1007 +- drm_set_preferred_mode(connector, 640, 480);
1008 +- mutex_unlock(&connector->dev->mode_config.mutex);
1009 +-}
1010 +-
1011 + int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
1012 + struct drm_connector *connector)
1013 + {
1014 +@@ -215,8 +206,6 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
1015 + rc = -ETIMEDOUT;
1016 + goto end;
1017 + }
1018 +-
1019 +- dp_panel_add_fail_safe_mode(connector);
1020 + }
1021 +
1022 + if (panel->aux_cfg_update_done) {
1023 +diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
1024 +index 99739ea679a77..9023e5bb4b8b2 100644
1025 +--- a/drivers/gpu/drm/msm/dp/dp_panel.h
1026 ++++ b/drivers/gpu/drm/msm/dp/dp_panel.h
1027 +@@ -59,7 +59,6 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel);
1028 + int dp_panel_deinit(struct dp_panel *dp_panel);
1029 + int dp_panel_timing_cfg(struct dp_panel *dp_panel);
1030 + void dp_panel_dump_regs(struct dp_panel *dp_panel);
1031 +-void dp_panel_add_fail_safe_mode(struct drm_connector *connector);
1032 + int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
1033 + struct drm_connector *connector);
1034 + u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
1035 +diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
1036 +index fb6d14d213a18..c67cd037a93fd 100644
1037 +--- a/drivers/hwmon/adt7470.c
1038 ++++ b/drivers/hwmon/adt7470.c
1039 +@@ -19,6 +19,7 @@
1040 + #include <linux/log2.h>
1041 + #include <linux/kthread.h>
1042 + #include <linux/regmap.h>
1043 ++#include <linux/sched.h>
1044 + #include <linux/slab.h>
1045 + #include <linux/util_macros.h>
1046 +
1047 +@@ -294,11 +295,10 @@ static int adt7470_update_thread(void *p)
1048 + adt7470_read_temperatures(data);
1049 + mutex_unlock(&data->lock);
1050 +
1051 +- set_current_state(TASK_INTERRUPTIBLE);
1052 + if (kthread_should_stop())
1053 + break;
1054 +
1055 +- schedule_timeout(msecs_to_jiffies(data->auto_update_interval));
1056 ++ schedule_timeout_interruptible(msecs_to_jiffies(data->auto_update_interval));
1057 + }
1058 +
1059 + return 0;
1060 +diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
1061 +index ca0bfaf2f6911..5f8f824d997f8 100644
1062 +--- a/drivers/hwmon/pmbus/pmbus_core.c
1063 ++++ b/drivers/hwmon/pmbus/pmbus_core.c
1064 +@@ -2326,6 +2326,9 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
1065 + data->has_status_word = true;
1066 + }
1067 +
1068 ++ /* Make sure PEC is disabled, will be enabled later if needed */
1069 ++ client->flags &= ~I2C_CLIENT_PEC;
1070 ++
1071 + /* Enable PEC if the controller and bus supports it */
1072 + if (!(data->flags & PMBUS_NO_CAPABILITY)) {
1073 + ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
1074 +diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
1075 +index 6dea0a49d1718..082a3ddb0fa3b 100644
1076 +--- a/drivers/infiniband/hw/irdma/cm.c
1077 ++++ b/drivers/infiniband/hw/irdma/cm.c
1078 +@@ -2305,10 +2305,8 @@ err:
1079 + return NULL;
1080 + }
1081 +
1082 +-static void irdma_cm_node_free_cb(struct rcu_head *rcu_head)
1083 ++static void irdma_destroy_connection(struct irdma_cm_node *cm_node)
1084 + {
1085 +- struct irdma_cm_node *cm_node =
1086 +- container_of(rcu_head, struct irdma_cm_node, rcu_head);
1087 + struct irdma_cm_core *cm_core = cm_node->cm_core;
1088 + struct irdma_qp *iwqp;
1089 + struct irdma_cm_info nfo;
1090 +@@ -2356,7 +2354,6 @@ static void irdma_cm_node_free_cb(struct rcu_head *rcu_head)
1091 + }
1092 +
1093 + cm_core->cm_free_ah(cm_node);
1094 +- kfree(cm_node);
1095 + }
1096 +
1097 + /**
1098 +@@ -2384,8 +2381,9 @@ void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node)
1099 +
1100 + spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1101 +
1102 +- /* wait for all list walkers to exit their grace period */
1103 +- call_rcu(&cm_node->rcu_head, irdma_cm_node_free_cb);
1104 ++ irdma_destroy_connection(cm_node);
1105 ++
1106 ++ kfree_rcu(cm_node, rcu_head);
1107 + }
1108 +
1109 + /**
1110 +@@ -3465,12 +3463,6 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
1111 + }
1112 +
1113 + cm_id = iwqp->cm_id;
1114 +- /* make sure we havent already closed this connection */
1115 +- if (!cm_id) {
1116 +- spin_unlock_irqrestore(&iwqp->lock, flags);
1117 +- return;
1118 +- }
1119 +-
1120 + original_hw_tcp_state = iwqp->hw_tcp_state;
1121 + original_ibqp_state = iwqp->ibqp_state;
1122 + last_ae = iwqp->last_aeq;
1123 +@@ -3492,11 +3484,11 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
1124 + disconn_status = -ECONNRESET;
1125 + }
1126 +
1127 +- if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
1128 +- original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
1129 +- last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
1130 +- last_ae == IRDMA_AE_BAD_CLOSE ||
1131 +- last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) {
1132 ++ if (original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
1133 ++ original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
1134 ++ last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
1135 ++ last_ae == IRDMA_AE_BAD_CLOSE ||
1136 ++ last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset || !cm_id) {
1137 + issue_close = 1;
1138 + iwqp->cm_id = NULL;
1139 + qp->term_flags = 0;
1140 +diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
1141 +index fee179fd8c67b..85d4212f59dbc 100644
1142 +--- a/drivers/infiniband/hw/irdma/utils.c
1143 ++++ b/drivers/infiniband/hw/irdma/utils.c
1144 +@@ -258,18 +258,16 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event,
1145 + u32 local_ipaddr[4] = {};
1146 + bool ipv4 = true;
1147 +
1148 +- real_dev = rdma_vlan_dev_real_dev(netdev);
1149 +- if (!real_dev)
1150 +- real_dev = netdev;
1151 +-
1152 +- ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
1153 +- if (!ibdev)
1154 +- return NOTIFY_DONE;
1155 +-
1156 +- iwdev = to_iwdev(ibdev);
1157 +-
1158 + switch (event) {
1159 + case NETEVENT_NEIGH_UPDATE:
1160 ++ real_dev = rdma_vlan_dev_real_dev(netdev);
1161 ++ if (!real_dev)
1162 ++ real_dev = netdev;
1163 ++ ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
1164 ++ if (!ibdev)
1165 ++ return NOTIFY_DONE;
1166 ++
1167 ++ iwdev = to_iwdev(ibdev);
1168 + p = (__be32 *)neigh->primary_key;
1169 + if (neigh->tbl->family == AF_INET6) {
1170 + ipv4 = false;
1171 +@@ -290,13 +288,12 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event,
1172 + irdma_manage_arp_cache(iwdev->rf, neigh->ha,
1173 + local_ipaddr, ipv4,
1174 + IRDMA_ARP_DELETE);
1175 ++ ib_device_put(ibdev);
1176 + break;
1177 + default:
1178 + break;
1179 + }
1180 +
1181 +- ib_device_put(ibdev);
1182 +-
1183 + return NOTIFY_DONE;
1184 + }
1185 +
1186 +diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
1187 +index 4a6fdd5c09e3e..8a3ac4257e867 100644
1188 +--- a/drivers/infiniband/hw/irdma/verbs.c
1189 ++++ b/drivers/infiniband/hw/irdma/verbs.c
1190 +@@ -1617,13 +1617,13 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1191 +
1192 + if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
1193 + if (dont_wait) {
1194 +- if (iwqp->cm_id && iwqp->hw_tcp_state) {
1195 ++ if (iwqp->hw_tcp_state) {
1196 + spin_lock_irqsave(&iwqp->lock, flags);
1197 + iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
1198 + iwqp->last_aeq = IRDMA_AE_RESET_SENT;
1199 + spin_unlock_irqrestore(&iwqp->lock, flags);
1200 +- irdma_cm_disconn(iwqp);
1201 + }
1202 ++ irdma_cm_disconn(iwqp);
1203 + } else {
1204 + int close_timer_started;
1205 +
1206 +diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
1207 +index 7a5ed86ffc9f9..18a64ccbb0e58 100644
1208 +--- a/drivers/infiniband/sw/siw/siw_cm.c
1209 ++++ b/drivers/infiniband/sw/siw/siw_cm.c
1210 +@@ -968,14 +968,15 @@ static void siw_accept_newconn(struct siw_cep *cep)
1211 +
1212 + siw_cep_set_inuse(new_cep);
1213 + rv = siw_proc_mpareq(new_cep);
1214 +- siw_cep_set_free(new_cep);
1215 +-
1216 + if (rv != -EAGAIN) {
1217 + siw_cep_put(cep);
1218 + new_cep->listen_cep = NULL;
1219 +- if (rv)
1220 ++ if (rv) {
1221 ++ siw_cep_set_free(new_cep);
1222 + goto error;
1223 ++ }
1224 + }
1225 ++ siw_cep_set_free(new_cep);
1226 + }
1227 + return;
1228 +
1229 +diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
1230 +index ad69eeb5ac5ba..baba4571c8152 100644
1231 +--- a/drivers/iommu/apple-dart.c
1232 ++++ b/drivers/iommu/apple-dart.c
1233 +@@ -757,6 +757,7 @@ static const struct iommu_ops apple_dart_iommu_ops = {
1234 + .of_xlate = apple_dart_of_xlate,
1235 + .def_domain_type = apple_dart_def_domain_type,
1236 + .pgsize_bitmap = -1UL, /* Restricted during dart probe */
1237 ++ .owner = THIS_MODULE,
1238 + };
1239 +
1240 + static irqreturn_t apple_dart_irq(int irq, void *dev)
1241 +@@ -832,16 +833,15 @@ static int apple_dart_probe(struct platform_device *pdev)
1242 + dart->dev = dev;
1243 + spin_lock_init(&dart->lock);
1244 +
1245 +- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1246 ++ dart->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1247 ++ if (IS_ERR(dart->regs))
1248 ++ return PTR_ERR(dart->regs);
1249 ++
1250 + if (resource_size(res) < 0x4000) {
1251 + dev_err(dev, "MMIO region too small (%pr)\n", res);
1252 + return -EINVAL;
1253 + }
1254 +
1255 +- dart->regs = devm_ioremap_resource(dev, res);
1256 +- if (IS_ERR(dart->regs))
1257 +- return PTR_ERR(dart->regs);
1258 +-
1259 + dart->irq = platform_get_irq(pdev, 0);
1260 + if (dart->irq < 0)
1261 + return -ENODEV;
1262 +diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
1263 +index ee66d1f4cb81e..f763c1430d152 100644
1264 +--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
1265 ++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
1266 +@@ -183,7 +183,14 @@ static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
1267 + {
1268 + struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
1269 + struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
1270 +- size_t size = end - start + 1;
1271 ++ size_t size;
1272 ++
1273 ++ /*
1274 ++ * The mm_types defines vm_end as the first byte after the end address,
1275 ++ * different from IOMMU subsystem using the last address of an address
1276 ++ * range. So do a simple translation here by calculating size correctly.
1277 ++ */
1278 ++ size = end - start;
1279 +
1280 + if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
1281 + arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
1282 +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
1283 +index 0b3076144beb7..91a5c75966f32 100644
1284 +--- a/drivers/iommu/intel/iommu.c
1285 ++++ b/drivers/iommu/intel/iommu.c
1286 +@@ -1637,7 +1637,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1287 + unsigned long pfn, unsigned int pages,
1288 + int ih, int map)
1289 + {
1290 +- unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1291 ++ unsigned int aligned_pages = __roundup_pow_of_two(pages);
1292 ++ unsigned int mask = ilog2(aligned_pages);
1293 + uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1294 + u16 did = domain->iommu_did[iommu->seq_id];
1295 +
1296 +@@ -1649,10 +1650,30 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1297 + if (domain_use_first_level(domain)) {
1298 + domain_flush_piotlb(iommu, domain, addr, pages, ih);
1299 + } else {
1300 ++ unsigned long bitmask = aligned_pages - 1;
1301 ++
1302 ++ /*
1303 ++ * PSI masks the low order bits of the base address. If the
1304 ++ * address isn't aligned to the mask, then compute a mask value
1305 ++ * needed to ensure the target range is flushed.
1306 ++ */
1307 ++ if (unlikely(bitmask & pfn)) {
1308 ++ unsigned long end_pfn = pfn + pages - 1, shared_bits;
1309 ++
1310 ++ /*
1311 ++ * Since end_pfn <= pfn + bitmask, the only way bits
1312 ++ * higher than bitmask can differ in pfn and end_pfn is
1313 ++ * by carrying. This means after masking out bitmask,
1314 ++ * high bits starting with the first set bit in
1315 ++ * shared_bits are all equal in both pfn and end_pfn.
1316 ++ */
1317 ++ shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
1318 ++ mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
1319 ++ }
1320 ++
1321 + /*
1322 + * Fallback to domain selective flush if no PSI support or
1323 +- * the size is too big. PSI requires page size to be 2 ^ x,
1324 +- * and the base address is naturally aligned to the size.
1325 ++ * the size is too big.
1326 + */
1327 + if (!cap_pgsel_inv(iommu->cap) ||
1328 + mask > cap_max_amask_val(iommu->cap))
1329 +diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
1330 +index 0c228787704f3..3a9468b1d2c3c 100644
1331 +--- a/drivers/iommu/intel/svm.c
1332 ++++ b/drivers/iommu/intel/svm.c
1333 +@@ -978,6 +978,10 @@ bad_req:
1334 + goto bad_req;
1335 + }
1336 +
1337 ++ /* Drop Stop Marker message. No need for a response. */
1338 ++ if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
1339 ++ goto prq_advance;
1340 ++
1341 + if (!svm || svm->pasid != req->pasid) {
1342 + /*
1343 + * It can't go away, because the driver is not permitted
1344 +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
1345 +index 29e58ffae3797..d805f84507198 100644
1346 +--- a/drivers/mmc/core/mmc.c
1347 ++++ b/drivers/mmc/core/mmc.c
1348 +@@ -1381,13 +1381,17 @@ static int mmc_select_hs400es(struct mmc_card *card)
1349 + goto out_err;
1350 + }
1351 +
1352 ++ /*
1353 ++ * Bump to HS timing and frequency. Some cards don't handle
1354 ++ * SEND_STATUS reliably at the initial frequency.
1355 ++ */
1356 + mmc_set_timing(host, MMC_TIMING_MMC_HS);
1357 ++ mmc_set_bus_speed(card);
1358 ++
1359 + err = mmc_switch_status(card, true);
1360 + if (err)
1361 + goto out_err;
1362 +
1363 +- mmc_set_clock(host, card->ext_csd.hs_max_dtr);
1364 +-
1365 + /* Switch card to DDR with strobe bit */
1366 + val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
1367 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1368 +@@ -1445,7 +1449,7 @@ out_err:
1369 + static int mmc_select_hs200(struct mmc_card *card)
1370 + {
1371 + struct mmc_host *host = card->host;
1372 +- unsigned int old_timing, old_signal_voltage;
1373 ++ unsigned int old_timing, old_signal_voltage, old_clock;
1374 + int err = -EINVAL;
1375 + u8 val;
1376 +
1377 +@@ -1476,8 +1480,17 @@ static int mmc_select_hs200(struct mmc_card *card)
1378 + false, true, MMC_CMD_RETRIES);
1379 + if (err)
1380 + goto err;
1381 ++
1382 ++ /*
1383 ++ * Bump to HS timing and frequency. Some cards don't handle
1384 ++ * SEND_STATUS reliably at the initial frequency.
1385 ++ * NB: We can't move to full (HS200) speeds until after we've
1386 ++ * successfully switched over.
1387 ++ */
1388 + old_timing = host->ios.timing;
1389 ++ old_clock = host->ios.clock;
1390 + mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1391 ++ mmc_set_clock(card->host, card->ext_csd.hs_max_dtr);
1392 +
1393 + /*
1394 + * For HS200, CRC errors are not a reliable way to know the
1395 +@@ -1490,8 +1503,10 @@ static int mmc_select_hs200(struct mmc_card *card)
1396 + * mmc_select_timing() assumes timing has not changed if
1397 + * it is a switch error.
1398 + */
1399 +- if (err == -EBADMSG)
1400 ++ if (err == -EBADMSG) {
1401 ++ mmc_set_clock(host, old_clock);
1402 + mmc_set_timing(host, old_timing);
1403 ++ }
1404 + }
1405 + err:
1406 + if (err) {
1407 +diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
1408 +index f7c384db89bf3..e1580f78c6b2d 100644
1409 +--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
1410 ++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
1411 +@@ -38,10 +38,7 @@ struct realtek_pci_sdmmc {
1412 + bool double_clk;
1413 + bool eject;
1414 + bool initial_mode;
1415 +- int power_state;
1416 +-#define SDMMC_POWER_ON 1
1417 +-#define SDMMC_POWER_OFF 0
1418 +-
1419 ++ int prev_power_state;
1420 + int sg_count;
1421 + s32 cookie;
1422 + int cookie_sg_count;
1423 +@@ -905,7 +902,7 @@ static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
1424 + return err;
1425 + }
1426 +
1427 +-static int sd_power_on(struct realtek_pci_sdmmc *host)
1428 ++static int sd_power_on(struct realtek_pci_sdmmc *host, unsigned char power_mode)
1429 + {
1430 + struct rtsx_pcr *pcr = host->pcr;
1431 + struct mmc_host *mmc = host->mmc;
1432 +@@ -913,9 +910,14 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
1433 + u32 val;
1434 + u8 test_mode;
1435 +
1436 +- if (host->power_state == SDMMC_POWER_ON)
1437 ++ if (host->prev_power_state == MMC_POWER_ON)
1438 + return 0;
1439 +
1440 ++ if (host->prev_power_state == MMC_POWER_UP) {
1441 ++ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, 0);
1442 ++ goto finish;
1443 ++ }
1444 ++
1445 + msleep(100);
1446 +
1447 + rtsx_pci_init_cmd(pcr);
1448 +@@ -936,10 +938,15 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
1449 + if (err < 0)
1450 + return err;
1451 +
1452 ++ mdelay(1);
1453 ++
1454 + err = rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
1455 + if (err < 0)
1456 + return err;
1457 +
1458 ++ /* send at least 74 clocks */
1459 ++ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN);
1460 ++
1461 + if (PCI_PID(pcr) == PID_5261) {
1462 + /*
1463 + * If test mode is set switch to SD Express mandatorily,
1464 +@@ -964,7 +971,8 @@ static int sd_power_on(struct realtek_pci_sdmmc *host)
1465 + }
1466 + }
1467 +
1468 +- host->power_state = SDMMC_POWER_ON;
1469 ++finish:
1470 ++ host->prev_power_state = power_mode;
1471 + return 0;
1472 + }
1473 +
1474 +@@ -973,7 +981,7 @@ static int sd_power_off(struct realtek_pci_sdmmc *host)
1475 + struct rtsx_pcr *pcr = host->pcr;
1476 + int err;
1477 +
1478 +- host->power_state = SDMMC_POWER_OFF;
1479 ++ host->prev_power_state = MMC_POWER_OFF;
1480 +
1481 + rtsx_pci_init_cmd(pcr);
1482 +
1483 +@@ -999,7 +1007,7 @@ static int sd_set_power_mode(struct realtek_pci_sdmmc *host,
1484 + if (power_mode == MMC_POWER_OFF)
1485 + err = sd_power_off(host);
1486 + else
1487 +- err = sd_power_on(host);
1488 ++ err = sd_power_on(host, power_mode);
1489 +
1490 + return err;
1491 + }
1492 +@@ -1482,10 +1490,11 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
1493 +
1494 + host = mmc_priv(mmc);
1495 + host->pcr = pcr;
1496 ++ mmc->ios.power_delay_ms = 5;
1497 + host->mmc = mmc;
1498 + host->pdev = pdev;
1499 + host->cookie = -1;
1500 +- host->power_state = SDMMC_POWER_OFF;
1501 ++ host->prev_power_state = MMC_POWER_OFF;
1502 + INIT_WORK(&host->work, sd_request);
1503 + platform_set_drvdata(pdev, host);
1504 + pcr->slots[RTSX_SD_CARD].p_dev = pdev;
1505 +diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
1506 +index 50c71e0ba5e4e..ff9f5b63c337e 100644
1507 +--- a/drivers/mmc/host/sdhci-msm.c
1508 ++++ b/drivers/mmc/host/sdhci-msm.c
1509 +@@ -17,6 +17,7 @@
1510 + #include <linux/regulator/consumer.h>
1511 + #include <linux/interconnect.h>
1512 + #include <linux/pinctrl/consumer.h>
1513 ++#include <linux/reset.h>
1514 +
1515 + #include "sdhci-pltfm.h"
1516 + #include "cqhci.h"
1517 +@@ -2482,6 +2483,43 @@ static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
1518 + of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
1519 + }
1520 +
1521 ++static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host)
1522 ++{
1523 ++ struct reset_control *reset;
1524 ++ int ret = 0;
1525 ++
1526 ++ reset = reset_control_get_optional_exclusive(dev, NULL);
1527 ++ if (IS_ERR(reset))
1528 ++ return dev_err_probe(dev, PTR_ERR(reset),
1529 ++ "unable to acquire core_reset\n");
1530 ++
1531 ++ if (!reset)
1532 ++ return ret;
1533 ++
1534 ++ ret = reset_control_assert(reset);
1535 ++ if (ret) {
1536 ++ reset_control_put(reset);
1537 ++ return dev_err_probe(dev, ret, "core_reset assert failed\n");
1538 ++ }
1539 ++
1540 ++ /*
1541 ++ * The hardware requirement for delay between assert/deassert
1542 ++ * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
1543 ++ * ~125us (4/32768). To be on the safe side add 200us delay.
1544 ++ */
1545 ++ usleep_range(200, 210);
1546 ++
1547 ++ ret = reset_control_deassert(reset);
1548 ++ if (ret) {
1549 ++ reset_control_put(reset);
1550 ++ return dev_err_probe(dev, ret, "core_reset deassert failed\n");
1551 ++ }
1552 ++
1553 ++ usleep_range(200, 210);
1554 ++ reset_control_put(reset);
1555 ++
1556 ++ return ret;
1557 ++}
1558 +
1559 + static int sdhci_msm_probe(struct platform_device *pdev)
1560 + {
1561 +@@ -2529,6 +2567,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
1562 +
1563 + msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
1564 +
1565 ++ ret = sdhci_msm_gcc_reset(&pdev->dev, host);
1566 ++ if (ret)
1567 ++ goto pltfm_free;
1568 ++
1569 + /* Setup SDCC bus voter clock. */
1570 + msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
1571 + if (!IS_ERR(msm_host->bus_clk)) {
1572 +diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
1573 +index 2702736a1c57d..ce6cb8be654ef 100644
1574 +--- a/drivers/mmc/host/sunxi-mmc.c
1575 ++++ b/drivers/mmc/host/sunxi-mmc.c
1576 +@@ -377,8 +377,9 @@ static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host,
1577 + pdes[i].buf_addr_ptr1 =
1578 + cpu_to_le32(sg_dma_address(&data->sg[i]) >>
1579 + host->cfg->idma_des_shift);
1580 +- pdes[i].buf_addr_ptr2 = cpu_to_le32((u32)next_desc >>
1581 +- host->cfg->idma_des_shift);
1582 ++ pdes[i].buf_addr_ptr2 =
1583 ++ cpu_to_le32(next_desc >>
1584 ++ host->cfg->idma_des_shift);
1585 + }
1586 +
1587 + pdes[0].config |= cpu_to_le32(SDXC_IDMAC_DES0_FD);
1588 +diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
1589 +index 78e27940b2aff..daee3652ac8b3 100644
1590 +--- a/drivers/net/can/grcan.c
1591 ++++ b/drivers/net/can/grcan.c
1592 +@@ -241,13 +241,14 @@ struct grcan_device_config {
1593 + .rxsize = GRCAN_DEFAULT_BUFFER_SIZE, \
1594 + }
1595 +
1596 +-#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 0x4100
1597 ++#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 4100
1598 + #define GRLIB_VERSION_MASK 0xffff
1599 +
1600 + /* GRCAN private data structure */
1601 + struct grcan_priv {
1602 + struct can_priv can; /* must be the first member */
1603 + struct net_device *dev;
1604 ++ struct device *ofdev_dev;
1605 + struct napi_struct napi;
1606 +
1607 + struct grcan_registers __iomem *regs; /* ioremap'ed registers */
1608 +@@ -924,7 +925,7 @@ static void grcan_free_dma_buffers(struct net_device *dev)
1609 + struct grcan_priv *priv = netdev_priv(dev);
1610 + struct grcan_dma *dma = &priv->dma;
1611 +
1612 +- dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf,
1613 ++ dma_free_coherent(priv->ofdev_dev, dma->base_size, dma->base_buf,
1614 + dma->base_handle);
1615 + memset(dma, 0, sizeof(*dma));
1616 + }
1617 +@@ -949,7 +950,7 @@ static int grcan_allocate_dma_buffers(struct net_device *dev,
1618 +
1619 + /* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */
1620 + dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT;
1621 +- dma->base_buf = dma_alloc_coherent(&dev->dev,
1622 ++ dma->base_buf = dma_alloc_coherent(priv->ofdev_dev,
1623 + dma->base_size,
1624 + &dma->base_handle,
1625 + GFP_KERNEL);
1626 +@@ -1113,8 +1114,10 @@ static int grcan_close(struct net_device *dev)
1627 +
1628 + priv->closing = true;
1629 + if (priv->need_txbug_workaround) {
1630 ++ spin_unlock_irqrestore(&priv->lock, flags);
1631 + del_timer_sync(&priv->hang_timer);
1632 + del_timer_sync(&priv->rr_timer);
1633 ++ spin_lock_irqsave(&priv->lock, flags);
1634 + }
1635 + netif_stop_queue(dev);
1636 + grcan_stop_hardware(dev);
1637 +@@ -1134,7 +1137,7 @@ static int grcan_close(struct net_device *dev)
1638 + return 0;
1639 + }
1640 +
1641 +-static int grcan_transmit_catch_up(struct net_device *dev, int budget)
1642 ++static void grcan_transmit_catch_up(struct net_device *dev)
1643 + {
1644 + struct grcan_priv *priv = netdev_priv(dev);
1645 + unsigned long flags;
1646 +@@ -1142,7 +1145,7 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget)
1647 +
1648 + spin_lock_irqsave(&priv->lock, flags);
1649 +
1650 +- work_done = catch_up_echo_skb(dev, budget, true);
1651 ++ work_done = catch_up_echo_skb(dev, -1, true);
1652 + if (work_done) {
1653 + if (!priv->resetting && !priv->closing &&
1654 + !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
1655 +@@ -1156,8 +1159,6 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget)
1656 + }
1657 +
1658 + spin_unlock_irqrestore(&priv->lock, flags);
1659 +-
1660 +- return work_done;
1661 + }
1662 +
1663 + static int grcan_receive(struct net_device *dev, int budget)
1664 +@@ -1239,19 +1240,13 @@ static int grcan_poll(struct napi_struct *napi, int budget)
1665 + struct net_device *dev = priv->dev;
1666 + struct grcan_registers __iomem *regs = priv->regs;
1667 + unsigned long flags;
1668 +- int tx_work_done, rx_work_done;
1669 +- int rx_budget = budget / 2;
1670 +- int tx_budget = budget - rx_budget;
1671 ++ int work_done;
1672 +
1673 +- /* Half of the budget for receiving messages */
1674 +- rx_work_done = grcan_receive(dev, rx_budget);
1675 ++ work_done = grcan_receive(dev, budget);
1676 +
1677 +- /* Half of the budget for transmitting messages as that can trigger echo
1678 +- * frames being received
1679 +- */
1680 +- tx_work_done = grcan_transmit_catch_up(dev, tx_budget);
1681 ++ grcan_transmit_catch_up(dev);
1682 +
1683 +- if (rx_work_done < rx_budget && tx_work_done < tx_budget) {
1684 ++ if (work_done < budget) {
1685 + napi_complete(napi);
1686 +
1687 + /* Guarantee no interference with a running reset that otherwise
1688 +@@ -1268,7 +1263,7 @@ static int grcan_poll(struct napi_struct *napi, int budget)
1689 + spin_unlock_irqrestore(&priv->lock, flags);
1690 + }
1691 +
1692 +- return rx_work_done + tx_work_done;
1693 ++ return work_done;
1694 + }
1695 +
1696 + /* Work tx bug by waiting while for the risky situation to clear. If that fails,
1697 +@@ -1600,6 +1595,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
1698 + memcpy(&priv->config, &grcan_module_config,
1699 + sizeof(struct grcan_device_config));
1700 + priv->dev = dev;
1701 ++ priv->ofdev_dev = &ofdev->dev;
1702 + priv->regs = base;
1703 + priv->can.bittiming_const = &grcan_bittiming_const;
1704 + priv->can.do_set_bittiming = grcan_set_bittiming;
1705 +@@ -1652,6 +1648,7 @@ exit_free_candev:
1706 + static int grcan_probe(struct platform_device *ofdev)
1707 + {
1708 + struct device_node *np = ofdev->dev.of_node;
1709 ++ struct device_node *sysid_parent;
1710 + u32 sysid, ambafreq;
1711 + int irq, err;
1712 + void __iomem *base;
1713 +@@ -1660,10 +1657,15 @@ static int grcan_probe(struct platform_device *ofdev)
1714 + /* Compare GRLIB version number with the first that does not
1715 + * have the tx bug (see start_xmit)
1716 + */
1717 +- err = of_property_read_u32(np, "systemid", &sysid);
1718 +- if (!err && ((sysid & GRLIB_VERSION_MASK)
1719 +- >= GRCAN_TXBUG_SAFE_GRLIB_VERSION))
1720 +- txbug = false;
1721 ++ sysid_parent = of_find_node_by_path("/ambapp0");
1722 ++ if (sysid_parent) {
1723 ++ of_node_get(sysid_parent);
1724 ++ err = of_property_read_u32(sysid_parent, "systemid", &sysid);
1725 ++ if (!err && ((sysid & GRLIB_VERSION_MASK) >=
1726 ++ GRCAN_TXBUG_SAFE_GRLIB_VERSION))
1727 ++ txbug = false;
1728 ++ of_node_put(sysid_parent);
1729 ++ }
1730 +
1731 + err = of_property_read_u32(np, "freq", &ambafreq);
1732 + if (err) {
1733 +diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
1734 +index 14bf1828cbba3..ca9e171cb2bb0 100644
1735 +--- a/drivers/net/dsa/mt7530.c
1736 ++++ b/drivers/net/dsa/mt7530.c
1737 +@@ -2216,6 +2216,7 @@ mt7530_setup(struct dsa_switch *ds)
1738 + ret = of_get_phy_mode(mac_np, &interface);
1739 + if (ret && ret != -ENODEV) {
1740 + of_node_put(mac_np);
1741 ++ of_node_put(phy_node);
1742 + return ret;
1743 + }
1744 + id = of_mdio_parse_addr(ds->dev, phy_node);
1745 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1746 +index 8b078c319872f..cb53149455890 100644
1747 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1748 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1749 +@@ -2699,6 +2699,10 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
1750 + u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
1751 + struct bnxt_cp_ring_info *cpr2;
1752 +
1753 ++ /* No more budget for RX work */
1754 ++ if (budget && work_done >= budget && idx == BNXT_RX_HDL)
1755 ++ break;
1756 ++
1757 + cpr2 = cpr->cp_ring_arr[idx];
1758 + work_done += __bnxt_poll_work(bp, cpr2,
1759 + budget - work_done);
1760 +@@ -10881,7 +10885,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
1761 +
1762 + if (bp->flags & BNXT_FLAG_CHIP_P5)
1763 + return bnxt_rfs_supported(bp);
1764 +- if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
1765 ++ if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
1766 + return false;
1767 +
1768 + vnics = 1 + bp->rx_nr_rings;
1769 +@@ -13087,10 +13091,9 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
1770 + goto init_dflt_ring_err;
1771 +
1772 + bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
1773 +- if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
1774 +- bp->flags |= BNXT_FLAG_RFS;
1775 +- bp->dev->features |= NETIF_F_NTUPLE;
1776 +- }
1777 ++
1778 ++ bnxt_set_dflt_rfs(bp);
1779 ++
1780 + init_dflt_ring_err:
1781 + bnxt_ulp_irq_restart(bp, rc);
1782 + return rc;
1783 +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
1784 +index 7f0f1aa3cedd9..0c1b0a91b1aed 100644
1785 +--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
1786 ++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
1787 +@@ -772,7 +772,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
1788 + /* If we only have one page, still need to get shadown wqe when
1789 + * wqe rolling-over page
1790 + */
1791 +- if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) {
1792 ++ if (curr_pg != end_pg || end_prod_idx < *prod_idx) {
1793 + void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
1794 +
1795 + copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);
1796 +@@ -842,7 +842,10 @@ struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
1797 +
1798 + *cons_idx = curr_cons_idx;
1799 +
1800 +- if (curr_pg != end_pg) {
1801 ++ /* If we only have one page, still need to get shadown wqe when
1802 ++ * wqe rolling-over page
1803 ++ */
1804 ++ if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) {
1805 + void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
1806 +
1807 + copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
1808 +diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
1809 +index 32d83421226a2..5897940a418b6 100644
1810 +--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
1811 ++++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
1812 +@@ -26,6 +26,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
1813 + break;
1814 +
1815 + ss->regmap[i] = syscon_node_to_regmap(np);
1816 ++ of_node_put(np);
1817 + if (IS_ERR(ss->regmap[i]))
1818 + return PTR_ERR(ss->regmap[i]);
1819 + }
1820 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
1821 +index ed4fb79b4db76..75b6060f7a9ae 100644
1822 +--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
1823 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
1824 +@@ -31,6 +31,7 @@ static const char *const mlx5_rsc_sgmt_name[] = {
1825 + struct mlx5_rsc_dump {
1826 + u32 pdn;
1827 + struct mlx5_core_mkey mkey;
1828 ++ u32 number_of_menu_items;
1829 + u16 fw_segment_type[MLX5_SGMT_TYPE_NUM];
1830 + };
1831 +
1832 +@@ -50,21 +51,37 @@ static int mlx5_rsc_dump_sgmt_get_by_name(char *name)
1833 + return -EINVAL;
1834 + }
1835 +
1836 +-static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page)
1837 ++#define MLX5_RSC_DUMP_MENU_HEADER_SIZE (MLX5_ST_SZ_BYTES(resource_dump_info_segment) + \
1838 ++ MLX5_ST_SZ_BYTES(resource_dump_command_segment) + \
1839 ++ MLX5_ST_SZ_BYTES(resource_dump_menu_segment))
1840 ++
1841 ++static int mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page,
1842 ++ int read_size, int start_idx)
1843 + {
1844 + void *data = page_address(page);
1845 + enum mlx5_sgmt_type sgmt_idx;
1846 + int num_of_items;
1847 + char *sgmt_name;
1848 + void *member;
1849 ++ int size = 0;
1850 + void *menu;
1851 + int i;
1852 +
1853 +- menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
1854 +- num_of_items = MLX5_GET(resource_dump_menu_segment, menu, num_of_records);
1855 ++ if (!start_idx) {
1856 ++ menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
1857 ++ rsc_dump->number_of_menu_items = MLX5_GET(resource_dump_menu_segment, menu,
1858 ++ num_of_records);
1859 ++ size = MLX5_RSC_DUMP_MENU_HEADER_SIZE;
1860 ++ data += size;
1861 ++ }
1862 ++ num_of_items = rsc_dump->number_of_menu_items;
1863 ++
1864 ++ for (i = 0; start_idx + i < num_of_items; i++) {
1865 ++ size += MLX5_ST_SZ_BYTES(resource_dump_menu_record);
1866 ++ if (size >= read_size)
1867 ++ return start_idx + i;
1868 +
1869 +- for (i = 0; i < num_of_items; i++) {
1870 +- member = MLX5_ADDR_OF(resource_dump_menu_segment, menu, record[i]);
1871 ++ member = data + MLX5_ST_SZ_BYTES(resource_dump_menu_record) * i;
1872 + sgmt_name = MLX5_ADDR_OF(resource_dump_menu_record, member, segment_name);
1873 + sgmt_idx = mlx5_rsc_dump_sgmt_get_by_name(sgmt_name);
1874 + if (sgmt_idx == -EINVAL)
1875 +@@ -72,6 +89,7 @@ static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct
1876 + rsc_dump->fw_segment_type[sgmt_idx] = MLX5_GET(resource_dump_menu_record,
1877 + member, segment_type);
1878 + }
1879 ++ return 0;
1880 + }
1881 +
1882 + static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
1883 +@@ -168,6 +186,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
1884 + struct mlx5_rsc_dump_cmd *cmd = NULL;
1885 + struct mlx5_rsc_key key = {};
1886 + struct page *page;
1887 ++ int start_idx = 0;
1888 + int size;
1889 + int err;
1890 +
1891 +@@ -189,7 +208,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
1892 + if (err < 0)
1893 + goto destroy_cmd;
1894 +
1895 +- mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page);
1896 ++ start_idx = mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page, size, start_idx);
1897 +
1898 + } while (err > 0);
1899 +
1900 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
1901 +index 673f1c82d3815..c9d5d8d93994d 100644
1902 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
1903 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
1904 +@@ -309,8 +309,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
1905 + if (err)
1906 + return err;
1907 +
1908 +- err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz,
1909 +- xoff, &port_buffer, &update_buffer);
1910 ++ err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff,
1911 ++ port_buff_cell_sz, &port_buffer, &update_buffer);
1912 + if (err)
1913 + return err;
1914 + }
1915 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1916 +index bc65151321ec2..d4b7b4d73b08e 100644
1917 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1918 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1919 +@@ -1699,6 +1699,8 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
1920 + static void
1921 + mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
1922 + {
1923 ++ struct mlx5e_priv *priv;
1924 ++
1925 + if (!refcount_dec_and_test(&ft->refcount))
1926 + return;
1927 +
1928 +@@ -1708,6 +1710,8 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
1929 + rhashtable_free_and_destroy(&ft->ct_entries_ht,
1930 + mlx5_tc_ct_flush_ft_entry,
1931 + ct_priv);
1932 ++ priv = netdev_priv(ct_priv->netdev);
1933 ++ flush_workqueue(priv->wq);
1934 + mlx5_tc_ct_free_pre_ct_tables(ft);
1935 + mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
1936 + kfree(ft);
1937 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
1938 +index a4c8d8d00d5a4..72e08559e0d05 100644
1939 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
1940 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
1941 +@@ -1198,6 +1198,16 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
1942 + if (err)
1943 + return err;
1944 +
1945 ++ if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) {
1946 ++ /*
1947 ++ * Align the driver state with the register state.
1948 ++ * Temporary state change is required to enable the app list reset.
1949 ++ */
1950 ++ priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP;
1951 ++ mlx5e_dcbnl_delete_app(priv);
1952 ++ priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
1953 ++ }
1954 ++
1955 + mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
1956 + priv->dcbx_dp.trust_state);
1957 +
1958 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1959 +index 8b041deb25e5f..60a4ac0ca76da 100644
1960 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1961 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1962 +@@ -2291,6 +2291,17 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1963 + match.key->vlan_priority);
1964 +
1965 + *match_level = MLX5_MATCH_L2;
1966 ++
1967 ++ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) &&
1968 ++ match.mask->vlan_eth_type &&
1969 ++ MLX5_CAP_FLOWTABLE_TYPE(priv->mdev,
1970 ++ ft_field_support.outer_second_vid,
1971 ++ fs_type)) {
1972 ++ MLX5_SET(fte_match_set_misc, misc_c,
1973 ++ outer_second_cvlan_tag, 1);
1974 ++ spec->match_criteria_enable |=
1975 ++ MLX5_MATCH_MISC_PARAMETERS;
1976 ++ }
1977 + }
1978 + } else if (*match_level != MLX5_MATCH_NONE) {
1979 + /* cvlan_tag enabled in match criteria and
1980 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
1981 +index a45c6f25add16..8c2b249949b97 100644
1982 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
1983 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
1984 +@@ -111,6 +111,28 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
1985 + }
1986 + }
1987 +
1988 ++static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
1989 ++{
1990 ++ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
1991 ++
1992 ++ del_timer_sync(&fw_reset->timer);
1993 ++}
1994 ++
1995 ++static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
1996 ++{
1997 ++ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
1998 ++
1999 ++ if (!test_and_clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) {
2000 ++ mlx5_core_warn(dev, "Reset request was already cleared\n");
2001 ++ return -EALREADY;
2002 ++ }
2003 ++
2004 ++ mlx5_stop_sync_reset_poll(dev);
2005 ++ if (poll_health)
2006 ++ mlx5_start_health_poll(dev);
2007 ++ return 0;
2008 ++}
2009 ++
2010 + static void mlx5_sync_reset_reload_work(struct work_struct *work)
2011 + {
2012 + struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
2013 +@@ -118,6 +140,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
2014 + struct mlx5_core_dev *dev = fw_reset->dev;
2015 + int err;
2016 +
2017 ++ mlx5_sync_reset_clear_reset_requested(dev, false);
2018 + mlx5_enter_error_state(dev, true);
2019 + mlx5_unload_one(dev);
2020 + err = mlx5_health_wait_pci_up(dev);
2021 +@@ -127,23 +150,6 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
2022 + mlx5_fw_reset_complete_reload(dev);
2023 + }
2024 +
2025 +-static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
2026 +-{
2027 +- struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
2028 +-
2029 +- del_timer_sync(&fw_reset->timer);
2030 +-}
2031 +-
2032 +-static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
2033 +-{
2034 +- struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
2035 +-
2036 +- mlx5_stop_sync_reset_poll(dev);
2037 +- clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
2038 +- if (poll_health)
2039 +- mlx5_start_health_poll(dev);
2040 +-}
2041 +-
2042 + #define MLX5_RESET_POLL_INTERVAL (HZ / 10)
2043 + static void poll_sync_reset(struct timer_list *t)
2044 + {
2045 +@@ -158,7 +164,6 @@ static void poll_sync_reset(struct timer_list *t)
2046 +
2047 + if (fatal_error) {
2048 + mlx5_core_warn(dev, "Got Device Reset\n");
2049 +- mlx5_sync_reset_clear_reset_requested(dev, false);
2050 + queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
2051 + return;
2052 + }
2053 +@@ -185,13 +190,17 @@ static int mlx5_fw_reset_set_reset_sync_nack(struct mlx5_core_dev *dev)
2054 + return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, 0, 2, false);
2055 + }
2056 +
2057 +-static void mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
2058 ++static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
2059 + {
2060 + struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
2061 +
2062 ++ if (test_and_set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) {
2063 ++ mlx5_core_warn(dev, "Reset request was already set\n");
2064 ++ return -EALREADY;
2065 ++ }
2066 + mlx5_stop_health_poll(dev, true);
2067 +- set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
2068 + mlx5_start_sync_reset_poll(dev);
2069 ++ return 0;
2070 + }
2071 +
2072 + static void mlx5_fw_live_patch_event(struct work_struct *work)
2073 +@@ -220,7 +229,9 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
2074 + err ? "Failed" : "Sent");
2075 + return;
2076 + }
2077 +- mlx5_sync_reset_set_reset_requested(dev);
2078 ++ if (mlx5_sync_reset_set_reset_requested(dev))
2079 ++ return;
2080 ++
2081 + err = mlx5_fw_reset_set_reset_sync_ack(dev);
2082 + if (err)
2083 + mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack Failed. Error code: %d\n", err);
2084 +@@ -320,7 +331,8 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
2085 + struct mlx5_core_dev *dev = fw_reset->dev;
2086 + int err;
2087 +
2088 +- mlx5_sync_reset_clear_reset_requested(dev, false);
2089 ++ if (mlx5_sync_reset_clear_reset_requested(dev, false))
2090 ++ return;
2091 +
2092 + mlx5_core_warn(dev, "Sync Reset now. Device is going to reset.\n");
2093 +
2094 +@@ -349,10 +361,8 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
2095 + reset_abort_work);
2096 + struct mlx5_core_dev *dev = fw_reset->dev;
2097 +
2098 +- if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
2099 ++ if (mlx5_sync_reset_clear_reset_requested(dev, true))
2100 + return;
2101 +-
2102 +- mlx5_sync_reset_clear_reset_requested(dev, true);
2103 + mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
2104 + }
2105 +
2106 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
2107 +index cb0a48d374a3c..81786a9a424c8 100644
2108 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
2109 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
2110 +@@ -100,6 +100,14 @@ static void mlx5_lag_fib_event_flush(struct notifier_block *nb)
2111 + flush_workqueue(mp->wq);
2112 + }
2113 +
2114 ++static void mlx5_lag_fib_set(struct lag_mp *mp, struct fib_info *fi, u32 dst, int dst_len)
2115 ++{
2116 ++ mp->fib.mfi = fi;
2117 ++ mp->fib.priority = fi->fib_priority;
2118 ++ mp->fib.dst = dst;
2119 ++ mp->fib.dst_len = dst_len;
2120 ++}
2121 ++
2122 + struct mlx5_fib_event_work {
2123 + struct work_struct work;
2124 + struct mlx5_lag *ldev;
2125 +@@ -110,10 +118,10 @@ struct mlx5_fib_event_work {
2126 + };
2127 + };
2128 +
2129 +-static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
2130 +- unsigned long event,
2131 +- struct fib_info *fi)
2132 ++static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
2133 ++ struct fib_entry_notifier_info *fen_info)
2134 + {
2135 ++ struct fib_info *fi = fen_info->fi;
2136 + struct lag_mp *mp = &ldev->lag_mp;
2137 + struct fib_nh *fib_nh0, *fib_nh1;
2138 + unsigned int nhs;
2139 +@@ -121,13 +129,15 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
2140 + /* Handle delete event */
2141 + if (event == FIB_EVENT_ENTRY_DEL) {
2142 + /* stop track */
2143 +- if (mp->mfi == fi)
2144 +- mp->mfi = NULL;
2145 ++ if (mp->fib.mfi == fi)
2146 ++ mp->fib.mfi = NULL;
2147 + return;
2148 + }
2149 +
2150 + /* Handle multipath entry with lower priority value */
2151 +- if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority)
2152 ++ if (mp->fib.mfi && mp->fib.mfi != fi &&
2153 ++ (mp->fib.dst != fen_info->dst || mp->fib.dst_len != fen_info->dst_len) &&
2154 ++ fi->fib_priority >= mp->fib.priority)
2155 + return;
2156 +
2157 + /* Handle add/replace event */
2158 +@@ -143,9 +153,9 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
2159 +
2160 + i++;
2161 + mlx5_lag_set_port_affinity(ldev, i);
2162 ++ mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len);
2163 + }
2164 +
2165 +- mp->mfi = fi;
2166 + return;
2167 + }
2168 +
2169 +@@ -165,7 +175,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
2170 + }
2171 +
2172 + /* First time we see multipath route */
2173 +- if (!mp->mfi && !__mlx5_lag_is_active(ldev)) {
2174 ++ if (!mp->fib.mfi && !__mlx5_lag_is_active(ldev)) {
2175 + struct lag_tracker tracker;
2176 +
2177 + tracker = ldev->tracker;
2178 +@@ -173,7 +183,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
2179 + }
2180 +
2181 + mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY);
2182 +- mp->mfi = fi;
2183 ++ mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len);
2184 + }
2185 +
2186 + static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
2187 +@@ -184,7 +194,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
2188 + struct lag_mp *mp = &ldev->lag_mp;
2189 +
2190 + /* Check the nh event is related to the route */
2191 +- if (!mp->mfi || mp->mfi != fi)
2192 ++ if (!mp->fib.mfi || mp->fib.mfi != fi)
2193 + return;
2194 +
2195 + /* nh added/removed */
2196 +@@ -214,7 +224,7 @@ static void mlx5_lag_fib_update(struct work_struct *work)
2197 + case FIB_EVENT_ENTRY_REPLACE:
2198 + case FIB_EVENT_ENTRY_DEL:
2199 + mlx5_lag_fib_route_event(ldev, fib_work->event,
2200 +- fib_work->fen_info.fi);
2201 ++ &fib_work->fen_info);
2202 + fib_info_put(fib_work->fen_info.fi);
2203 + break;
2204 + case FIB_EVENT_NH_ADD:
2205 +@@ -313,7 +323,7 @@ void mlx5_lag_mp_reset(struct mlx5_lag *ldev)
2206 + /* Clear mfi, as it might become stale when a route delete event
2207 + * has been missed, see mlx5_lag_fib_route_event().
2208 + */
2209 +- ldev->lag_mp.mfi = NULL;
2210 ++ ldev->lag_mp.fib.mfi = NULL;
2211 + }
2212 +
2213 + int mlx5_lag_mp_init(struct mlx5_lag *ldev)
2214 +@@ -324,7 +334,7 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
2215 + /* always clear mfi, as it might become stale when a route delete event
2216 + * has been missed
2217 + */
2218 +- mp->mfi = NULL;
2219 ++ mp->fib.mfi = NULL;
2220 +
2221 + if (mp->fib_nb.notifier_call)
2222 + return 0;
2223 +@@ -354,5 +364,5 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
2224 + unregister_fib_notifier(&init_net, &mp->fib_nb);
2225 + destroy_workqueue(mp->wq);
2226 + mp->fib_nb.notifier_call = NULL;
2227 +- mp->mfi = NULL;
2228 ++ mp->fib.mfi = NULL;
2229 + }
2230 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
2231 +index dea199e79beda..b3a7f18b9e303 100644
2232 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
2233 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
2234 +@@ -15,7 +15,12 @@ enum mlx5_lag_port_affinity {
2235 +
2236 + struct lag_mp {
2237 + struct notifier_block fib_nb;
2238 +- struct fib_info *mfi; /* used in tracking fib events */
2239 ++ struct {
2240 ++ const void *mfi; /* used in tracking fib events */
2241 ++ u32 priority;
2242 ++ u32 dst;
2243 ++ int dst_len;
2244 ++ } fib;
2245 + struct workqueue_struct *wq;
2246 + };
2247 +
2248 +diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
2249 +index 199a973392806..592e191adbf7d 100644
2250 +--- a/drivers/net/ethernet/smsc/smsc911x.c
2251 ++++ b/drivers/net/ethernet/smsc/smsc911x.c
2252 +@@ -2429,7 +2429,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2253 + if (irq == -EPROBE_DEFER) {
2254 + retval = -EPROBE_DEFER;
2255 + goto out_0;
2256 +- } else if (irq <= 0) {
2257 ++ } else if (irq < 0) {
2258 + pr_warn("Could not allocate irq resource\n");
2259 + retval = -ENODEV;
2260 + goto out_0;
2261 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
2262 +index 8e8778cfbbadd..6f87e296a410f 100644
2263 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
2264 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
2265 +@@ -454,6 +454,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
2266 + plat->has_gmac4 = 1;
2267 + plat->force_sf_dma_mode = 0;
2268 + plat->tso_en = 1;
2269 ++ plat->sph_disable = 1;
2270 +
2271 + /* Multiplying factor to the clk_eee_i clock time
2272 + * period to make it closer to 100 ns. This value
2273 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
2274 +index 13fbb68158c66..06e2af9387d7c 100644
2275 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
2276 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
2277 +@@ -916,6 +916,7 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
2278 +
2279 + ret = mdio_mux_init(priv->device, mdio_mux, mdio_mux_syscon_switch_fn,
2280 + &gmac->mux_handle, priv, priv->mii);
2281 ++ of_node_put(mdio_mux);
2282 + return ret;
2283 + }
2284 +
2285 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2286 +index 9376c4e286268..9c1e19ea6fcd0 100644
2287 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2288 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2289 +@@ -7081,7 +7081,7 @@ int stmmac_dvr_probe(struct device *device,
2290 + dev_info(priv->device, "TSO feature enabled\n");
2291 + }
2292 +
2293 +- if (priv->dma_cap.sphen) {
2294 ++ if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
2295 + ndev->hw_features |= NETIF_F_GRO;
2296 + priv->sph_cap = true;
2297 + priv->sph = priv->sph_cap;
2298 +diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
2299 +index 9e16afbdbdc1d..277c91d135708 100644
2300 +--- a/drivers/net/ethernet/ti/cpsw_new.c
2301 ++++ b/drivers/net/ethernet/ti/cpsw_new.c
2302 +@@ -1246,8 +1246,10 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
2303 + data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM,
2304 + sizeof(struct cpsw_slave_data),
2305 + GFP_KERNEL);
2306 +- if (!data->slave_data)
2307 ++ if (!data->slave_data) {
2308 ++ of_node_put(tmp_node);
2309 + return -ENOMEM;
2310 ++ }
2311 +
2312 + /* Populate all the child nodes here...
2313 + */
2314 +@@ -1341,6 +1343,7 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
2315 +
2316 + err_node_put:
2317 + of_node_put(port_np);
2318 ++ of_node_put(tmp_node);
2319 + return ret;
2320 + }
2321 +
2322 +diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
2323 +index 5524ac4fae80a..642472de5a08b 100644
2324 +--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
2325 ++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
2326 +@@ -822,10 +822,10 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
2327 + static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
2328 + {
2329 + struct mii_bus *bus;
2330 +- int rc;
2331 + struct resource res;
2332 + struct device_node *np = of_get_parent(lp->phy_node);
2333 + struct device_node *npp;
2334 ++ int rc, ret;
2335 +
2336 + /* Don't register the MDIO bus if the phy_node or its parent node
2337 + * can't be found.
2338 +@@ -835,8 +835,14 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
2339 + return -ENODEV;
2340 + }
2341 + npp = of_get_parent(np);
2342 +-
2343 +- of_address_to_resource(npp, 0, &res);
2344 ++ ret = of_address_to_resource(npp, 0, &res);
2345 ++ of_node_put(npp);
2346 ++ if (ret) {
2347 ++ dev_err(dev, "%s resource error!\n",
2348 ++ dev->of_node->full_name);
2349 ++ of_node_put(np);
2350 ++ return ret;
2351 ++ }
2352 + if (lp->ndev->mem_start != res.start) {
2353 + struct phy_device *phydev;
2354 + phydev = of_phy_find_device(lp->phy_node);
2355 +@@ -845,6 +851,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
2356 + "MDIO of the phy is not registered yet\n");
2357 + else
2358 + put_device(&phydev->mdio.dev);
2359 ++ of_node_put(np);
2360 + return 0;
2361 + }
2362 +
2363 +@@ -857,6 +864,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
2364 + bus = mdiobus_alloc();
2365 + if (!bus) {
2366 + dev_err(dev, "Failed to allocate mdiobus\n");
2367 ++ of_node_put(np);
2368 + return -ENOMEM;
2369 + }
2370 +
2371 +@@ -869,6 +877,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
2372 + bus->parent = dev;
2373 +
2374 + rc = of_mdiobus_register(bus, np);
2375 ++ of_node_put(np);
2376 + if (rc) {
2377 + dev_err(dev, "Failed to register mdio bus.\n");
2378 + goto err_register;
2379 +diff --git a/drivers/net/mdio/mdio-mux-bcm6368.c b/drivers/net/mdio/mdio-mux-bcm6368.c
2380 +index 6dcbf987d61b5..8b444a8eb6b55 100644
2381 +--- a/drivers/net/mdio/mdio-mux-bcm6368.c
2382 ++++ b/drivers/net/mdio/mdio-mux-bcm6368.c
2383 +@@ -115,7 +115,7 @@ static int bcm6368_mdiomux_probe(struct platform_device *pdev)
2384 + md->mii_bus = devm_mdiobus_alloc(&pdev->dev);
2385 + if (!md->mii_bus) {
2386 + dev_err(&pdev->dev, "mdiomux bus alloc failed\n");
2387 +- return ENOMEM;
2388 ++ return -ENOMEM;
2389 + }
2390 +
2391 + bus = md->mii_bus;
2392 +diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
2393 +index 2fcf545012b16..1a5284de4341b 100644
2394 +--- a/drivers/nfc/nfcmrvl/main.c
2395 ++++ b/drivers/nfc/nfcmrvl/main.c
2396 +@@ -183,6 +183,7 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv)
2397 + {
2398 + struct nci_dev *ndev = priv->ndev;
2399 +
2400 ++ nci_unregister_device(ndev);
2401 + if (priv->ndev->nfc_dev->fw_download_in_progress)
2402 + nfcmrvl_fw_dnld_abort(priv);
2403 +
2404 +@@ -191,7 +192,6 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv)
2405 + if (gpio_is_valid(priv->config.reset_n_io))
2406 + gpio_free(priv->config.reset_n_io);
2407 +
2408 +- nci_unregister_device(ndev);
2409 + nci_free_device(ndev);
2410 + kfree(priv);
2411 + }
2412 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
2413 +index 6277b3f3031a0..ff45052cf48de 100644
2414 +--- a/drivers/pci/controller/pci-aardvark.c
2415 ++++ b/drivers/pci/controller/pci-aardvark.c
2416 +@@ -38,10 +38,6 @@
2417 + #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
2418 + #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7)
2419 + #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8)
2420 +-#define PCIE_CORE_INT_A_ASSERT_ENABLE 1
2421 +-#define PCIE_CORE_INT_B_ASSERT_ENABLE 2
2422 +-#define PCIE_CORE_INT_C_ASSERT_ENABLE 3
2423 +-#define PCIE_CORE_INT_D_ASSERT_ENABLE 4
2424 + /* PIO registers base address and register offsets */
2425 + #define PIO_BASE_ADDR 0x4000
2426 + #define PIO_CTRL (PIO_BASE_ADDR + 0x0)
2427 +@@ -102,6 +98,10 @@
2428 + #define PCIE_MSG_PM_PME_MASK BIT(7)
2429 + #define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44)
2430 + #define PCIE_ISR0_MSI_INT_PENDING BIT(24)
2431 ++#define PCIE_ISR0_CORR_ERR BIT(11)
2432 ++#define PCIE_ISR0_NFAT_ERR BIT(12)
2433 ++#define PCIE_ISR0_FAT_ERR BIT(13)
2434 ++#define PCIE_ISR0_ERR_MASK GENMASK(13, 11)
2435 + #define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
2436 + #define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val))
2437 + #define PCIE_ISR0_ALL_MASK GENMASK(31, 0)
2438 +@@ -115,6 +115,7 @@
2439 + #define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
2440 + #define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
2441 + #define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
2442 ++#define PCIE_MSI_ALL_MASK GENMASK(31, 0)
2443 + #define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
2444 + #define PCIE_MSI_DATA_MASK GENMASK(15, 0)
2445 +
2446 +@@ -271,17 +272,16 @@ struct advk_pcie {
2447 + u32 actions;
2448 + } wins[OB_WIN_COUNT];
2449 + u8 wins_count;
2450 ++ int irq;
2451 ++ struct irq_domain *rp_irq_domain;
2452 + struct irq_domain *irq_domain;
2453 + struct irq_chip irq_chip;
2454 + raw_spinlock_t irq_lock;
2455 + struct irq_domain *msi_domain;
2456 + struct irq_domain *msi_inner_domain;
2457 +- struct irq_chip msi_bottom_irq_chip;
2458 +- struct irq_chip msi_irq_chip;
2459 +- struct msi_domain_info msi_domain_info;
2460 ++ raw_spinlock_t msi_irq_lock;
2461 + DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
2462 + struct mutex msi_used_lock;
2463 +- u16 msi_msg;
2464 + int link_gen;
2465 + struct pci_bridge_emul bridge;
2466 + struct gpio_desc *reset_gpio;
2467 +@@ -476,6 +476,7 @@ static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
2468 +
2469 + static void advk_pcie_setup_hw(struct advk_pcie *pcie)
2470 + {
2471 ++ phys_addr_t msi_addr;
2472 + u32 reg;
2473 + int i;
2474 +
2475 +@@ -564,25 +565,36 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
2476 + reg |= LANE_COUNT_1;
2477 + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
2478 +
2479 ++ /* Set MSI address */
2480 ++ msi_addr = virt_to_phys(pcie);
2481 ++ advk_writel(pcie, lower_32_bits(msi_addr), PCIE_MSI_ADDR_LOW_REG);
2482 ++ advk_writel(pcie, upper_32_bits(msi_addr), PCIE_MSI_ADDR_HIGH_REG);
2483 ++
2484 + /* Enable MSI */
2485 + reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
2486 + reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
2487 + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
2488 +
2489 + /* Clear all interrupts */
2490 ++ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
2491 + advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
2492 + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
2493 + advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
2494 +
2495 +- /* Disable All ISR0/1 Sources */
2496 +- reg = PCIE_ISR0_ALL_MASK;
2497 ++ /* Disable All ISR0/1 and MSI Sources */
2498 ++ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
2499 ++ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
2500 ++ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
2501 ++
2502 ++ /* Unmask summary MSI interrupt */
2503 ++ reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
2504 + reg &= ~PCIE_ISR0_MSI_INT_PENDING;
2505 + advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);
2506 +
2507 +- advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
2508 +-
2509 +- /* Unmask all MSIs */
2510 +- advk_writel(pcie, 0, PCIE_MSI_MASK_REG);
2511 ++ /* Unmask PME interrupt for processing of PME requester */
2512 ++ reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
2513 ++ reg &= ~PCIE_MSG_PM_PME_MASK;
2514 ++ advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);
2515 +
2516 + /* Enable summary interrupt for GIC SPI source */
2517 + reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
2518 +@@ -776,11 +788,15 @@ advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
2519 + case PCI_INTERRUPT_LINE: {
2520 + /*
2521 + * From the whole 32bit register we support reading from HW only
2522 +- * one bit: PCI_BRIDGE_CTL_BUS_RESET.
2523 ++ * two bits: PCI_BRIDGE_CTL_BUS_RESET and PCI_BRIDGE_CTL_SERR.
2524 + * Other bits are retrieved only from emulated config buffer.
2525 + */
2526 + __le32 *cfgspace = (__le32 *)&bridge->conf;
2527 + u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
2528 ++ if (advk_readl(pcie, PCIE_ISR0_MASK_REG) & PCIE_ISR0_ERR_MASK)
2529 ++ val &= ~(PCI_BRIDGE_CTL_SERR << 16);
2530 ++ else
2531 ++ val |= PCI_BRIDGE_CTL_SERR << 16;
2532 + if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
2533 + val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
2534 + else
2535 +@@ -806,6 +822,19 @@ advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
2536 + break;
2537 +
2538 + case PCI_INTERRUPT_LINE:
2539 ++ /*
2540 ++ * According to Figure 6-3: Pseudo Logic Diagram for Error
2541 ++ * Message Controls in PCIe base specification, SERR# Enable bit
2542 ++ * in Bridge Control register enable receiving of ERR_* messages
2543 ++ */
2544 ++ if (mask & (PCI_BRIDGE_CTL_SERR << 16)) {
2545 ++ u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
2546 ++ if (new & (PCI_BRIDGE_CTL_SERR << 16))
2547 ++ val &= ~PCIE_ISR0_ERR_MASK;
2548 ++ else
2549 ++ val |= PCIE_ISR0_ERR_MASK;
2550 ++ advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
2551 ++ }
2552 + if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
2553 + u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
2554 + if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
2555 +@@ -833,22 +862,11 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
2556 + *value = PCI_EXP_SLTSTA_PDS << 16;
2557 + return PCI_BRIDGE_EMUL_HANDLED;
2558 +
2559 +- case PCI_EXP_RTCTL: {
2560 +- u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
2561 +- *value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
2562 +- *value |= le16_to_cpu(bridge->pcie_conf.rootctl) & PCI_EXP_RTCTL_CRSSVE;
2563 +- *value |= PCI_EXP_RTCAP_CRSVIS << 16;
2564 +- return PCI_BRIDGE_EMUL_HANDLED;
2565 +- }
2566 +-
2567 +- case PCI_EXP_RTSTA: {
2568 +- u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG);
2569 +- u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG);
2570 +- *value = msglog >> 16;
2571 +- if (isr0 & PCIE_MSG_PM_PME_MASK)
2572 +- *value |= PCI_EXP_RTSTA_PME;
2573 +- return PCI_BRIDGE_EMUL_HANDLED;
2574 +- }
2575 ++ /*
2576 ++ * PCI_EXP_RTCTL and PCI_EXP_RTSTA are also supported, but do not need
2577 ++ * to be handled here, because their values are stored in emulated
2578 ++ * config space buffer, and we read them from there when needed.
2579 ++ */
2580 +
2581 + case PCI_EXP_LNKCAP: {
2582 + u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
2583 +@@ -876,8 +894,13 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
2584 +
2585 + case PCI_EXP_DEVCAP:
2586 + case PCI_EXP_DEVCTL:
2587 ++ case PCI_EXP_DEVCAP2:
2588 ++ case PCI_EXP_DEVCTL2:
2589 ++ case PCI_EXP_LNKCAP2:
2590 ++ case PCI_EXP_LNKCTL2:
2591 + *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
2592 + return PCI_BRIDGE_EMUL_HANDLED;
2593 ++
2594 + default:
2595 + return PCI_BRIDGE_EMUL_NOT_HANDLED;
2596 + }
2597 +@@ -891,10 +914,6 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
2598 + struct advk_pcie *pcie = bridge->data;
2599 +
2600 + switch (reg) {
2601 +- case PCI_EXP_DEVCTL:
2602 +- advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
2603 +- break;
2604 +-
2605 + case PCI_EXP_LNKCTL:
2606 + advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
2607 + if (new & PCI_EXP_LNKCTL_RL)
2608 +@@ -902,18 +921,23 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
2609 + break;
2610 +
2611 + case PCI_EXP_RTCTL: {
2612 +- /* Only mask/unmask PME interrupt */
2613 +- u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) &
2614 +- ~PCIE_MSG_PM_PME_MASK;
2615 +- if ((new & PCI_EXP_RTCTL_PMEIE) == 0)
2616 +- val |= PCIE_MSG_PM_PME_MASK;
2617 +- advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
2618 ++ u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
2619 ++ /* Only emulation of PMEIE and CRSSVE bits is provided */
2620 ++ rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE;
2621 ++ bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
2622 + break;
2623 + }
2624 +
2625 +- case PCI_EXP_RTSTA:
2626 +- new = (new & PCI_EXP_RTSTA_PME) >> 9;
2627 +- advk_writel(pcie, new, PCIE_ISR0_REG);
2628 ++ /*
2629 ++ * PCI_EXP_RTSTA is also supported, but does not need to be handled
2630 ++ * here, because its value is stored in emulated config space buffer,
2631 ++ * and we write it there when needed.
2632 ++ */
2633 ++
2634 ++ case PCI_EXP_DEVCTL:
2635 ++ case PCI_EXP_DEVCTL2:
2636 ++ case PCI_EXP_LNKCTL2:
2637 ++ advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
2638 + break;
2639 +
2640 + default:
2641 +@@ -952,7 +976,7 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
2642 + bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
2643 +
2644 + /* Support interrupt A for MSI feature */
2645 +- bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
2646 ++ bridge->conf.intpin = PCI_INTERRUPT_INTA;
2647 +
2648 + /* Aardvark HW provides PCIe Capability structure in version 2 */
2649 + bridge->pcie_conf.cap = cpu_to_le16(2);
2650 +@@ -974,8 +998,12 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
2651 + return false;
2652 +
2653 + /*
2654 +- * If the link goes down after we check for link-up, nothing bad
2655 +- * happens but the config access times out.
2656 ++ * If the link goes down after we check for link-up, we have a problem:
2657 ++ * if a PIO request is executed while link-down, the whole controller
2658 ++ * gets stuck in a non-functional state, and even after link comes up
2659 ++ * again, PIO requests won't work anymore, and a reset of the whole PCIe
2660 ++ * controller is needed. Therefore we need to prevent sending PIO
2661 ++ * requests while the link is down.
2662 + */
2663 + if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie))
2664 + return false;
2665 +@@ -1175,10 +1203,10 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
2666 + struct msi_msg *msg)
2667 + {
2668 + struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
2669 +- phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg);
2670 ++ phys_addr_t msi_addr = virt_to_phys(pcie);
2671 +
2672 +- msg->address_lo = lower_32_bits(msi_msg);
2673 +- msg->address_hi = upper_32_bits(msi_msg);
2674 ++ msg->address_lo = lower_32_bits(msi_addr);
2675 ++ msg->address_hi = upper_32_bits(msi_addr);
2676 + msg->data = data->hwirq;
2677 + }
2678 +
2679 +@@ -1188,6 +1216,54 @@ static int advk_msi_set_affinity(struct irq_data *irq_data,
2680 + return -EINVAL;
2681 + }
2682 +
2683 ++static void advk_msi_irq_mask(struct irq_data *d)
2684 ++{
2685 ++ struct advk_pcie *pcie = d->domain->host_data;
2686 ++ irq_hw_number_t hwirq = irqd_to_hwirq(d);
2687 ++ unsigned long flags;
2688 ++ u32 mask;
2689 ++
2690 ++ raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
2691 ++ mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
2692 ++ mask |= BIT(hwirq);
2693 ++ advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
2694 ++ raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
2695 ++}
2696 ++
2697 ++static void advk_msi_irq_unmask(struct irq_data *d)
2698 ++{
2699 ++ struct advk_pcie *pcie = d->domain->host_data;
2700 ++ irq_hw_number_t hwirq = irqd_to_hwirq(d);
2701 ++ unsigned long flags;
2702 ++ u32 mask;
2703 ++
2704 ++ raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
2705 ++ mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
2706 ++ mask &= ~BIT(hwirq);
2707 ++ advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
2708 ++ raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
2709 ++}
2710 ++
2711 ++static void advk_msi_top_irq_mask(struct irq_data *d)
2712 ++{
2713 ++ pci_msi_mask_irq(d);
2714 ++ irq_chip_mask_parent(d);
2715 ++}
2716 ++
2717 ++static void advk_msi_top_irq_unmask(struct irq_data *d)
2718 ++{
2719 ++ pci_msi_unmask_irq(d);
2720 ++ irq_chip_unmask_parent(d);
2721 ++}
2722 ++
2723 ++static struct irq_chip advk_msi_bottom_irq_chip = {
2724 ++ .name = "MSI",
2725 ++ .irq_compose_msi_msg = advk_msi_irq_compose_msi_msg,
2726 ++ .irq_set_affinity = advk_msi_set_affinity,
2727 ++ .irq_mask = advk_msi_irq_mask,
2728 ++ .irq_unmask = advk_msi_irq_unmask,
2729 ++};
2730 ++
2731 + static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
2732 + unsigned int virq,
2733 + unsigned int nr_irqs, void *args)
2734 +@@ -1204,7 +1280,7 @@ static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
2735 +
2736 + for (i = 0; i < nr_irqs; i++)
2737 + irq_domain_set_info(domain, virq + i, hwirq + i,
2738 +- &pcie->msi_bottom_irq_chip,
2739 ++ &advk_msi_bottom_irq_chip,
2740 + domain->host_data, handle_simple_irq,
2741 + NULL, NULL);
2742 +
2743 +@@ -1260,7 +1336,6 @@ static int advk_pcie_irq_map(struct irq_domain *h,
2744 + {
2745 + struct advk_pcie *pcie = h->host_data;
2746 +
2747 +- advk_pcie_irq_mask(irq_get_irq_data(virq));
2748 + irq_set_status_flags(virq, IRQ_LEVEL);
2749 + irq_set_chip_and_handler(virq, &pcie->irq_chip,
2750 + handle_level_irq);
2751 +@@ -1274,37 +1349,25 @@ static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
2752 + .xlate = irq_domain_xlate_onecell,
2753 + };
2754 +
2755 ++static struct irq_chip advk_msi_irq_chip = {
2756 ++ .name = "advk-MSI",
2757 ++ .irq_mask = advk_msi_top_irq_mask,
2758 ++ .irq_unmask = advk_msi_top_irq_unmask,
2759 ++};
2760 ++
2761 ++static struct msi_domain_info advk_msi_domain_info = {
2762 ++ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
2763 ++ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
2764 ++ .chip = &advk_msi_irq_chip,
2765 ++};
2766 ++
2767 + static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
2768 + {
2769 + struct device *dev = &pcie->pdev->dev;
2770 +- struct device_node *node = dev->of_node;
2771 +- struct irq_chip *bottom_ic, *msi_ic;
2772 +- struct msi_domain_info *msi_di;
2773 +- phys_addr_t msi_msg_phys;
2774 +
2775 ++ raw_spin_lock_init(&pcie->msi_irq_lock);
2776 + mutex_init(&pcie->msi_used_lock);
2777 +
2778 +- bottom_ic = &pcie->msi_bottom_irq_chip;
2779 +-
2780 +- bottom_ic->name = "MSI";
2781 +- bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg;
2782 +- bottom_ic->irq_set_affinity = advk_msi_set_affinity;
2783 +-
2784 +- msi_ic = &pcie->msi_irq_chip;
2785 +- msi_ic->name = "advk-MSI";
2786 +-
2787 +- msi_di = &pcie->msi_domain_info;
2788 +- msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
2789 +- MSI_FLAG_MULTI_PCI_MSI;
2790 +- msi_di->chip = msi_ic;
2791 +-
2792 +- msi_msg_phys = virt_to_phys(&pcie->msi_msg);
2793 +-
2794 +- advk_writel(pcie, lower_32_bits(msi_msg_phys),
2795 +- PCIE_MSI_ADDR_LOW_REG);
2796 +- advk_writel(pcie, upper_32_bits(msi_msg_phys),
2797 +- PCIE_MSI_ADDR_HIGH_REG);
2798 +-
2799 + pcie->msi_inner_domain =
2800 + irq_domain_add_linear(NULL, MSI_IRQ_NUM,
2801 + &advk_msi_domain_ops, pcie);
2802 +@@ -1312,8 +1375,9 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
2803 + return -ENOMEM;
2804 +
2805 + pcie->msi_domain =
2806 +- pci_msi_create_irq_domain(of_node_to_fwnode(node),
2807 +- msi_di, pcie->msi_inner_domain);
2808 ++ pci_msi_create_irq_domain(dev_fwnode(dev),
2809 ++ &advk_msi_domain_info,
2810 ++ pcie->msi_inner_domain);
2811 + if (!pcie->msi_domain) {
2812 + irq_domain_remove(pcie->msi_inner_domain);
2813 + return -ENOMEM;
2814 +@@ -1354,7 +1418,6 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
2815 + }
2816 +
2817 + irq_chip->irq_mask = advk_pcie_irq_mask;
2818 +- irq_chip->irq_mask_ack = advk_pcie_irq_mask;
2819 + irq_chip->irq_unmask = advk_pcie_irq_unmask;
2820 +
2821 + pcie->irq_domain =
2822 +@@ -1376,13 +1439,77 @@ static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
2823 + irq_domain_remove(pcie->irq_domain);
2824 + }
2825 +
2826 ++static struct irq_chip advk_rp_irq_chip = {
2827 ++ .name = "advk-RP",
2828 ++};
2829 ++
2830 ++static int advk_pcie_rp_irq_map(struct irq_domain *h,
2831 ++ unsigned int virq, irq_hw_number_t hwirq)
2832 ++{
2833 ++ struct advk_pcie *pcie = h->host_data;
2834 ++
2835 ++ irq_set_chip_and_handler(virq, &advk_rp_irq_chip, handle_simple_irq);
2836 ++ irq_set_chip_data(virq, pcie);
2837 ++
2838 ++ return 0;
2839 ++}
2840 ++
2841 ++static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = {
2842 ++ .map = advk_pcie_rp_irq_map,
2843 ++ .xlate = irq_domain_xlate_onecell,
2844 ++};
2845 ++
2846 ++static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie)
2847 ++{
2848 ++ pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1,
2849 ++ &advk_pcie_rp_irq_domain_ops,
2850 ++ pcie);
2851 ++ if (!pcie->rp_irq_domain) {
2852 ++ dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n");
2853 ++ return -ENOMEM;
2854 ++ }
2855 ++
2856 ++ return 0;
2857 ++}
2858 ++
2859 ++static void advk_pcie_remove_rp_irq_domain(struct advk_pcie *pcie)
2860 ++{
2861 ++ irq_domain_remove(pcie->rp_irq_domain);
2862 ++}
2863 ++
2864 ++static void advk_pcie_handle_pme(struct advk_pcie *pcie)
2865 ++{
2866 ++ u32 requester = advk_readl(pcie, PCIE_MSG_LOG_REG) >> 16;
2867 ++
2868 ++ advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG);
2869 ++
2870 ++ /*
2871 ++ * PCIE_MSG_LOG_REG contains the last inbound message, so store
2872 ++ * the requester ID only when PME was not asserted yet.
2873 ++ * Also do not trigger PME interrupt when PME is still asserted.
2874 ++ */
2875 ++ if (!(le32_to_cpu(pcie->bridge.pcie_conf.rootsta) & PCI_EXP_RTSTA_PME)) {
2876 ++ pcie->bridge.pcie_conf.rootsta = cpu_to_le32(requester | PCI_EXP_RTSTA_PME);
2877 ++
2878 ++ /*
2879 ++ * Trigger PME interrupt only if PMEIE bit in Root Control is set.
2880 ++ * Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, so use PCIe interrupt 0.
2881 ++ */
2882 ++ if (!(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_PMEIE))
2883 ++ return;
2884 ++
2885 ++ if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
2886 ++ dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n");
2887 ++ }
2888 ++}
2889 ++
2890 + static void advk_pcie_handle_msi(struct advk_pcie *pcie)
2891 + {
2892 + u32 msi_val, msi_mask, msi_status, msi_idx;
2893 +
2894 + msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
2895 + msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
2896 +- msi_status = msi_val & ~msi_mask;
2897 ++ msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK);
2898 +
2899 + for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
2900 + if (!(BIT(msi_idx) & msi_status))
2901 +@@ -1411,6 +1538,22 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
2902 + isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
2903 + isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
2904 +
2905 ++ /* Process PME interrupt as the first one to do not miss PME requester id */
2906 ++ if (isr0_status & PCIE_MSG_PM_PME_MASK)
2907 ++ advk_pcie_handle_pme(pcie);
2908 ++
2909 ++ /* Process ERR interrupt */
2910 ++ if (isr0_status & PCIE_ISR0_ERR_MASK) {
2911 ++ advk_writel(pcie, PCIE_ISR0_ERR_MASK, PCIE_ISR0_REG);
2912 ++
2913 ++ /*
2914 ++ * Aardvark HW returns zero for PCI_ERR_ROOT_AER_IRQ, so use
2915 ++ * PCIe interrupt 0
2916 ++ */
2917 ++ if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
2918 ++ dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n");
2919 ++ }
2920 ++
2921 + /* Process MSI interrupts */
2922 + if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
2923 + advk_pcie_handle_msi(pcie);
2924 +@@ -1423,28 +1566,50 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
2925 + advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
2926 + PCIE_ISR1_REG);
2927 +
2928 +- generic_handle_domain_irq(pcie->irq_domain, i);
2929 ++ if (generic_handle_domain_irq(pcie->irq_domain, i) == -EINVAL)
2930 ++ dev_err_ratelimited(&pcie->pdev->dev, "unexpected INT%c IRQ\n",
2931 ++ (char)i + 'A');
2932 + }
2933 + }
2934 +
2935 +-static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
2936 ++static void advk_pcie_irq_handler(struct irq_desc *desc)
2937 + {
2938 +- struct advk_pcie *pcie = arg;
2939 +- u32 status;
2940 ++ struct advk_pcie *pcie = irq_desc_get_handler_data(desc);
2941 ++ struct irq_chip *chip = irq_desc_get_chip(desc);
2942 ++ u32 val, mask, status;
2943 ++
2944 ++ chained_irq_enter(chip, desc);
2945 +
2946 +- status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
2947 +- if (!(status & PCIE_IRQ_CORE_INT))
2948 +- return IRQ_NONE;
2949 ++ val = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
2950 ++ mask = advk_readl(pcie, HOST_CTRL_INT_MASK_REG);
2951 ++ status = val & ((~mask) & PCIE_IRQ_ALL_MASK);
2952 +
2953 +- advk_pcie_handle_int(pcie);
2954 ++ if (status & PCIE_IRQ_CORE_INT) {
2955 ++ advk_pcie_handle_int(pcie);
2956 +
2957 +- /* Clear interrupt */
2958 +- advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
2959 ++ /* Clear interrupt */
2960 ++ advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
2961 ++ }
2962 +
2963 +- return IRQ_HANDLED;
2964 ++ chained_irq_exit(chip, desc);
2965 + }
2966 +
2967 +-static void __maybe_unused advk_pcie_disable_phy(struct advk_pcie *pcie)
2968 ++static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
2969 ++{
2970 ++ struct advk_pcie *pcie = dev->bus->sysdata;
2971 ++
2972 ++ /*
2973 ++ * Emulated root bridge has its own emulated irq chip and irq domain.
2974 ++ * Argument pin is the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) and
2975 ++ * hwirq for irq_create_mapping() is indexed from zero.
2976 ++ */
2977 ++ if (pci_is_root_bus(dev->bus))
2978 ++ return irq_create_mapping(pcie->rp_irq_domain, pin - 1);
2979 ++ else
2980 ++ return of_irq_parse_and_map_pci(dev, slot, pin);
2981 ++}
2982 ++
2983 ++static void advk_pcie_disable_phy(struct advk_pcie *pcie)
2984 + {
2985 + phy_power_off(pcie->phy);
2986 + phy_exit(pcie->phy);
2987 +@@ -1508,7 +1673,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
2988 + struct advk_pcie *pcie;
2989 + struct pci_host_bridge *bridge;
2990 + struct resource_entry *entry;
2991 +- int ret, irq;
2992 ++ int ret;
2993 +
2994 + bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
2995 + if (!bridge)
2996 +@@ -1594,17 +1759,9 @@ static int advk_pcie_probe(struct platform_device *pdev)
2997 + if (IS_ERR(pcie->base))
2998 + return PTR_ERR(pcie->base);
2999 +
3000 +- irq = platform_get_irq(pdev, 0);
3001 +- if (irq < 0)
3002 +- return irq;
3003 +-
3004 +- ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
3005 +- IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
3006 +- pcie);
3007 +- if (ret) {
3008 +- dev_err(dev, "Failed to register interrupt\n");
3009 +- return ret;
3010 +- }
3011 ++ pcie->irq = platform_get_irq(pdev, 0);
3012 ++ if (pcie->irq < 0)
3013 ++ return pcie->irq;
3014 +
3015 + pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
3016 + "reset-gpios", 0,
3017 +@@ -1653,11 +1810,24 @@ static int advk_pcie_probe(struct platform_device *pdev)
3018 + return ret;
3019 + }
3020 +
3021 ++ ret = advk_pcie_init_rp_irq_domain(pcie);
3022 ++ if (ret) {
3023 ++ dev_err(dev, "Failed to initialize irq\n");
3024 ++ advk_pcie_remove_msi_irq_domain(pcie);
3025 ++ advk_pcie_remove_irq_domain(pcie);
3026 ++ return ret;
3027 ++ }
3028 ++
3029 ++ irq_set_chained_handler_and_data(pcie->irq, advk_pcie_irq_handler, pcie);
3030 ++
3031 + bridge->sysdata = pcie;
3032 + bridge->ops = &advk_pcie_ops;
3033 ++ bridge->map_irq = advk_pcie_map_irq;
3034 +
3035 + ret = pci_host_probe(bridge);
3036 + if (ret < 0) {
3037 ++ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
3038 ++ advk_pcie_remove_rp_irq_domain(pcie);
3039 + advk_pcie_remove_msi_irq_domain(pcie);
3040 + advk_pcie_remove_irq_domain(pcie);
3041 + return ret;
3042 +@@ -1670,20 +1840,68 @@ static int advk_pcie_remove(struct platform_device *pdev)
3043 + {
3044 + struct advk_pcie *pcie = platform_get_drvdata(pdev);
3045 + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
3046 ++ u32 val;
3047 + int i;
3048 +
3049 ++ /* Remove PCI bus with all devices */
3050 + pci_lock_rescan_remove();
3051 + pci_stop_root_bus(bridge->bus);
3052 + pci_remove_root_bus(bridge->bus);
3053 + pci_unlock_rescan_remove();
3054 +
3055 ++ /* Disable Root Bridge I/O space, memory space and bus mastering */
3056 ++ val = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
3057 ++ val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
3058 ++ advk_writel(pcie, val, PCIE_CORE_CMD_STATUS_REG);
3059 ++
3060 ++ /* Disable MSI */
3061 ++ val = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
3062 ++ val &= ~PCIE_CORE_CTRL2_MSI_ENABLE;
3063 ++ advk_writel(pcie, val, PCIE_CORE_CTRL2_REG);
3064 ++
3065 ++ /* Clear MSI address */
3066 ++ advk_writel(pcie, 0, PCIE_MSI_ADDR_LOW_REG);
3067 ++ advk_writel(pcie, 0, PCIE_MSI_ADDR_HIGH_REG);
3068 ++
3069 ++ /* Mask all interrupts */
3070 ++ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
3071 ++ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
3072 ++ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
3073 ++ advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_MASK_REG);
3074 ++
3075 ++ /* Clear all interrupts */
3076 ++ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
3077 ++ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
3078 ++ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
3079 ++ advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
3080 ++
3081 ++ /* Remove IRQ handler */
3082 ++ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
3083 ++
3084 ++ /* Remove IRQ domains */
3085 ++ advk_pcie_remove_rp_irq_domain(pcie);
3086 + advk_pcie_remove_msi_irq_domain(pcie);
3087 + advk_pcie_remove_irq_domain(pcie);
3088 +
3089 ++ /* Free config space for emulated root bridge */
3090 ++ pci_bridge_emul_cleanup(&pcie->bridge);
3091 ++
3092 ++ /* Assert PERST# signal which prepares PCIe card for power down */
3093 ++ if (pcie->reset_gpio)
3094 ++ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
3095 ++
3096 ++ /* Disable link training */
3097 ++ val = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
3098 ++ val &= ~LINK_TRAINING_EN;
3099 ++ advk_writel(pcie, val, PCIE_CORE_CTRL0_REG);
3100 ++
3101 + /* Disable outbound address windows mapping */
3102 + for (i = 0; i < OB_WIN_COUNT; i++)
3103 + advk_pcie_disable_ob_win(pcie, i);
3104 +
3105 ++ /* Disable phy */
3106 ++ advk_pcie_disable_phy(pcie);
3107 ++
3108 + return 0;
3109 + }
3110 +
3111 +diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
3112 +index 37504c2cce9b8..c994ebec23603 100644
3113 +--- a/drivers/pci/pci-bridge-emul.c
3114 ++++ b/drivers/pci/pci-bridge-emul.c
3115 +@@ -270,6 +270,49 @@ struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] =
3116 + .ro = GENMASK(15, 0) | PCI_EXP_RTSTA_PENDING,
3117 + .w1c = PCI_EXP_RTSTA_PME,
3118 + },
3119 ++
3120 ++ [PCI_EXP_DEVCAP2 / 4] = {
3121 ++ /*
3122 ++ * Device capabilities 2 register has reserved bits [30:27].
3123 ++ * Also bits [26:24] are reserved for non-upstream ports.
3124 ++ */
3125 ++ .ro = BIT(31) | GENMASK(23, 0),
3126 ++ },
3127 ++
3128 ++ [PCI_EXP_DEVCTL2 / 4] = {
3129 ++ /*
3130 ++ * Device control 2 register is RW. Bit 11 is reserved for
3131 ++ * non-upstream ports.
3132 ++ *
3133 ++ * Device status 2 register is reserved.
3134 ++ */
3135 ++ .rw = GENMASK(15, 12) | GENMASK(10, 0),
3136 ++ },
3137 ++
3138 ++ [PCI_EXP_LNKCAP2 / 4] = {
3139 ++ /* Link capabilities 2 register has reserved bits [30:25] and 0. */
3140 ++ .ro = BIT(31) | GENMASK(24, 1),
3141 ++ },
3142 ++
3143 ++ [PCI_EXP_LNKCTL2 / 4] = {
3144 ++ /*
3145 ++ * Link control 2 register is RW.
3146 ++ *
3147 ++ * Link status 2 register has bits 5, 15 W1C;
3148 ++ * bits 10, 11 reserved and others are RO.
3149 ++ */
3150 ++ .rw = GENMASK(15, 0),
3151 ++ .w1c = (BIT(15) | BIT(5)) << 16,
3152 ++ .ro = (GENMASK(14, 12) | GENMASK(9, 6) | GENMASK(4, 0)) << 16,
3153 ++ },
3154 ++
3155 ++ [PCI_EXP_SLTCAP2 / 4] = {
3156 ++ /* Slot capabilities 2 register is reserved. */
3157 ++ },
3158 ++
3159 ++ [PCI_EXP_SLTCTL2 / 4] = {
3160 ++ /* Both Slot control 2 and Slot status 2 registers are reserved. */
3161 ++ },
3162 + };
3163 +
3164 + /*
3165 +@@ -284,7 +327,11 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
3166 + {
3167 + BUILD_BUG_ON(sizeof(bridge->conf) != PCI_BRIDGE_CONF_END);
3168 +
3169 +- bridge->conf.class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16);
3170 ++ /*
3171 ++ * class_revision: Class is high 24 bits and revision is low 8 bit of this member,
3172 ++ * while class for PCI Bridge Normal Decode has the 24-bit value: PCI_CLASS_BRIDGE_PCI << 8
3173 ++ */
3174 ++ bridge->conf.class_revision |= cpu_to_le32((PCI_CLASS_BRIDGE_PCI << 8) << 8);
3175 + bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
3176 + bridge->conf.cache_line_size = 0x10;
3177 + bridge->conf.status = cpu_to_le16(PCI_STATUS_CAP_LIST);
3178 +diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
3179 +index e34c6cc61983b..8240b804ef847 100644
3180 +--- a/drivers/s390/block/dasd.c
3181 ++++ b/drivers/s390/block/dasd.c
3182 +@@ -1422,6 +1422,13 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
3183 + if (!cqr->lpm)
3184 + cqr->lpm = dasd_path_get_opm(device);
3185 + }
3186 ++ /*
3187 ++ * remember the amount of formatted tracks to prevent double format on
3188 ++ * ESE devices
3189 ++ */
3190 ++ if (cqr->block)
3191 ++ cqr->trkcount = atomic_read(&cqr->block->trkcount);
3192 ++
3193 + if (cqr->cpmode == 1) {
3194 + rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
3195 + (long) cqr, cqr->lpm);
3196 +@@ -1639,6 +1646,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
3197 + unsigned long now;
3198 + int nrf_suppressed = 0;
3199 + int fp_suppressed = 0;
3200 ++ struct request *req;
3201 + u8 *sense = NULL;
3202 + int expires;
3203 +
3204 +@@ -1739,7 +1747,12 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
3205 + }
3206 +
3207 + if (dasd_ese_needs_format(cqr->block, irb)) {
3208 +- if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
3209 ++ req = dasd_get_callback_data(cqr);
3210 ++ if (!req) {
3211 ++ cqr->status = DASD_CQR_ERROR;
3212 ++ return;
3213 ++ }
3214 ++ if (rq_data_dir(req) == READ) {
3215 + device->discipline->ese_read(cqr, irb);
3216 + cqr->status = DASD_CQR_SUCCESS;
3217 + cqr->stopclk = now;
3218 +@@ -2762,8 +2775,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
3219 + * complete a request partially.
3220 + */
3221 + if (proc_bytes) {
3222 +- blk_update_request(req, BLK_STS_OK,
3223 +- blk_rq_bytes(req) - proc_bytes);
3224 ++ blk_update_request(req, BLK_STS_OK, proc_bytes);
3225 + blk_mq_requeue_request(req, true);
3226 + } else if (likely(!blk_should_fake_timeout(req->q))) {
3227 + blk_mq_complete_request(req);
3228 +diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
3229 +index 460e0f1cca533..ff7b7d470e96f 100644
3230 +--- a/drivers/s390/block/dasd_eckd.c
3231 ++++ b/drivers/s390/block/dasd_eckd.c
3232 +@@ -3095,13 +3095,24 @@ static int dasd_eckd_format_device(struct dasd_device *base,
3233 + }
3234 +
3235 + static bool test_and_set_format_track(struct dasd_format_entry *to_format,
3236 +- struct dasd_block *block)
3237 ++ struct dasd_ccw_req *cqr)
3238 + {
3239 ++ struct dasd_block *block = cqr->block;
3240 + struct dasd_format_entry *format;
3241 + unsigned long flags;
3242 + bool rc = false;
3243 +
3244 + spin_lock_irqsave(&block->format_lock, flags);
3245 ++ if (cqr->trkcount != atomic_read(&block->trkcount)) {
3246 ++ /*
3247 ++ * The number of formatted tracks has changed after request
3248 ++ * start and we cannot tell if the current track was involved.
3249 ++ * To avoid data corruption, treat it as if the current track is
3250 ++ * involved
3251 ++ */
3252 ++ rc = true;
3253 ++ goto out;
3254 ++ }
3255 + list_for_each_entry(format, &block->format_list, list) {
3256 + if (format->track == to_format->track) {
3257 + rc = true;
3258 +@@ -3121,6 +3132,7 @@ static void clear_format_track(struct dasd_format_entry *format,
3259 + unsigned long flags;
3260 +
3261 + spin_lock_irqsave(&block->format_lock, flags);
3262 ++ atomic_inc(&block->trkcount);
3263 + list_del_init(&format->list);
3264 + spin_unlock_irqrestore(&block->format_lock, flags);
3265 + }
3266 +@@ -3157,7 +3169,7 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
3267 + sector_t curr_trk;
3268 + int rc;
3269 +
3270 +- req = cqr->callback_data;
3271 ++ req = dasd_get_callback_data(cqr);
3272 + block = cqr->block;
3273 + base = block->base;
3274 + private = base->private;
3275 +@@ -3182,8 +3194,11 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
3276 + }
3277 + format->track = curr_trk;
3278 + /* test if track is already in formatting by another thread */
3279 +- if (test_and_set_format_track(format, block))
3280 ++ if (test_and_set_format_track(format, cqr)) {
3281 ++ /* this is no real error so do not count down retries */
3282 ++ cqr->retries++;
3283 + return ERR_PTR(-EEXIST);
3284 ++ }
3285 +
3286 + fdata.start_unit = curr_trk;
3287 + fdata.stop_unit = curr_trk;
3288 +@@ -3282,12 +3297,11 @@ static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
3289 + cqr->proc_bytes = blk_count * blksize;
3290 + return 0;
3291 + }
3292 +- if (dst && !skip_block) {
3293 +- dst += off;
3294 ++ if (dst && !skip_block)
3295 + memset(dst, 0, blksize);
3296 +- } else {
3297 ++ else
3298 + skip_block--;
3299 +- }
3300 ++ dst += blksize;
3301 + blk_count++;
3302 + }
3303 + }
3304 +diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
3305 +index 155428bfed8ac..d94ae067f085e 100644
3306 +--- a/drivers/s390/block/dasd_int.h
3307 ++++ b/drivers/s390/block/dasd_int.h
3308 +@@ -188,6 +188,7 @@ struct dasd_ccw_req {
3309 + void (*callback)(struct dasd_ccw_req *, void *data);
3310 + void *callback_data;
3311 + unsigned int proc_bytes; /* bytes for partial completion */
3312 ++ unsigned int trkcount; /* count formatted tracks */
3313 + };
3314 +
3315 + /*
3316 +@@ -611,6 +612,7 @@ struct dasd_block {
3317 +
3318 + struct list_head format_list;
3319 + spinlock_t format_lock;
3320 ++ atomic_t trkcount;
3321 + };
3322 +
3323 + struct dasd_attention_data {
3324 +@@ -757,6 +759,18 @@ dasd_check_blocksize(int bsize)
3325 + return 0;
3326 + }
3327 +
3328 ++/*
3329 ++ * return the callback data of the original request in case there are
3330 ++ * ERP requests built on top of it
3331 ++ */
3332 ++static inline void *dasd_get_callback_data(struct dasd_ccw_req *cqr)
3333 ++{
3334 ++ while (cqr->refers)
3335 ++ cqr = cqr->refers;
3336 ++
3337 ++ return cqr->callback_data;
3338 ++}
3339 ++
3340 + /* externals in dasd.c */
3341 + #define DASD_PROFILE_OFF 0
3342 + #define DASD_PROFILE_ON 1
3343 +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
3344 +index 0371ad233fdf2..8e38a7a5cf2f5 100644
3345 +--- a/drivers/video/fbdev/core/fbmem.c
3346 ++++ b/drivers/video/fbdev/core/fbmem.c
3347 +@@ -1436,7 +1436,10 @@ fb_release(struct inode *inode, struct file *file)
3348 + __acquires(&info->lock)
3349 + __releases(&info->lock)
3350 + {
3351 +- struct fb_info * const info = file->private_data;
3352 ++ struct fb_info * const info = file_fb_info(file);
3353 ++
3354 ++ if (!info)
3355 ++ return -ENODEV;
3356 +
3357 + lock_fb_info(info);
3358 + if (info->fbops->fb_release)
3359 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
3360 +index f1f7dbfa6ecd8..8cbed2f08d1bd 100644
3361 +--- a/fs/btrfs/disk-io.c
3362 ++++ b/fs/btrfs/disk-io.c
3363 +@@ -3415,6 +3415,17 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
3364 + }
3365 +
3366 + if (sectorsize != PAGE_SIZE) {
3367 ++ /*
3368 ++ * V1 space cache has some hardcoded PAGE_SIZE usage, and is
3369 ++ * going to be deprecated.
3370 ++ *
3371 ++ * Force to use v2 cache for subpage case.
3372 ++ */
3373 ++ btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
3374 ++ btrfs_set_and_info(fs_info, FREE_SPACE_TREE,
3375 ++ "forcing free space tree for sector size %u with page size %lu",
3376 ++ sectorsize, PAGE_SIZE);
3377 ++
3378 + btrfs_warn(fs_info,
3379 + "read-write for sector size %u with page size %lu is experimental",
3380 + sectorsize, PAGE_SIZE);
3381 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3382 +index 290cfe11e7901..1221d8483d63b 100644
3383 +--- a/fs/btrfs/tree-log.c
3384 ++++ b/fs/btrfs/tree-log.c
3385 +@@ -5483,6 +5483,18 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3386 + mutex_lock(&inode->log_mutex);
3387 + }
3388 +
3389 ++ /*
3390 ++ * For symlinks, we must always log their content, which is stored in an
3391 ++ * inline extent, otherwise we could end up with an empty symlink after
3392 ++ * log replay, which is invalid on linux (symlink(2) returns -ENOENT if
3393 ++ * one attempts to create an empty symlink).
3394 ++ * We don't need to worry about flushing delalloc, because when we create
3395 ++ * the inline extent when the symlink is created (we never have delalloc
3396 ++ * for symlinks).
3397 ++ */
3398 ++ if (S_ISLNK(inode->vfs_inode.i_mode))
3399 ++ inode_only = LOG_INODE_ALL;
3400 ++
3401 + /*
3402 + * This is for cases where logging a directory could result in losing a
3403 + * a file after replaying the log. For example, if we move a file from a
3404 +@@ -5853,7 +5865,7 @@ process_leaf:
3405 + }
3406 +
3407 + ctx->log_new_dentries = false;
3408 +- if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
3409 ++ if (type == BTRFS_FT_DIR)
3410 + log_mode = LOG_INODE_ALL;
3411 + ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
3412 + log_mode, ctx);
3413 +diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
3414 +index 8a4514283a4b8..c5c5b97c2a852 100644
3415 +--- a/fs/btrfs/xattr.c
3416 ++++ b/fs/btrfs/xattr.c
3417 +@@ -264,7 +264,8 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name,
3418 + inode_inc_iversion(inode);
3419 + inode->i_ctime = current_time(inode);
3420 + ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3421 +- BUG_ON(ret);
3422 ++ if (ret)
3423 ++ btrfs_abort_transaction(trans, ret);
3424 + out:
3425 + if (start_trans)
3426 + btrfs_end_transaction(trans);
3427 +@@ -418,7 +419,8 @@ static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler,
3428 + inode_inc_iversion(inode);
3429 + inode->i_ctime = current_time(inode);
3430 + ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3431 +- BUG_ON(ret);
3432 ++ if (ret)
3433 ++ btrfs_abort_transaction(trans, ret);
3434 + }
3435 +
3436 + btrfs_end_transaction(trans);
3437 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3438 +index dababa6cf3f42..ae6b3600ed11b 100644
3439 +--- a/fs/nfs/nfs4proc.c
3440 ++++ b/fs/nfs/nfs4proc.c
3441 +@@ -366,6 +366,14 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
3442 + kunmap_atomic(start);
3443 + }
3444 +
3445 ++static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
3446 ++{
3447 ++ if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
3448 ++ fattr->pre_change_attr = version;
3449 ++ fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
3450 ++ }
3451 ++}
3452 ++
3453 + static void nfs4_test_and_free_stateid(struct nfs_server *server,
3454 + nfs4_stateid *stateid,
3455 + const struct cred *cred)
3456 +@@ -6558,7 +6566,9 @@ static void nfs4_delegreturn_release(void *calldata)
3457 + pnfs_roc_release(&data->lr.arg, &data->lr.res,
3458 + data->res.lr_ret);
3459 + if (inode) {
3460 +- nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
3461 ++ nfs4_fattr_set_prechange(&data->fattr,
3462 ++ inode_peek_iversion_raw(inode));
3463 ++ nfs_refresh_inode(inode, &data->fattr);
3464 + nfs_iput_and_deactive(inode);
3465 + }
3466 + kfree(calldata);
3467 +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
3468 +index 1450397fc0bcd..48d015ed21752 100644
3469 +--- a/include/linux/stmmac.h
3470 ++++ b/include/linux/stmmac.h
3471 +@@ -269,5 +269,6 @@ struct plat_stmmacenet_data {
3472 + int msi_rx_base_vec;
3473 + int msi_tx_base_vec;
3474 + bool use_phy_wol;
3475 ++ bool sph_disable;
3476 + };
3477 + #endif
3478 +diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
3479 +index 54363527feea4..e58342ace11f2 100644
3480 +--- a/kernel/irq/internals.h
3481 ++++ b/kernel/irq/internals.h
3482 +@@ -29,12 +29,14 @@ extern struct irqaction chained_action;
3483 + * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
3484 + * IRQTF_AFFINITY - irq thread is requested to adjust affinity
3485 + * IRQTF_FORCED_THREAD - irq action is force threaded
3486 ++ * IRQTF_READY - signals that irq thread is ready
3487 + */
3488 + enum {
3489 + IRQTF_RUNTHREAD,
3490 + IRQTF_WARNED,
3491 + IRQTF_AFFINITY,
3492 + IRQTF_FORCED_THREAD,
3493 ++ IRQTF_READY,
3494 + };
3495 +
3496 + /*
3497 +diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
3498 +index 4e3c29bb603c3..21b3ac2a29d20 100644
3499 +--- a/kernel/irq/irqdesc.c
3500 ++++ b/kernel/irq/irqdesc.c
3501 +@@ -407,6 +407,7 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
3502 + lockdep_set_class(&desc->lock, &irq_desc_lock_class);
3503 + mutex_init(&desc->request_mutex);
3504 + init_rcu_head(&desc->rcu);
3505 ++ init_waitqueue_head(&desc->wait_for_threads);
3506 +
3507 + desc_set_defaults(irq, desc, node, affinity, owner);
3508 + irqd_set(&desc->irq_data, flags);
3509 +@@ -575,6 +576,7 @@ int __init early_irq_init(void)
3510 + raw_spin_lock_init(&desc[i].lock);
3511 + lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
3512 + mutex_init(&desc[i].request_mutex);
3513 ++ init_waitqueue_head(&desc[i].wait_for_threads);
3514 + desc_set_defaults(i, &desc[i], node, NULL, NULL);
3515 + }
3516 + return arch_early_irq_init();
3517 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
3518 +index 27667e82ecc91..0c3c26fb054f7 100644
3519 +--- a/kernel/irq/manage.c
3520 ++++ b/kernel/irq/manage.c
3521 +@@ -1248,6 +1248,31 @@ static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
3522 + raw_spin_unlock_irq(&desc->lock);
3523 + }
3524 +
3525 ++/*
3526 ++ * Internal function to notify that an interrupt thread is ready.
3527 ++ */
3528 ++static void irq_thread_set_ready(struct irq_desc *desc,
3529 ++ struct irqaction *action)
3530 ++{
3531 ++ set_bit(IRQTF_READY, &action->thread_flags);
3532 ++ wake_up(&desc->wait_for_threads);
3533 ++}
3534 ++
3535 ++/*
3536 ++ * Internal function to wake up an interrupt thread and wait until it is
3537 ++ * ready.
3538 ++ */
3539 ++static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
3540 ++ struct irqaction *action)
3541 ++{
3542 ++ if (!action || !action->thread)
3543 ++ return;
3544 ++
3545 ++ wake_up_process(action->thread);
3546 ++ wait_event(desc->wait_for_threads,
3547 ++ test_bit(IRQTF_READY, &action->thread_flags));
3548 ++}
3549 ++
3550 + /*
3551 + * Interrupt handler thread
3552 + */
3553 +@@ -1259,6 +1284,8 @@ static int irq_thread(void *data)
3554 + irqreturn_t (*handler_fn)(struct irq_desc *desc,
3555 + struct irqaction *action);
3556 +
3557 ++ irq_thread_set_ready(desc, action);
3558 ++
3559 + if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
3560 + &action->thread_flags))
3561 + handler_fn = irq_forced_thread_fn;
3562 +@@ -1683,8 +1710,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
3563 + }
3564 +
3565 + if (!shared) {
3566 +- init_waitqueue_head(&desc->wait_for_threads);
3567 +-
3568 + /* Setup the type (level, edge polarity) if configured: */
3569 + if (new->flags & IRQF_TRIGGER_MASK) {
3570 + ret = __irq_set_trigger(desc,
3571 +@@ -1780,14 +1805,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
3572 +
3573 + irq_setup_timings(desc, new);
3574 +
3575 +- /*
3576 +- * Strictly no need to wake it up, but hung_task complains
3577 +- * when no hard interrupt wakes the thread up.
3578 +- */
3579 +- if (new->thread)
3580 +- wake_up_process(new->thread);
3581 +- if (new->secondary)
3582 +- wake_up_process(new->secondary->thread);
3583 ++ wake_up_and_wait_for_irq_thread_ready(desc, new);
3584 ++ wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
3585 +
3586 + register_irq_proc(irq, desc);
3587 + new->dir = NULL;
3588 +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
3589 +index 4ca6d5b199e8e..a4a9d68b1fdce 100644
3590 +--- a/kernel/rcu/tree.c
3591 ++++ b/kernel/rcu/tree.c
3592 +@@ -2476,7 +2476,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
3593 + div = READ_ONCE(rcu_divisor);
3594 + div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
3595 + bl = max(rdp->blimit, pending >> div);
3596 +- if (unlikely(bl > 100)) {
3597 ++ if (in_serving_softirq() && unlikely(bl > 100)) {
3598 + long rrn = READ_ONCE(rcu_resched_ns);
3599 +
3600 + rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
3601 +@@ -2513,18 +2513,23 @@ static void rcu_do_batch(struct rcu_data *rdp)
3602 + /*
3603 + * Stop only if limit reached and CPU has something to do.
3604 + */
3605 +- if (count >= bl && !offloaded &&
3606 +- (need_resched() ||
3607 +- (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
3608 +- break;
3609 +- if (unlikely(tlimit)) {
3610 +- /* only call local_clock() every 32 callbacks */
3611 +- if (likely((count & 31) || local_clock() < tlimit))
3612 +- continue;
3613 +- /* Exceeded the time limit, so leave. */
3614 +- break;
3615 +- }
3616 +- if (!in_serving_softirq()) {
3617 ++ if (in_serving_softirq()) {
3618 ++ if (count >= bl && (need_resched() ||
3619 ++ (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
3620 ++ break;
3621 ++
3622 ++ /*
3623 ++ * Make sure we don't spend too much time here and deprive other
3624 ++ * softirq vectors of CPU cycles.
3625 ++ */
3626 ++ if (unlikely(tlimit)) {
3627 ++ /* only call local_clock() every 32 callbacks */
3628 ++ if (likely((count & 31) || local_clock() < tlimit))
3629 ++ continue;
3630 ++ /* Exceeded the time limit, so leave. */
3631 ++ break;
3632 ++ }
3633 ++ } else {
3634 + local_bh_enable();
3635 + lockdep_assert_irqs_enabled();
3636 + cond_resched_tasks_rcu_qs();
3637 +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
3638 +index dcdcb85121e40..3b1398fbddaf8 100644
3639 +--- a/kernel/time/timekeeping.c
3640 ++++ b/kernel/time/timekeeping.c
3641 +@@ -482,7 +482,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
3642 + * of the following timestamps. Callers need to be aware of that and
3643 + * deal with it.
3644 + */
3645 +-u64 ktime_get_mono_fast_ns(void)
3646 ++u64 notrace ktime_get_mono_fast_ns(void)
3647 + {
3648 + return __ktime_get_fast_ns(&tk_fast_mono);
3649 + }
3650 +@@ -494,7 +494,7 @@ EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
3651 + * Contrary to ktime_get_mono_fast_ns() this is always correct because the
3652 + * conversion factor is not affected by NTP/PTP correction.
3653 + */
3654 +-u64 ktime_get_raw_fast_ns(void)
3655 ++u64 notrace ktime_get_raw_fast_ns(void)
3656 + {
3657 + return __ktime_get_fast_ns(&tk_fast_raw);
3658 + }
3659 +diff --git a/net/can/isotp.c b/net/can/isotp.c
3660 +index 8c753dcefe7fc..26821487a0573 100644
3661 +--- a/net/can/isotp.c
3662 ++++ b/net/can/isotp.c
3663 +@@ -1146,6 +1146,11 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
3664 +
3665 + lock_sock(sk);
3666 +
3667 ++ if (so->bound) {
3668 ++ err = -EINVAL;
3669 ++ goto out;
3670 ++ }
3671 ++
3672 + /* do not register frame reception for functional addressing */
3673 + if (so->opt.flags & CAN_ISOTP_SF_BROADCAST)
3674 + do_rx_reg = 0;
3675 +@@ -1156,10 +1161,6 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
3676 + goto out;
3677 + }
3678 +
3679 +- if (so->bound && addr->can_ifindex == so->ifindex &&
3680 +- rx_id == so->rxid && tx_id == so->txid)
3681 +- goto out;
3682 +-
3683 + dev = dev_get_by_index(net, addr->can_ifindex);
3684 + if (!dev) {
3685 + err = -ENODEV;
3686 +@@ -1186,19 +1187,6 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
3687 +
3688 + dev_put(dev);
3689 +
3690 +- if (so->bound && do_rx_reg) {
3691 +- /* unregister old filter */
3692 +- if (so->ifindex) {
3693 +- dev = dev_get_by_index(net, so->ifindex);
3694 +- if (dev) {
3695 +- can_rx_unregister(net, dev, so->rxid,
3696 +- SINGLE_MASK(so->rxid),
3697 +- isotp_rcv, sk);
3698 +- dev_put(dev);
3699 +- }
3700 +- }
3701 +- }
3702 +-
3703 + /* switch to new settings */
3704 + so->ifindex = ifindex;
3705 + so->rxid = rx_id;
3706 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
3707 +index d2e2b3d18c668..930f6c41f519c 100644
3708 +--- a/net/ipv4/igmp.c
3709 ++++ b/net/ipv4/igmp.c
3710 +@@ -2403,9 +2403,10 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
3711 + /* decrease mem now to avoid the memleak warning */
3712 + atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
3713 + &sk->sk_omem_alloc);
3714 +- kfree_rcu(psl, rcu);
3715 + }
3716 + rcu_assign_pointer(pmc->sflist, newpsl);
3717 ++ if (psl)
3718 ++ kfree_rcu(psl, rcu);
3719 + psl = newpsl;
3720 + }
3721 + rv = 1; /* > 0 for insert logic below if sl_count is 0 */
3722 +@@ -2507,11 +2508,13 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
3723 + /* decrease mem now to avoid the memleak warning */
3724 + atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
3725 + &sk->sk_omem_alloc);
3726 +- kfree_rcu(psl, rcu);
3727 +- } else
3728 ++ } else {
3729 + (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
3730 + 0, NULL, 0);
3731 ++ }
3732 + rcu_assign_pointer(pmc->sflist, newpsl);
3733 ++ if (psl)
3734 ++ kfree_rcu(psl, rcu);
3735 + pmc->sfmode = msf->imsf_fmode;
3736 + err = 0;
3737 + done:
3738 +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
3739 +index 909f937befd71..7f695c39d9a8c 100644
3740 +--- a/net/ipv6/mcast.c
3741 ++++ b/net/ipv6/mcast.c
3742 +@@ -460,10 +460,10 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
3743 + newpsl->sl_addr[i] = psl->sl_addr[i];
3744 + atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
3745 + &sk->sk_omem_alloc);
3746 +- kfree_rcu(psl, rcu);
3747 + }
3748 ++ rcu_assign_pointer(pmc->sflist, newpsl);
3749 ++ kfree_rcu(psl, rcu);
3750 + psl = newpsl;
3751 +- rcu_assign_pointer(pmc->sflist, psl);
3752 + }
3753 + rv = 1; /* > 0 for insert logic below if sl_count is 0 */
3754 + for (i = 0; i < psl->sl_count; i++) {
3755 +@@ -565,12 +565,12 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
3756 + psl->sl_count, psl->sl_addr, 0);
3757 + atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
3758 + &sk->sk_omem_alloc);
3759 +- kfree_rcu(psl, rcu);
3760 + } else {
3761 + ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
3762 + }
3763 +- mutex_unlock(&idev->mc_lock);
3764 + rcu_assign_pointer(pmc->sflist, newpsl);
3765 ++ mutex_unlock(&idev->mc_lock);
3766 ++ kfree_rcu(psl, rcu);
3767 + pmc->sfmode = gsf->gf_fmode;
3768 + err = 0;
3769 + done:
3770 +diff --git a/net/nfc/core.c b/net/nfc/core.c
3771 +index dc7a2404efdf9..5b286e1e0a6ff 100644
3772 +--- a/net/nfc/core.c
3773 ++++ b/net/nfc/core.c
3774 +@@ -38,7 +38,7 @@ int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name)
3775 +
3776 + device_lock(&dev->dev);
3777 +
3778 +- if (!device_is_registered(&dev->dev)) {
3779 ++ if (dev->shutting_down) {
3780 + rc = -ENODEV;
3781 + goto error;
3782 + }
3783 +@@ -94,7 +94,7 @@ int nfc_dev_up(struct nfc_dev *dev)
3784 +
3785 + device_lock(&dev->dev);
3786 +
3787 +- if (!device_is_registered(&dev->dev)) {
3788 ++ if (dev->shutting_down) {
3789 + rc = -ENODEV;
3790 + goto error;
3791 + }
3792 +@@ -142,7 +142,7 @@ int nfc_dev_down(struct nfc_dev *dev)
3793 +
3794 + device_lock(&dev->dev);
3795 +
3796 +- if (!device_is_registered(&dev->dev)) {
3797 ++ if (dev->shutting_down) {
3798 + rc = -ENODEV;
3799 + goto error;
3800 + }
3801 +@@ -207,7 +207,7 @@ int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols)
3802 +
3803 + device_lock(&dev->dev);
3804 +
3805 +- if (!device_is_registered(&dev->dev)) {
3806 ++ if (dev->shutting_down) {
3807 + rc = -ENODEV;
3808 + goto error;
3809 + }
3810 +@@ -246,7 +246,7 @@ int nfc_stop_poll(struct nfc_dev *dev)
3811 +
3812 + device_lock(&dev->dev);
3813 +
3814 +- if (!device_is_registered(&dev->dev)) {
3815 ++ if (dev->shutting_down) {
3816 + rc = -ENODEV;
3817 + goto error;
3818 + }
3819 +@@ -291,7 +291,7 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
3820 +
3821 + device_lock(&dev->dev);
3822 +
3823 +- if (!device_is_registered(&dev->dev)) {
3824 ++ if (dev->shutting_down) {
3825 + rc = -ENODEV;
3826 + goto error;
3827 + }
3828 +@@ -335,7 +335,7 @@ int nfc_dep_link_down(struct nfc_dev *dev)
3829 +
3830 + device_lock(&dev->dev);
3831 +
3832 +- if (!device_is_registered(&dev->dev)) {
3833 ++ if (dev->shutting_down) {
3834 + rc = -ENODEV;
3835 + goto error;
3836 + }
3837 +@@ -401,7 +401,7 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
3838 +
3839 + device_lock(&dev->dev);
3840 +
3841 +- if (!device_is_registered(&dev->dev)) {
3842 ++ if (dev->shutting_down) {
3843 + rc = -ENODEV;
3844 + goto error;
3845 + }
3846 +@@ -448,7 +448,7 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx, u8 mode)
3847 +
3848 + device_lock(&dev->dev);
3849 +
3850 +- if (!device_is_registered(&dev->dev)) {
3851 ++ if (dev->shutting_down) {
3852 + rc = -ENODEV;
3853 + goto error;
3854 + }
3855 +@@ -495,7 +495,7 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
3856 +
3857 + device_lock(&dev->dev);
3858 +
3859 +- if (!device_is_registered(&dev->dev)) {
3860 ++ if (dev->shutting_down) {
3861 + rc = -ENODEV;
3862 + kfree_skb(skb);
3863 + goto error;
3864 +@@ -552,7 +552,7 @@ int nfc_enable_se(struct nfc_dev *dev, u32 se_idx)
3865 +
3866 + device_lock(&dev->dev);
3867 +
3868 +- if (!device_is_registered(&dev->dev)) {
3869 ++ if (dev->shutting_down) {
3870 + rc = -ENODEV;
3871 + goto error;
3872 + }
3873 +@@ -601,7 +601,7 @@ int nfc_disable_se(struct nfc_dev *dev, u32 se_idx)
3874 +
3875 + device_lock(&dev->dev);
3876 +
3877 +- if (!device_is_registered(&dev->dev)) {
3878 ++ if (dev->shutting_down) {
3879 + rc = -ENODEV;
3880 + goto error;
3881 + }
3882 +@@ -1134,6 +1134,7 @@ int nfc_register_device(struct nfc_dev *dev)
3883 + dev->rfkill = NULL;
3884 + }
3885 + }
3886 ++ dev->shutting_down = false;
3887 + device_unlock(&dev->dev);
3888 +
3889 + rc = nfc_genl_device_added(dev);
3890 +@@ -1166,12 +1167,10 @@ void nfc_unregister_device(struct nfc_dev *dev)
3891 + rfkill_unregister(dev->rfkill);
3892 + rfkill_destroy(dev->rfkill);
3893 + }
3894 ++ dev->shutting_down = true;
3895 + device_unlock(&dev->dev);
3896 +
3897 + if (dev->ops->check_presence) {
3898 +- device_lock(&dev->dev);
3899 +- dev->shutting_down = true;
3900 +- device_unlock(&dev->dev);
3901 + del_timer_sync(&dev->check_pres_timer);
3902 + cancel_work_sync(&dev->check_pres_work);
3903 + }
3904 +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
3905 +index 8048a3dcc5f8b..a207f0b8137b0 100644
3906 +--- a/net/nfc/netlink.c
3907 ++++ b/net/nfc/netlink.c
3908 +@@ -1244,7 +1244,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
3909 + struct sk_buff *msg;
3910 + void *hdr;
3911 +
3912 +- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3913 ++ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
3914 + if (!msg)
3915 + return -ENOMEM;
3916 +
3917 +@@ -1260,7 +1260,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
3918 +
3919 + genlmsg_end(msg, hdr);
3920 +
3921 +- genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL);
3922 ++ genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC);
3923 +
3924 + return 0;
3925 +
3926 +diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
3927 +index a4111408ffd0c..6a1611b0e3037 100644
3928 +--- a/net/rxrpc/local_object.c
3929 ++++ b/net/rxrpc/local_object.c
3930 +@@ -117,6 +117,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
3931 + local, srx->transport_type, srx->transport.family);
3932 +
3933 + udp_conf.family = srx->transport.family;
3934 ++ udp_conf.use_udp_checksums = true;
3935 + if (udp_conf.family == AF_INET) {
3936 + udp_conf.local_ip = srx->transport.sin.sin_addr;
3937 + udp_conf.local_udp_port = srx->transport.sin.sin_port;
3938 +@@ -124,6 +125,8 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
3939 + } else {
3940 + udp_conf.local_ip6 = srx->transport.sin6.sin6_addr;
3941 + udp_conf.local_udp_port = srx->transport.sin6.sin6_port;
3942 ++ udp_conf.use_udp6_tx_checksums = true;
3943 ++ udp_conf.use_udp6_rx_checksums = true;
3944 + #endif
3945 + }
3946 + ret = udp_sock_create(net, &udp_conf, &local->socket);
3947 +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
3948 +index 3286add1a9583..e4b9a38f12e07 100644
3949 +--- a/net/sunrpc/clnt.c
3950 ++++ b/net/sunrpc/clnt.c
3951 +@@ -1065,10 +1065,13 @@ rpc_task_get_next_xprt(struct rpc_clnt *clnt)
3952 + static
3953 + void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
3954 + {
3955 +- if (task->tk_xprt &&
3956 +- !(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) &&
3957 +- (task->tk_flags & RPC_TASK_MOVEABLE)))
3958 +- return;
3959 ++ if (task->tk_xprt) {
3960 ++ if (!(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) &&
3961 ++ (task->tk_flags & RPC_TASK_MOVEABLE)))
3962 ++ return;
3963 ++ xprt_release(task);
3964 ++ xprt_put(task->tk_xprt);
3965 ++ }
3966 + if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN)
3967 + task->tk_xprt = rpc_task_get_first_xprt(clnt);
3968 + else
3969 +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
3970 +index c2f7819827b61..95a86f3fb5c6b 100644
3971 +--- a/net/sunrpc/xprtsock.c
3972 ++++ b/net/sunrpc/xprtsock.c
3973 +@@ -2848,9 +2848,6 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
3974 + }
3975 + xprt_set_bound(xprt);
3976 + xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
3977 +- ret = ERR_PTR(xs_local_setup_socket(transport));
3978 +- if (ret)
3979 +- goto out_err;
3980 + break;
3981 + default:
3982 + ret = ERR_PTR(-EAFNOSUPPORT);
3983 +diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
3984 +index 626c0c34b0b66..3a53914277d35 100644
3985 +--- a/sound/firewire/fireworks/fireworks_hwdep.c
3986 ++++ b/sound/firewire/fireworks/fireworks_hwdep.c
3987 +@@ -34,6 +34,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
3988 + type = SNDRV_FIREWIRE_EVENT_EFW_RESPONSE;
3989 + if (copy_to_user(buf, &type, sizeof(type)))
3990 + return -EFAULT;
3991 ++ count += sizeof(type);
3992 + remained -= sizeof(type);
3993 + buf += sizeof(type);
3994 +
3995 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3996 +index 9771300683c47..30295283512c3 100644
3997 +--- a/sound/pci/hda/patch_realtek.c
3998 ++++ b/sound/pci/hda/patch_realtek.c
3999 +@@ -9034,6 +9034,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4000 + SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
4001 + SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
4002 + SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
4003 ++ SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
4004 + SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
4005 + SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
4006 + SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
4007 +diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
4008 +index 13009d08b09ac..c7493549a9a50 100644
4009 +--- a/sound/soc/codecs/da7219.c
4010 ++++ b/sound/soc/codecs/da7219.c
4011 +@@ -446,7 +446,7 @@ static int da7219_tonegen_freq_put(struct snd_kcontrol *kcontrol,
4012 + struct soc_mixer_control *mixer_ctrl =
4013 + (struct soc_mixer_control *) kcontrol->private_value;
4014 + unsigned int reg = mixer_ctrl->reg;
4015 +- __le16 val;
4016 ++ __le16 val_new, val_old;
4017 + int ret;
4018 +
4019 + /*
4020 +@@ -454,13 +454,19 @@ static int da7219_tonegen_freq_put(struct snd_kcontrol *kcontrol,
4021 + * Therefore we need to convert to little endian here to align with
4022 + * HW registers.
4023 + */
4024 +- val = cpu_to_le16(ucontrol->value.integer.value[0]);
4025 ++ val_new = cpu_to_le16(ucontrol->value.integer.value[0]);
4026 +
4027 + mutex_lock(&da7219->ctrl_lock);
4028 +- ret = regmap_raw_write(da7219->regmap, reg, &val, sizeof(val));
4029 ++ ret = regmap_raw_read(da7219->regmap, reg, &val_old, sizeof(val_old));
4030 ++ if (ret == 0 && (val_old != val_new))
4031 ++ ret = regmap_raw_write(da7219->regmap, reg,
4032 ++ &val_new, sizeof(val_new));
4033 + mutex_unlock(&da7219->ctrl_lock);
4034 +
4035 +- return ret;
4036 ++ if (ret < 0)
4037 ++ return ret;
4038 ++
4039 ++ return val_old != val_new;
4040 + }
4041 +
4042 +
4043 +diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
4044 +index e4018ba3b19a2..7878c7a58ff10 100644
4045 +--- a/sound/soc/codecs/wm8958-dsp2.c
4046 ++++ b/sound/soc/codecs/wm8958-dsp2.c
4047 +@@ -530,7 +530,7 @@ static int wm8958_mbc_put(struct snd_kcontrol *kcontrol,
4048 +
4049 + wm8958_dsp_apply(component, mbc, wm8994->mbc_ena[mbc]);
4050 +
4051 +- return 0;
4052 ++ return 1;
4053 + }
4054 +
4055 + #define WM8958_MBC_SWITCH(xname, xval) {\
4056 +@@ -656,7 +656,7 @@ static int wm8958_vss_put(struct snd_kcontrol *kcontrol,
4057 +
4058 + wm8958_dsp_apply(component, vss, wm8994->vss_ena[vss]);
4059 +
4060 +- return 0;
4061 ++ return 1;
4062 + }
4063 +
4064 +
4065 +@@ -730,7 +730,7 @@ static int wm8958_hpf_put(struct snd_kcontrol *kcontrol,
4066 +
4067 + wm8958_dsp_apply(component, hpf % 3, ucontrol->value.integer.value[0]);
4068 +
4069 +- return 0;
4070 ++ return 1;
4071 + }
4072 +
4073 + #define WM8958_HPF_SWITCH(xname, xval) {\
4074 +@@ -824,7 +824,7 @@ static int wm8958_enh_eq_put(struct snd_kcontrol *kcontrol,
4075 +
4076 + wm8958_dsp_apply(component, eq, ucontrol->value.integer.value[0]);
4077 +
4078 +- return 0;
4079 ++ return 1;
4080 + }
4081 +
4082 + #define WM8958_ENH_EQ_SWITCH(xname, xval) {\
4083 +diff --git a/sound/soc/meson/aiu-acodec-ctrl.c b/sound/soc/meson/aiu-acodec-ctrl.c
4084 +index 27a6d3259c50a..442c215936d97 100644
4085 +--- a/sound/soc/meson/aiu-acodec-ctrl.c
4086 ++++ b/sound/soc/meson/aiu-acodec-ctrl.c
4087 +@@ -58,7 +58,7 @@ static int aiu_acodec_ctrl_mux_put_enum(struct snd_kcontrol *kcontrol,
4088 +
4089 + snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
4090 +
4091 +- return 0;
4092 ++ return 1;
4093 + }
4094 +
4095 + static SOC_ENUM_SINGLE_DECL(aiu_acodec_ctrl_mux_enum, AIU_ACODEC_CTRL,
4096 +diff --git a/sound/soc/meson/aiu-codec-ctrl.c b/sound/soc/meson/aiu-codec-ctrl.c
4097 +index c3ea733fce91f..c966fc60dc733 100644
4098 +--- a/sound/soc/meson/aiu-codec-ctrl.c
4099 ++++ b/sound/soc/meson/aiu-codec-ctrl.c
4100 +@@ -57,7 +57,7 @@ static int aiu_codec_ctrl_mux_put_enum(struct snd_kcontrol *kcontrol,
4101 +
4102 + snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
4103 +
4104 +- return 0;
4105 ++ return 1;
4106 + }
4107 +
4108 + static SOC_ENUM_SINGLE_DECL(aiu_hdmi_ctrl_mux_enum, AIU_HDMI_CLK_DATA_CTRL,
4109 +diff --git a/sound/soc/meson/g12a-tohdmitx.c b/sound/soc/meson/g12a-tohdmitx.c
4110 +index 9b2b59536ced0..6c99052feafd8 100644
4111 +--- a/sound/soc/meson/g12a-tohdmitx.c
4112 ++++ b/sound/soc/meson/g12a-tohdmitx.c
4113 +@@ -67,7 +67,7 @@ static int g12a_tohdmitx_i2s_mux_put_enum(struct snd_kcontrol *kcontrol,
4114 +
4115 + snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
4116 +
4117 +- return 0;
4118 ++ return 1;
4119 + }
4120 +
4121 + static SOC_ENUM_SINGLE_DECL(g12a_tohdmitx_i2s_mux_enum, TOHDMITX_CTRL0,
4122 +diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
4123 +index 38f3f36c1d729..4aa48c74f21a0 100644
4124 +--- a/sound/soc/soc-generic-dmaengine-pcm.c
4125 ++++ b/sound/soc/soc-generic-dmaengine-pcm.c
4126 +@@ -82,10 +82,10 @@ static int dmaengine_pcm_hw_params(struct snd_soc_component *component,
4127 +
4128 + memset(&slave_config, 0, sizeof(slave_config));
4129 +
4130 +- if (pcm->config && pcm->config->prepare_slave_config)
4131 +- prepare_slave_config = pcm->config->prepare_slave_config;
4132 +- else
4133 ++ if (!pcm->config)
4134 + prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
4135 ++ else
4136 ++ prepare_slave_config = pcm->config->prepare_slave_config;
4137 +
4138 + if (prepare_slave_config) {
4139 + int ret = prepare_slave_config(substream, params, &slave_config);
4140 +diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
4141 +index ee3782ecd7e3a..63ee35ebeaabb 100644
4142 +--- a/sound/soc/soc-ops.c
4143 ++++ b/sound/soc/soc-ops.c
4144 +@@ -461,7 +461,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
4145 + ret = err;
4146 + }
4147 + }
4148 +- return err;
4149 ++ return ret;
4150 + }
4151 + EXPORT_SYMBOL_GPL(snd_soc_put_volsw_sx);
4152 +
4153 +diff --git a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
4154 +index f7d84549cc3e3..79f751259098d 100755
4155 +--- a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
4156 ++++ b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
4157 +@@ -185,7 +185,7 @@ setup_prepare()
4158 +
4159 + tc filter add dev $eth0 ingress chain $(IS2 0 0) pref 1 \
4160 + protocol ipv4 flower skip_sw ip_proto udp dst_port 5201 \
4161 +- action police rate 50mbit burst 64k \
4162 ++ action police rate 50mbit burst 64k conform-exceed drop/pipe \
4163 + action goto chain $(IS2 1 0)
4164 + }
4165 +
4166 +diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
4167 +index 05e65ca1c30cd..23861c8faa61c 100644
4168 +--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
4169 ++++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
4170 +@@ -58,6 +58,21 @@
4171 + /* CPUID.0x8000_0001.EDX */
4172 + #define CPUID_GBPAGES (1ul << 26)
4173 +
4174 ++/* Page table bitfield declarations */
4175 ++#define PTE_PRESENT_MASK BIT_ULL(0)
4176 ++#define PTE_WRITABLE_MASK BIT_ULL(1)
4177 ++#define PTE_USER_MASK BIT_ULL(2)
4178 ++#define PTE_ACCESSED_MASK BIT_ULL(5)
4179 ++#define PTE_DIRTY_MASK BIT_ULL(6)
4180 ++#define PTE_LARGE_MASK BIT_ULL(7)
4181 ++#define PTE_GLOBAL_MASK BIT_ULL(8)
4182 ++#define PTE_NX_MASK BIT_ULL(63)
4183 ++
4184 ++#define PAGE_SHIFT 12
4185 ++
4186 ++#define PHYSICAL_PAGE_MASK GENMASK_ULL(51, 12)
4187 ++#define PTE_GET_PFN(pte) (((pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
4188 ++
4189 + /* General Registers in 64-Bit Mode */
4190 + struct gpr64_regs {
4191 + u64 rax;
4192 +diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
4193 +index 36407cb0ec85d..f1ddfe4c4a031 100644
4194 +--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
4195 ++++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
4196 +@@ -278,7 +278,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
4197 + else
4198 + guest_test_phys_mem = p->phys_offset;
4199 + #ifdef __s390x__
4200 +- alignment = max(0x100000, alignment);
4201 ++ alignment = max(0x100000UL, alignment);
4202 + #endif
4203 + guest_test_phys_mem &= ~(alignment - 1);
4204 +
4205 +diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
4206 +index da73b97e1e6dc..46057079d8bba 100644
4207 +--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
4208 ++++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
4209 +@@ -19,38 +19,6 @@
4210 +
4211 + vm_vaddr_t exception_handlers;
4212 +
4213 +-/* Virtual translation table structure declarations */
4214 +-struct pageUpperEntry {
4215 +- uint64_t present:1;
4216 +- uint64_t writable:1;
4217 +- uint64_t user:1;
4218 +- uint64_t write_through:1;
4219 +- uint64_t cache_disable:1;
4220 +- uint64_t accessed:1;
4221 +- uint64_t ignored_06:1;
4222 +- uint64_t page_size:1;
4223 +- uint64_t ignored_11_08:4;
4224 +- uint64_t pfn:40;
4225 +- uint64_t ignored_62_52:11;
4226 +- uint64_t execute_disable:1;
4227 +-};
4228 +-
4229 +-struct pageTableEntry {
4230 +- uint64_t present:1;
4231 +- uint64_t writable:1;
4232 +- uint64_t user:1;
4233 +- uint64_t write_through:1;
4234 +- uint64_t cache_disable:1;
4235 +- uint64_t accessed:1;
4236 +- uint64_t dirty:1;
4237 +- uint64_t reserved_07:1;
4238 +- uint64_t global:1;
4239 +- uint64_t ignored_11_09:3;
4240 +- uint64_t pfn:40;
4241 +- uint64_t ignored_62_52:11;
4242 +- uint64_t execute_disable:1;
4243 +-};
4244 +-
4245 + void regs_dump(FILE *stream, struct kvm_regs *regs,
4246 + uint8_t indent)
4247 + {
4248 +@@ -195,23 +163,21 @@ static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
4249 + return &page_table[index];
4250 + }
4251 +
4252 +-static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
4253 +- uint64_t pt_pfn,
4254 +- uint64_t vaddr,
4255 +- uint64_t paddr,
4256 +- int level,
4257 +- enum x86_page_size page_size)
4258 ++static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
4259 ++ uint64_t pt_pfn,
4260 ++ uint64_t vaddr,
4261 ++ uint64_t paddr,
4262 ++ int level,
4263 ++ enum x86_page_size page_size)
4264 + {
4265 +- struct pageUpperEntry *pte = virt_get_pte(vm, pt_pfn, vaddr, level);
4266 +-
4267 +- if (!pte->present) {
4268 +- pte->writable = true;
4269 +- pte->present = true;
4270 +- pte->page_size = (level == page_size);
4271 +- if (pte->page_size)
4272 +- pte->pfn = paddr >> vm->page_shift;
4273 ++ uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, level);
4274 ++
4275 ++ if (!(*pte & PTE_PRESENT_MASK)) {
4276 ++ *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
4277 ++ if (level == page_size)
4278 ++ *pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK);
4279 + else
4280 +- pte->pfn = vm_alloc_page_table(vm) >> vm->page_shift;
4281 ++ *pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
4282 + } else {
4283 + /*
4284 + * Entry already present. Assert that the caller doesn't want
4285 +@@ -221,7 +187,7 @@ static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
4286 + TEST_ASSERT(level != page_size,
4287 + "Cannot create hugepage at level: %u, vaddr: 0x%lx\n",
4288 + page_size, vaddr);
4289 +- TEST_ASSERT(!pte->page_size,
4290 ++ TEST_ASSERT(!(*pte & PTE_LARGE_MASK),
4291 + "Cannot create page table at level: %u, vaddr: 0x%lx\n",
4292 + level, vaddr);
4293 + }
4294 +@@ -232,8 +198,8 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
4295 + enum x86_page_size page_size)
4296 + {
4297 + const uint64_t pg_size = 1ull << ((page_size * 9) + 12);
4298 +- struct pageUpperEntry *pml4e, *pdpe, *pde;
4299 +- struct pageTableEntry *pte;
4300 ++ uint64_t *pml4e, *pdpe, *pde;
4301 ++ uint64_t *pte;
4302 +
4303 + TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K,
4304 + "Unknown or unsupported guest mode, mode: 0x%x", vm->mode);
4305 +@@ -257,24 +223,22 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
4306 + */
4307 + pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift,
4308 + vaddr, paddr, 3, page_size);
4309 +- if (pml4e->page_size)
4310 ++ if (*pml4e & PTE_LARGE_MASK)
4311 + return;
4312 +
4313 +- pdpe = virt_create_upper_pte(vm, pml4e->pfn, vaddr, paddr, 2, page_size);
4314 +- if (pdpe->page_size)
4315 ++ pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, 2, page_size);
4316 ++ if (*pdpe & PTE_LARGE_MASK)
4317 + return;
4318 +
4319 +- pde = virt_create_upper_pte(vm, pdpe->pfn, vaddr, paddr, 1, page_size);
4320 +- if (pde->page_size)
4321 ++ pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, 1, page_size);
4322 ++ if (*pde & PTE_LARGE_MASK)
4323 + return;
4324 +
4325 + /* Fill in page table entry. */
4326 +- pte = virt_get_pte(vm, pde->pfn, vaddr, 0);
4327 +- TEST_ASSERT(!pte->present,
4328 ++ pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, 0);
4329 ++ TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
4330 + "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
4331 +- pte->pfn = paddr >> vm->page_shift;
4332 +- pte->writable = true;
4333 +- pte->present = 1;
4334 ++ *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
4335 + }
4336 +
4337 + void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
4338 +@@ -282,12 +246,12 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
4339 + __virt_pg_map(vm, vaddr, paddr, X86_PAGE_SIZE_4K);
4340 + }
4341 +
4342 +-static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
4343 ++static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
4344 + uint64_t vaddr)
4345 + {
4346 + uint16_t index[4];
4347 +- struct pageUpperEntry *pml4e, *pdpe, *pde;
4348 +- struct pageTableEntry *pte;
4349 ++ uint64_t *pml4e, *pdpe, *pde;
4350 ++ uint64_t *pte;
4351 + struct kvm_cpuid_entry2 *entry;
4352 + struct kvm_sregs sregs;
4353 + int max_phy_addr;
4354 +@@ -329,30 +293,29 @@ static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vc
4355 + index[3] = (vaddr >> 39) & 0x1ffu;
4356 +
4357 + pml4e = addr_gpa2hva(vm, vm->pgd);
4358 +- TEST_ASSERT(pml4e[index[3]].present,
4359 ++ TEST_ASSERT(pml4e[index[3]] & PTE_PRESENT_MASK,
4360 + "Expected pml4e to be present for gva: 0x%08lx", vaddr);
4361 +- TEST_ASSERT((*(uint64_t*)(&pml4e[index[3]]) &
4362 +- (rsvd_mask | (1ull << 7))) == 0,
4363 ++ TEST_ASSERT((pml4e[index[3]] & (rsvd_mask | PTE_LARGE_MASK)) == 0,
4364 + "Unexpected reserved bits set.");
4365 +
4366 +- pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size);
4367 +- TEST_ASSERT(pdpe[index[2]].present,
4368 ++ pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
4369 ++ TEST_ASSERT(pdpe[index[2]] & PTE_PRESENT_MASK,
4370 + "Expected pdpe to be present for gva: 0x%08lx", vaddr);
4371 +- TEST_ASSERT(pdpe[index[2]].page_size == 0,
4372 ++ TEST_ASSERT(!(pdpe[index[2]] & PTE_LARGE_MASK),
4373 + "Expected pdpe to map a pde not a 1-GByte page.");
4374 +- TEST_ASSERT((*(uint64_t*)(&pdpe[index[2]]) & rsvd_mask) == 0,
4375 ++ TEST_ASSERT((pdpe[index[2]] & rsvd_mask) == 0,
4376 + "Unexpected reserved bits set.");
4377 +
4378 +- pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size);
4379 +- TEST_ASSERT(pde[index[1]].present,
4380 ++ pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
4381 ++ TEST_ASSERT(pde[index[1]] & PTE_PRESENT_MASK,
4382 + "Expected pde to be present for gva: 0x%08lx", vaddr);
4383 +- TEST_ASSERT(pde[index[1]].page_size == 0,
4384 ++ TEST_ASSERT(!(pde[index[1]] & PTE_LARGE_MASK),
4385 + "Expected pde to map a pte not a 2-MByte page.");
4386 +- TEST_ASSERT((*(uint64_t*)(&pde[index[1]]) & rsvd_mask) == 0,
4387 ++ TEST_ASSERT((pde[index[1]] & rsvd_mask) == 0,
4388 + "Unexpected reserved bits set.");
4389 +
4390 +- pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size);
4391 +- TEST_ASSERT(pte[index[0]].present,
4392 ++ pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
4393 ++ TEST_ASSERT(pte[index[0]] & PTE_PRESENT_MASK,
4394 + "Expected pte to be present for gva: 0x%08lx", vaddr);
4395 +
4396 + return &pte[index[0]];
4397 +@@ -360,7 +323,7 @@ static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vc
4398 +
4399 + uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr)
4400 + {
4401 +- struct pageTableEntry *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
4402 ++ uint64_t *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
4403 +
4404 + return *(uint64_t *)pte;
4405 + }
4406 +@@ -368,18 +331,17 @@ uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr)
4407 + void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr,
4408 + uint64_t pte)
4409 + {
4410 +- struct pageTableEntry *new_pte = _vm_get_page_table_entry(vm, vcpuid,
4411 +- vaddr);
4412 ++ uint64_t *new_pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
4413 +
4414 + *(uint64_t *)new_pte = pte;
4415 + }
4416 +
4417 + void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
4418 + {
4419 +- struct pageUpperEntry *pml4e, *pml4e_start;
4420 +- struct pageUpperEntry *pdpe, *pdpe_start;
4421 +- struct pageUpperEntry *pde, *pde_start;
4422 +- struct pageTableEntry *pte, *pte_start;
4423 ++ uint64_t *pml4e, *pml4e_start;
4424 ++ uint64_t *pdpe, *pdpe_start;
4425 ++ uint64_t *pde, *pde_start;
4426 ++ uint64_t *pte, *pte_start;
4427 +
4428 + if (!vm->pgd_created)
4429 + return;
4430 +@@ -389,58 +351,58 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
4431 + fprintf(stream, "%*s index hvaddr gpaddr "
4432 + "addr w exec dirty\n",
4433 + indent, "");
4434 +- pml4e_start = (struct pageUpperEntry *) addr_gpa2hva(vm, vm->pgd);
4435 ++ pml4e_start = (uint64_t *) addr_gpa2hva(vm, vm->pgd);
4436 + for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
4437 + pml4e = &pml4e_start[n1];
4438 +- if (!pml4e->present)
4439 ++ if (!(*pml4e & PTE_PRESENT_MASK))
4440 + continue;
4441 +- fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10lx %u "
4442 ++ fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u "
4443 + " %u\n",
4444 + indent, "",
4445 + pml4e - pml4e_start, pml4e,
4446 +- addr_hva2gpa(vm, pml4e), (uint64_t) pml4e->pfn,
4447 +- pml4e->writable, pml4e->execute_disable);
4448 ++ addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e),
4449 ++ !!(*pml4e & PTE_WRITABLE_MASK), !!(*pml4e & PTE_NX_MASK));
4450 +
4451 +- pdpe_start = addr_gpa2hva(vm, pml4e->pfn * vm->page_size);
4452 ++ pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
4453 + for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
4454 + pdpe = &pdpe_start[n2];
4455 +- if (!pdpe->present)
4456 ++ if (!(*pdpe & PTE_PRESENT_MASK))
4457 + continue;
4458 +- fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10lx "
4459 ++ fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10llx "
4460 + "%u %u\n",
4461 + indent, "",
4462 + pdpe - pdpe_start, pdpe,
4463 + addr_hva2gpa(vm, pdpe),
4464 +- (uint64_t) pdpe->pfn, pdpe->writable,
4465 +- pdpe->execute_disable);
4466 ++ PTE_GET_PFN(*pdpe), !!(*pdpe & PTE_WRITABLE_MASK),
4467 ++ !!(*pdpe & PTE_NX_MASK));
4468 +
4469 +- pde_start = addr_gpa2hva(vm, pdpe->pfn * vm->page_size);
4470 ++ pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
4471 + for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
4472 + pde = &pde_start[n3];
4473 +- if (!pde->present)
4474 ++ if (!(*pde & PTE_PRESENT_MASK))
4475 + continue;
4476 + fprintf(stream, "%*spde 0x%-3zx %p "
4477 +- "0x%-12lx 0x%-10lx %u %u\n",
4478 ++ "0x%-12lx 0x%-10llx %u %u\n",
4479 + indent, "", pde - pde_start, pde,
4480 + addr_hva2gpa(vm, pde),
4481 +- (uint64_t) pde->pfn, pde->writable,
4482 +- pde->execute_disable);
4483 ++ PTE_GET_PFN(*pde), !!(*pde & PTE_WRITABLE_MASK),
4484 ++ !!(*pde & PTE_NX_MASK));
4485 +
4486 +- pte_start = addr_gpa2hva(vm, pde->pfn * vm->page_size);
4487 ++ pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
4488 + for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
4489 + pte = &pte_start[n4];
4490 +- if (!pte->present)
4491 ++ if (!(*pte & PTE_PRESENT_MASK))
4492 + continue;
4493 + fprintf(stream, "%*spte 0x%-3zx %p "
4494 +- "0x%-12lx 0x%-10lx %u %u "
4495 ++ "0x%-12lx 0x%-10llx %u %u "
4496 + " %u 0x%-10lx\n",
4497 + indent, "",
4498 + pte - pte_start, pte,
4499 + addr_hva2gpa(vm, pte),
4500 +- (uint64_t) pte->pfn,
4501 +- pte->writable,
4502 +- pte->execute_disable,
4503 +- pte->dirty,
4504 ++ PTE_GET_PFN(*pte),
4505 ++ !!(*pte & PTE_WRITABLE_MASK),
4506 ++ !!(*pte & PTE_NX_MASK),
4507 ++ !!(*pte & PTE_DIRTY_MASK),
4508 + ((uint64_t) n1 << 27)
4509 + | ((uint64_t) n2 << 18)
4510 + | ((uint64_t) n3 << 9)
4511 +@@ -558,8 +520,8 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
4512 + vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
4513 + {
4514 + uint16_t index[4];
4515 +- struct pageUpperEntry *pml4e, *pdpe, *pde;
4516 +- struct pageTableEntry *pte;
4517 ++ uint64_t *pml4e, *pdpe, *pde;
4518 ++ uint64_t *pte;
4519 +
4520 + TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
4521 + "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
4522 +@@ -572,22 +534,22 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
4523 + if (!vm->pgd_created)
4524 + goto unmapped_gva;
4525 + pml4e = addr_gpa2hva(vm, vm->pgd);
4526 +- if (!pml4e[index[3]].present)
4527 ++ if (!(pml4e[index[3]] & PTE_PRESENT_MASK))
4528 + goto unmapped_gva;
4529 +
4530 +- pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size);
4531 +- if (!pdpe[index[2]].present)
4532 ++ pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
4533 ++ if (!(pdpe[index[2]] & PTE_PRESENT_MASK))
4534 + goto unmapped_gva;
4535 +
4536 +- pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size);
4537 +- if (!pde[index[1]].present)
4538 ++ pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
4539 ++ if (!(pde[index[1]] & PTE_PRESENT_MASK))
4540 + goto unmapped_gva;
4541 +
4542 +- pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size);
4543 +- if (!pte[index[0]].present)
4544 ++ pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
4545 ++ if (!(pte[index[0]] & PTE_PRESENT_MASK))
4546 + goto unmapped_gva;
4547 +
4548 +- return (pte[index[0]].pfn * vm->page_size) + (gva & 0xfffu);
4549 ++ return (PTE_GET_PFN(pte[index[0]]) * vm->page_size) + (gva & 0xfffu);
4550 +
4551 + unmapped_gva:
4552 + TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
4553 +diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
4554 +index a3402cd8d5b68..9ff22f28032dd 100755
4555 +--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
4556 ++++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
4557 +@@ -61,9 +61,12 @@ setup_prepare()
4558 +
4559 + vrf_prepare
4560 + mirror_gre_topo_create
4561 ++ # Avoid changing br1's PVID while it is operational as a L3 interface.
4562 ++ ip link set dev br1 down
4563 +
4564 + ip link set dev $swp3 master br1
4565 + bridge vlan add dev br1 vid 555 pvid untagged self
4566 ++ ip link set dev br1 up
4567 + ip address add dev br1 192.0.2.129/28
4568 + ip address add dev br1 2001:db8:2::1/64
4569 +
4570 +diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
4571 +index 59067f64b7753..2672ac0b6d1f3 100644
4572 +--- a/tools/testing/selftests/net/so_txtime.c
4573 ++++ b/tools/testing/selftests/net/so_txtime.c
4574 +@@ -421,7 +421,7 @@ static void usage(const char *progname)
4575 + "Options:\n"
4576 + " -4 only IPv4\n"
4577 + " -6 only IPv6\n"
4578 +- " -c <clock> monotonic (default) or tai\n"
4579 ++ " -c <clock> monotonic or tai (default)\n"
4580 + " -D <addr> destination IP address (server)\n"
4581 + " -S <addr> source IP address (client)\n"
4582 + " -r run rx mode\n"
4583 +@@ -475,7 +475,7 @@ static void parse_opts(int argc, char **argv)
4584 + cfg_rx = true;
4585 + break;
4586 + case 't':
4587 +- cfg_start_time_ns = strtol(optarg, NULL, 0);
4588 ++ cfg_start_time_ns = strtoll(optarg, NULL, 0);
4589 + break;
4590 + case 'm':
4591 + cfg_mark = strtol(optarg, NULL, 0);
4592 +diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
4593 +index 1d64891e64923..34ebd1fe5eed2 100644
4594 +--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
4595 ++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
4596 +@@ -955,7 +955,7 @@ TEST(ERRNO_valid)
4597 + ASSERT_EQ(0, ret);
4598 +
4599 + EXPECT_EQ(parent, syscall(__NR_getppid));
4600 +- EXPECT_EQ(-1, read(0, NULL, 0));
4601 ++ EXPECT_EQ(-1, read(-1, NULL, 0));
4602 + EXPECT_EQ(E2BIG, errno);
4603 + }
4604 +
4605 +@@ -974,7 +974,7 @@ TEST(ERRNO_zero)
4606 +
4607 + EXPECT_EQ(parent, syscall(__NR_getppid));
4608 + /* "errno" of 0 is ok. */
4609 +- EXPECT_EQ(0, read(0, NULL, 0));
4610 ++ EXPECT_EQ(0, read(-1, NULL, 0));
4611 + }
4612 +
4613 + /*
4614 +@@ -995,7 +995,7 @@ TEST(ERRNO_capped)
4615 + ASSERT_EQ(0, ret);
4616 +
4617 + EXPECT_EQ(parent, syscall(__NR_getppid));
4618 +- EXPECT_EQ(-1, read(0, NULL, 0));
4619 ++ EXPECT_EQ(-1, read(-1, NULL, 0));
4620 + EXPECT_EQ(4095, errno);
4621 + }
4622 +
4623 +@@ -1026,7 +1026,7 @@ TEST(ERRNO_order)
4624 + ASSERT_EQ(0, ret);
4625 +
4626 + EXPECT_EQ(parent, syscall(__NR_getppid));
4627 +- EXPECT_EQ(-1, read(0, NULL, 0));
4628 ++ EXPECT_EQ(-1, read(-1, NULL, 0));
4629 + EXPECT_EQ(12, errno);
4630 + }
4631 +
4632 +@@ -2579,7 +2579,7 @@ void *tsync_sibling(void *data)
4633 + ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
4634 + if (!ret)
4635 + return (void *)SIBLING_EXIT_NEWPRIVS;
4636 +- read(0, NULL, 0);
4637 ++ read(-1, NULL, 0);
4638 + return (void *)SIBLING_EXIT_UNKILLED;
4639 + }
4640 +
4641 +diff --git a/tools/testing/selftests/vm/mremap_test.c b/tools/testing/selftests/vm/mremap_test.c
4642 +index e3ce33a9954ea..8f4dbbd60c09c 100644
4643 +--- a/tools/testing/selftests/vm/mremap_test.c
4644 ++++ b/tools/testing/selftests/vm/mremap_test.c
4645 +@@ -119,6 +119,59 @@ static unsigned long long get_mmap_min_addr(void)
4646 + return addr;
4647 + }
4648 +
4649 ++/*
4650 ++ * Returns false if the requested remap region overlaps with an
4651 ++ * existing mapping (e.g text, stack) else returns true.
4652 ++ */
4653 ++static bool is_remap_region_valid(void *addr, unsigned long long size)
4654 ++{
4655 ++ void *remap_addr = NULL;
4656 ++ bool ret = true;
4657 ++
4658 ++ /* Use MAP_FIXED_NOREPLACE flag to ensure region is not mapped */
4659 ++ remap_addr = mmap(addr, size, PROT_READ | PROT_WRITE,
4660 ++ MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
4661 ++ -1, 0);
4662 ++
4663 ++ if (remap_addr == MAP_FAILED) {
4664 ++ if (errno == EEXIST)
4665 ++ ret = false;
4666 ++ } else {
4667 ++ munmap(remap_addr, size);
4668 ++ }
4669 ++
4670 ++ return ret;
4671 ++}
4672 ++
4673 ++/* Returns mmap_min_addr sysctl tunable from procfs */
4674 ++static unsigned long long get_mmap_min_addr(void)
4675 ++{
4676 ++ FILE *fp;
4677 ++ int n_matched;
4678 ++ static unsigned long long addr;
4679 ++
4680 ++ if (addr)
4681 ++ return addr;
4682 ++
4683 ++ fp = fopen("/proc/sys/vm/mmap_min_addr", "r");
4684 ++ if (fp == NULL) {
4685 ++ ksft_print_msg("Failed to open /proc/sys/vm/mmap_min_addr: %s\n",
4686 ++ strerror(errno));
4687 ++ exit(KSFT_SKIP);
4688 ++ }
4689 ++
4690 ++ n_matched = fscanf(fp, "%llu", &addr);
4691 ++ if (n_matched != 1) {
4692 ++ ksft_print_msg("Failed to read /proc/sys/vm/mmap_min_addr: %s\n",
4693 ++ strerror(errno));
4694 ++ fclose(fp);
4695 ++ exit(KSFT_SKIP);
4696 ++ }
4697 ++
4698 ++ fclose(fp);
4699 ++ return addr;
4700 ++}
4701 ++
4702 + /*
4703 + * Returns the start address of the mapping on success, else returns
4704 + * NULL on failure.