Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:6.1 commit in: /
Date: Fri, 17 Mar 2023 10:43:32
Message-Id: 1679049795.37741a37bb8ed70e4d9d33faf73da22b41dbf0d1.mpagano@gentoo
commit: 37741a37bb8ed70e4d9d33faf73da22b41dbf0d1
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar 17 10:43:15 2023 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar 17 10:43:15 2023 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=37741a37

Linux patch 6.1.20

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1019_linux-6.1.20.patch | 6280 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 6284 insertions(+)

diff --git a/0000_README b/0000_README
index 3728b6a9..4b2b0a69 100644
--- a/0000_README
+++ b/0000_README
@@ -119,6 +119,10 @@ Patch: 1018_linux-6.1.19.patch
From: https://www.kernel.org
Desc: Linux 6.1.19

+Patch: 1019_linux-6.1.20.patch
+From: https://www.kernel.org
+Desc: Linux 6.1.20
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1019_linux-6.1.20.patch b/1019_linux-6.1.20.patch
new file mode 100644
index 00000000..66126f8f
--- /dev/null
+++ b/1019_linux-6.1.20.patch
@@ -0,0 +1,6280 @@
+diff --git a/Makefile b/Makefile
+index ea18c4c20738c..a842ec6d19325 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 19
++SUBLEVEL = 20
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+
+diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
+index 5b60c248de9ea..cbefa5a773846 100644
+--- a/arch/alpha/kernel/module.c
++++ b/arch/alpha/kernel/module.c
+@@ -146,10 +146,8 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
+ base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr;
+ symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr;
+
+- /* The small sections were sorted to the end of the segment.
+- The following should definitely cover them. */
+- gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000;
+ got = sechdrs[me->arch.gotsecindex].sh_addr;
++ gp = got + 0x8000;
+
+ for (i = 0; i < n; i++) {
+ unsigned long r_sym = ELF64_R_SYM (rela[i].r_info);
+diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
+index 3a2bb2e8fdad4..fbff1cea62caa 100644
+--- a/arch/m68k/kernel/setup_mm.c
++++ b/arch/m68k/kernel/setup_mm.c
+@@ -326,16 +326,16 @@ void __init setup_arch(char **cmdline_p)
+ panic("No configuration setup");
+ }
+
+-#ifdef CONFIG_BLK_DEV_INITRD
+- if (m68k_ramdisk.size) {
++ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && m68k_ramdisk.size)
+ memblock_reserve(m68k_ramdisk.addr, m68k_ramdisk.size);
++
++ paging_init();
++
++ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && m68k_ramdisk.size) {
+ initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
+ initrd_end = initrd_start + m68k_ramdisk.size;
+ pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
+ }
+-#endif
+-
+- paging_init();
+
+ #ifdef CONFIG_NATFEAT
+ nf_init();
+diff --git a/arch/mips/include/asm/mach-rc32434/pci.h b/arch/mips/include/asm/mach-rc32434/pci.h
+index 9a6eefd127571..3eb767c8a4eec 100644
+--- a/arch/mips/include/asm/mach-rc32434/pci.h
++++ b/arch/mips/include/asm/mach-rc32434/pci.h
+@@ -374,7 +374,7 @@ struct pci_msu {
+ PCI_CFG04_STAT_SSE | \
+ PCI_CFG04_STAT_PE)
+
+-#define KORINA_CNFG1 ((KORINA_STAT<<16)|KORINA_CMD)
++#define KORINA_CNFG1 (KORINA_STAT | KORINA_CMD)
+
+ #define KORINA_REVID 0
+ #define KORINA_CLASS_CODE 0
+diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts
+index 73f8c998c64df..d4f5f159d6f23 100644
+--- a/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts
++++ b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts
+@@ -10,7 +10,6 @@
+
+ / {
+ model = "fsl,T1040RDB-REV-A";
+- compatible = "fsl,T1040RDB-REV-A";
+ };
+
+ &seville_port0 {
+diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
+index eb6d094083fd6..317659fdeacf2 100644
+--- a/arch/powerpc/include/asm/hw_irq.h
++++ b/arch/powerpc/include/asm/hw_irq.h
+@@ -36,15 +36,17 @@
+ #define PACA_IRQ_DEC 0x08 /* Or FIT */
+ #define PACA_IRQ_HMI 0x10
+ #define PACA_IRQ_PMI 0x20
++#define PACA_IRQ_REPLAYING 0x40
+
+ /*
+ * Some soft-masked interrupts must be hard masked until they are replayed
+ * (e.g., because the soft-masked handler does not clear the exception).
++ * Interrupt replay itself must remain hard masked too.
+ */
+ #ifdef CONFIG_PPC_BOOK3S
+-#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE|PACA_IRQ_PMI)
++#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE|PACA_IRQ_PMI|PACA_IRQ_REPLAYING)
+ #else
+-#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE)
++#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE|PACA_IRQ_REPLAYING)
+ #endif
+
+ #endif /* CONFIG_PPC64 */
+diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
+index 09f1790d0ae16..0ab3511a47d77 100644
+--- a/arch/powerpc/include/asm/paca.h
++++ b/arch/powerpc/include/asm/paca.h
+@@ -295,7 +295,6 @@ extern void free_unused_pacas(void);
+
+ #else /* CONFIG_PPC64 */
+
+-static inline void allocate_paca_ptrs(void) { }
+ static inline void allocate_paca(int cpu) { }
+ static inline void free_unused_pacas(void) { }
+
+diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
+index f63505d74932b..6c6cb53d70458 100644
+--- a/arch/powerpc/include/asm/smp.h
++++ b/arch/powerpc/include/asm/smp.h
+@@ -26,6 +26,7 @@
+ #include <asm/percpu.h>
+
+ extern int boot_cpuid;
++extern int boot_cpu_hwid; /* PPC64 only */
+ extern int spinning_secondaries;
+ extern u32 *cpu_to_phys_id;
+ extern bool coregroup_enabled;
+diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
+index caebe1431596e..ee95937bdaf14 100644
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -67,11 +67,9 @@ static void iommu_debugfs_add(struct iommu_table *tbl)
+ static void iommu_debugfs_del(struct iommu_table *tbl)
+ {
+ char name[10];
+- struct dentry *liobn_entry;
+
+ sprintf(name, "%08lx", tbl->it_index);
+- liobn_entry = debugfs_lookup(name, iommu_debugfs_dir);
+- debugfs_remove(liobn_entry);
++ debugfs_lookup_and_remove(name, iommu_debugfs_dir);
+ }
+ #else
+ static void iommu_debugfs_add(struct iommu_table *tbl){}
+diff --git a/arch/powerpc/kernel/irq_64.c b/arch/powerpc/kernel/irq_64.c
+index eb2b380e52a0d..9dc0ad3c533a8 100644
+--- a/arch/powerpc/kernel/irq_64.c
++++ b/arch/powerpc/kernel/irq_64.c
+@@ -70,22 +70,19 @@ int distribute_irqs = 1;
+
+ static inline void next_interrupt(struct pt_regs *regs)
+ {
+- /*
+- * Softirq processing can enable/disable irqs, which will leave
+- * MSR[EE] enabled and the soft mask set to IRQS_DISABLED. Fix
+- * this up.
+- */
+- if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+- hard_irq_disable();
+- else
+- irq_soft_mask_set(IRQS_ALL_DISABLED);
++ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
++ WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
++ WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
++ }
+
+ /*
+ * We are responding to the next interrupt, so interrupt-off
+ * latencies should be reset here.
+ */
++ lockdep_hardirq_exit();
+ trace_hardirqs_on();
+ trace_hardirqs_off();
++ lockdep_hardirq_enter();
+ }
+
+ static inline bool irq_happened_test_and_clear(u8 irq)
+@@ -97,22 +94,11 @@ static inline bool irq_happened_test_and_clear(u8 irq)
+ return false;
+ }
+
+-void replay_soft_interrupts(void)
++static void __replay_soft_interrupts(void)
+ {
+ struct pt_regs regs;
+
+ /*
+- * Be careful here, calling these interrupt handlers can cause
+- * softirqs to be raised, which they may run when calling irq_exit,
+- * which will cause local_irq_enable() to be run, which can then
+- * recurse into this function. Don't keep any state across
+- * interrupt handler calls which may change underneath us.
+- *
+- * Softirqs can not be disabled over replay to stop this recursion
+- * because interrupts taken in idle code may require RCU softirq
+- * to run in the irq RCU tracking context. This is a hard problem
+- * to fix without changes to the softirq or idle layer.
+- *
+ * We use local_paca rather than get_paca() to avoid all the
+ * debug_smp_processor_id() business in this low level function.
+ */
+@@ -120,13 +106,20 @@ void replay_soft_interrupts(void)
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+ WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
++ WARN_ON(local_paca->irq_happened & PACA_IRQ_REPLAYING);
+ }
+
++ /*
++ * PACA_IRQ_REPLAYING prevents interrupt handlers from enabling
++ * MSR[EE] to get PMIs, which can result in more IRQs becoming
++ * pending.
++ */
++ local_paca->irq_happened |= PACA_IRQ_REPLAYING;
++
+ ppc_save_regs(&regs);
+ regs.softe = IRQS_ENABLED;
+ regs.msr |= MSR_EE;
+
+-again:
+ /*
+ * Force the delivery of pending soft-disabled interrupts on PS3.
+ * Any HV call will have this side effect.
+@@ -175,13 +168,14 @@ again:
+ next_interrupt(&regs);
+ }
+
+- /*
+- * Softirq processing can enable and disable interrupts, which can
+- * result in new irqs becoming pending. Must keep looping until we
+- * have cleared out all pending interrupts.
+- */
+- if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS)
+- goto again;
++ local_paca->irq_happened &= ~PACA_IRQ_REPLAYING;
++}
++
++void replay_soft_interrupts(void)
++{
++ irq_enter(); /* See comment in arch_local_irq_restore */
++ __replay_soft_interrupts();
++ irq_exit();
+ }
+
+ #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
+@@ -200,13 +194,13 @@ static inline void replay_soft_interrupts_irqrestore(void)
+ if (kuap_state != AMR_KUAP_BLOCKED)
+ set_kuap(AMR_KUAP_BLOCKED);
+
+- replay_soft_interrupts();
++ __replay_soft_interrupts();
+
+ if (kuap_state != AMR_KUAP_BLOCKED)
+ set_kuap(kuap_state);
+ }
+ #else
+-#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
++#define replay_soft_interrupts_irqrestore() __replay_soft_interrupts()
+ #endif
+
+ notrace void arch_local_irq_restore(unsigned long mask)
+@@ -219,9 +213,13 @@ notrace void arch_local_irq_restore(unsigned long mask)
+ return;
+ }
+
+- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+- WARN_ON_ONCE(in_nmi() || in_hardirq());
++ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
++ WARN_ON_ONCE(in_nmi());
++ WARN_ON_ONCE(in_hardirq());
++ WARN_ON_ONCE(local_paca->irq_happened & PACA_IRQ_REPLAYING);
++ }
+
++again:
+ /*
+ * After the stb, interrupts are unmasked and there are no interrupts
+ * pending replay. The restart sequence makes this atomic with
+@@ -248,6 +246,12 @@ notrace void arch_local_irq_restore(unsigned long mask)
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(!(mfmsr() & MSR_EE));
+
++ /*
++ * If we came here from the replay below, we might have a preempt
++ * pending (due to preempt_enable_no_resched()). Have to check now.
++ */
++ preempt_check_resched();
++
+ return;
+
+ happened:
+@@ -261,6 +265,7 @@ happened:
+ irq_soft_mask_set(IRQS_ENABLED);
+ local_paca->irq_happened = 0;
+ __hard_irq_enable();
++ preempt_check_resched();
+ return;
+ }
+
+@@ -296,12 +301,38 @@ happened:
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
+ trace_hardirqs_off();
+
++ /*
++ * Now enter interrupt context. The interrupt handlers themselves
++ * also call irq_enter/exit (which is okay, they can nest). But call
++ * it here now to hold off softirqs until the below irq_exit(). If
++ * we allowed replayed handlers to run softirqs, that enables irqs,
++ * which must replay interrupts, which recurses in here and makes
++ * things more complicated. The recursion is limited to 2, and it can
++ * be made to work, but it's complicated.
++ *
++ * local_bh_disable can not be used here because interrupts taken in
++ * idle are not in the right context (RCU, tick, etc) to run softirqs
++ * so irq_enter must be called.
++ */
++ irq_enter();
++
+ replay_soft_interrupts_irqrestore();
+
++ irq_exit();
++
++ if (unlikely(local_paca->irq_happened != PACA_IRQ_HARD_DIS)) {
++ /*
++ * The softirq processing in irq_exit() may enable interrupts
++ * temporarily, which can result in MSR[EE] being enabled and
++ * more irqs becoming pending. Go around again if that happens.
++ */
++ trace_hardirqs_on();
++ preempt_enable_no_resched();
++ goto again;
++ }
++
+ trace_hardirqs_on();
+ irq_soft_mask_set(IRQS_ENABLED);
+- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+- WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS)
+ local_paca->irq_happened = 0;
+ __hard_irq_enable();
+ preempt_enable();
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index 1eed87d954ba8..8537c354c560b 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -366,8 +366,8 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
+ be32_to_cpu(intserv[found_thread]));
+ boot_cpuid = found;
+
+- // Pass the boot CPU's hard CPU id back to our caller
+- *((u32 *)data) = be32_to_cpu(intserv[found_thread]);
++ if (IS_ENABLED(CONFIG_PPC64))
++ boot_cpu_hwid = be32_to_cpu(intserv[found_thread]);
+
+ /*
+ * PAPR defines "logical" PVR values for cpus that
+@@ -751,7 +751,6 @@ static inline void save_fscr_to_task(void) {}
+
+ void __init early_init_devtree(void *params)
+ {
+- u32 boot_cpu_hwid;
+ phys_addr_t limit;
+
+ DBG(" -> early_init_devtree(%px)\n", params);
+@@ -847,7 +846,7 @@ void __init early_init_devtree(void *params)
+ /* Retrieve CPU related informations from the flat tree
+ * (altivec support, boot CPU ID, ...)
+ */
+- of_scan_flat_dt(early_init_dt_scan_cpus, &boot_cpu_hwid);
++ of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
+ if (boot_cpuid < 0) {
+ printk("Failed to identify boot CPU !\n");
+ BUG();
+@@ -864,11 +863,6 @@ void __init early_init_devtree(void *params)
+
+ mmu_early_init_devtree();
+
+- // NB. paca is not installed until later in early_setup()
+- allocate_paca_ptrs();
+- allocate_paca(boot_cpuid);
+- set_hard_smp_processor_id(boot_cpuid, boot_cpu_hwid);
+-
+ #ifdef CONFIG_PPC_POWERNV
+ /* Scan and build the list of machine check recoverable ranges */
+ of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index 6d041993a45dc..efb301a4987ca 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -86,6 +86,10 @@ EXPORT_SYMBOL(machine_id);
+ int boot_cpuid = -1;
+ EXPORT_SYMBOL_GPL(boot_cpuid);
+
++#ifdef CONFIG_PPC64
++int boot_cpu_hwid = -1;
++#endif
++
+ /*
+ * These are used in binfmt_elf.c to put aux entries on the stack
+ * for each elf executable being started.
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index a0dee7354fe6b..b2e0d3ce4261c 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -385,17 +385,21 @@ void __init early_setup(unsigned long dt_ptr)
+ /*
+ * Do early initialization using the flattened device
+ * tree, such as retrieving the physical memory map or
+- * calculating/retrieving the hash table size.
++ * calculating/retrieving the hash table size, discover
++ * boot_cpuid and boot_cpu_hwid.
+ */
+ early_init_devtree(__va(dt_ptr));
+
+- /* Now we know the logical id of our boot cpu, setup the paca. */
+- if (boot_cpuid != 0) {
+- /* Poison paca_ptrs[0] again if it's not the boot cpu */
+- memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
+- }
++ allocate_paca_ptrs();
++ allocate_paca(boot_cpuid);
++ set_hard_smp_processor_id(boot_cpuid, boot_cpu_hwid);
+ fixup_boot_paca(paca_ptrs[boot_cpuid]);
+ setup_paca(paca_ptrs[boot_cpuid]); /* install the paca into registers */
++ // smp_processor_id() now reports boot_cpuid
++
++#ifdef CONFIG_SMP
++ task_thread_info(current)->cpu = boot_cpuid; // fix task_cpu(current)
++#endif
+
+ /*
+ * Configure exception handlers. This include setting up trampolines
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index f157552d79b38..285159e65a3ba 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -374,7 +374,7 @@ void vtime_flush(struct task_struct *tsk)
+ #define calc_cputime_factors()
+ #endif
+
+-void __delay(unsigned long loops)
++void __no_kcsan __delay(unsigned long loops)
+ {
+ unsigned long start;
+
+@@ -395,7 +395,7 @@ void __delay(unsigned long loops)
+ }
+ EXPORT_SYMBOL(__delay);
+
+-void udelay(unsigned long usecs)
++void __no_kcsan udelay(unsigned long usecs)
+ {
+ __delay(tb_ticks_per_usec * usecs);
+ }
+diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
+index a379b0ce19ffa..8643b2c8b76ef 100644
+--- a/arch/powerpc/net/bpf_jit_comp32.c
++++ b/arch/powerpc/net/bpf_jit_comp32.c
+@@ -79,6 +79,20 @@ static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
+ #define SEEN_NVREG_FULL_MASK 0x0003ffff /* Non volatile registers r14-r31 */
+ #define SEEN_NVREG_TEMP_MASK 0x00001e01 /* BPF_REG_5, BPF_REG_AX, TMP_REG */
+
++static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
++{
++ /*
++ * We only need a stack frame if:
++ * - we call other functions (kernel helpers), or
++ * - we use non volatile registers, or
++ * - we use tail call counter
++ * - the bpf program uses its stack area
++ * The latter condition is deduced from the usage of BPF_REG_FP
++ */
++ return ctx->seen & (SEEN_FUNC | SEEN_TAILCALL | SEEN_NVREG_FULL_MASK) ||
++ bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
++}
++
+ void bpf_jit_realloc_regs(struct codegen_context *ctx)
+ {
+ unsigned int nvreg_mask;
+@@ -118,7 +132,8 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
+
+ #define BPF_TAILCALL_PROLOGUE_SIZE 4
+
+- EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
++ if (bpf_has_stack_frame(ctx))
++ EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
+
+ if (ctx->seen & SEEN_TAILCALL)
+ EMIT(PPC_RAW_STW(_R4, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
+@@ -171,7 +186,8 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
+ EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
+
+ /* Tear down our stack frame */
+- EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
++ if (bpf_has_stack_frame(ctx))
++ EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
+
+ if (ctx->seen & SEEN_FUNC)
+ EMIT(PPC_RAW_MTLR(_R0));
+diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
+index ba8050e63acfb..8b4ddccea2795 100644
+--- a/arch/riscv/Makefile
++++ b/arch/riscv/Makefile
+@@ -87,6 +87,13 @@ endif
+ # Avoid generating .eh_frame sections.
+ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
+
++# The RISC-V attributes frequently cause compatibility issues and provide no
++# information, so just turn them off.
++KBUILD_CFLAGS += $(call cc-option,-mno-riscv-attribute)
++KBUILD_AFLAGS += $(call cc-option,-mno-riscv-attribute)
++KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr)
++KBUILD_AFLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr)
++
+ KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
+ KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
+
+diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
+index 9e73922e1e2e5..d47d87c2d7e3d 100644
+--- a/arch/riscv/include/asm/ftrace.h
++++ b/arch/riscv/include/asm/ftrace.h
+@@ -109,6 +109,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+ #define ftrace_init_nop ftrace_init_nop
+ #endif
+
+-#endif
++#endif /* CONFIG_DYNAMIC_FTRACE */
+
+ #endif /* _ASM_RISCV_FTRACE_H */
+diff --git a/arch/riscv/include/asm/parse_asm.h b/arch/riscv/include/asm/parse_asm.h
+index f36368de839f5..3cd00332d70f5 100644
+--- a/arch/riscv/include/asm/parse_asm.h
++++ b/arch/riscv/include/asm/parse_asm.h
+@@ -3,6 +3,9 @@
+ * Copyright (C) 2020 SiFive
+ */
+
++#ifndef _ASM_RISCV_INSN_H
++#define _ASM_RISCV_INSN_H
++
+ #include <linux/bits.h>
+
+ /* The bit field of immediate value in I-type instruction */
+@@ -217,3 +220,5 @@ static inline bool is_ ## INSN_NAME ## _insn(long insn) \
+ (RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \
+ (RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); })
++
++#endif /* _ASM_RISCV_INSN_H */
+diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h
+index 9a7d7346001ee..98d9de07cba17 100644
+--- a/arch/riscv/include/asm/patch.h
++++ b/arch/riscv/include/asm/patch.h
+@@ -9,4 +9,6 @@
+ int patch_text_nosync(void *addr, const void *insns, size_t len);
+ int patch_text(void *addr, u32 insn);
+
++extern int riscv_patch_in_stop_machine;
++
+ #endif /* _ASM_RISCV_PATCH_H */
+diff --git a/arch/riscv/kernel/compat_vdso/Makefile b/arch/riscv/kernel/compat_vdso/Makefile
+index 260daf3236d3a..7f34f3c7c8827 100644
+--- a/arch/riscv/kernel/compat_vdso/Makefile
++++ b/arch/riscv/kernel/compat_vdso/Makefile
+@@ -14,6 +14,10 @@ COMPAT_LD := $(LD)
+ COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
+ COMPAT_LD_FLAGS := -melf32lriscv
+
++# Disable attributes, as they're useless and break the build.
++COMPAT_CC_FLAGS += $(call cc-option,-mno-riscv-attribute)
++COMPAT_CC_FLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr)
++
+ # Files to link into the compat_vdso
+ obj-compat_vdso = $(patsubst %, %.o, $(compat_vdso-syms)) note.o
+
+diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
+index 5bff37af4770b..03a6434a8cdd0 100644
+--- a/arch/riscv/kernel/ftrace.c
++++ b/arch/riscv/kernel/ftrace.c
+@@ -15,10 +15,19 @@
+ void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
+ {
+ mutex_lock(&text_mutex);
++
++ /*
++ * The code sequences we use for ftrace can't be patched while the
++ * kernel is running, so we need to use stop_machine() to modify them
++ * for now. This doesn't play nice with text_mutex, we use this flag
++ * to elide the check.
++ */
++ riscv_patch_in_stop_machine = true;
+ }
+
+ void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
+ {
++ riscv_patch_in_stop_machine = false;
+ mutex_unlock(&text_mutex);
+ }
+
+@@ -107,9 +116,9 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+ {
+ int out;
+
+- ftrace_arch_code_modify_prepare();
++ mutex_lock(&text_mutex);
+ out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+- ftrace_arch_code_modify_post_process();
++ mutex_unlock(&text_mutex);
+
+ return out;
+ }
+diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
+index 765004b605132..e099961453cca 100644
+--- a/arch/riscv/kernel/patch.c
++++ b/arch/riscv/kernel/patch.c
+@@ -11,6 +11,7 @@
+ #include <asm/kprobes.h>
+ #include <asm/cacheflush.h>
+ #include <asm/fixmap.h>
++#include <asm/ftrace.h>
+ #include <asm/patch.h>
+
+ struct patch_insn {
+@@ -19,6 +20,8 @@ struct patch_insn {
+ atomic_t cpu_count;
+ };
+
++int riscv_patch_in_stop_machine = false;
++
+ #ifdef CONFIG_MMU
+ /*
+ * The fix_to_virt(, idx) needs a const value (not a dynamic variable of
+@@ -59,8 +62,15 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
+ * Before reaching here, it was expected to lock the text_mutex
+ * already, so we don't need to give another lock here and could
+ * ensure that it was safe between each cores.
++ *
++ * We're currently using stop_machine() for ftrace & kprobes, and while
++ * that ensures text_mutex is held before installing the mappings it
++ * does not ensure text_mutex is held by the calling thread. That's
++ * safe but triggers a lockdep failure, so just elide it for that
++ * specific case.
+ */
+- lockdep_assert_held(&text_mutex);
++ if (!riscv_patch_in_stop_machine)
++ lockdep_assert_held(&text_mutex);
+
+ if (across_pages)
+ patch_map(addr + len, FIX_TEXT_POKE1);
+@@ -121,13 +131,25 @@ NOKPROBE_SYMBOL(patch_text_cb);
+
+ int patch_text(void *addr, u32 insn)
+ {
++ int ret;
+ struct patch_insn patch = {
+ .addr = addr,
+ .insn = insn,
+ .cpu_count = ATOMIC_INIT(0),
+ };
+
+- return stop_machine_cpuslocked(patch_text_cb,
+- &patch, cpu_online_mask);
++ /*
++ * kprobes takes text_mutex, before calling patch_text(), but as we call
++ * calls stop_machine(), the lockdep assertion in patch_insn_write()
++ * gets confused by the context in which the lock is taken.
++ * Instead, ensure the lock is held before calling stop_machine(), and
++ * set riscv_patch_in_stop_machine to skip the check in
++ * patch_insn_write().
++ */
++ lockdep_assert_held(&text_mutex);
++ riscv_patch_in_stop_machine = true;
++ ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
++ riscv_patch_in_stop_machine = false;
++ return ret;
+ }
+ NOKPROBE_SYMBOL(patch_text);
+diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
+index 85cd5442d2f81..17d7383f201a5 100644
+--- a/arch/riscv/kernel/stacktrace.c
++++ b/arch/riscv/kernel/stacktrace.c
+@@ -92,7 +92,7 @@ void notrace walk_stackframe(struct task_struct *task,
+ while (!kstack_end(ksp)) {
+ if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
+ break;
+- pc = (*ksp++) - 0x4;
++ pc = READ_ONCE_NOCHECK(*ksp++) - 0x4;
+ }
+ }
+
+diff --git a/arch/um/kernel/vmlinux.lds.S b/arch/um/kernel/vmlinux.lds.S
+index 16e49bfa2b426..53d719c04ba94 100644
+--- a/arch/um/kernel/vmlinux.lds.S
++++ b/arch/um/kernel/vmlinux.lds.S
+@@ -1,4 +1,4 @@
+-
++#define RUNTIME_DISCARD_EXIT
+ KERNEL_STACK_SIZE = 4096 * (1 << CONFIG_KERNEL_STACK_ORDER);
+
+ #ifdef CONFIG_LD_SCRIPT_STATIC
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index f05ebaa26f0ff..ef8cabfbe8540 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1695,6 +1695,9 @@ extern struct kvm_x86_ops kvm_x86_ops;
+ #define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
+ #include <asm/kvm-x86-ops.h>
+
++int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops);
++void kvm_x86_vendor_exit(void);
++
+ #define __KVM_HAVE_ARCH_VM_ALLOC
+ static inline struct kvm *kvm_arch_alloc_vm(void)
+ {
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index c75d75b9f11aa..d2dbbc50b3a7b 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -880,6 +880,15 @@ void init_spectral_chicken(struct cpuinfo_x86 *c)
+ }
+ }
+ #endif
++ /*
++ * Work around Erratum 1386. The XSAVES instruction malfunctions in
++ * certain circumstances on Zen1/2 uarch, and not all parts have had
++ * updated microcode at the time of writing (March 2023).
++ *
++ * Affected parts all have no supervisor XSAVE states, meaning that
++ * the XSAVEC instruction (which works fine) is equivalent.
++ */
++ clear_cpu_cap(c, X86_FEATURE_XSAVES);
+ }
+
+ static void init_amd_zn(struct cpuinfo_x86 *c)
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index bfe93a1c4f92e..3629dd979667c 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -5080,15 +5080,34 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
+
+ static int __init svm_init(void)
+ {
++ int r;
++
+ __unused_size_checks();
+
+- return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
+- __alignof__(struct vcpu_svm), THIS_MODULE);
++ r = kvm_x86_vendor_init(&svm_init_ops);
++ if (r)
++ return r;
++
++ /*
++ * Common KVM initialization _must_ come last, after this, /dev/kvm is
++ * exposed to userspace!
++ */
++ r = kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
++ __alignof__(struct vcpu_svm), THIS_MODULE);
++ if (r)
++ goto err_kvm_init;
++
++ return 0;
++
++err_kvm_init:
++ kvm_x86_vendor_exit();
++ return r;
+ }
+
+ static void __exit svm_exit(void)
+ {
+ kvm_exit();
++ kvm_x86_vendor_exit();
+ }
+
+ module_init(svm_init)
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index f5c1cb7cec8a7..bc868958e91fe 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -551,6 +551,33 @@ static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
+ return 0;
+ }
+
++static void hv_reset_evmcs(void)
++{
++ struct hv_vp_assist_page *vp_ap;
++
++ if (!static_branch_unlikely(&enable_evmcs))
++ return;
++
++ /*
++ * KVM should enable eVMCS if and only if all CPUs have a VP assist
++ * page, and should reject CPU onlining if eVMCS is enabled the CPU
++ * doesn't have a VP assist page allocated.
++ */
++ vp_ap = hv_get_vp_assist_page(smp_processor_id());
++ if (WARN_ON_ONCE(!vp_ap))
++ return;
++
++ /*
++ * Reset everything to support using non-enlightened VMCS access later
++ * (e.g. when we reload the module with enlightened_vmcs=0)
++ */
++ vp_ap->nested_control.features.directhypercall = 0;
++ vp_ap->current_nested_vmcs = 0;
++ vp_ap->enlighten_vmentry = 0;
++}
++
++#else /* IS_ENABLED(CONFIG_HYPERV) */
++static void hv_reset_evmcs(void) {}
+ #endif /* IS_ENABLED(CONFIG_HYPERV) */
+
+ /*
+@@ -2501,6 +2528,8 @@ static void vmx_hardware_disable(void)
+ if (cpu_vmxoff())
+ kvm_spurious_fault();
+
++ hv_reset_evmcs();
++
+ intel_pt_handle_vmx(0);
+ }
+
+@@ -8427,41 +8456,23 @@ static void vmx_cleanup_l1d_flush(void)
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ }
+
+-static void vmx_exit(void)
++static void __vmx_exit(void)
+ {
++ allow_smaller_maxphyaddr = false;
++
+ #ifdef CONFIG_KEXEC_CORE
+ RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
+ synchronize_rcu();
+ #endif
++ vmx_cleanup_l1d_flush();
++}
+
++static void vmx_exit(void)
++{
+ kvm_exit();
++ kvm_x86_vendor_exit();
+
+-#if IS_ENABLED(CONFIG_HYPERV)
+- if (static_branch_unlikely(&enable_evmcs)) {
+- int cpu;
+- struct hv_vp_assist_page *vp_ap;
+- /*
+- * Reset everything to support using non-enlightened VMCS
+- * access later (e.g. when we reload the module with
+- * enlightened_vmcs=0)
+- */
+- for_each_online_cpu(cpu) {
+- vp_ap = hv_get_vp_assist_page(cpu);
+-
+- if (!vp_ap)
+- continue;
+-
+- vp_ap->nested_control.features.directhypercall = 0;
+- vp_ap->current_nested_vmcs = 0;
+- vp_ap->enlighten_vmentry = 0;
+- }
+-
+- static_branch_disable(&enable_evmcs);
+- }
+-#endif
+- vmx_cleanup_l1d_flush();
+-
+- allow_smaller_maxphyaddr = false;
++ __vmx_exit();
+ }
+ module_exit(vmx_exit);
+
+@@ -8502,23 +8513,20 @@ static int __init vmx_init(void)
+ }
+ #endif
+
+- r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
+- __alignof__(struct vcpu_vmx), THIS_MODULE);
++ r = kvm_x86_vendor_init(&vmx_init_ops);
+ if (r)
+ return r;
+
+ /*
+- * Must be called after kvm_init() so enable_ept is properly set
++ * Must be called after common x86 init so enable_ept is properly set
+ * up. Hand the parameter mitigation value in which was stored in
+ * the pre module init parser. If no parameter was given, it will
+ * contain 'auto' which will be turned into the default 'cond'
+ * mitigation mode.
+ */
+ r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
+- if (r) {
+- vmx_exit();
+- return r;
+- }
++ if (r)
++ goto err_l1d_flush;
+
+ vmx_setup_fb_clear_ctrl();
+
+@@ -8542,6 +8550,21 @@ static int __init vmx_init(void)
+ if (!enable_ept)
+ allow_smaller_maxphyaddr = true;
+
++ /*
++ * Common KVM initialization _must_ come last, after this, /dev/kvm is
++ * exposed to userspace!
++ */
++ r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
++ __alignof__(struct vcpu_vmx), THIS_MODULE);
++ if (r)
++ goto err_kvm_init;
++
+ return 0;
++
++err_kvm_init:
++ __vmx_exit();
++err_l1d_flush:
++ kvm_x86_vendor_exit();
++ return r;
+ }
+ module_init(vmx_init);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 68827b8dc37a5..ab09d292bdede 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9351,7 +9351,16 @@ static struct notifier_block pvclock_gtod_notifier = {
+
+ int kvm_arch_init(void *opaque)
+ {
+- struct kvm_x86_init_ops *ops = opaque;
++ return 0;
++}
++
++void kvm_arch_exit(void)
++{
++
++}
++
++int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
++{
+ u64 host_pat;
+ int r;
+
+@@ -9441,8 +9450,9 @@ out_free_x86_emulator_cache:
+ kmem_cache_destroy(x86_emulator_cache);
+ return r;
+ }
++EXPORT_SYMBOL_GPL(kvm_x86_vendor_init);
+
+-void kvm_arch_exit(void)
++void kvm_x86_vendor_exit(void)
+ {
+ #ifdef CONFIG_X86_64
+ if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
+@@ -9468,6 +9478,7 @@ void kvm_arch_exit(void)
+ WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
+ #endif
+ }
++EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
+
+ static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
+ {
+diff --git a/block/blk.h b/block/blk.h
+index 8b75a95b28d60..a186ea20f39d8 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -436,7 +436,7 @@ static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
+ }
+ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);
+
+-int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner);
++int disk_scan_partitions(struct gendisk *disk, fmode_t mode);
+
+ int disk_alloc_events(struct gendisk *disk);
+ void disk_add_events(struct gendisk *disk);
+diff --git a/block/genhd.c b/block/genhd.c
+index c4765681a8b4b..0b6928e948f31 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -356,9 +356,10 @@ void disk_uevent(struct gendisk *disk, enum kobject_action action)
+ }
+ EXPORT_SYMBOL_GPL(disk_uevent);
+
+-int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner)
++int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
+ {
+ struct block_device *bdev;
++ int ret = 0;
+
+ if (disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN))
+ return -EINVAL;
+@@ -366,16 +367,29 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner)
+ return -EINVAL;
+ if (disk->open_partitions)
+ return -EBUSY;
+- /* Someone else has bdev exclusively open? */
+- if (disk->part0->bd_holder && disk->part0->bd_holder != owner)
+- return -EBUSY;
+
+ set_bit(GD_NEED_PART_SCAN, &disk->state);
+- bdev = blkdev_get_by_dev(disk_devt(disk), mode, NULL);
++ /*
++ * If the device is opened exclusively by current thread already, it's
++ * safe to scan partitons, otherwise, use bd_prepare_to_claim() to
++ * synchronize with other exclusive openers and other partition
++ * scanners.
++ */
++ if (!(mode & FMODE_EXCL)) {
++ ret = bd_prepare_to_claim(disk->part0, disk_scan_partitions);
++ if (ret)
++ return ret;
++ }
++
++ bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL);
+ if (IS_ERR(bdev))
+- return PTR_ERR(bdev);
+- blkdev_put(bdev, mode);
+- return 0;
++ ret = PTR_ERR(bdev);
++ else
++ blkdev_put(bdev, mode & ~FMODE_EXCL);
++
++ if (!(mode & FMODE_EXCL))
++ bd_abort_claiming(disk->part0, disk_scan_partitions);
++ return ret;
+ }
+
+ /**
+@@ -501,9 +515,14 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ if (ret)
+ goto out_unregister_bdi;
+
++ /* Make sure the first partition scan will be proceed */
++ if (get_capacity(disk) && !(disk->flags & GENHD_FL_NO_PART) &&
++ !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
++ set_bit(GD_NEED_PART_SCAN, &disk->state);
++
+ bdev_add(disk->part0, ddev->devt);
+ if (get_capacity(disk))
+- disk_scan_partitions(disk, FMODE_READ, NULL);
++ disk_scan_partitions(disk, FMODE_READ);
+
+ /*
+ * Announce the disk and partitions after all partitions are
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 96617512982e5..9c5f637ff153f 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -467,10 +467,10 @@ static int blkdev_bszset(struct block_device *bdev, fmode_t mode,
+ * user space. Note the separate arg/argp parameters that are needed
+ * to deal with the compat_ptr() conversion.
+ */
+-static int blkdev_common_ioctl(struct file *file, fmode_t mode, unsigned cmd,
+- unsigned long arg, void __user *argp)
++static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
++ unsigned int cmd, unsigned long arg,
++ void __user *argp)
+ {
+- struct block_device *bdev = I_BDEV(file->f_mapping->host);
+ unsigned int max_sectors;
+
+ switch (cmd) {
+@@ -528,8 +528,7 @@ static int blkdev_common_ioctl(struct file *file, fmode_t mode, unsigned cmd,
+ return -EACCES;
+ if (bdev_is_partition(bdev))
+ return -EINVAL;
+- return disk_scan_partitions(bdev->bd_disk, mode & ~FMODE_EXCL,
+- file);
++ return disk_scan_partitions(bdev->bd_disk, mode);
+ case BLKTRACESTART:
+ case BLKTRACESTOP:
+ case BLKTRACETEARDOWN:
+@@ -607,7 +606,7 @@ long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ break;
+ }
+
+- ret = blkdev_common_ioctl(file, mode, cmd, arg, argp);
++ ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+@@ -676,7 +675,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ break;
+ }
+
+- ret = blkdev_common_ioctl(file, mode, cmd, arg, argp);
++ ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
+ if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl)
+ ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
+
+diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
+index 357c61c12ce5b..edd153dda40c0 100644
+--- a/drivers/bus/mhi/ep/main.c
++++ b/drivers/bus/mhi/ep/main.c
+@@ -990,44 +990,25 @@ static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
+ static void mhi_ep_reset_worker(struct work_struct *work)
+ {
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
+- struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state cur_state;
+- int ret;
+
+- mhi_ep_abort_transfer(mhi_cntrl);
++ mhi_ep_power_down(mhi_cntrl);
++
++ mutex_lock(&mhi_cntrl->state_lock);
+
+- spin_lock_bh(&mhi_cntrl->state_lock);
+ /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
+ mhi_ep_mmio_reset(mhi_cntrl);
+ cur_state = mhi_cntrl->mhi_state;
+- spin_unlock_bh(&mhi_cntrl->state_lock);
+
+ /*
+ * Only proceed further if the reset is due to SYS_ERR. The host will
+ * issue reset during shutdown also and we don't need to do re-init in
+ * that case.
+ */
+- if (cur_state == MHI_STATE_SYS_ERR) {
+- mhi_ep_mmio_init(mhi_cntrl);
+-
+- /* Set AMSS EE before signaling ready state */
+- mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+-
+- /* All set, notify the host that we are ready */
+- ret = mhi_ep_set_ready_state(mhi_cntrl);
+- if (ret)
+- return;
+-
+- dev_dbg(dev, "READY state notification sent to the host\n");
++ if (cur_state == MHI_STATE_SYS_ERR)
++ mhi_ep_power_up(mhi_cntrl);
+
+- ret = mhi_ep_enable(mhi_cntrl);
+- if (ret) {
+- dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret);
+- return;
+- }
+-
+- enable_irq(mhi_cntrl->irq);
+- }
++ mutex_unlock(&mhi_cntrl->state_lock);
+ }
+
+ /*
+@@ -1106,11 +1087,11 @@ EXPORT_SYMBOL_GPL(mhi_ep_power_up);
+
+ void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
+ {
+- if (mhi_cntrl->enabled)
++ if (mhi_cntrl->enabled) {
+ mhi_ep_abort_transfer(mhi_cntrl);
+-
+- kfree(mhi_cntrl->mhi_event);
+- disable_irq(mhi_cntrl->irq);
++ kfree(mhi_cntrl->mhi_event);
++ disable_irq(mhi_cntrl->irq);
++ }
+ }
+ EXPORT_SYMBOL_GPL(mhi_ep_power_down);
+
+@@ -1400,8 +1381,8 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+
+ INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
+ INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
+- spin_lock_init(&mhi_cntrl->state_lock);
+ spin_lock_init(&mhi_cntrl->list_lock);
++ mutex_init(&mhi_cntrl->state_lock);
+ mutex_init(&mhi_cntrl->event_lock);
+
+ /* Set MHI version and AMSS EE before enumeration */
+diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c
+index 3655c19e23c7b..fd200b2ac0bb2 100644
+--- a/drivers/bus/mhi/ep/sm.c
++++ b/drivers/bus/mhi/ep/sm.c
+@@ -63,24 +63,23 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
+ int ret;
+
+ /* If MHI is in M3, resume suspended channels */
+- spin_lock_bh(&mhi_cntrl->state_lock);
++ mutex_lock(&mhi_cntrl->state_lock);
++
+ old_state = mhi_cntrl->mhi_state;
+ if (old_state == MHI_STATE_M3)
+ mhi_ep_resume_channels(mhi_cntrl);
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+- spin_unlock_bh(&mhi_cntrl->state_lock);
+-
+ if (ret) {
+ mhi_ep_handle_syserr(mhi_cntrl);
+- return ret;
++ goto err_unlock;
+ }
+
+ /* Signal host that the device moved to M0 */
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0);
+ if (ret) {
+ dev_err(dev, "Failed sending M0 state change event\n");
+- return ret;
++ goto err_unlock;
+ }
+
+ if (old_state == MHI_STATE_READY) {
+@@ -88,11 +87,14 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
+ ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS);
+ if (ret) {
+ dev_err(dev, "Failed sending AMSS EE event\n");
+- return ret;
++ goto err_unlock;
+ }
+ }
+
+- return 0;
++err_unlock:
++ mutex_unlock(&mhi_cntrl->state_lock);
++
++ return ret;
+ }
+
+ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
+@@ -100,13 +102,12 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+- spin_lock_bh(&mhi_cntrl->state_lock);
+- ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+- spin_unlock_bh(&mhi_cntrl->state_lock);
++ mutex_lock(&mhi_cntrl->state_lock);
+
++ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+ if (ret) {
+ mhi_ep_handle_syserr(mhi_cntrl);
+- return ret;
++ goto err_unlock;
+ }
+
+ mhi_ep_suspend_channels(mhi_cntrl);
+@@ -115,10 +116,13 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
+ if (ret) {
+ dev_err(dev, "Failed sending M3 state change event\n");
+- return ret;
++ goto err_unlock;
+ }
+
+- return 0;
++err_unlock:
++ mutex_unlock(&mhi_cntrl->state_lock);
++
++ return ret;
+ }
+
+ int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
+@@ -127,22 +131,24 @@ int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
+ enum mhi_state mhi_state;
+ int ret, is_ready;
+
+- spin_lock_bh(&mhi_cntrl->state_lock);
++ mutex_lock(&mhi_cntrl->state_lock);
++
+ /* Ensure that the MHISTATUS is set to RESET by host */
+ mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK);
+ is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK);
+
+ if (mhi_state != MHI_STATE_RESET || is_ready) {
+ dev_err(dev, "READY state transition failed. MHI host not in RESET state\n");
+- spin_unlock_bh(&mhi_cntrl->state_lock);
+- return -EIO;
++ ret = -EIO;
++ goto err_unlock;
+ }
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY);
+- spin_unlock_bh(&mhi_cntrl->state_lock);
+-
+ if (ret)
+ mhi_ep_handle_syserr(mhi_cntrl);
+
++err_unlock:
++ mutex_unlock(&mhi_cntrl->state_lock);
++
+ return ret;
+ }
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 7c606c49cd535..a5ddebb1edea4 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -74,7 +74,8 @@
+ /*
+ * Timer values
+ */
+-#define SSIF_MSG_USEC 20000 /* 20ms between message tries. */
++#define SSIF_MSG_USEC 60000 /* 60ms between message tries (T3). */
++#define SSIF_REQ_RETRY_USEC 60000 /* 60ms between send retries (T6). */
+ #define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */
+
+ /* How many times to we retry sending/receiving the message. */
+@@ -82,7 +83,9 @@
+ #define SSIF_RECV_RETRIES 250
+
+ #define SSIF_MSG_MSEC (SSIF_MSG_USEC / 1000)
++#define SSIF_REQ_RETRY_MSEC (SSIF_REQ_RETRY_USEC / 1000)
+ #define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC)
++#define SSIF_REQ_RETRY_JIFFIES ((SSIF_REQ_RETRY_USEC * 1000) / TICK_NSEC)
+ #define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
+
+ /*
+@@ -229,6 +232,9 @@ struct ssif_info {
+ bool got_alert;
+ bool waiting_alert;
+
++ /* Used to inform the timeout that it should do a resend. */
++ bool do_resend;
++
+ /*
+ * If set to true, this will request events the next time the
+ * state machine is idle.
+@@ -241,12 +247,6 @@ struct ssif_info {
+ */
+ bool req_flags;
+
+- /*
+- * Used to perform timer operations when run-to-completion
+- * mode is on. This is a countdown timer.
+- */
+- int rtc_us_timer;
+-
+ /* Used for sending/receiving data. +1 for the length. */
+ unsigned char data[IPMI_MAX_MSG_LENGTH + 1];
+ unsigned int data_len;
+@@ -530,7 +530,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+
+ static void start_get(struct ssif_info *ssif_info)
+ {
+- ssif_info->rtc_us_timer = 0;
+ ssif_info->multi_pos = 0;
+
+ ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+@@ -538,22 +537,28 @@ static void start_get(struct ssif_info *ssif_info)
+ ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+ }
+
++static void start_resend(struct ssif_info *ssif_info);
++
+ static void retry_timeout(struct timer_list *t)
+ {
+ struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
+ unsigned long oflags, *flags;
+- bool waiting;
++ bool waiting, resend;
+
+ if (ssif_info->stopping)
+ return;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
++ resend = ssif_info->do_resend;
++ ssif_info->do_resend = false;
+ waiting = ssif_info->waiting_alert;
+ ssif_info->waiting_alert = false;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ if (waiting)
+ start_get(ssif_info);
++ if (resend)
++ start_resend(ssif_info);
+ }
+
+ static void watch_timeout(struct timer_list *t)
+@@ -602,8 +607,6 @@ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
+ start_get(ssif_info);
+ }
+
+-static void start_resend(struct ssif_info *ssif_info);
+-
+ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len)
+ {
+@@ -622,7 +625,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ ssif_info->waiting_alert = true;
+- ssif_info->rtc_us_timer = SSIF_MSG_USEC;
+ if (!ssif_info->stopping)
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_MSG_JIFFIES);
+@@ -909,7 +911,13 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ if (result < 0) {
+ ssif_info->retries_left--;
+ if (ssif_info->retries_left > 0) {
+- start_resend(ssif_info);
++ /*
++ * Wait the retry timeout time per the spec,
++ * then redo the send.
++ */
++ ssif_info->do_resend = true;
++ mod_timer(&ssif_info->retry_timer,
++ jiffies + SSIF_REQ_RETRY_JIFFIES);
+ return;
+ }
+
+@@ -973,7 +981,6 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ /* Wait a jiffie then request the next message */
+ ssif_info->waiting_alert = true;
+ ssif_info->retries_left = SSIF_RECV_RETRIES;
+- ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
+ if (!ssif_info->stopping)
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_MSG_PART_JIFFIES);
+@@ -1320,8 +1327,10 @@ static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
+ ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg);
+ if (ret) {
+ retry_cnt--;
+- if (retry_cnt > 0)
++ if (retry_cnt > 0) {
++ msleep(SSIF_REQ_RETRY_MSEC);
+ goto retry1;
++ }
+ return -ENODEV;
+ }
+
+@@ -1462,8 +1471,10 @@ retry_write:
+ 32, msg);
+ if (ret) {
+ retry_cnt--;
+- if (retry_cnt > 0)
++ if (retry_cnt > 0) {
++ msleep(SSIF_REQ_RETRY_MSEC);
+ goto retry_write;
++ }
+ dev_err(&client->dev, "Could not write multi-part start, though the BMC said it could handle it. Just limit sends to one part.\n");
+ return ret;
+ }
+diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
+index 0913d3eb8d518..cd266021d0103 100644
+--- a/drivers/char/tpm/eventlog/acpi.c
++++ b/drivers/char/tpm/eventlog/acpi.c
+@@ -143,8 +143,12 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
+
+ ret = -EIO;
+ virt = acpi_os_map_iomem(start, len);
+- if (!virt)
++ if (!virt) {
++ dev_warn(&chip->dev, "%s: Failed to map ACPI memory\n", __func__);
++ /* try EFI log next */
++ ret = -ENODEV;
+ goto err;
++ }
+
+ memcpy_fromio(log->bios_event_log, virt, len);
+
+diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
+index cacaf9b87d264..37632a0659d82 100644
+--- a/drivers/clk/renesas/Kconfig
++++ b/drivers/clk/renesas/Kconfig
+@@ -22,7 +22,7 @@ config CLK_RENESAS
+ select CLK_R8A7791 if ARCH_R8A7791 || ARCH_R8A7793
+ select CLK_R8A7792 if ARCH_R8A7792
+ select CLK_R8A7794 if ARCH_R8A7794
+- select CLK_R8A7795 if ARCH_R8A77950 || ARCH_R8A77951
++ select CLK_R8A7795 if ARCH_R8A77951
+ select CLK_R8A77960 if ARCH_R8A77960
+ select CLK_R8A77961 if ARCH_R8A77961
+ select CLK_R8A77965 if ARCH_R8A77965
+diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
+index 301475c74f500..7a585a777d387 100644
+--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
+@@ -128,7 +128,6 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
+ };
+
+ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
+- DEF_MOD("fdp1-2", 117, R8A7795_CLK_S2D1), /* ES1.x */
+ DEF_MOD("fdp1-1", 118, R8A7795_CLK_S0D1),
+ DEF_MOD("fdp1-0", 119, R8A7795_CLK_S0D1),
+ DEF_MOD("tmu4", 121, R8A7795_CLK_S0D6),
+@@ -162,7 +161,6 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
+ DEF_MOD("pcie1", 318, R8A7795_CLK_S3D1),
+ DEF_MOD("pcie0", 319, R8A7795_CLK_S3D1),
+ DEF_MOD("usb-dmac30", 326, R8A7795_CLK_S3D1),
+- DEF_MOD("usb3-if1", 327, R8A7795_CLK_S3D1), /* ES1.x */
+ DEF_MOD("usb3-if0", 328, R8A7795_CLK_S3D1),
+ DEF_MOD("usb-dmac31", 329, R8A7795_CLK_S3D1),
+ DEF_MOD("usb-dmac0", 330, R8A7795_CLK_S3D1),
+@@ -187,28 +185,21 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
+ DEF_MOD("hscif0", 520, R8A7795_CLK_S3D1),
+ DEF_MOD("thermal", 522, R8A7795_CLK_CP),
+ DEF_MOD("pwm", 523, R8A7795_CLK_S0D12),
+- DEF_MOD("fcpvd3", 600, R8A7795_CLK_S2D1), /* ES1.x */
+ DEF_MOD("fcpvd2", 601, R8A7795_CLK_S0D2),
+ DEF_MOD("fcpvd1", 602, R8A7795_CLK_S0D2),
+ DEF_MOD("fcpvd0", 603, R8A7795_CLK_S0D2),
+ DEF_MOD("fcpvb1", 606, R8A7795_CLK_S0D1),
+ DEF_MOD("fcpvb0", 607, R8A7795_CLK_S0D1),
+- DEF_MOD("fcpvi2", 609, R8A7795_CLK_S2D1), /* ES1.x */
+ DEF_MOD("fcpvi1", 610, R8A7795_CLK_S0D1),
+ DEF_MOD("fcpvi0", 611, R8A7795_CLK_S0D1),
+- DEF_MOD("fcpf2", 613, R8A7795_CLK_S2D1), /* ES1.x */
+ DEF_MOD("fcpf1", 614, R8A7795_CLK_S0D1),
+ DEF_MOD("fcpf0", 615, R8A7795_CLK_S0D1),
+- DEF_MOD("fcpci1", 616, R8A7795_CLK_S2D1), /* ES1.x */
+- DEF_MOD("fcpci0", 617, R8A7795_CLK_S2D1), /* ES1.x */
+ DEF_MOD("fcpcs", 619, R8A7795_CLK_S0D1),
+- DEF_MOD("vspd3", 620, R8A7795_CLK_S2D1), /* ES1.x */
+ DEF_MOD("vspd2", 621, R8A7795_CLK_S0D2),
+ DEF_MOD("vspd1", 622, R8A7795_CLK_S0D2),
+ DEF_MOD("vspd0", 623, R8A7795_CLK_S0D2),
+ DEF_MOD("vspbc", 624, R8A7795_CLK_S0D1),
+ DEF_MOD("vspbd", 626, R8A7795_CLK_S0D1),
+- DEF_MOD("vspi2", 629, R8A7795_CLK_S2D1), /* ES1.x */
+ DEF_MOD("vspi1", 630, R8A7795_CLK_S0D1),
+ DEF_MOD("vspi0", 631, R8A7795_CLK_S0D1),
+ DEF_MOD("ehci3", 700, R8A7795_CLK_S3D2),
+@@ -221,7 +212,6 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
+ DEF_MOD("cmm2", 709, R8A7795_CLK_S2D1),
+ DEF_MOD("cmm1", 710, R8A7795_CLK_S2D1),
+ DEF_MOD("cmm0", 711, R8A7795_CLK_S2D1),
+- DEF_MOD("csi21", 713, R8A7795_CLK_CSI0), /* ES1.x */
+ DEF_MOD("csi20", 714, R8A7795_CLK_CSI0),
+ DEF_MOD("csi41", 715, R8A7795_CLK_CSI0),
+ DEF_MOD("csi40", 716, R8A7795_CLK_CSI0),
+@@ -350,103 +340,26 @@ static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
+ { 2, 192, 1, 192, 1, 32, },
+ };
+
+-static const struct soc_device_attribute r8a7795es1[] __initconst = {
++static const struct soc_device_attribute r8a7795_denylist[] __initconst = {
+ { .soc_id = "r8a7795", .revision = "ES1.*" },
+ { /* sentinel */ }
+ };
+
+-
+- /*
+- * Fixups for R-Car H3 ES1.x
+- */
+-
+-static const unsigned int r8a7795es1_mod_nullify[] __initconst = {
+- MOD_CLK_ID(326), /* USB-DMAC3-0 */
+- MOD_CLK_ID(329), /* USB-DMAC3-1 */
+- MOD_CLK_ID(700), /* EHCI/OHCI3 */
+- MOD_CLK_ID(705), /* HS-USB-IF3 */
+-
+-};
+-
+-static const struct mssr_mod_reparent r8a7795es1_mod_reparent[] __initconst = {
+- { MOD_CLK_ID(118), R8A7795_CLK_S2D1 }, /* FDP1-1 */
+- { MOD_CLK_ID(119), R8A7795_CLK_S2D1 }, /* FDP1-0 */
+- { MOD_CLK_ID(121), R8A7795_CLK_S3D2 }, /* TMU4 */
+- { MOD_CLK_ID(217), R8A7795_CLK_S3D1 }, /* SYS-DMAC2 */
+- { MOD_CLK_ID(218), R8A7795_CLK_S3D1 }, /* SYS-DMAC1 */
+- { MOD_CLK_ID(219), R8A7795_CLK_S3D1 }, /* SYS-DMAC0 */
+- { MOD_CLK_ID(408), R8A7795_CLK_S3D1 }, /* INTC-AP */
+- { MOD_CLK_ID(501), R8A7795_CLK_S3D1 }, /* AUDMAC1 */
+- { MOD_CLK_ID(502), R8A7795_CLK_S3D1 }, /* AUDMAC0 */
+- { MOD_CLK_ID(523), R8A7795_CLK_S3D4 }, /* PWM */
+- { MOD_CLK_ID(601), R8A7795_CLK_S2D1 }, /* FCPVD2 */
+- { MOD_CLK_ID(602), R8A7795_CLK_S2D1 }, /* FCPVD1 */
+- { MOD_CLK_ID(603), R8A7795_CLK_S2D1 }, /* FCPVD0 */
+- { MOD_CLK_ID(606), R8A7795_CLK_S2D1 }, /* FCPVB1 */
+- { MOD_CLK_ID(607), R8A7795_CLK_S2D1 }, /* FCPVB0 */
+- { MOD_CLK_ID(610), R8A7795_CLK_S2D1 }, /* FCPVI1 */
+- { MOD_CLK_ID(611), R8A7795_CLK_S2D1 }, /* FCPVI0 */
+- { MOD_CLK_ID(614), R8A7795_CLK_S2D1 }, /* FCPF1 */
+- { MOD_CLK_ID(615), R8A7795_CLK_S2D1 }, /* FCPF0 */
+- { MOD_CLK_ID(619), R8A7795_CLK_S2D1 }, /* FCPCS */
+- { MOD_CLK_ID(621), R8A7795_CLK_S2D1 }, /* VSPD2 */
+- { MOD_CLK_ID(622), R8A7795_CLK_S2D1 }, /* VSPD1 */
+- { MOD_CLK_ID(623), R8A7795_CLK_S2D1 }, /* VSPD0 */
+- { MOD_CLK_ID(624), R8A7795_CLK_S2D1 }, /* VSPBC */
+- { MOD_CLK_ID(626), R8A7795_CLK_S2D1 }, /* VSPBD */
+- { MOD_CLK_ID(630), R8A7795_CLK_S2D1 }, /* VSPI1 */
+- { MOD_CLK_ID(631), R8A7795_CLK_S2D1 }, /* VSPI0 */
+- { MOD_CLK_ID(804), R8A7795_CLK_S2D1 }, /* VIN7 */
+- { MOD_CLK_ID(805), R8A7795_CLK_S2D1 }, /* VIN6 */
+- { MOD_CLK_ID(806), R8A7795_CLK_S2D1 }, /* VIN5 */
+- { MOD_CLK_ID(807), R8A7795_CLK_S2D1 }, /* VIN4 */
+- { MOD_CLK_ID(808), R8A7795_CLK_S2D1 }, /* VIN3 */
+- { MOD_CLK_ID(809), R8A7795_CLK_S2D1 }, /* VIN2 */
1617 +- { MOD_CLK_ID(810), R8A7795_CLK_S2D1 }, /* VIN1 */
1618 +- { MOD_CLK_ID(811), R8A7795_CLK_S2D1 }, /* VIN0 */
1619 +- { MOD_CLK_ID(812), R8A7795_CLK_S3D2 }, /* EAVB-IF */
1620 +- { MOD_CLK_ID(820), R8A7795_CLK_S2D1 }, /* IMR3 */
1621 +- { MOD_CLK_ID(821), R8A7795_CLK_S2D1 }, /* IMR2 */
1622 +- { MOD_CLK_ID(822), R8A7795_CLK_S2D1 }, /* IMR1 */
1623 +- { MOD_CLK_ID(823), R8A7795_CLK_S2D1 }, /* IMR0 */
1624 +- { MOD_CLK_ID(905), R8A7795_CLK_CP }, /* GPIO7 */
1625 +- { MOD_CLK_ID(906), R8A7795_CLK_CP }, /* GPIO6 */
1626 +- { MOD_CLK_ID(907), R8A7795_CLK_CP }, /* GPIO5 */
1627 +- { MOD_CLK_ID(908), R8A7795_CLK_CP }, /* GPIO4 */
1628 +- { MOD_CLK_ID(909), R8A7795_CLK_CP }, /* GPIO3 */
1629 +- { MOD_CLK_ID(910), R8A7795_CLK_CP }, /* GPIO2 */
1630 +- { MOD_CLK_ID(911), R8A7795_CLK_CP }, /* GPIO1 */
1631 +- { MOD_CLK_ID(912), R8A7795_CLK_CP }, /* GPIO0 */
1632 +- { MOD_CLK_ID(918), R8A7795_CLK_S3D2 }, /* I2C6 */
1633 +- { MOD_CLK_ID(919), R8A7795_CLK_S3D2 }, /* I2C5 */
1634 +- { MOD_CLK_ID(927), R8A7795_CLK_S3D2 }, /* I2C4 */
1635 +- { MOD_CLK_ID(928), R8A7795_CLK_S3D2 }, /* I2C3 */
1636 +-};
1637 +-
1638 +-
1639 +- /*
1640 +- * Fixups for R-Car H3 ES2.x
1641 +- */
1642 +-
1643 +-static const unsigned int r8a7795es2_mod_nullify[] __initconst = {
1644 +- MOD_CLK_ID(117), /* FDP1-2 */
1645 +- MOD_CLK_ID(327), /* USB3-IF1 */
1646 +- MOD_CLK_ID(600), /* FCPVD3 */
1647 +- MOD_CLK_ID(609), /* FCPVI2 */
1648 +- MOD_CLK_ID(613), /* FCPF2 */
1649 +- MOD_CLK_ID(616), /* FCPCI1 */
1650 +- MOD_CLK_ID(617), /* FCPCI0 */
1651 +- MOD_CLK_ID(620), /* VSPD3 */
1652 +- MOD_CLK_ID(629), /* VSPI2 */
1653 +- MOD_CLK_ID(713), /* CSI21 */
1654 +-};
1655 +-
1656 + static int __init r8a7795_cpg_mssr_init(struct device *dev)
1657 + {
1658 + const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
1659 + u32 cpg_mode;
1660 + int error;
1661 +
1662 ++ /*
1663 ++ * We panic here to ensure removed SoCs and clk updates are always in
1664 ++ * sync to avoid overclocking damages. The panic can only be seen with
1665 ++ * commandline args 'earlycon keep_bootcon'. But these SoCs were for
1666 ++ * developers only anyhow.
1667 ++ */
1668 ++ if (soc_device_match(r8a7795_denylist))
1669 ++ panic("SoC not supported anymore!\n");
1670 ++
1671 + error = rcar_rst_read_mode_pins(&cpg_mode);
1672 + if (error)
1673 + return error;
1674 +@@ -457,25 +370,6 @@ static int __init r8a7795_cpg_mssr_init(struct device *dev)
1675 + return -EINVAL;
1676 + }
1677 +
1678 +- if (soc_device_match(r8a7795es1)) {
1679 +- cpg_core_nullify_range(r8a7795_core_clks,
1680 +- ARRAY_SIZE(r8a7795_core_clks),
1681 +- R8A7795_CLK_S0D2, R8A7795_CLK_S0D12);
1682 +- mssr_mod_nullify(r8a7795_mod_clks,
1683 +- ARRAY_SIZE(r8a7795_mod_clks),
1684 +- r8a7795es1_mod_nullify,
1685 +- ARRAY_SIZE(r8a7795es1_mod_nullify));
1686 +- mssr_mod_reparent(r8a7795_mod_clks,
1687 +- ARRAY_SIZE(r8a7795_mod_clks),
1688 +- r8a7795es1_mod_reparent,
1689 +- ARRAY_SIZE(r8a7795es1_mod_reparent));
1690 +- } else {
1691 +- mssr_mod_nullify(r8a7795_mod_clks,
1692 +- ARRAY_SIZE(r8a7795_mod_clks),
1693 +- r8a7795es2_mod_nullify,
1694 +- ARRAY_SIZE(r8a7795es2_mod_nullify));
1695 +- }
1696 +-
1697 + return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
1698 + }
1699 +
1700 +diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
1701 +index e668f23c75e7d..b3ef62fa612e3 100644
1702 +--- a/drivers/clk/renesas/rcar-gen3-cpg.c
1703 ++++ b/drivers/clk/renesas/rcar-gen3-cpg.c
1704 +@@ -310,19 +310,10 @@ static unsigned int cpg_clk_extalr __initdata;
1705 + static u32 cpg_mode __initdata;
1706 + static u32 cpg_quirks __initdata;
1707 +
1708 +-#define PLL_ERRATA BIT(0) /* Missing PLL0/2/4 post-divider */
1709 + #define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */
1710 +
1711 +
1712 + static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
1713 +- {
1714 +- .soc_id = "r8a7795", .revision = "ES1.0",
1715 +- .data = (void *)(PLL_ERRATA | RCKCR_CKSEL),
1716 +- },
1717 +- {
1718 +- .soc_id = "r8a7795", .revision = "ES1.*",
1719 +- .data = (void *)(RCKCR_CKSEL),
1720 +- },
1721 + {
1722 + .soc_id = "r8a7796", .revision = "ES1.0",
1723 + .data = (void *)(RCKCR_CKSEL),
1724 +@@ -355,9 +346,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
1725 + * multiplier when cpufreq changes between normal and boost
1726 + * modes.
1727 + */
1728 +- mult = (cpg_quirks & PLL_ERRATA) ? 4 : 2;
1729 + return cpg_pll_clk_register(core->name, __clk_get_name(parent),
1730 +- base, mult, CPG_PLL0CR, 0);
1731 ++ base, 2, CPG_PLL0CR, 0);
1732 +
1733 + case CLK_TYPE_GEN3_PLL1:
1734 + mult = cpg_pll_config->pll1_mult;
1735 +@@ -370,9 +360,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
1736 + * multiplier when cpufreq changes between normal and boost
1737 + * modes.
1738 + */
1739 +- mult = (cpg_quirks & PLL_ERRATA) ? 4 : 2;
1740 + return cpg_pll_clk_register(core->name, __clk_get_name(parent),
1741 +- base, mult, CPG_PLL2CR, 2);
1742 ++ base, 2, CPG_PLL2CR, 2);
1743 +
1744 + case CLK_TYPE_GEN3_PLL3:
1745 + mult = cpg_pll_config->pll3_mult;
1746 +@@ -388,8 +377,6 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
1747 + */
1748 + value = readl(base + CPG_PLL4CR);
1749 + mult = (((value >> 24) & 0x7f) + 1) * 2;
1750 +- if (cpg_quirks & PLL_ERRATA)
1751 +- mult *= 2;
1752 + break;
1753 +
1754 + case CLK_TYPE_GEN3_SDH:
1755 +diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
1756 +index 1a0cdf001b2f2..523fd45231571 100644
1757 +--- a/drivers/clk/renesas/renesas-cpg-mssr.c
1758 ++++ b/drivers/clk/renesas/renesas-cpg-mssr.c
1759 +@@ -1113,19 +1113,6 @@ static int __init cpg_mssr_init(void)
1760 +
1761 + subsys_initcall(cpg_mssr_init);
1762 +
1763 +-void __init cpg_core_nullify_range(struct cpg_core_clk *core_clks,
1764 +- unsigned int num_core_clks,
1765 +- unsigned int first_clk,
1766 +- unsigned int last_clk)
1767 +-{
1768 +- unsigned int i;
1769 +-
1770 +- for (i = 0; i < num_core_clks; i++)
1771 +- if (core_clks[i].id >= first_clk &&
1772 +- core_clks[i].id <= last_clk)
1773 +- core_clks[i].name = NULL;
1774 +-}
1775 +-
1776 + void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
1777 + unsigned int num_mod_clks,
1778 + const unsigned int *clks, unsigned int n)
1779 +@@ -1139,19 +1126,5 @@ void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
1780 + }
1781 + }
1782 +
1783 +-void __init mssr_mod_reparent(struct mssr_mod_clk *mod_clks,
1784 +- unsigned int num_mod_clks,
1785 +- const struct mssr_mod_reparent *clks,
1786 +- unsigned int n)
1787 +-{
1788 +- unsigned int i, j;
1789 +-
1790 +- for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
1791 +- if (mod_clks[i].id == clks[j].clk) {
1792 +- mod_clks[i].parent = clks[j].parent;
1793 +- j++;
1794 +- }
1795 +-}
1796 +-
1797 + MODULE_DESCRIPTION("Renesas CPG/MSSR Driver");
1798 + MODULE_LICENSE("GPL v2");
1799 +diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
1800 +index 1c3c057d17f53..80c5b462924ac 100644
1801 +--- a/drivers/clk/renesas/renesas-cpg-mssr.h
1802 ++++ b/drivers/clk/renesas/renesas-cpg-mssr.h
1803 +@@ -187,21 +187,7 @@ void __init cpg_mssr_early_init(struct device_node *np,
1804 + /*
1805 + * Helpers for fixing up clock tables depending on SoC revision
1806 + */
1807 +-
1808 +-struct mssr_mod_reparent {
1809 +- unsigned int clk, parent;
1810 +-};
1811 +-
1812 +-
1813 +-extern void cpg_core_nullify_range(struct cpg_core_clk *core_clks,
1814 +- unsigned int num_core_clks,
1815 +- unsigned int first_clk,
1816 +- unsigned int last_clk);
1817 + extern void mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
1818 + unsigned int num_mod_clks,
1819 + const unsigned int *clks, unsigned int n);
1820 +-extern void mssr_mod_reparent(struct mssr_mod_clk *mod_clks,
1821 +- unsigned int num_mod_clks,
1822 +- const struct mssr_mod_reparent *clks,
1823 +- unsigned int n);
1824 + #endif
1825 +diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
1826 +index 6853b93ac82e7..df3388e8dec00 100644
1827 +--- a/drivers/gpu/drm/amd/amdgpu/nv.c
1828 ++++ b/drivers/gpu/drm/amd/amdgpu/nv.c
1829 +@@ -393,9 +393,10 @@ static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
1830 + *value = 0;
1831 + for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
1832 + en = &nv_allowed_read_registers[i];
1833 +- if (adev->reg_offset[en->hwip][en->inst] &&
1834 +- reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
1835 +- + en->reg_offset))
1836 ++ if (!adev->reg_offset[en->hwip][en->inst])
1837 ++ continue;
1838 ++ else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
1839 ++ + en->reg_offset))
1840 + continue;
1841 +
1842 + *value = nv_get_register_value(adev,
1843 +diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
1844 +index 7cd17dda32ceb..2eddd7f6cd41e 100644
1845 +--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
1846 ++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
1847 +@@ -439,8 +439,9 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
1848 + *value = 0;
1849 + for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
1850 + en = &soc15_allowed_read_registers[i];
1851 +- if (adev->reg_offset[en->hwip][en->inst] &&
1852 +- reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
1853 ++ if (!adev->reg_offset[en->hwip][en->inst])
1854 ++ continue;
1855 ++ else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
1856 + + en->reg_offset))
1857 + continue;
1858 +
1859 +diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
1860 +index 230e15fed755c..9c52af5005253 100644
1861 +--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
1862 ++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
1863 +@@ -47,19 +47,31 @@
1864 + static const struct amd_ip_funcs soc21_common_ip_funcs;
1865 +
1866 + /* SOC21 */
1867 +-static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array[] =
1868 ++static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] =
1869 + {
1870 + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
1871 + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
1872 + };
1873 +
1874 +-static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode =
1875 ++static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] =
1876 + {
1877 +- .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array),
1878 +- .codec_array = vcn_4_0_0_video_codecs_encode_array,
1879 ++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
1880 ++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
1881 ++};
1882 ++
1883 ++static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 =
1884 ++{
1885 ++ .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn0),
1886 ++ .codec_array = vcn_4_0_0_video_codecs_encode_array_vcn0,
1887 ++};
1888 ++
1889 ++static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 =
1890 ++{
1891 ++ .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn1),
1892 ++ .codec_array = vcn_4_0_0_video_codecs_encode_array_vcn1,
1893 + };
1894 +
1895 +-static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[] =
1896 ++static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] =
1897 + {
1898 + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
1899 + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
1900 +@@ -68,23 +80,47 @@ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[
1901 + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
1902 + };
1903 +
1904 +-static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode =
1905 ++static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] =
1906 ++{
1907 ++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
1908 ++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
1909 ++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
1910 ++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
1911 ++};
1912 ++
1913 ++static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn0 =
1914 ++{
1915 ++ .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn0),
1916 ++ .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn0,
1917 ++};
1918 ++
1919 ++static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 =
1920 + {
1921 +- .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array),
1922 +- .codec_array = vcn_4_0_0_video_codecs_decode_array,
1923 ++ .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn1),
1924 ++ .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1,
1925 + };
1926 +
1927 + static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
1928 + const struct amdgpu_video_codecs **codecs)
1929 + {
1930 +- switch (adev->ip_versions[UVD_HWIP][0]) {
1931 ++ if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
1932 ++ return -EINVAL;
1933 +
1934 ++ switch (adev->ip_versions[UVD_HWIP][0]) {
1935 + case IP_VERSION(4, 0, 0):
1936 + case IP_VERSION(4, 0, 2):
1937 +- if (encode)
1938 +- *codecs = &vcn_4_0_0_video_codecs_encode;
1939 +- else
1940 +- *codecs = &vcn_4_0_0_video_codecs_decode;
1941 ++ case IP_VERSION(4, 0, 4):
1942 ++ if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
1943 ++ if (encode)
1944 ++ *codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
1945 ++ else
1946 ++ *codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
1947 ++ } else {
1948 ++ if (encode)
1949 ++ *codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
1950 ++ else
1951 ++ *codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
1952 ++ }
1953 + return 0;
1954 + default:
1955 + return -EINVAL;
1956 +@@ -254,9 +290,10 @@ static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
1957 + *value = 0;
1958 + for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) {
1959 + en = &soc21_allowed_read_registers[i];
1960 +- if (adev->reg_offset[en->hwip][en->inst] &&
1961 +- reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
1962 +- + en->reg_offset))
1963 ++ if (!adev->reg_offset[en->hwip][en->inst])
1964 ++ continue;
1965 ++ else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
1966 ++ + en->reg_offset))
1967 + continue;
1968 +
1969 + *value = soc21_get_register_value(adev,
1970 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
1971 +index cd4e61bf04939..3ac599f74fea8 100644
1972 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
1973 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
1974 +@@ -280,7 +280,7 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
1975 + if (!pdd->doorbell_index) {
1976 + int r = kfd_alloc_process_doorbells(pdd->dev,
1977 + &pdd->doorbell_index);
1978 +- if (r)
1979 ++ if (r < 0)
1980 + return 0;
1981 + }
1982 +
1983 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
1984 +index 9919c39f7ea03..d70c64a9fcb2c 100644
1985 +--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
1986 ++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
1987 +@@ -2109,13 +2109,19 @@ static bool dcn32_resource_construct(
1988 + dc->caps.max_cursor_size = 64;
1989 + dc->caps.min_horizontal_blanking_period = 80;
1990 + dc->caps.dmdata_alloc_size = 2048;
1991 +- dc->caps.mall_size_per_mem_channel = 0;
1992 ++ dc->caps.mall_size_per_mem_channel = 4;
1993 + dc->caps.mall_size_total = 0;
1994 + dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
1995 +
1996 + dc->caps.cache_line_size = 64;
1997 + dc->caps.cache_num_ways = 16;
1998 +- dc->caps.max_cab_allocation_bytes = 67108864; // 64MB = 1024 * 1024 * 64
1999 ++
2000 ++ /* Calculate the available MALL space */
2001 ++ dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall(
2002 ++ dc, dc->ctx->dc_bios->vram_info.num_chans) *
2003 ++ dc->caps.mall_size_per_mem_channel * 1024 * 1024;
2004 ++ dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
2005 ++
2006 + dc->caps.subvp_fw_processing_delay_us = 15;
2007 + dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
2008 + dc->caps.subvp_swath_height_margin_lines = 16;
2009 +@@ -2545,3 +2551,55 @@ struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
2010 +
2011 + return idle_pipe;
2012 + }
2013 ++
2014 ++unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans)
2015 ++{
2016 ++ /*
2017 ++ * DCN32 and DCN321 SKUs may have different sizes for MALL
2018 ++ * but we may not be able to access all the MALL space.
2019 ++ * If num_chans is a power of 2, then we can access all
2020 ++ * of the available MALL space. Otherwise, we can only
2021 ++ * access:
2022 ++ *
2023 ++ * max_cab_size_in_bytes = total_cache_size_in_bytes *
2024 ++ * ((2^floor(log2(num_chans)))/num_chans)
2025 ++ *
2026 ++ * Calculating the MALL sizes for all available SKUs, we
2027 ++ * have come up with the following simplified check.
2028 ++ * - we have max_chans which provides the max MALL size.
2029 ++ * Each channel supports 4 MB of MALL so:
2030 ++ *
2031 ++ * total_cache_size_in_bytes = max_chans * 4 MB
2032 ++ *
2033 ++ * - we have avail_chans which shows the number of channels
2034 ++ * we can use if we can't access the entire MALL space.
2035 ++ * It is generally half of max_chans
2036 ++ * - so we use the following checks:
2037 ++ *
2038 ++ * if (num_chans == max_chans), return max_chans
2039 ++ * if (num_chans < max_chans), return avail_chans
2040 ++ *
2041 ++ * - exception is GC_11_0_0 where we can't access max_chans,
2042 ++ * so we define max_avail_chans as the maximum available
2043 ++ * MALL space
2044 ++ *
2045 ++ */
2046 ++ int gc_11_0_0_max_chans = 48;
2047 ++ int gc_11_0_0_max_avail_chans = 32;
2048 ++ int gc_11_0_0_avail_chans = 16;
2049 ++ int gc_11_0_3_max_chans = 16;
2050 ++ int gc_11_0_3_avail_chans = 8;
2051 ++ int gc_11_0_2_max_chans = 8;
2052 ++ int gc_11_0_2_avail_chans = 4;
2053 ++
2054 ++ if (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev)) {
2055 ++ return (num_chans == gc_11_0_0_max_chans) ?
2056 ++ gc_11_0_0_max_avail_chans : gc_11_0_0_avail_chans;
2057 ++ } else if (ASICREV_IS_GC_11_0_2(dc->ctx->asic_id.hw_internal_rev)) {
2058 ++ return (num_chans == gc_11_0_2_max_chans) ?
2059 ++ gc_11_0_2_max_chans : gc_11_0_2_avail_chans;
2060 ++ } else { // if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev)) {
2061 ++ return (num_chans == gc_11_0_3_max_chans) ?
2062 ++ gc_11_0_3_max_chans : gc_11_0_3_avail_chans;
2063 ++ }
2064 ++}
2065 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
2066 +index f76120e67c16a..615244a1f95d5 100644
2067 +--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
2068 ++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
2069 +@@ -142,6 +142,10 @@ void dcn32_restore_mall_state(struct dc *dc,
2070 + struct dc_state *context,
2071 + struct mall_temp_config *temp_config);
2072 +
2073 ++bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe);
2074 ++
2075 ++unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans);
2076 ++
2077 + /* definitions for run time init of reg offsets */
2078 +
2079 + /* CLK SRC */
2080 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
2081 +index 6292ac515d1a4..d320e21680da1 100644
2082 +--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
2083 ++++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
2084 +@@ -1697,11 +1697,18 @@ static bool dcn321_resource_construct(
2085 + dc->caps.max_cursor_size = 64;
2086 + dc->caps.min_horizontal_blanking_period = 80;
2087 + dc->caps.dmdata_alloc_size = 2048;
2088 +- dc->caps.mall_size_per_mem_channel = 0;
2089 ++ dc->caps.mall_size_per_mem_channel = 4;
2090 + dc->caps.mall_size_total = 0;
2091 + dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
2092 + dc->caps.cache_line_size = 64;
2093 + dc->caps.cache_num_ways = 16;
2094 ++
2095 ++ /* Calculate the available MALL space */
2096 ++ dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall(
2097 ++ dc, dc->ctx->dc_bios->vram_info.num_chans) *
2098 ++ dc->caps.mall_size_per_mem_channel * 1024 * 1024;
2099 ++ dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
2100 ++
2101 + dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32
2102 + dc->caps.subvp_fw_processing_delay_us = 15;
2103 + dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
2104 +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2105 +index 04cc96e700981..e22b4b3880af9 100644
2106 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2107 ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
2108 +@@ -676,7 +676,9 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
2109 + */
2110 + if (pipe->plane_state && !pipe->top_pipe &&
2111 + pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface &&
2112 +- vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
2113 ++ (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||
2114 ++ (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
2115 ++ dcn32_allow_subvp_with_active_margin(pipe)))) {
2116 + while (pipe) {
2117 + num_pipes++;
2118 + pipe = pipe->bottom_pipe;
2119 +@@ -2379,8 +2381,11 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
2120 + }
2121 +
2122 + /* Override from VBIOS for num_chan */
2123 +- if (dc->ctx->dc_bios->vram_info.num_chans)
2124 ++ if (dc->ctx->dc_bios->vram_info.num_chans) {
2125 + dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
2126 ++ dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
2127 ++ dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
2128 ++ }
2129 +
2130 + if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
2131 + dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
2132 +@@ -2558,3 +2563,30 @@ void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
2133 + pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
2134 + pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
2135 + }
2136 ++
2137 ++bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
2138 ++{
2139 ++ bool allow = false;
2140 ++ uint32_t refresh_rate = 0;
2141 ++
2142 ++ /* For now, allow SubVP only on 2560x1440@60hz displays that have active
2143 ++ * margin. There must be no scaling as well.
2144 ++ *
2145 ++ * Restricting to 2560x1440@60hz enables 4K60 + 1440p60 configs for
2146 ++ * p-state switching.
2147 ++ */
2148 ++ if (pipe->stream && pipe->plane_state) {
2149 ++ refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
2150 ++ pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
2151 ++ / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
2152 ++ if (pipe->stream->timing.v_addressable == 1440 &&
2153 ++ pipe->stream->timing.h_addressable == 2560 &&
2154 ++ refresh_rate >= 55 && refresh_rate <= 65 &&
2155 ++ pipe->plane_state->src_rect.height == 1440 &&
2156 ++ pipe->plane_state->src_rect.width == 2560 &&
2157 ++ pipe->plane_state->dst_rect.height == 1440 &&
2158 ++ pipe->plane_state->dst_rect.width == 2560)
2159 ++ allow = true;
2160 ++ }
2161 ++ return allow;
2162 ++}
2163 +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
2164 +index 0ea406145c1d7..b80cef70fa60f 100644
2165 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
2166 ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
2167 +@@ -534,8 +534,11 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
2168 + }
2169 +
2170 + /* Override from VBIOS for num_chan */
2171 +- if (dc->ctx->dc_bios->vram_info.num_chans)
2172 ++ if (dc->ctx->dc_bios->vram_info.num_chans) {
2173 + dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
2174 ++ dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
2175 ++ dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
2176 ++ }
2177 +
2178 + if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
2179 + dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
2180 +diff --git a/drivers/gpu/drm/display/drm_hdmi_helper.c b/drivers/gpu/drm/display/drm_hdmi_helper.c
2181 +index 0264abe552788..faf5e9efa7d33 100644
2182 +--- a/drivers/gpu/drm/display/drm_hdmi_helper.c
2183 ++++ b/drivers/gpu/drm/display/drm_hdmi_helper.c
2184 +@@ -44,10 +44,8 @@ int drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
2185 +
2186 + /* Sink EOTF is Bit map while infoframe is absolute values */
2187 + if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf,
2188 +- connector->hdr_sink_metadata.hdmi_type1.eotf)) {
2189 +- DRM_DEBUG_KMS("EOTF Not Supported\n");
2190 +- return -EINVAL;
2191 +- }
2192 ++ connector->hdr_sink_metadata.hdmi_type1.eotf))
2193 ++ DRM_DEBUG_KMS("Unknown EOTF %d\n", hdr_metadata->hdmi_metadata_type1.eotf);
2194 +
2195 + err = hdmi_drm_infoframe_init(frame);
2196 + if (err < 0)
2197 +diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
2198 +index f197f59f6d99b..c0dc5858a7237 100644
2199 +--- a/drivers/gpu/drm/drm_atomic.c
2200 ++++ b/drivers/gpu/drm/drm_atomic.c
2201 +@@ -1070,6 +1070,7 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
2202 + drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
2203 + drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
2204 + drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
2205 ++ drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc);
2206 +
2207 + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
2208 + if (state->writeback_job && state->writeback_job->fb)
2209 +diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
2210 +index ed4d93942dbd2..ecd6c5c3f4ded 100644
2211 +--- a/drivers/gpu/drm/i915/display/icl_dsi.c
2212 ++++ b/drivers/gpu/drm/i915/display/icl_dsi.c
2213 +@@ -2053,7 +2053,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
2214 + /* attach connector to encoder */
2215 + intel_connector_attach_encoder(intel_connector, encoder);
2216 +
2217 +- intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);
2218 ++ encoder->devdata = intel_bios_encoder_data_lookup(dev_priv, port);
2219 ++ intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, NULL);
2220 +
2221 + mutex_lock(&dev->mode_config.mutex);
2222 + intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
2223 +diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
2224 +index 178a8cbb75838..a70b7061742a8 100644
2225 +--- a/drivers/gpu/drm/i915/display/intel_bios.c
2226 ++++ b/drivers/gpu/drm/i915/display/intel_bios.c
2227 +@@ -620,14 +620,14 @@ static void dump_pnp_id(struct drm_i915_private *i915,
2228 +
2229 + static int opregion_get_panel_type(struct drm_i915_private *i915,
2230 + const struct intel_bios_encoder_data *devdata,
2231 +- const struct edid *edid)
2232 ++ const struct edid *edid, bool use_fallback)
2233 + {
2234 + return intel_opregion_get_panel_type(i915);
2235 + }
2236 +
2237 + static int vbt_get_panel_type(struct drm_i915_private *i915,
2238 + const struct intel_bios_encoder_data *devdata,
2239 +- const struct edid *edid)
2240 ++ const struct edid *edid, bool use_fallback)
2241 + {
2242 + const struct bdb_lvds_options *lvds_options;
2243 +
2244 +@@ -652,7 +652,7 @@ static int vbt_get_panel_type(struct drm_i915_private *i915,
2245 +
2246 + static int pnpid_get_panel_type(struct drm_i915_private *i915,
2247 + const struct intel_bios_encoder_data *devdata,
2248 +- const struct edid *edid)
2249 ++ const struct edid *edid, bool use_fallback)
2250 + {
2251 + const struct bdb_lvds_lfp_data *data;
2252 + const struct bdb_lvds_lfp_data_ptrs *ptrs;
2253 +@@ -701,9 +701,9 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
2254 +
2255 + static int fallback_get_panel_type(struct drm_i915_private *i915,
2256 + const struct intel_bios_encoder_data *devdata,
2257 +- const struct edid *edid)
2258 ++ const struct edid *edid, bool use_fallback)
2259 + {
2260 +- return 0;
2261 ++ return use_fallback ? 0 : -1;
2262 + }
2263 +
2264 + enum panel_type {
2265 +@@ -715,13 +715,13 @@ enum panel_type {
2266 +
2267 + static int get_panel_type(struct drm_i915_private *i915,
2268 + const struct intel_bios_encoder_data *devdata,
2269 +- const struct edid *edid)
2270 ++ const struct edid *edid, bool use_fallback)
2271 + {
2272 + struct {
2273 + const char *name;
2274 + int (*get_panel_type)(struct drm_i915_private *i915,
2275 + const struct intel_bios_encoder_data *devdata,
2276 +- const struct edid *edid);
2277 ++ const struct edid *edid, bool use_fallback);
2278 + int panel_type;
2279 + } panel_types[] = {
2280 + [PANEL_TYPE_OPREGION] = {
2281 +@@ -744,7 +744,8 @@ static int get_panel_type(struct drm_i915_private *i915,
2282 + int i;
2283 +
2284 + for (i = 0; i < ARRAY_SIZE(panel_types); i++) {
2285 +- panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata, edid);
2286 ++ panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata,
2287 ++ edid, use_fallback);
2288 +
2289 + drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf &&
2290 + panel_types[i].panel_type != 0xff);
2291 +@@ -2592,6 +2593,12 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
2292 + devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR;
2293 + }
2294 +
2295 ++static bool
2296 ++intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata)
2297 ++{
2298 ++ return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT;
2299 ++}
2300 ++
2301 + static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
2302 + {
2303 + if (!devdata || devdata->i915->display.vbt.version < 158)
2304 +@@ -2642,7 +2649,7 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
2305 + {
2306 + struct drm_i915_private *i915 = devdata->i915;
2307 + const struct child_device_config *child = &devdata->child;
2308 +- bool is_dvi, is_hdmi, is_dp, is_edp, is_crt, supports_typec_usb, supports_tbt;
2309 ++ bool is_dvi, is_hdmi, is_dp, is_edp, is_dsi, is_crt, supports_typec_usb, supports_tbt;
2310 + int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock;
2311 +
2312 + is_dvi = intel_bios_encoder_supports_dvi(devdata);
2313 +@@ -2650,13 +2657,14 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
2314 + is_crt = intel_bios_encoder_supports_crt(devdata);
2315 + is_hdmi = intel_bios_encoder_supports_hdmi(devdata);
2316 + is_edp = intel_bios_encoder_supports_edp(devdata);
2317 ++ is_dsi = intel_bios_encoder_supports_dsi(devdata);
2318 +
2319 + supports_typec_usb = intel_bios_encoder_supports_typec_usb(devdata);
2320 + supports_tbt = intel_bios_encoder_supports_tbt(devdata);
2321 +
2322 + drm_dbg_kms(&i915->drm,
2323 +- "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
2324 +- port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
2325 ++ "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
2326 ++ port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi,
2327 + HAS_LSPCON(i915) && child->lspcon,
2328 + supports_typec_usb, supports_tbt,
2329 + devdata->dsc != NULL);
2330 +@@ -2701,6 +2709,8 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
2331 + enum port port;
2332 +
2333 + port = dvo_port_to_port(i915, child->dvo_port);
2334 ++ if (port == PORT_NONE && DISPLAY_VER(i915) >= 11)
2335 ++ port = dsi_dvo_port_to_port(i915, child->dvo_port);
2336 + if (port == PORT_NONE)
2337 + return;
2338 +
2339 +@@ -3191,14 +3201,26 @@ out:
2340 + kfree(oprom_vbt);
2341 + }
2342 +
2343 +-void intel_bios_init_panel(struct drm_i915_private *i915,
2344 +- struct intel_panel *panel,
2345 +- const struct intel_bios_encoder_data *devdata,
2346 +- const struct edid *edid)
2347 ++static void intel_bios_init_panel(struct drm_i915_private *i915,
2348 ++ struct intel_panel *panel,
2349 ++ const struct intel_bios_encoder_data *devdata,
2350 ++ const struct edid *edid,
2351 ++ bool use_fallback)
2352 + {
2353 +- init_vbt_panel_defaults(panel);
2354 ++ /* already have it? */
2355 ++ if (panel->vbt.panel_type >= 0) {
2356 ++ drm_WARN_ON(&i915->drm, !use_fallback);
2357 ++ return;
2358 ++ }
2359 +
2360 +- panel->vbt.panel_type = get_panel_type(i915, devdata, edid);
2361 ++ panel->vbt.panel_type = get_panel_type(i915, devdata,
2362 ++ edid, use_fallback);
2363 ++ if (panel->vbt.panel_type < 0) {
2364 ++ drm_WARN_ON(&i915->drm, use_fallback);
2365 ++ return;
2366 ++ }
2367 ++
2368 ++ init_vbt_panel_defaults(panel);
2369 +
2370 + parse_panel_options(i915, panel);
2371 + parse_generic_dtd(i915, panel);
2372 +@@ -3213,6 +3235,21 @@ void intel_bios_init_panel(struct drm_i915_private *i915,
2373 + parse_mipi_sequence(i915, panel);
2374 + }
2375 +
2376 ++void intel_bios_init_panel_early(struct drm_i915_private *i915,
2377 ++ struct intel_panel *panel,
2378 ++ const struct intel_bios_encoder_data *devdata)
2379 ++{
2380 ++ intel_bios_init_panel(i915, panel, devdata, NULL, false);
2381 ++}
2382 ++
2383 ++void intel_bios_init_panel_late(struct drm_i915_private *i915,
2384 ++ struct intel_panel *panel,
2385 ++ const struct intel_bios_encoder_data *devdata,
2386 ++ const struct edid *edid)
2387 ++{
2388 ++ intel_bios_init_panel(i915, panel, devdata, edid, true);
2389 ++}
2390 ++
2391 + /**
2392 + * intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
2393 + * @i915: i915 device instance
2394 +diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
2395 +index e375405a78284..ff1fdd2e0c1c5 100644
2396 +--- a/drivers/gpu/drm/i915/display/intel_bios.h
2397 ++++ b/drivers/gpu/drm/i915/display/intel_bios.h
2398 +@@ -232,10 +232,13 @@ struct mipi_pps_data {
2399 + } __packed;
2400 +
2401 + void intel_bios_init(struct drm_i915_private *dev_priv);
2402 +-void intel_bios_init_panel(struct drm_i915_private *dev_priv,
2403 +- struct intel_panel *panel,
2404 +- const struct intel_bios_encoder_data *devdata,
2405 +- const struct edid *edid);
2406 ++void intel_bios_init_panel_early(struct drm_i915_private *dev_priv,
2407 ++ struct intel_panel *panel,
2408 ++ const struct intel_bios_encoder_data *devdata);
2409 ++void intel_bios_init_panel_late(struct drm_i915_private *dev_priv,
2410 ++ struct intel_panel *panel,
2411 ++ const struct intel_bios_encoder_data *devdata,
2412 ++ const struct edid *edid);
2413 + void intel_bios_fini_panel(struct intel_panel *panel);
2414 + void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
2415 + bool intel_bios_is_valid_vbt(const void *buf, size_t size);
2416 +diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
2417 +index 6d5cbeb8df4da..8bb296f3d6252 100644
2418 +--- a/drivers/gpu/drm/i915/display/intel_connector.c
2419 ++++ b/drivers/gpu/drm/i915/display/intel_connector.c
2420 +@@ -54,7 +54,7 @@ int intel_connector_init(struct intel_connector *connector)
2421 + __drm_atomic_helper_connector_reset(&connector->base,
2422 + &conn_state->base);
2423 +
2424 +- INIT_LIST_HEAD(&connector->panel.fixed_modes);
2425 ++ intel_panel_init_alloc(connector);
2426 +
2427 + return 0;
2428 + }
2429 +diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
2430 +index 298d00a11f473..135dbcab62b28 100644
2431 +--- a/drivers/gpu/drm/i915/display/intel_display_types.h
2432 ++++ b/drivers/gpu/drm/i915/display/intel_display_types.h
2433 +@@ -291,7 +291,7 @@ struct intel_vbt_panel_data {
2434 + struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
2435 +
2436 + /* Feature bits */
2437 +- unsigned int panel_type:4;
2438 ++ int panel_type;
2439 + unsigned int lvds_dither:1;
2440 + unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
2441 +
2442 +diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
2443 +index b94bcceeff705..2e09899f2f927 100644
2444 +--- a/drivers/gpu/drm/i915/display/intel_dp.c
2445 ++++ b/drivers/gpu/drm/i915/display/intel_dp.c
2446 +@@ -5179,6 +5179,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
2447 + return false;
2448 + }
2449 +
2450 ++ intel_bios_init_panel_early(dev_priv, &intel_connector->panel,
2451 ++ encoder->devdata);
2452 ++
2453 + intel_pps_init(intel_dp);
2454 +
2455 + /* Cache DPCD and EDID for edp. */
2456 +@@ -5213,8 +5216,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
2457 + }
2458 + intel_connector->edid = edid;
2459 +
2460 +- intel_bios_init_panel(dev_priv, &intel_connector->panel,
2461 +- encoder->devdata, IS_ERR(edid) ? NULL : edid);
2462 ++ intel_bios_init_panel_late(dev_priv, &intel_connector->panel,
2463 ++ encoder->devdata, IS_ERR(edid) ? NULL : edid);
2464 +
2465 + intel_panel_add_edid_fixed_modes(intel_connector, true);
2466 +
2467 +diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
2468 +index e5352239b2a2f..a749a5a66d624 100644
2469 +--- a/drivers/gpu/drm/i915/display/intel_lvds.c
2470 ++++ b/drivers/gpu/drm/i915/display/intel_lvds.c
2471 +@@ -967,8 +967,8 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
2472 + }
2473 + intel_connector->edid = edid;
2474 +
2475 +- intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL,
2476 +- IS_ERR(edid) ? NULL : edid);
2477 ++ intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL,
2478 ++ IS_ERR(edid) ? NULL : edid);
2479 +
2480 + /* Try EDID first */
2481 + intel_panel_add_edid_fixed_modes(intel_connector,
2482 +diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
2483 +index f72f4646c0d70..b50db0dd20fc5 100644
2484 +--- a/drivers/gpu/drm/i915/display/intel_panel.c
2485 ++++ b/drivers/gpu/drm/i915/display/intel_panel.c
2486 +@@ -648,6 +648,14 @@ intel_panel_mode_valid(struct intel_connector *connector,
2487 + return MODE_OK;
2488 + }
2489 +
2490 ++void intel_panel_init_alloc(struct intel_connector *connector)
2491 ++{
2492 ++ struct intel_panel *panel = &connector->panel;
2493 ++
2494 ++ connector->panel.vbt.panel_type = -1;
2495 ++ INIT_LIST_HEAD(&panel->fixed_modes);
2496 ++}
2497 ++
2498 + int intel_panel_init(struct intel_connector *connector)
2499 + {
2500 + struct intel_panel *panel = &connector->panel;
2501 +diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
2502 +index 5c5b5b7f95b6c..4b51e1c51da62 100644
2503 +--- a/drivers/gpu/drm/i915/display/intel_panel.h
2504 ++++ b/drivers/gpu/drm/i915/display/intel_panel.h
2505 +@@ -18,6 +18,7 @@ struct intel_connector;
2506 + struct intel_crtc_state;
2507 + struct intel_encoder;
2508 +
2509 ++void intel_panel_init_alloc(struct intel_connector *connector);
2510 + int intel_panel_init(struct intel_connector *connector);
2511 + void intel_panel_fini(struct intel_connector *connector);
2512 + enum drm_connector_status
2513 +diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
2514 +index 774c1dc31a521..a15e09b551708 100644
2515 +--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
2516 ++++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
2517 +@@ -2891,7 +2891,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2518 + if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
2519 + goto err;
2520 +
2521 +- intel_bios_init_panel(i915, &intel_connector->panel, NULL, NULL);
2522 ++ intel_bios_init_panel_late(i915, &intel_connector->panel, NULL, NULL);
2523 +
2524 + /*
2525 + * Fetch modes from VBT. For SDVO prefer the VBT mode since some
2526 +diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
2527 +index b3f5ca280ef26..90e3e41095b34 100644
2528 +--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
2529 ++++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
2530 +@@ -1925,7 +1925,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
2531 +
2532 + intel_dsi->panel_power_off_time = ktime_get_boottime();
2533 +
2534 +- intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);
2535 ++ intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL, NULL);
2536 +
2537 + if (intel_connector->panel.vbt.dsi.config->dual_link)
2538 + intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
2539 +diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
2540 +index 3dcec7acb3840..4f0dbeebb79fb 100644
2541 +--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
2542 ++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
2543 +@@ -151,8 +151,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
2544 + OUT_RING(ring, 1);
2545 +
2546 + /* Enable local preemption for finegrain preemption */
2547 +- OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
2548 +- OUT_RING(ring, 0x02);
2549 ++ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
2550 ++ OUT_RING(ring, 0x1);
2551 +
2552 + /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
2553 + OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
2554 +@@ -808,7 +808,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
2555 + gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
2556 +
2557 + /* Set the highest bank bit */
2558 +- if (adreno_is_a540(adreno_gpu))
2559 ++ if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
2560 + regbit = 2;
2561 + else
2562 + regbit = 1;
2563 +diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
2564 +index 8abc9a2b114a2..e0eef47dae632 100644
2565 +--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
2566 ++++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
2567 +@@ -63,7 +63,7 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
2568 + struct msm_ringbuffer *ring = gpu->rb[i];
2569 +
2570 + spin_lock_irqsave(&ring->preempt_lock, flags);
2571 +- empty = (get_wptr(ring) == ring->memptrs->rptr);
2572 ++ empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
2573 + spin_unlock_irqrestore(&ring->preempt_lock, flags);
2574 +
2575 + if (!empty)
2576 +@@ -208,6 +208,7 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
2577 + a5xx_gpu->preempt[i]->wptr = 0;
2578 + a5xx_gpu->preempt[i]->rptr = 0;
2579 + a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
2580 ++ a5xx_gpu->preempt[i]->rptr_addr = shadowptr(a5xx_gpu, gpu->rb[i]);
2581 + }
2582 +
2583 + /* Write a 0 to signal that we aren't switching pagetables */
2584 +@@ -259,7 +260,6 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
2585 + ptr->data = 0;
2586 + ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE;
2587 +
2588 +- ptr->rptr_addr = shadowptr(a5xx_gpu, ring);
2589 + ptr->counter = counters_iova;
2590 +
2591 + return 0;
2592 +diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
2593 +index 628806423f7d2..c5c4c93b3689c 100644
2594 +--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
2595 ++++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
2596 +@@ -551,13 +551,15 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
2597 + return 0;
2598 + }
2599 +
2600 ++static int adreno_system_suspend(struct device *dev);
2601 + static void adreno_unbind(struct device *dev, struct device *master,
2602 + void *data)
2603 + {
2604 + struct msm_drm_private *priv = dev_get_drvdata(master);
2605 + struct msm_gpu *gpu = dev_to_gpu(dev);
2606 +
2607 +- pm_runtime_force_suspend(dev);
2608 ++ if (pm_runtime_enabled(dev))
2609 ++ WARN_ON_ONCE(adreno_system_suspend(dev));
2610 + gpu->funcs->destroy(gpu);
2611 +
2612 + priv->gpu_pdev = NULL;
2613 +@@ -609,7 +611,7 @@ static int adreno_remove(struct platform_device *pdev)
2614 +
2615 + static void adreno_shutdown(struct platform_device *pdev)
2616 + {
2617 +- pm_runtime_force_suspend(&pdev->dev);
2618 ++ WARN_ON_ONCE(adreno_system_suspend(&pdev->dev));
2619 + }
2620 +
2621 + static const struct of_device_id dt_match[] = {
2622 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
2623 +index 365738f40976a..41c93a18d5cb3 100644
2624 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
2625 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
2626 +@@ -12,11 +12,15 @@
2627 + #include "dpu_hw_catalog.h"
2628 + #include "dpu_kms.h"
2629 +
2630 +-#define VIG_MASK \
2631 ++#define VIG_BASE_MASK \
2632 + (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
2633 +- BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) |\
2634 ++ BIT(DPU_SSPP_CDP) |\
2635 + BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
2636 +
2637 ++#define VIG_MASK \
2638 ++ (VIG_BASE_MASK | \
2639 ++ BIT(DPU_SSPP_CSC_10BIT))
2640 ++
2641 + #define VIG_MSM8998_MASK \
2642 + (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
2643 +
2644 +@@ -29,7 +33,7 @@
2645 + #define VIG_SM8250_MASK \
2646 + (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
2647 +
2648 +-#define VIG_QCM2290_MASK (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL))
2649 ++#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
2650 +
2651 + #define DMA_MSM8998_MASK \
2652 + (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
2653 +@@ -51,7 +55,7 @@
2654 + (DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR))
2655 +
2656 + #define MIXER_MSM8998_MASK \
2657 +- (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
2658 ++ (BIT(DPU_MIXER_SOURCESPLIT))
2659 +
2660 + #define MIXER_SDM845_MASK \
2661 + (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
2662 +@@ -283,7 +287,6 @@ static const struct dpu_caps qcm2290_dpu_caps = {
2663 + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
2664 + .max_mixer_blendstages = 0x4,
2665 + .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
2666 +- .ubwc_version = DPU_HW_UBWC_VER_20,
2667 + .has_dim_layer = true,
2668 + .has_idle_pc = true,
2669 + .max_linewidth = 2160,
2670 +@@ -604,19 +607,19 @@ static const struct dpu_ctl_cfg sdm845_ctl[] = {
2671 + static const struct dpu_ctl_cfg sc7180_ctl[] = {
2672 + {
2673 + .name = "ctl_0", .id = CTL_0,
2674 +- .base = 0x1000, .len = 0xE4,
2675 ++ .base = 0x1000, .len = 0x1dc,
2676 + .features = BIT(DPU_CTL_ACTIVE_CFG),
2677 + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
2678 + },
2679 + {
2680 + .name = "ctl_1", .id = CTL_1,
2681 +- .base = 0x1200, .len = 0xE4,
2682 ++ .base = 0x1200, .len = 0x1dc,
2683 + .features = BIT(DPU_CTL_ACTIVE_CFG),
2684 + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
2685 + },
2686 + {
2687 + .name = "ctl_2", .id = CTL_2,
2688 +- .base = 0x1400, .len = 0xE4,
2689 ++ .base = 0x1400, .len = 0x1dc,
2690 + .features = BIT(DPU_CTL_ACTIVE_CFG),
2691 + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
2692 + },
2693 +@@ -810,9 +813,9 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
2694 + SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_MSM8998_MASK,
2695 + sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
2696 + SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_MSM8998_MASK,
2697 +- sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
2698 ++ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
2699 + SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_MSM8998_MASK,
2700 +- sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
2701 ++ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
2702 + };
2703 +
2704 + static const struct dpu_sspp_cfg sdm845_sspp[] = {
2705 +@@ -1918,8 +1921,6 @@ static const struct dpu_mdss_cfg qcm2290_dpu_cfg = {
2706 + .intf = qcm2290_intf,
2707 + .vbif_count = ARRAY_SIZE(sdm845_vbif),
2708 + .vbif = sdm845_vbif,
2709 +- .reg_dma_count = 1,
2710 +- .dma_cfg = &sdm845_regdma,
2711 + .perf = &qcm2290_perf_data,
2712 + .mdss_irqs = IRQ_SC7180_MASK,
2713 + };
2714 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
2715 +index 7ada957adbbb8..58abf5fe97e20 100644
2716 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
2717 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
2718 +@@ -572,6 +572,8 @@ void dpu_rm_release(struct dpu_global_state *global_state,
2719 + ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
2720 + _dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
2721 + ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
2722 ++ _dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
2723 ++ ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
2724 + }
2725 +
2726 + int dpu_rm_reserve(
2727 +diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
2728 +index 7c2cc1262c05d..d8c9d184190bb 100644
2729 +--- a/drivers/gpu/drm/msm/msm_gem_submit.c
2730 ++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
2731 +@@ -627,8 +627,8 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
2732 + int ret = 0;
2733 + uint32_t i, j;
2734 +
2735 +- post_deps = kmalloc_array(nr_syncobjs, sizeof(*post_deps),
2736 +- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
2737 ++ post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
2738 ++ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
2739 + if (!post_deps)
2740 + return ERR_PTR(-ENOMEM);
2741 +
2742 +@@ -643,7 +643,6 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
2743 + }
2744 +
2745 + post_deps[i].point = syncobj_desc.point;
2746 +- post_deps[i].chain = NULL;
2747 +
2748 + if (syncobj_desc.flags) {
2749 + ret = -EINVAL;
2750 +diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
2751 +index 591c852f326b9..76a6ae5d56526 100644
2752 +--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
2753 ++++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
2754 +@@ -35,8 +35,9 @@ struct nv50_wndw {
2755 +
2756 + int nv50_wndw_new_(const struct nv50_wndw_func *, struct drm_device *,
2757 + enum drm_plane_type, const char *name, int index,
2758 +- const u32 *format, enum nv50_disp_interlock_type,
2759 +- u32 interlock_data, u32 heads, struct nv50_wndw **);
2760 ++ const u32 *format, u32 heads,
2761 ++ enum nv50_disp_interlock_type, u32 interlock_data,
2762 ++ struct nv50_wndw **);
2763 + void nv50_wndw_flush_set(struct nv50_wndw *, u32 *interlock,
2764 + struct nv50_wndw_atom *);
2765 + void nv50_wndw_flush_clr(struct nv50_wndw *, u32 *interlock, bool flush,
2766 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
2767 +index 5c72aef3d3dd5..799a3086dbb06 100644
2768 +--- a/drivers/hid/hid-core.c
2769 ++++ b/drivers/hid/hid-core.c
2770 +@@ -261,6 +261,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
2771 + {
2772 + struct hid_report *report;
2773 + struct hid_field *field;
2774 ++ unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2775 + unsigned int usages;
2776 + unsigned int offset;
2777 + unsigned int i;
2778 +@@ -291,8 +292,11 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
2779 + offset = report->size;
2780 + report->size += parser->global.report_size * parser->global.report_count;
2781 +
2782 ++ if (parser->device->ll_driver->max_buffer_size)
2783 ++ max_buffer_size = parser->device->ll_driver->max_buffer_size;
2784 ++
2785 + /* Total size check: Allow for possible report index byte */
2786 +- if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
2787 ++ if (report->size > (max_buffer_size - 1) << 3) {
2788 + hid_err(parser->device, "report is too long\n");
2789 + return -1;
2790 + }
2791 +@@ -1966,6 +1970,7 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *
2792 + struct hid_report_enum *report_enum = hid->report_enum + type;
2793 + struct hid_report *report;
2794 + struct hid_driver *hdrv;
2795 ++ int max_buffer_size = HID_MAX_BUFFER_SIZE;
2796 + u32 rsize, csize = size;
2797 + u8 *cdata = data;
2798 + int ret = 0;
2799 +@@ -1981,10 +1986,13 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *
2800 +
2801 + rsize = hid_compute_report_size(report);
2802 +
2803 +- if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
2804 +- rsize = HID_MAX_BUFFER_SIZE - 1;
2805 +- else if (rsize > HID_MAX_BUFFER_SIZE)
2806 +- rsize = HID_MAX_BUFFER_SIZE;
2807 ++ if (hid->ll_driver->max_buffer_size)
2808 ++ max_buffer_size = hid->ll_driver->max_buffer_size;
2809 ++
2810 ++ if (report_enum->numbered && rsize >= max_buffer_size)
2811 ++ rsize = max_buffer_size - 1;
2812 ++ else if (rsize > max_buffer_size)
2813 ++ rsize = max_buffer_size;
2814 +
2815 + if (csize < rsize) {
2816 + dbg_hid("report %d is too short, (%d < %d)\n", report->id,
2817 +@@ -2387,7 +2395,12 @@ int hid_hw_raw_request(struct hid_device *hdev,
2818 + unsigned char reportnum, __u8 *buf,
2819 + size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
2820 + {
2821 +- if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
2822 ++ unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2823 ++
2824 ++ if (hdev->ll_driver->max_buffer_size)
2825 ++ max_buffer_size = hdev->ll_driver->max_buffer_size;
2826 ++
2827 ++ if (len < 1 || len > max_buffer_size || !buf)
2828 + return -EINVAL;
2829 +
2830 + return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
2831 +@@ -2406,7 +2419,12 @@ EXPORT_SYMBOL_GPL(hid_hw_raw_request);
2832 + */
2833 + int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
2834 + {
2835 +- if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
2836 ++ unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2837 ++
2838 ++ if (hdev->ll_driver->max_buffer_size)
2839 ++ max_buffer_size = hdev->ll_driver->max_buffer_size;
2840 ++
2841 ++ if (len < 1 || len > max_buffer_size || !buf)
2842 + return -EINVAL;
2843 +
2844 + if (hdev->ll_driver->output_report)
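
The hid-core hunks above all apply one pattern: start from the global HID_MAX_BUFFER_SIZE and let the transport driver lower the limit through ll_driver->max_buffer_size (the uhid hunk below sets it to UHID_DATA_MAX). A hedged userspace sketch of the clamping rule, with an invented helper and made-up limits; note the extra byte reserved for the report ID when reports are numbered:

#include <stdio.h>

#define HID_MAX_BUFFER_SIZE 16384 /* global default, as in the kernel */

/* Illustrative only: clamp a report size against a per-transport
 * limit, reserving one byte for the report ID when reports are
 * numbered. */
static unsigned int clamp_rsize(unsigned int rsize, unsigned int ll_max,
                                int numbered)
{
    unsigned int max = ll_max ? ll_max : HID_MAX_BUFFER_SIZE;

    if (numbered && rsize >= max)
        return max - 1;
    if (rsize > max)
        return max;
    return rsize;
}

int main(void)
{
    /* A transport with a 4 KiB limit and a numbered 5000-byte report. */
    printf("%u\n", clamp_rsize(5000, 4096, 1)); /* prints 4095 */
    return 0;
}
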
2845 +diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
2846 +index 2a918aeb0af13..59ac757c1d471 100644
2847 +--- a/drivers/hid/uhid.c
2848 ++++ b/drivers/hid/uhid.c
2849 +@@ -395,6 +395,7 @@ struct hid_ll_driver uhid_hid_driver = {
2850 + .parse = uhid_hid_parse,
2851 + .raw_request = uhid_hid_raw_request,
2852 + .output_report = uhid_hid_output_report,
2853 ++ .max_buffer_size = UHID_DATA_MAX,
2854 + };
2855 + EXPORT_SYMBOL_GPL(uhid_hid_driver);
2856 +
2857 +diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c
2858 +index 4b7eee01c6aad..615646a03039b 100644
2859 +--- a/drivers/input/touchscreen/exc3000.c
2860 ++++ b/drivers/input/touchscreen/exc3000.c
2861 +@@ -109,6 +109,11 @@ static inline void exc3000_schedule_timer(struct exc3000_data *data)
2862 + mod_timer(&data->timer, jiffies + msecs_to_jiffies(EXC3000_TIMEOUT_MS));
2863 + }
2864 +
2865 ++static void exc3000_shutdown_timer(void *timer)
2866 ++{
2867 ++ del_timer_sync(timer);
2868 ++}
2869 ++
2870 + static int exc3000_read_frame(struct exc3000_data *data, u8 *buf)
2871 + {
2872 + struct i2c_client *client = data->client;
2873 +@@ -386,6 +391,11 @@ static int exc3000_probe(struct i2c_client *client)
2874 + if (error)
2875 + return error;
2876 +
2877 ++ error = devm_add_action_or_reset(&client->dev, exc3000_shutdown_timer,
2878 ++ &data->timer);
2879 ++ if (error)
2880 ++ return error;
2881 ++
2882 + error = devm_request_threaded_irq(&client->dev, client->irq,
2883 + NULL, exc3000_interrupt, IRQF_ONESHOT,
2884 + client->name, data);
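
The exc3000 fix registers the timer teardown as a devm action before requesting the IRQ. Devm actions unwind in reverse (LIFO) order on unbind, so the interrupt handler is released first and del_timer_sync() runs after nothing can re-arm the timer. A small userspace analogy of that LIFO unwind (the callback stack is invented for illustration):

#include <stdio.h>

typedef void (*cleanup_fn)(void);

static cleanup_fn stack[8];
static int top;

static void add_action(cleanup_fn fn) { stack[top++] = fn; }

/* Mirrors how devm runs registered actions in reverse order. */
static void unwind(void)
{
    while (top > 0)
        stack[--top]();
}

static void timer_cleanup(void) { printf("2: del_timer_sync()\n"); }
static void irq_cleanup(void)   { printf("1: free_irq()\n"); }

int main(void)
{
    add_action(timer_cleanup); /* registered first ...              */
    add_action(irq_cleanup);   /* ... IRQ requested after the timer */
    unwind();                  /* IRQ goes away before the timer    */
    return 0;
}
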
2885 +diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
2886 +index 204661c8e918f..9dd80837a759a 100644
2887 +--- a/drivers/macintosh/windfarm_lm75_sensor.c
2888 ++++ b/drivers/macintosh/windfarm_lm75_sensor.c
2889 +@@ -33,8 +33,8 @@
2890 + #endif
2891 +
2892 + struct wf_lm75_sensor {
2893 +- int ds1775 : 1;
2894 +- int inited : 1;
2895 ++ unsigned int ds1775 : 1;
2896 ++ unsigned int inited : 1;
2897 + struct i2c_client *i2c;
2898 + struct wf_sensor sens;
2899 + };
2900 +diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c
2901 +index 00c6fe25fcba0..2bdb73b34d291 100644
2902 +--- a/drivers/macintosh/windfarm_smu_sensors.c
2903 ++++ b/drivers/macintosh/windfarm_smu_sensors.c
2904 +@@ -274,8 +274,8 @@ struct smu_cpu_power_sensor {
2905 + struct list_head link;
2906 + struct wf_sensor *volts;
2907 + struct wf_sensor *amps;
2908 +- int fake_volts : 1;
2909 +- int quadratic : 1;
2910 ++ unsigned int fake_volts : 1;
2911 ++ unsigned int quadratic : 1;
2912 + struct wf_sensor sens;
2913 + };
2914 + #define to_smu_cpu_power(c) container_of(c, struct smu_cpu_power_sensor, sens)
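
Both windfarm hunks fix signed one-bit bitfields: with "int x : 1" the field holds only 0 and -1 on compilers that treat plain-int bitfields as signed (gcc does), so a test like "x == 1" can never be true. A runnable demonstration:

#include <stdio.h>

struct flags {
    int          s : 1; /* signed 1-bit field: holds 0 or -1 */
    unsigned int u : 1; /* unsigned 1-bit field: holds 0 or 1 */
};

int main(void)
{
    struct flags f = { .s = 1, .u = 1 };

    /* With gcc this prints: s = -1, u = 1 */
    printf("s = %d, u = %u\n", f.s, f.u);
    printf("s == 1? %s\n", f.s == 1 ? "yes" : "no"); /* "no" */
    return 0;
}
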
2915 +diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
2916 +index 873087e180561..267f514023e72 100644
2917 +--- a/drivers/media/i2c/ov5640.c
2918 ++++ b/drivers/media/i2c/ov5640.c
2919 +@@ -3482,7 +3482,7 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
2920 + /* Auto/manual gain */
2921 + ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
2922 + 0, 1, 1, 1);
2923 +- ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
2924 ++ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN,
2925 + 0, 1023, 1, 0);
2926 +
2927 + ctrls->saturation = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION,
2928 +diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
2929 +index 22e524b69806a..a56c844d7f816 100644
2930 +--- a/drivers/media/rc/gpio-ir-recv.c
2931 ++++ b/drivers/media/rc/gpio-ir-recv.c
2932 +@@ -130,6 +130,23 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
2933 + "gpio-ir-recv-irq", gpio_dev);
2934 + }
2935 +
2936 ++static int gpio_ir_recv_remove(struct platform_device *pdev)
2937 ++{
2938 ++ struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);
2939 ++ struct device *pmdev = gpio_dev->pmdev;
2940 ++
2941 ++ if (pmdev) {
2942 ++ pm_runtime_get_sync(pmdev);
2943 ++ cpu_latency_qos_remove_request(&gpio_dev->qos);
2944 ++
2945 ++ pm_runtime_disable(pmdev);
2946 ++ pm_runtime_put_noidle(pmdev);
2947 ++ pm_runtime_set_suspended(pmdev);
2948 ++ }
2949 ++
2950 ++ return 0;
2951 ++}
2952 ++
2953 + #ifdef CONFIG_PM
2954 + static int gpio_ir_recv_suspend(struct device *dev)
2955 + {
2956 +@@ -189,6 +206,7 @@ MODULE_DEVICE_TABLE(of, gpio_ir_recv_of_match);
2957 +
2958 + static struct platform_driver gpio_ir_recv_driver = {
2959 + .probe = gpio_ir_recv_probe,
2960 ++ .remove = gpio_ir_recv_remove,
2961 + .driver = {
2962 + .name = KBUILD_MODNAME,
2963 + .of_match_table = of_match_ptr(gpio_ir_recv_of_match),
2964 +diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
2965 +index a884f6f6a8c2c..1e0b8bcd59e6c 100644
2966 +--- a/drivers/net/dsa/mt7530.c
2967 ++++ b/drivers/net/dsa/mt7530.c
2968 +@@ -393,6 +393,24 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
2969 + mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
2970 + }
2971 +
2972 ++/* Set up switch core clock for MT7530 */
2973 ++static void mt7530_pll_setup(struct mt7530_priv *priv)
2974 ++{
2975 ++ /* Disable PLL */
2976 ++ core_write(priv, CORE_GSWPLL_GRP1, 0);
2977 ++
2978 ++	/* Set core clock to 500MHz */
2979 ++ core_write(priv, CORE_GSWPLL_GRP2,
2980 ++ RG_GSWPLL_POSDIV_500M(1) |
2981 ++ RG_GSWPLL_FBKDIV_500M(25));
2982 ++
2983 ++ /* Enable PLL */
2984 ++ core_write(priv, CORE_GSWPLL_GRP1,
2985 ++ RG_GSWPLL_EN_PRE |
2986 ++ RG_GSWPLL_POSDIV_200M(2) |
2987 ++ RG_GSWPLL_FBKDIV_200M(32));
2988 ++}
2989 ++
2990 + /* Setup TX circuit including relevant PAD and driving */
2991 + static int
2992 + mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
2993 +@@ -453,21 +471,6 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
2994 + core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
2995 + REG_GSWCK_EN | REG_TRGMIICK_EN);
2996 +
2997 +- /* Setup core clock for MT7530 */
2998 +- /* Disable PLL */
2999 +- core_write(priv, CORE_GSWPLL_GRP1, 0);
3000 +-
3001 +- /* Set core clock into 500Mhz */
3002 +- core_write(priv, CORE_GSWPLL_GRP2,
3003 +- RG_GSWPLL_POSDIV_500M(1) |
3004 +- RG_GSWPLL_FBKDIV_500M(25));
3005 +-
3006 +- /* Enable PLL */
3007 +- core_write(priv, CORE_GSWPLL_GRP1,
3008 +- RG_GSWPLL_EN_PRE |
3009 +- RG_GSWPLL_POSDIV_200M(2) |
3010 +- RG_GSWPLL_FBKDIV_200M(32));
3011 +-
3012 + /* Setup the MT7530 TRGMII Tx Clock */
3013 + core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
3014 + core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
3015 +@@ -2201,6 +2204,8 @@ mt7530_setup(struct dsa_switch *ds)
3016 + SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
3017 + SYS_CTRL_REG_RST);
3018 +
3019 ++ mt7530_pll_setup(priv);
3020 ++
3021 + /* Enable Port 6 only; P5 as GMAC5 which currently is not supported */
3022 + val = mt7530_read(priv, MT7530_MHWTRAP);
3023 + val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
3024 +diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
3025 +index 3038386a5afd8..1761df8fb7f96 100644
3026 +--- a/drivers/net/ethernet/broadcom/bgmac.c
3027 ++++ b/drivers/net/ethernet/broadcom/bgmac.c
3028 +@@ -890,13 +890,13 @@ static void bgmac_chip_reset_idm_config(struct bgmac *bgmac)
3029 +
3030 + if (iost & BGMAC_BCMA_IOST_ATTACHED) {
3031 + flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
3032 +- if (!bgmac->has_robosw)
3033 ++ if (bgmac->in_init || !bgmac->has_robosw)
3034 + flags |= BGMAC_BCMA_IOCTL_SW_RESET;
3035 + }
3036 + bgmac_clk_enable(bgmac, flags);
3037 + }
3038 +
3039 +- if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
3040 ++ if (iost & BGMAC_BCMA_IOST_ATTACHED && (bgmac->in_init || !bgmac->has_robosw))
3041 + bgmac_idm_write(bgmac, BCMA_IOCTL,
3042 + bgmac_idm_read(bgmac, BCMA_IOCTL) &
3043 + ~BGMAC_BCMA_IOCTL_SW_RESET);
3044 +@@ -1490,6 +1490,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
3045 + struct net_device *net_dev = bgmac->net_dev;
3046 + int err;
3047 +
3048 ++ bgmac->in_init = true;
3049 ++
3050 + bgmac_chip_intrs_off(bgmac);
3051 +
3052 + net_dev->irq = bgmac->irq;
3053 +@@ -1542,6 +1544,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
3054 + /* Omit FCS from max MTU size */
3055 + net_dev->max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN;
3056 +
3057 ++ bgmac->in_init = false;
3058 ++
3059 + err = register_netdev(bgmac->net_dev);
3060 + if (err) {
3061 + dev_err(bgmac->dev, "Cannot register net device\n");
3062 +diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
3063 +index e05ac92c06504..d73ef262991d6 100644
3064 +--- a/drivers/net/ethernet/broadcom/bgmac.h
3065 ++++ b/drivers/net/ethernet/broadcom/bgmac.h
3066 +@@ -472,6 +472,8 @@ struct bgmac {
3067 + int irq;
3068 + u32 int_mask;
3069 +
3070 ++ bool in_init;
3071 ++
3072 + /* Current MAC state */
3073 + int mac_speed;
3074 + int mac_duplex;
3075 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
3076 +index cecda545372f9..251b102d2792b 100644
3077 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
3078 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
3079 +@@ -3143,7 +3143,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3080 +
3081 + static void bnxt_free_tpa_info(struct bnxt *bp)
3082 + {
3083 +- int i;
3084 ++ int i, j;
3085 +
3086 + for (i = 0; i < bp->rx_nr_rings; i++) {
3087 + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3088 +@@ -3151,8 +3151,10 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
3089 + kfree(rxr->rx_tpa_idx_map);
3090 + rxr->rx_tpa_idx_map = NULL;
3091 + if (rxr->rx_tpa) {
3092 +- kfree(rxr->rx_tpa[0].agg_arr);
3093 +- rxr->rx_tpa[0].agg_arr = NULL;
3094 ++ for (j = 0; j < bp->max_tpa; j++) {
3095 ++ kfree(rxr->rx_tpa[j].agg_arr);
3096 ++ rxr->rx_tpa[j].agg_arr = NULL;
3097 ++ }
3098 + }
3099 + kfree(rxr->rx_tpa);
3100 + rxr->rx_tpa = NULL;
3101 +@@ -3161,14 +3163,13 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
3102 +
3103 + static int bnxt_alloc_tpa_info(struct bnxt *bp)
3104 + {
3105 +- int i, j, total_aggs = 0;
3106 ++ int i, j;
3107 +
3108 + bp->max_tpa = MAX_TPA;
3109 + if (bp->flags & BNXT_FLAG_CHIP_P5) {
3110 + if (!bp->max_tpa_v2)
3111 + return 0;
3112 + bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3113 +- total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
3114 + }
3115 +
3116 + for (i = 0; i < bp->rx_nr_rings; i++) {
3117 +@@ -3182,12 +3183,12 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
3118 +
3119 + if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3120 + continue;
3121 +- agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
3122 +- rxr->rx_tpa[0].agg_arr = agg;
3123 +- if (!agg)
3124 +- return -ENOMEM;
3125 +- for (j = 1; j < bp->max_tpa; j++)
3126 +- rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
3127 ++ for (j = 0; j < bp->max_tpa; j++) {
3128 ++ agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
3129 ++ if (!agg)
3130 ++ return -ENOMEM;
3131 ++ rxr->rx_tpa[j].agg_arr = agg;
3132 ++ }
3133 + rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3134 + GFP_KERNEL);
3135 + if (!rxr->rx_tpa_idx_map)
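
The bnxt hunks replace one big "max_tpa * MAX_SKB_FRAGS" allocation, carved up by pointer arithmetic, with one kcalloc() per TPA entry, freed symmetrically. Each request is then small enough not to need a huge contiguous region. A hedged userspace sketch of the per-entry pattern (sizes and types are illustrative):

#include <stdlib.h>

#define MAX_FRAGS 17 /* illustrative stand-in for MAX_SKB_FRAGS */

struct tpa_info { void *agg_arr; };

/* Allocate one small array per entry instead of one huge block. */
static int alloc_tpa(struct tpa_info *tpa, int n)
{
    for (int j = 0; j < n; j++) {
        tpa[j].agg_arr = calloc(MAX_FRAGS, sizeof(long));
        if (!tpa[j].agg_arr)
            return -1;
    }
    return 0;
}

static void free_tpa(struct tpa_info *tpa, int n)
{
    for (int j = 0; j < n; j++) {
        free(tpa[j].agg_arr);  /* must free every entry now, */
        tpa[j].agg_arr = NULL; /* not just entry 0           */
    }
}

int main(void)
{
    struct tpa_info tpa[64] = { 0 };

    alloc_tpa(tpa, 64); /* on partial failure, free_tpa() still */
    free_tpa(tpa, 64);  /* releases whatever was allocated      */
    return 0;
}
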
3136 +diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
3137 +index 0b146a0d42058..6375372f87294 100644
3138 +--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
3139 ++++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
3140 +@@ -1372,7 +1372,7 @@ ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
3141 + tlv->ouisubtype = htonl(ouisubtype);
3142 +
3143 + buf[0] = dcbcfg->pfc.pfccap & 0xF;
3144 +- buf[1] = dcbcfg->pfc.pfcena & 0xF;
3145 ++ buf[1] = dcbcfg->pfc.pfcena;
3146 + }
3147 +
3148 + /**
3149 +diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
3150 +index e1f6373a3a2c0..02eb78df2378e 100644
3151 +--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
3152 ++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
3153 +@@ -4145,6 +4145,8 @@ ice_get_module_eeprom(struct net_device *netdev,
3154 + * SFP modules only ever use page 0.
3155 + */
3156 + if (page == 0 || !(data[0x2] & 0x4)) {
3157 ++ u32 copy_len;
3158 ++
3159 + /* If i2c bus is busy due to slow page change or
3160 + * link management access, call can fail. This is normal.
3161 + * So we retry this a few times.
3162 +@@ -4168,8 +4170,8 @@ ice_get_module_eeprom(struct net_device *netdev,
3163 + }
3164 +
3165 + /* Make sure we have enough room for the new block */
3166 +- if ((i + SFF_READ_BLOCK_SIZE) < ee->len)
3167 +- memcpy(data + i, value, SFF_READ_BLOCK_SIZE);
3168 ++ copy_len = min_t(u32, SFF_READ_BLOCK_SIZE, ee->len - i);
3169 ++ memcpy(data + i, value, copy_len);
3170 + }
3171 + }
3172 + return 0;
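
Before the ice_ethtool fix, a read block that straddled the end of the caller's buffer was skipped outright, leaving the tail zeroed; clamping the copy length returns the final partial block instead. The same pattern as a runnable userspace sketch (constants are made up):

#include <stdio.h>
#include <string.h>

#define BLOCK 8 /* stand-in for SFF_READ_BLOCK_SIZE */

static unsigned int min_u32(unsigned int a, unsigned int b)
{
    return a < b ? a : b; /* the kernel code uses min_t(u32, ...) */
}

int main(void)
{
    unsigned char src[BLOCK] = "ABCDEFGH", dst[20] = { 0 };
    unsigned int len = sizeof(dst);

    for (unsigned int i = 0; i < len; i += BLOCK) {
        /* Old logic: if (i + BLOCK < len) memcpy(...); so the tail
         * of dst was never written. New logic copies what fits. */
        unsigned int copy = min_u32(BLOCK, len - i);
        memcpy(dst + i, src, copy);
    }
    printf("last byte written: %c\n", dst[len - 1]); /* 'D', not 0 */
    return 0;
}
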
3173 +diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
3174 +index f68c555be4e9a..71cb15fcf63b9 100644
3175 +--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
3176 ++++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
3177 +@@ -1322,8 +1322,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
3178 + if (match.mask->vlan_priority) {
3179 + fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
3180 + headers->vlan_hdr.vlan_prio =
3181 +- cpu_to_be16((match.key->vlan_priority <<
3182 +- VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
3183 ++ be16_encode_bits(match.key->vlan_priority,
3184 ++ VLAN_PRIO_MASK);
3185 + }
3186 +
3187 + if (match.mask->vlan_tpid)
3188 +@@ -1356,8 +1356,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
3189 + if (match.mask->vlan_priority) {
3190 + fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO;
3191 + headers->cvlan_hdr.vlan_prio =
3192 +- cpu_to_be16((match.key->vlan_priority <<
3193 +- VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
3194 ++ be16_encode_bits(match.key->vlan_priority,
3195 ++ VLAN_PRIO_MASK);
3196 + }
3197 + }
3198 +
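
be16_encode_bits(val, mask) is the bitfield-helper form of the open-coded "cpu_to_be16((val << shift) & mask)": the shift is derived from the mask's lowest set bit, so the shift constant cannot drift out of sync with the mask. A host-endian userspace sketch of the encoding step (the kernel helper additionally swaps the result to big endian):

#include <stdio.h>
#include <stdint.h>

#define VLAN_PRIO_MASK 0xe000 /* bits 15:13, as in the kernel */

/* Illustrative u16_encode_bits(): shift the value into the field
 * described by mask, deriving the shift from the mask itself. */
static uint16_t u16_encode_bits(uint16_t val, uint16_t mask)
{
    unsigned int shift = __builtin_ctz(mask); /* lowest set bit */

    return (uint16_t)((val << shift) & mask);
}

int main(void)
{
    printf("0x%04x\n", u16_encode_bits(5, VLAN_PRIO_MASK)); /* 0xa000 */
    return 0;
}
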
3199 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
3200 +index 76474385a6027..b07c6f51b461b 100644
3201 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
3202 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
3203 +@@ -859,6 +859,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
3204 + int slot);
3205 + int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
3206 +
3207 ++#define NDC_AF_BANK_MASK GENMASK_ULL(7, 0)
3208 ++#define NDC_AF_BANK_LINE_MASK GENMASK_ULL(31, 16)
3209 ++
3210 + /* CN10K RVU */
3211 + int rvu_set_channels_base(struct rvu *rvu);
3212 + void rvu_program_channels(struct rvu *rvu);
3213 +@@ -874,6 +877,8 @@ static inline void rvu_dbg_init(struct rvu *rvu) {}
3214 + static inline void rvu_dbg_exit(struct rvu *rvu) {}
3215 + #endif
3216 +
3217 ++int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr);
3218 ++
3219 + /* RVU Switch */
3220 + void rvu_switch_enable(struct rvu *rvu);
3221 + void rvu_switch_disable(struct rvu *rvu);
3222 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3223 +index f66dde2b0f926..abef0fd4259a3 100644
3224 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3225 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3226 +@@ -198,9 +198,6 @@ enum cpt_eng_type {
3227 + CPT_IE_TYPE = 3,
3228 + };
3229 +
3230 +-#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
3231 +- blk_addr, NDC_AF_CONST) & 0xFF)
3232 +-
3233 + #define rvu_dbg_NULL NULL
3234 + #define rvu_dbg_open_NULL NULL
3235 +
3236 +@@ -1448,6 +1445,7 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
3237 + struct nix_hw *nix_hw;
3238 + struct rvu *rvu;
3239 + int bank, max_bank;
3240 ++ u64 ndc_af_const;
3241 +
3242 + if (blk_addr == BLKADDR_NDC_NPA0) {
3243 + rvu = s->private;
3244 +@@ -1456,7 +1454,8 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
3245 + rvu = nix_hw->rvu;
3246 + }
3247 +
3248 +- max_bank = NDC_MAX_BANK(rvu, blk_addr);
3249 ++ ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
3250 ++ max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
3251 + for (bank = 0; bank < max_bank; bank++) {
3252 + seq_printf(s, "BANK:%d\n", bank);
3253 + seq_printf(s, "\tHits:\t%lld\n",
3254 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3255 +index a62c1b3220120..84f2ba53b8b68 100644
3256 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3257 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
3258 +@@ -790,6 +790,7 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
3259 + struct nix_aq_res_s *result;
3260 + int timeout = 1000;
3261 + u64 reg, head;
3262 ++ int ret;
3263 +
3264 + result = (struct nix_aq_res_s *)aq->res->base;
3265 +
3266 +@@ -813,9 +814,22 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
3267 + return -EBUSY;
3268 + }
3269 +
3270 +- if (result->compcode != NIX_AQ_COMP_GOOD)
3271 ++ if (result->compcode != NIX_AQ_COMP_GOOD) {
3272 + /* TODO: Replace this with some error code */
3273 ++ if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
3274 ++ result->compcode == NIX_AQ_COMP_LOCKERR ||
3275 ++ result->compcode == NIX_AQ_COMP_CTX_POISON) {
3276 ++ ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
3277 ++ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
3278 ++ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
3279 ++ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
3280 ++ if (ret)
3281 ++ dev_err(rvu->dev,
3282 ++ "%s: Not able to unlock cachelines\n", __func__);
3283 ++ }
3284 ++
3285 + return -EBUSY;
3286 ++ }
3287 +
3288 + return 0;
3289 + }
3290 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
3291 +index 70bd036ed76e4..4f5ca5ab13a40 100644
3292 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
3293 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
3294 +@@ -4,7 +4,7 @@
3295 + * Copyright (C) 2018 Marvell.
3296 + *
3297 + */
3298 +-
3299 ++#include <linux/bitfield.h>
3300 + #include <linux/module.h>
3301 + #include <linux/pci.h>
3302 +
3303 +@@ -42,9 +42,18 @@ static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
3304 + return -EBUSY;
3305 + }
3306 +
3307 +- if (result->compcode != NPA_AQ_COMP_GOOD)
3308 ++ if (result->compcode != NPA_AQ_COMP_GOOD) {
3309 + /* TODO: Replace this with some error code */
3310 ++ if (result->compcode == NPA_AQ_COMP_CTX_FAULT ||
3311 ++ result->compcode == NPA_AQ_COMP_LOCKERR ||
3312 ++ result->compcode == NPA_AQ_COMP_CTX_POISON) {
3313 ++ if (rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NPA0))
3314 ++ dev_err(rvu->dev,
3315 ++ "%s: Not able to unlock cachelines\n", __func__);
3316 ++ }
3317 ++
3318 + return -EBUSY;
3319 ++ }
3320 +
3321 + return 0;
3322 + }
3323 +@@ -545,3 +554,48 @@ void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
3324 +
3325 + npa_ctx_free(rvu, pfvf);
3326 + }
3327 ++
3328 ++/* Due to a hardware erratum, in some corner cases, AQ context lock
3329 ++ * operations can result in an NDC way getting into an illegal state
3330 ++ * of being locked but not valid.
3331 ++ *
3332 ++ * This API solves the problem by clearing the lock bit of the NDC block.
3333 ++ * The operation needs to be done for each line of all the NDC banks.
3334 ++ */
3335 ++int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr)
3336 ++{
3337 ++ int bank, max_bank, line, max_line, err;
3338 ++ u64 reg, ndc_af_const;
3339 ++
3340 ++ /* Set the ENABLE bit(63) to '0' */
3341 ++ reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL);
3342 ++ rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0));
3343 ++
3344 ++ /* Poll until the BUSY bits(47:32) are set to '0' */
3345 ++ err = rvu_poll_reg(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, GENMASK_ULL(47, 32), true);
3346 ++ if (err) {
3347 ++ dev_err(rvu->dev, "Timed out while polling for NDC CAM busy bits.\n");
3348 ++ return err;
3349 ++ }
3350 ++
3351 ++ ndc_af_const = rvu_read64(rvu, blkaddr, NDC_AF_CONST);
3352 ++ max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
3353 ++ max_line = FIELD_GET(NDC_AF_BANK_LINE_MASK, ndc_af_const);
3354 ++ for (bank = 0; bank < max_bank; bank++) {
3355 ++ for (line = 0; line < max_line; line++) {
3356 ++ /* Check if 'cache line valid bit(63)' is not set
3357 ++			 * but 'cache line lock bit(60)' is set, and if
3358 ++			 * so, clear the lock bit(60).
3359 ++ */
3360 ++ reg = rvu_read64(rvu, blkaddr,
3361 ++ NDC_AF_BANKX_LINEX_METADATA(bank, line));
3362 ++ if (!(reg & BIT_ULL(63)) && (reg & BIT_ULL(60))) {
3363 ++ rvu_write64(rvu, blkaddr,
3364 ++ NDC_AF_BANKX_LINEX_METADATA(bank, line),
3365 ++ reg & ~BIT_ULL(60));
3366 ++ }
3367 ++ }
3368 ++ }
3369 ++
3370 ++ return 0;
3371 ++}
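
rvu_ndc_fix_locked_cacheline() and the debugfs hunk above both pull the bank and line counts out of NDC_AF_CONST with FIELD_GET()/GENMASK_ULL() instead of the old open-coded "& 0xFF". A self-contained sketch of simplified runtime versions of those helpers (the kernel versions are compile-time-checked macros):

#include <stdio.h>
#include <stdint.h>

/* Simplified runtime forms of the kernel's GENMASK_ULL/FIELD_GET. */
static uint64_t genmask_ull(unsigned int h, unsigned int l)
{
    return ((~0ULL) >> (63 - h)) & (~0ULL << l);
}

static uint64_t field_get(uint64_t mask, uint64_t reg)
{
    return (reg & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
    uint64_t ndc_af_const = 0x00300004ULL;    /* made-up register value */
    uint64_t bank_mask = genmask_ull(7, 0);   /* NDC_AF_BANK_MASK       */
    uint64_t line_mask = genmask_ull(31, 16); /* NDC_AF_BANK_LINE_MASK  */

    printf("banks=%llu lines=%llu\n",
           (unsigned long long)field_get(bank_mask, ndc_af_const),
           (unsigned long long)field_get(line_mask, ndc_af_const));
    /* prints: banks=4 lines=48 */
    return 0;
}
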
3372 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
3373 +index 0e0d536645ac7..39f7a7cb27558 100644
3374 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
3375 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
3376 +@@ -690,6 +690,7 @@
3377 + #define NDC_AF_INTR_ENA_W1S (0x00068)
3378 + #define NDC_AF_INTR_ENA_W1C (0x00070)
3379 + #define NDC_AF_ACTIVE_PC (0x00078)
3380 ++#define NDC_AF_CAMS_RD_INTERVAL (0x00080)
3381 + #define NDC_AF_BP_TEST_ENABLE (0x001F8)
3382 + #define NDC_AF_BP_TEST(a) (0x00200 | (a) << 3)
3383 + #define NDC_AF_BLK_RST (0x002F0)
3384 +@@ -705,6 +706,8 @@
3385 + (0x00F00 | (a) << 5 | (b) << 4)
3386 + #define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3)
3387 + #define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3)
3388 ++#define NDC_AF_BANKX_LINEX_METADATA(a, b) \
3389 ++ (0x10000 | (a) << 12 | (b) << 3)
3390 +
3391 + /* LBK */
3392 + #define LBK_CONST (0x10ull)
3393 +diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
3394 +index 53ee9dea66388..49975924e2426 100644
3395 +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
3396 ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
3397 +@@ -561,7 +561,8 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
3398 + mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3399 + mcr_new = mcr_cur;
3400 + mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
3401 +- MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
3402 ++ MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
3403 ++ MAC_MCR_RX_FIFO_CLR_DIS;
3404 +
3405 + /* Only update control register when needed! */
3406 + if (mcr_new != mcr_cur)
3407 +diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
3408 +index 306fdc2c608a4..dafa9a0baa58c 100644
3409 +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
3410 ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
3411 +@@ -357,6 +357,7 @@
3412 + #define MAC_MCR_FORCE_MODE BIT(15)
3413 + #define MAC_MCR_TX_EN BIT(14)
3414 + #define MAC_MCR_RX_EN BIT(13)
3415 ++#define MAC_MCR_RX_FIFO_CLR_DIS BIT(12)
3416 + #define MAC_MCR_BACKOFF_EN BIT(9)
3417 + #define MAC_MCR_BACKPR_EN BIT(8)
3418 + #define MAC_MCR_FORCE_RX_FC BIT(5)
3419 +diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
3420 +index a9aec900d608d..7d66fe75cd3bf 100644
3421 +--- a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
3422 ++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
3423 +@@ -194,7 +194,7 @@ int lan966x_police_port_del(struct lan966x_port *port,
3424 + return -EINVAL;
3425 + }
3426 +
3427 +- err = lan966x_police_del(port, port->tc.police_id);
3428 ++ err = lan966x_police_del(port, POL_IDX_PORT + port->chip_port);
3429 + if (err) {
3430 + NL_SET_ERR_MSG_MOD(extack,
3431 + "Failed to add policer to port");
3432 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3433 +index 84e1740b12f1b..3c1d4b27668fe 100644
3434 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3435 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3436 +@@ -1168,6 +1168,7 @@ static int stmmac_init_phy(struct net_device *dev)
3437 +
3438 + phylink_ethtool_get_wol(priv->phylink, &wol);
3439 + device_set_wakeup_capable(priv->device, !!wol.supported);
3440 ++ device_set_wakeup_enable(priv->device, !!wol.wolopts);
3441 + }
3442 +
3443 + return ret;
3444 +diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
3445 +index ccecee2524ce6..0b88635f4fbca 100644
3446 +--- a/drivers/net/phy/microchip.c
3447 ++++ b/drivers/net/phy/microchip.c
3448 +@@ -342,6 +342,37 @@ static int lan88xx_config_aneg(struct phy_device *phydev)
3449 + return genphy_config_aneg(phydev);
3450 + }
3451 +
3452 ++static void lan88xx_link_change_notify(struct phy_device *phydev)
3453 ++{
3454 ++ int temp;
3455 ++
3456 ++	/* In forced 100 F/H mode, the chip may fail to set the mode
3457 ++	 * correctly when the cable is switched between a long (~50 m+)
3458 ++	 * one and a short one. As a workaround, set the speed to 10
3459 ++	 * before setting it to 100 in forced 100 F/H mode.
3460 ++ */
3461 ++ if (!phydev->autoneg && phydev->speed == 100) {
3462 ++ /* disable phy interrupt */
3463 ++ temp = phy_read(phydev, LAN88XX_INT_MASK);
3464 ++ temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
3465 ++ phy_write(phydev, LAN88XX_INT_MASK, temp);
3466 ++
3467 ++ temp = phy_read(phydev, MII_BMCR);
3468 ++ temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
3469 ++ phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
3470 ++ temp |= BMCR_SPEED100;
3471 ++ phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
3472 ++
3473 ++ /* clear pending interrupt generated while workaround */
3474 ++ temp = phy_read(phydev, LAN88XX_INT_STS);
3475 ++
3476 ++ /* enable phy interrupt back */
3477 ++ temp = phy_read(phydev, LAN88XX_INT_MASK);
3478 ++ temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
3479 ++ phy_write(phydev, LAN88XX_INT_MASK, temp);
3480 ++ }
3481 ++}
3482 ++
3483 + static struct phy_driver microchip_phy_driver[] = {
3484 + {
3485 + .phy_id = 0x0007c132,
3486 +@@ -359,6 +390,7 @@ static struct phy_driver microchip_phy_driver[] = {
3487 +
3488 + .config_init = lan88xx_config_init,
3489 + .config_aneg = lan88xx_config_aneg,
3490 ++ .link_change_notify = lan88xx_link_change_notify,
3491 +
3492 + .config_intr = lan88xx_phy_config_intr,
3493 + .handle_interrupt = lan88xx_handle_interrupt,
3494 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
3495 +index 8cff61dbc4b57..7fbb0904b3c0f 100644
3496 +--- a/drivers/net/phy/phy_device.c
3497 ++++ b/drivers/net/phy/phy_device.c
3498 +@@ -3041,8 +3041,6 @@ static int phy_probe(struct device *dev)
3499 + if (phydrv->flags & PHY_IS_INTERNAL)
3500 + phydev->is_internal = true;
3501 +
3502 +- mutex_lock(&phydev->lock);
3503 +-
3504 + /* Deassert the reset signal */
3505 + phy_device_reset(phydev, 0);
3506 +
3507 +@@ -3110,12 +3108,10 @@ static int phy_probe(struct device *dev)
3508 + phydev->state = PHY_READY;
3509 +
3510 + out:
3511 +- /* Assert the reset signal */
3512 ++ /* Re-assert the reset signal on error */
3513 + if (err)
3514 + phy_device_reset(phydev, 1);
3515 +
3516 +- mutex_unlock(&phydev->lock);
3517 +-
3518 + return err;
3519 + }
3520 +
3521 +@@ -3125,9 +3121,7 @@ static int phy_remove(struct device *dev)
3522 +
3523 + cancel_delayed_work_sync(&phydev->state_queue);
3524 +
3525 +- mutex_lock(&phydev->lock);
3526 + phydev->state = PHY_DOWN;
3527 +- mutex_unlock(&phydev->lock);
3528 +
3529 + sfp_bus_del_upstream(phydev->sfp_bus);
3530 + phydev->sfp_bus = NULL;
3531 +diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
3532 +index ac7481ce2fc16..00d9eff91dcfa 100644
3533 +--- a/drivers/net/phy/smsc.c
3534 ++++ b/drivers/net/phy/smsc.c
3535 +@@ -44,7 +44,6 @@ static struct smsc_hw_stat smsc_hw_stats[] = {
3536 + };
3537 +
3538 + struct smsc_phy_priv {
3539 +- u16 intmask;
3540 + bool energy_enable;
3541 + };
3542 +
3543 +@@ -57,7 +56,6 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
3544 +
3545 + static int smsc_phy_config_intr(struct phy_device *phydev)
3546 + {
3547 +- struct smsc_phy_priv *priv = phydev->priv;
3548 + int rc;
3549 +
3550 + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
3551 +@@ -65,14 +63,9 @@ static int smsc_phy_config_intr(struct phy_device *phydev)
3552 + if (rc)
3553 + return rc;
3554 +
3555 +- priv->intmask = MII_LAN83C185_ISF_INT4 | MII_LAN83C185_ISF_INT6;
3556 +- if (priv->energy_enable)
3557 +- priv->intmask |= MII_LAN83C185_ISF_INT7;
3558 +-
3559 +- rc = phy_write(phydev, MII_LAN83C185_IM, priv->intmask);
3560 ++ rc = phy_write(phydev, MII_LAN83C185_IM,
3561 ++ MII_LAN83C185_ISF_INT_PHYLIB_EVENTS);
3562 + } else {
3563 +- priv->intmask = 0;
3564 +-
3565 + rc = phy_write(phydev, MII_LAN83C185_IM, 0);
3566 + if (rc)
3567 + return rc;
3568 +@@ -85,7 +78,6 @@ static int smsc_phy_config_intr(struct phy_device *phydev)
3569 +
3570 + static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev)
3571 + {
3572 +- struct smsc_phy_priv *priv = phydev->priv;
3573 + int irq_status;
3574 +
3575 + irq_status = phy_read(phydev, MII_LAN83C185_ISF);
3576 +@@ -96,7 +88,7 @@ static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev)
3577 + return IRQ_NONE;
3578 + }
3579 +
3580 +- if (!(irq_status & priv->intmask))
3581 ++ if (!(irq_status & MII_LAN83C185_ISF_INT_PHYLIB_EVENTS))
3582 + return IRQ_NONE;
3583 +
3584 + phy_trigger_machine(phydev);
3585 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
3586 +index f18ab8e220db7..068488890d57b 100644
3587 +--- a/drivers/net/usb/lan78xx.c
3588 ++++ b/drivers/net/usb/lan78xx.c
3589 +@@ -2115,33 +2115,8 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
3590 + static void lan78xx_link_status_change(struct net_device *net)
3591 + {
3592 + struct phy_device *phydev = net->phydev;
3593 +- int temp;
3594 +-
3595 +- /* At forced 100 F/H mode, chip may fail to set mode correctly
3596 +- * when cable is switched between long(~50+m) and short one.
3597 +- * As workaround, set to 10 before setting to 100
3598 +- * at forced 100 F/H mode.
3599 +- */
3600 +- if (!phydev->autoneg && (phydev->speed == 100)) {
3601 +- /* disable phy interrupt */
3602 +- temp = phy_read(phydev, LAN88XX_INT_MASK);
3603 +- temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
3604 +- phy_write(phydev, LAN88XX_INT_MASK, temp);
3605 +
3606 +- temp = phy_read(phydev, MII_BMCR);
3607 +- temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
3608 +- phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
3609 +- temp |= BMCR_SPEED100;
3610 +- phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
3611 +-
3612 +- /* clear pending interrupt generated while workaround */
3613 +- temp = phy_read(phydev, LAN88XX_INT_STS);
3614 +-
3615 +- /* enable phy interrupt back */
3616 +- temp = phy_read(phydev, LAN88XX_INT_MASK);
3617 +- temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
3618 +- phy_write(phydev, LAN88XX_INT_MASK, temp);
3619 +- }
3620 ++ phy_print_status(phydev);
3621 + }
3622 +
3623 + static int irq_map(struct irq_domain *d, unsigned int irq,
3624 +diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
3625 +index 2d53e0f88d2f9..1e0f2297f9c66 100644
3626 +--- a/drivers/nfc/fdp/i2c.c
3627 ++++ b/drivers/nfc/fdp/i2c.c
3628 +@@ -247,6 +247,9 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev,
3629 + len, sizeof(**fw_vsc_cfg),
3630 + GFP_KERNEL);
3631 +
3632 ++ if (!*fw_vsc_cfg)
3633 ++ goto alloc_err;
3634 ++
3635 + r = device_property_read_u8_array(dev, FDP_DP_FW_VSC_CFG_NAME,
3636 + *fw_vsc_cfg, len);
3637 +
3638 +@@ -260,6 +263,7 @@ vsc_read_err:
3639 + *fw_vsc_cfg = NULL;
3640 + }
3641 +
3642 ++alloc_err:
3643 + dev_dbg(dev, "Clock type: %d, clock frequency: %d, VSC: %s",
3644 + *clock_type, *clock_freq, *fw_vsc_cfg != NULL ? "yes" : "no");
3645 + }
3646 +diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
3647 +index 09c7829e95c4b..382793e73a60a 100644
3648 +--- a/drivers/platform/mellanox/Kconfig
3649 ++++ b/drivers/platform/mellanox/Kconfig
3650 +@@ -16,17 +16,17 @@ if MELLANOX_PLATFORM
3651 +
3652 + config MLXREG_HOTPLUG
3653 + tristate "Mellanox platform hotplug driver support"
3654 +- depends on REGMAP
3655 + depends on HWMON
3656 + depends on I2C
3657 ++ select REGMAP
3658 + help
3659 + This driver handles hot-plug events for the power suppliers, power
3660 + cables and fans on the wide range Mellanox IB and Ethernet systems.
3661 +
3662 + config MLXREG_IO
3663 + tristate "Mellanox platform register access driver support"
3664 +- depends on REGMAP
3665 + depends on HWMON
3666 ++ select REGMAP
3667 + help
3668 + This driver allows access to Mellanox programmable device register
3669 + space through sysfs interface. The sets of registers for sysfs access
3670 +@@ -36,9 +36,9 @@ config MLXREG_IO
3671 +
3672 + config MLXREG_LC
3673 + tristate "Mellanox line card platform driver support"
3674 +- depends on REGMAP
3675 + depends on HWMON
3676 + depends on I2C
3677 ++ select REGMAP
3678 + help
3679 + This driver provides support for the Mellanox MSN4800-XX line cards,
3680 + which are the part of MSN4800 Ethernet modular switch systems
3681 +@@ -80,10 +80,9 @@ config MLXBF_PMC
3682 +
3683 + config NVSW_SN2201
3684 + tristate "Nvidia SN2201 platform driver support"
3685 +- depends on REGMAP
3686 + depends on HWMON
3687 + depends on I2C
3688 +- depends on REGMAP_I2C
3689 ++ select REGMAP_I2C
3690 + help
3691 + This driver provides support for the Nvidia SN2201 platform.
3692 + The SN2201 is a highly integrated for one rack unit system with
3693 +diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
3694 +index f5312f51de19f..b02a8125bc7d5 100644
3695 +--- a/drivers/platform/x86/Kconfig
3696 ++++ b/drivers/platform/x86/Kconfig
3697 +@@ -997,7 +997,8 @@ config SERIAL_MULTI_INSTANTIATE
3698 +
3699 + config MLX_PLATFORM
3700 + tristate "Mellanox Technologies platform support"
3701 +- depends on I2C && REGMAP
3702 ++ depends on I2C
3703 ++ select REGMAP
3704 + help
3705 + This option enables system support for the Mellanox Technologies
3706 + platform. The Mellanox systems provide data center networking
3707 +diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
3708 +index 9857dba09c951..85e66574ec414 100644
3709 +--- a/drivers/scsi/hosts.c
3710 ++++ b/drivers/scsi/hosts.c
3711 +@@ -181,6 +181,7 @@ void scsi_remove_host(struct Scsi_Host *shost)
3712 + scsi_forget_host(shost);
3713 + mutex_unlock(&shost->scan_mutex);
3714 + scsi_proc_host_rm(shost);
3715 ++ scsi_proc_hostdir_rm(shost->hostt);
3716 +
3717 + /*
3718 + * New SCSI devices cannot be attached anymore because of the SCSI host
3719 +@@ -340,6 +341,7 @@ static void scsi_host_dev_release(struct device *dev)
3720 + struct Scsi_Host *shost = dev_to_shost(dev);
3721 + struct device *parent = dev->parent;
3722 +
3723 ++ /* In case scsi_remove_host() has not been called. */
3724 + scsi_proc_hostdir_rm(shost->hostt);
3725 +
3726 + /* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
3727 +diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
3728 +index 4919ea54b8277..2ef9d41fc6f42 100644
3729 +--- a/drivers/scsi/megaraid/megaraid_sas.h
3730 ++++ b/drivers/scsi/megaraid/megaraid_sas.h
3731 +@@ -1519,6 +1519,8 @@ struct megasas_ctrl_info {
3732 + #define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \
3733 + MEGASAS_MAX_DEV_PER_CHANNEL)
3734 +
3735 ++#define MEGASAS_MAX_SUPPORTED_LD_IDS 240
3736 ++
3737 + #define MEGASAS_MAX_SECTORS (2*1024)
3738 + #define MEGASAS_MAX_SECTORS_IEEE (2*128)
3739 + #define MEGASAS_DBG_LVL 1
3740 +diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
3741 +index da1cad1ee1238..4463a538102ad 100644
3742 +--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
3743 ++++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
3744 +@@ -358,7 +358,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
3745 + ld = MR_TargetIdToLdGet(i, drv_map);
3746 +
3747 + /* For non existing VDs, iterate to next VD*/
3748 +- if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
3749 ++ if (ld >= MEGASAS_MAX_SUPPORTED_LD_IDS)
3750 + continue;
3751 +
3752 + raid = MR_LdRaidGet(ld, drv_map);
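
MR_TargetIdToLdGet() can return sentinel ids (0xFF and similar) for deleted or hidden VDs, and the old ">= MAX_LOGICAL_DRIVES_EXT - 1" test still let ids in the 240..254 range through to MR_LdRaidGet(). Bounding the check by the 240 actually supported LD ids keeps the map lookup in range. A generic userspace sketch of the guard (constants are illustrative):

#include <stdio.h>

/* Illustrative stand-in for MEGASAS_MAX_SUPPORTED_LD_IDS. */
#define MAX_SUPPORTED_LD_IDS 240

static int raid_map[MAX_SUPPORTED_LD_IDS]; /* sized for valid ids only */

/* Reject sentinel/out-of-range ids (e.g. 0xFF for a deleted VD)
 * before using them as an array index. */
static int ld_lookup(unsigned int ld)
{
    if (ld >= MAX_SUPPORTED_LD_IDS)
        return -1; /* caller iterates to the next VD */
    return raid_map[ld];
}

int main(void)
{
    printf("%d\n", ld_lookup(0xFF)); /* -1: rejected, no OOB read */
    printf("%d\n", ld_lookup(3));    /*  0: valid slot            */
    return 0;
}
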
3753 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
3754 +index eb76ba0550216..e934779bf05c8 100644
3755 +--- a/drivers/scsi/sd.c
3756 ++++ b/drivers/scsi/sd.c
3757 +@@ -2933,8 +2933,13 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
3758 + }
3759 +
3760 + if (sdkp->device->type == TYPE_ZBC) {
3761 +- /* Host-managed */
3762 ++ /*
3763 ++ * Host-managed: Per ZBC and ZAC specifications, writes in
3764 ++ * sequential write required zones of host-managed devices must
3765 ++ * be aligned to the device physical block size.
3766 ++ */
3767 + disk_set_zoned(sdkp->disk, BLK_ZONED_HM);
3768 ++ blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
3769 + } else {
3770 + sdkp->zoned = zoned;
3771 + if (sdkp->zoned == 1) {
3772 +diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
3773 +index bd15624c63228..4c35b4a916355 100644
3774 +--- a/drivers/scsi/sd_zbc.c
3775 ++++ b/drivers/scsi/sd_zbc.c
3776 +@@ -956,14 +956,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
3777 + disk_set_max_active_zones(disk, 0);
3778 + nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
3779 +
3780 +- /*
3781 +- * Per ZBC and ZAC specifications, writes in sequential write required
3782 +- * zones of host-managed devices must be aligned to the device physical
3783 +- * block size.
3784 +- */
3785 +- if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
3786 +- blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
3787 +-
3788 + sdkp->early_zone_info.nr_zones = nr_zones;
3789 + sdkp->early_zone_info.zone_blocks = zone_blocks;
3790 +
3791 +diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
3792 +index 3ac73691fbb54..54fc226e1cdf6 100644
3793 +--- a/drivers/spi/spi-intel.c
3794 ++++ b/drivers/spi/spi-intel.c
3795 +@@ -1366,14 +1366,14 @@ static int intel_spi_populate_chip(struct intel_spi *ispi)
3796 + if (!spi_new_device(ispi->master, &chip))
3797 + return -ENODEV;
3798 +
3799 +- /* Add the second chip if present */
3800 +- if (ispi->master->num_chipselect < 2)
3801 +- return 0;
3802 +-
3803 + ret = intel_spi_read_desc(ispi);
3804 + if (ret)
3805 + return ret;
3806 +
3807 ++ /* Add the second chip if present */
3808 ++ if (ispi->master->num_chipselect < 2)
3809 ++ return 0;
3810 ++
3811 + chip.platform_data = NULL;
3812 + chip.chip_select = 1;
3813 +
3814 +diff --git a/drivers/staging/rtl8723bs/include/rtw_security.h b/drivers/staging/rtl8723bs/include/rtw_security.h
3815 +index a68b738584623..7587fa8885274 100644
3816 +--- a/drivers/staging/rtl8723bs/include/rtw_security.h
3817 ++++ b/drivers/staging/rtl8723bs/include/rtw_security.h
3818 +@@ -107,13 +107,13 @@ struct security_priv {
3819 +
3820 + u32 dot118021XGrpPrivacy; /* This specify the privacy algthm. used for Grp key */
3821 + u32 dot118021XGrpKeyid; /* key id used for Grp Key (tx key index) */
3822 +- union Keytype dot118021XGrpKey[BIP_MAX_KEYID]; /* 802.1x Group Key, for inx0 and inx1 */
3823 +- union Keytype dot118021XGrptxmickey[BIP_MAX_KEYID];
3824 +- union Keytype dot118021XGrprxmickey[BIP_MAX_KEYID];
3825 ++ union Keytype dot118021XGrpKey[BIP_MAX_KEYID + 1]; /* 802.1x Group Key, for inx0 and inx1 */
3826 ++ union Keytype dot118021XGrptxmickey[BIP_MAX_KEYID + 1];
3827 ++ union Keytype dot118021XGrprxmickey[BIP_MAX_KEYID + 1];
3828 + union pn48 dot11Grptxpn; /* PN48 used for Grp Key xmit. */
3829 + union pn48 dot11Grprxpn; /* PN48 used for Grp Key recv. */
3830 + u32 dot11wBIPKeyid; /* key id used for BIP Key (tx key index) */
3831 +- union Keytype dot11wBIPKey[6]; /* BIP Key, for index4 and index5 */
3832 ++ union Keytype dot11wBIPKey[BIP_MAX_KEYID + 1]; /* BIP Key, for index4 and index5 */
3833 + union pn48 dot11wBIPtxpn; /* PN48 used for Grp Key xmit. */
3834 + union pn48 dot11wBIPrxpn; /* PN48 used for Grp Key recv. */
3835 +
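
The rtl8723bs sizing change is a classic inclusive-bound fix: the ioctl paths in the next two files now accept key ids up to and including BIP_MAX_KEYID, so every array indexed by that id needs BIP_MAX_KEYID + 1 slots. A tiny demonstration of the rule (the constant's value here is illustrative):

#include <stdio.h>

#define BIP_MAX_KEYID 5 /* illustrative: highest valid key index */

int main(void)
{
    /* An array indexed 0..BIP_MAX_KEYID needs MAX + 1 slots;
     * declaring it key[BIP_MAX_KEYID] would make the write to
     * key[BIP_MAX_KEYID] an out-of-bounds access. */
    int key[BIP_MAX_KEYID + 1];
    int idx;

    for (idx = 0; idx <= BIP_MAX_KEYID; idx++)
        key[idx] = idx;
    printf("highest valid index: %d\n", key[BIP_MAX_KEYID]);
    return 0;
}
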
3836 +diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
3837 +index 6aeb169c6ebf0..5c738011322fc 100644
3838 +--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
3839 ++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
3840 +@@ -350,7 +350,7 @@ int rtw_cfg80211_check_bss(struct adapter *padapter)
3841 + bss = cfg80211_get_bss(padapter->rtw_wdev->wiphy, notify_channel,
3842 + pnetwork->mac_address, pnetwork->ssid.ssid,
3843 + pnetwork->ssid.ssid_length,
3844 +- WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
3845 ++ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
3846 +
3847 + cfg80211_put_bss(padapter->rtw_wdev->wiphy, bss);
3848 +
3849 +@@ -711,6 +711,7 @@ exit:
3850 + static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
3851 + {
3852 + int ret = 0;
3853 ++ u8 max_idx;
3854 + u32 wep_key_idx, wep_key_len;
3855 + struct adapter *padapter = rtw_netdev_priv(dev);
3856 + struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
3857 +@@ -724,26 +725,29 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
3858 + goto exit;
3859 + }
3860 +
3861 +- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
3862 +- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
3863 +- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
3864 +- if (param->u.crypt.idx >= WEP_KEYS
3865 +- || param->u.crypt.idx >= BIP_MAX_KEYID) {
3866 +- ret = -EINVAL;
3867 +- goto exit;
3868 +- }
3869 +- } else {
3870 +- {
3871 ++ if (param->sta_addr[0] != 0xff || param->sta_addr[1] != 0xff ||
3872 ++ param->sta_addr[2] != 0xff || param->sta_addr[3] != 0xff ||
3873 ++ param->sta_addr[4] != 0xff || param->sta_addr[5] != 0xff) {
3874 + ret = -EINVAL;
3875 + goto exit;
3876 + }
3877 ++
3878 ++ if (strcmp(param->u.crypt.alg, "WEP") == 0)
3879 ++ max_idx = WEP_KEYS - 1;
3880 ++ else
3881 ++ max_idx = BIP_MAX_KEYID;
3882 ++
3883 ++ if (param->u.crypt.idx > max_idx) {
3884 ++ netdev_err(dev, "Error crypt.idx %d > %d\n", param->u.crypt.idx, max_idx);
3885 ++ ret = -EINVAL;
3886 ++ goto exit;
3887 + }
3888 +
3889 + if (strcmp(param->u.crypt.alg, "WEP") == 0) {
3890 + wep_key_idx = param->u.crypt.idx;
3891 + wep_key_len = param->u.crypt.key_len;
3892 +
3893 +- if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) {
3894 ++ if (wep_key_len <= 0) {
3895 + ret = -EINVAL;
3896 + goto exit;
3897 + }
3898 +@@ -1135,8 +1139,8 @@ void rtw_cfg80211_unlink_bss(struct adapter *padapter, struct wlan_network *pnet
3899 +
3900 + bss = cfg80211_get_bss(wiphy, NULL/*notify_channel*/,
3901 + select_network->mac_address, select_network->ssid.ssid,
3902 +- select_network->ssid.ssid_length, 0/*WLAN_CAPABILITY_ESS*/,
3903 +- 0/*WLAN_CAPABILITY_ESS*/);
3904 ++ select_network->ssid.ssid_length, IEEE80211_BSS_TYPE_ANY,
3905 ++ IEEE80211_PRIVACY_ANY);
3906 +
3907 + if (bss) {
3908 + cfg80211_unlink_bss(wiphy, bss);
3909 +diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
3910 +index 30374a820496e..40a3157fb7359 100644
3911 +--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
3912 ++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
3913 +@@ -46,6 +46,7 @@ static int wpa_set_auth_algs(struct net_device *dev, u32 value)
3914 + static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
3915 + {
3916 + int ret = 0;
3917 ++ u8 max_idx;
3918 + u32 wep_key_idx, wep_key_len, wep_total_len;
3919 + struct ndis_802_11_wep *pwep = NULL;
3920 + struct adapter *padapter = rtw_netdev_priv(dev);
3921 +@@ -60,19 +61,22 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
3922 + goto exit;
3923 + }
3924 +
3925 +- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
3926 +- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
3927 +- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
3928 +- if (param->u.crypt.idx >= WEP_KEYS ||
3929 +- param->u.crypt.idx >= BIP_MAX_KEYID) {
3930 +- ret = -EINVAL;
3931 +- goto exit;
3932 +- }
3933 +- } else {
3934 +- {
3935 +- ret = -EINVAL;
3936 +- goto exit;
3937 +- }
3938 ++ if (param->sta_addr[0] != 0xff || param->sta_addr[1] != 0xff ||
3939 ++ param->sta_addr[2] != 0xff || param->sta_addr[3] != 0xff ||
3940 ++ param->sta_addr[4] != 0xff || param->sta_addr[5] != 0xff) {
3941 ++ ret = -EINVAL;
3942 ++ goto exit;
3943 ++ }
3944 ++
3945 ++ if (strcmp(param->u.crypt.alg, "WEP") == 0)
3946 ++ max_idx = WEP_KEYS - 1;
3947 ++ else
3948 ++ max_idx = BIP_MAX_KEYID;
3949 ++
3950 ++ if (param->u.crypt.idx > max_idx) {
3951 ++ netdev_err(dev, "Error crypt.idx %d > %d\n", param->u.crypt.idx, max_idx);
3952 ++ ret = -EINVAL;
3953 ++ goto exit;
3954 + }
3955 +
3956 + if (strcmp(param->u.crypt.alg, "WEP") == 0) {
3957 +@@ -84,9 +88,6 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
3958 + wep_key_idx = param->u.crypt.idx;
3959 + wep_key_len = param->u.crypt.key_len;
3960 +
3961 +- if (wep_key_idx > WEP_KEYS)
3962 +- return -EINVAL;
3963 +-
3964 + if (wep_key_len > 0) {
3965 + wep_key_len = wep_key_len <= 5 ? 5 : 13;
3966 + wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material);
3967 +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
3968 +index deebc8ddbd932..4b69945755e4f 100644
3969 +--- a/fs/btrfs/block-group.c
3970 ++++ b/fs/btrfs/block-group.c
3971 +@@ -1616,7 +1616,8 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
3972 +
3973 + btrfs_info(fs_info,
3974 + "reclaiming chunk %llu with %llu%% used %llu%% unusable",
3975 +- bg->start, div_u64(bg->used * 100, bg->length),
3976 ++ bg->start,
3977 ++ div64_u64(bg->used * 100, bg->length),
3978 + div64_u64(zone_unusable * 100, bg->length));
3979 + trace_btrfs_reclaim_block_group(bg);
3980 + ret = btrfs_relocate_chunk(fs_info, bg->start);
3981 +diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
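
div_u64() takes a u32 divisor, so passing the u64 bg->length silently truncated it; on block groups larger than 4 GiB that yields a bogus percentage (or a divide-by-zero when the low 32 bits happen to be zero). div64_u64() divides u64 by u64. A userspace sketch of the difference (these helpers are stand-ins; the kernel needs them because 32-bit targets lack a native 64-by-64 divide):

#include <stdio.h>
#include <stdint.h>

static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
    return dividend / divisor; /* a u64 divisor passed by the caller
                                  is silently truncated to 32 bits */
}

static uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
    return dividend / divisor;
}

int main(void)
{
    uint64_t used   = 1ULL << 33;        /* 8 GiB used          */
    uint64_t length = (1ULL << 34) + 1;  /* >4 GiB block group  */

    /* Truncated divisor: (uint32_t)length == 1, giving nonsense. */
    printf("div_u64:   %llu%%\n",
           (unsigned long long)div_u64(used * 100, (uint32_t)length));
    printf("div64_u64: %llu%%\n",
           (unsigned long long)div64_u64(used * 100, length));
    return 0;
}
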
3982 +index 6092a4eedc923..b8ae02aa632e3 100644
3983 +--- a/fs/btrfs/extent_map.c
3984 ++++ b/fs/btrfs/extent_map.c
3985 +@@ -760,7 +760,13 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
3986 + goto next;
3987 + }
3988 +
3989 ++ flags = em->flags;
3990 + clear_bit(EXTENT_FLAG_PINNED, &em->flags);
3991 ++ /*
3992 ++ * In case we split the extent map, we want to preserve the
3993 ++ * EXTENT_FLAG_LOGGING flag on our extent map, but we don't want
3994 ++ * it on the new extent maps.
3995 ++ */
3996 + clear_bit(EXTENT_FLAG_LOGGING, &flags);
3997 + modified = !list_empty(&em->list);
3998 +
3999 +@@ -771,7 +777,6 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
4000 + if (em->start >= start && em_end <= end)
4001 + goto remove_em;
4002 +
4003 +- flags = em->flags;
4004 + gen = em->generation;
4005 + compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4006 +
4007 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
4008 +index 05f9cbbf6e1ef..f02b8cbd6ec41 100644
4009 +--- a/fs/btrfs/volumes.c
4010 ++++ b/fs/btrfs/volumes.c
4011 +@@ -6739,7 +6739,7 @@ static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
4012 +
4013 + if (btrfs_op(bio) == BTRFS_MAP_WRITE)
4014 + btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
4015 +- if (!(bio->bi_opf & REQ_RAHEAD))
4016 ++ else if (!(bio->bi_opf & REQ_RAHEAD))
4017 + btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
4018 + if (bio->bi_opf & REQ_PREFLUSH)
4019 + btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS);
4020 +diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
4021 +index eb1a0de9dd553..bc4475f6c0827 100644
4022 +--- a/fs/cifs/cifsproto.h
4023 ++++ b/fs/cifs/cifsproto.h
4024 +@@ -664,11 +664,21 @@ static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
4025 + int match_target_ip(struct TCP_Server_Info *server,
4026 + const char *share, size_t share_len,
4027 + bool *result);
4028 +-
4029 +-int cifs_dfs_query_info_nonascii_quirk(const unsigned int xid,
4030 +- struct cifs_tcon *tcon,
4031 +- struct cifs_sb_info *cifs_sb,
4032 +- const char *dfs_link_path);
4033 ++int cifs_inval_name_dfs_link_error(const unsigned int xid,
4034 ++ struct cifs_tcon *tcon,
4035 ++ struct cifs_sb_info *cifs_sb,
4036 ++ const char *full_path,
4037 ++ bool *islink);
4038 ++#else
4039 ++static inline int cifs_inval_name_dfs_link_error(const unsigned int xid,
4040 ++ struct cifs_tcon *tcon,
4041 ++ struct cifs_sb_info *cifs_sb,
4042 ++ const char *full_path,
4043 ++ bool *islink)
4044 ++{
4045 ++ *islink = false;
4046 ++ return 0;
4047 ++}
4048 + #endif
4049 +
4050 + static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
4051 +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
4052 +index 062175994e879..4e54736a06996 100644
4053 +--- a/fs/cifs/misc.c
4054 ++++ b/fs/cifs/misc.c
4055 +@@ -21,6 +21,7 @@
4056 + #include "cifsfs.h"
4057 + #ifdef CONFIG_CIFS_DFS_UPCALL
4058 + #include "dns_resolve.h"
4059 ++#include "dfs_cache.h"
4060 + #endif
4061 + #include "fs_context.h"
4062 + #include "cached_dir.h"
4063 +@@ -1314,4 +1315,70 @@ int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
4064 + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
4065 + return 0;
4066 + }
4067 ++
4068 ++/*
4069 ++ * Handle weird Windows SMB server behaviour. It responds with
4070 ++ * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request for
4071 ++ * "\<server>\<dfsname>\<linkpath>" DFS reference, where <dfsname> contains
4072 ++ * non-ASCII unicode symbols.
4073 ++ */
4074 ++int cifs_inval_name_dfs_link_error(const unsigned int xid,
4075 ++ struct cifs_tcon *tcon,
4076 ++ struct cifs_sb_info *cifs_sb,
4077 ++ const char *full_path,
4078 ++ bool *islink)
4079 ++{
4080 ++ struct cifs_ses *ses = tcon->ses;
4081 ++ size_t len;
4082 ++ char *path;
4083 ++ char *ref_path;
4084 ++
4085 ++ *islink = false;
4086 ++
4087 ++ /*
4088 ++ * Fast path - skip check when @full_path doesn't have a prefix path to
4089 ++ * look up or tcon is not DFS.
4090 ++ */
4091 ++ if (strlen(full_path) < 2 || !cifs_sb ||
4092 ++ (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
4093 ++ !is_tcon_dfs(tcon) || !ses->server->origin_fullpath)
4094 ++ return 0;
4095 ++
4096 ++ /*
4097 ++	 * Slow path - tcon is DFS and @full_path has a prefix path, so attempt
4098 ++	 * to get a referral to figure out whether it is a DFS link.
4099 ++ */
4100 ++ len = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1) + strlen(full_path) + 1;
4101 ++ path = kmalloc(len, GFP_KERNEL);
4102 ++ if (!path)
4103 ++ return -ENOMEM;
4104 ++
4105 ++ scnprintf(path, len, "%s%s", tcon->tree_name, full_path);
4106 ++ ref_path = dfs_cache_canonical_path(path + 1, cifs_sb->local_nls,
4107 ++ cifs_remap(cifs_sb));
4108 ++ kfree(path);
4109 ++
4110 ++ if (IS_ERR(ref_path)) {
4111 ++ if (PTR_ERR(ref_path) != -EINVAL)
4112 ++ return PTR_ERR(ref_path);
4113 ++ } else {
4114 ++ struct dfs_info3_param *refs = NULL;
4115 ++ int num_refs = 0;
4116 ++
4117 ++ /*
4118 ++ * XXX: we are not using dfs_cache_find() here because we might
4119 ++		 * end up filling the entire DFS cache and thus potentially
4120 ++ * removing cached DFS targets that the client would eventually
4121 ++ * need during failover.
4122 ++ */
4123 ++ if (ses->server->ops->get_dfs_refer &&
4124 ++ !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
4125 ++ &num_refs, cifs_sb->local_nls,
4126 ++ cifs_remap(cifs_sb)))
4127 ++ *islink = refs[0].server_type == DFS_TYPE_LINK;
4128 ++ free_dfs_info_array(refs, num_refs);
4129 ++ kfree(ref_path);
4130 ++ }
4131 ++ return 0;
4132 ++}
4133 + #endif
4134 +diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
4135 +index e1491440e8f1f..442718cf61b86 100644
4136 +--- a/fs/cifs/smb2inode.c
4137 ++++ b/fs/cifs/smb2inode.c
4138 +@@ -511,12 +511,13 @@ int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
4139 + struct cifs_sb_info *cifs_sb, const char *full_path,
4140 + struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse)
4141 + {
4142 +- int rc;
4143 + __u32 create_options = 0;
4144 + struct cifsFileInfo *cfile;
4145 + struct cached_fid *cfid = NULL;
4146 + struct kvec err_iov[3] = {};
4147 + int err_buftype[3] = {};
4148 ++ bool islink;
4149 ++ int rc, rc2;
4150 +
4151 + *adjust_tz = false;
4152 + *reparse = false;
4153 +@@ -563,15 +564,15 @@ int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
4154 + create_options, ACL_NO_MODE, data,
4155 + SMB2_OP_QUERY_INFO, cfile, NULL, NULL);
4156 + goto out;
4157 +- } else if (rc != -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) &&
4158 +- hdr->Status == STATUS_OBJECT_NAME_INVALID) {
4159 +- /*
4160 +- * Handle weird Windows SMB server behaviour. It responds with
4161 +- * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request
4162 +- * for "\<server>\<dfsname>\<linkpath>" DFS reference,
4163 +- * where <dfsname> contains non-ASCII unicode symbols.
4164 +- */
4165 +- rc = -EREMOTE;
4166 ++ } else if (rc != -EREMOTE && hdr->Status == STATUS_OBJECT_NAME_INVALID) {
4167 ++ rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb,
4168 ++ full_path, &islink);
4169 ++ if (rc2) {
4170 ++ rc = rc2;
4171 ++ goto out;
4172 ++ }
4173 ++ if (islink)
4174 ++ rc = -EREMOTE;
4175 + }
4176 + if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb &&
4177 + (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
4178 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
4179 +index 6da495f593e17..0424876d22e5a 100644
4180 +--- a/fs/cifs/smb2ops.c
4181 ++++ b/fs/cifs/smb2ops.c
4182 +@@ -796,7 +796,6 @@ static int
4183 + smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
4184 + struct cifs_sb_info *cifs_sb, const char *full_path)
4185 + {
4186 +- int rc;
4187 + __le16 *utf16_path;
4188 + __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
4189 + int err_buftype = CIFS_NO_BUFFER;
4190 +@@ -804,6 +803,8 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
4191 + struct kvec err_iov = {};
4192 + struct cifs_fid fid;
4193 + struct cached_fid *cfid;
4194 ++ bool islink;
4195 ++ int rc, rc2;
4196 +
4197 + rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid);
4198 + if (!rc) {
4199 +@@ -833,15 +834,17 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
4200 +
4201 + if (unlikely(!hdr || err_buftype == CIFS_NO_BUFFER))
4202 + goto out;
4203 +- /*
4204 +- * Handle weird Windows SMB server behaviour. It responds with
4205 +- * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request
4206 +- * for "\<server>\<dfsname>\<linkpath>" DFS reference,
4207 +- * where <dfsname> contains non-ASCII unicode symbols.
4208 +- */
4209 +- if (rc != -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) &&
4210 +- hdr->Status == STATUS_OBJECT_NAME_INVALID)
4211 +- rc = -EREMOTE;
4212 ++
4213 ++ if (rc != -EREMOTE && hdr->Status == STATUS_OBJECT_NAME_INVALID) {
4214 ++ rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb,
4215 ++ full_path, &islink);
4216 ++ if (rc2) {
4217 ++ rc = rc2;
4218 ++ goto out;
4219 ++ }
4220 ++ if (islink)
4221 ++ rc = -EREMOTE;
4222 ++ }
4223 + if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb &&
4224 + (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
4225 + rc = -EOPNOTSUPP;
4226 +diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
4227 +index 94a72ede57646..0b1bc24536ceb 100644
4228 +--- a/fs/dlm/lock.c
4229 ++++ b/fs/dlm/lock.c
4230 +@@ -3611,9 +3611,10 @@ static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
4231 + /* further lowcomms enhancements or alternate implementations may make
4232 + the return value from this function useful at some point */
4233 +
4234 +-static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
4235 ++static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms,
4236 ++ const void *name, int namelen)
4237 + {
4238 +- dlm_midcomms_commit_mhandle(mh);
4239 ++ dlm_midcomms_commit_mhandle(mh, name, namelen);
4240 + return 0;
4241 + }
4242 +
4243 +@@ -3679,7 +3680,7 @@ static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
4244 +
4245 + send_args(r, lkb, ms);
4246 +
4247 +- error = send_message(mh, ms);
4248 ++ error = send_message(mh, ms, r->res_name, r->res_length);
4249 + if (error)
4250 + goto fail;
4251 + return 0;
4252 +@@ -3742,7 +3743,7 @@ static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
4253 +
4254 + ms->m_result = 0;
4255 +
4256 +- error = send_message(mh, ms);
4257 ++ error = send_message(mh, ms, r->res_name, r->res_length);
4258 + out:
4259 + return error;
4260 + }
4261 +@@ -3763,7 +3764,7 @@ static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
4262 +
4263 + ms->m_bastmode = cpu_to_le32(mode);
4264 +
4265 +- error = send_message(mh, ms);
4266 ++ error = send_message(mh, ms, r->res_name, r->res_length);
4267 + out:
4268 + return error;
4269 + }
4270 +@@ -3786,7 +3787,7 @@ static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
4271 +
4272 + send_args(r, lkb, ms);
4273 +
4274 +- error = send_message(mh, ms);
4275 ++ error = send_message(mh, ms, r->res_name, r->res_length);
4276 + if (error)
4277 + goto fail;
4278 + return 0;
4279 +@@ -3811,7 +3812,7 @@ static int send_remove(struct dlm_rsb *r)
4280 + memcpy(ms->m_extra, r->res_name, r->res_length);
4281 + ms->m_hash = cpu_to_le32(r->res_hash);
4282 +
4283 +- error = send_message(mh, ms);
4284 ++ error = send_message(mh, ms, r->res_name, r->res_length);
4285 + out:
4286 + return error;
4287 + }
4288 +@@ -3833,7 +3834,7 @@ static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
4289 +
4290 + ms->m_result = cpu_to_le32(to_dlm_errno(rv));
4291 +
4292 +- error = send_message(mh, ms);
4293 ++ error = send_message(mh, ms, r->res_name, r->res_length);
4294 + out:
4295 + return error;
4296 + }
4297 +@@ -3874,7 +3875,7 @@ static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
4298 + ms->m_result = cpu_to_le32(to_dlm_errno(rv));
4299 + ms->m_nodeid = cpu_to_le32(ret_nodeid);
4300 +
4301 +- error = send_message(mh, ms);
4302 ++ error = send_message(mh, ms, ms_in->m_extra, receive_extralen(ms_in));
4303 + out:
4304 + return error;
4305 + }
4306 +@@ -4044,66 +4045,6 @@ out:
4307 + return error;
4308 + }
4309 +
4310 +-static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
4311 +-{
4312 +- char name[DLM_RESNAME_MAXLEN + 1];
4313 +- struct dlm_message *ms;
4314 +- struct dlm_mhandle *mh;
4315 +- struct dlm_rsb *r;
4316 +- uint32_t hash, b;
4317 +- int rv, dir_nodeid;
4318 +-
4319 +- memset(name, 0, sizeof(name));
4320 +- memcpy(name, ms_name, len);
4321 +-
4322 +- hash = jhash(name, len, 0);
4323 +- b = hash & (ls->ls_rsbtbl_size - 1);
4324 +-
4325 +- dir_nodeid = dlm_hash2nodeid(ls, hash);
4326 +-
4327 +- log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name);
4328 +-
4329 +- spin_lock(&ls->ls_rsbtbl[b].lock);
4330 +- rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4331 +- if (!rv) {
4332 +- spin_unlock(&ls->ls_rsbtbl[b].lock);
4333 +- log_error(ls, "repeat_remove on keep %s", name);
4334 +- return;
4335 +- }
4336 +-
4337 +- rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4338 +- if (!rv) {
4339 +- spin_unlock(&ls->ls_rsbtbl[b].lock);
4340 +- log_error(ls, "repeat_remove on toss %s", name);
4341 +- return;
4342 +- }
4343 +-
4344 +- /* use ls->remove_name2 to avoid conflict with shrink? */
4345 +-
4346 +- spin_lock(&ls->ls_remove_spin);
4347 +- ls->ls_remove_len = len;
4348 +- memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
4349 +- spin_unlock(&ls->ls_remove_spin);
4350 +- spin_unlock(&ls->ls_rsbtbl[b].lock);
4351 +-
4352 +- rv = _create_message(ls, sizeof(struct dlm_message) + len,
4353 +- dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
4354 +- if (rv)
4355 +- goto out;
4356 +-
4357 +- memcpy(ms->m_extra, name, len);
4358 +- ms->m_hash = cpu_to_le32(hash);
4359 +-
4360 +- send_message(mh, ms);
4361 +-
4362 +-out:
4363 +- spin_lock(&ls->ls_remove_spin);
4364 +- ls->ls_remove_len = 0;
4365 +- memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
4366 +- spin_unlock(&ls->ls_remove_spin);
4367 +- wake_up(&ls->ls_remove_wait);
4368 +-}
4369 +-
4370 + static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
4371 + {
4372 + struct dlm_lkb *lkb;
4373 +@@ -4173,25 +4114,11 @@ static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
4374 + ENOTBLK request failures when the lookup reply designating us
4375 + as master is delayed. */
4376 +
4377 +- /* We could repeatedly return -EBADR here if our send_remove() is
4378 +- delayed in being sent/arriving/being processed on the dir node.
4379 +- Another node would repeatedly lookup up the master, and the dir
4380 +- node would continue returning our nodeid until our send_remove
4381 +- took effect.
4382 +-
4383 +- We send another remove message in case our previous send_remove
4384 +- was lost/ignored/missed somehow. */
4385 +-
4386 + if (error != -ENOTBLK) {
4387 + log_limit(ls, "receive_request %x from %d %d",
4388 + le32_to_cpu(ms->m_lkid), from_nodeid, error);
4389 + }
4390 +
4391 +- if (namelen && error == -EBADR) {
4392 +- send_repeat_remove(ls, ms->m_extra, namelen);
4393 +- msleep(1000);
4394 +- }
4395 +-
4396 + setup_stub_lkb(ls, ms);
4397 + send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4398 + return error;
4399 +@@ -6374,7 +6301,7 @@ static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
4400 + ms->m_nodeid = cpu_to_le32(nodeid);
4401 + ms->m_pid = cpu_to_le32(pid);
4402 +
4403 +- return send_message(mh, ms);
4404 ++ return send_message(mh, ms, NULL, 0);
4405 + }
4406 +
4407 + int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
4408 +diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
4409 +index bae050df7abff..7b29ea7bfb416 100644
4410 +--- a/fs/dlm/lockspace.c
4411 ++++ b/fs/dlm/lockspace.c
4412 +@@ -17,7 +17,6 @@
4413 + #include "recoverd.h"
4414 + #include "dir.h"
4415 + #include "midcomms.h"
4416 +-#include "lowcomms.h"
4417 + #include "config.h"
4418 + #include "memory.h"
4419 + #include "lock.h"
4420 +@@ -382,23 +381,23 @@ static int threads_start(void)
4421 + {
4422 + int error;
4423 +
4424 +- error = dlm_scand_start();
4425 ++ /* Thread for sending/receiving messages for all lockspace's */
4426 ++ error = dlm_midcomms_start();
4427 + if (error) {
4428 +- log_print("cannot start dlm_scand thread %d", error);
4429 ++ log_print("cannot start dlm midcomms %d", error);
4430 + goto fail;
4431 + }
4432 +
4433 +- /* Thread for sending/receiving messages for all lockspace's */
4434 +- error = dlm_midcomms_start();
4435 ++ error = dlm_scand_start();
4436 + if (error) {
4437 +- log_print("cannot start dlm lowcomms %d", error);
4438 +- goto scand_fail;
4439 ++ log_print("cannot start dlm_scand thread %d", error);
4440 ++ goto midcomms_fail;
4441 + }
4442 +
4443 + return 0;
4444 +
4445 +- scand_fail:
4446 +- dlm_scand_stop();
4447 ++ midcomms_fail:
4448 ++ dlm_midcomms_stop();
4449 + fail:
4450 + return error;
4451 + }
4452 +@@ -726,7 +725,7 @@ static int __dlm_new_lockspace(const char *name, const char *cluster,
4453 + if (!ls_count) {
4454 + dlm_scand_stop();
4455 + dlm_midcomms_shutdown();
4456 +- dlm_lowcomms_stop();
4457 ++ dlm_midcomms_stop();
4458 + }
4459 + out:
4460 + mutex_unlock(&ls_lock);
4461 +@@ -929,7 +928,7 @@ int dlm_release_lockspace(void *lockspace, int force)
4462 + if (!error)
4463 + ls_count--;
4464 + if (!ls_count)
4465 +- dlm_lowcomms_stop();
4466 ++ dlm_midcomms_stop();
4467 + mutex_unlock(&ls_lock);
4468 +
4469 + return error;
4470 +diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
4471 +index 871d4e9f49fb6..6ed09edabea0c 100644
4472 +--- a/fs/dlm/lowcomms.c
4473 ++++ b/fs/dlm/lowcomms.c
4474 +@@ -1982,10 +1982,6 @@ static const struct dlm_proto_ops dlm_sctp_ops = {
4475 + int dlm_lowcomms_start(void)
4476 + {
4477 + int error = -EINVAL;
4478 +- int i;
4479 +-
4480 +- for (i = 0; i < CONN_HASH_SIZE; i++)
4481 +- INIT_HLIST_HEAD(&connection_hash[i]);
4482 +
4483 + init_local();
4484 + if (!dlm_local_count) {
4485 +@@ -1994,8 +1990,6 @@ int dlm_lowcomms_start(void)
4486 + goto fail;
4487 + }
4488 +
4489 +- INIT_WORK(&listen_con.rwork, process_listen_recv_socket);
4490 +-
4491 + error = work_start();
4492 + if (error)
4493 + goto fail_local;
4494 +@@ -2034,6 +2028,16 @@ fail:
4495 + return error;
4496 + }
4497 +
4498 ++void dlm_lowcomms_init(void)
4499 ++{
4500 ++ int i;
4501 ++
4502 ++ for (i = 0; i < CONN_HASH_SIZE; i++)
4503 ++ INIT_HLIST_HEAD(&connection_hash[i]);
4504 ++
4505 ++ INIT_WORK(&listen_con.rwork, process_listen_recv_socket);
4506 ++}
4507 ++
4508 + void dlm_lowcomms_exit(void)
4509 + {
4510 + struct dlm_node_addr *na, *safe;
4511 +diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
4512 +index 29369feea9916..bbce7a18416dc 100644
4513 +--- a/fs/dlm/lowcomms.h
4514 ++++ b/fs/dlm/lowcomms.h
4515 +@@ -35,6 +35,7 @@ extern int dlm_allow_conn;
4516 + int dlm_lowcomms_start(void);
4517 + void dlm_lowcomms_shutdown(void);
4518 + void dlm_lowcomms_stop(void);
4519 ++void dlm_lowcomms_init(void);
4520 + void dlm_lowcomms_exit(void);
4521 + int dlm_lowcomms_close(int nodeid);
4522 + struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
4523 +diff --git a/fs/dlm/main.c b/fs/dlm/main.c
4524 +index 1c5be4b70ac1b..a77338be32371 100644
4525 +--- a/fs/dlm/main.c
4526 ++++ b/fs/dlm/main.c
4527 +@@ -17,7 +17,7 @@
4528 + #include "user.h"
4529 + #include "memory.h"
4530 + #include "config.h"
4531 +-#include "lowcomms.h"
4532 ++#include "midcomms.h"
4533 +
4534 + #define CREATE_TRACE_POINTS
4535 + #include <trace/events/dlm.h>
4536 +@@ -30,6 +30,8 @@ static int __init init_dlm(void)
4537 + if (error)
4538 + goto out;
4539 +
4540 ++ dlm_midcomms_init();
4541 ++
4542 + error = dlm_lockspace_init();
4543 + if (error)
4544 + goto out_mem;
4545 +@@ -66,6 +68,7 @@ static int __init init_dlm(void)
4546 + out_lockspace:
4547 + dlm_lockspace_exit();
4548 + out_mem:
4549 ++ dlm_midcomms_exit();
4550 + dlm_memory_exit();
4551 + out:
4552 + return error;
4553 +@@ -79,7 +82,7 @@ static void __exit exit_dlm(void)
4554 + dlm_config_exit();
4555 + dlm_memory_exit();
4556 + dlm_lockspace_exit();
4557 +- dlm_lowcomms_exit();
4558 ++ dlm_midcomms_exit();
4559 + dlm_unregister_debugfs();
4560 + }
4561 +
4562 +diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
4563 +index 546c52c46b1c9..b2a25a33a1488 100644
4564 +--- a/fs/dlm/midcomms.c
4565 ++++ b/fs/dlm/midcomms.c
4566 +@@ -132,6 +132,7 @@
4567 + */
4568 + #define DLM_DEBUG_FENCE_TERMINATION 0
4569 +
4570 ++#include <trace/events/dlm.h>
4571 + #include <net/tcp.h>
4572 +
4573 + #include "dlm_internal.h"
4574 +@@ -194,7 +195,7 @@ struct midcomms_node {
4575 + };
4576 +
4577 + struct dlm_mhandle {
4578 +- const struct dlm_header *inner_hd;
4579 ++ const union dlm_packet *inner_p;
4580 + struct midcomms_node *node;
4581 + struct dlm_opts *opts;
4582 + struct dlm_msg *msg;
4583 +@@ -405,6 +406,7 @@ static int dlm_send_fin(struct midcomms_node *node,
4584 + if (!mh)
4585 + return -ENOMEM;
4586 +
4587 ++ set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags);
4588 + mh->ack_rcv = ack_rcv;
4589 +
4590 + m_header = (struct dlm_header *)ppc;
4591 +@@ -415,8 +417,7 @@ static int dlm_send_fin(struct midcomms_node *node,
4592 + m_header->h_cmd = DLM_FIN;
4593 +
4594 + pr_debug("sending fin msg to node %d\n", node->nodeid);
4595 +- dlm_midcomms_commit_mhandle(mh);
4596 +- set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags);
4597 ++ dlm_midcomms_commit_mhandle(mh, NULL, 0);
4598 +
4599 + return 0;
4600 + }
4601 +@@ -468,12 +469,26 @@ static void dlm_pas_fin_ack_rcv(struct midcomms_node *node)
4602 + spin_unlock(&node->state_lock);
4603 + log_print("%s: unexpected state: %d\n",
4604 + __func__, node->state);
4605 +- WARN_ON(1);
4606 ++ WARN_ON_ONCE(1);
4607 + return;
4608 + }
4609 + spin_unlock(&node->state_lock);
4610 + }
4611 +
4612 ++static void dlm_receive_buffer_3_2_trace(uint32_t seq, union dlm_packet *p)
4613 ++{
4614 ++ switch (p->header.h_cmd) {
4615 ++ case DLM_MSG:
4616 ++ trace_dlm_recv_message(seq, &p->message);
4617 ++ break;
4618 ++ case DLM_RCOM:
4619 ++ trace_dlm_recv_rcom(seq, &p->rcom);
4620 ++ break;
4621 ++ default:
4622 ++ break;
4623 ++ }
4624 ++}
4625 ++
4626 + static void dlm_midcomms_receive_buffer(union dlm_packet *p,
4627 + struct midcomms_node *node,
4628 + uint32_t seq)
4629 +@@ -527,13 +542,14 @@ static void dlm_midcomms_receive_buffer(union dlm_packet *p,
4630 + spin_unlock(&node->state_lock);
4631 + log_print("%s: unexpected state: %d\n",
4632 + __func__, node->state);
4633 +- WARN_ON(1);
4634 ++ WARN_ON_ONCE(1);
4635 + return;
4636 + }
4637 + spin_unlock(&node->state_lock);
4638 + break;
4639 + default:
4640 +- WARN_ON(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
4641 ++ WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
4642 ++ dlm_receive_buffer_3_2_trace(seq, p);
4643 + dlm_receive_buffer(p, node->nodeid);
4644 + set_bit(DLM_NODE_ULP_DELIVERED, &node->flags);
4645 + break;
4646 +@@ -748,7 +764,7 @@ static void dlm_midcomms_receive_buffer_3_2(union dlm_packet *p, int nodeid)
4647 + goto out;
4648 + }
4649 +
4650 +- WARN_ON(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
4651 ++ WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
4652 + dlm_receive_buffer(p, nodeid);
4653 + break;
4654 + case DLM_OPTS:
4655 +@@ -1049,7 +1065,7 @@ static struct dlm_msg *dlm_midcomms_get_msg_3_2(struct dlm_mhandle *mh, int node
4656 + dlm_fill_opts_header(opts, len, mh->seq);
4657 +
4658 + *ppc += sizeof(*opts);
4659 +- mh->inner_hd = (const struct dlm_header *)*ppc;
4660 ++ mh->inner_p = (const union dlm_packet *)*ppc;
4661 + return msg;
4662 + }
4663 +
4664 +@@ -1073,7 +1089,7 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
4665 + }
4666 +
4667 + /* this is a bug, however we going on and hope it will be resolved */
4668 +- WARN_ON(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags));
4669 ++ WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags));
4670 +
4671 + mh = dlm_allocate_mhandle();
4672 + if (!mh)
4673 +@@ -1105,7 +1121,7 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
4674 + break;
4675 + default:
4676 + dlm_free_mhandle(mh);
4677 +- WARN_ON(1);
4678 ++ WARN_ON_ONCE(1);
4679 + goto err;
4680 + }
4681 +
4682 +@@ -1124,11 +1140,30 @@ err:
4683 + }
4684 + #endif
4685 +
4686 +-static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh)
4687 ++static void dlm_midcomms_commit_msg_3_2_trace(const struct dlm_mhandle *mh,
4688 ++ const void *name, int namelen)
4689 ++{
4690 ++ switch (mh->inner_p->header.h_cmd) {
4691 ++ case DLM_MSG:
4692 ++ trace_dlm_send_message(mh->seq, &mh->inner_p->message,
4693 ++ name, namelen);
4694 ++ break;
4695 ++ case DLM_RCOM:
4696 ++ trace_dlm_send_rcom(mh->seq, &mh->inner_p->rcom);
4697 ++ break;
4698 ++ default:
4699 ++ /* nothing to trace */
4700 ++ break;
4701 ++ }
4702 ++}
4703 ++
4704 ++static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh,
4705 ++ const void *name, int namelen)
4706 + {
4707 + /* nexthdr chain for fast lookup */
4708 +- mh->opts->o_nextcmd = mh->inner_hd->h_cmd;
4709 ++ mh->opts->o_nextcmd = mh->inner_p->header.h_cmd;
4710 + mh->committed = true;
4711 ++ dlm_midcomms_commit_msg_3_2_trace(mh, name, namelen);
4712 + dlm_lowcomms_commit_msg(mh->msg);
4713 + }
4714 +
4715 +@@ -1136,8 +1171,10 @@ static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh)
4716 + * dlm_midcomms_get_mhandle
4717 + */
4718 + #ifndef __CHECKER__
4719 +-void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh)
4720 ++void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh,
4721 ++ const void *name, int namelen)
4722 + {
4723 ++
4724 + switch (mh->node->version) {
4725 + case DLM_VERSION_3_1:
4726 + srcu_read_unlock(&nodes_srcu, mh->idx);
4727 +@@ -1148,25 +1185,47 @@ void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh)
4728 + dlm_free_mhandle(mh);
4729 + break;
4730 + case DLM_VERSION_3_2:
4731 +- dlm_midcomms_commit_msg_3_2(mh);
4732 ++ /* held rcu read lock here, because we sending the
4733 ++ * dlm message out, when we do that we could receive
4734 ++ * an ack back which releases the mhandle and we
4735 ++ * get a use after free.
4736 ++ */
4737 ++ rcu_read_lock();
4738 ++ dlm_midcomms_commit_msg_3_2(mh, name, namelen);
4739 + srcu_read_unlock(&nodes_srcu, mh->idx);
4740 ++ rcu_read_unlock();
4741 + break;
4742 + default:
4743 + srcu_read_unlock(&nodes_srcu, mh->idx);
4744 +- WARN_ON(1);
4745 ++ WARN_ON_ONCE(1);
4746 + break;
4747 + }
4748 + }
4749 + #endif
4750 +
4751 + int dlm_midcomms_start(void)
4752 ++{
4753 ++ return dlm_lowcomms_start();
4754 ++}
4755 ++
4756 ++void dlm_midcomms_stop(void)
4757 ++{
4758 ++ dlm_lowcomms_stop();
4759 ++}
4760 ++
4761 ++void dlm_midcomms_init(void)
4762 + {
4763 + int i;
4764 +
4765 + for (i = 0; i < CONN_HASH_SIZE; i++)
4766 + INIT_HLIST_HEAD(&node_hash[i]);
4767 +
4768 +- return dlm_lowcomms_start();
4769 ++ dlm_lowcomms_init();
4770 ++}
4771 ++
4772 ++void dlm_midcomms_exit(void)
4773 ++{
4774 ++ dlm_lowcomms_exit();
4775 + }
4776 +
4777 + static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
4778 +@@ -1195,7 +1254,7 @@ static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
4779 + spin_unlock(&node->state_lock);
4780 + log_print("%s: unexpected state: %d\n",
4781 + __func__, node->state);
4782 +- WARN_ON(1);
4783 ++ WARN_ON_ONCE(1);
4784 + return;
4785 + }
4786 + spin_unlock(&node->state_lock);
4787 +@@ -1307,7 +1366,8 @@ static void midcomms_node_release(struct rcu_head *rcu)
4788 + {
4789 + struct midcomms_node *node = container_of(rcu, struct midcomms_node, rcu);
4790 +
4791 +- WARN_ON(atomic_read(&node->send_queue_cnt));
4792 ++ WARN_ON_ONCE(atomic_read(&node->send_queue_cnt));
4793 ++ dlm_send_queue_flush(node);
4794 + kfree(node);
4795 + }
4796 +
4797 +diff --git a/fs/dlm/midcomms.h b/fs/dlm/midcomms.h
4798 +index 82bcd96619228..69296552d5add 100644
4799 +--- a/fs/dlm/midcomms.h
4800 ++++ b/fs/dlm/midcomms.h
4801 +@@ -17,9 +17,13 @@ struct midcomms_node;
4802 + int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int buflen);
4803 + struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
4804 + gfp_t allocation, char **ppc);
4805 +-void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh);
4806 ++void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh, const void *name,
4807 ++ int namelen);
4808 + int dlm_midcomms_close(int nodeid);
4809 + int dlm_midcomms_start(void);
4810 ++void dlm_midcomms_stop(void);
4811 ++void dlm_midcomms_init(void);
4812 ++void dlm_midcomms_exit(void);
4813 + void dlm_midcomms_shutdown(void);
4814 + void dlm_midcomms_add_member(int nodeid);
4815 + void dlm_midcomms_remove_member(int nodeid);
4816 +diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
4817 +index f19860315043a..b76d52e2f6bdd 100644
4818 +--- a/fs/dlm/rcom.c
4819 ++++ b/fs/dlm/rcom.c
4820 +@@ -91,7 +91,7 @@ static int create_rcom_stateless(struct dlm_ls *ls, int to_nodeid, int type,
4821 +
4822 + static void send_rcom(struct dlm_mhandle *mh, struct dlm_rcom *rc)
4823 + {
4824 +- dlm_midcomms_commit_mhandle(mh);
4825 ++ dlm_midcomms_commit_mhandle(mh, NULL, 0);
4826 + }
4827 +
4828 + static void send_rcom_stateless(struct dlm_msg *msg, struct dlm_rcom *rc)
4829 +@@ -516,7 +516,7 @@ int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
4830 + rf = (struct rcom_config *) rc->rc_buf;
4831 + rf->rf_lvblen = cpu_to_le32(~0U);
4832 +
4833 +- dlm_midcomms_commit_mhandle(mh);
4834 ++ dlm_midcomms_commit_mhandle(mh, NULL, 0);
4835 +
4836 + return 0;
4837 + }
4838 +diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c
4839 +index 091fd5adf818f..5cd612a8f8584 100644
4840 +--- a/fs/erofs/decompressor_lzma.c
4841 ++++ b/fs/erofs/decompressor_lzma.c
4842 +@@ -278,7 +278,7 @@ again:
4843 + }
4844 + }
4845 + if (no < nrpages_out && strm->buf.out)
4846 +- kunmap(rq->in[no]);
4847 ++ kunmap(rq->out[no]);
4848 + if (ni < nrpages_in)
4849 + kunmap(rq->in[ni]);
4850 + /* 4. push back LZMA stream context to the global list */
4851 +diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
4852 +index ee7c88c9b5afa..cf4871834ebb2 100644
4853 +--- a/fs/erofs/zdata.c
4854 ++++ b/fs/erofs/zdata.c
4855 +@@ -1047,12 +1047,12 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
4856 +
4857 + if (!be->decompressed_pages)
4858 + be->decompressed_pages =
4859 +- kcalloc(be->nr_pages, sizeof(struct page *),
4860 +- GFP_KERNEL | __GFP_NOFAIL);
4861 ++ kvcalloc(be->nr_pages, sizeof(struct page *),
4862 ++ GFP_KERNEL | __GFP_NOFAIL);
4863 + if (!be->compressed_pages)
4864 + be->compressed_pages =
4865 +- kcalloc(pclusterpages, sizeof(struct page *),
4866 +- GFP_KERNEL | __GFP_NOFAIL);
4867 ++ kvcalloc(pclusterpages, sizeof(struct page *),
4868 ++ GFP_KERNEL | __GFP_NOFAIL);
4869 +
4870 + z_erofs_parse_out_bvecs(be);
4871 + err2 = z_erofs_parse_in_bvecs(be, &overlapped);
4872 +@@ -1100,7 +1100,7 @@ out:
4873 + }
4874 + if (be->compressed_pages < be->onstack_pages ||
4875 + be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
4876 +- kfree(be->compressed_pages);
4877 ++ kvfree(be->compressed_pages);
4878 + z_erofs_fill_other_copies(be, err);
4879 +
4880 + for (i = 0; i < be->nr_pages; ++i) {
4881 +@@ -1119,7 +1119,7 @@ out:
4882 + }
4883 +
4884 + if (be->decompressed_pages != be->onstack_pages)
4885 +- kfree(be->decompressed_pages);
4886 ++ kvfree(be->decompressed_pages);
4887 +
4888 + pcl->length = 0;
4889 + pcl->partial = true;
4890 +diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
4891 +index 4493ef0c715e9..cdf9bfe10137f 100644
4892 +--- a/fs/ext4/fsmap.c
4893 ++++ b/fs/ext4/fsmap.c
4894 +@@ -486,6 +486,8 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
4895 + keys[0].fmr_physical = bofs;
4896 + if (keys[1].fmr_physical >= eofs)
4897 + keys[1].fmr_physical = eofs - 1;
4898 ++ if (keys[1].fmr_physical < keys[0].fmr_physical)
4899 ++ return 0;
4900 + start_fsb = keys[0].fmr_physical;
4901 + end_fsb = keys[1].fmr_physical;
4902 +
4903 +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
4904 +index a4fbe825694b1..c4475a74c7626 100644
4905 +--- a/fs/ext4/inline.c
4906 ++++ b/fs/ext4/inline.c
4907 +@@ -159,7 +159,6 @@ int ext4_find_inline_data_nolock(struct inode *inode)
4908 + (void *)ext4_raw_inode(&is.iloc));
4909 + EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
4910 + le32_to_cpu(is.s.here->e_value_size);
4911 +- ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
4912 + }
4913 + out:
4914 + brelse(is.iloc.bh);
4915 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
4916 +index 283afda26d9cb..34c87fcfd0617 100644
4917 +--- a/fs/ext4/inode.c
4918 ++++ b/fs/ext4/inode.c
4919 +@@ -4727,8 +4727,13 @@ static inline int ext4_iget_extra_inode(struct inode *inode,
4920 +
4921 + if (EXT4_INODE_HAS_XATTR_SPACE(inode) &&
4922 + *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
4923 ++ int err;
4924 ++
4925 + ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4926 +- return ext4_find_inline_data_nolock(inode);
4927 ++ err = ext4_find_inline_data_nolock(inode);
4928 ++ if (!err && ext4_has_inline_data(inode))
4929 ++ ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
4930 ++ return err;
4931 + } else
4932 + EXT4_I(inode)->i_inline_off = 0;
4933 + return 0;
4934 +diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
4935 +index 8067ccda34e45..8c2b1ff5e6959 100644
4936 +--- a/fs/ext4/ioctl.c
4937 ++++ b/fs/ext4/ioctl.c
4938 +@@ -434,6 +434,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
4939 + ei_bl->i_flags = 0;
4940 + inode_set_iversion(inode_bl, 1);
4941 + i_size_write(inode_bl, 0);
4942 ++ EXT4_I(inode_bl)->i_disksize = inode_bl->i_size;
4943 + inode_bl->i_mode = S_IFREG;
4944 + if (ext4_has_feature_extents(sb)) {
4945 + ext4_set_inode_flag(inode_bl, EXT4_INODE_EXTENTS);
4946 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
4947 +index 1c5518a4bdf91..800d631c920b4 100644
4948 +--- a/fs/ext4/namei.c
4949 ++++ b/fs/ext4/namei.c
4950 +@@ -1595,11 +1595,10 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
4951 + int has_inline_data = 1;
4952 + ret = ext4_find_inline_entry(dir, fname, res_dir,
4953 + &has_inline_data);
4954 +- if (has_inline_data) {
4955 +- if (inlined)
4956 +- *inlined = 1;
4957 ++ if (inlined)
4958 ++ *inlined = has_inline_data;
4959 ++ if (has_inline_data)
4960 + goto cleanup_and_exit;
4961 +- }
4962 + }
4963 +
4964 + if ((namelen <= 2) && (name[0] == '.') &&
4965 +@@ -3646,7 +3645,8 @@ static void ext4_resetent(handle_t *handle, struct ext4_renament *ent,
4966 + * so the old->de may no longer valid and need to find it again
4967 + * before reset old inode info.
4968 + */
4969 +- old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
4970 ++ old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de,
4971 ++ &old.inlined);
4972 + if (IS_ERR(old.bh))
4973 + retval = PTR_ERR(old.bh);
4974 + if (!old.bh)
4975 +@@ -3813,9 +3813,20 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
4976 + return retval;
4977 + }
4978 +
4979 +- old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
4980 +- if (IS_ERR(old.bh))
4981 +- return PTR_ERR(old.bh);
4982 ++ /*
4983 ++ * We need to protect against old.inode directory getting converted
4984 ++ * from inline directory format into a normal one.
4985 ++ */
4986 ++ if (S_ISDIR(old.inode->i_mode))
4987 ++ inode_lock_nested(old.inode, I_MUTEX_NONDIR2);
4988 ++
4989 ++ old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de,
4990 ++ &old.inlined);
4991 ++ if (IS_ERR(old.bh)) {
4992 ++ retval = PTR_ERR(old.bh);
4993 ++ goto unlock_moved_dir;
4994 ++ }
4995 ++
4996 + /*
4997 + * Check for inode number is _not_ due to possible IO errors.
4998 + * We might rmdir the source, keep it as pwd of some process
4999 +@@ -3873,8 +3884,10 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
5000 + goto end_rename;
5001 + }
5002 + retval = ext4_rename_dir_prepare(handle, &old);
5003 +- if (retval)
5004 ++ if (retval) {
5005 ++ inode_unlock(old.inode);
5006 + goto end_rename;
5007 ++ }
5008 + }
5009 + /*
5010 + * If we're renaming a file within an inline_data dir and adding or
5011 +@@ -4010,6 +4023,11 @@ release_bh:
5012 + brelse(old.dir_bh);
5013 + brelse(old.bh);
5014 + brelse(new.bh);
5015 ++
5016 ++unlock_moved_dir:
5017 ++ if (S_ISDIR(old.inode->i_mode))
5018 ++ inode_unlock(old.inode);
5019 ++
5020 + return retval;
5021 + }
5022 +
5023 +diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
5024 +index 97fa7b4c645fd..d0302b66c215d 100644
5025 +--- a/fs/ext4/page-io.c
5026 ++++ b/fs/ext4/page-io.c
5027 +@@ -409,7 +409,8 @@ static void io_submit_init_bio(struct ext4_io_submit *io,
5028 +
5029 + static void io_submit_add_bh(struct ext4_io_submit *io,
5030 + struct inode *inode,
5031 +- struct page *page,
5032 ++ struct page *pagecache_page,
5033 ++ struct page *bounce_page,
5034 + struct buffer_head *bh)
5035 + {
5036 + int ret;
5037 +@@ -421,10 +422,11 @@ submit_and_retry:
5038 + }
5039 + if (io->io_bio == NULL)
5040 + io_submit_init_bio(io, bh);
5041 +- ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
5042 ++ ret = bio_add_page(io->io_bio, bounce_page ?: pagecache_page,
5043 ++ bh->b_size, bh_offset(bh));
5044 + if (ret != bh->b_size)
5045 + goto submit_and_retry;
5046 +- wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size);
5047 ++ wbc_account_cgroup_owner(io->io_wbc, pagecache_page, bh->b_size);
5048 + io->io_next_block++;
5049 + }
5050 +
5051 +@@ -543,8 +545,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
5052 + do {
5053 + if (!buffer_async_write(bh))
5054 + continue;
5055 +- io_submit_add_bh(io, inode,
5056 +- bounce_page ? bounce_page : page, bh);
5057 ++ io_submit_add_bh(io, inode, page, bounce_page, bh);
5058 + nr_submitted++;
5059 + clear_buffer_dirty(bh);
5060 + } while ((bh = bh->b_this_page) != head);
5061 +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
5062 +index 099a87ec9b2ab..e0eb6eb02a834 100644
5063 +--- a/fs/ext4/xattr.c
5064 ++++ b/fs/ext4/xattr.c
5065 +@@ -2790,6 +2790,9 @@ shift:
5066 + (void *)header, total_ino);
5067 + EXT4_I(inode)->i_extra_isize = new_extra_isize;
5068 +
5069 ++ if (ext4_has_inline_data(inode))
5070 ++ error = ext4_find_inline_data_nolock(inode);
5071 ++
5072 + cleanup:
5073 + if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) {
5074 + ext4_warning(inode->i_sb, "Unable to expand inode %lu. Delete some EAs or run e2fsck.",
5075 +diff --git a/fs/file.c b/fs/file.c
5076 +index c942c89ca4cda..7893ea161d770 100644
5077 +--- a/fs/file.c
5078 ++++ b/fs/file.c
5079 +@@ -642,6 +642,7 @@ static struct file *pick_file(struct files_struct *files, unsigned fd)
5080 + if (fd >= fdt->max_fds)
5081 + return NULL;
5082 +
5083 ++ fd = array_index_nospec(fd, fdt->max_fds);
5084 + file = fdt->fd[fd];
5085 + if (file) {
5086 + rcu_assign_pointer(fdt->fd[fd], NULL);
5087 +diff --git a/fs/locks.c b/fs/locks.c
5088 +index 7dc129cc1a267..240b9309ed6d5 100644
5089 +--- a/fs/locks.c
5090 ++++ b/fs/locks.c
5091 +@@ -1862,9 +1862,10 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
5092 + void **priv)
5093 + {
5094 + struct inode *inode = locks_inode(filp);
5095 ++ vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_user_ns(filp), inode);
5096 + int error;
5097 +
5098 +- if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
5099 ++ if ((!vfsuid_eq_kuid(vfsuid, current_fsuid())) && !capable(CAP_LEASE))
5100 + return -EACCES;
5101 + if (!S_ISREG(inode->i_mode))
5102 + return -EINVAL;
5103 +diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
5104 +index 0d49c6bb22eb1..59f9a8cee012a 100644
5105 +--- a/fs/nfsd/vfs.c
5106 ++++ b/fs/nfsd/vfs.c
5107 +@@ -1037,7 +1037,9 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
5108 + since = READ_ONCE(file->f_wb_err);
5109 + if (verf)
5110 + nfsd_copy_write_verifier(verf, nn);
5111 ++ file_start_write(file);
5112 + host_err = vfs_iter_write(file, &iter, &pos, flags);
5113 ++ file_end_write(file);
5114 + if (host_err < 0) {
5115 + nfsd_reset_write_verifier(nn);
5116 + trace_nfsd_writeverf_reset(nn, rqstp, host_err);
5117 +diff --git a/fs/udf/inode.c b/fs/udf/inode.c
5118 +index 259152a08852b..a4e875b61f895 100644
5119 +--- a/fs/udf/inode.c
5120 ++++ b/fs/udf/inode.c
5121 +@@ -443,7 +443,7 @@ static int udf_get_block(struct inode *inode, sector_t block,
5122 + * Block beyond EOF and prealloc extents? Just discard preallocation
5123 + * as it is not useful and complicates things.
5124 + */
5125 +- if (((loff_t)block) << inode->i_blkbits > iinfo->i_lenExtents)
5126 ++ if (((loff_t)block) << inode->i_blkbits >= iinfo->i_lenExtents)
5127 + udf_discard_prealloc(inode);
5128 + udf_clear_extent_cache(inode);
5129 + phys = inode_getblk(inode, block, &err, &new);
5130 +diff --git a/include/linux/hid.h b/include/linux/hid.h
5131 +index 48563dc09e171..0a1ccc68e798a 100644
5132 +--- a/include/linux/hid.h
5133 ++++ b/include/linux/hid.h
5134 +@@ -827,6 +827,7 @@ struct hid_driver {
5135 + * @output_report: send output report to device
5136 + * @idle: send idle request to device
5137 + * @may_wakeup: return if device may act as a wakeup source during system-suspend
5138 ++ * @max_buffer_size: over-ride maximum data buffer size (default: HID_MAX_BUFFER_SIZE)
5139 + */
5140 + struct hid_ll_driver {
5141 + int (*start)(struct hid_device *hdev);
5142 +@@ -852,6 +853,8 @@ struct hid_ll_driver {
5143 +
5144 + int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype);
5145 + bool (*may_wakeup)(struct hid_device *hdev);
5146 ++
5147 ++ unsigned int max_buffer_size;
5148 + };
5149 +
5150 + extern struct hid_ll_driver i2c_hid_ll_driver;
5151 +diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
5152 +index 478aece170462..f198a8ac7ee72 100644
5153 +--- a/include/linux/mhi_ep.h
5154 ++++ b/include/linux/mhi_ep.h
5155 +@@ -70,8 +70,8 @@ struct mhi_ep_db_info {
5156 + * @cmd_ctx_cache_phys: Physical address of the host command context cache
5157 + * @chdb: Array of channel doorbell interrupt info
5158 + * @event_lock: Lock for protecting event rings
5159 +- * @list_lock: Lock for protecting state transition and channel doorbell lists
5160 + * @state_lock: Lock for protecting state transitions
5161 ++ * @list_lock: Lock for protecting state transition and channel doorbell lists
5162 + * @st_transition_list: List of state transitions
5163 + * @ch_db_list: List of queued channel doorbells
5164 + * @wq: Dedicated workqueue for handling rings and state changes
5165 +@@ -117,8 +117,8 @@ struct mhi_ep_cntrl {
5166 +
5167 + struct mhi_ep_db_info chdb[4];
5168 + struct mutex event_lock;
5169 ++ struct mutex state_lock;
5170 + spinlock_t list_lock;
5171 +- spinlock_t state_lock;
5172 +
5173 + struct list_head st_transition_list;
5174 + struct list_head ch_db_list;
5175 +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
5176 +index bc8f484cdcf3b..45c3d62e616d8 100644
5177 +--- a/include/linux/pci_ids.h
5178 ++++ b/include/linux/pci_ids.h
5179 +@@ -3094,6 +3094,8 @@
5180 +
5181 + #define PCI_VENDOR_ID_3COM_2 0xa727
5182 +
5183 ++#define PCI_VENDOR_ID_SOLIDRUN 0xd063
5184 ++
5185 + #define PCI_VENDOR_ID_DIGIUM 0xd161
5186 + #define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410
5187 +
5188 +diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h
5189 +index 82d0e41b76f22..faa108b1ba675 100644
5190 +--- a/include/net/netfilter/nf_tproxy.h
5191 ++++ b/include/net/netfilter/nf_tproxy.h
5192 +@@ -17,6 +17,13 @@ static inline bool nf_tproxy_sk_is_transparent(struct sock *sk)
5193 + return false;
5194 + }
5195 +
5196 ++static inline void nf_tproxy_twsk_deschedule_put(struct inet_timewait_sock *tw)
5197 ++{
5198 ++ local_bh_disable();
5199 ++ inet_twsk_deschedule_put(tw);
5200 ++ local_bh_enable();
5201 ++}
5202 ++
5203 + /* assign a socket to the skb -- consumes sk */
5204 + static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
5205 + {
5206 +diff --git a/include/trace/events/dlm.h b/include/trace/events/dlm.h
5207 +index da0eaae98fa34..4ec47828d55ed 100644
5208 +--- a/include/trace/events/dlm.h
5209 ++++ b/include/trace/events/dlm.h
5210 +@@ -46,6 +46,56 @@
5211 + { DLM_SBF_VALNOTVALID, "VALNOTVALID" }, \
5212 + { DLM_SBF_ALTMODE, "ALTMODE" })
5213 +
5214 ++#define show_lkb_flags(flags) __print_flags(flags, "|", \
5215 ++ { DLM_IFL_MSTCPY, "MSTCPY" }, \
5216 ++ { DLM_IFL_RESEND, "RESEND" }, \
5217 ++ { DLM_IFL_DEAD, "DEAD" }, \
5218 ++ { DLM_IFL_OVERLAP_UNLOCK, "OVERLAP_UNLOCK" }, \
5219 ++ { DLM_IFL_OVERLAP_CANCEL, "OVERLAP_CANCEL" }, \
5220 ++ { DLM_IFL_ENDOFLIFE, "ENDOFLIFE" }, \
5221 ++ { DLM_IFL_DEADLOCK_CANCEL, "DEADLOCK_CANCEL" }, \
5222 ++ { DLM_IFL_STUB_MS, "STUB_MS" }, \
5223 ++ { DLM_IFL_USER, "USER" }, \
5224 ++ { DLM_IFL_ORPHAN, "ORPHAN" })
5225 ++
5226 ++#define show_header_cmd(cmd) __print_symbolic(cmd, \
5227 ++ { DLM_MSG, "MSG"}, \
5228 ++ { DLM_RCOM, "RCOM"}, \
5229 ++ { DLM_OPTS, "OPTS"}, \
5230 ++ { DLM_ACK, "ACK"}, \
5231 ++ { DLM_FIN, "FIN"})
5232 ++
5233 ++#define show_message_version(version) __print_symbolic(version, \
5234 ++ { DLM_VERSION_3_1, "3.1"}, \
5235 ++ { DLM_VERSION_3_2, "3.2"})
5236 ++
5237 ++#define show_message_type(type) __print_symbolic(type, \
5238 ++ { DLM_MSG_REQUEST, "REQUEST"}, \
5239 ++ { DLM_MSG_CONVERT, "CONVERT"}, \
5240 ++ { DLM_MSG_UNLOCK, "UNLOCK"}, \
5241 ++ { DLM_MSG_CANCEL, "CANCEL"}, \
5242 ++ { DLM_MSG_REQUEST_REPLY, "REQUEST_REPLY"}, \
5243 ++ { DLM_MSG_CONVERT_REPLY, "CONVERT_REPLY"}, \
5244 ++ { DLM_MSG_UNLOCK_REPLY, "UNLOCK_REPLY"}, \
5245 ++ { DLM_MSG_CANCEL_REPLY, "CANCEL_REPLY"}, \
5246 ++ { DLM_MSG_GRANT, "GRANT"}, \
5247 ++ { DLM_MSG_BAST, "BAST"}, \
5248 ++ { DLM_MSG_LOOKUP, "LOOKUP"}, \
5249 ++ { DLM_MSG_REMOVE, "REMOVE"}, \
5250 ++ { DLM_MSG_LOOKUP_REPLY, "LOOKUP_REPLY"}, \
5251 ++ { DLM_MSG_PURGE, "PURGE"})
5252 ++
5253 ++#define show_rcom_type(type) __print_symbolic(type, \
5254 ++ { DLM_RCOM_STATUS, "STATUS"}, \
5255 ++ { DLM_RCOM_NAMES, "NAMES"}, \
5256 ++ { DLM_RCOM_LOOKUP, "LOOKUP"}, \
5257 ++ { DLM_RCOM_LOCK, "LOCK"}, \
5258 ++ { DLM_RCOM_STATUS_REPLY, "STATUS_REPLY"}, \
5259 ++ { DLM_RCOM_NAMES_REPLY, "NAMES_REPLY"}, \
5260 ++ { DLM_RCOM_LOOKUP_REPLY, "LOOKUP_REPLY"}, \
5261 ++ { DLM_RCOM_LOCK_REPLY, "LOCK_REPLY"})
5262 ++
5263 ++
5264 + /* note: we begin tracing dlm_lock_start() only if ls and lkb are found */
5265 + TRACE_EVENT(dlm_lock_start,
5266 +
5267 +@@ -290,6 +340,253 @@ TRACE_EVENT(dlm_unlock_end,
5268 +
5269 + );
5270 +
5271 ++DECLARE_EVENT_CLASS(dlm_rcom_template,
5272 ++
5273 ++ TP_PROTO(uint32_t seq, const struct dlm_rcom *rc),
5274 ++
5275 ++ TP_ARGS(seq, rc),
5276 ++
5277 ++ TP_STRUCT__entry(
5278 ++ __field(uint32_t, seq)
5279 ++ __field(uint32_t, h_version)
5280 ++ __field(uint32_t, h_lockspace)
5281 ++ __field(uint32_t, h_nodeid)
5282 ++ __field(uint16_t, h_length)
5283 ++ __field(uint8_t, h_cmd)
5284 ++ __field(uint32_t, rc_type)
5285 ++ __field(int32_t, rc_result)
5286 ++ __field(uint64_t, rc_id)
5287 ++ __field(uint64_t, rc_seq)
5288 ++ __field(uint64_t, rc_seq_reply)
5289 ++ __dynamic_array(unsigned char, rc_buf,
5290 ++ le16_to_cpu(rc->rc_header.h_length) - sizeof(*rc))
5291 ++ ),
5292 ++
5293 ++ TP_fast_assign(
5294 ++ __entry->seq = seq;
5295 ++ __entry->h_version = le32_to_cpu(rc->rc_header.h_version);
5296 ++ __entry->h_lockspace = le32_to_cpu(rc->rc_header.u.h_lockspace);
5297 ++ __entry->h_nodeid = le32_to_cpu(rc->rc_header.h_nodeid);
5298 ++ __entry->h_length = le16_to_cpu(rc->rc_header.h_length);
5299 ++ __entry->h_cmd = rc->rc_header.h_cmd;
5300 ++ __entry->rc_type = le32_to_cpu(rc->rc_type);
5301 ++ __entry->rc_result = le32_to_cpu(rc->rc_result);
5302 ++ __entry->rc_id = le64_to_cpu(rc->rc_id);
5303 ++ __entry->rc_seq = le64_to_cpu(rc->rc_seq);
5304 ++ __entry->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply);
5305 ++ memcpy(__get_dynamic_array(rc_buf), rc->rc_buf,
5306 ++ __get_dynamic_array_len(rc_buf));
5307 ++ ),
5308 ++
5309 ++ TP_printk("seq=%u, h_version=%s h_lockspace=%u h_nodeid=%u "
5310 ++ "h_length=%u h_cmd=%s rc_type=%s rc_result=%d "
5311 ++ "rc_id=%llu rc_seq=%llu rc_seq_reply=%llu "
5312 ++ "rc_buf=0x%s", __entry->seq,
5313 ++ show_message_version(__entry->h_version),
5314 ++ __entry->h_lockspace, __entry->h_nodeid, __entry->h_length,
5315 ++ show_header_cmd(__entry->h_cmd),
5316 ++ show_rcom_type(__entry->rc_type),
5317 ++ __entry->rc_result, __entry->rc_id, __entry->rc_seq,
5318 ++ __entry->rc_seq_reply,
5319 ++ __print_hex_str(__get_dynamic_array(rc_buf),
5320 ++ __get_dynamic_array_len(rc_buf)))
5321 ++
5322 ++);
5323 ++
5324 ++DEFINE_EVENT(dlm_rcom_template, dlm_send_rcom,
5325 ++ TP_PROTO(uint32_t seq, const struct dlm_rcom *rc),
5326 ++ TP_ARGS(seq, rc));
5327 ++
5328 ++DEFINE_EVENT(dlm_rcom_template, dlm_recv_rcom,
5329 ++ TP_PROTO(uint32_t seq, const struct dlm_rcom *rc),
5330 ++ TP_ARGS(seq, rc));
5331 ++
5332 ++TRACE_EVENT(dlm_send_message,
5333 ++
5334 ++ TP_PROTO(uint32_t seq, const struct dlm_message *ms,
5335 ++ const void *name, int namelen),
5336 ++
5337 ++ TP_ARGS(seq, ms, name, namelen),
5338 ++
5339 ++ TP_STRUCT__entry(
5340 ++ __field(uint32_t, seq)
5341 ++ __field(uint32_t, h_version)
5342 ++ __field(uint32_t, h_lockspace)
5343 ++ __field(uint32_t, h_nodeid)
5344 ++ __field(uint16_t, h_length)
5345 ++ __field(uint8_t, h_cmd)
5346 ++ __field(uint32_t, m_type)
5347 ++ __field(uint32_t, m_nodeid)
5348 ++ __field(uint32_t, m_pid)
5349 ++ __field(uint32_t, m_lkid)
5350 ++ __field(uint32_t, m_remid)
5351 ++ __field(uint32_t, m_parent_lkid)
5352 ++ __field(uint32_t, m_parent_remid)
5353 ++ __field(uint32_t, m_exflags)
5354 ++ __field(uint32_t, m_sbflags)
5355 ++ __field(uint32_t, m_flags)
5356 ++ __field(uint32_t, m_lvbseq)
5357 ++ __field(uint32_t, m_hash)
5358 ++ __field(int32_t, m_status)
5359 ++ __field(int32_t, m_grmode)
5360 ++ __field(int32_t, m_rqmode)
5361 ++ __field(int32_t, m_bastmode)
5362 ++ __field(int32_t, m_asts)
5363 ++ __field(int32_t, m_result)
5364 ++ __dynamic_array(unsigned char, m_extra,
5365 ++ le16_to_cpu(ms->m_header.h_length) - sizeof(*ms))
5366 ++ __dynamic_array(unsigned char, res_name, namelen)
5367 ++ ),
5368 ++
5369 ++ TP_fast_assign(
5370 ++ __entry->seq = seq;
5371 ++ __entry->h_version = le32_to_cpu(ms->m_header.h_version);
5372 ++ __entry->h_lockspace = le32_to_cpu(ms->m_header.u.h_lockspace);
5373 ++ __entry->h_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
5374 ++ __entry->h_length = le16_to_cpu(ms->m_header.h_length);
5375 ++ __entry->h_cmd = ms->m_header.h_cmd;
5376 ++ __entry->m_type = le32_to_cpu(ms->m_type);
5377 ++ __entry->m_nodeid = le32_to_cpu(ms->m_nodeid);
5378 ++ __entry->m_pid = le32_to_cpu(ms->m_pid);
5379 ++ __entry->m_lkid = le32_to_cpu(ms->m_lkid);
5380 ++ __entry->m_remid = le32_to_cpu(ms->m_remid);
5381 ++ __entry->m_parent_lkid = le32_to_cpu(ms->m_parent_lkid);
5382 ++ __entry->m_parent_remid = le32_to_cpu(ms->m_parent_remid);
5383 ++ __entry->m_exflags = le32_to_cpu(ms->m_exflags);
5384 ++ __entry->m_sbflags = le32_to_cpu(ms->m_sbflags);
5385 ++ __entry->m_flags = le32_to_cpu(ms->m_flags);
5386 ++ __entry->m_lvbseq = le32_to_cpu(ms->m_lvbseq);
5387 ++ __entry->m_hash = le32_to_cpu(ms->m_hash);
5388 ++ __entry->m_status = le32_to_cpu(ms->m_status);
5389 ++ __entry->m_grmode = le32_to_cpu(ms->m_grmode);
5390 ++ __entry->m_rqmode = le32_to_cpu(ms->m_rqmode);
5391 ++ __entry->m_bastmode = le32_to_cpu(ms->m_bastmode);
5392 ++ __entry->m_asts = le32_to_cpu(ms->m_asts);
5393 ++ __entry->m_result = le32_to_cpu(ms->m_result);
5394 ++ memcpy(__get_dynamic_array(m_extra), ms->m_extra,
5395 ++ __get_dynamic_array_len(m_extra));
5396 ++ memcpy(__get_dynamic_array(res_name), name,
5397 ++ __get_dynamic_array_len(res_name));
5398 ++ ),
5399 ++
5400 ++ TP_printk("seq=%u h_version=%s h_lockspace=%u h_nodeid=%u "
5401 ++ "h_length=%u h_cmd=%s m_type=%s m_nodeid=%u "
5402 ++ "m_pid=%u m_lkid=%u m_remid=%u m_parent_lkid=%u "
5403 ++ "m_parent_remid=%u m_exflags=%s m_sbflags=%s m_flags=%s "
5404 ++ "m_lvbseq=%u m_hash=%u m_status=%d m_grmode=%s "
5405 ++ "m_rqmode=%s m_bastmode=%s m_asts=%d m_result=%d "
5406 ++ "m_extra=0x%s res_name=0x%s",
5407 ++ __entry->seq, show_message_version(__entry->h_version),
5408 ++ __entry->h_lockspace, __entry->h_nodeid, __entry->h_length,
5409 ++ show_header_cmd(__entry->h_cmd),
5410 ++ show_message_type(__entry->m_type),
5411 ++ __entry->m_nodeid, __entry->m_pid, __entry->m_lkid,
5412 ++ __entry->m_remid, __entry->m_parent_lkid,
5413 ++ __entry->m_parent_remid, show_lock_flags(__entry->m_exflags),
5414 ++ show_dlm_sb_flags(__entry->m_sbflags),
5415 ++ show_lkb_flags(__entry->m_flags), __entry->m_lvbseq,
5416 ++ __entry->m_hash, __entry->m_status,
5417 ++ show_lock_mode(__entry->m_grmode),
5418 ++ show_lock_mode(__entry->m_rqmode),
5419 ++ show_lock_mode(__entry->m_bastmode),
5420 ++ __entry->m_asts, __entry->m_result,
5421 ++ __print_hex_str(__get_dynamic_array(m_extra),
5422 ++ __get_dynamic_array_len(m_extra)),
5423 ++ __print_hex_str(__get_dynamic_array(res_name),
5424 ++ __get_dynamic_array_len(res_name)))
5425 ++
5426 ++);
5427 ++
5428 ++TRACE_EVENT(dlm_recv_message,
5429 ++
5430 ++ TP_PROTO(uint32_t seq, const struct dlm_message *ms),
5431 ++
5432 ++ TP_ARGS(seq, ms),
5433 ++
5434 ++ TP_STRUCT__entry(
5435 ++ __field(uint32_t, seq)
5436 ++ __field(uint32_t, h_version)
5437 ++ __field(uint32_t, h_lockspace)
5438 ++ __field(uint32_t, h_nodeid)
5439 ++ __field(uint16_t, h_length)
5440 ++ __field(uint8_t, h_cmd)
5441 ++ __field(uint32_t, m_type)
5442 ++ __field(uint32_t, m_nodeid)
5443 ++ __field(uint32_t, m_pid)
5444 ++ __field(uint32_t, m_lkid)
5445 ++ __field(uint32_t, m_remid)
5446 ++ __field(uint32_t, m_parent_lkid)
5447 ++ __field(uint32_t, m_parent_remid)
5448 ++ __field(uint32_t, m_exflags)
5449 ++ __field(uint32_t, m_sbflags)
5450 ++ __field(uint32_t, m_flags)
5451 ++ __field(uint32_t, m_lvbseq)
5452 ++ __field(uint32_t, m_hash)
5453 ++ __field(int32_t, m_status)
5454 ++ __field(int32_t, m_grmode)
5455 ++ __field(int32_t, m_rqmode)
5456 ++ __field(int32_t, m_bastmode)
5457 ++ __field(int32_t, m_asts)
5458 ++ __field(int32_t, m_result)
5459 ++ __dynamic_array(unsigned char, m_extra,
5460 ++ le16_to_cpu(ms->m_header.h_length) - sizeof(*ms))
5461 ++ ),
5462 ++
5463 ++ TP_fast_assign(
5464 ++ __entry->seq = seq;
5465 ++ __entry->h_version = le32_to_cpu(ms->m_header.h_version);
5466 ++ __entry->h_lockspace = le32_to_cpu(ms->m_header.u.h_lockspace);
5467 ++ __entry->h_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
5468 ++ __entry->h_length = le16_to_cpu(ms->m_header.h_length);
5469 ++ __entry->h_cmd = ms->m_header.h_cmd;
5470 ++ __entry->m_type = le32_to_cpu(ms->m_type);
5471 ++ __entry->m_nodeid = le32_to_cpu(ms->m_nodeid);
5472 ++ __entry->m_pid = le32_to_cpu(ms->m_pid);
5473 ++ __entry->m_lkid = le32_to_cpu(ms->m_lkid);
5474 ++ __entry->m_remid = le32_to_cpu(ms->m_remid);
5475 ++ __entry->m_parent_lkid = le32_to_cpu(ms->m_parent_lkid);
5476 ++ __entry->m_parent_remid = le32_to_cpu(ms->m_parent_remid);
5477 ++ __entry->m_exflags = le32_to_cpu(ms->m_exflags);
5478 ++ __entry->m_sbflags = le32_to_cpu(ms->m_sbflags);
5479 ++ __entry->m_flags = le32_to_cpu(ms->m_flags);
5480 ++ __entry->m_lvbseq = le32_to_cpu(ms->m_lvbseq);
5481 ++ __entry->m_hash = le32_to_cpu(ms->m_hash);
5482 ++ __entry->m_status = le32_to_cpu(ms->m_status);
5483 ++ __entry->m_grmode = le32_to_cpu(ms->m_grmode);
5484 ++ __entry->m_rqmode = le32_to_cpu(ms->m_rqmode);
5485 ++ __entry->m_bastmode = le32_to_cpu(ms->m_bastmode);
5486 ++ __entry->m_asts = le32_to_cpu(ms->m_asts);
5487 ++ __entry->m_result = le32_to_cpu(ms->m_result);
5488 ++ memcpy(__get_dynamic_array(m_extra), ms->m_extra,
5489 ++ __get_dynamic_array_len(m_extra));
5490 ++ ),
5491 ++
5492 ++ TP_printk("seq=%u h_version=%s h_lockspace=%u h_nodeid=%u "
5493 ++ "h_length=%u h_cmd=%s m_type=%s m_nodeid=%u "
5494 ++ "m_pid=%u m_lkid=%u m_remid=%u m_parent_lkid=%u "
5495 ++ "m_parent_remid=%u m_exflags=%s m_sbflags=%s m_flags=%s "
5496 ++ "m_lvbseq=%u m_hash=%u m_status=%d m_grmode=%s "
5497 ++ "m_rqmode=%s m_bastmode=%s m_asts=%d m_result=%d "
5498 ++ "m_extra=0x%s",
5499 ++ __entry->seq, show_message_version(__entry->h_version),
5500 ++ __entry->h_lockspace, __entry->h_nodeid, __entry->h_length,
5501 ++ show_header_cmd(__entry->h_cmd),
5502 ++ show_message_type(__entry->m_type),
5503 ++ __entry->m_nodeid, __entry->m_pid, __entry->m_lkid,
5504 ++ __entry->m_remid, __entry->m_parent_lkid,
5505 ++ __entry->m_parent_remid, show_lock_flags(__entry->m_exflags),
5506 ++ show_dlm_sb_flags(__entry->m_sbflags),
5507 ++ show_lkb_flags(__entry->m_flags), __entry->m_lvbseq,
5508 ++ __entry->m_hash, __entry->m_status,
5509 ++ show_lock_mode(__entry->m_grmode),
5510 ++ show_lock_mode(__entry->m_rqmode),
5511 ++ show_lock_mode(__entry->m_bastmode),
5512 ++ __entry->m_asts, __entry->m_result,
5513 ++ __print_hex_str(__get_dynamic_array(m_extra),
5514 ++ __get_dynamic_array_len(m_extra)))
5515 ++
5516 ++);
5517 ++
5518 + TRACE_EVENT(dlm_send,
5519 +
5520 + TP_PROTO(int nodeid, int ret),
5521 +diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
5522 +index e50de0b6b9f84..18dfc5f6a8b72 100644
5523 +--- a/io_uring/uring_cmd.c
5524 ++++ b/io_uring/uring_cmd.c
5525 +@@ -108,7 +108,7 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
5526 + struct file *file = req->file;
5527 + int ret;
5528 +
5529 +- if (!req->file->f_op->uring_cmd)
5530 ++ if (!file->f_op->uring_cmd)
5531 + return -EOPNOTSUPP;
5532 +
5533 + ret = security_uring_cmd(ioucmd);
5534 +@@ -120,6 +120,8 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
5535 + if (ctx->flags & IORING_SETUP_CQE32)
5536 + issue_flags |= IO_URING_F_CQE32;
5537 + if (ctx->flags & IORING_SETUP_IOPOLL) {
5538 ++ if (!file->f_op->uring_cmd_iopoll)
5539 ++ return -EOPNOTSUPP;
5540 + issue_flags |= IO_URING_F_IOPOLL;
5541 + req->iopoll_completed = 0;
5542 + WRITE_ONCE(ioucmd->cookie, NULL);
5543 +diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
5544 +index 7fcbe5d002070..b73169737a01e 100644
5545 +--- a/kernel/bpf/btf.c
5546 ++++ b/kernel/bpf/btf.c
5547 +@@ -4163,6 +4163,7 @@ static int btf_datasec_resolve(struct btf_verifier_env *env,
5548 + struct btf *btf = env->btf;
5549 + u16 i;
5550 +
5551 ++ env->resolve_mode = RESOLVE_TBD;
5552 + for_each_vsi_from(i, v->next_member, v->t, vsi) {
5553 + u32 var_type_id = vsi->type, type_id, type_size = 0;
5554 + const struct btf_type *var_type = btf_type_by_id(env->btf,
5555 +diff --git a/kernel/fork.c b/kernel/fork.c
5556 +index 844dfdc8c639c..a6d243a50be3e 100644
5557 +--- a/kernel/fork.c
5558 ++++ b/kernel/fork.c
5559 +@@ -2928,7 +2928,7 @@ static bool clone3_args_valid(struct kernel_clone_args *kargs)
5560 + * - make the CLONE_DETACHED bit reusable for clone3
5561 + * - make the CSIGNAL bits reusable for clone3
5562 + */
5563 +- if (kargs->flags & (CLONE_DETACHED | CSIGNAL))
5564 ++ if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME))))
5565 + return false;
5566 +
5567 + if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) ==
5568 +diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
5569 +index a6f9bdd956c39..f10f403104e7d 100644
5570 +--- a/kernel/watch_queue.c
5571 ++++ b/kernel/watch_queue.c
5572 +@@ -273,6 +273,7 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
5573 + if (ret < 0)
5574 + goto error;
5575 +
5576 ++ ret = -ENOMEM;
5577 + pages = kcalloc(sizeof(struct page *), nr_pages, GFP_KERNEL);
5578 + if (!pages)
5579 + goto error;
5580 +diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
5581 +index ebc202ffdd8d8..bf61ea4b8132d 100644
5582 +--- a/net/caif/caif_usb.c
5583 ++++ b/net/caif/caif_usb.c
5584 +@@ -134,6 +134,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
5585 + struct usb_device *usbdev;
5586 + int res;
5587 +
5588 ++ if (what == NETDEV_UNREGISTER && dev->reg_state >= NETREG_UNREGISTERED)
5589 ++ return 0;
5590 ++
5591 + /* Check whether we have a NCM device, and find its VID/PID. */
5592 + if (!(dev->dev.parent && dev->dev.parent->driver &&
5593 + strcmp(dev->dev.parent->driver->name, "cdc_ncm") == 0))
5594 +diff --git a/net/core/sock.c b/net/core/sock.c
5595 +index 4dfdcdfd00114..eb0b76acd9df1 100644
5596 +--- a/net/core/sock.c
5597 ++++ b/net/core/sock.c
5598 +@@ -2805,7 +2805,8 @@ static void sk_enter_memory_pressure(struct sock *sk)
5599 + static void sk_leave_memory_pressure(struct sock *sk)
5600 + {
5601 + if (sk->sk_prot->leave_memory_pressure) {
5602 +- sk->sk_prot->leave_memory_pressure(sk);
5603 ++ INDIRECT_CALL_INET_1(sk->sk_prot->leave_memory_pressure,
5604 ++ tcp_leave_memory_pressure, sk);
5605 + } else {
5606 + unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
5607 +
5608 +diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c
5609 +index b22b2c745c76c..69e3317996043 100644
5610 +--- a/net/ipv4/netfilter/nf_tproxy_ipv4.c
5611 ++++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c
5612 +@@ -38,7 +38,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
5613 + hp->source, lport ? lport : hp->dest,
5614 + skb->dev, NF_TPROXY_LOOKUP_LISTENER);
5615 + if (sk2) {
5616 +- inet_twsk_deschedule_put(inet_twsk(sk));
5617 ++ nf_tproxy_twsk_deschedule_put(inet_twsk(sk));
5618 + sk = sk2;
5619 + }
5620 + }
5621 +diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
5622 +index cf26d65ca3893..ebf9175119370 100644
5623 +--- a/net/ipv4/tcp_bpf.c
5624 ++++ b/net/ipv4/tcp_bpf.c
5625 +@@ -186,6 +186,9 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
5626 + if (unlikely(flags & MSG_ERRQUEUE))
5627 + return inet_recv_error(sk, msg, len, addr_len);
5628 +
5629 ++ if (!len)
5630 ++ return 0;
5631 ++
5632 + psock = sk_psock_get(sk);
5633 + if (unlikely(!psock))
5634 + return tcp_recvmsg(sk, msg, len, flags, addr_len);
5635 +@@ -244,6 +247,9 @@ static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
5636 + if (unlikely(flags & MSG_ERRQUEUE))
5637 + return inet_recv_error(sk, msg, len, addr_len);
5638 +
5639 ++ if (!len)
5640 ++ return 0;
5641 ++
5642 + psock = sk_psock_get(sk);
5643 + if (unlikely(!psock))
5644 + return tcp_recvmsg(sk, msg, len, flags, addr_len);
5645 +diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c
5646 +index e5dc91d0e0793..0735d820e413f 100644
5647 +--- a/net/ipv4/udp_bpf.c
5648 ++++ b/net/ipv4/udp_bpf.c
5649 +@@ -68,6 +68,9 @@ static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
5650 + if (unlikely(flags & MSG_ERRQUEUE))
5651 + return inet_recv_error(sk, msg, len, addr_len);
5652 +
5653 ++ if (!len)
5654 ++ return 0;
5655 ++
5656 + psock = sk_psock_get(sk);
5657 + if (unlikely(!psock))
5658 + return sk_udp_recvmsg(sk, msg, len, flags, addr_len);
5659 +diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
5660 +index 47447f0241df6..bee45dfeb1874 100644
5661 +--- a/net/ipv6/ila/ila_xlat.c
5662 ++++ b/net/ipv6/ila/ila_xlat.c
5663 +@@ -477,6 +477,7 @@ int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
5664 +
5665 + rcu_read_lock();
5666 +
5667 ++ ret = -ESRCH;
5668 + ila = ila_lookup_by_params(&xp, ilan);
5669 + if (ila) {
5670 + ret = ila_dump_info(ila,
5671 +diff --git a/net/ipv6/netfilter/nf_tproxy_ipv6.c b/net/ipv6/netfilter/nf_tproxy_ipv6.c
5672 +index 929502e51203b..52f828bb5a83d 100644
5673 +--- a/net/ipv6/netfilter/nf_tproxy_ipv6.c
5674 ++++ b/net/ipv6/netfilter/nf_tproxy_ipv6.c
5675 +@@ -63,7 +63,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
5676 + lport ? lport : hp->dest,
5677 + skb->dev, NF_TPROXY_LOOKUP_LISTENER);
5678 + if (sk2) {
5679 +- inet_twsk_deschedule_put(inet_twsk(sk));
5680 ++ nf_tproxy_twsk_deschedule_put(inet_twsk(sk));
5681 + sk = sk2;
5682 + }
5683 + }
5684 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
5685 +index 7f0f3bcaae031..30ed45b1b57df 100644
5686 +--- a/net/netfilter/nf_conntrack_core.c
5687 ++++ b/net/netfilter/nf_conntrack_core.c
5688 +@@ -96,8 +96,8 @@ static DEFINE_MUTEX(nf_conntrack_mutex);
5689 + #define GC_SCAN_MAX_DURATION msecs_to_jiffies(10)
5690 + #define GC_SCAN_EXPIRED_MAX (64000u / HZ)
5691 +
5692 +-#define MIN_CHAINLEN 8u
5693 +-#define MAX_CHAINLEN (32u - MIN_CHAINLEN)
5694 ++#define MIN_CHAINLEN 50u
5695 ++#define MAX_CHAINLEN (80u - MIN_CHAINLEN)
5696 +
5697 + static struct conntrack_gc_work conntrack_gc_work;
5698 +
5699 +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
5700 +index 733bb56950c14..d095d3c1ceca6 100644
5701 +--- a/net/netfilter/nf_conntrack_netlink.c
5702 ++++ b/net/netfilter/nf_conntrack_netlink.c
5703 +@@ -328,11 +328,12 @@ nla_put_failure:
5704 + }
5705 +
5706 + #ifdef CONFIG_NF_CONNTRACK_MARK
5707 +-static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
5708 ++static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct,
5709 ++ bool dump)
5710 + {
5711 + u32 mark = READ_ONCE(ct->mark);
5712 +
5713 +- if (!mark)
5714 ++ if (!mark && !dump)
5715 + return 0;
5716 +
5717 + if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
5718 +@@ -343,7 +344,7 @@ nla_put_failure:
5719 + return -1;
5720 + }
5721 + #else
5722 +-#define ctnetlink_dump_mark(a, b) (0)
5723 ++#define ctnetlink_dump_mark(a, b, c) (0)
5724 + #endif
5725 +
5726 + #ifdef CONFIG_NF_CONNTRACK_SECMARK
5727 +@@ -548,7 +549,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb,
5728 + static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
5729 + {
5730 + if (ctnetlink_dump_status(skb, ct) < 0 ||
5731 +- ctnetlink_dump_mark(skb, ct) < 0 ||
5732 ++ ctnetlink_dump_mark(skb, ct, true) < 0 ||
5733 + ctnetlink_dump_secctx(skb, ct) < 0 ||
5734 + ctnetlink_dump_id(skb, ct) < 0 ||
5735 + ctnetlink_dump_use(skb, ct) < 0 ||
5736 +@@ -831,8 +832,7 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
5737 + }
5738 +
5739 + #ifdef CONFIG_NF_CONNTRACK_MARK
5740 +- if (events & (1 << IPCT_MARK) &&
5741 +- ctnetlink_dump_mark(skb, ct) < 0)
5742 ++ if (ctnetlink_dump_mark(skb, ct, events & (1 << IPCT_MARK)))
5743 + goto nla_put_failure;
5744 + #endif
5745 + nlmsg_end(skb, nlh);
5746 +@@ -2735,7 +2735,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
5747 + goto nla_put_failure;
5748 +
5749 + #ifdef CONFIG_NF_CONNTRACK_MARK
5750 +- if (ctnetlink_dump_mark(skb, ct) < 0)
5751 ++ if (ctnetlink_dump_mark(skb, ct, true) < 0)
5752 + goto nla_put_failure;
5753 + #endif
5754 + if (ctnetlink_dump_labels(skb, ct) < 0)
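
The new dump flag decouples "is the mark worth emitting" from "is the mark
nonzero": full dumps and glue builds pass true and always include CTA_MARK,
while the event path passes the IPCT_MARK bit, so an event for a mark that
was cleared to 0 finally carries the attribute instead of omitting it. In
outline:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative reduction of the new skip rule, not the kernel code. */
    static bool emit_cta_mark(uint32_t mark, bool dump)
    {
        return mark || dump;   /* skip only a zero mark nobody asked for */
    }
    /* Dump sites pass dump=true; the event path passes (events & IPCT_MARK),
     * so "mark reset to 0" events now include CTA_MARK. */
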
5755 +diff --git a/net/netfilter/nft_last.c b/net/netfilter/nft_last.c
5756 +index bb15a55dad5c0..eaa54964cf23c 100644
5757 +--- a/net/netfilter/nft_last.c
5758 ++++ b/net/netfilter/nft_last.c
5759 +@@ -104,11 +104,15 @@ static void nft_last_destroy(const struct nft_ctx *ctx,
5760 + static int nft_last_clone(struct nft_expr *dst, const struct nft_expr *src)
5761 + {
5762 + struct nft_last_priv *priv_dst = nft_expr_priv(dst);
5763 ++ struct nft_last_priv *priv_src = nft_expr_priv(src);
5764 +
5765 + priv_dst->last = kzalloc(sizeof(*priv_dst->last), GFP_ATOMIC);
5766 + if (!priv_dst->last)
5767 + return -ENOMEM;
5768 +
5769 ++ priv_dst->last->set = priv_src->last->set;
5770 ++ priv_dst->last->jiffies = priv_src->last->jiffies;
5771 ++
5772 + return 0;
5773 + }
5774 +
5775 +diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
5776 +index e6b0df68feeaf..410a5fcf88309 100644
5777 +--- a/net/netfilter/nft_quota.c
5778 ++++ b/net/netfilter/nft_quota.c
5779 +@@ -235,12 +235,16 @@ static void nft_quota_destroy(const struct nft_ctx *ctx,
5780 + static int nft_quota_clone(struct nft_expr *dst, const struct nft_expr *src)
5781 + {
5782 + struct nft_quota *priv_dst = nft_expr_priv(dst);
5783 ++ struct nft_quota *priv_src = nft_expr_priv(src);
5784 ++
5785 ++ priv_dst->quota = priv_src->quota;
5786 ++ priv_dst->flags = priv_src->flags;
5787 +
5788 + priv_dst->consumed = kmalloc(sizeof(*priv_dst->consumed), GFP_ATOMIC);
5789 + if (!priv_dst->consumed)
5790 + return -ENOMEM;
5791 +
5792 +- atomic64_set(priv_dst->consumed, 0);
5793 ++ *priv_dst->consumed = *priv_src->consumed;
5794 +
5795 + return 0;
5796 + }
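
The nft_last and nft_quota hunks fix the same defect: when an expression is
cloned (for example, for an element of an anonymous set), the clone allocated
fresh per-expression state but copied none of the source's values, so the
copy forgot that it had ever matched (last) or how much had been consumed
(quota). Sketched with a hypothetical stateful expression; the kernel code
uses GFP_ATOMIC because the clone can run in a context that cannot sleep:

    #include <errno.h>
    #include <stdlib.h>

    /* Hypothetical expression private data, userspace rendering only. */
    struct my_state { unsigned long long consumed; };
    struct my_priv  { unsigned int limit; struct my_state *state; };

    static int my_clone(struct my_priv *dst, const struct my_priv *src)
    {
        dst->state = malloc(sizeof(*dst->state));
        if (!dst->state)
            return -ENOMEM;

        dst->limit  = src->limit;        /* copy the config ...        */
        *dst->state = *src->state;       /* ... and the runtime state  */
        return 0;
    }
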
5797 +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
5798 +index 348bf561bc9fb..b9264e730fd93 100644
5799 +--- a/net/nfc/netlink.c
5800 ++++ b/net/nfc/netlink.c
5801 +@@ -1446,8 +1446,8 @@ static int nfc_se_io(struct nfc_dev *dev, u32 se_idx,
5802 + return rc;
5803 +
5804 + error:
5805 +- kfree(cb_context);
5806 + device_unlock(&dev->dev);
5807 ++ kfree(cb_context);
5808 + return rc;
5809 + }
5810 +
5811 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
5812 +index d9413d43b1045..e8018b0fb7676 100644
5813 +--- a/net/smc/af_smc.c
5814 ++++ b/net/smc/af_smc.c
5815 +@@ -2644,16 +2644,14 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
5816 + {
5817 + struct sock *sk = sock->sk;
5818 + struct smc_sock *smc;
5819 +- int rc = -EPIPE;
5820 ++ int rc;
5821 +
5822 + smc = smc_sk(sk);
5823 + lock_sock(sk);
5824 +- if ((sk->sk_state != SMC_ACTIVE) &&
5825 +- (sk->sk_state != SMC_APPCLOSEWAIT1) &&
5826 +- (sk->sk_state != SMC_INIT))
5827 +- goto out;
5828 +
5829 ++ /* SMC does not support connect with fastopen */
5830 + if (msg->msg_flags & MSG_FASTOPEN) {
5831 ++ /* not connected yet, fallback */
5832 + if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
5833 + rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
5834 + if (rc)
5835 +@@ -2662,6 +2660,11 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
5836 + rc = -EINVAL;
5837 + goto out;
5838 + }
5839 ++ } else if ((sk->sk_state != SMC_ACTIVE) &&
5840 ++ (sk->sk_state != SMC_APPCLOSEWAIT1) &&
5841 ++ (sk->sk_state != SMC_INIT)) {
5842 ++ rc = -EPIPE;
5843 ++ goto out;
5844 + }
5845 +
5846 + if (smc->use_fallback) {
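
Reordering the checks lets MSG_FASTOPEN be classified before the generic
state gate: a fastopen send on a fresh socket (SMC_INIT, no nonblocking
connect in flight) falls back to plain TCP, any other fastopen attempt is
rejected with -EINVAL, and -EPIPE is reserved for ordinary sends in the
wrong state. Collapsed into one decision (simplified types, illustrative
only, not the kernel function):

    #include <errno.h>

    enum state { INIT, ACTIVE, APPCLOSEWAIT1, OTHER };

    static int classify(int fastopen, enum state st, int connect_nonblock)
    {
        if (fastopen)
            return (st == INIT && !connect_nonblock)
                   ? 0            /* switch to fallback; TCP handles TFO */
                   : -EINVAL;
        if (st != ACTIVE && st != APPCLOSEWAIT1 && st != INIT)
            return -EPIPE;
        return 0;                 /* proceed with the send */
    }
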
5847 +diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
5848 +index 24577d1b99079..9ee32e06f877e 100644
5849 +--- a/net/sunrpc/svc.c
5850 ++++ b/net/sunrpc/svc.c
5851 +@@ -787,6 +787,7 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
5852 + static int
5853 + svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
5854 + {
5855 ++ struct svc_rqst *rqstp;
5856 + struct task_struct *task;
5857 + unsigned int state = serv->sv_nrthreads-1;
5858 +
5859 +@@ -795,7 +796,10 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
5860 + task = choose_victim(serv, pool, &state);
5861 + if (task == NULL)
5862 + break;
5863 +- kthread_stop(task);
5864 ++ rqstp = kthread_data(task);
5865 ++ /* Did we lose a race to svo_function threadfn? */
5866 ++ if (kthread_stop(task) == -EINTR)
5867 ++ svc_exit_thread(rqstp);
5868 + nrservs++;
5869 + } while (nrservs < 0);
5870 + return 0;
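
kthread_stop() is documented to return -EINTR when wake_up_process() was
never called, i.e. the thread function never ran. Since an svc thread
normally releases its own svc_rqst via svc_exit_thread() on the way out,
losing that race used to leak the rqst; the caller now fetches it with
kthread_data() first and cleans up itself:

    /* Illustrative outline of the handled race (kernel APIs): */
    rqstp = kthread_data(task);          /* payload set at create time */
    if (kthread_stop(task) == -EINTR)    /* threadfn never ran ...     */
        svc_exit_thread(rqstp);          /* ... so do its cleanup here */
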
5871 +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
5872 +index 6c593788dc250..a7cc4f9faac28 100644
5873 +--- a/net/tls/tls_device.c
5874 ++++ b/net/tls/tls_device.c
5875 +@@ -508,6 +508,8 @@ handle_error:
5876 + zc_pfrag.offset = iter_offset.offset;
5877 + zc_pfrag.size = copy;
5878 + tls_append_frag(record, &zc_pfrag, copy);
5879 ++
5880 ++ iter_offset.offset += copy;
5881 + } else if (copy) {
5882 + copy = min_t(size_t, copy, pfrag->size - pfrag->offset);
5883 +
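
In the zerocopy branch of the device-offload send loop, the cursor into the
caller's pages has to advance by however many bytes were just appended;
without the iter_offset.offset += copy, each pass appears to re-append the
same region of the source. The invariant, shaped as a generic copy loop with
hypothetical helpers:

    /* Copy-loop invariant (hypothetical helpers): consume => advance. */
    while (remaining) {
        size_t copy = remaining < frag_space ? remaining : frag_space;

        append_frag(record, src + offset, copy);
        offset    += copy;       /* the advance this fix restores */
        remaining -= copy;
    }
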
5884 +diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
5885 +index 3735cb00905df..b32c112984dd9 100644
5886 +--- a/net/tls/tls_main.c
5887 ++++ b/net/tls/tls_main.c
5888 +@@ -405,13 +405,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
5889 + rc = -EINVAL;
5890 + goto out;
5891 + }
5892 +- lock_sock(sk);
5893 + memcpy(crypto_info_aes_gcm_128->iv,
5894 + cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
5895 + TLS_CIPHER_AES_GCM_128_IV_SIZE);
5896 + memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq,
5897 + TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
5898 +- release_sock(sk);
5899 + if (copy_to_user(optval,
5900 + crypto_info_aes_gcm_128,
5901 + sizeof(*crypto_info_aes_gcm_128)))
5902 +@@ -429,13 +427,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
5903 + rc = -EINVAL;
5904 + goto out;
5905 + }
5906 +- lock_sock(sk);
5907 + memcpy(crypto_info_aes_gcm_256->iv,
5908 + cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
5909 + TLS_CIPHER_AES_GCM_256_IV_SIZE);
5910 + memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq,
5911 + TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
5912 +- release_sock(sk);
5913 + if (copy_to_user(optval,
5914 + crypto_info_aes_gcm_256,
5915 + sizeof(*crypto_info_aes_gcm_256)))
5916 +@@ -451,13 +447,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
5917 + rc = -EINVAL;
5918 + goto out;
5919 + }
5920 +- lock_sock(sk);
5921 + memcpy(aes_ccm_128->iv,
5922 + cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE,
5923 + TLS_CIPHER_AES_CCM_128_IV_SIZE);
5924 + memcpy(aes_ccm_128->rec_seq, cctx->rec_seq,
5925 + TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
5926 +- release_sock(sk);
5927 + if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128)))
5928 + rc = -EFAULT;
5929 + break;
5930 +@@ -472,13 +466,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
5931 + rc = -EINVAL;
5932 + goto out;
5933 + }
5934 +- lock_sock(sk);
5935 + memcpy(chacha20_poly1305->iv,
5936 + cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE,
5937 + TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE);
5938 + memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq,
5939 + TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
5940 +- release_sock(sk);
5941 + if (copy_to_user(optval, chacha20_poly1305,
5942 + sizeof(*chacha20_poly1305)))
5943 + rc = -EFAULT;
5944 +@@ -493,13 +485,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
5945 + rc = -EINVAL;
5946 + goto out;
5947 + }
5948 +- lock_sock(sk);
5949 + memcpy(sm4_gcm_info->iv,
5950 + cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE,
5951 + TLS_CIPHER_SM4_GCM_IV_SIZE);
5952 + memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq,
5953 + TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);
5954 +- release_sock(sk);
5955 + if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info)))
5956 + rc = -EFAULT;
5957 + break;
5958 +@@ -513,13 +503,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
5959 + rc = -EINVAL;
5960 + goto out;
5961 + }
5962 +- lock_sock(sk);
5963 + memcpy(sm4_ccm_info->iv,
5964 + cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE,
5965 + TLS_CIPHER_SM4_CCM_IV_SIZE);
5966 + memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq,
5967 + TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE);
5968 +- release_sock(sk);
5969 + if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info)))
5970 + rc = -EFAULT;
5971 + break;
5972 +@@ -535,13 +523,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
5973 + rc = -EINVAL;
5974 + goto out;
5975 + }
5976 +- lock_sock(sk);
5977 + memcpy(crypto_info_aria_gcm_128->iv,
5978 + cctx->iv + TLS_CIPHER_ARIA_GCM_128_SALT_SIZE,
5979 + TLS_CIPHER_ARIA_GCM_128_IV_SIZE);
5980 + memcpy(crypto_info_aria_gcm_128->rec_seq, cctx->rec_seq,
5981 + TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE);
5982 +- release_sock(sk);
5983 + if (copy_to_user(optval,
5984 + crypto_info_aria_gcm_128,
5985 + sizeof(*crypto_info_aria_gcm_128)))
5986 +@@ -559,13 +545,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
5987 + rc = -EINVAL;
5988 + goto out;
5989 + }
5990 +- lock_sock(sk);
5991 + memcpy(crypto_info_aria_gcm_256->iv,
5992 + cctx->iv + TLS_CIPHER_ARIA_GCM_256_SALT_SIZE,
5993 + TLS_CIPHER_ARIA_GCM_256_IV_SIZE);
5994 + memcpy(crypto_info_aria_gcm_256->rec_seq, cctx->rec_seq,
5995 + TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE);
5996 +- release_sock(sk);
5997 + if (copy_to_user(optval,
5998 + crypto_info_aria_gcm_256,
5999 + sizeof(*crypto_info_aria_gcm_256)))
6000 +@@ -614,11 +598,9 @@ static int do_tls_getsockopt_no_pad(struct sock *sk, char __user *optval,
6001 + if (len < sizeof(value))
6002 + return -EINVAL;
6003 +
6004 +- lock_sock(sk);
6005 + value = -EINVAL;
6006 + if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
6007 + value = ctx->rx_no_pad;
6008 +- release_sock(sk);
6009 + if (value < 0)
6010 + return value;
6011 +
6012 +@@ -635,6 +617,8 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
6013 + {
6014 + int rc = 0;
6015 +
6016 ++ lock_sock(sk);
6017 ++
6018 + switch (optname) {
6019 + case TLS_TX:
6020 + case TLS_RX:
6021 +@@ -651,6 +635,9 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
6022 + rc = -ENOPROTOOPT;
6023 + break;
6024 + }
6025 ++
6026 ++ release_sock(sk);
6027 ++
6028 + return rc;
6029 + }
6030 +
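
Every removed lock_sock()/release_sock() pair sat deep inside a per-cipher
branch, after do_tls_getsockopt_conf() had already read the ctx configuration
unlocked, and the no_pad path had the same shape, so most of getsockopt raced
against a concurrent setsockopt. The lock now spans the whole switch in
do_tls_getsockopt(). Its shape, as a simplified sketch rather than the exact
kernel signatures:

    /* One coarse critical section at the dispatcher instead of many
     * narrow ones inside the leaves. */
    static int do_getsockopt(struct sock *sk, int optname)
    {
        int rc;

        lock_sock(sk);                   /* covers every branch below */
        switch (optname) {
        case TLS_TX:
        case TLS_RX:
            rc = getsockopt_conf(sk);    /* no locking inside anymore */
            break;
        case TLS_RX_EXPECT_NO_PAD:
            rc = getsockopt_no_pad(sk);
            break;
        default:
            rc = -ENOPROTOOPT;
            break;
        }
        release_sock(sk);
        return rc;
    }
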
6031 +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
6032 +index 38dcd9b401027..992092aeebad9 100644
6033 +--- a/net/tls/tls_sw.c
6034 ++++ b/net/tls/tls_sw.c
6035 +@@ -2114,7 +2114,7 @@ recv_end:
6036 + else
6037 + err = process_rx_list(ctx, msg, &control, 0,
6038 + async_copy_bytes, is_peek);
6039 +- decrypted = max(err, 0);
6040 ++ decrypted += max(err, 0);
6041 + }
6042 +
6043 + copied += decrypted;
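
Here `decrypted` already holds the bytes handed to userspace while the async
decryption completed, and process_rx_list() returns the additional bytes
drained from the receive list; assigning instead of accumulating made
recvmsg() report only the second number, dropping the async portion from the
return value. Illustratively:

    /* Return-value accounting after the fix: report both parts. */
    static long total_copied(long async_bytes, long rx_list_ret)
    {
        return async_bytes + (rx_list_ret > 0 ? rx_list_ret : 0);
    }
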
6044 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
6045 +index f0c2293f1d3b8..7d17601ceee79 100644
6046 +--- a/net/unix/af_unix.c
6047 ++++ b/net/unix/af_unix.c
6048 +@@ -2104,7 +2104,8 @@ out:
6049 + #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
6050 +
6051 + #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
6052 +-static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other)
6053 ++static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
6054 ++ struct scm_cookie *scm, bool fds_sent)
6055 + {
6056 + struct unix_sock *ousk = unix_sk(other);
6057 + struct sk_buff *skb;
6058 +@@ -2115,6 +2116,11 @@ static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other
6059 + if (!skb)
6060 + return err;
6061 +
6062 ++ err = unix_scm_to_skb(scm, skb, !fds_sent);
6063 ++ if (err < 0) {
6064 ++ kfree_skb(skb);
6065 ++ return err;
6066 ++ }
6067 + skb_put(skb, 1);
6068 + err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
6069 +
6070 +@@ -2242,7 +2248,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
6071 +
6072 + #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
6073 + if (msg->msg_flags & MSG_OOB) {
6074 +- err = queue_oob(sock, msg, other);
6075 ++ err = queue_oob(sock, msg, other, &scm, fds_sent);
6076 + if (err)
6077 + goto out_err;
6078 + sent++;
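
queue_oob() previously built the one-byte MSG_OOB skb without attaching the
message's ancillary data, so credentials (and any file descriptors, which are
attached only once per sendmsg, hence the !fds_sent argument) were dropped on
that path; the new error unwind also frees the skb instead of leaking it.
Order of operations in the fixed function, illustratively:

    skb = sock_alloc_send_skb(/* ... */);           /* 1-byte OOB skb    */
    if (!skb)
        return err;

    err = unix_scm_to_skb(scm, skb, !fds_sent);     /* creds + fds once  */
    if (err < 0) {
        kfree_skb(skb);                             /* no half-built skb */
        return err;
    }

    skb_put(skb, 1);                                /* the OOB byte      */
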
6079 +diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c
6080 +index e9bf155139612..2f9d8271c6ec7 100644
6081 +--- a/net/unix/unix_bpf.c
6082 ++++ b/net/unix/unix_bpf.c
6083 +@@ -54,6 +54,9 @@ static int unix_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
6084 + struct sk_psock *psock;
6085 + int copied;
6086 +
6087 ++ if (!len)
6088 ++ return 0;
6089 ++
6090 + psock = sk_psock_get(sk);
6091 + if (unlikely(!psock))
6092 + return __unix_recvmsg(sk, msg, len, flags);
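
The sockmap recvmsg paths gain an early return for zero-length reads (the
udp_bpf hunk whose tail opens this series adds the same guard). A 0-byte
recvmsg has nothing to copy, and letting it reach the psock receive loop
could leave the call waiting with nothing to deliver; returning 0 up front
matches ordinary socket semantics:

    /* The added guard, in isolation: nothing to copy, succeed now
     * rather than entering the psock wait loop. */
    if (!len)
        return 0;
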
6093 +diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py
6094 +index 217d21abc86e8..36c920e713137 100755
6095 +--- a/scripts/checkkconfigsymbols.py
6096 ++++ b/scripts/checkkconfigsymbols.py
6097 +@@ -115,7 +115,7 @@ def parse_options():
6098 + return args
6099 +
6100 +
6101 +-def main():
6102 ++def print_undefined_symbols():
6103 + """Main function of this module."""
6104 + args = parse_options()
6105 +
6106 +@@ -467,5 +467,16 @@ def parse_kconfig_file(kfile):
6107 + return defined, references
6108 +
6109 +
6110 ++def main():
6111 ++ try:
6112 ++ print_undefined_symbols()
6113 ++ except BrokenPipeError:
6114 ++ # Python flushes standard streams on exit; redirect remaining output
6115 ++ # to devnull to avoid another BrokenPipeError at shutdown
6116 ++ devnull = os.open(os.devnull, os.O_WRONLY)
6117 ++ os.dup2(devnull, sys.stdout.fileno())
6118 ++ sys.exit(1) # Python exits with error code 1 on EPIPE
6119 ++
6120 ++
6121 + if __name__ == "__main__":
6122 + main()
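
When stdout is a pipe whose reader exits early (checkkconfigsymbols.py piped
into head, say), the pending writes raise BrokenPipeError, and Python's
interpreter-shutdown flush of stdout would raise it a second time; pointing
stdout at /dev/null before exiting avoids the second traceback, and status 1
matches what Python itself uses for an EPIPE death. The same main() wrapper
is applied to run-clang-tools.py and diffconfig below. The underlying failure
mode as a C analogue (C only sees EPIPE once SIGPIPE is ignored; by default
the signal would kill the process outright):

    /* C analogue of the handled failure; try: ./a.out | head -n1 */
    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        signal(SIGPIPE, SIG_IGN);   /* report EPIPE instead of dying */

        for (int i = 0; ; i++) {
            if (printf("line %d\n", i) < 0 || fflush(stdout) == EOF) {
                if (errno == EPIPE)
                    exit(1);        /* reader is gone; leave quietly */
                perror("stdout");
                exit(2);
            }
        }
    }
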
6123 +diff --git a/scripts/clang-tools/run-clang-tools.py b/scripts/clang-tools/run-clang-tools.py
6124 +index 56f2ec8f0f40a..3266708a86586 100755
6125 +--- a/scripts/clang-tools/run-clang-tools.py
6126 ++++ b/scripts/clang-tools/run-clang-tools.py
6127 +@@ -61,14 +61,21 @@ def run_analysis(entry):
6128 +
6129 +
6130 + def main():
6131 +- args = parse_arguments()
6132 ++ try:
6133 ++ args = parse_arguments()
6134 +
6135 +- lock = multiprocessing.Lock()
6136 +- pool = multiprocessing.Pool(initializer=init, initargs=(lock, args))
6137 +- # Read JSON data into the datastore variable
6138 +- with open(args.path, "r") as f:
6139 +- datastore = json.load(f)
6140 +- pool.map(run_analysis, datastore)
6141 ++ lock = multiprocessing.Lock()
6142 ++ pool = multiprocessing.Pool(initializer=init, initargs=(lock, args))
6143 ++ # Read JSON data into the datastore variable
6144 ++ with open(args.path, "r") as f:
6145 ++ datastore = json.load(f)
6146 ++ pool.map(run_analysis, datastore)
6147 ++ except BrokenPipeError:
6148 ++ # Python flushes standard streams on exit; redirect remaining output
6149 ++ # to devnull to avoid another BrokenPipeError at shutdown
6150 ++ devnull = os.open(os.devnull, os.O_WRONLY)
6151 ++ os.dup2(devnull, sys.stdout.fileno())
6152 ++ sys.exit(1) # Python exits with error code 1 on EPIPE
6153 +
6154 +
6155 + if __name__ == "__main__":
6156 +diff --git a/scripts/diffconfig b/scripts/diffconfig
6157 +index d5da5fa05d1d3..43f0f3d273ae7 100755
6158 +--- a/scripts/diffconfig
6159 ++++ b/scripts/diffconfig
6160 +@@ -65,7 +65,7 @@ def print_config(op, config, value, new_value):
6161 + else:
6162 + print(" %s %s -> %s" % (config, value, new_value))
6163 +
6164 +-def main():
6165 ++def show_diff():
6166 + global merge_style
6167 +
6168 + # parse command line args
6169 +@@ -129,4 +129,16 @@ def main():
6170 + for config in new:
6171 + print_config("+", config, None, b[config])
6172 +
6173 +-main()
6174 ++def main():
6175 ++ try:
6176 ++ show_diff()
6177 ++ except BrokenPipeError:
6178 ++ # Python flushes standard streams on exit; redirect remaining output
6179 ++ # to devnull to avoid another BrokenPipeError at shutdown
6180 ++ devnull = os.open(os.devnull, os.O_WRONLY)
6181 ++ os.dup2(devnull, sys.stdout.fileno())
6182 ++ sys.exit(1) # Python exits with error code 1 on EPIPE
6183 ++
6184 ++
6185 ++if __name__ == '__main__':
6186 ++ main()
6187 +diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
6188 +index e2ce5f294cbd4..333d8941ce4de 100644
6189 +--- a/tools/perf/builtin-inject.c
6190 ++++ b/tools/perf/builtin-inject.c
6191 +@@ -538,6 +538,7 @@ static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
6192 + dso->hit = 1;
6193 + }
6194 + dso__put(dso);
6195 ++ perf_event__repipe(tool, event, sample, machine);
6196 + return 0;
6197 + }
6198 +
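
The buildid MMAP2 handler inspected the event to mark the DSO as hit but
never forwarded it, so mmap2 records vanished from the injected output; the
added perf_event__repipe() call restores pass-through. The contract, reduced
to a sketch with hypothetical types rather than the perf API:

    /* Every handler in the inject pipeline is a filter: it may inspect
     * or rewrite the event, but it must still forward it. */
    static int handle_mmap2(struct tool *tool, union event *ev)
    {
        mark_dso_hit(ev);             /* side effect only      */
        return repipe(tool, ev);      /* and always pass it on */
    }
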
6199 +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
6200 +index 978fdc60b4e84..f6427e3a47421 100644
6201 +--- a/tools/perf/builtin-stat.c
6202 ++++ b/tools/perf/builtin-stat.c
6203 +@@ -528,12 +528,7 @@ static int enable_counters(void)
6204 + return err;
6205 + }
6206 +
6207 +- /*
6208 +- * We need to enable counters only if:
6209 +- * - we don't have tracee (attaching to task or cpu)
6210 +- * - we have initial delay configured
6211 +- */
6212 +- if (!target__none(&target)) {
6213 ++ if (!target__enable_on_exec(&target)) {
6214 + if (!all_counters_use_bpf)
6215 + evlist__enable(evsel_list);
6216 + }
6217 +@@ -906,7 +901,7 @@ try_again_reset:
6218 + return err;
6219 + }
6220 +
6221 +- if (stat_config.initial_delay) {
6222 ++ if (target.initial_delay) {
6223 + pr_info(EVLIST_DISABLED_MSG);
6224 + } else {
6225 + err = enable_counters();
6226 +@@ -918,8 +913,8 @@ try_again_reset:
6227 + if (forks)
6228 + evlist__start_workload(evsel_list);
6229 +
6230 +- if (stat_config.initial_delay > 0) {
6231 +- usleep(stat_config.initial_delay * USEC_PER_MSEC);
6232 ++ if (target.initial_delay > 0) {
6233 ++ usleep(target.initial_delay * USEC_PER_MSEC);
6234 + err = enable_counters();
6235 + if (err)
6236 + return -1;
6237 +@@ -1243,7 +1238,7 @@ static struct option stat_options[] = {
6238 + "aggregate counts per thread", AGGR_THREAD),
6239 + OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
6240 + "aggregate counts per numa node", AGGR_NODE),
6241 +- OPT_INTEGER('D', "delay", &stat_config.initial_delay,
6242 ++ OPT_INTEGER('D', "delay", &target.initial_delay,
6243 + "ms to wait before starting measurement after program start (-1: start with events disabled)"),
6244 + OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
6245 + "Only print computed metrics. No raw values", enable_metric_only),
6246 +diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
6247 +index 8ec8bb4a99129..b63b3a3129919 100644
6248 +--- a/tools/perf/util/stat.c
6249 ++++ b/tools/perf/util/stat.c
6250 +@@ -583,11 +583,7 @@ int create_perf_stat_counter(struct evsel *evsel,
6251 + if (evsel__is_group_leader(evsel)) {
6252 + attr->disabled = 1;
6253 +
6254 +- /*
6255 +- * In case of initial_delay we enable tracee
6256 +- * events manually.
6257 +- */
6258 +- if (target__none(target) && !config->initial_delay)
6259 ++ if (target__enable_on_exec(target))
6260 + attr->enable_on_exec = 1;
6261 + }
6262 +
6263 +diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
6264 +index 35c940d7f29cd..05c5125d7f419 100644
6265 +--- a/tools/perf/util/stat.h
6266 ++++ b/tools/perf/util/stat.h
6267 +@@ -145,7 +145,6 @@ struct perf_stat_config {
6268 + FILE *output;
6269 + unsigned int interval;
6270 + unsigned int timeout;
6271 +- int initial_delay;
6272 + unsigned int unit_width;
6273 + unsigned int metric_only_len;
6274 + int times;
6275 +diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h
6276 +index daec6cba500d4..880f1af7f6ad6 100644
6277 +--- a/tools/perf/util/target.h
6278 ++++ b/tools/perf/util/target.h
6279 +@@ -18,6 +18,7 @@ struct target {
6280 + bool per_thread;
6281 + bool use_bpf;
6282 + bool hybrid;
6283 ++ int initial_delay;
6284 + const char *attr_map;
6285 + };
6286 +
6287 +@@ -72,6 +73,17 @@ static inline bool target__none(struct target *target)
6288 + return !target__has_task(target) && !target__has_cpu(target);
6289 + }
6290 +
6291 ++static inline bool target__enable_on_exec(struct target *target)
6292 ++{
6293 ++ /*
6294 ++ * Normally enable_on_exec should be set if:
6295 ++ * 1) The tracee process is forked (not attaching to existed task or cpu).
6296 ++ * 2) And initial_delay is not configured.
6297 ++ * Otherwise, we enable tracee events manually.
6298 ++ */
6299 ++ return target__none(target) && !target->initial_delay;
6300 ++}
6301 ++
6302 + static inline bool target__has_per_thread(struct target *target)
6303 + {
6304 + return target->system_wide && target->per_thread;
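
The last four hunks are one refactor: initial_delay moves from
perf_stat_config into struct target, and the repeated "forked workload and no
delay" tests are centralized in the new target__enable_on_exec().
enable_on_exec means the kernel arms the counters exactly when the forked
workload execs, which is only correct when perf itself forked the workload
and no -D delay was requested; otherwise (attach by pid/cpu, or any -D value,
including -D -1 for "start disabled") the counters are enabled manually. A
self-contained rendering of the decision, with simplified types:

    #include <stdbool.h>
    #include <stdio.h>

    struct target { bool has_task, has_cpu; int initial_delay; };

    static bool enable_on_exec(const struct target *t)
    {
        bool forked = !t->has_task && !t->has_cpu;   /* target__none() */
        return forked && !t->initial_delay;
    }

    int main(void)
    {
        struct target cases[] = {
            { false, false, 0   },  /* forked workload, no delay -> on exec */
            { false, false, 100 },  /* -D 100: enable manually after sleep  */
            { true,  false, 0   },  /* -p PID: enable manually              */
        };
        for (unsigned int i = 0; i < sizeof(cases) / sizeof(*cases); i++)
            printf("case %u: enable_on_exec=%d\n", i,
                   enable_on_exec(&cases[i]));
        return 0;
    }
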
6305 +diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
6306 +index 924ecb3f1f737..dd40d9f6f2599 100755
6307 +--- a/tools/testing/selftests/netfilter/nft_nat.sh
6308 ++++ b/tools/testing/selftests/netfilter/nft_nat.sh
6309 +@@ -404,6 +404,8 @@ EOF
6310 + echo SERVER-$family | ip netns exec "$ns1" timeout 5 socat -u STDIN TCP-LISTEN:2000 &
6311 + sc_s=$!
6312 +
6313 ++ sleep 1
6314 ++
6315 + result=$(ip netns exec "$ns0" timeout 1 socat TCP:$daddr:2000 STDOUT)
6316 +
6317 + if [ "$result" = "SERVER-inet" ];then