From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.14 commit in: /
Date: Thu, 30 Sep 2021 10:48:23
Message-Id: 1632998885.9b265515473c671205e9fffa1f4c306cd5232d79.mpagano@gentoo
commit: 9b265515473c671205e9fffa1f4c306cd5232d79
Author: Mike Pagano <mpagano@gentoo.org>
AuthorDate: Thu Sep 30 10:48:05 2021 +0000
Commit: Mike Pagano <mpagano@gentoo.org>
CommitDate: Thu Sep 30 10:48:05 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9b265515

Linux patch 5.14.9

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

 0000_README | 4 +
 1008_linux-5.14.9.patch | 6265 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6269 insertions(+)

diff --git a/0000_README b/0000_README
index dcc9f9a..21444f8 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch: 1007_linux-5.14.8.patch
 From: http://www.kernel.org
 Desc: Linux 5.14.8

+Patch: 1008_linux-5.14.9.patch
+From: http://www.kernel.org
+Desc: Linux 5.14.9
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1008_linux-5.14.9.patch b/1008_linux-5.14.9.patch
new file mode 100644
index 0000000..e5d16b9
--- /dev/null
+++ b/1008_linux-5.14.9.patch
@@ -0,0 +1,6265 @@
+diff --git a/Makefile b/Makefile
+index d6b4737194b88..50c17e63c54ef 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 14
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Opossums on Parade
+
+diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
+index 0fab5ac907758..c9cb554fbe54c 100644
+--- a/arch/alpha/include/asm/io.h
++++ b/arch/alpha/include/asm/io.h
+@@ -60,7 +60,7 @@ extern inline void set_hae(unsigned long new_hae)
+ * Change virtual addresses to physical addresses and vv.
+ */
+ #ifdef USE_48_BIT_KSEG
+-static inline unsigned long virt_to_phys(void *address)
++static inline unsigned long virt_to_phys(volatile void *address)
+ {
+ return (unsigned long)address - IDENT_ADDR;
+ }
+@@ -70,7 +70,7 @@ static inline void * phys_to_virt(unsigned long address)
+ return (void *) (address + IDENT_ADDR);
+ }
+ #else
+-static inline unsigned long virt_to_phys(void *address)
++static inline unsigned long virt_to_phys(volatile void *address)
+ {
+ unsigned long phys = (unsigned long)address;
+
+@@ -106,7 +106,7 @@ static inline void * phys_to_virt(unsigned long address)
+ extern unsigned long __direct_map_base;
+ extern unsigned long __direct_map_size;
+
+-static inline unsigned long __deprecated virt_to_bus(void *address)
++static inline unsigned long __deprecated virt_to_bus(volatile void *address)
+ {
+ unsigned long phys = virt_to_phys(address);
+ unsigned long bus = phys + __direct_map_base;
+diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
+index 89faca0e740d0..bfa58409a4d4d 100644
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -525,6 +525,11 @@ alternative_endif
+ #define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
+ #endif
+
++#ifdef CONFIG_KASAN_HW_TAGS
++#define EXPORT_SYMBOL_NOHWKASAN(name)
++#else
++#define EXPORT_SYMBOL_NOHWKASAN(name) EXPORT_SYMBOL_NOKASAN(name)
++#endif
+ /*
+ * Emit a 64-bit absolute little endian symbol reference in a way that
+ * ensures that it will be resolved at build time, even when building a
+diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
+index 58c7f80f55961..c724a288a412d 100644
+--- a/arch/arm64/include/asm/mte.h
++++ b/arch/arm64/include/asm/mte.h
+@@ -105,11 +105,17 @@ void mte_check_tfsr_el1(void);
+
+ static inline void mte_check_tfsr_entry(void)
+ {
++ if (!system_supports_mte())
++ return;
++
+ mte_check_tfsr_el1();
+ }
+
+ static inline void mte_check_tfsr_exit(void)
+ {
++ if (!system_supports_mte())
++ return;
++
+ /*
+ * The asynchronous faults are sync'ed automatically with
+ * TFSR_EL1 on kernel entry but for exit an explicit dsb()
+diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
+index 3a3264ff47b97..95f7686b728d7 100644
+--- a/arch/arm64/include/asm/string.h
++++ b/arch/arm64/include/asm/string.h
+@@ -12,11 +12,13 @@ extern char *strrchr(const char *, int c);
+ #define __HAVE_ARCH_STRCHR
+ extern char *strchr(const char *, int c);
+
++#ifndef CONFIG_KASAN_HW_TAGS
+ #define __HAVE_ARCH_STRCMP
+ extern int strcmp(const char *, const char *);
+
+ #define __HAVE_ARCH_STRNCMP
+ extern int strncmp(const char *, const char *, __kernel_size_t);
++#endif
+
+ #define __HAVE_ARCH_STRLEN
+ extern __kernel_size_t strlen(const char *);
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 0ead8bfedf201..92c99472d2c90 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1500,9 +1500,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+ /*
+ * For reasons that aren't entirely clear, enabling KPTI on Cavium
+ * ThunderX leads to apparent I-cache corruption of kernel text, which
+- * ends as well as you might imagine. Don't even try.
++ * ends as well as you might imagine. Don't even try. We cannot rely
++ * on the cpus_have_*cap() helpers here to detect the CPU erratum
++ * because cpucap detection order may change. However, since we know
++ * affected CPUs are always in a homogeneous configuration, it is
++ * safe to rely on this_cpu_has_cap() here.
+ */
+- if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
++ if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+ str = "ARM64_WORKAROUND_CAVIUM_27456";
+ __kpti_forced = -1;
+ }
+diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
+index 36f51b0e438a6..d223df11fc00b 100644
+--- a/arch/arm64/kernel/mte.c
++++ b/arch/arm64/kernel/mte.c
+@@ -173,12 +173,7 @@ bool mte_report_once(void)
+ #ifdef CONFIG_KASAN_HW_TAGS
+ void mte_check_tfsr_el1(void)
+ {
+- u64 tfsr_el1;
+-
+- if (!system_supports_mte())
+- return;
+-
+- tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
++ u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
+
+ if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
+ /*
+@@ -221,6 +216,9 @@ void mte_thread_init_user(void)
+
+ void mte_thread_switch(struct task_struct *next)
+ {
++ if (!system_supports_mte())
++ return;
++
+ /*
+ * Check if an async tag exception occurred at EL1.
+ *
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index c8989b999250d..c858b857c1ecf 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -60,7 +60,7 @@
+
+ #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
+ #include <linux/stackprotector.h>
+-unsigned long __stack_chk_guard __read_mostly;
++unsigned long __stack_chk_guard __ro_after_init;
+ EXPORT_SYMBOL(__stack_chk_guard);
+ #endif
+
+diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
+index d7bee210a798a..83bcad72ec972 100644
+--- a/arch/arm64/lib/strcmp.S
++++ b/arch/arm64/lib/strcmp.S
+@@ -173,4 +173,4 @@ L(done):
+ ret
+
+ SYM_FUNC_END_PI(strcmp)
+-EXPORT_SYMBOL_NOKASAN(strcmp)
++EXPORT_SYMBOL_NOHWKASAN(strcmp)
+diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
+index 48d44f7fddb13..e42bcfcd37e6f 100644
+--- a/arch/arm64/lib/strncmp.S
++++ b/arch/arm64/lib/strncmp.S
+@@ -258,4 +258,4 @@ L(ret0):
+ ret
+
+ SYM_FUNC_END_PI(strncmp)
+-EXPORT_SYMBOL_NOKASAN(strncmp)
++EXPORT_SYMBOL_NOHWKASAN(strncmp)
+diff --git a/arch/m68k/include/asm/raw_io.h b/arch/m68k/include/asm/raw_io.h
+index 911826ea83ce1..80eb2396d01eb 100644
+--- a/arch/m68k/include/asm/raw_io.h
++++ b/arch/m68k/include/asm/raw_io.h
+@@ -17,21 +17,21 @@
+ * two accesses to memory, which may be undesirable for some devices.
+ */
+ #define in_8(addr) \
+- ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
++ ({ u8 __v = (*(__force volatile u8 *) (unsigned long)(addr)); __v; })
+ #define in_be16(addr) \
+- ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
++ ({ u16 __v = (*(__force volatile u16 *) (unsigned long)(addr)); __v; })
+ #define in_be32(addr) \
+- ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
++ ({ u32 __v = (*(__force volatile u32 *) (unsigned long)(addr)); __v; })
+ #define in_le16(addr) \
+- ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; })
++ ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (unsigned long)(addr)); __v; })
+ #define in_le32(addr) \
+- ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; })
++ ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (unsigned long)(addr)); __v; })
+
+-#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
+-#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
+-#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
+-#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w))
+-#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l))
++#define out_8(addr,b) (void)((*(__force volatile u8 *) (unsigned long)(addr)) = (b))
++#define out_be16(addr,w) (void)((*(__force volatile u16 *) (unsigned long)(addr)) = (w))
++#define out_be32(addr,l) (void)((*(__force volatile u32 *) (unsigned long)(addr)) = (l))
++#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (unsigned long)(addr)) = cpu_to_le16(w))
++#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (unsigned long)(addr)) = cpu_to_le32(l))
+
+ #define raw_inb in_8
+ #define raw_inw in_be16
+diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
+index d00313d1274e8..0561568f7b489 100644
+--- a/arch/parisc/include/asm/page.h
++++ b/arch/parisc/include/asm/page.h
+@@ -184,7 +184,7 @@ extern int npmem_ranges;
+ #include <asm-generic/getorder.h>
+ #include <asm/pdc.h>
+
+-#define PAGE0 ((struct zeropage *)__PAGE_OFFSET)
++#define PAGE0 ((struct zeropage *)absolute_pointer(__PAGE_OFFSET))
+
+ /* DEFINITION OF THE ZERO-PAGE (PAG0) */
+ /* based on work by Jason Eckhardt (jason@×××××××.com) */
+diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
+index 8e1d72a167594..7ceae24b0ca99 100644
+--- a/arch/sparc/kernel/ioport.c
++++ b/arch/sparc/kernel/ioport.c
+@@ -356,7 +356,9 @@ err_nomem:
+ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs)
+ {
+- if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)))
++ size = PAGE_ALIGN(size);
++
++ if (!sparc_dma_free_resource(cpu_addr, size))
+ return;
+
+ dma_make_coherent(dma_addr, size);
+diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
+index 8e645ddac58e2..30f171b7b00c2 100644
+--- a/arch/sparc/kernel/mdesc.c
++++ b/arch/sparc/kernel/mdesc.c
+@@ -39,6 +39,7 @@ struct mdesc_hdr {
+ u32 node_sz; /* node block size */
+ u32 name_sz; /* name block size */
+ u32 data_sz; /* data block size */
++ char data[];
+ } __attribute__((aligned(16)));
+
+ struct mdesc_elem {
+@@ -612,7 +613,7 @@ EXPORT_SYMBOL(mdesc_get_node_info);
+
+ static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
+ {
+- return (struct mdesc_elem *) (mdesc + 1);
++ return (struct mdesc_elem *) mdesc->data;
+ }
+
+ static void *name_block(struct mdesc_hdr *mdesc)
+diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
+index 5c7bcaa796232..1d5f14aff5f6f 100644
+--- a/arch/x86/include/asm/pkeys.h
++++ b/arch/x86/include/asm/pkeys.h
+@@ -2,8 +2,6 @@
+ #ifndef _ASM_X86_PKEYS_H
+ #define _ASM_X86_PKEYS_H
+
+-#define ARCH_DEFAULT_PKEY 0
+-
+ /*
+ * If more than 16 keys are ever supported, a thorough audit
+ * will be necessary to ensure that the types that store key
+diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
+index f3fbb84ff8a77..68c257a3de0d3 100644
+--- a/arch/x86/include/asm/special_insns.h
++++ b/arch/x86/include/asm/special_insns.h
+@@ -275,7 +275,7 @@ static inline int enqcmds(void __iomem *dst, const void *src)
+ {
+ const struct { char _[64]; } *__src = src;
+ struct { char _[64]; } __iomem *__dst = dst;
+- int zf;
++ bool zf;
+
+ /*
+ * ENQCMDS %(rdx), rax
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index bff3a784aec5b..d103e8489ec17 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -839,6 +839,20 @@ void __init setup_arch(char **cmdline_p)
+
+ x86_init.oem.arch_setup();
+
++ /*
++ * Do some memory reservations *before* memory is added to memblock, so
++ * memblock allocations won't overwrite it.
++ *
++ * After this point, everything still needed from the boot loader or
++ * firmware or kernel text should be early reserved or marked not RAM in
++ * e820. All other memory is free game.
++ *
++ * This call needs to happen before e820__memory_setup() which calls the
++ * xen_memory_setup() on Xen dom0 which relies on the fact that those
++ * early reservations have happened already.
++ */
++ early_reserve_memory();
++
+ iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
+ e820__memory_setup();
+ parse_setup_data();
+@@ -885,18 +899,6 @@ void __init setup_arch(char **cmdline_p)
+
+ parse_early_param();
+
+- /*
+- * Do some memory reservations *before* memory is added to
+- * memblock, so memblock allocations won't overwrite it.
+- * Do it after early param, so we could get (unlikely) panic from
+- * serial.
+- *
+- * After this point everything still needed from the boot loader or
+- * firmware or kernel text should be early reserved or marked not
+- * RAM in e820. All other memory is free game.
+- */
+- early_reserve_memory();
+-
+ #ifdef CONFIG_MEMORY_HOTPLUG
+ /*
+ * Memory used by the kernel cannot be hot-removed because Linux
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index b2eefdefc1083..84a2c8c4af735 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -710,7 +710,8 @@ oops:
+
+ static noinline void
+ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
+- unsigned long address, int signal, int si_code)
++ unsigned long address, int signal, int si_code,
++ u32 pkey)
+ {
+ WARN_ON_ONCE(user_mode(regs));
+
+@@ -735,8 +736,12 @@ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
+
+ set_signal_archinfo(address, error_code);
+
+- /* XXX: hwpoison faults will set the wrong code. */
+- force_sig_fault(signal, si_code, (void __user *)address);
++ if (si_code == SEGV_PKUERR) {
++ force_sig_pkuerr((void __user *)address, pkey);
++ } else {
++ /* XXX: hwpoison faults will set the wrong code. */
++ force_sig_fault(signal, si_code, (void __user *)address);
++ }
+ }
+
+ /*
+@@ -798,7 +803,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ struct task_struct *tsk = current;
+
+ if (!user_mode(regs)) {
+- kernelmode_fixup_or_oops(regs, error_code, address, pkey, si_code);
++ kernelmode_fixup_or_oops(regs, error_code, address,
++ SIGSEGV, si_code, pkey);
+ return;
+ }
+
+@@ -930,7 +936,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
+ {
+ /* Kernel mode? Handle exceptions or die: */
+ if (!user_mode(regs)) {
+- kernelmode_fixup_or_oops(regs, error_code, address, SIGBUS, BUS_ADRERR);
++ kernelmode_fixup_or_oops(regs, error_code, address,
++ SIGBUS, BUS_ADRERR, ARCH_DEFAULT_PKEY);
+ return;
+ }
+
+@@ -1396,7 +1403,8 @@ good_area:
+ */
+ if (!user_mode(regs))
+ kernelmode_fixup_or_oops(regs, error_code, address,
+- SIGBUS, BUS_ADRERR);
++ SIGBUS, BUS_ADRERR,
++ ARCH_DEFAULT_PKEY);
+ return;
+ }
+
+@@ -1416,7 +1424,8 @@ good_area:
+ return;
+
+ if (fatal_signal_pending(current) && !user_mode(regs)) {
+- kernelmode_fixup_or_oops(regs, error_code, address, 0, 0);
++ kernelmode_fixup_or_oops(regs, error_code, address,
++ 0, 0, ARCH_DEFAULT_PKEY);
+ return;
+ }
+
+@@ -1424,7 +1433,8 @@ good_area:
+ /* Kernel mode? Handle exceptions or die: */
+ if (!user_mode(regs)) {
+ kernelmode_fixup_or_oops(regs, error_code, address,
+- SIGSEGV, SEGV_MAPERR);
++ SIGSEGV, SEGV_MAPERR,
++ ARCH_DEFAULT_PKEY);
+ return;
+ }
+
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 475d9c71b1713..d8aaccc9a246d 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -756,8 +756,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
+ preempt_enable();
+ }
+
+-static void xen_convert_trap_info(const struct desc_ptr *desc,
+- struct trap_info *traps)
++static unsigned xen_convert_trap_info(const struct desc_ptr *desc,
++ struct trap_info *traps, bool full)
+ {
+ unsigned in, out, count;
+
+@@ -767,17 +767,18 @@ static void xen_convert_trap_info(const struct desc_ptr *desc,
+ for (in = out = 0; in < count; in++) {
+ gate_desc *entry = (gate_desc *)(desc->address) + in;
+
+- if (cvt_gate_to_trap(in, entry, &traps[out]))
++ if (cvt_gate_to_trap(in, entry, &traps[out]) || full)
+ out++;
+ }
+- traps[out].address = 0;
++
++ return out;
+ }
+
+ void xen_copy_trap_info(struct trap_info *traps)
+ {
+ const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);
+
+- xen_convert_trap_info(desc, traps);
++ xen_convert_trap_info(desc, traps, true);
+ }
+
+ /* Load a new IDT into Xen. In principle this can be per-CPU, so we
+@@ -787,6 +788,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
+ {
+ static DEFINE_SPINLOCK(lock);
+ static struct trap_info traps[257];
++ unsigned out;
+
+ trace_xen_cpu_load_idt(desc);
+
+@@ -794,7 +796,8 @@ static void xen_load_idt(const struct desc_ptr *desc)
+
+ memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
+
+- xen_convert_trap_info(desc, traps);
++ out = xen_convert_trap_info(desc, traps, false);
++ memset(&traps[out], 0, sizeof(traps[0]));
+
+ xen_mc_flush();
+ if (HYPERVISOR_set_trap_table(traps))
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 26446f97deee4..28e11decbac58 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -1385,10 +1385,14 @@ enomem:
+ /* alloc failed, nothing's initialized yet, free everything */
+ spin_lock_irq(&q->queue_lock);
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
++ struct blkcg *blkcg = blkg->blkcg;
++
++ spin_lock(&blkcg->lock);
+ if (blkg->pd[pol->plid]) {
+ pol->pd_free_fn(blkg->pd[pol->plid]);
+ blkg->pd[pol->plid] = NULL;
+ }
++ spin_unlock(&blkcg->lock);
+ }
+ spin_unlock_irq(&q->queue_lock);
+ ret = -ENOMEM;
+@@ -1420,12 +1424,16 @@ void blkcg_deactivate_policy(struct request_queue *q,
+ __clear_bit(pol->plid, q->blkcg_pols);
+
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
++ struct blkcg *blkcg = blkg->blkcg;
++
++ spin_lock(&blkcg->lock);
+ if (blkg->pd[pol->plid]) {
+ if (pol->pd_offline_fn)
+ pol->pd_offline_fn(blkg->pd[pol->plid]);
+ pol->pd_free_fn(blkg->pd[pol->plid]);
+ blkg->pd[pol->plid] = NULL;
+ }
++ spin_unlock(&blkcg->lock);
+ }
+
+ spin_unlock_irq(&q->queue_lock);
+diff --git a/block/blk-integrity.c b/block/blk-integrity.c
+index 410da060d1f5a..9e83159f5a527 100644
+--- a/block/blk-integrity.c
++++ b/block/blk-integrity.c
+@@ -426,8 +426,15 @@ EXPORT_SYMBOL(blk_integrity_register);
+ */
+ void blk_integrity_unregister(struct gendisk *disk)
+ {
++ struct blk_integrity *bi = &disk->queue->integrity;
++
++ if (!bi->profile)
++ return;
++
++ /* ensure all bios are off the integrity workqueue */
++ blk_flush_integrity();
+ blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
+- memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
++ memset(bi, 0, sizeof(*bi));
+ }
+ EXPORT_SYMBOL(blk_integrity_unregister);
+
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index 86f87346232a6..ff5caeb825429 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -208,7 +208,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
+
+ spin_lock_irqsave(&tags->lock, flags);
+ rq = tags->rqs[bitnr];
+- if (!rq || !refcount_inc_not_zero(&rq->ref))
++ if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
+ rq = NULL;
+ spin_unlock_irqrestore(&tags->lock, flags);
+ return rq;
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index bcec598b89f23..9edb776249efd 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -1852,6 +1852,7 @@ static void binder_deferred_fd_close(int fd)
+ }
+
+ static void binder_transaction_buffer_release(struct binder_proc *proc,
++ struct binder_thread *thread,
+ struct binder_buffer *buffer,
+ binder_size_t failed_at,
+ bool is_failure)
+@@ -2011,8 +2012,16 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
+ &proc->alloc, &fd, buffer,
+ offset, sizeof(fd));
+ WARN_ON(err);
+- if (!err)
++ if (!err) {
+ binder_deferred_fd_close(fd);
++ /*
++ * Need to make sure the thread goes
++ * back to userspace to complete the
++ * deferred close
++ */
++ if (thread)
++ thread->looper_need_return = true;
++ }
+ }
+ } break;
+ default:
+@@ -3038,9 +3047,8 @@ static void binder_transaction(struct binder_proc *proc,
+ if (reply) {
+ binder_enqueue_thread_work(thread, tcomplete);
+ binder_inner_proc_lock(target_proc);
+- if (target_thread->is_dead || target_proc->is_frozen) {
+- return_error = target_thread->is_dead ?
+- BR_DEAD_REPLY : BR_FROZEN_REPLY;
++ if (target_thread->is_dead) {
++ return_error = BR_DEAD_REPLY;
+ binder_inner_proc_unlock(target_proc);
+ goto err_dead_proc_or_thread;
+ }
+@@ -3105,7 +3113,7 @@ err_bad_parent:
+ err_copy_data_failed:
+ binder_free_txn_fixups(t);
+ trace_binder_transaction_failed_buffer_release(t->buffer);
+- binder_transaction_buffer_release(target_proc, t->buffer,
++ binder_transaction_buffer_release(target_proc, NULL, t->buffer,
+ buffer_offset, true);
+ if (target_node)
+ binder_dec_node_tmpref(target_node);
+@@ -3184,7 +3192,9 @@ err_invalid_target_handle:
+ * Cleanup buffer and free it.
+ */
+ static void
+-binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
++binder_free_buf(struct binder_proc *proc,
++ struct binder_thread *thread,
++ struct binder_buffer *buffer)
+ {
+ binder_inner_proc_lock(proc);
+ if (buffer->transaction) {
+@@ -3212,7 +3222,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
+ binder_node_inner_unlock(buf_node);
+ }
+ trace_binder_transaction_buffer_release(buffer);
+- binder_transaction_buffer_release(proc, buffer, 0, false);
++ binder_transaction_buffer_release(proc, thread, buffer, 0, false);
+ binder_alloc_free_buf(&proc->alloc, buffer);
+ }
+
+@@ -3414,7 +3424,7 @@ static int binder_thread_write(struct binder_proc *proc,
+ proc->pid, thread->pid, (u64)data_ptr,
+ buffer->debug_id,
+ buffer->transaction ? "active" : "finished");
+- binder_free_buf(proc, buffer);
++ binder_free_buf(proc, thread, buffer);
+ break;
+ }
+
+@@ -4107,7 +4117,7 @@ retry:
+ buffer->transaction = NULL;
+ binder_cleanup_transaction(t, "fd fixups failed",
+ BR_FAILED_REPLY);
+- binder_free_buf(proc, buffer);
++ binder_free_buf(proc, thread, buffer);
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
+ proc->pid, thread->pid,
+@@ -4648,6 +4658,22 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
+ return 0;
+ }
+
++static bool binder_txns_pending_ilocked(struct binder_proc *proc)
++{
++ struct rb_node *n;
++ struct binder_thread *thread;
++
++ if (proc->outstanding_txns > 0)
++ return true;
++
++ for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
++ thread = rb_entry(n, struct binder_thread, rb_node);
++ if (thread->transaction_stack)
++ return true;
++ }
++ return false;
++}
++
+ static int binder_ioctl_freeze(struct binder_freeze_info *info,
+ struct binder_proc *target_proc)
+ {
+@@ -4679,8 +4705,13 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
+ (!target_proc->outstanding_txns),
+ msecs_to_jiffies(info->timeout_ms));
+
+- if (!ret && target_proc->outstanding_txns)
+- ret = -EAGAIN;
++ /* Check pending transactions that wait for reply */
++ if (ret >= 0) {
++ binder_inner_proc_lock(target_proc);
++ if (binder_txns_pending_ilocked(target_proc))
++ ret = -EAGAIN;
++ binder_inner_proc_unlock(target_proc);
++ }
+
+ if (ret < 0) {
+ binder_inner_proc_lock(target_proc);
+@@ -4696,6 +4727,7 @@ static int binder_ioctl_get_freezer_info(
+ {
+ struct binder_proc *target_proc;
+ bool found = false;
++ __u32 txns_pending;
+
+ info->sync_recv = 0;
+ info->async_recv = 0;
+@@ -4705,7 +4737,9 @@ static int binder_ioctl_get_freezer_info(
+ if (target_proc->pid == info->pid) {
+ found = true;
+ binder_inner_proc_lock(target_proc);
+- info->sync_recv |= target_proc->sync_recv;
++ txns_pending = binder_txns_pending_ilocked(target_proc);
++ info->sync_recv |= target_proc->sync_recv |
++ (txns_pending << 1);
+ info->async_recv |= target_proc->async_recv;
+ binder_inner_proc_unlock(target_proc);
+ }
+diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
+index 810c0b84d3f81..402c4d4362a83 100644
+--- a/drivers/android/binder_internal.h
++++ b/drivers/android/binder_internal.h
+@@ -378,6 +378,8 @@ struct binder_ref {
+ * binder transactions
+ * (protected by @inner_lock)
+ * @sync_recv: process received sync transactions since last frozen
++ * bit 0: received sync transaction after being frozen
++ * bit 1: new pending sync transaction during freezing
+ * (protected by @inner_lock)
+ * @async_recv: process received async transactions since last frozen
+ * (protected by @inner_lock)
+diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
+index d1f1a82401207..bdb50a06c82ae 100644
+--- a/drivers/base/swnode.c
++++ b/drivers/base/swnode.c
+@@ -1113,6 +1113,9 @@ int device_create_managed_software_node(struct device *dev,
+ to_swnode(fwnode)->managed = true;
+ set_secondary_fwnode(dev, fwnode);
+
++ if (device_is_registered(dev))
++ software_node_notify(dev, KOBJ_ADD);
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(device_create_managed_software_node);
+diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
+index df77b6bf5c641..763cea8418f8e 100644
+--- a/drivers/comedi/comedi_fops.c
++++ b/drivers/comedi/comedi_fops.c
+@@ -3090,6 +3090,7 @@ static int compat_insnlist(struct file *file, unsigned long arg)
+ mutex_lock(&dev->mutex);
+ rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file);
+ mutex_unlock(&dev->mutex);
++ kfree(insns);
+ return rc;
+ }
+
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index bb4549959b113..e7cd3882bda4d 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -3251,11 +3251,15 @@ static int __init intel_pstate_init(void)
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return -ENODEV;
+
+- if (no_load)
+- return -ENODEV;
+-
+ id = x86_match_cpu(hwp_support_ids);
+ if (id) {
++ bool hwp_forced = intel_pstate_hwp_is_enabled();
++
++ if (hwp_forced)
++ pr_info("HWP enabled by BIOS\n");
++ else if (no_load)
++ return -ENODEV;
++
+ copy_cpu_funcs(&core_funcs);
+ /*
+ * Avoid enabling HWP for processors without EPP support,
+@@ -3265,8 +3269,7 @@ static int __init intel_pstate_init(void)
+ * If HWP is enabled already, though, there is no choice but to
+ * deal with it.
+ */
+- if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
+- intel_pstate_hwp_is_enabled()) {
++ if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
+ hwp_active++;
+ hwp_mode_bdw = id->driver_data;
+ intel_pstate.attr = hwp_cpufreq_attrs;
+@@ -3278,7 +3281,11 @@ static int __init intel_pstate_init(void)
+
+ goto hwp_cpu_matched;
+ }
++ pr_info("HWP not enabled\n");
+ } else {
++ if (no_load)
++ return -ENODEV;
++
+ id = x86_match_cpu(intel_pstate_cpu_ids);
+ if (!id) {
+ pr_info("CPU model not supported\n");
+@@ -3357,10 +3364,9 @@ static int __init intel_pstate_setup(char *str)
+ else if (!strcmp(str, "passive"))
+ default_driver = &intel_cpufreq;
+
+- if (!strcmp(str, "no_hwp")) {
+- pr_info("HWP disabled\n");
++ if (!strcmp(str, "no_hwp"))
+ no_hwp = 1;
+- }
++
+ if (!strcmp(str, "force"))
+ force_load = 1;
+ if (!strcmp(str, "hwp_only"))
+diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c
+index fc1153ab1ebbc..b8a7d9594afd4 100644
+--- a/drivers/edac/dmc520_edac.c
++++ b/drivers/edac/dmc520_edac.c
+@@ -464,7 +464,7 @@ static void dmc520_init_csrow(struct mem_ctl_info *mci)
+ dimm->grain = pvt->mem_width_in_bytes;
+ dimm->dtype = dt;
+ dimm->mtype = mt;
+- dimm->edac_mode = EDAC_FLAG_SECDED;
++ dimm->edac_mode = EDAC_SECDED;
+ dimm->nr_pages = pages_per_rank / csi->nr_channels;
+ }
+ }
+diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
+index 7e7146b22c160..7d08627e738b3 100644
+--- a/drivers/edac/synopsys_edac.c
++++ b/drivers/edac/synopsys_edac.c
+@@ -782,7 +782,7 @@ static void init_csrows(struct mem_ctl_info *mci)
+
+ for (j = 0; j < csi->nr_channels; j++) {
+ dimm = csi->channels[j]->dimm;
+- dimm->edac_mode = EDAC_FLAG_SECDED;
++ dimm->edac_mode = EDAC_SECDED;
+ dimm->mtype = p_data->get_mtype(priv->baseaddr);
+ dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
+ dimm->grain = SYNPS_EDAC_ERR_GRAIN;
+diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c
+index 1afb41aa20d71..ea2ec3c6815cb 100644
+--- a/drivers/fpga/machxo2-spi.c
++++ b/drivers/fpga/machxo2-spi.c
+@@ -225,8 +225,10 @@ static int machxo2_write_init(struct fpga_manager *mgr,
+ goto fail;
+
+ get_status(spi, &status);
+- if (test_bit(FAIL, &status))
++ if (test_bit(FAIL, &status)) {
++ ret = -EINVAL;
+ goto fail;
++ }
+ dump_status_reg(&status);
+
+ spi_message_init(&msg);
+@@ -313,6 +315,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
+ dump_status_reg(&status);
+ if (!test_bit(DONE, &status)) {
+ machxo2_cleanup(mgr);
++ ret = -EINVAL;
+ goto fail;
+ }
+
+@@ -335,6 +338,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
+ break;
+ if (++refreshloop == MACHXO2_MAX_REFRESH_LOOP) {
+ machxo2_cleanup(mgr);
++ ret = -EINVAL;
+ goto fail;
+ }
+ } while (1);
+diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
+index f99f3c10bed03..39dca147d587a 100644
+--- a/drivers/gpio/gpio-uniphier.c
++++ b/drivers/gpio/gpio-uniphier.c
+@@ -184,7 +184,7 @@ static void uniphier_gpio_irq_mask(struct irq_data *data)
+
+ uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, 0);
+
+- return irq_chip_mask_parent(data);
++ irq_chip_mask_parent(data);
+ }
+
+ static void uniphier_gpio_irq_unmask(struct irq_data *data)
+@@ -194,7 +194,7 @@ static void uniphier_gpio_irq_unmask(struct irq_data *data)
+
+ uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, mask);
+
+- return irq_chip_unmask_parent(data);
++ irq_chip_unmask_parent(data);
+ }
+
+ static int uniphier_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 411525ac4cc45..47712b6903b51 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -313,9 +313,11 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
+
+ ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
+ if (ret)
+- gpiochip_free_own_desc(desc);
++ dev_warn(chip->parent,
++ "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
++ pin, ret);
+
+- return ret ? ERR_PTR(ret) : desc;
++ return desc;
+ }
+
+ static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 9e52948d49920..5a872adcfdb98 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -447,6 +447,7 @@ static const struct kfd_device_info navi10_device_info = {
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 145,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+@@ -465,6 +466,7 @@ static const struct kfd_device_info navi12_device_info = {
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 145,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+@@ -483,6 +485,7 @@ static const struct kfd_device_info navi14_device_info = {
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 145,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+@@ -501,6 +504,7 @@ static const struct kfd_device_info sienna_cichlid_device_info = {
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 92,
+ .num_sdma_engines = 4,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+@@ -519,6 +523,7 @@ static const struct kfd_device_info navy_flounder_device_info = {
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 92,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+@@ -536,7 +541,8 @@ static const struct kfd_device_info vangogh_device_info = {
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+- .needs_pci_atomics = false,
++ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 92,
+ .num_sdma_engines = 1,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 2,
+@@ -555,6 +561,7 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = {
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 92,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+@@ -573,6 +580,7 @@ static const struct kfd_device_info beige_goby_device_info = {
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 92,
+ .num_sdma_engines = 1,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+@@ -590,7 +598,8 @@ static const struct kfd_device_info yellow_carp_device_info = {
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+- .needs_pci_atomics = false,
++ .needs_pci_atomics = true,
++ .no_atomic_fw_version = 92,
+ .num_sdma_engines = 1,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 2,
+@@ -659,20 +668,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ if (!kfd)
+ return NULL;
+
+- /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+- * 32 and 64-bit requests are possible and must be
+- * supported.
+- */
+- kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);
+- if (device_info->needs_pci_atomics &&
+- !kfd->pci_atomic_requested) {
+- dev_info(kfd_device,
+- "skipped device %x:%x, PCI rejects atomics\n",
+- pdev->vendor, pdev->device);
+- kfree(kfd);
+- return NULL;
+- }
+-
+ kfd->kgd = kgd;
+ kfd->device_info = device_info;
+ kfd->pdev = pdev;
+@@ -772,6 +767,23 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
+ - kfd->vm_info.first_vmid_kfd + 1;
+
++ /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
++ * 32 and 64-bit requests are possible and must be
++ * supported.
++ */
++ kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd);
++ if (!kfd->pci_atomic_requested &&
++ kfd->device_info->needs_pci_atomics &&
++ (!kfd->device_info->no_atomic_fw_version ||
++ kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) {
++ dev_info(kfd_device,
++ "skipped device %x:%x, PCI rejects atomics %d<%d\n",
++ kfd->pdev->vendor, kfd->pdev->device,
++ kfd->mec_fw_version,
++ kfd->device_info->no_atomic_fw_version);
++ return false;
++ }
++
+ /* Verify module parameters regarding mapped process number*/
+ if ((hws_max_conc_proc < 0)
+ || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 3426743ed228b..b38a84a274387 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -206,6 +206,7 @@ struct kfd_device_info {
+ bool supports_cwsr;
+ bool needs_iommu_device;
+ bool needs_pci_atomics;
++ uint32_t no_atomic_fw_version;
+ unsigned int num_sdma_engines;
+ unsigned int num_xgmi_sdma_engines;
+ unsigned int num_sdma_queues_per_engine;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index 0f7f1e5621ea4..e85035fd1ccb4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -118,8 +118,16 @@ static void svm_range_remove_notifier(struct svm_range *prange)
+ mmu_interval_notifier_remove(&prange->notifier);
+ }
+
++static bool
++svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
++{
++ return dma_addr && !dma_mapping_error(dev, dma_addr) &&
++ !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
++}
++
+ static int
+ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
++ unsigned long offset, unsigned long npages,
+ unsigned long *hmm_pfns, uint32_t gpuidx)
+ {
+ enum dma_data_direction dir = DMA_BIDIRECTIONAL;
+@@ -136,9 +144,9 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
+ prange->dma_addr[gpuidx] = addr;
+ }
+
+- for (i = 0; i < prange->npages; i++) {
+- if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]),
+- "leaking dma mapping\n"))
++ addr += offset;
++ for (i = 0; i < npages; i++) {
++ if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
+ dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
+
+ page = hmm_pfn_to_page(hmm_pfns[i]);
+@@ -167,6 +175,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
+
+ static int
+ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
++ unsigned long offset, unsigned long npages,
+ unsigned long *hmm_pfns)
+ {
+ struct kfd_process *p;
+@@ -187,7 +196,8 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
+ }
+ adev = (struct amdgpu_device *)pdd->dev->kgd;
+
+- r = svm_range_dma_map_dev(adev, prange, hmm_pfns, gpuidx);
++ r = svm_range_dma_map_dev(adev, prange, offset, npages,
++ hmm_pfns, gpuidx);
+ if (r)
+ break;
+ }
+@@ -205,7 +215,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
+ return;
+
+ for (i = offset; i < offset + npages; i++) {
+- if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
++ if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
+ continue;
+ pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
+ dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
+@@ -1088,11 +1098,6 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
+ pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
+
+ pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
+-
+- pr_debug("svms 0x%p [0x%lx 0x%lx] vram %d PTE 0x%llx mapping 0x%x\n",
+- prange->svms, prange->start, prange->last,
+- (domain == SVM_RANGE_VRAM_DOMAIN) ? 1:0, pte_flags, mapping_flags);
+-
+ return pte_flags;
+ }
+
+@@ -1156,7 +1161,8 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
+
+ static int
+ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+- struct svm_range *prange, dma_addr_t *dma_addr,
++ struct svm_range *prange, unsigned long offset,
++ unsigned long npages, bool readonly, dma_addr_t *dma_addr,
+ struct amdgpu_device *bo_adev, struct dma_fence **fence)
+ {
+ struct amdgpu_bo_va bo_va;
+@@ -1165,16 +1171,17 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned long last_start;
+ int last_domain;
+ int r = 0;
+- int64_t i;
++ int64_t i, j;
+
+- pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
+- prange->last);
++ last_start = prange->start + offset;
++
++ pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
++ last_start, last_start + npages - 1, readonly);
+
+ if (prange->svm_bo && prange->ttm_res)
+ bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
+
+- last_start = prange->start;
+- for (i = 0; i < prange->npages; i++) {
++ for (i = offset; i < offset + npages; i++) {
+ last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
+ dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
+ if ((prange->start + i) < prange->last &&
+@@ -1183,15 +1190,27 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+
+ pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
+ last_start, prange->start + i, last_domain ? "GPU" : "CPU");
++
+ pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
+- r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, NULL,
+- last_start,
++ if (readonly)
++ pte_flags &= ~AMDGPU_PTE_WRITEABLE;
++
++ pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
++ prange->svms, last_start, prange->start + i,
++ (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
++ pte_flags);
++
++ r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
++ NULL, last_start,
+ prange->start + i, pte_flags,
+ last_start - prange->start,
+- NULL,
+- dma_addr,
++ NULL, dma_addr,
+ &vm->last_update,
+ &table_freed);
++
++ for (j = last_start - prange->start; j <= i; j++)
++ dma_addr[j] |= last_domain;
++
+ if (r) {
+ pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
+ goto out;
+@@ -1220,8 +1239,10 @@ out:
+ return r;
+ }
+
+-static int svm_range_map_to_gpus(struct svm_range *prange,
+- unsigned long *bitmap, bool wait)
++static int
++svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
++ unsigned long npages, bool readonly,
++ unsigned long *bitmap, bool wait)
+ {
+ struct kfd_process_device *pdd;
+ struct amdgpu_device *bo_adev;
+@@ -1257,7 +1278,8 @@ static int svm_range_map_to_gpus(struct svm_range *prange,
+ }
+
+ r = svm_range_map_to_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
+- prange, prange->dma_addr[gpuidx],
++ prange, offset, npages, readonly,
++ prange->dma_addr[gpuidx],
+ bo_adev, wait ? &fence : NULL);
+ if (r)
+ break;
+@@ -1390,7 +1412,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
+ int32_t gpuidx, bool intr, bool wait)
+ {
+ struct svm_validate_context ctx;
+- struct hmm_range *hmm_range;
++ unsigned long start, end, addr;
+ struct kfd_process *p;
+ void *owner;
+ int32_t idx;
+@@ -1448,40 +1470,66 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
+ break;
+ }
+ }
+- r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
+- prange->start << PAGE_SHIFT,
+- prange->npages, &hmm_range,
+- false, true, owner);
+- if (r) {
+- pr_debug("failed %d to get svm range pages\n", r);
+- goto unreserve_out;
+- }
+
+- r = svm_range_dma_map(prange, ctx.bitmap,
+- hmm_range->hmm_pfns);
+- if (r) {
+- pr_debug("failed %d to dma map range\n", r);
+- goto unreserve_out;
+- }
++ start = prange->start << PAGE_SHIFT;
++ end = (prange->last + 1) << PAGE_SHIFT;
++ for (addr = start; addr < end && !r; ) {
++ struct hmm_range *hmm_range;
++ struct vm_area_struct *vma;
++ unsigned long next;
++ unsigned long offset;
++ unsigned long npages;
++ bool readonly;
+
+- prange->validated_once = true;
++ vma = find_vma(mm, addr);
++ if (!vma || addr < vma->vm_start) {
++ r = -EFAULT;
++ goto unreserve_out;
++ }
++ readonly = !(vma->vm_flags & VM_WRITE);
+
+- svm_range_lock(prange);
+- if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
+- pr_debug("hmm update the range, need validate again\n");
+- r = -EAGAIN;
+- goto unlock_out;
+- }
+- if (!list_empty(&prange->child_list)) {
+- pr_debug("range split by unmap in parallel, validate again\n");
+- r = -EAGAIN;
+- goto unlock_out;
+- }
++ next = min(vma->vm_end, end);
++ npages = (next - addr) >> PAGE_SHIFT;
++ r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
++ addr, npages, &hmm_range,
++ readonly, true, owner);
++ if (r) {
++ pr_debug("failed %d to get svm range pages\n", r);
++ goto unreserve_out;
++ }
++
++ offset = (addr - start) >> PAGE_SHIFT;
++ r = svm_range_dma_map(prange, ctx.bitmap, offset, npages,
++ hmm_range->hmm_pfns);
++ if (r) {
++ pr_debug("failed %d to dma map range\n", r);
++ goto unreserve_out;
++ }
++
++ svm_range_lock(prange);
++ if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
++ pr_debug("hmm update the range, need validate again\n");
++ r = -EAGAIN;
++ goto unlock_out;
++ }
++ if (!list_empty(&prange->child_list)) {
++ pr_debug("range split by unmap in parallel, validate again\n");
++ r = -EAGAIN;
++ goto unlock_out;
++ }
+
+- r = svm_range_map_to_gpus(prange, ctx.bitmap, wait);
++ r = svm_range_map_to_gpus(prange, offset, npages, readonly,
++ ctx.bitmap, wait);
+
+ unlock_out:
+- svm_range_unlock(prange);
++ svm_range_unlock(prange);
++
++ addr = next;
++ }
++
++ if (addr == end)
++ prange->validated_once = true;
++
+ unreserve_out:
+ svm_range_unreserve_bos(&ctx);
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 6a4c6c47dcfaf..3bb567ea2cef9 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -7514,6 +7514,32 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
+ }
+ }
+
++static void amdgpu_set_panel_orientation(struct drm_connector *connector)
++{
++ struct drm_encoder *encoder;
++ struct amdgpu_encoder *amdgpu_encoder;
++ const struct drm_display_mode *native_mode;
++
++ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
++ connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
++ return;
++
++ encoder = amdgpu_dm_connector_to_encoder(connector);
++ if (!encoder)
++ return;
++
++ amdgpu_encoder = to_amdgpu_encoder(encoder);
++
++ native_mode = &amdgpu_encoder->native_mode;
++ if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
++ return;
++
++ drm_connector_set_panel_orientation_with_quirk(connector,
++ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
++ native_mode->hdisplay,
++ native_mode->vdisplay);
++}
++
+ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
+ struct edid *edid)
+ {
+@@ -7542,6 +7568,8 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
+ * restored here.
+ */
+ amdgpu_dm_update_freesync_caps(connector, edid);
++
++ amdgpu_set_panel_orientation(connector);
+ } else {
+ amdgpu_dm_connector->num_modes = 0;
+ }
+@@ -8051,8 +8079,26 @@ static bool is_content_protection_different(struct drm_connector_state *state,
+ state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
+- /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
+- * hot-plug, headless s3, dpms
++ /* Stream removed and re-enabled
++ *
++ * Can sometimes overlap with the HPD case,
++ * thus set update_hdcp to false to avoid
++ * setting HDCP multiple times.
++ *
++ * Handles: DESIRED -> DESIRED (Special case)
++ */
++ if (!(old_state->crtc && old_state->crtc->enabled) &&
++ state->crtc && state->crtc->enabled &&
++ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
++ dm_con_state->update_hdcp = false;
++ return true;
++ }
++
++ /* Hot-plug, headless s3, dpms
++ *
++ * Only start HDCP if the display is connected/enabled.
++ * update_hdcp flag will be set to false until the next
++ * HPD comes in.
+ *
+ * Handles: DESIRED -> DESIRED (Special case)
+ */
+@@ -10469,7 +10515,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ goto fail;
+ status = dc_validate_global_state(dc, dm_state->context, false);
+ if (status != DC_OK) {
+- DC_LOG_WARNING("DC global validation failure: %s (%d)",
++ drm_dbg_atomic(dev,
++ "DC global validation failure: %s (%d)",
+ dc_status_to_str(status), status);
+ ret = -EINVAL;
+ goto fail;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index a6d0fd24fd02d..83ef72a3ebf41 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1849,9 +1849,13 @@ bool perform_link_training_with_retries(
+ dp_disable_link_phy(link, signal);
+
+ /* Abort link training if failure due to sink being unplugged. */
+- if (status == LINK_TRAINING_ABORT)
+- break;
+- else if (do_fallback) {
++ if (status == LINK_TRAINING_ABORT) {
++ enum dc_connection_type type = dc_connection_none;
++
++ dc_link_detect_sink(link, &type);
++ if (type == dc_connection_none)
++ break;
++ } else if (do_fallback) {
+ decide_fallback_link_setting(*link_setting, &current_setting, status);
+ /* Fail link training if reduced link bandwidth no longer meets
+ * stream requirements.
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
+index 15c0b8af376f8..6e8fe1242752d 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
+@@ -6870,6 +6870,8 @@ static int si_dpm_enable(struct amdgpu_device *adev)
+ si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+ si_thermal_start_thermal_controller(adev);
+
++ ni_update_current_ps(adev, boot_ps);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
+index cb38b1a17b098..82cbb29a05aa3 100644
+--- a/drivers/gpu/drm/ttm/ttm_pool.c
++++ b/drivers/gpu/drm/ttm/ttm_pool.c
+@@ -383,7 +383,8 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ else
+ gfp_flags |= GFP_HIGHUSER;
+
+- for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
++ for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
++ num_pages;
+ order = min_t(unsigned int, order, __fls(num_pages))) {
+ bool apply_caching = false;
+ struct ttm_pool_type *pt;
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index f91d37beb1133..3b391dee30445 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -166,8 +166,6 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
+ bool connected = false;
+
+- WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
+-
+ if (vc4_hdmi->hpd_gpio &&
+ gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
+ connected = true;
+@@ -188,12 +186,10 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
+ }
+ }
+
+- pm_runtime_put(&vc4_hdmi->pdev->dev);
+ return connector_status_connected;
+ }
+
+ cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+- pm_runtime_put(&vc4_hdmi->pdev->dev);
+ return connector_status_disconnected;
+ }
+
+@@ -635,6 +631,7 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
+ vc4_hdmi->variant->phy_disable(vc4_hdmi);
+
+ clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
++ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+ clk_disable_unprepare(vc4_hdmi->pixel_clock);
+
+ ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
+@@ -945,6 +942,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
+ return;
+ }
+
++ ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
++ if (ret) {
++ DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
++ clk_disable_unprepare(vc4_hdmi->pixel_clock);
++ return;
++ }
++
+ vc4_hdmi_cec_update_clk_div(vc4_hdmi);
+
+ if (pixel_rate > 297000000)
+@@ -957,6 +961,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
+ ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
+ if (ret) {
+ DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
++ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+ clk_disable_unprepare(vc4_hdmi->pixel_clock);
+ return;
+ }
+@@ -964,6 +969,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
+ ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
+ if (ret) {
+ DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
++ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+ clk_disable_unprepare(vc4_hdmi->pixel_clock);
+ return;
+ }
+@@ -2110,29 +2116,6 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
+ return 0;
+ }
+
+-#ifdef CONFIG_PM
+-static int vc4_hdmi_runtime_suspend(struct device *dev)
+-{
+- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+-
+- clk_disable_unprepare(vc4_hdmi->hsm_clock);
+-
+- return 0;
+-}
+-
+-static int vc4_hdmi_runtime_resume(struct device *dev)
+-{
+- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+- int ret;
+-
+- ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+- if (ret)
+- return ret;
+-
+- return 0;
+-}
+-#endif
+-
+ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
+ {
+ const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
+@@ -2380,18 +2363,11 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
+ {}
+ };
+
+-static const struct dev_pm_ops vc4_hdmi_pm_ops = {
+- SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
+- vc4_hdmi_runtime_resume,
+- NULL)
+-};
+-
+ struct platform_driver vc4_hdmi_driver = {
+ .probe = vc4_hdmi_dev_probe,
+ .remove = vc4_hdmi_dev_remove,
+ .driver = {
+ .name = "vc4_hdmi",
+ .of_match_table = vc4_hdmi_dt_match,
+- .pm = &vc4_hdmi_pm_ops,
+ },
+ };
+diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
+index 4d5924e9f7666..aca7b595c4c78 100644
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -409,6 +409,7 @@ config MESON_IRQ_GPIO
+ config GOLDFISH_PIC
+ bool "Goldfish programmable interrupt controller"
+ depends on MIPS && (GOLDFISH || COMPILE_TEST)
++ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+ help
+ Say yes here to enable Goldfish interrupt controller driver used
1580 +diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
1581 +index 7557ab5512953..53e0fb0562c11 100644
1582 +--- a/drivers/irqchip/irq-armada-370-xp.c
1583 ++++ b/drivers/irqchip/irq-armada-370-xp.c
1584 +@@ -359,16 +359,16 @@ static void armada_370_xp_ipi_send_mask(struct irq_data *d,
1585 + ARMADA_370_XP_SW_TRIG_INT_OFFS);
1586 + }
1587 +
1588 +-static void armada_370_xp_ipi_eoi(struct irq_data *d)
1589 ++static void armada_370_xp_ipi_ack(struct irq_data *d)
1590 + {
1591 + writel(~BIT(d->hwirq), per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
1592 + }
1593 +
1594 + static struct irq_chip ipi_irqchip = {
1595 + .name = "IPI",
1596 ++ .irq_ack = armada_370_xp_ipi_ack,
1597 + .irq_mask = armada_370_xp_ipi_mask,
1598 + .irq_unmask = armada_370_xp_ipi_unmask,
1599 +- .irq_eoi = armada_370_xp_ipi_eoi,
1600 + .ipi_send_mask = armada_370_xp_ipi_send_mask,
1601 + };
1602 +
1603 +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
1604 +index ba39668c3e085..51584f4cccf46 100644
1605 +--- a/drivers/irqchip/irq-gic-v3-its.c
1606 ++++ b/drivers/irqchip/irq-gic-v3-its.c
1607 +@@ -4501,7 +4501,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
1608 +
1609 + if (err) {
1610 + if (i > 0)
1611 +- its_vpe_irq_domain_free(domain, virq, i - 1);
1612 ++ its_vpe_irq_domain_free(domain, virq, i);
1613 +
1614 + its_lpi_free(bitmap, base, nr_ids);
1615 + its_free_prop_table(vprop_page);
1616 +diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
1617 +index 38fbb3b598731..38cc8340e817d 100644
1618 +--- a/drivers/mcb/mcb-core.c
1619 ++++ b/drivers/mcb/mcb-core.c
1620 +@@ -277,8 +277,8 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
1621 +
1622 + bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL);
1623 + if (bus_nr < 0) {
1624 +- rc = bus_nr;
1625 +- goto err_free;
1626 ++ kfree(bus);
1627 ++ return ERR_PTR(bus_nr);
1628 + }
1629 +
1630 + bus->bus_nr = bus_nr;
1631 +@@ -293,12 +293,12 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
1632 + dev_set_name(&bus->dev, "mcb:%d", bus_nr);
1633 + rc = device_add(&bus->dev);
1634 + if (rc)
1635 +- goto err_free;
1636 ++ goto err_put;
1637 +
1638 + return bus;
1639 +-err_free:
1640 +- put_device(carrier);
1641 +- kfree(bus);
1642 ++
1643 ++err_put:
1644 ++ put_device(&bus->dev);
1645 + return ERR_PTR(rc);
1646 + }
1647 + EXPORT_SYMBOL_NS_GPL(mcb_alloc_bus, MCB);
1648 +diff --git a/drivers/md/md.c b/drivers/md/md.c
1649 +index ae8fe54ea3581..6c0c3d0d905aa 100644
1650 +--- a/drivers/md/md.c
1651 ++++ b/drivers/md/md.c
1652 +@@ -5700,10 +5700,6 @@ static int md_alloc(dev_t dev, char *name)
1653 + disk->flags |= GENHD_FL_EXT_DEVT;
1654 + disk->events |= DISK_EVENT_MEDIA_CHANGE;
1655 + mddev->gendisk = disk;
1656 +- /* As soon as we call add_disk(), another thread could get
1657 +- * through to md_open, so make sure it doesn't get too far
1658 +- */
1659 +- mutex_lock(&mddev->open_mutex);
1660 + add_disk(disk);
1661 +
1662 + error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
1663 +@@ -5718,7 +5714,6 @@ static int md_alloc(dev_t dev, char *name)
1664 + if (mddev->kobj.sd &&
1665 + sysfs_create_group(&mddev->kobj, &md_bitmap_group))
1666 + pr_debug("pointless warning\n");
1667 +- mutex_unlock(&mddev->open_mutex);
1668 + abort:
1669 + mutex_unlock(&disks_mutex);
1670 + if (!error && mddev->kobj.sd) {
1671 +diff --git a/drivers/misc/bcm-vk/bcm_vk_tty.c b/drivers/misc/bcm-vk/bcm_vk_tty.c
1672 +index dae9eeed84a2b..89edc936b544b 100644
1673 +--- a/drivers/misc/bcm-vk/bcm_vk_tty.c
1674 ++++ b/drivers/misc/bcm-vk/bcm_vk_tty.c
1675 +@@ -267,13 +267,13 @@ int bcm_vk_tty_init(struct bcm_vk *vk, char *name)
1676 + struct device *tty_dev;
1677 +
1678 + tty_port_init(&vk->tty[i].port);
1679 +- tty_dev = tty_port_register_device(&vk->tty[i].port, tty_drv,
1680 +- i, dev);
1681 ++ tty_dev = tty_port_register_device_attr(&vk->tty[i].port,
1682 ++ tty_drv, i, dev, vk,
1683 ++ NULL);
1684 + if (IS_ERR(tty_dev)) {
1685 + err = PTR_ERR(tty_dev);
1686 + goto unwind;
1687 + }
1688 +- dev_set_drvdata(tty_dev, vk);
1689 + vk->tty[i].is_opened = false;
1690 + }
1691 +
1692 +diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
1693 +index 2e1befbd1ad99..693981891870c 100644
1694 +--- a/drivers/misc/genwqe/card_base.c
1695 ++++ b/drivers/misc/genwqe/card_base.c
1696 +@@ -1090,7 +1090,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
1697 +
1698 + /* check for 64-bit DMA address supported (DAC) */
1699 + /* check for 32-bit DMA address supported (SAC) */
1700 +- if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) ||
1701 ++ if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) &&
1702 + dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
1703 + dev_err(&pci_dev->dev,
1704 + "err: neither DMA32 nor DMA64 supported\n");
1705 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
1706 +index 111a6d5985da6..1c122a1f2f97d 100644
1707 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
1708 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
1709 +@@ -3012,7 +3012,7 @@ static void mv88e6xxx_teardown(struct dsa_switch *ds)
1710 + {
1711 + mv88e6xxx_teardown_devlink_params(ds);
1712 + dsa_devlink_resources_unregister(ds);
1713 +- mv88e6xxx_teardown_devlink_regions(ds);
1714 ++ mv88e6xxx_teardown_devlink_regions_global(ds);
1715 + }
1716 +
1717 + static int mv88e6xxx_setup(struct dsa_switch *ds)
1718 +@@ -3147,7 +3147,7 @@ unlock:
1719 + if (err)
1720 + goto out_resources;
1721 +
1722 +- err = mv88e6xxx_setup_devlink_regions(ds);
1723 ++ err = mv88e6xxx_setup_devlink_regions_global(ds);
1724 + if (err)
1725 + goto out_params;
1726 +
1727 +@@ -3161,6 +3161,16 @@ out_resources:
1728 + return err;
1729 + }
1730 +
1731 ++static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
1732 ++{
1733 ++ return mv88e6xxx_setup_devlink_regions_port(ds, port);
1734 ++}
1735 ++
1736 ++static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
1737 ++{
1738 ++ mv88e6xxx_teardown_devlink_regions_port(ds, port);
1739 ++}
1740 ++
1741 + /* prod_id for switch families which do not have a PHY model number */
1742 + static const u16 family_prod_id_table[] = {
1743 + [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
1744 +@@ -6055,6 +6065,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
1745 + .change_tag_protocol = mv88e6xxx_change_tag_protocol,
1746 + .setup = mv88e6xxx_setup,
1747 + .teardown = mv88e6xxx_teardown,
1748 ++ .port_setup = mv88e6xxx_port_setup,
1749 ++ .port_teardown = mv88e6xxx_port_teardown,
1750 + .phylink_validate = mv88e6xxx_validate,
1751 + .phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state,
1752 + .phylink_mac_config = mv88e6xxx_mac_config,
1753 +diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
1754 +index 0c0f5ea6680c3..381068395c63b 100644
1755 +--- a/drivers/net/dsa/mv88e6xxx/devlink.c
1756 ++++ b/drivers/net/dsa/mv88e6xxx/devlink.c
1757 +@@ -647,26 +647,25 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = {
1758 + },
1759 + };
1760 +
1761 +-static void
1762 +-mv88e6xxx_teardown_devlink_regions_global(struct mv88e6xxx_chip *chip)
1763 ++void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds)
1764 + {
1765 ++ struct mv88e6xxx_chip *chip = ds->priv;
1766 + int i;
1767 +
1768 + for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++)
1769 + dsa_devlink_region_destroy(chip->regions[i]);
1770 + }
1771 +
1772 +-static void
1773 +-mv88e6xxx_teardown_devlink_regions_port(struct mv88e6xxx_chip *chip,
1774 +- int port)
1775 ++void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port)
1776 + {
1777 ++ struct mv88e6xxx_chip *chip = ds->priv;
1778 ++
1779 + dsa_devlink_region_destroy(chip->ports[port].region);
1780 + }
1781 +
1782 +-static int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds,
1783 +- struct mv88e6xxx_chip *chip,
1784 +- int port)
1785 ++int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port)
1786 + {
1787 ++ struct mv88e6xxx_chip *chip = ds->priv;
1788 + struct devlink_region *region;
1789 +
1790 + region = dsa_devlink_port_region_create(ds,
1791 +@@ -681,40 +680,10 @@ static int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds,
1792 + return 0;
1793 + }
1794 +
1795 +-static void
1796 +-mv88e6xxx_teardown_devlink_regions_ports(struct mv88e6xxx_chip *chip)
1797 +-{
1798 +- int port;
1799 +-
1800 +- for (port = 0; port < mv88e6xxx_num_ports(chip); port++)
1801 +- mv88e6xxx_teardown_devlink_regions_port(chip, port);
1802 +-}
1803 +-
1804 +-static int mv88e6xxx_setup_devlink_regions_ports(struct dsa_switch *ds,
1805 +- struct mv88e6xxx_chip *chip)
1806 +-{
1807 +- int port;
1808 +- int err;
1809 +-
1810 +- for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
1811 +- err = mv88e6xxx_setup_devlink_regions_port(ds, chip, port);
1812 +- if (err)
1813 +- goto out;
1814 +- }
1815 +-
1816 +- return 0;
1817 +-
1818 +-out:
1819 +- while (port-- > 0)
1820 +- mv88e6xxx_teardown_devlink_regions_port(chip, port);
1821 +-
1822 +- return err;
1823 +-}
1824 +-
1825 +-static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
1826 +- struct mv88e6xxx_chip *chip)
1827 ++int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
1828 + {
1829 + bool (*cond)(struct mv88e6xxx_chip *chip);
1830 ++ struct mv88e6xxx_chip *chip = ds->priv;
1831 + struct devlink_region_ops *ops;
1832 + struct devlink_region *region;
1833 + u64 size;
1834 +@@ -753,30 +722,6 @@ out:
1835 + return PTR_ERR(region);
1836 + }
1837 +
1838 +-int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds)
1839 +-{
1840 +- struct mv88e6xxx_chip *chip = ds->priv;
1841 +- int err;
1842 +-
1843 +- err = mv88e6xxx_setup_devlink_regions_global(ds, chip);
1844 +- if (err)
1845 +- return err;
1846 +-
1847 +- err = mv88e6xxx_setup_devlink_regions_ports(ds, chip);
1848 +- if (err)
1849 +- mv88e6xxx_teardown_devlink_regions_global(chip);
1850 +-
1851 +- return err;
1852 +-}
1853 +-
1854 +-void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds)
1855 +-{
1856 +- struct mv88e6xxx_chip *chip = ds->priv;
1857 +-
1858 +- mv88e6xxx_teardown_devlink_regions_ports(chip);
1859 +- mv88e6xxx_teardown_devlink_regions_global(chip);
1860 +-}
1861 +-
1862 + int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
1863 + struct devlink_info_req *req,
1864 + struct netlink_ext_ack *extack)
1865 +diff --git a/drivers/net/dsa/mv88e6xxx/devlink.h b/drivers/net/dsa/mv88e6xxx/devlink.h
1866 +index 3d72db3dcf950..65ce6a6858b9f 100644
1867 +--- a/drivers/net/dsa/mv88e6xxx/devlink.h
1868 ++++ b/drivers/net/dsa/mv88e6xxx/devlink.h
1869 +@@ -12,8 +12,10 @@ int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
1870 + struct devlink_param_gset_ctx *ctx);
1871 + int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
1872 + struct devlink_param_gset_ctx *ctx);
1873 +-int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds);
1874 +-void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds);
1875 ++int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds);
1876 ++void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds);
1877 ++int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port);
1878 ++void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port);
1879 +
1880 + int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
1881 + struct devlink_info_req *req,
1882 +diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
1883 +index 8e49d4f85d48c..6bf46d76c0281 100644
1884 +--- a/drivers/net/dsa/realtek-smi-core.c
1885 ++++ b/drivers/net/dsa/realtek-smi-core.c
1886 +@@ -368,7 +368,7 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
1887 + smi->slave_mii_bus->parent = smi->dev;
1888 + smi->ds->slave_mii_bus = smi->slave_mii_bus;
1889 +
1890 +- ret = of_mdiobus_register(smi->slave_mii_bus, mdio_np);
1891 ++ ret = devm_of_mdiobus_register(smi->dev, smi->slave_mii_bus, mdio_np);
1892 + if (ret) {
1893 + dev_err(smi->dev, "unable to register MDIO bus %s\n",
1894 + smi->slave_mii_bus->id);
1895 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
1896 +index f26d037356191..5b996330f228b 100644
1897 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
1898 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
1899 +@@ -419,13 +419,13 @@ static int atl_resume_common(struct device *dev, bool deep)
1900 + if (deep) {
1901 + /* Reinitialize Nic/Vecs objects */
1902 + aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
1903 ++ }
1904 +
1905 ++ if (netif_running(nic->ndev)) {
1906 + ret = aq_nic_init(nic);
1907 + if (ret)
1908 + goto err_exit;
1909 +- }
1910 +
1911 +- if (netif_running(nic->ndev)) {
1912 + ret = aq_nic_start(nic);
1913 + if (ret)
1914 + goto err_exit;
1915 +diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
1916 +index 85fa0ab7201c7..9513cfb5ba58c 100644
1917 +--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
1918 ++++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
1919 +@@ -129,6 +129,8 @@ static int bgmac_probe(struct bcma_device *core)
1920 + bcma_set_drvdata(core, bgmac);
1921 +
1922 + err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr);
1923 ++ if (err == -EPROBE_DEFER)
1924 ++ return err;
1925 +
1926 + /* If no MAC address assigned via device tree, check SPROM */
1927 + if (err) {
1928 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1929 +index fdbf47446a997..f20b57b8cd70e 100644
1930 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1931 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1932 +@@ -385,7 +385,7 @@ static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
1933 + * netif_tx_queue_stopped().
1934 + */
1935 + smp_mb();
1936 +- if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
1937 ++ if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
1938 + netif_tx_wake_queue(txq);
1939 + return false;
1940 + }
1941 +@@ -758,7 +758,7 @@ next_tx_int:
1942 + smp_mb();
1943 +
1944 + if (unlikely(netif_tx_queue_stopped(txq)) &&
1945 +- bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
1946 ++ bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
1947 + READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
1948 + netif_tx_wake_queue(txq);
1949 + }
1950 +@@ -2375,7 +2375,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1951 + if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1952 + tx_pkts++;
1953 + /* return full budget so NAPI will complete. */
1954 +- if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
1955 ++ if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
1956 + rx_pkts = budget;
1957 + raw_cons = NEXT_RAW_CMP(raw_cons);
1958 + if (budget)
1959 +@@ -3531,7 +3531,7 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
1960 + u16 i;
1961 +
1962 + bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
1963 +- MAX_SKB_FRAGS + 1);
1964 ++ BNXT_MIN_TX_DESC_CNT);
1965 +
1966 + for (i = 0; i < bp->tx_nr_rings; i++) {
1967 + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
1968 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
1969 +index ba4e0fc38520c..d4dca4508d268 100644
1970 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
1971 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
1972 +@@ -615,6 +615,11 @@ struct nqe_cn {
1973 + #define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
1974 + #define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
1975 +
1976 ++/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra
1977 ++ * BD because the first TX BD is always a long BD.
1978 ++ */
1979 ++#define BNXT_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
1980 ++
1981 + #define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
1982 + #define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
1983 +
1984 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1985 +index 786ca51e669bc..3a8c284635922 100644
1986 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1987 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1988 +@@ -784,7 +784,7 @@ static int bnxt_set_ringparam(struct net_device *dev,
1989 +
1990 + if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
1991 + (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
1992 +- (ering->tx_pending <= MAX_SKB_FRAGS))
1993 ++ (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
1994 + return -EINVAL;
1995 +
1996 + if (netif_running(dev))
1997 +diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
1998 +index 8b7b59908a1ab..f66d22de5168d 100644
1999 +--- a/drivers/net/ethernet/cadence/macb_pci.c
2000 ++++ b/drivers/net/ethernet/cadence/macb_pci.c
2001 +@@ -111,9 +111,9 @@ static void macb_remove(struct pci_dev *pdev)
2002 + struct platform_device *plat_dev = pci_get_drvdata(pdev);
2003 + struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
2004 +
2005 +- platform_device_unregister(plat_dev);
2006 + clk_unregister(plat_data->pclk);
2007 + clk_unregister(plat_data->hclk);
2008 ++ platform_device_unregister(plat_dev);
2009 + }
2010 +
2011 + static const struct pci_device_id dev_id_table[] = {
2012 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
2013 +index 3ca93adb96628..042327b9981fa 100644
2014 +--- a/drivers/net/ethernet/freescale/enetc/enetc.c
2015 ++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
2016 +@@ -419,7 +419,7 @@ static void enetc_rx_dim_work(struct work_struct *w)
2017 +
2018 + static void enetc_rx_net_dim(struct enetc_int_vector *v)
2019 + {
2020 +- struct dim_sample dim_sample;
2021 ++ struct dim_sample dim_sample = {};
2022 +
2023 + v->comp_cnt++;
2024 +
2025 +@@ -1879,7 +1879,6 @@ static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
2026 + static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
2027 + {
2028 + struct pci_dev *pdev = priv->si->pdev;
2029 +- cpumask_t cpu_mask;
2030 + int i, j, err;
2031 +
2032 + for (i = 0; i < priv->bdr_int_num; i++) {
2033 +@@ -1908,9 +1907,7 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
2034 +
2035 + enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
2036 + }
2037 +- cpumask_clear(&cpu_mask);
2038 +- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
2039 +- irq_set_affinity_hint(irq, &cpu_mask);
2040 ++ irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));
2041 + }
2042 +
2043 + return 0;
2044 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
2045 +index ec9a7f8bc3fed..2eeafd61a07ee 100644
2046 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
2047 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
2048 +@@ -1878,12 +1878,12 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
2049 + return;
2050 + }
2051 +
2052 +- dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n",
2053 ++ dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vport(%u), queue_id(%u)\n",
2054 + vf_id, q_id);
2055 +
2056 + if (vf_id) {
2057 + if (vf_id >= hdev->num_alloc_vport) {
2058 +- dev_err(dev, "invalid vf id(%u)\n", vf_id);
2059 ++ dev_err(dev, "invalid vport(%u)\n", vf_id);
2060 + return;
2061 + }
2062 +
2063 +@@ -1896,8 +1896,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
2064 +
2065 + ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
2066 + if (ret)
2067 +- dev_err(dev, "inform reset to vf(%u) failed %d!\n",
2068 +- hdev->vport->vport_id, ret);
2069 ++ dev_err(dev, "inform reset to vport(%u) failed %d!\n",
2070 ++ vf_id, ret);
2071 + } else {
2072 + set_bit(HNAE3_FUNC_RESET, reset_requests);
2073 + }
2074 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2075 +index 72d55c028ac4b..90a72c79fec99 100644
2076 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2077 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2078 +@@ -3660,7 +3660,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2079 + if (ret) {
2080 + dev_err(&hdev->pdev->dev,
2081 + "set vf(%u) rst failed %d!\n",
2082 +- vport->vport_id, ret);
2083 ++ vport->vport_id - HCLGE_VF_VPORT_START_NUM,
2084 ++ ret);
2085 + return ret;
2086 + }
2087 +
2088 +@@ -3675,7 +3676,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2089 + if (ret)
2090 + dev_warn(&hdev->pdev->dev,
2091 + "inform reset to vf(%u) failed %d!\n",
2092 +- vport->vport_id, ret);
2093 ++ vport->vport_id - HCLGE_VF_VPORT_START_NUM,
2094 ++ ret);
2095 + }
2096 +
2097 + return 0;
2098 +@@ -4734,6 +4736,24 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
2099 + return 0;
2100 + }
2101 +
2102 ++static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
2103 ++ u8 *hash_algo)
2104 ++{
2105 ++ switch (hfunc) {
2106 ++ case ETH_RSS_HASH_TOP:
2107 ++ *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
2108 ++ return 0;
2109 ++ case ETH_RSS_HASH_XOR:
2110 ++ *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
2111 ++ return 0;
2112 ++ case ETH_RSS_HASH_NO_CHANGE:
2113 ++ *hash_algo = vport->rss_algo;
2114 ++ return 0;
2115 ++ default:
2116 ++ return -EINVAL;
2117 ++ }
2118 ++}
2119 ++
2120 + static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
2121 + const u8 *key, const u8 hfunc)
2122 + {
2123 +@@ -4743,30 +4763,27 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
2124 + u8 hash_algo;
2125 + int ret, i;
2126 +
2127 ++ ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
2128 ++ if (ret) {
2129 ++ dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
2130 ++ return ret;
2131 ++ }
2132 ++
2133 + /* Set the RSS Hash Key if specififed by the user */
2134 + if (key) {
2135 +- switch (hfunc) {
2136 +- case ETH_RSS_HASH_TOP:
2137 +- hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
2138 +- break;
2139 +- case ETH_RSS_HASH_XOR:
2140 +- hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
2141 +- break;
2142 +- case ETH_RSS_HASH_NO_CHANGE:
2143 +- hash_algo = vport->rss_algo;
2144 +- break;
2145 +- default:
2146 +- return -EINVAL;
2147 +- }
2148 +-
2149 + ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
2150 + if (ret)
2151 + return ret;
2152 +
2153 + /* Update the shadow RSS key with user specified qids */
2154 + memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
2155 +- vport->rss_algo = hash_algo;
2156 ++ } else {
2157 ++ ret = hclge_set_rss_algo_key(hdev, hash_algo,
2158 ++ vport->rss_hash_key);
2159 ++ if (ret)
2160 ++ return ret;
2161 + }
2162 ++ vport->rss_algo = hash_algo;
2163 +
2164 + /* Update the shadow RSS table with user specified qids */
2165 + for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
2166 +@@ -6620,10 +6637,13 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
2167 + u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
2168 + u16 tqps;
2169 +
2170 ++ /* To keep consistent with user's configuration, minus 1 when
2171 ++ * printing 'vf', because vf id from ethtool is added 1 for vf.
2172 ++ */
2173 + if (vf > hdev->num_req_vfs) {
2174 + dev_err(&hdev->pdev->dev,
2175 +- "Error: vf id (%u) > max vf num (%u)\n",
2176 +- vf, hdev->num_req_vfs);
2177 ++ "Error: vf id (%u) should be less than %u\n",
2178 ++ vf - 1, hdev->num_req_vfs);
2179 + return -EINVAL;
2180 + }
2181 +
2182 +@@ -9790,6 +9810,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
2183 + if (is_kill && !vlan_id)
2184 + return 0;
2185 +
2186 ++ if (vlan_id >= VLAN_N_VID)
2187 ++ return -EINVAL;
2188 ++
2189 + ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
2190 + if (ret) {
2191 + dev_err(&hdev->pdev->dev,
2192 +@@ -10696,7 +10719,8 @@ static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
2193 + return 0;
2194 + }
2195 +
2196 +-static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
2197 ++static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
2198 ++ u8 *reset_status)
2199 + {
2200 + struct hclge_reset_tqp_queue_cmd *req;
2201 + struct hclge_desc desc;
2202 +@@ -10714,7 +10738,9 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
2203 + return ret;
2204 + }
2205 +
2206 +- return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
2207 ++ *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
2208 ++
2209 ++ return 0;
2210 + }
2211 +
2212 + u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
2213 +@@ -10733,7 +10759,7 @@ static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
2214 + struct hclge_vport *vport = hclge_get_vport(handle);
2215 + struct hclge_dev *hdev = vport->back;
2216 + u16 reset_try_times = 0;
2217 +- int reset_status;
2218 ++ u8 reset_status;
2219 + u16 queue_gid;
2220 + int ret;
2221 + u16 i;
2222 +@@ -10749,7 +10775,11 @@ static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
2223 + }
2224 +
2225 + while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
2226 +- reset_status = hclge_get_reset_status(hdev, queue_gid);
2227 ++ ret = hclge_get_reset_status(hdev, queue_gid,
2228 ++ &reset_status);
2229 ++ if (ret)
2230 ++ return ret;
2231 ++
2232 + if (reset_status)
2233 + break;
2234 +
2235 +@@ -11442,11 +11472,11 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
2236 + struct hclge_vport *vport = &hdev->vport[i];
2237 + int ret;
2238 +
2239 +- /* Send cmd to clear VF's FUNC_RST_ING */
2240 ++ /* Send cmd to clear vport's FUNC_RST_ING */
2241 + ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
2242 + if (ret)
2243 + dev_warn(&hdev->pdev->dev,
2244 +- "clear vf(%u) rst failed %d!\n",
2245 ++ "clear vport(%u) rst failed %d!\n",
2246 + vport->vport_id, ret);
2247 + }
2248 + }
2249 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
2250 +index 0dbed35645eda..c1a4b79a70504 100644
2251 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
2252 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
2253 +@@ -564,7 +564,7 @@ static int hclge_reset_vf(struct hclge_vport *vport)
2254 + struct hclge_dev *hdev = vport->back;
2255 +
2256 + dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
2257 +- vport->vport_id);
2258 ++ vport->vport_id - HCLGE_VF_VPORT_START_NUM);
2259 +
2260 + return hclge_func_reset_cmd(hdev, vport->vport_id);
2261 + }
2262 +@@ -588,9 +588,17 @@ static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
2263 + struct hclge_mbx_vf_to_pf_cmd *mbx_req,
2264 + struct hclge_respond_to_vf_msg *resp_msg)
2265 + {
2266 ++ struct hnae3_handle *handle = &vport->nic;
2267 ++ struct hclge_dev *hdev = vport->back;
2268 + u16 queue_id, qid_in_pf;
2269 +
2270 + memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
2271 ++ if (queue_id >= handle->kinfo.num_tqps) {
2272 ++ dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
2273 ++ queue_id, mbx_req->mbx_src_vfid);
2274 ++ return;
2275 ++ }
2276 ++
2277 + qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
2278 + memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
2279 + resp_msg->len = sizeof(qid_in_pf);
2280 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
2281 +index 78d5bf1ea5610..44618cc4cca10 100644
2282 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
2283 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
2284 +@@ -581,7 +581,7 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
2285 + ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2286 + if (ret) {
2287 + dev_err(&hdev->pdev->dev,
2288 +- "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
2289 ++ "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
2290 + vport->vport_id, shap_cfg_cmd->qs_id,
2291 + max_tx_rate, ret);
2292 + return ret;
2293 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2294 +index be3ea7023ed8c..22cf66004dfa2 100644
2295 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2296 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2297 +@@ -814,40 +814,56 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
2298 + return 0;
2299 + }
2300 +
2301 ++static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc,
2302 ++ u8 *hash_algo)
2303 ++{
2304 ++ switch (hfunc) {
2305 ++ case ETH_RSS_HASH_TOP:
2306 ++ *hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
2307 ++ return 0;
2308 ++ case ETH_RSS_HASH_XOR:
2309 ++ *hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
2310 ++ return 0;
2311 ++ case ETH_RSS_HASH_NO_CHANGE:
2312 ++ *hash_algo = hdev->rss_cfg.hash_algo;
2313 ++ return 0;
2314 ++ default:
2315 ++ return -EINVAL;
2316 ++ }
2317 ++}
2318 ++
2319 + static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
2320 + const u8 *key, const u8 hfunc)
2321 + {
2322 + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2323 + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
2324 ++ u8 hash_algo;
2325 + int ret, i;
2326 +
2327 + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2328 ++ ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo);
2329 ++ if (ret)
2330 ++ return ret;
2331 ++
2332 + /* Set the RSS Hash Key if specififed by the user */
2333 + if (key) {
2334 +- switch (hfunc) {
2335 +- case ETH_RSS_HASH_TOP:
2336 +- rss_cfg->hash_algo =
2337 +- HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
2338 +- break;
2339 +- case ETH_RSS_HASH_XOR:
2340 +- rss_cfg->hash_algo =
2341 +- HCLGEVF_RSS_HASH_ALGO_SIMPLE;
2342 +- break;
2343 +- case ETH_RSS_HASH_NO_CHANGE:
2344 +- break;
2345 +- default:
2346 +- return -EINVAL;
2347 +- }
2348 +-
2349 +- ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
2350 +- key);
2351 +- if (ret)
2352 ++ ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);
2353 ++ if (ret) {
2354 ++ dev_err(&hdev->pdev->dev,
2355 ++ "invalid hfunc type %u\n", hfunc);
2356 + return ret;
2357 ++ }
2358 +
2359 + /* Update the shadow RSS key with user specified qids */
2360 + memcpy(rss_cfg->rss_hash_key, key,
2361 + HCLGEVF_RSS_KEY_SIZE);
2362 ++ } else {
2363 ++ ret = hclgevf_set_rss_algo_key(hdev, hash_algo,
2364 ++ rss_cfg->rss_hash_key);
2365 ++ if (ret)
2366 ++ return ret;
2367 + }
2368 ++ rss_cfg->hash_algo = hash_algo;
2369 + }
2370 +
2371 + /* update the shadow RSS table with user specified qids */
2372 +diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
2373 +index fc8c7cd674712..8b12a5ab3818c 100644
2374 +--- a/drivers/net/ethernet/i825xx/82596.c
2375 ++++ b/drivers/net/ethernet/i825xx/82596.c
2376 +@@ -1155,7 +1155,7 @@ struct net_device * __init i82596_probe(int unit)
2377 + err = -ENODEV;
2378 + goto out;
2379 + }
2380 +- memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN); /* YUCK! Get addr from NOVRAM */
2381 ++ memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN); /* YUCK! Get addr from NOVRAM */
2382 + dev->base_addr = MVME_I596_BASE;
2383 + dev->irq = (unsigned) MVME16x_IRQ_I596;
2384 + goto found;
2385 +diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
2386 +index b5f68f66d42a8..7bb1f20002b58 100644
2387 +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
2388 ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
2389 +@@ -186,6 +186,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
2390 + int hash;
2391 + int i;
2392 +
2393 ++ if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
2394 ++ return -EEXIST;
2395 ++
2396 + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
2397 + struct flow_match_meta match;
2398 +
2399 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2400 +index 5d0c9c62382dc..1e672bc36c4dc 100644
2401 +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2402 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
2403 +@@ -372,6 +372,9 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
2404 + int nhoff = skb_network_offset(skb);
2405 + int ret = 0;
2406 +
2407 ++ if (skb->encapsulation)
2408 ++ return -EPROTONOSUPPORT;
2409 ++
2410 + if (skb->protocol != htons(ETH_P_IP))
2411 + return -EPROTONOSUPPORT;
2412 +
2413 +diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
2414 +index 2948d731a1c1c..512dff9551669 100644
2415 +--- a/drivers/net/ethernet/mscc/ocelot.c
2416 ++++ b/drivers/net/ethernet/mscc/ocelot.c
2417 +@@ -1260,14 +1260,19 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
2418 + return mask;
2419 + }
2420 +
2421 +-static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot,
2422 ++static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port,
2423 + struct net_device *bridge)
2424 + {
2425 ++ struct ocelot_port *ocelot_port = ocelot->ports[src_port];
2426 + u32 mask = 0;
2427 + int port;
2428 +
2429 ++ if (!ocelot_port || ocelot_port->bridge != bridge ||
2430 ++ ocelot_port->stp_state != BR_STATE_FORWARDING)
2431 ++ return 0;
2432 ++
2433 + for (port = 0; port < ocelot->num_phys_ports; port++) {
2434 +- struct ocelot_port *ocelot_port = ocelot->ports[port];
2435 ++ ocelot_port = ocelot->ports[port];
2436 +
2437 + if (!ocelot_port)
2438 + continue;
2439 +@@ -1333,7 +1338,7 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
2440 + struct net_device *bridge = ocelot_port->bridge;
2441 + struct net_device *bond = ocelot_port->bond;
2442 +
2443 +- mask = ocelot_get_bridge_fwd_mask(ocelot, bridge);
2444 ++ mask = ocelot_get_bridge_fwd_mask(ocelot, port, bridge);
2445 + mask |= cpu_fwd_mask;
2446 + mask &= ~BIT(port);
2447 + if (bond) {
2448 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
2449 +index a99861124630a..68fbe536a1f32 100644
2450 +--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
2451 ++++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
2452 +@@ -1297,6 +1297,14 @@ qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
2453 + prev_weight = weight;
2454 +
2455 + while (weight) {
2456 ++ /* If the HW device is during recovery, all resources are
2457 ++ * immediately reset without receiving a per-cid indication
2458 ++ * from HW. In this case we don't expect the cid_map to be
2459 ++ * cleared.
2460 ++ */
2461 ++ if (p_hwfn->cdev->recov_in_prog)
2462 ++ return 0;
2463 ++
2464 + msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
2465 +
2466 + weight = bitmap_weight(bmap->bitmap, bmap->max_count);
2467 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
2468 +index f16a157bb95a0..cf5baa5e59bcc 100644
2469 +--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
2470 ++++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
2471 +@@ -77,6 +77,14 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
2472 + * Beyond the added delay we clear the bitmap anyway.
2473 + */
2474 + while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
2475 ++ /* If the HW device is during recovery, all resources are
2476 ++ * immediately reset without receiving a per-cid indication
2477 ++ * from HW. In this case we don't expect the cid bitmap to be
2478 ++ * cleared.
2479 ++ */
2480 ++ if (p_hwfn->cdev->recov_in_prog)
2481 ++ return;
2482 ++
2483 + msleep(100);
2484 + if (wait_count++ > 20) {
2485 + DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
2486 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2487 +index 0dbd189c2721d..2218bc3a624b4 100644
2488 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2489 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2490 +@@ -309,7 +309,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
2491 + priv->clk_csr = STMMAC_CSR_100_150M;
2492 + else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
2493 + priv->clk_csr = STMMAC_CSR_150_250M;
2494 +- else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
2495 ++ else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
2496 + priv->clk_csr = STMMAC_CSR_250_300M;
2497 + }
2498 +
2499 +diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
2500 +index 8fe8887d506a3..6192244b304ab 100644
2501 +--- a/drivers/net/hamradio/6pack.c
2502 ++++ b/drivers/net/hamradio/6pack.c
2503 +@@ -68,9 +68,9 @@
2504 + #define SIXP_DAMA_OFF 0
2505 +
2506 + /* default level 2 parameters */
2507 +-#define SIXP_TXDELAY (HZ/4) /* in 1 s */
2508 ++#define SIXP_TXDELAY 25 /* 250 ms */
2509 + #define SIXP_PERSIST 50 /* in 256ths */
2510 +-#define SIXP_SLOTTIME (HZ/10) /* in 1 s */
2511 ++#define SIXP_SLOTTIME 10 /* 100 ms */
2512 + #define SIXP_INIT_RESYNC_TIMEOUT (3*HZ/2) /* in 1 s */
2513 + #define SIXP_RESYNC_TIMEOUT 5*HZ /* in 1 s */
2514 +
2515 +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
2516 +index 42e5a681183f3..0d3d9c3ee83c8 100644
2517 +--- a/drivers/net/phy/phylink.c
2518 ++++ b/drivers/net/phy/phylink.c
2519 +@@ -1604,6 +1604,32 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
2520 + if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
2521 + return -EINVAL;
2522 +
2523 ++ /* If this link is with an SFP, ensure that changes to advertised modes
2524 ++ * also cause the associated interface to be selected such that the
2525 ++ * link can be configured correctly.
2526 ++ */
2527 ++ if (pl->sfp_port && pl->sfp_bus) {
2528 ++ config.interface = sfp_select_interface(pl->sfp_bus,
2529 ++ config.advertising);
2530 ++ if (config.interface == PHY_INTERFACE_MODE_NA) {
2531 ++ phylink_err(pl,
2532 ++ "selection of interface failed, advertisement %*pb\n",
2533 ++ __ETHTOOL_LINK_MODE_MASK_NBITS,
2534 ++ config.advertising);
2535 ++ return -EINVAL;
2536 ++ }
2537 ++
2538 ++ /* Revalidate with the selected interface */
2539 ++ linkmode_copy(support, pl->supported);
2540 ++ if (phylink_validate(pl, support, &config)) {
2541 ++ phylink_err(pl, "validation of %s/%s with support %*pb failed\n",
2542 ++ phylink_an_mode_str(pl->cur_link_an_mode),
2543 ++ phy_modes(config.interface),
2544 ++ __ETHTOOL_LINK_MODE_MASK_NBITS, support);
2545 ++ return -EINVAL;
2546 ++ }
2547 ++ }
2548 ++
2549 + mutex_lock(&pl->state_mutex);
2550 + pl->link_config.speed = config.speed;
2551 + pl->link_config.duplex = config.duplex;
2552 +@@ -2183,7 +2209,9 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
2553 + if (phy_interface_mode_is_8023z(iface) && pl->phydev)
2554 + return -EINVAL;
2555 +
2556 +- changed = !linkmode_equal(pl->supported, support);
2557 ++ changed = !linkmode_equal(pl->supported, support) ||
2558 ++ !linkmode_equal(pl->link_config.advertising,
2559 ++ config.advertising);
2560 + if (changed) {
2561 + linkmode_copy(pl->supported, support);
2562 + linkmode_copy(pl->link_config.advertising, config.advertising);
2563 +diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
2564 +index 18e0ca85f6537..3c7120ec70798 100644
2565 +--- a/drivers/net/usb/hso.c
2566 ++++ b/drivers/net/usb/hso.c
2567 +@@ -2720,14 +2720,14 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
2568 +
2569 + serial = kzalloc(sizeof(*serial), GFP_KERNEL);
2570 + if (!serial)
2571 +- goto exit;
2572 ++ goto err_free_dev;
2573 +
2574 + hso_dev->port_data.dev_serial = serial;
2575 + serial->parent = hso_dev;
2576 +
2577 + if (hso_serial_common_create
2578 + (serial, 1, CTRL_URB_RX_SIZE, CTRL_URB_TX_SIZE))
2579 +- goto exit;
2580 ++ goto err_free_serial;
2581 +
2582 + serial->tx_data_length--;
2583 + serial->write_data = hso_mux_serial_write_data;
2584 +@@ -2743,11 +2743,9 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
2585 + /* done, return it */
2586 + return hso_dev;
2587 +
2588 +-exit:
2589 +- if (serial) {
2590 +- tty_unregister_device(tty_drv, serial->minor);
2591 +- kfree(serial);
2592 +- }
2593 ++err_free_serial:
2594 ++ kfree(serial);
2595 ++err_free_dev:
2596 + kfree(hso_dev);
2597 + return NULL;
2598 +
2599 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
2600 +index eee493685aad5..fb96658bb91ff 100644
2601 +--- a/drivers/net/virtio_net.c
2602 ++++ b/drivers/net/virtio_net.c
2603 +@@ -435,6 +435,10 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
2604 +
2605 + skb_reserve(skb, p - buf);
2606 + skb_put(skb, len);
2607 ++
2608 ++ page = (struct page *)page->private;
2609 ++ if (page)
2610 ++ give_pages(rq, page);
2611 + goto ok;
2612 + }
2613 +
2614 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
2615 +index 5a8df5a195cb5..141635a35c28a 100644
2616 +--- a/drivers/net/vxlan.c
2617 ++++ b/drivers/net/vxlan.c
2618 +@@ -4756,12 +4756,12 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
2619 + LIST_HEAD(list);
2620 + unsigned int h;
2621 +
2622 +- rtnl_lock();
2623 + list_for_each_entry(net, net_list, exit_list) {
2624 + struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2625 +
2626 + unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
2627 + }
2628 ++ rtnl_lock();
2629 + list_for_each_entry(net, net_list, exit_list)
2630 + vxlan_destroy_tunnels(net, &list);
2631 +
2632 +diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
2633 +index 250d56f204c3e..e62b1a0916d89 100644
2634 +--- a/drivers/nfc/st-nci/spi.c
2635 ++++ b/drivers/nfc/st-nci/spi.c
2636 +@@ -278,6 +278,7 @@ static int st_nci_spi_remove(struct spi_device *dev)
2637 +
2638 + static struct spi_device_id st_nci_spi_id_table[] = {
2639 + {ST_NCI_SPI_DRIVER_NAME, 0},
2640 ++ {"st21nfcb-spi", 0},
2641 + {}
2642 + };
2643 + MODULE_DEVICE_TABLE(spi, st_nci_spi_id_table);
2644 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2645 +index 84e7cb9f19681..e2374319df61a 100644
2646 +--- a/drivers/nvme/host/core.c
2647 ++++ b/drivers/nvme/host/core.c
2648 +@@ -13,7 +13,6 @@
2649 + #include <linux/kernel.h>
2650 + #include <linux/module.h>
2651 + #include <linux/backing-dev.h>
2652 +-#include <linux/list_sort.h>
2653 + #include <linux/slab.h>
2654 + #include <linux/types.h>
2655 + #include <linux/pr.h>
2656 +@@ -3688,15 +3687,6 @@ out_unlock:
2657 + return ret;
2658 + }
2659 +
2660 +-static int ns_cmp(void *priv, const struct list_head *a,
2661 +- const struct list_head *b)
2662 +-{
2663 +- struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
2664 +- struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
2665 +-
2666 +- return nsa->head->ns_id - nsb->head->ns_id;
2667 +-}
2668 +-
2669 + struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
2670 + {
2671 + struct nvme_ns *ns, *ret = NULL;
2672 +@@ -3717,6 +3707,22 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
2673 + }
2674 + EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
2675 +
2676 ++/*
2677 ++ * Add the namespace to the controller list while keeping the list ordered.
2678 ++ */
2679 ++static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
2680 ++{
2681 ++ struct nvme_ns *tmp;
2682 ++
2683 ++ list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
2684 ++ if (tmp->head->ns_id < ns->head->ns_id) {
2685 ++ list_add(&ns->list, &tmp->list);
2686 ++ return;
2687 ++ }
2688 ++ }
2689 ++ list_add(&ns->list, &ns->ctrl->namespaces);
2690 ++}
2691 ++
2692 + static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
2693 + struct nvme_ns_ids *ids)
2694 + {
2695 +@@ -3778,9 +3784,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
2696 + }
2697 +
2698 + down_write(&ctrl->namespaces_rwsem);
2699 +- list_add_tail(&ns->list, &ctrl->namespaces);
2700 ++ nvme_ns_add_to_ctrl_list(ns);
2701 + up_write(&ctrl->namespaces_rwsem);
2702 +-
2703 + nvme_get_ctrl(ctrl);
2704 +
2705 + device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
2706 +@@ -4059,10 +4064,6 @@ static void nvme_scan_work(struct work_struct *work)
2707 + if (nvme_scan_ns_list(ctrl) != 0)
2708 + nvme_scan_ns_sequential(ctrl);
2709 + mutex_unlock(&ctrl->scan_lock);
2710 +-
2711 +- down_write(&ctrl->namespaces_rwsem);
2712 +- list_sort(NULL, &ctrl->namespaces, ns_cmp);
2713 +- up_write(&ctrl->namespaces_rwsem);
2714 + }
2715 +
2716 + /*
2717 +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
2718 +index 3f32c5e86bfcb..abc9bdfd48bde 100644
2719 +--- a/drivers/nvme/host/multipath.c
2720 ++++ b/drivers/nvme/host/multipath.c
2721 +@@ -583,14 +583,17 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
2722 +
2723 + down_read(&ctrl->namespaces_rwsem);
2724 + list_for_each_entry(ns, &ctrl->namespaces, list) {
2725 +- unsigned nsid = le32_to_cpu(desc->nsids[n]);
2726 +-
2727 ++ unsigned nsid;
2728 ++again:
2729 ++ nsid = le32_to_cpu(desc->nsids[n]);
2730 + if (ns->head->ns_id < nsid)
2731 + continue;
2732 + if (ns->head->ns_id == nsid)
2733 + nvme_update_ns_ana_state(desc, ns);
2734 + if (++n == nr_nsids)
2735 + break;
2736 ++ if (ns->head->ns_id > nsid)
2737 ++ goto again;
2738 + }
2739 + up_read(&ctrl->namespaces_rwsem);
2740 + return 0;
2741 +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
2742 +index a68704e39084e..042c594bc57e2 100644
2743 +--- a/drivers/nvme/host/rdma.c
2744 ++++ b/drivers/nvme/host/rdma.c
2745 +@@ -656,8 +656,8 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
2746 + if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
2747 + return;
2748 +
2749 +- nvme_rdma_destroy_queue_ib(queue);
2750 + rdma_destroy_id(queue->cm_id);
2751 ++ nvme_rdma_destroy_queue_ib(queue);
2752 + mutex_destroy(&queue->queue_lock);
2753 + }
2754 +
2755 +@@ -1815,14 +1815,10 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
2756 + for (i = 0; i < queue->queue_size; i++) {
2757 + ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
2758 + if (ret)
2759 +- goto out_destroy_queue_ib;
2760 ++ return ret;
2761 + }
2762 +
2763 + return 0;
2764 +-
2765 +-out_destroy_queue_ib:
2766 +- nvme_rdma_destroy_queue_ib(queue);
2767 +- return ret;
2768 + }
2769 +
2770 + static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
2771 +@@ -1916,14 +1912,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
2772 + if (ret) {
2773 + dev_err(ctrl->ctrl.device,
2774 + "rdma_connect_locked failed (%d).\n", ret);
2775 +- goto out_destroy_queue_ib;
2776 ++ return ret;
2777 + }
2778 +
2779 + return 0;
2780 +-
2781 +-out_destroy_queue_ib:
2782 +- nvme_rdma_destroy_queue_ib(queue);
2783 +- return ret;
2784 + }
2785 +
2786 + static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
2787 +@@ -1954,8 +1946,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
2788 + case RDMA_CM_EVENT_ROUTE_ERROR:
2789 + case RDMA_CM_EVENT_CONNECT_ERROR:
2790 + case RDMA_CM_EVENT_UNREACHABLE:
2791 +- nvme_rdma_destroy_queue_ib(queue);
2792 +- fallthrough;
2793 + case RDMA_CM_EVENT_ADDR_ERROR:
2794 + dev_dbg(queue->ctrl->ctrl.device,
2795 + "CM error event %d\n", ev->event);
2796 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
2797 +index 19a711395cdc3..fd28a23d45ed6 100644
2798 +--- a/drivers/nvme/host/tcp.c
2799 ++++ b/drivers/nvme/host/tcp.c
2800 +@@ -614,7 +614,7 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
2801 + cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
2802 + data->ttag = pdu->ttag;
2803 + data->command_id = nvme_cid(rq);
2804 +- data->data_offset = cpu_to_le32(req->data_sent);
2805 ++ data->data_offset = pdu->r2t_offset;
2806 + data->data_length = cpu_to_le32(req->pdu_len);
2807 + return 0;
2808 + }
2809 +@@ -940,7 +940,15 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
2810 + nvme_tcp_ddgst_update(queue->snd_hash, page,
2811 + offset, ret);
2812 +
2813 +- /* fully successful last write*/
2814 ++ /*
2815 ++ * update the request iterator except for the last payload send
2816 ++ * in the request where we don't want to modify it as we may
2817 ++ * compete with the RX path completing the request.
2818 ++ */
2819 ++ if (req->data_sent + ret < req->data_len)
2820 ++ nvme_tcp_advance_req(req, ret);
2821 ++
2822 ++ /* fully successful last send in current PDU */
2823 + if (last && ret == len) {
2824 + if (queue->data_digest) {
2825 + nvme_tcp_ddgst_final(queue->snd_hash,
2826 +@@ -952,7 +960,6 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
2827 + }
2828 + return 1;
2829 + }
2830 +- nvme_tcp_advance_req(req, ret);
2831 + }
2832 + return -EAGAIN;
2833 + }
2834 +diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
2835 +index fa88bf9cba4d0..3e5053c5ec836 100644
2836 +--- a/drivers/nvme/target/configfs.c
2837 ++++ b/drivers/nvme/target/configfs.c
2838 +@@ -1067,7 +1067,7 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
2839 + {
2840 + struct nvmet_subsys *subsys = to_subsys(item);
2841 +
2842 +- return snprintf(page, PAGE_SIZE, "%*s\n",
2843 ++ return snprintf(page, PAGE_SIZE, "%.*s\n",
2844 + NVMET_SN_MAX_SIZE, subsys->serial);
2845 + }
2846 +
2847 +diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
2848 +index 3481479a2942f..d6a7c896ac866 100644
2849 +--- a/drivers/platform/x86/amd-pmc.c
2850 ++++ b/drivers/platform/x86/amd-pmc.c
2851 +@@ -71,7 +71,7 @@
2852 + #define AMD_CPU_ID_YC 0x14B5
2853 +
2854 + #define PMC_MSG_DELAY_MIN_US 100
2855 +-#define RESPONSE_REGISTER_LOOP_MAX 200
2856 ++#define RESPONSE_REGISTER_LOOP_MAX 20000
2857 +
2858 + #define SOC_SUBSYSTEM_IP_MAX 12
2859 + #define DELAY_MIN_US 2000
2860 +diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
2861 +index 9e7314d90bea8..1e3da9700005e 100644
2862 +--- a/drivers/platform/x86/dell/Kconfig
2863 ++++ b/drivers/platform/x86/dell/Kconfig
2864 +@@ -166,8 +166,7 @@ config DELL_WMI
2865 +
2866 + config DELL_WMI_PRIVACY
2867 + bool "Dell WMI Hardware Privacy Support"
2868 +- depends on DELL_WMI
2869 +- depends on LEDS_TRIGGER_AUDIO
2870 ++ depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO
2871 + help
2872 + This option adds integration with the "Dell Hardware Privacy"
2873 + feature of Dell laptops to the dell-wmi driver.
2874 +diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
2875 +index f58b8543f6ac5..66bb39fd0ef90 100644
2876 +--- a/drivers/platform/x86/intel_punit_ipc.c
2877 ++++ b/drivers/platform/x86/intel_punit_ipc.c
2878 +@@ -8,7 +8,6 @@
2879 + * which provide mailbox interface for power management usage.
2880 + */
2881 +
2882 +-#include <linux/acpi.h>
2883 + #include <linux/bitops.h>
2884 + #include <linux/delay.h>
2885 + #include <linux/device.h>
2886 +@@ -319,7 +318,7 @@ static struct platform_driver intel_punit_ipc_driver = {
2887 + .remove = intel_punit_ipc_remove,
2888 + .driver = {
2889 + .name = "intel_punit_ipc",
2890 +- .acpi_match_table = ACPI_PTR(punit_ipc_acpi_ids),
2891 ++ .acpi_match_table = punit_ipc_acpi_ids,
2892 + },
2893 + };
2894 +
2895 +diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
2896 +index 1d78b455cc48c..e34face736f48 100644
2897 +--- a/drivers/regulator/max14577-regulator.c
2898 ++++ b/drivers/regulator/max14577-regulator.c
2899 +@@ -269,5 +269,3 @@ module_exit(max14577_regulator_exit);
2900 + MODULE_AUTHOR("Krzysztof Kozlowski <krzk@××××××.org>");
2901 + MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
2902 + MODULE_LICENSE("GPL");
2903 +-MODULE_ALIAS("platform:max14577-regulator");
2904 +-MODULE_ALIAS("platform:max77836-regulator");
2905 +diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
2906 +index 6cca910a76ded..7f458d510483f 100644
2907 +--- a/drivers/regulator/qcom-rpmh-regulator.c
2908 ++++ b/drivers/regulator/qcom-rpmh-regulator.c
2909 +@@ -991,7 +991,7 @@ static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = {
2910 + RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4"),
2911 + RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"),
2912 + RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"),
2913 +- RPMH_VREG("ldo7", "ldo%s6", &pmic5_pldo_lv, "vdd-l7"),
2914 ++ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l7"),
2915 + {}
2916 + };
2917 +
2918 +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
2919 +index 62f88ccbd03f8..51f7f4e680c34 100644
2920 +--- a/drivers/s390/net/qeth_core_main.c
2921 ++++ b/drivers/s390/net/qeth_core_main.c
2922 +@@ -207,6 +207,9 @@ static void qeth_clear_working_pool_list(struct qeth_card *card)
2923 + &card->qdio.in_buf_pool.entry_list, list)
2924 + list_del(&pool_entry->list);
2925 +
2926 ++ if (!queue)
2927 ++ return;
2928 ++
2929 + for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
2930 + queue->bufs[i].pool_entry = NULL;
2931 + }
2932 +diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
2933 +index eb88aaaf36eb3..c34a7f7446013 100644
2934 +--- a/drivers/scsi/lpfc/lpfc_attr.c
2935 ++++ b/drivers/scsi/lpfc/lpfc_attr.c
2936 +@@ -6022,7 +6022,8 @@ lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
2937 + len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
2938 + phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
2939 +
2940 +- len += scnprintf(buf + len, PAGE_SIZE, "Cfg: %d SCSI: %d NVME: %d\n",
2941 ++ len += scnprintf(buf + len, PAGE_SIZE - len,
2942 ++ "Cfg: %d SCSI: %d NVME: %d\n",
2943 + phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
2944 + phba->cfg_nvme_seg_cnt);
2945 + return len;
2946 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
2947 +index f8f471157109e..70b507d177f14 100644
2948 +--- a/drivers/scsi/qla2xxx/qla_init.c
2949 ++++ b/drivers/scsi/qla2xxx/qla_init.c
2950 +@@ -7014,7 +7014,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
2951 + return 0;
2952 + break;
2953 + case QLA2XXX_INI_MODE_DUAL:
2954 +- if (!qla_dual_mode_enabled(vha))
2955 ++ if (!qla_dual_mode_enabled(vha) &&
2956 ++ !qla_ini_mode_enabled(vha))
2957 + return 0;
2958 + break;
2959 + case QLA2XXX_INI_MODE_ENABLED:
2960 +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
2961 +index d8b05d8b54708..922e4c7bd88e4 100644
2962 +--- a/drivers/scsi/scsi_transport_iscsi.c
2963 ++++ b/drivers/scsi/scsi_transport_iscsi.c
2964 +@@ -441,9 +441,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
2965 + struct iscsi_transport *t = iface->transport;
2966 + int param = -1;
2967 +
2968 +- if (attr == &dev_attr_iface_enabled.attr)
2969 +- param = ISCSI_NET_PARAM_IFACE_ENABLE;
2970 +- else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
2971 ++ if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
2972 + param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
2973 + else if (attr == &dev_attr_iface_header_digest.attr)
2974 + param = ISCSI_IFACE_PARAM_HDRDGST_EN;
2975 +@@ -483,7 +481,9 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
2976 + if (param != -1)
2977 + return t->attr_is_visible(ISCSI_IFACE_PARAM, param);
2978 +
2979 +- if (attr == &dev_attr_iface_vlan_id.attr)
2980 ++ if (attr == &dev_attr_iface_enabled.attr)
2981 ++ param = ISCSI_NET_PARAM_IFACE_ENABLE;
2982 ++ else if (attr == &dev_attr_iface_vlan_id.attr)
2983 + param = ISCSI_NET_PARAM_VLAN_ID;
2984 + else if (attr == &dev_attr_iface_vlan_priority.attr)
2985 + param = ISCSI_NET_PARAM_VLAN_PRIORITY;
2986 +diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
2987 +index 186b5ff52c3ab..06ee1f045e976 100644
2988 +--- a/drivers/scsi/sd_zbc.c
2989 ++++ b/drivers/scsi/sd_zbc.c
2990 +@@ -154,8 +154,8 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
2991 +
2992 + /*
2993 + * Report zone buffer size should be at most 64B times the number of
2994 +- * zones requested plus the 64B reply header, but should be at least
2995 +- * SECTOR_SIZE for ATA devices.
2996 ++ * zones requested plus the 64B reply header, but should be aligned
2997 ++ * to SECTOR_SIZE for ATA devices.
2998 + * Make sure that this size does not exceed the hardware capabilities.
2999 + * Furthermore, since the report zone command cannot be split, make
3000 + * sure that the allocated buffer can always be mapped by limiting the
3001 +@@ -174,7 +174,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
3002 + *buflen = bufsize;
3003 + return buf;
3004 + }
3005 +- bufsize >>= 1;
3006 ++ bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
3007 + }
3008 +
3009 + return NULL;
3010 +@@ -280,7 +280,7 @@ static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
3011 + {
3012 + struct scsi_disk *sdkp;
3013 + unsigned long flags;
3014 +- unsigned int zno;
3015 ++ sector_t zno;
3016 + int ret;
3017 +
3018 + sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
3019 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
3020 +index 15ac5fa148058..3a204324151a8 100644
3021 +--- a/drivers/scsi/ufs/ufshcd.c
3022 ++++ b/drivers/scsi/ufs/ufshcd.c
3023 +@@ -2112,6 +2112,7 @@ static inline
3024 + void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
3025 + {
3026 + struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
3027 ++ unsigned long flags;
3028 +
3029 + lrbp->issue_time_stamp = ktime_get();
3030 + lrbp->compl_time_stamp = ktime_set(0, 0);
3031 +@@ -2120,19 +2121,10 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
3032 + ufshcd_clk_scaling_start_busy(hba);
3033 + if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
3034 + ufshcd_start_monitor(hba, lrbp);
3035 +- if (ufshcd_has_utrlcnr(hba)) {
3036 +- set_bit(task_tag, &hba->outstanding_reqs);
3037 +- ufshcd_writel(hba, 1 << task_tag,
3038 +- REG_UTP_TRANSFER_REQ_DOOR_BELL);
3039 +- } else {
3040 +- unsigned long flags;
3041 +-
3042 +- spin_lock_irqsave(hba->host->host_lock, flags);
3043 +- set_bit(task_tag, &hba->outstanding_reqs);
3044 +- ufshcd_writel(hba, 1 << task_tag,
3045 +- REG_UTP_TRANSFER_REQ_DOOR_BELL);
3046 +- spin_unlock_irqrestore(hba->host->host_lock, flags);
3047 +- }
3048 ++ spin_lock_irqsave(hba->host->host_lock, flags);
3049 ++ set_bit(task_tag, &hba->outstanding_reqs);
3050 ++ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3051 ++ spin_unlock_irqrestore(hba->host->host_lock, flags);
3052 + /* Make sure that doorbell is committed immediately */
3053 + wmb();
3054 + }
3055 +@@ -5237,10 +5229,12 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
3056 + /**
3057 + * __ufshcd_transfer_req_compl - handle SCSI and query command completion
3058 + * @hba: per adapter instance
3059 +- * @completed_reqs: requests to complete
3060 ++ * @completed_reqs: bitmask that indicates which requests to complete
3061 ++ * @retry_requests: whether to ask the SCSI core to retry completed requests
3062 + */
3063 + static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
3064 +- unsigned long completed_reqs)
3065 ++ unsigned long completed_reqs,
3066 ++ bool retry_requests)
3067 + {
3068 + struct ufshcd_lrb *lrbp;
3069 + struct scsi_cmnd *cmd;
3070 +@@ -5258,7 +5252,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
3071 + if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
3072 + ufshcd_update_monitor(hba, lrbp);
3073 + ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
3074 +- result = ufshcd_transfer_rsp_status(hba, lrbp);
3075 ++ result = retry_requests ? DID_BUS_BUSY << 16 :
3076 ++ ufshcd_transfer_rsp_status(hba, lrbp);
3077 + scsi_dma_unmap(cmd);
3078 + cmd->result = result;
3079 + /* Mark completed command as NULL in LRB */
3080 +@@ -5282,17 +5277,19 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
3081 + }
3082 +
3083 + /**
3084 +- * ufshcd_trc_handler - handle transfer requests completion
3085 ++ * ufshcd_transfer_req_compl - handle SCSI and query command completion
3086 + * @hba: per adapter instance
3087 +- * @use_utrlcnr: get completed requests from UTRLCNR
3088 ++ * @retry_requests: whether or not to ask to retry requests
3089 + *
3090 + * Returns
3091 + * IRQ_HANDLED - If interrupt is valid
3092 + * IRQ_NONE - If invalid interrupt
3093 + */
3094 +-static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
3095 ++static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba,
3096 ++ bool retry_requests)
3097 + {
3098 +- unsigned long completed_reqs = 0;
3099 ++ unsigned long completed_reqs, flags;
3100 ++ u32 tr_doorbell;
3101 +
3102 + /* Resetting interrupt aggregation counters first and reading the
3103 + * DOOR_BELL afterward allows us to handle all the completed requests.
3104 +@@ -5305,27 +5302,14 @@ static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
3105 + !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
3106 + ufshcd_reset_intr_aggr(hba);
3107 +
3108 +- if (use_utrlcnr) {
3109 +- u32 utrlcnr;
3110 +-
3111 +- utrlcnr = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL);
3112 +- if (utrlcnr) {
3113 +- ufshcd_writel(hba, utrlcnr,
3114 +- REG_UTP_TRANSFER_REQ_LIST_COMPL);
3115 +- completed_reqs = utrlcnr;
3116 +- }
3117 +- } else {
3118 +- unsigned long flags;
3119 +- u32 tr_doorbell;
3120 +-
3121 +- spin_lock_irqsave(hba->host->host_lock, flags);
3122 +- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3123 +- completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
3124 +- spin_unlock_irqrestore(hba->host->host_lock, flags);
3125 +- }
3126 ++ spin_lock_irqsave(hba->host->host_lock, flags);
3127 ++ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
3128 ++ completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
3129 ++ spin_unlock_irqrestore(hba->host->host_lock, flags);
3130 +
3131 + if (completed_reqs) {
3132 +- __ufshcd_transfer_req_compl(hba, completed_reqs);
3133 ++ __ufshcd_transfer_req_compl(hba, completed_reqs,
3134 ++ retry_requests);
3135 + return IRQ_HANDLED;
3136 + } else {
3137 + return IRQ_NONE;
3138 +@@ -5804,7 +5788,13 @@ out:
3139 + /* Complete requests that have door-bell cleared */
3140 + static void ufshcd_complete_requests(struct ufs_hba *hba)
3141 + {
3142 +- ufshcd_trc_handler(hba, false);
3143 ++ ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
3144 ++ ufshcd_tmc_handler(hba);
3145 ++}
3146 ++
3147 ++static void ufshcd_retry_aborted_requests(struct ufs_hba *hba)
3148 ++{
3149 ++ ufshcd_transfer_req_compl(hba, /*retry_requests=*/true);
3150 + ufshcd_tmc_handler(hba);
3151 + }
3152 +
3153 +@@ -6146,8 +6136,7 @@ static void ufshcd_err_handler(struct work_struct *work)
3154 + }
3155 +
3156 + lock_skip_pending_xfer_clear:
3157 +- /* Complete the requests that are cleared by s/w */
3158 +- ufshcd_complete_requests(hba);
3159 ++ ufshcd_retry_aborted_requests(hba);
3160 +
3161 + spin_lock_irqsave(hba->host->host_lock, flags);
3162 + hba->silence_err_logs = false;
3163 +@@ -6445,7 +6434,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
3164 + retval |= ufshcd_tmc_handler(hba);
3165 +
3166 + if (intr_status & UTP_TRANSFER_REQ_COMPL)
3167 +- retval |= ufshcd_trc_handler(hba, ufshcd_has_utrlcnr(hba));
3168 ++ retval |= ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
3169 +
3170 + return retval;
3171 + }
3172 +@@ -6869,7 +6858,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
3173 + err = ufshcd_clear_cmd(hba, pos);
3174 + if (err)
3175 + break;
3176 +- __ufshcd_transfer_req_compl(hba, pos);
3177 ++ __ufshcd_transfer_req_compl(hba, 1U << pos, false);
3178 + }
3179 + }
3180 +
3181 +@@ -7040,7 +7029,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
3182 + dev_err(hba->dev,
3183 + "%s: cmd was completed, but without a notifying intr, tag = %d",
3184 + __func__, tag);
3185 +- __ufshcd_transfer_req_compl(hba, 1UL << tag);
3186 ++ __ufshcd_transfer_req_compl(hba, 1UL << tag, /*retry_requests=*/false);
3187 + goto release;
3188 + }
3189 +
3190 +@@ -7105,7 +7094,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
3191 + */
3192 + ufshcd_hba_stop(hba);
3193 + hba->silence_err_logs = true;
3194 +- ufshcd_complete_requests(hba);
3195 ++ ufshcd_retry_aborted_requests(hba);
3196 + hba->silence_err_logs = false;
3197 +
3198 + /* scale up clocks to max frequency before full reinitialization */
3199 +diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
3200 +index 194755c9ddfeb..86d4765a17b83 100644
3201 +--- a/drivers/scsi/ufs/ufshcd.h
3202 ++++ b/drivers/scsi/ufs/ufshcd.h
3203 +@@ -1160,11 +1160,6 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
3204 + return ufshcd_readl(hba, REG_UFS_VERSION);
3205 + }
3206 +
3207 +-static inline bool ufshcd_has_utrlcnr(struct ufs_hba *hba)
3208 +-{
3209 +- return (hba->ufs_version >= ufshci_version(3, 0));
3210 +-}
3211 +-
3212 + static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
3213 + bool up, enum ufs_notify_change_status status)
3214 + {
3215 +diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
3216 +index 5affb1fce5ad0..de95be5d11d4e 100644
3217 +--- a/drivers/scsi/ufs/ufshci.h
3218 ++++ b/drivers/scsi/ufs/ufshci.h
3219 +@@ -39,7 +39,6 @@ enum {
3220 + REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58,
3221 + REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C,
3222 + REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60,
3223 +- REG_UTP_TRANSFER_REQ_LIST_COMPL = 0x64,
3224 + REG_UTP_TASK_REQ_LIST_BASE_L = 0x70,
3225 + REG_UTP_TASK_REQ_LIST_BASE_H = 0x74,
3226 + REG_UTP_TASK_REQ_DOOR_BELL = 0x78,
3227 +diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
3228 +index 6a726c95ac7a8..dc1a6899ba3b2 100644
3229 +--- a/drivers/spi/spi-tegra20-slink.c
3230 ++++ b/drivers/spi/spi-tegra20-slink.c
3231 +@@ -1206,7 +1206,7 @@ static int tegra_slink_resume(struct device *dev)
3232 + }
3233 + #endif
3234 +
3235 +-static int tegra_slink_runtime_suspend(struct device *dev)
3236 ++static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
3237 + {
3238 + struct spi_master *master = dev_get_drvdata(dev);
3239 + struct tegra_slink_data *tspi = spi_master_get_devdata(master);
3240 +@@ -1218,7 +1218,7 @@ static int tegra_slink_runtime_suspend(struct device *dev)
3241 + return 0;
3242 + }
3243 +
3244 +-static int tegra_slink_runtime_resume(struct device *dev)
3245 ++static int __maybe_unused tegra_slink_runtime_resume(struct device *dev)
3246 + {
3247 + struct spi_master *master = dev_get_drvdata(dev);
3248 + struct tegra_slink_data *tspi = spi_master_get_devdata(master);
3249 +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
3250 +index e4dc593b1f32a..f95f7666cb5b7 100644
3251 +--- a/drivers/spi/spi.c
3252 ++++ b/drivers/spi/spi.c
3253 +@@ -58,10 +58,6 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
3254 + const struct spi_device *spi = to_spi_device(dev);
3255 + int len;
3256 +
3257 +- len = of_device_modalias(dev, buf, PAGE_SIZE);
3258 +- if (len != -ENODEV)
3259 +- return len;
3260 +-
3261 + len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
3262 + if (len != -ENODEV)
3263 + return len;
3264 +@@ -367,10 +363,6 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
3265 + const struct spi_device *spi = to_spi_device(dev);
3266 + int rc;
3267 +
3268 +- rc = of_device_uevent_modalias(dev, env);
3269 +- if (rc != -ENODEV)
3270 +- return rc;
3271 +-
3272 + rc = acpi_device_uevent_modalias(dev, env);
3273 + if (rc != -ENODEV)
3274 + return rc;
3275 +diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
3276 +index 73f01ed1e5b72..a943fce322be8 100644
3277 +--- a/drivers/staging/greybus/uart.c
3278 ++++ b/drivers/staging/greybus/uart.c
3279 +@@ -761,6 +761,17 @@ out:
3280 + gbphy_runtime_put_autosuspend(gb_tty->gbphy_dev);
3281 + }
3282 +
3283 ++static void gb_tty_port_destruct(struct tty_port *port)
3284 ++{
3285 ++ struct gb_tty *gb_tty = container_of(port, struct gb_tty, port);
3286 ++
3287 ++ if (gb_tty->minor != GB_NUM_MINORS)
3288 ++ release_minor(gb_tty);
3289 ++ kfifo_free(&gb_tty->write_fifo);
3290 ++ kfree(gb_tty->buffer);
3291 ++ kfree(gb_tty);
3292 ++}
3293 ++
3294 + static const struct tty_operations gb_ops = {
3295 + .install = gb_tty_install,
3296 + .open = gb_tty_open,
3297 +@@ -786,6 +797,7 @@ static const struct tty_port_operations gb_port_ops = {
3298 + .dtr_rts = gb_tty_dtr_rts,
3299 + .activate = gb_tty_port_activate,
3300 + .shutdown = gb_tty_port_shutdown,
3301 ++ .destruct = gb_tty_port_destruct,
3302 + };
3303 +
3304 + static int gb_uart_probe(struct gbphy_device *gbphy_dev,
3305 +@@ -798,17 +810,11 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
3306 + int retval;
3307 + int minor;
3308 +
3309 +- gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
3310 +- if (!gb_tty)
3311 +- return -ENOMEM;
3312 +-
3313 + connection = gb_connection_create(gbphy_dev->bundle,
3314 + le16_to_cpu(gbphy_dev->cport_desc->id),
3315 + gb_uart_request_handler);
3316 +- if (IS_ERR(connection)) {
3317 +- retval = PTR_ERR(connection);
3318 +- goto exit_tty_free;
3319 +- }
3320 ++ if (IS_ERR(connection))
3321 ++ return PTR_ERR(connection);
3322 +
3323 + max_payload = gb_operation_get_payload_size_max(connection);
3324 + if (max_payload < sizeof(struct gb_uart_send_data_request)) {
3325 +@@ -816,13 +822,23 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
3326 + goto exit_connection_destroy;
3327 + }
3328 +
3329 ++ gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
3330 ++ if (!gb_tty) {
3331 ++ retval = -ENOMEM;
3332 ++ goto exit_connection_destroy;
3333 ++ }
3334 ++
3335 ++ tty_port_init(&gb_tty->port);
3336 ++ gb_tty->port.ops = &gb_port_ops;
3337 ++ gb_tty->minor = GB_NUM_MINORS;
3338 ++
3339 + gb_tty->buffer_payload_max = max_payload -
3340 + sizeof(struct gb_uart_send_data_request);
3341 +
3342 + gb_tty->buffer = kzalloc(gb_tty->buffer_payload_max, GFP_KERNEL);
3343 + if (!gb_tty->buffer) {
3344 + retval = -ENOMEM;
3345 +- goto exit_connection_destroy;
3346 ++ goto exit_put_port;
3347 + }
3348 +
3349 + INIT_WORK(&gb_tty->tx_work, gb_uart_tx_write_work);
3350 +@@ -830,7 +846,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
3351 + retval = kfifo_alloc(&gb_tty->write_fifo, GB_UART_WRITE_FIFO_SIZE,
3352 + GFP_KERNEL);
3353 + if (retval)
3354 +- goto exit_buf_free;
3355 ++ goto exit_put_port;
3356 +
3357 + gb_tty->credits = GB_UART_FIRMWARE_CREDITS;
3358 + init_completion(&gb_tty->credits_complete);
3359 +@@ -844,7 +860,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
3360 + } else {
3361 + retval = minor;
3362 + }
3363 +- goto exit_kfifo_free;
3364 ++ goto exit_put_port;
3365 + }
3366 +
3367 + gb_tty->minor = minor;
3368 +@@ -853,9 +869,6 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
3369 + init_waitqueue_head(&gb_tty->wioctl);
3370 + mutex_init(&gb_tty->mutex);
3371 +
3372 +- tty_port_init(&gb_tty->port);
3373 +- gb_tty->port.ops = &gb_port_ops;
3374 +-
3375 + gb_tty->connection = connection;
3376 + gb_tty->gbphy_dev = gbphy_dev;
3377 + gb_connection_set_data(connection, gb_tty);
3378 +@@ -863,7 +876,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
3379 +
3380 + retval = gb_connection_enable_tx(connection);
3381 + if (retval)
3382 +- goto exit_release_minor;
3383 ++ goto exit_put_port;
3384 +
3385 + send_control(gb_tty, gb_tty->ctrlout);
3386 +
3387 +@@ -890,16 +903,10 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
3388 +
3389 + exit_connection_disable:
3390 + gb_connection_disable(connection);
3391 +-exit_release_minor:
3392 +- release_minor(gb_tty);
3393 +-exit_kfifo_free:
3394 +- kfifo_free(&gb_tty->write_fifo);
3395 +-exit_buf_free:
3396 +- kfree(gb_tty->buffer);
3397 ++exit_put_port:
3398 ++ tty_port_put(&gb_tty->port);
3399 + exit_connection_destroy:
3400 + gb_connection_destroy(connection);
3401 +-exit_tty_free:
3402 +- kfree(gb_tty);
3403 +
3404 + return retval;
3405 + }
3406 +@@ -930,15 +937,10 @@ static void gb_uart_remove(struct gbphy_device *gbphy_dev)
3407 + gb_connection_disable_rx(connection);
3408 + tty_unregister_device(gb_tty_driver, gb_tty->minor);
3409 +
3410 +- /* FIXME - free transmit / receive buffers */
3411 +-
3412 + gb_connection_disable(connection);
3413 +- tty_port_destroy(&gb_tty->port);
3414 + gb_connection_destroy(connection);
3415 +- release_minor(gb_tty);
3416 +- kfifo_free(&gb_tty->write_fifo);
3417 +- kfree(gb_tty->buffer);
3418 +- kfree(gb_tty);
3419 ++
3420 ++ tty_port_put(&gb_tty->port);
3421 + }
3422 +
3423 + static int gb_tty_init(void)
3424 +diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
3425 +index 102ec644bc8a0..023bd4516a681 100644
3426 +--- a/drivers/target/target_core_configfs.c
3427 ++++ b/drivers/target/target_core_configfs.c
3428 +@@ -1110,20 +1110,24 @@ static ssize_t alua_support_store(struct config_item *item,
3429 + {
3430 + struct se_dev_attrib *da = to_attrib(item);
3431 + struct se_device *dev = da->da_dev;
3432 +- bool flag;
3433 ++ bool flag, oldflag;
3434 + int ret;
3435 +
3436 ++ ret = strtobool(page, &flag);
3437 ++ if (ret < 0)
3438 ++ return ret;
3439 ++
3440 ++ oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA);
3441 ++ if (flag == oldflag)
3442 ++ return count;
3443 ++
3444 + if (!(dev->transport->transport_flags_changeable &
3445 + TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
3446 + pr_err("dev[%p]: Unable to change SE Device alua_support:"
3447 + " alua_support has fixed value\n", dev);
3448 +- return -EINVAL;
3449 ++ return -ENOSYS;
3450 + }
3451 +
3452 +- ret = strtobool(page, &flag);
3453 +- if (ret < 0)
3454 +- return ret;
3455 +-
3456 + if (flag)
3457 + dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
3458 + else
3459 +@@ -1145,20 +1149,24 @@ static ssize_t pgr_support_store(struct config_item *item,
3460 + {
3461 + struct se_dev_attrib *da = to_attrib(item);
3462 + struct se_device *dev = da->da_dev;
3463 +- bool flag;
3464 ++ bool flag, oldflag;
3465 + int ret;
3466 +
3467 ++ ret = strtobool(page, &flag);
3468 ++ if (ret < 0)
3469 ++ return ret;
3470 ++
3471 ++ oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR);
3472 ++ if (flag == oldflag)
3473 ++ return count;
3474 ++
3475 + if (!(dev->transport->transport_flags_changeable &
3476 + TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
3477 + pr_err("dev[%p]: Unable to change SE Device pgr_support:"
3478 + " pgr_support has fixed value\n", dev);
3479 +- return -EINVAL;
3480 ++ return -ENOSYS;
3481 + }
3482 +
3483 +- ret = strtobool(page, &flag);
3484 +- if (ret < 0)
3485 +- return ret;
3486 +-
3487 + if (flag)
3488 + dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
3489 + else
3490 +diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
3491 +index 0f0038af2ad48..fb64acfd5e07d 100644
3492 +--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
3493 ++++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
3494 +@@ -107,7 +107,7 @@ static int tcc_offset_update(unsigned int tcc)
3495 + return 0;
3496 + }
3497 +
3498 +-static unsigned int tcc_offset_save;
3499 ++static int tcc_offset_save = -1;
3500 +
3501 + static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
3502 + struct device_attribute *attr, const char *buf,
3503 +@@ -352,7 +352,8 @@ int proc_thermal_resume(struct device *dev)
3504 + proc_dev = dev_get_drvdata(dev);
3505 + proc_thermal_read_ppcc(proc_dev);
3506 +
3507 +- tcc_offset_update(tcc_offset_save);
3508 ++ if (tcc_offset_save >= 0)
3509 ++ tcc_offset_update(tcc_offset_save);
3510 +
3511 + return 0;
3512 + }
3513 +diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
3514 +index 97ef9b040b84a..51374f4e1ccaf 100644
3515 +--- a/drivers/thermal/thermal_core.c
3516 ++++ b/drivers/thermal/thermal_core.c
3517 +@@ -222,15 +222,14 @@ int thermal_build_list_of_policies(char *buf)
3518 + {
3519 + struct thermal_governor *pos;
3520 + ssize_t count = 0;
3521 +- ssize_t size = PAGE_SIZE;
3522 +
3523 + mutex_lock(&thermal_governor_lock);
3524 +
3525 + list_for_each_entry(pos, &thermal_governor_list, governor_list) {
3526 +- size = PAGE_SIZE - count;
3527 +- count += scnprintf(buf + count, size, "%s ", pos->name);
3528 ++ count += scnprintf(buf + count, PAGE_SIZE - count, "%s ",
3529 ++ pos->name);
3530 + }
3531 +- count += scnprintf(buf + count, size, "\n");
3532 ++ count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
3533 +
3534 + mutex_unlock(&thermal_governor_lock);
3535 +
3536 +diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
3537 +index b6c731a267d26..7223e22c4b886 100644
3538 +--- a/drivers/tty/serial/8250/8250_omap.c
3539 ++++ b/drivers/tty/serial/8250/8250_omap.c
3540 +@@ -106,7 +106,7 @@
3541 + #define UART_OMAP_EFR2_TIMEOUT_BEHAVE BIT(6)
3542 +
3543 + /* RX FIFO occupancy indicator */
3544 +-#define UART_OMAP_RX_LVL 0x64
3545 ++#define UART_OMAP_RX_LVL 0x19
3546 +
3547 + struct omap8250_priv {
3548 + int line;
3549 +diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
3550 +index 231de29a64521..ab226da75f7ba 100644
3551 +--- a/drivers/tty/serial/mvebu-uart.c
3552 ++++ b/drivers/tty/serial/mvebu-uart.c
3553 +@@ -163,7 +163,7 @@ static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
3554 + st = readl(port->membase + UART_STAT);
3555 + spin_unlock_irqrestore(&port->lock, flags);
3556 +
3557 +- return (st & STAT_TX_FIFO_EMP) ? TIOCSER_TEMT : 0;
3558 ++ return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
3559 + }
3560 +
3561 + static unsigned int mvebu_uart_get_mctrl(struct uart_port *port)
3562 +diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
3563 +index 5bb928b7873e7..2f5fbd7db7cac 100644
3564 +--- a/drivers/tty/synclink_gt.c
3565 ++++ b/drivers/tty/synclink_gt.c
3566 +@@ -438,8 +438,8 @@ static void reset_tbufs(struct slgt_info *info);
3567 + static void tdma_reset(struct slgt_info *info);
3568 + static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
3569 +
3570 +-static void get_signals(struct slgt_info *info);
3571 +-static void set_signals(struct slgt_info *info);
3572 ++static void get_gtsignals(struct slgt_info *info);
3573 ++static void set_gtsignals(struct slgt_info *info);
3574 + static void set_rate(struct slgt_info *info, u32 data_rate);
3575 +
3576 + static void bh_transmit(struct slgt_info *info);
3577 +@@ -720,7 +720,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
3578 + if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
3579 + info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3580 + spin_lock_irqsave(&info->lock,flags);
3581 +- set_signals(info);
3582 ++ set_gtsignals(info);
3583 + spin_unlock_irqrestore(&info->lock,flags);
3584 + }
3585 +
3586 +@@ -730,7 +730,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
3587 + if (!C_CRTSCTS(tty) || !tty_throttled(tty))
3588 + info->signals |= SerialSignal_RTS;
3589 + spin_lock_irqsave(&info->lock,flags);
3590 +- set_signals(info);
3591 ++ set_gtsignals(info);
3592 + spin_unlock_irqrestore(&info->lock,flags);
3593 + }
3594 +
3595 +@@ -1181,7 +1181,7 @@ static inline void line_info(struct seq_file *m, struct slgt_info *info)
3596 +
3597 + /* output current serial signal states */
3598 + spin_lock_irqsave(&info->lock,flags);
3599 +- get_signals(info);
3600 ++ get_gtsignals(info);
3601 + spin_unlock_irqrestore(&info->lock,flags);
3602 +
3603 + stat_buf[0] = 0;
3604 +@@ -1281,7 +1281,7 @@ static void throttle(struct tty_struct * tty)
3605 + if (C_CRTSCTS(tty)) {
3606 + spin_lock_irqsave(&info->lock,flags);
3607 + info->signals &= ~SerialSignal_RTS;
3608 +- set_signals(info);
3609 ++ set_gtsignals(info);
3610 + spin_unlock_irqrestore(&info->lock,flags);
3611 + }
3612 + }
3613 +@@ -1306,7 +1306,7 @@ static void unthrottle(struct tty_struct * tty)
3614 + if (C_CRTSCTS(tty)) {
3615 + spin_lock_irqsave(&info->lock,flags);
3616 + info->signals |= SerialSignal_RTS;
3617 +- set_signals(info);
3618 ++ set_gtsignals(info);
3619 + spin_unlock_irqrestore(&info->lock,flags);
3620 + }
3621 + }
3622 +@@ -1477,7 +1477,7 @@ static int hdlcdev_open(struct net_device *dev)
3623 +
3624 + /* inform generic HDLC layer of current DCD status */
3625 + spin_lock_irqsave(&info->lock, flags);
3626 +- get_signals(info);
3627 ++ get_gtsignals(info);
3628 + spin_unlock_irqrestore(&info->lock, flags);
3629 + if (info->signals & SerialSignal_DCD)
3630 + netif_carrier_on(dev);
3631 +@@ -2232,7 +2232,7 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
3632 + if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
3633 + info->signals &= ~SerialSignal_RTS;
3634 + info->drop_rts_on_tx_done = false;
3635 +- set_signals(info);
3636 ++ set_gtsignals(info);
3637 + }
3638 +
3639 + #if SYNCLINK_GENERIC_HDLC
3640 +@@ -2397,7 +2397,7 @@ static void shutdown(struct slgt_info *info)
3641 +
3642 + if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
3643 + info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3644 +- set_signals(info);
3645 ++ set_gtsignals(info);
3646 + }
3647 +
3648 + flush_cond_wait(&info->gpio_wait_q);
3649 +@@ -2425,7 +2425,7 @@ static void program_hw(struct slgt_info *info)
3650 + else
3651 + async_mode(info);
3652 +
3653 +- set_signals(info);
3654 ++ set_gtsignals(info);
3655 +
3656 + info->dcd_chkcount = 0;
3657 + info->cts_chkcount = 0;
3658 +@@ -2433,7 +2433,7 @@ static void program_hw(struct slgt_info *info)
3659 + info->dsr_chkcount = 0;
3660 +
3661 + slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR | IRQ_RI);
3662 +- get_signals(info);
3663 ++ get_gtsignals(info);
3664 +
3665 + if (info->netcount ||
3666 + (info->port.tty && info->port.tty->termios.c_cflag & CREAD))
3667 +@@ -2670,7 +2670,7 @@ static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr)
3668 + spin_lock_irqsave(&info->lock,flags);
3669 +
3670 + /* return immediately if state matches requested events */
3671 +- get_signals(info);
3672 ++ get_gtsignals(info);
3673 + s = info->signals;
3674 +
3675 + events = mask &
3676 +@@ -3088,7 +3088,7 @@ static int tiocmget(struct tty_struct *tty)
3677 + unsigned long flags;
3678 +
3679 + spin_lock_irqsave(&info->lock,flags);
3680 +- get_signals(info);
3681 ++ get_gtsignals(info);
3682 + spin_unlock_irqrestore(&info->lock,flags);
3683 +
3684 + result = ((info->signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
3685 +@@ -3127,7 +3127,7 @@ static int tiocmset(struct tty_struct *tty,
3686 + info->signals &= ~SerialSignal_DTR;
3687 +
3688 + spin_lock_irqsave(&info->lock,flags);
3689 +- set_signals(info);
3690 ++ set_gtsignals(info);
3691 + spin_unlock_irqrestore(&info->lock,flags);
3692 + return 0;
3693 + }
3694 +@@ -3138,7 +3138,7 @@ static int carrier_raised(struct tty_port *port)
3695 + struct slgt_info *info = container_of(port, struct slgt_info, port);
3696 +
3697 + spin_lock_irqsave(&info->lock,flags);
3698 +- get_signals(info);
3699 ++ get_gtsignals(info);
3700 + spin_unlock_irqrestore(&info->lock,flags);
3701 + return (info->signals & SerialSignal_DCD) ? 1 : 0;
3702 + }
3703 +@@ -3153,7 +3153,7 @@ static void dtr_rts(struct tty_port *port, int on)
3704 + info->signals |= SerialSignal_RTS | SerialSignal_DTR;
3705 + else
3706 + info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3707 +- set_signals(info);
3708 ++ set_gtsignals(info);
3709 + spin_unlock_irqrestore(&info->lock,flags);
3710 + }
3711 +
3712 +@@ -3951,10 +3951,10 @@ static void tx_start(struct slgt_info *info)
3713 +
3714 + if (info->params.mode != MGSL_MODE_ASYNC) {
3715 + if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
3716 +- get_signals(info);
3717 ++ get_gtsignals(info);
3718 + if (!(info->signals & SerialSignal_RTS)) {
3719 + info->signals |= SerialSignal_RTS;
3720 +- set_signals(info);
3721 ++ set_gtsignals(info);
3722 + info->drop_rts_on_tx_done = true;
3723 + }
3724 + }
3725 +@@ -4008,7 +4008,7 @@ static void reset_port(struct slgt_info *info)
3726 + rx_stop(info);
3727 +
3728 + info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3729 +- set_signals(info);
3730 ++ set_gtsignals(info);
3731 +
3732 + slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
3733 + }
3734 +@@ -4430,7 +4430,7 @@ static void tx_set_idle(struct slgt_info *info)
3735 + /*
3736 + * get state of V24 status (input) signals
3737 + */
3738 +-static void get_signals(struct slgt_info *info)
3739 ++static void get_gtsignals(struct slgt_info *info)
3740 + {
3741 + unsigned short status = rd_reg16(info, SSR);
3742 +
3743 +@@ -4492,7 +4492,7 @@ static void msc_set_vcr(struct slgt_info *info)
3744 + /*
3745 + * set state of V24 control (output) signals
3746 + */
3747 +-static void set_signals(struct slgt_info *info)
3748 ++static void set_gtsignals(struct slgt_info *info)
3749 + {
3750 + unsigned char val = rd_reg8(info, VCR);
3751 + if (info->signals & SerialSignal_DTR)
3752 +diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
3753 +index 5d8c982019afc..1f3b4a1422126 100644
3754 +--- a/drivers/usb/cdns3/cdns3-gadget.c
3755 ++++ b/drivers/usb/cdns3/cdns3-gadget.c
3756 +@@ -1100,6 +1100,19 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
3757 + return 0;
3758 + }
3759 +
3760 ++static void cdns3_rearm_drdy_if_needed(struct cdns3_endpoint *priv_ep)
3761 ++{
3762 ++ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
3763 ++
3764 ++ if (priv_dev->dev_ver < DEV_VER_V3)
3765 ++ return;
3766 ++
3767 ++ if (readl(&priv_dev->regs->ep_sts) & EP_STS_TRBERR) {
3768 ++ writel(EP_STS_TRBERR, &priv_dev->regs->ep_sts);
3769 ++ writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
3770 ++ }
3771 ++}
3772 ++
3773 + /**
3774 + * cdns3_ep_run_transfer - start transfer on no-default endpoint hardware
3775 + * @priv_ep: endpoint object
3776 +@@ -1351,6 +1364,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
3777 + /*clearing TRBERR and EP_STS_DESCMIS before seting DRDY*/
3778 + writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
3779 + writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
3780 ++ cdns3_rearm_drdy_if_needed(priv_ep);
3781 + trace_cdns3_doorbell_epx(priv_ep->name,
3782 + readl(&priv_dev->regs->ep_traddr));
3783 + }
3784 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
3785 +index 4895325b16a46..5b90d0979c607 100644
3786 +--- a/drivers/usb/class/cdc-acm.c
3787 ++++ b/drivers/usb/class/cdc-acm.c
3788 +@@ -726,7 +726,8 @@ static void acm_port_destruct(struct tty_port *port)
3789 + {
3790 + struct acm *acm = container_of(port, struct acm, port);
3791 +
3792 +- acm_release_minor(acm);
3793 ++ if (acm->minor != ACM_MINOR_INVALID)
3794 ++ acm_release_minor(acm);
3795 + usb_put_intf(acm->control);
3796 + kfree(acm->country_codes);
3797 + kfree(acm);
3798 +@@ -1323,8 +1324,10 @@ made_compressed_probe:
3799 + usb_get_intf(acm->control); /* undone in destruct() */
3800 +
3801 + minor = acm_alloc_minor(acm);
3802 +- if (minor < 0)
3803 ++ if (minor < 0) {
3804 ++ acm->minor = ACM_MINOR_INVALID;
3805 + goto err_put_port;
3806 ++ }
3807 +
3808 + acm->minor = minor;
3809 + acm->dev = usb_dev;
3810 +diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
3811 +index 8aef5eb769a0d..3aa7f0a3ad71e 100644
3812 +--- a/drivers/usb/class/cdc-acm.h
3813 ++++ b/drivers/usb/class/cdc-acm.h
3814 +@@ -22,6 +22,8 @@
3815 + #define ACM_TTY_MAJOR 166
3816 + #define ACM_TTY_MINORS 256
3817 +
3818 ++#define ACM_MINOR_INVALID ACM_TTY_MINORS
3819 ++
3820 + /*
3821 + * Requests.
3822 + */
3823 +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
3824 +index 0f8b7c93310ea..99ff2d23be05e 100644
3825 +--- a/drivers/usb/core/hcd.c
3826 ++++ b/drivers/usb/core/hcd.c
3827 +@@ -2775,6 +2775,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
3828 + {
3829 + int retval;
3830 + struct usb_device *rhdev;
3831 ++ struct usb_hcd *shared_hcd;
3832 +
3833 + if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
3834 + hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
3835 +@@ -2935,13 +2936,26 @@ int usb_add_hcd(struct usb_hcd *hcd,
3836 + goto err_hcd_driver_start;
3837 + }
3838 +
3839 ++ /* starting here, usbcore will pay attention to the shared HCD roothub */
3840 ++ shared_hcd = hcd->shared_hcd;
3841 ++ if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
3842 ++ retval = register_root_hub(shared_hcd);
3843 ++ if (retval != 0)
3844 ++ goto err_register_root_hub;
3845 ++
3846 ++ if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
3847 ++ usb_hcd_poll_rh_status(shared_hcd);
3848 ++ }
3849 ++
3850 + /* starting here, usbcore will pay attention to this root hub */
3851 +- retval = register_root_hub(hcd);
3852 +- if (retval != 0)
3853 +- goto err_register_root_hub;
3854 ++ if (!HCD_DEFER_RH_REGISTER(hcd)) {
3855 ++ retval = register_root_hub(hcd);
3856 ++ if (retval != 0)
3857 ++ goto err_register_root_hub;
3858 +
3859 +- if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
3860 +- usb_hcd_poll_rh_status(hcd);
3861 ++ if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
3862 ++ usb_hcd_poll_rh_status(hcd);
3863 ++ }
3864 +
3865 + return retval;
3866 +
3867 +@@ -2985,6 +2999,7 @@ EXPORT_SYMBOL_GPL(usb_add_hcd);
3868 + void usb_remove_hcd(struct usb_hcd *hcd)
3869 + {
3870 + struct usb_device *rhdev = hcd->self.root_hub;
3871 ++ bool rh_registered;
3872 +
3873 + dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
3874 +
3875 +@@ -2995,6 +3010,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
3876 +
3877 + dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
3878 + spin_lock_irq (&hcd_root_hub_lock);
3879 ++ rh_registered = hcd->rh_registered;
3880 + hcd->rh_registered = 0;
3881 + spin_unlock_irq (&hcd_root_hub_lock);
3882 +
3883 +@@ -3004,7 +3020,8 @@ void usb_remove_hcd(struct usb_hcd *hcd)
3884 + cancel_work_sync(&hcd->died_work);
3885 +
3886 + mutex_lock(&usb_bus_idr_lock);
3887 +- usb_disconnect(&rhdev); /* Sets rhdev to NULL */
3888 ++ if (rh_registered)
3889 ++ usb_disconnect(&rhdev); /* Sets rhdev to NULL */
3890 + mutex_unlock(&usb_bus_idr_lock);
3891 +
3892 + /*
3893 +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
3894 +index 3146df6e6510d..8f7ee70f5bdcf 100644
3895 +--- a/drivers/usb/dwc2/gadget.c
3896 ++++ b/drivers/usb/dwc2/gadget.c
3897 +@@ -115,10 +115,16 @@ static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
3898 + */
3899 + static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
3900 + {
3901 ++ struct dwc2_hsotg *hsotg = hs_ep->parent;
3902 ++ u16 limit = DSTS_SOFFN_LIMIT;
3903 ++
3904 ++ if (hsotg->gadget.speed != USB_SPEED_HIGH)
3905 ++ limit >>= 3;
3906 ++
3907 + hs_ep->target_frame += hs_ep->interval;
3908 +- if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) {
3909 ++ if (hs_ep->target_frame > limit) {
3910 + hs_ep->frame_overrun = true;
3911 +- hs_ep->target_frame &= DSTS_SOFFN_LIMIT;
3912 ++ hs_ep->target_frame &= limit;
3913 + } else {
3914 + hs_ep->frame_overrun = false;
3915 + }
3916 +@@ -136,10 +142,16 @@ static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
3917 + */
3918 + static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
3919 + {
3920 ++ struct dwc2_hsotg *hsotg = hs_ep->parent;
3921 ++ u16 limit = DSTS_SOFFN_LIMIT;
3922 ++
3923 ++ if (hsotg->gadget.speed != USB_SPEED_HIGH)
3924 ++ limit >>= 3;
3925 ++
3926 + if (hs_ep->target_frame)
3927 + hs_ep->target_frame -= 1;
3928 + else
3929 +- hs_ep->target_frame = DSTS_SOFFN_LIMIT;
3930 ++ hs_ep->target_frame = limit;
3931 + }
3932 +
3933 + /**
3934 +@@ -1018,6 +1030,12 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
3935 + dwc2_writel(hsotg, ctrl, depctl);
3936 + }
3937 +
3938 ++static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep);
3939 ++static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
3940 ++ struct dwc2_hsotg_ep *hs_ep,
3941 ++ struct dwc2_hsotg_req *hs_req,
3942 ++ int result);
3943 ++
3944 + /**
3945 + * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
3946 + * @hsotg: The controller state.
3947 +@@ -1170,14 +1188,19 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
3948 + }
3949 + }
3950 +
3951 +- if (hs_ep->isochronous && hs_ep->interval == 1) {
3952 +- hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
3953 +- dwc2_gadget_incr_frame_num(hs_ep);
3954 +-
3955 +- if (hs_ep->target_frame & 0x1)
3956 +- ctrl |= DXEPCTL_SETODDFR;
3957 +- else
3958 +- ctrl |= DXEPCTL_SETEVENFR;
3959 ++ if (hs_ep->isochronous) {
3960 ++ if (!dwc2_gadget_target_frame_elapsed(hs_ep)) {
3961 ++ if (hs_ep->interval == 1) {
3962 ++ if (hs_ep->target_frame & 0x1)
3963 ++ ctrl |= DXEPCTL_SETODDFR;
3964 ++ else
3965 ++ ctrl |= DXEPCTL_SETEVENFR;
3966 ++ }
3967 ++ ctrl |= DXEPCTL_CNAK;
3968 ++ } else {
3969 ++ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
3970 ++ return;
3971 ++ }
3972 + }
3973 +
3974 + ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
3975 +@@ -1325,12 +1348,16 @@ static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
3976 + u32 target_frame = hs_ep->target_frame;
3977 + u32 current_frame = hsotg->frame_number;
3978 + bool frame_overrun = hs_ep->frame_overrun;
3979 ++ u16 limit = DSTS_SOFFN_LIMIT;
3980 ++
3981 ++ if (hsotg->gadget.speed != USB_SPEED_HIGH)
3982 ++ limit >>= 3;
3983 +
3984 + if (!frame_overrun && current_frame >= target_frame)
3985 + return true;
3986 +
3987 + if (frame_overrun && current_frame >= target_frame &&
3988 +- ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2))
3989 ++ ((current_frame - target_frame) < limit / 2))
3990 + return true;
3991 +
3992 + return false;
3993 +@@ -1713,11 +1740,9 @@ static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
3994 + */
3995 + static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
3996 + {
3997 +- u32 mask;
3998 + struct dwc2_hsotg *hsotg = hs_ep->parent;
3999 + int dir_in = hs_ep->dir_in;
4000 + struct dwc2_hsotg_req *hs_req;
4001 +- u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
4002 +
4003 + if (!list_empty(&hs_ep->queue)) {
4004 + hs_req = get_ep_head(hs_ep);
4005 +@@ -1733,9 +1758,6 @@ static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
4006 + } else {
4007 + dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
4008 + __func__);
4009 +- mask = dwc2_readl(hsotg, epmsk_reg);
4010 +- mask |= DOEPMSK_OUTTKNEPDISMSK;
4011 +- dwc2_writel(hsotg, mask, epmsk_reg);
4012 + }
4013 + }
4014 +
4015 +@@ -2305,19 +2327,6 @@ static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
4016 + dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
4017 + }
4018 +
4019 +-static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
4020 +- u32 epctl_reg)
4021 +-{
4022 +- u32 ctrl;
4023 +-
4024 +- ctrl = dwc2_readl(hsotg, epctl_reg);
4025 +- if (ctrl & DXEPCTL_EOFRNUM)
4026 +- ctrl |= DXEPCTL_SETEVENFR;
4027 +- else
4028 +- ctrl |= DXEPCTL_SETODDFR;
4029 +- dwc2_writel(hsotg, ctrl, epctl_reg);
4030 +-}
4031 +-
4032 + /*
4033 + * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc
4034 + * @hs_ep - The endpoint on which transfer went
4035 +@@ -2438,20 +2447,11 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
4036 + dwc2_hsotg_ep0_zlp(hsotg, true);
4037 + }
4038 +
4039 +- /*
4040 +- * Slave mode OUT transfers do not go through XferComplete so
4041 +- * adjust the ISOC parity here.
4042 +- */
4043 +- if (!using_dma(hsotg)) {
4044 +- if (hs_ep->isochronous && hs_ep->interval == 1)
4045 +- dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum));
4046 +- else if (hs_ep->isochronous && hs_ep->interval > 1)
4047 +- dwc2_gadget_incr_frame_num(hs_ep);
4048 +- }
4049 +-
4050 + /* Set actual frame number for completed transfers */
4051 +- if (!using_desc_dma(hsotg) && hs_ep->isochronous)
4052 +- req->frame_number = hsotg->frame_number;
4053 ++ if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
4054 ++ req->frame_number = hs_ep->target_frame;
4055 ++ dwc2_gadget_incr_frame_num(hs_ep);
4056 ++ }
4057 +
4058 + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
4059 + }
4060 +@@ -2765,6 +2765,12 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
4061 + return;
4062 + }
4063 +
4064 ++ /* Set actual frame number for completed transfers */
4065 ++ if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
4066 ++ hs_req->req.frame_number = hs_ep->target_frame;
4067 ++ dwc2_gadget_incr_frame_num(hs_ep);
4068 ++ }
4069 ++
4070 + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
4071 + }
4072 +
4073 +@@ -2825,23 +2831,18 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
4074 +
4075 + dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
4076 +
4077 +- if (hs_ep->isochronous) {
4078 +- dwc2_hsotg_complete_in(hsotg, hs_ep);
4079 +- return;
4080 +- }
4081 +-
4082 + if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
4083 + int dctl = dwc2_readl(hsotg, DCTL);
4084 +
4085 + dctl |= DCTL_CGNPINNAK;
4086 + dwc2_writel(hsotg, dctl, DCTL);
4087 + }
4088 +- return;
4089 +- }
4090 ++ } else {
4091 +
4092 +- if (dctl & DCTL_GOUTNAKSTS) {
4093 +- dctl |= DCTL_CGOUTNAK;
4094 +- dwc2_writel(hsotg, dctl, DCTL);
4095 ++ if (dctl & DCTL_GOUTNAKSTS) {
4096 ++ dctl |= DCTL_CGOUTNAK;
4097 ++ dwc2_writel(hsotg, dctl, DCTL);
4098 ++ }
4099 + }
4100 +
4101 + if (!hs_ep->isochronous)
4102 +@@ -2862,8 +2863,6 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
4103 + /* Update current frame number value. */
4104 + hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
4105 + } while (dwc2_gadget_target_frame_elapsed(hs_ep));
4106 +-
4107 +- dwc2_gadget_start_next_request(hs_ep);
4108 + }
4109 +
4110 + /**
4111 +@@ -2880,8 +2879,8 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
4112 + static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
4113 + {
4114 + struct dwc2_hsotg *hsotg = ep->parent;
4115 ++ struct dwc2_hsotg_req *hs_req;
4116 + int dir_in = ep->dir_in;
4117 +- u32 doepmsk;
4118 +
4119 + if (dir_in || !ep->isochronous)
4120 + return;
4121 +@@ -2895,28 +2894,39 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
4122 + return;
4123 + }
4124 +
4125 +- if (ep->interval > 1 &&
4126 +- ep->target_frame == TARGET_FRAME_INITIAL) {
4127 ++ if (ep->target_frame == TARGET_FRAME_INITIAL) {
4128 + u32 ctrl;
4129 +
4130 + ep->target_frame = hsotg->frame_number;
4131 +- dwc2_gadget_incr_frame_num(ep);
4132 ++ if (ep->interval > 1) {
4133 ++ ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
4134 ++ if (ep->target_frame & 0x1)
4135 ++ ctrl |= DXEPCTL_SETODDFR;
4136 ++ else
4137 ++ ctrl |= DXEPCTL_SETEVENFR;
4138 +
4139 +- ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
4140 +- if (ep->target_frame & 0x1)
4141 +- ctrl |= DXEPCTL_SETODDFR;
4142 +- else
4143 +- ctrl |= DXEPCTL_SETEVENFR;
4144 ++ dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
4145 ++ }
4146 ++ }
4147 ++
4148 ++ while (dwc2_gadget_target_frame_elapsed(ep)) {
4149 ++ hs_req = get_ep_head(ep);
4150 ++ if (hs_req)
4151 ++ dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
4152 +
4153 +- dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
4154 ++ dwc2_gadget_incr_frame_num(ep);
4155 ++ /* Update current frame number value. */
4156 ++ hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
4157 + }
4158 +
4159 +- dwc2_gadget_start_next_request(ep);
4160 +- doepmsk = dwc2_readl(hsotg, DOEPMSK);
4161 +- doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK;
4162 +- dwc2_writel(hsotg, doepmsk, DOEPMSK);
4163 ++ if (!ep->req)
4164 ++ dwc2_gadget_start_next_request(ep);
4165 ++
4166 + }
4167 +
4168 ++static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
4169 ++ struct dwc2_hsotg_ep *hs_ep);
4170 ++
4171 + /**
4172 + * dwc2_gadget_handle_nak - handle NAK interrupt
4173 + * @hs_ep: The endpoint on which interrupt is asserted.
4174 +@@ -2934,7 +2944,9 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
4175 + static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
4176 + {
4177 + struct dwc2_hsotg *hsotg = hs_ep->parent;
4178 ++ struct dwc2_hsotg_req *hs_req;
4179 + int dir_in = hs_ep->dir_in;
4180 ++ u32 ctrl;
4181 +
4182 + if (!dir_in || !hs_ep->isochronous)
4183 + return;
4184 +@@ -2976,13 +2988,29 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
4185 +
4186 + dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
4187 + }
4188 +-
4189 +- dwc2_hsotg_complete_request(hsotg, hs_ep,
4190 +- get_ep_head(hs_ep), 0);
4191 + }
4192 +
4193 +- if (!using_desc_dma(hsotg))
4194 ++ if (using_desc_dma(hsotg))
4195 ++ return;
4196 ++
4197 ++ ctrl = dwc2_readl(hsotg, DIEPCTL(hs_ep->index));
4198 ++ if (ctrl & DXEPCTL_EPENA)
4199 ++ dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
4200 ++ else
4201 ++ dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
4202 ++
4203 ++ while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
4204 ++ hs_req = get_ep_head(hs_ep);
4205 ++ if (hs_req)
4206 ++ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
4207 ++
4208 + dwc2_gadget_incr_frame_num(hs_ep);
4209 ++ /* Update current frame number value. */
4210 ++ hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
4211 ++ }
4212 ++
4213 ++ if (!hs_ep->req)
4214 ++ dwc2_gadget_start_next_request(hs_ep);
4215 + }
4216 +
4217 + /**
4218 +@@ -3038,21 +3066,15 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
4219 +
4220 + /* In DDMA handle isochronous requests separately */
4221 + if (using_desc_dma(hsotg) && hs_ep->isochronous) {
4222 +- /* XferCompl set along with BNA */
4223 +- if (!(ints & DXEPINT_BNAINTR))
4224 +- dwc2_gadget_complete_isoc_request_ddma(hs_ep);
4225 ++ dwc2_gadget_complete_isoc_request_ddma(hs_ep);
4226 + } else if (dir_in) {
4227 + /*
4228 + * We get OutDone from the FIFO, so we only
4229 + * need to look at completing IN requests here
4230 + * if operating slave mode
4231 + */
4232 +- if (hs_ep->isochronous && hs_ep->interval > 1)
4233 +- dwc2_gadget_incr_frame_num(hs_ep);
4234 +-
4235 +- dwc2_hsotg_complete_in(hsotg, hs_ep);
4236 +- if (ints & DXEPINT_NAKINTRPT)
4237 +- ints &= ~DXEPINT_NAKINTRPT;
4238 ++ if (!hs_ep->isochronous || !(ints & DXEPINT_NAKINTRPT))
4239 ++ dwc2_hsotg_complete_in(hsotg, hs_ep);
4240 +
4241 + if (idx == 0 && !hs_ep->req)
4242 + dwc2_hsotg_enqueue_setup(hsotg);
4243 +@@ -3061,10 +3083,8 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
4244 + * We're using DMA, we need to fire an OutDone here
4245 + * as we ignore the RXFIFO.
4246 + */
4247 +- if (hs_ep->isochronous && hs_ep->interval > 1)
4248 +- dwc2_gadget_incr_frame_num(hs_ep);
4249 +-
4250 +- dwc2_hsotg_handle_outdone(hsotg, idx);
4251 ++ if (!hs_ep->isochronous || !(ints & DXEPINT_OUTTKNEPDIS))
4252 ++ dwc2_hsotg_handle_outdone(hsotg, idx);
4253 + }
4254 + }
4255 +
4256 +@@ -4083,6 +4103,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
4257 + mask |= DIEPMSK_NAKMSK;
4258 + dwc2_writel(hsotg, mask, DIEPMSK);
4259 + } else {
4260 ++ epctrl |= DXEPCTL_SNAK;
4261 + mask = dwc2_readl(hsotg, DOEPMSK);
4262 + mask |= DOEPMSK_OUTTKNEPDISMSK;
4263 + dwc2_writel(hsotg, mask, DOEPMSK);
4264 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
4265 +index ba74ad7f6995e..2522d15c42447 100644
4266 +--- a/drivers/usb/dwc3/core.c
4267 ++++ b/drivers/usb/dwc3/core.c
4268 +@@ -264,19 +264,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
4269 + {
4270 + u32 reg;
4271 + int retries = 1000;
4272 +- int ret;
4273 +-
4274 +- usb_phy_init(dwc->usb2_phy);
4275 +- usb_phy_init(dwc->usb3_phy);
4276 +- ret = phy_init(dwc->usb2_generic_phy);
4277 +- if (ret < 0)
4278 +- return ret;
4279 +-
4280 +- ret = phy_init(dwc->usb3_generic_phy);
4281 +- if (ret < 0) {
4282 +- phy_exit(dwc->usb2_generic_phy);
4283 +- return ret;
4284 +- }
4285 +
4286 + /*
4287 + * We're resetting only the device side because, if we're in host mode,
4288 +@@ -310,9 +297,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
4289 + udelay(1);
4290 + } while (--retries);
4291 +
4292 +- phy_exit(dwc->usb3_generic_phy);
4293 +- phy_exit(dwc->usb2_generic_phy);
4294 +-
4295 + return -ETIMEDOUT;
4296 +
4297 + done:
4298 +@@ -982,9 +966,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
4299 + dwc->phys_ready = true;
4300 + }
4301 +
4302 ++ usb_phy_init(dwc->usb2_phy);
4303 ++ usb_phy_init(dwc->usb3_phy);
4304 ++ ret = phy_init(dwc->usb2_generic_phy);
4305 ++ if (ret < 0)
4306 ++ goto err0a;
4307 ++
4308 ++ ret = phy_init(dwc->usb3_generic_phy);
4309 ++ if (ret < 0) {
4310 ++ phy_exit(dwc->usb2_generic_phy);
4311 ++ goto err0a;
4312 ++ }
4313 ++
4314 + ret = dwc3_core_soft_reset(dwc);
4315 + if (ret)
4316 +- goto err0a;
4317 ++ goto err1;
4318 +
4319 + if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
4320 + !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
4321 +diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
4322 +index ae29ff2b2b686..37c94031af1ed 100644
4323 +--- a/drivers/usb/gadget/function/f_uac2.c
4324 ++++ b/drivers/usb/gadget/function/f_uac2.c
4325 +@@ -348,6 +348,14 @@ static struct usb_endpoint_descriptor ss_epin_fback_desc = {
4326 + .bInterval = 4,
4327 + };
4328 +
4329 ++static struct usb_ss_ep_comp_descriptor ss_epin_fback_desc_comp = {
4330 ++ .bLength = sizeof(ss_epin_fback_desc_comp),
4331 ++ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
4332 ++ .bMaxBurst = 0,
4333 ++ .bmAttributes = 0,
4334 ++ .wBytesPerInterval = cpu_to_le16(4),
4335 ++};
4336 ++
4337 +
4338 + /* Audio Streaming IN Interface - Alt0 */
4339 + static struct usb_interface_descriptor std_as_in_if0_desc = {
4340 +@@ -527,6 +535,7 @@ static struct usb_descriptor_header *ss_audio_desc[] = {
4341 + (struct usb_descriptor_header *)&ss_epout_desc_comp,
4342 + (struct usb_descriptor_header *)&as_iso_out_desc,
4343 + (struct usb_descriptor_header *)&ss_epin_fback_desc,
4344 ++ (struct usb_descriptor_header *)&ss_epin_fback_desc_comp,
4345 +
4346 + (struct usb_descriptor_header *)&std_as_in_if0_desc,
4347 + (struct usb_descriptor_header *)&std_as_in_if1_desc,
4348 +@@ -604,6 +613,7 @@ static void setup_headers(struct f_uac2_opts *opts,
4349 + {
4350 + struct usb_ss_ep_comp_descriptor *epout_desc_comp = NULL;
4351 + struct usb_ss_ep_comp_descriptor *epin_desc_comp = NULL;
4352 ++ struct usb_ss_ep_comp_descriptor *epin_fback_desc_comp = NULL;
4353 + struct usb_endpoint_descriptor *epout_desc;
4354 + struct usb_endpoint_descriptor *epin_desc;
4355 + struct usb_endpoint_descriptor *epin_fback_desc;
4356 +@@ -626,6 +636,7 @@ static void setup_headers(struct f_uac2_opts *opts,
4357 + epout_desc_comp = &ss_epout_desc_comp;
4358 + epin_desc_comp = &ss_epin_desc_comp;
4359 + epin_fback_desc = &ss_epin_fback_desc;
4360 ++ epin_fback_desc_comp = &ss_epin_fback_desc_comp;
4361 + }
4362 +
4363 + i = 0;
4364 +@@ -654,8 +665,11 @@ static void setup_headers(struct f_uac2_opts *opts,
4365 +
4366 + headers[i++] = USBDHDR(&as_iso_out_desc);
4367 +
4368 +- if (EPOUT_FBACK_IN_EN(opts))
4369 ++ if (EPOUT_FBACK_IN_EN(opts)) {
4370 + headers[i++] = USBDHDR(epin_fback_desc);
4371 ++ if (epin_fback_desc_comp)
4372 ++ headers[i++] = USBDHDR(epin_fback_desc_comp);
4373 ++ }
4374 + }
4375 + if (EPIN_EN(opts)) {
4376 + headers[i++] = USBDHDR(&std_as_in_if0_desc);
4377 +@@ -937,6 +951,9 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
4378 + agdev->out_ep_maxpsize = max_t(u16, agdev->out_ep_maxpsize,
4379 + le16_to_cpu(ss_epout_desc.wMaxPacketSize));
4380 +
4381 ++ ss_epin_desc_comp.wBytesPerInterval = ss_epin_desc.wMaxPacketSize;
4382 ++ ss_epout_desc_comp.wBytesPerInterval = ss_epout_desc.wMaxPacketSize;
4383 ++
4384 + hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
4385 + hs_epin_fback_desc.bEndpointAddress = fs_epin_fback_desc.bEndpointAddress;
4386 + hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
4387 +diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
4388 +index 9e5c950612d06..b1aef892bfa38 100644
4389 +--- a/drivers/usb/gadget/function/u_audio.c
4390 ++++ b/drivers/usb/gadget/function/u_audio.c
4391 +@@ -76,11 +76,13 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
4392 + };
4393 +
4394 + static void u_audio_set_fback_frequency(enum usb_device_speed speed,
4395 ++ struct usb_ep *out_ep,
4396 + unsigned long long freq,
4397 + unsigned int pitch,
4398 + void *buf)
4399 + {
4400 + u32 ff = 0;
4401 ++ const struct usb_endpoint_descriptor *ep_desc;
4402 +
4403 + /*
4404 + * Because the pitch base is 1000000, the final divider here
4405 +@@ -108,8 +110,13 @@ static void u_audio_set_fback_frequency(enum usb_device_speed speed,
4406 + * byte fromat (that is Q16.16)
4407 + *
4408 + * ff = (freq << 16) / 8000
4409 ++ *
4410 ++ * Win10 and OSX UAC2 drivers require number of samples per packet
4411 ++ * in order to honor the feedback value.
4412 ++ * Linux snd-usb-audio detects the applied bit-shift automatically.
4413 + */
4414 +- freq <<= 4;
4415 ++ ep_desc = out_ep->desc;
4416 ++ freq <<= 4 + (ep_desc->bInterval - 1);
4417 + }
4418 +
4419 + ff = DIV_ROUND_CLOSEST_ULL((freq * pitch), 1953125);
4420 +@@ -247,7 +254,7 @@ static void u_audio_iso_fback_complete(struct usb_ep *ep,
4421 + pr_debug("%s: iso_complete status(%d) %d/%d\n",
4422 + __func__, status, req->actual, req->length);
4423 +
4424 +- u_audio_set_fback_frequency(audio_dev->gadget->speed,
4425 ++ u_audio_set_fback_frequency(audio_dev->gadget->speed, audio_dev->out_ep,
4426 + params->c_srate, prm->pitch,
4427 + req->buf);
4428 +
4429 +@@ -506,7 +513,7 @@ int u_audio_start_capture(struct g_audio *audio_dev)
4430 + * be meauserd at start of playback
4431 + */
4432 + prm->pitch = 1000000;
4433 +- u_audio_set_fback_frequency(audio_dev->gadget->speed,
4434 ++ u_audio_set_fback_frequency(audio_dev->gadget->speed, ep,
4435 + params->c_srate, prm->pitch,
4436 + req_fback->buf);
4437 +
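
The hunk above scales the feedback value by the endpoint's bInterval so that the Windows and macOS UAC2 drivers read it as samples per packet. A minimal standalone sketch of that encoding (simplified arithmetic, not the driver's optimized 1953125 divider; the 48 kHz figures are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Q16.16 samples-per-packet feedback for a high-speed endpoint:
 * one packet spans 2^(bInterval - 1) microframes of 125 us each. */
static uint32_t uac2_feedback(uint64_t rate_hz, uint8_t bInterval)
{
	uint64_t packets_per_sec = 8000 >> (bInterval - 1);

	return (uint32_t)((rate_hz << 16) / packets_per_sec);
}

int main(void)
{
	/* 48 kHz at bInterval = 1: 6 samples/packet -> 0x00060000 */
	printf("0x%08x\n", uac2_feedback(48000, 1));
	/* bInterval = 4 (1 ms packets): 48 samples -> 0x00300000 */
	printf("0x%08x\n", uac2_feedback(48000, 4));
	return 0;
}
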
4438 +diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
4439 +index 65cae48834545..38e4d6b505a05 100644
4440 +--- a/drivers/usb/gadget/udc/r8a66597-udc.c
4441 ++++ b/drivers/usb/gadget/udc/r8a66597-udc.c
4442 +@@ -1250,7 +1250,7 @@ static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
4443 + do {
4444 + tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
4445 + udelay(1);
4446 +- } while (tmp != CS_IDST || timeout-- > 0);
4447 ++ } while (tmp != CS_IDST && timeout-- > 0);
4448 +
4449 + if (tmp == CS_IDST)
4450 + r8a66597_bset(r8a66597,
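
The one-character fix above changes the loop's exit logic: with `||`, the loop spun for the whole timeout budget even after the state matched, and never terminated at all when it did not (the short-circuited `timeout--` was not even evaluated). A sketch of the corrected shape, with read_state standing in for the register read:

#include <stdbool.h>

/* Poll only while the condition is unmet AND attempts remain. */
static bool poll_until(unsigned int (*read_state)(void),
		       unsigned int want, int timeout)
{
	unsigned int tmp;

	do {
		tmp = read_state();
		/* the driver inserts udelay(1) between reads */
	} while (tmp != want && timeout-- > 0);

	return tmp == want;
}
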
4451 +diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
4452 +index 337b425dd4b04..2df52f75f6b3c 100644
4453 +--- a/drivers/usb/host/bcma-hcd.c
4454 ++++ b/drivers/usb/host/bcma-hcd.c
4455 +@@ -406,12 +406,9 @@ static int bcma_hcd_probe(struct bcma_device *core)
4456 + return -ENOMEM;
4457 + usb_dev->core = core;
4458 +
4459 +- if (core->dev.of_node) {
4460 ++ if (core->dev.of_node)
4461 + usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc",
4462 + GPIOD_OUT_HIGH);
4463 +- if (IS_ERR(usb_dev->gpio_desc))
4464 +- return PTR_ERR(usb_dev->gpio_desc);
4465 +- }
4466 +
4467 + switch (core->id.id) {
4468 + case BCMA_CORE_USB20_HOST:
4469 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
4470 +index 18a203c9011eb..4a1346e3de1b2 100644
4471 +--- a/drivers/usb/host/xhci.c
4472 ++++ b/drivers/usb/host/xhci.c
4473 +@@ -692,6 +692,7 @@ int xhci_run(struct usb_hcd *hcd)
4474 + if (ret)
4475 + xhci_free_command(xhci, command);
4476 + }
4477 ++ set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
4478 + xhci_dbg_trace(xhci, trace_xhci_dbg_init,
4479 + "Finished xhci_run for USB2 roothub");
4480 +
4481 +diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
4482 +index e517376c32917..cf13db3d1695d 100644
4483 +--- a/drivers/usb/isp1760/isp1760-hcd.c
4484 ++++ b/drivers/usb/isp1760/isp1760-hcd.c
4485 +@@ -251,7 +251,7 @@ static int isp1760_hcd_set_and_wait(struct usb_hcd *hcd, u32 field,
4486 + isp1760_hcd_set(hcd, field);
4487 +
4488 + return regmap_field_read_poll_timeout(priv->fields[field], val,
4489 +- val, 10, timeout_us);
4490 ++ val, 0, timeout_us);
4491 + }
4492 +
4493 + static int isp1760_hcd_set_and_wait_swap(struct usb_hcd *hcd, u32 field,
4494 +@@ -263,7 +263,7 @@ static int isp1760_hcd_set_and_wait_swap(struct usb_hcd *hcd, u32 field,
4495 + isp1760_hcd_set(hcd, field);
4496 +
4497 + return regmap_field_read_poll_timeout(priv->fields[field], val,
4498 +- !val, 10, timeout_us);
4499 ++ !val, 0, timeout_us);
4500 + }
4501 +
4502 + static int isp1760_hcd_clear_and_wait(struct usb_hcd *hcd, u32 field,
4503 +@@ -275,7 +275,7 @@ static int isp1760_hcd_clear_and_wait(struct usb_hcd *hcd, u32 field,
4504 + isp1760_hcd_clear(hcd, field);
4505 +
4506 + return regmap_field_read_poll_timeout(priv->fields[field], val,
4507 +- !val, 10, timeout_us);
4508 ++ !val, 0, timeout_us);
4509 + }
4510 +
4511 + static bool isp1760_hcd_is_set(struct usb_hcd *hcd, u32 field)
4512 +diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
4513 +index c429376922079..c968ecda42aa8 100644
4514 +--- a/drivers/usb/musb/tusb6010.c
4515 ++++ b/drivers/usb/musb/tusb6010.c
4516 +@@ -190,6 +190,7 @@ tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
4517 + }
4518 + if (len > 0) {
4519 + /* Write the rest 1 - 3 bytes to FIFO */
4520 ++ val = 0;
4521 + memcpy(&val, buf, len);
4522 + musb_writel(fifo, 0, val);
4523 + }
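
The added `val = 0` matters because memcpy() fills only the first 1-3 bytes of the 32-bit word; the remaining bytes held whatever was on the stack and went out on the FIFO. The pattern in isolation (illustrative, not the driver code):

#include <stdint.h>
#include <string.h>

/* Pack a 1-3 byte tail into a 32-bit word without leaking stale
 * stack bytes, mirroring the tusb6010 fix above. */
static uint32_t pack_tail(const uint8_t *buf, size_t len)
{
	uint32_t val = 0;	/* deterministic padding */

	memcpy(&val, buf, len);	/* len is 1..3 here */
	return val;
}
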
4524 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
4525 +index d48bed5782a5c..3aaf52d9985bd 100644
4526 +--- a/drivers/usb/serial/cp210x.c
4527 ++++ b/drivers/usb/serial/cp210x.c
4528 +@@ -233,6 +233,7 @@ static const struct usb_device_id id_table[] = {
4529 + { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
4530 + { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
4531 + { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
4532 ++ { USB_DEVICE(0x2184, 0x0030) }, /* GW Instek GDM-834x Digital Multimeter */
4533 + { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
4534 + { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
4535 + { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
4536 +@@ -258,6 +259,7 @@ struct cp210x_serial_private {
4537 + speed_t max_speed;
4538 + bool use_actual_rate;
4539 + bool no_flow_control;
4540 ++ bool no_event_mode;
4541 + };
4542 +
4543 + enum cp210x_event_state {
4544 +@@ -1112,12 +1114,16 @@ static void cp210x_change_speed(struct tty_struct *tty,
4545 +
4546 + static void cp210x_enable_event_mode(struct usb_serial_port *port)
4547 + {
4548 ++ struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
4549 + struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
4550 + int ret;
4551 +
4552 + if (port_priv->event_mode)
4553 + return;
4554 +
4555 ++ if (priv->no_event_mode)
4556 ++ return;
4557 ++
4558 + port_priv->event_state = ES_DATA;
4559 + port_priv->event_mode = true;
4560 +
4561 +@@ -2097,6 +2103,33 @@ static void cp210x_init_max_speed(struct usb_serial *serial)
4562 + priv->use_actual_rate = use_actual_rate;
4563 + }
4564 +
4565 ++static void cp2102_determine_quirks(struct usb_serial *serial)
4566 ++{
4567 ++ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
4568 ++ u8 *buf;
4569 ++ int ret;
4570 ++
4571 ++ buf = kmalloc(2, GFP_KERNEL);
4572 ++ if (!buf)
4573 ++ return;
4574 ++ /*
4575 ++ * Some (possibly counterfeit) CP2102 do not support event-insertion
4576 ++ * mode and respond differently to malformed vendor requests.
4577 ++ * Specifically, they return one instead of two bytes when sent a
4578 ++ * two-byte part-number request.
4579 ++ */
4580 ++ ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
4581 ++ CP210X_VENDOR_SPECIFIC, REQTYPE_DEVICE_TO_HOST,
4582 ++ CP210X_GET_PARTNUM, 0, buf, 2, USB_CTRL_GET_TIMEOUT);
4583 ++ if (ret == 1) {
4584 ++ dev_dbg(&serial->interface->dev,
4585 ++ "device does not support event-insertion mode\n");
4586 ++ priv->no_event_mode = true;
4587 ++ }
4588 ++
4589 ++ kfree(buf);
4590 ++}
4591 ++
4592 + static int cp210x_get_fw_version(struct usb_serial *serial, u16 value)
4593 + {
4594 + struct cp210x_serial_private *priv = usb_get_serial_data(serial);
4595 +@@ -2122,6 +2155,9 @@ static void cp210x_determine_quirks(struct usb_serial *serial)
4596 + int ret;
4597 +
4598 + switch (priv->partnum) {
4599 ++ case CP210X_PARTNUM_CP2102:
4600 ++ cp2102_determine_quirks(serial);
4601 ++ break;
4602 + case CP210X_PARTNUM_CP2102N_QFN28:
4603 + case CP210X_PARTNUM_CP2102N_QFN24:
4604 + case CP210X_PARTNUM_CP2102N_QFN20:
4605 +diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
4606 +index d7fe33ca73e4c..925067a7978d4 100644
4607 +--- a/drivers/usb/serial/mos7840.c
4608 ++++ b/drivers/usb/serial/mos7840.c
4609 +@@ -107,7 +107,6 @@
4610 + #define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02
4611 + #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
4612 + #define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
4613 +-#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
4614 +
4615 + /* Interrupt Routine Defines */
4616 +
4617 +@@ -186,7 +185,6 @@ static const struct usb_device_id id_table[] = {
4618 + { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P) },
4619 + { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4) },
4620 + { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P) },
4621 +- { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4) },
4622 + {} /* terminating entry */
4623 + };
4624 + MODULE_DEVICE_TABLE(usb, id_table);
4625 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
4626 +index 29c765cc84957..6cfb5d33609fb 100644
4627 +--- a/drivers/usb/serial/option.c
4628 ++++ b/drivers/usb/serial/option.c
4629 +@@ -1205,6 +1205,14 @@ static const struct usb_device_id option_ids[] = {
4630 + .driver_info = NCTRL(0) | RSVD(1) },
4631 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */
4632 + .driver_info = NCTRL(2) | RSVD(3) },
4633 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff), /* Telit LN920 (rmnet) */
4634 ++ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
4635 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff), /* Telit LN920 (MBIM) */
4636 ++ .driver_info = NCTRL(0) | RSVD(1) },
4637 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1062, 0xff), /* Telit LN920 (RNDIS) */
4638 ++ .driver_info = NCTRL(2) | RSVD(3) },
4639 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */
4640 ++ .driver_info = NCTRL(0) | RSVD(1) },
4641 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
4642 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
4643 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
4644 +@@ -1650,7 +1658,6 @@ static const struct usb_device_id option_ids[] = {
4645 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
4646 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
4647 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
4648 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
4649 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
4650 + .driver_info = RSVD(1) },
4651 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
4652 +@@ -2068,6 +2075,8 @@ static const struct usb_device_id option_ids[] = {
4653 + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
4654 + { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
4655 + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
4656 ++ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff), /* Foxconn T99W265 MBIM */
4657 ++ .driver_info = RSVD(3) },
4658 + { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
4659 + .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
4660 + { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
4661 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
4662 +index efa972be2ee34..c6b3fcf901805 100644
4663 +--- a/drivers/usb/storage/unusual_devs.h
4664 ++++ b/drivers/usb/storage/unusual_devs.h
4665 +@@ -416,9 +416,16 @@ UNUSUAL_DEV( 0x04cb, 0x0100, 0x0000, 0x2210,
4666 + USB_SC_UFI, USB_PR_DEVICE, NULL, US_FL_FIX_INQUIRY | US_FL_SINGLE_LUN),
4667 +
4668 + /*
4669 +- * Reported by Ondrej Zary <linux@××××××××××××××××.org>
4670 ++ * Reported by Ondrej Zary <linux@××××.sk>
4671 + * The device reports one sector more and breaks when that sector is accessed
4672 + * Firmwares older than 2.6c (the latest one, and the only one that claims
4673 + * Linux support) also have broken tag handling
4674 + */
4675 ++UNUSUAL_DEV( 0x04ce, 0x0002, 0x0000, 0x026b,
4676 ++ "ScanLogic",
4677 ++ "SL11R-IDE",
4678 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
4679 ++ US_FL_FIX_CAPACITY | US_FL_BULK_IGNORE_TAG),
4680 + UNUSUAL_DEV( 0x04ce, 0x0002, 0x026c, 0x026c,
4681 + "ScanLogic",
4682 + "SL11R-IDE",
4683 +diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
4684 +index c35a6db993f1b..4051c8cd0cd8a 100644
4685 +--- a/drivers/usb/storage/unusual_uas.h
4686 ++++ b/drivers/usb/storage/unusual_uas.h
4687 +@@ -50,7 +50,7 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
4688 + "LaCie",
4689 + "Rugged USB3-FW",
4690 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
4691 +- US_FL_IGNORE_UAS),
4692 ++ US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
4693 +
4694 + /*
4695 + * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
4696 +diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
4697 +index 671c71245a7b2..43ebfe36ac276 100644
4698 +--- a/drivers/xen/balloon.c
4699 ++++ b/drivers/xen/balloon.c
4700 +@@ -43,6 +43,8 @@
4701 + #include <linux/sched.h>
4702 + #include <linux/cred.h>
4703 + #include <linux/errno.h>
4704 ++#include <linux/freezer.h>
4705 ++#include <linux/kthread.h>
4706 + #include <linux/mm.h>
4707 + #include <linux/memblock.h>
4708 + #include <linux/pagemap.h>
4709 +@@ -115,7 +117,7 @@ static struct ctl_table xen_root[] = {
4710 + #define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
4711 +
4712 + /*
4713 +- * balloon_process() state:
4714 ++ * balloon_thread() state:
4715 + *
4716 + * BP_DONE: done or nothing to do,
4717 + * BP_WAIT: wait to be rescheduled,
4718 +@@ -130,6 +132,8 @@ enum bp_state {
4719 + BP_ECANCELED
4720 + };
4721 +
4722 ++/* Main waiting point for xen-balloon thread. */
4723 ++static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);
4724 +
4725 + static DEFINE_MUTEX(balloon_mutex);
4726 +
4727 +@@ -144,10 +148,6 @@ static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
4728 + static LIST_HEAD(ballooned_pages);
4729 + static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
4730 +
4731 +-/* Main work function, always executed in process context. */
4732 +-static void balloon_process(struct work_struct *work);
4733 +-static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
4734 +-
4735 + /* When ballooning out (allocating memory to return to Xen) we don't really
4736 + want the kernel to try too hard since that can trigger the oom killer. */
4737 + #define GFP_BALLOON \
4738 +@@ -366,7 +366,7 @@ static void xen_online_page(struct page *page, unsigned int order)
4739 + static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
4740 + {
4741 + if (val == MEM_ONLINE)
4742 +- schedule_delayed_work(&balloon_worker, 0);
4743 ++ wake_up(&balloon_thread_wq);
4744 +
4745 + return NOTIFY_OK;
4746 + }
4747 +@@ -491,18 +491,43 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
4748 + }
4749 +
4750 + /*
4751 +- * As this is a work item it is guaranteed to run as a single instance only.
4752 ++ * Stop waiting if either state is not BP_EAGAIN and ballooning action is
4753 ++ * needed, or if the credit has changed while state is BP_EAGAIN.
4754 ++ */
4755 ++static bool balloon_thread_cond(enum bp_state state, long credit)
4756 ++{
4757 ++ if (state != BP_EAGAIN)
4758 ++ credit = 0;
4759 ++
4760 ++ return current_credit() != credit || kthread_should_stop();
4761 ++}
4762 ++
4763 ++/*
4764 ++ * As this is a kthread it is guaranteed to run as a single instance only.
4765 + * We may of course race updates of the target counts (which are protected
4766 + * by the balloon lock), or with changes to the Xen hard limit, but we will
4767 + * recover from these in time.
4768 + */
4769 +-static void balloon_process(struct work_struct *work)
4770 ++static int balloon_thread(void *unused)
4771 + {
4772 + enum bp_state state = BP_DONE;
4773 + long credit;
4774 ++ unsigned long timeout;
4775 ++
4776 ++ set_freezable();
4777 ++ for (;;) {
4778 ++ if (state == BP_EAGAIN)
4779 ++ timeout = balloon_stats.schedule_delay * HZ;
4780 ++ else
4781 ++ timeout = 3600 * HZ;
4782 ++ credit = current_credit();
4783 +
4784 ++ wait_event_freezable_timeout(balloon_thread_wq,
4785 ++ balloon_thread_cond(state, credit), timeout);
4786 ++
4787 ++ if (kthread_should_stop())
4788 ++ return 0;
4789 +
4790 +- do {
4791 + mutex_lock(&balloon_mutex);
4792 +
4793 + credit = current_credit();
4794 +@@ -529,12 +554,7 @@ static void balloon_process(struct work_struct *work)
4795 + mutex_unlock(&balloon_mutex);
4796 +
4797 + cond_resched();
4798 +-
4799 +- } while (credit && state == BP_DONE);
4800 +-
4801 +- /* Schedule more work if there is some still to be done. */
4802 +- if (state == BP_EAGAIN)
4803 +- schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
4804 ++ }
4805 + }
4806 +
4807 + /* Resets the Xen limit, sets new target, and kicks off processing. */
4808 +@@ -542,7 +562,7 @@ void balloon_set_new_target(unsigned long target)
4809 + {
4810 + /* No need for lock. Not read-modify-write updates. */
4811 + balloon_stats.target_pages = target;
4812 +- schedule_delayed_work(&balloon_worker, 0);
4813 ++ wake_up(&balloon_thread_wq);
4814 + }
4815 + EXPORT_SYMBOL_GPL(balloon_set_new_target);
4816 +
4817 +@@ -647,7 +667,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
4818 +
4819 + /* The balloon may be too large now. Shrink it if needed. */
4820 + if (current_credit())
4821 +- schedule_delayed_work(&balloon_worker, 0);
4822 ++ wake_up(&balloon_thread_wq);
4823 +
4824 + mutex_unlock(&balloon_mutex);
4825 + }
4826 +@@ -679,6 +699,8 @@ static void __init balloon_add_region(unsigned long start_pfn,
4827 +
4828 + static int __init balloon_init(void)
4829 + {
4830 ++ struct task_struct *task;
4831 ++
4832 + if (!xen_domain())
4833 + return -ENODEV;
4834 +
4835 +@@ -722,6 +744,12 @@ static int __init balloon_init(void)
4836 + }
4837 + #endif
4838 +
4839 ++ task = kthread_run(balloon_thread, NULL, "xen-balloon");
4840 ++ if (IS_ERR(task)) {
4841 ++ pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
4842 ++ return PTR_ERR(task);
4843 ++ }
4844 ++
4845 + /* Init the xen-balloon driver. */
4846 + xen_balloon_init();
4847 +
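
The balloon work item becomes a dedicated kthread that sleeps on balloon_thread_wq and is poked with wake_up() everywhere schedule_delayed_work() used to be. A self-contained userspace analogue of that wait/wake shape (C11 threads; simplified in that the kernel side avoids a lock on the update path by re-sampling the credit inside the wait condition):

#include <threads.h>
#include <stdbool.h>

static mtx_t lock;
static cnd_t wq;
static long target, current_pages;
static bool stop;

/* Worker: sleep until the target moves, then do one balancing pass. */
static int balloon_thread(void *unused)
{
	(void)unused;
	mtx_lock(&lock);
	while (!stop) {
		while (!stop && target == current_pages)
			cnd_wait(&wq, &lock);	/* releases lock while asleep */
		current_pages = target;		/* stand-in for the real pass */
	}
	mtx_unlock(&lock);
	return 0;
}

/* Updater: the analogue of balloon_set_new_target() + wake_up(). */
static void set_new_target(long t)
{
	mtx_lock(&lock);
	target = t;
	cnd_signal(&wq);	/* pairs with cnd_wait() above */
	mtx_unlock(&lock);
}
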
4848 +diff --git a/fs/afs/dir.c b/fs/afs/dir.c
4849 +index ac829e63c5704..54ee54ae36bc8 100644
4850 +--- a/fs/afs/dir.c
4851 ++++ b/fs/afs/dir.c
4852 +@@ -1077,9 +1077,9 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
4853 + */
4854 + static int afs_d_revalidate_rcu(struct dentry *dentry)
4855 + {
4856 +- struct afs_vnode *dvnode, *vnode;
4857 ++ struct afs_vnode *dvnode;
4858 + struct dentry *parent;
4859 +- struct inode *dir, *inode;
4860 ++ struct inode *dir;
4861 + long dir_version, de_version;
4862 +
4863 + _enter("%p", dentry);
4864 +@@ -1109,18 +1109,6 @@ static int afs_d_revalidate_rcu(struct dentry *dentry)
4865 + return -ECHILD;
4866 + }
4867 +
4868 +- /* Check to see if the vnode referred to by the dentry still
4869 +- * has a callback.
4870 +- */
4871 +- if (d_really_is_positive(dentry)) {
4872 +- inode = d_inode_rcu(dentry);
4873 +- if (inode) {
4874 +- vnode = AFS_FS_I(inode);
4875 +- if (!afs_check_validity(vnode))
4876 +- return -ECHILD;
4877 +- }
4878 +- }
4879 +-
4880 + return 1; /* Still valid */
4881 + }
4882 +
4883 +@@ -1156,17 +1144,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
4884 + if (IS_ERR(key))
4885 + key = NULL;
4886 +
4887 +- if (d_really_is_positive(dentry)) {
4888 +- inode = d_inode(dentry);
4889 +- if (inode) {
4890 +- vnode = AFS_FS_I(inode);
4891 +- afs_validate(vnode, key);
4892 +- if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
4893 +- goto out_bad;
4894 +- }
4895 +- }
4896 +-
4897 +- /* lock down the parent dentry so we can peer at it */
4898 ++ /* Hold the parent dentry so we can peer at it */
4899 + parent = dget_parent(dentry);
4900 + dir = AFS_FS_I(d_inode(parent));
4901 +
4902 +@@ -1175,7 +1153,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
4903 +
4904 + if (test_bit(AFS_VNODE_DELETED, &dir->flags)) {
4905 + _debug("%pd: parent dir deleted", dentry);
4906 +- goto out_bad_parent;
4907 ++ goto not_found;
4908 + }
4909 +
4910 + /* We only need to invalidate a dentry if the server's copy changed
4911 +@@ -1201,12 +1179,12 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
4912 + case 0:
4913 + /* the filename maps to something */
4914 + if (d_really_is_negative(dentry))
4915 +- goto out_bad_parent;
4916 ++ goto not_found;
4917 + inode = d_inode(dentry);
4918 + if (is_bad_inode(inode)) {
4919 + printk("kAFS: afs_d_revalidate: %pd2 has bad inode\n",
4920 + dentry);
4921 +- goto out_bad_parent;
4922 ++ goto not_found;
4923 + }
4924 +
4925 + vnode = AFS_FS_I(inode);
4926 +@@ -1228,9 +1206,6 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
4927 + dentry, fid.unique,
4928 + vnode->fid.unique,
4929 + vnode->vfs_inode.i_generation);
4930 +- write_seqlock(&vnode->cb_lock);
4931 +- set_bit(AFS_VNODE_DELETED, &vnode->flags);
4932 +- write_sequnlock(&vnode->cb_lock);
4933 + goto not_found;
4934 + }
4935 + goto out_valid;
4936 +@@ -1245,7 +1220,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
4937 + default:
4938 + _debug("failed to iterate dir %pd: %d",
4939 + parent, ret);
4940 +- goto out_bad_parent;
4941 ++ goto not_found;
4942 + }
4943 +
4944 + out_valid:
4945 +@@ -1256,16 +1231,9 @@ out_valid_noupdate:
4946 + _leave(" = 1 [valid]");
4947 + return 1;
4948 +
4949 +- /* the dirent, if it exists, now points to a different vnode */
4950 + not_found:
4951 +- spin_lock(&dentry->d_lock);
4952 +- dentry->d_flags |= DCACHE_NFSFS_RENAMED;
4953 +- spin_unlock(&dentry->d_lock);
4954 +-
4955 +-out_bad_parent:
4956 + _debug("dropping dentry %pd2", dentry);
4957 + dput(parent);
4958 +-out_bad:
4959 + key_put(key);
4960 +
4961 + _leave(" = 0 [bad]");
4962 +diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
4963 +index f4600c1353adf..540b9fc96824a 100644
4964 +--- a/fs/afs/dir_edit.c
4965 ++++ b/fs/afs/dir_edit.c
4966 +@@ -263,7 +263,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
4967 + if (b == nr_blocks) {
4968 + _debug("init %u", b);
4969 + afs_edit_init_block(meta, block, b);
4970 +- i_size_write(&vnode->vfs_inode, (b + 1) * AFS_DIR_BLOCK_SIZE);
4971 ++ afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE);
4972 + }
4973 +
4974 + /* Only lower dir pages have a counter in the header. */
4975 +@@ -296,7 +296,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
4976 + new_directory:
4977 + afs_edit_init_block(meta, meta, 0);
4978 + i_size = AFS_DIR_BLOCK_SIZE;
4979 +- i_size_write(&vnode->vfs_inode, i_size);
4980 ++ afs_set_i_size(vnode, i_size);
4981 + slot = AFS_DIR_RESV_BLOCKS0;
4982 + page = page0;
4983 + block = meta;
4984 +diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
4985 +index e7e98ad63a91a..c0031a3ab42f5 100644
4986 +--- a/fs/afs/fs_probe.c
4987 ++++ b/fs/afs/fs_probe.c
4988 +@@ -9,6 +9,7 @@
4989 + #include <linux/slab.h>
4990 + #include "afs_fs.h"
4991 + #include "internal.h"
4992 ++#include "protocol_afs.h"
4993 + #include "protocol_yfs.h"
4994 +
4995 + static unsigned int afs_fs_probe_fast_poll_interval = 30 * HZ;
4996 +@@ -102,7 +103,7 @@ void afs_fileserver_probe_result(struct afs_call *call)
4997 + struct afs_addr_list *alist = call->alist;
4998 + struct afs_server *server = call->server;
4999 + unsigned int index = call->addr_ix;
5000 +- unsigned int rtt_us = 0;
5001 ++ unsigned int rtt_us = 0, cap0;
5002 + int ret = call->error;
5003 +
5004 + _enter("%pU,%u", &server->uuid, index);
5005 +@@ -159,6 +160,11 @@ responded:
5006 + clear_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
5007 + alist->addrs[index].srx_service = call->service_id;
5008 + }
5009 ++ cap0 = ntohl(call->tmp);
5010 ++ if (cap0 & AFS3_VICED_CAPABILITY_64BITFILES)
5011 ++ set_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
5012 ++ else
5013 ++ clear_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
5014 + }
5015 +
5016 + if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
5017 +diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
5018 +index dd3f45d906d23..4943413d9c5f7 100644
5019 +--- a/fs/afs/fsclient.c
5020 ++++ b/fs/afs/fsclient.c
5021 +@@ -456,9 +456,7 @@ void afs_fs_fetch_data(struct afs_operation *op)
5022 + struct afs_read *req = op->fetch.req;
5023 + __be32 *bp;
5024 +
5025 +- if (upper_32_bits(req->pos) ||
5026 +- upper_32_bits(req->len) ||
5027 +- upper_32_bits(req->pos + req->len))
5028 ++ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
5029 + return afs_fs_fetch_data64(op);
5030 +
5031 + _enter("");
5032 +@@ -1113,9 +1111,7 @@ void afs_fs_store_data(struct afs_operation *op)
5033 + (unsigned long long)op->store.pos,
5034 + (unsigned long long)op->store.i_size);
5035 +
5036 +- if (upper_32_bits(op->store.pos) ||
5037 +- upper_32_bits(op->store.size) ||
5038 +- upper_32_bits(op->store.i_size))
5039 ++ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
5040 + return afs_fs_store_data64(op);
5041 +
5042 + call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData,
5043 +@@ -1229,7 +1225,7 @@ static void afs_fs_setattr_size(struct afs_operation *op)
5044 + key_serial(op->key), vp->fid.vid, vp->fid.vnode);
5045 +
5046 + ASSERT(attr->ia_valid & ATTR_SIZE);
5047 +- if (upper_32_bits(attr->ia_size))
5048 ++ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
5049 + return afs_fs_setattr_size64(op);
5050 +
5051 + call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData_as_Status,
5052 +@@ -1657,20 +1653,33 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
5053 + return ret;
5054 +
5055 + count = ntohl(call->tmp);
5056 +-
5057 + call->count = count;
5058 + call->count2 = count;
5059 +- afs_extract_discard(call, count * sizeof(__be32));
5060 ++ if (count == 0) {
5061 ++ call->unmarshall = 4;
5062 ++ call->tmp = 0;
5063 ++ break;
5064 ++ }
5065 ++
5066 ++ /* Extract the first word of the capabilities to call->tmp */
5067 ++ afs_extract_to_tmp(call);
5068 + call->unmarshall++;
5069 + fallthrough;
5070 +
5071 +- /* Extract capabilities words */
5072 + case 2:
5073 + ret = afs_extract_data(call, false);
5074 + if (ret < 0)
5075 + return ret;
5076 +
5077 +- /* TODO: Examine capabilities */
5078 ++ afs_extract_discard(call, (count - 1) * sizeof(__be32));
5079 ++ call->unmarshall++;
5080 ++ fallthrough;
5081 ++
5082 ++ /* Extract remaining capabilities words */
5083 ++ case 3:
5084 ++ ret = afs_extract_data(call, false);
5085 ++ if (ret < 0)
5086 ++ return ret;
5087 +
5088 + call->unmarshall++;
5089 + break;
5090 +diff --git a/fs/afs/inode.c b/fs/afs/inode.c
5091 +index 80b6c8d967d5c..c18cbc69fa582 100644
5092 +--- a/fs/afs/inode.c
5093 ++++ b/fs/afs/inode.c
5094 +@@ -53,16 +53,6 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
5095 + dump_stack();
5096 + }
5097 +
5098 +-/*
5099 +- * Set the file size and block count. Estimate the number of 512 bytes blocks
5100 +- * used, rounded up to nearest 1K for consistency with other AFS clients.
5101 +- */
5102 +-static void afs_set_i_size(struct afs_vnode *vnode, u64 size)
5103 +-{
5104 +- i_size_write(&vnode->vfs_inode, size);
5105 +- vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
5106 +-}
5107 +-
5108 + /*
5109 + * Initialise an inode from the vnode status.
5110 + */
5111 +diff --git a/fs/afs/internal.h b/fs/afs/internal.h
5112 +index 5ed416f4ff335..345494881f655 100644
5113 +--- a/fs/afs/internal.h
5114 ++++ b/fs/afs/internal.h
5115 +@@ -516,6 +516,7 @@ struct afs_server {
5116 + #define AFS_SERVER_FL_IS_YFS 16 /* Server is YFS not AFS */
5117 + #define AFS_SERVER_FL_NO_IBULK 17 /* Fileserver doesn't support FS.InlineBulkStatus */
5118 + #define AFS_SERVER_FL_NO_RM2 18 /* Fileserver doesn't support YFS.RemoveFile2 */
5119 ++#define AFS_SERVER_FL_HAS_FS64 19 /* Fileserver supports FS.{Fetch,Store}Data64 */
5120 + atomic_t ref; /* Object refcount */
5121 + atomic_t active; /* Active user count */
5122 + u32 addr_version; /* Address list version */
5123 +@@ -1585,6 +1586,16 @@ static inline void afs_update_dentry_version(struct afs_operation *op,
5124 + (void *)(unsigned long)dir_vp->scb.status.data_version;
5125 + }
5126 +
5127 ++/*
5128 ++ * Set the file size and block count. Estimate the number of 512 bytes blocks
5129 ++ * used, rounded up to nearest 1K for consistency with other AFS clients.
5130 ++ */
5131 ++static inline void afs_set_i_size(struct afs_vnode *vnode, u64 size)
5132 ++{
5133 ++ i_size_write(&vnode->vfs_inode, size);
5134 ++ vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
5135 ++}
5136 ++
5137 + /*
5138 + * Check for a conflicting operation on a directory that we just unlinked from.
5139 + * If someone managed to sneak a link or an unlink in on the file we just
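
The relocated afs_set_i_size() helper estimates i_blocks by rounding the byte size up to the next 1 KiB and expressing the result in 512-byte units, hence the trailing << 1. A worked example:

#include <stdio.h>

int main(void)
{
	unsigned long long size = 1536;	/* bytes */
	unsigned long long blocks = ((size + 1023) >> 10) << 1;

	/* 1536 rounds up to 2 KiB = 4 x 512-byte blocks */
	printf("%llu bytes -> %llu blocks\n", size, blocks);
	return 0;
}
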
5140 +diff --git a/fs/afs/protocol_afs.h b/fs/afs/protocol_afs.h
5141 +new file mode 100644
5142 +index 0000000000000..0c39358c8b702
5143 +--- /dev/null
5144 ++++ b/fs/afs/protocol_afs.h
5145 +@@ -0,0 +1,15 @@
5146 ++/* SPDX-License-Identifier: GPL-2.0-or-later */
5147 ++/* AFS protocol bits
5148 ++ *
5149 ++ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
5150 ++ * Written by David Howells (dhowells@××××××.com)
5151 ++ */
5152 ++
5153 ++
5154 ++#define AFSCAPABILITIESMAX 196 /* Maximum number of words in a capability set */
5155 ++
5156 ++/* AFS3 Fileserver capabilities word 0 */
5157 ++#define AFS3_VICED_CAPABILITY_ERRORTRANS 0x0001 /* Uses UAE errors */
5158 ++#define AFS3_VICED_CAPABILITY_64BITFILES 0x0002 /* FetchData64 & StoreData64 supported */
5159 ++#define AFS3_VICED_CAPABILITY_WRITELOCKACL 0x0004 /* Can lock a file even without lock perm */
5160 ++#define AFS3_VICED_CAPABILITY_SANEACLS 0x0008 /* ACLs reviewed for sanity - don't use */
5161 +diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
5162 +index b5bd03b1d3c7f..e4cd89c44c465 100644
5163 +--- a/fs/afs/protocol_yfs.h
5164 ++++ b/fs/afs/protocol_yfs.h
5165 +@@ -168,3 +168,9 @@ enum yfs_lock_type {
5166 + yfs_LockMandatoryWrite = 0x101,
5167 + yfs_LockMandatoryExtend = 0x102,
5168 + };
5169 ++
5170 ++/* RXYFS Viced Capability Flags */
5171 ++#define YFS_VICED_CAPABILITY_ERRORTRANS 0x0001 /* Deprecated v0.195 */
5172 ++#define YFS_VICED_CAPABILITY_64BITFILES 0x0002 /* Deprecated v0.195 */
5173 ++#define YFS_VICED_CAPABILITY_WRITELOCKACL 0x0004 /* Can lock a file even without lock perm */
5174 ++#define YFS_VICED_CAPABILITY_SANEACLS 0x0008 /* Deprecated v0.195 */
5175 +diff --git a/fs/afs/write.c b/fs/afs/write.c
5176 +index c0534697268ef..e86f5a245514d 100644
5177 +--- a/fs/afs/write.c
5178 ++++ b/fs/afs/write.c
5179 +@@ -137,7 +137,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
5180 + write_seqlock(&vnode->cb_lock);
5181 + i_size = i_size_read(&vnode->vfs_inode);
5182 + if (maybe_i_size > i_size)
5183 +- i_size_write(&vnode->vfs_inode, maybe_i_size);
5184 ++ afs_set_i_size(vnode, maybe_i_size);
5185 + write_sequnlock(&vnode->cb_lock);
5186 + }
5187 +
5188 +@@ -471,13 +471,18 @@ static void afs_extend_writeback(struct address_space *mapping,
5189 + }
5190 +
5191 + /* Has the page moved or been split? */
5192 +- if (unlikely(page != xas_reload(&xas)))
5193 ++ if (unlikely(page != xas_reload(&xas))) {
5194 ++ put_page(page);
5195 + break;
5196 ++ }
5197 +
5198 +- if (!trylock_page(page))
5199 ++ if (!trylock_page(page)) {
5200 ++ put_page(page);
5201 + break;
5202 ++ }
5203 + if (!PageDirty(page) || PageWriteback(page)) {
5204 + unlock_page(page);
5205 ++ put_page(page);
5206 + break;
5207 + }
5208 +
5209 +@@ -487,6 +492,7 @@ static void afs_extend_writeback(struct address_space *mapping,
5210 + t = afs_page_dirty_to(page, priv);
5211 + if (f != 0 && !new_content) {
5212 + unlock_page(page);
5213 ++ put_page(page);
5214 + break;
5215 + }
5216 +
5217 +diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
5218 +index 46e8415fa2c55..0842efa6f7120 100644
5219 +--- a/fs/btrfs/space-info.c
5220 ++++ b/fs/btrfs/space-info.c
5221 +@@ -414,9 +414,10 @@ static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
5222 + {
5223 + lockdep_assert_held(&info->lock);
5224 +
5225 +- btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
5226 ++ /* The free space could be negative in case of overcommit */
5227 ++ btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull",
5228 + info->flags,
5229 +- info->total_bytes - btrfs_space_info_used(info, true),
5230 ++ (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
5231 + info->full ? "" : "not ");
5232 + btrfs_info(fs_info,
5233 + "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
5234 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
5235 +index c6a9542ca281b..cf2141483b37f 100644
5236 +--- a/fs/cifs/cifsglob.h
5237 ++++ b/fs/cifs/cifsglob.h
5238 +@@ -1403,6 +1403,7 @@ struct cifsInodeInfo {
5239 + #define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */
5240 + #define CIFS_INO_LOCK (5) /* lock bit for synchronization */
5241 + #define CIFS_INO_MODIFIED_ATTR (6) /* Indicate change in mtime/ctime */
5242 ++#define CIFS_INO_CLOSE_ON_LOCK (7) /* Not to defer the close when lock is set */
5243 + unsigned long flags;
5244 + spinlock_t writers_lock;
5245 + unsigned int writers; /* Number of writers on this inode */
5246 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
5247 +index 3781eee9360af..65d3cf80444bf 100644
5248 +--- a/fs/cifs/connect.c
5249 ++++ b/fs/cifs/connect.c
5250 +@@ -2382,9 +2382,10 @@ cifs_match_super(struct super_block *sb, void *data)
5251 + spin_lock(&cifs_tcp_ses_lock);
5252 + cifs_sb = CIFS_SB(sb);
5253 + tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
5254 +- if (IS_ERR(tlink)) {
5255 ++ if (tlink == NULL) {
5256 ++ /* cannot match the superblock if tlink was ever null */
5257 + spin_unlock(&cifs_tcp_ses_lock);
5258 +- return rc;
5259 ++ return 0;
5260 + }
5261 + tcon = tlink_tcon(tlink);
5262 + ses = tcon->ses;
5263 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
5264 +index bb98fbdd22a99..ab2734159c192 100644
5265 +--- a/fs/cifs/file.c
5266 ++++ b/fs/cifs/file.c
5267 +@@ -881,6 +881,7 @@ int cifs_close(struct inode *inode, struct file *file)
5268 + dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
5269 + if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
5270 + cinode->lease_granted &&
5271 ++ !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
5272 + dclose) {
5273 + if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags))
5274 + inode->i_ctime = inode->i_mtime = current_time(inode);
5275 +@@ -1861,6 +1862,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
5276 + cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
5277 + tcon->ses->server);
5278 + cifs_sb = CIFS_FILE_SB(file);
5279 ++ set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
5280 +
5281 + if (cap_unix(tcon->ses) &&
5282 + (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
5283 +@@ -3108,7 +3110,7 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
5284 + struct cifs_tcon *tcon;
5285 + struct cifs_sb_info *cifs_sb;
5286 + struct dentry *dentry = ctx->cfile->dentry;
5287 +- int rc;
5288 ++ ssize_t rc;
5289 +
5290 + tcon = tlink_tcon(ctx->cfile->tlink);
5291 + cifs_sb = CIFS_SB(dentry->d_sb);
5292 +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
5293 +index 9469f1cf0b46a..57e695e3c969b 100644
5294 +--- a/fs/cifs/misc.c
5295 ++++ b/fs/cifs/misc.c
5296 +@@ -736,7 +736,7 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
5297 + if (cancel_delayed_work(&cfile->deferred)) {
5298 + tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
5299 + if (tmp_list == NULL)
5300 +- continue;
5301 ++ break;
5302 + tmp_list->cfile = cfile;
5303 + list_add_tail(&tmp_list->list, &file_head);
5304 + }
5305 +@@ -767,7 +767,7 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
5306 + if (cancel_delayed_work(&cfile->deferred)) {
5307 + tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
5308 + if (tmp_list == NULL)
5309 +- continue;
5310 ++ break;
5311 + tmp_list->cfile = cfile;
5312 + list_add_tail(&tmp_list->list, &file_head);
5313 + }
5314 +diff --git a/fs/io_uring.c b/fs/io_uring.c
5315 +index 754d59f734d84..699a08d724c24 100644
5316 +--- a/fs/io_uring.c
5317 ++++ b/fs/io_uring.c
5318 +@@ -4043,7 +4043,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
5319 + int i, bid = pbuf->bid;
5320 +
5321 + for (i = 0; i < pbuf->nbufs; i++) {
5322 +- buf = kmalloc(sizeof(*buf), GFP_KERNEL);
5323 ++ buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
5324 + if (!buf)
5325 + break;
5326 +
5327 +@@ -4969,7 +4969,7 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
5328 + if (req->poll.events & EPOLLONESHOT)
5329 + flags = 0;
5330 + if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
5331 +- req->poll.done = true;
5332 ++ req->poll.events |= EPOLLONESHOT;
5333 + flags = 0;
5334 + }
5335 + if (flags & IORING_CQE_F_MORE)
5336 +@@ -4993,6 +4993,7 @@ static void io_poll_task_func(struct io_kiocb *req)
5337 + if (done) {
5338 + io_poll_remove_double(req);
5339 + hash_del(&req->hash_node);
5340 ++ req->poll.done = true;
5341 + } else {
5342 + req->result = 0;
5343 + add_wait_queue(req->poll.head, &req->poll.wait);
5344 +@@ -5126,6 +5127,7 @@ static void io_async_task_func(struct io_kiocb *req)
5345 +
5346 + hash_del(&req->hash_node);
5347 + io_poll_remove_double(req);
5348 ++ apoll->poll.done = true;
5349 + spin_unlock_irq(&ctx->completion_lock);
5350 +
5351 + if (!READ_ONCE(apoll->poll.canceled))
5352 +@@ -5917,19 +5919,16 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
5353 + struct io_uring_rsrc_update2 up;
5354 + int ret;
5355 +
5356 +- if (issue_flags & IO_URING_F_NONBLOCK)
5357 +- return -EAGAIN;
5358 +-
5359 + up.offset = req->rsrc_update.offset;
5360 + up.data = req->rsrc_update.arg;
5361 + up.nr = 0;
5362 + up.tags = 0;
5363 + up.resv = 0;
5364 +
5365 +- mutex_lock(&ctx->uring_lock);
5366 ++ io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5367 + ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
5368 + &up, req->rsrc_update.nr_args);
5369 +- mutex_unlock(&ctx->uring_lock);
5370 ++ io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5371 +
5372 + if (ret < 0)
5373 + req_set_fail(req);
5374 +diff --git a/fs/lockd/svcxdr.h b/fs/lockd/svcxdr.h
5375 +index c69a0bb76c940..4f1a451da5ba2 100644
5376 +--- a/fs/lockd/svcxdr.h
5377 ++++ b/fs/lockd/svcxdr.h
5378 +@@ -134,18 +134,9 @@ svcxdr_decode_owner(struct xdr_stream *xdr, struct xdr_netobj *obj)
5379 + static inline bool
5380 + svcxdr_encode_owner(struct xdr_stream *xdr, const struct xdr_netobj *obj)
5381 + {
5382 +- unsigned int quadlen = XDR_QUADLEN(obj->len);
5383 +- __be32 *p;
5384 +-
5385 +- if (xdr_stream_encode_u32(xdr, obj->len) < 0)
5386 +- return false;
5387 +- p = xdr_reserve_space(xdr, obj->len);
5388 +- if (!p)
5389 ++ if (obj->len > XDR_MAX_NETOBJ)
5390 + return false;
5391 +- p[quadlen - 1] = 0; /* XDR pad */
5392 +- memcpy(p, obj->data, obj->len);
5393 +-
5394 +- return true;
5395 ++ return xdr_stream_encode_opaque(xdr, obj->data, obj->len) > 0;
5396 + }
5397 +
5398 + #endif /* _LOCKD_SVCXDR_H_ */
5399 +diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
5400 +index 48fd369c29a4b..a2a2ae37b859a 100644
5401 +--- a/fs/ocfs2/dlmglue.c
5402 ++++ b/fs/ocfs2/dlmglue.c
5403 +@@ -3939,7 +3939,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
5404 + oi = OCFS2_I(inode);
5405 + oi->ip_dir_lock_gen++;
5406 + mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
5407 +- goto out;
5408 ++ goto out_forget;
5409 + }
5410 +
5411 + if (!S_ISREG(inode->i_mode))
5412 +@@ -3970,6 +3970,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
5413 + filemap_fdatawait(mapping);
5414 + }
5415 +
5416 ++out_forget:
5417 + forget_all_cached_acls(inode);
5418 +
5419 + out:
5420 +diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
5421 +index a6ee23aadd283..66645a5a35f30 100644
5422 +--- a/fs/qnx4/dir.c
5423 ++++ b/fs/qnx4/dir.c
5424 +@@ -15,13 +15,48 @@
5425 + #include <linux/buffer_head.h>
5426 + #include "qnx4.h"
5427 +
5428 ++/*
5429 ++ * A qnx4 directory entry is an inode entry or link info
5430 ++ * depending on the status field in the last byte. The
5431 ++ * first byte is where the name starts either way, and a
5432 ++ * zero means it's empty.
5433 ++ *
5434 ++ * Also, due to a bug in gcc, we don't want to use the
5435 ++ * real (differently sized) name arrays in the inode and
5436 ++ * link entries, but always the 'de_name[]' one in the
5437 ++ * fake struct entry.
5438 ++ *
5439 ++ * See
5440 ++ *
5441 ++ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99578#c6
5442 ++ *
5443 ++ * for details, but basically gcc will take the size of the
5444 ++ * 'name' array from one of the used union entries randomly.
5445 ++ *
5446 ++ * This use of 'de_name[]' (48 bytes) avoids the false positive
5447 ++ * warnings that would happen if gcc decides to use 'inode.di_name'
5448 ++ * (16 bytes) even when the pointer and size were to come from
5449 ++ * 'link.dl_name' (48 bytes).
5450 ++ *
5451 ++ * In all cases the actual name pointer itself is the same, it's
5452 ++ * only the gcc internal 'what is the size of this field' logic
5453 ++ * that can get confused.
5454 ++ */
5455 ++union qnx4_directory_entry {
5456 ++ struct {
5457 ++ const char de_name[48];
5458 ++ u8 de_pad[15];
5459 ++ u8 de_status;
5460 ++ };
5461 ++ struct qnx4_inode_entry inode;
5462 ++ struct qnx4_link_info link;
5463 ++};
5464 ++
5465 + static int qnx4_readdir(struct file *file, struct dir_context *ctx)
5466 + {
5467 + struct inode *inode = file_inode(file);
5468 + unsigned int offset;
5469 + struct buffer_head *bh;
5470 +- struct qnx4_inode_entry *de;
5471 +- struct qnx4_link_info *le;
5472 + unsigned long blknum;
5473 + int ix, ino;
5474 + int size;
5475 +@@ -38,27 +73,27 @@ static int qnx4_readdir(struct file *file, struct dir_context *ctx)
5476 + }
5477 + ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;
5478 + for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) {
5479 ++ union qnx4_directory_entry *de;
5480 ++
5481 + offset = ix * QNX4_DIR_ENTRY_SIZE;
5482 +- de = (struct qnx4_inode_entry *) (bh->b_data + offset);
5483 +- if (!de->di_fname[0])
5484 ++ de = (union qnx4_directory_entry *) (bh->b_data + offset);
5485 ++
5486 ++ if (!de->de_name[0])
5487 + continue;
5488 +- if (!(de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
5489 ++ if (!(de->de_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
5490 + continue;
5491 +- if (!(de->di_status & QNX4_FILE_LINK))
5492 +- size = QNX4_SHORT_NAME_MAX;
5493 +- else
5494 +- size = QNX4_NAME_MAX;
5495 +- size = strnlen(de->di_fname, size);
5496 +- QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname));
5497 +- if (!(de->di_status & QNX4_FILE_LINK))
5498 ++ if (!(de->de_status & QNX4_FILE_LINK)) {
5499 ++ size = sizeof(de->inode.di_fname);
5500 + ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
5501 +- else {
5502 +- le = (struct qnx4_link_info*)de;
5503 +- ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) *
5504 ++ } else {
5505 ++ size = sizeof(de->link.dl_fname);
5506 ++ ino = ( le32_to_cpu(de->link.dl_inode_blk) - 1 ) *
5507 + QNX4_INODES_PER_BLOCK +
5508 +- le->dl_inode_ndx;
5509 ++ de->link.dl_inode_ndx;
5510 + }
5511 +- if (!dir_emit(ctx, de->di_fname, size, ino, DT_UNKNOWN)) {
5512 ++ size = strnlen(de->de_name, size);
5513 ++ QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->de_name));
5514 ++ if (!dir_emit(ctx, de->de_name, size, ino, DT_UNKNOWN)) {
5515 + brelse(bh);
5516 + return 0;
5517 + }
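
A toy model of the union trick above, showing why every name access goes through the 48-byte de_name view: the pointer is the same for all three members, but only de_name gives strnlen() a bound gcc will not shrink to the 16-byte inode field. The 16/48 sizes match the QNX4 short and long name limits; the padding fields are illustrative, not the real on-disk layout:

#include <stdio.h>
#include <string.h>

struct inode_entry { char di_fname[16]; char rest[48]; };
struct link_info   { char dl_fname[48]; char rest[16]; };

union directory_entry {
	struct { const char de_name[48]; };	/* widest name view */
	struct inode_entry inode;
	struct link_info link;
};

int main(void)
{
	union directory_entry de = { .link.dl_fname = "a-rather-long-link-name" };

	/* same address either way, but a bound gcc accepts for both */
	printf("%zu\n", strnlen(de.de_name, sizeof(de.de_name)));
	return 0;
}
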
5518 +diff --git a/include/linux/compiler.h b/include/linux/compiler.h
5519 +index b67261a1e3e9c..3d5af56337bdb 100644
5520 +--- a/include/linux/compiler.h
5521 ++++ b/include/linux/compiler.h
5522 +@@ -188,6 +188,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
5523 + (typeof(ptr)) (__ptr + (off)); })
5524 + #endif
5525 +
5526 ++#define absolute_pointer(val) RELOC_HIDE((void *)(val), 0)
5527 ++
5528 + #ifndef OPTIMIZER_HIDE_VAR
5529 + /* Make the optimizer believe the variable can be manipulated arbitrarily. */
5530 + #define OPTIMIZER_HIDE_VAR(var) \
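
The new absolute_pointer() macro goes through RELOC_HIDE() so the optimizer cannot trace the resulting pointer back to an integer constant, which is what triggers gcc's false array-bounds and stringop warnings for fixed hardware addresses. A compilable sketch with the kernel macros copied locally for illustration (the 0x4000 scratch address is hypothetical):

#include <string.h>

/* Local copies of the kernel macros, for illustration only. */
#define RELOC_HIDE(ptr, off)						\
({									\
	unsigned long __ptr;						\
	__asm__ ("" : "=r"(__ptr) : "0"(ptr));				\
	(typeof(ptr)) (__ptr + (off));					\
})
#define absolute_pointer(val)	RELOC_HIDE((void *)(val), 0)

/* Hypothetical fixed-address scratch area: gcc 11 may warn on the
 * bare (void *)0x4000 form, but cannot "see" through the asm above. */
static void read_scratch(void *dst, size_t len)
{
	memcpy(dst, absolute_pointer(0x4000UL), len);
}
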
5531 +diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
5532 +index 6beb26b7151d2..86be8bf27b41b 100644
5533 +--- a/include/linux/pkeys.h
5534 ++++ b/include/linux/pkeys.h
5535 +@@ -4,6 +4,8 @@
5536 +
5537 + #include <linux/mm.h>
5538 +
5539 ++#define ARCH_DEFAULT_PKEY 0
5540 ++
5541 + #ifdef CONFIG_ARCH_HAS_PKEYS
5542 + #include <asm/pkeys.h>
5543 + #else /* ! CONFIG_ARCH_HAS_PKEYS */
5544 +diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
5545 +index 548a028f2dabb..2c1fc9212cf28 100644
5546 +--- a/include/linux/usb/hcd.h
5547 ++++ b/include/linux/usb/hcd.h
5548 +@@ -124,6 +124,7 @@ struct usb_hcd {
5549 + #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */
5550 + #define HCD_FLAG_DEAD 6 /* controller has died? */
5551 + #define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */
5552 ++#define HCD_FLAG_DEFER_RH_REGISTER 8 /* Defer roothub registration */
5553 +
5554 + /* The flags can be tested using these macros; they are likely to
5555 + * be slightly faster than test_bit().
5556 +@@ -134,6 +135,7 @@ struct usb_hcd {
5557 + #define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
5558 + #define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
5559 + #define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD))
5560 ++#define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER))
5561 +
5562 + /*
5563 + * Specifies if interfaces are authorized by default
5564 +diff --git a/include/net/dsa.h b/include/net/dsa.h
5565 +index d833f717e8022..004514a21e306 100644
5566 +--- a/include/net/dsa.h
5567 ++++ b/include/net/dsa.h
5568 +@@ -575,8 +575,16 @@ struct dsa_switch_ops {
5569 + int (*change_tag_protocol)(struct dsa_switch *ds, int port,
5570 + enum dsa_tag_protocol proto);
5571 +
5572 ++ /* Optional switch-wide initialization and destruction methods */
5573 + int (*setup)(struct dsa_switch *ds);
5574 + void (*teardown)(struct dsa_switch *ds);
5575 ++
5576 ++ /* Per-port initialization and destruction methods. Mandatory if the
5577 ++ * driver registers devlink port regions, optional otherwise.
5578 ++ */
5579 ++ int (*port_setup)(struct dsa_switch *ds, int port);
5580 ++ void (*port_teardown)(struct dsa_switch *ds, int port);
5581 ++
5582 + u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
5583 +
5584 + /*
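
For drivers, the new hooks pair up so that devlink port regions survive the reinit-as-unused path in dsa2.c below: whatever .port_setup registers, .port_teardown must unregister. A kernel-style sketch of the wiring, with the foo_* names, FOO_MAX_PORTS, and the region helpers all invented for illustration:

struct foo_priv {
	struct devlink_region *region[FOO_MAX_PORTS];
};

static int foo_port_setup(struct dsa_switch *ds, int port)
{
	struct foo_priv *priv = ds->priv;

	priv->region[port] = foo_register_port_region(ds, port);
	return PTR_ERR_OR_ZERO(priv->region[port]);
}

static void foo_port_teardown(struct dsa_switch *ds, int port)
{
	struct foo_priv *priv = ds->priv;

	foo_unregister_port_region(priv->region[port]);
}

static const struct dsa_switch_ops foo_switch_ops = {
	/* ...existing ops... */
	.port_setup	= foo_port_setup,
	.port_teardown	= foo_port_teardown,
};
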
5585 +diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
5586 +index bf9806fd13065..db4f2cec83606 100644
5587 +--- a/include/trace/events/erofs.h
5588 ++++ b/include/trace/events/erofs.h
5589 +@@ -35,20 +35,20 @@ TRACE_EVENT(erofs_lookup,
5590 + TP_STRUCT__entry(
5591 + __field(dev_t, dev )
5592 + __field(erofs_nid_t, nid )
5593 +- __field(const char *, name )
5594 ++ __string(name, dentry->d_name.name )
5595 + __field(unsigned int, flags )
5596 + ),
5597 +
5598 + TP_fast_assign(
5599 + __entry->dev = dir->i_sb->s_dev;
5600 + __entry->nid = EROFS_I(dir)->nid;
5601 +- __entry->name = dentry->d_name.name;
5602 ++ __assign_str(name, dentry->d_name.name);
5603 + __entry->flags = flags;
5604 + ),
5605 +
5606 + TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x",
5607 + show_dev_nid(__entry),
5608 +- __entry->name,
5609 ++ __get_str(name),
5610 + __entry->flags)
5611 + );
5612 +
5613 +diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
5614 +index 20e435fe657a1..3246f2c746969 100644
5615 +--- a/include/uapi/linux/android/binder.h
5616 ++++ b/include/uapi/linux/android/binder.h
5617 +@@ -225,7 +225,14 @@ struct binder_freeze_info {
5618 +
5619 + struct binder_frozen_status_info {
5620 + __u32 pid;
5621 ++
5622 ++ /* process received sync transactions since last frozen
5623 ++ * bit 0: received sync transaction after being frozen
5624 ++ * bit 1: new pending sync transaction during freezing
5625 ++ */
5626 + __u32 sync_recv;
5627 ++
5628 ++ /* process received async transactions since last frozen */
5629 + __u32 async_recv;
5630 + };
5631 +
5632 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
5633 +index 9d94ac6ff50c4..592b9b68cbd93 100644
5634 +--- a/kernel/bpf/verifier.c
5635 ++++ b/kernel/bpf/verifier.c
5636 +@@ -9641,6 +9641,8 @@ static int check_btf_line(struct bpf_verifier_env *env,
5637 + nr_linfo = attr->line_info_cnt;
5638 + if (!nr_linfo)
5639 + return 0;
5640 ++ if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
5641 ++ return -EINVAL;
5642 +
5643 + rec_size = attr->line_info_rec_size;
5644 + if (rec_size < MIN_BPF_LINEINFO_SIZE ||
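
The added bound rejects a line_info_cnt whose later multiplication by sizeof(struct bpf_line_info) would overflow a signed int before the allocation size is computed. The guard in isolation:

#include <limits.h>
#include <stdio.h>

/* Reject nr if nr * elem_size cannot be represented as an int. */
static int check_count(unsigned int nr, unsigned int elem_size)
{
	if (nr > (unsigned int)INT_MAX / elem_size)
		return -1;	/* -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	/* 0x20000000 * 16 == 2^33: overflows int, so this must fail */
	printf("%d\n", check_count(0x20000000u, 16));
	return 0;
}
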
5645 +diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
5646 +index f2faa13534e57..70519f67556f9 100644
5647 +--- a/kernel/dma/debug.c
5648 ++++ b/kernel/dma/debug.c
5649 +@@ -567,7 +567,8 @@ static void add_dma_entry(struct dma_debug_entry *entry)
5650 + pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
5651 + global_disable = true;
5652 + } else if (rc == -EEXIST) {
5653 +- pr_err("cacheline tracking EEXIST, overlapping mappings aren't supported\n");
5654 ++ err_printk(entry->dev, entry,
5655 ++ "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
5656 + }
5657 + }
5658 +
5659 +diff --git a/kernel/entry/kvm.c b/kernel/entry/kvm.c
5660 +index 49972ee99aff6..049fd06b4c3de 100644
5661 +--- a/kernel/entry/kvm.c
5662 ++++ b/kernel/entry/kvm.c
5663 +@@ -19,8 +19,10 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
5664 + if (ti_work & _TIF_NEED_RESCHED)
5665 + schedule();
5666 +
5667 +- if (ti_work & _TIF_NOTIFY_RESUME)
5668 ++ if (ti_work & _TIF_NOTIFY_RESUME) {
5669 + tracehook_notify_resume(NULL);
5670 ++ rseq_handle_notify_resume(NULL, NULL);
5671 ++ }
5672 +
5673 + ret = arch_xfer_to_guest_mode_handle_work(vcpu, ti_work);
5674 + if (ret)
5675 +diff --git a/kernel/rseq.c b/kernel/rseq.c
5676 +index 35f7bd0fced0e..6d45ac3dae7fb 100644
5677 +--- a/kernel/rseq.c
5678 ++++ b/kernel/rseq.c
5679 +@@ -282,9 +282,17 @@ void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
5680 +
5681 + if (unlikely(t->flags & PF_EXITING))
5682 + return;
5683 +- ret = rseq_ip_fixup(regs);
5684 +- if (unlikely(ret < 0))
5685 +- goto error;
5686 ++
5687 ++ /*
5688 ++ * regs is NULL if and only if the caller is in a syscall path. Skip
5689 ++ * fixup and leave rseq_cs as is so that rseq_syscall() will detect and
5690 ++ * kill a misbehaving userspace on debug kernels.
5691 ++ */
5692 ++ if (regs) {
5693 ++ ret = rseq_ip_fixup(regs);
5694 ++ if (unlikely(ret < 0))
5695 ++ goto error;
5696 ++ }
5697 + if (unlikely(rseq_update_cpu_id(t)))
5698 + goto error;
5699 + return;
5700 +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
5701 +index c221e4c3f625c..fa91f398f28b7 100644
5702 +--- a/kernel/trace/blktrace.c
5703 ++++ b/kernel/trace/blktrace.c
5704 +@@ -1605,6 +1605,14 @@ static int blk_trace_remove_queue(struct request_queue *q)
5705 + if (bt == NULL)
5706 + return -EINVAL;
5707 +
5708 ++ if (bt->trace_state == Blktrace_running) {
5709 ++ bt->trace_state = Blktrace_stopped;
5710 ++ spin_lock_irq(&running_trace_lock);
5711 ++ list_del_init(&bt->running_list);
5712 ++ spin_unlock_irq(&running_trace_lock);
5713 ++ relay_flush(bt->rchan);
5714 ++ }
5715 ++
5716 + put_probe_ref();
5717 + synchronize_rcu();
5718 + blk_trace_free(bt);
5719 +diff --git a/mm/debug.c b/mm/debug.c
5720 +index e73fe0a8ec3d2..e61037cded980 100644
5721 +--- a/mm/debug.c
5722 ++++ b/mm/debug.c
5723 +@@ -24,7 +24,8 @@ const char *migrate_reason_names[MR_TYPES] = {
5724 + "syscall_or_cpuset",
5725 + "mempolicy_mbind",
5726 + "numa_misplaced",
5727 +- "cma",
5728 ++ "contig_range",
5729 ++ "longterm_pin",
5730 + };
5731 +
5732 + const struct trace_print_flags pageflag_names[] = {
5733 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
5734 +index 83811c976c0cb..7df9fde18004c 100644
5735 +--- a/mm/memory-failure.c
5736 ++++ b/mm/memory-failure.c
5737 +@@ -1127,7 +1127,7 @@ static int page_action(struct page_state *ps, struct page *p,
5738 + */
5739 + static inline bool HWPoisonHandlable(struct page *page)
5740 + {
5741 +- return PageLRU(page) || __PageMovable(page);
5742 ++ return PageLRU(page) || __PageMovable(page) || is_free_buddy_page(page);
5743 + }
5744 +
5745 + static int __get_hwpoison_page(struct page *page)
5746 +diff --git a/mm/util.c b/mm/util.c
5747 +index 9043d03750a73..c18202b3e659d 100644
5748 +--- a/mm/util.c
5749 ++++ b/mm/util.c
5750 +@@ -768,7 +768,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
5751 + size_t *lenp, loff_t *ppos)
5752 + {
5753 + struct ctl_table t;
5754 +- int new_policy;
5755 ++ int new_policy = -1;
5756 + int ret;
5757 +
5758 + /*
5759 +@@ -786,7 +786,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
5760 + t = *table;
5761 + t.data = &new_policy;
5762 + ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
5763 +- if (ret)
5764 ++ if (ret || new_policy == -1)
5765 + return ret;
5766 +
5767 + mm_compute_batch(new_policy);
5768 +diff --git a/net/core/dev.c b/net/core/dev.c
5769 +index 8f1a47ad6781a..693f15a056304 100644
5770 +--- a/net/core/dev.c
5771 ++++ b/net/core/dev.c
5772 +@@ -6988,12 +6988,16 @@ EXPORT_SYMBOL(napi_disable);
5773 + */
5774 + void napi_enable(struct napi_struct *n)
5775 + {
5776 +- BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
5777 +- smp_mb__before_atomic();
5778 +- clear_bit(NAPI_STATE_SCHED, &n->state);
5779 +- clear_bit(NAPI_STATE_NPSVC, &n->state);
5780 +- if (n->dev->threaded && n->thread)
5781 +- set_bit(NAPI_STATE_THREADED, &n->state);
5782 ++ unsigned long val, new;
5783 ++
5784 ++ do {
5785 ++ val = READ_ONCE(n->state);
5786 ++ BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
5787 ++
5788 ++ new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
5789 ++ if (n->dev->threaded && n->thread)
5790 ++ new |= NAPIF_STATE_THREADED;
5791 ++ } while (cmpxchg(&n->state, val, new) != val);
5792 + }
5793 + EXPORT_SYMBOL(napi_enable);
5794 +
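
napi_enable() now folds its flag updates into a single cmpxchg() retry loop, so a concurrent writer between the read and the store forces a retry instead of being silently overwritten by separate non-atomic bit operations. A userspace analogue with C11 atomics (flag names shortened):

#include <stdatomic.h>
#include <stdbool.h>

#define F_SCHED		(1UL << 0)
#define F_NPSVC		(1UL << 1)
#define F_THREADED	(1UL << 2)

/* Clear two flags and conditionally set a third in one atomic step,
 * retrying if another thread changed the state in between. */
static void enable_flags(atomic_ulong *state, bool threaded)
{
	unsigned long val, new;

	do {
		val = atomic_load(state);
		new = val & ~(F_SCHED | F_NPSVC);
		if (threaded)
			new |= F_THREADED;
	} while (!atomic_compare_exchange_weak(state, &val, new));
}
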
5795 +diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
5796 +index 79267b00af68f..76ed5ef0e36a8 100644
5797 +--- a/net/dsa/dsa2.c
5798 ++++ b/net/dsa/dsa2.c
5799 +@@ -342,6 +342,7 @@ static int dsa_port_setup(struct dsa_port *dp)
5800 + {
5801 + struct devlink_port *dlp = &dp->devlink_port;
5802 + bool dsa_port_link_registered = false;
5803 ++ struct dsa_switch *ds = dp->ds;
5804 + bool dsa_port_enabled = false;
5805 + int err = 0;
5806 +
5807 +@@ -351,6 +352,12 @@ static int dsa_port_setup(struct dsa_port *dp)
5808 + INIT_LIST_HEAD(&dp->fdbs);
5809 + INIT_LIST_HEAD(&dp->mdbs);
5810 +
5811 ++ if (ds->ops->port_setup) {
5812 ++ err = ds->ops->port_setup(ds, dp->index);
5813 ++ if (err)
5814 ++ return err;
5815 ++ }
5816 ++
5817 + switch (dp->type) {
5818 + case DSA_PORT_TYPE_UNUSED:
5819 + dsa_port_disable(dp);
5820 +@@ -393,8 +400,11 @@ static int dsa_port_setup(struct dsa_port *dp)
5821 + dsa_port_disable(dp);
5822 + if (err && dsa_port_link_registered)
5823 + dsa_port_link_unregister_of(dp);
5824 +- if (err)
5825 ++ if (err) {
5826 ++ if (ds->ops->port_teardown)
5827 ++ ds->ops->port_teardown(ds, dp->index);
5828 + return err;
5829 ++ }
5830 +
5831 + dp->setup = true;
5832 +
5833 +@@ -446,11 +456,15 @@ static int dsa_port_devlink_setup(struct dsa_port *dp)
5834 + static void dsa_port_teardown(struct dsa_port *dp)
5835 + {
5836 + struct devlink_port *dlp = &dp->devlink_port;
5837 ++ struct dsa_switch *ds = dp->ds;
5838 + struct dsa_mac_addr *a, *tmp;
5839 +
5840 + if (!dp->setup)
5841 + return;
5842 +
5843 ++ if (ds->ops->port_teardown)
5844 ++ ds->ops->port_teardown(ds, dp->index);
5845 ++
5846 + devlink_port_type_clear(dlp);
5847 +
5848 + switch (dp->type) {
5849 +@@ -494,6 +508,36 @@ static void dsa_port_devlink_teardown(struct dsa_port *dp)
5850 + dp->devlink_port_setup = false;
5851 + }
5852 +
5853 ++/* Destroy the current devlink port, and create a new one which has the UNUSED
5854 ++ * flavour. At this point, any call to ds->ops->port_setup has been already
5855 ++ * balanced out by a call to ds->ops->port_teardown, so we know that any
5856 ++ * devlink port regions the driver had are now unregistered. We then call its
5857 ++ * ds->ops->port_setup again, in order for the driver to re-create them on the
5858 ++ * new devlink port.
5859 ++ */
5860 ++static int dsa_port_reinit_as_unused(struct dsa_port *dp)
5861 ++{
5862 ++ struct dsa_switch *ds = dp->ds;
5863 ++ int err;
5864 ++
5865 ++ dsa_port_devlink_teardown(dp);
5866 ++ dp->type = DSA_PORT_TYPE_UNUSED;
5867 ++ err = dsa_port_devlink_setup(dp);
5868 ++ if (err)
5869 ++ return err;
5870 ++
5871 ++ if (ds->ops->port_setup) {
5872 ++ /* On error, leave the devlink port registered,
5873 ++ * dsa_switch_teardown will clean it up later.
5874 ++ */
5875 ++ err = ds->ops->port_setup(ds, dp->index);
5876 ++ if (err)
5877 ++ return err;
5878 ++ }
5879 ++
5880 ++ return 0;
5881 ++}
5882 ++
5883 + static int dsa_devlink_info_get(struct devlink *dl,
5884 + struct devlink_info_req *req,
5885 + struct netlink_ext_ack *extack)
5886 +@@ -748,7 +792,7 @@ static int dsa_switch_setup(struct dsa_switch *ds)
5887 + devlink_params_publish(ds->devlink);
5888 +
5889 + if (!ds->slave_mii_bus && ds->ops->phy_read) {
5890 +- ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
5891 ++ ds->slave_mii_bus = mdiobus_alloc();
5892 + if (!ds->slave_mii_bus) {
5893 + err = -ENOMEM;
5894 + goto teardown;
5895 +@@ -758,13 +802,16 @@ static int dsa_switch_setup(struct dsa_switch *ds)
5896 +
5897 + err = mdiobus_register(ds->slave_mii_bus);
5898 + if (err < 0)
5899 +- goto teardown;
5900 ++ goto free_slave_mii_bus;
5901 + }
5902 +
5903 + ds->setup = true;
5904 +
5905 + return 0;
5906 +
5907 ++free_slave_mii_bus:
5908 ++ if (ds->slave_mii_bus && ds->ops->phy_read)
5909 ++ mdiobus_free(ds->slave_mii_bus);
5910 + teardown:
5911 + if (ds->ops->teardown)
5912 + ds->ops->teardown(ds);
5913 +@@ -789,8 +836,11 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
5914 + if (!ds->setup)
5915 + return;
5916 +
5917 +- if (ds->slave_mii_bus && ds->ops->phy_read)
5918 ++ if (ds->slave_mii_bus && ds->ops->phy_read) {
5919 + mdiobus_unregister(ds->slave_mii_bus);
5920 ++ mdiobus_free(ds->slave_mii_bus);
5921 ++ ds->slave_mii_bus = NULL;
5922 ++ }
5923 +
5924 + dsa_switch_unregister_notifier(ds);
5925 +
5926 +@@ -850,12 +900,9 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
5927 + list_for_each_entry(dp, &dst->ports, list) {
5928 + err = dsa_port_setup(dp);
5929 + if (err) {
5930 +- dsa_port_devlink_teardown(dp);
5931 +- dp->type = DSA_PORT_TYPE_UNUSED;
5932 +- err = dsa_port_devlink_setup(dp);
5933 ++ err = dsa_port_reinit_as_unused(dp);
5934 + if (err)
5935 + goto teardown;
5936 +- continue;
5937 + }
5938 + }
5939 +
5940 +@@ -960,6 +1007,7 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
5941 + teardown_master:
5942 + dsa_tree_teardown_master(dst);
5943 + teardown_switches:
5944 ++ dsa_tree_teardown_ports(dst);
5945 + dsa_tree_teardown_switches(dst);
5946 + teardown_default_cpu:
5947 + dsa_tree_teardown_default_cpu(dst);
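
Aside: the net/dsa hunks above all enforce one discipline -- every resource
acquired during setup is released on both the error path and the teardown
path, in reverse order: port_setup() is balanced by port_teardown(), and the
reinit helper tears down the devlink port before re-creating it as UNUSED.
A minimal, self-contained C sketch of that unwind structure (every name in
it is hypothetical; only the shape mirrors the patch):

    #include <stdio.h>

    struct dev {
            int setup;
            int enabled;
    };

    static int dev_setup(struct dev *d)
    {
            d->setup = 1;
            return 0;
    }

    static void dev_teardown(struct dev *d)
    {
            d->setup = 0;
    }

    static int dev_enable(struct dev *d)
    {
            d->enabled = 1;
            return 0;               /* a real driver would return -errno */
    }

    /* Each step that succeeded is undone, in reverse order, when a
     * later step fails -- the structure dsa_port_setup() gains above.
     */
    static int dev_bring_up(struct dev *d)
    {
            int err;

            err = dev_setup(d);     /* pairs with dev_teardown() */
            if (err)
                    return err;

            err = dev_enable(d);
            if (err) {
                    dev_teardown(d);        /* balance the setup */
                    return err;
            }
            return 0;
    }

    int main(void)
    {
            struct dev d = { 0, 0 };

            printf("bring_up: %d\n", dev_bring_up(&d));
            return 0;
    }

The mdiobus hunks apply the same discipline to allocation: once
devm_mdiobus_alloc() is replaced by mdiobus_alloc(), both the error path
(free_slave_mii_bus) and dsa_switch_teardown() must call mdiobus_free()
explicitly, since nothing frees the bus automatically anymore.
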
5948 +diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
5949 +index 75ca4b6e484f4..9e8100728d464 100644
5950 +--- a/net/ipv4/nexthop.c
5951 ++++ b/net/ipv4/nexthop.c
5952 +@@ -1982,6 +1982,8 @@ static int replace_nexthop_grp(struct net *net, struct nexthop *old,
5953 + rcu_assign_pointer(old->nh_grp, newg);
5954 +
5955 + if (newg->resilient) {
5956 ++ /* Make sure concurrent readers are not using 'oldg' anymore. */
5957 ++ synchronize_net();
5958 + rcu_assign_pointer(oldg->res_table, tmp_table);
5959 + rcu_assign_pointer(oldg->spare->res_table, tmp_table);
5960 + }
5961 +@@ -3565,6 +3567,7 @@ static struct notifier_block nh_netdev_notifier = {
5962 + };
5963 +
5964 + static int nexthops_dump(struct net *net, struct notifier_block *nb,
5965 ++ enum nexthop_event_type event_type,
5966 + struct netlink_ext_ack *extack)
5967 + {
5968 + struct rb_root *root = &net->nexthop.rb_root;
5969 +@@ -3575,8 +3578,7 @@ static int nexthops_dump(struct net *net, struct notifier_block *nb,
5970 + struct nexthop *nh;
5971 +
5972 + nh = rb_entry(node, struct nexthop, rb_node);
5973 +- err = call_nexthop_notifier(nb, net, NEXTHOP_EVENT_REPLACE, nh,
5974 +- extack);
5975 ++ err = call_nexthop_notifier(nb, net, event_type, nh, extack);
5976 + if (err)
5977 + break;
5978 + }
5979 +@@ -3590,7 +3592,7 @@ int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
5980 + int err;
5981 +
5982 + rtnl_lock();
5983 +- err = nexthops_dump(net, nb, extack);
5984 ++ err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
5985 + if (err)
5986 + goto unlock;
5987 + err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
5988 +@@ -3603,8 +3605,17 @@ EXPORT_SYMBOL(register_nexthop_notifier);
5989 +
5990 + int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
5991 + {
5992 +- return blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
5993 +- nb);
5994 ++ int err;
5995 ++
5996 ++ rtnl_lock();
5997 ++ err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
5998 ++ nb);
5999 ++ if (err)
6000 ++ goto unlock;
6001 ++ nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
6002 ++unlock:
6003 ++ rtnl_unlock();
6004 ++ return err;
6005 + }
6006 + EXPORT_SYMBOL(unregister_nexthop_notifier);
6007 +
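
Aside: the synchronize_net() added above is the standard RCU
replace-then-wait step -- after rcu_assign_pointer() publishes the new
group, the writer waits until no reader can still be traversing 'oldg'
before repointing its res_table. A minimal userspace sketch of the same
pattern, assuming liburcu is installed (the -lurcu link flag is
flavor-dependent; the struct and its fields are hypothetical):

    #define _LGPL_SOURCE
    #include <urcu.h>       /* liburcu; link with -lurcu */
    #include <stdio.h>
    #include <stdlib.h>

    struct grp {
            int id;
    };

    static struct grp *active;      /* RCU-protected pointer */

    static void replace_grp(struct grp *newg)
    {
            struct grp *oldg = active;

            rcu_assign_pointer(active, newg); /* readers now see newg */
            synchronize_rcu();      /* wait out all current readers */
            free(oldg);             /* only now is oldg safe to touch */
    }

    int main(void)
    {
            struct grp *first = calloc(1, sizeof(*first));
            struct grp *second = calloc(1, sizeof(*second));

            rcu_register_thread();
            second->id = 1;
            active = first;
            replace_grp(second);
            printf("active id: %d\n", active->id);
            rcu_unregister_thread();
            free(active);
            return 0;
    }

The unregister_nexthop_notifier() hunk is the mirror image: take
rtnl_lock(), unregister, then replay NEXTHOP_EVENT_DEL so the departing
listener flushes the state it learned from earlier REPLACE events.
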
6008 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
6009 +index ef75c9b05f17e..68e94e9f5089a 100644
6010 +--- a/net/ipv6/ip6_fib.c
6011 ++++ b/net/ipv6/ip6_fib.c
6012 +@@ -1378,7 +1378,6 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
6013 + int err = -ENOMEM;
6014 + int allow_create = 1;
6015 + int replace_required = 0;
6016 +- int sernum = fib6_new_sernum(info->nl_net);
6017 +
6018 + if (info->nlh) {
6019 + if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
6020 +@@ -1478,7 +1477,7 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
6021 + if (!err) {
6022 + if (rt->nh)
6023 + list_add(&rt->nh_list, &rt->nh->f6i_list);
6024 +- __fib6_update_sernum_upto_root(rt, sernum);
6025 ++ __fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net));
6026 + fib6_start_gc(info->nl_net, rt);
6027 + }
6028 +
6029 +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
6030 +index acbead7cf50f0..4d2abdd3cd3b1 100644
6031 +--- a/net/mptcp/protocol.c
6032 ++++ b/net/mptcp/protocol.c
6033 +@@ -1291,7 +1291,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
6034 + goto alloc_skb;
6035 + }
6036 +
6037 +- must_collapse = (info->size_goal - skb->len > 0) &&
6038 ++ must_collapse = (info->size_goal > skb->len) &&
6039 + (skb_shinfo(skb)->nr_frags < sysctl_max_skb_frags);
6040 + if (must_collapse) {
6041 + size_bias = skb->len;
6042 +@@ -1300,7 +1300,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
6043 + }
6044 +
6045 + alloc_skb:
6046 +- if (!must_collapse && !ssk->sk_tx_skb_cache &&
6047 ++ if (!must_collapse &&
6048 + !mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held))
6049 + return 0;
6050 +
6051 +diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
6052 +index e286dafd6e886..6ec1ebe878ae0 100644
6053 +--- a/net/smc/smc_clc.c
6054 ++++ b/net/smc/smc_clc.c
6055 +@@ -230,7 +230,8 @@ static int smc_clc_prfx_set(struct socket *clcsock,
6056 + goto out_rel;
6057 + }
6058 + /* get address to which the internal TCP socket is bound */
6059 +- kernel_getsockname(clcsock, (struct sockaddr *)&addrs);
6060 ++ if (kernel_getsockname(clcsock, (struct sockaddr *)&addrs) < 0)
6061 ++ goto out_rel;
6062 + /* analyze IP specific data of net_device belonging to TCP socket */
6063 + addr6 = (struct sockaddr_in6 *)&addrs;
6064 + rcu_read_lock();
6065 +diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
6066 +index c160ff50c053a..116cfd6fac1ff 100644
6067 +--- a/net/smc/smc_core.c
6068 ++++ b/net/smc/smc_core.c
6069 +@@ -1474,7 +1474,9 @@ static void smc_conn_abort_work(struct work_struct *work)
6070 + abort_work);
6071 + struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
6072 +
6073 ++ lock_sock(&smc->sk);
6074 + smc_conn_kill(conn, true);
6075 ++ release_sock(&smc->sk);
6076 + sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
6077 + }
6078 +
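
Aside: the smc_core hunk serializes the deferred abort work with normal
socket processing by taking the socket lock around smc_conn_kill(). A
small pthreads analogue of "deferred work takes the same lock as the
fast path" (all names hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    struct conn {
            pthread_mutex_t lock;   /* stands in for the socket lock */
            int alive;
    };

    /* Deferred teardown takes the same lock the normal paths hold,
     * just as smc_conn_abort_work() now brackets smc_conn_kill()
     * with lock_sock()/release_sock().
     */
    static void *abort_work(void *arg)
    {
            struct conn *c = arg;

            pthread_mutex_lock(&c->lock);
            c->alive = 0;           /* mutate state only under the lock */
            pthread_mutex_unlock(&c->lock);
            return NULL;
    }

    int main(void)                  /* build with -lpthread */
    {
            struct conn c = { PTHREAD_MUTEX_INITIALIZER, 1 };
            pthread_t t;

            pthread_create(&t, NULL, abort_work, &c);
            pthread_join(t, NULL);
            printf("alive = %d\n", c.alive);
            return 0;
    }
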
6079 +diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c
6080 +index d8886720e83d8..8441e3e1aaac3 100644
6081 +--- a/tools/lib/perf/evsel.c
6082 ++++ b/tools/lib/perf/evsel.c
6083 +@@ -43,7 +43,7 @@ void perf_evsel__delete(struct perf_evsel *evsel)
6084 + free(evsel);
6085 + }
6086 +
6087 +-#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
6088 ++#define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y))
6089 + #define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)
6090 +
6091 + int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
6092 +@@ -54,7 +54,10 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
6093 + int cpu, thread;
6094 + for (cpu = 0; cpu < ncpus; cpu++) {
6095 + for (thread = 0; thread < nthreads; thread++) {
6096 +- FD(evsel, cpu, thread) = -1;
6097 ++ int *fd = FD(evsel, cpu, thread);
6098 ++
6099 ++ if (fd)
6100 ++ *fd = -1;
6101 + }
6102 + }
6103 + }
6104 +@@ -80,7 +83,7 @@ sys_perf_event_open(struct perf_event_attr *attr,
6105 + static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd)
6106 + {
6107 + struct perf_evsel *leader = evsel->leader;
6108 +- int fd;
6109 ++ int *fd;
6110 +
6111 + if (evsel == leader) {
6112 + *group_fd = -1;
6113 +@@ -95,10 +98,10 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *grou
6114 + return -ENOTCONN;
6115 +
6116 + fd = FD(leader, cpu, thread);
6117 +- if (fd == -1)
6118 ++ if (fd == NULL || *fd == -1)
6119 + return -EBADF;
6120 +
6121 +- *group_fd = fd;
6122 ++ *group_fd = *fd;
6123 +
6124 + return 0;
6125 + }
6126 +@@ -138,7 +141,11 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
6127 +
6128 + for (cpu = 0; cpu < cpus->nr; cpu++) {
6129 + for (thread = 0; thread < threads->nr; thread++) {
6130 +- int fd, group_fd;
6131 ++ int fd, group_fd, *evsel_fd;
6132 ++
6133 ++ evsel_fd = FD(evsel, cpu, thread);
6134 ++ if (evsel_fd == NULL)
6135 ++ return -EINVAL;
6136 +
6137 + err = get_group_fd(evsel, cpu, thread, &group_fd);
6138 + if (err < 0)
6139 +@@ -151,7 +158,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
6140 + if (fd < 0)
6141 + return -errno;
6142 +
6143 +- FD(evsel, cpu, thread) = fd;
6144 ++ *evsel_fd = fd;
6145 + }
6146 + }
6147 +
6148 +@@ -163,9 +170,12 @@ static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
6149 + int thread;
6150 +
6151 + for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
6152 +- if (FD(evsel, cpu, thread) >= 0)
6153 +- close(FD(evsel, cpu, thread));
6154 +- FD(evsel, cpu, thread) = -1;
6155 ++ int *fd = FD(evsel, cpu, thread);
6156 ++
6157 ++ if (fd && *fd >= 0) {
6158 ++ close(*fd);
6159 ++ *fd = -1;
6160 ++ }
6161 + }
6162 + }
6163 +
6164 +@@ -209,13 +219,12 @@ void perf_evsel__munmap(struct perf_evsel *evsel)
6165 +
6166 + for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
6167 + for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
6168 +- int fd = FD(evsel, cpu, thread);
6169 +- struct perf_mmap *map = MMAP(evsel, cpu, thread);
6170 ++ int *fd = FD(evsel, cpu, thread);
6171 +
6172 +- if (fd < 0)
6173 ++ if (fd == NULL || *fd < 0)
6174 + continue;
6175 +
6176 +- perf_mmap__munmap(map);
6177 ++ perf_mmap__munmap(MMAP(evsel, cpu, thread));
6178 + }
6179 + }
6180 +
6181 +@@ -239,15 +248,16 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
6182 +
6183 + for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
6184 + for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
6185 +- int fd = FD(evsel, cpu, thread);
6186 +- struct perf_mmap *map = MMAP(evsel, cpu, thread);
6187 ++ int *fd = FD(evsel, cpu, thread);
6188 ++ struct perf_mmap *map;
6189 +
6190 +- if (fd < 0)
6191 ++ if (fd == NULL || *fd < 0)
6192 + continue;
6193 +
6194 ++ map = MMAP(evsel, cpu, thread);
6195 + perf_mmap__init(map, NULL, false, NULL);
6196 +
6197 +- ret = perf_mmap__mmap(map, &mp, fd, cpu);
6198 ++ ret = perf_mmap__mmap(map, &mp, *fd, cpu);
6199 + if (ret) {
6200 + perf_evsel__munmap(evsel);
6201 + return ret;
6202 +@@ -260,7 +270,9 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
6203 +
6204 + void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
6205 + {
6206 +- if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL)
6207 ++ int *fd = FD(evsel, cpu, thread);
6208 ++
6209 ++ if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL)
6210 + return NULL;
6211 +
6212 + return MMAP(evsel, cpu, thread)->base;
6213 +@@ -295,17 +307,18 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
6214 + struct perf_counts_values *count)
6215 + {
6216 + size_t size = perf_evsel__read_size(evsel);
6217 ++ int *fd = FD(evsel, cpu, thread);
6218 +
6219 + memset(count, 0, sizeof(*count));
6220 +
6221 +- if (FD(evsel, cpu, thread) < 0)
6222 ++ if (fd == NULL || *fd < 0)
6223 + return -EINVAL;
6224 +
6225 + if (MMAP(evsel, cpu, thread) &&
6226 + !perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
6227 + return 0;
6228 +
6229 +- if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
6230 ++ if (readn(*fd, count->values, size) <= 0)
6231 + return -errno;
6232 +
6233 + return 0;
6234 +@@ -318,8 +331,13 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
6235 + int thread;
6236 +
6237 + for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
6238 +- int fd = FD(evsel, cpu, thread),
6239 +- err = ioctl(fd, ioc, arg);
6240 ++ int err;
6241 ++ int *fd = FD(evsel, cpu, thread);
6242 ++
6243 ++ if (fd == NULL || *fd < 0)
6244 ++ return -1;
6245 ++
6246 ++ err = ioctl(*fd, ioc, arg);
6247 +
6248 + if (err)
6249 + return err;
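
Aside: the evsel.c hunks rework the FD() macro from dereferencing
xyarray__entry() in place to returning the slot's address, so callers can
distinguish a missing slot (NULL) from a closed descriptor (*fd == -1)
and write through the pointer safely. A tiny self-contained sketch of
that accessor pattern (hypothetical names):

    #include <stdio.h>
    #include <stdlib.h>

    struct table {
            int *data;
            int nx, ny;
    };

    /* Return the slot's address, or NULL if it does not exist, so the
     * caller can tell "no slot" apart from "slot holds -1".
     */
    static int *slot(struct table *t, int x, int y)
    {
            if (!t->data || x < 0 || x >= t->nx || y < 0 || y >= t->ny)
                    return NULL;
            return &t->data[x * t->ny + y];
    }

    int main(void)
    {
            struct table t = { calloc(4, sizeof(int)), 2, 2 };
            int *fd = slot(&t, 1, 1);

            if (fd)
                    *fd = -1;       /* mark closed, like alloc_fd above */
            printf("fd = %d\n", fd ? *fd : -2);
            printf("missing slot: %s\n", slot(&t, 5, 5) ? "set" : "NULL");
            free(t.data);
            return 0;
    }

With the accessor returning a pointer, perf_evsel__open() and friends can
bail out with -EINVAL on a NULL slot instead of faulting, which is exactly
what the hunks above add.
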
6250 +diff --git a/tools/testing/selftests/arm64/signal/test_signals.h b/tools/testing/selftests/arm64/signal/test_signals.h
6251 +index f96baf1cef1a9..ebe8694dbef0f 100644
6252 +--- a/tools/testing/selftests/arm64/signal/test_signals.h
6253 ++++ b/tools/testing/selftests/arm64/signal/test_signals.h
6254 +@@ -33,10 +33,12 @@
6255 + */
6256 + enum {
6257 + FSSBS_BIT,
6258 ++ FSVE_BIT,
6259 + FMAX_END
6260 + };
6261 +
6262 + #define FEAT_SSBS (1UL << FSSBS_BIT)
6263 ++#define FEAT_SVE (1UL << FSVE_BIT)
6264 +
6265 + /*
6266 + * A descriptor used to describe and configure a test case.
6267 +diff --git a/tools/testing/selftests/arm64/signal/test_signals_utils.c b/tools/testing/selftests/arm64/signal/test_signals_utils.c
6268 +index 2de6e5ed5e258..22722abc9dfa9 100644
6269 +--- a/tools/testing/selftests/arm64/signal/test_signals_utils.c
6270 ++++ b/tools/testing/selftests/arm64/signal/test_signals_utils.c
6271 +@@ -26,6 +26,7 @@ static int sig_copyctx = SIGTRAP;
6272 +
6273 + static char const *const feats_names[FMAX_END] = {
6274 + " SSBS ",
6275 ++ " SVE ",
6276 + };
6277 +
6278 + #define MAX_FEATS_SZ 128
6279 +@@ -263,16 +264,21 @@ int test_init(struct tdescr *td)
6280 + */
6281 + if (getauxval(AT_HWCAP) & HWCAP_SSBS)
6282 + td->feats_supported |= FEAT_SSBS;
6283 +- if (feats_ok(td))
6284 ++ if (getauxval(AT_HWCAP) & HWCAP_SVE)
6285 ++ td->feats_supported |= FEAT_SVE;
6286 ++ if (feats_ok(td)) {
6287 + fprintf(stderr,
6288 + "Required Features: [%s] supported\n",
6289 + feats_to_string(td->feats_required &
6290 + td->feats_supported));
6291 +- else
6292 ++ } else {
6293 + fprintf(stderr,
6294 + "Required Features: [%s] NOT supported\n",
6295 + feats_to_string(td->feats_required &
6296 + ~td->feats_supported));
6297 ++ td->result = KSFT_SKIP;
6298 ++ return 0;
6299 ++ }
6300 + }
6301 +
6302 + /* Perform test specific additional initialization */