Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.1 commit in: /
Date: Fri, 31 May 2019 14:04:26
Message-Id: 1559311442.7b28b2e87d40d965b55c189c5dceb90e6b9d31d2.mpagano@gentoo
commit: 7b28b2e87d40d965b55c189c5dceb90e6b9d31d2
Author: Mike Pagano <mpagano@gentoo.org>
AuthorDate: Fri May 31 14:04:02 2019 +0000
Commit: Mike Pagano <mpagano@gentoo.org>
CommitDate: Fri May 31 14:04:02 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7b28b2e8

Linux patch 5.1.6

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

0000_README | 4 +
1005_linux-5.1.6.patch | 14203 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 14207 insertions(+)

diff --git a/0000_README b/0000_README
index 2431699..7713f53 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch: 1004_linux-5.1.5.patch
 From: http://www.kernel.org
 Desc: Linux 5.1.5
 
+Patch: 1005_linux-5.1.6.patch
+From: http://www.kernel.org
+Desc: Linux 5.1.6
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
diff --git a/1005_linux-5.1.6.patch b/1005_linux-5.1.6.patch
new file mode 100644
index 0000000..897ab6d
--- /dev/null
+++ b/1005_linux-5.1.6.patch
@@ -0,0 +1,14203 @@
+diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
+index d1e2bb801e1b..6e97a3f771ef 100644
+--- a/Documentation/arm64/silicon-errata.txt
++++ b/Documentation/arm64/silicon-errata.txt
+@@ -61,6 +61,7 @@ stable kernels.
+ | ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
+ | ARM | Cortex-A76 | #1165522 | ARM64_ERRATUM_1165522 |
+ | ARM | Cortex-A76 | #1286807 | ARM64_ERRATUM_1286807 |
++| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |
+ | ARM | MMU-500 | #841119,#826419 | N/A |
+ | | | | |
+ | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
+diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+index 5d181fc3cc18..4a78ba8b85bc 100644
+--- a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
++++ b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+@@ -59,7 +59,8 @@ Required properties:
+ one for each entry in reset-names.
+ - reset-names: "phy" for reset of phy block,
+ "common" for phy common block reset,
+- "cfg" for phy's ahb cfg block reset.
++ "cfg" for phy's ahb cfg block reset,
++ "ufsphy" for the PHY reset in the UFS controller.
+
+ For "qcom,ipq8074-qmp-pcie-phy" must contain:
+ "phy", "common".
+@@ -74,7 +75,8 @@ Required properties:
+ "phy", "common".
+ For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
+ "phy", "common".
+- For "qcom,sdm845-qmp-ufs-phy": no resets are listed.
++ For "qcom,sdm845-qmp-ufs-phy": must contain:
++ "ufsphy".
+
+ - vdda-phy-supply: Phandle to a regulator supply to PHY core block.
+ - vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
+diff --git a/Makefile b/Makefile
+index 24a16a544ffd..d8bdd2bb55dc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 1
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+
+diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
+index 07e27f212dc7..d2453e2d3f1f 100644
+--- a/arch/arm/include/asm/cp15.h
++++ b/arch/arm/include/asm/cp15.h
+@@ -68,6 +68,8 @@
+ #define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
+ #define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
+
++#define CNTVCT __ACCESS_CP15_64(1, c14)
++
+ extern unsigned long cr_alignment; /* defined in entry-armv.S */
+
+ static inline unsigned long get_cr(void)
+diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
+index a9dd619c6c29..7bdbf5d5c47d 100644
+--- a/arch/arm/vdso/vgettimeofday.c
++++ b/arch/arm/vdso/vgettimeofday.c
+@@ -18,9 +18,9 @@
+ #include <linux/compiler.h>
+ #include <linux/hrtimer.h>
+ #include <linux/time.h>
+-#include <asm/arch_timer.h>
+ #include <asm/barrier.h>
+ #include <asm/bug.h>
++#include <asm/cp15.h>
+ #include <asm/page.h>
+ #include <asm/unistd.h>
+ #include <asm/vdso_datapage.h>
+@@ -123,7 +123,8 @@ static notrace u64 get_ns(struct vdso_data *vdata)
+ u64 cycle_now;
+ u64 nsec;
+
+- cycle_now = arch_counter_get_cntvct();
++ isb();
++ cycle_now = read_sysreg(CNTVCT);
+
+ cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask;
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 7e34b9eba5de..d218729ec852 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -517,6 +517,24 @@ config ARM64_ERRATUM_1286807
+
+ If unsure, say Y.
+
++config ARM64_ERRATUM_1463225
++ bool "Cortex-A76: Software Step might prevent interrupt recognition"
++ default y
++ help
++ This option adds a workaround for Arm Cortex-A76 erratum 1463225.
++
++ On the affected Cortex-A76 cores (r0p0 to r3p1), software stepping
++ of a system call instruction (SVC) can prevent recognition of
++ subsequent interrupts when software stepping is disabled in the
++ exception handler of the system call and either kernel debugging
++ is enabled or VHE is in use.
++
++ Work around the erratum by triggering a dummy step exception
++ when handling a system call from a task that is being stepped
++ in a VHE configuration of the kernel.
++
++ If unsure, say Y.
++
+ config CAVIUM_ERRATUM_22375
+ bool "Cavium erratum 22375, 24313"
+ default y
+@@ -1347,6 +1365,7 @@ config ARM64_MODULE_PLTS
+
+ config ARM64_PSEUDO_NMI
+ bool "Support for NMI-like interrupts"
++ depends on BROKEN # 1556553607-46531-1-git-send-email-julien.thierry@×××.com
+ select CONFIG_ARM_GIC_V3
+ help
+ Adds support for mimicking Non-Maskable Interrupts through the use of
+diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
+index f6a76e43f39e..4389d5d0ca0f 100644
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -61,7 +61,8 @@
+ #define ARM64_HAS_GENERIC_AUTH_ARCH 40
+ #define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41
+ #define ARM64_HAS_IRQ_PRIO_MASKING 42
++#define ARM64_WORKAROUND_1463225 43
+
+-#define ARM64_NCAPS 43
++#define ARM64_NCAPS 44
+
+ #endif /* __ASM_CPUCAPS_H */
+diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
+index 6fb2214333a2..2d78ea6932b7 100644
+--- a/arch/arm64/include/asm/futex.h
++++ b/arch/arm64/include/asm/futex.h
+@@ -58,7 +58,7 @@ do { \
+ static inline int
+ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
+ {
+- int oldval = 0, ret, tmp;
++ int oldval, ret, tmp;
+ u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
+
+ pagefault_disable();
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index de70c1eabf33..74ebe9693714 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -478,6 +478,8 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
+ return __pmd_to_phys(pmd);
+ }
+
++static inline void pte_unmap(pte_t *pte) { }
++
+ /* Find an entry in the third-level page table. */
+ #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+@@ -486,7 +488,6 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
+
+ #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
+ #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
+-#define pte_unmap(pte) do { } while (0)
+ #define pte_unmap_nested(pte) do { } while (0)
+
+ #define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
+diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
+index 2b9a63771eda..f89263c8e11a 100644
+--- a/arch/arm64/include/asm/vdso_datapage.h
++++ b/arch/arm64/include/asm/vdso_datapage.h
+@@ -38,6 +38,7 @@ struct vdso_data {
+ __u32 tz_minuteswest; /* Whacky timezone stuff */
+ __u32 tz_dsttime;
+ __u32 use_syscall;
++ __u32 hrtimer_res;
+ };
+
+ #endif /* !__ASSEMBLY__ */
+diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
+index 7f40dcbdd51d..e10e2a5d9ddc 100644
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -94,7 +94,7 @@ int main(void)
+ DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
+ DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
+ DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW);
+- DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
++ DEFINE(CLOCK_REALTIME_RES, offsetof(struct vdso_data, hrtimer_res));
+ DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
+ DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
+ DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC);
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 9950bb0cbd52..87019cd73f22 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -464,6 +464,22 @@ out_printmsg:
+ }
+ #endif /* CONFIG_ARM64_SSBD */
+
++#ifdef CONFIG_ARM64_ERRATUM_1463225
++DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
++
++static bool
++has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
++ int scope)
++{
++ u32 midr = read_cpuid_id();
++ /* Cortex-A76 r0p0 - r3p1 */
++ struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
++
++ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++ return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
++}
++#endif
++
+ static void __maybe_unused
+ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
+ {
+@@ -738,6 +754,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ .capability = ARM64_WORKAROUND_1165522,
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+ },
++#endif
++#ifdef CONFIG_ARM64_ERRATUM_1463225
++ {
++ .desc = "ARM erratum 1463225",
++ .capability = ARM64_WORKAROUND_1463225,
++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
++ .matches = has_cortex_a76_erratum_1463225,
++ },
+ #endif
+ {
+ }
+diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
+index ea001241bdd4..00f8b8612b69 100644
+--- a/arch/arm64/kernel/cpu_ops.c
++++ b/arch/arm64/kernel/cpu_ops.c
+@@ -85,6 +85,7 @@ static const char *__init cpu_read_enable_method(int cpu)
+ pr_err("%pOF: missing enable-method property\n",
+ dn);
+ }
++ of_node_put(dn);
+ } else {
+ enable_method = acpi_get_enable_method(cpu);
+ if (!enable_method) {
+diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
+index b09b6f75f759..06941c1fe418 100644
+--- a/arch/arm64/kernel/kaslr.c
++++ b/arch/arm64/kernel/kaslr.c
+@@ -145,15 +145,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
+ /*
+- * Randomize the module region over a 4 GB window covering the
++ * Randomize the module region over a 2 GB window covering the
+ * kernel. This reduces the risk of modules leaking information
+ * about the address of the kernel itself, but results in
+ * branches between modules and the core kernel that are
+ * resolved via PLTs. (Branches between modules will be
+ * resolved normally.)
+ */
+- module_range = SZ_4G - (u64)(_end - _stext);
+- module_alloc_base = max((u64)_end + offset - SZ_4G,
++ module_range = SZ_2G - (u64)(_end - _stext);
++ module_alloc_base = max((u64)_end + offset - SZ_2G,
+ (u64)MODULES_VADDR);
+ } else {
+ /*
+diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
+index f713e2fc4d75..1e418e69b58c 100644
+--- a/arch/arm64/kernel/module.c
++++ b/arch/arm64/kernel/module.c
+@@ -56,7 +56,7 @@ void *module_alloc(unsigned long size)
+ * can simply omit this fallback in that case.
+ */
+ p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
+- module_alloc_base + SZ_4G, GFP_KERNEL,
++ module_alloc_base + SZ_2G, GFP_KERNEL,
+ PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+
+diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
+index 5610ac01c1ec..871c739f060a 100644
+--- a/arch/arm64/kernel/syscall.c
++++ b/arch/arm64/kernel/syscall.c
+@@ -8,6 +8,7 @@
+ #include <linux/syscalls.h>
+
+ #include <asm/daifflags.h>
++#include <asm/debug-monitors.h>
+ #include <asm/fpsimd.h>
+ #include <asm/syscall.h>
+ #include <asm/thread_info.h>
+@@ -60,6 +61,35 @@ static inline bool has_syscall_work(unsigned long flags)
+ int syscall_trace_enter(struct pt_regs *regs);
+ void syscall_trace_exit(struct pt_regs *regs);
+
++#ifdef CONFIG_ARM64_ERRATUM_1463225
++DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
++
++static void cortex_a76_erratum_1463225_svc_handler(void)
++{
++ u32 reg, val;
++
++ if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
++ return;
++
++ if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
++ return;
++
++ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
++ reg = read_sysreg(mdscr_el1);
++ val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
++ write_sysreg(val, mdscr_el1);
++ asm volatile("msr daifclr, #8");
++ isb();
++
++ /* We will have taken a single-step exception by this point */
++
++ write_sysreg(reg, mdscr_el1);
++ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
++}
++#else
++static void cortex_a76_erratum_1463225_svc_handler(void) { }
++#endif /* CONFIG_ARM64_ERRATUM_1463225 */
++
+ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
+ const syscall_fn_t syscall_table[])
+ {
+@@ -68,6 +98,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
+ regs->orig_x0 = regs->regs[0];
+ regs->syscallno = scno;
+
++ cortex_a76_erratum_1463225_svc_handler();
+ local_daif_restore(DAIF_PROCCTX);
+ user_exit();
+
+diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
+index 2d419006ad43..ec0bb588d755 100644
+--- a/arch/arm64/kernel/vdso.c
++++ b/arch/arm64/kernel/vdso.c
+@@ -232,6 +232,9 @@ void update_vsyscall(struct timekeeper *tk)
+ vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
+ vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
+
++ /* Read without the seqlock held by clock_getres() */
++ WRITE_ONCE(vdso_data->hrtimer_res, hrtimer_resolution);
++
+ if (!use_syscall) {
+ /* tkr_mono.cycle_last == tkr_raw.cycle_last */
+ vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
+diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
+index e8f60112818f..856fee6d3512 100644
+--- a/arch/arm64/kernel/vdso/gettimeofday.S
++++ b/arch/arm64/kernel/vdso/gettimeofday.S
+@@ -308,13 +308,14 @@ ENTRY(__kernel_clock_getres)
+ ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
+ b.ne 1f
+
+- ldr x2, 5f
++ adr vdso_data, _vdso_data
++ ldr w2, [vdso_data, #CLOCK_REALTIME_RES]
+ b 2f
+ 1:
+ cmp w0, #CLOCK_REALTIME_COARSE
+ ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
+ b.ne 4f
+- ldr x2, 6f
++ ldr x2, 5f
+ 2:
+ cbz x1, 3f
+ stp xzr, x2, [x1]
+@@ -328,8 +329,6 @@ ENTRY(__kernel_clock_getres)
+ svc #0
+ ret
+ 5:
+- .quad CLOCK_REALTIME_RES
+-6:
+ .quad CLOCK_COARSE_RES
+ .cfi_endproc
+ ENDPROC(__kernel_clock_getres)
+diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
+index 78c0a72f822c..674860e3e478 100644
+--- a/arch/arm64/mm/dma-mapping.c
++++ b/arch/arm64/mm/dma-mapping.c
+@@ -249,6 +249,11 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
++ if (!is_vmalloc_addr(cpu_addr)) {
++ unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
++ return __swiotlb_mmap_pfn(vma, pfn, size);
++ }
++
+ if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+ /*
+ * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
+@@ -272,6 +277,11 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct vm_struct *area = find_vm_area(cpu_addr);
+
++ if (!is_vmalloc_addr(cpu_addr)) {
++ struct page *page = virt_to_page(cpu_addr);
++ return __swiotlb_get_sgtable_page(sgt, page, size);
++ }
++
+ if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+ /*
+ * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 1a7e92ab69eb..9a6099a2c633 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -810,14 +810,47 @@ void __init hook_debug_fault_code(int nr,
+ debug_fault_info[nr].name = name;
+ }
+
++#ifdef CONFIG_ARM64_ERRATUM_1463225
++DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
++
++static int __exception
++cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
++{
++ if (user_mode(regs))
++ return 0;
++
++ if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
++ return 0;
++
++ /*
++ * We've taken a dummy step exception from the kernel to ensure
++ * that interrupts are re-enabled on the syscall path. Return back
++ * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
++ * masked so that we can safely restore the mdscr and get on with
++ * handling the syscall.
++ */
++ regs->pstate |= PSR_D_BIT;
++ return 1;
++}
++#else
++static int __exception
++cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
++{
++ return 0;
++}
++#endif /* CONFIG_ARM64_ERRATUM_1463225 */
++
+ asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
+- unsigned int esr,
+- struct pt_regs *regs)
++ unsigned int esr,
++ struct pt_regs *regs)
+ {
+ const struct fault_info *inf = esr_to_debug_fault_info(esr);
+ unsigned long pc = instruction_pointer(regs);
+ int rv;
+
++ if (cortex_a76_erratum_1463225_debug_handler(regs))
++ return 0;
++
+ /*
+ * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
+ * already disabled to preserve the last enabled/disabled addresses.
+diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c
+index 9d9f6f334d3c..3da3e2b1b51b 100644
+--- a/arch/powerpc/boot/addnote.c
++++ b/arch/powerpc/boot/addnote.c
+@@ -223,7 +223,11 @@ main(int ac, char **av)
+ PUT_16(E_PHNUM, np + 2);
+
+ /* write back */
+- lseek(fd, (long) 0, SEEK_SET);
++ i = lseek(fd, (long) 0, SEEK_SET);
++ if (i < 0) {
++ perror("lseek");
++ exit(1);
++ }
+ i = write(fd, buf, n);
+ if (i < 0) {
+ perror("write");
+diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
+index 3fad8d499767..5321a11c2835 100644
+--- a/arch/powerpc/kernel/head_64.S
++++ b/arch/powerpc/kernel/head_64.S
+@@ -968,7 +968,9 @@ start_here_multiplatform:
+
+ /* Restore parameters passed from prom_init/kexec */
+ mr r3,r31
+- bl early_setup /* also sets r13 and SPRG_PACA */
++ LOAD_REG_ADDR(r12, DOTSYM(early_setup))
++ mtctr r12
++ bctrl /* also sets r13 and SPRG_PACA */
+
+ LOAD_REG_ADDR(r3, start_here_common)
+ ld r4,PACAKMSR(r13)
+diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
+index 3c6ab22a0c4e..af3c15a1d41e 100644
+--- a/arch/powerpc/kernel/watchdog.c
++++ b/arch/powerpc/kernel/watchdog.c
+@@ -77,7 +77,7 @@ static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */
+
+ static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeat */
+
+-static DEFINE_PER_CPU(struct timer_list, wd_timer);
++static DEFINE_PER_CPU(struct hrtimer, wd_hrtimer);
+ static DEFINE_PER_CPU(u64, wd_timer_tb);
+
+ /* SMP checker bits */
+@@ -293,21 +293,21 @@ out:
+ nmi_exit();
+ }
+
+-static void wd_timer_reset(unsigned int cpu, struct timer_list *t)
+-{
+- t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms);
+- if (wd_timer_period_ms > 1000)
+- t->expires = __round_jiffies_up(t->expires, cpu);
+- add_timer_on(t, cpu);
+-}
+-
+-static void wd_timer_fn(struct timer_list *t)
++static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
+ {
+ int cpu = smp_processor_id();
+
++ if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
++ return HRTIMER_NORESTART;
++
++ if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
++ return HRTIMER_NORESTART;
++
+ watchdog_timer_interrupt(cpu);
+
+- wd_timer_reset(cpu, t);
++ hrtimer_forward_now(hrtimer, ms_to_ktime(wd_timer_period_ms));
++
++ return HRTIMER_RESTART;
+ }
+
+ void arch_touch_nmi_watchdog(void)
+@@ -323,37 +323,22 @@ void arch_touch_nmi_watchdog(void)
+ }
+ EXPORT_SYMBOL(arch_touch_nmi_watchdog);
+
+-static void start_watchdog_timer_on(unsigned int cpu)
+-{
+- struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
+-
+- per_cpu(wd_timer_tb, cpu) = get_tb();
+-
+- timer_setup(t, wd_timer_fn, TIMER_PINNED);
+- wd_timer_reset(cpu, t);
+-}
+-
+-static void stop_watchdog_timer_on(unsigned int cpu)
+-{
+- struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
+-
+- del_timer_sync(t);
+-}
+-
+-static int start_wd_on_cpu(unsigned int cpu)
++static void start_watchdog(void *arg)
+ {
++ struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
++ int cpu = smp_processor_id();
+ unsigned long flags;
+
+ if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
+ WARN_ON(1);
+- return 0;
++ return;
+ }
+
+ if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+- return 0;
++ return;
+
+ if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
+- return 0;
++ return;
+
+ wd_smp_lock(&flags);
+ cpumask_set_cpu(cpu, &wd_cpus_enabled);
+@@ -363,27 +348,40 @@ static int start_wd_on_cpu(unsigned int cpu)
+ }
+ wd_smp_unlock(&flags);
+
+- start_watchdog_timer_on(cpu);
++ *this_cpu_ptr(&wd_timer_tb) = get_tb();
+
+- return 0;
++ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer->function = watchdog_timer_fn;
++ hrtimer_start(hrtimer, ms_to_ktime(wd_timer_period_ms),
++ HRTIMER_MODE_REL_PINNED);
+ }
+
+-static int stop_wd_on_cpu(unsigned int cpu)
++static int start_watchdog_on_cpu(unsigned int cpu)
+ {
++ return smp_call_function_single(cpu, start_watchdog, NULL, true);
++}
++
++static void stop_watchdog(void *arg)
++{
++ struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
++ int cpu = smp_processor_id();
+ unsigned long flags;
+
+ if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
+- return 0; /* Can happen in CPU unplug case */
++ return; /* Can happen in CPU unplug case */
+
+- stop_watchdog_timer_on(cpu);
++ hrtimer_cancel(hrtimer);
+
+ wd_smp_lock(&flags);
+ cpumask_clear_cpu(cpu, &wd_cpus_enabled);
+ wd_smp_unlock(&flags);
+
+ wd_smp_clear_cpu_pending(cpu, get_tb());
++}
+
+- return 0;
++static int stop_watchdog_on_cpu(unsigned int cpu)
++{
++ return smp_call_function_single(cpu, stop_watchdog, NULL, true);
+ }
+
+ static void watchdog_calc_timeouts(void)
+@@ -402,7 +400,7 @@ void watchdog_nmi_stop(void)
+ int cpu;
+
+ for_each_cpu(cpu, &wd_cpus_enabled)
+- stop_wd_on_cpu(cpu);
++ stop_watchdog_on_cpu(cpu);
+ }
+
+ void watchdog_nmi_start(void)
+@@ -411,7 +409,7 @@ void watchdog_nmi_start(void)
+
+ watchdog_calc_timeouts();
+ for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
+- start_wd_on_cpu(cpu);
++ start_watchdog_on_cpu(cpu);
+ }
+
+ /*
+@@ -423,7 +421,8 @@ int __init watchdog_nmi_probe(void)
+
+ err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "powerpc/watchdog:online",
+- start_wd_on_cpu, stop_wd_on_cpu);
++ start_watchdog_on_cpu,
++ stop_watchdog_on_cpu);
+ if (err < 0) {
+ pr_warn("could not be initialized");
+ return err;
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index f976676004ad..48c9a97eb2c3 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -1498,6 +1498,9 @@ int start_topology_update(void)
+ {
+ int rc = 0;
+
++ if (!topology_updates_enabled)
++ return 0;
++
+ if (firmware_has_feature(FW_FEATURE_PRRN)) {
+ if (!prrn_enabled) {
+ prrn_enabled = 1;
+@@ -1531,6 +1534,9 @@ int stop_topology_update(void)
+ {
+ int rc = 0;
+
++ if (!topology_updates_enabled)
++ return 0;
++
+ if (prrn_enabled) {
+ prrn_enabled = 0;
+ #ifdef CONFIG_SMP
+@@ -1588,11 +1594,13 @@ static ssize_t topology_write(struct file *file, const char __user *buf,
+
+ kbuf[read_len] = '\0';
+
+- if (!strncmp(kbuf, "on", 2))
++ if (!strncmp(kbuf, "on", 2)) {
++ topology_updates_enabled = true;
+ start_topology_update();
+- else if (!strncmp(kbuf, "off", 3))
++ } else if (!strncmp(kbuf, "off", 3)) {
+ stop_topology_update();
+- else
++ topology_updates_enabled = false;
++ } else
+ return -EINVAL;
+
+ return count;
+@@ -1607,9 +1615,7 @@ static const struct file_operations topology_ops = {
+
+ static int topology_update_init(void)
+ {
+- /* Do not poll for changes if disabled at boot */
+- if (topology_updates_enabled)
+- start_topology_update();
++ start_topology_update();
+
+ if (vphn_enabled)
+ topology_schedule_update();
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index b1c37cc3fa98..2d12f0037e3a 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -487,6 +487,11 @@ static int nest_imc_event_init(struct perf_event *event)
+ * Get the base memory addresss for this cpu.
+ */
+ chip_id = cpu_to_chip_id(event->cpu);
++
++ /* Return, if chip_id is not valid */
++ if (chip_id < 0)
++ return -ENODEV;
++
+ pcni = pmu->mem_info;
+ do {
+ if (pcni->id == chip_id) {
+@@ -494,7 +499,7 @@ static int nest_imc_event_init(struct perf_event *event)
+ break;
+ }
+ pcni++;
+- } while (pcni);
++ } while (pcni->vbase != 0);
+
+ if (!flag)
+ return -ENODEV;
+diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
+index 58a07948c76e..3d27f02695e4 100644
+--- a/arch/powerpc/platforms/powernv/opal-imc.c
++++ b/arch/powerpc/platforms/powernv/opal-imc.c
+@@ -127,7 +127,7 @@ static int imc_get_mem_addr_nest(struct device_node *node,
+ nr_chips))
+ goto error;
+
+- pmu_ptr->mem_info = kcalloc(nr_chips, sizeof(*pmu_ptr->mem_info),
++ pmu_ptr->mem_info = kcalloc(nr_chips + 1, sizeof(*pmu_ptr->mem_info),
+ GFP_KERNEL);
+ if (!pmu_ptr->mem_info)
+ goto error;
+diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c
+index 5a286b012043..602e7cc26d11 100644
+--- a/arch/s390/kernel/kexec_elf.c
++++ b/arch/s390/kernel/kexec_elf.c
+@@ -19,10 +19,15 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
+ struct kexec_buf buf;
+ const Elf_Ehdr *ehdr;
+ const Elf_Phdr *phdr;
++ Elf_Addr entry;
+ int i, ret;
+
+ ehdr = (Elf_Ehdr *)kernel;
+ buf.image = image;
++ if (image->type == KEXEC_TYPE_CRASH)
++ entry = STARTUP_KDUMP_OFFSET;
++ else
++ entry = ehdr->e_entry;
+
+ phdr = (void *)ehdr + ehdr->e_phoff;
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+@@ -35,7 +40,7 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
+ buf.mem = ALIGN(phdr->p_paddr, phdr->p_align);
+ buf.memsz = phdr->p_memsz;
+
+- if (phdr->p_paddr == 0) {
++ if (entry - phdr->p_paddr < phdr->p_memsz) {
+ data->kernel_buf = buf.buffer;
+ data->memsz += STARTUP_NORMAL_OFFSET;
+
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index 8485d6dc2754..9ebd01219812 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -410,6 +410,7 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
+ return old;
+ }
+
++#ifdef CONFIG_PGSTE
+ static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
+ {
+ pgd_t *pgd;
+@@ -427,6 +428,7 @@ static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
+ pmd = pmd_alloc(mm, pud, addr);
+ return pmd;
+ }
++#endif
+
+ pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t new)
+diff --git a/arch/sh/include/cpu-sh4/cpu/sh7786.h b/arch/sh/include/cpu-sh4/cpu/sh7786.h
+index 8f9bfbf3cdb1..d6cce65b4871 100644
+--- a/arch/sh/include/cpu-sh4/cpu/sh7786.h
++++ b/arch/sh/include/cpu-sh4/cpu/sh7786.h
+@@ -132,7 +132,7 @@ enum {
+
+ static inline u32 sh7786_mm_sel(void)
+ {
+- return __raw_readl(0xFC400020) & 0x7;
++ return __raw_readl((const volatile void __iomem *)0xFC400020) & 0x7;
+ }
+
+ #endif /* __CPU_SH7786_H__ */
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index a587805c6687..56e748a7679f 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -47,7 +47,7 @@ export REALMODE_CFLAGS
+ export BITS
+
+ ifdef CONFIG_X86_NEED_RELOCS
+- LDFLAGS_vmlinux := --emit-relocs
++ LDFLAGS_vmlinux := --emit-relocs --discard-none
+ endif
+
+ #
+diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
+index d41de9af7a39..6072f92cb8ea 100644
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -578,6 +578,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates),
+
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
++
++ X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates),
+ { },
+ };
+ MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
+diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
+index 94dc564146ca..37ebf6fc5415 100644
+--- a/arch/x86/events/intel/rapl.c
++++ b/arch/x86/events/intel/rapl.c
+@@ -775,6 +775,8 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init),
+
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init),
++
++ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, skl_rapl_init),
+ {},
+ };
+
+diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
+index a878e6286e4a..f3f4c2263501 100644
+--- a/arch/x86/events/msr.c
++++ b/arch/x86/events/msr.c
+@@ -89,6 +89,7 @@ static bool test_intel(int idx)
+ case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_FAM6_KABYLAKE_MOBILE:
+ case INTEL_FAM6_KABYLAKE_DESKTOP:
++ case INTEL_FAM6_ICELAKE_MOBILE:
+ if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
+ return true;
+ break;
+diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
+index 321fe5f5d0e9..4d5fcd47ab75 100644
+--- a/arch/x86/ia32/ia32_signal.c
++++ b/arch/x86/ia32/ia32_signal.c
+@@ -61,9 +61,8 @@
+ } while (0)
+
+ #define RELOAD_SEG(seg) { \
+- unsigned int pre = GET_SEG(seg); \
++ unsigned int pre = (seg) | 3; \
+ unsigned int cur = get_user_seg(seg); \
+- pre |= 3; \
+ if (pre != cur) \
+ set_user_seg(seg, pre); \
+ }
+@@ -72,6 +71,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
+ struct sigcontext_32 __user *sc)
+ {
+ unsigned int tmpflags, err = 0;
++ u16 gs, fs, es, ds;
+ void __user *buf;
+ u32 tmp;
+
+@@ -79,16 +79,10 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
+ current->restart_block.fn = do_no_restart_syscall;
+
+ get_user_try {
+- /*
+- * Reload fs and gs if they have changed in the signal
+- * handler. This does not handle long fs/gs base changes in
+- * the handler, but does not clobber them at least in the
+- * normal case.
+- */
+- RELOAD_SEG(gs);
+- RELOAD_SEG(fs);
+- RELOAD_SEG(ds);
+- RELOAD_SEG(es);
++ gs = GET_SEG(gs);
++ fs = GET_SEG(fs);
++ ds = GET_SEG(ds);
++ es = GET_SEG(es);
+
+ COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
+ COPY(dx); COPY(cx); COPY(ip); COPY(ax);
+@@ -106,6 +100,17 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
+ buf = compat_ptr(tmp);
+ } get_user_catch(err);
+
++ /*
++ * Reload fs and gs if they have changed in the signal
++ * handler. This does not handle long fs/gs base changes in
++ * the handler, but does not clobber them at least in the
++ * normal case.
++ */
++ RELOAD_SEG(gs);
++ RELOAD_SEG(fs);
++ RELOAD_SEG(ds);
++ RELOAD_SEG(es);
++
+ err |= fpu__restore_sig(buf, 1);
+
+ force_iret();
+diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
+index 05861cc08787..0bbb07eaed6b 100644
+--- a/arch/x86/include/asm/text-patching.h
++++ b/arch/x86/include/asm/text-patching.h
+@@ -39,6 +39,7 @@ extern int poke_int3_handler(struct pt_regs *regs);
+ extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+ extern int after_bootmem;
+
++#ifndef CONFIG_UML_X86
+ static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
+ {
+ regs->ip = ip;
+@@ -65,6 +66,7 @@ static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
+ int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
+ int3_emulate_jmp(regs, func);
+ }
+-#endif
++#endif /* CONFIG_X86_64 */
++#endif /* !CONFIG_UML_X86 */
+
+ #endif /* _ASM_X86_TEXT_PATCHING_H */
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 1954dd5552a2..3822cc8ac9d6 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -427,10 +427,11 @@ do { \
+ ({ \
+ __label__ __pu_label; \
+ int __pu_err = -EFAULT; \
+- __typeof__(*(ptr)) __pu_val; \
+- __pu_val = x; \
++ __typeof__(*(ptr)) __pu_val = (x); \
++ __typeof__(ptr) __pu_ptr = (ptr); \
++ __typeof__(size) __pu_size = (size); \
+ __uaccess_begin(); \
+- __put_user_size(__pu_val, (ptr), (size), __pu_label); \
++ __put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label); \
+ __pu_err = 0; \
+ __pu_label: \
+ __uaccess_end(); \
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 9a79c7808f9c..d7df79fc448c 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -667,15 +667,29 @@ void __init alternative_instructions(void)
+ * handlers seeing an inconsistent instruction while you patch.
+ */
+ void *__init_or_module text_poke_early(void *addr, const void *opcode,
+- size_t len)
++ size_t len)
+ {
+ unsigned long flags;
+- local_irq_save(flags);
+- memcpy(addr, opcode, len);
+- local_irq_restore(flags);
+- sync_core();
+- /* Could also do a CLFLUSH here to speed up CPU recovery; but
+- that causes hangs on some VIA CPUs. */
++
++ if (boot_cpu_has(X86_FEATURE_NX) &&
++ is_module_text_address((unsigned long)addr)) {
++ /*
++ * Modules text is marked initially as non-executable, so the
++ * code cannot be running and speculative code-fetches are
++ * prevented. Just change the code.
++ */
++ memcpy(addr, opcode, len);
++ } else {
++ local_irq_save(flags);
++ memcpy(addr, opcode, len);
++ local_irq_restore(flags);
++ sync_core();
++
++ /*
++ * Could also do a CLFLUSH here to speed up CPU recovery; but
++ * that causes hangs on some VIA CPUs.
++ */
++ }
+ return addr;
+ }
+
+diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
+index cf25405444ab..415621ddb8a2 100644
+--- a/arch/x86/kernel/cpu/hygon.c
++++ b/arch/x86/kernel/cpu/hygon.c
+@@ -19,6 +19,8 @@
+
+ #include "cpu.h"
+
++#define APICID_SOCKET_ID_BIT 6
++
+ /*
+ * nodes_per_socket: Stores the number of nodes per socket.
+ * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
+@@ -87,6 +89,9 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
+ if (!err)
+ c->x86_coreid_bits = get_count_order(c->x86_max_cores);
+
++ /* Socket ID is ApicId[6] for these processors. */
++ c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
++
+ cacheinfo_hygon_init_llc_id(c, cpu, node_id);
+ } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
+ u64 value;
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index 1a7084ba9a3b..9e6a94c208e0 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -712,19 +712,49 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
+
+ barrier();
+ m.status = mce_rdmsrl(msr_ops.status(i));
++
++ /* If this entry is not valid, ignore it */
+ if (!(m.status & MCI_STATUS_VAL))
+ continue;
+
+ /*
+- * Uncorrected or signalled events are handled by the exception
+- * handler when it is enabled, so don't process those here.
+- *
+- * TBD do the same check for MCI_STATUS_EN here?
++ * If we are logging everything (at CPU online) or this
++ * is a corrected error, then we must log it.
+ */
+- if (!(flags & MCP_UC) &&
+- (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
+- continue;
++ if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
++ goto log_it;
++
++ /*
++ * Newer Intel systems that support software error
++ * recovery need to make additional checks. Other
++ * CPUs should skip over uncorrected errors, but log
++ * everything else.
++ */
++ if (!mca_cfg.ser) {
++ if (m.status & MCI_STATUS_UC)
++ continue;
++ goto log_it;
++ }
++
++ /* Log "not enabled" (speculative) errors */
++ if (!(m.status & MCI_STATUS_EN))
++ goto log_it;
++
++ /*
++ * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
++ * UC == 1 && PCC == 0 && S == 0
++ */
++ if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
++ goto log_it;
++
++ /*
++ * Skip anything else. Presumption is that our read of this
++ * bank is racing with a machine check. Leave the log alone
++ * for do_machine_check() to deal with it.
++ */
++ continue;
+
++log_it:
+ error_seen = true;
+
+ mce_read_aux(&m, i);
+@@ -1451,13 +1481,12 @@ EXPORT_SYMBOL_GPL(mce_notify_irq);
+ static int __mcheck_cpu_mce_banks_init(void)
+ {
+ int i;
+- u8 num_banks = mca_cfg.banks;
+
+- mce_banks = kcalloc(num_banks, sizeof(struct mce_bank), GFP_KERNEL);
++ mce_banks = kcalloc(MAX_NR_BANKS, sizeof(struct mce_bank), GFP_KERNEL);
+ if (!mce_banks)
+ return -ENOMEM;
+
+- for (i = 0; i < num_banks; i++) {
++ for (i = 0; i < MAX_NR_BANKS; i++) {
+ struct mce_bank *b = &mce_banks[i];
+
+ b->ctl = -1ULL;
+@@ -1471,28 +1500,19 @@ static int __mcheck_cpu_mce_banks_init(void)
+ */
+ static int __mcheck_cpu_cap_init(void)
+ {
+- unsigned b;
+ u64 cap;
++ u8 b;
+
+ rdmsrl(MSR_IA32_MCG_CAP, cap);
+
+ b = cap & MCG_BANKCNT_MASK;
+- if (!mca_cfg.banks)
+- pr_info("CPU supports %d MCE banks\n", b);
+-
+- if (b > MAX_NR_BANKS) {
+- pr_warn("Using only %u machine check banks out of %u\n",
+- MAX_NR_BANKS, b);
++ if (WARN_ON_ONCE(b > MAX_NR_BANKS))
+ b = MAX_NR_BANKS;
+- }
+
+- /* Don't support asymmetric configurations today */
+- WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
+- mca_cfg.banks = b;
++ mca_cfg.banks = max(mca_cfg.banks, b);
+
+ if (!mce_banks) {
+ int err = __mcheck_cpu_mce_banks_init();
+-
+ if (err)
+ return err;
+ }
+@@ -2459,6 +2479,8 @@ EXPORT_SYMBOL_GPL(mcsafe_key);
+
+ static int __init mcheck_late_init(void)
+ {
++ pr_info("Using %d MCE banks\n", mca_cfg.banks);
++
+ if (mca_cfg.recovery)
+ static_branch_inc(&mcsafe_key);
+
+diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
+index 8492ef7d9015..3f82afd0f46f 100644
+--- a/arch/x86/kernel/cpu/mce/inject.c
++++ b/arch/x86/kernel/cpu/mce/inject.c
+@@ -46,8 +46,6 @@
+ static struct mce i_mce;
+ static struct dentry *dfs_inj;
+
+-static u8 n_banks;
+-
+ #define MAX_FLAG_OPT_SIZE 4
+ #define NBCFG 0x44
+
+@@ -570,9 +568,15 @@ err:
+ static int inj_bank_set(void *data, u64 val)
+ {
+ struct mce *m = (struct mce *)data;
++ u8 n_banks;
++ u64 cap;
++
++ /* Get bank count on target CPU so we can handle non-uniform values. */
++ rdmsrl_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap);
++ n_banks = cap & MCG_BANKCNT_MASK;
+
+ if (val >= n_banks) {
+- pr_err("Non-existent MCE bank: %llu\n", val);
++ pr_err("MCA bank %llu non-existent on CPU%d\n", val, m->extcpu);
+ return -EINVAL;
+ }
+
+@@ -665,10 +669,6 @@ static struct dfs_node {
+ static int __init debugfs_init(void)
+ {
+ unsigned int i;
+- u64 cap;
+-
+- rdmsrl(MSR_IA32_MCG_CAP, cap);
+- n_banks = cap & MCG_BANKCNT_MASK;
+
+ dfs_inj = debugfs_create_dir("mce-inject", NULL);
+ if (!dfs_inj)
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 5260185cbf7b..8a4a7823451a 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -418,8 +418,9 @@ static int do_microcode_update(const void __user *buf, size_t size)
+ if (ustate == UCODE_ERROR) {
+ error = -1;
+ break;
+- } else if (ustate == UCODE_OK)
++ } else if (ustate == UCODE_NEW) {
+ apply_microcode_on_target(cpu);
++ }
+ }
+
+ return error;
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index bd553b3af22e..6e0c0ed8e4bf 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -749,6 +749,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ unsigned long end_offset;
+ unsigned long op_offset;
+ unsigned long offset;
++ unsigned long npages;
+ unsigned long size;
+ unsigned long retq;
+ unsigned long *ptr;
+@@ -781,6 +782,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ return 0;
+
+ *tramp_size = size + RET_SIZE + sizeof(void *);
++ npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);
+
+ /* Copy ftrace_caller onto the trampoline memory */
+ ret = probe_kernel_read(trampoline, (void *)start_offset, size);
+@@ -825,6 +827,12 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ /* ALLOC_TRAMP flags lets us know we created it */
+ ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
+
++ /*
++ * Module allocation needs to be completed by making the page
++ * executable. The page is still writable, which is a security hazard,
++ * but anyhow ftrace breaks W^X completely.
++ */
++ set_memory_x((unsigned long)trampoline, npages);
+ return (unsigned long)trampoline;
+ fail:
+ tramp_free(trampoline, *tramp_size);
+diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
+index 0469cd078db1..b50ac9c7397b 100644
+--- a/arch/x86/kernel/irq_64.c
++++ b/arch/x86/kernel/irq_64.c
+@@ -26,9 +26,18 @@ int sysctl_panic_on_stackoverflow;
+ /*
+ * Probabilistic stack overflow check:
+ *
+- * Only check the stack in process context, because everything else
+- * runs on the big interrupt stacks. Checking reliably is too expensive,
+- * so we just check from interrupts.
++ * Regular device interrupts can enter on the following stacks:
++ *
++ * - User stack
++ *
++ * - Kernel task stack
++ *
++ * - Interrupt stack if a device driver reenables interrupts
++ * which should only happen in really old drivers.
++ *
++ * - Debug IST stack
++ *
++ * All other contexts are invalid.
+ */
+ static inline void stack_overflow_check(struct pt_regs *regs)
+ {
+@@ -53,8 +62,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
+ return;
+
+ oist = this_cpu_ptr(&orig_ist);
+- estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN;
+- estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
++ estack_bottom = (u64)oist->ist[DEBUG_STACK];
++ estack_top = estack_bottom - DEBUG_STKSZ + STACK_TOP_MARGIN;
+ if (regs->sp >= estack_top && regs->sp <= estack_bottom)
+ return;
+
+diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
+index b052e883dd8c..cfa3106faee4 100644
+--- a/arch/x86/kernel/module.c
++++ b/arch/x86/kernel/module.c
+@@ -87,7 +87,7 @@ void *module_alloc(unsigned long size)
+ p = __vmalloc_node_range(size, MODULE_ALIGN,
+ MODULES_VADDR + get_module_load_offset(),
+ MODULES_END, GFP_KERNEL,
+- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
++ PAGE_KERNEL, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ if (p && (kasan_module_alloc(p, size) < 0)) {
+ vfree(p);
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index 08dfd4c1a4f9..c8aa58a2bab9 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -132,16 +132,6 @@ static int restore_sigcontext(struct pt_regs *regs,
+ COPY_SEG_CPL3(cs);
+ COPY_SEG_CPL3(ss);
+
+-#ifdef CONFIG_X86_64
+- /*
+- * Fix up SS if needed for the benefit of old DOSEMU and
+- * CRIU.
+- */
+- if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) &&
+- user_64bit_mode(regs)))
+- force_valid_ss(regs);
+-#endif
+-
+ get_user_ex(tmpflags, &sc->flags);
+ regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+ regs->orig_ax = -1; /* disable syscall checks */
+@@ -150,6 +140,15 @@ static int restore_sigcontext(struct pt_regs *regs,
+ buf = (void __user *)buf_val;
+ } get_user_catch(err);
+
++#ifdef CONFIG_X86_64
++ /*
++ * Fix up SS if needed for the benefit of old DOSEMU and
++ * CRIU.
++ */
++ if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
++ force_valid_ss(regs);
++#endif
++
+ err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
+
+ force_iret();
+@@ -461,6 +460,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+ {
+ struct rt_sigframe __user *frame;
+ void __user *fp = NULL;
++ unsigned long uc_flags;
+ int err = 0;
+
+ frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
+@@ -473,9 +473,11 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+ return -EFAULT;
+ }
+
++ uc_flags = frame_uc_flags(regs);
++
+ put_user_try {
+ /* Create the ucontext. */
+- put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
++ put_user_ex(uc_flags, &frame->uc.uc_flags);
+ put_user_ex(0, &frame->uc.uc_link);
+ save_altstack_ex(&frame->uc.uc_stack, regs->sp);
+
+@@ -541,6 +543,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
+ {
+ #ifdef CONFIG_X86_X32_ABI
+ struct rt_sigframe_x32 __user *frame;
++ unsigned long uc_flags;
+ void __user *restorer;
+ int err = 0;
+ void __user *fpstate = NULL;
+@@ -555,9 +558,11 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
+ return -EFAULT;
+ }
+
++ uc_flags = frame_uc_flags(regs);
++
+ put_user_try {
+ /* Create the ucontext. */
+- put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
++ put_user_ex(uc_flags, &frame->uc.uc_flags);
+ put_user_ex(0, &frame->uc.uc_link);
+ compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
+ put_user_ex(0, &frame->uc.uc__pad0);
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index a5127b2c195f..834659288ba9 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -141,11 +141,11 @@ SECTIONS
+ *(.text.__x86.indirect_thunk)
+ __indirect_thunk_end = .;
+ #endif
+-
+- /* End of text section */
+- _etext = .;
+ } :text = 0x9090
+
++ /* End of text section */
++ _etext = .;
++
+ NOTES :text :note
+
+ EXCEPTION_TABLE(16) :text = 0x9090
+diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
+index faa264822cee..007bc654f928 100644
+--- a/arch/x86/kvm/irq.c
++++ b/arch/x86/kvm/irq.c
+@@ -172,3 +172,10 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
+ __kvm_migrate_apic_timer(vcpu);
+ __kvm_migrate_pit_timer(vcpu);
+ }
++
++bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
++{
++ bool resample = args->flags & KVM_IRQFD_FLAG_RESAMPLE;
++
++ return resample ? irqchip_kernel(kvm) : irqchip_in_kernel(kvm);
++}
+diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
+index d5005cc26521..fd210cdd4983 100644
+--- a/arch/x86/kvm/irq.h
++++ b/arch/x86/kvm/irq.h
+@@ -114,6 +114,7 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
+ return mode != KVM_IRQCHIP_NONE;
+ }
+
++bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
+ void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
+ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
+ void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
+index 1495a735b38e..50fa9450fcf1 100644
+--- a/arch/x86/kvm/pmu_amd.c
++++ b/arch/x86/kvm/pmu_amd.c
+@@ -269,10 +269,10 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
+
+ pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
+ pmu->reserved_bits = 0xffffffff00200000ull;
++ pmu->version = 1;
+ /* not applicable to AMD; but clean them to prevent any fall out */
+ pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
+ pmu->nr_arch_fixed_counters = 0;
+- pmu->version = 0;
+ pmu->global_status = 0;
+ }
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 406b558abfef..ae6e51828a54 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2024,7 +2024,11 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ if (!kvm_vcpu_apicv_active(vcpu))
+ return;
+
+- if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
++ /*
++ * Since the host physical APIC id is 8 bits,
++ * we can support host APIC ID upto 255.
++ */
++ if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
+ return;
+
+ entry = READ_ONCE(*(svm->avic_physical_id_cache));
1487 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
1488 +index 0c601d079cd2..8f6f69c26c35 100644
1489 +--- a/arch/x86/kvm/vmx/nested.c
1490 ++++ b/arch/x86/kvm/vmx/nested.c
1491 +@@ -2792,14 +2792,13 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
1492 + : "cc", "memory"
1493 + );
1494 +
1495 +- preempt_enable();
1496 +-
1497 + if (vmx->msr_autoload.host.nr)
1498 + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
1499 + if (vmx->msr_autoload.guest.nr)
1500 + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
1501 +
1502 + if (vm_fail) {
1503 ++ preempt_enable();
1504 + WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
1505 + VMXERR_ENTRY_INVALID_CONTROL_FIELD);
1506 + return 1;
1507 +@@ -2811,6 +2810,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
1508 + local_irq_enable();
1509 + if (hw_breakpoint_active())
1510 + set_debugreg(__this_cpu_read(cpu_dr7), 7);
1511 ++ preempt_enable();
1512 +
1513 + /*
1514 + * A non-failing VMEntry means we somehow entered guest mode with
1515 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1516 +index fed1ab6a825c..6b8575c547ee 100644
1517 +--- a/arch/x86/kvm/x86.c
1518 ++++ b/arch/x86/kvm/x86.c
1519 +@@ -1288,7 +1288,7 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1520 + u64 efer = msr_info->data;
1521 +
1522 + if (efer & efer_reserved_bits)
1523 +- return false;
1524 ++ return 1;
1525 +
1526 + if (!msr_info->host_initiated) {
1527 + if (!__kvm_valid_efer(vcpu, efer))
1528 +diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
1529 +index 3b24dc05251c..9d05572370ed 100644
1530 +--- a/arch/x86/lib/memcpy_64.S
1531 ++++ b/arch/x86/lib/memcpy_64.S
1532 +@@ -257,6 +257,7 @@ ENTRY(__memcpy_mcsafe)
1533 + /* Copy successful. Return zero */
1534 + .L_done_memcpy_trap:
1535 + xorl %eax, %eax
1536 ++.L_done:
1537 + ret
1538 + ENDPROC(__memcpy_mcsafe)
1539 + EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
1540 +@@ -273,7 +274,7 @@ EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
1541 + addl %edx, %ecx
1542 + .E_trailing_bytes:
1543 + mov %ecx, %eax
1544 +- ret
1545 ++ jmp .L_done
1546 +
1547 + /*
1548 + * For write fault handling, given the destination is unaligned,
1549 +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
1550 +index 667f1da36208..5eaf67e8314f 100644
1551 +--- a/arch/x86/mm/fault.c
1552 ++++ b/arch/x86/mm/fault.c
1553 +@@ -359,8 +359,6 @@ static noinline int vmalloc_fault(unsigned long address)
1554 + if (!(address >= VMALLOC_START && address < VMALLOC_END))
1555 + return -1;
1556 +
1557 +- WARN_ON_ONCE(in_nmi());
1558 +-
1559 + /*
1560 + * Copy kernel mappings over when needed. This can also
1561 + * happen within a race in page table update. In the later
1562 +diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
1563 +index 2c53b0f19329..1297e185b8c8 100644
1564 +--- a/arch/x86/platform/uv/tlb_uv.c
1565 ++++ b/arch/x86/platform/uv/tlb_uv.c
1566 +@@ -2133,14 +2133,19 @@ static int __init summarize_uvhub_sockets(int nuvhubs,
1567 + */
1568 + static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
1569 + {
1570 +- unsigned char *uvhub_mask;
1571 + struct uvhub_desc *uvhub_descs;
1572 ++ unsigned char *uvhub_mask = NULL;
1573 +
1574 + if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
1575 + timeout_us = calculate_destination_timeout();
1576 +
1577 + uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
1578 ++ if (!uvhub_descs)
1579 ++ goto fail;
1580 ++
1581 + uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
1582 ++ if (!uvhub_mask)
1583 ++ goto fail;
1584 +
1585 + if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
1586 + goto fail;
1587 +diff --git a/block/bio.c b/block/bio.c
1588 +index 716510ecd7ff..a3c80a6c1fe5 100644
1589 +--- a/block/bio.c
1590 ++++ b/block/bio.c
1591 +@@ -776,6 +776,8 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
1592 +
1593 + if (vec_end_addr + 1 != page_addr + off)
1594 + return false;
1595 ++ if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
1596 ++ return false;
1597 + if (same_page && (vec_end_addr & PAGE_MASK) != page_addr)
1598 + return false;
1599 +
1600 +diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
1601 +index aa6bc5c02643..c59babca6857 100644
1602 +--- a/block/blk-mq-sched.c
1603 ++++ b/block/blk-mq-sched.c
1604 +@@ -413,6 +413,14 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
1605 + struct list_head *list, bool run_queue_async)
1606 + {
1607 + struct elevator_queue *e;
1608 ++ struct request_queue *q = hctx->queue;
1609 ++
1610 ++ /*
1611 ++	 * blk_mq_sched_insert_requests() is called from flush plug
1612 ++	 * context only, so hold a usage counter to prevent the queue
1613 ++	 * from being released.
1614 ++ */
1615 ++ percpu_ref_get(&q->q_usage_counter);
1616 +
1617 + e = hctx->queue->elevator;
1618 + if (e && e->type->ops.insert_requests)
1619 +@@ -426,12 +434,14 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
1620 + if (!hctx->dispatch_busy && !e && !run_queue_async) {
1621 + blk_mq_try_issue_list_directly(hctx, list);
1622 + if (list_empty(list))
1623 +- return;
1624 ++ goto out;
1625 + }
1626 + blk_mq_insert_requests(hctx, ctx, list);
1627 + }
1628 +
1629 + blk_mq_run_hw_queue(hctx, run_queue_async);
1630 ++ out:
1631 ++ percpu_ref_put(&q->q_usage_counter);
1632 + }
1633 +
1634 + static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
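The hunk above pins the queue for the whole function and funnels the early return
through a common label so the reference is always dropped. A simplified, runnable
model of that shape, with percpu_ref_get/put replaced by a plain counter:

    #include <stdio.h>

    struct queue { int usage; };

    static void ref_get(struct queue *q) { q->usage++; }
    static void ref_put(struct queue *q) { q->usage--; }

    static void insert_requests(struct queue *q, int list_emptied)
    {
            ref_get(q);             /* prevent the queue from being released */

            if (list_emptied)
                    goto out;       /* was a bare "return" before the fix */

            /* ... insert and run the hardware queue ... */
    out:
            ref_put(q);             /* every path now drops the reference */
    }

    int main(void)
    {
            struct queue q = { 0 };
            insert_requests(&q, 1);
            printf("usage after early-exit path: %d\n", q.usage);  /* 0, balanced */
            return 0;
    }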
1635 +diff --git a/block/blk-mq.c b/block/blk-mq.c
1636 +index b0e5e67e20a2..8a41cc5974fe 100644
1637 +--- a/block/blk-mq.c
1638 ++++ b/block/blk-mq.c
1639 +@@ -2284,15 +2284,65 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
1640 + }
1641 + }
1642 +
1643 ++static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
1644 ++{
1645 ++ int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
1646 ++
1647 ++ BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
1648 ++ __alignof__(struct blk_mq_hw_ctx)) !=
1649 ++ sizeof(struct blk_mq_hw_ctx));
1650 ++
1651 ++ if (tag_set->flags & BLK_MQ_F_BLOCKING)
1652 ++ hw_ctx_size += sizeof(struct srcu_struct);
1653 ++
1654 ++ return hw_ctx_size;
1655 ++}
1656 ++
1657 + static int blk_mq_init_hctx(struct request_queue *q,
1658 + struct blk_mq_tag_set *set,
1659 + struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1660 + {
1661 +- int node;
1662 ++ hctx->queue_num = hctx_idx;
1663 ++
1664 ++ cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
1665 ++
1666 ++ hctx->tags = set->tags[hctx_idx];
1667 ++
1668 ++ if (set->ops->init_hctx &&
1669 ++ set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1670 ++ goto unregister_cpu_notifier;
1671 +
1672 +- node = hctx->numa_node;
1673 ++ if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
1674 ++ hctx->numa_node))
1675 ++ goto exit_hctx;
1676 ++ return 0;
1677 ++
1678 ++ exit_hctx:
1679 ++ if (set->ops->exit_hctx)
1680 ++ set->ops->exit_hctx(hctx, hctx_idx);
1681 ++ unregister_cpu_notifier:
1682 ++ blk_mq_remove_cpuhp(hctx);
1683 ++ return -1;
1684 ++}
1685 ++
1686 ++static struct blk_mq_hw_ctx *
1687 ++blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
1688 ++ int node)
1689 ++{
1690 ++ struct blk_mq_hw_ctx *hctx;
1691 ++ gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
1692 ++
1693 ++ hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
1694 ++ if (!hctx)
1695 ++ goto fail_alloc_hctx;
1696 ++
1697 ++ if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
1698 ++ goto free_hctx;
1699 ++
1700 ++ atomic_set(&hctx->nr_active, 0);
1701 + if (node == NUMA_NO_NODE)
1702 +- node = hctx->numa_node = set->numa_node;
1703 ++ node = set->numa_node;
1704 ++ hctx->numa_node = node;
1705 +
1706 + INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1707 + spin_lock_init(&hctx->lock);
1708 +@@ -2300,58 +2350,45 @@ static int blk_mq_init_hctx(struct request_queue *q,
1709 + hctx->queue = q;
1710 + hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
1711 +
1712 +- cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
1713 +-
1714 +- hctx->tags = set->tags[hctx_idx];
1715 +-
1716 + /*
1717 + * Allocate space for all possible cpus to avoid allocation at
1718 + * runtime
1719 + */
1720 + hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
1721 +- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
1722 ++ gfp, node);
1723 + if (!hctx->ctxs)
1724 +- goto unregister_cpu_notifier;
1725 ++ goto free_cpumask;
1726 +
1727 + if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
1728 +- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node))
1729 ++ gfp, node))
1730 + goto free_ctxs;
1731 +-
1732 + hctx->nr_ctx = 0;
1733 +
1734 + spin_lock_init(&hctx->dispatch_wait_lock);
1735 + init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
1736 + INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
1737 +
1738 +- if (set->ops->init_hctx &&
1739 +- set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1740 +- goto free_bitmap;
1741 +-
1742 + hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
1743 +- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
1744 ++ gfp);
1745 + if (!hctx->fq)
1746 +- goto exit_hctx;
1747 +-
1748 +- if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
1749 +- goto free_fq;
1750 ++ goto free_bitmap;
1751 +
1752 + if (hctx->flags & BLK_MQ_F_BLOCKING)
1753 + init_srcu_struct(hctx->srcu);
1754 ++ blk_mq_hctx_kobj_init(hctx);
1755 +
1756 +- return 0;
1757 ++ return hctx;
1758 +
1759 +- free_fq:
1760 +- blk_free_flush_queue(hctx->fq);
1761 +- exit_hctx:
1762 +- if (set->ops->exit_hctx)
1763 +- set->ops->exit_hctx(hctx, hctx_idx);
1764 + free_bitmap:
1765 + sbitmap_free(&hctx->ctx_map);
1766 + free_ctxs:
1767 + kfree(hctx->ctxs);
1768 +- unregister_cpu_notifier:
1769 +- blk_mq_remove_cpuhp(hctx);
1770 +- return -1;
1771 ++ free_cpumask:
1772 ++ free_cpumask_var(hctx->cpumask);
1773 ++ free_hctx:
1774 ++ kfree(hctx);
1775 ++ fail_alloc_hctx:
1776 ++ return NULL;
1777 + }
1778 +
1779 + static void blk_mq_init_cpu_queues(struct request_queue *q,
1780 +@@ -2695,51 +2732,25 @@ struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
1781 + }
1782 + EXPORT_SYMBOL(blk_mq_init_sq_queue);
1783 +
1784 +-static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
1785 +-{
1786 +- int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
1787 +-
1788 +- BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
1789 +- __alignof__(struct blk_mq_hw_ctx)) !=
1790 +- sizeof(struct blk_mq_hw_ctx));
1791 +-
1792 +- if (tag_set->flags & BLK_MQ_F_BLOCKING)
1793 +- hw_ctx_size += sizeof(struct srcu_struct);
1794 +-
1795 +- return hw_ctx_size;
1796 +-}
1797 +-
1798 + static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
1799 + struct blk_mq_tag_set *set, struct request_queue *q,
1800 + int hctx_idx, int node)
1801 + {
1802 + struct blk_mq_hw_ctx *hctx;
1803 +
1804 +- hctx = kzalloc_node(blk_mq_hw_ctx_size(set),
1805 +- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1806 +- node);
1807 ++ hctx = blk_mq_alloc_hctx(q, set, node);
1808 + if (!hctx)
1809 +- return NULL;
1810 +-
1811 +- if (!zalloc_cpumask_var_node(&hctx->cpumask,
1812 +- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1813 +- node)) {
1814 +- kfree(hctx);
1815 +- return NULL;
1816 +- }
1817 +-
1818 +- atomic_set(&hctx->nr_active, 0);
1819 +- hctx->numa_node = node;
1820 +- hctx->queue_num = hctx_idx;
1821 ++ goto fail;
1822 +
1823 +- if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) {
1824 +- free_cpumask_var(hctx->cpumask);
1825 +- kfree(hctx);
1826 +- return NULL;
1827 +- }
1828 +- blk_mq_hctx_kobj_init(hctx);
1829 ++ if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
1830 ++ goto free_hctx;
1831 +
1832 + return hctx;
1833 ++
1834 ++ free_hctx:
1835 ++ kobject_put(&hctx->kobj);
1836 ++ fail:
1837 ++ return NULL;
1838 + }
1839 +
1840 + static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
1841 +diff --git a/block/blk.h b/block/blk.h
1842 +index 5d636ee41663..e27fd1512e4b 100644
1843 +--- a/block/blk.h
1844 ++++ b/block/blk.h
1845 +@@ -75,7 +75,7 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
1846 +
1847 + if (addr1 + vec1->bv_len != addr2)
1848 + return false;
1849 +- if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2))
1850 ++ if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
1851 + return false;
1852 + if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
1853 + return false;
1854 +diff --git a/block/genhd.c b/block/genhd.c
1855 +index 703267865f14..d8dff0b21f7d 100644
1856 +--- a/block/genhd.c
1857 ++++ b/block/genhd.c
1858 +@@ -531,6 +531,18 @@ void blk_free_devt(dev_t devt)
1859 + }
1860 + }
1861 +
1862 ++/**
1863 ++ * We invalidate a devt by assigning a NULL pointer to its entry in the idr.
1864 ++ */
1865 ++void blk_invalidate_devt(dev_t devt)
1866 ++{
1867 ++ if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
1868 ++ spin_lock_bh(&ext_devt_lock);
1869 ++ idr_replace(&ext_devt_idr, NULL, blk_mangle_minor(MINOR(devt)));
1870 ++ spin_unlock_bh(&ext_devt_lock);
1871 ++ }
1872 ++}
1873 ++
1874 + static char *bdevt_str(dev_t devt, char *buf)
1875 + {
1876 + if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
1877 +@@ -793,6 +805,13 @@ void del_gendisk(struct gendisk *disk)
1878 +
1879 + if (!(disk->flags & GENHD_FL_HIDDEN))
1880 + blk_unregister_region(disk_devt(disk), disk->minors);
1881 ++ /*
1882 ++	 * Remove the gendisk pointer from the idr so that it cannot be
1883 ++	 * looked up while the RCU grace period before freeing the gendisk
1884 ++	 * is running, preventing use-after-free. Note that the device
1885 ++	 * number stays "in-use" until we really free the gendisk.
1886 ++ */
1887 ++ blk_invalidate_devt(disk_devt(disk));
1888 +
1889 + kobject_put(disk->part0.holder_dir);
1890 + kobject_put(disk->slave_dir);
1891 +diff --git a/block/partition-generic.c b/block/partition-generic.c
1892 +index 8e596a8dff32..aee643ce13d1 100644
1893 +--- a/block/partition-generic.c
1894 ++++ b/block/partition-generic.c
1895 +@@ -285,6 +285,13 @@ void delete_partition(struct gendisk *disk, int partno)
1896 + kobject_put(part->holder_dir);
1897 + device_del(part_to_dev(part));
1898 +
1899 ++ /*
1900 ++	 * Remove the gendisk pointer from the idr so that it cannot be
1901 ++	 * looked up while the RCU grace period before freeing the gendisk
1902 ++	 * is running, preventing use-after-free. Note that the device
1903 ++	 * number stays "in-use" until we really free the gendisk.
1904 ++ */
1905 ++ blk_invalidate_devt(part_devt(part));
1906 + hd_struct_kill(part);
1907 + }
1908 +
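Both call sites use the same idea: keep the device number allocated but stop lookups
from returning the dying object while its RCU grace period runs. A toy model of that
idr_replace(..., NULL, ...) step; the plain array stands in for the idr:

    #include <stdio.h>

    #define SLOTS 8
    static void *slot[SLOTS];               /* stands in for ext_devt_idr */

    static void invalidate(int id) { slot[id] = NULL; }  /* idr_replace(..., NULL, id) */
    static void *lookup(int id)    { return slot[id]; }

    int main(void)
    {
            int gendisk = 42;               /* placeholder object */

            slot[3] = &gendisk;
            invalidate(3);                  /* number 3 is still "allocated"... */
            printf("lookup(3) = %p\n", lookup(3)); /* ...but returns NULL, not a stale pointer */
            return 0;
    }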
1909 +diff --git a/block/sed-opal.c b/block/sed-opal.c
1910 +index e0de4dd448b3..119640897293 100644
1911 +--- a/block/sed-opal.c
1912 ++++ b/block/sed-opal.c
1913 +@@ -2095,13 +2095,16 @@ static int opal_erase_locking_range(struct opal_dev *dev,
1914 + static int opal_enable_disable_shadow_mbr(struct opal_dev *dev,
1915 + struct opal_mbr_data *opal_mbr)
1916 + {
1917 ++ u8 enable_disable = opal_mbr->enable_disable == OPAL_MBR_ENABLE ?
1918 ++ OPAL_TRUE : OPAL_FALSE;
1919 ++
1920 + const struct opal_step mbr_steps[] = {
1921 + { opal_discovery0, },
1922 + { start_admin1LSP_opal_session, &opal_mbr->key },
1923 +- { set_mbr_done, &opal_mbr->enable_disable },
1924 ++ { set_mbr_done, &enable_disable },
1925 + { end_opal_session, },
1926 + { start_admin1LSP_opal_session, &opal_mbr->key },
1927 +- { set_mbr_enable_disable, &opal_mbr->enable_disable },
1928 ++ { set_mbr_enable_disable, &enable_disable },
1929 + { end_opal_session, },
1930 + { NULL, }
1931 + };
1932 +@@ -2221,7 +2224,7 @@ static int __opal_lock_unlock(struct opal_dev *dev,
1933 +
1934 + static int __opal_set_mbr_done(struct opal_dev *dev, struct opal_key *key)
1935 + {
1936 +- u8 mbr_done_tf = 1;
1937 ++ u8 mbr_done_tf = OPAL_TRUE;
1938 + const struct opal_step mbrdone_step [] = {
1939 + { opal_discovery0, },
1940 + { start_admin1LSP_opal_session, key },
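The sed-opal fix translates the ioctl-level flag into the on-the-wire boolean token
instead of forwarding it raw; the two encodings disagree (OPAL_MBR_ENABLE is 0 while
the TRUE token is 1). A sketch of the translation, with the enum values assumed to
mirror the uapi sed-opal header:

    #include <stdio.h>

    /* Assumed encodings, mirroring include/uapi/linux/sed-opal.h. */
    enum opal_mbr   { OPAL_MBR_ENABLE = 0x0, OPAL_MBR_DISABLE = 0x1 };
    enum opal_token { OPAL_FALSE = 0x0, OPAL_TRUE = 0x1 };

    static unsigned int mbr_token(enum opal_mbr flag)
    {
            /* Forwarding the flag raw would send FALSE for "enable". */
            return flag == OPAL_MBR_ENABLE ? OPAL_TRUE : OPAL_FALSE;
    }

    int main(void)
    {
            printf("enable  -> token %u\n", mbr_token(OPAL_MBR_ENABLE));   /* 1 */
            printf("disable -> token %u\n", mbr_token(OPAL_MBR_DISABLE));  /* 0 */
            return 0;
    }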
1941 +diff --git a/crypto/hmac.c b/crypto/hmac.c
1942 +index e74730224f0a..4b8c8ee8f15c 100644
1943 +--- a/crypto/hmac.c
1944 ++++ b/crypto/hmac.c
1945 +@@ -168,6 +168,8 @@ static int hmac_init_tfm(struct crypto_tfm *tfm)
1946 +
1947 + parent->descsize = sizeof(struct shash_desc) +
1948 + crypto_shash_descsize(hash);
1949 ++ if (WARN_ON(parent->descsize > HASH_MAX_DESCSIZE))
1950 ++ return -EINVAL;
1951 +
1952 + ctx->hash = hash;
1953 + return 0;
1954 +diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
1955 +index e48894e002ba..a46c2c162c03 100644
1956 +--- a/drivers/acpi/arm64/iort.c
1957 ++++ b/drivers/acpi/arm64/iort.c
1958 +@@ -1232,18 +1232,24 @@ static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
1959 + /*
1960 + * set numa proximity domain for smmuv3 device
1961 + */
1962 +-static void __init arm_smmu_v3_set_proximity(struct device *dev,
1963 ++static int __init arm_smmu_v3_set_proximity(struct device *dev,
1964 + struct acpi_iort_node *node)
1965 + {
1966 + struct acpi_iort_smmu_v3 *smmu;
1967 +
1968 + smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1969 + if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
1970 +- set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
1971 ++ int node = acpi_map_pxm_to_node(smmu->pxm);
1972 ++
1973 ++ if (node != NUMA_NO_NODE && !node_online(node))
1974 ++ return -EINVAL;
1975 ++
1976 ++ set_dev_node(dev, node);
1977 + pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
1978 + smmu->base_address,
1979 + smmu->pxm);
1980 + }
1981 ++ return 0;
1982 + }
1983 + #else
1984 + #define arm_smmu_v3_set_proximity NULL
1985 +@@ -1318,7 +1324,7 @@ struct iort_dev_config {
1986 + int (*dev_count_resources)(struct acpi_iort_node *node);
1987 + void (*dev_init_resources)(struct resource *res,
1988 + struct acpi_iort_node *node);
1989 +- void (*dev_set_proximity)(struct device *dev,
1990 ++ int (*dev_set_proximity)(struct device *dev,
1991 + struct acpi_iort_node *node);
1992 + };
1993 +
1994 +@@ -1369,8 +1375,11 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
1995 + if (!pdev)
1996 + return -ENOMEM;
1997 +
1998 +- if (ops->dev_set_proximity)
1999 +- ops->dev_set_proximity(&pdev->dev, node);
2000 ++ if (ops->dev_set_proximity) {
2001 ++ ret = ops->dev_set_proximity(&pdev->dev, node);
2002 ++ if (ret)
2003 ++ goto dev_put;
2004 ++ }
2005 +
2006 + count = ops->dev_count_resources(node);
2007 +
2008 +diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
2009 +index 77abe0ec4043..bd533f68b1de 100644
2010 +--- a/drivers/acpi/property.c
2011 ++++ b/drivers/acpi/property.c
2012 +@@ -1031,6 +1031,14 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
2013 + const struct acpi_data_node *data = to_acpi_data_node(fwnode);
2014 + struct acpi_data_node *dn;
2015 +
2016 ++ /*
2017 ++ * We can have a combination of device and data nodes, e.g. with
2018 ++ * hierarchical _DSD properties. Make sure the adev pointer is
2019 ++ * restored before going through data nodes, otherwise we will
2020 ++ * be looking for data_nodes below the last device found instead
2021 ++ * of the common fwnode shared by device_nodes and data_nodes.
2022 ++ */
2023 ++ adev = to_acpi_device_node(fwnode);
2024 + if (adev)
2025 + head = &adev->data.subnodes;
2026 + else if (data)
2027 +diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
2028 +index f80d298de3fa..8ad20ed0cb7c 100644
2029 +--- a/drivers/base/power/main.c
2030 ++++ b/drivers/base/power/main.c
2031 +@@ -1747,6 +1747,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
2032 + if (dev->power.syscore)
2033 + goto Complete;
2034 +
2035 ++ /* Avoid direct_complete to let wakeup_path propagate. */
2036 ++ if (device_may_wakeup(dev) || dev->power.wakeup_path)
2037 ++ dev->power.direct_complete = false;
2038 ++
2039 + if (dev->power.direct_complete) {
2040 + if (pm_runtime_status_suspended(dev)) {
2041 + pm_runtime_disable(dev);
2042 +diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
2043 +index d5d6e6e5da3b..62d3aa2b26f6 100644
2044 +--- a/drivers/bluetooth/btbcm.c
2045 ++++ b/drivers/bluetooth/btbcm.c
2046 +@@ -37,6 +37,7 @@
2047 + #define BDADDR_BCM43430A0 (&(bdaddr_t) {{0xac, 0x1f, 0x12, 0xa0, 0x43, 0x43}})
2048 + #define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
2049 + #define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}})
2050 ++#define BDADDR_BCM43341B (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0x1b, 0x34, 0x43}})
2051 +
2052 + int btbcm_check_bdaddr(struct hci_dev *hdev)
2053 + {
2054 +@@ -82,7 +83,8 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
2055 + !bacmp(&bda->bdaddr, BDADDR_BCM20702A1) ||
2056 + !bacmp(&bda->bdaddr, BDADDR_BCM4324B3) ||
2057 + !bacmp(&bda->bdaddr, BDADDR_BCM4330B1) ||
2058 +- !bacmp(&bda->bdaddr, BDADDR_BCM43430A0)) {
2059 ++ !bacmp(&bda->bdaddr, BDADDR_BCM43430A0) ||
2060 ++ !bacmp(&bda->bdaddr, BDADDR_BCM43341B)) {
2061 + bt_dev_info(hdev, "BCM: Using default device address (%pMR)",
2062 + &bda->bdaddr);
2063 + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
2064 +diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
2065 +index b0b680dd69f4..f5dbeec8e274 100644
2066 +--- a/drivers/bluetooth/btmtkuart.c
2067 ++++ b/drivers/bluetooth/btmtkuart.c
2068 +@@ -661,7 +661,7 @@ static int btmtkuart_change_baudrate(struct hci_dev *hdev)
2069 + {
2070 + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
2071 + struct btmtk_hci_wmt_params wmt_params;
2072 +- u32 baudrate;
2073 ++ __le32 baudrate;
2074 + u8 param;
2075 + int err;
2076 +
2077 +diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
2078 +index 237aea34b69f..d3b467792eb3 100644
2079 +--- a/drivers/bluetooth/hci_qca.c
2080 ++++ b/drivers/bluetooth/hci_qca.c
2081 +@@ -508,6 +508,8 @@ static int qca_open(struct hci_uart *hu)
2082 + qcadev = serdev_device_get_drvdata(hu->serdev);
2083 + if (qcadev->btsoc_type != QCA_WCN3990) {
2084 + gpiod_set_value_cansleep(qcadev->bt_en, 1);
2085 ++		/* Controller needs time to boot up. */
2086 ++ msleep(150);
2087 + } else {
2088 + hu->init_speed = qcadev->init_speed;
2089 + hu->oper_speed = qcadev->oper_speed;
2090 +@@ -992,7 +994,8 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
2091 + while (!skb_queue_empty(&qca->txq))
2092 + usleep_range(100, 200);
2093 +
2094 +- serdev_device_wait_until_sent(hu->serdev,
2095 ++ if (hu->serdev)
2096 ++ serdev_device_wait_until_sent(hu->serdev,
2097 + msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
2098 +
2099 + /* Give the controller time to process the request */
2100 +diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
2101 +index b65ff6962899..e9b6ac61fb7f 100644
2102 +--- a/drivers/char/hw_random/omap-rng.c
2103 ++++ b/drivers/char/hw_random/omap-rng.c
2104 +@@ -443,6 +443,7 @@ static int omap_rng_probe(struct platform_device *pdev)
2105 + priv->rng.read = omap_rng_do_read;
2106 + priv->rng.init = omap_rng_init;
2107 + priv->rng.cleanup = omap_rng_cleanup;
2108 ++ priv->rng.quality = 900;
2109 +
2110 + priv->rng.priv = (unsigned long)priv;
2111 + platform_set_drvdata(pdev, priv);
2112 +diff --git a/drivers/char/random.c b/drivers/char/random.c
2113 +index 38c6d1af6d1c..af6e240f98ff 100644
2114 +--- a/drivers/char/random.c
2115 ++++ b/drivers/char/random.c
2116 +@@ -777,6 +777,7 @@ static struct crng_state **crng_node_pool __read_mostly;
2117 + #endif
2118 +
2119 + static void invalidate_batched_entropy(void);
2120 ++static void numa_crng_init(void);
2121 +
2122 + static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
2123 + static int __init parse_trust_cpu(char *arg)
2124 +@@ -805,7 +806,9 @@ static void crng_initialize(struct crng_state *crng)
2125 + }
2126 + crng->state[i] ^= rv;
2127 + }
2128 +- if (trust_cpu && arch_init) {
2129 ++ if (trust_cpu && arch_init && crng == &primary_crng) {
2130 ++ invalidate_batched_entropy();
2131 ++ numa_crng_init();
2132 + crng_init = 2;
2133 + pr_notice("random: crng done (trusting CPU's manufacturer)\n");
2134 + }
2135 +@@ -2211,8 +2214,8 @@ struct batched_entropy {
2136 + u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
2137 + };
2138 + unsigned int position;
2139 ++ spinlock_t batch_lock;
2140 + };
2141 +-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
2142 +
2143 + /*
2144 + * Get a random word for internal kernel use only. The quality of the random
2145 +@@ -2222,12 +2225,14 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
2146 + * wait_for_random_bytes() should be called and return 0 at least once
2147 + * at any point prior.
2148 + */
2149 +-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
2150 ++static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
2151 ++ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
2152 ++};
2153 ++
2154 + u64 get_random_u64(void)
2155 + {
2156 + u64 ret;
2157 +- bool use_lock;
2158 +- unsigned long flags = 0;
2159 ++ unsigned long flags;
2160 + struct batched_entropy *batch;
2161 + static void *previous;
2162 +
2163 +@@ -2242,28 +2247,25 @@ u64 get_random_u64(void)
2164 +
2165 + warn_unseeded_randomness(&previous);
2166 +
2167 +- use_lock = READ_ONCE(crng_init) < 2;
2168 +- batch = &get_cpu_var(batched_entropy_u64);
2169 +- if (use_lock)
2170 +- read_lock_irqsave(&batched_entropy_reset_lock, flags);
2171 ++ batch = raw_cpu_ptr(&batched_entropy_u64);
2172 ++ spin_lock_irqsave(&batch->batch_lock, flags);
2173 + if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
2174 + extract_crng((u8 *)batch->entropy_u64);
2175 + batch->position = 0;
2176 + }
2177 + ret = batch->entropy_u64[batch->position++];
2178 +- if (use_lock)
2179 +- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2180 +- put_cpu_var(batched_entropy_u64);
2181 ++ spin_unlock_irqrestore(&batch->batch_lock, flags);
2182 + return ret;
2183 + }
2184 + EXPORT_SYMBOL(get_random_u64);
2185 +
2186 +-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
2187 ++static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
2188 ++ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
2189 ++};
2190 + u32 get_random_u32(void)
2191 + {
2192 + u32 ret;
2193 +- bool use_lock;
2194 +- unsigned long flags = 0;
2195 ++ unsigned long flags;
2196 + struct batched_entropy *batch;
2197 + static void *previous;
2198 +
2199 +@@ -2272,18 +2274,14 @@ u32 get_random_u32(void)
2200 +
2201 + warn_unseeded_randomness(&previous);
2202 +
2203 +- use_lock = READ_ONCE(crng_init) < 2;
2204 +- batch = &get_cpu_var(batched_entropy_u32);
2205 +- if (use_lock)
2206 +- read_lock_irqsave(&batched_entropy_reset_lock, flags);
2207 ++ batch = raw_cpu_ptr(&batched_entropy_u32);
2208 ++ spin_lock_irqsave(&batch->batch_lock, flags);
2209 + if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
2210 + extract_crng((u8 *)batch->entropy_u32);
2211 + batch->position = 0;
2212 + }
2213 + ret = batch->entropy_u32[batch->position++];
2214 +- if (use_lock)
2215 +- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2216 +- put_cpu_var(batched_entropy_u32);
2217 ++ spin_unlock_irqrestore(&batch->batch_lock, flags);
2218 + return ret;
2219 + }
2220 + EXPORT_SYMBOL(get_random_u32);
2221 +@@ -2297,12 +2295,19 @@ static void invalidate_batched_entropy(void)
2222 + int cpu;
2223 + unsigned long flags;
2224 +
2225 +- write_lock_irqsave(&batched_entropy_reset_lock, flags);
2226 + for_each_possible_cpu (cpu) {
2227 +- per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
2228 +- per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
2229 ++ struct batched_entropy *batched_entropy;
2230 ++
2231 ++ batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
2232 ++ spin_lock_irqsave(&batched_entropy->batch_lock, flags);
2233 ++ batched_entropy->position = 0;
2234 ++ spin_unlock(&batched_entropy->batch_lock);
2235 ++
2236 ++ batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
2237 ++ spin_lock(&batched_entropy->batch_lock);
2238 ++ batched_entropy->position = 0;
2239 ++ spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
2240 + }
2241 +- write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2242 + }
2243 +
2244 + /**
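The random.c rework above replaces one global reader/writer lock with a spinlock
embedded in each per-CPU batch, so the common path only ever touches its own lock.
A single-threaded model of the new fill-and-consume path; a pthread mutex stands in
for spinlock_t and a zero-fill stands in for extract_crng(), purely to keep the
sketch runnable:

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    #define BATCH_WORDS 8

    struct batched_entropy {
            unsigned long long entropy[BATCH_WORDS];
            unsigned int position;
            pthread_mutex_t batch_lock;     /* was a global rwlock before the fix */
    };

    static struct batched_entropy batch = {
            .batch_lock = PTHREAD_MUTEX_INITIALIZER,
    };

    static unsigned long long get_random_u64_model(void)
    {
            unsigned long long ret;

            pthread_mutex_lock(&batch.batch_lock);
            if (batch.position % BATCH_WORDS == 0) {
                    /* extract_crng() in the kernel; zero-fill keeps this runnable */
                    memset(batch.entropy, 0, sizeof(batch.entropy));
                    batch.position = 0;
            }
            ret = batch.entropy[batch.position++];
            pthread_mutex_unlock(&batch.batch_lock);
            return ret;
    }

    int main(void)
    {
            printf("%llu\n", get_random_u64_model());
            return 0;
    }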
2245 +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
2246 +index fbeb71953526..05dbfdb9f4af 100644
2247 +--- a/drivers/char/virtio_console.c
2248 ++++ b/drivers/char/virtio_console.c
2249 +@@ -75,7 +75,7 @@ struct ports_driver_data {
2250 + /* All the console devices handled by this driver */
2251 + struct list_head consoles;
2252 + };
2253 +-static struct ports_driver_data pdrvdata;
2254 ++static struct ports_driver_data pdrvdata = { .next_vtermno = 1};
2255 +
2256 + static DEFINE_SPINLOCK(pdrvdata_lock);
2257 + static DECLARE_COMPLETION(early_console_added);
2258 +@@ -1394,6 +1394,7 @@ static int add_port(struct ports_device *portdev, u32 id)
2259 + port->async_queue = NULL;
2260 +
2261 + port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
2262 ++ port->cons.vtermno = 0;
2263 +
2264 + port->host_connected = port->guest_connected = false;
2265 + port->stats = (struct port_stats) { 0 };
2266 +diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
2267 +index 4d92b27a6153..7a4c5957939a 100644
2268 +--- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c
2269 ++++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
2270 +@@ -123,8 +123,8 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
2271 + DEF_MOD("msiof2", 209, R8A774A1_CLK_MSO),
2272 + DEF_MOD("msiof1", 210, R8A774A1_CLK_MSO),
2273 + DEF_MOD("msiof0", 211, R8A774A1_CLK_MSO),
2274 +- DEF_MOD("sys-dmac2", 217, R8A774A1_CLK_S0D3),
2275 +- DEF_MOD("sys-dmac1", 218, R8A774A1_CLK_S0D3),
2276 ++ DEF_MOD("sys-dmac2", 217, R8A774A1_CLK_S3D1),
2277 ++ DEF_MOD("sys-dmac1", 218, R8A774A1_CLK_S3D1),
2278 + DEF_MOD("sys-dmac0", 219, R8A774A1_CLK_S0D3),
2279 + DEF_MOD("cmt3", 300, R8A774A1_CLK_R),
2280 + DEF_MOD("cmt2", 301, R8A774A1_CLK_R),
2281 +@@ -143,8 +143,8 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
2282 + DEF_MOD("rwdt", 402, R8A774A1_CLK_R),
2283 + DEF_MOD("intc-ex", 407, R8A774A1_CLK_CP),
2284 + DEF_MOD("intc-ap", 408, R8A774A1_CLK_S0D3),
2285 +- DEF_MOD("audmac1", 501, R8A774A1_CLK_S0D3),
2286 +- DEF_MOD("audmac0", 502, R8A774A1_CLK_S0D3),
2287 ++ DEF_MOD("audmac1", 501, R8A774A1_CLK_S1D2),
2288 ++ DEF_MOD("audmac0", 502, R8A774A1_CLK_S1D2),
2289 + DEF_MOD("hscif4", 516, R8A774A1_CLK_S3D1),
2290 + DEF_MOD("hscif3", 517, R8A774A1_CLK_S3D1),
2291 + DEF_MOD("hscif2", 518, R8A774A1_CLK_S3D1),
2292 +diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
2293 +index 34e274f2a273..93dacd826fd0 100644
2294 +--- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c
2295 ++++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
2296 +@@ -157,7 +157,7 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
2297 + DEF_MOD("intc-ex", 407, R8A774C0_CLK_CP),
2298 + DEF_MOD("intc-ap", 408, R8A774C0_CLK_S0D3),
2299 +
2300 +- DEF_MOD("audmac0", 502, R8A774C0_CLK_S3D4),
2301 ++ DEF_MOD("audmac0", 502, R8A774C0_CLK_S1D2),
2302 + DEF_MOD("hscif4", 516, R8A774C0_CLK_S3D1C),
2303 + DEF_MOD("hscif3", 517, R8A774C0_CLK_S3D1C),
2304 + DEF_MOD("hscif2", 518, R8A774C0_CLK_S3D1C),
2305 +diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
2306 +index 86842c9fd314..0825cd0ff286 100644
2307 +--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
2308 ++++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
2309 +@@ -129,8 +129,8 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
2310 + DEF_MOD("msiof2", 209, R8A7795_CLK_MSO),
2311 + DEF_MOD("msiof1", 210, R8A7795_CLK_MSO),
2312 + DEF_MOD("msiof0", 211, R8A7795_CLK_MSO),
2313 +- DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S0D3),
2314 +- DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S0D3),
2315 ++ DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S3D1),
2316 ++ DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S3D1),
2317 + DEF_MOD("sys-dmac0", 219, R8A7795_CLK_S0D3),
2318 + DEF_MOD("sceg-pub", 229, R8A7795_CLK_CR),
2319 + DEF_MOD("cmt3", 300, R8A7795_CLK_R),
2320 +@@ -153,8 +153,8 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
2321 + DEF_MOD("rwdt", 402, R8A7795_CLK_R),
2322 + DEF_MOD("intc-ex", 407, R8A7795_CLK_CP),
2323 + DEF_MOD("intc-ap", 408, R8A7795_CLK_S0D3),
2324 +- DEF_MOD("audmac1", 501, R8A7795_CLK_S0D3),
2325 +- DEF_MOD("audmac0", 502, R8A7795_CLK_S0D3),
2326 ++ DEF_MOD("audmac1", 501, R8A7795_CLK_S1D2),
2327 ++ DEF_MOD("audmac0", 502, R8A7795_CLK_S1D2),
2328 + DEF_MOD("drif7", 508, R8A7795_CLK_S3D2),
2329 + DEF_MOD("drif6", 509, R8A7795_CLK_S3D2),
2330 + DEF_MOD("drif5", 510, R8A7795_CLK_S3D2),
2331 +diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
2332 +index 12c455859f2c..997cd956f12b 100644
2333 +--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
2334 ++++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
2335 +@@ -126,8 +126,8 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
2336 + DEF_MOD("msiof2", 209, R8A7796_CLK_MSO),
2337 + DEF_MOD("msiof1", 210, R8A7796_CLK_MSO),
2338 + DEF_MOD("msiof0", 211, R8A7796_CLK_MSO),
2339 +- DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S0D3),
2340 +- DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S0D3),
2341 ++ DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S3D1),
2342 ++ DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S3D1),
2343 + DEF_MOD("sys-dmac0", 219, R8A7796_CLK_S0D3),
2344 + DEF_MOD("cmt3", 300, R8A7796_CLK_R),
2345 + DEF_MOD("cmt2", 301, R8A7796_CLK_R),
2346 +@@ -146,8 +146,8 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
2347 + DEF_MOD("rwdt", 402, R8A7796_CLK_R),
2348 + DEF_MOD("intc-ex", 407, R8A7796_CLK_CP),
2349 + DEF_MOD("intc-ap", 408, R8A7796_CLK_S0D3),
2350 +- DEF_MOD("audmac1", 501, R8A7796_CLK_S0D3),
2351 +- DEF_MOD("audmac0", 502, R8A7796_CLK_S0D3),
2352 ++ DEF_MOD("audmac1", 501, R8A7796_CLK_S1D2),
2353 ++ DEF_MOD("audmac0", 502, R8A7796_CLK_S1D2),
2354 + DEF_MOD("drif7", 508, R8A7796_CLK_S3D2),
2355 + DEF_MOD("drif6", 509, R8A7796_CLK_S3D2),
2356 + DEF_MOD("drif5", 510, R8A7796_CLK_S3D2),
2357 +diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
2358 +index eb1cca58a1e1..afc9c72fa094 100644
2359 +--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
2360 ++++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
2361 +@@ -123,8 +123,8 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
2362 + DEF_MOD("msiof2", 209, R8A77965_CLK_MSO),
2363 + DEF_MOD("msiof1", 210, R8A77965_CLK_MSO),
2364 + DEF_MOD("msiof0", 211, R8A77965_CLK_MSO),
2365 +- DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S0D3),
2366 +- DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S0D3),
2367 ++ DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S3D1),
2368 ++ DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S3D1),
2369 + DEF_MOD("sys-dmac0", 219, R8A77965_CLK_S0D3),
2370 +
2371 + DEF_MOD("cmt3", 300, R8A77965_CLK_R),
2372 +@@ -146,8 +146,8 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
2373 + DEF_MOD("intc-ex", 407, R8A77965_CLK_CP),
2374 + DEF_MOD("intc-ap", 408, R8A77965_CLK_S0D3),
2375 +
2376 +- DEF_MOD("audmac1", 501, R8A77965_CLK_S0D3),
2377 +- DEF_MOD("audmac0", 502, R8A77965_CLK_S0D3),
2378 ++ DEF_MOD("audmac1", 501, R8A77965_CLK_S1D2),
2379 ++ DEF_MOD("audmac0", 502, R8A77965_CLK_S1D2),
2380 + DEF_MOD("drif7", 508, R8A77965_CLK_S3D2),
2381 + DEF_MOD("drif6", 509, R8A77965_CLK_S3D2),
2382 + DEF_MOD("drif5", 510, R8A77965_CLK_S3D2),
2383 +diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c
2384 +index 9a278c75c918..03f445d47ef6 100644
2385 +--- a/drivers/clk/renesas/r8a77990-cpg-mssr.c
2386 ++++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c
2387 +@@ -152,7 +152,7 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
2388 + DEF_MOD("intc-ex", 407, R8A77990_CLK_CP),
2389 + DEF_MOD("intc-ap", 408, R8A77990_CLK_S0D3),
2390 +
2391 +- DEF_MOD("audmac0", 502, R8A77990_CLK_S3D4),
2392 ++ DEF_MOD("audmac0", 502, R8A77990_CLK_S1D2),
2393 + DEF_MOD("drif7", 508, R8A77990_CLK_S3D2),
2394 + DEF_MOD("drif6", 509, R8A77990_CLK_S3D2),
2395 + DEF_MOD("drif5", 510, R8A77990_CLK_S3D2),
2396 +diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
2397 +index eee3874865a9..68707277b17b 100644
2398 +--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
2399 ++++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
2400 +@@ -133,7 +133,7 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
2401 + DEF_MOD("rwdt", 402, R8A77995_CLK_R),
2402 + DEF_MOD("intc-ex", 407, R8A77995_CLK_CP),
2403 + DEF_MOD("intc-ap", 408, R8A77995_CLK_S1D2),
2404 +- DEF_MOD("audmac0", 502, R8A77995_CLK_S3D1),
2405 ++ DEF_MOD("audmac0", 502, R8A77995_CLK_S1D2),
2406 + DEF_MOD("hscif3", 517, R8A77995_CLK_S3D1C),
2407 + DEF_MOD("hscif0", 520, R8A77995_CLK_S3D1C),
2408 + DEF_MOD("thermal", 522, R8A77995_CLK_CP),
2409 +diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
2410 +index 5a67b7869960..355d6a3611db 100644
2411 +--- a/drivers/clk/rockchip/clk-rk3288.c
2412 ++++ b/drivers/clk/rockchip/clk-rk3288.c
2413 +@@ -219,7 +219,7 @@ PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
2414 + PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" };
2415 + PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" };
2416 +
2417 +-PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vepu", "aclk_vdpu" };
2418 ++PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vdpu", "aclk_vepu" };
2419 + PNAME(mux_usbphy480m_p) = { "sclk_otgphy1_480m", "sclk_otgphy2_480m",
2420 + "sclk_otgphy0_480m" };
2421 + PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy480m_src" };
2422 +@@ -313,13 +313,13 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
2423 + COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", CLK_IGNORE_UNUSED,
2424 + RK3288_CLKSEL_CON(0), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
2425 + RK3288_CLKGATE_CON(12), 6, GFLAGS),
2426 +- COMPOSITE_NOMUX(0, "atclk", "armclk", CLK_IGNORE_UNUSED,
2427 ++ COMPOSITE_NOMUX(0, "atclk", "armclk", 0,
2428 + RK3288_CLKSEL_CON(37), 4, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
2429 + RK3288_CLKGATE_CON(12), 7, GFLAGS),
2430 + COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", CLK_IGNORE_UNUSED,
2431 + RK3288_CLKSEL_CON(37), 9, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
2432 + RK3288_CLKGATE_CON(12), 8, GFLAGS),
2433 +- GATE(0, "pclk_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
2434 ++ GATE(0, "pclk_dbg", "pclk_dbg_pre", 0,
2435 + RK3288_CLKGATE_CON(12), 9, GFLAGS),
2436 + GATE(0, "cs_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
2437 + RK3288_CLKGATE_CON(12), 10, GFLAGS),
2438 +@@ -420,7 +420,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
2439 + COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb480m_p, 0,
2440 + RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
2441 + RK3288_CLKGATE_CON(3), 11, GFLAGS),
2442 +- MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, 0,
2443 ++ MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT,
2444 + RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS),
2445 + GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0,
2446 + RK3288_CLKGATE_CON(9), 0, GFLAGS),
2447 +@@ -647,7 +647,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
2448 + INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out",
2449 + RK3288_CLKSEL_CON(22), 7, IFLAGS),
2450 +
2451 +- GATE(0, "jtag", "ext_jtag", CLK_IGNORE_UNUSED,
2452 ++ GATE(0, "jtag", "ext_jtag", 0,
2453 + RK3288_CLKGATE_CON(4), 14, GFLAGS),
2454 +
2455 + COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0,
2456 +@@ -656,7 +656,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
2457 + COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
2458 + RK3288_CLKSEL_CON(29), 0, 2, MFLAGS,
2459 + RK3288_CLKGATE_CON(3), 6, GFLAGS),
2460 +- GATE(0, "hsicphy12m_xin12m", "xin12m", CLK_IGNORE_UNUSED,
2461 ++ GATE(0, "hsicphy12m_xin12m", "xin12m", 0,
2462 + RK3288_CLKGATE_CON(13), 9, GFLAGS),
2463 + DIV(0, "hsicphy12m_usbphy", "sclk_hsicphy480m", 0,
2464 + RK3288_CLKSEL_CON(11), 8, 6, DFLAGS),
2465 +@@ -697,7 +697,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
2466 + GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS),
2467 + GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS),
2468 + GATE(PCLK_EFUSE256, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS),
2469 +- GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 11, GFLAGS),
2470 ++ GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 11, GFLAGS),
2471 +
2472 + /* ddrctrl [DDR Controller PHY clock] gates */
2473 + GATE(0, "nclk_ddrupctl0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 4, GFLAGS),
2474 +@@ -837,12 +837,9 @@ static const char *const rk3288_critical_clocks[] __initconst = {
2475 + "pclk_alive_niu",
2476 + "pclk_pd_pmu",
2477 + "pclk_pmu_niu",
2478 +- "pclk_core_niu",
2479 +- "pclk_ddrupctl0",
2480 +- "pclk_publ0",
2481 +- "pclk_ddrupctl1",
2482 +- "pclk_publ1",
2483 + "pmu_hclk_otg0",
2484 ++	/* supplies pwm-regulators on some boards, so keep it critical until handoff */
2485 ++	"pclk_rkpwm",
2486 + };
2487 +
2488 + static void __iomem *rk3288_cru_base;
2489 +diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
2490 +index a371c66e72ef..bd9b5fbc443b 100644
2491 +--- a/drivers/clk/zynqmp/divider.c
2492 ++++ b/drivers/clk/zynqmp/divider.c
2493 +@@ -31,12 +31,14 @@
2494 + * struct zynqmp_clk_divider - adjustable divider clock
2495 + * @hw: handle between common and hardware-specific interfaces
2496 + * @flags: Hardware specific flags
2497 ++ * @is_frac: The divider is a fractional divider
2498 + * @clk_id: Id of clock
2499 + * @div_type: divisor type (TYPE_DIV1 or TYPE_DIV2)
2500 + */
2501 + struct zynqmp_clk_divider {
2502 + struct clk_hw hw;
2503 + u8 flags;
2504 ++ bool is_frac;
2505 + u32 clk_id;
2506 + u32 div_type;
2507 + };
2508 +@@ -116,8 +118,7 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
2509 +
2510 + bestdiv = zynqmp_divider_get_val(*prate, rate);
2511 +
2512 +- if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) &&
2513 +- (divider->flags & CLK_FRAC))
2514 ++ if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
2515 + bestdiv = rate % *prate ? 1 : bestdiv;
2516 + *prate = rate * bestdiv;
2517 +
2518 +@@ -195,11 +196,13 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
2519 +
2520 + init.name = name;
2521 + init.ops = &zynqmp_clk_divider_ops;
2522 +- init.flags = nodes->flag;
2523 ++ /* CLK_FRAC is not defined in the common clk framework */
2524 ++ init.flags = nodes->flag & ~CLK_FRAC;
2525 + init.parent_names = parents;
2526 + init.num_parents = 1;
2527 +
2528 + /* struct clk_divider assignments */
2529 ++ div->is_frac = !!(nodes->flag & CLK_FRAC);
2530 + div->flags = nodes->type_flag;
2531 + div->hw.init = &init;
2532 + div->clk_id = clk_id;
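The zynqmp change stops leaking a driver-private flag bit into the common clock
framework: CLK_FRAC is masked out of init.flags and remembered in a driver field
instead. A small sketch of that split; the flag value here is a placeholder, not the
real bit assignment:

    #include <stdio.h>

    #define CLK_FRAC (1u << 8)      /* placeholder bit for the driver-private flag */

    struct divider { int is_frac; unsigned int common_flags; };

    static void register_divider(struct divider *div, unsigned int node_flags)
    {
            div->is_frac = !!(node_flags & CLK_FRAC);       /* keep it driver-side */
            div->common_flags = node_flags & ~CLK_FRAC;     /* never hand it to the framework */
    }

    int main(void)
    {
            struct divider d;
            register_divider(&d, CLK_FRAC | 0x1);
            printf("is_frac=%d common_flags=%#x\n", d.is_frac, d.common_flags);
            return 0;
    }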
2533 +diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
2534 +index b3f4bd647e9b..988ebc326bdb 100644
2535 +--- a/drivers/cpufreq/armada-8k-cpufreq.c
2536 ++++ b/drivers/cpufreq/armada-8k-cpufreq.c
2537 +@@ -132,6 +132,7 @@ static int __init armada_8k_cpufreq_init(void)
2538 + of_node_put(node);
2539 + return -ENODEV;
2540 + }
2541 ++ of_node_put(node);
2542 +
2543 + nb_cpus = num_possible_cpus();
2544 + freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
2545 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
2546 +index e10922709d13..bbf79544d0ad 100644
2547 +--- a/drivers/cpufreq/cpufreq.c
2548 ++++ b/drivers/cpufreq/cpufreq.c
2549 +@@ -1098,6 +1098,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
2550 + cpufreq_global_kobject, "policy%u", cpu);
2551 + if (ret) {
2552 + pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
2553 ++ kobject_put(&policy->kobj);
2554 + goto err_free_real_cpus;
2555 + }
2556 +
2557 +diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
2558 +index ffa9adeaba31..9d1d9bf02710 100644
2559 +--- a/drivers/cpufreq/cpufreq_governor.c
2560 ++++ b/drivers/cpufreq/cpufreq_governor.c
2561 +@@ -459,6 +459,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
2562 + /* Failure, so roll back. */
2563 + pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
2564 +
2565 ++ kobject_put(&dbs_data->attr_set.kobj);
2566 ++
2567 + policy->governor_data = NULL;
2568 +
2569 + if (!have_governor_per_policy())
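Both cpufreq hunks apply the same rule: once kobject_init_and_add() has been
attempted, the kobject holds a reference that only kobject_put() releases, even when
the add step fails. A minimal model of why the put cannot be skipped; plain counters
stand in for the kobject machinery:

    #include <stdio.h>

    struct kobj_model { int refcount; };

    static int init_and_add(struct kobj_model *k, int fail)
    {
            k->refcount = 1;        /* init takes the initial reference unconditionally */
            return fail ? -1 : 0;   /* ...even when the "add" step fails */
    }

    static void put(struct kobj_model *k) { k->refcount--; }

    int main(void)
    {
            struct kobj_model k;

            if (init_and_add(&k, 1))
                    put(&k);        /* the fix: drop the reference on the error path */
            printf("refcount=%d\n", k.refcount);    /* 0: release would now run */
            return 0;
    }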
2570 +diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
2571 +index a4ff09f91c8f..3e17560b1efe 100644
2572 +--- a/drivers/cpufreq/imx6q-cpufreq.c
2573 ++++ b/drivers/cpufreq/imx6q-cpufreq.c
2574 +@@ -388,11 +388,11 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
2575 + ret = imx6ul_opp_check_speed_grading(cpu_dev);
2576 + if (ret) {
2577 + if (ret == -EPROBE_DEFER)
2578 +- return ret;
2579 ++ goto put_node;
2580 +
2581 + dev_err(cpu_dev, "failed to read ocotp: %d\n",
2582 + ret);
2583 +- return ret;
2584 ++ goto put_node;
2585 + }
2586 + } else {
2587 + imx6q_opp_check_speed_grading(cpu_dev);
2588 +diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
2589 +index c2dd43f3f5d8..8d63a6dc8383 100644
2590 +--- a/drivers/cpufreq/kirkwood-cpufreq.c
2591 ++++ b/drivers/cpufreq/kirkwood-cpufreq.c
2592 +@@ -124,13 +124,14 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
2593 + priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
2594 + if (IS_ERR(priv.cpu_clk)) {
2595 + dev_err(priv.dev, "Unable to get cpuclk\n");
2596 +- return PTR_ERR(priv.cpu_clk);
2597 ++ err = PTR_ERR(priv.cpu_clk);
2598 ++ goto out_node;
2599 + }
2600 +
2601 + err = clk_prepare_enable(priv.cpu_clk);
2602 + if (err) {
2603 + dev_err(priv.dev, "Unable to prepare cpuclk\n");
2604 +- return err;
2605 ++ goto out_node;
2606 + }
2607 +
2608 + kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
2609 +@@ -161,20 +162,22 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
2610 + goto out_ddr;
2611 + }
2612 +
2613 +- of_node_put(np);
2614 +- np = NULL;
2615 +-
2616 + err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
2617 +- if (!err)
2618 +- return 0;
2619 ++ if (err) {
2620 ++ dev_err(priv.dev, "Failed to register cpufreq driver\n");
2621 ++ goto out_powersave;
2622 ++ }
2623 +
2624 +- dev_err(priv.dev, "Failed to register cpufreq driver\n");
2625 ++ of_node_put(np);
2626 ++ return 0;
2627 +
2628 ++out_powersave:
2629 + clk_disable_unprepare(priv.powersave_clk);
2630 + out_ddr:
2631 + clk_disable_unprepare(priv.ddr_clk);
2632 + out_cpu:
2633 + clk_disable_unprepare(priv.cpu_clk);
2634 ++out_node:
2635 + of_node_put(np);
2636 +
2637 + return err;
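The kirkwood probe rework is the classic goto-ladder: each label releases exactly
what was acquired before the failure point, in reverse order, and of_node_put() now
runs on every path. A compilable sketch of the shape, with acquire/release reduced
to printf stubs:

    #include <stdio.h>

    static int acquire(const char *what, int fail)
    {
            if (fail)
                    return -1;
            printf("acquire %s\n", what);
            return 0;
    }
    static void release(const char *what) { printf("release %s\n", what); }

    static int probe(int fail_at)
    {
            int err = -1;

            if (acquire("node", fail_at == 0))      goto out;
            if (acquire("cpu_clk", fail_at == 1))   goto out_node;
            if (acquire("ddr_clk", fail_at == 2))   goto out_cpu;
            if (acquire("driver", fail_at == 3))    goto out_ddr;
            return 0;

    out_ddr:  release("ddr_clk");
    out_cpu:  release("cpu_clk");
    out_node: release("node");      /* of_node_put() in the real driver */
    out:
            return err;
    }

    int main(void) { return probe(3) ? 1 : 0; }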
2638 +diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
2639 +index 75dfbd2a58ea..c7710c149de8 100644
2640 +--- a/drivers/cpufreq/pasemi-cpufreq.c
2641 ++++ b/drivers/cpufreq/pasemi-cpufreq.c
2642 +@@ -146,6 +146,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
2643 +
2644 + cpu = of_get_cpu_node(policy->cpu, NULL);
2645 +
2646 ++ of_node_put(cpu);
2647 + if (!cpu)
2648 + goto out;
2649 +
2650 +diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
2651 +index 52f0d91d30c1..9b4ce2eb8222 100644
2652 +--- a/drivers/cpufreq/pmac32-cpufreq.c
2653 ++++ b/drivers/cpufreq/pmac32-cpufreq.c
2654 +@@ -552,6 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
2655 + volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
2656 + if (volt_gpio_np)
2657 + voltage_gpio = read_gpio(volt_gpio_np);
2658 ++ of_node_put(volt_gpio_np);
2659 + if (!voltage_gpio){
2660 + pr_err("missing cpu-vcore-select gpio\n");
2661 + return 1;
2662 +@@ -588,6 +589,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
2663 + if (volt_gpio_np)
2664 + voltage_gpio = read_gpio(volt_gpio_np);
2665 +
2666 ++ of_node_put(volt_gpio_np);
2667 + pvr = mfspr(SPRN_PVR);
2668 + has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
2669 +
2670 +diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
2671 +index 41a0f0be3f9f..8414c3a4ea08 100644
2672 +--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
2673 ++++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
2674 +@@ -86,6 +86,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
2675 + if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
2676 + !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
2677 + pr_info("invalid CBE regs pointers for cpufreq\n");
2678 ++ of_node_put(cpu);
2679 + return -EINVAL;
2680 + }
2681 +
2682 +diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
2683 +index a4b5ff2b72f8..f6936bb3b7be 100644
2684 +--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
2685 ++++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
2686 +@@ -240,7 +240,10 @@ static int sun4i_hash(struct ahash_request *areq)
2687 + }
2688 + } else {
2689 + /* Since we have the flag final, we can go up to modulo 4 */
2690 +- end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
2691 ++ if (areq->nbytes < 4)
2692 ++ end = 0;
2693 ++ else
2694 ++ end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
2695 + }
2696 +
2697 + /* TODO if SGlen % 4 and !op->len then DMA */
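The sun4i-ss guard exists because the old expression is computed in unsigned
arithmetic: when nbytes + len < 4 the rounded-down product is 0, and subtracting len
wraps around instead of going negative. A two-line demonstration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int nbytes = 2, len = 1;       /* small final update */
            unsigned int end = ((nbytes + len) / 4) * 4 - len;

            printf("end = %u\n", end);  /* 4294967295, not -1: hence the nbytes < 4 guard */
            return 0;
    }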
2698 +diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
2699 +index de78282b8f44..9c6b5c1d6a1a 100644
2700 +--- a/drivers/crypto/vmx/aesp8-ppc.pl
2701 ++++ b/drivers/crypto/vmx/aesp8-ppc.pl
2702 +@@ -1357,7 +1357,7 @@ Loop_ctr32_enc:
2703 + addi $idx,$idx,16
2704 + bdnz Loop_ctr32_enc
2705 +
2706 +- vadduwm $ivec,$ivec,$one
2707 ++ vadduqm $ivec,$ivec,$one
2708 + vmr $dat,$inptail
2709 + lvx $inptail,0,$inp
2710 + addi $inp,$inp,16
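The one-instruction aesp8-ppc.pl fix matters because vadduwm adds within 32-bit
lanes while a CTR counter is a single 128-bit integer: when the low word is all
ones, the carry must cross the lane boundary, which only the quadword add (vadduqm)
does. The same effect shown with 32- versus 64-bit C arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t ctr = 0xFFFFFFFFull;           /* low 32 bits about to roll over */

            uint32_t lane_add = (uint32_t)ctr + 1;  /* per-lane add: wraps to 0, carry lost */
            uint64_t wide_add = ctr + 1;            /* wide add: carry reaches the next word */

            printf("lane=%u wide=0x%llx\n", lane_add, (unsigned long long)wide_add);
            return 0;
    }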
2711 +diff --git a/drivers/dax/super.c b/drivers/dax/super.c
2712 +index 0a339b85133e..d7f2257f2568 100644
2713 +--- a/drivers/dax/super.c
2714 ++++ b/drivers/dax/super.c
2715 +@@ -73,22 +73,12 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
2716 + EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
2717 + #endif
2718 +
2719 +-/**
2720 +- * __bdev_dax_supported() - Check if the device supports dax for filesystem
2721 +- * @bdev: block device to check
2722 +- * @blocksize: The block size of the device
2723 +- *
2724 +- * This is a library function for filesystems to check if the block device
2725 +- * can be mounted with dax option.
2726 +- *
2727 +- * Return: true if supported, false if unsupported
2728 +- */
2729 +-bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
2730 ++bool __generic_fsdax_supported(struct dax_device *dax_dev,
2731 ++ struct block_device *bdev, int blocksize, sector_t start,
2732 ++ sector_t sectors)
2733 + {
2734 +- struct dax_device *dax_dev;
2735 + bool dax_enabled = false;
2736 + pgoff_t pgoff, pgoff_end;
2737 +- struct request_queue *q;
2738 + char buf[BDEVNAME_SIZE];
2739 + void *kaddr, *end_kaddr;
2740 + pfn_t pfn, end_pfn;
2741 +@@ -102,21 +92,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
2742 + return false;
2743 + }
2744 +
2745 +- q = bdev_get_queue(bdev);
2746 +- if (!q || !blk_queue_dax(q)) {
2747 +- pr_debug("%s: error: request queue doesn't support dax\n",
2748 +- bdevname(bdev, buf));
2749 +- return false;
2750 +- }
2751 +-
2752 +- err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
2753 ++ err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
2754 + if (err) {
2755 + pr_debug("%s: error: unaligned partition for dax\n",
2756 + bdevname(bdev, buf));
2757 + return false;
2758 + }
2759 +
2760 +- last_page = PFN_DOWN(i_size_read(bdev->bd_inode) - 1) * 8;
2761 ++ last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
2762 + err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
2763 + if (err) {
2764 + pr_debug("%s: error: unaligned partition for dax\n",
2765 +@@ -124,20 +107,11 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
2766 + return false;
2767 + }
2768 +
2769 +- dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
2770 +- if (!dax_dev) {
2771 +- pr_debug("%s: error: device does not support dax\n",
2772 +- bdevname(bdev, buf));
2773 +- return false;
2774 +- }
2775 +-
2776 + id = dax_read_lock();
2777 + len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
2778 + len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
2779 + dax_read_unlock(id);
2780 +
2781 +- put_dax(dax_dev);
2782 +-
2783 + if (len < 1 || len2 < 1) {
2784 + pr_debug("%s: error: dax access failed (%ld)\n",
2785 + bdevname(bdev, buf), len < 1 ? len : len2);
2786 +@@ -178,6 +152,49 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
2787 + }
2788 + return true;
2789 + }
2790 ++EXPORT_SYMBOL_GPL(__generic_fsdax_supported);
2791 ++
2792 ++/**
2793 ++ * __bdev_dax_supported() - Check if the device supports dax for filesystem
2794 ++ * @bdev: block device to check
2795 ++ * @blocksize: The block size of the device
2796 ++ *
2797 ++ * This is a library function for filesystems to check if the block device
2798 ++ * can be mounted with dax option.
2799 ++ *
2800 ++ * Return: true if supported, false if unsupported
2801 ++ */
2802 ++bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
2803 ++{
2804 ++ struct dax_device *dax_dev;
2805 ++ struct request_queue *q;
2806 ++ char buf[BDEVNAME_SIZE];
2807 ++ bool ret;
2808 ++ int id;
2809 ++
2810 ++ q = bdev_get_queue(bdev);
2811 ++ if (!q || !blk_queue_dax(q)) {
2812 ++ pr_debug("%s: error: request queue doesn't support dax\n",
2813 ++ bdevname(bdev, buf));
2814 ++ return false;
2815 ++ }
2816 ++
2817 ++ dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
2818 ++ if (!dax_dev) {
2819 ++ pr_debug("%s: error: device does not support dax\n",
2820 ++ bdevname(bdev, buf));
2821 ++ return false;
2822 ++ }
2823 ++
2824 ++ id = dax_read_lock();
2825 ++ ret = dax_supported(dax_dev, bdev, blocksize, 0,
2826 ++ i_size_read(bdev->bd_inode) / 512);
2827 ++ dax_read_unlock(id);
2828 ++
2829 ++ put_dax(dax_dev);
2830 ++
2831 ++ return ret;
2832 ++}
2833 + EXPORT_SYMBOL_GPL(__bdev_dax_supported);
2834 + #endif
2835 +
2836 +@@ -303,6 +320,15 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
2837 + }
2838 + EXPORT_SYMBOL_GPL(dax_direct_access);
2839 +
2840 ++bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
2841 ++ int blocksize, sector_t start, sector_t len)
2842 ++{
2843 ++ if (!dax_alive(dax_dev))
2844 ++ return false;
2845 ++
2846 ++ return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
2847 ++}
2848 ++
2849 + size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
2850 + size_t bytes, struct iov_iter *i)
2851 + {
2852 +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
2853 +index 0ae3de76833b..839621b044f4 100644
2854 +--- a/drivers/devfreq/devfreq.c
2855 ++++ b/drivers/devfreq/devfreq.c
2856 +@@ -228,7 +228,7 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)
2857 + * if it is not found. This can happen when both drivers (the governor driver
2858 + * and the driver that calls devfreq_add_device) are built as modules.
2859 + * devfreq_list_lock should be held by the caller. Returns the matched
2860 +- * governor's pointer.
2861 ++ * governor's pointer or an error pointer.
2862 + */
2863 + static struct devfreq_governor *try_then_request_governor(const char *name)
2864 + {
2865 +@@ -254,7 +254,7 @@ static struct devfreq_governor *try_then_request_governor(const char *name)
2866 + /* Restore previous state before return */
2867 + mutex_lock(&devfreq_list_lock);
2868 + if (err)
2869 +- return NULL;
2870 ++ return ERR_PTR(err);
2871 +
2872 + governor = find_devfreq_governor(name);
2873 + }
2874 +diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
2875 +index fe69dccfa0c0..37a269420435 100644
2876 +--- a/drivers/dma/at_xdmac.c
2877 ++++ b/drivers/dma/at_xdmac.c
2878 +@@ -1606,7 +1606,11 @@ static void at_xdmac_tasklet(unsigned long data)
2879 + struct at_xdmac_desc,
2880 + xfer_node);
2881 + dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
2882 +- BUG_ON(!desc->active_xfer);
2883 ++ if (!desc->active_xfer) {
2884 ++ dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
2885 ++ spin_unlock_bh(&atchan->lock);
2886 ++ return;
2887 ++ }
2888 +
2889 + txd = &desc->tx_dma_desc;
2890 +
2891 +diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
2892 +index eec79fdf27a5..56695ffb5d37 100644
2893 +--- a/drivers/dma/pl330.c
2894 ++++ b/drivers/dma/pl330.c
2895 +@@ -966,6 +966,7 @@ static void _stop(struct pl330_thread *thrd)
2896 + {
2897 + void __iomem *regs = thrd->dmac->base;
2898 + u8 insn[6] = {0, 0, 0, 0, 0, 0};
2899 ++ u32 inten = readl(regs + INTEN);
2900 +
2901 + if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
2902 + UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
2903 +@@ -978,10 +979,13 @@ static void _stop(struct pl330_thread *thrd)
2904 +
2905 + _emit_KILL(0, insn);
2906 +
2907 +- /* Stop generating interrupts for SEV */
2908 +- writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
2909 +-
2910 + _execute_DBGINSN(thrd, insn, is_manager(thrd));
2911 ++
2912 ++ /* clear the event */
2913 ++ if (inten & (1 << thrd->ev))
2914 ++ writel(1 << thrd->ev, regs + INTCLR);
2915 ++ /* Stop generating interrupts for SEV */
2916 ++ writel(inten & ~(1 << thrd->ev), regs + INTEN);
2917 + }
2918 +
2919 + /* Start doing req 'idx' of thread 'thrd' */
2920 +diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
2921 +index 5ec0dd97b397..1477cce33dbe 100644
2922 +--- a/drivers/dma/tegra210-adma.c
2923 ++++ b/drivers/dma/tegra210-adma.c
2924 +@@ -22,7 +22,6 @@
2925 + #include <linux/of_device.h>
2926 + #include <linux/of_dma.h>
2927 + #include <linux/of_irq.h>
2928 +-#include <linux/pm_clock.h>
2929 + #include <linux/pm_runtime.h>
2930 + #include <linux/slab.h>
2931 +
2932 +@@ -141,6 +140,7 @@ struct tegra_adma {
2933 + struct dma_device dma_dev;
2934 + struct device *dev;
2935 + void __iomem *base_addr;
2936 ++ struct clk *ahub_clk;
2937 + unsigned int nr_channels;
2938 + unsigned long rx_requests_reserved;
2939 + unsigned long tx_requests_reserved;
2940 +@@ -637,8 +637,9 @@ static int tegra_adma_runtime_suspend(struct device *dev)
2941 + struct tegra_adma *tdma = dev_get_drvdata(dev);
2942 +
2943 + tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
2944 ++ clk_disable_unprepare(tdma->ahub_clk);
2945 +
2946 +- return pm_clk_suspend(dev);
2947 ++ return 0;
2948 + }
2949 +
2950 + static int tegra_adma_runtime_resume(struct device *dev)
2951 +@@ -646,10 +647,11 @@ static int tegra_adma_runtime_resume(struct device *dev)
2952 + struct tegra_adma *tdma = dev_get_drvdata(dev);
2953 + int ret;
2954 +
2955 +- ret = pm_clk_resume(dev);
2956 +- if (ret)
2957 ++ ret = clk_prepare_enable(tdma->ahub_clk);
2958 ++ if (ret) {
2959 ++ dev_err(dev, "ahub clk_enable failed: %d\n", ret);
2960 + return ret;
2961 +-
2962 ++ }
2963 + tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
2964 +
2965 + return 0;
2966 +@@ -693,13 +695,11 @@ static int tegra_adma_probe(struct platform_device *pdev)
2967 + if (IS_ERR(tdma->base_addr))
2968 + return PTR_ERR(tdma->base_addr);
2969 +
2970 +- ret = pm_clk_create(&pdev->dev);
2971 +- if (ret)
2972 +- return ret;
2973 +-
2974 +- ret = of_pm_clk_add_clk(&pdev->dev, "d_audio");
2975 +- if (ret)
2976 +- goto clk_destroy;
2977 ++ tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
2978 ++ if (IS_ERR(tdma->ahub_clk)) {
2979 ++ dev_err(&pdev->dev, "Error: Missing ahub controller clock\n");
2980 ++ return PTR_ERR(tdma->ahub_clk);
2981 ++ }
2982 +
2983 + pm_runtime_enable(&pdev->dev);
2984 +
2985 +@@ -776,8 +776,6 @@ rpm_put:
2986 + pm_runtime_put_sync(&pdev->dev);
2987 + rpm_disable:
2988 + pm_runtime_disable(&pdev->dev);
2989 +-clk_destroy:
2990 +- pm_clk_destroy(&pdev->dev);
2991 +
2992 + return ret;
2993 + }
2994 +@@ -787,6 +785,7 @@ static int tegra_adma_remove(struct platform_device *pdev)
2995 + struct tegra_adma *tdma = platform_get_drvdata(pdev);
2996 + int i;
2997 +
2998 ++ of_dma_controller_free(pdev->dev.of_node);
2999 + dma_async_device_unregister(&tdma->dma_dev);
3000 +
3001 + for (i = 0; i < tdma->nr_channels; ++i)
3002 +@@ -794,7 +793,6 @@ static int tegra_adma_remove(struct platform_device *pdev)
3003 +
3004 + pm_runtime_put_sync(&pdev->dev);
3005 + pm_runtime_disable(&pdev->dev);
3006 +- pm_clk_destroy(&pdev->dev);
3007 +
3008 + return 0;
3009 + }
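The tegra210-adma hunks replace the pm_clk framework with an explicit "d_audio" clock: it is looked up once with devm_clk_get() at probe and gated/ungated in the runtime-PM callbacks, which removes the pm_clk_create()/pm_clk_destroy() bookkeeping and the clk_destroy error label entirely. A condensed kernel-context sketch of the resulting callback pair, assuming the driver's tdma_read()/tdma_write() accessors:

static int adma_runtime_suspend(struct device *dev)
{
        struct tegra_adma *tdma = dev_get_drvdata(dev);

        tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);    /* save state */
        clk_disable_unprepare(tdma->ahub_clk);                  /* gate clock */
        return 0;
}

static int adma_runtime_resume(struct device *dev)
{
        struct tegra_adma *tdma = dev_get_drvdata(dev);
        int ret = clk_prepare_enable(tdma->ahub_clk);

        if (ret)
                return ret;
        tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);    /* restore */
        return 0;
}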
3010 +diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
3011 +index 540e8cd16ee6..db3bcf96b98f 100644
3012 +--- a/drivers/extcon/Kconfig
3013 ++++ b/drivers/extcon/Kconfig
3014 +@@ -30,7 +30,7 @@ config EXTCON_ARIZONA
3015 +
3016 + config EXTCON_AXP288
3017 + tristate "X-Power AXP288 EXTCON support"
3018 +- depends on MFD_AXP20X && USB_SUPPORT && X86
3019 ++ depends on MFD_AXP20X && USB_SUPPORT && X86 && ACPI
3020 + select USB_ROLE_SWITCH
3021 + help
3022 + Say Y here to enable support for USB peripheral detection
3023 +diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
3024 +index da0e9bc4262f..9327479c719c 100644
3025 +--- a/drivers/extcon/extcon-arizona.c
3026 ++++ b/drivers/extcon/extcon-arizona.c
3027 +@@ -1726,6 +1726,16 @@ static int arizona_extcon_remove(struct platform_device *pdev)
3028 + struct arizona_extcon_info *info = platform_get_drvdata(pdev);
3029 + struct arizona *arizona = info->arizona;
3030 + int jack_irq_rise, jack_irq_fall;
3031 ++ bool change;
3032 ++
3033 ++ regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
3034 ++ ARIZONA_MICD_ENA, 0,
3035 ++ &change);
3036 ++
3037 ++ if (change) {
3038 ++ regulator_disable(info->micvdd);
3039 ++ pm_runtime_put(info->dev);
3040 ++ }
3041 +
3042 + gpiod_put(info->micd_pol_gpio);
3043 +
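The extcon-arizona hunk makes remove() undo a still-running microphone detection: regmap_update_bits_check() clears the enable bit and reports through its bool pointer whether the bit was actually set, so the matching regulator_disable()/pm_runtime_put() run exactly once. The general shape of that balanced-teardown pattern, with register and mask names as stand-ins:

/* Sketch: clear an enable bit and learn whether it had been set. */
bool was_enabled;

regmap_update_bits_check(map, REG_MIC_DETECT, MICD_ENA_MASK, 0, &was_enabled);
if (was_enabled) {
        /* undo the enable-time acquisitions exactly once */
        regulator_disable(micvdd);
        pm_runtime_put(dev);
}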
3044 +diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
3045 +index 466da5954a68..62bf9da25e4b 100644
3046 +--- a/drivers/gpu/drm/amd/amdgpu/Makefile
3047 ++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
3048 +@@ -23,7 +23,7 @@
3049 + # Makefile for the drm device driver. This driver provides support for the
3050 + # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
3051 +
3052 +-FULL_AMD_PATH=$(src)/..
3053 ++FULL_AMD_PATH=$(srctree)/$(src)/..
3054 + DISPLAY_FOLDER_NAME=display
3055 + FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
3056 +
3057 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
3058 +index ee47c11e92ce..4dee2326b29c 100644
3059 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
3060 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
3061 +@@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
3062 + {
3063 + struct amdgpu_device *adev = ring->adev;
3064 + struct amdgpu_fence *fence;
3065 +- struct dma_fence *old, **ptr;
3066 ++ struct dma_fence __rcu **ptr;
3067 + uint32_t seq;
3068 ++ int r;
3069 +
3070 + fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
3071 + if (fence == NULL)
3072 +@@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
3073 + seq, flags | AMDGPU_FENCE_FLAG_INT);
3074 +
3075 + ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
3076 ++ if (unlikely(rcu_dereference_protected(*ptr, 1))) {
3077 ++ struct dma_fence *old;
3078 ++
3079 ++ rcu_read_lock();
3080 ++ old = dma_fence_get_rcu_safe(ptr);
3081 ++ rcu_read_unlock();
3082 ++
3083 ++ if (old) {
3084 ++ r = dma_fence_wait(old, false);
3085 ++ dma_fence_put(old);
3086 ++ if (r)
3087 ++ return r;
3088 ++ }
3089 ++ }
3090 ++
3091 + /* This function can't be called concurrently anyway, otherwise
3092 + * emitting the fence would mess up the hardware ring buffer.
3093 + */
3094 +- old = rcu_dereference_protected(*ptr, 1);
3095 +- if (old && !dma_fence_is_signaled(old)) {
3096 +- DRM_INFO("rcu slot is busy\n");
3097 +- dma_fence_wait(old, false);
3098 +- }
3099 +-
3100 + rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
3101 +
3102 + *f = &fence->base;
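The amdgpu_fence_emit() hunk replaces the old "rcu slot is busy" wait with a proper drain: if the slot for this sequence number still holds a fence, it is re-acquired safely under RCU, waited on, and any error from the wait is returned instead of being dropped. A kernel-context sketch of the slot-drain pattern built on the dma-fence helpers the hunk uses:

/* Drain a possibly-occupied RCU fence slot before reuse. */
ptr = &fences[seq & num_fences_mask];   /* struct dma_fence __rcu **ptr */
if (rcu_dereference_protected(*ptr, 1)) {
        struct dma_fence *old;

        rcu_read_lock();
        old = dma_fence_get_rcu_safe(ptr);      /* stable ref or NULL */
        rcu_read_unlock();

        if (old) {
                r = dma_fence_wait(old, false);
                dma_fence_put(old);
                if (r)
                        return r;       /* propagate instead of ignoring */
        }
}
rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));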
3103 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3104 +index 3082b55b1e77..0886b36c2344 100644
3105 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3106 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3107 +@@ -3587,6 +3587,8 @@ static void dm_drm_plane_reset(struct drm_plane *plane)
3108 + plane->state = &amdgpu_state->base;
3109 + plane->state->plane = plane;
3110 + plane->state->rotation = DRM_MODE_ROTATE_0;
3111 ++ plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
3112 ++ plane->state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
3113 + }
3114 + }
3115 +
3116 +@@ -4953,8 +4955,7 @@ cleanup:
3117 + static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
3118 + struct dc_stream_state *stream_state)
3119 + {
3120 +- stream_state->mode_changed =
3121 +- crtc_state->mode_changed || crtc_state->active_changed;
3122 ++ stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
3123 + }
3124 +
3125 + static int amdgpu_dm_atomic_commit(struct drm_device *dev,
3126 +@@ -5661,6 +5662,9 @@ skip_modeset:
3127 + update_stream_scaling_settings(
3128 + &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
3129 +
3130 ++ /* ABM settings */
3131 ++ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
3132 ++
3133 + /*
3134 + * Color management settings. We also update color properties
3135 + * when a modeset is needed, to ensure it gets reprogrammed.
3136 +@@ -5858,7 +5862,9 @@ dm_determine_update_type_for_commit(struct dc *dc,
3137 + }
3138 +
3139 + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
3140 +- struct dc_stream_update stream_update = { 0 };
3141 ++ struct dc_stream_update stream_update;
3142 ++
3143 ++ memset(&stream_update, 0, sizeof(stream_update));
3144 +
3145 + new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
3146 + old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
3147 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
3148 +index a6cda201c964..88fe4fb43bfd 100644
3149 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
3150 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
3151 +@@ -524,6 +524,14 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
3152 + struct dc_stream_state *link_stream;
3153 + struct dc_link_settings store_settings = *link_setting;
3154 +
3155 ++ link->preferred_link_setting = store_settings;
3156 ++
3157 ++ /* Retraining with the preferred link settings is only
3158 ++ * relevant for the DP signal type
3159 ++ */
3160 ++ if (!dc_is_dp_signal(link->connector_signal))
3161 ++ return;
3162 ++
3163 + for (i = 0; i < MAX_PIPES; i++) {
3164 + pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3165 + if (pipe->stream && pipe->stream->link) {
3166 +@@ -538,7 +546,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
3167 +
3168 + link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
3169 +
3170 +- link->preferred_link_setting = store_settings;
3171 ++ /* Cannot retrain link if backend is off */
3172 ++ if (link_stream->dpms_off)
3173 ++ return;
3174 ++
3175 + if (link_stream)
3176 + decide_link_settings(link_stream, &store_settings);
3177 +
3178 +@@ -1666,6 +1677,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
3179 + continue;
3180 +
3181 + if (stream_update->dpms_off) {
3182 ++ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
3183 + if (*stream_update->dpms_off) {
3184 + core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
3185 + dc->hwss.optimize_bandwidth(dc, dc->current_state);
3186 +@@ -1673,6 +1685,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
3187 + dc->hwss.prepare_bandwidth(dc, dc->current_state);
3188 + core_link_enable_stream(dc->current_state, pipe_ctx);
3189 + }
3190 ++ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
3191 + }
3192 +
3193 + if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
3194 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
3195 +index ea18e9c2d8ce..419e8de8c0f4 100644
3196 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
3197 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
3198 +@@ -2074,11 +2074,28 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
3199 + }
3200 + }
3201 +
3202 ++static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing)
3203 ++{
3204 ++
3205 ++ uint32_t pxl_clk = timing->pix_clk_100hz;
3206 ++
3207 ++ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3208 ++ pxl_clk /= 2;
3209 ++ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
3210 ++ pxl_clk = pxl_clk * 2 / 3;
3211 ++
3212 ++ if (timing->display_color_depth == COLOR_DEPTH_101010)
3213 ++ pxl_clk = pxl_clk * 10 / 8;
3214 ++ else if (timing->display_color_depth == COLOR_DEPTH_121212)
3215 ++ pxl_clk = pxl_clk * 12 / 8;
3216 ++
3217 ++ return pxl_clk;
3218 ++}
3219 ++
3220 + static bool dp_active_dongle_validate_timing(
3221 + const struct dc_crtc_timing *timing,
3222 + const struct dpcd_caps *dpcd_caps)
3223 + {
3224 +- unsigned int required_pix_clk_100hz = timing->pix_clk_100hz;
3225 + const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps;
3226 +
3227 + switch (dpcd_caps->dongle_type) {
3228 +@@ -2115,13 +2132,6 @@ static bool dp_active_dongle_validate_timing(
3229 + return false;
3230 + }
3231 +
3232 +-
3233 +- /* Check Color Depth and Pixel Clock */
3234 +- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3235 +- required_pix_clk_100hz /= 2;
3236 +- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
3237 +- required_pix_clk_100hz = required_pix_clk_100hz * 2 / 3;
3238 +-
3239 + switch (timing->display_color_depth) {
3240 + case COLOR_DEPTH_666:
3241 + case COLOR_DEPTH_888:
3242 +@@ -2130,14 +2140,11 @@ static bool dp_active_dongle_validate_timing(
3243 + case COLOR_DEPTH_101010:
3244 + if (dongle_caps->dp_hdmi_max_bpc < 10)
3245 + return false;
3246 +- required_pix_clk_100hz = required_pix_clk_100hz * 10 / 8;
3247 + break;
3248 + case COLOR_DEPTH_121212:
3249 + if (dongle_caps->dp_hdmi_max_bpc < 12)
3250 + return false;
3251 +- required_pix_clk_100hz = required_pix_clk_100hz * 12 / 8;
3252 + break;
3253 +-
3254 + case COLOR_DEPTH_141414:
3255 + case COLOR_DEPTH_161616:
3256 + default:
3257 +@@ -2145,7 +2152,7 @@ static bool dp_active_dongle_validate_timing(
3258 + return false;
3259 + }
3260 +
3261 +- if (required_pix_clk_100hz > (dongle_caps->dp_hdmi_max_pixel_clk * 10))
3262 ++ if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk * 10))
3263 + return false;
3264 +
3265 + return true;
3266 +@@ -2166,7 +2173,7 @@ enum dc_status dc_link_validate_mode_timing(
3267 + return DC_OK;
3268 +
3269 + /* Passive Dongle */
3270 +- if (0 != max_pix_clk && timing->pix_clk_100hz > max_pix_clk)
3271 ++ if (max_pix_clk != 0 && get_timing_pixel_clock_100hz(timing) > max_pix_clk)
3272 + return DC_EXCEED_DONGLE_CAP;
3273 +
3274 + /* Active Dongle*/
3275 +@@ -2316,7 +2323,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
3276 + uint32_t denominator;
3277 +
3278 + bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
3279 +- kbps = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 * bpc * 3;
3280 ++ kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
3281 +
3282 + /*
3283 + * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
3284 +@@ -2736,3 +2743,49 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
3285 + }
3286 + }
3287 +
3288 ++uint32_t dc_bandwidth_in_kbps_from_timing(
3289 ++ const struct dc_crtc_timing *timing)
3290 ++{
3291 ++ uint32_t bits_per_channel = 0;
3292 ++ uint32_t kbps;
3293 ++
3294 ++ switch (timing->display_color_depth) {
3295 ++ case COLOR_DEPTH_666:
3296 ++ bits_per_channel = 6;
3297 ++ break;
3298 ++ case COLOR_DEPTH_888:
3299 ++ bits_per_channel = 8;
3300 ++ break;
3301 ++ case COLOR_DEPTH_101010:
3302 ++ bits_per_channel = 10;
3303 ++ break;
3304 ++ case COLOR_DEPTH_121212:
3305 ++ bits_per_channel = 12;
3306 ++ break;
3307 ++ case COLOR_DEPTH_141414:
3308 ++ bits_per_channel = 14;
3309 ++ break;
3310 ++ case COLOR_DEPTH_161616:
3311 ++ bits_per_channel = 16;
3312 ++ break;
3313 ++ default:
3314 ++ break;
3315 ++ }
3316 ++
3317 ++ ASSERT(bits_per_channel != 0);
3318 ++
3319 ++ kbps = timing->pix_clk_100hz / 10;
3320 ++ kbps *= bits_per_channel;
3321 ++
3322 ++ if (timing->flags.Y_ONLY != 1) {
3323 ++ /* Only the Y-only format reduces bandwidth to 1/3 of RGB */
3324 ++ kbps *= 3;
3325 ++ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3326 ++ kbps /= 2;
3327 ++ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
3328 ++ kbps = kbps * 2 / 3;
3329 ++ }
3330 ++
3331 ++ return kbps;
3332 ++
3333 ++}
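The new dc_bandwidth_in_kbps_from_timing() above (hoisted out of dc_link_dp.c below so that dongle validation, MST payload math and link validation all share one formula) multiplies the pixel clock by bits per channel, scales for three components unless the stream is Y-only, and applies the 4:2:0/4:2:2 chroma-subsampling reductions; get_timing_pixel_clock_100hz() applies the matching adjustments on the clock side. A stand-alone recomputation of the same arithmetic, where the enum and parameters are illustrative stand-ins for the dc_crtc_timing fields:

#include <stdint.h>
#include <stdio.h>

enum encoding { RGB444, YCBCR422, YCBCR420 };

static uint32_t bw_kbps(uint32_t pix_clk_100hz, unsigned int bpc,
                        enum encoding enc, int y_only)
{
        uint32_t kbps = pix_clk_100hz / 10 * bpc;   /* one channel, in kbps */

        if (!y_only) {
                kbps *= 3;                   /* three components */
                if (enc == YCBCR420)
                        kbps /= 2;           /* chroma shared by 4 pixels */
                else if (enc == YCBCR422)
                        kbps = kbps * 2 / 3; /* chroma shared by 2 pixels */
        }
        return kbps;
}

int main(void)
{
        /* 594 MHz (4k60) in 100 Hz units, 8 bpc RGB -> 14256000 kbps */
        printf("%u kbps\n", (unsigned)bw_kbps(5940000, 8, RGB444, 0));
        return 0;
}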
3334 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
3335 +index 09d301216076..6809932e80be 100644
3336 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
3337 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
3338 +@@ -1520,53 +1520,6 @@ static bool decide_fallback_link_setting(
3339 + return true;
3340 + }
3341 +
3342 +-static uint32_t bandwidth_in_kbps_from_timing(
3343 +- const struct dc_crtc_timing *timing)
3344 +-{
3345 +- uint32_t bits_per_channel = 0;
3346 +- uint32_t kbps;
3347 +-
3348 +- switch (timing->display_color_depth) {
3349 +- case COLOR_DEPTH_666:
3350 +- bits_per_channel = 6;
3351 +- break;
3352 +- case COLOR_DEPTH_888:
3353 +- bits_per_channel = 8;
3354 +- break;
3355 +- case COLOR_DEPTH_101010:
3356 +- bits_per_channel = 10;
3357 +- break;
3358 +- case COLOR_DEPTH_121212:
3359 +- bits_per_channel = 12;
3360 +- break;
3361 +- case COLOR_DEPTH_141414:
3362 +- bits_per_channel = 14;
3363 +- break;
3364 +- case COLOR_DEPTH_161616:
3365 +- bits_per_channel = 16;
3366 +- break;
3367 +- default:
3368 +- break;
3369 +- }
3370 +-
3371 +- ASSERT(bits_per_channel != 0);
3372 +-
3373 +- kbps = timing->pix_clk_100hz / 10;
3374 +- kbps *= bits_per_channel;
3375 +-
3376 +- if (timing->flags.Y_ONLY != 1) {
3377 +- /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/
3378 +- kbps *= 3;
3379 +- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3380 +- kbps /= 2;
3381 +- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
3382 +- kbps = kbps * 2 / 3;
3383 +- }
3384 +-
3385 +- return kbps;
3386 +-
3387 +-}
3388 +-
3389 + static uint32_t bandwidth_in_kbps_from_link_settings(
3390 + const struct dc_link_settings *link_setting)
3391 + {
3392 +@@ -1607,7 +1560,7 @@ bool dp_validate_mode_timing(
3393 + link_setting = &link->verified_link_cap;
3394 + */
3395 +
3396 +- req_bw = bandwidth_in_kbps_from_timing(timing);
3397 ++ req_bw = dc_bandwidth_in_kbps_from_timing(timing);
3398 + max_bw = bandwidth_in_kbps_from_link_settings(link_setting);
3399 +
3400 + if (req_bw <= max_bw) {
3401 +@@ -1641,7 +1594,7 @@ void decide_link_settings(struct dc_stream_state *stream,
3402 + uint32_t req_bw;
3403 + uint32_t link_bw;
3404 +
3405 +- req_bw = bandwidth_in_kbps_from_timing(&stream->timing);
3406 ++ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
3407 +
3408 + link = stream->link;
3409 +
3410 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
3411 +index 349ab8017776..4c06eb52ab73 100644
3412 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
3413 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
3414 +@@ -1266,10 +1266,12 @@ bool dc_remove_plane_from_context(
3415 + * For head pipe detach surfaces from pipe for tail
3416 + * pipe just zero it out
3417 + */
3418 +- if (!pipe_ctx->top_pipe) {
3419 ++ if (!pipe_ctx->top_pipe ||
3420 ++ (!pipe_ctx->top_pipe->top_pipe &&
3421 ++ pipe_ctx->top_pipe->stream_res.opp != pipe_ctx->stream_res.opp)) {
3422 + pipe_ctx->plane_state = NULL;
3423 + pipe_ctx->bottom_pipe = NULL;
3424 +- } else {
3425 ++ } else {
3426 + memset(pipe_ctx, 0, sizeof(*pipe_ctx));
3427 + }
3428 + }
3429 +diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
3430 +index 8fc223defed4..a83e1c60f9db 100644
3431 +--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
3432 ++++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
3433 +@@ -252,4 +252,6 @@ bool dc_submit_i2c(
3434 + uint32_t link_index,
3435 + struct i2c_command *cmd);
3436 +
3437 ++uint32_t dc_bandwidth_in_kbps_from_timing(
3438 ++ const struct dc_crtc_timing *timing);
3439 + #endif /* DC_LINK_H_ */
3440 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
3441 +index 4fe3664fb495..5ecfcb9ee8a0 100644
3442 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
3443 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
3444 +@@ -377,7 +377,6 @@ static bool acquire(
3445 + struct dce_aux *engine,
3446 + struct ddc *ddc)
3447 + {
3448 +-
3449 + enum gpio_result result;
3450 +
3451 + if (!is_engine_available(engine))
3452 +@@ -458,7 +457,8 @@ int dce_aux_transfer(struct ddc_service *ddc,
3453 + memset(&aux_rep, 0, sizeof(aux_rep));
3454 +
3455 + aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
3456 +- acquire(aux_engine, ddc_pin);
3457 ++ if (!acquire(aux_engine, ddc_pin))
3458 ++ return -1;
3459 +
3460 + if (payload->i2c_over_aux)
3461 + aux_req.type = AUX_TRANSACTION_TYPE_I2C;
3462 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
3463 +index c7642e748297..ce21a290bf3e 100644
3464 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
3465 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
3466 +@@ -406,15 +406,25 @@ void dpp1_dscl_calc_lb_num_partitions(
3467 + int *num_part_y,
3468 + int *num_part_c)
3469 + {
3470 ++ int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a,
3471 ++ lb_bpc, memory_line_size_y, memory_line_size_c, memory_line_size_a;
3472 ++
3473 + int line_size = scl_data->viewport.width < scl_data->recout.width ?
3474 + scl_data->viewport.width : scl_data->recout.width;
3475 + int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
3476 + scl_data->viewport_c.width : scl_data->recout.width;
3477 +- int lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
3478 +- int memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
3479 +- int memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
3480 +- int memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
3481 +- int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a;
3482 ++
3483 ++ if (line_size == 0)
3484 ++ line_size = 1;
3485 ++
3486 ++ if (line_size_c == 0)
3487 ++ line_size_c = 1;
3488 ++
3489 ++
3490 ++ lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
3491 ++ memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
3492 ++ memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
3493 ++ memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
3494 +
3495 + if (lb_config == LB_MEMORY_CONFIG_1) {
3496 + lb_memory_size = 816;
3497 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
3498 +index d1a8f1c302a9..5b551a544e82 100644
3499 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
3500 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
3501 +@@ -1008,9 +1008,14 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
3502 + * to non-preferred front end. If pipe_ctx->stream is not NULL,
3503 + * we will use the pipe, so don't disable
3504 + */
3505 +- if (pipe_ctx->stream != NULL)
3506 ++ if (pipe_ctx->stream != NULL &&
3507 ++ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
3508 ++ pipe_ctx->stream_res.tg))
3509 + continue;
3510 +
3511 ++ /* Disable on the current state so the new one isn't cleared. */
3512 ++ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
3513 ++
3514 + dpp->funcs->dpp_reset(dpp);
3515 +
3516 + pipe_ctx->stream_res.tg = tg;
3517 +@@ -2692,9 +2697,15 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3518 + .rotation = pipe_ctx->plane_state->rotation,
3519 + .mirror = pipe_ctx->plane_state->horizontal_mirror
3520 + };
3521 +-
3522 +- pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x;
3523 +- pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y;
3524 ++ uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x;
3525 ++ uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y;
3526 ++ uint32_t x_offset = min(x_plane, pos_cpy.x);
3527 ++ uint32_t y_offset = min(y_plane, pos_cpy.y);
3528 ++
3529 ++ pos_cpy.x -= x_offset;
3530 ++ pos_cpy.y -= y_offset;
3531 ++ pos_cpy.x_hotspot += (x_plane - x_offset);
3532 ++ pos_cpy.y_hotspot += (y_plane - y_offset);
3533 +
3534 + if (pipe_ctx->plane_state->address.type
3535 + == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
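The dcn10_set_cursor_position() hunk above guards the hotspot math against unsigned underflow: instead of adding the plane's dst_rect origin straight into the hotspot, it shifts position and hotspot by min(plane origin, cursor position), which keeps both subtractions non-negative when the cursor sits left of or above the plane origin. The one-axis version of that adjustment, with a local MIN macro standing in for the kernel's min():

#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* One axis; the hunk applies the same math to x and y. */
static void adjust_cursor(uint32_t plane_origin, uint32_t *pos,
                          uint32_t *hotspot)
{
        uint32_t off = MIN(plane_origin, *pos);

        *pos -= off;                    /* safe: off <= *pos */
        *hotspot += plane_origin - off; /* safe: off <= plane_origin */
}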
3536 +diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
3537 +index 0fbc8fbc3541..a1055413bade 100644
3538 +--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
3539 ++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
3540 +@@ -1854,6 +1854,8 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
3541 + coordinates_x, axis_x, curve,
3542 + MAX_HW_POINTS, tf_pts,
3543 + mapUserRamp && ramp && ramp->type == GAMMA_RGB_256);
3544 ++ if (ramp->type == GAMMA_CUSTOM)
3545 ++ apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
3546 +
3547 + ret = true;
3548 +
3549 +diff --git a/drivers/gpu/drm/arm/display/komeda/Makefile b/drivers/gpu/drm/arm/display/komeda/Makefile
3550 +index 1b875e5dc0f6..a72e30c0e03d 100644
3551 +--- a/drivers/gpu/drm/arm/display/komeda/Makefile
3552 ++++ b/drivers/gpu/drm/arm/display/komeda/Makefile
3553 +@@ -1,8 +1,8 @@
3554 + # SPDX-License-Identifier: GPL-2.0
3555 +
3556 + ccflags-y := \
3557 +- -I$(src)/../include \
3558 +- -I$(src)
3559 ++ -I $(srctree)/$(src)/../include \
3560 ++ -I $(srctree)/$(src)
3561 +
3562 + komeda-y := \
3563 + komeda_drv.o \
3564 +diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
3565 +index 4985384e51f6..59ffb6b9c745 100644
3566 +--- a/drivers/gpu/drm/drm_atomic_state_helper.c
3567 ++++ b/drivers/gpu/drm/drm_atomic_state_helper.c
3568 +@@ -30,6 +30,7 @@
3569 + #include <drm/drm_connector.h>
3570 + #include <drm/drm_atomic.h>
3571 + #include <drm/drm_device.h>
3572 ++#include <drm/drm_writeback.h>
3573 +
3574 + #include <linux/slab.h>
3575 + #include <linux/dma-fence.h>
3576 +@@ -412,6 +413,9 @@ __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
3577 +
3578 + if (state->commit)
3579 + drm_crtc_commit_put(state->commit);
3580 ++
3581 ++ if (state->writeback_job)
3582 ++ drm_writeback_cleanup_job(state->writeback_job);
3583 + }
3584 + EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
3585 +
3586 +diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
3587 +index 05bbc2b622fc..04aa6ccdfb24 100644
3588 +--- a/drivers/gpu/drm/drm_drv.c
3589 ++++ b/drivers/gpu/drm/drm_drv.c
3590 +@@ -497,7 +497,7 @@ int drm_dev_init(struct drm_device *dev,
3591 + BUG_ON(!parent);
3592 +
3593 + kref_init(&dev->ref);
3594 +- dev->dev = parent;
3595 ++ dev->dev = get_device(parent);
3596 + dev->driver = driver;
3597 +
3598 + /* no per-device feature limits by default */
3599 +@@ -567,6 +567,7 @@ err_minors:
3600 + drm_minor_free(dev, DRM_MINOR_RENDER);
3601 + drm_fs_inode_free(dev->anon_inode);
3602 + err_free:
3603 ++ put_device(dev->dev);
3604 + mutex_destroy(&dev->master_mutex);
3605 + mutex_destroy(&dev->ctxlist_mutex);
3606 + mutex_destroy(&dev->clientlist_mutex);
3607 +@@ -602,6 +603,8 @@ void drm_dev_fini(struct drm_device *dev)
3608 + drm_minor_free(dev, DRM_MINOR_PRIMARY);
3609 + drm_minor_free(dev, DRM_MINOR_RENDER);
3610 +
3611 ++ put_device(dev->dev);
3612 ++
3613 + mutex_destroy(&dev->master_mutex);
3614 + mutex_destroy(&dev->ctxlist_mutex);
3615 + mutex_destroy(&dev->clientlist_mutex);
3616 +diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
3617 +index 7caa3c7ed978..9701469a6e93 100644
3618 +--- a/drivers/gpu/drm/drm_file.c
3619 ++++ b/drivers/gpu/drm/drm_file.c
3620 +@@ -577,6 +577,7 @@ put_back_event:
3621 + file_priv->event_space -= length;
3622 + list_add(&e->link, &file_priv->event_list);
3623 + spin_unlock_irq(&dev->event_lock);
3624 ++ wake_up_interruptible(&file_priv->event_wait);
3625 + break;
3626 + }
3627 +
3628 +diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
3629 +index c20e6fe00cb3..2d75032f8159 100644
3630 +--- a/drivers/gpu/drm/drm_writeback.c
3631 ++++ b/drivers/gpu/drm/drm_writeback.c
3632 +@@ -268,6 +268,15 @@ void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
3633 + }
3634 + EXPORT_SYMBOL(drm_writeback_queue_job);
3635 +
3636 ++void drm_writeback_cleanup_job(struct drm_writeback_job *job)
3637 ++{
3638 ++ if (job->fb)
3639 ++ drm_framebuffer_put(job->fb);
3640 ++
3641 ++ kfree(job);
3642 ++}
3643 ++EXPORT_SYMBOL(drm_writeback_cleanup_job);
3644 ++
3645 + /*
3646 + * @cleanup_work: deferred cleanup of a writeback job
3647 + *
3648 +@@ -280,10 +289,9 @@ static void cleanup_work(struct work_struct *work)
3649 + struct drm_writeback_job *job = container_of(work,
3650 + struct drm_writeback_job,
3651 + cleanup_work);
3652 +- drm_framebuffer_put(job->fb);
3653 +- kfree(job);
3654 +-}
3655 +
3656 ++ drm_writeback_cleanup_job(job);
3657 ++}
3658 +
3659 + /**
3660 + * drm_writeback_signal_completion - Signal the completion of a writeback job
3661 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
3662 +index 18c27f795cf6..3156450723ba 100644
3663 +--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
3664 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
3665 +@@ -515,6 +515,9 @@ static int etnaviv_bind(struct device *dev)
3666 + }
3667 + drm->dev_private = priv;
3668 +
3669 ++ dev->dma_parms = &priv->dma_parms;
3670 ++ dma_set_max_seg_size(dev, SZ_2G);
3671 ++
3672 + mutex_init(&priv->gem_lock);
3673 + INIT_LIST_HEAD(&priv->gem_list);
3674 + priv->num_gpus = 0;
3675 +@@ -552,6 +555,8 @@ static void etnaviv_unbind(struct device *dev)
3676 +
3677 + component_unbind_all(dev, drm);
3678 +
3679 ++ dev->dma_parms = NULL;
3680 ++
3681 + drm->dev_private = NULL;
3682 + kfree(priv);
3683 +
3684 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
3685 +index a6a7ded37ef1..6a4ea127c4f1 100644
3686 +--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
3687 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
3688 +@@ -42,6 +42,7 @@ struct etnaviv_file_private {
3689 +
3690 + struct etnaviv_drm_private {
3691 + int num_gpus;
3692 ++ struct device_dma_parameters dma_parms;
3693 + struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
3694 +
3695 + /* list of GEM objects: */
3696 +diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
3697 +index 271fb46d4dd0..ea8324abc784 100644
3698 +--- a/drivers/gpu/drm/i915/gvt/Makefile
3699 ++++ b/drivers/gpu/drm/i915/gvt/Makefile
3700 +@@ -5,5 +5,5 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
3701 + execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
3702 + fb_decoder.o dmabuf.o page_track.o
3703 +
3704 +-ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
3705 ++ccflags-y += -I $(srctree)/$(src) -I $(srctree)/$(src)/$(GVT_DIR)/
3706 + i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
3707 +diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
3708 +index 56a70c74af4e..b7b1ebdc8190 100644
3709 +--- a/drivers/gpu/drm/msm/Makefile
3710 ++++ b/drivers/gpu/drm/msm/Makefile
3711 +@@ -1,7 +1,7 @@
3712 + # SPDX-License-Identifier: GPL-2.0
3713 +-ccflags-y := -Idrivers/gpu/drm/msm
3714 +-ccflags-y += -Idrivers/gpu/drm/msm/disp/dpu1
3715 +-ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
3716 ++ccflags-y := -I $(srctree)/$(src)
3717 ++ccflags-y += -I $(srctree)/$(src)/disp/dpu1
3718 ++ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi
3719 +
3720 + msm-y := \
3721 + adreno/adreno_device.o \
3722 +diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
3723 +index d5f5e56422f5..270da14cba67 100644
3724 +--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
3725 ++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
3726 +@@ -34,7 +34,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
3727 + {
3728 + struct device *dev = &gpu->pdev->dev;
3729 + const struct firmware *fw;
3730 +- struct device_node *np;
3731 ++ struct device_node *np, *mem_np;
3732 + struct resource r;
3733 + phys_addr_t mem_phys;
3734 + ssize_t mem_size;
3735 +@@ -48,11 +48,13 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
3736 + if (!np)
3737 + return -ENODEV;
3738 +
3739 +- np = of_parse_phandle(np, "memory-region", 0);
3740 +- if (!np)
3741 ++ mem_np = of_parse_phandle(np, "memory-region", 0);
3742 ++ of_node_put(np);
3743 ++ if (!mem_np)
3744 + return -EINVAL;
3745 +
3746 +- ret = of_address_to_resource(np, 0, &r);
3747 ++ ret = of_address_to_resource(mem_np, 0, &r);
3748 ++ of_node_put(mem_np);
3749 + if (ret)
3750 + return ret;
3751 +
3752 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
3753 +index 5aa3307f3f0c..dd2c4d11d0e1 100644
3754 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
3755 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
3756 +@@ -1023,13 +1023,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
3757 + if (!dpu_enc->hw_pp[i]) {
3758 + DPU_ERROR_ENC(dpu_enc, "no pp block assigned"
3759 + "at idx: %d\n", i);
3760 +- return;
3761 ++ goto error;
3762 + }
3763 +
3764 + if (!hw_ctl[i]) {
3765 + DPU_ERROR_ENC(dpu_enc, "no ctl block assigned"
3766 + "at idx: %d\n", i);
3767 +- return;
3768 ++ goto error;
3769 + }
3770 +
3771 + phys->hw_pp = dpu_enc->hw_pp[i];
3772 +@@ -1042,6 +1042,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
3773 + }
3774 +
3775 + dpu_enc->mode_set_complete = true;
3776 ++
3777 ++error:
3778 ++ dpu_rm_release(&dpu_kms->rm, drm_enc);
3779 + }
3780 +
3781 + static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
3782 +@@ -1547,8 +1550,14 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
3783 + if (!ctl)
3784 + continue;
3785 +
3786 +- if (phys->split_role != ENC_ROLE_SLAVE)
3787 ++ /*
3788 ++ * This is cleared in frame_done worker, which isn't invoked
3789 ++ * for async commits. So don't set this for async, since it'll
3790 ++ * roll over to the next commit.
3791 ++ */
3792 ++ if (!async && phys->split_role != ENC_ROLE_SLAVE)
3793 + set_bit(i, dpu_enc->frame_busy_mask);
3794 ++
3795 + if (!phys->ops.needs_single_flush ||
3796 + !phys->ops.needs_single_flush(phys))
3797 + _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0,
3798 +diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
3799 +index 49c04829cf34..fcf7a83f0e6f 100644
3800 +--- a/drivers/gpu/drm/msm/msm_gem_vma.c
3801 ++++ b/drivers/gpu/drm/msm/msm_gem_vma.c
3802 +@@ -85,7 +85,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
3803 +
3804 + vma->mapped = true;
3805 +
3806 +- if (aspace->mmu)
3807 ++ if (aspace && aspace->mmu)
3808 + ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
3809 + size, prot);
3810 +
3811 +diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
3812 +index 581404e6544d..378c5dd692b0 100644
3813 +--- a/drivers/gpu/drm/nouveau/Kbuild
3814 ++++ b/drivers/gpu/drm/nouveau/Kbuild
3815 +@@ -1,7 +1,7 @@
3816 +-ccflags-y += -I$(src)/include
3817 +-ccflags-y += -I$(src)/include/nvkm
3818 +-ccflags-y += -I$(src)/nvkm
3819 +-ccflags-y += -I$(src)
3820 ++ccflags-y += -I $(srctree)/$(src)/include
3821 ++ccflags-y += -I $(srctree)/$(src)/include/nvkm
3822 ++ccflags-y += -I $(srctree)/$(src)/nvkm
3823 ++ccflags-y += -I $(srctree)/$(src)
3824 +
3825 + # NVKM - HW resource manager
3826 + #- code also used by various userspace tools/tests
3827 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
3828 +index 157b076a1272..38c9c086754b 100644
3829 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
3830 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
3831 +@@ -109,7 +109,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
3832 + struct nvkm_device *device = bar->base.subdev.device;
3833 + static struct lock_class_key bar1_lock;
3834 + static struct lock_class_key bar2_lock;
3835 +- u64 start, limit;
3836 ++ u64 start, limit, size;
3837 + int ret;
3838 +
3839 + ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
3840 +@@ -127,7 +127,10 @@ nv50_bar_oneinit(struct nvkm_bar *base)
3841 +
3842 + /* BAR2 */
3843 + start = 0x0100000000ULL;
3844 +- limit = start + device->func->resource_size(device, 3);
3845 ++ size = device->func->resource_size(device, 3);
3846 ++ if (!size)
3847 ++ return -ENOMEM;
3848 ++ limit = start + size;
3849 +
3850 + ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
3851 + &bar2_lock, "bar2", &bar->bar2_vmm);
3852 +@@ -164,7 +167,10 @@ nv50_bar_oneinit(struct nvkm_bar *base)
3853 +
3854 + /* BAR1 */
3855 + start = 0x0000000000ULL;
3856 +- limit = start + device->func->resource_size(device, 1);
3857 ++ size = device->func->resource_size(device, 1);
3858 ++ if (!size)
3859 ++ return -ENOMEM;
3860 ++ limit = start + size;
3861 +
3862 + ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
3863 + &bar1_lock, "bar1", &bar->bar1_vmm);
3864 +diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
3865 +index 64fb788b6647..f0fe975ed46c 100644
3866 +--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
3867 ++++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
3868 +@@ -1342,12 +1342,9 @@ static int dsi_pll_enable(struct dss_pll *pll)
3869 + */
3870 + dsi_enable_scp_clk(dsi);
3871 +
3872 +- if (!dsi->vdds_dsi_enabled) {
3873 +- r = regulator_enable(dsi->vdds_dsi_reg);
3874 +- if (r)
3875 +- goto err0;
3876 +- dsi->vdds_dsi_enabled = true;
3877 +- }
3878 ++ r = regulator_enable(dsi->vdds_dsi_reg);
3879 ++ if (r)
3880 ++ goto err0;
3881 +
3882 + /* XXX PLL does not come out of reset without this... */
3883 + dispc_pck_free_enable(dsi->dss->dispc, 1);
3884 +@@ -1372,36 +1369,25 @@ static int dsi_pll_enable(struct dss_pll *pll)
3885 +
3886 + return 0;
3887 + err1:
3888 +- if (dsi->vdds_dsi_enabled) {
3889 +- regulator_disable(dsi->vdds_dsi_reg);
3890 +- dsi->vdds_dsi_enabled = false;
3891 +- }
3892 ++ regulator_disable(dsi->vdds_dsi_reg);
3893 + err0:
3894 + dsi_disable_scp_clk(dsi);
3895 + dsi_runtime_put(dsi);
3896 + return r;
3897 + }
3898 +
3899 +-static void dsi_pll_uninit(struct dsi_data *dsi, bool disconnect_lanes)
3900 ++static void dsi_pll_disable(struct dss_pll *pll)
3901 + {
3902 ++ struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
3903 ++
3904 + dsi_pll_power(dsi, DSI_PLL_POWER_OFF);
3905 +- if (disconnect_lanes) {
3906 +- WARN_ON(!dsi->vdds_dsi_enabled);
3907 +- regulator_disable(dsi->vdds_dsi_reg);
3908 +- dsi->vdds_dsi_enabled = false;
3909 +- }
3910 ++
3911 ++ regulator_disable(dsi->vdds_dsi_reg);
3912 +
3913 + dsi_disable_scp_clk(dsi);
3914 + dsi_runtime_put(dsi);
3915 +
3916 +- DSSDBG("PLL uninit done\n");
3917 +-}
3918 +-
3919 +-static void dsi_pll_disable(struct dss_pll *pll)
3920 +-{
3921 +- struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
3922 +-
3923 +- dsi_pll_uninit(dsi, true);
3924 ++ DSSDBG("PLL disable done\n");
3925 + }
3926 +
3927 + static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
3928 +@@ -4096,11 +4082,11 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
3929 +
3930 + r = dss_pll_enable(&dsi->pll);
3931 + if (r)
3932 +- goto err0;
3933 ++ return r;
3934 +
3935 + r = dsi_configure_dsi_clocks(dsi);
3936 + if (r)
3937 +- goto err1;
3938 ++ goto err0;
3939 +
3940 + dss_select_dsi_clk_source(dsi->dss, dsi->module_id,
3941 + dsi->module_id == 0 ?
3942 +@@ -4108,6 +4094,14 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
3943 +
3944 + DSSDBG("PLL OK\n");
3945 +
3946 ++ if (!dsi->vdds_dsi_enabled) {
3947 ++ r = regulator_enable(dsi->vdds_dsi_reg);
3948 ++ if (r)
3949 ++ goto err1;
3950 ++
3951 ++ dsi->vdds_dsi_enabled = true;
3952 ++ }
3953 ++
3954 + r = dsi_cio_init(dsi);
3955 + if (r)
3956 + goto err2;
3957 +@@ -4136,10 +4130,13 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
3958 + err3:
3959 + dsi_cio_uninit(dsi);
3960 + err2:
3961 +- dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
3962 ++ regulator_disable(dsi->vdds_dsi_reg);
3963 ++ dsi->vdds_dsi_enabled = false;
3964 + err1:
3965 +- dss_pll_disable(&dsi->pll);
3966 ++ dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
3967 + err0:
3968 ++ dss_pll_disable(&dsi->pll);
3969 ++
3970 + return r;
3971 + }
3972 +
3973 +@@ -4158,7 +4155,12 @@ static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
3974 +
3975 + dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
3976 + dsi_cio_uninit(dsi);
3977 +- dsi_pll_uninit(dsi, disconnect_lanes);
3978 ++ dss_pll_disable(&dsi->pll);
3979 ++
3980 ++ if (disconnect_lanes) {
3981 ++ regulator_disable(dsi->vdds_dsi_reg);
3982 ++ dsi->vdds_dsi_enabled = false;
3983 ++ }
3984 + }
3985 +
3986 + static int dsi_display_enable(struct omap_dss_device *dssdev)
3987 +diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
3988 +index 9da94d10782a..d37e3c001e24 100644
3989 +--- a/drivers/gpu/drm/omapdrm/omap_connector.c
3990 ++++ b/drivers/gpu/drm/omapdrm/omap_connector.c
3991 +@@ -36,18 +36,22 @@ struct omap_connector {
3992 + };
3993 +
3994 + static void omap_connector_hpd_notify(struct drm_connector *connector,
3995 +- struct omap_dss_device *src,
3996 + enum drm_connector_status status)
3997 + {
3998 +- if (status == connector_status_disconnected) {
3999 +- /*
4000 +- * If the source is an HDMI encoder, notify it of disconnection.
4001 +- * This is required to let the HDMI encoder reset any internal
4002 +- * state related to connection status, such as the CEC address.
4003 +- */
4004 +- if (src && src->type == OMAP_DISPLAY_TYPE_HDMI &&
4005 +- src->ops->hdmi.lost_hotplug)
4006 +- src->ops->hdmi.lost_hotplug(src);
4007 ++ struct omap_connector *omap_connector = to_omap_connector(connector);
4008 ++ struct omap_dss_device *dssdev;
4009 ++
4010 ++ if (status != connector_status_disconnected)
4011 ++ return;
4012 ++
4013 ++ /*
4014 ++ * Notify all devices in the pipeline of disconnection. This is required
4015 ++ * to let the HDMI encoders reset their internal state related to
4016 ++ * connection status, such as the CEC address.
4017 ++ */
4018 ++ for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) {
4019 ++ if (dssdev->ops && dssdev->ops->hdmi.lost_hotplug)
4020 ++ dssdev->ops->hdmi.lost_hotplug(dssdev);
4021 + }
4022 + }
4023 +
4024 +@@ -67,7 +71,7 @@ static void omap_connector_hpd_cb(void *cb_data,
4025 + if (old_status == status)
4026 + return;
4027 +
4028 +- omap_connector_hpd_notify(connector, omap_connector->hpd, status);
4029 ++ omap_connector_hpd_notify(connector, status);
4030 +
4031 + drm_kms_helper_hotplug_event(dev);
4032 + }
4033 +@@ -128,7 +132,7 @@ static enum drm_connector_status omap_connector_detect(
4034 + ? connector_status_connected
4035 + : connector_status_disconnected;
4036 +
4037 +- omap_connector_hpd_notify(connector, dssdev->src, status);
4038 ++ omap_connector_hpd_notify(connector, status);
4039 + } else {
4040 + switch (omap_connector->display->type) {
4041 + case OMAP_DISPLAY_TYPE_DPI:
4042 +diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
4043 +index 87fa316e1d7b..58ccf648b70f 100644
4044 +--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
4045 ++++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
4046 +@@ -248,6 +248,9 @@ static int otm8009a_init_sequence(struct otm8009a *ctx)
4047 + /* Send Command GRAM memory write (no parameters) */
4048 + dcs_write_seq(ctx, MIPI_DCS_WRITE_MEMORY_START);
4049 +
4050 ++ /* Wait a short while to let the panel be ready before the 1st frame */
4051 ++ mdelay(10);
4052 ++
4053 + return 0;
4054 + }
4055 +
4056 +diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
4057 +index b9baefdba38a..1c318ad32a8c 100644
4058 +--- a/drivers/gpu/drm/pl111/pl111_versatile.c
4059 ++++ b/drivers/gpu/drm/pl111/pl111_versatile.c
4060 +@@ -330,6 +330,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
4061 + ret = vexpress_muxfpga_init();
4062 + if (ret) {
4063 + dev_err(dev, "unable to initialize muxfpga driver\n");
4064 ++ of_node_put(np);
4065 + return ret;
4066 + }
4067 +
4068 +@@ -337,17 +338,20 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
4069 + pdev = of_find_device_by_node(np);
4070 + if (!pdev) {
4071 + dev_err(dev, "can't find the sysreg device, deferring\n");
4072 ++ of_node_put(np);
4073 + return -EPROBE_DEFER;
4074 + }
4075 + map = dev_get_drvdata(&pdev->dev);
4076 + if (!map) {
4077 + dev_err(dev, "sysreg has not yet probed\n");
4078 + platform_device_put(pdev);
4079 ++ of_node_put(np);
4080 + return -EPROBE_DEFER;
4081 + }
4082 + } else {
4083 + map = syscon_node_to_regmap(np);
4084 + }
4085 ++ of_node_put(np);
4086 +
4087 + if (IS_ERR(map)) {
4088 + dev_err(dev, "no Versatile syscon regmap\n");
4089 +diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
4090 +index 7ef97b2a6eda..033f44e46daf 100644
4091 +--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
4092 ++++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
4093 +@@ -283,7 +283,7 @@ static void rcar_lvds_d3_e3_pll_calc(struct rcar_lvds *lvds, struct clk *clk,
4094 + * divider.
4095 + */
4096 + fout = fvco / (1 << e) / div7;
4097 +- div = DIV_ROUND_CLOSEST(fout, target);
4098 ++ div = max(1UL, DIV_ROUND_CLOSEST(fout, target));
4099 + diff = abs(fout / div - target);
4100 +
4101 + if (diff < pll->diff) {
4102 +@@ -485,9 +485,13 @@ static void rcar_lvds_enable(struct drm_bridge *bridge)
4103 + }
4104 +
4105 + if (lvds->info->quirks & RCAR_LVDS_QUIRK_GEN3_LVEN) {
4106 +- /* Turn on the LVDS PHY. */
4107 ++ /*
4108 ++ * Turn on the LVDS PHY. On D3, the LVEN and LVRES bits must be
4109 ++ * set at the same time, so don't write the register yet.
4110 ++ */
4111 + lvdcr0 |= LVDCR0_LVEN;
4112 +- rcar_lvds_write(lvds, LVDCR0, lvdcr0);
4113 ++ if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_PWD))
4114 ++ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
4115 + }
4116 +
4117 + if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) {
4118 +diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
4119 +index 7136fc91c603..e75f77ff8e0f 100644
4120 +--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
4121 ++++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
4122 +@@ -341,8 +341,8 @@ static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon,
4123 + u32 block_space, start_delay;
4124 + u32 tcon_div;
4125 +
4126 +- tcon->dclk_min_div = 4;
4127 +- tcon->dclk_max_div = 127;
4128 ++ tcon->dclk_min_div = SUN6I_DSI_TCON_DIV;
4129 ++ tcon->dclk_max_div = SUN6I_DSI_TCON_DIV;
4130 +
4131 + sun4i_tcon0_mode_set_common(tcon, mode);
4132 +
4133 +diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
4134 +index 318994cd1b85..869e0aedf343 100644
4135 +--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
4136 ++++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
4137 +@@ -358,7 +358,13 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
4138 + static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
4139 + struct drm_display_mode *mode)
4140 + {
4141 +- return mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1;
4142 ++ u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100);
4143 ++ u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start;
4144 ++
4145 ++ if (delay > mode->vtotal)
4146 ++ delay = delay % mode->vtotal;
4147 ++
4148 ++ return max_t(u16, delay, 1);
4149 + }
4150 +
4151 + static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
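The sun6i_dsi_get_video_start_delay() rewrite above replaces the fixed "+1" with a delay offset by a margin clamped to 8..100 lines, wrapped back into the frame when it overshoots vtotal, and floored at 1 so a zero delay is never programmed. A stand-alone version of the formula, where the parameters stand in for the drm_display_mode fields:

#include <stdint.h>

#define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

static uint16_t video_start_delay(int vtotal, int vdisplay, int vsync_end)
{
        int start = CLAMP(vtotal - vdisplay - 10, 8, 100);
        int delay = vtotal - (vsync_end - vdisplay) + start;

        if (delay > vtotal)
                delay %= vtotal;        /* wrap back into the frame */

        return delay > 1 ? delay : 1;   /* never program a zero delay */
}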
4152 +diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
4153 +index a07090579f84..5c3ad5be0690 100644
4154 +--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
4155 ++++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
4156 +@@ -13,6 +13,8 @@
4157 + #include <drm/drm_encoder.h>
4158 + #include <drm/drm_mipi_dsi.h>
4159 +
4160 ++#define SUN6I_DSI_TCON_DIV 4
4161 ++
4162 + struct sun6i_dsi {
4163 + struct drm_connector connector;
4164 + struct drm_encoder encoder;
4165 +diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
4166 +index 43a3b68d90a2..998d75be9e16 100644
4167 +--- a/drivers/gpu/drm/tinydrm/ili9225.c
4168 ++++ b/drivers/gpu/drm/tinydrm/ili9225.c
4169 +@@ -301,7 +301,7 @@ static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
4170 + mipi->enabled = false;
4171 + }
4172 +
4173 +-static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
4174 ++static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
4175 + size_t num)
4176 + {
4177 + struct spi_device *spi = mipi->spi;
4178 +@@ -311,11 +311,11 @@ static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
4179 +
4180 + gpiod_set_value_cansleep(mipi->dc, 0);
4181 + speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
4182 +- ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
4183 ++ ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
4184 + if (ret || !num)
4185 + return ret;
4186 +
4187 +- if (cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
4188 ++ if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
4189 + bpw = 16;
4190 +
4191 + gpiod_set_value_cansleep(mipi->dc, 1);
4192 +diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
4193 +index 918f77c7de34..295cbcbc2bb6 100644
4194 +--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
4195 ++++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
4196 +@@ -153,16 +153,42 @@ EXPORT_SYMBOL(mipi_dbi_command_read);
4197 + */
4198 + int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
4199 + {
4200 ++ u8 *cmdbuf;
4201 + int ret;
4202 +
4203 ++ /* SPI requires dma-safe buffers */
4204 ++ cmdbuf = kmemdup(&cmd, 1, GFP_KERNEL);
4205 ++ if (!cmdbuf)
4206 ++ return -ENOMEM;
4207 ++
4208 + mutex_lock(&mipi->cmdlock);
4209 +- ret = mipi->command(mipi, cmd, data, len);
4210 ++ ret = mipi->command(mipi, cmdbuf, data, len);
4211 + mutex_unlock(&mipi->cmdlock);
4212 +
4213 ++ kfree(cmdbuf);
4214 ++
4215 + return ret;
4216 + }
4217 + EXPORT_SYMBOL(mipi_dbi_command_buf);
4218 +
4219 ++/* This should only be used by mipi_dbi_command() */
4220 ++int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
4221 ++{
4222 ++ u8 *buf;
4223 ++ int ret;
4224 ++
4225 ++ buf = kmemdup(data, len, GFP_KERNEL);
4226 ++ if (!buf)
4227 ++ return -ENOMEM;
4228 ++
4229 ++ ret = mipi_dbi_command_buf(mipi, cmd, buf, len);
4230 ++
4231 ++ kfree(buf);
4232 ++
4233 ++ return ret;
4234 ++}
4235 ++EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
4236 ++
4237 + /**
4238 + * mipi_dbi_buf_copy - Copy a framebuffer, transforming it if necessary
4239 + * @dst: The destination buffer
4240 +@@ -774,18 +800,18 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
4241 + return 0;
4242 + }
4243 +
4244 +-static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd,
4245 ++static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 *cmd,
4246 + u8 *parameters, size_t num)
4247 + {
4248 +- unsigned int bpw = (cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
4249 ++ unsigned int bpw = (*cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
4250 + int ret;
4251 +
4252 +- if (mipi_dbi_command_is_read(mipi, cmd))
4253 ++ if (mipi_dbi_command_is_read(mipi, *cmd))
4254 + return -ENOTSUPP;
4255 +
4256 +- MIPI_DBI_DEBUG_COMMAND(cmd, parameters, num);
4257 ++ MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num);
4258 +
4259 +- ret = mipi_dbi_spi1_transfer(mipi, 0, &cmd, 1, 8);
4260 ++ ret = mipi_dbi_spi1_transfer(mipi, 0, cmd, 1, 8);
4261 + if (ret || !num)
4262 + return ret;
4263 +
4264 +@@ -794,7 +820,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd,
4265 +
4266 + /* MIPI DBI Type C Option 3 */
4267 +
4268 +-static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
4269 ++static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 *cmd,
4270 + u8 *data, size_t len)
4271 + {
4272 + struct spi_device *spi = mipi->spi;
4273 +@@ -803,7 +829,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
4274 + struct spi_transfer tr[2] = {
4275 + {
4276 + .speed_hz = speed_hz,
4277 +- .tx_buf = &cmd,
4278 ++ .tx_buf = cmd,
4279 + .len = 1,
4280 + }, {
4281 + .speed_hz = speed_hz,
4282 +@@ -821,8 +847,8 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
4283 + * Support non-standard 24-bit and 32-bit Nokia read commands which
4284 + * start with a dummy clock, so we need to read an extra byte.
4285 + */
4286 +- if (cmd == MIPI_DCS_GET_DISPLAY_ID ||
4287 +- cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
4288 ++ if (*cmd == MIPI_DCS_GET_DISPLAY_ID ||
4289 ++ *cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
4290 + if (!(len == 3 || len == 4))
4291 + return -EINVAL;
4292 +
4293 +@@ -852,7 +878,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
4294 + data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7));
4295 + }
4296 +
4297 +- MIPI_DBI_DEBUG_COMMAND(cmd, data, len);
4298 ++ MIPI_DBI_DEBUG_COMMAND(*cmd, data, len);
4299 +
4300 + err_free:
4301 + kfree(buf);
4302 +@@ -860,7 +886,7 @@ err_free:
4303 + return ret;
4304 + }
4305 +
4306 +-static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
4307 ++static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 *cmd,
4308 + u8 *par, size_t num)
4309 + {
4310 + struct spi_device *spi = mipi->spi;
4311 +@@ -868,18 +894,18 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
4312 + u32 speed_hz;
4313 + int ret;
4314 +
4315 +- if (mipi_dbi_command_is_read(mipi, cmd))
4316 ++ if (mipi_dbi_command_is_read(mipi, *cmd))
4317 + return mipi_dbi_typec3_command_read(mipi, cmd, par, num);
4318 +
4319 +- MIPI_DBI_DEBUG_COMMAND(cmd, par, num);
4320 ++ MIPI_DBI_DEBUG_COMMAND(*cmd, par, num);
4321 +
4322 + gpiod_set_value_cansleep(mipi->dc, 0);
4323 + speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
4324 +- ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
4325 ++ ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
4326 + if (ret || !num)
4327 + return ret;
4328 +
4329 +- if (cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
4330 ++ if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
4331 + bpw = 16;
4332 +
4333 + gpiod_set_value_cansleep(mipi->dc, 1);
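The ili9225 and mipi-dbi hunks switch every ->command() callback from taking the command byte by value to taking a pointer, because the SPI layer may map the buffer for DMA and a stack address is not dma-safe; mipi_dbi_command_buf() therefore heap-duplicates the byte, and the new mipi_dbi_command_stackbuf() does the same for parameter arrays living on the caller's stack. The essential shape of the dma-safe wrapper, as a kernel-context sketch with an invented function name:

int send_cmd(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
{
        u8 *cmdbuf = kmemdup(&cmd, 1, GFP_KERNEL);      /* dma-safe copy */
        int ret;

        if (!cmdbuf)
                return -ENOMEM;
        ret = mipi->command(mipi, cmdbuf, data, len);   /* takes u8 *cmd now */
        kfree(cmdbuf);
        return ret;
}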
4334 +diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
4335 +index f0afcec72c34..30ae1c74edaa 100644
4336 +--- a/drivers/gpu/drm/v3d/v3d_drv.c
4337 ++++ b/drivers/gpu/drm/v3d/v3d_drv.c
4338 +@@ -312,14 +312,18 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
4339 + if (ret)
4340 + goto dev_destroy;
4341 +
4342 +- v3d_irq_init(v3d);
4343 ++ ret = v3d_irq_init(v3d);
4344 ++ if (ret)
4345 ++ goto gem_destroy;
4346 +
4347 + ret = drm_dev_register(drm, 0);
4348 + if (ret)
4349 +- goto gem_destroy;
4350 ++ goto irq_disable;
4351 +
4352 + return 0;
4353 +
4354 ++irq_disable:
4355 ++ v3d_irq_disable(v3d);
4356 + gem_destroy:
4357 + v3d_gem_destroy(drm);
4358 + dev_destroy:
4359 +diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
4360 +index fdda3037f7af..2fdb456b72d3 100644
4361 +--- a/drivers/gpu/drm/v3d/v3d_drv.h
4362 ++++ b/drivers/gpu/drm/v3d/v3d_drv.h
4363 +@@ -310,7 +310,7 @@ void v3d_reset(struct v3d_dev *v3d);
4364 + void v3d_invalidate_caches(struct v3d_dev *v3d);
4365 +
4366 + /* v3d_irq.c */
4367 +-void v3d_irq_init(struct v3d_dev *v3d);
4368 ++int v3d_irq_init(struct v3d_dev *v3d);
4369 + void v3d_irq_enable(struct v3d_dev *v3d);
4370 + void v3d_irq_disable(struct v3d_dev *v3d);
4371 + void v3d_irq_reset(struct v3d_dev *v3d);
4372 +diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
4373 +index 69338da70ddc..29d746cfce57 100644
4374 +--- a/drivers/gpu/drm/v3d/v3d_irq.c
4375 ++++ b/drivers/gpu/drm/v3d/v3d_irq.c
4376 +@@ -156,7 +156,7 @@ v3d_hub_irq(int irq, void *arg)
4377 + return status;
4378 + }
4379 +
4380 +-void
4381 ++int
4382 + v3d_irq_init(struct v3d_dev *v3d)
4383 + {
4384 + int ret, core;
4385 +@@ -173,13 +173,22 @@ v3d_irq_init(struct v3d_dev *v3d)
4386 + ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
4387 + v3d_hub_irq, IRQF_SHARED,
4388 + "v3d_hub", v3d);
4389 ++ if (ret)
4390 ++ goto fail;
4391 ++
4392 + ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1),
4393 + v3d_irq, IRQF_SHARED,
4394 + "v3d_core0", v3d);
4395 + if (ret)
4396 +- dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
4397 ++ goto fail;
4398 +
4399 + v3d_irq_enable(v3d);
4400 ++ return 0;
4401 ++
4402 ++fail:
4403 ++ if (ret != -EPROBE_DEFER)
4404 ++ dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
4405 ++ return ret;
4406 + }
4407 +
4408 + void
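The v3d hunks turn v3d_irq_init() into an int-returning function so devm_request_irq() failures reach the probe path, which now unwinds through a new irq_disable label; -EPROBE_DEFER is additionally kept quiet since the probe will simply be retried. The error-handling shape, condensed:

/* Sketch of the quiet-defer pattern used in v3d_irq_init(). */
ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
                       v3d_hub_irq, IRQF_SHARED, "v3d_hub", v3d);
if (ret)
        goto fail;
/* ... request the core IRQ, then enable ... */
return 0;

fail:
        if (ret != -EPROBE_DEFER)
                dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
        return ret;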
4409 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
4410 +index 860e21ec6a49..63a43726cce0 100644
4411 +--- a/drivers/hid/hid-core.c
4412 ++++ b/drivers/hid/hid-core.c
4413 +@@ -218,13 +218,14 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
4414 + * Add a usage to the temporary parser table.
4415 + */
4416 +
4417 +-static int hid_add_usage(struct hid_parser *parser, unsigned usage)
4418 ++static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
4419 + {
4420 + if (parser->local.usage_index >= HID_MAX_USAGES) {
4421 + hid_err(parser->device, "usage index exceeded\n");
4422 + return -1;
4423 + }
4424 + parser->local.usage[parser->local.usage_index] = usage;
4425 ++ parser->local.usage_size[parser->local.usage_index] = size;
4426 + parser->local.collection_index[parser->local.usage_index] =
4427 + parser->collection_stack_ptr ?
4428 + parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
4429 +@@ -486,10 +487,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
4430 + return 0;
4431 + }
4432 +
4433 +- if (item->size <= 2)
4434 +- data = (parser->global.usage_page << 16) + data;
4435 +-
4436 +- return hid_add_usage(parser, data);
4437 ++ return hid_add_usage(parser, data, item->size);
4438 +
4439 + case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
4440 +
4441 +@@ -498,9 +496,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
4442 + return 0;
4443 + }
4444 +
4445 +- if (item->size <= 2)
4446 +- data = (parser->global.usage_page << 16) + data;
4447 +-
4448 + parser->local.usage_minimum = data;
4449 + return 0;
4450 +
4451 +@@ -511,9 +506,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
4452 + return 0;
4453 + }
4454 +
4455 +- if (item->size <= 2)
4456 +- data = (parser->global.usage_page << 16) + data;
4457 +-
4458 + count = data - parser->local.usage_minimum;
4459 + if (count + parser->local.usage_index >= HID_MAX_USAGES) {
4460 + /*
4461 +@@ -533,7 +525,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
4462 + }
4463 +
4464 + for (n = parser->local.usage_minimum; n <= data; n++)
4465 +- if (hid_add_usage(parser, n)) {
4466 ++ if (hid_add_usage(parser, n, item->size)) {
4467 + dbg_hid("hid_add_usage failed\n");
4468 + return -1;
4469 + }
4470 +@@ -547,6 +539,22 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
4471 + return 0;
4472 + }
4473 +
4474 ++/*
4475 ++ * Concatenate Usage Pages into Usages where relevant:
4476 ++ * As per specification, 6.2.2.8: "When the parser encounters a main item it
4477 ++ * concatenates the last declared Usage Page with a Usage to form a complete
4478 ++ * usage value."
4479 ++ */
4480 ++
4481 ++static void hid_concatenate_usage_page(struct hid_parser *parser)
4482 ++{
4483 ++ int i;
4484 ++
4485 ++ for (i = 0; i < parser->local.usage_index; i++)
4486 ++ if (parser->local.usage_size[i] <= 2)
4487 ++ parser->local.usage[i] += parser->global.usage_page << 16;
4488 ++}
4489 ++
4490 + /*
4491 + * Process a main item.
4492 + */
4493 +@@ -556,6 +564,8 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
4494 + __u32 data;
4495 + int ret;
4496 +
4497 ++ hid_concatenate_usage_page(parser);
4498 ++
4499 + data = item_udata(item);
4500 +
4501 + switch (item->tag) {
4502 +@@ -765,6 +775,8 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
4503 + __u32 data;
4504 + int i;
4505 +
4506 ++ hid_concatenate_usage_page(parser);
4507 ++
4508 + data = item_udata(item);
4509 +
4510 + switch (item->tag) {
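
The hid-core hunks above defer Usage Page concatenation to main-item time: per HID 1.11 section 6.2.2.8, a 1- or 2-byte usage is combined with whichever Usage Page is in effect when the main item is reached, while a 4-byte usage already carries its page in the upper half. A standalone sketch of that rule, illustration only:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t full_usage(uint32_t usage, uint8_t item_size,
				   uint16_t usage_page)
	{
		if (item_size <= 2)	/* short usage: page comes from outside */
			return ((uint32_t)usage_page << 16) + usage;
		return usage;		/* extended usage: page already encoded */
	}

	int main(void)
	{
		/* Usage 0x30 (X axis) on Usage Page 0x01 (Generic Desktop). */
		printf("0x%08x\n", (unsigned)full_usage(0x30, 1, 0x01));
		return 0;	/* prints 0x00010030 */
	}
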
4511 +diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
4512 +index 199cc256e9d9..e74fa990ba13 100644
4513 +--- a/drivers/hid/hid-logitech-hidpp.c
4514 ++++ b/drivers/hid/hid-logitech-hidpp.c
4515 +@@ -836,13 +836,16 @@ static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature,
4516 +
4517 + static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
4518 + {
4519 ++ const u8 ping_byte = 0x5a;
4520 ++ u8 ping_data[3] = { 0, 0, ping_byte };
4521 + struct hidpp_report response;
4522 + int ret;
4523 +
4524 +- ret = hidpp_send_fap_command_sync(hidpp,
4525 ++ ret = hidpp_send_rap_command_sync(hidpp,
4526 ++ REPORT_ID_HIDPP_SHORT,
4527 + HIDPP_PAGE_ROOT_IDX,
4528 + CMD_ROOT_GET_PROTOCOL_VERSION,
4529 +- NULL, 0, &response);
4530 ++ ping_data, sizeof(ping_data), &response);
4531 +
4532 + if (ret == HIDPP_ERROR_INVALID_SUBID) {
4533 + hidpp->protocol_major = 1;
4534 +@@ -862,8 +865,14 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
4535 + if (ret)
4536 + return ret;
4537 +
4538 +- hidpp->protocol_major = response.fap.params[0];
4539 +- hidpp->protocol_minor = response.fap.params[1];
4540 ++ if (response.rap.params[2] != ping_byte) {
4541 ++ hid_err(hidpp->hid_dev, "%s: ping mismatch 0x%02x != 0x%02x\n",
4542 ++ __func__, response.rap.params[2], ping_byte);
4543 ++ return -EPROTO;
4544 ++ }
4545 ++
4546 ++ hidpp->protocol_major = response.rap.params[0];
4547 ++ hidpp->protocol_minor = response.rap.params[1];
4548 +
4549 + return ret;
4550 + }
4551 +@@ -1012,7 +1021,11 @@ static int hidpp_map_battery_level(int capacity)
4552 + {
4553 + if (capacity < 11)
4554 + return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
4555 +- else if (capacity < 31)
4556 ++ /*
4557 ++ * The spec says this should be < 31 but some devices report 30
4558 ++ * with brand new batteries and Windows reports 30 as "Good".
4559 ++ */
4560 ++ else if (capacity < 30)
4561 + return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
4562 + else if (capacity < 81)
4563 + return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
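
The hidpp hunk above probes the protocol version with a ping byte in the request and refuses the answer unless that byte is echoed back, which filters out devices that acknowledge the command without actually implementing HID++ 2.0. A sketch of the echo check over a hypothetical transport (struct xport, xport_command() and the command value are stand-ins, not the driver's API):

	#include <linux/errno.h>
	#include <linux/types.h>

	struct xport;						/* hypothetical */
	int xport_command(struct xport *x, int cmd,
			  const u8 *req, size_t req_len,
			  u8 *resp, size_t resp_len);		/* hypothetical */

	#define CMD_GET_PROTOCOL_VERSION	0x11		/* illustrative */

	static int probe_protocol(struct xport *x, u8 *major, u8 *minor)
	{
		const u8 ping_byte = 0x5a;
		const u8 req[3] = { 0, 0, ping_byte };
		u8 resp[3];
		int ret;

		ret = xport_command(x, CMD_GET_PROTOCOL_VERSION,
				    req, sizeof(req), resp, sizeof(resp));
		if (ret)
			return ret;

		if (resp[2] != ping_byte)	/* ACKed, but not our protocol */
			return -EPROTO;

		*major = resp[0];
		*minor = resp[1];
		return 0;
	}
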
4564 +diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
4565 +index 73c681162653..623736d2a7c1 100644
4566 +--- a/drivers/hwmon/f71805f.c
4567 ++++ b/drivers/hwmon/f71805f.c
4568 +@@ -96,17 +96,23 @@ superio_select(int base, int ld)
4569 + outb(ld, base + 1);
4570 + }
4571 +
4572 +-static inline void
4573 ++static inline int
4574 + superio_enter(int base)
4575 + {
4576 ++ if (!request_muxed_region(base, 2, DRVNAME))
4577 ++ return -EBUSY;
4578 ++
4579 + outb(0x87, base);
4580 + outb(0x87, base);
4581 ++
4582 ++ return 0;
4583 + }
4584 +
4585 + static inline void
4586 + superio_exit(int base)
4587 + {
4588 + outb(0xaa, base);
4589 ++ release_region(base, 2);
4590 + }
4591 +
4592 + /*
4593 +@@ -1561,7 +1567,7 @@ exit:
4594 + static int __init f71805f_find(int sioaddr, unsigned short *address,
4595 + struct f71805f_sio_data *sio_data)
4596 + {
4597 +- int err = -ENODEV;
4598 ++ int err;
4599 + u16 devid;
4600 +
4601 + static const char * const names[] = {
4602 +@@ -1569,8 +1575,11 @@ static int __init f71805f_find(int sioaddr, unsigned short *address,
4603 + "F71872F/FG or F71806F/FG",
4604 + };
4605 +
4606 +- superio_enter(sioaddr);
4607 ++ err = superio_enter(sioaddr);
4608 ++ if (err)
4609 ++ return err;
4610 +
4611 ++ err = -ENODEV;
4612 + devid = superio_inw(sioaddr, SIO_REG_MANID);
4613 + if (devid != SIO_FINTEK_ID)
4614 + goto exit;
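
The same treatment repeats across f71805f, pc87427, smsc47b397, smsc47m1 and vt1211 below: the Super-I/O enter sequence now reserves its two-port window with request_muxed_region(), which serializes drivers sharing those ports and can fail, so superio_enter() becomes fallible and superio_exit() releases the region. A minimal sketch of the pattern (the 0x87/0xaa magic follows the f71805f/vt1211 flavor; other chips differ):

	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/ioport.h>

	#define DRVNAME "example_sio"

	static int superio_enter(int base)
	{
		/* Fail with -EBUSY rather than poke ports another driver owns. */
		if (!request_muxed_region(base, 2, DRVNAME))
			return -EBUSY;

		outb(0x87, base);
		outb(0x87, base);
		return 0;
	}

	static void superio_exit(int base)
	{
		outb(0xaa, base);
		release_region(base, 2);
	}
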
4615 +diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
4616 +index d1a3f2040c00..58eee8fa3e6d 100644
4617 +--- a/drivers/hwmon/pc87427.c
4618 ++++ b/drivers/hwmon/pc87427.c
4619 +@@ -106,6 +106,13 @@ static const char *logdev_str[2] = { DRVNAME " FMC", DRVNAME " HMC" };
4620 + #define LD_IN 1
4621 + #define LD_TEMP 1
4622 +
4623 ++static inline int superio_enter(int sioaddr)
4624 ++{
4625 ++ if (!request_muxed_region(sioaddr, 2, DRVNAME))
4626 ++ return -EBUSY;
4627 ++ return 0;
4628 ++}
4629 ++
4630 + static inline void superio_outb(int sioaddr, int reg, int val)
4631 + {
4632 + outb(reg, sioaddr);
4633 +@@ -122,6 +129,7 @@ static inline void superio_exit(int sioaddr)
4634 + {
4635 + outb(0x02, sioaddr);
4636 + outb(0x02, sioaddr + 1);
4637 ++ release_region(sioaddr, 2);
4638 + }
4639 +
4640 + /*
4641 +@@ -1195,7 +1203,11 @@ static int __init pc87427_find(int sioaddr, struct pc87427_sio_data *sio_data)
4642 + {
4643 + u16 val;
4644 + u8 cfg, cfg_b;
4645 +- int i, err = 0;
4646 ++ int i, err;
4647 ++
4648 ++ err = superio_enter(sioaddr);
4649 ++ if (err)
4650 ++ return err;
4651 +
4652 + /* Identify device */
4653 + val = force_id ? force_id : superio_inb(sioaddr, SIOREG_DEVID);
4654 +diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
4655 +index c0775084dde0..60e193f2e970 100644
4656 +--- a/drivers/hwmon/smsc47b397.c
4657 ++++ b/drivers/hwmon/smsc47b397.c
4658 +@@ -72,14 +72,19 @@ static inline void superio_select(int ld)
4659 + superio_outb(0x07, ld);
4660 + }
4661 +
4662 +-static inline void superio_enter(void)
4663 ++static inline int superio_enter(void)
4664 + {
4665 ++ if (!request_muxed_region(REG, 2, DRVNAME))
4666 ++ return -EBUSY;
4667 ++
4668 + outb(0x55, REG);
4669 ++ return 0;
4670 + }
4671 +
4672 + static inline void superio_exit(void)
4673 + {
4674 + outb(0xAA, REG);
4675 ++ release_region(REG, 2);
4676 + }
4677 +
4678 + #define SUPERIO_REG_DEVID 0x20
4679 +@@ -300,8 +305,12 @@ static int __init smsc47b397_find(void)
4680 + u8 id, rev;
4681 + char *name;
4682 + unsigned short addr;
4683 ++ int err;
4684 ++
4685 ++ err = superio_enter();
4686 ++ if (err)
4687 ++ return err;
4688 +
4689 +- superio_enter();
4690 + id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
4691 +
4692 + switch (id) {
4693 +diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
4694 +index c7b6a425e2c0..5eeac9853d0a 100644
4695 +--- a/drivers/hwmon/smsc47m1.c
4696 ++++ b/drivers/hwmon/smsc47m1.c
4697 +@@ -73,16 +73,21 @@ superio_inb(int reg)
4698 + /* logical device for fans is 0x0A */
4699 + #define superio_select() superio_outb(0x07, 0x0A)
4700 +
4701 +-static inline void
4702 ++static inline int
4703 + superio_enter(void)
4704 + {
4705 ++ if (!request_muxed_region(REG, 2, DRVNAME))
4706 ++ return -EBUSY;
4707 ++
4708 + outb(0x55, REG);
4709 ++ return 0;
4710 + }
4711 +
4712 + static inline void
4713 + superio_exit(void)
4714 + {
4715 + outb(0xAA, REG);
4716 ++ release_region(REG, 2);
4717 + }
4718 +
4719 + #define SUPERIO_REG_ACT 0x30
4720 +@@ -531,8 +536,12 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
4721 + {
4722 + u8 val;
4723 + unsigned short addr;
4724 ++ int err;
4725 ++
4726 ++ err = superio_enter();
4727 ++ if (err)
4728 ++ return err;
4729 +
4730 +- superio_enter();
4731 + val = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
4732 +
4733 + /*
4734 +@@ -608,13 +617,14 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
4735 + static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data)
4736 + {
4737 + if ((sio_data->activate & 0x01) == 0) {
4738 +- superio_enter();
4739 +- superio_select();
4740 +-
4741 +- pr_info("Disabling device\n");
4742 +- superio_outb(SUPERIO_REG_ACT, sio_data->activate);
4743 +-
4744 +- superio_exit();
4745 ++ if (!superio_enter()) {
4746 ++ superio_select();
4747 ++ pr_info("Disabling device\n");
4748 ++ superio_outb(SUPERIO_REG_ACT, sio_data->activate);
4749 ++ superio_exit();
4750 ++ } else {
4751 ++ pr_warn("Failed to disable device\n");
4752 ++ }
4753 + }
4754 + }
4755 +
4756 +diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
4757 +index 3a6bfa51cb94..95d5e8ec8b7f 100644
4758 +--- a/drivers/hwmon/vt1211.c
4759 ++++ b/drivers/hwmon/vt1211.c
4760 +@@ -226,15 +226,21 @@ static inline void superio_select(int sio_cip, int ldn)
4761 + outb(ldn, sio_cip + 1);
4762 + }
4763 +
4764 +-static inline void superio_enter(int sio_cip)
4765 ++static inline int superio_enter(int sio_cip)
4766 + {
4767 ++ if (!request_muxed_region(sio_cip, 2, DRVNAME))
4768 ++ return -EBUSY;
4769 ++
4770 + outb(0x87, sio_cip);
4771 + outb(0x87, sio_cip);
4772 ++
4773 ++ return 0;
4774 + }
4775 +
4776 + static inline void superio_exit(int sio_cip)
4777 + {
4778 + outb(0xaa, sio_cip);
4779 ++ release_region(sio_cip, 2);
4780 + }
4781 +
4782 + /* ---------------------------------------------------------------------
4783 +@@ -1282,11 +1288,14 @@ EXIT:
4784 +
4785 + static int __init vt1211_find(int sio_cip, unsigned short *address)
4786 + {
4787 +- int err = -ENODEV;
4788 ++ int err;
4789 + int devid;
4790 +
4791 +- superio_enter(sio_cip);
4792 ++ err = superio_enter(sio_cip);
4793 ++ if (err)
4794 ++ return err;
4795 +
4796 ++ err = -ENODEV;
4797 + devid = force_id ? force_id : superio_inb(sio_cip, SIO_VT1211_DEVID);
4798 + if (devid != SIO_VT1211_ID)
4799 + goto EXIT;
4800 +diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
4801 +index 76db6e5cc296..9ca21a8dfcd7 100644
4802 +--- a/drivers/iio/adc/Kconfig
4803 ++++ b/drivers/iio/adc/Kconfig
4804 +@@ -809,6 +809,7 @@ config STM32_DFSDM_ADC
4805 + depends on (ARCH_STM32 && OF) || COMPILE_TEST
4806 + select STM32_DFSDM_CORE
4807 + select REGMAP_MMIO
4808 ++ select IIO_BUFFER
4809 + select IIO_BUFFER_HW_CONSUMER
4810 + help
4811 + Select this option to support ADCSigma delta modulator for
4812 +diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
4813 +index 54d9978b2740..a4310600a853 100644
4814 +--- a/drivers/iio/adc/ad_sigma_delta.c
4815 ++++ b/drivers/iio/adc/ad_sigma_delta.c
4816 +@@ -62,7 +62,7 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
4817 + struct spi_transfer t = {
4818 + .tx_buf = data,
4819 + .len = size + 1,
4820 +- .cs_change = sigma_delta->bus_locked,
4821 ++ .cs_change = sigma_delta->keep_cs_asserted,
4822 + };
4823 + struct spi_message m;
4824 + int ret;
4825 +@@ -218,6 +218,7 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
4826 +
4827 + spi_bus_lock(sigma_delta->spi->master);
4828 + sigma_delta->bus_locked = true;
4829 ++ sigma_delta->keep_cs_asserted = true;
4830 + reinit_completion(&sigma_delta->completion);
4831 +
4832 + ret = ad_sigma_delta_set_mode(sigma_delta, mode);
4833 +@@ -235,9 +236,10 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
4834 + ret = 0;
4835 + }
4836 + out:
4837 ++ sigma_delta->keep_cs_asserted = false;
4838 ++ ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
4839 + sigma_delta->bus_locked = false;
4840 + spi_bus_unlock(sigma_delta->spi->master);
4841 +- ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
4842 +
4843 + return ret;
4844 + }
4845 +@@ -290,6 +292,7 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
4846 +
4847 + spi_bus_lock(sigma_delta->spi->master);
4848 + sigma_delta->bus_locked = true;
4849 ++ sigma_delta->keep_cs_asserted = true;
4850 + reinit_completion(&sigma_delta->completion);
4851 +
4852 + ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_SINGLE);
4853 +@@ -299,9 +302,6 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
4854 + ret = wait_for_completion_interruptible_timeout(
4855 + &sigma_delta->completion, HZ);
4856 +
4857 +- sigma_delta->bus_locked = false;
4858 +- spi_bus_unlock(sigma_delta->spi->master);
4859 +-
4860 + if (ret == 0)
4861 + ret = -EIO;
4862 + if (ret < 0)
4863 +@@ -322,7 +322,10 @@ out:
4864 + sigma_delta->irq_dis = true;
4865 + }
4866 +
4867 ++ sigma_delta->keep_cs_asserted = false;
4868 + ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
4869 ++ sigma_delta->bus_locked = false;
4870 ++ spi_bus_unlock(sigma_delta->spi->master);
4871 + mutex_unlock(&indio_dev->mlock);
4872 +
4873 + if (ret)
4874 +@@ -359,6 +362,8 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
4875 +
4876 + spi_bus_lock(sigma_delta->spi->master);
4877 + sigma_delta->bus_locked = true;
4878 ++ sigma_delta->keep_cs_asserted = true;
4879 ++
4880 + ret = ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_CONTINUOUS);
4881 + if (ret)
4882 + goto err_unlock;
4883 +@@ -387,6 +392,7 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev)
4884 + sigma_delta->irq_dis = true;
4885 + }
4886 +
4887 ++ sigma_delta->keep_cs_asserted = false;
4888 + ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
4889 +
4890 + sigma_delta->bus_locked = false;
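
The ad_sigma_delta hunks above stop overloading bus_locked and introduce keep_cs_asserted to drive spi_transfer.cs_change, so chip select stays low for the whole locked conversation and is only dropped after the device is back in idle mode. A sketch of holding CS across a locked bus, with hypothetical buffers (cs_change on the last transfer of a message leaves CS asserted afterwards):

	#include <linux/spi/spi.h>

	static int locked_conversation(struct spi_device *spi, u8 *cmd, size_t len)
	{
		struct spi_transfer t = {
			.tx_buf = cmd,
			.len = len,
			.cs_change = 1,	/* keep CS asserted after this message */
		};
		struct spi_message m;
		int ret;

		spi_message_init_with_transfers(&m, &t, 1);

		spi_bus_lock(spi->master);
		ret = spi_sync_locked(spi, &m);
		/* ... more spi_sync_locked() traffic under the same CS ... */
		spi_bus_unlock(spi->master);

		return ret;
	}
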
4891 +diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
4892 +index 0ad63592cc3c..1e47bef72bb7 100644
4893 +--- a/drivers/iio/adc/ti-ads7950.c
4894 ++++ b/drivers/iio/adc/ti-ads7950.c
4895 +@@ -56,6 +56,9 @@ struct ti_ads7950_state {
4896 + struct spi_message ring_msg;
4897 + struct spi_message scan_single_msg;
4898 +
4899 ++ /* Lock to protect the spi xfer buffers */
4900 ++ struct mutex slock;
4901 ++
4902 + struct regulator *reg;
4903 + unsigned int vref_mv;
4904 +
4905 +@@ -268,6 +271,7 @@ static irqreturn_t ti_ads7950_trigger_handler(int irq, void *p)
4906 + struct ti_ads7950_state *st = iio_priv(indio_dev);
4907 + int ret;
4908 +
4909 ++ mutex_lock(&st->slock);
4910 + ret = spi_sync(st->spi, &st->ring_msg);
4911 + if (ret < 0)
4912 + goto out;
4913 +@@ -276,6 +280,7 @@ static irqreturn_t ti_ads7950_trigger_handler(int irq, void *p)
4914 + iio_get_time_ns(indio_dev));
4915 +
4916 + out:
4917 ++ mutex_unlock(&st->slock);
4918 + iio_trigger_notify_done(indio_dev->trig);
4919 +
4920 + return IRQ_HANDLED;
4921 +@@ -286,7 +291,7 @@ static int ti_ads7950_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
4922 + struct ti_ads7950_state *st = iio_priv(indio_dev);
4923 + int ret, cmd;
4924 +
4925 +- mutex_lock(&indio_dev->mlock);
4926 ++ mutex_lock(&st->slock);
4927 +
4928 + cmd = TI_ADS7950_CR_WRITE | TI_ADS7950_CR_CHAN(ch) | st->settings;
4929 + st->single_tx = cmd;
4930 +@@ -298,7 +303,7 @@ static int ti_ads7950_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
4931 + ret = st->single_rx;
4932 +
4933 + out:
4934 +- mutex_unlock(&indio_dev->mlock);
4935 ++ mutex_unlock(&st->slock);
4936 +
4937 + return ret;
4938 + }
4939 +@@ -432,16 +437,19 @@ static int ti_ads7950_probe(struct spi_device *spi)
4940 + if (ACPI_COMPANION(&spi->dev))
4941 + st->vref_mv = TI_ADS7950_VA_MV_ACPI_DEFAULT;
4942 +
4943 ++ mutex_init(&st->slock);
4944 ++
4945 + st->reg = devm_regulator_get(&spi->dev, "vref");
4946 + if (IS_ERR(st->reg)) {
4947 + dev_err(&spi->dev, "Failed get get regulator \"vref\"\n");
4948 +- return PTR_ERR(st->reg);
4949 ++ ret = PTR_ERR(st->reg);
4950 ++ goto error_destroy_mutex;
4951 + }
4952 +
4953 + ret = regulator_enable(st->reg);
4954 + if (ret) {
4955 + dev_err(&spi->dev, "Failed to enable regulator \"vref\"\n");
4956 +- return ret;
4957 ++ goto error_destroy_mutex;
4958 + }
4959 +
4960 + ret = iio_triggered_buffer_setup(indio_dev, NULL,
4961 +@@ -463,6 +471,8 @@ error_cleanup_ring:
4962 + iio_triggered_buffer_cleanup(indio_dev);
4963 + error_disable_reg:
4964 + regulator_disable(st->reg);
4965 ++error_destroy_mutex:
4966 ++ mutex_destroy(&st->slock);
4967 +
4968 + return ret;
4969 + }
4970 +@@ -475,6 +485,7 @@ static int ti_ads7950_remove(struct spi_device *spi)
4971 + iio_device_unregister(indio_dev);
4972 + iio_triggered_buffer_cleanup(indio_dev);
4973 + regulator_disable(st->reg);
4974 ++ mutex_destroy(&st->slock);
4975 +
4976 + return 0;
4977 + }
4978 +diff --git a/drivers/iio/common/ssp_sensors/ssp_iio.c b/drivers/iio/common/ssp_sensors/ssp_iio.c
4979 +index 645f2e3975db..e38f704d88b7 100644
4980 +--- a/drivers/iio/common/ssp_sensors/ssp_iio.c
4981 ++++ b/drivers/iio/common/ssp_sensors/ssp_iio.c
4982 +@@ -81,7 +81,7 @@ int ssp_common_process_data(struct iio_dev *indio_dev, void *buf,
4983 + unsigned int len, int64_t timestamp)
4984 + {
4985 + __le32 time;
4986 +- int64_t calculated_time;
4987 ++ int64_t calculated_time = 0;
4988 + struct ssp_sensor_data *spd = iio_priv(indio_dev);
4989 +
4990 + if (indio_dev->scan_bytes == 0)
4991 +diff --git a/drivers/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c
4992 +index 3de7f4426ac4..86abba5827a2 100644
4993 +--- a/drivers/iio/magnetometer/hmc5843_i2c.c
4994 ++++ b/drivers/iio/magnetometer/hmc5843_i2c.c
4995 +@@ -58,8 +58,13 @@ static const struct regmap_config hmc5843_i2c_regmap_config = {
4996 + static int hmc5843_i2c_probe(struct i2c_client *cli,
4997 + const struct i2c_device_id *id)
4998 + {
4999 ++ struct regmap *regmap = devm_regmap_init_i2c(cli,
5000 ++ &hmc5843_i2c_regmap_config);
5001 ++ if (IS_ERR(regmap))
5002 ++ return PTR_ERR(regmap);
5003 ++
5004 + return hmc5843_common_probe(&cli->dev,
5005 +- devm_regmap_init_i2c(cli, &hmc5843_i2c_regmap_config),
5006 ++ regmap,
5007 + id->driver_data, id->name);
5008 + }
5009 +
5010 +diff --git a/drivers/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c
5011 +index 535f03a70d63..79b2b707f90e 100644
5012 +--- a/drivers/iio/magnetometer/hmc5843_spi.c
5013 ++++ b/drivers/iio/magnetometer/hmc5843_spi.c
5014 +@@ -58,6 +58,7 @@ static const struct regmap_config hmc5843_spi_regmap_config = {
5015 + static int hmc5843_spi_probe(struct spi_device *spi)
5016 + {
5017 + int ret;
5018 ++ struct regmap *regmap;
5019 + const struct spi_device_id *id = spi_get_device_id(spi);
5020 +
5021 + spi->mode = SPI_MODE_3;
5022 +@@ -67,8 +68,12 @@ static int hmc5843_spi_probe(struct spi_device *spi)
5023 + if (ret)
5024 + return ret;
5025 +
5026 ++ regmap = devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config);
5027 ++ if (IS_ERR(regmap))
5028 ++ return PTR_ERR(regmap);
5029 ++
5030 + return hmc5843_common_probe(&spi->dev,
5031 +- devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config),
5032 ++ regmap,
5033 + id->driver_data, id->name);
5034 + }
5035 +
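
Both hmc5843 front ends now unwrap the regmap before handing it to the common probe; devm_regmap_init_i2c()/_spi() return an ERR_PTR(), never NULL, and the old code passed that encoded error straight on. A minimal sketch with hypothetical "foo" names:

	#include <linux/err.h>
	#include <linux/i2c.h>
	#include <linux/regmap.h>

	static const struct regmap_config foo_regmap_config = {
		.reg_bits = 8,
		.val_bits = 8,
	};

	int foo_common_probe(struct device *dev, struct regmap *map);	/* hypothetical */

	static int foo_i2c_probe(struct i2c_client *client)
	{
		struct regmap *map;

		map = devm_regmap_init_i2c(client, &foo_regmap_config);
		if (IS_ERR(map))		/* don't pass an ERR_PTR along */
			return PTR_ERR(map);

		return foo_common_probe(&client->dev, map);
	}
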
5036 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
5037 +index 68c997be2429..c54da16df0be 100644
5038 +--- a/drivers/infiniband/core/cma.c
5039 ++++ b/drivers/infiniband/core/cma.c
5040 +@@ -1173,18 +1173,31 @@ static inline bool cma_any_addr(const struct sockaddr *addr)
5041 + return cma_zero_addr(addr) || cma_loopback_addr(addr);
5042 + }
5043 +
5044 +-static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
5045 ++static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
5046 + {
5047 + if (src->sa_family != dst->sa_family)
5048 + return -1;
5049 +
5050 + switch (src->sa_family) {
5051 + case AF_INET:
5052 +- return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
5053 +- ((struct sockaddr_in *) dst)->sin_addr.s_addr;
5054 +- case AF_INET6:
5055 +- return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
5056 +- &((struct sockaddr_in6 *) dst)->sin6_addr);
5057 ++ return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
5058 ++ ((struct sockaddr_in *)dst)->sin_addr.s_addr;
5059 ++ case AF_INET6: {
5060 ++ struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
5061 ++ struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
5062 ++ bool link_local;
5063 ++
5064 ++ if (ipv6_addr_cmp(&src_addr6->sin6_addr,
5065 ++ &dst_addr6->sin6_addr))
5066 ++ return 1;
5067 ++ link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
5068 ++ IPV6_ADDR_LINKLOCAL;
5069 ++ /* Link local must match their scope_ids */
5070 ++ return link_local ? (src_addr6->sin6_scope_id !=
5071 ++ dst_addr6->sin6_scope_id) :
5072 ++ 0;
5073 ++ }
5074 ++
5075 + default:
5076 + return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
5077 + &((struct sockaddr_ib *) dst)->sib_addr);
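
The cma_addr_cmp() change above encodes the rule that two identical IPv6 link-local addresses are still different endpoints unless they sit on the same interface, so the scope IDs must match too. A standalone userspace sketch of the comparison:

	#include <netinet/in.h>
	#include <stdbool.h>
	#include <string.h>

	static bool in6_same_endpoint(const struct sockaddr_in6 *a,
				      const struct sockaddr_in6 *b)
	{
		if (memcmp(&a->sin6_addr, &b->sin6_addr, sizeof(a->sin6_addr)))
			return false;

		/* fe80::/10 is per-link; require the same interface. */
		if (IN6_IS_ADDR_LINKLOCAL(&b->sin6_addr))
			return a->sin6_scope_id == b->sin6_scope_id;

		return true;
	}
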
5078 +diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
5079 +index 4d232bdf9e97..689ba6bc2ca9 100644
5080 +--- a/drivers/infiniband/hw/cxgb4/cm.c
5081 ++++ b/drivers/infiniband/hw/cxgb4/cm.c
5082 +@@ -457,6 +457,8 @@ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
5083 + skb_reset_transport_header(skb);
5084 + } else {
5085 + skb = alloc_skb(len, gfp);
5086 ++ if (!skb)
5087 ++ return NULL;
5088 + }
5089 + t4_set_arp_err_handler(skb, NULL, NULL);
5090 + return skb;
5091 +diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
5092 +index faaaac8fbc55..3af5eb10a5ff 100644
5093 +--- a/drivers/infiniband/hw/hfi1/init.c
5094 ++++ b/drivers/infiniband/hw/hfi1/init.c
5095 +@@ -805,7 +805,8 @@ static int create_workqueues(struct hfi1_devdata *dd)
5096 + ppd->hfi1_wq =
5097 + alloc_workqueue(
5098 + "hfi%d_%d",
5099 +- WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
5100 ++ WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
5101 ++ WQ_MEM_RECLAIM,
5102 + HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
5103 + dd->unit, pidx);
5104 + if (!ppd->hfi1_wq)
5105 +diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
5106 +index b3c8c45ec1e3..64e0c69b69c5 100644
5107 +--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
5108 ++++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
5109 +@@ -70,7 +70,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
5110 + HNS_ROCE_VLAN_SL_BIT_MASK) <<
5111 + HNS_ROCE_VLAN_SL_SHIFT;
5112 +
5113 +- ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn |
5114 ++ ah->av.port_pd = cpu_to_le32(to_hr_pd(ibpd)->pdn |
5115 + (rdma_ah_get_port_num(ah_attr) <<
5116 + HNS_ROCE_PORT_NUM_SHIFT));
5117 + ah->av.gid_index = grh->sgid_index;
5118 +diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
5119 +index 0aa10ebda5d9..91669e35c6ca 100644
5120 +--- a/drivers/infiniband/hw/mlx5/odp.c
5121 ++++ b/drivers/infiniband/hw/mlx5/odp.c
5122 +@@ -711,6 +711,15 @@ struct pf_frame {
5123 + int depth;
5124 + };
5125 +
5126 ++static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key)
5127 ++{
5128 ++ if (!mmkey)
5129 ++ return false;
5130 ++ if (mmkey->type == MLX5_MKEY_MW)
5131 ++ return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
5132 ++ return mmkey->key == key;
5133 ++}
5134 ++
5135 + static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey)
5136 + {
5137 + struct mlx5_ib_mw *mw;
5138 +@@ -760,7 +769,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
5139 +
5140 + next_mr:
5141 + mmkey = __mlx5_mr_lookup(dev->mdev, mlx5_base_mkey(key));
5142 +- if (!mmkey || mmkey->key != key) {
5143 ++ if (!mkey_is_eq(mmkey, key)) {
5144 + mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
5145 + ret = -EFAULT;
5146 + goto srcu_unlock;
5147 +diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
5148 +index 42f0f25e396c..ec89fbd06c53 100644
5149 +--- a/drivers/infiniband/sw/rxe/rxe_mr.c
5150 ++++ b/drivers/infiniband/sw/rxe/rxe_mr.c
5151 +@@ -199,6 +199,12 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
5152 + buf = map[0]->buf;
5153 +
5154 + for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
5155 ++ if (num_buf >= RXE_BUF_PER_MAP) {
5156 ++ map++;
5157 ++ buf = map[0]->buf;
5158 ++ num_buf = 0;
5159 ++ }
5160 ++
5161 + vaddr = page_address(sg_page_iter_page(&sg_iter));
5162 + if (!vaddr) {
5163 + pr_warn("null vaddr\n");
5164 +@@ -211,11 +217,6 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
5165 + num_buf++;
5166 + buf++;
5167 +
5168 +- if (num_buf >= RXE_BUF_PER_MAP) {
5169 +- map++;
5170 +- buf = map[0]->buf;
5171 +- num_buf = 0;
5172 +- }
5173 + }
5174 + }
5175 +
5176 +diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
5177 +index 5002838ea476..f8986effcb50 100644
5178 +--- a/drivers/md/bcache/alloc.c
5179 ++++ b/drivers/md/bcache/alloc.c
5180 +@@ -327,10 +327,11 @@ static int bch_allocator_thread(void *arg)
5181 + * possibly issue discards to them, then we add the bucket to
5182 + * the free list:
5183 + */
5184 +- while (!fifo_empty(&ca->free_inc)) {
5185 ++ while (1) {
5186 + long bucket;
5187 +
5188 +- fifo_pop(&ca->free_inc, bucket);
5189 ++ if (!fifo_pop(&ca->free_inc, bucket))
5190 ++ break;
5191 +
5192 + if (ca->discard) {
5193 + mutex_unlock(&ca->set->bucket_lock);
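
The allocator-thread hunk above folds the emptiness check into the pop itself: instead of testing fifo_empty() and then popping, the loop pops unconditionally and lets the return value terminate it, giving a single source of truth for whether a bucket was actually obtained. A sketch in the shape of the bcache code, where fifo_pop() is assumed to return false on an empty fifo:

	static void drain_free_inc(struct cache *ca)
	{
		long bucket;

		/* Before: while (!fifo_empty(&ca->free_inc)) fifo_pop(...); */

		while (1) {
			if (!fifo_pop(&ca->free_inc, bucket))
				break;		/* emptiness learned from the pop */
			discard_or_free(ca, bucket);	/* hypothetical consumer */
		}
	}
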
5194 +diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
5195 +index d3725c17ce3a..6c94fa007796 100644
5196 +--- a/drivers/md/bcache/journal.c
5197 ++++ b/drivers/md/bcache/journal.c
5198 +@@ -317,6 +317,18 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
5199 + }
5200 + }
5201 +
5202 ++bool is_discard_enabled(struct cache_set *s)
5203 ++{
5204 ++ struct cache *ca;
5205 ++ unsigned int i;
5206 ++
5207 ++ for_each_cache(ca, s, i)
5208 ++ if (ca->discard)
5209 ++ return true;
5210 ++
5211 ++ return false;
5212 ++}
5213 ++
5214 + int bch_journal_replay(struct cache_set *s, struct list_head *list)
5215 + {
5216 + int ret = 0, keys = 0, entries = 0;
5217 +@@ -330,9 +342,17 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
5218 + list_for_each_entry(i, list, list) {
5219 + BUG_ON(i->pin && atomic_read(i->pin) != 1);
5220 +
5221 +- cache_set_err_on(n != i->j.seq, s,
5222 +-"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
5223 +- n, i->j.seq - 1, start, end);
5224 ++ if (n != i->j.seq) {
5225 ++ if (n == start && is_discard_enabled(s))
5226 ++ pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)",
5227 ++ n, i->j.seq - 1, start, end);
5228 ++ else {
5229 ++ pr_err("bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
5230 ++ n, i->j.seq - 1, start, end);
5231 ++ ret = -EIO;
5232 ++ goto err;
5233 ++ }
5234 ++ }
5235 +
5236 + for (k = i->j.start;
5237 + k < bset_bkey_last(&i->j);
5238 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
5239 +index 171d5e0f698b..e489d2459569 100644
5240 +--- a/drivers/md/bcache/super.c
5241 ++++ b/drivers/md/bcache/super.c
5242 +@@ -1775,13 +1775,15 @@ err:
5243 + return NULL;
5244 + }
5245 +
5246 +-static void run_cache_set(struct cache_set *c)
5247 ++static int run_cache_set(struct cache_set *c)
5248 + {
5249 + const char *err = "cannot allocate memory";
5250 + struct cached_dev *dc, *t;
5251 + struct cache *ca;
5252 + struct closure cl;
5253 + unsigned int i;
5254 ++ LIST_HEAD(journal);
5255 ++ struct journal_replay *l;
5256 +
5257 + closure_init_stack(&cl);
5258 +
5259 +@@ -1869,7 +1871,9 @@ static void run_cache_set(struct cache_set *c)
5260 + if (j->version < BCACHE_JSET_VERSION_UUID)
5261 + __uuid_write(c);
5262 +
5263 +- bch_journal_replay(c, &journal);
5264 ++ err = "bcache: replay journal failed";
5265 ++ if (bch_journal_replay(c, &journal))
5266 ++ goto err;
5267 + } else {
5268 + pr_notice("invalidating existing data");
5269 +
5270 +@@ -1937,11 +1941,19 @@ static void run_cache_set(struct cache_set *c)
5271 + flash_devs_run(c);
5272 +
5273 + set_bit(CACHE_SET_RUNNING, &c->flags);
5274 +- return;
5275 ++ return 0;
5276 + err:
5277 ++ while (!list_empty(&journal)) {
5278 ++ l = list_first_entry(&journal, struct journal_replay, list);
5279 ++ list_del(&l->list);
5280 ++ kfree(l);
5281 ++ }
5282 ++
5283 + closure_sync(&cl);
5284 + /* XXX: test this, it's broken */
5285 + bch_cache_set_error(c, "%s", err);
5286 ++
5287 ++ return -EIO;
5288 + }
5289 +
5290 + static bool can_attach_cache(struct cache *ca, struct cache_set *c)
5291 +@@ -2005,8 +2017,11 @@ found:
5292 + ca->set->cache[ca->sb.nr_this_dev] = ca;
5293 + c->cache_by_alloc[c->caches_loaded++] = ca;
5294 +
5295 +- if (c->caches_loaded == c->sb.nr_in_set)
5296 +- run_cache_set(c);
5297 ++ if (c->caches_loaded == c->sb.nr_in_set) {
5298 ++ err = "failed to run cache set";
5299 ++ if (run_cache_set(c) < 0)
5300 ++ goto err;
5301 ++ }
5302 +
5303 + return NULL;
5304 + err:
5305 +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
5306 +index cde3b49b2a91..350cf0451456 100644
5307 +--- a/drivers/md/dm-table.c
5308 ++++ b/drivers/md/dm-table.c
5309 +@@ -880,13 +880,17 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
5310 + }
5311 + EXPORT_SYMBOL_GPL(dm_table_set_type);
5312 +
5313 ++/* validate the dax capability of the target device span */
5314 + static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
5315 +- sector_t start, sector_t len, void *data)
5316 ++ sector_t start, sector_t len, void *data)
5317 + {
5318 +- return bdev_dax_supported(dev->bdev, PAGE_SIZE);
5319 ++ int blocksize = *(int *) data;
5320 ++
5321 ++ return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
5322 ++ start, len);
5323 + }
5324 +
5325 +-static bool dm_table_supports_dax(struct dm_table *t)
5326 ++bool dm_table_supports_dax(struct dm_table *t, int blocksize)
5327 + {
5328 + struct dm_target *ti;
5329 + unsigned i;
5330 +@@ -899,7 +903,8 @@ static bool dm_table_supports_dax(struct dm_table *t)
5331 + return false;
5332 +
5333 + if (!ti->type->iterate_devices ||
5334 +- !ti->type->iterate_devices(ti, device_supports_dax, NULL))
5335 ++ !ti->type->iterate_devices(ti, device_supports_dax,
5336 ++ &blocksize))
5337 + return false;
5338 + }
5339 +
5340 +@@ -979,7 +984,7 @@ static int dm_table_determine_type(struct dm_table *t)
5341 + verify_bio_based:
5342 + /* We must use this table as bio-based */
5343 + t->type = DM_TYPE_BIO_BASED;
5344 +- if (dm_table_supports_dax(t) ||
5345 ++ if (dm_table_supports_dax(t, PAGE_SIZE) ||
5346 + (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
5347 + t->type = DM_TYPE_DAX_BIO_BASED;
5348 + } else {
5349 +@@ -1905,7 +1910,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
5350 + }
5351 + blk_queue_write_cache(q, wc, fua);
5352 +
5353 +- if (dm_table_supports_dax(t))
5354 ++ if (dm_table_supports_dax(t, PAGE_SIZE))
5355 + blk_queue_flag_set(QUEUE_FLAG_DAX, q);
5356 + else
5357 + blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
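
The dm-table hunks above thread the caller's block size through iterate_devices(): the opaque void *data pointer carries the parameter into the per-device callback, which now checks the specific device span instead of whole-bdev support. A sketch of that callback pattern with a hypothetical predicate:

	#include <linux/device-mapper.h>

	bool device_blocksize_ok(struct dm_dev *dev, int blocksize,
				 sector_t start, sector_t len);	/* hypothetical */

	static int device_check(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
	{
		int blocksize = *(int *)data;	/* unpack the caller's argument */

		return device_blocksize_ok(dev, blocksize, start, len);
	}

	static bool table_check(struct dm_table *t, int blocksize)
	{
		struct dm_target *ti;
		unsigned int i;

		for (i = 0; i < dm_table_get_num_targets(t); i++) {
			ti = dm_table_get_target(t, i);
			if (!ti->type->iterate_devices ||
			    !ti->type->iterate_devices(ti, device_check,
						       &blocksize))
				return false;
		}
		return true;
	}
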
5358 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
5359 +index 08e7d412af95..1cacf02633ec 100644
5360 +--- a/drivers/md/dm.c
5361 ++++ b/drivers/md/dm.c
5362 +@@ -1105,6 +1105,25 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
5363 + return ret;
5364 + }
5365 +
5366 ++static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
5367 ++ int blocksize, sector_t start, sector_t len)
5368 ++{
5369 ++ struct mapped_device *md = dax_get_private(dax_dev);
5370 ++ struct dm_table *map;
5371 ++ int srcu_idx;
5372 ++ bool ret;
5373 ++
5374 ++ map = dm_get_live_table(md, &srcu_idx);
5375 ++ if (!map)
5376 ++ return false;
5377 ++
5378 ++ ret = dm_table_supports_dax(map, blocksize);
5379 ++
5380 ++ dm_put_live_table(md, srcu_idx);
5381 ++
5382 ++ return ret;
5383 ++}
5384 ++
5385 + static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
5386 + void *addr, size_t bytes, struct iov_iter *i)
5387 + {
5388 +@@ -3194,6 +3213,7 @@ static const struct block_device_operations dm_blk_dops = {
5389 +
5390 + static const struct dax_operations dm_dax_ops = {
5391 + .direct_access = dm_dax_direct_access,
5392 ++ .dax_supported = dm_dax_supported,
5393 + .copy_from_iter = dm_dax_copy_from_iter,
5394 + .copy_to_iter = dm_dax_copy_to_iter,
5395 + };
5396 +diff --git a/drivers/md/dm.h b/drivers/md/dm.h
5397 +index 2d539b82ec08..17e3db54404c 100644
5398 +--- a/drivers/md/dm.h
5399 ++++ b/drivers/md/dm.h
5400 +@@ -72,6 +72,7 @@ bool dm_table_bio_based(struct dm_table *t);
5401 + bool dm_table_request_based(struct dm_table *t);
5402 + void dm_table_free_md_mempools(struct dm_table *t);
5403 + struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
5404 ++bool dm_table_supports_dax(struct dm_table *t, int blocksize);
5405 +
5406 + void dm_lock_md_type(struct mapped_device *md);
5407 + void dm_unlock_md_type(struct mapped_device *md);
5408 +diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
5409 +index 15b6b9c0a2e4..9c163f658aaf 100644
5410 +--- a/drivers/media/common/videobuf2/videobuf2-core.c
5411 ++++ b/drivers/media/common/videobuf2/videobuf2-core.c
5412 +@@ -672,6 +672,11 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
5413 + return -EBUSY;
5414 + }
5415 +
5416 ++ if (q->waiting_in_dqbuf && *count) {
5417 ++ dprintk(1, "another dup()ped fd is waiting for a buffer\n");
5418 ++ return -EBUSY;
5419 ++ }
5420 ++
5421 + if (*count == 0 || q->num_buffers != 0 ||
5422 + (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) {
5423 + /*
5424 +@@ -807,6 +812,10 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
5425 + }
5426 +
5427 + if (!q->num_buffers) {
5428 ++ if (q->waiting_in_dqbuf && *count) {
5429 ++ dprintk(1, "another dup()ped fd is waiting for a buffer\n");
5430 ++ return -EBUSY;
5431 ++ }
5432 + memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
5433 + q->memory = memory;
5434 + q->waiting_for_buffers = !q->is_output;
5435 +@@ -1659,6 +1668,11 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
5436 + for (;;) {
5437 + int ret;
5438 +
5439 ++ if (q->waiting_in_dqbuf) {
5440 ++ dprintk(1, "another dup()ped fd is waiting for a buffer\n");
5441 ++ return -EBUSY;
5442 ++ }
5443 ++
5444 + if (!q->streaming) {
5445 + dprintk(1, "streaming off, will not wait for buffers\n");
5446 + return -EINVAL;
5447 +@@ -1686,6 +1700,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
5448 + return -EAGAIN;
5449 + }
5450 +
5451 ++ q->waiting_in_dqbuf = 1;
5452 + /*
5453 + * We are streaming and blocking, wait for another buffer to
5454 + * become ready or for streamoff. Driver's lock is released to
5455 +@@ -1706,6 +1721,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
5456 + * the locks or return an error if one occurred.
5457 + */
5458 + call_void_qop(q, wait_finish, q);
5459 ++ q->waiting_in_dqbuf = 0;
5460 + if (ret) {
5461 + dprintk(1, "sleep was interrupted\n");
5462 + return ret;
5463 +@@ -2585,6 +2601,12 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
5464 + if (!data)
5465 + return -EINVAL;
5466 +
5467 ++ if (q->waiting_in_dqbuf) {
5468 ++ dprintk(3, "another dup()ped fd is %s\n",
5469 ++ read ? "reading" : "writing");
5470 ++ return -EBUSY;
5471 ++ }
5472 ++
5473 + /*
5474 + * Initialize emulator on first call.
5475 + */
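
The videobuf2 hunks above serialize blocking consumers: dup()ed file descriptors share one queue, so only one of them may sleep in DQBUF (or the read/write emulator) at a time. waiting_in_dqbuf is raised for the duration of the wait, and every entry point that could re-enter or reallocate buffers bails out with -EBUSY. A simplified skeleton with hypothetical names (in the real code the flag is only touched with the queue lock held; wait_prepare/wait_finish drop and retake it around the sleep):

	static int dqbuf_wait(struct queue *q)	/* hypothetical queue type */
	{
		if (q->waiting_in_dqbuf)
			return -EBUSY;	/* another dup()ped fd already waits */

		q->waiting_in_dqbuf = 1;
		sleep_unlocked(q);	/* wait_prepare()/wait_finish() pair */
		q->waiting_in_dqbuf = 0;

		return 0;
	}
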
5476 +diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
5477 +index 123f2a33738b..403f42806455 100644
5478 +--- a/drivers/media/dvb-frontends/m88ds3103.c
5479 ++++ b/drivers/media/dvb-frontends/m88ds3103.c
5480 +@@ -309,6 +309,9 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
5481 + u16 u16tmp;
5482 + u32 tuner_frequency_khz, target_mclk;
5483 + s32 s32tmp;
5484 ++ static const struct reg_sequence reset_buf[] = {
5485 ++ {0x07, 0x80}, {0x07, 0x00}
5486 ++ };
5487 +
5488 + dev_dbg(&client->dev,
5489 + "delivery_system=%d modulation=%d frequency=%u symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
5490 +@@ -321,11 +324,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
5491 + }
5492 +
5493 + /* reset */
5494 +- ret = regmap_write(dev->regmap, 0x07, 0x80);
5495 +- if (ret)
5496 +- goto err;
5497 +-
5498 +- ret = regmap_write(dev->regmap, 0x07, 0x00);
5499 ++ ret = regmap_multi_reg_write(dev->regmap, reset_buf, 2);
5500 + if (ret)
5501 + goto err;
5502 +
5503 +diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
5504 +index feacd8da421d..d55d8f169dca 100644
5505 +--- a/drivers/media/dvb-frontends/si2165.c
5506 ++++ b/drivers/media/dvb-frontends/si2165.c
5507 +@@ -275,18 +275,20 @@ static u32 si2165_get_fe_clk(struct si2165_state *state)
5508 +
5509 + static int si2165_wait_init_done(struct si2165_state *state)
5510 + {
5511 +- int ret = -EINVAL;
5512 ++ int ret;
5513 + u8 val = 0;
5514 + int i;
5515 +
5516 + for (i = 0; i < 3; ++i) {
5517 +- si2165_readreg8(state, REG_INIT_DONE, &val);
5518 ++ ret = si2165_readreg8(state, REG_INIT_DONE, &val);
5519 ++ if (ret < 0)
5520 ++ return ret;
5521 + if (val == 0x01)
5522 + return 0;
5523 + usleep_range(1000, 50000);
5524 + }
5525 + dev_err(&state->client->dev, "init_done was not set\n");
5526 +- return ret;
5527 ++ return -EINVAL;
5528 + }
5529 +
5530 + static int si2165_upload_firmware_block(struct si2165_state *state,
5531 +diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
5532 +index 799acce803fe..a1e9a980a445 100644
5533 +--- a/drivers/media/i2c/ov2659.c
5534 ++++ b/drivers/media/i2c/ov2659.c
5535 +@@ -1117,8 +1117,10 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd,
5536 + if (ov2659_formats[index].code == mf->code)
5537 + break;
5538 +
5539 +- if (index < 0)
5540 +- return -EINVAL;
5541 ++ if (index < 0) {
5542 ++ index = 0;
5543 ++ mf->code = ov2659_formats[index].code;
5544 ++ }
5545 +
5546 + mf->colorspace = V4L2_COLORSPACE_SRGB;
5547 + mf->field = V4L2_FIELD_NONE;
5548 +diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
5549 +index f9359b11fa5c..de7d9790f054 100644
5550 +--- a/drivers/media/i2c/ov6650.c
5551 ++++ b/drivers/media/i2c/ov6650.c
5552 +@@ -810,9 +810,16 @@ static int ov6650_video_probe(struct i2c_client *client)
5553 + u8 pidh, pidl, midh, midl;
5554 + int ret;
5555 +
5556 ++ priv->clk = v4l2_clk_get(&client->dev, NULL);
5557 ++ if (IS_ERR(priv->clk)) {
5558 ++ ret = PTR_ERR(priv->clk);
5559 ++ dev_err(&client->dev, "v4l2_clk request err: %d\n", ret);
5560 ++ return ret;
5561 ++ }
5562 ++
5563 + ret = ov6650_s_power(&priv->subdev, 1);
5564 + if (ret < 0)
5565 +- return ret;
5566 ++ goto eclkput;
5567 +
5568 + msleep(20);
5569 +
5570 +@@ -849,6 +856,11 @@ static int ov6650_video_probe(struct i2c_client *client)
5571 +
5572 + done:
5573 + ov6650_s_power(&priv->subdev, 0);
5574 ++ if (!ret)
5575 ++ return 0;
5576 ++eclkput:
5577 ++ v4l2_clk_put(priv->clk);
5578 ++
5579 + return ret;
5580 + }
5581 +
5582 +@@ -991,18 +1003,9 @@ static int ov6650_probe(struct i2c_client *client,
5583 + priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
5584 + priv->colorspace = V4L2_COLORSPACE_JPEG;
5585 +
5586 +- priv->clk = v4l2_clk_get(&client->dev, NULL);
5587 +- if (IS_ERR(priv->clk)) {
5588 +- ret = PTR_ERR(priv->clk);
5589 +- goto eclkget;
5590 +- }
5591 +-
5592 + ret = ov6650_video_probe(client);
5593 +- if (ret) {
5594 +- v4l2_clk_put(priv->clk);
5595 +-eclkget:
5596 ++ if (ret)
5597 + v4l2_ctrl_handler_free(&priv->hdl);
5598 +- }
5599 +
5600 + return ret;
5601 + }
5602 +diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
5603 +index a7d26b294eb5..e65693c2aad5 100644
5604 +--- a/drivers/media/i2c/ov7670.c
5605 ++++ b/drivers/media/i2c/ov7670.c
5606 +@@ -1664,6 +1664,7 @@ static int ov7670_s_power(struct v4l2_subdev *sd, int on)
5607 +
5608 + if (on) {
5609 + ov7670_power_on (sd);
5610 ++ ov7670_init(sd, 0);
5611 + ov7670_apply_fmt(sd);
5612 + ov7675_apply_framerate(sd);
5613 + v4l2_ctrl_handler_setup(&info->hdl);
5614 +diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
5615 +index 5817d9cde4d0..6d8e4afe9673 100644
5616 +--- a/drivers/media/pci/saa7146/hexium_gemini.c
5617 ++++ b/drivers/media/pci/saa7146/hexium_gemini.c
5618 +@@ -270,9 +270,8 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
5619 + /* enable i2c-port pins */
5620 + saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26));
5621 +
5622 +- hexium->i2c_adapter = (struct i2c_adapter) {
5623 +- .name = "hexium gemini",
5624 +- };
5625 ++ strscpy(hexium->i2c_adapter.name, "hexium gemini",
5626 ++ sizeof(hexium->i2c_adapter.name));
5627 + saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
5628 + if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
5629 + DEB_S("cannot register i2c-device. skipping.\n");
5630 +diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c
5631 +index 0a05176c18ab..a794f9e5f990 100644
5632 +--- a/drivers/media/pci/saa7146/hexium_orion.c
5633 ++++ b/drivers/media/pci/saa7146/hexium_orion.c
5634 +@@ -231,9 +231,8 @@ static int hexium_probe(struct saa7146_dev *dev)
5635 + saa7146_write(dev, DD1_STREAM_B, 0x00000000);
5636 + saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
5637 +
5638 +- hexium->i2c_adapter = (struct i2c_adapter) {
5639 +- .name = "hexium orion",
5640 +- };
5641 ++ strscpy(hexium->i2c_adapter.name, "hexium orion",
5642 ++ sizeof(hexium->i2c_adapter.name));
5643 + saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
5644 + if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
5645 + DEB_S("cannot register i2c-device. skipping.\n");
5646 +diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
5647 +index b4f396c2e72c..eaa86737fa04 100644
5648 +--- a/drivers/media/platform/coda/coda-bit.c
5649 ++++ b/drivers/media/platform/coda/coda-bit.c
5650 +@@ -2010,6 +2010,9 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
5651 + /* Clear decode success flag */
5652 + coda_write(dev, 0, CODA_RET_DEC_PIC_SUCCESS);
5653 +
5654 ++ /* Clear error return value */
5655 ++ coda_write(dev, 0, CODA_RET_DEC_PIC_ERR_MB);
5656 ++
5657 + trace_coda_dec_pic_run(ctx, meta);
5658 +
5659 + coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
5660 +diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
5661 +index d022c65bb34c..e20b340855e7 100644
5662 +--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
5663 ++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
5664 +@@ -388,7 +388,7 @@ static void mtk_vdec_worker(struct work_struct *work)
5665 + }
5666 + buf.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
5667 + buf.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
5668 +- buf.size = (size_t)src_buf->planes[0].bytesused;
5669 ++ buf.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
5670 + if (!buf.va) {
5671 + v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
5672 + mtk_v4l2_err("[%d] id=%d src_addr is NULL!!",
5673 +@@ -1155,10 +1155,10 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
5674 +
5675 + src_mem.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
5676 + src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
5677 +- src_mem.size = (size_t)src_buf->planes[0].bytesused;
5678 ++ src_mem.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
5679 + mtk_v4l2_debug(2,
5680 + "[%d] buf id=%d va=%p dma=%pad size=%zx",
5681 +- ctx->id, src_buf->index,
5682 ++ ctx->id, src_buf->vb2_buf.index,
5683 + src_mem.va, &src_mem.dma_addr,
5684 + src_mem.size);
5685 +
5686 +@@ -1182,7 +1182,7 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
5687 + }
5688 + mtk_v4l2_debug(ret ? 0 : 1,
5689 + "[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
5690 +- ctx->id, src_buf->index,
5691 ++ ctx->id, src_buf->vb2_buf.index,
5692 + src_mem.size, ret, res_chg);
5693 + return;
5694 + }
5695 +diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
5696 +index c6b48b5925fb..50351adafc47 100644
5697 +--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
5698 ++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
5699 +@@ -894,7 +894,7 @@ static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
5700 +
5701 + if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
5702 + while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) {
5703 +- dst_buf->planes[0].bytesused = 0;
5704 ++ dst_buf->vb2_buf.planes[0].bytesused = 0;
5705 + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
5706 + }
5707 + } else {
5708 +@@ -947,7 +947,7 @@ static int mtk_venc_encode_header(void *priv)
5709 +
5710 + bs_buf.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
5711 + bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
5712 +- bs_buf.size = (size_t)dst_buf->planes[0].length;
5713 ++ bs_buf.size = (size_t)dst_buf->vb2_buf.planes[0].length;
5714 +
5715 + mtk_v4l2_debug(1,
5716 + "[%d] buf id=%d va=0x%p dma_addr=0x%llx size=%zu",
5717 +@@ -976,7 +976,7 @@ static int mtk_venc_encode_header(void *priv)
5718 + }
5719 +
5720 + ctx->state = MTK_STATE_HEADER;
5721 +- dst_buf->planes[0].bytesused = enc_result.bs_size;
5722 ++ dst_buf->vb2_buf.planes[0].bytesused = enc_result.bs_size;
5723 + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
5724 +
5725 + return 0;
5726 +@@ -1107,12 +1107,12 @@ static void mtk_venc_worker(struct work_struct *work)
5727 +
5728 + if (ret) {
5729 + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
5730 +- dst_buf->planes[0].bytesused = 0;
5731 ++ dst_buf->vb2_buf.planes[0].bytesused = 0;
5732 + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
5733 + mtk_v4l2_err("venc_if_encode failed=%d", ret);
5734 + } else {
5735 + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
5736 +- dst_buf->planes[0].bytesused = enc_result.bs_size;
5737 ++ dst_buf->vb2_buf.planes[0].bytesused = enc_result.bs_size;
5738 + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
5739 + mtk_v4l2_debug(2, "venc_if_encode bs size=%d",
5740 + enc_result.bs_size);
5741 +diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
5742 +index 5fe5b38fa901..922855b6025c 100644
5743 +--- a/drivers/media/platform/stm32/stm32-dcmi.c
5744 ++++ b/drivers/media/platform/stm32/stm32-dcmi.c
5745 +@@ -811,6 +811,9 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
5746 +
5747 + sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
5748 + if (!sd_fmt) {
5749 ++ if (!dcmi->num_of_sd_formats)
5750 ++ return -ENODATA;
5751 ++
5752 + sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
5753 + pix->pixelformat = sd_fmt->fourcc;
5754 + }
5755 +@@ -989,6 +992,9 @@ static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
5756 +
5757 + sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
5758 + if (!sd_fmt) {
5759 ++ if (!dcmi->num_of_sd_formats)
5760 ++ return -ENODATA;
5761 ++
5762 + sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
5763 + pix->pixelformat = sd_fmt->fourcc;
5764 + }
5765 +@@ -1645,7 +1651,7 @@ static int dcmi_probe(struct platform_device *pdev)
5766 + dcmi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
5767 + if (IS_ERR(dcmi->rstc)) {
5768 + dev_err(&pdev->dev, "Could not get reset control\n");
5769 +- return -ENODEV;
5770 ++ return PTR_ERR(dcmi->rstc);
5771 + }
5772 +
5773 + /* Get bus characteristics from devicetree */
5774 +@@ -1660,7 +1666,7 @@ static int dcmi_probe(struct platform_device *pdev)
5775 + of_node_put(np);
5776 + if (ret) {
5777 + dev_err(&pdev->dev, "Could not parse the endpoint\n");
5778 +- return -ENODEV;
5779 ++ return ret;
5780 + }
5781 +
5782 + if (ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
5783 +@@ -1673,8 +1679,9 @@ static int dcmi_probe(struct platform_device *pdev)
5784 +
5785 + irq = platform_get_irq(pdev, 0);
5786 + if (irq <= 0) {
5787 +- dev_err(&pdev->dev, "Could not get irq\n");
5788 +- return -ENODEV;
5789 ++ if (irq != -EPROBE_DEFER)
5790 ++ dev_err(&pdev->dev, "Could not get irq\n");
5791 ++ return irq;
5792 + }
5793 +
5794 + dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5795 +@@ -1694,12 +1701,13 @@ static int dcmi_probe(struct platform_device *pdev)
5796 + dev_name(&pdev->dev), dcmi);
5797 + if (ret) {
5798 + dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
5799 +- return -ENODEV;
5800 ++ return ret;
5801 + }
5802 +
5803 + mclk = devm_clk_get(&pdev->dev, "mclk");
5804 + if (IS_ERR(mclk)) {
5805 +- dev_err(&pdev->dev, "Unable to get mclk\n");
5806 ++ if (PTR_ERR(mclk) != -EPROBE_DEFER)
5807 ++ dev_err(&pdev->dev, "Unable to get mclk\n");
5808 + return PTR_ERR(mclk);
5809 + }
5810 +
5811 +diff --git a/drivers/media/platform/vicodec/codec-fwht.c b/drivers/media/platform/vicodec/codec-fwht.c
5812 +index d1d6085da9f1..cf469a1191aa 100644
5813 +--- a/drivers/media/platform/vicodec/codec-fwht.c
5814 ++++ b/drivers/media/platform/vicodec/codec-fwht.c
5815 +@@ -46,8 +46,12 @@ static const uint8_t zigzag[64] = {
5816 + 63,
5817 + };
5818 +
5819 +-
5820 +-static int rlc(const s16 *in, __be16 *output, int blocktype)
5821 ++/*
5822 ++ * noinline_for_stack to work around
5823 ++ * https://bugs.llvm.org/show_bug.cgi?id=38809
5824 ++ */
5825 ++static int noinline_for_stack
5826 ++rlc(const s16 *in, __be16 *output, int blocktype)
5827 + {
5828 + s16 block[8 * 8];
5829 + s16 *wp = block;
5830 +@@ -106,8 +110,8 @@ static int rlc(const s16 *in, __be16 *output, int blocktype)
5831 + * This function will worst-case increase rlc_in by 65*2 bytes:
5832 + * one s16 value for the header and 8 * 8 coefficients of type s16.
5833 + */
5834 +-static u16 derlc(const __be16 **rlc_in, s16 *dwht_out,
5835 +- const __be16 *end_of_input)
5836 ++static noinline_for_stack u16
5837 ++derlc(const __be16 **rlc_in, s16 *dwht_out, const __be16 *end_of_input)
5838 + {
5839 + /* header */
5840 + const __be16 *input = *rlc_in;
5841 +@@ -240,8 +244,9 @@ static void dequantize_inter(s16 *coeff)
5842 + *coeff <<= *quant;
5843 + }
5844 +
5845 +-static void fwht(const u8 *block, s16 *output_block, unsigned int stride,
5846 +- unsigned int input_step, bool intra)
5847 ++static void noinline_for_stack fwht(const u8 *block, s16 *output_block,
5848 ++ unsigned int stride,
5849 ++ unsigned int input_step, bool intra)
5850 + {
5851 + /* we'll need more than 8 bits for the transformed coefficients */
5852 + s32 workspace1[8], workspace2[8];
5853 +@@ -373,7 +378,8 @@ static void fwht(const u8 *block, s16 *output_block, unsigned int stride,
5854 + * Furthermore values can be negative... This is just a version that
5855 + * works with 16 signed data
5856 + */
5857 +-static void fwht16(const s16 *block, s16 *output_block, int stride, int intra)
5858 ++static void noinline_for_stack
5859 ++fwht16(const s16 *block, s16 *output_block, int stride, int intra)
5860 + {
5861 + /* we'll need more than 8 bits for the transformed coefficients */
5862 + s32 workspace1[8], workspace2[8];
5863 +@@ -456,7 +462,8 @@ static void fwht16(const s16 *block, s16 *output_block, int stride, int intra)
5864 + }
5865 + }
5866 +
5867 +-static void ifwht(const s16 *block, s16 *output_block, int intra)
5868 ++static noinline_for_stack void
5869 ++ifwht(const s16 *block, s16 *output_block, int intra)
5870 + {
5871 + /*
5872 + * we'll need more than 8 bits for the transformed coefficients
5873 +@@ -604,9 +611,9 @@ static int var_inter(const s16 *old, const s16 *new)
5874 + return ret;
5875 + }
5876 +
5877 +-static int decide_blocktype(const u8 *cur, const u8 *reference,
5878 +- s16 *deltablock, unsigned int stride,
5879 +- unsigned int input_step)
5880 ++static noinline_for_stack int
5881 ++decide_blocktype(const u8 *cur, const u8 *reference, s16 *deltablock,
5882 ++ unsigned int stride, unsigned int input_step)
5883 + {
5884 + s16 tmp[64];
5885 + s16 old[64];
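
The codec-fwht hunks above annotate every helper that owns a 64-entry s16 scratch array with noinline_for_stack, working around the cited LLVM bug where clang inlines them all into one caller and overflows the kernel's frame-size limit. A toy illustration of the effect:

	#include <linux/compiler.h>
	#include <linux/types.h>

	static noinline_for_stack void stage1(s16 *out)
	{
		s16 tmp[64] = { 0 };	/* large per-call scratch */

		out[0] = tmp[0];
	}

	static noinline_for_stack void stage2(s16 *out)
	{
		s16 tmp[64] = { 0 };	/* another large scratch */

		out[1] = tmp[1];
	}

	static void process_block(s16 *out)
	{
		/* Inlined, both arrays could land in this frame at once;
		 * out of line, each lives only during its own call. */
		stage1(out);
		stage2(out);
	}
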
5886 +diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
5887 +index d7636fe9e174..8788369e59a0 100644
5888 +--- a/drivers/media/platform/vicodec/vicodec-core.c
5889 ++++ b/drivers/media/platform/vicodec/vicodec-core.c
5890 +@@ -159,12 +159,10 @@ static int device_process(struct vicodec_ctx *ctx,
5891 + struct vb2_v4l2_buffer *dst_vb)
5892 + {
5893 + struct vicodec_dev *dev = ctx->dev;
5894 +- struct vicodec_q_data *q_dst;
5895 + struct v4l2_fwht_state *state = &ctx->state;
5896 + u8 *p_src, *p_dst;
5897 + int ret;
5898 +
5899 +- q_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
5900 + if (ctx->is_enc)
5901 + p_src = vb2_plane_vaddr(&src_vb->vb2_buf, 0);
5902 + else
5903 +@@ -186,8 +184,10 @@ static int device_process(struct vicodec_ctx *ctx,
5904 + return ret;
5905 + vb2_set_plane_payload(&dst_vb->vb2_buf, 0, ret);
5906 + } else {
5907 ++ struct vicodec_q_data *q_dst;
5908 + unsigned int comp_frame_size = ntohl(ctx->state.header.size);
5909 +
5910 ++ q_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
5911 + if (comp_frame_size > ctx->comp_max_size)
5912 + return -EINVAL;
5913 + state->info = q_dst->info;
5914 +@@ -196,11 +196,6 @@ static int device_process(struct vicodec_ctx *ctx,
5915 + return ret;
5916 + vb2_set_plane_payload(&dst_vb->vb2_buf, 0, q_dst->sizeimage);
5917 + }
5918 +-
5919 +- dst_vb->sequence = q_dst->sequence++;
5920 +- dst_vb->flags &= ~V4L2_BUF_FLAG_LAST;
5921 +- v4l2_m2m_buf_copy_metadata(src_vb, dst_vb, !ctx->is_enc);
5922 +-
5923 + return 0;
5924 + }
5925 +
5926 +@@ -274,16 +269,22 @@ static void device_run(void *priv)
5927 + struct vicodec_ctx *ctx = priv;
5928 + struct vicodec_dev *dev = ctx->dev;
5929 + struct vb2_v4l2_buffer *src_buf, *dst_buf;
5930 +- struct vicodec_q_data *q_src;
5931 ++ struct vicodec_q_data *q_src, *q_dst;
5932 + u32 state;
5933 +
5934 + src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
5935 + dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
5936 + q_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
5937 ++ q_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
5938 +
5939 + state = VB2_BUF_STATE_DONE;
5940 + if (device_process(ctx, src_buf, dst_buf))
5941 + state = VB2_BUF_STATE_ERROR;
5942 ++ else
5943 ++ dst_buf->sequence = q_dst->sequence++;
5944 ++ dst_buf->flags &= ~V4L2_BUF_FLAG_LAST;
5945 ++ v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, !ctx->is_enc);
5946 ++
5947 + ctx->last_dst_buf = dst_buf;
5948 +
5949 + spin_lock(ctx->lock);
5950 +@@ -1338,8 +1339,11 @@ static int vicodec_start_streaming(struct vb2_queue *q,
5951 + chroma_div = info->width_div * info->height_div;
5952 + q_data->sequence = 0;
5953 +
5954 +- ctx->last_src_buf = NULL;
5955 +- ctx->last_dst_buf = NULL;
5956 ++ if (V4L2_TYPE_IS_OUTPUT(q->type))
5957 ++ ctx->last_src_buf = NULL;
5958 ++ else
5959 ++ ctx->last_dst_buf = NULL;
5960 ++
5961 + state->gop_cnt = 0;
5962 +
5963 + if ((V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) ||
5964 +diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
5965 +index 0ba30756e1e4..d8cd5f5cb10d 100644
5966 +--- a/drivers/media/platform/video-mux.c
5967 ++++ b/drivers/media/platform/video-mux.c
5968 +@@ -419,9 +419,14 @@ static int video_mux_probe(struct platform_device *pdev)
5969 + vmux->active = -1;
5970 + vmux->pads = devm_kcalloc(dev, num_pads, sizeof(*vmux->pads),
5971 + GFP_KERNEL);
5972 ++ if (!vmux->pads)
5973 ++ return -ENOMEM;
5974 ++
5975 + vmux->format_mbus = devm_kcalloc(dev, num_pads,
5976 + sizeof(*vmux->format_mbus),
5977 + GFP_KERNEL);
5978 ++ if (!vmux->format_mbus)
5979 ++ return -ENOMEM;
5980 +
5981 + for (i = 0; i < num_pads; i++) {
5982 + vmux->pads[i].flags = (i < num_pads - 1) ? MEDIA_PAD_FL_SINK
5983 +diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
5984 +index 34dcaca45d8b..dd47821fc661 100644
5985 +--- a/drivers/media/platform/vim2m.c
5986 ++++ b/drivers/media/platform/vim2m.c
5987 +@@ -1262,6 +1262,15 @@ static int vim2m_release(struct file *file)
5988 + return 0;
5989 + }
5990 +
5991 ++static void vim2m_device_release(struct video_device *vdev)
5992 ++{
5993 ++ struct vim2m_dev *dev = container_of(vdev, struct vim2m_dev, vfd);
5994 ++
5995 ++ v4l2_device_unregister(&dev->v4l2_dev);
5996 ++ v4l2_m2m_release(dev->m2m_dev);
5997 ++ kfree(dev);
5998 ++}
5999 ++
6000 + static const struct v4l2_file_operations vim2m_fops = {
6001 + .owner = THIS_MODULE,
6002 + .open = vim2m_open,
6003 +@@ -1277,7 +1286,7 @@ static const struct video_device vim2m_videodev = {
6004 + .fops = &vim2m_fops,
6005 + .ioctl_ops = &vim2m_ioctl_ops,
6006 + .minor = -1,
6007 +- .release = video_device_release_empty,
6008 ++ .release = vim2m_device_release,
6009 + .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
6010 + };
6011 +
6012 +@@ -1298,13 +1307,13 @@ static int vim2m_probe(struct platform_device *pdev)
6013 + struct video_device *vfd;
6014 + int ret;
6015 +
6016 +- dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
6017 ++ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
6018 + if (!dev)
6019 + return -ENOMEM;
6020 +
6021 + ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
6022 + if (ret)
6023 +- return ret;
6024 ++ goto error_free;
6025 +
6026 + atomic_set(&dev->num_inst, 0);
6027 + mutex_init(&dev->dev_mutex);
6028 +@@ -1317,7 +1326,7 @@ static int vim2m_probe(struct platform_device *pdev)
6029 + ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
6030 + if (ret) {
6031 + v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
6032 +- goto unreg_v4l2;
6033 ++ goto error_v4l2;
6034 + }
6035 +
6036 + video_set_drvdata(vfd, dev);
6037 +@@ -1330,7 +1339,7 @@ static int vim2m_probe(struct platform_device *pdev)
6038 + if (IS_ERR(dev->m2m_dev)) {
6039 + v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
6040 + ret = PTR_ERR(dev->m2m_dev);
6041 +- goto unreg_dev;
6042 ++ goto error_dev;
6043 + }
6044 +
6045 + #ifdef CONFIG_MEDIA_CONTROLLER
6046 +@@ -1346,27 +1355,29 @@ static int vim2m_probe(struct platform_device *pdev)
6047 + MEDIA_ENT_F_PROC_VIDEO_SCALER);
6048 + if (ret) {
6049 + v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
6050 +- goto unreg_m2m;
6051 ++ goto error_m2m;
6052 + }
6053 +
6054 + ret = media_device_register(&dev->mdev);
6055 + if (ret) {
6056 + v4l2_err(&dev->v4l2_dev, "Failed to register mem2mem media device\n");
6057 +- goto unreg_m2m_mc;
6058 ++ goto error_m2m_mc;
6059 + }
6060 + #endif
6061 + return 0;
6062 +
6063 + #ifdef CONFIG_MEDIA_CONTROLLER
6064 +-unreg_m2m_mc:
6065 ++error_m2m_mc:
6066 + v4l2_m2m_unregister_media_controller(dev->m2m_dev);
6067 +-unreg_m2m:
6068 ++error_m2m:
6069 + v4l2_m2m_release(dev->m2m_dev);
6070 + #endif
6071 +-unreg_dev:
6072 ++error_dev:
6073 + video_unregister_device(&dev->vfd);
6074 +-unreg_v4l2:
6075 ++error_v4l2:
6076 + v4l2_device_unregister(&dev->v4l2_dev);
6077 ++error_free:
6078 ++ kfree(dev);
6079 +
6080 + return ret;
6081 + }
6082 +@@ -1382,9 +1393,7 @@ static int vim2m_remove(struct platform_device *pdev)
6083 + v4l2_m2m_unregister_media_controller(dev->m2m_dev);
6084 + media_device_cleanup(&dev->mdev);
6085 + #endif
6086 +- v4l2_m2m_release(dev->m2m_dev);
6087 + video_unregister_device(&dev->vfd);
6088 +- v4l2_device_unregister(&dev->v4l2_dev);
6089 +
6090 + return 0;
6091 + }
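The vim2m hunks above change the device lifetime model: memory from devm_kzalloc() is freed as soon as the platform device unbinds, but a registered video_device can outlive unbind through open file handles. Switching to kzalloc() plus a .release callback defers the final teardown to the last reference drop, and the relabelled goto ladder unwinds probe failures in reverse order. A sketch of the release-callback pattern, assuming a driver struct that embeds its video_device as vfd (names are illustrative):

    static void my_device_release(struct video_device *vdev)
    {
            struct my_dev *dev = container_of(vdev, struct my_dev, vfd);

            /* Runs when the last reference is gone: only now is freeing safe. */
            v4l2_device_unregister(&dev->v4l2_dev);
            kfree(dev);
    }

    static const struct video_device my_videodev = {
            .release = my_device_release,   /* not video_device_release_empty */
    };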
6092 +diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
6093 +index 0fbb7914098f..3aa62d7e3d0e 100644
6094 +--- a/drivers/media/platform/vimc/vimc-core.c
6095 ++++ b/drivers/media/platform/vimc/vimc-core.c
6096 +@@ -304,6 +304,8 @@ static int vimc_probe(struct platform_device *pdev)
6097 +
6098 + dev_dbg(&pdev->dev, "probe");
6099 +
6100 ++ memset(&vimc->mdev, 0, sizeof(vimc->mdev));
6101 ++
6102 + /* Create platform_device for each entity in the topology */
6103 + vimc->subdevs = devm_kcalloc(&vimc->pdev.dev, vimc->pipe_cfg->num_ents,
6104 + sizeof(*vimc->subdevs), GFP_KERNEL);
6105 +diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
6106 +index fcc897fb247b..392754c18046 100644
6107 +--- a/drivers/media/platform/vimc/vimc-streamer.c
6108 ++++ b/drivers/media/platform/vimc/vimc-streamer.c
6109 +@@ -120,7 +120,6 @@ static int vimc_streamer_thread(void *data)
6110 + int i;
6111 +
6112 + set_freezable();
6113 +- set_current_state(TASK_UNINTERRUPTIBLE);
6114 +
6115 + for (;;) {
6116 + try_to_freeze();
6117 +@@ -137,6 +136,7 @@ static int vimc_streamer_thread(void *data)
6118 + break;
6119 + }
6120 + // wait for 60 Hz
6121 ++ set_current_state(TASK_UNINTERRUPTIBLE);
6122 + schedule_timeout(HZ / 60);
6123 + }
6124 +
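The vimc-streamer fix above is the classic kthread sleep-loop bug: schedule_timeout() only sleeps if the task state is non-running at the moment of the call, and the task always wakes back up in TASK_RUNNING. Setting the state once before the loop therefore works only for the first iteration; every later schedule_timeout() returns immediately and the thread spins. The canonical loop shape re-arms the state on every pass (do_work() is a placeholder):

    for (;;) {
            try_to_freeze();
            if (kthread_should_stop())
                    break;

            do_work();

            set_current_state(TASK_UNINTERRUPTIBLE);   /* re-arm every pass */
            schedule_timeout(HZ / 60);                 /* ~16 ms            */
    }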
6125 +diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
6126 +index 52eeda624d7e..530ac8decb25 100644
6127 +--- a/drivers/media/platform/vivid/vivid-vid-cap.c
6128 ++++ b/drivers/media/platform/vivid/vivid-vid-cap.c
6129 +@@ -1007,7 +1007,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
6130 + v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
6131 + if (dev->bitmap_cap && (compose->width != s->r.width ||
6132 + compose->height != s->r.height)) {
6133 +- kfree(dev->bitmap_cap);
6134 ++ vfree(dev->bitmap_cap);
6135 + dev->bitmap_cap = NULL;
6136 + }
6137 + *compose = s->r;
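The one-liner above pairs the free with the allocator actually used: vivid allocates bitmap_cap from the vmalloc area, and such memory must be released with vfree(); handing it to kfree() corrupts the slab allocator. The pairing rule in one sketch (size is illustrative):

    void *slab_buf = kmalloc(size, GFP_KERNEL);     /* slab               */
    void *vmap_buf = vmalloc(size);                 /* vmalloc area       */
    void *kv_buf   = kvmalloc(size, GFP_KERNEL);    /* slab, else vmalloc */

    kfree(slab_buf);    /* pairs with k*alloc         */
    vfree(vmap_buf);    /* pairs with vmalloc/vzalloc */
    kvfree(kv_buf);     /* safe for either            */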
6138 +diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
6139 +index 3c8987af3772..ac5706b4cab8 100644
6140 +--- a/drivers/media/radio/wl128x/fmdrv_common.c
6141 ++++ b/drivers/media/radio/wl128x/fmdrv_common.c
6142 +@@ -489,7 +489,8 @@ int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
6143 + return -EIO;
6144 + }
6145 + /* Send response data to caller */
6146 +- if (response != NULL && response_len != NULL && evt_hdr->dlen) {
6147 ++ if (response != NULL && response_len != NULL && evt_hdr->dlen &&
6148 ++ evt_hdr->dlen <= payload_len) {
6149 + /* Skip header info and copy only response data */
6150 + skb_pull(skb, sizeof(struct fm_event_msg_hdr));
6151 + memcpy(response, skb->data, evt_hdr->dlen);
6152 +@@ -583,6 +584,8 @@ static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
6153 + return;
6154 +
6155 + fm_evt_hdr = (void *)skb->data;
6156 ++ if (fm_evt_hdr->dlen > sizeof(fmdev->irq_info.flag))
6157 ++ return;
6158 +
6159 + /* Skip header info and copy only response data */
6160 + skb_pull(skb, sizeof(struct fm_event_msg_hdr));
6161 +@@ -1308,7 +1311,7 @@ static int load_default_rx_configuration(struct fmdev *fmdev)
6162 + static int fm_power_up(struct fmdev *fmdev, u8 mode)
6163 + {
6164 + u16 payload;
6165 +- __be16 asic_id, asic_ver;
6166 ++ __be16 asic_id = 0, asic_ver = 0;
6167 + int resp_len, ret;
6168 + u8 fw_name[50];
6169 +
6170 +diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
6171 +index ffe2c672d105..3998ba29beb6 100644
6172 +--- a/drivers/media/rc/serial_ir.c
6173 ++++ b/drivers/media/rc/serial_ir.c
6174 +@@ -773,8 +773,6 @@ static void serial_ir_exit(void)
6175 +
6176 + static int __init serial_ir_init_module(void)
6177 + {
6178 +- int result;
6179 +-
6180 + switch (type) {
6181 + case IR_HOMEBREW:
6182 + case IR_IRDEO:
6183 +@@ -802,12 +800,7 @@ static int __init serial_ir_init_module(void)
6184 + if (sense != -1)
6185 + sense = !!sense;
6186 +
6187 +- result = serial_ir_init();
6188 +- if (!result)
6189 +- return 0;
6190 +-
6191 +- serial_ir_exit();
6192 +- return result;
6193 ++ return serial_ir_init();
6194 + }
6195 +
6196 + static void __exit serial_ir_exit_module(void)
6197 +diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
6198 +index 7876c897cc1d..222723d946e4 100644
6199 +--- a/drivers/media/usb/au0828/au0828-video.c
6200 ++++ b/drivers/media/usb/au0828/au0828-video.c
6201 +@@ -758,6 +758,9 @@ static int au0828_analog_stream_enable(struct au0828_dev *d)
6202 +
6203 + dprintk(1, "au0828_analog_stream_enable called\n");
6204 +
6205 ++ if (test_bit(DEV_DISCONNECTED, &d->dev_state))
6206 ++ return -ENODEV;
6207 ++
6208 + iface = usb_ifnum_to_if(d->usbdev, 0);
6209 + if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) {
6210 + dprintk(1, "Changing intf#0 to alt 5\n");
6211 +@@ -839,9 +842,9 @@ int au0828_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
6212 + return rc;
6213 + }
6214 +
6215 ++ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 1);
6216 ++
6217 + if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
6218 +- v4l2_device_call_all(&dev->v4l2_dev, 0, video,
6219 +- s_stream, 1);
6220 + dev->vid_timeout_running = 1;
6221 + mod_timer(&dev->vid_timeout, jiffies + (HZ / 10));
6222 + } else if (vq->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
6223 +@@ -861,10 +864,11 @@ static void au0828_stop_streaming(struct vb2_queue *vq)
6224 +
6225 + dprintk(1, "au0828_stop_streaming called %d\n", dev->streaming_users);
6226 +
6227 +- if (dev->streaming_users-- == 1)
6228 ++ if (dev->streaming_users-- == 1) {
6229 + au0828_uninit_isoc(dev);
6230 ++ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
6231 ++ }
6232 +
6233 +- v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
6234 + dev->vid_timeout_running = 0;
6235 + del_timer_sync(&dev->vid_timeout);
6236 +
6237 +@@ -893,8 +897,10 @@ void au0828_stop_vbi_streaming(struct vb2_queue *vq)
6238 + dprintk(1, "au0828_stop_vbi_streaming called %d\n",
6239 + dev->streaming_users);
6240 +
6241 +- if (dev->streaming_users-- == 1)
6242 ++ if (dev->streaming_users-- == 1) {
6243 + au0828_uninit_isoc(dev);
6244 ++ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
6245 ++ }
6246 +
6247 + spin_lock_irqsave(&dev->slock, flags);
6248 + if (dev->isoc_ctl.vbi_buf != NULL) {
6249 +diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
6250 +index 95c0bd4a19dc..45caf78119c4 100644
6251 +--- a/drivers/media/usb/cpia2/cpia2_v4l.c
6252 ++++ b/drivers/media/usb/cpia2/cpia2_v4l.c
6253 +@@ -1240,8 +1240,7 @@ static int __init cpia2_init(void)
6254 + LOG("%s v%s\n",
6255 + ABOUT, CPIA_VERSION);
6256 + check_parameters();
6257 +- cpia2_usb_init();
6258 +- return 0;
6259 ++ return cpia2_usb_init();
6260 + }
6261 +
6262 +
6263 +diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
6264 +index e28bd8836751..ae0814dd202a 100644
6265 +--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
6266 ++++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
6267 +@@ -615,16 +615,18 @@ static int dvbsky_init(struct dvb_usb_device *d)
6268 + return 0;
6269 + }
6270 +
6271 +-static void dvbsky_exit(struct dvb_usb_device *d)
6272 ++static int dvbsky_frontend_detach(struct dvb_usb_adapter *adap)
6273 + {
6274 ++ struct dvb_usb_device *d = adap_to_d(adap);
6275 + struct dvbsky_state *state = d_to_priv(d);
6276 +- struct dvb_usb_adapter *adap = &d->adapter[0];
6277 ++
6278 ++ dev_dbg(&d->udev->dev, "%s: adap=%d\n", __func__, adap->id);
6279 +
6280 + dvb_module_release(state->i2c_client_tuner);
6281 + dvb_module_release(state->i2c_client_demod);
6282 + dvb_module_release(state->i2c_client_ci);
6283 +
6284 +- adap->fe[0] = NULL;
6285 ++ return 0;
6286 + }
6287 +
6288 + /* DVB USB Driver stuff */
6289 +@@ -640,11 +642,11 @@ static struct dvb_usb_device_properties dvbsky_s960_props = {
6290 +
6291 + .i2c_algo = &dvbsky_i2c_algo,
6292 + .frontend_attach = dvbsky_s960_attach,
6293 ++ .frontend_detach = dvbsky_frontend_detach,
6294 + .init = dvbsky_init,
6295 + .get_rc_config = dvbsky_get_rc_config,
6296 + .streaming_ctrl = dvbsky_streaming_ctrl,
6297 + .identify_state = dvbsky_identify_state,
6298 +- .exit = dvbsky_exit,
6299 + .read_mac_address = dvbsky_read_mac_addr,
6300 +
6301 + .num_adapters = 1,
6302 +@@ -667,11 +669,11 @@ static struct dvb_usb_device_properties dvbsky_s960c_props = {
6303 +
6304 + .i2c_algo = &dvbsky_i2c_algo,
6305 + .frontend_attach = dvbsky_s960c_attach,
6306 ++ .frontend_detach = dvbsky_frontend_detach,
6307 + .init = dvbsky_init,
6308 + .get_rc_config = dvbsky_get_rc_config,
6309 + .streaming_ctrl = dvbsky_streaming_ctrl,
6310 + .identify_state = dvbsky_identify_state,
6311 +- .exit = dvbsky_exit,
6312 + .read_mac_address = dvbsky_read_mac_addr,
6313 +
6314 + .num_adapters = 1,
6315 +@@ -694,11 +696,11 @@ static struct dvb_usb_device_properties dvbsky_t680c_props = {
6316 +
6317 + .i2c_algo = &dvbsky_i2c_algo,
6318 + .frontend_attach = dvbsky_t680c_attach,
6319 ++ .frontend_detach = dvbsky_frontend_detach,
6320 + .init = dvbsky_init,
6321 + .get_rc_config = dvbsky_get_rc_config,
6322 + .streaming_ctrl = dvbsky_streaming_ctrl,
6323 + .identify_state = dvbsky_identify_state,
6324 +- .exit = dvbsky_exit,
6325 + .read_mac_address = dvbsky_read_mac_addr,
6326 +
6327 + .num_adapters = 1,
6328 +@@ -721,11 +723,11 @@ static struct dvb_usb_device_properties dvbsky_t330_props = {
6329 +
6330 + .i2c_algo = &dvbsky_i2c_algo,
6331 + .frontend_attach = dvbsky_t330_attach,
6332 ++ .frontend_detach = dvbsky_frontend_detach,
6333 + .init = dvbsky_init,
6334 + .get_rc_config = dvbsky_get_rc_config,
6335 + .streaming_ctrl = dvbsky_streaming_ctrl,
6336 + .identify_state = dvbsky_identify_state,
6337 +- .exit = dvbsky_exit,
6338 + .read_mac_address = dvbsky_read_mac_addr,
6339 +
6340 + .num_adapters = 1,
6341 +@@ -748,11 +750,11 @@ static struct dvb_usb_device_properties mygica_t230c_props = {
6342 +
6343 + .i2c_algo = &dvbsky_i2c_algo,
6344 + .frontend_attach = dvbsky_mygica_t230c_attach,
6345 ++ .frontend_detach = dvbsky_frontend_detach,
6346 + .init = dvbsky_init,
6347 + .get_rc_config = dvbsky_get_rc_config,
6348 + .streaming_ctrl = dvbsky_streaming_ctrl,
6349 + .identify_state = dvbsky_identify_state,
6350 +- .exit = dvbsky_exit,
6351 +
6352 + .num_adapters = 1,
6353 + .adapter = {
6354 +diff --git a/drivers/media/usb/go7007/go7007-fw.c b/drivers/media/usb/go7007/go7007-fw.c
6355 +index 24f5b615dc7a..dfa9f899d0c2 100644
6356 +--- a/drivers/media/usb/go7007/go7007-fw.c
6357 ++++ b/drivers/media/usb/go7007/go7007-fw.c
6358 +@@ -1499,8 +1499,8 @@ static int modet_to_package(struct go7007 *go, __le16 *code, int space)
6359 + return cnt;
6360 + }
6361 +
6362 +-static int do_special(struct go7007 *go, u16 type, __le16 *code, int space,
6363 +- int *framelen)
6364 ++static noinline_for_stack int do_special(struct go7007 *go, u16 type,
6365 ++ __le16 *code, int space, int *framelen)
6366 + {
6367 + switch (type) {
6368 + case SPECIAL_FRM_HEAD:
6369 +diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
6370 +index ac70b36d67b7..4d7517411cc2 100644
6371 +--- a/drivers/media/usb/gspca/gspca.c
6372 ++++ b/drivers/media/usb/gspca/gspca.c
6373 +@@ -294,7 +294,7 @@ static void fill_frame(struct gspca_dev *gspca_dev,
6374 + /* check the packet status and length */
6375 + st = urb->iso_frame_desc[i].status;
6376 + if (st) {
6377 +- pr_err("ISOC data error: [%d] len=%d, status=%d\n",
6378 ++ gspca_dbg(gspca_dev, D_PACK, "ISOC data error: [%d] len=%d, status=%d\n",
6379 + i, len, st);
6380 + gspca_dev->last_packet_type = DISCARD_PACKET;
6381 + continue;
6382 +@@ -314,6 +314,8 @@ static void fill_frame(struct gspca_dev *gspca_dev,
6383 + }
6384 +
6385 + resubmit:
6386 ++ if (!gspca_dev->streaming)
6387 ++ return;
6388 + /* resubmit the URB */
6389 + st = usb_submit_urb(urb, GFP_ATOMIC);
6390 + if (st < 0)
6391 +@@ -330,7 +332,7 @@ static void isoc_irq(struct urb *urb)
6392 + struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context;
6393 +
6394 + gspca_dbg(gspca_dev, D_PACK, "isoc irq\n");
6395 +- if (!vb2_start_streaming_called(&gspca_dev->queue))
6396 ++ if (!gspca_dev->streaming)
6397 + return;
6398 + fill_frame(gspca_dev, urb);
6399 + }
6400 +@@ -344,7 +346,7 @@ static void bulk_irq(struct urb *urb)
6401 + int st;
6402 +
6403 + gspca_dbg(gspca_dev, D_PACK, "bulk irq\n");
6404 +- if (!vb2_start_streaming_called(&gspca_dev->queue))
6405 ++ if (!gspca_dev->streaming)
6406 + return;
6407 + switch (urb->status) {
6408 + case 0:
6409 +@@ -367,6 +369,8 @@ static void bulk_irq(struct urb *urb)
6410 + urb->actual_length);
6411 +
6412 + resubmit:
6413 ++ if (!gspca_dev->streaming)
6414 ++ return;
6415 + /* resubmit the URB */
6416 + if (gspca_dev->cam.bulk_nurbs != 0) {
6417 + st = usb_submit_urb(urb, GFP_ATOMIC);
6418 +@@ -1638,6 +1642,8 @@ void gspca_disconnect(struct usb_interface *intf)
6419 +
6420 + mutex_lock(&gspca_dev->usb_lock);
6421 + gspca_dev->present = false;
6422 ++ destroy_urbs(gspca_dev);
6423 ++ gspca_input_destroy_urb(gspca_dev);
6424 +
6425 + vb2_queue_error(&gspca_dev->queue);
6426 +
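The gspca hunks above test the driver's own streaming flag instead of vb2_start_streaming_called() and, crucially, re-check it immediately before usb_submit_urb() in both completion handlers; on disconnect the URBs are destroyed under usb_lock so no completion can requeue them afterwards. The general completion-handler rule, as a sketch (the READ_ONCE() is this sketch's choice, not gspca's):

    static void my_complete(struct urb *urb)
    {
            struct my_dev *dev = urb->context;

            if (!READ_ONCE(dev->streaming))
                    return;                 /* tearing down: don't touch   */

            process_data(dev, urb);

            if (!READ_ONCE(dev->streaming))
                    return;                 /* stop may have raced with us */

            usb_submit_urb(urb, GFP_ATOMIC); /* completion is atomic context */
    }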
6427 +diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
6428 +index 446a999dd2ce..2bab4713bc5b 100644
6429 +--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
6430 ++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
6431 +@@ -666,6 +666,8 @@ static int ctrl_get_input(struct pvr2_ctrl *cptr,int *vp)
6432 +
6433 + static int ctrl_check_input(struct pvr2_ctrl *cptr,int v)
6434 + {
6435 ++ if (v < 0 || v > PVR2_CVAL_INPUT_MAX)
6436 ++ return 0;
6437 + return ((1 << v) & cptr->hdw->input_allowed_mask) != 0;
6438 + }
6439 +
6440 +diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
6441 +index 25648add77e5..bd2b7a67b732 100644
6442 +--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
6443 ++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
6444 +@@ -50,6 +50,7 @@
6445 + #define PVR2_CVAL_INPUT_COMPOSITE 2
6446 + #define PVR2_CVAL_INPUT_SVIDEO 3
6447 + #define PVR2_CVAL_INPUT_RADIO 4
6448 ++#define PVR2_CVAL_INPUT_MAX PVR2_CVAL_INPUT_RADIO
6449 +
6450 + enum pvr2_config {
6451 + pvr2_config_empty, /* No configuration */
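The pvrusb2 pair above bounds a control value before it is used as a shift count: in C, `1 << v` is undefined for negative v or for v at or beyond the width of the type, and an attacker-chosen v would otherwise probe bits that were never part of input_allowed_mask. A compact form of the guard:

    /* Validate a user-supplied value before shifting by it. */
    static bool input_is_allowed(unsigned long mask, int v)
    {
            if (v < 0 || v > PVR2_CVAL_INPUT_MAX)   /* MAX: last valid id */
                    return false;
            return (mask >> v) & 1;
    }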
6452 +diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
6453 +index 20571846e636..7495f8323147 100644
6454 +--- a/drivers/media/v4l2-core/v4l2-fwnode.c
6455 ++++ b/drivers/media/v4l2-core/v4l2-fwnode.c
6456 +@@ -225,6 +225,10 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
6457 + if (bus_type == V4L2_MBUS_CSI2_DPHY ||
6458 + bus_type == V4L2_MBUS_CSI2_CPHY || lanes_used ||
6459 + have_clk_lane || (flags & ~V4L2_MBUS_CSI2_CONTINUOUS_CLOCK)) {
6460 ++ /* Only D-PHY has a clock lane. */
6461 ++ unsigned int dfl_data_lane_index =
6462 ++ bus_type == V4L2_MBUS_CSI2_DPHY;
6463 ++
6464 + bus->flags = flags;
6465 + if (bus_type == V4L2_MBUS_UNKNOWN)
6466 + vep->bus_type = V4L2_MBUS_CSI2_DPHY;
6467 +@@ -233,7 +237,7 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
6468 + if (use_default_lane_mapping) {
6469 + bus->clock_lane = 0;
6470 + for (i = 0; i < num_data_lanes; i++)
6471 +- bus->data_lanes[i] = 1 + i;
6472 ++ bus->data_lanes[i] = dfl_data_lane_index + i;
6473 + } else {
6474 + bus->clock_lane = clock_lane;
6475 + for (i = 0; i < num_data_lanes; i++)
6476 +diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
6477 +index 36d0d5c9cfba..35be1cc11dd8 100644
6478 +--- a/drivers/misc/fastrpc.c
6479 ++++ b/drivers/misc/fastrpc.c
6480 +@@ -667,8 +667,16 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
6481 + pages[i].size = roundup(len, PAGE_SIZE);
6482 +
6483 + if (ctx->maps[i]) {
6484 ++ struct vm_area_struct *vma = NULL;
6485 ++
6486 + rpra[i].pv = (u64) ctx->args[i].ptr;
6487 + pages[i].addr = ctx->maps[i]->phys;
6488 ++
6489 ++ vma = find_vma(current->mm, ctx->args[i].ptr);
6490 ++ if (vma)
6491 ++ pages[i].addr += ctx->args[i].ptr -
6492 ++ vma->vm_start;
6493 ++
6494 + } else {
6495 + rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
6496 + args = ALIGN(args, FASTRPC_ALIGN);
6497 +@@ -782,6 +790,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
6498 + if (err)
6499 + goto bail;
6500 + }
6501 ++
6502 ++ /* make sure that all CPU memory writes are seen by DSP */
6503 ++ dma_wmb();
6504 + /* Send invoke buffer to remote dsp */
6505 + err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
6506 + if (err)
6507 +@@ -798,6 +809,8 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
6508 + goto bail;
6509 +
6510 + if (ctx->nscalars) {
6511 ++ /* make sure that all memory writes by DSP are seen by CPU */
6512 ++ dma_rmb();
6513 + /* populate all the output buffers with results */
6514 + err = fastrpc_put_args(ctx, kernel);
6515 + if (err)
6516 +@@ -843,12 +856,12 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
6517 +
6518 + if (copy_from_user(&init, argp, sizeof(init))) {
6519 + err = -EFAULT;
6520 +- goto bail;
6521 ++ goto err;
6522 + }
6523 +
6524 + if (init.filelen > INIT_FILELEN_MAX) {
6525 + err = -EINVAL;
6526 +- goto bail;
6527 ++ goto err;
6528 + }
6529 +
6530 + inbuf.pgid = fl->tgid;
6531 +@@ -862,17 +875,15 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
6532 + if (init.filelen && init.filefd) {
6533 + err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
6534 + if (err)
6535 +- goto bail;
6536 ++ goto err;
6537 + }
6538 +
6539 + memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
6540 + 1024 * 1024);
6541 + err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
6542 + &imem);
6543 +- if (err) {
6544 +- fastrpc_map_put(map);
6545 +- goto bail;
6546 +- }
6547 ++ if (err)
6548 ++ goto err_alloc;
6549 +
6550 + fl->init_mem = imem;
6551 + args[0].ptr = (u64)(uintptr_t)&inbuf;
6552 +@@ -908,13 +919,24 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
6553 +
6554 + err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
6555 + sc, args);
6556 ++ if (err)
6557 ++ goto err_invoke;
6558 +
6559 +- if (err) {
6560 ++ kfree(args);
6561 ++
6562 ++ return 0;
6563 ++
6564 ++err_invoke:
6565 ++ fl->init_mem = NULL;
6566 ++ fastrpc_buf_free(imem);
6567 ++err_alloc:
6568 ++ if (map) {
6569 ++ spin_lock(&fl->lock);
6570 ++ list_del(&map->node);
6571 ++ spin_unlock(&fl->lock);
6572 + fastrpc_map_put(map);
6573 +- fastrpc_buf_free(imem);
6574 + }
6575 +-
6576 +-bail:
6577 ++err:
6578 + kfree(args);
6579 +
6580 + return err;
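Two of the fastrpc changes above pair memory barriers around the DSP hand-off: dma_wmb() makes every CPU write to the shared argument buffers visible before the invoke message is sent, and dma_rmb() on the return path orders the reads of the results after the completion has been observed. The pairing in isolation, as a sketch (the helper names are illustrative):

    /* producer side (CPU -> DSP) */
    fill_in_args(shared_buf);
    dma_wmb();                      /* args visible before the doorbell */
    send_invoke_message(ctx);

    /* consumer side (DSP -> CPU) */
    wait_for_completion(&ctx->done);
    dma_rmb();                      /* results visible before we read   */
    copy_out_results(shared_buf);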
6581 +diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
6582 +index 77d51be66c7e..652c8edb2164 100644
6583 +--- a/drivers/misc/habanalabs/device.c
6584 ++++ b/drivers/misc/habanalabs/device.c
6585 +@@ -498,11 +498,8 @@ disable_device:
6586 + return rc;
6587 + }
6588 +
6589 +-static void hl_device_hard_reset_pending(struct work_struct *work)
6590 ++static void device_kill_open_processes(struct hl_device *hdev)
6591 + {
6592 +- struct hl_device_reset_work *device_reset_work =
6593 +- container_of(work, struct hl_device_reset_work, reset_work);
6594 +- struct hl_device *hdev = device_reset_work->hdev;
6595 + u16 pending_total, pending_cnt;
6596 + struct task_struct *task = NULL;
6597 +
6598 +@@ -537,6 +534,12 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
6599 + }
6600 + }
6601 +
6602 ++ /* We killed the open users, but because the driver cleans up after the
6603 ++ * user contexts are closed (e.g. mmu mappings), we need to wait again
6604 ++ * to make sure the cleaning phase is finished before continuing with
6605 ++ * the reset
6606 ++ */
6607 ++
6608 + pending_cnt = pending_total;
6609 +
6610 + while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
6611 +@@ -552,6 +555,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
6612 +
6613 + mutex_unlock(&hdev->fd_open_cnt_lock);
6614 +
6615 ++}
6616 ++
6617 ++static void device_hard_reset_pending(struct work_struct *work)
6618 ++{
6619 ++ struct hl_device_reset_work *device_reset_work =
6620 ++ container_of(work, struct hl_device_reset_work, reset_work);
6621 ++ struct hl_device *hdev = device_reset_work->hdev;
6622 ++
6623 ++ device_kill_open_processes(hdev);
6624 ++
6625 + hl_device_reset(hdev, true, true);
6626 +
6627 + kfree(device_reset_work);
6628 +@@ -635,7 +648,7 @@ again:
6629 + * from a dedicated work
6630 + */
6631 + INIT_WORK(&device_reset_work->reset_work,
6632 +- hl_device_hard_reset_pending);
6633 ++ device_hard_reset_pending);
6634 + device_reset_work->hdev = hdev;
6635 + schedule_work(&device_reset_work->reset_work);
6636 +
6637 +@@ -1035,6 +1048,15 @@ void hl_device_fini(struct hl_device *hdev)
6638 + /* Mark device as disabled */
6639 + hdev->disabled = true;
6640 +
6641 ++ /*
6642 ++ * Flush anyone that is inside the critical section of enqueue
6643 ++ * jobs to the H/W
6644 ++ */
6645 ++ hdev->asic_funcs->hw_queues_lock(hdev);
6646 ++ hdev->asic_funcs->hw_queues_unlock(hdev);
6647 ++
6648 ++ device_kill_open_processes(hdev);
6649 ++
6650 + hl_hwmon_fini(hdev);
6651 +
6652 + device_late_fini(hdev);
6653 +diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
6654 +index 3c509e19d69d..1533cb320540 100644
6655 +--- a/drivers/misc/habanalabs/goya/goya.c
6656 ++++ b/drivers/misc/habanalabs/goya/goya.c
6657 +@@ -4407,6 +4407,9 @@ static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
6658 + {
6659 + struct goya_device *goya = hdev->asic_specific;
6660 +
6661 ++ if (hdev->hard_reset_pending)
6662 ++ return U64_MAX;
6663 ++
6664 + return readq(hdev->pcie_bar[DDR_BAR_ID] +
6665 + (addr - goya->ddr_bar_cur_addr));
6666 + }
6667 +@@ -4415,6 +4418,9 @@ static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
6668 + {
6669 + struct goya_device *goya = hdev->asic_specific;
6670 +
6671 ++ if (hdev->hard_reset_pending)
6672 ++ return;
6673 ++
6674 + writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
6675 + (addr - goya->ddr_bar_cur_addr));
6676 + }
6677 +diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
6678 +index ce1fda40a8b8..fadaf557603f 100644
6679 +--- a/drivers/misc/habanalabs/memory.c
6680 ++++ b/drivers/misc/habanalabs/memory.c
6681 +@@ -1046,10 +1046,17 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
6682 +
6683 + mutex_lock(&ctx->mmu_lock);
6684 +
6685 +- for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size)
6686 ++ for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
6687 + if (hl_mmu_unmap(ctx, next_vaddr, page_size))
6688 + dev_warn_ratelimited(hdev->dev,
6689 +- "unmap failed for vaddr: 0x%llx\n", next_vaddr);
6690 ++ "unmap failed for vaddr: 0x%llx\n", next_vaddr);
6691 ++
6692 ++ /* unmapping on Palladium can be really long, so avoid a CPU
6693 ++ * soft lockup bug by sleeping a little between unmapping pages
6694 ++ */
6695 ++ if (hdev->pldm)
6696 ++ usleep_range(500, 1000);
6697 ++ }
6698 +
6699 + hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
6700 +
6701 +diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
6702 +index efb8a7965dd4..154f4204d58c 100644
6703 +--- a/drivers/mmc/core/pwrseq_emmc.c
6704 ++++ b/drivers/mmc/core/pwrseq_emmc.c
6705 +@@ -30,19 +30,14 @@ struct mmc_pwrseq_emmc {
6706 +
6707 + #define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq)
6708 +
6709 +-static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
6710 +-{
6711 +- gpiod_set_value(pwrseq->reset_gpio, 1);
6712 +- udelay(1);
6713 +- gpiod_set_value(pwrseq->reset_gpio, 0);
6714 +- udelay(200);
6715 +-}
6716 +-
6717 + static void mmc_pwrseq_emmc_reset(struct mmc_host *host)
6718 + {
6719 + struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq);
6720 +
6721 +- __mmc_pwrseq_emmc_reset(pwrseq);
6722 ++ gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
6723 ++ udelay(1);
6724 ++ gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
6725 ++ udelay(200);
6726 + }
6727 +
6728 + static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
6729 +@@ -50,8 +45,11 @@ static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
6730 + {
6731 + struct mmc_pwrseq_emmc *pwrseq = container_of(this,
6732 + struct mmc_pwrseq_emmc, reset_nb);
6733 ++ gpiod_set_value(pwrseq->reset_gpio, 1);
6734 ++ udelay(1);
6735 ++ gpiod_set_value(pwrseq->reset_gpio, 0);
6736 ++ udelay(200);
6737 +
6738 +- __mmc_pwrseq_emmc_reset(pwrseq);
6739 + return NOTIFY_DONE;
6740 + }
6741 +
6742 +@@ -72,14 +70,18 @@ static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
6743 + if (IS_ERR(pwrseq->reset_gpio))
6744 + return PTR_ERR(pwrseq->reset_gpio);
6745 +
6746 +- /*
6747 +- * register reset handler to ensure emmc reset also from
6748 +- * emergency_reboot(), priority 255 is the highest priority
6749 +- * so it will be executed before any system reboot handler.
6750 +- */
6751 +- pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
6752 +- pwrseq->reset_nb.priority = 255;
6753 +- register_restart_handler(&pwrseq->reset_nb);
6754 ++ if (!gpiod_cansleep(pwrseq->reset_gpio)) {
6755 ++ /*
6756 ++ * register reset handler to ensure emmc reset also from
6757 ++ * emergency_reboot(), priority 255 is the highest priority
6758 ++ * so it will be executed before any system reboot handler.
6759 ++ */
6760 ++ pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
6761 ++ pwrseq->reset_nb.priority = 255;
6762 ++ register_restart_handler(&pwrseq->reset_nb);
6763 ++ } else {
6764 ++ dev_notice(dev, "EMMC reset pin tied to a sleepy GPIO driver; reset on emergency-reboot disabled\n");
6765 ++ }
6766 +
6767 + pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
6768 + pwrseq->pwrseq.dev = dev;
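The pwrseq_emmc rework above is about execution context: a restart notifier can run with interrupts disabled from emergency_reboot(), where only the non-sleeping gpiod_set_value() is legal, and that accessor is in turn only legal on lines whose controller can toggle them atomically. The driver therefore registers the notifier only when gpiod_cansleep() is false, while the ordinary host ->reset path uses gpiod_set_value_cansleep() and works with any controller. The decision, sketched:

    /* process context (may sleep): always safe */
    gpiod_set_value_cansleep(gpio, 1);

    /* atomic context (restart notifier, IRQ): only for fast lines */
    if (!gpiod_cansleep(gpio))
            gpiod_set_value(gpio, 1);
    else
            pr_notice("GPIO needs sleeping accessors; skipping atomic path\n");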
6769 +diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
6770 +index 265e1aeeb9d8..d3d32f9a2cb1 100644
6771 +--- a/drivers/mmc/core/sd.c
6772 ++++ b/drivers/mmc/core/sd.c
6773 +@@ -221,6 +221,14 @@ static int mmc_decode_scr(struct mmc_card *card)
6774 +
6775 + if (scr->sda_spec3)
6776 + scr->cmds = UNSTUFF_BITS(resp, 32, 2);
6777 ++
6778 ++ /* SD Spec says: any SD Card shall set at least bits 0 and 2 */
6779 ++ if (!(scr->bus_widths & SD_SCR_BUS_WIDTH_1) ||
6780 ++ !(scr->bus_widths & SD_SCR_BUS_WIDTH_4)) {
6781 ++ pr_err("%s: invalid bus width\n", mmc_hostname(card->host));
6782 ++ return -EINVAL;
6783 ++ }
6784 ++
6785 + return 0;
6786 + }
6787 +
6788 +diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
6789 +index 1b1498805972..a3533935e282 100644
6790 +--- a/drivers/mmc/host/mmc_spi.c
6791 ++++ b/drivers/mmc/host/mmc_spi.c
6792 +@@ -819,6 +819,10 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
6793 + }
6794 +
6795 + status = spi_sync_locked(spi, &host->m);
6796 ++ if (status < 0) {
6797 ++ dev_dbg(&spi->dev, "read error %d\n", status);
6798 ++ return status;
6799 ++ }
6800 +
6801 + if (host->dma_dev) {
6802 + dma_sync_single_for_cpu(host->dma_dev,
6803 +diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
6804 +index 9d12c06c7fd6..2feb4ef32035 100644
6805 +--- a/drivers/mmc/host/sdhci-iproc.c
6806 ++++ b/drivers/mmc/host/sdhci-iproc.c
6807 +@@ -196,7 +196,8 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = {
6808 + };
6809 +
6810 + static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
6811 +- .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
6812 ++ .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
6813 ++ SDHCI_QUIRK_NO_HISPD_BIT,
6814 + .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON,
6815 + .ops = &sdhci_iproc_32only_ops,
6816 + };
6817 +@@ -219,7 +220,8 @@ static const struct sdhci_iproc_data iproc_cygnus_data = {
6818 +
6819 + static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
6820 + .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
6821 +- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
6822 ++ SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 |
6823 ++ SDHCI_QUIRK_NO_HISPD_BIT,
6824 + .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
6825 + .ops = &sdhci_iproc_ops,
6826 + };
6827 +diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
6828 +index 4e669b4edfc1..7e0eae8dafae 100644
6829 +--- a/drivers/mmc/host/sdhci-of-esdhc.c
6830 ++++ b/drivers/mmc/host/sdhci-of-esdhc.c
6831 +@@ -694,6 +694,9 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
6832 + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
6833 + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
6834 +
6835 ++ if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc"))
6836 ++ mdelay(5);
6837 ++
6838 + if (mask & SDHCI_RESET_ALL) {
6839 + val = sdhci_readl(host, ESDHC_TBCTL);
6840 + val &= ~ESDHC_TB_EN;
6841 +@@ -1074,6 +1077,11 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
6842 + if (esdhc->vendor_ver > VENDOR_V_22)
6843 + host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
6844 +
6845 ++ if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
6846 ++ host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
6847 ++ host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
6848 ++ }
6849 ++
6850 + if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
6851 + of_device_is_compatible(np, "fsl,p5020-esdhc") ||
6852 + of_device_is_compatible(np, "fsl,p4080-esdhc") ||
6853 +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
6854 +index a6eacf2099c3..9b03d7e404f8 100644
6855 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
6856 ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
6857 +@@ -224,28 +224,23 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
6858 + if (!tx_ring->tx_buffer_info) {
6859 + tx_ring->tx_buffer_info = vzalloc(size);
6860 + if (!tx_ring->tx_buffer_info)
6861 +- return -ENOMEM;
6862 ++ goto err_tx_buffer_info;
6863 + }
6864 +
6865 + size = sizeof(u16) * tx_ring->ring_size;
6866 + tx_ring->free_tx_ids = vzalloc_node(size, node);
6867 + if (!tx_ring->free_tx_ids) {
6868 + tx_ring->free_tx_ids = vzalloc(size);
6869 +- if (!tx_ring->free_tx_ids) {
6870 +- vfree(tx_ring->tx_buffer_info);
6871 +- return -ENOMEM;
6872 +- }
6873 ++ if (!tx_ring->free_tx_ids)
6874 ++ goto err_free_tx_ids;
6875 + }
6876 +
6877 + size = tx_ring->tx_max_header_size;
6878 + tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
6879 + if (!tx_ring->push_buf_intermediate_buf) {
6880 + tx_ring->push_buf_intermediate_buf = vzalloc(size);
6881 +- if (!tx_ring->push_buf_intermediate_buf) {
6882 +- vfree(tx_ring->tx_buffer_info);
6883 +- vfree(tx_ring->free_tx_ids);
6884 +- return -ENOMEM;
6885 +- }
6886 ++ if (!tx_ring->push_buf_intermediate_buf)
6887 ++ goto err_push_buf_intermediate_buf;
6888 + }
6889 +
6890 + /* Req id ring for TX out of order completions */
6891 +@@ -259,6 +254,15 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
6892 + tx_ring->next_to_clean = 0;
6893 + tx_ring->cpu = ena_irq->cpu;
6894 + return 0;
6895 ++
6896 ++err_push_buf_intermediate_buf:
6897 ++ vfree(tx_ring->free_tx_ids);
6898 ++ tx_ring->free_tx_ids = NULL;
6899 ++err_free_tx_ids:
6900 ++ vfree(tx_ring->tx_buffer_info);
6901 ++ tx_ring->tx_buffer_info = NULL;
6902 ++err_tx_buffer_info:
6903 ++ return -ENOMEM;
6904 + }
6905 +
6906 + /* ena_free_tx_resources - Free I/O Tx Resources per Queue
6907 +@@ -378,6 +382,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
6908 + rx_ring->free_rx_ids = vzalloc(size);
6909 + if (!rx_ring->free_rx_ids) {
6910 + vfree(rx_ring->rx_buffer_info);
6911 ++ rx_ring->rx_buffer_info = NULL;
6912 + return -ENOMEM;
6913 + }
6914 + }
6915 +@@ -2292,7 +2297,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev,
6916 + host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
6917 + host_info->os_type = ENA_ADMIN_OS_LINUX;
6918 + host_info->kernel_ver = LINUX_VERSION_CODE;
6919 +- strncpy(host_info->kernel_ver_str, utsname()->version,
6920 ++ strlcpy(host_info->kernel_ver_str, utsname()->version,
6921 + sizeof(host_info->kernel_ver_str) - 1);
6922 + host_info->os_dist = 0;
6923 + strncpy(host_info->os_dist_str, utsname()->release,
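Two independent ena cleanups sit in the hunks above. The allocation failures now unwind through a single goto ladder that frees in reverse order and resets the struct members to NULL, so a later ena_free_tx_resources() cannot double-free; and strncpy() becomes strlcpy() for kernel_ver_str because strncpy() does not guarantee NUL-termination when the source reaches the limit. The ladder shape, with illustrative fields:

    ring->a = vzalloc(a_size);
    if (!ring->a)
            goto err_a;
    ring->b = vzalloc(b_size);
    if (!ring->b)
            goto err_b;
    return 0;

    err_b:
            vfree(ring->a);
            ring->a = NULL; /* long-lived struct: no stale pointer left */
    err_a:
            return -ENOMEM;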
6924 +diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
6925 +index c2fd323c4078..ea75f275023f 100644
6926 +--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
6927 ++++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
6928 +@@ -75,8 +75,8 @@ struct l2t_data {
6929 + struct l2t_entry *rover; /* starting point for next allocation */
6930 + atomic_t nfree; /* number of free entries */
6931 + rwlock_t lock;
6932 +- struct l2t_entry l2tab[0];
6933 + struct rcu_head rcu_head; /* to handle rcu cleanup */
6934 ++ struct l2t_entry l2tab[];
6935 + };
6936 +
6937 + typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
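The cxgb3 hunk above is more than tidying: l2tab[0] was declared before rcu_head, and a zero-length or flexible array member is only valid as the last member of a struct. Entries written through l2tab overlaid rcu_head, corrupting the RCU callback used to free the table. The C99 form, sketched with an illustrative struct name (struct_size() from <linux/overflow.h> sizes the trailing array):

    struct l2t_data_fixed {
            rwlock_t lock;
            atomic_t nfree;
            struct rcu_head rcu_head;       /* fixed-size members first */
            struct l2t_entry l2tab[];       /* flexible array goes last */
    };

    d = kvzalloc(struct_size(d, l2tab, nentries), GFP_KERNEL);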
6938 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
6939 +index 89179e316687..4bc0c357cb8e 100644
6940 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
6941 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
6942 +@@ -6161,15 +6161,24 @@ static int __init cxgb4_init_module(void)
6943 +
6944 + ret = pci_register_driver(&cxgb4_driver);
6945 + if (ret < 0)
6946 +- debugfs_remove(cxgb4_debugfs_root);
6947 ++ goto err_pci;
6948 +
6949 + #if IS_ENABLED(CONFIG_IPV6)
6950 + if (!inet6addr_registered) {
6951 +- register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6952 +- inet6addr_registered = true;
6953 ++ ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6954 ++ if (ret)
6955 ++ pci_unregister_driver(&cxgb4_driver);
6956 ++ else
6957 ++ inet6addr_registered = true;
6958 + }
6959 + #endif
6960 +
6961 ++ if (ret == 0)
6962 ++ return ret;
6963 ++
6964 ++err_pci:
6965 ++ debugfs_remove(cxgb4_debugfs_root);
6966 ++
6967 + return ret;
6968 + }
6969 +
6970 +diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
6971 +index dc339dc1adb2..57cbaa38d247 100644
6972 +--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
6973 ++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
6974 +@@ -2796,6 +2796,7 @@ int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
6975 + static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
6976 + {
6977 + struct device *dev = priv->net_dev->dev.parent;
6978 ++ int err;
6979 +
6980 + /* Check if we actually support Rx flow classification */
6981 + if (dpaa2_eth_has_legacy_dist(priv)) {
6982 +@@ -2814,9 +2815,13 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
6983 + return -EOPNOTSUPP;
6984 + }
6985 +
6986 ++ err = dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
6987 ++ if (err)
6988 ++ return err;
6989 ++
6990 + priv->rx_cls_enabled = 1;
6991 +
6992 +- return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
6993 ++ return 0;
6994 + }
6995 +
6996 + /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
6997 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
6998 +index 299b277bc7ae..589b7ee32bff 100644
6999 +--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
7000 ++++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
7001 +@@ -107,7 +107,7 @@ struct hclgevf_mbx_arq_ring {
7002 + struct hclgevf_dev *hdev;
7003 + u32 head;
7004 + u32 tail;
7005 +- u32 count;
7006 ++ atomic_t count;
7007 + u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
7008 + };
7009 +
7010 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
7011 +index 162cb9afa0e7..c7d310903319 100644
7012 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
7013 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
7014 +@@ -2705,7 +2705,7 @@ int hns3_clean_rx_ring(
7015 + #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
7016 + struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
7017 + int recv_pkts, recv_bds, clean_count, err;
7018 +- int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
7019 ++ int unused_count = hns3_desc_unused(ring);
7020 + struct sk_buff *skb = ring->skb;
7021 + int num;
7022 +
7023 +@@ -2714,6 +2714,7 @@ int hns3_clean_rx_ring(
7024 +
7025 + recv_pkts = 0, recv_bds = 0, clean_count = 0;
7026 + num -= unused_count;
7027 ++ unused_count -= ring->pending_buf;
7028 +
7029 + while (recv_pkts < budget && recv_bds < num) {
7030 + /* Reuse or realloc buffers */
7031 +@@ -3773,12 +3774,13 @@ static int hns3_recover_hw_addr(struct net_device *ndev)
7032 + struct netdev_hw_addr *ha, *tmp;
7033 + int ret = 0;
7034 +
7035 ++ netif_addr_lock_bh(ndev);
7036 + /* go through and sync uc_addr entries to the device */
7037 + list = &ndev->uc;
7038 + list_for_each_entry_safe(ha, tmp, &list->list, list) {
7039 + ret = hns3_nic_uc_sync(ndev, ha->addr);
7040 + if (ret)
7041 +- return ret;
7042 ++ goto out;
7043 + }
7044 +
7045 + /* go through and sync mc_addr entries to the device */
7046 +@@ -3786,9 +3788,11 @@ static int hns3_recover_hw_addr(struct net_device *ndev)
7047 + list_for_each_entry_safe(ha, tmp, &list->list, list) {
7048 + ret = hns3_nic_mc_sync(ndev, ha->addr);
7049 + if (ret)
7050 +- return ret;
7051 ++ goto out;
7052 + }
7053 +
7054 ++out:
7055 ++ netif_addr_unlock_bh(ndev);
7056 + return ret;
7057 + }
7058 +
7059 +@@ -3799,6 +3803,7 @@ static void hns3_remove_hw_addr(struct net_device *netdev)
7060 +
7061 + hns3_nic_uc_unsync(netdev, netdev->dev_addr);
7062 +
7063 ++ netif_addr_lock_bh(netdev);
7064 + /* go through and unsync uc_addr entries to the device */
7065 + list = &netdev->uc;
7066 + list_for_each_entry_safe(ha, tmp, &list->list, list)
7067 +@@ -3809,6 +3814,8 @@ static void hns3_remove_hw_addr(struct net_device *netdev)
7068 + list_for_each_entry_safe(ha, tmp, &list->list, list)
7069 + if (ha->refcount > 1)
7070 + hns3_nic_mc_unsync(netdev, ha->addr);
7071 ++
7072 ++ netif_addr_unlock_bh(netdev);
7073 + }
7074 +
7075 + static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
7076 +@@ -3850,6 +3857,13 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
7077 + ring_ptr_move_fw(ring, next_to_use);
7078 + }
7079 +
7080 ++ /* Free the pending skb in rx ring */
7081 ++ if (ring->skb) {
7082 ++ dev_kfree_skb_any(ring->skb);
7083 ++ ring->skb = NULL;
7084 ++ ring->pending_buf = 0;
7085 ++ }
7086 ++
7087 + return 0;
7088 + }
7089 +
7090 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
7091 +index 359d4731fb2d..ea94b5152963 100644
7092 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
7093 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
7094 +@@ -483,6 +483,11 @@ static void hns3_get_stats(struct net_device *netdev,
7095 + struct hnae3_handle *h = hns3_get_handle(netdev);
7096 + u64 *p = data;
7097 +
7098 ++ if (hns3_nic_resetting(netdev)) {
7099 ++ netdev_err(netdev, "dev resetting, could not get stats\n");
7100 ++ return;
7101 ++ }
7102 ++
7103 + if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
7104 + netdev_err(netdev, "could not get any statistics\n");
7105 + return;
7106 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
7107 +index 3a093a92eac5..d92e4af11b1f 100644
7108 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
7109 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
7110 +@@ -373,21 +373,26 @@ int hclge_cmd_init(struct hclge_dev *hdev)
7111 + * reset may happen when lower level reset is being processed.
7112 + */
7113 + if ((hclge_is_reset_pending(hdev))) {
7114 +- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7115 +- return -EBUSY;
7116 ++ ret = -EBUSY;
7117 ++ goto err_cmd_init;
7118 + }
7119 +
7120 + ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
7121 + if (ret) {
7122 + dev_err(&hdev->pdev->dev,
7123 + "firmware version query failed %d\n", ret);
7124 +- return ret;
7125 ++ goto err_cmd_init;
7126 + }
7127 + hdev->fw_version = version;
7128 +
7129 + dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
7130 +
7131 + return 0;
7132 ++
7133 ++err_cmd_init:
7134 ++ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7135 ++
7136 ++ return ret;
7137 + }
7138 +
7139 + static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
7140 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
7141 +index aafc69f4bfdd..a7bbb6d3091a 100644
7142 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
7143 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
7144 +@@ -1331,8 +1331,11 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
7145 + ret = hclge_pfc_setup_hw(hdev);
7146 + if (init && ret == -EOPNOTSUPP)
7147 + dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
7148 +- else
7149 ++ else if (ret) {
7150 ++ dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
7151 ++ ret);
7152 + return ret;
7153 ++ }
7154 +
7155 + return hclge_tm_bp_setup(hdev);
7156 + }
7157 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
7158 +index 9441b453d38d..382ecb15e743 100644
7159 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
7160 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
7161 +@@ -327,7 +327,7 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
7162 + hdev->arq.hdev = hdev;
7163 + hdev->arq.head = 0;
7164 + hdev->arq.tail = 0;
7165 +- hdev->arq.count = 0;
7166 ++ atomic_set(&hdev->arq.count, 0);
7167 + hdev->hw.cmq.csq.next_to_clean = 0;
7168 + hdev->hw.cmq.csq.next_to_use = 0;
7169 + hdev->hw.cmq.crq.next_to_clean = 0;
7170 +@@ -344,8 +344,8 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
7171 + * reset may happen when lower level reset is being processed.
7172 + */
7173 + if (hclgevf_is_reset_pending(hdev)) {
7174 +- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
7175 +- return -EBUSY;
7176 ++ ret = -EBUSY;
7177 ++ goto err_cmd_init;
7178 + }
7179 +
7180 + /* get firmware version */
7181 +@@ -353,13 +353,18 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
7182 + if (ret) {
7183 + dev_err(&hdev->pdev->dev,
7184 + "failed(%d) to query firmware version\n", ret);
7185 +- return ret;
7186 ++ goto err_cmd_init;
7187 + }
7188 + hdev->fw_version = version;
7189 +
7190 + dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
7191 +
7192 + return 0;
7193 ++
7194 ++err_cmd_init:
7195 ++ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
7196 ++
7197 ++ return ret;
7198 + }
7199 +
7200 + static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
7201 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
7202 +index 8bc28e6f465f..8dd7fef863f6 100644
7203 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
7204 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
7205 +@@ -2007,9 +2007,15 @@ static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
7206 + static int hclgevf_client_start(struct hnae3_handle *handle)
7207 + {
7208 + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
7209 ++ int ret;
7210 ++
7211 ++ ret = hclgevf_set_alive(handle, true);
7212 ++ if (ret)
7213 ++ return ret;
7214 +
7215 + mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
7216 +- return hclgevf_set_alive(handle, true);
7217 ++
7218 ++ return 0;
7219 + }
7220 +
7221 + static void hclgevf_client_stop(struct hnae3_handle *handle)
7222 +@@ -2051,6 +2057,10 @@ static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
7223 + {
7224 + set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
7225 +
7226 ++ if (hdev->keep_alive_timer.function)
7227 ++ del_timer_sync(&hdev->keep_alive_timer);
7228 ++ if (hdev->keep_alive_task.func)
7229 ++ cancel_work_sync(&hdev->keep_alive_task);
7230 + if (hdev->service_timer.function)
7231 + del_timer_sync(&hdev->service_timer);
7232 + if (hdev->service_task.func)
7233 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
7234 +index 7dc3c9f79169..4f2c77283cb4 100644
7235 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
7236 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
7237 +@@ -208,7 +208,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
7238 + /* we will drop the async msg if we find ARQ as full
7239 + * and continue with next message
7240 + */
7241 +- if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) {
7242 ++ if (atomic_read(&hdev->arq.count) >=
7243 ++ HCLGE_MBX_MAX_ARQ_MSG_NUM) {
7244 + dev_warn(&hdev->pdev->dev,
7245 + "Async Q full, dropping msg(%d)\n",
7246 + req->msg[1]);
7247 +@@ -220,7 +221,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
7248 + memcpy(&msg_q[0], req->msg,
7249 + HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
7250 + hclge_mbx_tail_ptr_move_arq(hdev->arq);
7251 +- hdev->arq.count++;
7252 ++ atomic_inc(&hdev->arq.count);
7253 +
7254 + hclgevf_mbx_task_schedule(hdev);
7255 +
7256 +@@ -308,7 +309,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
7257 + }
7258 +
7259 + hclge_mbx_head_ptr_move_arq(hdev->arq);
7260 +- hdev->arq.count--;
7261 ++ atomic_dec(&hdev->arq.count);
7262 + msg_q = hdev->arq.msg_q[hdev->arq.head];
7263 + }
7264 + }
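The atomic_t conversion threaded through the hclgevf hunks exists because arq.count is incremented from the mailbox handler and read/decremented from the deferred task without a shared lock; with a plain u32, concurrent updates can be lost. Each operation becomes race-free on its own (the check-then-inc above still relies on there being a single producer). Minimal pattern, with MAX_MSGS and drop_message() as illustrative stand-ins:

    atomic_t count = ATOMIC_INIT(0);

    atomic_set(&count, 0);                  /* (re)initialise     */
    if (atomic_read(&count) >= MAX_MSGS)    /* lock-free snapshot */
            drop_message();                 /* queue full         */
    else
            atomic_inc(&count);             /* producer side      */

    /* consumer side */
    atomic_dec(&count);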
7265 +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
7266 +index 7acc61e4f645..c10c9d7eadaa 100644
7267 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c
7268 ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
7269 +@@ -7350,7 +7350,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7270 +
7271 + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
7272 +
7273 +- if (pci_dev_run_wake(pdev))
7274 ++ if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp)
7275 + pm_runtime_put_noidle(&pdev->dev);
7276 +
7277 + return 0;
7278 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
7279 +index b1c265012c8a..ac9fcb097689 100644
7280 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
7281 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
7282 +@@ -2654,6 +2654,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
7283 + struct i40e_vsi_context ctxt;
7284 + i40e_status ret;
7285 +
7286 ++ /* Don't modify stripping options if a port VLAN is active */
7287 ++ if (vsi->info.pvid)
7288 ++ return;
7289 ++
7290 + if ((vsi->info.valid_sections &
7291 + cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
7292 + ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
7293 +@@ -2684,6 +2688,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
7294 + struct i40e_vsi_context ctxt;
7295 + i40e_status ret;
7296 +
7297 ++ /* Don't modify stripping options if a port VLAN is active */
7298 ++ if (vsi->info.pvid)
7299 ++ return;
7300 ++
7301 + if ((vsi->info.valid_sections &
7302 + cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
7303 + ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
7304 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
7305 +index 831d52bc3c9a..2b0362c827e9 100644
7306 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
7307 ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
7308 +@@ -181,7 +181,7 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
7309 + * check for the valid queue id
7310 + **/
7311 + static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
7312 +- u8 qid)
7313 ++ u16 qid)
7314 + {
7315 + struct i40e_pf *pf = vf->pf;
7316 + struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
7317 +@@ -2454,8 +2454,10 @@ error_param:
7318 + (u8 *)&stats, sizeof(stats));
7319 + }
7320 +
7321 +-/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
7322 +-#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
7323 ++/* If the VF is not trusted restrict the number of MAC/VLAN it can program
7324 ++ * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
7325 ++ */
7326 ++#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
7327 + #define I40E_VC_MAX_VLAN_PER_VF 8
7328 +
7329 + /**
7330 +@@ -3374,7 +3376,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
7331 +
7332 + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
7333 + aq_ret = I40E_ERR_PARAM;
7334 +- goto err;
7335 ++ goto err_out;
7336 + }
7337 +
7338 + if (!vf->adq_enabled) {
7339 +@@ -3382,7 +3384,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
7340 + "VF %d: ADq is not enabled, can't apply cloud filter\n",
7341 + vf->vf_id);
7342 + aq_ret = I40E_ERR_PARAM;
7343 +- goto err;
7344 ++ goto err_out;
7345 + }
7346 +
7347 + if (i40e_validate_cloud_filter(vf, vcf)) {
7348 +@@ -3390,7 +3392,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
7349 + "VF %d: Invalid input/s, can't apply cloud filter\n",
7350 + vf->vf_id);
7351 + aq_ret = I40E_ERR_PARAM;
7352 +- goto err;
7353 ++ goto err_out;
7354 + }
7355 +
7356 + cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
7357 +@@ -3451,13 +3453,17 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
7358 + "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
7359 + vf->vf_id, i40e_stat_str(&pf->hw, ret),
7360 + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7361 +- goto err;
7362 ++ goto err_free;
7363 + }
7364 +
7365 + INIT_HLIST_NODE(&cfilter->cloud_node);
7366 + hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
7367 ++ /* release the pointer passing it to the collection */
7368 ++ cfilter = NULL;
7369 + vf->num_cloud_filters++;
7370 +-err:
7371 ++err_free:
7372 ++ kfree(cfilter);
7373 ++err_out:
7374 + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
7375 + aq_ret);
7376 + }
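In the i40e cloud-filter hunk above, once hlist_add_head() succeeds the filter belongs to the VF's list, so the local pointer is set to NULL before falling through; the shared err_free label can then call kfree() unconditionally, freeing on failure and doing nothing on success because kfree(NULL) is a no-op. The ownership-transfer idiom, sketched with generic names:

    obj = kzalloc(sizeof(*obj), GFP_KERNEL);
    if (!obj)
            return -ENOMEM;

    ret = setup(obj);
    if (ret)
            goto out;               /* still ours: freed below      */

    list_add(&obj->node, &owner_list);
    obj = NULL;                     /* ownership handed to the list */
    out:
            kfree(obj);             /* kfree(NULL) is a no-op       */
            return ret;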
7377 +diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
7378 +index 89440775aea1..6af5bd5883ca 100644
7379 +--- a/drivers/net/ethernet/intel/ice/ice.h
7380 ++++ b/drivers/net/ethernet/intel/ice/ice.h
7381 +@@ -277,6 +277,7 @@ struct ice_q_vector {
7382 + * value to the device
7383 + */
7384 + u8 intrl;
7385 ++ u8 itr_countdown; /* when 0 should adjust adaptive ITR */
7386 + } ____cacheline_internodealigned_in_smp;
7387 +
7388 + enum ice_pf_flags {
7389 +diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
7390 +index fa61203bee26..b710545cf7d1 100644
7391 +--- a/drivers/net/ethernet/intel/ice/ice_lib.c
7392 ++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
7393 +@@ -1848,6 +1848,10 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
7394 + */
7395 + ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
7396 +
7397 ++ /* Preserve existing VLAN strip setting */
7398 ++ ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
7399 ++ ICE_AQ_VSI_VLAN_EMOD_M);
7400 ++
7401 + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
7402 +
7403 + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7404 +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
7405 +index 47cc3f905b7f..6ec73864019c 100644
7406 +--- a/drivers/net/ethernet/intel/ice/ice_main.c
7407 ++++ b/drivers/net/ethernet/intel/ice/ice_main.c
7408 +@@ -342,6 +342,10 @@ ice_prepare_for_reset(struct ice_pf *pf)
7409 + {
7410 + struct ice_hw *hw = &pf->hw;
7411 +
7412 ++ /* already prepared for reset */
7413 ++ if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
7414 ++ return;
7415 ++
7416 + /* Notify VFs of impending reset */
7417 + if (ice_check_sq_alive(hw, &hw->mailboxq))
7418 + ice_vc_notify_reset(pf);
7419 +@@ -416,10 +420,15 @@ static void ice_reset_subtask(struct ice_pf *pf)
7420 + * for the reset now), poll for reset done, rebuild and return.
7421 + */
7422 + if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
7423 +- clear_bit(__ICE_GLOBR_RECV, pf->state);
7424 +- clear_bit(__ICE_CORER_RECV, pf->state);
7425 +- if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
7426 +- ice_prepare_for_reset(pf);
7427 ++ /* Perform the largest reset requested */
7428 ++ if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
7429 ++ reset_type = ICE_RESET_CORER;
7430 ++ if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
7431 ++ reset_type = ICE_RESET_GLOBR;
7432 ++ /* return if no valid reset type requested */
7433 ++ if (reset_type == ICE_RESET_INVAL)
7434 ++ return;
7435 ++ ice_prepare_for_reset(pf);
7436 +
7437 + /* make sure we are ready to rebuild */
7438 + if (ice_check_reset(&pf->hw)) {
7439 +@@ -2545,6 +2554,9 @@ static int ice_set_features(struct net_device *netdev,
7440 + struct ice_vsi *vsi = np->vsi;
7441 + int ret = 0;
7442 +
7443 ++ /* Multiple features can be changed in one call so keep features in
7444 ++ * separate if/else statements to guarantee each feature is checked
7445 ++ */
7446 + if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
7447 + ret = ice_vsi_manage_rss_lut(vsi, true);
7448 + else if (!(features & NETIF_F_RXHASH) &&
7449 +@@ -2557,8 +2569,9 @@ static int ice_set_features(struct net_device *netdev,
7450 + else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
7451 + (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
7452 + ret = ice_vsi_manage_vlan_stripping(vsi, false);
7453 +- else if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
7454 +- !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
7455 ++
7456 ++ if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
7457 ++ !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
7458 + ret = ice_vsi_manage_vlan_insertion(vsi);
7459 + else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
7460 + (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
7461 +diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
7462 +index c289d97f477d..851030ad5016 100644
7463 +--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
7464 ++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
7465 +@@ -1048,18 +1048,257 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
7466 + return failure ? budget : (int)total_rx_pkts;
7467 + }
7468 +
7469 ++static unsigned int ice_itr_divisor(struct ice_port_info *pi)
7470 ++{
7471 ++ switch (pi->phy.link_info.link_speed) {
7472 ++ case ICE_AQ_LINK_SPEED_40GB:
7473 ++ return ICE_ITR_ADAPTIVE_MIN_INC * 1024;
7474 ++ case ICE_AQ_LINK_SPEED_25GB:
7475 ++ case ICE_AQ_LINK_SPEED_20GB:
7476 ++ return ICE_ITR_ADAPTIVE_MIN_INC * 512;
7477 ++ case ICE_AQ_LINK_SPEED_100MB:
7478 ++ return ICE_ITR_ADAPTIVE_MIN_INC * 32;
7479 ++ default:
7480 ++ return ICE_ITR_ADAPTIVE_MIN_INC * 256;
7481 ++ }
7482 ++}
7483 ++
7484 ++/**
7485 ++ * ice_update_itr - update the adaptive ITR value based on statistics
7486 ++ * @q_vector: structure containing interrupt and ring information
7487 ++ * @rc: structure containing ring performance data
7488 ++ *
7489 ++ * Stores a new ITR value based on packets and byte
7490 ++ * counts during the last interrupt. The advantage of per interrupt
7491 ++ * computation is faster updates and more accurate ITR for the current
7492 ++ * traffic pattern. Constants in this function were computed
7493 ++ * based on theoretical maximum wire speed and thresholds were set based
7494 ++ * on testing data as well as attempting to minimize response time
7495 ++ * while increasing bulk throughput.
7496 ++ */
7497 ++static void
7498 ++ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
7499 ++{
7500 ++ unsigned int avg_wire_size, packets, bytes, itr;
7501 ++ unsigned long next_update = jiffies;
7502 ++ bool container_is_rx;
7503 ++
7504 ++ if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
7505 ++ return;
7506 ++
7507 ++ /* If itr_countdown is set it means we programmed an ITR within
7508 ++ * the last 4 interrupt cycles. This has a side effect of us
7509 ++ * potentially firing an early interrupt. In order to work around
7510 ++ * this we need to throw out any data received for a few
7511 ++ * interrupts following the update.
7512 ++ */
7513 ++ if (q_vector->itr_countdown) {
7514 ++ itr = rc->target_itr;
7515 ++ goto clear_counts;
7516 ++ }
7517 ++
7518 ++ container_is_rx = (&q_vector->rx == rc);
7519 ++ /* For Rx we want to push the delay up and default to low latency.
7520 ++ * For Tx we want to pull the delay down and default to high latency.
7521 ++ */
7522 ++ itr = container_is_rx ?
7523 ++ ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
7524 ++ ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
7525 ++
7526 ++ /* If we didn't update within the last 1 - 2 jiffies, we can assume
7527 ++ * that either packets are coming in so slowly that there hasn't
7528 ++ * been any work, or that there is so much work that NAPI is dealing
7529 ++ * with interrupt moderation and we don't need to do anything.
7530 ++ */
7531 ++ if (time_after(next_update, rc->next_update))
7532 ++ goto clear_counts;
7533 ++
7534 ++ packets = rc->total_pkts;
7535 ++ bytes = rc->total_bytes;
7536 ++
7537 ++ if (container_is_rx) {
7538 ++ /* If this is Rx, there are 1 to 4 packets, and bytes are less
7539 ++ * than 9000, assume insufficient data to use the bulk rate
7540 ++ * limiting approach unless Tx is already in bulk rate limiting.
7541 ++ * We are likely latency driven.
7542 ++ */
7543 ++ if (packets && packets < 4 && bytes < 9000 &&
7544 ++ (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
7545 ++ itr = ICE_ITR_ADAPTIVE_LATENCY;
7546 ++ goto adjust_by_size;
7547 ++ }
7548 ++ } else if (packets < 4) {
7549 ++ /* If we have Tx and Rx ITR maxed and Tx ITR is running in
7550 ++ * bulk mode and we are receiving 4 or fewer packets just
7551 ++ * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
7552 ++ * that the Rx can relax.
7553 ++ */
7554 ++ if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
7555 ++ (q_vector->rx.target_itr & ICE_ITR_MASK) ==
7556 ++ ICE_ITR_ADAPTIVE_MAX_USECS)
7557 ++ goto clear_counts;
7558 ++ } else if (packets > 32) {
7559 ++ /* If we have processed over 32 packets in a single interrupt
7560 ++ * for Tx assume we need to switch over to "bulk" mode.
7561 ++ */
7562 ++ rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
7563 ++ }
7564 ++
7565 ++ /* If we have no packets to actually measure against, it means
7566 ++ * either one of the other queues on this vector is active, or
7567 ++ * we are a Tx queue doing TSO with too high of an interrupt rate.
7568 ++ *
7569 ++ * Otherwise, between 4 and 56 packets we can assume that our
7570 ++ * current interrupt delay is only slightly too low. As such we
7571 ++ * should increase it by a small fixed amount.
7572 ++ */
7573 ++ if (packets < 56) {
7574 ++ itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
7575 ++ if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
7576 ++ itr &= ICE_ITR_ADAPTIVE_LATENCY;
7577 ++ itr += ICE_ITR_ADAPTIVE_MAX_USECS;
7578 ++ }
7579 ++ goto clear_counts;
7580 ++ }
7581 ++
7582 ++ if (packets <= 256) {
7583 ++ itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
7584 ++ itr &= ICE_ITR_MASK;
7585 ++
7586 ++ /* Between 56 and 112 is our "goldilocks" zone where we are
7587 ++ * working out "just right". Just report that our current
7588 ++ * ITR is good for us.
7589 ++ */
7590 ++ if (packets <= 112)
7591 ++ goto clear_counts;
7592 ++
7593 ++ /* If packet count is 128 or greater we are likely looking
7594 ++ * at a slight overrun of the delay we want. Try halving
7595 ++ * our delay to see if that will cut the number of packets
7596 ++ * in half per interrupt.
7597 ++ */
7598 ++ itr >>= 1;
7599 ++ itr &= ICE_ITR_MASK;
7600 ++ if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
7601 ++ itr = ICE_ITR_ADAPTIVE_MIN_USECS;
7602 ++
7603 ++ goto clear_counts;
7604 ++ }
7605 ++
7606 ++ /* The paths below assume we are dealing with a bulk ITR since the
7607 ++ * number of packets is greater than 256. We are just going to have
7608 ++ * to compute a value and try to bring the count under control,
7609 ++ * though for smaller packet sizes there isn't much we can do as
7610 ++ * NAPI polling will likely be kicking in sooner rather than later.
7611 ++ */
7612 ++ itr = ICE_ITR_ADAPTIVE_BULK;
7613 ++
7614 ++adjust_by_size:
7615 ++ /* If packet counts are 256 or greater we can assume we have a gross
7616 ++ * overestimation of what the rate should be. Instead of trying to fine
7617 ++ * tune it just use the formula below to try and dial in an exact value
7618 ++ * tune it, just use the formula below to try and dial in an exact value
7619 ++ * given the current packet size of the frame.
7620 ++ avg_wire_size = bytes / packets;
7621 ++
7622 ++ /* The following is a crude approximation of:
7623 ++ * wmem_default / (size + overhead) = desired_pkts_per_int
7624 ++ * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
7625 ++ * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
7626 ++ *
7627 ++ * Assuming wmem_default is 212992 and overhead is 640 bytes per
7628 ++ * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
7629 ++ * formula down to
7630 ++ *
7631 ++ * (170 * (size + 24)) / (size + 640) = ITR
7632 ++ *
7633 ++ * We first do some math on the packet size and then finally bitshift
7634 ++ * by 8 after rounding up. We also have to account for PCIe link speed
7635 ++ * difference as ITR scales based on this.
7636 ++ */
7637 ++ if (avg_wire_size <= 60) {
7638 ++ /* Start at 250k ints/sec */
7639 ++ avg_wire_size = 4096;
7640 ++ } else if (avg_wire_size <= 380) {
7641 ++ /* 250K ints/sec to 60K ints/sec */
7642 ++ avg_wire_size *= 40;
7643 ++ avg_wire_size += 1696;
7644 ++ } else if (avg_wire_size <= 1084) {
7645 ++ /* 60K ints/sec to 36K ints/sec */
7646 ++ avg_wire_size *= 15;
7647 ++ avg_wire_size += 11452;
7648 ++ } else if (avg_wire_size <= 1980) {
7649 ++ /* 36K ints/sec to 30K ints/sec */
7650 ++ avg_wire_size *= 5;
7651 ++ avg_wire_size += 22420;
7652 ++ } else {
7653 ++ /* plateau at a limit of 30K ints/sec */
7654 ++ avg_wire_size = 32256;
7655 ++ }
7656 ++
7657 ++ /* If we are in low latency mode halve our delay which doubles the
7658 ++ * rate to somewhere between 100K to 16K ints/sec
7659 ++ */
7660 ++ if (itr & ICE_ITR_ADAPTIVE_LATENCY)
7661 ++ avg_wire_size >>= 1;
7662 ++
7663 ++ /* Resultant value is 256 times larger than it needs to be. This
7664 ++ * gives us room to adjust it as needed, either increasing or
7665 ++ * decreasing it based on link speeds of 10G, 2.5G, 1G, etc.
7666 ++ *
7667 ++ * Use addition as we have already recorded the new latency flag
7668 ++ * for the ITR value.
7669 ++ */
7670 ++ itr += DIV_ROUND_UP(avg_wire_size,
7671 ++ ice_itr_divisor(q_vector->vsi->port_info)) *
7672 ++ ICE_ITR_ADAPTIVE_MIN_INC;
7673 ++
7674 ++ if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
7675 ++ itr &= ICE_ITR_ADAPTIVE_LATENCY;
7676 ++ itr += ICE_ITR_ADAPTIVE_MAX_USECS;
7677 ++ }
7678 ++
7679 ++clear_counts:
7680 ++ /* write back value */
7681 ++ rc->target_itr = itr;
7682 ++
7683 ++ /* next update should occur within next jiffy */
7684 ++ rc->next_update = next_update + 1;
7685 ++
7686 ++ rc->total_bytes = 0;
7687 ++ rc->total_pkts = 0;
7688 ++}
7689 ++
7690 + /**
7691 + * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
7692 + * @itr_idx: interrupt throttling index
7693 +- * @reg_itr: interrupt throttling value adjusted based on ITR granularity
7694 ++ * @itr: interrupt throttling value in usecs
7695 + */
7696 +-static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
7697 ++static u32 ice_buildreg_itr(int itr_idx, u16 itr)
7698 + {
7699 ++ /* The itr value is reported in microseconds, and the register value is
7700 ++ * recorded in 2 microsecond units. For this reason we only need to
7701 ++ * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
7702 ++ * granularity as a shift instead of division. The mask makes sure the
7703 ++ * ITR value is never odd so we don't accidentally write into the field
7704 ++ * prior to the ITR field.
7705 ++ */
7706 ++ itr &= ICE_ITR_MASK;
7707 ++
7708 + return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
7709 + (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
7710 +- (reg_itr << GLINT_DYN_CTL_INTERVAL_S);
7711 ++ (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
7712 + }
7713 +
7714 ++/* The act of updating the ITR will cause it to immediately trigger. In order
7715 ++ * to prevent this from throwing off adaptive update statistics we defer the
7716 ++ * update so that it can only happen so often. So after either Tx or Rx are
7717 ++ * updated we make the adaptive scheme wait until either the ITR completely
7718 ++ * expires via the next_update expiration or we have been through at least
7719 ++ * 3 interrupts.
7720 ++ */
7721 ++#define ITR_COUNTDOWN_START 3
7722 ++
7723 + /**
7724 + * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
7725 + * @vsi: the VSI associated with the q_vector
7726 +@@ -1068,10 +1307,14 @@ static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
7727 + static void
7728 + ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
7729 + {
7730 +- struct ice_hw *hw = &vsi->back->hw;
7731 +- struct ice_ring_container *rc;
7732 ++ struct ice_ring_container *tx = &q_vector->tx;
7733 ++ struct ice_ring_container *rx = &q_vector->rx;
7734 + u32 itr_val;
7735 +
7736 ++ /* This will do nothing if dynamic updates are not enabled */
7737 ++ ice_update_itr(q_vector, tx);
7738 ++ ice_update_itr(q_vector, rx);
7739 ++
7740 + /* This block of logic allows us to get away with only updating
7741 + * one ITR value with each interrupt. The idea is to perform a
7742 + * pseudo-lazy update with the following criteria.
7743 +@@ -1080,35 +1323,36 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
7744 + * 2. If we must reduce an ITR that is given highest priority.
7745 + * 3. We then give priority to increasing ITR based on amount.
7746 + */
7747 +- if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
7748 +- rc = &q_vector->rx;
7749 ++ if (rx->target_itr < rx->current_itr) {
7750 + /* Rx ITR needs to be reduced, this is highest priority */
7751 +- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
7752 +- rc->current_itr = rc->target_itr;
7753 +- } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
7754 +- ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
7755 +- (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
7756 +- rc = &q_vector->tx;
7757 ++ itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
7758 ++ rx->current_itr = rx->target_itr;
7759 ++ q_vector->itr_countdown = ITR_COUNTDOWN_START;
7760 ++ } else if ((tx->target_itr < tx->current_itr) ||
7761 ++ ((rx->target_itr - rx->current_itr) <
7762 ++ (tx->target_itr - tx->current_itr))) {
7763 + /* Tx ITR needs to be reduced, this is second priority
7764 + * Tx ITR needs to be increased more than Rx, fourth priority
7765 + */
7766 +- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
7767 +- rc->current_itr = rc->target_itr;
7768 +- } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
7769 +- rc = &q_vector->rx;
7770 ++ itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
7771 ++ tx->current_itr = tx->target_itr;
7772 ++ q_vector->itr_countdown = ITR_COUNTDOWN_START;
7773 ++ } else if (rx->current_itr != rx->target_itr) {
7774 + /* Rx ITR needs to be increased, third priority */
7775 +- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
7776 +- rc->current_itr = rc->target_itr;
7777 ++ itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
7778 ++ rx->current_itr = rx->target_itr;
7779 ++ q_vector->itr_countdown = ITR_COUNTDOWN_START;
7780 + } else {
7781 + /* Still have to re-enable the interrupts */
7782 + itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
7783 ++ if (q_vector->itr_countdown)
7784 ++ q_vector->itr_countdown--;
7785 + }
7786 +
7787 +- if (!test_bit(__ICE_DOWN, vsi->state)) {
7788 +- int vector = vsi->hw_base_vector + q_vector->v_idx;
7789 +-
7790 +- wr32(hw, GLINT_DYN_CTL(vector), itr_val);
7791 +- }
7792 ++ if (!test_bit(__ICE_DOWN, vsi->state))
7793 ++ wr32(&vsi->back->hw,
7794 ++ GLINT_DYN_CTL(vsi->hw_base_vector + q_vector->v_idx),
7795 ++ itr_val);
7796 + }
7797 +
7798 + /**
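
The adaptive algorithm above ends by converting average wire size into an ITR value. Its if/else ladder is a piecewise-linear approximation of (170 * (size + 24)) / (size + 640), pre-scaled by 256 so that the later divide by the link-speed divisor (ICE_ITR_ADAPTIVE_MIN_INC * 1024 at 40G, * 256 by default) loses little precision. A standalone re-derivation with a worked example; the constants are copied from the hunk, everything else is illustrative:

    #include <stdio.h>

    /* Piecewise-linear approximation of (170 * (size + 24)) / (size + 640),
     * pre-scaled by 256, with the constants copied from the hunk above. */
    static unsigned int itr_scaled_from_wire_size(unsigned int avg_wire_size)
    {
        if (avg_wire_size <= 60)
            return 4096;                         /* start at ~250k ints/sec */
        if (avg_wire_size <= 380)
            return avg_wire_size * 40 + 1696;    /* 250k down to 60k */
        if (avg_wire_size <= 1084)
            return avg_wire_size * 15 + 11452;   /* 60k down to 36k */
        if (avg_wire_size <= 1980)
            return avg_wire_size * 5 + 22420;    /* 36k down to 30k */
        return 32256;                            /* plateau near 30k */
    }

    int main(void)
    {
        /* Worked example for 1500-byte frames at 40G: the scaled value is
         * 1500 * 5 + 22420 = 29920; DIV_ROUND_UP(29920, 2 * 1024) = 15,
         * times the 2-usec increment gives an ITR of 30 usecs, roughly
         * 33k interrupts per second. */
        printf("scaled=%u\n", itr_scaled_from_wire_size(1500));
        return 0;
    }

The same 2-usec granularity explains the ice_buildreg_itr() change: masking with ICE_ITR_MASK and shifting by GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S places the microsecond value into the register's 2-usec interval field without a division.
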
7799 +diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
7800 +index fc358ea81816..74a031fbd732 100644
7801 +--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
7802 ++++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
7803 +@@ -128,6 +128,12 @@ enum ice_rx_dtype {
7804 + #define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */
7805 + #define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~ICE_ITR_MASK)
7806 +
7807 ++#define ICE_ITR_ADAPTIVE_MIN_INC 0x0002
7808 ++#define ICE_ITR_ADAPTIVE_MIN_USECS 0x0002
7809 ++#define ICE_ITR_ADAPTIVE_MAX_USECS 0x00FA
7810 ++#define ICE_ITR_ADAPTIVE_LATENCY 0x8000
7811 ++#define ICE_ITR_ADAPTIVE_BULK 0x0000
7812 ++
7813 + #define ICE_DFLT_INTRL 0
7814 +
7815 + /* Legacy or Advanced Mode Queue */
7816 +diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
7817 +index 57155b4a59dc..8b1ee9f3a39d 100644
7818 +--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
7819 ++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
7820 +@@ -764,6 +764,7 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
7821 + bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
7822 + {
7823 + struct ice_hw *hw = &pf->hw;
7824 ++ struct ice_vf *vf;
7825 + int v, i;
7826 +
7827 + /* If we don't have any VFs, then there is nothing to reset */
7828 +@@ -778,12 +779,17 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
7829 + for (v = 0; v < pf->num_alloc_vfs; v++)
7830 + ice_trigger_vf_reset(&pf->vf[v], is_vflr);
7831 +
7832 +- /* Call Disable LAN Tx queue AQ call with VFR bit set and 0
7833 +- * queues to inform Firmware about VF reset.
7834 +- */
7835 +- for (v = 0; v < pf->num_alloc_vfs; v++)
7836 +- ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL,
7837 +- ICE_VF_RESET, v, NULL);
7838 ++ for (v = 0; v < pf->num_alloc_vfs; v++) {
7839 ++ struct ice_vsi *vsi;
7840 ++
7841 ++ vf = &pf->vf[v];
7842 ++ vsi = pf->vsi[vf->lan_vsi_idx];
7843 ++ if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
7844 ++ ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
7845 ++ ice_vsi_stop_rx_rings(vsi);
7846 ++ clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
7847 ++ }
7848 ++ }
7849 +
7850 + /* HW requires some time to make sure it can flush the FIFO for a VF
7851 + * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
7852 +@@ -796,9 +802,9 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
7853 +
7854 + /* Check each VF in sequence */
7855 + while (v < pf->num_alloc_vfs) {
7856 +- struct ice_vf *vf = &pf->vf[v];
7857 + u32 reg;
7858 +
7859 ++ vf = &pf->vf[v];
7860 + reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
7861 + if (!(reg & VPGEN_VFRSTAT_VFRD_M))
7862 + break;
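
The ice_reset_all_vfs() rework above stops each enabled VF's Tx and Rx rings directly instead of issuing the disable-queues AQ call, then keeps the existing bounded poll of VPGEN_VFRSTAT. A sketch of that poll shape; read_reg() and VFRD_BIT are stand-ins, not the real register layout:

    #include <stdbool.h>

    #define VFRD_BIT (1u << 0)   /* stand-in for VPGEN_VFRSTAT_VFRD_M */

    static bool wait_all_vfs_reset(unsigned int num_vfs,
                                   unsigned int (*read_reg)(unsigned int vf))
    {
        unsigned int v = 0, i;

        for (i = 0; i < 10; i++) {      /* a real loop would sleep here */
            while (v < num_vfs) {
                if (!(read_reg(v) & VFRD_BIT))
                    break;              /* this VF is not done yet */
                v++;                    /* done, move to the next VF */
            }
            if (v == num_vfs)
                return true;            /* every VF flushed its FIFO */
        }
        return false;                   /* timed out */
    }
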
7863 +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
7864 +index 3269d8e94744..580d14b49fda 100644
7865 +--- a/drivers/net/ethernet/intel/igb/igb_main.c
7866 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c
7867 +@@ -3452,6 +3452,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7868 + break;
7869 + }
7870 + }
7871 ++
7872 ++ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
7873 ++
7874 + pm_runtime_put_noidle(&pdev->dev);
7875 + return 0;
7876 +
7877 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
7878 +index 3f3cd32ae60a..e0ba59b5296f 100644
7879 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
7880 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
7881 +@@ -431,6 +431,9 @@ static inline int mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
7882 + return index;
7883 + }
7884 +
7885 ++/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
7886 ++void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
7887 ++
7888 + #else /* CONFIG_MLX5_ESWITCH */
7889 + /* eswitch API stubs */
7890 + static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
7891 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
7892 +index 9b2d78ee22b8..a97ffd0dbf01 100644
7893 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
7894 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
7895 +@@ -363,7 +363,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
7896 + esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
7897 + for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
7898 + rep = &esw->offloads.vport_reps[vf_vport];
7899 +- if (rep->rep_if[REP_ETH].state != REP_LOADED)
7900 ++ if (atomic_read(&rep->rep_if[REP_ETH].state) != REP_LOADED)
7901 + continue;
7902 +
7903 + err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
7904 +@@ -1306,7 +1306,8 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
7905 + ether_addr_copy(rep->hw_id, hw_id);
7906 +
7907 + for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
7908 +- rep->rep_if[rep_type].state = REP_UNREGISTERED;
7909 ++ atomic_set(&rep->rep_if[rep_type].state,
7910 ++ REP_UNREGISTERED);
7911 + }
7912 +
7913 + return 0;
7914 +@@ -1315,11 +1316,9 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
7915 + static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
7916 + struct mlx5_eswitch_rep *rep, u8 rep_type)
7917 + {
7918 +- if (rep->rep_if[rep_type].state != REP_LOADED)
7919 +- return;
7920 +-
7921 +- rep->rep_if[rep_type].unload(rep);
7922 +- rep->rep_if[rep_type].state = REP_REGISTERED;
7923 ++ if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
7924 ++ REP_LOADED, REP_REGISTERED) == REP_LOADED)
7925 ++ rep->rep_if[rep_type].unload(rep);
7926 + }
7927 +
7928 + static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
7929 +@@ -1380,16 +1379,15 @@ static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
7930 + {
7931 + int err = 0;
7932 +
7933 +- if (rep->rep_if[rep_type].state != REP_REGISTERED)
7934 +- return 0;
7935 +-
7936 +- err = rep->rep_if[rep_type].load(esw->dev, rep);
7937 +- if (err)
7938 +- return err;
7939 +-
7940 +- rep->rep_if[rep_type].state = REP_LOADED;
7941 ++ if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
7942 ++ REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
7943 ++ err = rep->rep_if[rep_type].load(esw->dev, rep);
7944 ++ if (err)
7945 ++ atomic_set(&rep->rep_if[rep_type].state,
7946 ++ REP_REGISTERED);
7947 ++ }
7948 +
7949 +- return 0;
7950 ++ return err;
7951 + }
7952 +
7953 + static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
7954 +@@ -1523,8 +1521,6 @@ static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
7955 + return 0;
7956 + }
7957 +
7958 +-void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
7959 +-
7960 + static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
7961 + {
7962 + mlx5e_tc_clean_fdb_peer_flows(esw);
7963 +@@ -2076,7 +2072,7 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
7964 + rep_if->get_proto_dev = __rep_if->get_proto_dev;
7965 + rep_if->priv = __rep_if->priv;
7966 +
7967 +- rep_if->state = REP_REGISTERED;
7968 ++ atomic_set(&rep_if->state, REP_REGISTERED);
7969 + }
7970 + }
7971 + EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
7972 +@@ -2091,7 +2087,7 @@ void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
7973 + __unload_reps_all_vport(esw, max_vf, rep_type);
7974 +
7975 + mlx5_esw_for_all_reps(esw, i, rep)
7976 +- rep->rep_if[rep_type].state = REP_UNREGISTERED;
7977 ++ atomic_set(&rep->rep_if[rep_type].state, REP_UNREGISTERED);
7978 + }
7979 + EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
7980 +
7981 +@@ -2111,7 +2107,7 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
7982 +
7983 + rep = mlx5_eswitch_get_rep(esw, vport);
7984 +
7985 +- if (rep->rep_if[rep_type].state == REP_LOADED &&
7986 ++ if (atomic_read(&rep->rep_if[rep_type].state) == REP_LOADED &&
7987 + rep->rep_if[rep_type].get_proto_dev)
7988 + return rep->rep_if[rep_type].get_proto_dev(rep);
7989 + return NULL;
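
The mlx5 change above turns the rep_if state into an atomic and guards the REGISTERED/LOADED transitions with atomic_cmpxchg(), so concurrent load and unload callers race safely: only the winner runs the callback, and a failed load rolls the state back. A C11 sketch of the load transition, assuming nothing about the driver beyond the three states:

    #include <stdatomic.h>

    enum rep_state { REP_UNREGISTERED, REP_REGISTERED, REP_LOADED };

    /* Only the caller that wins the REGISTERED -> LOADED exchange runs
     * load(); a failed load() rolls the state back so a later attempt
     * can retry. Losing the race is not an error, as in the hunk above. */
    static int rep_load(atomic_int *state, int (*load)(void))
    {
        int expected = REP_REGISTERED;

        if (atomic_compare_exchange_strong(state, &expected, REP_LOADED)) {
            int err = load();

            if (err)
                atomic_store(state, REP_REGISTERED);    /* roll back */
            return err;
        }
        return 0;
    }
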
7990 +diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
7991 +index a591583d120e..dd12b73a8853 100644
7992 +--- a/drivers/net/ethernet/ti/cpsw.c
7993 ++++ b/drivers/net/ethernet/ti/cpsw.c
7994 +@@ -800,12 +800,17 @@ static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
7995 +
7996 + static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
7997 + {
7998 +- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
7999 ++ struct cpsw_priv *priv = netdev_priv(ndev);
8000 ++ struct cpsw_common *cpsw = priv->cpsw;
8001 ++ int slave_port = -1;
8002 ++
8003 ++ if (cpsw->data.dual_emac)
8004 ++ slave_port = priv->emac_port + 1;
8005 +
8006 + if (ndev->flags & IFF_PROMISC) {
8007 + /* Enable promiscuous mode */
8008 + cpsw_set_promiscious(ndev, true);
8009 +- cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI);
8010 ++ cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port);
8011 + return;
8012 + } else {
8013 + /* Disable promiscuous mode */
8014 +@@ -813,7 +818,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
8015 + }
8016 +
8017 + /* Restore allmulti on vlans if necessary */
8018 +- cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);
8019 ++ cpsw_ale_set_allmulti(cpsw->ale,
8020 ++ ndev->flags & IFF_ALLMULTI, slave_port);
8021 +
8022 + /* add/remove mcast address either for real netdev or for vlan */
8023 + __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
8024 +diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
8025 +index 798c989d5d93..b3d9591b4824 100644
8026 +--- a/drivers/net/ethernet/ti/cpsw_ale.c
8027 ++++ b/drivers/net/ethernet/ti/cpsw_ale.c
8028 +@@ -482,24 +482,25 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
8029 + }
8030 + EXPORT_SYMBOL_GPL(cpsw_ale_del_vlan);
8031 +
8032 +-void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
8033 ++void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port)
8034 + {
8035 + u32 ale_entry[ALE_ENTRY_WORDS];
8036 +- int type, idx;
8037 + int unreg_mcast = 0;
8038 +-
8039 +- /* Only bother doing the work if the setting is actually changing */
8040 +- if (ale->allmulti == allmulti)
8041 +- return;
8042 +-
8043 +- /* Remember the new setting to check against next time */
8044 +- ale->allmulti = allmulti;
8045 ++ int type, idx;
8046 +
8047 + for (idx = 0; idx < ale->params.ale_entries; idx++) {
8048 ++ int vlan_members;
8049 ++
8050 + cpsw_ale_read(ale, idx, ale_entry);
8051 + type = cpsw_ale_get_entry_type(ale_entry);
8052 + if (type != ALE_TYPE_VLAN)
8053 + continue;
8054 ++ vlan_members =
8055 ++ cpsw_ale_get_vlan_member_list(ale_entry,
8056 ++ ale->vlan_field_bits);
8057 ++
8058 ++ if (port != -1 && !(vlan_members & BIT(port)))
8059 ++ continue;
8060 +
8061 + unreg_mcast =
8062 + cpsw_ale_get_vlan_unreg_mcast(ale_entry,
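
The cpsw ALE change above drops the cached ale->allmulti shortcut and filters by port instead: in dual-EMAC setups only VLAN entries whose member list contains the slave port are touched, while port == -1 preserves the old all-ports behaviour. The filtering core, sketched over plain arrays instead of ALE table reads:

    /* vlan_members[] is the decoded member bitmask per ALE VLAN entry;
     * port == -1 keeps the old "all ports" behaviour. */
    static void set_allmulti(const unsigned int *vlan_members,
                             unsigned int num_vlans, int port, int allmulti)
    {
        unsigned int idx;

        for (idx = 0; idx < num_vlans; idx++) {
            if (port != -1 && !(vlan_members[idx] & (1u << port)))
                continue;   /* slave port not in this VLAN, skip it */
            /* ...update the unreg-mcast mask of this entry per allmulti... */
            (void)allmulti;
        }
    }
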
8063 +diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
8064 +index cd07a3e96d57..1fe196d8a5e4 100644
8065 +--- a/drivers/net/ethernet/ti/cpsw_ale.h
8066 ++++ b/drivers/net/ethernet/ti/cpsw_ale.h
8067 +@@ -37,7 +37,6 @@ struct cpsw_ale {
8068 + struct cpsw_ale_params params;
8069 + struct timer_list timer;
8070 + unsigned long ageout;
8071 +- int allmulti;
8072 + u32 version;
8073 + /* These bits are different on NetCP NU Switch ALE */
8074 + u32 port_mask_bits;
8075 +@@ -116,7 +115,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
8076 + int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
8077 + int reg_mcast, int unreg_mcast);
8078 + int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
8079 +-void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti);
8080 ++void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port);
8081 +
8082 + int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
8083 + int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
8084 +diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
8085 +index e0dce373cdd9..3d4a166a49d5 100644
8086 +--- a/drivers/net/hyperv/netvsc.c
8087 ++++ b/drivers/net/hyperv/netvsc.c
8088 +@@ -875,12 +875,6 @@ static inline int netvsc_send_pkt(
8089 + } else if (ret == -EAGAIN) {
8090 + netif_tx_stop_queue(txq);
8091 + ndev_ctx->eth_stats.stop_queue++;
8092 +- if (atomic_read(&nvchan->queue_sends) < 1 &&
8093 +- !net_device->tx_disable) {
8094 +- netif_tx_wake_queue(txq);
8095 +- ndev_ctx->eth_stats.wake_queue++;
8096 +- ret = -ENOSPC;
8097 +- }
8098 + } else {
8099 + netdev_err(ndev,
8100 + "Unable to send packet pages %u len %u, ret %d\n",
8101 +@@ -888,6 +882,15 @@ static inline int netvsc_send_pkt(
8102 + ret);
8103 + }
8104 +
8105 ++ if (netif_tx_queue_stopped(txq) &&
8106 ++ atomic_read(&nvchan->queue_sends) < 1 &&
8107 ++ !net_device->tx_disable) {
8108 ++ netif_tx_wake_queue(txq);
8109 ++ ndev_ctx->eth_stats.wake_queue++;
8110 ++ if (ret == -EAGAIN)
8111 ++ ret = -ENOSPC;
8112 ++ }
8113 ++
8114 + return ret;
8115 + }
8116 +
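
The netvsc hunk above moves the wake-queue logic out of the -EAGAIN branch so it runs after every send attempt; otherwise a queue stopped by one send could stay stopped even though the channel had drained by the time the next completion fired. A sketch of the resulting ordering, with illustrative names rather than the driver's:

    #include <errno.h>
    #include <stdbool.h>

    struct txq_state {
        bool stopped;       /* netif_tx_queue_stopped() analogue */
        int  inflight;      /* nvchan->queue_sends analogue */
        bool tx_disable;
    };

    static int post_send_check(struct txq_state *q, int ret)
    {
        if (ret == -EAGAIN)
            q->stopped = true;              /* ring full: stop the queue */

        /* Unconditional recheck covers both branches. */
        if (q->stopped && q->inflight < 1 && !q->tx_disable) {
            q->stopped = false;             /* netif_tx_wake_queue() */
            if (ret == -EAGAIN)
                ret = -ENOSPC;              /* tell the caller to retry */
        }
        return ret;
    }
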
8117 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
8118 +index cd5966b0db57..f6a6cc5bf118 100644
8119 +--- a/drivers/net/phy/phy_device.c
8120 ++++ b/drivers/net/phy/phy_device.c
8121 +@@ -1829,13 +1829,25 @@ EXPORT_SYMBOL(genphy_read_status);
8122 + */
8123 + int genphy_soft_reset(struct phy_device *phydev)
8124 + {
8125 ++ u16 res = BMCR_RESET;
8126 + int ret;
8127 +
8128 +- ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
8129 ++ if (phydev->autoneg == AUTONEG_ENABLE)
8130 ++ res |= BMCR_ANRESTART;
8131 ++
8132 ++ ret = phy_modify(phydev, MII_BMCR, BMCR_ISOLATE, res);
8133 + if (ret < 0)
8134 + return ret;
8135 +
8136 +- return phy_poll_reset(phydev);
8137 ++ ret = phy_poll_reset(phydev);
8138 ++ if (ret)
8139 ++ return ret;
8140 ++
8141 ++ /* BMCR may be reset to defaults */
8142 ++ if (phydev->autoneg == AUTONEG_DISABLE)
8143 ++ ret = genphy_setup_forced(phydev);
8144 ++
8145 ++ return ret;
8146 + }
8147 + EXPORT_SYMBOL(genphy_soft_reset);
8148 +
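
genphy_soft_reset() above now restarts autonegotiation together with the reset, clears BMCR_ISOLATE in the same write, and re-applies forced link settings afterwards because BMCR can come back at its defaults. The phy_modify() helper it relies on is a read-modify-write; a sketch of those semantics over a plain variable instead of an MDIO register:

    #include <stdint.h>

    /* phy_modify(): clear the bits in 'mask', then set the bits in 'set'. */
    static uint16_t modify_bits(uint16_t regval, uint16_t mask, uint16_t set)
    {
        return (uint16_t)((regval & ~mask) | set);
    }

    /* e.g. modify_bits(bmcr, BMCR_ISOLATE, BMCR_RESET | BMCR_ANRESTART)
     * leaves ISOLATE cleared while kicking off reset plus autoneg restart. */
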
8149 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
8150 +index 366217263d70..d9a6699abe59 100644
8151 +--- a/drivers/net/usb/qmi_wwan.c
8152 ++++ b/drivers/net/usb/qmi_wwan.c
8153 +@@ -63,6 +63,7 @@ enum qmi_wwan_flags {
8154 +
8155 + enum qmi_wwan_quirks {
8156 + QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */
8157 ++ QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. endpoints */
8158 + };
8159 +
8160 + struct qmimux_hdr {
8161 +@@ -845,6 +846,16 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
8162 + .data = QMI_WWAN_QUIRK_DTR,
8163 + };
8164 +
8165 ++static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
8166 ++ .description = "WWAN/QMI device",
8167 ++ .flags = FLAG_WWAN | FLAG_SEND_ZLP,
8168 ++ .bind = qmi_wwan_bind,
8169 ++ .unbind = qmi_wwan_unbind,
8170 ++ .manage_power = qmi_wwan_manage_power,
8171 ++ .rx_fixup = qmi_wwan_rx_fixup,
8172 ++ .data = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
8173 ++};
8174 ++
8175 + #define HUAWEI_VENDOR_ID 0x12D1
8176 +
8177 + /* map QMI/wwan function by a fixed interface number */
8178 +@@ -865,6 +876,15 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
8179 + #define QMI_GOBI_DEVICE(vend, prod) \
8180 + QMI_FIXED_INTF(vend, prod, 0)
8181 +
8182 ++/* Quectel does not use fixed interface numbers on at least some of their
8183 ++ * devices. We need to check the number of endpoints to ensure that we bind to
8184 ++ * the correct interface.
8185 ++ */
8186 ++#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
8187 ++ USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
8188 ++ USB_SUBCLASS_VENDOR_SPEC, 0xff), \
8189 ++ .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
8190 ++
8191 + static const struct usb_device_id products[] = {
8192 + /* 1. CDC ECM like devices match on the control interface */
8193 + { /* Huawei E392, E398 and possibly others sharing both device id and more... */
8194 +@@ -969,20 +989,9 @@ static const struct usb_device_id products[] = {
8195 + USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
8196 + .driver_info = (unsigned long)&qmi_wwan_info,
8197 + },
8198 +- { /* Quectel EP06/EG06/EM06 */
8199 +- USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306,
8200 +- USB_CLASS_VENDOR_SPEC,
8201 +- USB_SUBCLASS_VENDOR_SPEC,
8202 +- 0xff),
8203 +- .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
8204 +- },
8205 +- { /* Quectel EG12/EM12 */
8206 +- USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
8207 +- USB_CLASS_VENDOR_SPEC,
8208 +- USB_SUBCLASS_VENDOR_SPEC,
8209 +- 0xff),
8210 +- .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
8211 +- },
8212 ++ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
8213 ++ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
8214 ++ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
8215 +
8216 + /* 3. Combined interface devices matching on interface number */
8217 + {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
8218 +@@ -1283,7 +1292,6 @@ static const struct usb_device_id products[] = {
8219 + {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
8220 + {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
8221 + {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
8222 +- {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
8223 + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
8224 + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
8225 + {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
8226 +@@ -1363,27 +1371,12 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
8227 + return false;
8228 + }
8229 +
8230 +-static bool quectel_diag_detected(struct usb_interface *intf)
8231 +-{
8232 +- struct usb_device *dev = interface_to_usbdev(intf);
8233 +- struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
8234 +- u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
8235 +- u16 id_product = le16_to_cpu(dev->descriptor.idProduct);
8236 +-
8237 +- if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
8238 +- return false;
8239 +-
8240 +- if (id_product == 0x0306 || id_product == 0x0512)
8241 +- return true;
8242 +- else
8243 +- return false;
8244 +-}
8245 +-
8246 + static int qmi_wwan_probe(struct usb_interface *intf,
8247 + const struct usb_device_id *prod)
8248 + {
8249 + struct usb_device_id *id = (struct usb_device_id *)prod;
8250 + struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
8251 ++ const struct driver_info *info;
8252 +
8253 + /* Workaround to enable dynamic IDs. This disables usbnet
8254 + * blacklisting functionality. Which, if required, can be
8255 +@@ -1417,10 +1410,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
8256 + * we need to match on class/subclass/protocol. These values are
8257 + * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
8258 + * different. Ignore the current interface if the number of endpoints
8259 +- * the number for the diag interface (two).
8260 ++ * equals the number for the diag interface (two).
8261 + */
8262 +- if (quectel_diag_detected(intf))
8263 +- return -ENODEV;
8264 ++ info = (void *)&id->driver_info;
8265 ++
8266 ++ if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
8267 ++ if (desc->bNumEndpoints == 2)
8268 ++ return -ENODEV;
8269 ++ }
8270 +
8271 + return usbnet_probe(intf, id);
8272 + }
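
The qmi_wwan rework above replaces the hard-coded quectel_diag_detected() device list with a per-device quirk flag, so future layouts only need a table entry. The probe-time decision reduces to the check sketched here, with plain values standing in for the USB descriptor read:

    #include <stdbool.h>

    #define QUIRK_QUECTEL_DYNCFG (1u << 1)   /* mirrors the enum bit above */

    /* The QMI and DIAG functions on these modems share class, subclass
     * and protocol, so the probe keys off the endpoint count instead of
     * a fixed interface number. num_endpoints stands in for the
     * descriptor's bNumEndpoints. */
    static bool reject_interface(unsigned long quirks,
                                 unsigned int num_endpoints)
    {
        /* Two endpoints identify the diagnostic interface, not QMI. */
        return (quirks & QUIRK_QUECTEL_DYNCFG) && num_endpoints == 2;
    }
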
8273 +diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
8274 +index a1e226652b4a..692730415d78 100644
8275 +--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
8276 ++++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
8277 +@@ -1274,7 +1274,12 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
8278 + params->wait);
8279 +
8280 + out:
8281 ++ /* When the sent packet was not acked by the receiver (ACK=0), rc will
8282 ++ * be -EAGAIN. In this case this function needs to return success,
8283 ++ * the ACK=0 will be reflected in tx_status.
8284 ++ */
8285 + tx_status = (rc == 0);
8286 ++ rc = (rc == -EAGAIN) ? 0 : rc;
8287 + cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len,
8288 + tx_status, GFP_KERNEL);
8289 +
8290 +diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
8291 +index bda4a9712f91..63116f4b62c7 100644
8292 +--- a/drivers/net/wireless/ath/wil6210/wmi.c
8293 ++++ b/drivers/net/wireless/ath/wil6210/wmi.c
8294 +@@ -3502,8 +3502,9 @@ int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len)
8295 + rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total,
8296 + WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
8297 + if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
8298 +- wil_err(wil, "mgmt_tx failed with status %d\n", evt.evt.status);
8299 +- rc = -EINVAL;
8300 ++ wil_dbg_wmi(wil, "mgmt_tx failed with status %d\n",
8301 ++ evt.evt.status);
8302 ++ rc = -EAGAIN;
8303 + }
8304 +
8305 + kfree(cmd);
8306 +@@ -3555,9 +3556,9 @@ int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len,
8307 + rc = wmi_call(wil, WMI_SW_TX_REQ_EXT_CMDID, vif->mid, cmd, total,
8308 + WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
8309 + if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
8310 +- wil_err(wil, "mgmt_tx_ext failed with status %d\n",
8311 +- evt.evt.status);
8312 +- rc = -EINVAL;
8313 ++ wil_dbg_wmi(wil, "mgmt_tx_ext failed with status %d\n",
8314 ++ evt.evt.status);
8315 ++ rc = -EAGAIN;
8316 + }
8317 +
8318 + kfree(cmd);
8319 +diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
8320 +index e99e766a3028..1cabae424839 100644
8321 +--- a/drivers/net/wireless/atmel/at76c50x-usb.c
8322 ++++ b/drivers/net/wireless/atmel/at76c50x-usb.c
8323 +@@ -2585,8 +2585,8 @@ static int __init at76_mod_init(void)
8324 + if (result < 0)
8325 + printk(KERN_ERR DRIVER_NAME
8326 + ": usb_register failed (status %d)\n", result);
8327 +-
8328 +- led_trigger_register_simple("at76_usb-tx", &ledtrig_tx);
8329 ++ else
8330 ++ led_trigger_register_simple("at76_usb-tx", &ledtrig_tx);
8331 + return result;
8332 + }
8333 +
8334 +diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c
8335 +index 46408a560814..aedee026c5e2 100644
8336 +--- a/drivers/net/wireless/broadcom/b43/phy_lp.c
8337 ++++ b/drivers/net/wireless/broadcom/b43/phy_lp.c
8338 +@@ -1835,7 +1835,7 @@ static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains,
8339 + static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
8340 + {
8341 + struct b43_phy_lp *lpphy = dev->phy.lp;
8342 +- struct lpphy_tx_gains gains, oldgains;
8343 ++ struct lpphy_tx_gains oldgains;
8344 + int old_txpctl, old_afe_ovr, old_rf, old_bbmult;
8345 +
8346 + lpphy_read_tx_pctl_mode_from_hardware(dev);
8347 +@@ -1849,9 +1849,9 @@ static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
8348 + lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
8349 +
8350 + if (dev->dev->chip_id == 0x4325 && dev->dev->chip_rev == 0)
8351 +- lpphy_papd_cal(dev, gains, 0, 1, 30);
8352 ++ lpphy_papd_cal(dev, oldgains, 0, 1, 30);
8353 + else
8354 +- lpphy_papd_cal(dev, gains, 0, 1, 65);
8355 ++ lpphy_papd_cal(dev, oldgains, 0, 1, 65);
8356 +
8357 + if (old_afe_ovr)
8358 + lpphy_set_tx_gains(dev, oldgains);
8359 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
8360 +index e92f6351bd22..8ee8af4e7ec4 100644
8361 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
8362 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
8363 +@@ -5464,6 +5464,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
8364 + conn_info->req_ie =
8365 + kmemdup(cfg->extra_buf, conn_info->req_ie_len,
8366 + GFP_KERNEL);
8367 ++ if (!conn_info->req_ie)
8368 ++ conn_info->req_ie_len = 0;
8369 + } else {
8370 + conn_info->req_ie_len = 0;
8371 + conn_info->req_ie = NULL;
8372 +@@ -5480,6 +5482,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
8373 + conn_info->resp_ie =
8374 + kmemdup(cfg->extra_buf, conn_info->resp_ie_len,
8375 + GFP_KERNEL);
8376 ++ if (!conn_info->resp_ie)
8377 ++ conn_info->resp_ie_len = 0;
8378 + } else {
8379 + conn_info->resp_ie_len = 0;
8380 + conn_info->resp_ie = NULL;
8381 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
8382 +index 4fbe8791f674..24ed19ed116e 100644
8383 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
8384 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
8385 +@@ -841,17 +841,17 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
8386 + bool rtnl_locked)
8387 + {
8388 + struct brcmf_if *ifp;
8389 ++ int ifidx;
8390 +
8391 + ifp = drvr->iflist[bsscfgidx];
8392 +- drvr->iflist[bsscfgidx] = NULL;
8393 + if (!ifp) {
8394 + bphy_err(drvr, "Null interface, bsscfgidx=%d\n", bsscfgidx);
8395 + return;
8396 + }
8397 + brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx,
8398 + ifp->ifidx);
8399 +- if (drvr->if2bss[ifp->ifidx] == bsscfgidx)
8400 +- drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID;
8401 ++ ifidx = ifp->ifidx;
8402 ++
8403 + if (ifp->ndev) {
8404 + if (bsscfgidx == 0) {
8405 + if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
8406 +@@ -879,6 +879,10 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
8407 + brcmf_p2p_ifp_removed(ifp, rtnl_locked);
8408 + kfree(ifp);
8409 + }
8410 ++
8411 ++ drvr->iflist[bsscfgidx] = NULL;
8412 ++ if (drvr->if2bss[ifidx] == bsscfgidx)
8413 ++ drvr->if2bss[ifidx] = BRCMF_BSSIDX_INVALID;
8414 + }
8415 +
8416 + void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked)
8417 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
8418 +index abeb305492e0..d48b8b2d946f 100644
8419 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
8420 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
8421 +@@ -580,24 +580,6 @@ static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
8422 + return ifidx == *(int *)arg;
8423 + }
8424 +
8425 +-static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
8426 +- int ifidx)
8427 +-{
8428 +- bool (*matchfn)(struct sk_buff *, void *) = NULL;
8429 +- struct sk_buff *skb;
8430 +- int prec;
8431 +-
8432 +- if (ifidx != -1)
8433 +- matchfn = brcmf_fws_ifidx_match;
8434 +- for (prec = 0; prec < q->num_prec; prec++) {
8435 +- skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
8436 +- while (skb) {
8437 +- brcmu_pkt_buf_free_skb(skb);
8438 +- skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
8439 +- }
8440 +- }
8441 +-}
8442 +-
8443 + static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger)
8444 + {
8445 + int i;
8446 +@@ -669,6 +651,28 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
8447 + return 0;
8448 + }
8449 +
8450 ++static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
8451 ++ int ifidx)
8452 ++{
8453 ++ bool (*matchfn)(struct sk_buff *, void *) = NULL;
8454 ++ struct sk_buff *skb;
8455 ++ int prec;
8456 ++ u32 hslot;
8457 ++
8458 ++ if (ifidx != -1)
8459 ++ matchfn = brcmf_fws_ifidx_match;
8460 ++ for (prec = 0; prec < q->num_prec; prec++) {
8461 ++ skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
8462 ++ while (skb) {
8463 ++ hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
8464 ++ brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
8465 ++ true);
8466 ++ brcmu_pkt_buf_free_skb(skb);
8467 ++ skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
8468 ++ }
8469 ++ }
8470 ++}
8471 ++
8472 + static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
8473 + u32 slot_id)
8474 + {
8475 +@@ -2200,6 +2204,8 @@ void brcmf_fws_del_interface(struct brcmf_if *ifp)
8476 + brcmf_fws_lock(fws);
8477 + ifp->fws_desc = NULL;
8478 + brcmf_dbg(TRACE, "deleting %s\n", entry->name);
8479 ++ brcmf_fws_macdesc_cleanup(fws, &fws->desc.iface[ifp->ifidx],
8480 ++ ifp->ifidx);
8481 + brcmf_fws_macdesc_deinit(entry);
8482 + brcmf_fws_cleanup(fws, ifp->ifidx);
8483 + brcmf_fws_unlock(fws);
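
The fwsignal fix above moves brcmf_fws_psq_flush() below the hanger helpers so it can pop each packet's hanger slot (recovered from the skb's HSLOT tag) before freeing it; otherwise the hanger table keeps a pointer to freed memory. The pairing, sketched with illustrative types:

    struct hanger {
        void *slots[256];
    };

    static void flush_one(struct hanger *h, unsigned int hslot, void *pkt,
                          void (*free_pkt)(void *))
    {
        if (h->slots[hslot] == pkt)
            h->slots[hslot] = NULL;     /* pop the hanger slot first */
        free_pkt(pkt);                  /* then free the packet */
    }
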
8484 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
8485 +index e9cbfd077710..81e1842f1d8c 100644
8486 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
8487 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
8488 +@@ -160,7 +160,7 @@ struct brcmf_usbdev_info {
8489 +
8490 + struct usb_device *usbdev;
8491 + struct device *dev;
8492 +- struct mutex dev_init_lock;
8493 ++ struct completion dev_init_done;
8494 +
8495 + int ctl_in_pipe, ctl_out_pipe;
8496 + struct urb *ctl_urb; /* URB for control endpoint */
8497 +@@ -682,12 +682,18 @@ static int brcmf_usb_up(struct device *dev)
8498 +
8499 + static void brcmf_cancel_all_urbs(struct brcmf_usbdev_info *devinfo)
8500 + {
8501 ++ int i;
8502 ++
8503 + if (devinfo->ctl_urb)
8504 + usb_kill_urb(devinfo->ctl_urb);
8505 + if (devinfo->bulk_urb)
8506 + usb_kill_urb(devinfo->bulk_urb);
8507 +- brcmf_usb_free_q(&devinfo->tx_postq, true);
8508 +- brcmf_usb_free_q(&devinfo->rx_postq, true);
8509 ++ if (devinfo->tx_reqs)
8510 ++ for (i = 0; i < devinfo->bus_pub.ntxq; i++)
8511 ++ usb_kill_urb(devinfo->tx_reqs[i].urb);
8512 ++ if (devinfo->rx_reqs)
8513 ++ for (i = 0; i < devinfo->bus_pub.nrxq; i++)
8514 ++ usb_kill_urb(devinfo->rx_reqs[i].urb);
8515 + }
8516 +
8517 + static void brcmf_usb_down(struct device *dev)
8518 +@@ -1193,11 +1199,11 @@ static void brcmf_usb_probe_phase2(struct device *dev, int ret,
8519 + if (ret)
8520 + goto error;
8521 +
8522 +- mutex_unlock(&devinfo->dev_init_lock);
8523 ++ complete(&devinfo->dev_init_done);
8524 + return;
8525 + error:
8526 + brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
8527 +- mutex_unlock(&devinfo->dev_init_lock);
8528 ++ complete(&devinfo->dev_init_done);
8529 + device_release_driver(dev);
8530 + }
8531 +
8532 +@@ -1265,7 +1271,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
8533 + if (ret)
8534 + goto fail;
8535 + /* we are done */
8536 +- mutex_unlock(&devinfo->dev_init_lock);
8537 ++ complete(&devinfo->dev_init_done);
8538 + return 0;
8539 + }
8540 + bus->chip = bus_pub->devid;
8541 +@@ -1325,11 +1331,10 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
8542 +
8543 + devinfo->usbdev = usb;
8544 + devinfo->dev = &usb->dev;
8545 +- /* Take an init lock, to protect for disconnect while still loading.
8546 ++ /* Init the completion, to protect against disconnect while still loading.
8547 + * Necessary because of the asynchronous firmware load construction
8548 + */
8549 +- mutex_init(&devinfo->dev_init_lock);
8550 +- mutex_lock(&devinfo->dev_init_lock);
8551 ++ init_completion(&devinfo->dev_init_done);
8552 +
8553 + usb_set_intfdata(intf, devinfo);
8554 +
8555 +@@ -1407,7 +1412,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
8556 + return 0;
8557 +
8558 + fail:
8559 +- mutex_unlock(&devinfo->dev_init_lock);
8560 ++ complete(&devinfo->dev_init_done);
8561 + kfree(devinfo);
8562 + usb_set_intfdata(intf, NULL);
8563 + return ret;
8564 +@@ -1422,7 +1427,7 @@ brcmf_usb_disconnect(struct usb_interface *intf)
8565 + devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
8566 +
8567 + if (devinfo) {
8568 +- mutex_lock(&devinfo->dev_init_lock);
8569 ++ wait_for_completion(&devinfo->dev_init_done);
8570 + /* Make sure that devinfo still exists. Firmware probe routines
8571 + * may have released the device and cleared the intfdata.
8572 + */
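
The brcmfmac USB change above swaps dev_init_lock for dev_init_done because the lock was taken in probe but released from the asynchronous firmware callback, a pattern mutexes forbid and completions are designed for. A user-space analogue built on a condition variable:

    #include <pthread.h>
    #include <stdbool.h>

    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        bool            done;
    };

    #define COMPLETION_INIT \
        { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false }

    static void complete(struct completion *c)            /* fw callback */
    {
        pthread_mutex_lock(&c->lock);
        c->done = true;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c) /* disconnect */
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }
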
8573 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
8574 +index 8eff2753abad..d493021f6031 100644
8575 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
8576 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
8577 +@@ -35,9 +35,10 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
8578 + struct brcmf_if *ifp;
8579 + const struct brcmf_vndr_dcmd_hdr *cmdhdr = data;
8580 + struct sk_buff *reply;
8581 +- int ret, payload, ret_len;
8582 ++ unsigned int payload, ret_len;
8583 + void *dcmd_buf = NULL, *wr_pointer;
8584 + u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
8585 ++ int ret;
8586 +
8587 + if (len < sizeof(*cmdhdr)) {
8588 + brcmf_err("vendor command too short: %d\n", len);
8589 +@@ -65,7 +66,7 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
8590 + brcmf_err("oversize return buffer %d\n", ret_len);
8591 + ret_len = BRCMF_DCMD_MAXLEN;
8592 + }
8593 +- payload = max(ret_len, len) + 1;
8594 ++ payload = max_t(unsigned int, ret_len, len) + 1;
8595 + dcmd_buf = vzalloc(payload);
8596 + if (NULL == dcmd_buf)
8597 + return -ENOMEM;
8598 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
8599 +index 98d123dd7177..eb452e9dce05 100644
8600 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
8601 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
8602 +@@ -2277,7 +2277,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
8603 + static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
8604 + const u8 *maddr = _maddr;
8605 + struct iwl_trans_txq_scd_cfg cfg = {
8606 +- .fifo = IWL_MVM_TX_FIFO_MCAST,
8607 ++ .fifo = vif->type == NL80211_IFTYPE_AP ?
8608 ++ IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
8609 + .sta_id = msta->sta_id,
8610 + .tid = 0,
8611 + .aggregate = false,
8612 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
8613 +index 8d4f0628622b..12f02aaf923e 100644
8614 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
8615 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
8616 +@@ -1434,10 +1434,15 @@ out_err:
8617 + static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
8618 + {
8619 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
8620 +- struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
8621 ++ struct iwl_rxq *rxq;
8622 + u32 r, i, count = 0;
8623 + bool emergency = false;
8624 +
8625 ++ if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
8626 ++ return;
8627 ++
8628 ++ rxq = &trans_pcie->rxq[queue];
8629 ++
8630 + restart:
8631 + spin_lock(&rxq->lock);
8632 + /* uCode's read index (stored in shared DRAM) indicates the last Rx
8633 +diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
8634 +index c46f0a54a0c7..e582d9b3e50c 100644
8635 +--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
8636 ++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
8637 +@@ -4082,16 +4082,20 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
8638 +
8639 + if (mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, true)) {
8640 + dev_err(priv->adapter->dev, "Failed to process hostcmd\n");
8641 ++ kfree(hostcmd);
8642 + return -EFAULT;
8643 + }
8644 +
8645 + /* process hostcmd response*/
8646 + skb = cfg80211_testmode_alloc_reply_skb(wiphy, hostcmd->len);
8647 +- if (!skb)
8648 ++ if (!skb) {
8649 ++ kfree(hostcmd);
8650 + return -ENOMEM;
8651 ++ }
8652 + err = nla_put(skb, MWIFIEX_TM_ATTR_DATA,
8653 + hostcmd->len, hostcmd->cmd);
8654 + if (err) {
8655 ++ kfree(hostcmd);
8656 + kfree_skb(skb);
8657 + return -EMSGSIZE;
8658 + }
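
The mwifiex hunk above plugs a leak: hostcmd is allocated before the command is issued, so every error return after that point must free it. The patch frees at each site; a sketch of the same fix using a single cleanup label, the usual alternative once the error paths multiply:

    #include <errno.h>
    #include <stdlib.h>

    static int run_hostcmd(int (*send)(void *), void *(*alloc_reply)(void))
    {
        void *hostcmd = malloc(64);     /* stands in for the parsed command */
        int ret = 0;

        if (!hostcmd)
            return -ENOMEM;
        if (send(hostcmd)) {
            ret = -EFAULT;              /* was an early return: leaked */
            goto out;
        }
        if (!alloc_reply())
            ret = -ENOMEM;              /* ditto */
    out:
        free(hostcmd);                  /* one site frees on every path */
        return ret;
    }
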
8659 +diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
8660 +index bfe84e55df77..f1522fb1c1e8 100644
8661 +--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
8662 ++++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
8663 +@@ -531,5 +531,8 @@ u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
8664 + rate_index = (rx_rate > MWIFIEX_RATE_INDEX_OFDM0) ?
8665 + rx_rate - 1 : rx_rate;
8666 +
8667 ++ if (rate_index >= MWIFIEX_MAX_AC_RX_RATES)
8668 ++ rate_index = MWIFIEX_MAX_AC_RX_RATES - 1;
8669 ++
8670 + return rate_index;
8671 + }
8672 +diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
8673 +index 76629b98c78d..8c7ee8302fb8 100644
8674 +--- a/drivers/net/wireless/mediatek/mt76/dma.c
8675 ++++ b/drivers/net/wireless/mediatek/mt76/dma.c
8676 +@@ -271,10 +271,11 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
8677 + return 0;
8678 + }
8679 +
8680 +-int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
8681 ++int mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
8682 + struct sk_buff *skb, struct mt76_wcid *wcid,
8683 + struct ieee80211_sta *sta)
8684 + {
8685 ++ struct mt76_queue *q = &dev->q_tx[qid];
8686 + struct mt76_queue_entry e;
8687 + struct mt76_txwi_cache *t;
8688 + struct mt76_queue_buf buf[32];
8689 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
8690 +index bcbfd3c4a44b..eb882b2cbc0e 100644
8691 +--- a/drivers/net/wireless/mediatek/mt76/mt76.h
8692 ++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
8693 +@@ -156,7 +156,7 @@ struct mt76_queue_ops {
8694 + struct mt76_queue_buf *buf, int nbufs, u32 info,
8695 + struct sk_buff *skb, void *txwi);
8696 +
8697 +- int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
8698 ++ int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
8699 + struct sk_buff *skb, struct mt76_wcid *wcid,
8700 + struct ieee80211_sta *sta);
8701 +
8702 +@@ -645,7 +645,7 @@ static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
8703 + return ((void *) IEEE80211_SKB_CB(skb)->status.status_driver_data);
8704 + }
8705 +
8706 +-int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
8707 ++int mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
8708 + struct sk_buff *skb, struct mt76_wcid *wcid,
8709 + struct ieee80211_sta *sta);
8710 +
8711 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
8712 +index 4dcb465095d1..99c0a3ba37cb 100644
8713 +--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
8714 ++++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
8715 +@@ -23,7 +23,7 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
8716 + if (!skb)
8717 + return;
8718 +
8719 +- mt76_dma_tx_queue_skb(&dev->mt76, &dev->mt76.q_tx[MT_TXQ_BEACON], skb,
8720 ++ mt76_dma_tx_queue_skb(&dev->mt76, MT_TXQ_BEACON, skb,
8721 + &mvif->sta.wcid, NULL);
8722 +
8723 + spin_lock_bh(&dev->ps_lock);
8724 +@@ -118,8 +118,8 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
8725 + struct ieee80211_vif *vif = info->control.vif;
8726 + struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
8727 +
8728 +- mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->sta.wcid,
8729 +- NULL);
8730 ++ mt76_dma_tx_queue_skb(&dev->mt76, MT_TXQ_CAB, skb,
8731 ++ &mvif->sta.wcid, NULL);
8732 + }
8733 + mt76_queue_kick(dev, q);
8734 + spin_unlock_bh(&q->lock);
8735 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
8736 +index daaed1220147..952fe19cba9b 100644
8737 +--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
8738 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
8739 +@@ -146,8 +146,8 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
8740 + struct ieee80211_vif *vif = info->control.vif;
8741 + struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
8742 +
8743 +- mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
8744 +- NULL);
8745 ++ mt76_dma_tx_queue_skb(&dev->mt76, MT_TXQ_PSD, skb,
8746 ++ &mvif->group_wcid, NULL);
8747 + }
8748 + spin_unlock_bh(&q->lock);
8749 + }
8750 +diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
8751 +index 2585df512335..0c1036da9a92 100644
8752 +--- a/drivers/net/wireless/mediatek/mt76/tx.c
8753 ++++ b/drivers/net/wireless/mediatek/mt76/tx.c
8754 +@@ -286,7 +286,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
8755 + q = &dev->q_tx[qid];
8756 +
8757 + spin_lock_bh(&q->lock);
8758 +- dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
8759 ++ dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
8760 + dev->queue_ops->kick(dev, q);
8761 +
8762 + if (q->queued > q->ndesc - 8 && !q->stopped) {
8763 +@@ -327,7 +327,6 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
8764 + {
8765 + struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
8766 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
8767 +- struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
8768 +
8769 + info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
8770 + if (last)
8771 +@@ -335,7 +334,7 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
8772 + IEEE80211_TX_CTL_REQ_TX_STATUS;
8773 +
8774 + mt76_skb_set_moredata(skb, !last);
8775 +- dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
8776 ++ dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
8777 + }
8778 +
8779 + void
8780 +@@ -390,6 +389,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
8781 + struct mt76_txq *mtxq, bool *empty)
8782 + {
8783 + struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
8784 ++ enum mt76_txq_id qid = mt76_txq_get_qid(txq);
8785 + struct ieee80211_tx_info *info;
8786 + struct mt76_wcid *wcid = mtxq->wcid;
8787 + struct sk_buff *skb;
8788 +@@ -423,7 +423,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
8789 + if (ampdu)
8790 + mt76_check_agg_ssn(mtxq, skb);
8791 +
8792 +- idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
8793 ++ idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);
8794 +
8795 + if (idx < 0)
8796 + return idx;
8797 +@@ -458,7 +458,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
8798 + if (cur_ampdu)
8799 + mt76_check_agg_ssn(mtxq, skb);
8800 +
8801 +- idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
8802 ++ idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
8803 + txq->sta);
8804 + if (idx < 0)
8805 + return idx;
8806 +diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
8807 +index 4c1abd492405..b1551419338f 100644
8808 +--- a/drivers/net/wireless/mediatek/mt76/usb.c
8809 ++++ b/drivers/net/wireless/mediatek/mt76/usb.c
8810 +@@ -726,10 +726,11 @@ mt76u_tx_build_sg(struct mt76_dev *dev, struct sk_buff *skb,
8811 + }
8812 +
8813 + static int
8814 +-mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
8815 ++mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
8816 + struct sk_buff *skb, struct mt76_wcid *wcid,
8817 + struct ieee80211_sta *sta)
8818 + {
8819 ++ struct mt76_queue *q = &dev->q_tx[qid];
8820 + struct mt76u_buf *buf;
8821 + u16 idx = q->tail;
8822 + int err;
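
The mt76 refactor above changes tx_queue_skb() to take a logical queue id instead of a queue pointer, letting each backend (DMA and USB) resolve the id to its own queue and freeing callers such as the beacon tasklets from knowing the mapping. The gist, with illustrative types:

    enum txq_id { TXQ_BE, TXQ_PSD, TXQ_BEACON, TXQ_CAB, NUM_TXQ };

    struct queue { int tail; };
    struct dev   { struct queue q_tx[NUM_TXQ]; };

    static int tx_queue_skb(struct dev *dev, enum txq_id qid, void *skb)
    {
        struct queue *q = &dev->q_tx[qid];  /* backend-side lookup now */

        /* ...map skb and write descriptors at q->tail... */
        (void)skb;
        return q->tail++;
    }
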
8823 +diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
8824 +index 217d2a7a43c7..ac746c322554 100644
8825 +--- a/drivers/net/wireless/realtek/rtlwifi/base.c
8826 ++++ b/drivers/net/wireless/realtek/rtlwifi/base.c
8827 +@@ -448,6 +448,11 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
8828 + /* <2> work queue */
8829 + rtlpriv->works.hw = hw;
8830 + rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
8831 ++ if (unlikely(!rtlpriv->works.rtl_wq)) {
8832 ++ pr_err("Failed to allocate work queue\n");
8833 ++ return;
8834 ++ }
8835 ++
8836 + INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
8837 + (void *)rtl_watchdog_wq_callback);
8838 + INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
8839 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
8840 +index 203e7b574e84..e2e0bfbc24fe 100644
8841 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
8842 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
8843 +@@ -600,6 +600,8 @@ void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
8844 + u1rsvdpageloc, 3);
8845 +
8846 + skb = dev_alloc_skb(totalpacketlen);
8847 ++ if (!skb)
8848 ++ return;
8849 + skb_put_data(skb, &reserved_page_packet, totalpacketlen);
8850 +
8851 + rtstatus = rtl_cmd_send_packet(hw, skb);
8852 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
8853 +index 18c76990a089..86b1b88cc4ed 100644
8854 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
8855 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
8856 +@@ -623,6 +623,8 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
8857 + u1rsvdpageloc, 3);
8858 +
8859 + skb = dev_alloc_skb(totalpacketlen);
8860 ++ if (!skb)
8861 ++ return;
8862 + skb_put_data(skb, &reserved_page_packet, totalpacketlen);
8863 +
8864 + if (cmd_send_packet)
8865 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
8866 +index 7c5b54b71a92..67305ce915ec 100644
8867 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
8868 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
8869 +@@ -744,6 +744,8 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
8870 + u1rsvdpageloc, 3);
8871 +
8872 + skb = dev_alloc_skb(totalpacketlen);
8873 ++ if (!skb)
8874 ++ return;
8875 + skb_put_data(skb, &reserved_page_packet, totalpacketlen);
8876 +
8877 + rtstatus = rtl_cmd_send_packet(hw, skb);
8878 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
8879 +index be451a6f7dbe..33481232fad0 100644
8880 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
8881 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
8882 +@@ -448,6 +448,8 @@ void rtl8723e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
8883 + u1rsvdpageloc, 3);
8884 +
8885 + skb = dev_alloc_skb(totalpacketlen);
8886 ++ if (!skb)
8887 ++ return;
8888 + skb_put_data(skb, &reserved_page_packet, totalpacketlen);
8889 +
8890 + rtstatus = rtl_cmd_send_packet(hw, skb);
8891 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
8892 +index 4d7fa27f55ca..aa56058af56e 100644
8893 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
8894 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
8895 +@@ -562,6 +562,8 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
8896 + u1rsvdpageloc, sizeof(u1rsvdpageloc));
8897 +
8898 + skb = dev_alloc_skb(totalpacketlen);
8899 ++ if (!skb)
8900 ++ return;
8901 + skb_put_data(skb, &reserved_page_packet, totalpacketlen);
8902 +
8903 + rtstatus = rtl_cmd_send_packet(hw, skb);
8904 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
8905 +index dc0eb692088f..fe32d397d287 100644
8906 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
8907 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
8908 +@@ -1623,6 +1623,8 @@ out:
8909 + &reserved_page_packet_8812[0], totalpacketlen);
8910 +
8911 + skb = dev_alloc_skb(totalpacketlen);
8912 ++ if (!skb)
8913 ++ return;
8914 + skb_put_data(skb, &reserved_page_packet_8812, totalpacketlen);
8915 +
8916 + rtstatus = rtl_cmd_send_packet(hw, skb);
8917 +@@ -1759,6 +1761,8 @@ out:
8918 + &reserved_page_packet_8821[0], totalpacketlen);
8919 +
8920 + skb = dev_alloc_skb(totalpacketlen);
8921 ++ if (!skb)
8922 ++ return;
8923 + skb_put_data(skb, &reserved_page_packet_8821, totalpacketlen);
8924 +
8925 + rtstatus = rtl_cmd_send_packet(hw, skb);
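/*
 * Illustrative sketch, not part of the patch: every rtlwifi hunk above adds
 * the same guard. dev_alloc_skb() returns NULL under memory pressure, and
 * the unchecked skb_put_data() that followed would oops. A hypothetical
 * helper showing the repeated pattern:
 */
#include <net/mac80211.h>
#include <linux/skbuff.h>

bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
                        /* driver send path, as seen in the hunks */

static bool send_reserved_page(struct ieee80211_hw *hw, const void *pkt,
                               unsigned int len)
{
        struct sk_buff *skb = dev_alloc_skb(len);

        if (!skb)       /* bail out instead of dereferencing NULL */
                return false;

        skb_put_data(skb, pkt, len);
        return rtl_cmd_send_packet(hw, skb);
}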
8926 +diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
8927 +index 831046e760f8..49df3bb08d41 100644
8928 +--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
8929 ++++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
8930 +@@ -188,27 +188,27 @@ bool rsi_is_cipher_wep(struct rsi_common *common)
8931 + * @adapter: Pointer to the adapter structure.
8932 + * @band: Operating band to be set.
8933 + *
8934 +- * Return: None.
8935 ++ * Return: int - 0 on success, negative error on failure.
8936 + */
8937 +-static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
8938 ++static int rsi_register_rates_channels(struct rsi_hw *adapter, int band)
8939 + {
8940 + struct ieee80211_supported_band *sbands = &adapter->sbands[band];
8941 + void *channels = NULL;
8942 +
8943 + if (band == NL80211_BAND_2GHZ) {
8944 +- channels = kmalloc(sizeof(rsi_2ghz_channels), GFP_KERNEL);
8945 +- memcpy(channels,
8946 +- rsi_2ghz_channels,
8947 +- sizeof(rsi_2ghz_channels));
8948 ++ channels = kmemdup(rsi_2ghz_channels, sizeof(rsi_2ghz_channels),
8949 ++ GFP_KERNEL);
8950 ++ if (!channels)
8951 ++ return -ENOMEM;
8952 + sbands->band = NL80211_BAND_2GHZ;
8953 + sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels);
8954 + sbands->bitrates = rsi_rates;
8955 + sbands->n_bitrates = ARRAY_SIZE(rsi_rates);
8956 + } else {
8957 +- channels = kmalloc(sizeof(rsi_5ghz_channels), GFP_KERNEL);
8958 +- memcpy(channels,
8959 +- rsi_5ghz_channels,
8960 +- sizeof(rsi_5ghz_channels));
8961 ++ channels = kmemdup(rsi_5ghz_channels, sizeof(rsi_5ghz_channels),
8962 ++ GFP_KERNEL);
8963 ++ if (!channels)
8964 ++ return -ENOMEM;
8965 + sbands->band = NL80211_BAND_5GHZ;
8966 + sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels);
8967 + sbands->bitrates = &rsi_rates[4];
8968 +@@ -227,6 +227,7 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
8969 + sbands->ht_cap.mcs.rx_mask[0] = 0xff;
8970 + sbands->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
8971 + /* sbands->ht_cap.mcs.rx_highest = 0x82; */
8972 ++ return 0;
8973 + }
8974 +
8975 + static int rsi_mac80211_hw_scan_start(struct ieee80211_hw *hw,
8976 +@@ -2064,11 +2065,16 @@ int rsi_mac80211_attach(struct rsi_common *common)
8977 + wiphy->available_antennas_rx = 1;
8978 + wiphy->available_antennas_tx = 1;
8979 +
8980 +- rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
8981 ++ status = rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
8982 ++ if (status)
8983 ++ return status;
8984 + wiphy->bands[NL80211_BAND_2GHZ] =
8985 + &adapter->sbands[NL80211_BAND_2GHZ];
8986 + if (common->num_supp_bands > 1) {
8987 +- rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ);
8988 ++ status = rsi_register_rates_channels(adapter,
8989 ++ NL80211_BAND_5GHZ);
8990 ++ if (status)
8991 ++ return status;
8992 + wiphy->bands[NL80211_BAND_5GHZ] =
8993 + &adapter->sbands[NL80211_BAND_5GHZ];
8994 + }
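/*
 * Illustrative sketch, not part of the patch: kmemdup() replaces the
 * kmalloc()+memcpy() pair in rsi_register_rates_channels(), and -- unlike
 * the old code -- its result is checked, so allocation failure now
 * propagates as -ENOMEM up through rsi_mac80211_attach() instead of
 * memcpy() writing through a NULL pointer. Hypothetical helper:
 */
#include <linux/slab.h>
#include <linux/errno.h>

static int dup_channel_table(const void *src, size_t len, void **out)
{
        void *copy = kmemdup(src, len, GFP_KERNEL);

        if (!copy)
                return -ENOMEM; /* caller unwinds instead of crashing */

        *out = copy;
        return 0;
}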
8995 +diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
8996 +index 90dc979f260b..c1608f0bf6d0 100644
8997 +--- a/drivers/net/wireless/st/cw1200/main.c
8998 ++++ b/drivers/net/wireless/st/cw1200/main.c
8999 +@@ -345,6 +345,11 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
9000 + mutex_init(&priv->wsm_cmd_mux);
9001 + mutex_init(&priv->conf_mutex);
9002 + priv->workqueue = create_singlethread_workqueue("cw1200_wq");
9003 ++ if (!priv->workqueue) {
9004 ++ ieee80211_free_hw(hw);
9005 ++ return NULL;
9006 ++ }
9007 ++
9008 + sema_init(&priv->scan.lock, 1);
9009 + INIT_WORK(&priv->scan.work, cw1200_scan_work);
9010 + INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work);
9011 +diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
9012 +index 0279eb1da3ef..d9d845077b8b 100644
9013 +--- a/drivers/nvdimm/pmem.c
9014 ++++ b/drivers/nvdimm/pmem.c
9015 +@@ -281,20 +281,27 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
9016 + return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
9017 + }
9018 +
9019 ++/*
9020 ++ * Use the 'no check' versions of copy_from_iter_flushcache() and
9021 ++ * copy_to_iter_mcsafe() to bypass HARDENED_USERCOPY overhead. Bounds
9022 ++ * checking, both file offset and device offset, is handled by
9023 ++ * dax_iomap_actor()
9024 ++ */
9025 + static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
9026 + void *addr, size_t bytes, struct iov_iter *i)
9027 + {
9028 +- return copy_from_iter_flushcache(addr, bytes, i);
9029 ++ return _copy_from_iter_flushcache(addr, bytes, i);
9030 + }
9031 +
9032 + static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
9033 + void *addr, size_t bytes, struct iov_iter *i)
9034 + {
9035 +- return copy_to_iter_mcsafe(addr, bytes, i);
9036 ++ return _copy_to_iter_mcsafe(addr, bytes, i);
9037 + }
9038 +
9039 + static const struct dax_operations pmem_dax_ops = {
9040 + .direct_access = pmem_dax_direct_access,
9041 ++ .dax_supported = generic_fsdax_supported,
9042 + .copy_from_iter = pmem_copy_from_iter,
9043 + .copy_to_iter = pmem_copy_to_iter,
9044 + };
9045 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
9046 +index 2c43e12b70af..8782d86a8ca3 100644
9047 +--- a/drivers/nvme/host/core.c
9048 ++++ b/drivers/nvme/host/core.c
9049 +@@ -1591,6 +1591,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
9050 + sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
9051 + unsigned short bs = 1 << ns->lba_shift;
9052 +
9053 ++ if (ns->lba_shift > PAGE_SHIFT) {
9054 ++ /* unsupported block size, set capacity to 0 later */
9055 ++ bs = (1 << 9);
9056 ++ }
9057 + blk_mq_freeze_queue(disk->queue);
9058 + blk_integrity_unregister(disk);
9059 +
9060 +@@ -1601,7 +1605,8 @@ static void nvme_update_disk_info(struct gendisk *disk,
9061 + if (ns->ms && !ns->ext &&
9062 + (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
9063 + nvme_init_integrity(disk, ns->ms, ns->pi_type);
9064 +- if (ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk))
9065 ++ if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
9066 ++ ns->lba_shift > PAGE_SHIFT)
9067 + capacity = 0;
9068 +
9069 + set_capacity(disk, capacity);
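/*
 * Illustrative sketch, not part of the patch: the block layer cannot handle
 * a logical block larger than PAGE_SIZE, so the nvme hunks above report a
 * 512-byte block size and force the capacity to 0, leaving the namespace
 * visible but unusable rather than misconfiguring the queue. Simplified
 * model (a PAGE_SHIFT of 12, i.e. 4 KiB pages, is assumed):
 */
static unsigned long long clamp_capacity(unsigned int lba_shift,
                                         unsigned long long nsze,
                                         unsigned int *bs)
{
        *bs = 1u << lba_shift;
        if (lba_shift > 12) {           /* unsupported block size */
                *bs = 1u << 9;          /* fall back to 512 B ... */
                return 0;               /* ... and an empty disk */
        }
        return nsze << (lba_shift - 9); /* capacity in 512 B sectors */
}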
9070 +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
9071 +index 11a5ecae78c8..e1824c2e0a1c 100644
9072 +--- a/drivers/nvme/host/rdma.c
9073 ++++ b/drivers/nvme/host/rdma.c
9074 +@@ -914,8 +914,9 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
9075 + {
9076 + blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
9077 + nvme_rdma_stop_queue(&ctrl->queues[0]);
9078 +- blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request,
9079 +- &ctrl->ctrl);
9080 ++ if (ctrl->ctrl.admin_tagset)
9081 ++ blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
9082 ++ nvme_cancel_request, &ctrl->ctrl);
9083 + blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
9084 + nvme_rdma_destroy_admin_queue(ctrl, remove);
9085 + }
9086 +@@ -926,8 +927,9 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
9087 + if (ctrl->ctrl.queue_count > 1) {
9088 + nvme_stop_queues(&ctrl->ctrl);
9089 + nvme_rdma_stop_io_queues(ctrl);
9090 +- blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request,
9091 +- &ctrl->ctrl);
9092 ++ if (ctrl->ctrl.tagset)
9093 ++ blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
9094 ++ nvme_cancel_request, &ctrl->ctrl);
9095 + if (remove)
9096 + nvme_start_queues(&ctrl->ctrl);
9097 + nvme_rdma_destroy_io_queues(ctrl, remove);
9098 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
9099 +index 68c49dd67210..aae5374d2b93 100644
9100 +--- a/drivers/nvme/host/tcp.c
9101 ++++ b/drivers/nvme/host/tcp.c
9102 +@@ -1710,7 +1710,9 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
9103 + {
9104 + blk_mq_quiesce_queue(ctrl->admin_q);
9105 + nvme_tcp_stop_queue(ctrl, 0);
9106 +- blk_mq_tagset_busy_iter(ctrl->admin_tagset, nvme_cancel_request, ctrl);
9107 ++ if (ctrl->admin_tagset)
9108 ++ blk_mq_tagset_busy_iter(ctrl->admin_tagset,
9109 ++ nvme_cancel_request, ctrl);
9110 + blk_mq_unquiesce_queue(ctrl->admin_q);
9111 + nvme_tcp_destroy_admin_queue(ctrl, remove);
9112 + }
9113 +@@ -1722,7 +1724,9 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
9114 + return;
9115 + nvme_stop_queues(ctrl);
9116 + nvme_tcp_stop_io_queues(ctrl);
9117 +- blk_mq_tagset_busy_iter(ctrl->tagset, nvme_cancel_request, ctrl);
9118 ++ if (ctrl->tagset)
9119 ++ blk_mq_tagset_busy_iter(ctrl->tagset,
9120 ++ nvme_cancel_request, ctrl);
9121 + if (remove)
9122 + nvme_start_queues(ctrl);
9123 + nvme_tcp_destroy_io_queues(ctrl, remove);
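/*
 * Illustrative sketch, not part of the patch: both the nvme-rdma and
 * nvme-tcp teardown paths can run before the tagset was ever allocated
 * (e.g. a connect attempt that failed early), so the hunks test the
 * pointer the core actually publishes before iterating busy requests.
 * Guarded-iteration shape (helper name hypothetical):
 */
#include <linux/blk-mq.h>

static void cancel_inflight(struct blk_mq_tag_set *tagset,
                            busy_tag_iter_fn *fn, void *priv)
{
        if (!tagset)    /* nothing was set up; nothing to cancel */
                return;

        blk_mq_tagset_busy_iter(tagset, fn, priv);
}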
9124 +diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
9125 +index bfd03e023308..8f8606b9bc9e 100644
9126 +--- a/drivers/perf/arm-cci.c
9127 ++++ b/drivers/perf/arm-cci.c
9128 +@@ -1684,21 +1684,24 @@ static int cci_pmu_probe(struct platform_device *pdev)
9129 + raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
9130 + mutex_init(&cci_pmu->reserve_mutex);
9131 + atomic_set(&cci_pmu->active_events, 0);
9132 +- cci_pmu->cpu = get_cpu();
9133 +-
9134 +- ret = cci_pmu_init(cci_pmu, pdev);
9135 +- if (ret) {
9136 +- put_cpu();
9137 +- return ret;
9138 +- }
9139 +
9140 ++ cci_pmu->cpu = raw_smp_processor_id();
9141 ++ g_cci_pmu = cci_pmu;
9142 + cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
9143 + "perf/arm/cci:online", NULL,
9144 + cci_pmu_offline_cpu);
9145 +- put_cpu();
9146 +- g_cci_pmu = cci_pmu;
9147 ++
9148 ++ ret = cci_pmu_init(cci_pmu, pdev);
9149 ++ if (ret)
9150 ++ goto error_pmu_init;
9151 ++
9152 + pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
9153 + return 0;
9154 ++
9155 ++error_pmu_init:
9156 ++ cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
9157 ++ g_cci_pmu = NULL;
9158 ++ return ret;
9159 + }
9160 +
9161 + static int cci_pmu_remove(struct platform_device *pdev)
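/*
 * Illustrative sketch, not part of the patch: the arm-cci hunk registers
 * the hotplug callback before cci_pmu_init() and unwinds it with
 * cpuhp_remove_state() when init fails, replacing the old
 * get_cpu()/put_cpu() pairing. Generic shape of that probe error path
 * (hw_init() and example_offline_cpu() are hypothetical stubs):
 */
#include <linux/cpuhotplug.h>
#include <linux/platform_device.h>

static int example_offline_cpu(unsigned int cpu) { return 0; }
static int hw_init(struct platform_device *pdev) { return 0; }

static int example_probe(struct platform_device *pdev)
{
        int ret;

        cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
                                  "perf/arm/cci:online", NULL,
                                  example_offline_cpu);

        ret = hw_init(pdev);    /* may fail after the hotplug hook is live */
        if (ret)
                goto err_remove_hotplug;
        return 0;

err_remove_hotplug:
        /* undo in reverse order, as the hunk's error_pmu_init label does */
        cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
        return ret;
}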
9162 +diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
9163 +index 4bbd9ede38c8..cc5af961778d 100644
9164 +--- a/drivers/phy/allwinner/phy-sun4i-usb.c
9165 ++++ b/drivers/phy/allwinner/phy-sun4i-usb.c
9166 +@@ -554,6 +554,7 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
9167 + struct sun4i_usb_phy_data *data =
9168 + container_of(work, struct sun4i_usb_phy_data, detect.work);
9169 + struct phy *phy0 = data->phys[0].phy;
9170 ++ struct sun4i_usb_phy *phy = phy_get_drvdata(phy0);
9171 + bool force_session_end, id_notify = false, vbus_notify = false;
9172 + int id_det, vbus_det;
9173 +
9174 +@@ -610,6 +611,9 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
9175 + mutex_unlock(&phy0->mutex);
9176 + }
9177 +
9178 ++ /* Enable PHY0 passby for host mode only. */
9179 ++ sun4i_usb_phy_passby(phy, !id_det);
9180 ++
9181 + /* Re-route PHY0 if necessary */
9182 + if (data->cfg->phy0_dual_route)
9183 + sun4i_usb_phy0_reroute(data, id_det);
9184 +diff --git a/drivers/phy/motorola/Kconfig b/drivers/phy/motorola/Kconfig
9185 +index 82651524ffb9..718f8729701d 100644
9186 +--- a/drivers/phy/motorola/Kconfig
9187 ++++ b/drivers/phy/motorola/Kconfig
9188 +@@ -13,7 +13,7 @@ config PHY_CPCAP_USB
9189 +
9190 + config PHY_MAPPHONE_MDM6600
9191 + tristate "Motorola Mapphone MDM6600 modem USB PHY driver"
9192 +- depends on OF && USB_SUPPORT
9193 ++ depends on OF && USB_SUPPORT && GPIOLIB
9194 + select GENERIC_PHY
9195 + help
9196 + Enable this for MDM6600 USB modem to work on Motorola phones
9197 +diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
9198 +index 103efc456a12..022ac16f626c 100644
9199 +--- a/drivers/phy/ti/Kconfig
9200 ++++ b/drivers/phy/ti/Kconfig
9201 +@@ -37,7 +37,7 @@ config OMAP_USB2
9202 + depends on USB_SUPPORT
9203 + select GENERIC_PHY
9204 + select USB_PHY
9205 +- select OMAP_CONTROL_PHY if ARCH_OMAP2PLUS
9206 ++ select OMAP_CONTROL_PHY if ARCH_OMAP2PLUS || COMPILE_TEST
9207 + help
9208 + Enable this to support the transceiver that is part of SOC. This
9209 + driver takes care of all the PHY functionality apart from comparator.
9210 +diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
9211 +index aa5f949ef219..5b0678f310e5 100644
9212 +--- a/drivers/pinctrl/pinctrl-pistachio.c
9213 ++++ b/drivers/pinctrl/pinctrl-pistachio.c
9214 +@@ -1367,6 +1367,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
9215 + if (!of_find_property(child, "gpio-controller", NULL)) {
9216 + dev_err(pctl->dev,
9217 + "No gpio-controller property for bank %u\n", i);
9218 ++ of_node_put(child);
9219 + ret = -ENODEV;
9220 + goto err;
9221 + }
9222 +@@ -1374,6 +1375,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
9223 + irq = irq_of_parse_and_map(child, 0);
9224 + if (irq < 0) {
9225 + dev_err(pctl->dev, "No IRQ for bank %u: %d\n", i, irq);
9226 ++ of_node_put(child);
9227 + ret = irq;
9228 + goto err;
9229 + }
9230 +diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
9231 +index e66af93f2cbf..195b442a2343 100644
9232 +--- a/drivers/pinctrl/pinctrl-st.c
9233 ++++ b/drivers/pinctrl/pinctrl-st.c
9234 +@@ -1170,7 +1170,7 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
9235 + struct property *pp;
9236 + struct st_pinconf *conf;
9237 + struct device_node *pins;
9238 +- int i = 0, npins = 0, nr_props;
9239 ++ int i = 0, npins = 0, nr_props, ret = 0;
9240 +
9241 + pins = of_get_child_by_name(np, "st,pins");
9242 + if (!pins)
9243 +@@ -1185,7 +1185,8 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
9244 + npins++;
9245 + } else {
9246 + pr_warn("Invalid st,pins in %pOFn node\n", np);
9247 +- return -EINVAL;
9248 ++ ret = -EINVAL;
9249 ++ goto out_put_node;
9250 + }
9251 + }
9252 +
9253 +@@ -1195,8 +1196,10 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
9254 + grp->pin_conf = devm_kcalloc(info->dev,
9255 + npins, sizeof(*conf), GFP_KERNEL);
9256 +
9257 +- if (!grp->pins || !grp->pin_conf)
9258 +- return -ENOMEM;
9259 ++ if (!grp->pins || !grp->pin_conf) {
9260 ++ ret = -ENOMEM;
9261 ++ goto out_put_node;
9262 ++ }
9263 +
9264 + /* <bank offset mux direction rt_type rt_delay rt_clk> */
9265 + for_each_property_of_node(pins, pp) {
9266 +@@ -1229,9 +1232,11 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
9267 + }
9268 + i++;
9269 + }
9270 ++
9271 ++out_put_node:
9272 + of_node_put(pins);
9273 +
9274 +- return 0;
9275 ++ return ret;
9276 + }
9277 +
9278 + static int st_pctl_parse_functions(struct device_node *np,
9279 +diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
9280 +index 44c6b753f692..85ddf49a5188 100644
9281 +--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
9282 ++++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
9283 +@@ -71,6 +71,7 @@ s5pv210_retention_init(struct samsung_pinctrl_drv_data *drvdata,
9284 + }
9285 +
9286 + clk_base = of_iomap(np, 0);
9287 ++ of_node_put(np);
9288 + if (!clk_base) {
9289 + pr_err("%s: failed to map clock registers\n", __func__);
9290 + return ERR_PTR(-EINVAL);
9291 +diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c
9292 +index caa44dd2880a..3cb69309912b 100644
9293 +--- a/drivers/pinctrl/zte/pinctrl-zx.c
9294 ++++ b/drivers/pinctrl/zte/pinctrl-zx.c
9295 +@@ -411,6 +411,7 @@ int zx_pinctrl_init(struct platform_device *pdev,
9296 + }
9297 +
9298 + zpctl->aux_base = of_iomap(np, 0);
9299 ++ of_node_put(np);
9300 + if (!zpctl->aux_base)
9301 + return -ENOMEM;
9302 +
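/*
 * Illustrative sketch, not part of the patch: the pinctrl hunks above
 * (pistachio, st, exynos, zx) all fix device-tree refcount leaks. Node
 * lookups such as of_get_child_by_name() take a reference that must be
 * dropped with of_node_put() on every exit path, including errors and
 * early loop exits. Minimal pattern (helper name hypothetical):
 */
#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *map_child(struct device_node *parent, const char *name)
{
        struct device_node *np = of_get_child_by_name(parent, name);
        void __iomem *base;

        if (!np)
                return NULL;

        base = of_iomap(np, 0);
        of_node_put(np);        /* drop the ref whether or not the map worked */
        return base;
}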
9303 +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
9304 +index 968dcd9d7a07..35a7d020afec 100644
9305 +--- a/drivers/regulator/core.c
9306 ++++ b/drivers/regulator/core.c
9307 +@@ -2256,6 +2256,7 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev)
9308 + if (pin->gpiod == rdev->ena_pin->gpiod) {
9309 + if (pin->request_count <= 1) {
9310 + pin->request_count = 0;
9311 ++ gpiod_put(pin->gpiod);
9312 + list_del(&pin->list);
9313 + kfree(pin);
9314 + rdev->ena_pin = NULL;
9315 +@@ -5061,10 +5062,11 @@ void regulator_unregister(struct regulator_dev *rdev)
9316 + regulator_put(rdev->supply);
9317 + }
9318 +
9319 ++ flush_work(&rdev->disable_work.work);
9320 ++
9321 + mutex_lock(&regulator_list_mutex);
9322 +
9323 + debugfs_remove_recursive(rdev->debugfs);
9324 +- flush_work(&rdev->disable_work.work);
9325 + WARN_ON(rdev->open_count);
9326 + regulator_remove_coupling(rdev);
9327 + unset_regulator_supplies(rdev);
9328 +diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
9329 +index 3c6fac793658..3ade4b8d204e 100644
9330 +--- a/drivers/regulator/da9055-regulator.c
9331 ++++ b/drivers/regulator/da9055-regulator.c
9332 +@@ -487,8 +487,10 @@ static irqreturn_t da9055_ldo5_6_oc_irq(int irq, void *data)
9333 + {
9334 + struct da9055_regulator *regulator = data;
9335 +
9336 ++ regulator_lock(regulator->rdev);
9337 + regulator_notifier_call_chain(regulator->rdev,
9338 + REGULATOR_EVENT_OVER_CURRENT, NULL);
9339 ++ regulator_unlock(regulator->rdev);
9340 +
9341 + return IRQ_HANDLED;
9342 + }
9343 +diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
9344 +index b064d8a19d4c..bab88ddfc509 100644
9345 +--- a/drivers/regulator/da9062-regulator.c
9346 ++++ b/drivers/regulator/da9062-regulator.c
9347 +@@ -974,8 +974,10 @@ static irqreturn_t da9062_ldo_lim_event(int irq, void *data)
9348 + continue;
9349 +
9350 + if (BIT(regl->info->oc_event.lsb) & bits) {
9351 ++ regulator_lock(regl->rdev);
9352 + regulator_notifier_call_chain(regl->rdev,
9353 + REGULATOR_EVENT_OVER_CURRENT, NULL);
9354 ++ regulator_unlock(regl->rdev);
9355 + handled = IRQ_HANDLED;
9356 + }
9357 + }
9358 +diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
9359 +index 2b0c7a85306a..d7bdb95b7602 100644
9360 +--- a/drivers/regulator/da9063-regulator.c
9361 ++++ b/drivers/regulator/da9063-regulator.c
9362 +@@ -615,9 +615,12 @@ static irqreturn_t da9063_ldo_lim_event(int irq, void *data)
9363 + if (regl->info->oc_event.reg != DA9063_REG_STATUS_D)
9364 + continue;
9365 +
9366 +- if (BIT(regl->info->oc_event.lsb) & bits)
9367 ++ if (BIT(regl->info->oc_event.lsb) & bits) {
9368 ++ regulator_lock(regl->rdev);
9369 + regulator_notifier_call_chain(regl->rdev,
9370 + REGULATOR_EVENT_OVER_CURRENT, NULL);
9371 ++ regulator_unlock(regl->rdev);
9372 ++ }
9373 + }
9374 +
9375 + return IRQ_HANDLED;
9376 +diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
9377 +index 109ee12d4362..4d7fe4819c1c 100644
9378 +--- a/drivers/regulator/da9211-regulator.c
9379 ++++ b/drivers/regulator/da9211-regulator.c
9380 +@@ -322,8 +322,10 @@ static irqreturn_t da9211_irq_handler(int irq, void *data)
9381 + goto error_i2c;
9382 +
9383 + if (reg_val & DA9211_E_OV_CURR_A) {
9384 ++ regulator_lock(chip->rdev[0]);
9385 + regulator_notifier_call_chain(chip->rdev[0],
9386 + REGULATOR_EVENT_OVER_CURRENT, NULL);
9387 ++ regulator_unlock(chip->rdev[0]);
9388 +
9389 + err = regmap_write(chip->regmap, DA9211_REG_EVENT_B,
9390 + DA9211_E_OV_CURR_A);
9391 +@@ -334,8 +336,10 @@ static irqreturn_t da9211_irq_handler(int irq, void *data)
9392 + }
9393 +
9394 + if (reg_val & DA9211_E_OV_CURR_B) {
9395 ++ regulator_lock(chip->rdev[1]);
9396 + regulator_notifier_call_chain(chip->rdev[1],
9397 + REGULATOR_EVENT_OVER_CURRENT, NULL);
9398 ++ regulator_unlock(chip->rdev[1]);
9399 +
9400 + err = regmap_write(chip->regmap, DA9211_REG_EVENT_B,
9401 + DA9211_E_OV_CURR_B);
9402 +diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
9403 +index 14fd38807134..2e16a6ab491d 100644
9404 +--- a/drivers/regulator/lp8755.c
9405 ++++ b/drivers/regulator/lp8755.c
9406 +@@ -372,10 +372,13 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data)
9407 + for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
9408 + if ((flag0 & (0x4 << icnt))
9409 + && (pchip->irqmask & (0x04 << icnt))
9410 +- && (pchip->rdev[icnt] != NULL))
9411 ++ && (pchip->rdev[icnt] != NULL)) {
9412 ++ regulator_lock(pchip->rdev[icnt]);
9413 + regulator_notifier_call_chain(pchip->rdev[icnt],
9414 + LP8755_EVENT_PWR_FAULT,
9415 + NULL);
9416 ++ regulator_unlock(pchip->rdev[icnt]);
9417 ++ }
9418 +
9419 + /* read flag1 register */
9420 + ret = lp8755_read(pchip, 0x0E, &flag1);
9421 +@@ -389,18 +392,24 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data)
9422 + /* send OCP event to all regulator devices */
9423 + if ((flag1 & 0x01) && (pchip->irqmask & 0x01))
9424 + for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
9425 +- if (pchip->rdev[icnt] != NULL)
9426 ++ if (pchip->rdev[icnt] != NULL) {
9427 ++ regulator_lock(pchip->rdev[icnt]);
9428 + regulator_notifier_call_chain(pchip->rdev[icnt],
9429 + LP8755_EVENT_OCP,
9430 + NULL);
9431 ++ regulator_unlock(pchip->rdev[icnt]);
9432 ++ }
9433 +
9434 + /* send OVP event to all regulator devices */
9435 + if ((flag1 & 0x02) && (pchip->irqmask & 0x02))
9436 + for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
9437 +- if (pchip->rdev[icnt] != NULL)
9438 ++ if (pchip->rdev[icnt] != NULL) {
9439 ++ regulator_lock(pchip->rdev[icnt]);
9440 + regulator_notifier_call_chain(pchip->rdev[icnt],
9441 + LP8755_EVENT_OVP,
9442 + NULL);
9443 ++ regulator_unlock(pchip->rdev[icnt]);
9444 ++ }
9445 + return IRQ_HANDLED;
9446 +
9447 + err_i2c:
9448 +diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
9449 +index 63f724f260ef..75089b037b72 100644
9450 +--- a/drivers/regulator/ltc3589.c
9451 ++++ b/drivers/regulator/ltc3589.c
9452 +@@ -419,16 +419,22 @@ static irqreturn_t ltc3589_isr(int irq, void *dev_id)
9453 +
9454 + if (irqstat & LTC3589_IRQSTAT_THERMAL_WARN) {
9455 + event = REGULATOR_EVENT_OVER_TEMP;
9456 +- for (i = 0; i < LTC3589_NUM_REGULATORS; i++)
9457 ++ for (i = 0; i < LTC3589_NUM_REGULATORS; i++) {
9458 ++ regulator_lock(ltc3589->regulators[i]);
9459 + regulator_notifier_call_chain(ltc3589->regulators[i],
9460 + event, NULL);
9461 ++ regulator_unlock(ltc3589->regulators[i]);
9462 ++ }
9463 + }
9464 +
9465 + if (irqstat & LTC3589_IRQSTAT_UNDERVOLT_WARN) {
9466 + event = REGULATOR_EVENT_UNDER_VOLTAGE;
9467 +- for (i = 0; i < LTC3589_NUM_REGULATORS; i++)
9468 ++ for (i = 0; i < LTC3589_NUM_REGULATORS; i++) {
9469 ++ regulator_lock(ltc3589->regulators[i]);
9470 + regulator_notifier_call_chain(ltc3589->regulators[i],
9471 + event, NULL);
9472 ++ regulator_unlock(ltc3589->regulators[i]);
9473 ++ }
9474 + }
9475 +
9476 + /* Clear warning condition */
9477 +diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c
9478 +index e6d66e492b85..4be90c78c720 100644
9479 +--- a/drivers/regulator/ltc3676.c
9480 ++++ b/drivers/regulator/ltc3676.c
9481 +@@ -285,17 +285,23 @@ static irqreturn_t ltc3676_isr(int irq, void *dev_id)
9482 + if (irqstat & LTC3676_IRQSTAT_THERMAL_WARN) {
9483 + dev_warn(dev, "Over-temperature Warning\n");
9484 + event = REGULATOR_EVENT_OVER_TEMP;
9485 +- for (i = 0; i < LTC3676_NUM_REGULATORS; i++)
9486 ++ for (i = 0; i < LTC3676_NUM_REGULATORS; i++) {
9487 ++ regulator_lock(ltc3676->regulators[i]);
9488 + regulator_notifier_call_chain(ltc3676->regulators[i],
9489 + event, NULL);
9490 ++ regulator_unlock(ltc3676->regulators[i]);
9491 ++ }
9492 + }
9493 +
9494 + if (irqstat & LTC3676_IRQSTAT_UNDERVOLT_WARN) {
9495 + dev_info(dev, "Undervoltage Warning\n");
9496 + event = REGULATOR_EVENT_UNDER_VOLTAGE;
9497 +- for (i = 0; i < LTC3676_NUM_REGULATORS; i++)
9498 ++ for (i = 0; i < LTC3676_NUM_REGULATORS; i++) {
9499 ++ regulator_lock(ltc3676->regulators[i]);
9500 + regulator_notifier_call_chain(ltc3676->regulators[i],
9501 + event, NULL);
9502 ++ regulator_unlock(ltc3676->regulators[i]);
9503 ++ }
9504 + }
9505 +
9506 + /* Clear warning condition */
9507 +diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
9508 +index 1600f9821891..810816e9df5d 100644
9509 +--- a/drivers/regulator/pv88060-regulator.c
9510 ++++ b/drivers/regulator/pv88060-regulator.c
9511 +@@ -244,9 +244,11 @@ static irqreturn_t pv88060_irq_handler(int irq, void *data)
9512 + if (reg_val & PV88060_E_VDD_FLT) {
9513 + for (i = 0; i < PV88060_MAX_REGULATORS; i++) {
9514 + if (chip->rdev[i] != NULL) {
9515 ++ regulator_lock(chip->rdev[i]);
9516 + regulator_notifier_call_chain(chip->rdev[i],
9517 + REGULATOR_EVENT_UNDER_VOLTAGE,
9518 + NULL);
9519 ++ regulator_unlock(chip->rdev[i]);
9520 + }
9521 + }
9522 +
9523 +@@ -261,9 +263,11 @@ static irqreturn_t pv88060_irq_handler(int irq, void *data)
9524 + if (reg_val & PV88060_E_OVER_TEMP) {
9525 + for (i = 0; i < PV88060_MAX_REGULATORS; i++) {
9526 + if (chip->rdev[i] != NULL) {
9527 ++ regulator_lock(chip->rdev[i]);
9528 + regulator_notifier_call_chain(chip->rdev[i],
9529 + REGULATOR_EVENT_OVER_TEMP,
9530 + NULL);
9531 ++ regulator_unlock(chip->rdev[i]);
9532 + }
9533 + }
9534 +
9535 +diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c
9536 +index bdddacdbeb99..6279216fb254 100644
9537 +--- a/drivers/regulator/pv88080-regulator.c
9538 ++++ b/drivers/regulator/pv88080-regulator.c
9539 +@@ -345,9 +345,11 @@ static irqreturn_t pv88080_irq_handler(int irq, void *data)
9540 + if (reg_val & PV88080_E_VDD_FLT) {
9541 + for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
9542 + if (chip->rdev[i] != NULL) {
9543 ++ regulator_lock(chip->rdev[i]);
9544 + regulator_notifier_call_chain(chip->rdev[i],
9545 + REGULATOR_EVENT_UNDER_VOLTAGE,
9546 + NULL);
9547 ++ regulator_unlock(chip->rdev[i]);
9548 + }
9549 + }
9550 +
9551 +@@ -362,9 +364,11 @@ static irqreturn_t pv88080_irq_handler(int irq, void *data)
9552 + if (reg_val & PV88080_E_OVER_TEMP) {
9553 + for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
9554 + if (chip->rdev[i] != NULL) {
9555 ++ regulator_lock(chip->rdev[i]);
9556 + regulator_notifier_call_chain(chip->rdev[i],
9557 + REGULATOR_EVENT_OVER_TEMP,
9558 + NULL);
9559 ++ regulator_unlock(chip->rdev[i]);
9560 + }
9561 + }
9562 +
9563 +diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
9564 +index 6e97cc6df2ee..90f4f907fb3f 100644
9565 +--- a/drivers/regulator/pv88090-regulator.c
9566 ++++ b/drivers/regulator/pv88090-regulator.c
9567 +@@ -237,9 +237,11 @@ static irqreturn_t pv88090_irq_handler(int irq, void *data)
9568 + if (reg_val & PV88090_E_VDD_FLT) {
9569 + for (i = 0; i < PV88090_MAX_REGULATORS; i++) {
9570 + if (chip->rdev[i] != NULL) {
9571 ++ regulator_lock(chip->rdev[i]);
9572 + regulator_notifier_call_chain(chip->rdev[i],
9573 + REGULATOR_EVENT_UNDER_VOLTAGE,
9574 + NULL);
9575 ++ regulator_unlock(chip->rdev[i]);
9576 + }
9577 + }
9578 +
9579 +@@ -254,9 +256,11 @@ static irqreturn_t pv88090_irq_handler(int irq, void *data)
9580 + if (reg_val & PV88090_E_OVER_TEMP) {
9581 + for (i = 0; i < PV88090_MAX_REGULATORS; i++) {
9582 + if (chip->rdev[i] != NULL) {
9583 ++ regulator_lock(chip->rdev[i]);
9584 + regulator_notifier_call_chain(chip->rdev[i],
9585 + REGULATOR_EVENT_OVER_TEMP,
9586 + NULL);
9587 ++ regulator_unlock(chip->rdev[i]);
9588 + }
9589 + }
9590 +
9591 +diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
9592 +index 12b422373580..d1873f94bca7 100644
9593 +--- a/drivers/regulator/wm831x-dcdc.c
9594 ++++ b/drivers/regulator/wm831x-dcdc.c
9595 +@@ -183,9 +183,11 @@ static irqreturn_t wm831x_dcdc_uv_irq(int irq, void *data)
9596 + {
9597 + struct wm831x_dcdc *dcdc = data;
9598 +
9599 ++ regulator_lock(dcdc->regulator);
9600 + regulator_notifier_call_chain(dcdc->regulator,
9601 + REGULATOR_EVENT_UNDER_VOLTAGE,
9602 + NULL);
9603 ++ regulator_unlock(dcdc->regulator);
9604 +
9605 + return IRQ_HANDLED;
9606 + }
9607 +@@ -194,9 +196,11 @@ static irqreturn_t wm831x_dcdc_oc_irq(int irq, void *data)
9608 + {
9609 + struct wm831x_dcdc *dcdc = data;
9610 +
9611 ++ regulator_lock(dcdc->regulator);
9612 + regulator_notifier_call_chain(dcdc->regulator,
9613 + REGULATOR_EVENT_OVER_CURRENT,
9614 + NULL);
9615 ++ regulator_unlock(dcdc->regulator);
9616 +
9617 + return IRQ_HANDLED;
9618 + }
9619 +diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
9620 +index 6dd891d7eee3..11f351191dba 100644
9621 +--- a/drivers/regulator/wm831x-isink.c
9622 ++++ b/drivers/regulator/wm831x-isink.c
9623 +@@ -140,9 +140,11 @@ static irqreturn_t wm831x_isink_irq(int irq, void *data)
9624 + {
9625 + struct wm831x_isink *isink = data;
9626 +
9627 ++ regulator_lock(isink->regulator);
9628 + regulator_notifier_call_chain(isink->regulator,
9629 + REGULATOR_EVENT_OVER_CURRENT,
9630 + NULL);
9631 ++ regulator_unlock(isink->regulator);
9632 +
9633 + return IRQ_HANDLED;
9634 + }
9635 +diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
9636 +index e4a6f888484e..fcd038e7cd80 100644
9637 +--- a/drivers/regulator/wm831x-ldo.c
9638 ++++ b/drivers/regulator/wm831x-ldo.c
9639 +@@ -51,9 +51,11 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
9640 + {
9641 + struct wm831x_ldo *ldo = data;
9642 +
9643 ++ regulator_lock(ldo->regulator);
9644 + regulator_notifier_call_chain(ldo->regulator,
9645 + REGULATOR_EVENT_UNDER_VOLTAGE,
9646 + NULL);
9647 ++ regulator_unlock(ldo->regulator);
9648 +
9649 + return IRQ_HANDLED;
9650 + }
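/*
 * Illustrative sketch, not part of the patch: every regulator hunk above
 * wraps regulator_notifier_call_chain() in regulator_lock()/
 * regulator_unlock(), since the notifier chain expects the rdev lock to be
 * held by the caller. The common IRQ-handler shape (handler name
 * hypothetical):
 */
#include <linux/interrupt.h>
#include <linux/regulator/driver.h>

static irqreturn_t example_oc_irq(int irq, void *data)
{
        struct regulator_dev *rdev = data;

        regulator_lock(rdev);
        regulator_notifier_call_chain(rdev, REGULATOR_EVENT_OVER_CURRENT,
                                      NULL);
        regulator_unlock(rdev);

        return IRQ_HANDLED;
}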
9651 +diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
9652 +index d25282b4a7dd..73697e4b18a9 100644
9653 +--- a/drivers/rtc/rtc-88pm860x.c
9654 ++++ b/drivers/rtc/rtc-88pm860x.c
9655 +@@ -421,7 +421,7 @@ static int pm860x_rtc_remove(struct platform_device *pdev)
9656 + struct pm860x_rtc_info *info = platform_get_drvdata(pdev);
9657 +
9658 + #ifdef VRTC_CALIBRATION
9659 +- flush_scheduled_work();
9660 ++ cancel_delayed_work_sync(&info->calib_work);
9661 + /* disable measurement */
9662 + pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0);
9663 + #endif /* VRTC_CALIBRATION */
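/*
 * Illustrative sketch, not part of the patch: flush_scheduled_work() waits
 * for *every* item on the system workqueue and can deadlock when called
 * from a remove() path; cancelling just the driver's own delayed work is
 * the targeted fix the rtc-88pm860x hunk applies. Pattern (struct and
 * function names hypothetical):
 */
#include <linux/workqueue.h>

struct example_info {
        struct delayed_work calib_work;
};

static void example_remove(struct example_info *info)
{
        /* stop our periodic work and wait for a running instance to finish */
        cancel_delayed_work_sync(&info->calib_work);
}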
9664 +diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
9665 +index c5908cfea234..8e6c9b3bcc29 100644
9666 +--- a/drivers/rtc/rtc-stm32.c
9667 ++++ b/drivers/rtc/rtc-stm32.c
9668 +@@ -788,11 +788,14 @@ static int stm32_rtc_probe(struct platform_device *pdev)
9669 + ret = device_init_wakeup(&pdev->dev, true);
9670 + if (rtc->data->has_wakeirq) {
9671 + rtc->wakeirq_alarm = platform_get_irq(pdev, 1);
9672 +- if (rtc->wakeirq_alarm <= 0)
9673 +- ret = rtc->wakeirq_alarm;
9674 +- else
9675 ++ if (rtc->wakeirq_alarm > 0) {
9676 + ret = dev_pm_set_dedicated_wake_irq(&pdev->dev,
9677 + rtc->wakeirq_alarm);
9678 ++ } else {
9679 ++ ret = rtc->wakeirq_alarm;
9680 ++ if (rtc->wakeirq_alarm == -EPROBE_DEFER)
9681 ++ goto err;
9682 ++ }
9683 + }
9684 + if (ret)
9685 + dev_warn(&pdev->dev, "alarm can't wake up the system: %d", ret);
9686 +diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
9687 +index 153820876a82..2f741f455c30 100644
9688 +--- a/drivers/rtc/rtc-xgene.c
9689 ++++ b/drivers/rtc/rtc-xgene.c
9690 +@@ -168,6 +168,10 @@ static int xgene_rtc_probe(struct platform_device *pdev)
9691 + if (IS_ERR(pdata->csr_base))
9692 + return PTR_ERR(pdata->csr_base);
9693 +
9694 ++ pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
9695 ++ if (IS_ERR(pdata->rtc))
9696 ++ return PTR_ERR(pdata->rtc);
9697 ++
9698 + irq = platform_get_irq(pdev, 0);
9699 + if (irq < 0) {
9700 + dev_err(&pdev->dev, "No IRQ resource\n");
9701 +@@ -198,15 +202,15 @@ static int xgene_rtc_probe(struct platform_device *pdev)
9702 + return ret;
9703 + }
9704 +
9705 +- pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
9706 +- &xgene_rtc_ops, THIS_MODULE);
9707 +- if (IS_ERR(pdata->rtc)) {
9708 +- clk_disable_unprepare(pdata->clk);
9709 +- return PTR_ERR(pdata->rtc);
9710 +- }
9711 +-
9712 + /* HW does not support update faster than 1 seconds */
9713 + pdata->rtc->uie_unsupported = 1;
9714 ++ pdata->rtc->ops = &xgene_rtc_ops;
9715 ++
9716 ++ ret = rtc_register_device(pdata->rtc);
9717 ++ if (ret) {
9718 ++ clk_disable_unprepare(pdata->clk);
9719 ++ return ret;
9720 ++ }
9721 +
9722 + return 0;
9723 + }
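/*
 * Illustrative sketch, not part of the patch: the rtc-xgene hunks split
 * registration in two -- devm_rtc_allocate_device() early in probe, then
 * rtc_register_device() as the last step -- so flags such as
 * uie_unsupported are set before the device goes live and the error path
 * stays a simple unwind. Shape of the change (probe signature simplified):
 */
#include <linux/err.h>
#include <linux/rtc.h>

static int example_rtc_probe(struct device *dev,
                             const struct rtc_class_ops *ops)
{
        struct rtc_device *rtc = devm_rtc_allocate_device(dev);

        if (IS_ERR(rtc))
                return PTR_ERR(rtc);

        rtc->uie_unsupported = 1;       /* HW updates no faster than 1 s */
        rtc->ops = ops;

        return rtc_register_device(rtc);        /* last step: go live */
}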
9724 +diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
9725 +index 4e8aedd50cb0..d04d4378ca50 100644
9726 +--- a/drivers/s390/block/dcssblk.c
9727 ++++ b/drivers/s390/block/dcssblk.c
9728 +@@ -59,6 +59,7 @@ static size_t dcssblk_dax_copy_to_iter(struct dax_device *dax_dev,
9729 +
9730 + static const struct dax_operations dcssblk_dax_ops = {
9731 + .direct_access = dcssblk_dax_direct_access,
9732 ++ .dax_supported = generic_fsdax_supported,
9733 + .copy_from_iter = dcssblk_dax_copy_from_iter,
9734 + .copy_to_iter = dcssblk_dax_copy_to_iter,
9735 + };
9736 +diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
9737 +index 9811fd8a0c73..92eabbb5f18d 100644
9738 +--- a/drivers/s390/cio/cio.h
9739 ++++ b/drivers/s390/cio/cio.h
9740 +@@ -115,7 +115,7 @@ struct subchannel {
9741 + struct schib_config config;
9742 + } __attribute__ ((aligned(8)));
9743 +
9744 +-DECLARE_PER_CPU(struct irb, cio_irb);
9745 ++DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb);
9746 +
9747 + #define to_subchannel(n) container_of(n, struct subchannel, dev)
9748 +
9749 +diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
9750 +index 0b3b9de45c60..9e84d8a971ad 100644
9751 +--- a/drivers/s390/cio/vfio_ccw_drv.c
9752 ++++ b/drivers/s390/cio/vfio_ccw_drv.c
9753 +@@ -40,26 +40,30 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
9754 + if (ret != -EBUSY)
9755 + goto out_unlock;
9756 +
9757 ++ iretry = 255;
9758 + do {
9759 +- iretry = 255;
9760 +
9761 + ret = cio_cancel_halt_clear(sch, &iretry);
9762 +- while (ret == -EBUSY) {
9763 +- /*
9764 +- * Flush all I/O and wait for
9765 +- * cancel/halt/clear completion.
9766 +- */
9767 +- private->completion = &completion;
9768 +- spin_unlock_irq(sch->lock);
9769 +
9770 +- wait_for_completion_timeout(&completion, 3*HZ);
9771 ++ if (ret == -EIO) {
9772 ++ pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
9773 ++ sch->schid.ssid, sch->schid.sch_no);
9774 ++ break;
9775 ++ }
9776 ++
9777 ++ /*
9778 ++ * Flush all I/O and wait for
9779 ++ * cancel/halt/clear completion.
9780 ++ */
9781 ++ private->completion = &completion;
9782 ++ spin_unlock_irq(sch->lock);
9783 +
9784 +- spin_lock_irq(sch->lock);
9785 +- private->completion = NULL;
9786 +- flush_workqueue(vfio_ccw_work_q);
9787 +- ret = cio_cancel_halt_clear(sch, &iretry);
9788 +- };
9789 ++ if (ret == -EBUSY)
9790 ++ wait_for_completion_timeout(&completion, 3*HZ);
9791 +
9792 ++ private->completion = NULL;
9793 ++ flush_workqueue(vfio_ccw_work_q);
9794 ++ spin_lock_irq(sch->lock);
9795 + ret = cio_disable_subchannel(sch);
9796 + } while (ret == -EBUSY);
9797 + out_unlock:
9798 +diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
9799 +index f673e106c041..dc5ff47de3fe 100644
9800 +--- a/drivers/s390/cio/vfio_ccw_ops.c
9801 ++++ b/drivers/s390/cio/vfio_ccw_ops.c
9802 +@@ -130,11 +130,12 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
9803 +
9804 + if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
9805 + (private->state != VFIO_CCW_STATE_STANDBY)) {
9806 +- if (!vfio_ccw_mdev_reset(mdev))
9807 ++ if (!vfio_ccw_sch_quiesce(private->sch))
9808 + private->state = VFIO_CCW_STATE_STANDBY;
9809 + /* The state will be NOT_OPER on error. */
9810 + }
9811 +
9812 ++ cp_free(&private->cp);
9813 + private->mdev = NULL;
9814 + atomic_inc(&private->avail);
9815 +
9816 +@@ -158,6 +159,14 @@ static void vfio_ccw_mdev_release(struct mdev_device *mdev)
9817 + struct vfio_ccw_private *private =
9818 + dev_get_drvdata(mdev_parent_dev(mdev));
9819 +
9820 ++ if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
9821 ++ (private->state != VFIO_CCW_STATE_STANDBY)) {
9822 ++ if (!vfio_ccw_mdev_reset(mdev))
9823 ++ private->state = VFIO_CCW_STATE_STANDBY;
9824 ++ /* The state will be NOT_OPER on error. */
9825 ++ }
9826 ++
9827 ++ cp_free(&private->cp);
9828 + vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
9829 + &private->nb);
9830 + }
9831 +diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
9832 +index 689c2af7026a..c31b2d31cd83 100644
9833 +--- a/drivers/s390/crypto/zcrypt_api.c
9834 ++++ b/drivers/s390/crypto/zcrypt_api.c
9835 +@@ -659,6 +659,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
9836 + trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
9837 +
9838 + if (mex->outputdatalength < mex->inputdatalength) {
9839 ++ func_code = 0;
9840 + rc = -EINVAL;
9841 + goto out;
9842 + }
9843 +@@ -742,6 +743,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
9844 + trace_s390_zcrypt_req(crt, TP_ICARSACRT);
9845 +
9846 + if (crt->outputdatalength < crt->inputdatalength) {
9847 ++ func_code = 0;
9848 + rc = -EINVAL;
9849 + goto out;
9850 + }
9851 +@@ -951,6 +953,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
9852 +
9853 + targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
9854 + if (!targets) {
9855 ++ func_code = 0;
9856 + rc = -ENOMEM;
9857 + goto out;
9858 + }
9859 +@@ -958,6 +961,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
9860 + uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
9861 + if (copy_from_user(targets, uptr,
9862 + target_num * sizeof(*targets))) {
9863 ++ func_code = 0;
9864 + rc = -EFAULT;
9865 + goto out_free;
9866 + }
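/*
 * Illustrative sketch, not part of the patch: the zcrypt hunks zero
 * func_code on each early-exit path because the shared exit label traces
 * that value, and reaching it with the variable uninitialized is undefined
 * behaviour. Generic shape (all names hypothetical):
 */
#include <linux/errno.h>

static void trace_req_done(unsigned int func_code, int rc) { }

static int do_request(unsigned int inlen, unsigned int outlen)
{
        unsigned int func_code;
        int rc;

        if (outlen < inlen) {
                func_code = 0;  /* initialize before the shared exit path */
                rc = -EINVAL;
                goto out;
        }

        func_code = 0x5a;       /* normally set by the backend lookup */
        rc = 0;
out:
        trace_req_done(func_code, rc);
        return rc;
}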
9867 +diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
9868 +index c851cf6e01c4..d603dfea97ab 100644
9869 +--- a/drivers/s390/net/qeth_core.h
9870 ++++ b/drivers/s390/net/qeth_core.h
9871 +@@ -163,6 +163,12 @@ struct qeth_vnicc_info {
9872 + bool rx_bcast_enabled;
9873 + };
9874 +
9875 ++static inline int qeth_is_adp_supported(struct qeth_ipa_info *ipa,
9876 ++ enum qeth_ipa_setadp_cmd func)
9877 ++{
9878 ++ return (ipa->supported_funcs & func);
9879 ++}
9880 ++
9881 + static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
9882 + enum qeth_ipa_funcs func)
9883 + {
9884 +@@ -176,9 +182,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
9885 + }
9886 +
9887 + #define qeth_adp_supported(c, f) \
9888 +- qeth_is_ipa_supported(&c->options.adp, f)
9889 +-#define qeth_adp_enabled(c, f) \
9890 +- qeth_is_ipa_enabled(&c->options.adp, f)
9891 ++ qeth_is_adp_supported(&c->options.adp, f)
9892 + #define qeth_is_supported(c, f) \
9893 + qeth_is_ipa_supported(&c->options.ipa4, f)
9894 + #define qeth_is_enabled(c, f) \
9895 +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
9896 +index 44bd6f04c145..8c73a99daff3 100644
9897 +--- a/drivers/s390/net/qeth_core_main.c
9898 ++++ b/drivers/s390/net/qeth_core_main.c
9899 +@@ -1308,7 +1308,7 @@ static void qeth_set_multiple_write_queues(struct qeth_card *card)
9900 + card->qdio.no_out_queues = 4;
9901 + }
9902 +
9903 +-static void qeth_update_from_chp_desc(struct qeth_card *card)
9904 ++static int qeth_update_from_chp_desc(struct qeth_card *card)
9905 + {
9906 + struct ccw_device *ccwdev;
9907 + struct channel_path_desc_fmt0 *chp_dsc;
9908 +@@ -1318,7 +1318,7 @@ static void qeth_update_from_chp_desc(struct qeth_card *card)
9909 + ccwdev = card->data.ccwdev;
9910 + chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
9911 + if (!chp_dsc)
9912 +- goto out;
9913 ++ return -ENOMEM;
9914 +
9915 + card->info.func_level = 0x4100 + chp_dsc->desc;
9916 + if (card->info.type == QETH_CARD_TYPE_IQD)
9917 +@@ -1333,6 +1333,7 @@ out:
9918 + kfree(chp_dsc);
9919 + QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
9920 + QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
9921 ++ return 0;
9922 + }
9923 +
9924 + static void qeth_init_qdio_info(struct qeth_card *card)
9925 +@@ -4986,7 +4987,9 @@ int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
9926 +
9927 + QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
9928 + atomic_set(&card->force_alloc_skb, 0);
9929 +- qeth_update_from_chp_desc(card);
9930 ++ rc = qeth_update_from_chp_desc(card);
9931 ++ if (rc)
9932 ++ return rc;
9933 + retry:
9934 + if (retries < 3)
9935 + QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
9936 +@@ -5641,7 +5644,9 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
9937 + }
9938 +
9939 + qeth_setup_card(card);
9940 +- qeth_update_from_chp_desc(card);
9941 ++ rc = qeth_update_from_chp_desc(card);
9942 ++ if (rc)
9943 ++ goto err_chp_desc;
9944 +
9945 + card->dev = qeth_alloc_netdev(card);
9946 + if (!card->dev) {
9947 +@@ -5676,6 +5681,7 @@ err_disc:
9948 + qeth_core_free_discipline(card);
9949 + err_load:
9950 + free_netdev(card->dev);
9951 ++err_chp_desc:
9952 + err_card:
9953 + qeth_core_free_card(card);
9954 + err_dev:
9955 +diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
9956 +index 17b45a0c7bc3..3611a4ef0d15 100644
9957 +--- a/drivers/scsi/libsas/sas_expander.c
9958 ++++ b/drivers/scsi/libsas/sas_expander.c
9959 +@@ -2052,6 +2052,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
9960 + if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
9961 + phy->phy_state = PHY_EMPTY;
9962 + sas_unregister_devs_sas_addr(dev, phy_id, last);
9963 ++ /*
9964 ++ * Even though the PHY is empty, for convenience we discover
9965 ++ * the PHY to update the PHY info, like negotiated linkrate.
9966 ++ */
9967 ++ sas_ex_phy_discover(dev, phy_id);
9968 + return res;
9969 + } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
9970 + dev_type_flutter(type, phy->attached_dev_type)) {
9971 +diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
9972 +index 2e3949c6cd07..25553e7ba85c 100644
9973 +--- a/drivers/scsi/lpfc/lpfc_ct.c
9974 ++++ b/drivers/scsi/lpfc/lpfc_ct.c
9975 +@@ -2005,8 +2005,11 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
9976 + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
9977 + memset(ae, 0, 256);
9978 +
9979 ++ /* This string MUST be consistent with other FC platforms
9980 ++ * supported by Broadcom.
9981 ++ */
9982 + strncpy(ae->un.AttrString,
9983 +- "Broadcom Inc.",
9984 ++ "Emulex Corporation",
9985 + sizeof(ae->un.AttrString));
9986 + len = strnlen(ae->un.AttrString,
9987 + sizeof(ae->un.AttrString));
9988 +@@ -2360,10 +2363,11 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
9989 + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
9990 + memset(ae, 0, 32);
9991 +
9992 +- ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
9993 +- ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
9994 +- ae->un.AttrTypes[6] = 0x01; /* Type 40 - NVME */
9995 +- ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
9996 ++ ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
9997 ++ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
9998 ++ if (vport->nvmei_support || vport->phba->nvmet_support)
9999 ++ ae->un.AttrTypes[6] = 0x01; /* Type 0x28 - NVME */
10000 ++ ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
10001 + size = FOURBYTES + 32;
10002 + ad->AttrLen = cpu_to_be16(size);
10003 + ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES);
10004 +@@ -2673,9 +2677,11 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
10005 + ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
10006 + memset(ae, 0, 32);
10007 +
10008 +- ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
10009 +- ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
10010 +- ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
10011 ++ ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
10012 ++ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
10013 ++ if (vport->phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
10014 ++ ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */
10015 ++ ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
10016 + size = FOURBYTES + 32;
10017 + ad->AttrLen = cpu_to_be16(size);
10018 + ad->AttrType = cpu_to_be16(RPRT_ACTIVE_FC4_TYPES);
10019 +diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
10020 +index aa4961a2caf8..75e9d46d44d4 100644
10021 +--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
10022 ++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
10023 +@@ -932,7 +932,11 @@ lpfc_linkdown(struct lpfc_hba *phba)
10024 + }
10025 + }
10026 + lpfc_destroy_vport_work_array(phba, vports);
10027 +- /* Clean up any firmware default rpi's */
10028 ++
10029 ++ /* Clean up any SLI3 firmware default rpi's */
10030 ++ if (phba->sli_rev > LPFC_SLI_REV3)
10031 ++ goto skip_unreg_did;
10032 ++
10033 + mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10034 + if (mb) {
10035 + lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
10036 +@@ -944,6 +948,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
10037 + }
10038 + }
10039 +
10040 ++ skip_unreg_did:
10041 + /* Setup myDID for link up if we are in pt2pt mode */
10042 + if (phba->pport->fc_flag & FC_PT2PT) {
10043 + mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10044 +@@ -4868,6 +4873,10 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
10045 + * accept PLOGIs after unreg_rpi_cmpl
10046 + */
10047 + acc_plogi = 0;
10048 ++ } else if (vport->load_flag & FC_UNLOADING) {
10049 ++ mbox->ctx_ndlp = NULL;
10050 ++ mbox->mbox_cmpl =
10051 ++ lpfc_sli_def_mbox_cmpl;
10052 + } else {
10053 + mbox->ctx_ndlp = ndlp;
10054 + mbox->mbox_cmpl =
10055 +@@ -4979,6 +4988,10 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
10056 + LPFC_MBOXQ_t *mbox;
10057 + int rc;
10058 +
10059 ++ /* Unreg DID is an SLI3 operation. */
10060 ++ if (phba->sli_rev > LPFC_SLI_REV3)
10061 ++ return;
10062 ++
10063 + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10064 + if (mbox) {
10065 + lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
10066 +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
10067 +index 7fcdaed3fa94..46e155d1fa15 100644
10068 +--- a/drivers/scsi/lpfc/lpfc_init.c
10069 ++++ b/drivers/scsi/lpfc/lpfc_init.c
10070 +@@ -3245,6 +3245,13 @@ void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
10071 + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
10072 + lpfc_destroy_expedite_pool(phba);
10073 +
10074 ++ if (!(phba->pport->load_flag & FC_UNLOADING)) {
10075 ++ lpfc_sli_flush_fcp_rings(phba);
10076 ++
10077 ++ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
10078 ++ lpfc_sli_flush_nvme_rings(phba);
10079 ++ }
10080 ++
10081 + hwq_count = phba->cfg_hdw_queue;
10082 +
10083 + for (i = 0; i < hwq_count; i++) {
10084 +@@ -3611,8 +3618,6 @@ lpfc_io_free(struct lpfc_hba *phba)
10085 + struct lpfc_sli4_hdw_queue *qp;
10086 + int idx;
10087 +
10088 +- spin_lock_irq(&phba->hbalock);
10089 +-
10090 + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10091 + qp = &phba->sli4_hba.hdwq[idx];
10092 + /* Release all the lpfc_nvme_bufs maintained by this host. */
10093 +@@ -3642,8 +3647,6 @@ lpfc_io_free(struct lpfc_hba *phba)
10094 + }
10095 + spin_unlock(&qp->io_buf_list_get_lock);
10096 + }
10097 +-
10098 +- spin_unlock_irq(&phba->hbalock);
10099 + }
10100 +
10101 + /**
10102 +diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
10103 +index 1aa00d2c3f74..9defff711884 100644
10104 +--- a/drivers/scsi/lpfc/lpfc_nvme.c
10105 ++++ b/drivers/scsi/lpfc/lpfc_nvme.c
10106 +@@ -2080,15 +2080,15 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
10107 + lpfc_nvme_template.max_hw_queues =
10108 + phba->sli4_hba.num_present_cpu;
10109 +
10110 ++ if (!IS_ENABLED(CONFIG_NVME_FC))
10111 ++ return ret;
10112 ++
10113 + /* localport is allocated from the stack, but the registration
10114 + * call allocates heap memory as well as the private area.
10115 + */
10116 +-#if (IS_ENABLED(CONFIG_NVME_FC))
10117 ++
10118 + ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
10119 + &vport->phba->pcidev->dev, &localport);
10120 +-#else
10121 +- ret = -ENOMEM;
10122 +-#endif
10123 + if (!ret) {
10124 + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
10125 + "6005 Successfully registered local "
10126 +diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
10127 +index a497b2c0cb79..25501d4605ff 100644
10128 +--- a/drivers/scsi/lpfc/lpfc_scsi.c
10129 ++++ b/drivers/scsi/lpfc/lpfc_scsi.c
10130 +@@ -3670,7 +3670,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
10131 + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
10132 + if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
10133 + cpu = smp_processor_id();
10134 +- if (cpu < LPFC_CHECK_CPU_CNT)
10135 ++ if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
10136 + phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
10137 + }
10138 + #endif
10139 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
10140 +index 57b4a463b589..dc933b6d7800 100644
10141 +--- a/drivers/scsi/lpfc/lpfc_sli.c
10142 ++++ b/drivers/scsi/lpfc/lpfc_sli.c
10143 +@@ -2502,8 +2502,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
10144 + } else {
10145 + ndlp->nlp_flag &= ~NLP_UNREG_INP;
10146 + }
10147 ++ pmb->ctx_ndlp = NULL;
10148 + }
10149 +- pmb->ctx_ndlp = NULL;
10150 + }
10151 +
10152 + /* Check security permission status on INIT_LINK mailbox command */
10153 +@@ -7652,12 +7652,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
10154 + phba->cfg_xri_rebalancing = 0;
10155 + }
10156 +
10157 +- /* Arm the CQs and then EQs on device */
10158 +- lpfc_sli4_arm_cqeq_intr(phba);
10159 +-
10160 +- /* Indicate device interrupt mode */
10161 +- phba->sli4_hba.intr_enable = 1;
10162 +-
10163 + /* Allow asynchronous mailbox command to go through */
10164 + spin_lock_irq(&phba->hbalock);
10165 + phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
10166 +@@ -7726,6 +7720,12 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
10167 + phba->trunk_link.link3.state = LPFC_LINK_DOWN;
10168 + spin_unlock_irq(&phba->hbalock);
10169 +
10170 ++ /* Arm the CQs and then EQs on device */
10171 ++ lpfc_sli4_arm_cqeq_intr(phba);
10172 ++
10173 ++ /* Indicate device interrupt mode */
10174 ++ phba->sli4_hba.intr_enable = 1;
10175 ++
10176 + if (!(phba->hba_flag & HBA_FCOE_MODE) &&
10177 + (phba->hba_flag & LINK_DISABLED)) {
10178 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
10179 +diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
10180 +index 6ca583bdde23..29b51c466721 100644
10181 +--- a/drivers/scsi/qedf/qedf_io.c
10182 ++++ b/drivers/scsi/qedf/qedf_io.c
10183 +@@ -902,6 +902,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
10184 + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
10185 + QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
10186 + kref_put(&io_req->refcount, qedf_release_cmd);
10187 ++ return -EINVAL;
10188 + }
10189 +
10190 + /* Obtain free SQE */
10191 +diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
10192 +index 6d6d6013e35b..bf371e7b957d 100644
10193 +--- a/drivers/scsi/qedi/qedi_iscsi.c
10194 ++++ b/drivers/scsi/qedi/qedi_iscsi.c
10195 +@@ -1000,6 +1000,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
10196 + qedi_ep = ep->dd_data;
10197 + qedi = qedi_ep->qedi;
10198 +
10199 ++ if (qedi_ep->state == EP_STATE_OFLDCONN_START)
10200 ++ goto ep_exit_recover;
10201 ++
10202 + flush_work(&qedi_ep->offload_work);
10203 +
10204 + if (qedi_ep->conn) {
10205 +diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
10206 +index 69bbea9239cc..add17843148d 100644
10207 +--- a/drivers/scsi/qla2xxx/qla_isr.c
10208 ++++ b/drivers/scsi/qla2xxx/qla_isr.c
10209 +@@ -3475,7 +3475,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
10210 + ql_log(ql_log_fatal, vha, 0x00c8,
10211 + "Failed to allocate memory for ha->msix_entries.\n");
10212 + ret = -ENOMEM;
10213 +- goto msix_out;
10214 ++ goto free_irqs;
10215 + }
10216 + ha->flags.msix_enabled = 1;
10217 +
10218 +@@ -3558,6 +3558,10 @@ msix_register_fail:
10219 +
10220 + msix_out:
10221 + return ret;
10222 ++
10223 ++free_irqs:
10224 ++ pci_free_irq_vectors(ha->pdev);
10225 ++ goto msix_out;
10226 + }
10227 +
10228 + int
10229 +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
10230 +index 697eee1d8847..b210a8296c27 100644
10231 +--- a/drivers/scsi/qla2xxx/qla_target.c
10232 ++++ b/drivers/scsi/qla2xxx/qla_target.c
10233 +@@ -680,7 +680,6 @@ done:
10234 + void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
10235 + {
10236 + fc_port_t *t;
10237 +- unsigned long flags;
10238 +
10239 + switch (e->u.nack.type) {
10240 + case SRB_NACK_PRLI:
10241 +@@ -693,10 +692,8 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
10242 + if (t) {
10243 + ql_log(ql_log_info, vha, 0xd034,
10244 + "%s create sess success %p", __func__, t);
10245 +- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
10246 + /* create sess has an extra kref */
10247 + vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
10248 +- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
10249 + }
10250 + break;
10251 + }
10252 +@@ -708,9 +705,6 @@ void qla24xx_delete_sess_fn(struct work_struct *work)
10253 + {
10254 + fc_port_t *fcport = container_of(work, struct fc_port, del_work);
10255 + struct qla_hw_data *ha = fcport->vha->hw;
10256 +- unsigned long flags;
10257 +-
10258 +- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
10259 +
10260 + if (fcport->se_sess) {
10261 + ha->tgt.tgt_ops->shutdown_sess(fcport);
10262 +@@ -718,7 +712,6 @@ void qla24xx_delete_sess_fn(struct work_struct *work)
10263 + } else {
10264 + qlt_unreg_sess(fcport);
10265 + }
10266 +- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
10267 + }
10268 +
10269 + /*
10270 +@@ -787,8 +780,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
10271 + fcport->port_name, sess->loop_id);
10272 + sess->local = 0;
10273 + }
10274 +- ha->tgt.tgt_ops->put_sess(sess);
10275 + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
10276 ++
10277 ++ ha->tgt.tgt_ops->put_sess(sess);
10278 + }
10279 +
10280 + /*
10281 +@@ -4242,9 +4236,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
10282 + /*
10283 + * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
10284 + */
10285 +- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
10286 + ha->tgt.tgt_ops->put_sess(sess);
10287 +- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
10288 + return;
10289 +
10290 + out_term:
10291 +@@ -4261,9 +4253,7 @@ out_term:
10292 + target_free_tag(sess->se_sess, &cmd->se_cmd);
10293 + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
10294 +
10295 +- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
10296 + ha->tgt.tgt_ops->put_sess(sess);
10297 +- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
10298 + }
10299 +
10300 + static void qlt_do_work(struct work_struct *work)
10301 +@@ -4472,9 +4462,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
10302 + if (!cmd) {
10303 + ql_dbg(ql_dbg_io, vha, 0x3062,
10304 + "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
10305 +- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
10306 + ha->tgt.tgt_ops->put_sess(sess);
10307 +- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
10308 + return -EBUSY;
10309 + }
10310 +
10311 +@@ -6318,17 +6306,19 @@ static void qlt_abort_work(struct qla_tgt *tgt,
10312 + }
10313 +
10314 + rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
10315 +- ha->tgt.tgt_ops->put_sess(sess);
10316 + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
10317 +
10318 ++ ha->tgt.tgt_ops->put_sess(sess);
10319 ++
10320 + if (rc != 0)
10321 + goto out_term;
10322 + return;
10323 +
10324 + out_term2:
10325 ++ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
10326 ++
10327 + if (sess)
10328 + ha->tgt.tgt_ops->put_sess(sess);
10329 +- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
10330 +
10331 + out_term:
10332 + spin_lock_irqsave(&ha->hardware_lock, flags);
10333 +@@ -6386,9 +6376,10 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
10334 + scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
10335 +
10336 + rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
10337 +- ha->tgt.tgt_ops->put_sess(sess);
10338 + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
10339 +
10340 ++ ha->tgt.tgt_ops->put_sess(sess);
10341 ++
10342 + if (rc != 0)
10343 + goto out_term;
10344 + return;
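All of the qla_target.c hunks above apply one rule: drop the session reference only after releasing sess_lock, because the final kref_put() can invoke a release callback that must not run under that spinlock. A userspace sketch of the ordering with a pthread mutex standing in for the spinlock (types and names are illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t sess_lock = PTHREAD_MUTEX_INITIALIZER;

    struct sess {
        int refcount;
    };

    static void release_session(struct sess *s)
    {
        /* may sleep or re-take sess_lock in the real driver */
        free(s);
    }

    static void put_sess(struct sess *s)
    {
        if (--s->refcount == 0)
            release_session(s);
    }

    static void handle_work(struct sess *s)
    {
        pthread_mutex_lock(&sess_lock);
        /* ... look up and use the session under the lock ... */
        pthread_mutex_unlock(&sess_lock);

        put_sess(s);    /* last reference may free: never under sess_lock */
    }

    int main(void)
    {
        struct sess *s = malloc(sizeof(*s));
        s->refcount = 1;
        handle_work(s);
        puts("done");
        return 0;
    }
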
10345 +diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
10346 +index 8a3075d17c63..e58becb790fa 100644
10347 +--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
10348 ++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
10349 +@@ -359,7 +359,6 @@ static void tcm_qla2xxx_put_sess(struct fc_port *sess)
10350 + if (!sess)
10351 + return;
10352 +
10353 +- assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
10354 + kref_put(&sess->sess_kref, tcm_qla2xxx_release_session);
10355 + }
10356 +
10357 +@@ -374,8 +373,9 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
10358 +
10359 + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
10360 + target_sess_cmd_list_set_waiting(se_sess);
10361 +- tcm_qla2xxx_put_sess(sess);
10362 + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
10363 ++
10364 ++ tcm_qla2xxx_put_sess(sess);
10365 + }
10366 +
10367 + static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
10368 +@@ -399,6 +399,8 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
10369 + cmd->se_cmd.transport_state,
10370 + cmd->se_cmd.t_state,
10371 + cmd->se_cmd.se_cmd_flags);
10372 ++ transport_generic_request_failure(&cmd->se_cmd,
10373 ++ TCM_CHECK_CONDITION_ABORT_CMD);
10374 + return 0;
10375 + }
10376 + cmd->trc_flags |= TRC_XFR_RDY;
10377 +@@ -829,7 +831,6 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess)
10378 +
10379 + static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess)
10380 + {
10381 +- assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
10382 + target_sess_cmd_list_set_waiting(sess->se_sess);
10383 + }
10384 +
10385 +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
10386 +index 6e4f4931ae17..8c674eca09f1 100644
10387 +--- a/drivers/scsi/qla4xxx/ql4_os.c
10388 ++++ b/drivers/scsi/qla4xxx/ql4_os.c
10389 +@@ -5930,7 +5930,7 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
10390 + val = rd_nvram_byte(ha, sec_addr);
10391 + if (val & BIT_7)
10392 + ddb_index[1] = (val & 0x7f);
10393 +-
10394 ++ goto exit_boot_info;
10395 + } else if (is_qla80XX(ha)) {
10396 + buf = dma_alloc_coherent(&ha->pdev->dev, size,
10397 + &buf_dma, GFP_KERNEL);
10398 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
10399 +index 2b2bc4b49d78..b894786df6c2 100644
10400 +--- a/drivers/scsi/sd.c
10401 ++++ b/drivers/scsi/sd.c
10402 +@@ -2603,7 +2603,6 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
10403 + int res;
10404 + struct scsi_device *sdp = sdkp->device;
10405 + struct scsi_mode_data data;
10406 +- int disk_ro = get_disk_ro(sdkp->disk);
10407 + int old_wp = sdkp->write_prot;
10408 +
10409 + set_disk_ro(sdkp->disk, 0);
10410 +@@ -2644,7 +2643,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
10411 + "Test WP failed, assume Write Enabled\n");
10412 + } else {
10413 + sdkp->write_prot = ((data.device_specific & 0x80) != 0);
10414 +- set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro);
10415 ++ set_disk_ro(sdkp->disk, sdkp->write_prot);
10416 + if (sdkp->first_scan || old_wp != sdkp->write_prot) {
10417 + sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
10418 + sdkp->write_prot ? "on" : "off");
10419 +diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
10420 +index 0e855b5afe82..2f592df921d9 100644
10421 +--- a/drivers/scsi/ufs/ufs-hisi.c
10422 ++++ b/drivers/scsi/ufs/ufs-hisi.c
10423 +@@ -587,6 +587,10 @@ static int ufs_hisi_init_common(struct ufs_hba *hba)
10424 + ufshcd_set_variant(hba, host);
10425 +
10426 + host->rst = devm_reset_control_get(dev, "rst");
10427 ++ if (IS_ERR(host->rst)) {
10428 ++ dev_err(dev, "%s: failed to get reset control\n", __func__);
10429 ++ return PTR_ERR(host->rst);
10430 ++ }
10431 +
10432 + ufs_hisi_set_pm_lvl(hba);
10433 +
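The ufs-hisi.c hunk adds the missing IS_ERR() check on devm_reset_control_get(), which reports failure by encoding an errno inside the returned pointer rather than returning NULL. A standalone sketch of the encoding convention (a simplified re-implementation for illustration, not the kernel's headers):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *get_reset_control(int fail)
    {
        static int dummy;
        return fail ? ERR_PTR(-2 /* -ENOENT */) : &dummy;
    }

    int main(void)
    {
        void *rst = get_reset_control(1);

        if (IS_ERR(rst)) {          /* without this check, the bogus  */
            printf("err=%ld\n",     /* pointer would be dereferenced  */
                   PTR_ERR(rst));   /* later as if it were valid      */
            return 1;
        }
        return 0;
    }
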
10434 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
10435 +index e040f9dd9ff3..5ba49c8cd2a3 100644
10436 +--- a/drivers/scsi/ufs/ufshcd.c
10437 ++++ b/drivers/scsi/ufs/ufshcd.c
10438 +@@ -6294,19 +6294,19 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
10439 + goto out;
10440 + }
10441 +
10442 +- if (hba->vreg_info.vcc)
10443 ++ if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
10444 + icc_level = ufshcd_get_max_icc_level(
10445 + hba->vreg_info.vcc->max_uA,
10446 + POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
10447 + &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
10448 +
10449 +- if (hba->vreg_info.vccq)
10450 ++ if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
10451 + icc_level = ufshcd_get_max_icc_level(
10452 + hba->vreg_info.vccq->max_uA,
10453 + icc_level,
10454 + &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
10455 +
10456 +- if (hba->vreg_info.vccq2)
10457 ++ if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
10458 + icc_level = ufshcd_get_max_icc_level(
10459 + hba->vreg_info.vccq2->max_uA,
10460 + icc_level,
10461 +@@ -7004,6 +7004,15 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
10462 + if (!vreg)
10463 + return 0;
10464 +
10465 ++ /*
10466 ++ * "set_load" operation shall be required on those regulators
10467 ++ * which specifically configured current limitation. Otherwise
10468 ++ * zero max_uA may cause unexpected behavior when regulator is
10469 ++ * enabled or set as high power mode.
10470 ++ */
10471 ++ if (!vreg->max_uA)
10472 ++ return 0;
10473 ++
10474 + ret = regulator_set_load(vreg->reg, ua);
10475 + if (ret < 0) {
10476 + dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
10477 +@@ -7039,12 +7048,15 @@ static int ufshcd_config_vreg(struct device *dev,
10478 + name = vreg->name;
10479 +
10480 + if (regulator_count_voltages(reg) > 0) {
10481 +- min_uV = on ? vreg->min_uV : 0;
10482 +- ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
10483 +- if (ret) {
10484 +- dev_err(dev, "%s: %s set voltage failed, err=%d\n",
10485 ++ if (vreg->min_uV && vreg->max_uV) {
10486 ++ min_uV = on ? vreg->min_uV : 0;
10487 ++ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
10488 ++ if (ret) {
10489 ++ dev_err(dev,
10490 ++ "%s: %s set voltage failed, err=%d\n",
10491 + __func__, name, ret);
10492 +- goto out;
10493 ++ goto out;
10494 ++ }
10495 + }
10496 +
10497 + uA_load = on ? vreg->max_uA : 0;
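The ufshcd.c hunks make every regulator call conditional on a non-zero max_uA (and, for regulator_set_voltage(), on non-zero min/max voltages), since handing the regulator framework a zero limit has unexpected results. The shape of the guard, sketched with stand-in helpers:

    #include <stdio.h>

    struct vreg {
        const char *name;
        int min_uV, max_uV, max_uA;
    };

    static int regulator_set_load_stub(const char *name, int ua)
    {
        printf("%s: load %d uA\n", name, ua);
        return 0;
    }

    static int config_vreg_load(struct vreg *v, int ua)
    {
        if (!v)
            return 0;
        if (!v->max_uA)      /* no current limit configured: skip set_load */
            return 0;
        return regulator_set_load_stub(v->name, ua);
    }

    int main(void)
    {
        struct vreg vcc  = { "vcc",  0, 0, 500000 };
        struct vreg vccq = { "vccq", 0, 0, 0 };     /* no limit: skipped */

        config_vreg_load(&vcc, 500000);
        config_vreg_load(&vccq, 200000);
        return 0;
    }
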
10498 +diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
10499 +index 71f094c9ec68..f3585777324c 100644
10500 +--- a/drivers/slimbus/qcom-ngd-ctrl.c
10501 ++++ b/drivers/slimbus/qcom-ngd-ctrl.c
10502 +@@ -1342,6 +1342,10 @@ static int of_qcom_slim_ngd_register(struct device *parent,
10503 + return -ENOMEM;
10504 +
10505 + ngd->pdev = platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id);
10506 ++ if (!ngd->pdev) {
10507 ++ kfree(ngd);
10508 ++ return -ENOMEM;
10509 ++ }
10510 + ngd->id = id;
10511 + ngd->pdev->dev.parent = parent;
10512 + ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME;
10513 +diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
10514 +index fffc21cd5f79..b3173ebddade 100644
10515 +--- a/drivers/spi/atmel-quadspi.c
10516 ++++ b/drivers/spi/atmel-quadspi.c
10517 +@@ -570,7 +570,8 @@ static int atmel_qspi_remove(struct platform_device *pdev)
10518 +
10519 + static int __maybe_unused atmel_qspi_suspend(struct device *dev)
10520 + {
10521 +- struct atmel_qspi *aq = dev_get_drvdata(dev);
10522 ++ struct spi_controller *ctrl = dev_get_drvdata(dev);
10523 ++ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
10524 +
10525 + clk_disable_unprepare(aq->qspick);
10526 + clk_disable_unprepare(aq->pclk);
10527 +@@ -580,7 +581,8 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev)
10528 +
10529 + static int __maybe_unused atmel_qspi_resume(struct device *dev)
10530 + {
10531 +- struct atmel_qspi *aq = dev_get_drvdata(dev);
10532 ++ struct spi_controller *ctrl = dev_get_drvdata(dev);
10533 ++ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
10534 +
10535 + clk_prepare_enable(aq->pclk);
10536 + clk_prepare_enable(aq->qspick);
10537 +diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
10538 +index 6ec647bbba77..a81ae29aa68a 100644
10539 +--- a/drivers/spi/spi-imx.c
10540 ++++ b/drivers/spi/spi-imx.c
10541 +@@ -1494,7 +1494,7 @@ static int spi_imx_transfer(struct spi_device *spi,
10542 +
10543 + /* flush rxfifo before transfer */
10544 + while (spi_imx->devtype_data->rx_available(spi_imx))
10545 +- spi_imx->rx(spi_imx);
10546 ++ readl(spi_imx->base + MXC_CSPIRXDATA);
10547 +
10548 + if (spi_imx->slave_mode)
10549 + return spi_imx_pio_transfer_slave(spi, transfer);
10550 +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
10551 +index b6ddba833d02..d2076f2f468f 100644
10552 +--- a/drivers/spi/spi-pxa2xx.c
10553 ++++ b/drivers/spi/spi-pxa2xx.c
10554 +@@ -884,10 +884,14 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
10555 +
10556 + rate = min_t(int, ssp_clk, rate);
10557 +
10558 ++ /*
10559 ++	 * Calculate the divisor for the SCR (Serial Clock Rate), rounding up
10560 ++	 * so that the SSP transmission rate never exceeds the device rate
10561 ++ */
10562 + if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
10563 +- return (ssp_clk / (2 * rate) - 1) & 0xff;
10564 ++ return (DIV_ROUND_UP(ssp_clk, 2 * rate) - 1) & 0xff;
10565 + else
10566 +- return (ssp_clk / rate - 1) & 0xfff;
10567 ++ return (DIV_ROUND_UP(ssp_clk, rate) - 1) & 0xfff;
10568 + }
10569 +
10570 + static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
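The spi-pxa2xx.c hunk swaps plain integer division for DIV_ROUND_UP() when computing the SCR divisor. Truncating division can pick a divisor that is too small, yielding a bus clock above the requested rate; rounding the quotient up errs toward a slower, safe clock. A tiny worked example (the clock values are illustrative):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int ssp_clk = 100000000;   /* 100 MHz source      */
        unsigned int rate = 5333333;        /* requested ~5.33 MHz */

        /* old: 100e6/5333333 - 1 = 17 -> 100e6/18 = 5.55 MHz (too fast) */
        unsigned int old_scr = ssp_clk / rate - 1;
        /* new: ceil(100e6/5333333) - 1 = 18 -> 100e6/19 = 5.26 MHz (ok) */
        unsigned int new_scr = DIV_ROUND_UP(ssp_clk, rate) - 1;

        printf("old SCR %u -> %u Hz\n", old_scr, ssp_clk / (old_scr + 1));
        printf("new SCR %u -> %u Hz\n", new_scr, ssp_clk / (new_scr + 1));
        return 0;
    }
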
10571 +diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
10572 +index 556870dcdf79..5d35a82945cd 100644
10573 +--- a/drivers/spi/spi-rspi.c
10574 ++++ b/drivers/spi/spi-rspi.c
10575 +@@ -271,7 +271,8 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
10576 + /* Sets parity, interrupt mask */
10577 + rspi_write8(rspi, 0x00, RSPI_SPCR2);
10578 +
10579 +- /* Sets SPCMD */
10580 ++ /* Resets sequencer */
10581 ++ rspi_write8(rspi, 0, RSPI_SPSCR);
10582 + rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
10583 + rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
10584 +
10585 +@@ -315,7 +316,8 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
10586 + rspi_write8(rspi, 0x00, RSPI_SSLND);
10587 + rspi_write8(rspi, 0x00, RSPI_SPND);
10588 +
10589 +- /* Sets SPCMD */
10590 ++ /* Resets sequencer */
10591 ++ rspi_write8(rspi, 0, RSPI_SPSCR);
10592 + rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
10593 + rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
10594 +
10595 +@@ -366,7 +368,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
10596 + /* Sets buffer to allow normal operation */
10597 + rspi_write8(rspi, 0x00, QSPI_SPBFCR);
10598 +
10599 +- /* Sets SPCMD */
10600 ++ /* Resets sequencer */
10601 ++ rspi_write8(rspi, 0, RSPI_SPSCR);
10602 + rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
10603 +
10604 + /* Sets RSPI mode */
10605 +diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
10606 +index 3b2a9a6b990d..0b9a8bddb939 100644
10607 +--- a/drivers/spi/spi-stm32-qspi.c
10608 ++++ b/drivers/spi/spi-stm32-qspi.c
10609 +@@ -93,6 +93,7 @@ struct stm32_qspi_flash {
10610 +
10611 + struct stm32_qspi {
10612 + struct device *dev;
10613 ++ struct spi_controller *ctrl;
10614 + void __iomem *io_base;
10615 + void __iomem *mm_base;
10616 + resource_size_t mm_size;
10617 +@@ -397,6 +398,7 @@ static void stm32_qspi_release(struct stm32_qspi *qspi)
10618 + writel_relaxed(0, qspi->io_base + QSPI_CR);
10619 + mutex_destroy(&qspi->lock);
10620 + clk_disable_unprepare(qspi->clk);
10621 ++ spi_master_put(qspi->ctrl);
10622 + }
10623 +
10624 + static int stm32_qspi_probe(struct platform_device *pdev)
10625 +@@ -413,43 +415,54 @@ static int stm32_qspi_probe(struct platform_device *pdev)
10626 + return -ENOMEM;
10627 +
10628 + qspi = spi_controller_get_devdata(ctrl);
10629 ++ qspi->ctrl = ctrl;
10630 +
10631 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
10632 + qspi->io_base = devm_ioremap_resource(dev, res);
10633 +- if (IS_ERR(qspi->io_base))
10634 +- return PTR_ERR(qspi->io_base);
10635 ++ if (IS_ERR(qspi->io_base)) {
10636 ++ ret = PTR_ERR(qspi->io_base);
10637 ++ goto err;
10638 ++ }
10639 +
10640 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
10641 + qspi->mm_base = devm_ioremap_resource(dev, res);
10642 +- if (IS_ERR(qspi->mm_base))
10643 +- return PTR_ERR(qspi->mm_base);
10644 ++ if (IS_ERR(qspi->mm_base)) {
10645 ++ ret = PTR_ERR(qspi->mm_base);
10646 ++ goto err;
10647 ++ }
10648 +
10649 + qspi->mm_size = resource_size(res);
10650 +- if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ)
10651 +- return -EINVAL;
10652 ++ if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) {
10653 ++ ret = -EINVAL;
10654 ++ goto err;
10655 ++ }
10656 +
10657 + irq = platform_get_irq(pdev, 0);
10658 + ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
10659 + dev_name(dev), qspi);
10660 + if (ret) {
10661 + dev_err(dev, "failed to request irq\n");
10662 +- return ret;
10663 ++ goto err;
10664 + }
10665 +
10666 + init_completion(&qspi->data_completion);
10667 +
10668 + qspi->clk = devm_clk_get(dev, NULL);
10669 +- if (IS_ERR(qspi->clk))
10670 +- return PTR_ERR(qspi->clk);
10671 ++ if (IS_ERR(qspi->clk)) {
10672 ++ ret = PTR_ERR(qspi->clk);
10673 ++ goto err;
10674 ++ }
10675 +
10676 + qspi->clk_rate = clk_get_rate(qspi->clk);
10677 +- if (!qspi->clk_rate)
10678 +- return -EINVAL;
10679 ++ if (!qspi->clk_rate) {
10680 ++ ret = -EINVAL;
10681 ++ goto err;
10682 ++ }
10683 +
10684 + ret = clk_prepare_enable(qspi->clk);
10685 + if (ret) {
10686 + dev_err(dev, "can not enable the clock\n");
10687 +- return ret;
10688 ++ goto err;
10689 + }
10690 +
10691 + rstc = devm_reset_control_get_exclusive(dev, NULL);
10692 +@@ -472,14 +485,11 @@ static int stm32_qspi_probe(struct platform_device *pdev)
10693 + ctrl->dev.of_node = dev->of_node;
10694 +
10695 + ret = devm_spi_register_master(dev, ctrl);
10696 +- if (ret)
10697 +- goto err_spi_register;
10698 +-
10699 +- return 0;
10700 ++ if (!ret)
10701 ++ return 0;
10702 +
10703 +-err_spi_register:
10704 ++err:
10705 + stm32_qspi_release(qspi);
10706 +-
10707 + return ret;
10708 + }
10709 +
10710 +diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
10711 +index a76acedd7e2f..a1888dc6a938 100644
10712 +--- a/drivers/spi/spi-tegra114.c
10713 ++++ b/drivers/spi/spi-tegra114.c
10714 +@@ -1067,27 +1067,19 @@ static int tegra_spi_probe(struct platform_device *pdev)
10715 +
10716 + spi_irq = platform_get_irq(pdev, 0);
10717 + tspi->irq = spi_irq;
10718 +- ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
10719 +- tegra_spi_isr_thread, IRQF_ONESHOT,
10720 +- dev_name(&pdev->dev), tspi);
10721 +- if (ret < 0) {
10722 +- dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
10723 +- tspi->irq);
10724 +- goto exit_free_master;
10725 +- }
10726 +
10727 + tspi->clk = devm_clk_get(&pdev->dev, "spi");
10728 + if (IS_ERR(tspi->clk)) {
10729 + dev_err(&pdev->dev, "can not get clock\n");
10730 + ret = PTR_ERR(tspi->clk);
10731 +- goto exit_free_irq;
10732 ++ goto exit_free_master;
10733 + }
10734 +
10735 + tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
10736 + if (IS_ERR(tspi->rst)) {
10737 + dev_err(&pdev->dev, "can not get reset\n");
10738 + ret = PTR_ERR(tspi->rst);
10739 +- goto exit_free_irq;
10740 ++ goto exit_free_master;
10741 + }
10742 +
10743 + tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
10744 +@@ -1095,7 +1087,7 @@ static int tegra_spi_probe(struct platform_device *pdev)
10745 +
10746 + ret = tegra_spi_init_dma_param(tspi, true);
10747 + if (ret < 0)
10748 +- goto exit_free_irq;
10749 ++ goto exit_free_master;
10750 + ret = tegra_spi_init_dma_param(tspi, false);
10751 + if (ret < 0)
10752 + goto exit_rx_dma_free;
10753 +@@ -1117,18 +1109,32 @@ static int tegra_spi_probe(struct platform_device *pdev)
10754 + dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
10755 + goto exit_pm_disable;
10756 + }
10757 ++
10758 ++ reset_control_assert(tspi->rst);
10759 ++ udelay(2);
10760 ++ reset_control_deassert(tspi->rst);
10761 + tspi->def_command1_reg = SPI_M_S;
10762 + tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
10763 + pm_runtime_put(&pdev->dev);
10764 ++ ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
10765 ++ tegra_spi_isr_thread, IRQF_ONESHOT,
10766 ++ dev_name(&pdev->dev), tspi);
10767 ++ if (ret < 0) {
10768 ++ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
10769 ++ tspi->irq);
10770 ++ goto exit_pm_disable;
10771 ++ }
10772 +
10773 + master->dev.of_node = pdev->dev.of_node;
10774 + ret = devm_spi_register_master(&pdev->dev, master);
10775 + if (ret < 0) {
10776 + dev_err(&pdev->dev, "can not register to master err %d\n", ret);
10777 +- goto exit_pm_disable;
10778 ++ goto exit_free_irq;
10779 + }
10780 + return ret;
10781 +
10782 ++exit_free_irq:
10783 ++ free_irq(spi_irq, tspi);
10784 + exit_pm_disable:
10785 + pm_runtime_disable(&pdev->dev);
10786 + if (!pm_runtime_status_suspended(&pdev->dev))
10787 +@@ -1136,8 +1142,6 @@ exit_pm_disable:
10788 + tegra_spi_deinit_dma_param(tspi, false);
10789 + exit_rx_dma_free:
10790 + tegra_spi_deinit_dma_param(tspi, true);
10791 +-exit_free_irq:
10792 +- free_irq(spi_irq, tspi);
10793 + exit_free_master:
10794 + spi_master_put(master);
10795 + return ret;
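The spi-tegra114.c reorder delays request_threaded_irq() until after the controller has been reset and its default command register programmed, so a stale pending interrupt cannot invoke the handler on uninitialized state; the error labels move to match. The "initialize first, expose the handler last" idea in miniature, with stand-ins for the hardware steps:

    #include <stdio.h>

    static int hw_ready;

    static void irq_handler(void)
    {
        /* would misbehave if hw_ready were still 0 */
        printf("irq: hw_ready=%d\n", hw_ready);
    }

    static void (*registered_handler)(void);

    static int probe(void)
    {
        /* 1. reset the controller, program defaults */
        hw_ready = 1;

        /* 2. only now publish the handler */
        registered_handler = irq_handler;
        return 0;
    }

    int main(void)
    {
        probe();
        if (registered_handler)
            registered_handler();   /* simulate an interrupt firing */
        return 0;
    }
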
10796 +diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
10797 +index fba3f180f233..8a5966963834 100644
10798 +--- a/drivers/spi/spi-topcliff-pch.c
10799 ++++ b/drivers/spi/spi-topcliff-pch.c
10800 +@@ -1299,18 +1299,27 @@ static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
10801 + dma->rx_buf_virt, dma->rx_buf_dma);
10802 + }
10803 +
10804 +-static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
10805 ++static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
10806 + struct pch_spi_data *data)
10807 + {
10808 + struct pch_spi_dma_ctrl *dma;
10809 ++ int ret;
10810 +
10811 + dma = &data->dma;
10812 ++ ret = 0;
10813 + /* Get Consistent memory for Tx DMA */
10814 + dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
10815 + PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
10816 ++ if (!dma->tx_buf_virt)
10817 ++ ret = -ENOMEM;
10818 ++
10819 + /* Get Consistent memory for Rx DMA */
10820 + dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
10821 + PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
10822 ++ if (!dma->rx_buf_virt)
10823 ++ ret = -ENOMEM;
10824 ++
10825 ++ return ret;
10826 + }
10827 +
10828 + static int pch_spi_pd_probe(struct platform_device *plat_dev)
10829 +@@ -1387,7 +1396,9 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
10830 +
10831 + if (use_dma) {
10832 + dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
10833 +- pch_alloc_dma_buf(board_dat, data);
10834 ++ ret = pch_alloc_dma_buf(board_dat, data);
10835 ++ if (ret)
10836 ++ goto err_spi_register_master;
10837 + }
10838 +
10839 + ret = spi_register_master(master);
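In spi-topcliff-pch.c, pch_alloc_dma_buf() is changed from void to int so the two dma_alloc_coherent() calls can report failure, and probe now unwinds instead of continuing with NULL DMA buffers. A small sketch of an allocator that attempts both buffers and aggregates the error code, as the hunk does (names illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct dma_bufs {
        void *tx, *rx;
    };

    static int alloc_dma_buf(struct dma_bufs *d, size_t size)
    {
        int ret = 0;

        d->tx = malloc(size);
        if (!d->tx)
            ret = -12;      /* -ENOMEM */

        d->rx = malloc(size);
        if (!d->rx)
            ret = -12;

        return ret;         /* caller unwinds on non-zero */
    }

    int main(void)
    {
        struct dma_bufs d;

        if (alloc_dma_buf(&d, 4096)) {
            fprintf(stderr, "DMA buffer allocation failed\n");
            return 1;
        }
        puts("buffers ready");
        free(d.tx);
        free(d.rx);
        return 0;
    }
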
10840 +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
10841 +index 93986f879b09..a83fcddf1dad 100644
10842 +--- a/drivers/spi/spi.c
10843 ++++ b/drivers/spi/spi.c
10844 +@@ -36,6 +36,8 @@
10845 +
10846 + #define CREATE_TRACE_POINTS
10847 + #include <trace/events/spi.h>
10848 ++EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
10849 ++EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
10850 +
10851 + #include "internals.h"
10852 +
10853 +@@ -1039,6 +1041,8 @@ static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
10854 + if (max_tx || max_rx) {
10855 + list_for_each_entry(xfer, &msg->transfers,
10856 + transfer_list) {
10857 ++ if (!xfer->len)
10858 ++ continue;
10859 + if (!xfer->tx_buf)
10860 + xfer->tx_buf = ctlr->dummy_tx;
10861 + if (!xfer->rx_buf)
10862 +@@ -2195,6 +2199,8 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
10863 + */
10864 + cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
10865 + GPIOD_OUT_LOW);
10866 ++ if (IS_ERR(cs[i]))
10867 ++ return PTR_ERR(cs[i]);
10868 +
10869 + if (cs[i]) {
10870 + /*
10871 +@@ -2275,24 +2281,6 @@ int spi_register_controller(struct spi_controller *ctlr)
10872 + if (status)
10873 + return status;
10874 +
10875 +- if (!spi_controller_is_slave(ctlr)) {
10876 +- if (ctlr->use_gpio_descriptors) {
10877 +- status = spi_get_gpio_descs(ctlr);
10878 +- if (status)
10879 +- return status;
10880 +- /*
10881 +- * A controller using GPIO descriptors always
10882 +- * supports SPI_CS_HIGH if need be.
10883 +- */
10884 +- ctlr->mode_bits |= SPI_CS_HIGH;
10885 +- } else {
10886 +- /* Legacy code path for GPIOs from DT */
10887 +- status = of_spi_register_master(ctlr);
10888 +- if (status)
10889 +- return status;
10890 +- }
10891 +- }
10892 +-
10893 + /* even if it's just one always-selected device, there must
10894 + * be at least one chipselect
10895 + */
10896 +@@ -2349,6 +2337,25 @@ int spi_register_controller(struct spi_controller *ctlr)
10897 + * registration fails if the bus ID is in use.
10898 + */
10899 + dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
10900 ++
10901 ++ if (!spi_controller_is_slave(ctlr)) {
10902 ++ if (ctlr->use_gpio_descriptors) {
10903 ++ status = spi_get_gpio_descs(ctlr);
10904 ++ if (status)
10905 ++ return status;
10906 ++ /*
10907 ++ * A controller using GPIO descriptors always
10908 ++ * supports SPI_CS_HIGH if need be.
10909 ++ */
10910 ++ ctlr->mode_bits |= SPI_CS_HIGH;
10911 ++ } else {
10912 ++ /* Legacy code path for GPIOs from DT */
10913 ++ status = of_spi_register_master(ctlr);
10914 ++ if (status)
10915 ++ return status;
10916 ++ }
10917 ++ }
10918 ++
10919 + status = device_add(&ctlr->dev);
10920 + if (status < 0) {
10921 + /* free bus id */
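Two independent spi.c fixes above: spi_map_msg() now skips zero-length transfers so they are not handed the shared dummy buffers, and the GPIO chip-select setup moves after dev_set_name() so descriptors are requested against a correctly named device (with a new IS_ERR() check on each one). The zero-length skip, sketched over a plain array (types illustrative):

    #include <stdio.h>
    #include <stddef.h>

    struct xfer {
        size_t len;
        const void *tx_buf;
        void *rx_buf;
    };

    static char dummy_tx[16], dummy_rx[16];

    static void map_msg(struct xfer *x, int n)
    {
        for (int i = 0; i < n; i++) {
            if (!x[i].len)
                continue;           /* nothing to shift: leave buffers NULL */
            if (!x[i].tx_buf)
                x[i].tx_buf = dummy_tx;
            if (!x[i].rx_buf)
                x[i].rx_buf = dummy_rx;
        }
    }

    int main(void)
    {
        struct xfer xs[2] = { { 0, NULL, NULL }, { 8, NULL, NULL } };

        map_msg(xs, 2);
        printf("xfer0 tx=%p, xfer1 tx=%p\n",
               (void *)xs[0].tx_buf, (void *)xs[1].tx_buf);
        return 0;
    }
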
10922 +diff --git a/drivers/ssb/bridge_pcmcia_80211.c b/drivers/ssb/bridge_pcmcia_80211.c
10923 +index f51f150307df..ffa379efff83 100644
10924 +--- a/drivers/ssb/bridge_pcmcia_80211.c
10925 ++++ b/drivers/ssb/bridge_pcmcia_80211.c
10926 +@@ -113,16 +113,21 @@ static struct pcmcia_driver ssb_host_pcmcia_driver = {
10927 + .resume = ssb_host_pcmcia_resume,
10928 + };
10929 +
10930 ++static int pcmcia_init_failed;
10931 ++
10932 + /*
10933 + * These are not module init/exit functions!
10934 + * The module_pcmcia_driver() helper cannot be used here.
10935 + */
10936 + int ssb_host_pcmcia_init(void)
10937 + {
10938 +- return pcmcia_register_driver(&ssb_host_pcmcia_driver);
10939 ++ pcmcia_init_failed = pcmcia_register_driver(&ssb_host_pcmcia_driver);
10940 ++
10941 ++ return pcmcia_init_failed;
10942 + }
10943 +
10944 + void ssb_host_pcmcia_exit(void)
10945 + {
10946 +- pcmcia_unregister_driver(&ssb_host_pcmcia_driver);
10947 ++ if (!pcmcia_init_failed)
10948 ++ pcmcia_unregister_driver(&ssb_host_pcmcia_driver);
10949 + }
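The ssb bridge records whether pcmcia_register_driver() succeeded so that the exit path only unregisters a driver that was actually registered; as the comment notes, these are not module init/exit hooks, so the usual pairing guarantees do not apply. The pattern in miniature:

    #include <stdio.h>

    static int init_failed;

    static int register_driver(void) { return 0; }  /* stand-in */
    static void unregister_driver(void) { puts("unregistered"); }

    int host_init(void)
    {
        init_failed = register_driver();
        return init_failed;
    }

    void host_exit(void)
    {
        if (!init_failed)       /* never unregister what never registered */
            unregister_driver();
    }

    int main(void)
    {
        if (!host_init())
            host_exit();
        return 0;
    }
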
10950 +diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig
10951 +index aea449a8dbf8..76818cc48ddc 100644
10952 +--- a/drivers/staging/media/davinci_vpfe/Kconfig
10953 ++++ b/drivers/staging/media/davinci_vpfe/Kconfig
10954 +@@ -1,7 +1,7 @@
10955 + config VIDEO_DM365_VPFE
10956 + tristate "DM365 VPFE Media Controller Capture Driver"
10957 + depends on VIDEO_V4L2
10958 +- depends on (ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF) || COMPILE_TEST
10959 ++ depends on (ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF) || (COMPILE_TEST && !ARCH_OMAP1)
10960 + depends on VIDEO_V4L2_SUBDEV_API
10961 + depends on VIDEO_DAVINCI_VPBE_DISPLAY
10962 + select VIDEOBUF2_DMA_CONTIG
10963 +diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
10964 +index 8a9af4688fd4..8cdd3daa6c5f 100644
10965 +--- a/drivers/staging/media/imx/imx-media-vdic.c
10966 ++++ b/drivers/staging/media/imx/imx-media-vdic.c
10967 +@@ -231,6 +231,12 @@ static void __maybe_unused prepare_vdi_in_buffers(struct vdic_priv *priv,
10968 + curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
10969 + next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + is;
10970 + break;
10971 ++ default:
10972 ++ /*
10973 ++ * can't get here, priv->fieldtype can only be one of
10974 ++ * the above. This is to quiet smatch errors.
10975 ++ */
10976 ++ return;
10977 + }
10978 +
10979 + ipu_cpmem_set_buffer(priv->vdi_in_ch_p, 0, prev_phys);
10980 +diff --git a/drivers/staging/media/ipu3/ipu3.c b/drivers/staging/media/ipu3/ipu3.c
10981 +index d575ac78c8f0..d00d26264c37 100644
10982 +--- a/drivers/staging/media/ipu3/ipu3.c
10983 ++++ b/drivers/staging/media/ipu3/ipu3.c
10984 +@@ -791,7 +791,7 @@ out:
10985 + * PCI rpm framework checks the existence of driver rpm callbacks.
10986 + * Place a dummy callback here to avoid rpm going into error state.
10987 + */
10988 +-static int imgu_rpm_dummy_cb(struct device *dev)
10989 ++static __maybe_unused int imgu_rpm_dummy_cb(struct device *dev)
10990 + {
10991 + return 0;
10992 + }
10993 +diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
10994 +index 4aedd24a9848..c57c04b41d2e 100644
10995 +--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
10996 ++++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
10997 +@@ -28,6 +28,8 @@
10998 +
10999 + #define CEDRUS_CAPABILITY_UNTILED BIT(0)
11000 +
11001 ++#define CEDRUS_QUIRK_NO_DMA_OFFSET BIT(0)
11002 ++
11003 + enum cedrus_codec {
11004 + CEDRUS_CODEC_MPEG2,
11005 +
11006 +@@ -91,6 +93,7 @@ struct cedrus_dec_ops {
11007 +
11008 + struct cedrus_variant {
11009 + unsigned int capabilities;
11010 ++ unsigned int quirks;
11011 + };
11012 +
11013 + struct cedrus_dev {
11014 +diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
11015 +index 0acf219a8c91..fbfff7c1c771 100644
11016 +--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
11017 ++++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
11018 +@@ -177,7 +177,8 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
11019 + */
11020 +
11021 + #ifdef PHYS_PFN_OFFSET
11022 +- dev->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
11023 ++ if (!(variant->quirks & CEDRUS_QUIRK_NO_DMA_OFFSET))
11024 ++ dev->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
11025 + #endif
11026 +
11027 + ret = of_reserved_mem_device_init(dev->dev);
11028 +diff --git a/drivers/staging/mt7621-mmc/sd.c b/drivers/staging/mt7621-mmc/sd.c
11029 +index 4b26ec896a96..38f9ea02ee3a 100644
11030 +--- a/drivers/staging/mt7621-mmc/sd.c
11031 ++++ b/drivers/staging/mt7621-mmc/sd.c
11032 +@@ -468,7 +468,11 @@ static unsigned int msdc_command_start(struct msdc_host *host,
11033 + host->cmd = cmd;
11034 + host->cmd_rsp = resp;
11035 +
11036 +- init_completion(&host->cmd_done);
11037 ++ // The completion should have been consumed by the previous command
11038 ++ // response handler, because the mmc requests should be serialized
11039 ++ if (completion_done(&host->cmd_done))
11040 ++ dev_err(mmc_dev(host->mmc),
11041 ++ "previous command was not handled\n");
11042 +
11043 + sdr_set_bits(host->base + MSDC_INTEN, wints);
11044 + sdc_send_cmd(rawcmd, cmd->arg);
11045 +@@ -490,7 +494,6 @@ static unsigned int msdc_command_resp(struct msdc_host *host,
11046 + MSDC_INT_ACMD19_DONE;
11047 +
11048 + BUG_ON(in_interrupt());
11049 +- //init_completion(&host->cmd_done);
11050 + //sdr_set_bits(host->base + MSDC_INTEN, wints);
11051 +
11052 + spin_unlock(&host->lock);
11053 +@@ -593,8 +596,6 @@ static void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
11054 + struct bd *bd;
11055 + u32 j;
11056 +
11057 +- BUG_ON(sglen > MAX_BD_NUM); /* not support currently */
11058 +-
11059 + gpd = dma->gpd;
11060 + bd = dma->bd;
11061 +
11062 +@@ -674,7 +675,13 @@ static int msdc_do_request(struct mmc_host *mmc, struct mmc_request *mrq)
11063 + //msdc_clr_fifo(host); /* no need */
11064 +
11065 + msdc_dma_on(); /* enable DMA mode first!! */
11066 +- init_completion(&host->xfer_done);
11067 ++
11068 ++ // The completion should have been consumed by the previous
11069 ++ // xfer response handler, because the mmc requests should be
11070 ++ // serialized
11071 ++ if (completion_done(&host->cmd_done))
11072 ++ dev_err(mmc_dev(host->mmc),
11073 ++ "previous transfer was not handled\n");
11074 +
11075 + /* start the command first*/
11076 + if (msdc_command_start(host, cmd, CMD_TIMEOUT) != 0)
11077 +@@ -683,6 +690,13 @@ static int msdc_do_request(struct mmc_host *mmc, struct mmc_request *mrq)
11078 + data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg,
11079 + data->sg_len,
11080 + mmc_get_dma_dir(data));
11081 ++
11082 ++ if (data->sg_count == 0) {
11083 ++ dev_err(mmc_dev(host->mmc), "failed to map DMA for transfer\n");
11084 ++ data->error = -ENOMEM;
11085 ++ goto done;
11086 ++ }
11087 ++
11088 + msdc_dma_setup(host, &host->dma, data->sg,
11089 + data->sg_count);
11090 +
11091 +@@ -693,7 +707,6 @@ static int msdc_do_request(struct mmc_host *mmc, struct mmc_request *mrq)
11092 + /* for read, the data coming too fast, then CRC error
11093 + * start DMA no business with CRC.
11094 + */
11095 +- //init_completion(&host->xfer_done);
11096 + msdc_dma_start(host);
11097 +
11098 + spin_unlock(&host->lock);
11099 +@@ -1688,6 +1701,8 @@ static int msdc_drv_probe(struct platform_device *pdev)
11100 + }
11101 + msdc_init_gpd_bd(host, &host->dma);
11102 +
11103 ++ init_completion(&host->cmd_done);
11104 ++ init_completion(&host->xfer_done);
11105 + INIT_DELAYED_WORK(&host->card_delaywork, msdc_tasklet_card);
11106 + spin_lock_init(&host->lock);
11107 + msdc_init_hw(host);
11108 +diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
11109 +index dd4898861b83..eb1e5dcb0d52 100644
11110 +--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
11111 ++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
11112 +@@ -209,6 +209,9 @@ vchiq_platform_init_state(struct vchiq_state *state)
11113 + struct vchiq_2835_state *platform_state;
11114 +
11115 + state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
11116 ++ if (!state->platform_state)
11117 ++ return VCHIQ_ERROR;
11118 ++
11119 + platform_state = (struct vchiq_2835_state *)state->platform_state;
11120 +
11121 + platform_state->inited = 1;
11122 +diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
11123 +index 53f5a1cb4636..819813e742d8 100644
11124 +--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
11125 ++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
11126 +@@ -2239,6 +2239,8 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
11127 + local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
11128 +
11129 + status = vchiq_platform_init_state(state);
11130 ++ if (status != VCHIQ_SUCCESS)
11131 ++ return VCHIQ_ERROR;
11132 +
11133 + /*
11134 + bring up slot handler thread
11135 +diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
11136 +index e3fc920af682..8b7f9131e9d1 100644
11137 +--- a/drivers/thunderbolt/icm.c
11138 ++++ b/drivers/thunderbolt/icm.c
11139 +@@ -473,6 +473,11 @@ static void add_switch(struct tb_switch *parent_sw, u64 route,
11140 + goto out;
11141 +
11142 + sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
11143 ++ if (!sw->uuid) {
11144 ++ tb_sw_warn(sw, "cannot allocate memory for switch\n");
11145 ++ tb_switch_put(sw);
11146 ++ goto out;
11147 ++ }
11148 + sw->connection_id = connection_id;
11149 + sw->connection_key = connection_key;
11150 + sw->link = link;
11151 +diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c
11152 +index b2f0d6386cee..8c077c4f3b5b 100644
11153 +--- a/drivers/thunderbolt/property.c
11154 ++++ b/drivers/thunderbolt/property.c
11155 +@@ -548,6 +548,11 @@ int tb_property_add_data(struct tb_property_dir *parent, const char *key,
11156 +
11157 + property->length = size / 4;
11158 + property->value.data = kzalloc(size, GFP_KERNEL);
11159 ++ if (!property->value.data) {
11160 ++ kfree(property);
11161 ++ return -ENOMEM;
11162 ++ }
11163 ++
11164 + memcpy(property->value.data, buf, buflen);
11165 +
11166 + list_add_tail(&property->list, &parent->properties);
11167 +@@ -578,7 +583,12 @@ int tb_property_add_text(struct tb_property_dir *parent, const char *key,
11168 + return -ENOMEM;
11169 +
11170 + property->length = size / 4;
11171 +- property->value.data = kzalloc(size, GFP_KERNEL);
11172 ++ property->value.text = kzalloc(size, GFP_KERNEL);
11173 ++ if (!property->value.text) {
11174 ++ kfree(property);
11175 ++ return -ENOMEM;
11176 ++ }
11177 ++
11178 + strcpy(property->value.text, text);
11179 +
11180 + list_add_tail(&property->list, &parent->properties);
11181 +diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
11182 +index cd96994dc094..f569a2673742 100644
11183 +--- a/drivers/thunderbolt/switch.c
11184 ++++ b/drivers/thunderbolt/switch.c
11185 +@@ -10,15 +10,13 @@
11186 + #include <linux/idr.h>
11187 + #include <linux/nvmem-provider.h>
11188 + #include <linux/pm_runtime.h>
11189 ++#include <linux/sched/signal.h>
11190 + #include <linux/sizes.h>
11191 + #include <linux/slab.h>
11192 + #include <linux/vmalloc.h>
11193 +
11194 + #include "tb.h"
11195 +
11196 +-/* Switch authorization from userspace is serialized by this lock */
11197 +-static DEFINE_MUTEX(switch_lock);
11198 +-
11199 + /* Switch NVM support */
11200 +
11201 + #define NVM_DEVID 0x05
11202 +@@ -254,8 +252,8 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
11203 + struct tb_switch *sw = priv;
11204 + int ret = 0;
11205 +
11206 +- if (mutex_lock_interruptible(&switch_lock))
11207 +- return -ERESTARTSYS;
11208 ++ if (!mutex_trylock(&sw->tb->lock))
11209 ++ return restart_syscall();
11210 +
11211 + /*
11212 + * Since writing the NVM image might require some special steps,
11213 +@@ -275,7 +273,7 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
11214 + memcpy(sw->nvm->buf + offset, val, bytes);
11215 +
11216 + unlock:
11217 +- mutex_unlock(&switch_lock);
11218 ++ mutex_unlock(&sw->tb->lock);
11219 +
11220 + return ret;
11221 + }
11222 +@@ -364,10 +362,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
11223 + }
11224 + nvm->non_active = nvm_dev;
11225 +
11226 +- mutex_lock(&switch_lock);
11227 + sw->nvm = nvm;
11228 +- mutex_unlock(&switch_lock);
11229 +-
11230 + return 0;
11231 +
11232 + err_nvm_active:
11233 +@@ -384,10 +379,8 @@ static void tb_switch_nvm_remove(struct tb_switch *sw)
11234 + {
11235 + struct tb_switch_nvm *nvm;
11236 +
11237 +- mutex_lock(&switch_lock);
11238 + nvm = sw->nvm;
11239 + sw->nvm = NULL;
11240 +- mutex_unlock(&switch_lock);
11241 +
11242 + if (!nvm)
11243 + return;
11244 +@@ -716,8 +709,8 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
11245 + {
11246 + int ret = -EINVAL;
11247 +
11248 +- if (mutex_lock_interruptible(&switch_lock))
11249 +- return -ERESTARTSYS;
11250 ++ if (!mutex_trylock(&sw->tb->lock))
11251 ++ return restart_syscall();
11252 +
11253 + if (sw->authorized)
11254 + goto unlock;
11255 +@@ -760,7 +753,7 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
11256 + }
11257 +
11258 + unlock:
11259 +- mutex_unlock(&switch_lock);
11260 ++ mutex_unlock(&sw->tb->lock);
11261 + return ret;
11262 + }
11263 +
11264 +@@ -817,15 +810,15 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
11265 + struct tb_switch *sw = tb_to_switch(dev);
11266 + ssize_t ret;
11267 +
11268 +- if (mutex_lock_interruptible(&switch_lock))
11269 +- return -ERESTARTSYS;
11270 ++ if (!mutex_trylock(&sw->tb->lock))
11271 ++ return restart_syscall();
11272 +
11273 + if (sw->key)
11274 + ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
11275 + else
11276 + ret = sprintf(buf, "\n");
11277 +
11278 +- mutex_unlock(&switch_lock);
11279 ++ mutex_unlock(&sw->tb->lock);
11280 + return ret;
11281 + }
11282 +
11283 +@@ -842,8 +835,8 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
11284 + else if (hex2bin(key, buf, sizeof(key)))
11285 + return -EINVAL;
11286 +
11287 +- if (mutex_lock_interruptible(&switch_lock))
11288 +- return -ERESTARTSYS;
11289 ++ if (!mutex_trylock(&sw->tb->lock))
11290 ++ return restart_syscall();
11291 +
11292 + if (sw->authorized) {
11293 + ret = -EBUSY;
11294 +@@ -858,7 +851,7 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
11295 + }
11296 + }
11297 +
11298 +- mutex_unlock(&switch_lock);
11299 ++ mutex_unlock(&sw->tb->lock);
11300 + return ret;
11301 + }
11302 + static DEVICE_ATTR(key, 0600, key_show, key_store);
11303 +@@ -904,8 +897,8 @@ static ssize_t nvm_authenticate_store(struct device *dev,
11304 + bool val;
11305 + int ret;
11306 +
11307 +- if (mutex_lock_interruptible(&switch_lock))
11308 +- return -ERESTARTSYS;
11309 ++ if (!mutex_trylock(&sw->tb->lock))
11310 ++ return restart_syscall();
11311 +
11312 + /* If NVMem devices are not yet added */
11313 + if (!sw->nvm) {
11314 +@@ -953,7 +946,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
11315 + }
11316 +
11317 + exit_unlock:
11318 +- mutex_unlock(&switch_lock);
11319 ++ mutex_unlock(&sw->tb->lock);
11320 +
11321 + if (ret)
11322 + return ret;
11323 +@@ -967,8 +960,8 @@ static ssize_t nvm_version_show(struct device *dev,
11324 + struct tb_switch *sw = tb_to_switch(dev);
11325 + int ret;
11326 +
11327 +- if (mutex_lock_interruptible(&switch_lock))
11328 +- return -ERESTARTSYS;
11329 ++ if (!mutex_trylock(&sw->tb->lock))
11330 ++ return restart_syscall();
11331 +
11332 + if (sw->safe_mode)
11333 + ret = -ENODATA;
11334 +@@ -977,7 +970,7 @@ static ssize_t nvm_version_show(struct device *dev,
11335 + else
11336 + ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
11337 +
11338 +- mutex_unlock(&switch_lock);
11339 ++ mutex_unlock(&sw->tb->lock);
11340 +
11341 + return ret;
11342 + }
11343 +@@ -1294,13 +1287,14 @@ int tb_switch_configure(struct tb_switch *sw)
11344 + return tb_plug_events_active(sw, true);
11345 + }
11346 +
11347 +-static void tb_switch_set_uuid(struct tb_switch *sw)
11348 ++static int tb_switch_set_uuid(struct tb_switch *sw)
11349 + {
11350 + u32 uuid[4];
11351 +- int cap;
11352 ++ int cap, ret;
11353 +
11354 ++ ret = 0;
11355 + if (sw->uuid)
11356 +- return;
11357 ++ return ret;
11358 +
11359 + /*
11360 + * The newer controllers include fused UUID as part of link
11361 +@@ -1308,7 +1302,9 @@ static void tb_switch_set_uuid(struct tb_switch *sw)
11362 + */
11363 + cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
11364 + if (cap > 0) {
11365 +- tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
11366 ++ ret = tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
11367 ++ if (ret)
11368 ++ return ret;
11369 + } else {
11370 + /*
11371 + * ICM generates UUID based on UID and fills the upper
11372 +@@ -1323,6 +1319,9 @@ static void tb_switch_set_uuid(struct tb_switch *sw)
11373 + }
11374 +
11375 + sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
11376 ++ if (!sw->uuid)
11377 ++ ret = -ENOMEM;
11378 ++ return ret;
11379 + }
11380 +
11381 + static int tb_switch_add_dma_port(struct tb_switch *sw)
11382 +@@ -1372,7 +1371,9 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
11383 +
11384 + if (status) {
11385 + tb_sw_info(sw, "switch flash authentication failed\n");
11386 +- tb_switch_set_uuid(sw);
11387 ++ ret = tb_switch_set_uuid(sw);
11388 ++ if (ret)
11389 ++ return ret;
11390 + nvm_set_auth_status(sw, status);
11391 + }
11392 +
11393 +@@ -1422,7 +1423,9 @@ int tb_switch_add(struct tb_switch *sw)
11394 + }
11395 + tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
11396 +
11397 +- tb_switch_set_uuid(sw);
11398 ++ ret = tb_switch_set_uuid(sw);
11399 ++ if (ret)
11400 ++ return ret;
11401 +
11402 + for (i = 0; i <= sw->config.max_port_number; i++) {
11403 + if (sw->ports[i].disabled) {
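The thunderbolt switch.c rework drops the private switch_lock in favor of the domain-wide tb->lock, taken with mutex_trylock(); when the lock is contended the sysfs handler returns restart_syscall() instead of sleeping, avoiding a deadlock against paths that already hold the domain lock. A userspace approximation with pthread_mutex_trylock, where the restart is simulated by a retry loop:

    #include <pthread.h>
    #include <stdio.h>
    #include <errno.h>

    static pthread_mutex_t domain_lock = PTHREAD_MUTEX_INITIALIZER;

    static int show_attr(char *buf, size_t n)
    {
        if (pthread_mutex_trylock(&domain_lock) != 0)
            return -EAGAIN;         /* caller retries: ~restart_syscall() */

        snprintf(buf, n, "1.0\n");  /* read state under the lock */
        pthread_mutex_unlock(&domain_lock);
        return 0;
    }

    int main(void)
    {
        char buf[8];

        while (show_attr(buf, sizeof(buf)) == -EAGAIN)
            ;                       /* the kernel restarts the syscall */
        printf("%s", buf);
        return 0;
    }
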
11404 +diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
11405 +index 52584c4003e3..f5e0282225d1 100644
11406 +--- a/drivers/thunderbolt/tb.h
11407 ++++ b/drivers/thunderbolt/tb.h
11408 +@@ -80,8 +80,7 @@ struct tb_switch_nvm {
11409 + * @depth: Depth in the chain this switch is connected (ICM only)
11410 + *
11411 + * When the switch is being added or removed to the domain (other
11412 +- * switches) you need to have domain lock held. For switch authorization
11413 +- * internal switch_lock is enough.
11414 ++ * switches) you need to have domain lock held.
11415 + */
11416 + struct tb_switch {
11417 + struct device dev;
11418 +diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
11419 +index e27dd8beb94b..e0642dcb8b9b 100644
11420 +--- a/drivers/thunderbolt/xdomain.c
11421 ++++ b/drivers/thunderbolt/xdomain.c
11422 +@@ -740,6 +740,7 @@ static void enumerate_services(struct tb_xdomain *xd)
11423 + struct tb_service *svc;
11424 + struct tb_property *p;
11425 + struct device *dev;
11426 ++ int id;
11427 +
11428 + /*
11429 + * First remove all services that are not available anymore in
11430 +@@ -768,7 +769,12 @@ static void enumerate_services(struct tb_xdomain *xd)
11431 + break;
11432 + }
11433 +
11434 +- svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
11435 ++ id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
11436 ++ if (id < 0) {
11437 ++ kfree(svc);
11438 ++ break;
11439 ++ }
11440 ++ svc->id = id;
11441 + svc->dev.bus = &tb_bus_type;
11442 + svc->dev.type = &tb_service_type;
11443 + svc->dev.parent = &xd->dev;
11444 +diff --git a/drivers/tty/ipwireless/main.c b/drivers/tty/ipwireless/main.c
11445 +index 3475e841ef5c..4c18bbfe1a92 100644
11446 +--- a/drivers/tty/ipwireless/main.c
11447 ++++ b/drivers/tty/ipwireless/main.c
11448 +@@ -114,6 +114,10 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
11449 +
11450 + ipw->common_memory = ioremap(p_dev->resource[2]->start,
11451 + resource_size(p_dev->resource[2]));
11452 ++ if (!ipw->common_memory) {
11453 ++ ret = -ENOMEM;
11454 ++ goto exit1;
11455 ++ }
11456 + if (!request_mem_region(p_dev->resource[2]->start,
11457 + resource_size(p_dev->resource[2]),
11458 + IPWIRELESS_PCCARD_NAME)) {
11459 +@@ -134,6 +138,10 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
11460 +
11461 + ipw->attr_memory = ioremap(p_dev->resource[3]->start,
11462 + resource_size(p_dev->resource[3]));
11463 ++ if (!ipw->attr_memory) {
11464 ++ ret = -ENOMEM;
11465 ++ goto exit3;
11466 ++ }
11467 + if (!request_mem_region(p_dev->resource[3]->start,
11468 + resource_size(p_dev->resource[3]),
11469 + IPWIRELESS_PCCARD_NAME)) {
11470 +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
11471 +index 975d7c1288e3..e9f740484001 100644
11472 +--- a/drivers/usb/core/hcd.c
11473 ++++ b/drivers/usb/core/hcd.c
11474 +@@ -3020,6 +3020,9 @@ usb_hcd_platform_shutdown(struct platform_device *dev)
11475 + {
11476 + struct usb_hcd *hcd = platform_get_drvdata(dev);
11477 +
11478 ++ /* No need for pm_runtime_put(), we're shutting down */
11479 ++ pm_runtime_get_sync(&dev->dev);
11480 ++
11481 + if (hcd->driver->shutdown)
11482 + hcd->driver->shutdown(hcd);
11483 + }
11484 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
11485 +index 8d4631c81b9f..310eef451db8 100644
11486 +--- a/drivers/usb/core/hub.c
11487 ++++ b/drivers/usb/core/hub.c
11488 +@@ -5902,7 +5902,10 @@ int usb_reset_device(struct usb_device *udev)
11489 + cintf->needs_binding = 1;
11490 + }
11491 + }
11492 +- usb_unbind_and_rebind_marked_interfaces(udev);
11493 ++
11494 ++ /* If the reset failed, hub_wq will unbind drivers later */
11495 ++ if (ret == 0)
11496 ++ usb_unbind_and_rebind_marked_interfaces(udev);
11497 + }
11498 +
11499 + usb_autosuspend_device(udev);
11500 +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
11501 +index 6812a8a3a98b..a749de7604c6 100644
11502 +--- a/drivers/usb/dwc2/gadget.c
11503 ++++ b/drivers/usb/dwc2/gadget.c
11504 +@@ -714,13 +714,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
11505 + unsigned int maxsize;
11506 +
11507 + if (is_isoc)
11508 +- maxsize = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
11509 +- DEV_DMA_ISOC_RX_NBYTES_LIMIT;
11510 ++ maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
11511 ++ DEV_DMA_ISOC_RX_NBYTES_LIMIT) *
11512 ++ MAX_DMA_DESC_NUM_HS_ISOC;
11513 + else
11514 +- maxsize = DEV_DMA_NBYTES_LIMIT;
11515 +-
11516 +- /* Above size of one descriptor was chosen, multiple it */
11517 +- maxsize *= MAX_DMA_DESC_NUM_GENERIC;
11518 ++ maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
11519 +
11520 + return maxsize;
11521 + }
11522 +@@ -932,7 +930,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
11523 +
11524 + /* Update index of last configured entry in the chain */
11525 + hs_ep->next_desc++;
11526 +- if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_GENERIC)
11527 ++ if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_HS_ISOC)
11528 + hs_ep->next_desc = 0;
11529 +
11530 + return 0;
11531 +@@ -964,7 +962,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
11532 + }
11533 +
11534 + /* Initialize descriptor chain by Host Busy status */
11535 +- for (i = 0; i < MAX_DMA_DESC_NUM_GENERIC; i++) {
11536 ++ for (i = 0; i < MAX_DMA_DESC_NUM_HS_ISOC; i++) {
11537 + desc = &hs_ep->desc_list[i];
11538 + desc->status = 0;
11539 + desc->status |= (DEV_DMA_BUFF_STS_HBUSY
11540 +@@ -2162,7 +2160,7 @@ static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
11541 + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
11542 +
11543 + hs_ep->compl_desc++;
11544 +- if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_GENERIC - 1))
11545 ++ if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_HS_ISOC - 1))
11546 + hs_ep->compl_desc = 0;
11547 + desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
11548 + }
11549 +@@ -3899,6 +3897,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
11550 + unsigned int i, val, size;
11551 + int ret = 0;
11552 + unsigned char ep_type;
11553 ++ int desc_num;
11554 +
11555 + dev_dbg(hsotg->dev,
11556 + "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
11557 +@@ -3945,11 +3944,15 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
11558 + dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
11559 + __func__, epctrl, epctrl_reg);
11560 +
11561 ++ if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC)
11562 ++ desc_num = MAX_DMA_DESC_NUM_HS_ISOC;
11563 ++ else
11564 ++ desc_num = MAX_DMA_DESC_NUM_GENERIC;
11565 ++
11566 + /* Allocate DMA descriptor chain for non-ctrl endpoints */
11567 + if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
11568 + hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
11569 +- MAX_DMA_DESC_NUM_GENERIC *
11570 +- sizeof(struct dwc2_dma_desc),
11571 ++ desc_num * sizeof(struct dwc2_dma_desc),
11572 + &hs_ep->desc_list_dma, GFP_ATOMIC);
11573 + if (!hs_ep->desc_list) {
11574 + ret = -ENOMEM;
11575 +@@ -4092,7 +4095,7 @@ error1:
11576 +
11577 + error2:
11578 + if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
11579 +- dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
11580 ++ dmam_free_coherent(hsotg->dev, desc_num *
11581 + sizeof(struct dwc2_dma_desc),
11582 + hs_ep->desc_list, hs_ep->desc_list_dma);
11583 + hs_ep->desc_list = NULL;
11584 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
11585 +index f944cea4056b..72110a8c49d6 100644
11586 +--- a/drivers/usb/dwc3/core.c
11587 ++++ b/drivers/usb/dwc3/core.c
11588 +@@ -1600,6 +1600,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
11589 + spin_lock_irqsave(&dwc->lock, flags);
11590 + dwc3_gadget_suspend(dwc);
11591 + spin_unlock_irqrestore(&dwc->lock, flags);
11592 ++ synchronize_irq(dwc->irq_gadget);
11593 + dwc3_core_exit(dwc);
11594 + break;
11595 + case DWC3_GCTL_PRTCAP_HOST:
11596 +@@ -1632,6 +1633,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
11597 + spin_lock_irqsave(&dwc->lock, flags);
11598 + dwc3_gadget_suspend(dwc);
11599 + spin_unlock_irqrestore(&dwc->lock, flags);
11600 ++ synchronize_irq(dwc->irq_gadget);
11601 + }
11602 +
11603 + dwc3_otg_exit(dwc);
11604 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
11605 +index e293400cc6e9..2bb0ff9608d3 100644
11606 +--- a/drivers/usb/dwc3/gadget.c
11607 ++++ b/drivers/usb/dwc3/gadget.c
11608 +@@ -3384,8 +3384,6 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
11609 + dwc3_disconnect_gadget(dwc);
11610 + __dwc3_gadget_stop(dwc);
11611 +
11612 +- synchronize_irq(dwc->irq_gadget);
11613 +-
11614 + return 0;
11615 + }
11616 +
11617 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
11618 +index 20413c276c61..47be961f1bf3 100644
11619 +--- a/drivers/usb/gadget/function/f_fs.c
11620 ++++ b/drivers/usb/gadget/function/f_fs.c
11621 +@@ -1133,7 +1133,8 @@ error_lock:
11622 + error_mutex:
11623 + mutex_unlock(&epfile->mutex);
11624 + error:
11625 +- ffs_free_buffer(io_data);
11626 ++ if (ret != -EIOCBQUEUED) /* don't free if there is iocb queued */
11627 ++ ffs_free_buffer(io_data);
11628 + return ret;
11629 + }
11630 +
11631 +diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
11632 +index 68a113594808..2811c4afde01 100644
11633 +--- a/drivers/video/fbdev/core/fbcmap.c
11634 ++++ b/drivers/video/fbdev/core/fbcmap.c
11635 +@@ -94,6 +94,8 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
11636 + int size = len * sizeof(u16);
11637 + int ret = -ENOMEM;
11638 +
11639 ++ flags |= __GFP_NOWARN;
11640 ++
11641 + if (cmap->len != len) {
11642 + fb_dealloc_cmap(cmap);
11643 + if (!len)
11644 +diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
11645 +index 283d9307df21..ac049871704d 100644
11646 +--- a/drivers/video/fbdev/core/modedb.c
11647 ++++ b/drivers/video/fbdev/core/modedb.c
11648 +@@ -935,6 +935,9 @@ void fb_var_to_videomode(struct fb_videomode *mode,
11649 + if (var->vmode & FB_VMODE_DOUBLE)
11650 + vtotal *= 2;
11651 +
11652 ++ if (!htotal || !vtotal)
11653 ++ return;
11654 ++
11655 + hfreq = pixclock/htotal;
11656 + mode->refresh = hfreq/vtotal;
11657 + }
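The modedb.c fix bails out of fb_var_to_videomode() before dividing when either total is zero, which a crafted or zeroed fb_var_screeninfo can produce. The arithmetic with the guard, using an illustrative 1080p timing:

    #include <stdio.h>

    static unsigned int refresh_from_var(unsigned int pixclock,
                                         unsigned int htotal,
                                         unsigned int vtotal)
    {
        if (!htotal || !vtotal)     /* would otherwise divide by zero */
            return 0;

        return pixclock / htotal / vtotal;
    }

    int main(void)
    {
        printf("%u Hz\n", refresh_from_var(148500000, 2200, 1125)); /* 60  */
        printf("%u Hz\n", refresh_from_var(148500000, 0, 1125));    /* 0   */
        return 0;
    }
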
11658 +diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
11659 +index fd02e8a4841d..9f39f0c360e0 100644
11660 +--- a/drivers/video/fbdev/efifb.c
11661 ++++ b/drivers/video/fbdev/efifb.c
11662 +@@ -464,7 +464,8 @@ static int efifb_probe(struct platform_device *dev)
11663 + info->apertures->ranges[0].base = efifb_fix.smem_start;
11664 + info->apertures->ranges[0].size = size_remap;
11665 +
11666 +- if (!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
11667 ++ if (efi_enabled(EFI_BOOT) &&
11668 ++ !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
11669 + if ((efifb_fix.smem_start + efifb_fix.smem_len) >
11670 + (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
11671 + pr_err("efifb: video memory @ 0x%lx spans multiple EFI memory regions\n",
11672 +diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
11673 +index 0364d3329c52..3516ce6718d9 100644
11674 +--- a/drivers/w1/w1_io.c
11675 ++++ b/drivers/w1/w1_io.c
11676 +@@ -432,8 +432,7 @@ int w1_reset_resume_command(struct w1_master *dev)
11677 + if (w1_reset_bus(dev))
11678 + return -1;
11679 +
11680 +- /* This will make only the last matched slave perform a skip ROM. */
11681 +- w1_write_8(dev, W1_RESUME_CMD);
11682 ++ w1_write_8(dev, dev->slave_count > 1 ? W1_RESUME_CMD : W1_SKIP_ROM);
11683 + return 0;
11684 + }
11685 + EXPORT_SYMBOL_GPL(w1_reset_resume_command);
11686 +diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
11687 +index f3fbb700f569..05a286d24f14 100644
11688 +--- a/drivers/xen/biomerge.c
11689 ++++ b/drivers/xen/biomerge.c
11690 +@@ -4,12 +4,13 @@
11691 + #include <xen/xen.h>
11692 + #include <xen/page.h>
11693 +
11694 ++/* check if @page can be merged with 'vec1' */
11695 + bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
11696 +- const struct bio_vec *vec2)
11697 ++ const struct page *page)
11698 + {
11699 + #if XEN_PAGE_SIZE == PAGE_SIZE
11700 + unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
11701 +- unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
11702 ++ unsigned long bfn2 = pfn_to_bfn(page_to_pfn(page));
11703 +
11704 + return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
11705 + #else
11706 +diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
11707 +index a2cdf25573e2..706801c6c4c4 100644
11708 +--- a/fs/afs/xattr.c
11709 ++++ b/fs/afs/xattr.c
11710 +@@ -69,11 +69,20 @@ static int afs_xattr_get_fid(const struct xattr_handler *handler,
11711 + void *buffer, size_t size)
11712 + {
11713 + struct afs_vnode *vnode = AFS_FS_I(inode);
11714 +- char text[8 + 1 + 8 + 1 + 8 + 1];
11715 ++ char text[16 + 1 + 24 + 1 + 8 + 1];
11716 + size_t len;
11717 +
11718 +- len = sprintf(text, "%llx:%llx:%x",
11719 +- vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
11720 ++ /* The volume ID is 64-bit, the vnode ID is 96-bit and the
11721 ++ * uniquifier is 32-bit.
11722 ++ */
11723 ++ len = sprintf(text, "%llx:", vnode->fid.vid);
11724 ++ if (vnode->fid.vnode_hi)
11725 ++ len += sprintf(text + len, "%x%016llx",
11726 ++ vnode->fid.vnode_hi, vnode->fid.vnode);
11727 ++ else
11728 ++ len += sprintf(text + len, "%llx", vnode->fid.vnode);
11729 ++ len += sprintf(text + len, ":%x", vnode->fid.unique);
11730 ++
11731 + if (size == 0)
11732 + return len;
11733 + if (len > size)
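The afs/xattr.c fix resizes the FID text buffer to match the real field widths: a 64-bit volume ID needs up to 16 hex digits, the 96-bit vnode ID up to 24, and the 32-bit uniquifier 8, plus two separators and a NUL. The sizing arithmetic, sketched with worst-case stand-in values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* 16 + 1 + 24 + 1 + 8 + 1 bytes: worst case plus ':' ':' '\0' */
        char text[16 + 1 + 24 + 1 + 8 + 1];
        uint64_t vid = 0xffffffffffffffffULL;   /* 64-bit volume ID       */
        uint32_t vnode_hi = 0xffffffff;         /* top 32 of 96-bit vnode */
        uint64_t vnode_lo = 0xffffffffffffffffULL;
        uint32_t unique = 0xffffffff;

        int len = snprintf(text, sizeof(text), "%llx:%x%016llx:%x",
                           (unsigned long long)vid, vnode_hi,
                           (unsigned long long)vnode_lo, unique);

        printf("%s (%d chars, buffer %zu)\n", text, len, sizeof(text));
        return 0;
    }
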
11734 +diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
11735 +index 4f2a8ae0aa42..716656d502a9 100644
11736 +--- a/fs/btrfs/compression.c
11737 ++++ b/fs/btrfs/compression.c
11738 +@@ -1009,6 +1009,7 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
11739 + struct list_head *workspace;
11740 + int ret;
11741 +
11742 ++ level = btrfs_compress_op[type]->set_level(level);
11743 + workspace = get_workspace(type, level);
11744 + ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
11745 + start, pages,
11746 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
11747 +index d789542edc5a..5e40c8f1e97a 100644
11748 +--- a/fs/btrfs/extent-tree.c
11749 ++++ b/fs/btrfs/extent-tree.c
11750 +@@ -3981,8 +3981,7 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
11751 + info->space_info_kobj, "%s",
11752 + alloc_name(space_info->flags));
11753 + if (ret) {
11754 +- percpu_counter_destroy(&space_info->total_bytes_pinned);
11755 +- kfree(space_info);
11756 ++ kobject_put(&space_info->kobj);
11757 + return ret;
11758 + }
11759 +
11760 +@@ -11315,9 +11314,9 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
11761 + * held back allocations.
11762 + */
11763 + static int btrfs_trim_free_extents(struct btrfs_device *device,
11764 +- struct fstrim_range *range, u64 *trimmed)
11765 ++ u64 minlen, u64 *trimmed)
11766 + {
11767 +- u64 start = range->start, len = 0;
11768 ++ u64 start = 0, len = 0;
11769 + int ret;
11770 +
11771 + *trimmed = 0;
11772 +@@ -11360,8 +11359,8 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
11773 + if (!trans)
11774 + up_read(&fs_info->commit_root_sem);
11775 +
11776 +- ret = find_free_dev_extent_start(trans, device, range->minlen,
11777 +- start, &start, &len);
11778 ++ ret = find_free_dev_extent_start(trans, device, minlen, start,
11779 ++ &start, &len);
11780 + if (trans) {
11781 + up_read(&fs_info->commit_root_sem);
11782 + btrfs_put_transaction(trans);
11783 +@@ -11374,16 +11373,6 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
11784 + break;
11785 + }
11786 +
11787 +- /* If we are out of the passed range break */
11788 +- if (start > range->start + range->len - 1) {
11789 +- mutex_unlock(&fs_info->chunk_mutex);
11790 +- ret = 0;
11791 +- break;
11792 +- }
11793 +-
11794 +- start = max(range->start, start);
11795 +- len = min(range->len, len);
11796 +-
11797 + ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
11798 + mutex_unlock(&fs_info->chunk_mutex);
11799 +
11800 +@@ -11393,10 +11382,6 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
11801 + start += len;
11802 + *trimmed += bytes;
11803 +
11804 +- /* We've trimmed enough */
11805 +- if (*trimmed >= range->len)
11806 +- break;
11807 +-
11808 + if (fatal_signal_pending(current)) {
11809 + ret = -ERESTARTSYS;
11810 + break;
11811 +@@ -11480,7 +11465,8 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
11812 + mutex_lock(&fs_info->fs_devices->device_list_mutex);
11813 + devices = &fs_info->fs_devices->devices;
11814 + list_for_each_entry(device, devices, dev_list) {
11815 +- ret = btrfs_trim_free_extents(device, range, &group_trimmed);
11816 ++ ret = btrfs_trim_free_extents(device, range->minlen,
11817 ++ &group_trimmed);
11818 + if (ret) {
11819 + dev_failed++;
11820 + dev_ret = ret;
11821 +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
11822 +index 34fe8a58b0e9..ef11808b592b 100644
11823 +--- a/fs/btrfs/file.c
11824 ++++ b/fs/btrfs/file.c
11825 +@@ -2058,6 +2058,18 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
11826 + int ret = 0, err;
11827 + u64 len;
11828 +
11829 ++ /*
11830 ++ * If the inode needs a full sync, make sure we use a full range to
11831 ++ * avoid log tree corruption, due to hole detection racing with ordered
11832 ++ * extent completion for adjacent ranges, and assertion failures during
11833 ++ * hole detection.
11834 ++ */
11835 ++ if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
11836 ++ &BTRFS_I(inode)->runtime_flags)) {
11837 ++ start = 0;
11838 ++ end = LLONG_MAX;
11839 ++ }
11840 ++
11841 + /*
11842 + * The range length can be represented by u64, we have to do the typecasts
11843 + * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
11844 +@@ -2546,10 +2558,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
11845 +
11846 + ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
11847 + &cached_state);
11848 +- if (ret) {
11849 +- inode_unlock(inode);
11850 ++ if (ret)
11851 + goto out_only_mutex;
11852 +- }
11853 +
11854 + path = btrfs_alloc_path();
11855 + if (!path) {
11856 +@@ -3132,6 +3142,7 @@ static long btrfs_fallocate(struct file *file, int mode,
11857 + ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
11858 + cur_offset, last_byte - cur_offset);
11859 + if (ret < 0) {
11860 ++ cur_offset = last_byte;
11861 + free_extent_map(em);
11862 + break;
11863 + }
11864 +@@ -3181,7 +3192,7 @@ out:
11865 + /* Let go of our reservation. */
11866 + if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
11867 + btrfs_free_reserved_data_space(inode, data_reserved,
11868 +- alloc_start, alloc_end - cur_offset);
11869 ++ cur_offset, alloc_end - cur_offset);
11870 + extent_changeset_free(data_reserved);
11871 + return ret;
11872 + }
11873 +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
11874 +index 351fa506dc9b..1d82ee4883eb 100644
11875 +--- a/fs/btrfs/relocation.c
11876 ++++ b/fs/btrfs/relocation.c
11877 +@@ -4330,27 +4330,36 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
11878 + mutex_lock(&fs_info->cleaner_mutex);
11879 + ret = relocate_block_group(rc);
11880 + mutex_unlock(&fs_info->cleaner_mutex);
11881 +- if (ret < 0) {
11882 ++ if (ret < 0)
11883 + err = ret;
11884 +- goto out;
11885 +- }
11886 +-
11887 +- if (rc->extents_found == 0)
11888 +- break;
11889 +-
11890 +- btrfs_info(fs_info, "found %llu extents", rc->extents_found);
11891 +
11892 ++ /*
11893 ++ * We may have gotten ENOSPC after we already dirtied some
11894 ++ * extents. If writeout happens while we're relocating a
11895 ++ * different block group we could end up hitting the
11896 ++ * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
11897 ++ * btrfs_reloc_cow_block. Make sure we write everything out
11898 ++ * properly so we don't trip over this problem, and then break
11899 ++ * out of the loop if we hit an error.
11900 ++ */
11901 + if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
11902 + ret = btrfs_wait_ordered_range(rc->data_inode, 0,
11903 + (u64)-1);
11904 +- if (ret) {
11905 ++ if (ret)
11906 + err = ret;
11907 +- goto out;
11908 +- }
11909 + invalidate_mapping_pages(rc->data_inode->i_mapping,
11910 + 0, -1);
11911 + rc->stage = UPDATE_DATA_PTRS;
11912 + }
11913 ++
11914 ++ if (err < 0)
11915 ++ goto out;
11916 ++
11917 ++ if (rc->extents_found == 0)
11918 ++ break;
11919 ++
11920 ++ btrfs_info(fs_info, "found %llu extents", rc->extents_found);
11921 ++
11922 + }
11923 +
11924 + WARN_ON(rc->block_group->pinned > 0);
11925 +diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
11926 +index 893d12fbfda0..22124122728c 100644
11927 +--- a/fs/btrfs/root-tree.c
11928 ++++ b/fs/btrfs/root-tree.c
11929 +@@ -132,16 +132,17 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
11930 + return -ENOMEM;
11931 +
11932 + ret = btrfs_search_slot(trans, root, key, path, 0, 1);
11933 +- if (ret < 0) {
11934 +- btrfs_abort_transaction(trans, ret);
11935 ++ if (ret < 0)
11936 + goto out;
11937 +- }
11938 +
11939 +- if (ret != 0) {
11940 +- btrfs_print_leaf(path->nodes[0]);
11941 +- btrfs_crit(fs_info, "unable to update root key %llu %u %llu",
11942 +- key->objectid, key->type, key->offset);
11943 +- BUG_ON(1);
11944 ++ if (ret > 0) {
11945 ++ btrfs_crit(fs_info,
11946 ++ "unable to find root key (%llu %u %llu) in tree %llu",
11947 ++ key->objectid, key->type, key->offset,
11948 ++ root->root_key.objectid);
11949 ++ ret = -EUCLEAN;
11950 ++ btrfs_abort_transaction(trans, ret);
11951 ++ goto out;
11952 + }
11953 +
11954 + l = path->nodes[0];
11955 +diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
11956 +index 5a5930e3d32b..2f078b77fe14 100644
11957 +--- a/fs/btrfs/sysfs.c
11958 ++++ b/fs/btrfs/sysfs.c
11959 +@@ -825,7 +825,12 @@ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs,
11960 + fs_devs->fsid_kobj.kset = btrfs_kset;
11961 + error = kobject_init_and_add(&fs_devs->fsid_kobj,
11962 + &btrfs_ktype, parent, "%pU", fs_devs->fsid);
11963 +- return error;
11964 ++ if (error) {
11965 ++ kobject_put(&fs_devs->fsid_kobj);
11966 ++ return error;
11967 ++ }
11968 ++
11969 ++ return 0;
11970 + }
11971 +
11972 + int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info)
11973 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
11974 +index 561884f60d35..60aac95be54b 100644
11975 +--- a/fs/btrfs/tree-log.c
11976 ++++ b/fs/btrfs/tree-log.c
11977 +@@ -4169,6 +4169,7 @@ fill_holes:
11978 + *last_extent, 0,
11979 + 0, len, 0, len,
11980 + 0, 0, 0);
11981 ++ *last_extent += len;
11982 + }
11983 + }
11984 + }
11985 +diff --git a/fs/char_dev.c b/fs/char_dev.c
11986 +index a279c58fe360..8a63cfa29005 100644
11987 +--- a/fs/char_dev.c
11988 ++++ b/fs/char_dev.c
11989 +@@ -159,6 +159,12 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor,
11990 + ret = -EBUSY;
11991 + goto out;
11992 + }
11993 ++
11994 ++ if (new_min < old_min && new_max > old_max) {
11995 ++ ret = -EBUSY;
11996 ++ goto out;
11997 ++ }
11998 ++
11999 + }
12000 +
12001 + cd->next = *cp;
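The __register_chrdev_region() hunk closes a gap in the minor-range overlap detection: the pre-existing checks (outside this hunk) reject a request whose start or end falls inside an already-registered range, but a request that fully encloses an existing range has neither endpoint inside it and used to slip through. A rough userspace sketch of the combined predicate, with the first two conditions reconstructed by assumption from the surrounding code:

#include <stdbool.h>
#include <stdio.h>

static bool minor_ranges_conflict(unsigned int old_min, unsigned int old_max,
                                  unsigned int new_min, unsigned int new_max)
{
        /* partial overlaps: one endpoint of the new range lands inside
         * the old one (assumed shape of the pre-existing checks) */
        if (new_min >= old_min && new_min <= old_max)
                return true;
        if (new_max >= old_min && new_max <= old_max)
                return true;
        /* the case the patch adds: the new range strictly encloses the
         * old one, so neither endpoint test above fires */
        if (new_min < old_min && new_max > old_max)
                return true;
        return false;
}

int main(void)
{
        /* minors 10..20 already registered; a request for 5..25 passes
         * both endpoint tests but is caught by the containment test */
        printf("%d\n", minor_ranges_conflict(10, 20, 5, 25)); /* 1 */
        return 0;
}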
12002 +diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
12003 +index 4dc788e3bc96..fe38b5306045 100644
12004 +--- a/fs/crypto/crypto.c
12005 ++++ b/fs/crypto/crypto.c
12006 +@@ -334,7 +334,7 @@ static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
12007 + spin_lock(&dentry->d_lock);
12008 + cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
12009 + spin_unlock(&dentry->d_lock);
12010 +- dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
12011 ++ dir_has_key = fscrypt_has_encryption_key(d_inode(dir));
12012 + dput(dir);
12013 +
12014 + /*
12015 +diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
12016 +index 7ff40a73dbec..050384c79f40 100644
12017 +--- a/fs/crypto/fname.c
12018 ++++ b/fs/crypto/fname.c
12019 +@@ -269,7 +269,7 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
12020 + if (iname->len < FS_CRYPTO_BLOCK_SIZE)
12021 + return -EUCLEAN;
12022 +
12023 +- if (inode->i_crypt_info)
12024 ++ if (fscrypt_has_encryption_key(inode))
12025 + return fname_decrypt(inode, iname, oname);
12026 +
12027 + if (iname->len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE) {
12028 +@@ -336,7 +336,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
12029 + if (ret)
12030 + return ret;
12031 +
12032 +- if (dir->i_crypt_info) {
12033 ++ if (fscrypt_has_encryption_key(dir)) {
12034 + if (!fscrypt_fname_encrypted_size(dir, iname->len,
12035 + dir->i_sb->s_cop->max_namelen,
12036 + &fname->crypto_buf.len))
12037 +diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
12038 +index 322ce9686bdb..bf291c10c682 100644
12039 +--- a/fs/crypto/keyinfo.c
12040 ++++ b/fs/crypto/keyinfo.c
12041 +@@ -509,7 +509,7 @@ int fscrypt_get_encryption_info(struct inode *inode)
12042 + u8 *raw_key = NULL;
12043 + int res;
12044 +
12045 +- if (inode->i_crypt_info)
12046 ++ if (fscrypt_has_encryption_key(inode))
12047 + return 0;
12048 +
12049 + res = fscrypt_initialize(inode->i_sb->s_cop->flags);
12050 +@@ -573,7 +573,7 @@ int fscrypt_get_encryption_info(struct inode *inode)
12051 + if (res)
12052 + goto out;
12053 +
12054 +- if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
12055 ++ if (cmpxchg_release(&inode->i_crypt_info, NULL, crypt_info) == NULL)
12056 + crypt_info = NULL;
12057 + out:
12058 + if (res == -ENOKEY)
12059 +diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
12060 +index bd7eaf9b3f00..d536889ac31b 100644
12061 +--- a/fs/crypto/policy.c
12062 ++++ b/fs/crypto/policy.c
12063 +@@ -194,8 +194,8 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
12064 + res = fscrypt_get_encryption_info(child);
12065 + if (res)
12066 + return 0;
12067 +- parent_ci = parent->i_crypt_info;
12068 +- child_ci = child->i_crypt_info;
12069 ++ parent_ci = READ_ONCE(parent->i_crypt_info);
12070 ++ child_ci = READ_ONCE(child->i_crypt_info);
12071 +
12072 + if (parent_ci && child_ci) {
12073 + return memcmp(parent_ci->ci_master_key_descriptor,
12074 +@@ -246,7 +246,7 @@ int fscrypt_inherit_context(struct inode *parent, struct inode *child,
12075 + if (res < 0)
12076 + return res;
12077 +
12078 +- ci = parent->i_crypt_info;
12079 ++ ci = READ_ONCE(parent->i_crypt_info);
12080 + if (ci == NULL)
12081 + return -ENOKEY;
12082 +
12083 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
12084 +index b32a57bc5d5d..7fd2d14dc27c 100644
12085 +--- a/fs/ext4/inode.c
12086 ++++ b/fs/ext4/inode.c
12087 +@@ -5619,25 +5619,22 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
12088 + up_write(&EXT4_I(inode)->i_data_sem);
12089 + ext4_journal_stop(handle);
12090 + if (error) {
12091 +- if (orphan)
12092 ++ if (orphan && inode->i_nlink)
12093 + ext4_orphan_del(NULL, inode);
12094 + goto err_out;
12095 + }
12096 + }
12097 +- if (!shrink)
12098 ++ if (!shrink) {
12099 + pagecache_isize_extended(inode, oldsize, inode->i_size);
12100 +-
12101 +- /*
12102 +- * Blocks are going to be removed from the inode. Wait
12103 +- * for dio in flight. Temporarily disable
12104 +- * dioread_nolock to prevent livelock.
12105 +- */
12106 +- if (orphan) {
12107 +- if (!ext4_should_journal_data(inode)) {
12108 +- inode_dio_wait(inode);
12109 +- } else
12110 +- ext4_wait_for_tail_page_commit(inode);
12111 ++ } else {
12112 ++ /*
12113 ++ * Blocks are going to be removed from the inode. Wait
12114 ++ * for dio in flight.
12115 ++ */
12116 ++ inode_dio_wait(inode);
12117 + }
12118 ++ if (orphan && ext4_should_journal_data(inode))
12119 ++ ext4_wait_for_tail_page_commit(inode);
12120 + down_write(&EXT4_I(inode)->i_mmap_sem);
12121 +
12122 + rc = ext4_break_layouts(inode);
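Two fixes share this ext4_setattr() hunk: a failed truncate must not drop an inode with i_nlink == 0 from the orphan list, since an unlinked inode still needs orphan processing; and waiting for in-flight direct I/O now applies to every shrinking truncate, because blocks may be removed regardless of the orphan case, while ext4_wait_for_tail_page_commit() stays specific to data-journalled orphans.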
12123 +diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
12124 +index d32964cd1117..71c28ff98b56 100644
12125 +--- a/fs/gfs2/glock.c
12126 ++++ b/fs/gfs2/glock.c
12127 +@@ -140,6 +140,7 @@ void gfs2_glock_free(struct gfs2_glock *gl)
12128 + {
12129 + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
12130 +
12131 ++ BUG_ON(atomic_read(&gl->gl_revokes));
12132 + rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
12133 + smp_mb();
12134 + wake_up_glock(gl);
12135 +@@ -183,15 +184,19 @@ static int demote_ok(const struct gfs2_glock *gl)
12136 +
12137 + void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
12138 + {
12139 ++ if (!(gl->gl_ops->go_flags & GLOF_LRU))
12140 ++ return;
12141 ++
12142 + spin_lock(&lru_lock);
12143 +
12144 +- if (!list_empty(&gl->gl_lru))
12145 +- list_del_init(&gl->gl_lru);
12146 +- else
12147 ++ list_del(&gl->gl_lru);
12148 ++ list_add_tail(&gl->gl_lru, &lru_list);
12149 ++
12150 ++ if (!test_bit(GLF_LRU, &gl->gl_flags)) {
12151 ++ set_bit(GLF_LRU, &gl->gl_flags);
12152 + atomic_inc(&lru_count);
12153 ++ }
12154 +
12155 +- list_add_tail(&gl->gl_lru, &lru_list);
12156 +- set_bit(GLF_LRU, &gl->gl_flags);
12157 + spin_unlock(&lru_lock);
12158 + }
12159 +
12160 +@@ -201,7 +206,7 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
12161 + return;
12162 +
12163 + spin_lock(&lru_lock);
12164 +- if (!list_empty(&gl->gl_lru)) {
12165 ++ if (test_bit(GLF_LRU, &gl->gl_flags)) {
12166 + list_del_init(&gl->gl_lru);
12167 + atomic_dec(&lru_count);
12168 + clear_bit(GLF_LRU, &gl->gl_flags);
12169 +@@ -1159,8 +1164,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
12170 + !test_bit(GLF_DEMOTE, &gl->gl_flags))
12171 + fast_path = 1;
12172 + }
12173 +- if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
12174 +- (glops->go_flags & GLOF_LRU))
12175 ++ if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
12176 + gfs2_glock_add_to_lru(gl);
12177 +
12178 + trace_gfs2_glock_queue(gh, 0);
12179 +@@ -1456,6 +1460,7 @@ __acquires(&lru_lock)
12180 + if (!spin_trylock(&gl->gl_lockref.lock)) {
12181 + add_back_to_lru:
12182 + list_add(&gl->gl_lru, &lru_list);
12183 ++ set_bit(GLF_LRU, &gl->gl_flags);
12184 + atomic_inc(&lru_count);
12185 + continue;
12186 + }
12187 +@@ -1463,7 +1468,6 @@ add_back_to_lru:
12188 + spin_unlock(&gl->gl_lockref.lock);
12189 + goto add_back_to_lru;
12190 + }
12191 +- clear_bit(GLF_LRU, &gl->gl_flags);
12192 + gl->gl_lockref.count++;
12193 + if (demote_ok(gl))
12194 + handle_callback(gl, LM_ST_UNLOCKED, 0, false);
12195 +@@ -1498,6 +1502,7 @@ static long gfs2_scan_glock_lru(int nr)
12196 + if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
12197 + list_move(&gl->gl_lru, &dispose);
12198 + atomic_dec(&lru_count);
12199 ++ clear_bit(GLF_LRU, &gl->gl_flags);
12200 + freed++;
12201 + continue;
12202 + }
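The glock LRU hunks make the GLF_LRU flag the single source of truth for lru_count: the flag is now set and cleared in the same critical sections that add a glock to or remove it from lru_list under lru_lock (including the shrinker's dispose path), so the counter can no longer drift when a glock is re-added to the LRU while racing with gfs2_scan_glock_lru().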
12203 +diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
12204 +index cdf07b408f54..539e8dc5a3f6 100644
12205 +--- a/fs/gfs2/incore.h
12206 ++++ b/fs/gfs2/incore.h
12207 +@@ -621,6 +621,7 @@ enum {
12208 + SDF_SKIP_DLM_UNLOCK = 8,
12209 + SDF_FORCE_AIL_FLUSH = 9,
12210 + SDF_AIL1_IO_ERROR = 10,
12211 ++ SDF_FS_FROZEN = 11,
12212 + };
12213 +
12214 + enum gfs2_freeze_state {
12215 +diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
12216 +index 31df26ed7854..69bd1597bacf 100644
12217 +--- a/fs/gfs2/lock_dlm.c
12218 ++++ b/fs/gfs2/lock_dlm.c
12219 +@@ -31,9 +31,10 @@
12220 + * @delta is the difference between the current rtt sample and the
12221 + * running average srtt. We add 1/8 of that to the srtt in order to
12222 + * update the current srtt estimate. The variance estimate is a bit
12223 +- * more complicated. We subtract the abs value of the @delta from
12224 +- * the current variance estimate and add 1/4 of that to the running
12225 +- * total.
12226 ++ * more complicated. We subtract the current variance estimate from
12227 ++ * the abs value of the @delta and add 1/4 of that to the running
12228 ++ * total. That's equivalent to 3/4 of the current variance
12229 ++ * estimate plus 1/4 of the abs of @delta.
12230 + *
12231 + * Note that the index points at the array entry containing the smoothed
12232 + * mean value, and the variance is always in the following entry
12233 +@@ -49,7 +50,7 @@ static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
12234 + s64 delta = sample - s->stats[index];
12235 + s->stats[index] += (delta >> 3);
12236 + index++;
12237 +- s->stats[index] += ((abs(delta) - s->stats[index]) >> 2);
12238 ++ s->stats[index] += (s64)(abs(delta) - s->stats[index]) >> 2;
12239 + }
12240 +
12241 + /**
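The gfs2_update_stats() cast fixes a signedness bug: the stats fields are unsigned 64-bit, so abs(delta) - s->stats[index] was computed in unsigned arithmetic and a negative difference wrapped to a huge value before the shift, corrupting the variance estimate. A small userspace model of the estimator follows; sample values and names are illustrative, and like the kernel it relies on arithmetic right shift of negative values, which common compilers provide:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* srtt += delta/8; var += (|delta| - var)/4, i.e. var becomes 3/4 of
 * the old variance plus 1/4 of |delta|.  The (int64_t) cast mirrors
 * the fix: without it, |delta| - var is evaluated as unsigned and a
 * negative difference wraps before the shift. */
static void update_stats(int64_t *srtt, uint64_t *var, int64_t sample)
{
        int64_t delta = sample - *srtt;

        *srtt += delta >> 3;
        *var += (int64_t)(llabs(delta) - *var) >> 2;
}

int main(void)
{
        int64_t srtt = 0;
        uint64_t var = 0;
        const int64_t samples[] = { 800, 1200, 240 };

        for (int i = 0; i < 3; i++) {
                update_stats(&srtt, &var, samples[i]);
                printf("srtt=%lld var=%llu\n",
                       (long long)srtt, (unsigned long long)var);
        }
        return 0;
}

The third sample exercises the negative-difference path: |delta| = 3 is far below var = 425, and the signed cast lets the variance decay to 319 instead of jumping to roughly 2^62.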
12242 +diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
12243 +index b8830fda51e8..0e04f87a7ddd 100644
12244 +--- a/fs/gfs2/log.c
12245 ++++ b/fs/gfs2/log.c
12246 +@@ -606,7 +606,8 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
12247 + gfs2_remove_from_ail(bd); /* drops ref on bh */
12248 + bd->bd_bh = NULL;
12249 + sdp->sd_log_num_revoke++;
12250 +- atomic_inc(&gl->gl_revokes);
12251 ++ if (atomic_inc_return(&gl->gl_revokes) == 1)
12252 ++ gfs2_glock_hold(gl);
12253 + set_bit(GLF_LFLUSH, &gl->gl_flags);
12254 + list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
12255 + }
12256 +diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
12257 +index 8722c60b11fe..4b280611246d 100644
12258 +--- a/fs/gfs2/lops.c
12259 ++++ b/fs/gfs2/lops.c
12260 +@@ -669,8 +669,10 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
12261 + bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
12262 + list_del_init(&bd->bd_list);
12263 + gl = bd->bd_gl;
12264 +- atomic_dec(&gl->gl_revokes);
12265 +- clear_bit(GLF_LFLUSH, &gl->gl_flags);
12266 ++ if (atomic_dec_return(&gl->gl_revokes) == 0) {
12267 ++ clear_bit(GLF_LFLUSH, &gl->gl_flags);
12268 ++ gfs2_glock_queue_put(gl);
12269 ++ }
12270 + kmem_cache_free(gfs2_bufdata_cachep, bd);
12271 + }
12272 + }
12273 +diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
12274 +index ca71163ff7cf..360206704a14 100644
12275 +--- a/fs/gfs2/super.c
12276 ++++ b/fs/gfs2/super.c
12277 +@@ -973,8 +973,7 @@ void gfs2_freeze_func(struct work_struct *work)
12278 + if (error) {
12279 + printk(KERN_INFO "GFS2: couldn't get freeze lock : %d\n", error);
12280 + gfs2_assert_withdraw(sdp, 0);
12281 +- }
12282 +- else {
12283 ++ } else {
12284 + atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
12285 + error = thaw_super(sb);
12286 + if (error) {
12287 +@@ -987,6 +986,8 @@ void gfs2_freeze_func(struct work_struct *work)
12288 + gfs2_glock_dq_uninit(&freeze_gh);
12289 + }
12290 + deactivate_super(sb);
12291 ++ clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
12292 ++ wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
12293 + return;
12294 + }
12295 +
12296 +@@ -1029,6 +1030,7 @@ static int gfs2_freeze(struct super_block *sb)
12297 + msleep(1000);
12298 + }
12299 + error = 0;
12300 ++ set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
12301 + out:
12302 + mutex_unlock(&sdp->sd_freeze_mutex);
12303 + return error;
12304 +@@ -1053,7 +1055,7 @@ static int gfs2_unfreeze(struct super_block *sb)
12305 +
12306 + gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
12307 + mutex_unlock(&sdp->sd_freeze_mutex);
12308 +- return 0;
12309 ++ return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
12310 + }
12311 +
12312 + /**
12313 +diff --git a/fs/internal.h b/fs/internal.h
12314 +index 6a8b71643af4..2e7362837a6e 100644
12315 +--- a/fs/internal.h
12316 ++++ b/fs/internal.h
12317 +@@ -89,9 +89,7 @@ extern int sb_prepare_remount_readonly(struct super_block *);
12318 +
12319 + extern void __init mnt_init(void);
12320 +
12321 +-extern int __mnt_want_write(struct vfsmount *);
12322 + extern int __mnt_want_write_file(struct file *);
12323 +-extern void __mnt_drop_write(struct vfsmount *);
12324 + extern void __mnt_drop_write_file(struct file *);
12325 +
12326 + /*
12327 +diff --git a/fs/io_uring.c b/fs/io_uring.c
12328 +index 84efb8956734..30a5687a17b6 100644
12329 +--- a/fs/io_uring.c
12330 ++++ b/fs/io_uring.c
12331 +@@ -2334,7 +2334,7 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
12332 + nr_cpu_ids);
12333 +
12334 + ret = -EINVAL;
12335 +- if (!cpu_possible(cpu))
12336 ++ if (!cpu_online(cpu))
12337 + goto err;
12338 +
12339 + ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
12340 +diff --git a/fs/nfs/client.c b/fs/nfs/client.c
12341 +index 90d71fda65ce..dfb796eab912 100644
12342 +--- a/fs/nfs/client.c
12343 ++++ b/fs/nfs/client.c
12344 +@@ -284,6 +284,7 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
12345 + struct nfs_client *clp;
12346 + const struct sockaddr *sap = data->addr;
12347 + struct nfs_net *nn = net_generic(data->net, nfs_net_id);
12348 ++ int error;
12349 +
12350 + again:
12351 + list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
12352 +@@ -296,9 +297,11 @@ again:
12353 + if (clp->cl_cons_state > NFS_CS_READY) {
12354 + refcount_inc(&clp->cl_count);
12355 + spin_unlock(&nn->nfs_client_lock);
12356 +- nfs_wait_client_init_complete(clp);
12357 ++ error = nfs_wait_client_init_complete(clp);
12358 + nfs_put_client(clp);
12359 + spin_lock(&nn->nfs_client_lock);
12360 ++ if (error < 0)
12361 ++ return ERR_PTR(error);
12362 + goto again;
12363 + }
12364 +
12365 +@@ -407,6 +410,8 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
12366 + clp = nfs_match_client(cl_init);
12367 + if (clp) {
12368 + spin_unlock(&nn->nfs_client_lock);
12369 ++ if (IS_ERR(clp))
12370 ++ return clp;
12371 + if (new)
12372 + new->rpc_ops->free_client(new);
12373 + return nfs_found_client(cl_init, clp);
12374 +diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
12375 +index 00d17198ee12..f10b660805fc 100644
12376 +--- a/fs/nfs/nfs4file.c
12377 ++++ b/fs/nfs/nfs4file.c
12378 +@@ -187,7 +187,7 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
12379 + bool same_inode = false;
12380 + int ret;
12381 +
12382 +- if (remap_flags & ~REMAP_FILE_ADVISORY)
12383 ++ if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
12384 + return -EINVAL;
12385 +
12386 + /* check alignment w.r.t. clone_blksize */
12387 +diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
12388 +index 82c129bfe58d..93872bb50230 100644
12389 +--- a/fs/overlayfs/dir.c
12390 ++++ b/fs/overlayfs/dir.c
12391 +@@ -260,7 +260,7 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode,
12392 + * hashed directory inode aliases.
12393 + */
12394 + inode = ovl_get_inode(dentry->d_sb, &oip);
12395 +- if (WARN_ON(IS_ERR(inode)))
12396 ++ if (IS_ERR(inode))
12397 + return PTR_ERR(inode);
12398 + } else {
12399 + WARN_ON(ovl_inode_real(inode) != d_inode(newdentry));
12400 +diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
12401 +index 3b7ed5d2279c..b48273e846ad 100644
12402 +--- a/fs/overlayfs/inode.c
12403 ++++ b/fs/overlayfs/inode.c
12404 +@@ -832,7 +832,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
12405 + int fsid = bylower ? oip->lowerpath->layer->fsid : 0;
12406 + bool is_dir, metacopy = false;
12407 + unsigned long ino = 0;
12408 +- int err = -ENOMEM;
12409 ++ int err = oip->newinode ? -EEXIST : -ENOMEM;
12410 +
12411 + if (!realinode)
12412 + realinode = d_inode(lowerdentry);
12413 +@@ -917,6 +917,7 @@ out:
12414 + return inode;
12415 +
12416 + out_err:
12417 ++ pr_warn_ratelimited("overlayfs: failed to get inode (%i)\n", err);
12418 + inode = ERR_PTR(err);
12419 + goto out;
12420 + }
12421 +diff --git a/include/crypto/hash.h b/include/crypto/hash.h
12422 +index 3b31c1b349ae..bc143b410359 100644
12423 +--- a/include/crypto/hash.h
12424 ++++ b/include/crypto/hash.h
12425 +@@ -152,7 +152,13 @@ struct shash_desc {
12426 + };
12427 +
12428 + #define HASH_MAX_DIGESTSIZE 64
12429 +-#define HASH_MAX_DESCSIZE 360
12430 ++
12431 ++/*
12432 ++ * Worst case is hmac(sha3-224-generic). Its context is a nested 'shash_desc'
12433 ++ * containing a 'struct sha3_state'.
12434 ++ */
12435 ++#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360)
12436 ++
12437 + #define HASH_MAX_STATESIZE 512
12438 +
12439 + #define SHASH_DESC_ON_STACK(shash, ctx) \
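The new definition accounts for the descriptor header that the old constant silently absorbed: SHASH_DESC_ON_STACK reserves a struct shash_desc followed by HASH_MAX_DESCSIZE bytes of algorithm context, and for the worst case named in the comment, hmac(sha3-224-generic), that context is itself a full nested shash_desc plus the struct sha3_state it wraps, so a flat 360 bytes under-counted by one descriptor header and could overrun the on-stack buffer.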
12440 +diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h
12441 +index f4ec2834bc22..7dfa67a15a04 100644
12442 +--- a/include/drm/tinydrm/mipi-dbi.h
12443 ++++ b/include/drm/tinydrm/mipi-dbi.h
12444 +@@ -43,7 +43,7 @@ struct mipi_dbi {
12445 + struct spi_device *spi;
12446 + bool enabled;
12447 + struct mutex cmdlock;
12448 +- int (*command)(struct mipi_dbi *mipi, u8 cmd, u8 *param, size_t num);
12449 ++ int (*command)(struct mipi_dbi *mipi, u8 *cmd, u8 *param, size_t num);
12450 + const u8 *read_commands;
12451 + struct gpio_desc *dc;
12452 + u16 *tx_buf;
12453 +@@ -82,6 +82,7 @@ u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len);
12454 +
12455 + int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val);
12456 + int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len);
12457 ++int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len);
12458 + int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
12459 + struct drm_rect *clip, bool swap);
12460 + /**
12461 +@@ -99,7 +100,7 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
12462 + #define mipi_dbi_command(mipi, cmd, seq...) \
12463 + ({ \
12464 + u8 d[] = { seq }; \
12465 +- mipi_dbi_command_buf(mipi, cmd, d, ARRAY_SIZE(d)); \
12466 ++ mipi_dbi_command_stackbuf(mipi, cmd, d, ARRAY_SIZE(d)); \
12467 + })
12468 +
12469 + #ifdef CONFIG_DEBUG_FS
12470 +diff --git a/include/linux/bio.h b/include/linux/bio.h
12471 +index e584673c1881..5becbafb84e8 100644
12472 +--- a/include/linux/bio.h
12473 ++++ b/include/linux/bio.h
12474 +@@ -224,7 +224,7 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count)
12475 + {
12476 + if (count != 1) {
12477 + bio->bi_flags |= (1 << BIO_REFFED);
12478 +- smp_mb__before_atomic();
12479 ++ smp_mb();
12480 + }
12481 + atomic_set(&bio->__bi_cnt, count);
12482 + }
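The barrier change in bio_cnt_set() is a correctness fix rather than a gratuitous strengthening: smp_mb__before_atomic() only orders against a following atomic read-modify-write operation, and atomic_set() is a plain store, not an RMW, so ordering the BIO_REFFED flag update before it requires a full smp_mb().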
12483 +diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
12484 +index 1c70803e9f77..7d57890cec67 100644
12485 +--- a/include/linux/cgroup-defs.h
12486 ++++ b/include/linux/cgroup-defs.h
12487 +@@ -349,6 +349,11 @@ struct cgroup {
12488 + * Dying cgroups are cgroups which were deleted by a user,
12489 + * but are still existing because someone else is holding a reference.
12490 + * max_descendants is a maximum allowed number of descent cgroups.
12491 ++ *
12492 ++ * nr_descendants and nr_dying_descendants are protected
12493 ++ * by cgroup_mutex and css_set_lock. It's fine to read them holding
12494 ++ * either cgroup_mutex or css_set_lock; for writing, both locks
12495 ++ * must be held.
12496 + */
12497 + int nr_descendants;
12498 + int nr_dying_descendants;
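The rule documented above is the usual dual-lock pattern: every writer must hold both cgroup_mutex and css_set_lock, so a reader holding either one is guaranteed that no update runs concurrently. The kernel/cgroup/cgroup.c hunks later in this patch add the previously missing css_set_lock around the nr_descendants/nr_dying_descendants updates, which until now were serialized by cgroup_mutex alone.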
12499 +diff --git a/include/linux/dax.h b/include/linux/dax.h
12500 +index 0dd316a74a29..becaea5f4488 100644
12501 +--- a/include/linux/dax.h
12502 ++++ b/include/linux/dax.h
12503 +@@ -19,6 +19,12 @@ struct dax_operations {
12504 + */
12505 + long (*direct_access)(struct dax_device *, pgoff_t, long,
12506 + void **, pfn_t *);
12507 ++ /*
12508 ++ * Validate whether this device is usable as an fsdax backing
12509 ++ * device.
12510 ++ */
12511 ++ bool (*dax_supported)(struct dax_device *, struct block_device *, int,
12512 ++ sector_t, sector_t);
12513 + /* copy_from_iter: required operation for fs-dax direct-i/o */
12514 + size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
12515 + struct iov_iter *);
12516 +@@ -75,6 +81,17 @@ static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
12517 + return __bdev_dax_supported(bdev, blocksize);
12518 + }
12519 +
12520 ++bool __generic_fsdax_supported(struct dax_device *dax_dev,
12521 ++ struct block_device *bdev, int blocksize, sector_t start,
12522 ++ sector_t sectors);
12523 ++static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
12524 ++ struct block_device *bdev, int blocksize, sector_t start,
12525 ++ sector_t sectors)
12526 ++{
12527 ++ return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
12528 ++ sectors);
12529 ++}
12530 ++
12531 + static inline struct dax_device *fs_dax_get_by_host(const char *host)
12532 + {
12533 + return dax_get_by_host(host);
12534 +@@ -99,6 +116,13 @@ static inline bool bdev_dax_supported(struct block_device *bdev,
12535 + return false;
12536 + }
12537 +
12538 ++static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
12539 ++ struct block_device *bdev, int blocksize, sector_t start,
12540 ++ sector_t sectors)
12541 ++{
12542 ++ return false;
12543 ++}
12544 ++
12545 + static inline struct dax_device *fs_dax_get_by_host(const char *host)
12546 + {
12547 + return NULL;
12548 +@@ -142,6 +166,8 @@ bool dax_alive(struct dax_device *dax_dev);
12549 + void *dax_get_private(struct dax_device *dax_dev);
12550 + long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
12551 + void **kaddr, pfn_t *pfn);
12552 ++bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
12553 ++ int blocksize, sector_t start, sector_t len);
12554 + size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
12555 + size_t bytes, struct iov_iter *i);
12556 + size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
12557 +diff --git a/include/linux/filter.h b/include/linux/filter.h
12558 +index 6074aa064b54..14ec3bdad9a9 100644
12559 +--- a/include/linux/filter.h
12560 ++++ b/include/linux/filter.h
12561 +@@ -746,6 +746,7 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
12562 + static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
12563 + {
12564 + set_memory_ro((unsigned long)hdr, hdr->pages);
12565 ++ set_memory_x((unsigned long)hdr, hdr->pages);
12566 + }
12567 +
12568 + static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
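This pairs with the kernel/module.c hunk later in the patch: as part of the W^X hardening backported in this release, module-range allocations are no longer mapped executable by default, so code that previously became executable-read-only via set_memory_ro() alone must now call set_memory_x() explicitly as well.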
12569 +diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
12570 +index e5194fc3983e..08246f068fd8 100644
12571 +--- a/include/linux/fscrypt.h
12572 ++++ b/include/linux/fscrypt.h
12573 +@@ -79,7 +79,8 @@ struct fscrypt_ctx {
12574 +
12575 + static inline bool fscrypt_has_encryption_key(const struct inode *inode)
12576 + {
12577 +- return (inode->i_crypt_info != NULL);
12578 ++ /* pairs with cmpxchg_release() in fscrypt_get_encryption_info() */
12579 ++ return READ_ONCE(inode->i_crypt_info) != NULL;
12580 + }
12581 +
12582 + static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
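Taken together, the fscrypt hunks implement one publication pattern: ->i_crypt_info is set exactly once with cmpxchg_release() after the structure is fully initialized, and all readers now go through READ_ONCE() (directly or via fscrypt_has_encryption_key()), so a reader that sees a non-NULL pointer also sees the initialized fields. A rough userspace analogue using C11 atomics; the kernel primitives differ in detail and the names below are illustrative:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct crypt_info {
        int master_key;
};

static _Atomic(struct crypt_info *) i_crypt_info;

/* writer: fill in the structure, then publish it with release
 * semantics; only the first publisher wins, as with the cmpxchg() */
static int set_crypt_info(struct crypt_info *ci)
{
        struct crypt_info *expected = NULL;

        return atomic_compare_exchange_strong_explicit(&i_crypt_info,
                        &expected, ci, memory_order_release,
                        memory_order_relaxed);
}

/* reader: a consume (dependency-ordered) load models READ_ONCE() plus
 * the address dependency on the returned pointer */
static struct crypt_info *get_crypt_info(void)
{
        return atomic_load_explicit(&i_crypt_info, memory_order_consume);
}

int main(void)
{
        static struct crypt_info ci = { .master_key = 42 };
        struct crypt_info *p;

        set_crypt_info(&ci);
        p = get_crypt_info();
        if (p)
                printf("key=%d\n", p->master_key);
        return 0;
}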
12583 +diff --git a/include/linux/genhd.h b/include/linux/genhd.h
12584 +index 06c0fd594097..69db1affedb0 100644
12585 +--- a/include/linux/genhd.h
12586 ++++ b/include/linux/genhd.h
12587 +@@ -610,6 +610,7 @@ struct unixware_disklabel {
12588 +
12589 + extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
12590 + extern void blk_free_devt(dev_t devt);
12591 ++extern void blk_invalidate_devt(dev_t devt);
12592 + extern dev_t blk_lookup_devt(const char *name, int partno);
12593 + extern char *disk_name (struct gendisk *hd, int partno, char *buf);
12594 +
12595 +diff --git a/include/linux/hid.h b/include/linux/hid.h
12596 +index f9707d1dcb58..ac0c70b4ce10 100644
12597 +--- a/include/linux/hid.h
12598 ++++ b/include/linux/hid.h
12599 +@@ -417,6 +417,7 @@ struct hid_global {
12600 +
12601 + struct hid_local {
12602 + unsigned usage[HID_MAX_USAGES]; /* usage array */
12603 ++ u8 usage_size[HID_MAX_USAGES]; /* usage size array */
12604 + unsigned collection_index[HID_MAX_USAGES]; /* collection index array */
12605 + unsigned usage_index;
12606 + unsigned usage_minimum;
12607 +diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
12608 +index 7e84351fa2c0..6e9fb1932dde 100644
12609 +--- a/include/linux/iio/adc/ad_sigma_delta.h
12610 ++++ b/include/linux/iio/adc/ad_sigma_delta.h
12611 +@@ -69,6 +69,7 @@ struct ad_sigma_delta {
12612 + bool irq_dis;
12613 +
12614 + bool bus_locked;
12615 ++ bool keep_cs_asserted;
12616 +
12617 + uint8_t comm;
12618 +
12619 +diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
12620 +index 96d8435421de..0ca77dd1429c 100644
12621 +--- a/include/linux/mlx5/eswitch.h
12622 ++++ b/include/linux/mlx5/eswitch.h
12623 +@@ -35,7 +35,7 @@ struct mlx5_eswitch_rep_if {
12624 + void (*unload)(struct mlx5_eswitch_rep *rep);
12625 + void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
12626 + void *priv;
12627 +- u8 state;
12628 ++ atomic_t state;
12629 + };
12630 +
12631 + struct mlx5_eswitch_rep {
12632 +diff --git a/include/linux/mount.h b/include/linux/mount.h
12633 +index 9197ddbf35fb..bf8cc4108b8f 100644
12634 +--- a/include/linux/mount.h
12635 ++++ b/include/linux/mount.h
12636 +@@ -87,6 +87,8 @@ extern bool mnt_may_suid(struct vfsmount *mnt);
12637 +
12638 + struct path;
12639 + extern struct vfsmount *clone_private_mount(const struct path *path);
12640 ++extern int __mnt_want_write(struct vfsmount *);
12641 ++extern void __mnt_drop_write(struct vfsmount *);
12642 +
12643 + struct file_system_type;
12644 + extern struct vfsmount *fc_mount(struct fs_context *fc);
12645 +diff --git a/include/linux/overflow.h b/include/linux/overflow.h
12646 +index 40b48e2133cb..15eb85de9226 100644
12647 +--- a/include/linux/overflow.h
12648 ++++ b/include/linux/overflow.h
12649 +@@ -36,6 +36,12 @@
12650 + #define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
12651 + #define type_min(T) ((T)((T)-type_max(T)-(T)1))
12652 +
12653 ++/*
12654 ++ * Avoids triggering -Wtype-limits compilation warning,
12655 ++ * while using unsigned data types to check a < 0.
12656 ++ */
12657 ++#define is_non_negative(a) ((a) > 0 || (a) == 0)
12658 ++#define is_negative(a) (!(is_non_negative(a)))
12659 +
12660 + #ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
12661 + /*
12662 +@@ -227,10 +233,10 @@
12663 + typeof(d) _d = d; \
12664 + u64 _a_full = _a; \
12665 + unsigned int _to_shift = \
12666 +- _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \
12667 ++ is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0; \
12668 + *_d = (_a_full << _to_shift); \
12669 +- (_to_shift != _s || *_d < 0 || _a < 0 || \
12670 +- (*_d >> _to_shift) != _a); \
12671 ++ (_to_shift != _s || is_negative(*_d) || is_negative(_a) || \
12672 ++ (*_d >> _to_shift) != _a); \
12673 + })
12674 +
12675 + /**
12676 +diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
12677 +index 6cdb1db776cf..922bb6848813 100644
12678 +--- a/include/linux/rcupdate.h
12679 ++++ b/include/linux/rcupdate.h
12680 +@@ -878,9 +878,11 @@ static inline void rcu_head_init(struct rcu_head *rhp)
12681 + static inline bool
12682 + rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
12683 + {
12684 +- if (READ_ONCE(rhp->func) == f)
12685 ++ rcu_callback_t func = READ_ONCE(rhp->func);
12686 ++
12687 ++ if (func == f)
12688 + return true;
12689 +- WARN_ON_ONCE(READ_ONCE(rhp->func) != (rcu_callback_t)~0L);
12690 ++ WARN_ON_ONCE(func != (rcu_callback_t)~0L);
12691 + return false;
12692 + }
12693 +
12694 +diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
12695 +index f3f76051e8b0..aaf3cee70439 100644
12696 +--- a/include/linux/regulator/consumer.h
12697 ++++ b/include/linux/regulator/consumer.h
12698 +@@ -478,6 +478,11 @@ static inline int regulator_is_supported_voltage(struct regulator *regulator,
12699 + return 0;
12700 + }
12701 +
12702 ++static inline unsigned int regulator_get_linear_step(struct regulator *regulator)
12703 ++{
12704 ++ return 0;
12705 ++}
12706 ++
12707 + static inline int regulator_set_current_limit(struct regulator *regulator,
12708 + int min_uA, int max_uA)
12709 + {
12710 +diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
12711 +index d0884b525001..9d1bc65d226c 100644
12712 +--- a/include/linux/smpboot.h
12713 ++++ b/include/linux/smpboot.h
12714 +@@ -29,7 +29,7 @@ struct smpboot_thread_data;
12715 + * @thread_comm: The base name of the thread
12716 + */
12717 + struct smp_hotplug_thread {
12718 +- struct task_struct __percpu **store;
12719 ++ struct task_struct * __percpu *store;
12720 + struct list_head list;
12721 + int (*thread_should_run)(unsigned int cpu);
12722 + void (*thread_fn)(unsigned int cpu);
12723 +diff --git a/include/linux/time64.h b/include/linux/time64.h
12724 +index f38d382ffec1..a620ee610b9f 100644
12725 +--- a/include/linux/time64.h
12726 ++++ b/include/linux/time64.h
12727 +@@ -33,6 +33,17 @@ struct itimerspec64 {
12728 + #define KTIME_MAX ((s64)~((u64)1 << 63))
12729 + #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
12730 +
12731 ++/*
12732 ++ * Limits for settimeofday():
12733 ++ *
12734 ++ * To prevent setting the time close to the wraparound point, time setting
12735 ++ * is limited so a reasonable uptime can be accommodated. Uptime of 30 years
12736 ++ * should be really sufficient, which means the cutoff is 2232. At that
12737 ++ * point the cutoff is just a small part of the larger problem.
12738 ++ */
12739 ++#define TIME_UPTIME_SEC_MAX (30LL * 365 * 24 * 3600)
12740 ++#define TIME_SETTOD_SEC_MAX (KTIME_SEC_MAX - TIME_UPTIME_SEC_MAX)
12741 ++
12742 + static inline int timespec64_equal(const struct timespec64 *a,
12743 + const struct timespec64 *b)
12744 + {
12745 +@@ -100,6 +111,16 @@ static inline bool timespec64_valid_strict(const struct timespec64 *ts)
12746 + return true;
12747 + }
12748 +
12749 ++static inline bool timespec64_valid_settod(const struct timespec64 *ts)
12750 ++{
12751 ++ if (!timespec64_valid(ts))
12752 ++ return false;
12753 ++ /* Disallow values which cause overflow issues vs. CLOCK_REALTIME */
12754 ++ if ((unsigned long long)ts->tv_sec >= TIME_SETTOD_SEC_MAX)
12755 ++ return false;
12756 ++ return true;
12757 ++}
12758 ++
12759 + /**
12760 + * timespec64_to_ns - Convert timespec64 to nanoseconds
12761 + * @ts: pointer to the timespec64 variable to be converted
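The settimeofday() cutoff works out as follows: KTIME_MAX is 2^63 - 1 nanoseconds, so KTIME_SEC_MAX is about 9.22e18 / 1e9 ≈ 2.92e11 seconds, roughly 292 years past the 1970 epoch, i.e. the year 2262. TIME_UPTIME_SEC_MAX reserves 30 * 365 * 24 * 3600 = 946,080,000 seconds (about 30 years) of headroom below that for the monotonic/boottime clocks, which places the wall-clock cutoff near 2232, matching the comment.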
12762 +diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
12763 +index 910f3d469005..65108819de5a 100644
12764 +--- a/include/media/videobuf2-core.h
12765 ++++ b/include/media/videobuf2-core.h
12766 +@@ -595,6 +595,7 @@ struct vb2_queue {
12767 + unsigned int start_streaming_called:1;
12768 + unsigned int error:1;
12769 + unsigned int waiting_for_buffers:1;
12770 ++ unsigned int waiting_in_dqbuf:1;
12771 + unsigned int is_multiplanar:1;
12772 + unsigned int is_output:1;
12773 + unsigned int copy_timestamp:1;
12774 +diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
12775 +index fbba43e9bef5..9a5330eed794 100644
12776 +--- a/include/net/bluetooth/hci.h
12777 ++++ b/include/net/bluetooth/hci.h
12778 +@@ -282,6 +282,7 @@ enum {
12779 + HCI_FORCE_BREDR_SMP,
12780 + HCI_FORCE_STATIC_ADDR,
12781 + HCI_LL_RPA_RESOLUTION,
12782 ++ HCI_CMD_PENDING,
12783 +
12784 + __HCI_NUM_FLAGS,
12785 + };
12786 +diff --git a/include/xen/xen.h b/include/xen/xen.h
12787 +index 19d032373de5..19a72f591e2b 100644
12788 +--- a/include/xen/xen.h
12789 ++++ b/include/xen/xen.h
12790 +@@ -43,8 +43,10 @@ extern struct hvm_start_info pvh_start_info;
12791 + #endif /* CONFIG_XEN_DOM0 */
12792 +
12793 + struct bio_vec;
12794 ++struct page;
12795 ++
12796 + bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
12797 +- const struct bio_vec *vec2);
12798 ++ const struct page *page);
12799 +
12800 + #if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
12801 + extern u64 xen_saved_max_mem_size;
12802 +diff --git a/kernel/acct.c b/kernel/acct.c
12803 +index addf7732fb56..81f9831a7859 100644
12804 +--- a/kernel/acct.c
12805 ++++ b/kernel/acct.c
12806 +@@ -227,7 +227,7 @@ static int acct_on(struct filename *pathname)
12807 + filp_close(file, NULL);
12808 + return PTR_ERR(internal);
12809 + }
12810 +- err = mnt_want_write(internal);
12811 ++ err = __mnt_want_write(internal);
12812 + if (err) {
12813 + mntput(internal);
12814 + kfree(acct);
12815 +@@ -252,7 +252,7 @@ static int acct_on(struct filename *pathname)
12816 + old = xchg(&ns->bacct, &acct->pin);
12817 + mutex_unlock(&acct->lock);
12818 + pin_kill(old);
12819 +- mnt_drop_write(mnt);
12820 ++ __mnt_drop_write(mnt);
12821 + mntput(mnt);
12822 + return 0;
12823 + }
12824 +diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
12825 +index 63f8b3f26fab..3ac71c4fda49 100644
12826 +--- a/kernel/auditfilter.c
12827 ++++ b/kernel/auditfilter.c
12828 +@@ -1114,22 +1114,24 @@ int audit_rule_change(int type, int seq, void *data, size_t datasz)
12829 + int err = 0;
12830 + struct audit_entry *entry;
12831 +
12832 +- entry = audit_data_to_entry(data, datasz);
12833 +- if (IS_ERR(entry))
12834 +- return PTR_ERR(entry);
12835 +-
12836 + switch (type) {
12837 + case AUDIT_ADD_RULE:
12838 ++ entry = audit_data_to_entry(data, datasz);
12839 ++ if (IS_ERR(entry))
12840 ++ return PTR_ERR(entry);
12841 + err = audit_add_rule(entry);
12842 + audit_log_rule_change("add_rule", &entry->rule, !err);
12843 + break;
12844 + case AUDIT_DEL_RULE:
12845 ++ entry = audit_data_to_entry(data, datasz);
12846 ++ if (IS_ERR(entry))
12847 ++ return PTR_ERR(entry);
12848 + err = audit_del_rule(entry);
12849 + audit_log_rule_change("remove_rule", &entry->rule, !err);
12850 + break;
12851 + default:
12852 +- err = -EINVAL;
12853 + WARN_ON(1);
12854 ++ return -EINVAL;
12855 + }
12856 +
12857 + if (err || type == AUDIT_DEL_RULE) {
12858 +diff --git a/kernel/auditsc.c b/kernel/auditsc.c
12859 +index d1eab1d4a930..fa7b8047aab8 100644
12860 +--- a/kernel/auditsc.c
12861 ++++ b/kernel/auditsc.c
12862 +@@ -840,6 +840,13 @@ static inline void audit_proctitle_free(struct audit_context *context)
12863 + context->proctitle.len = 0;
12864 + }
12865 +
12866 ++static inline void audit_free_module(struct audit_context *context)
12867 ++{
12868 ++ if (context->type == AUDIT_KERN_MODULE) {
12869 ++ kfree(context->module.name);
12870 ++ context->module.name = NULL;
12871 ++ }
12872 ++}
12873 + static inline void audit_free_names(struct audit_context *context)
12874 + {
12875 + struct audit_names *n, *next;
12876 +@@ -923,6 +930,7 @@ int audit_alloc(struct task_struct *tsk)
12877 +
12878 + static inline void audit_free_context(struct audit_context *context)
12879 + {
12880 ++ audit_free_module(context);
12881 + audit_free_names(context);
12882 + unroll_tree_refs(context, NULL, 0);
12883 + free_tree_refs(context);
12884 +@@ -1266,7 +1274,6 @@ static void show_special(struct audit_context *context, int *call_panic)
12885 + audit_log_format(ab, "name=");
12886 + if (context->module.name) {
12887 + audit_log_untrustedstring(ab, context->module.name);
12888 +- kfree(context->module.name);
12889 + } else
12890 + audit_log_format(ab, "(null)");
12891 +
12892 +@@ -1697,6 +1704,7 @@ void __audit_syscall_exit(int success, long return_code)
12893 + context->in_syscall = 0;
12894 + context->prio = context->state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0;
12895 +
12896 ++ audit_free_module(context);
12897 + audit_free_names(context);
12898 + unroll_tree_refs(context, NULL, 0);
12899 + audit_free_aux(context);
12900 +diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
12901 +index 191b79948424..1e525d70f833 100644
12902 +--- a/kernel/bpf/devmap.c
12903 ++++ b/kernel/bpf/devmap.c
12904 +@@ -164,6 +164,9 @@ static void dev_map_free(struct bpf_map *map)
12905 + bpf_clear_redirect_map(map);
12906 + synchronize_rcu();
12907 +
12908 ++ /* Make sure prior __dev_map_entry_free() have completed. */
12909 ++ rcu_barrier();
12910 ++
12911 + /* To ensure all pending flush operations have completed wait for flush
12912 + * bitmap to indicate all flush_needed bits to be zero on _all_ cpus.
12913 + * Because the above synchronize_rcu() ensures the map is disconnected
12914 +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
12915 +index 3f2b4bde0f9c..9fcf6338ea5f 100644
12916 +--- a/kernel/cgroup/cgroup.c
12917 ++++ b/kernel/cgroup/cgroup.c
12918 +@@ -4781,9 +4781,11 @@ static void css_release_work_fn(struct work_struct *work)
12919 + if (cgroup_on_dfl(cgrp))
12920 + cgroup_rstat_flush(cgrp);
12921 +
12922 ++ spin_lock_irq(&css_set_lock);
12923 + for (tcgrp = cgroup_parent(cgrp); tcgrp;
12924 + tcgrp = cgroup_parent(tcgrp))
12925 + tcgrp->nr_dying_descendants--;
12926 ++ spin_unlock_irq(&css_set_lock);
12927 +
12928 + cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
12929 + cgrp->id = -1;
12930 +@@ -5001,12 +5003,14 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
12931 + if (ret)
12932 + goto out_psi_free;
12933 +
12934 ++ spin_lock_irq(&css_set_lock);
12935 + for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
12936 + cgrp->ancestor_ids[tcgrp->level] = tcgrp->id;
12937 +
12938 + if (tcgrp != cgrp)
12939 + tcgrp->nr_descendants++;
12940 + }
12941 ++ spin_unlock_irq(&css_set_lock);
12942 +
12943 + if (notify_on_release(parent))
12944 + set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
12945 +@@ -5291,10 +5295,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
12946 + if (parent && cgroup_is_threaded(cgrp))
12947 + parent->nr_threaded_children--;
12948 +
12949 ++ spin_lock_irq(&css_set_lock);
12950 + for (tcgrp = cgroup_parent(cgrp); tcgrp; tcgrp = cgroup_parent(tcgrp)) {
12951 + tcgrp->nr_descendants--;
12952 + tcgrp->nr_dying_descendants++;
12953 + }
12954 ++ spin_unlock_irq(&css_set_lock);
12955 +
12956 + cgroup1_check_for_release(parent);
12957 +
12958 +diff --git a/kernel/irq_work.c b/kernel/irq_work.c
12959 +index 6b7cdf17ccf8..73288914ed5e 100644
12960 +--- a/kernel/irq_work.c
12961 ++++ b/kernel/irq_work.c
12962 +@@ -56,61 +56,70 @@ void __weak arch_irq_work_raise(void)
12963 + */
12964 + }
12965 +
12966 +-/*
12967 +- * Enqueue the irq_work @work on @cpu unless it's already pending
12968 +- * somewhere.
12969 +- *
12970 +- * Can be re-enqueued while the callback is still in progress.
12971 +- */
12972 +-bool irq_work_queue_on(struct irq_work *work, int cpu)
12973 ++/* Enqueue on current CPU, work must already be claimed and preempt disabled */
12974 ++static void __irq_work_queue_local(struct irq_work *work)
12975 + {
12976 +- /* All work should have been flushed before going offline */
12977 +- WARN_ON_ONCE(cpu_is_offline(cpu));
12978 +-
12979 +-#ifdef CONFIG_SMP
12980 +-
12981 +- /* Arch remote IPI send/receive backend aren't NMI safe */
12982 +- WARN_ON_ONCE(in_nmi());
12983 ++ /* If the work is "lazy", handle it from next tick if any */
12984 ++ if (work->flags & IRQ_WORK_LAZY) {
12985 ++ if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
12986 ++ tick_nohz_tick_stopped())
12987 ++ arch_irq_work_raise();
12988 ++ } else {
12989 ++ if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
12990 ++ arch_irq_work_raise();
12991 ++ }
12992 ++}
12993 +
12994 ++/* Enqueue the irq work @work on the current CPU */
12995 ++bool irq_work_queue(struct irq_work *work)
12996 ++{
12997 + /* Only queue if not already pending */
12998 + if (!irq_work_claim(work))
12999 + return false;
13000 +
13001 +- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
13002 +- arch_send_call_function_single_ipi(cpu);
13003 +-
13004 +-#else /* #ifdef CONFIG_SMP */
13005 +- irq_work_queue(work);
13006 +-#endif /* #else #ifdef CONFIG_SMP */
13007 ++ /* Queue the entry and raise the IPI if needed. */
13008 ++ preempt_disable();
13009 ++ __irq_work_queue_local(work);
13010 ++ preempt_enable();
13011 +
13012 + return true;
13013 + }
13014 ++EXPORT_SYMBOL_GPL(irq_work_queue);
13015 +
13016 +-/* Enqueue the irq work @work on the current CPU */
13017 +-bool irq_work_queue(struct irq_work *work)
13018 ++/*
13019 ++ * Enqueue the irq_work @work on @cpu unless it's already pending
13020 ++ * somewhere.
13021 ++ *
13022 ++ * Can be re-enqueued while the callback is still in progress.
13023 ++ */
13024 ++bool irq_work_queue_on(struct irq_work *work, int cpu)
13025 + {
13026 ++#ifndef CONFIG_SMP
13027 ++ return irq_work_queue(work);
13028 ++
13029 ++#else /* CONFIG_SMP: */
13030 ++ /* All work should have been flushed before going offline */
13031 ++ WARN_ON_ONCE(cpu_is_offline(cpu));
13032 ++
13033 + /* Only queue if not already pending */
13034 + if (!irq_work_claim(work))
13035 + return false;
13036 +
13037 +- /* Queue the entry and raise the IPI if needed. */
13038 + preempt_disable();
13039 +-
13040 +- /* If the work is "lazy", handle it from next tick if any */
13041 +- if (work->flags & IRQ_WORK_LAZY) {
13042 +- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
13043 +- tick_nohz_tick_stopped())
13044 +- arch_irq_work_raise();
13045 ++ if (cpu != smp_processor_id()) {
13046 ++ /* Arch remote IPI send/receive backend aren't NMI safe */
13047 ++ WARN_ON_ONCE(in_nmi());
13048 ++ if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
13049 ++ arch_send_call_function_single_ipi(cpu);
13050 + } else {
13051 +- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
13052 +- arch_irq_work_raise();
13053 ++ __irq_work_queue_local(work);
13054 + }
13055 +-
13056 + preempt_enable();
13057 +
13058 + return true;
13059 ++#endif /* CONFIG_SMP */
13060 + }
13061 +-EXPORT_SYMBOL_GPL(irq_work_queue);
13062 ++
13063 +
13064 + bool irq_work_needs_cpu(void)
13065 + {
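The irq_work reshuffle factors the CPU-local enqueue into __irq_work_queue_local() so that irq_work_queue_on() can use it when the target is the current CPU: previously the remote path would send an IPI to self, and the WARN_ON_ONCE(in_nmi()) applied even to local queueing, where the arch IPI backend is never involved. Note also the llist_add() idiom both paths rely on: it returns true only when the list was previously empty, so the IPI or tick raise happens once per batch rather than once per queued work item.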
13066 +diff --git a/kernel/jump_label.c b/kernel/jump_label.c
13067 +index bad96b476eb6..a799b1ac6b2f 100644
13068 +--- a/kernel/jump_label.c
13069 ++++ b/kernel/jump_label.c
13070 +@@ -206,6 +206,8 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key,
13071 + unsigned long rate_limit,
13072 + struct delayed_work *work)
13073 + {
13074 ++ int val;
13075 ++
13076 + lockdep_assert_cpus_held();
13077 +
13078 + /*
13079 +@@ -215,17 +217,20 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key,
13080 + * returns is unbalanced, because all other static_key_slow_inc()
13081 + * instances block while the update is in progress.
13082 + */
13083 +- if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
13084 +- WARN(atomic_read(&key->enabled) < 0,
13085 +- "jump label: negative count!\n");
13086 ++ val = atomic_fetch_add_unless(&key->enabled, -1, 1);
13087 ++ if (val != 1) {
13088 ++ WARN(val < 0, "jump label: negative count!\n");
13089 + return;
13090 + }
13091 +
13092 +- if (rate_limit) {
13093 +- atomic_inc(&key->enabled);
13094 +- schedule_delayed_work(work, rate_limit);
13095 +- } else {
13096 +- jump_label_update(key);
13097 ++ jump_label_lock();
13098 ++ if (atomic_dec_and_test(&key->enabled)) {
13099 ++ if (rate_limit) {
13100 ++ atomic_inc(&key->enabled);
13101 ++ schedule_delayed_work(work, rate_limit);
13102 ++ } else {
13103 ++ jump_label_update(key);
13104 ++ }
13105 + }
13106 + jump_label_unlock();
13107 + }
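The jump_label fix narrows the race window by doing the common decrement with atomic_fetch_add_unless(): drop key->enabled by one unless it currently reads 1, and only fall back to taking jump_label_lock() when the count really might hit zero. A rough userspace rendering of that primitive and the resulting fast/slow split, for illustration only:

#include <stdatomic.h>
#include <stdio.h>

/* model of atomic_fetch_add_unless(v, a, u): add @a unless the value
 * is @u; returns the old value either way */
static int fetch_add_unless(atomic_int *v, int a, int u)
{
        int c = atomic_load(v);

        while (c != u) {
                if (atomic_compare_exchange_weak(v, &c, c + a))
                        break;
        }
        return c;
}

int main(void)
{
        atomic_int enabled = 2;
        int old = fetch_add_unless(&enabled, -1, 1);

        if (old != 1)
                printf("fast path: %d -> %d, no lock taken\n", old, old - 1);
        else
                printf("slow path: retry the decrement under the mutex\n");
        return 0;
}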
13108 +diff --git a/kernel/module.c b/kernel/module.c
13109 +index 0b9aa8ab89f0..2b2845ae983e 100644
13110 +--- a/kernel/module.c
13111 ++++ b/kernel/module.c
13112 +@@ -1950,8 +1950,13 @@ void module_enable_ro(const struct module *mod, bool after_init)
13113 + return;
13114 +
13115 + frob_text(&mod->core_layout, set_memory_ro);
13116 ++ frob_text(&mod->core_layout, set_memory_x);
13117 ++
13118 + frob_rodata(&mod->core_layout, set_memory_ro);
13119 ++
13120 + frob_text(&mod->init_layout, set_memory_ro);
13121 ++ frob_text(&mod->init_layout, set_memory_x);
13122 ++
13123 + frob_rodata(&mod->init_layout, set_memory_ro);
13124 +
13125 + if (after_init)
13126 +diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
13127 +index c29761152874..7a6890b23c5f 100644
13128 +--- a/kernel/rcu/rcuperf.c
13129 ++++ b/kernel/rcu/rcuperf.c
13130 +@@ -494,6 +494,10 @@ rcu_perf_cleanup(void)
13131 +
13132 + if (torture_cleanup_begin())
13133 + return;
13134 ++ if (!cur_ops) {
13135 ++ torture_cleanup_end();
13136 ++ return;
13137 ++ }
13138 +
13139 + if (reader_tasks) {
13140 + for (i = 0; i < nrealreaders; i++)
13141 +@@ -614,6 +618,7 @@ rcu_perf_init(void)
13142 + pr_cont("\n");
13143 + WARN_ON(!IS_MODULE(CONFIG_RCU_PERF_TEST));
13144 + firsterr = -EINVAL;
13145 ++ cur_ops = NULL;
13146 + goto unwind;
13147 + }
13148 + if (cur_ops->init)
13149 +diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
13150 +index f14d1b18a74f..a2efe27317be 100644
13151 +--- a/kernel/rcu/rcutorture.c
13152 ++++ b/kernel/rcu/rcutorture.c
13153 +@@ -2094,6 +2094,10 @@ rcu_torture_cleanup(void)
13154 + cur_ops->cb_barrier();
13155 + return;
13156 + }
13157 ++ if (!cur_ops) {
13158 ++ torture_cleanup_end();
13159 ++ return;
13160 ++ }
13161 +
13162 + rcu_torture_barrier_cleanup();
13163 + torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
13164 +@@ -2267,6 +2271,7 @@ rcu_torture_init(void)
13165 + pr_cont("\n");
13166 + WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
13167 + firsterr = -EINVAL;
13168 ++ cur_ops = NULL;
13169 + goto unwind;
13170 + }
13171 + if (cur_ops->fqs == NULL && fqs_duration != 0) {
13172 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
13173 +index 4778c48a7fda..a75ad50b5e2f 100644
13174 +--- a/kernel/sched/core.c
13175 ++++ b/kernel/sched/core.c
13176 +@@ -6559,6 +6559,8 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset)
13177 + static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
13178 + struct cftype *cftype, u64 shareval)
13179 + {
13180 ++ if (shareval > scale_load_down(ULONG_MAX))
13181 ++ shareval = MAX_SHARES;
13182 + return sched_group_set_shares(css_tg(css), scale_load(shareval));
13183 + }
13184 +
13185 +@@ -6661,8 +6663,10 @@ int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
13186 + period = ktime_to_ns(tg->cfs_bandwidth.period);
13187 + if (cfs_quota_us < 0)
13188 + quota = RUNTIME_INF;
13189 +- else
13190 ++ else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
13191 + quota = (u64)cfs_quota_us * NSEC_PER_USEC;
13192 ++ else
13193 ++ return -EINVAL;
13194 +
13195 + return tg_set_cfs_bandwidth(tg, period, quota);
13196 + }
13197 +@@ -6684,6 +6688,9 @@ int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
13198 + {
13199 + u64 quota, period;
13200 +
13201 ++ if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
13202 ++ return -EINVAL;
13203 ++
13204 + period = (u64)cfs_period_us * NSEC_PER_USEC;
13205 + quota = tg->cfs_bandwidth.quota;
13206 +
13207 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
13208 +index 35f3ea375084..232491e3ed0d 100644
13209 +--- a/kernel/sched/fair.c
13210 ++++ b/kernel/sched/fair.c
13211 +@@ -9551,22 +9551,26 @@ static inline int on_null_domain(struct rq *rq)
13212 + * - When one of the busy CPUs notice that there may be an idle rebalancing
13213 + * needed, they will kick the idle load balancer, which then does idle
13214 + * load balancing for all the idle CPUs.
13215 ++ * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED is not
13216 ++ * set anywhere yet.
13217 + */
13218 +
13219 + static inline int find_new_ilb(void)
13220 + {
13221 +- int ilb = cpumask_first(nohz.idle_cpus_mask);
13222 ++ int ilb;
13223 +
13224 +- if (ilb < nr_cpu_ids && idle_cpu(ilb))
13225 +- return ilb;
13226 ++ for_each_cpu_and(ilb, nohz.idle_cpus_mask,
13227 ++ housekeeping_cpumask(HK_FLAG_MISC)) {
13228 ++ if (idle_cpu(ilb))
13229 ++ return ilb;
13230 ++ }
13231 +
13232 + return nr_cpu_ids;
13233 + }
13234 +
13235 + /*
13236 +- * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
13237 +- * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
13238 +- * CPU (if there is one).
13239 ++ * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
13240 ++ * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
13241 + */
13242 + static void kick_ilb(unsigned int flags)
13243 + {
13244 +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
13245 +index 90fa23d36565..1e6b909dca36 100644
13246 +--- a/kernel/sched/rt.c
13247 ++++ b/kernel/sched/rt.c
13248 +@@ -2555,6 +2555,8 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
13249 + rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
13250 + if (rt_runtime_us < 0)
13251 + rt_runtime = RUNTIME_INF;
13252 ++ else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
13253 ++ return -EINVAL;
13254 +
13255 + return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
13256 + }
13257 +@@ -2575,6 +2577,9 @@ int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
13258 + {
13259 + u64 rt_runtime, rt_period;
13260 +
13261 ++ if (rt_period_us > U64_MAX / NSEC_PER_USEC)
13262 ++ return -EINVAL;
13263 ++
13264 + rt_period = rt_period_us * NSEC_PER_USEC;
13265 + rt_runtime = tg->rt_bandwidth.rt_runtime;
13266 +
13267 +diff --git a/kernel/time/time.c b/kernel/time/time.c
13268 +index c3f756f8534b..86656bbac232 100644
13269 +--- a/kernel/time/time.c
13270 ++++ b/kernel/time/time.c
13271 +@@ -171,7 +171,7 @@ int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz
13272 + static int firsttime = 1;
13273 + int error = 0;
13274 +
13275 +- if (tv && !timespec64_valid(tv))
13276 ++ if (tv && !timespec64_valid_settod(tv))
13277 + return -EINVAL;
13278 +
13279 + error = security_settime64(tv, tz);
13280 +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
13281 +index f986e1918d12..f136c56c2805 100644
13282 +--- a/kernel/time/timekeeping.c
13283 ++++ b/kernel/time/timekeeping.c
13284 +@@ -1221,7 +1221,7 @@ int do_settimeofday64(const struct timespec64 *ts)
13285 + unsigned long flags;
13286 + int ret = 0;
13287 +
13288 +- if (!timespec64_valid_strict(ts))
13289 ++ if (!timespec64_valid_settod(ts))
13290 + return -EINVAL;
13291 +
13292 + raw_spin_lock_irqsave(&timekeeper_lock, flags);
13293 +@@ -1278,7 +1278,7 @@ static int timekeeping_inject_offset(const struct timespec64 *ts)
13294 + /* Make sure the proposed value is valid */
13295 + tmp = timespec64_add(tk_xtime(tk), *ts);
13296 + if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
13297 +- !timespec64_valid_strict(&tmp)) {
13298 ++ !timespec64_valid_settod(&tmp)) {
13299 + ret = -EINVAL;
13300 + goto error;
13301 + }
13302 +@@ -1527,7 +1527,7 @@ void __init timekeeping_init(void)
13303 + unsigned long flags;
13304 +
13305 + read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
13306 +- if (timespec64_valid_strict(&wall_time) &&
13307 ++ if (timespec64_valid_settod(&wall_time) &&
13308 + timespec64_to_ns(&wall_time) > 0) {
13309 + persistent_clock_exists = true;
13310 + } else if (timespec64_to_ns(&wall_time) != 0) {
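
Every set-time path in time.c and timekeeping.c now funnels through timespec64_valid_settod() rather than timespec64_valid_strict(). A hedged sketch of what such a check plausibly looks like, assuming the settod variant keeps the basic validity rules but also leaves headroom below KTIME_SEC_MAX so later CLOCK_REALTIME arithmetic cannot overflow; the exact margin below is an assumption, not taken from the patch:

    #include <stdbool.h>
    #include <stdint.h>

    struct timespec64 { int64_t tv_sec; long tv_nsec; };

    #define NSEC_PER_SEC  1000000000LL
    #define KTIME_SEC_MAX (INT64_MAX / NSEC_PER_SEC)

    static bool timespec64_valid(const struct timespec64 *ts)
    {
        return ts->tv_sec >= 0 && (unsigned long)ts->tv_nsec < NSEC_PER_SEC;
    }

    /* Assumed shape of the settod check: basic validity plus enough
     * headroom that adding boot offsets cannot overflow ktime_t. */
    static bool timespec64_valid_settod(const struct timespec64 *ts)
    {
        const int64_t headroom = 30LL * 365 * 24 * 3600;    /* assumed margin */

        return timespec64_valid(ts) && ts->tv_sec < KTIME_SEC_MAX - headroom;
    }

    int main(void)
    {
        struct timespec64 ts = { .tv_sec = KTIME_SEC_MAX, .tv_nsec = 0 };

        return timespec64_valid_settod(&ts);    /* exits 0: value rejected */
    }
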
13311 +diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
13312 +index 4ad967453b6f..3ea65cdff30d 100644
13313 +--- a/kernel/trace/trace_branch.c
13314 ++++ b/kernel/trace/trace_branch.c
13315 +@@ -205,6 +205,8 @@ void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
13316 + void ftrace_likely_update(struct ftrace_likely_data *f, int val,
13317 + int expect, int is_constant)
13318 + {
13319 ++ unsigned long flags = user_access_save();
13320 ++
13321 + /* A constant is always correct */
13322 + if (is_constant) {
13323 + f->constant++;
13324 +@@ -223,6 +225,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
13325 + f->data.correct++;
13326 + else
13327 + f->data.incorrect++;
13328 ++
13329 ++ user_access_restore(flags);
13330 + }
13331 + EXPORT_SYMBOL(ftrace_likely_update);
13332 +
13333 +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
13334 +index 795aa2038377..0a200d42fa96 100644
13335 +--- a/kernel/trace/trace_events_hist.c
13336 ++++ b/kernel/trace/trace_events_hist.c
13337 +@@ -3543,14 +3543,20 @@ static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
13338 + struct track_data *track_data = tr->cond_snapshot->cond_data;
13339 + struct hist_elt_data *elt_data, *track_elt_data;
13340 + struct snapshot_context *context = cond_data;
13341 ++ struct action_data *action;
13342 + u64 track_val;
13343 +
13344 + if (!track_data)
13345 + return false;
13346 +
13347 ++ action = track_data->action_data;
13348 ++
13349 + track_val = get_track_val(track_data->hist_data, context->elt,
13350 + track_data->action_data);
13351 +
13352 ++ if (!action->track_data.check_val(track_data->track_val, track_val))
13353 ++ return false;
13354 ++
13355 + track_data->track_val = track_val;
13356 + memcpy(track_data->key, context->key, track_data->key_len);
13357 +
13358 +diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
13359 +index f05802687ba4..7998affa45d4 100644
13360 +--- a/lib/kobject_uevent.c
13361 ++++ b/lib/kobject_uevent.c
13362 +@@ -466,6 +466,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
13363 + int i = 0;
13364 + int retval = 0;
13365 +
13366 ++ /*
13367 ++ * Mark the "remove" event done regardless of the result, because some
13368 ++ * subsystems do not want "remove" re-triggered via automatic cleanup.

13369 ++ */
13370 ++ if (action == KOBJ_REMOVE)
13371 ++ kobj->state_remove_uevent_sent = 1;
13372 ++
13373 + pr_debug("kobject: '%s' (%p): %s\n",
13374 + kobject_name(kobj), kobj, __func__);
13375 +
13376 +@@ -567,10 +574,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
13377 + kobj->state_add_uevent_sent = 1;
13378 + break;
13379 +
13380 +- case KOBJ_REMOVE:
13381 +- kobj->state_remove_uevent_sent = 1;
13382 +- break;
13383 +-
13384 + case KOBJ_UNBIND:
13385 + zap_modalias_env(env);
13386 + break;
13387 +diff --git a/lib/sbitmap.c b/lib/sbitmap.c
13388 +index 155fe38756ec..4a7fc4915dfc 100644
13389 +--- a/lib/sbitmap.c
13390 ++++ b/lib/sbitmap.c
13391 +@@ -435,7 +435,7 @@ static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
13392 + * to ensure that the batch size is updated before the wait
13393 + * counts.
13394 + */
13395 +- smp_mb__before_atomic();
13396 ++ smp_mb();
13397 + for (i = 0; i < SBQ_WAIT_QUEUES; i++)
13398 + atomic_set(&sbq->ws[i].wait_cnt, 1);
13399 + }
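
The sbitmap change is a one-liner with a subtle rationale: smp_mb__before_atomic() only orders against atomic read-modify-write operations, and atomic_set() is a plain store, so publishing the batch-size update before the wait-count resets needs a full smp_mb(). Roughly the same shape in portable C11 atomics (an analogy, not the kernel API):

    #include <stdatomic.h>

    #define SBQ_WAIT_QUEUES 8

    static _Atomic unsigned int wake_batch;
    static _Atomic int wait_cnt[SBQ_WAIT_QUEUES];

    static void update_wake_batch(unsigned int batch)
    {
        atomic_store_explicit(&wake_batch, batch, memory_order_relaxed);

        /* Full fence, like smp_mb(): the stores below are plain stores,
         * not RMW ops, so a "before-atomic" style hint has nothing to
         * pair with here. */
        atomic_thread_fence(memory_order_seq_cst);

        for (int i = 0; i < SBQ_WAIT_QUEUES; i++)
            atomic_store_explicit(&wait_cnt[i], 1, memory_order_relaxed);
    }

    int main(void)
    {
        update_wake_batch(4);
        return 0;
    }
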
13400 +diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
13401 +index 58eacd41526c..023ba9f3b99f 100644
13402 +--- a/lib/strncpy_from_user.c
13403 ++++ b/lib/strncpy_from_user.c
13404 +@@ -23,10 +23,11 @@
13405 + * hit it), 'max' is the address space maximum (and we return
13406 + * -EFAULT if we hit it).
13407 + */
13408 +-static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
13409 ++static inline long do_strncpy_from_user(char *dst, const char __user *src,
13410 ++ unsigned long count, unsigned long max)
13411 + {
13412 + const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
13413 +- long res = 0;
13414 ++ unsigned long res = 0;
13415 +
13416 + /*
13417 + * Truncate 'max' to the user-specified limit, so that
13418 +diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
13419 +index 1c1a1b0e38a5..7f2db3fe311f 100644
13420 +--- a/lib/strnlen_user.c
13421 ++++ b/lib/strnlen_user.c
13422 +@@ -28,7 +28,7 @@
13423 + static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
13424 + {
13425 + const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
13426 +- long align, res = 0;
13427 ++ unsigned long align, res = 0;
13428 + unsigned long c;
13429 +
13430 + /*
13431 +@@ -42,7 +42,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
13432 + * Do everything aligned. But that means that we
13433 + * need to also expand the maximum..
13434 + */
13435 +- align = (sizeof(long) - 1) & (unsigned long)src;
13436 ++ align = (sizeof(unsigned long) - 1) & (unsigned long)src;
13437 + src -= align;
13438 + max += align;
13439 +
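
Both string helpers switch their counts and residuals from long to unsigned long. With a signed type, a caller-controlled length above LONG_MAX turns negative, and limit checks of the form "res >= count" stop bounding anything; the sizeof(unsigned long) change in the align computation is the matching cosmetic cleanup. A tiny standalone demonstration of the failure mode (values are illustrative, and the out-of-range signed conversion is implementation-defined):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long huge = (unsigned long)LONG_MAX + 2;

        long scount = (long)huge;    /* commonly wraps to a negative value */
        unsigned long ucount = huge;

        /* A "have we hit the limit?" check: once the signed count has
         * gone negative, the comparison is trivially true and the limit
         * logic misfires; the unsigned form still behaves as intended. */
        printf("signed:   10 >= count -> %d\n", 10L >= scount);   /* 1 */
        printf("unsigned: 10 >= count -> %d\n", 10UL >= ucount);  /* 0 */
        return 0;
    }
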
13440 +diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
13441 +index 310a4f353008..8d290da0d596 100644
13442 +--- a/net/batman-adv/distributed-arp-table.c
13443 ++++ b/net/batman-adv/distributed-arp-table.c
13444 +@@ -1444,7 +1444,6 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
13445 + hw_src, &ip_src, hw_dst, &ip_dst,
13446 + dat_entry->mac_addr, &dat_entry->ip);
13447 + dropped = true;
13448 +- goto out;
13449 + }
13450 +
13451 + /* Update our internal cache with both the IP addresses the node got
13452 +@@ -1453,6 +1452,9 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
13453 + batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
13454 + batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
13455 +
13456 ++ if (dropped)
13457 ++ goto out;
13458 ++
13459 + /* If BLA is enabled, only forward ARP replies if we have claimed the
13460 + * source of the ARP reply or if no one else of the same backbone has
13461 + * already claimed that client. This prevents that different gateways
13462 +diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
13463 +index 75750870cf04..f8725786b596 100644
13464 +--- a/net/batman-adv/main.c
13465 ++++ b/net/batman-adv/main.c
13466 +@@ -161,6 +161,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
13467 + spin_lock_init(&bat_priv->tt.commit_lock);
13468 + spin_lock_init(&bat_priv->gw.list_lock);
13469 + #ifdef CONFIG_BATMAN_ADV_MCAST
13470 ++ spin_lock_init(&bat_priv->mcast.mla_lock);
13471 + spin_lock_init(&bat_priv->mcast.want_lists_lock);
13472 + #endif
13473 + spin_lock_init(&bat_priv->tvlv.container_list_lock);
13474 +diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
13475 +index f91b1b6265cf..1b985ab89c08 100644
13476 +--- a/net/batman-adv/multicast.c
13477 ++++ b/net/batman-adv/multicast.c
13478 +@@ -325,8 +325,6 @@ static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
13479 + * translation table except the ones listed in the given mcast_list.
13480 + *
13481 + * If mcast_list is NULL then all are retracted.
13482 +- *
13483 +- * Do not call outside of the mcast worker! (or cancel mcast worker first)
13484 + */
13485 + static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
13486 + struct hlist_head *mcast_list)
13487 +@@ -334,8 +332,6 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
13488 + struct batadv_hw_addr *mcast_entry;
13489 + struct hlist_node *tmp;
13490 +
13491 +- WARN_ON(delayed_work_pending(&bat_priv->mcast.work));
13492 +-
13493 + hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
13494 + list) {
13495 + if (mcast_list &&
13496 +@@ -359,8 +355,6 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
13497 + *
13498 + * Adds multicast listener announcements from the given mcast_list to the
13499 + * translation table if they have not been added yet.
13500 +- *
13501 +- * Do not call outside of the mcast worker! (or cancel mcast worker first)
13502 + */
13503 + static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
13504 + struct hlist_head *mcast_list)
13505 +@@ -368,8 +362,6 @@ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
13506 + struct batadv_hw_addr *mcast_entry;
13507 + struct hlist_node *tmp;
13508 +
13509 +- WARN_ON(delayed_work_pending(&bat_priv->mcast.work));
13510 +-
13511 + if (!mcast_list)
13512 + return;
13513 +
13514 +@@ -658,7 +650,10 @@ static void batadv_mcast_mla_update(struct work_struct *work)
13515 + priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
13516 + bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);
13517 +
13518 ++ spin_lock(&bat_priv->mcast.mla_lock);
13519 + __batadv_mcast_mla_update(bat_priv);
13520 ++ spin_unlock(&bat_priv->mcast.mla_lock);
13521 ++
13522 + batadv_mcast_start_timer(bat_priv);
13523 + }
13524 +
13525 +diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
13526 +index a21b34ed6548..ed0f6a519de5 100644
13527 +--- a/net/batman-adv/types.h
13528 ++++ b/net/batman-adv/types.h
13529 +@@ -1223,6 +1223,11 @@ struct batadv_priv_mcast {
13530 + /** @bridged: whether the soft interface has a bridge on top */
13531 + unsigned char bridged:1;
13532 +
13533 ++ /**
13534 ++ * @mla_lock: a lock protecting mla_list and mla_flags
13535 ++ */
13536 ++ spinlock_t mla_lock;
13537 ++
13538 + /**
13539 + * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP
13540 + * traffic
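
Taken together, the three batman-adv hunks replace a convention (only the mcast worker may touch mla_list) with an explicit lock: mla_lock is initialized in batadv_mesh_init(), the delayed_work_pending() WARN_ONs and their comments are dropped, and __batadv_mcast_mla_update() now runs under the lock. The same shift, modeled generically with pthreads (structure and field names are illustrative):

    #include <pthread.h>

    struct priv_mcast {
        pthread_mutex_t mla_lock;    /* protects mla_list and mla_flags */
        int             mla_list;    /* stand-in for the real hlist */
    };

    static void mla_update(struct priv_mcast *m)
    {
        /* Any context may call this now; the lock replaces the old
         * "only call from the mcast worker" convention. */
        pthread_mutex_lock(&m->mla_lock);
        m->mla_list++;
        pthread_mutex_unlock(&m->mla_lock);
    }

    int main(void)
    {
        struct priv_mcast m = { PTHREAD_MUTEX_INITIALIZER, 0 };

        mla_update(&m);
        return 0;
    }
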
13541 +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
13542 +index d6b2540ba7f8..f275c9905650 100644
13543 +--- a/net/bluetooth/hci_core.c
13544 ++++ b/net/bluetooth/hci_core.c
13545 +@@ -4383,6 +4383,9 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
13546 + return;
13547 + }
13548 +
13549 ++ /* If we reach this point this event matches the last command sent */
13550 ++ hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
13551 ++
13552 + /* If the command succeeded and there's still more commands in
13553 + * this request the request is not yet complete.
13554 + */
13555 +@@ -4493,6 +4496,8 @@ static void hci_cmd_work(struct work_struct *work)
13556 +
13557 + hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
13558 + if (hdev->sent_cmd) {
13559 ++ if (hci_req_status_pend(hdev))
13560 ++ hci_dev_set_flag(hdev, HCI_CMD_PENDING);
13561 + atomic_dec(&hdev->cmd_cnt);
13562 + hci_send_frame(hdev, skb);
13563 + if (test_bit(HCI_RESET, &hdev->flags))
13564 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
13565 +index 609fd6871c5a..8b893baf9bbe 100644
13566 +--- a/net/bluetooth/hci_event.c
13567 ++++ b/net/bluetooth/hci_event.c
13568 +@@ -3404,6 +3404,12 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
13569 + hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
13570 + req_complete_skb);
13571 +
13572 ++ if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
13573 ++ bt_dev_err(hdev,
13574 ++ "unexpected event for opcode 0x%4.4x", *opcode);
13575 ++ return;
13576 ++ }
13577 ++
13578 + if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
13579 + queue_work(hdev->workqueue, &hdev->cmd_work);
13580 + }
13581 +@@ -3511,6 +3517,12 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
13582 + hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
13583 + req_complete_skb);
13584 +
13585 ++ if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
13586 ++ bt_dev_err(hdev,
13587 ++ "unexpected event for opcode 0x%4.4x", *opcode);
13588 ++ return;
13589 ++ }
13590 ++
13591 + if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
13592 + queue_work(hdev->workqueue, &hdev->cmd_work);
13593 + }
13594 +diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
13595 +index ca73d36cc149..e9a95ed65491 100644
13596 +--- a/net/bluetooth/hci_request.c
13597 ++++ b/net/bluetooth/hci_request.c
13598 +@@ -46,6 +46,11 @@ void hci_req_purge(struct hci_request *req)
13599 + skb_queue_purge(&req->cmd_q);
13600 + }
13601 +
13602 ++bool hci_req_status_pend(struct hci_dev *hdev)
13603 ++{
13604 ++ return hdev->req_status == HCI_REQ_PEND;
13605 ++}
13606 ++
13607 + static int req_run(struct hci_request *req, hci_req_complete_t complete,
13608 + hci_req_complete_skb_t complete_skb)
13609 + {
13610 +diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
13611 +index 692cc8b13368..55b2050cc9ff 100644
13612 +--- a/net/bluetooth/hci_request.h
13613 ++++ b/net/bluetooth/hci_request.h
13614 +@@ -37,6 +37,7 @@ struct hci_request {
13615 +
13616 + void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
13617 + void hci_req_purge(struct hci_request *req);
13618 ++bool hci_req_status_pend(struct hci_dev *hdev);
13619 + int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
13620 + int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
13621 + void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
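
The Bluetooth hunks implement a small handshake around the command queue: hci_cmd_work() raises HCI_CMD_PENDING when it sends a command belonging to a pending request, hci_req_cmd_complete() clears it once an event matches the last command sent, and both event handlers log and drop completions that arrive while the flag is still set instead of advancing the queue. A compact userspace model of that flag protocol (names and opcodes are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static bool cmd_pending;
    static unsigned int last_opcode;

    static void send_cmd(unsigned int opcode)
    {
        last_opcode = opcode;
        cmd_pending = true;          /* like hci_dev_set_flag(HCI_CMD_PENDING) */
    }

    static void cmd_complete_evt(unsigned int opcode)
    {
        if (opcode == last_opcode)
            cmd_pending = false;     /* event matches the last command sent */

        if (cmd_pending) {           /* still set: completion for something else */
            printf("unexpected event for opcode 0x%4.4x\n", opcode);
            return;
        }
        /* ... safe to dequeue and send the next command here ... */
    }

    int main(void)
    {
        send_cmd(0x0c03);
        cmd_complete_evt(0x1001);    /* dropped: unexpected */
        cmd_complete_evt(0x0c03);    /* accepted */
        return 0;
    }
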
13622 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
13623 +index 2dbcf5d5512e..b7a9fe3d5fcb 100644
13624 +--- a/net/mac80211/mlme.c
13625 ++++ b/net/mac80211/mlme.c
13626 +@@ -1188,9 +1188,6 @@ static void ieee80211_chswitch_work(struct work_struct *work)
13627 + goto out;
13628 + }
13629 +
13630 +- /* XXX: shouldn't really modify cfg80211-owned data! */
13631 +- ifmgd->associated->channel = sdata->csa_chandef.chan;
13632 +-
13633 + ifmgd->csa_waiting_bcn = true;
13634 +
13635 + ieee80211_sta_reset_beacon_monitor(sdata);
13636 +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
13637 +index d7f61b0547c6..d2715b4d2e72 100644
13638 +--- a/net/netfilter/nf_conntrack_netlink.c
13639 ++++ b/net/netfilter/nf_conntrack_netlink.c
13640 +@@ -1254,7 +1254,7 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
13641 + struct nf_conntrack_tuple tuple;
13642 + struct nf_conn *ct;
13643 + struct nfgenmsg *nfmsg = nlmsg_data(nlh);
13644 +- u_int8_t u3 = nfmsg->nfgen_family;
13645 ++ u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
13646 + struct nf_conntrack_zone zone;
13647 + int err;
13648 +
13649 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
13650 +index 47e30a58566c..d2a7459a5da4 100644
13651 +--- a/net/wireless/nl80211.c
13652 ++++ b/net/wireless/nl80211.c
13653 +@@ -15727,6 +15727,11 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
13654 +
13655 + wdev->chandef = *chandef;
13656 + wdev->preset_chandef = *chandef;
13657 ++
13658 ++ if (wdev->iftype == NL80211_IFTYPE_STATION &&
13659 ++ !WARN_ON(!wdev->current_bss))
13660 ++ wdev->current_bss->pub.channel = chandef->chan;
13661 ++
13662 + nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL,
13663 + NL80211_CMD_CH_SWITCH_NOTIFY, 0);
13664 + }
13665 +diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h
13666 +index 5cd7c1d1a5d5..7409722727ca 100644
13667 +--- a/samples/bpf/asm_goto_workaround.h
13668 ++++ b/samples/bpf/asm_goto_workaround.h
13669 +@@ -13,4 +13,5 @@
13670 + #define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
13671 + #endif
13672 +
13673 ++#define volatile(x...) volatile("")
13674 + #endif
13675 +diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
13676 +index 186e727b737b..6fd9954e1c08 100644
13677 +--- a/security/selinux/netlabel.c
13678 ++++ b/security/selinux/netlabel.c
13679 +@@ -288,11 +288,8 @@ int selinux_netlbl_sctp_assoc_request(struct sctp_endpoint *ep,
13680 + int rc;
13681 + struct netlbl_lsm_secattr secattr;
13682 + struct sk_security_struct *sksec = ep->base.sk->sk_security;
13683 +- struct sockaddr *addr;
13684 + struct sockaddr_in addr4;
13685 +-#if IS_ENABLED(CONFIG_IPV6)
13686 + struct sockaddr_in6 addr6;
13687 +-#endif
13688 +
13689 + if (ep->base.sk->sk_family != PF_INET &&
13690 + ep->base.sk->sk_family != PF_INET6)
13691 +@@ -310,16 +307,15 @@ int selinux_netlbl_sctp_assoc_request(struct sctp_endpoint *ep,
13692 + if (ip_hdr(skb)->version == 4) {
13693 + addr4.sin_family = AF_INET;
13694 + addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
13695 +- addr = (struct sockaddr *)&addr4;
13696 +-#if IS_ENABLED(CONFIG_IPV6)
13697 +- } else {
13698 ++ rc = netlbl_conn_setattr(ep->base.sk, (void *)&addr4, &secattr);
13699 ++ } else if (IS_ENABLED(CONFIG_IPV6) && ip_hdr(skb)->version == 6) {
13700 + addr6.sin6_family = AF_INET6;
13701 + addr6.sin6_addr = ipv6_hdr(skb)->saddr;
13702 +- addr = (struct sockaddr *)&addr6;
13703 +-#endif
13704 ++ rc = netlbl_conn_setattr(ep->base.sk, (void *)&addr6, &secattr);
13705 ++ } else {
13706 ++ rc = -EAFNOSUPPORT;
13707 + }
13708 +
13709 +- rc = netlbl_conn_setattr(ep->base.sk, addr, &secattr);
13710 + if (rc == 0)
13711 + sksec->nlbl_state = NLBL_LABELED;
13712 +
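
The SELinux rework trades #if IS_ENABLED(CONFIG_IPV6) preprocessor blocks for an ordinary if (IS_ENABLED(CONFIG_IPV6) && ...) condition. Both forms compile the IPv6 branch away when the option is off, but the expression form keeps the branch visible to the compiler and, crucially here, guarantees rc is assigned on every path (the old #ifdef shape could fall through with addr uninitialized). A reduced illustration; the IS_ENABLED stand-in below is a simplification of the kernel's config-driven macro:

    #include <stdio.h>

    /* Stand-in for the kernel macro: 1 if the option is configured in. */
    #define IS_ENABLED(opt) opt
    #define CONFIG_IPV6 0            /* pretend IPv6 is compiled out */

    static int setattr(int family) { return family; }

    static int assoc_request(int ip_version)
    {
        int rc;

        if (ip_version == 4)
            rc = setattr(4);
        else if (IS_ENABLED(CONFIG_IPV6) && ip_version == 6)
            rc = setattr(6);         /* dead code when IPv6 is off, yet still
                                        parsed and type-checked by the compiler */
        else
            rc = -97;                /* -EAFNOSUPPORT: no path leaves rc unset */

        return rc;
    }

    int main(void)
    {
        printf("%d\n", assoc_request(6));   /* -97 with IPv6 compiled out */
        return 0;
    }
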
13713 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
13714 +index 701a69d856f5..b20eb7fc83eb 100644
13715 +--- a/sound/pci/hda/hda_codec.c
13716 ++++ b/sound/pci/hda/hda_codec.c
13717 +@@ -832,7 +832,13 @@ static int snd_hda_codec_dev_free(struct snd_device *device)
13718 + struct hda_codec *codec = device->device_data;
13719 +
13720 + codec->in_freeing = 1;
13721 +- snd_hdac_device_unregister(&codec->core);
13722 ++ /*
13723 ++ * snd_hda_codec_device_new() is used by both the legacy HDA and the ASoC
13724 ++ * driver. We can't unregister the ASoC device here, since it will be
13725 ++ * unregistered in snd_hdac_ext_bus_device_remove().

13726 ++ */
13727 ++ if (codec->core.type == HDA_DEV_LEGACY)
13728 ++ snd_hdac_device_unregister(&codec->core);
13729 + codec_display_power(codec, false);
13730 + put_device(hda_codec_dev(codec));
13731 + return 0;
13732 +diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
13733 +index 35df73e42cbc..fb2f0ac1f16f 100644
13734 +--- a/sound/soc/codecs/hdmi-codec.c
13735 ++++ b/sound/soc/codecs/hdmi-codec.c
13736 +@@ -439,8 +439,12 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
13737 + if (!ret) {
13738 + ret = snd_pcm_hw_constraint_eld(substream->runtime,
13739 + hcp->eld);
13740 +- if (ret)
13741 ++ if (ret) {
13742 ++ mutex_lock(&hcp->current_stream_lock);
13743 ++ hcp->current_stream = NULL;
13744 ++ mutex_unlock(&hcp->current_stream_lock);
13745 + return ret;
13746 ++ }
13747 + }
13748 + /* Select chmap supported */
13749 + hdmi_codec_eld_chmap(hcp);
13750 +diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
13751 +index 981f88a5f615..a04a7cedd99d 100644
13752 +--- a/sound/soc/codecs/wcd9335.c
13753 ++++ b/sound/soc/codecs/wcd9335.c
13754 +@@ -5188,6 +5188,7 @@ static int wcd9335_slim_status(struct slim_device *sdev,
13755 +
13756 + wcd->slim = sdev;
13757 + wcd->slim_ifc_dev = of_slim_get_device(sdev->ctrl, ifc_dev_np);
13758 ++ of_node_put(ifc_dev_np);
13759 + if (!wcd->slim_ifc_dev) {
13760 + dev_err(dev, "Unable to get SLIM Interface device\n");
13761 + return -EINVAL;
13762 +diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
13763 +index 7b1d9970be8b..1f65cf555ebe 100644
13764 +--- a/sound/soc/fsl/Kconfig
13765 ++++ b/sound/soc/fsl/Kconfig
13766 +@@ -182,16 +182,17 @@ config SND_MPC52xx_SOC_EFIKA
13767 +
13768 + endif # SND_POWERPC_SOC
13769 +
13770 ++config SND_SOC_IMX_PCM_FIQ
13771 ++ tristate
13772 ++ default y if SND_SOC_IMX_SSI=y && (SND_SOC_FSL_SSI=m || SND_SOC_FSL_SPDIF=m) && (MXC_TZIC || MXC_AVIC)
13773 ++ select FIQ
13774 ++
13775 + if SND_IMX_SOC
13776 +
13777 + config SND_SOC_IMX_SSI
13778 + tristate
13779 + select SND_SOC_FSL_UTILS
13780 +
13781 +-config SND_SOC_IMX_PCM_FIQ
13782 +- tristate
13783 +- select FIQ
13784 +-
13785 + comment "SoC Audio support for Freescale i.MX boards:"
13786 +
13787 + config SND_MXC_SOC_WM1133_EV1
13788 +diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
13789 +index 191426a6d9ad..30a3d68b5c03 100644
13790 +--- a/sound/soc/fsl/eukrea-tlv320.c
13791 ++++ b/sound/soc/fsl/eukrea-tlv320.c
13792 +@@ -118,13 +118,13 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
13793 + if (ret) {
13794 + dev_err(&pdev->dev,
13795 + "fsl,mux-int-port node missing or invalid.\n");
13796 +- return ret;
13797 ++ goto err;
13798 + }
13799 + ret = of_property_read_u32(np, "fsl,mux-ext-port", &ext_port);
13800 + if (ret) {
13801 + dev_err(&pdev->dev,
13802 + "fsl,mux-ext-port node missing or invalid.\n");
13803 +- return ret;
13804 ++ goto err;
13805 + }
13806 +
13807 + /*
13808 +diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
13809 +index db9e0872f73d..7549b74e464e 100644
13810 +--- a/sound/soc/fsl/fsl_sai.c
13811 ++++ b/sound/soc/fsl/fsl_sai.c
13812 +@@ -268,12 +268,14 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
13813 + case SND_SOC_DAIFMT_CBS_CFS:
13814 + val_cr2 |= FSL_SAI_CR2_BCD_MSTR;
13815 + val_cr4 |= FSL_SAI_CR4_FSD_MSTR;
13816 ++ sai->is_slave_mode = false;
13817 + break;
13818 + case SND_SOC_DAIFMT_CBM_CFM:
13819 + sai->is_slave_mode = true;
13820 + break;
13821 + case SND_SOC_DAIFMT_CBS_CFM:
13822 + val_cr2 |= FSL_SAI_CR2_BCD_MSTR;
13823 ++ sai->is_slave_mode = false;
13824 + break;
13825 + case SND_SOC_DAIFMT_CBM_CFS:
13826 + val_cr4 |= FSL_SAI_CR4_FSD_MSTR;
13827 +diff --git a/sound/soc/fsl/fsl_utils.c b/sound/soc/fsl/fsl_utils.c
13828 +index 9981668ab590..040d06b89f00 100644
13829 +--- a/sound/soc/fsl/fsl_utils.c
13830 ++++ b/sound/soc/fsl/fsl_utils.c
13831 +@@ -71,6 +71,7 @@ int fsl_asoc_get_dma_channel(struct device_node *ssi_np,
13832 + iprop = of_get_property(dma_np, "cell-index", NULL);
13833 + if (!iprop) {
13834 + of_node_put(dma_np);
13835 ++ of_node_put(dma_channel_np);
13836 + return -EINVAL;
13837 + }
13838 + *dma_id = be32_to_cpup(iprop);
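
The wcd9335.c and fsl_utils.c fixes plug the same leak class: a device-tree node reference returned by an of_* lookup must be dropped on every exit path, early error returns included. The pattern in miniature, modeled with a generic get/put pair rather than the real OF API:

    #include <stdio.h>

    struct node { int refcount; };

    static struct node *node_get(struct node *n) { n->refcount++; return n; }
    static void node_put(struct node *n) { n->refcount--; }

    static int probe(struct node *parent, int lookup_ok)
    {
        struct node *np = node_get(parent);   /* like of_parse_phandle() */
        int ret = lookup_ok ? 0 : -22;        /* -EINVAL */

        node_put(np);    /* drop the reference on success *and* failure */
        return ret;
    }

    int main(void)
    {
        struct node root = { .refcount = 1 };

        probe(&root, 0);
        printf("refcount: %d\n", root.refcount);   /* still 1: no leak */
        return 0;
    }
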
13839 +diff --git a/sound/soc/intel/boards/kbl_da7219_max98357a.c b/sound/soc/intel/boards/kbl_da7219_max98357a.c
13840 +index 38f6ab74709d..07491a0f8fb8 100644
13841 +--- a/sound/soc/intel/boards/kbl_da7219_max98357a.c
13842 ++++ b/sound/soc/intel/boards/kbl_da7219_max98357a.c
13843 +@@ -188,7 +188,7 @@ static int kabylake_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
13844 +
13845 + jack = &ctx->kabylake_headset;
13846 +
13847 +- snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
13848 ++ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
13849 + snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
13850 + snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
13851 + snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
13852 +diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
13853 +index 46e3ab0fced4..fe99b02bbf17 100644
13854 +--- a/sound/soc/soc-core.c
13855 ++++ b/sound/soc/soc-core.c
13856 +@@ -2828,10 +2828,21 @@ EXPORT_SYMBOL_GPL(snd_soc_register_card);
13857 +
13858 + static void snd_soc_unbind_card(struct snd_soc_card *card, bool unregister)
13859 + {
13860 ++ struct snd_soc_pcm_runtime *rtd;
13861 ++ int order;
13862 ++
13863 + if (card->instantiated) {
13864 + card->instantiated = false;
13865 + snd_soc_dapm_shutdown(card);
13866 + snd_soc_flush_all_delayed_work(card);
13867 ++
13868 ++ /* remove all components used by DAI links on this card */
13869 ++ for_each_comp_order(order) {
13870 ++ for_each_card_rtds(card, rtd) {
13871 ++ soc_remove_link_components(card, rtd, order);
13872 ++ }
13873 ++ }
13874 ++
13875 + soc_cleanup_card_resources(card);
13876 + if (!unregister)
13877 + list_add(&card->list, &unbind_card_list);
13878 +diff --git a/sound/soc/ti/Kconfig b/sound/soc/ti/Kconfig
13879 +index 4bf3c15d4e51..ee7c202c69b7 100644
13880 +--- a/sound/soc/ti/Kconfig
13881 ++++ b/sound/soc/ti/Kconfig
13882 +@@ -21,8 +21,8 @@ config SND_SOC_DAVINCI_ASP
13883 +
13884 + config SND_SOC_DAVINCI_MCASP
13885 + tristate "Multichannel Audio Serial Port (McASP) support"
13886 +- select SND_SOC_TI_EDMA_PCM if TI_EDMA
13887 +- select SND_SOC_TI_SDMA_PCM if DMA_OMAP
13888 ++ select SND_SOC_TI_EDMA_PCM
13889 ++ select SND_SOC_TI_SDMA_PCM
13890 + help
13891 + Say Y or M here if you want to have support for McASP IP found in
13892 + various Texas Instruments SoCs like:
13893 +diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
13894 +index a3a67a8f0f54..9fbc759fdefe 100644
13895 +--- a/sound/soc/ti/davinci-mcasp.c
13896 ++++ b/sound/soc/ti/davinci-mcasp.c
13897 +@@ -45,6 +45,7 @@
13898 +
13899 + #define MCASP_MAX_AFIFO_DEPTH 64
13900 +
13901 ++#ifdef CONFIG_PM
13902 + static u32 context_regs[] = {
13903 + DAVINCI_MCASP_TXFMCTL_REG,
13904 + DAVINCI_MCASP_RXFMCTL_REG,
13905 +@@ -68,6 +69,7 @@ struct davinci_mcasp_context {
13906 + u32 *xrsr_regs; /* for serializer configuration */
13907 + bool pm_state;
13908 + };
13909 ++#endif
13910 +
13911 + struct davinci_mcasp_ruledata {
13912 + struct davinci_mcasp *mcasp;
13913 +diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore
13914 +index 67167e44b726..8248b8dd89d4 100644
13915 +--- a/tools/bpf/bpftool/.gitignore
13916 ++++ b/tools/bpf/bpftool/.gitignore
13917 +@@ -1,5 +1,5 @@
13918 + *.d
13919 +-bpftool
13920 ++/bpftool
13921 + bpftool*.8
13922 + bpf-helpers.*
13923 + FEATURE-DUMP.bpftool
13924 +diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
13925 +index 9cd015574e83..d82edadf7589 100644
13926 +--- a/tools/lib/bpf/bpf.c
13927 ++++ b/tools/lib/bpf/bpf.c
13928 +@@ -46,6 +46,8 @@
13929 + # define __NR_bpf 349
13930 + # elif defined(__s390__)
13931 + # define __NR_bpf 351
13932 ++# elif defined(__arc__)
13933 ++# define __NR_bpf 280
13934 + # else
13935 + # error __NR_bpf not defined. libbpf does not support your arch.
13936 + # endif
13937 +diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
13938 +index 6ffdd79bea89..6dc1f418034f 100644
13939 +--- a/tools/lib/bpf/bpf.h
13940 ++++ b/tools/lib/bpf/bpf.h
13941 +@@ -26,6 +26,7 @@
13942 + #include <linux/bpf.h>
13943 + #include <stdbool.h>
13944 + #include <stddef.h>
13945 ++#include <stdint.h>
13946 +
13947 + #ifdef __cplusplus
13948 + extern "C" {
13949 +diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
13950 +index 8d0078b65486..af5f310ecca1 100644
13951 +--- a/tools/lib/bpf/xsk.c
13952 ++++ b/tools/lib/bpf/xsk.c
13953 +@@ -248,8 +248,7 @@ int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
13954 + return 0;
13955 +
13956 + out_mmap:
13957 +- munmap(umem->fill,
13958 +- off.fr.desc + umem->config.fill_size * sizeof(__u64));
13959 ++ munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
13960 + out_socket:
13961 + close(umem->fd);
13962 + out_umem_alloc:
13963 +@@ -523,11 +522,11 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
13964 + struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
13965 + const struct xsk_socket_config *usr_config)
13966 + {
13967 ++ void *rx_map = NULL, *tx_map = NULL;
13968 + struct sockaddr_xdp sxdp = {};
13969 + struct xdp_mmap_offsets off;
13970 + struct xsk_socket *xsk;
13971 + socklen_t optlen;
13972 +- void *map;
13973 + int err;
13974 +
13975 + if (!umem || !xsk_ptr || !rx || !tx)
13976 +@@ -593,40 +592,40 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
13977 + }
13978 +
13979 + if (rx) {
13980 +- map = xsk_mmap(NULL, off.rx.desc +
13981 +- xsk->config.rx_size * sizeof(struct xdp_desc),
13982 +- PROT_READ | PROT_WRITE,
13983 +- MAP_SHARED | MAP_POPULATE,
13984 +- xsk->fd, XDP_PGOFF_RX_RING);
13985 +- if (map == MAP_FAILED) {
13986 ++ rx_map = xsk_mmap(NULL, off.rx.desc +
13987 ++ xsk->config.rx_size * sizeof(struct xdp_desc),
13988 ++ PROT_READ | PROT_WRITE,
13989 ++ MAP_SHARED | MAP_POPULATE,
13990 ++ xsk->fd, XDP_PGOFF_RX_RING);
13991 ++ if (rx_map == MAP_FAILED) {
13992 + err = -errno;
13993 + goto out_socket;
13994 + }
13995 +
13996 + rx->mask = xsk->config.rx_size - 1;
13997 + rx->size = xsk->config.rx_size;
13998 +- rx->producer = map + off.rx.producer;
13999 +- rx->consumer = map + off.rx.consumer;
14000 +- rx->ring = map + off.rx.desc;
14001 ++ rx->producer = rx_map + off.rx.producer;
14002 ++ rx->consumer = rx_map + off.rx.consumer;
14003 ++ rx->ring = rx_map + off.rx.desc;
14004 + }
14005 + xsk->rx = rx;
14006 +
14007 + if (tx) {
14008 +- map = xsk_mmap(NULL, off.tx.desc +
14009 +- xsk->config.tx_size * sizeof(struct xdp_desc),
14010 +- PROT_READ | PROT_WRITE,
14011 +- MAP_SHARED | MAP_POPULATE,
14012 +- xsk->fd, XDP_PGOFF_TX_RING);
14013 +- if (map == MAP_FAILED) {
14014 ++ tx_map = xsk_mmap(NULL, off.tx.desc +
14015 ++ xsk->config.tx_size * sizeof(struct xdp_desc),
14016 ++ PROT_READ | PROT_WRITE,
14017 ++ MAP_SHARED | MAP_POPULATE,
14018 ++ xsk->fd, XDP_PGOFF_TX_RING);
14019 ++ if (tx_map == MAP_FAILED) {
14020 + err = -errno;
14021 + goto out_mmap_rx;
14022 + }
14023 +
14024 + tx->mask = xsk->config.tx_size - 1;
14025 + tx->size = xsk->config.tx_size;
14026 +- tx->producer = map + off.tx.producer;
14027 +- tx->consumer = map + off.tx.consumer;
14028 +- tx->ring = map + off.tx.desc;
14029 ++ tx->producer = tx_map + off.tx.producer;
14030 ++ tx->consumer = tx_map + off.tx.consumer;
14031 ++ tx->ring = tx_map + off.tx.desc;
14032 + tx->cached_cons = xsk->config.tx_size;
14033 + }
14034 + xsk->tx = tx;
14035 +@@ -653,13 +652,11 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
14036 +
14037 + out_mmap_tx:
14038 + if (tx)
14039 +- munmap(xsk->tx,
14040 +- off.tx.desc +
14041 ++ munmap(tx_map, off.tx.desc +
14042 + xsk->config.tx_size * sizeof(struct xdp_desc));
14043 + out_mmap_rx:
14044 + if (rx)
14045 +- munmap(xsk->rx,
14046 +- off.rx.desc +
14047 ++ munmap(rx_map, off.rx.desc +
14048 + xsk->config.rx_size * sizeof(struct xdp_desc));
14049 + out_socket:
14050 + if (--umem->refcount)
14051 +@@ -684,10 +681,12 @@ int xsk_umem__delete(struct xsk_umem *umem)
14052 + optlen = sizeof(off);
14053 + err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
14054 + if (!err) {
14055 +- munmap(umem->fill->ring,
14056 +- off.fr.desc + umem->config.fill_size * sizeof(__u64));
14057 +- munmap(umem->comp->ring,
14058 +- off.cr.desc + umem->config.comp_size * sizeof(__u64));
14059 ++ (void)munmap(umem->fill->ring - off.fr.desc,
14060 ++ off.fr.desc +
14061 ++ umem->config.fill_size * sizeof(__u64));
14062 ++ (void)munmap(umem->comp->ring - off.cr.desc,
14063 ++ off.cr.desc +
14064 ++ umem->config.comp_size * sizeof(__u64));
14065 + }
14066 +
14067 + close(umem->fd);
14068 +@@ -698,6 +697,7 @@ int xsk_umem__delete(struct xsk_umem *umem)
14069 +
14070 + void xsk_socket__delete(struct xsk_socket *xsk)
14071 + {
14072 ++ size_t desc_sz = sizeof(struct xdp_desc);
14073 + struct xdp_mmap_offsets off;
14074 + socklen_t optlen;
14075 + int err;
14076 +@@ -710,14 +710,17 @@ void xsk_socket__delete(struct xsk_socket *xsk)
14077 + optlen = sizeof(off);
14078 + err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
14079 + if (!err) {
14080 +- if (xsk->rx)
14081 +- munmap(xsk->rx->ring,
14082 +- off.rx.desc +
14083 +- xsk->config.rx_size * sizeof(struct xdp_desc));
14084 +- if (xsk->tx)
14085 +- munmap(xsk->tx->ring,
14086 +- off.tx.desc +
14087 +- xsk->config.tx_size * sizeof(struct xdp_desc));
14088 ++ if (xsk->rx) {
14089 ++ (void)munmap(xsk->rx->ring - off.rx.desc,
14090 ++ off.rx.desc +
14091 ++ xsk->config.rx_size * desc_sz);
14092 ++ }
14093 ++ if (xsk->tx) {
14094 ++ (void)munmap(xsk->tx->ring - off.tx.desc,
14095 ++ off.tx.desc +
14096 ++ xsk->config.tx_size * desc_sz);
14097 ++ }
14098 ++
14099 + }
14100 +
14101 + xsk->umem->refcount--;
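
The xsk.c rework enforces one invariant in several places: munmap() must receive exactly the address and length that mmap() handed out. Error paths now unmap the saved rx_map/tx_map/map pointers instead of unrelated structs, and the delete paths recover the mapping base by subtracting the descriptor offset from the stored ring pointer. A minimal sketch of keeping the base pointer around for teardown (names are illustrative):

    #include <stddef.h>
    #include <sys/mman.h>

    struct ring { void *descs; };

    static int map_ring(struct ring *r, size_t desc_off, size_t len)
    {
        char *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (base == MAP_FAILED)
            return -1;

        r->descs = base + desc_off;      /* interior pointer for users */

        /* Teardown must use the original base and full length, never an
         * interior pointer like r->descs (or recover the base first, as
         * xsk_umem__delete() does: r->descs - desc_off). */
        return munmap(base, len);
    }

    int main(void)
    {
        struct ring r;

        return map_ring(&r, 64, 4096);
    }
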
14102 +diff --git a/tools/testing/selftests/bpf/test_libbpf_open.c b/tools/testing/selftests/bpf/test_libbpf_open.c
14103 +index 65cbd30704b5..9e9db202d218 100644
14104 +--- a/tools/testing/selftests/bpf/test_libbpf_open.c
14105 ++++ b/tools/testing/selftests/bpf/test_libbpf_open.c
14106 +@@ -11,6 +11,8 @@ static const char *__doc__ =
14107 + #include <bpf/libbpf.h>
14108 + #include <getopt.h>
14109 +
14110 ++#include "bpf_rlimit.h"
14111 ++
14112 + static const struct option long_options[] = {
14113 + {"help", no_argument, NULL, 'h' },
14114 + {"debug", no_argument, NULL, 'D' },
14115 +diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
14116 +index 4cdb63bf0521..9a9fc6c9b70b 100644
14117 +--- a/tools/testing/selftests/bpf/trace_helpers.c
14118 ++++ b/tools/testing/selftests/bpf/trace_helpers.c
14119 +@@ -52,6 +52,10 @@ struct ksym *ksym_search(long key)
14120 + int start = 0, end = sym_cnt;
14121 + int result;
14122 +
14123 ++ /* kallsyms not loaded. return NULL */
14124 ++ if (sym_cnt <= 0)
14125 ++ return NULL;
14126 ++
14127 + while (start < end) {
14128 + size_t mid = start + (end - start) / 2;
14129 +
14130 +diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
14131 +index 28d321ba311b..6f339882a6ca 100644
14132 +--- a/tools/testing/selftests/cgroup/test_memcontrol.c
14133 ++++ b/tools/testing/selftests/cgroup/test_memcontrol.c
14134 +@@ -26,7 +26,7 @@
14135 + */
14136 + static int test_memcg_subtree_control(const char *root)
14137 + {
14138 +- char *parent, *child, *parent2, *child2;
14139 ++ char *parent, *child, *parent2 = NULL, *child2 = NULL;
14140 + int ret = KSFT_FAIL;
14141 + char buf[PAGE_SIZE];
14142 +
14143 +@@ -34,50 +34,54 @@ static int test_memcg_subtree_control(const char *root)
14144 + parent = cg_name(root, "memcg_test_0");
14145 + child = cg_name(root, "memcg_test_0/memcg_test_1");
14146 + if (!parent || !child)
14147 +- goto cleanup;
14148 ++ goto cleanup_free;
14149 +
14150 + if (cg_create(parent))
14151 +- goto cleanup;
14152 ++ goto cleanup_free;
14153 +
14154 + if (cg_write(parent, "cgroup.subtree_control", "+memory"))
14155 +- goto cleanup;
14156 ++ goto cleanup_parent;
14157 +
14158 + if (cg_create(child))
14159 +- goto cleanup;
14160 ++ goto cleanup_parent;
14161 +
14162 + if (cg_read_strstr(child, "cgroup.controllers", "memory"))
14163 +- goto cleanup;
14164 ++ goto cleanup_child;
14165 +
14166 + /* Create two nested cgroups without enabling memory controller */
14167 + parent2 = cg_name(root, "memcg_test_1");
14168 + child2 = cg_name(root, "memcg_test_1/memcg_test_1");
14169 + if (!parent2 || !child2)
14170 +- goto cleanup;
14171 ++ goto cleanup_free2;
14172 +
14173 + if (cg_create(parent2))
14174 +- goto cleanup;
14175 ++ goto cleanup_free2;
14176 +
14177 + if (cg_create(child2))
14178 +- goto cleanup;
14179 ++ goto cleanup_parent2;
14180 +
14181 + if (cg_read(child2, "cgroup.controllers", buf, sizeof(buf)))
14182 +- goto cleanup;
14183 ++ goto cleanup_all;
14184 +
14185 + if (!cg_read_strstr(child2, "cgroup.controllers", "memory"))
14186 +- goto cleanup;
14187 ++ goto cleanup_all;
14188 +
14189 + ret = KSFT_PASS;
14190 +
14191 +-cleanup:
14192 +- cg_destroy(child);
14193 +- cg_destroy(parent);
14194 +- free(parent);
14195 +- free(child);
14196 +-
14197 ++cleanup_all:
14198 + cg_destroy(child2);
14199 ++cleanup_parent2:
14200 + cg_destroy(parent2);
14201 ++cleanup_free2:
14202 + free(parent2);
14203 + free(child2);
14204 ++cleanup_child:
14205 ++ cg_destroy(child);
14206 ++cleanup_parent:
14207 ++ cg_destroy(parent);
14208 ++cleanup_free:
14209 ++ free(parent);
14210 ++ free(child);
14211 +
14212 + return ret;
14213 + }
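
The memcontrol selftest moves from a single catch-all cleanup label, which destroyed and freed cgroups that were never created, to a descending ladder of labels where each one undoes only the steps that had already succeeded; parent2 and child2 are also NULL-initialized so the trailing free() calls are safe on every path. The idiom in miniature:

    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        int ret = EXIT_FAILURE;
        char *a = NULL, *b = NULL;

        a = strdup("first");
        if (!a)
            goto out;
        b = strdup("second");
        if (!b)
            goto free_a;         /* only unwind what already succeeded */

        ret = EXIT_SUCCESS;

        free(b);
    free_a:
        free(a);
    out:
        return ret;
    }
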
14214 +diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
14215 +index 001aeda4c154..3972a9564c76 100644
14216 +--- a/virt/kvm/eventfd.c
14217 ++++ b/virt/kvm/eventfd.c
14218 +@@ -44,6 +44,12 @@
14219 +
14220 + static struct workqueue_struct *irqfd_cleanup_wq;
14221 +
14222 ++bool __attribute__((weak))
14223 ++kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
14224 ++{
14225 ++ return true;
14226 ++}
14227 ++
14228 + static void
14229 + irqfd_inject(struct work_struct *work)
14230 + {
14231 +@@ -297,6 +303,9 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
14232 + if (!kvm_arch_intc_initialized(kvm))
14233 + return -EAGAIN;
14234 +
14235 ++ if (!kvm_arch_irqfd_allowed(kvm, args))
14236 ++ return -EINVAL;
14237 ++
14238 + irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL_ACCOUNT);
14239 + if (!irqfd)
14240 + return -ENOMEM;
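
Finally, the eventfd hunk adds a weak default of kvm_arch_irqfd_allowed() that always returns true, letting an architecture veto irqfd assignment with a strong definition while every other architecture keeps the old behavior, and kvm_irqfd_assign() now consults it before allocating. The linker mechanism, reduced to a standalone example (GCC/Clang attribute syntax):

    #include <stdbool.h>
    #include <stdio.h>

    /* Weak default: a strong definition in any other object file wins at
     * link time; with none present, this fallback is used. */
    bool __attribute__((weak)) arch_irqfd_allowed(void)
    {
        return true;
    }

    int main(void)
    {
        printf("allowed: %d\n", arch_irqfd_allowed());
        return 0;
    }
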