
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Fri, 28 Feb 2020 16:41:49
Message-Id: 1582908086.b1bd048a3be78094255666cd125bae1fc1a0f8a1.mpagano@gentoo
1 commit: b1bd048a3be78094255666cd125bae1fc1a0f8a1
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Fri Feb 28 16:41:26 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Fri Feb 28 16:41:26 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b1bd048a3be78094255666cd125bae1fc1a0f8a1
7
8 Linux patch 5.4.23
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1022_linux-5.4.23.patch | 5941 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 5945 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 1a081c6..8dd1902 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -131,6 +131,10 @@ Patch: 1021_linux-5.4.22.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.4.22
23
24 +Patch: 1022_linux-5.4.23.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.4.23
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1022_linux-5.4.23.patch b/1022_linux-5.4.23.patch
33 new file mode 100644
34 index 0000000..b1928cc
35 --- /dev/null
36 +++ b/1022_linux-5.4.23.patch
37 @@ -0,0 +1,5941 @@
38 +diff --git a/Documentation/arm64/tagged-address-abi.rst b/Documentation/arm64/tagged-address-abi.rst
39 +index d4a85d535bf9..4a9d9c794ee5 100644
40 +--- a/Documentation/arm64/tagged-address-abi.rst
41 ++++ b/Documentation/arm64/tagged-address-abi.rst
42 +@@ -44,8 +44,15 @@ The AArch64 Tagged Address ABI has two stages of relaxation depending
43 + how the user addresses are used by the kernel:
44 +
45 + 1. User addresses not accessed by the kernel but used for address space
46 +- management (e.g. ``mmap()``, ``mprotect()``, ``madvise()``). The use
47 +- of valid tagged pointers in this context is always allowed.
48 ++ management (e.g. ``mprotect()``, ``madvise()``). The use of valid
49 ++ tagged pointers in this context is allowed with the exception of
50 ++ ``brk()``, ``mmap()`` and the ``new_address`` argument to
51 ++ ``mremap()`` as these have the potential to alias with existing
52 ++ user addresses.
53 ++
54 ++ NOTE: This behaviour changed in v5.6 and so some earlier kernels may
55 ++ incorrectly accept valid tagged pointers for the ``brk()``,
56 ++ ``mmap()`` and ``mremap()`` system calls.
57 +
58 + 2. User addresses accessed by the kernel (e.g. ``write()``). This ABI
59 + relaxation is disabled by default and the application thread needs to
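
A minimal userspace sketch of the clarified rule (arm64 only; it assumes a
kernel with the v5.6+ behaviour described in the NOTE above, so an older
kernel may still accept the tagged mmap() address):

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_TAGGED_ADDR_CTRL
    #define PR_SET_TAGGED_ADDR_CTRL 55
    #define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
    #endif

    int main(void)
    {
            /* Opt in to the tagged address ABI. */
            if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
                    perror("prctl(PR_SET_TAGGED_ADDR_CTRL)");

            void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;

            /* Put a non-zero tag in bits 56-63 of the address. */
            void *tagged = (void *)((uintptr_t)p | (0x2aUL << 56));

            /* Stage 1: madvise() on a valid tagged pointer is allowed... */
            printf("madvise(tagged): %d\n", madvise(tagged, 4096, MADV_NORMAL));

            /* ...but mmap(MAP_FIXED) with a tagged address is not, since it
             * could alias an existing untagged mapping. */
            void *q = mmap(tagged, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            printf("mmap(tagged, MAP_FIXED): %s\n",
                   q == MAP_FAILED ? "rejected" : "accepted");
            return 0;
    }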
60 +diff --git a/MAINTAINERS b/MAINTAINERS
61 +index d1aeebb59e6a..fe6fa5d3a63e 100644
62 +--- a/MAINTAINERS
63 ++++ b/MAINTAINERS
64 +@@ -8201,7 +8201,7 @@ M: Joonas Lahtinen <joonas.lahtinen@×××××××××××.com>
65 + M: Rodrigo Vivi <rodrigo.vivi@×××××.com>
66 + L: intel-gfx@×××××××××××××××××.org
67 + W: https://01.org/linuxgraphics/
68 +-B: https://01.org/linuxgraphics/documentation/how-report-bugs
69 ++B: https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
70 + C: irc://chat.freenode.net/intel-gfx
71 + Q: http://patchwork.freedesktop.org/project/intel-gfx/
72 + T: git git://anongit.freedesktop.org/drm-intel
73 +diff --git a/Makefile b/Makefile
74 +index 9428ec3b611a..af5e90075514 100644
75 +--- a/Makefile
76 ++++ b/Makefile
77 +@@ -1,7 +1,7 @@
78 + # SPDX-License-Identifier: GPL-2.0
79 + VERSION = 5
80 + PATCHLEVEL = 4
81 +-SUBLEVEL = 22
82 ++SUBLEVEL = 23
83 + EXTRAVERSION =
84 + NAME = Kleptomaniac Octopus
85 +
86 +diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
87 +index 73834996c4b6..5de132100b6d 100644
88 +--- a/arch/arm64/include/asm/lse.h
89 ++++ b/arch/arm64/include/asm/lse.h
90 +@@ -6,7 +6,7 @@
91 +
92 + #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
93 +
94 +-#define __LSE_PREAMBLE ".arch armv8-a+lse\n"
95 ++#define __LSE_PREAMBLE ".arch_extension lse\n"
96 +
97 + #include <linux/compiler_types.h>
98 + #include <linux/export.h>
99 +diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
100 +index c23c47360664..08df42e4db96 100644
101 +--- a/arch/arm64/include/asm/memory.h
102 ++++ b/arch/arm64/include/asm/memory.h
103 +@@ -219,7 +219,7 @@ static inline unsigned long kaslr_offset(void)
104 + ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
105 +
106 + #define untagged_addr(addr) ({ \
107 +- u64 __addr = (__force u64)addr; \
108 ++ u64 __addr = (__force u64)(addr); \
109 + __addr &= __untagged_addr(__addr); \
110 + (__force __typeof__(addr))__addr; \
111 + })
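
The one-character memory.h fix above (parenthesising the macro argument) is
the classic macro-hygiene rule: without the parentheses, the cast binds only
to the leading token of a compound argument. A standalone C illustration, not
kernel code:

    #include <stdio.h>

    #define BAD_U64(x)  ((unsigned long long)x)
    #define GOOD_U64(x) ((unsigned long long)(x))

    int main(void)
    {
            int flag = 1, a = -1, b = 0;

            /* BAD_U64 expands to ((unsigned long long)flag ? a : b): the cast
             * lands on 'flag' and the result stays a plain int, so the %llu
             * conversion below is undefined (typically printing 4294967295
             * instead of the intended 18446744073709551615). */
            printf("bad:  %llu\n", BAD_U64(flag ? a : b));
            printf("good: %llu\n", GOOD_U64(flag ? a : b));
            return 0;
    }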
112 +diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
113 +index c8bb14ff4713..6ba5adb96a3b 100644
114 +--- a/arch/powerpc/include/asm/page.h
115 ++++ b/arch/powerpc/include/asm/page.h
116 +@@ -295,8 +295,13 @@ static inline bool pfn_valid(unsigned long pfn)
117 + /*
118 + * Some number of bits at the level of the page table that points to
119 + * a hugepte are used to encode the size. This masks those bits.
120 ++ * On 8xx, HW assistance requires 4k alignment for the hugepte.
121 + */
122 ++#ifdef CONFIG_PPC_8xx
123 ++#define HUGEPD_SHIFT_MASK 0xfff
124 ++#else
125 + #define HUGEPD_SHIFT_MASK 0x3f
126 ++#endif
127 +
128 + #ifndef __ASSEMBLY__
129 +
130 +diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
131 +index 2fb166928e91..4fd7efdf2a53 100644
132 +--- a/arch/powerpc/kernel/eeh_driver.c
133 ++++ b/arch/powerpc/kernel/eeh_driver.c
134 +@@ -1200,6 +1200,17 @@ void eeh_handle_special_event(void)
135 + eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
136 + eeh_handle_normal_event(pe);
137 + } else {
138 ++ eeh_for_each_pe(pe, tmp_pe)
139 ++ eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
140 ++ edev->mode &= ~EEH_DEV_NO_HANDLER;
141 ++
142 ++ /* Notify all devices to be down */
143 ++ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
144 ++ eeh_set_channel_state(pe, pci_channel_io_perm_failure);
145 ++ eeh_pe_report(
146 ++ "error_detected(permanent failure)", pe,
147 ++ eeh_report_failure, NULL);
148 ++
149 + pci_lock_rescan_remove();
150 + list_for_each_entry(hose, &hose_list, list_node) {
151 + phb_pe = eeh_phb_pe_get(hose);
152 +@@ -1208,16 +1219,6 @@ void eeh_handle_special_event(void)
153 + (phb_pe->state & EEH_PE_RECOVERING))
154 + continue;
155 +
156 +- eeh_for_each_pe(pe, tmp_pe)
157 +- eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
158 +- edev->mode &= ~EEH_DEV_NO_HANDLER;
159 +-
160 +- /* Notify all devices to be down */
161 +- eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
162 +- eeh_set_channel_state(pe, pci_channel_io_perm_failure);
163 +- eeh_pe_report(
164 +- "error_detected(permanent failure)", pe,
165 +- eeh_report_failure, NULL);
166 + bus = eeh_pe_bus_get(phb_pe);
167 + if (!bus) {
168 + pr_err("%s: Cannot find PCI bus for "
169 +diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
170 +index 59bb4f4ae316..13f699256258 100644
171 +--- a/arch/powerpc/kernel/entry_32.S
172 ++++ b/arch/powerpc/kernel/entry_32.S
173 +@@ -778,7 +778,7 @@ fast_exception_return:
174 + 1: lis r3,exc_exit_restart_end@ha
175 + addi r3,r3,exc_exit_restart_end@l
176 + cmplw r12,r3
177 +-#if CONFIG_PPC_BOOK3S_601
178 ++#ifdef CONFIG_PPC_BOOK3S_601
179 + bge 2b
180 + #else
181 + bge 3f
182 +@@ -786,7 +786,7 @@ fast_exception_return:
183 + lis r4,exc_exit_restart@ha
184 + addi r4,r4,exc_exit_restart@l
185 + cmplw r12,r4
186 +-#if CONFIG_PPC_BOOK3S_601
187 ++#ifdef CONFIG_PPC_BOOK3S_601
188 + blt 2b
189 + #else
190 + blt 3f
191 +diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
192 +index 19f583e18402..98d8b6832fcb 100644
193 +--- a/arch/powerpc/kernel/head_8xx.S
194 ++++ b/arch/powerpc/kernel/head_8xx.S
195 +@@ -289,7 +289,7 @@ InstructionTLBMiss:
196 + * set. All other Linux PTE bits control the behavior
197 + * of the MMU.
198 + */
199 +- rlwimi r10, r10, 0, 0x0f00 /* Clear bits 20-23 */
200 ++ rlwinm r10, r10, 0, ~0x0f00 /* Clear bits 20-23 */
201 + rlwimi r10, r10, 4, 0x0400 /* Copy _PAGE_EXEC into bit 21 */
202 + ori r10, r10, RPN_PATTERN | 0x200 /* Set 22 and 24-27 */
203 + mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
204 +diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
205 +index e6c30cee6abf..d215f9554553 100644
206 +--- a/arch/powerpc/kernel/signal.c
207 ++++ b/arch/powerpc/kernel/signal.c
208 +@@ -200,14 +200,27 @@ unsigned long get_tm_stackpointer(struct task_struct *tsk)
209 + * normal/non-checkpointed stack pointer.
210 + */
211 +
212 ++ unsigned long ret = tsk->thread.regs->gpr[1];
213 ++
214 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
215 + BUG_ON(tsk != current);
216 +
217 + if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
218 ++ preempt_disable();
219 + tm_reclaim_current(TM_CAUSE_SIGNAL);
220 + if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
221 +- return tsk->thread.ckpt_regs.gpr[1];
222 ++ ret = tsk->thread.ckpt_regs.gpr[1];
223 ++
224 ++ /*
225 ++ * If we treclaim, we must clear the current thread's TM bits
226 ++ * before re-enabling preemption. Otherwise we might be
227 ++ * preempted and have the live MSR[TS] changed behind our back
228 ++ * (tm_recheckpoint_new_task() would recheckpoint). Besides, we
229 ++ * enter the signal handler in non-transactional state.
230 ++ */
231 ++ tsk->thread.regs->msr &= ~MSR_TS_MASK;
232 ++ preempt_enable();
233 + }
234 + #endif
235 +- return tsk->thread.regs->gpr[1];
236 ++ return ret;
237 + }
238 +diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
239 +index 98600b276f76..1b090a76b444 100644
240 +--- a/arch/powerpc/kernel/signal_32.c
241 ++++ b/arch/powerpc/kernel/signal_32.c
242 +@@ -489,19 +489,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
243 + */
244 + static int save_tm_user_regs(struct pt_regs *regs,
245 + struct mcontext __user *frame,
246 +- struct mcontext __user *tm_frame, int sigret)
247 ++ struct mcontext __user *tm_frame, int sigret,
248 ++ unsigned long msr)
249 + {
250 +- unsigned long msr = regs->msr;
251 +-
252 + WARN_ON(tm_suspend_disabled);
253 +
254 +- /* Remove TM bits from thread's MSR. The MSR in the sigcontext
255 +- * just indicates to userland that we were doing a transaction, but we
256 +- * don't want to return in transactional state. This also ensures
257 +- * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
258 +- */
259 +- regs->msr &= ~MSR_TS_MASK;
260 +-
261 + /* Save both sets of general registers */
262 + if (save_general_regs(&current->thread.ckpt_regs, frame)
263 + || save_general_regs(regs, tm_frame))
264 +@@ -912,6 +904,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
265 + int sigret;
266 + unsigned long tramp;
267 + struct pt_regs *regs = tsk->thread.regs;
268 ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
269 ++ /* Save the thread's msr before get_tm_stackpointer() changes it */
270 ++ unsigned long msr = regs->msr;
271 ++#endif
272 +
273 + BUG_ON(tsk != current);
274 +
275 +@@ -944,13 +940,13 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
276 +
277 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
278 + tm_frame = &rt_sf->uc_transact.uc_mcontext;
279 +- if (MSR_TM_ACTIVE(regs->msr)) {
280 ++ if (MSR_TM_ACTIVE(msr)) {
281 + if (__put_user((unsigned long)&rt_sf->uc_transact,
282 + &rt_sf->uc.uc_link) ||
283 + __put_user((unsigned long)tm_frame,
284 + &rt_sf->uc_transact.uc_regs))
285 + goto badframe;
286 +- if (save_tm_user_regs(regs, frame, tm_frame, sigret))
287 ++ if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
288 + goto badframe;
289 + }
290 + else
291 +@@ -1369,6 +1365,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
292 + int sigret;
293 + unsigned long tramp;
294 + struct pt_regs *regs = tsk->thread.regs;
295 ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
296 ++ /* Save the thread's msr before get_tm_stackpointer() changes it */
297 ++ unsigned long msr = regs->msr;
298 ++#endif
299 +
300 + BUG_ON(tsk != current);
301 +
302 +@@ -1402,9 +1402,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
303 +
304 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
305 + tm_mctx = &frame->mctx_transact;
306 +- if (MSR_TM_ACTIVE(regs->msr)) {
307 ++ if (MSR_TM_ACTIVE(msr)) {
308 + if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
309 +- sigret))
310 ++ sigret, msr))
311 + goto badframe;
312 + }
313 + else
314 +diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
315 +index 117515564ec7..84ed2e77ef9c 100644
316 +--- a/arch/powerpc/kernel/signal_64.c
317 ++++ b/arch/powerpc/kernel/signal_64.c
318 +@@ -192,7 +192,8 @@ static long setup_sigcontext(struct sigcontext __user *sc,
319 + static long setup_tm_sigcontexts(struct sigcontext __user *sc,
320 + struct sigcontext __user *tm_sc,
321 + struct task_struct *tsk,
322 +- int signr, sigset_t *set, unsigned long handler)
323 ++ int signr, sigset_t *set, unsigned long handler,
324 ++ unsigned long msr)
325 + {
326 + /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
327 + * process never used altivec yet (MSR_VEC is zero in pt_regs of
328 +@@ -207,12 +208,11 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
329 + elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
330 + #endif
331 + struct pt_regs *regs = tsk->thread.regs;
332 +- unsigned long msr = tsk->thread.regs->msr;
333 + long err = 0;
334 +
335 + BUG_ON(tsk != current);
336 +
337 +- BUG_ON(!MSR_TM_ACTIVE(regs->msr));
338 ++ BUG_ON(!MSR_TM_ACTIVE(msr));
339 +
340 + WARN_ON(tm_suspend_disabled);
341 +
342 +@@ -222,13 +222,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
343 + */
344 + msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
345 +
346 +- /* Remove TM bits from thread's MSR. The MSR in the sigcontext
347 +- * just indicates to userland that we were doing a transaction, but we
348 +- * don't want to return in transactional state. This also ensures
349 +- * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
350 +- */
351 +- regs->msr &= ~MSR_TS_MASK;
352 +-
353 + #ifdef CONFIG_ALTIVEC
354 + err |= __put_user(v_regs, &sc->v_regs);
355 + err |= __put_user(tm_v_regs, &tm_sc->v_regs);
356 +@@ -824,6 +817,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
357 + unsigned long newsp = 0;
358 + long err = 0;
359 + struct pt_regs *regs = tsk->thread.regs;
360 ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
361 ++ /* Save the thread's msr before get_tm_stackpointer() changes it */
362 ++ unsigned long msr = regs->msr;
363 ++#endif
364 +
365 + BUG_ON(tsk != current);
366 +
367 +@@ -841,7 +838,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
368 + err |= __put_user(0, &frame->uc.uc_flags);
369 + err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
370 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
371 +- if (MSR_TM_ACTIVE(regs->msr)) {
372 ++ if (MSR_TM_ACTIVE(msr)) {
373 + /* The ucontext_t passed to userland points to the second
374 + * ucontext_t (for transactional state) with its uc_link ptr.
375 + */
376 +@@ -849,7 +846,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
377 + err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
378 + &frame->uc_transact.uc_mcontext,
379 + tsk, ksig->sig, NULL,
380 +- (unsigned long)ksig->ka.sa.sa_handler);
381 ++ (unsigned long)ksig->ka.sa.sa_handler,
382 ++ msr);
383 + } else
384 + #endif
385 + {
386 +diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
387 +index 73d4873fc7f8..33b3461d91e8 100644
388 +--- a/arch/powerpc/mm/hugetlbpage.c
389 ++++ b/arch/powerpc/mm/hugetlbpage.c
390 +@@ -53,20 +53,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
391 + if (pshift >= pdshift) {
392 + cachep = PGT_CACHE(PTE_T_ORDER);
393 + num_hugepd = 1 << (pshift - pdshift);
394 ++ new = NULL;
395 + } else if (IS_ENABLED(CONFIG_PPC_8xx)) {
396 +- cachep = PGT_CACHE(PTE_INDEX_SIZE);
397 ++ cachep = NULL;
398 + num_hugepd = 1;
399 ++ new = pte_alloc_one(mm);
400 + } else {
401 + cachep = PGT_CACHE(pdshift - pshift);
402 + num_hugepd = 1;
403 ++ new = NULL;
404 + }
405 +
406 +- if (!cachep) {
407 ++ if (!cachep && !new) {
408 + WARN_ONCE(1, "No page table cache created for hugetlb tables");
409 + return -ENOMEM;
410 + }
411 +
412 +- new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
413 ++ if (cachep)
414 ++ new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
415 +
416 + BUG_ON(pshift > HUGEPD_SHIFT_MASK);
417 + BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
418 +@@ -97,7 +101,10 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
419 + if (i < num_hugepd) {
420 + for (i = i - 1 ; i >= 0; i--, hpdp--)
421 + *hpdp = __hugepd(0);
422 +- kmem_cache_free(cachep, new);
423 ++ if (cachep)
424 ++ kmem_cache_free(cachep, new);
425 ++ else
426 ++ pte_free(mm, new);
427 + } else {
428 + kmemleak_ignore(new);
429 + }
430 +@@ -324,8 +331,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
431 + if (shift >= pdshift)
432 + hugepd_free(tlb, hugepte);
433 + else if (IS_ENABLED(CONFIG_PPC_8xx))
434 +- pgtable_free_tlb(tlb, hugepte,
435 +- get_hugepd_cache_index(PTE_INDEX_SIZE));
436 ++ pgtable_free_tlb(tlb, hugepte, 0);
437 + else
438 + pgtable_free_tlb(tlb, hugepte,
439 + get_hugepd_cache_index(pdshift - shift));
440 +@@ -639,12 +645,13 @@ static int __init hugetlbpage_init(void)
441 + * if we have pdshift and shift value same, we don't
442 + * use pgt cache for hugepd.
443 + */
444 +- if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
445 +- pgtable_cache_add(PTE_INDEX_SIZE);
446 +- else if (pdshift > shift)
447 +- pgtable_cache_add(pdshift - shift);
448 +- else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
449 ++ if (pdshift > shift) {
450 ++ if (!IS_ENABLED(CONFIG_PPC_8xx))
451 ++ pgtable_cache_add(pdshift - shift);
452 ++ } else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
453 ++ IS_ENABLED(CONFIG_PPC_8xx)) {
454 + pgtable_cache_add(PTE_T_ORDER);
455 ++ }
456 +
457 + configured = true;
458 + }
459 +diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
460 +index 5d12352545c5..5591243d673e 100644
461 +--- a/arch/s390/boot/kaslr.c
462 ++++ b/arch/s390/boot/kaslr.c
463 +@@ -75,7 +75,7 @@ static unsigned long get_random(unsigned long limit)
464 + *(unsigned long *) prng.parm_block ^= seed;
465 + for (i = 0; i < 16; i++) {
466 + cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
467 +- (char *) entropy, (char *) entropy,
468 ++ (u8 *) entropy, (u8 *) entropy,
469 + sizeof(entropy));
470 + memcpy(prng.parm_block, entropy, sizeof(entropy));
471 + }
472 +diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
473 +index 3f5cb55cde35..e399102367af 100644
474 +--- a/arch/s390/include/asm/page.h
475 ++++ b/arch/s390/include/asm/page.h
476 +@@ -42,7 +42,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end);
477 +
478 + static inline void storage_key_init_range(unsigned long start, unsigned long end)
479 + {
480 +- if (PAGE_DEFAULT_KEY)
481 ++ if (PAGE_DEFAULT_KEY != 0)
482 + __storage_key_init_range(start, end);
483 + }
484 +
485 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
486 +index c1ed054c103c..734a3334e0f0 100644
487 +--- a/arch/x86/include/asm/kvm_host.h
488 ++++ b/arch/x86/include/asm/kvm_host.h
489 +@@ -1098,7 +1098,7 @@ struct kvm_x86_ops {
490 + void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
491 + void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
492 + void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
493 +- void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
494 ++ int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
495 + int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
496 + int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
497 + int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
498 +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
499 +index 6a3124664289..1682e4b5ce75 100644
500 +--- a/arch/x86/include/asm/msr-index.h
501 ++++ b/arch/x86/include/asm/msr-index.h
502 +@@ -510,6 +510,8 @@
503 + #define MSR_K7_HWCR 0xc0010015
504 + #define MSR_K7_HWCR_SMMLOCK_BIT 0
505 + #define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
506 ++#define MSR_K7_HWCR_IRPERF_EN_BIT 30
507 ++#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
508 + #define MSR_K7_FID_VID_CTL 0xc0010041
509 + #define MSR_K7_FID_VID_STATUS 0xc0010042
510 +
511 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
512 +index 62c30279be77..c3f4dd4ae155 100644
513 +--- a/arch/x86/kernel/cpu/amd.c
514 ++++ b/arch/x86/kernel/cpu/amd.c
515 +@@ -28,6 +28,7 @@
516 +
517 + static const int amd_erratum_383[];
518 + static const int amd_erratum_400[];
519 ++static const int amd_erratum_1054[];
520 + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
521 +
522 + /*
523 +@@ -978,6 +979,15 @@ static void init_amd(struct cpuinfo_x86 *c)
524 + /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
525 + if (!cpu_has(c, X86_FEATURE_XENPV))
526 + set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
527 ++
528 ++ /*
529 ++ * Turn on the Instructions Retired free counter on machines not
530 ++ * susceptible to erratum #1054 "Instructions Retired Performance
531 ++ * Counter May Be Inaccurate".
532 ++ */
533 ++ if (cpu_has(c, X86_FEATURE_IRPERF) &&
534 ++ !cpu_has_amd_erratum(c, amd_erratum_1054))
535 ++ msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
536 + }
537 +
538 + #ifdef CONFIG_X86_32
539 +@@ -1105,6 +1115,10 @@ static const int amd_erratum_400[] =
540 + static const int amd_erratum_383[] =
541 + AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
542 +
543 ++/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
544 ++static const int amd_erratum_1054[] =
545 ++ AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
546 ++
547 +
548 + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
549 + {
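
The IRPERF change above can be observed from userspace by reading HWCR back.
A small sketch (it assumes an AMD CPU, root privileges, and the 'msr' kernel
module providing /dev/cpu/*/msr; the register number 0xc0010015 and bit 30
come from the msr-index.h hunk):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/dev/cpu/0/msr", O_RDONLY);
            if (fd < 0) {
                    perror("open /dev/cpu/0/msr (msr module loaded?)");
                    return 1;
            }

            /* The msr driver maps the file offset to the MSR number. */
            uint64_t hwcr;
            if (pread(fd, &hwcr, sizeof(hwcr), 0xc0010015) != sizeof(hwcr)) {
                    perror("pread");
                    return 1;
            }

            printf("MSR_K7_HWCR = %#llx, IRPERF_EN (bit 30) = %llu\n",
                   (unsigned long long)hwcr,
                   (unsigned long long)((hwcr >> 30) & 1));
            close(fd);
            return 0;
    }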
550 +diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
551 +index 259f3f4e2e5f..1cf34fcc3a8e 100644
552 +--- a/arch/x86/kernel/cpu/mce/amd.c
553 ++++ b/arch/x86/kernel/cpu/mce/amd.c
554 +@@ -1161,9 +1161,12 @@ static const struct sysfs_ops threshold_ops = {
555 + .store = store,
556 + };
557 +
558 ++static void threshold_block_release(struct kobject *kobj);
559 ++
560 + static struct kobj_type threshold_ktype = {
561 + .sysfs_ops = &threshold_ops,
562 + .default_attrs = default_attrs,
563 ++ .release = threshold_block_release,
564 + };
565 +
566 + static const char *get_name(unsigned int bank, struct threshold_block *b)
567 +@@ -1196,8 +1199,9 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
568 + return buf_mcatype;
569 + }
570 +
571 +-static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
572 +- unsigned int block, u32 address)
573 ++static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
574 ++ unsigned int bank, unsigned int block,
575 ++ u32 address)
576 + {
577 + struct threshold_block *b = NULL;
578 + u32 low, high;
579 +@@ -1241,16 +1245,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
580 +
581 + INIT_LIST_HEAD(&b->miscj);
582 +
583 +- if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
584 +- list_add(&b->miscj,
585 +- &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
586 +- } else {
587 +- per_cpu(threshold_banks, cpu)[bank]->blocks = b;
588 +- }
589 ++ if (tb->blocks)
590 ++ list_add(&b->miscj, &tb->blocks->miscj);
591 ++ else
592 ++ tb->blocks = b;
593 +
594 +- err = kobject_init_and_add(&b->kobj, &threshold_ktype,
595 +- per_cpu(threshold_banks, cpu)[bank]->kobj,
596 +- get_name(bank, b));
597 ++ err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
598 + if (err)
599 + goto out_free;
600 + recurse:
601 +@@ -1258,7 +1258,7 @@ recurse:
602 + if (!address)
603 + return 0;
604 +
605 +- err = allocate_threshold_blocks(cpu, bank, block, address);
606 ++ err = allocate_threshold_blocks(cpu, tb, bank, block, address);
607 + if (err)
608 + goto out_free;
609 +
610 +@@ -1343,8 +1343,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
611 + goto out_free;
612 + }
613 +
614 +- per_cpu(threshold_banks, cpu)[bank] = b;
615 +-
616 + if (is_shared_bank(bank)) {
617 + refcount_set(&b->cpus, 1);
618 +
619 +@@ -1355,9 +1353,13 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
620 + }
621 + }
622 +
623 +- err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
624 +- if (!err)
625 +- goto out;
626 ++ err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
627 ++ if (err)
628 ++ goto out_free;
629 ++
630 ++ per_cpu(threshold_banks, cpu)[bank] = b;
631 ++
632 ++ return 0;
633 +
634 + out_free:
635 + kfree(b);
636 +@@ -1366,8 +1368,12 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
637 + return err;
638 + }
639 +
640 +-static void deallocate_threshold_block(unsigned int cpu,
641 +- unsigned int bank)
642 ++static void threshold_block_release(struct kobject *kobj)
643 ++{
644 ++ kfree(to_block(kobj));
645 ++}
646 ++
647 ++static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
648 + {
649 + struct threshold_block *pos = NULL;
650 + struct threshold_block *tmp = NULL;
651 +@@ -1377,13 +1383,11 @@ static void deallocate_threshold_block(unsigned int cpu,
652 + return;
653 +
654 + list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
655 +- kobject_put(&pos->kobj);
656 + list_del(&pos->miscj);
657 +- kfree(pos);
658 ++ kobject_put(&pos->kobj);
659 + }
660 +
661 +- kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
662 +- per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
663 ++ kobject_put(&head->blocks->kobj);
664 + }
665 +
666 + static void __threshold_remove_blocks(struct threshold_bank *b)
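
The mce/amd.c rework above cures a use-after-free by freeing each threshold
block only from its kobject ->release() callback, so the memory lives exactly
as long as the last reference. A plain-C analogue of that refcount/release
pattern (a sketch, not the kernel kobject API):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            int refs;
            void (*release)(struct obj *);
    };

    static void obj_put(struct obj *o)
    {
            if (--o->refs == 0)
                    o->release(o);  /* last reference: release() frees */
    }

    static void obj_release(struct obj *o)
    {
            printf("releasing %p\n", (void *)o);
            free(o);
    }

    int main(void)
    {
            struct obj *o = malloc(sizeof(*o));
            o->refs = 2;            /* e.g. creator + sysfs each hold one */
            o->release = obj_release;

            obj_put(o);             /* sysfs drops its ref: object survives */
            obj_put(o);             /* last ref dropped: freed in release() */
            return 0;
    }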
667 +diff --git a/arch/x86/kernel/ima_arch.c b/arch/x86/kernel/ima_arch.c
668 +index 4d4f5d9faac3..23054909c8dd 100644
669 +--- a/arch/x86/kernel/ima_arch.c
670 ++++ b/arch/x86/kernel/ima_arch.c
671 +@@ -10,8 +10,6 @@ extern struct boot_params boot_params;
672 +
673 + static enum efi_secureboot_mode get_sb_mode(void)
674 + {
675 +- efi_char16_t efi_SecureBoot_name[] = L"SecureBoot";
676 +- efi_char16_t efi_SetupMode_name[] = L"SecureBoot";
677 + efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
678 + efi_status_t status;
679 + unsigned long size;
680 +@@ -25,7 +23,7 @@ static enum efi_secureboot_mode get_sb_mode(void)
681 + }
682 +
683 + /* Get variable contents into buffer */
684 +- status = efi.get_variable(efi_SecureBoot_name, &efi_variable_guid,
685 ++ status = efi.get_variable(L"SecureBoot", &efi_variable_guid,
686 + NULL, &size, &secboot);
687 + if (status == EFI_NOT_FOUND) {
688 + pr_info("ima: secureboot mode disabled\n");
689 +@@ -38,7 +36,7 @@ static enum efi_secureboot_mode get_sb_mode(void)
690 + }
691 +
692 + size = sizeof(setupmode);
693 +- status = efi.get_variable(efi_SetupMode_name, &efi_variable_guid,
694 ++ status = efi.get_variable(L"SetupMode", &efi_variable_guid,
695 + NULL, &size, &setupmode);
696 +
697 + if (status != EFI_SUCCESS) /* ignore unknown SetupMode */
698 +diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
699 +index 8ecd48d31800..5ddcaacef291 100644
700 +--- a/arch/x86/kvm/irq_comm.c
701 ++++ b/arch/x86/kvm/irq_comm.c
702 +@@ -416,7 +416,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
703 +
704 + kvm_set_msi_irq(vcpu->kvm, entry, &irq);
705 +
706 +- if (irq.level && kvm_apic_match_dest(vcpu, NULL, 0,
707 ++ if (irq.trig_mode && kvm_apic_match_dest(vcpu, NULL, 0,
708 + irq.dest_id, irq.dest_mode))
709 + __set_bit(irq.vector, ioapic_handled_vectors);
710 + }
711 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
712 +index 15728971a430..5d2587005d0e 100644
713 +--- a/arch/x86/kvm/lapic.c
714 ++++ b/arch/x86/kvm/lapic.c
715 +@@ -637,9 +637,11 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
716 + static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
717 + {
718 + u8 val;
719 +- if (pv_eoi_get_user(vcpu, &val) < 0)
720 ++ if (pv_eoi_get_user(vcpu, &val) < 0) {
721 + printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
722 + (unsigned long long)vcpu->arch.pv_eoi.msr_val);
723 ++ return false;
724 ++ }
725 + return val & 0x1;
726 + }
727 +
728 +@@ -1056,11 +1058,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
729 + apic->regs + APIC_TMR);
730 + }
731 +
732 +- if (vcpu->arch.apicv_active)
733 +- kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
734 +- else {
735 ++ if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
736 + kvm_lapic_set_irr(vector, apic);
737 +-
738 + kvm_make_request(KVM_REQ_EVENT, vcpu);
739 + kvm_vcpu_kick(vcpu);
740 + }
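
The lapic.c hunk above changes the contract of ->deliver_posted_interrupt():
the vendor hook now returns non-zero when it cannot post the interrupt (e.g.
APICv inactive), and the common code takes the IRR-plus-kick fallback. A toy
model of that control flow (the names mirror the patch; the bodies are
stand-ins):

    #include <stdio.h>

    static int apicv_active;        /* 0: posted interrupts unavailable */

    /* New contract: 0 on success, -1 to request the fallback path. */
    static int deliver_posted_interrupt(int vector)
    {
            if (!apicv_active)
                    return -1;
            printf("vector %d posted directly\n", vector);
            return 0;
    }

    static void apic_accept_irq(int vector)
    {
            if (deliver_posted_interrupt(vector)) {
                    /* Fallback: latch the vector and kick the vCPU. */
                    printf("vector %d latched in IRR, vcpu kicked\n", vector);
            }
    }

    int main(void)
    {
            apic_accept_irq(42);    /* APICv off: fallback path */
            apicv_active = 1;
            apic_accept_irq(43);    /* APICv on: posted directly */
            return 0;
    }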
741 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
742 +index 8d1be7c61f10..207030db3481 100644
743 +--- a/arch/x86/kvm/svm.c
744 ++++ b/arch/x86/kvm/svm.c
745 +@@ -5141,8 +5141,11 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
746 + return;
747 + }
748 +
749 +-static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
750 ++static int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
751 + {
752 ++ if (!vcpu->arch.apicv_active)
753 ++ return -1;
754 ++
755 + kvm_lapic_set_irr(vec, vcpu->arch.apic);
756 + smp_mb__after_atomic();
757 +
758 +@@ -5154,6 +5157,8 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
759 + put_cpu();
760 + } else
761 + kvm_vcpu_wake_up(vcpu);
762 ++
763 ++ return 0;
764 + }
765 +
766 + static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
767 +diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
768 +index 283bdb7071af..f486e2606247 100644
769 +--- a/arch/x86/kvm/vmx/capabilities.h
770 ++++ b/arch/x86/kvm/vmx/capabilities.h
771 +@@ -12,6 +12,7 @@ extern bool __read_mostly enable_ept;
772 + extern bool __read_mostly enable_unrestricted_guest;
773 + extern bool __read_mostly enable_ept_ad_bits;
774 + extern bool __read_mostly enable_pml;
775 ++extern bool __read_mostly enable_apicv;
776 + extern int __read_mostly pt_mode;
777 +
778 + #define PT_MODE_SYSTEM 0
779 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
780 +index 931d3b5f3acd..802ef7177d53 100644
781 +--- a/arch/x86/kvm/vmx/nested.c
782 ++++ b/arch/x86/kvm/vmx/nested.c
783 +@@ -5132,24 +5132,17 @@ fail:
784 + return 1;
785 + }
786 +
787 +-
788 +-static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
789 +- struct vmcs12 *vmcs12)
790 ++/*
791 ++ * Return true if an IO instruction with the specified port and size should cause
792 ++ * a VM-exit into L1.
793 ++ */
794 ++bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
795 ++ int size)
796 + {
797 +- unsigned long exit_qualification;
798 ++ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
799 + gpa_t bitmap, last_bitmap;
800 +- unsigned int port;
801 +- int size;
802 + u8 b;
803 +
804 +- if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
805 +- return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
806 +-
807 +- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
808 +-
809 +- port = exit_qualification >> 16;
810 +- size = (exit_qualification & 7) + 1;
811 +-
812 + last_bitmap = (gpa_t)-1;
813 + b = -1;
814 +
815 +@@ -5176,6 +5169,24 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
816 + return false;
817 + }
818 +
819 ++static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
820 ++ struct vmcs12 *vmcs12)
821 ++{
822 ++ unsigned long exit_qualification;
823 ++ unsigned short port;
824 ++ int size;
825 ++
826 ++ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
827 ++ return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
828 ++
829 ++ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
830 ++
831 ++ port = exit_qualification >> 16;
832 ++ size = (exit_qualification & 7) + 1;
833 ++
834 ++ return nested_vmx_check_io_bitmaps(vcpu, port, size);
835 ++}
836 ++
837 + /*
838 + * Return 1 if we should exit from L2 to L1 to handle an MSR access access,
839 + * rather than handle it ourselves in L0. I.e., check whether L1 expressed
840 +@@ -5796,8 +5807,7 @@ void nested_vmx_vcpu_setup(void)
841 + * bit in the high half is on if the corresponding bit in the control field
842 + * may be on. See also vmx_control_verify().
843 + */
844 +-void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
845 +- bool apicv)
846 ++void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
847 + {
848 + /*
849 + * Note that as a general rule, the high half of the MSRs (bits in
850 +@@ -5824,7 +5834,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
851 + PIN_BASED_EXT_INTR_MASK |
852 + PIN_BASED_NMI_EXITING |
853 + PIN_BASED_VIRTUAL_NMIS |
854 +- (apicv ? PIN_BASED_POSTED_INTR : 0);
855 ++ (enable_apicv ? PIN_BASED_POSTED_INTR : 0);
856 + msrs->pinbased_ctls_high |=
857 + PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
858 + PIN_BASED_VMX_PREEMPTION_TIMER;
859 +diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
860 +index 6280f33e5fa6..b8521c451bb0 100644
861 +--- a/arch/x86/kvm/vmx/nested.h
862 ++++ b/arch/x86/kvm/vmx/nested.h
863 +@@ -17,8 +17,7 @@ enum nvmx_vmentry_status {
864 + };
865 +
866 + void vmx_leave_nested(struct kvm_vcpu *vcpu);
867 +-void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
868 +- bool apicv);
869 ++void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
870 + void nested_vmx_hardware_unsetup(void);
871 + __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
872 + void nested_vmx_vcpu_setup(void);
873 +@@ -33,6 +32,8 @@ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
874 + int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
875 + int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
876 + u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
877 ++bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
878 ++ int size);
879 +
880 + static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
881 + {
882 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
883 +index 84b57b461ad6..8ebcd9de87a2 100644
884 +--- a/arch/x86/kvm/vmx/vmx.c
885 ++++ b/arch/x86/kvm/vmx/vmx.c
886 +@@ -95,7 +95,7 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
887 + static bool __read_mostly fasteoi = 1;
888 + module_param(fasteoi, bool, S_IRUGO);
889 +
890 +-static bool __read_mostly enable_apicv = 1;
891 ++bool __read_mostly enable_apicv = 1;
892 + module_param(enable_apicv, bool, S_IRUGO);
893 +
894 + /*
895 +@@ -3853,24 +3853,29 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
896 + * 2. If target vcpu isn't running(root mode), kick it to pick up the
897 + * interrupt from PIR in next vmentry.
898 + */
899 +-static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
900 ++static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
901 + {
902 + struct vcpu_vmx *vmx = to_vmx(vcpu);
903 + int r;
904 +
905 + r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
906 + if (!r)
907 +- return;
908 ++ return 0;
909 ++
910 ++ if (!vcpu->arch.apicv_active)
911 ++ return -1;
912 +
913 + if (pi_test_and_set_pir(vector, &vmx->pi_desc))
914 +- return;
915 ++ return 0;
916 +
917 + /* If a previous notification has sent the IPI, nothing to do. */
918 + if (pi_test_and_set_on(&vmx->pi_desc))
919 +- return;
920 ++ return 0;
921 +
922 + if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
923 + kvm_vcpu_kick(vcpu);
924 ++
925 ++ return 0;
926 + }
927 +
928 + /*
929 +@@ -6802,8 +6807,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
930 +
931 + if (nested)
932 + nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
933 +- vmx_capability.ept,
934 +- kvm_vcpu_apicv_active(&vmx->vcpu));
935 ++ vmx_capability.ept);
936 + else
937 + memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs));
938 +
939 +@@ -6885,8 +6889,7 @@ static int __init vmx_check_processor_compat(void)
940 + if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0)
941 + return -EIO;
942 + if (nested)
943 +- nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept,
944 +- enable_apicv);
945 ++ nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept);
946 + if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
947 + printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
948 + smp_processor_id());
949 +@@ -7132,6 +7135,39 @@ static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
950 + to_vmx(vcpu)->req_immediate_exit = true;
951 + }
952 +
953 ++static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
954 ++ struct x86_instruction_info *info)
955 ++{
956 ++ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
957 ++ unsigned short port;
958 ++ bool intercept;
959 ++ int size;
960 ++
961 ++ if (info->intercept == x86_intercept_in ||
962 ++ info->intercept == x86_intercept_ins) {
963 ++ port = info->src_val;
964 ++ size = info->dst_bytes;
965 ++ } else {
966 ++ port = info->dst_val;
967 ++ size = info->src_bytes;
968 ++ }
969 ++
970 ++ /*
971 ++ * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
972 ++ * VM-exits depend on the 'unconditional IO exiting' VM-execution
973 ++ * control.
974 ++ *
975 ++ * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
976 ++ */
977 ++ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
978 ++ intercept = nested_cpu_has(vmcs12,
979 ++ CPU_BASED_UNCOND_IO_EXITING);
980 ++ else
981 ++ intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
982 ++
983 ++ return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
984 ++}
985 ++
986 + static int vmx_check_intercept(struct kvm_vcpu *vcpu,
987 + struct x86_instruction_info *info,
988 + enum x86_intercept_stage stage)
989 +@@ -7139,19 +7175,31 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
990 + struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
991 + struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
992 +
993 ++ switch (info->intercept) {
994 + /*
995 + * RDPID causes #UD if disabled through secondary execution controls.
996 + * Because it is marked as EmulateOnUD, we need to intercept it here.
997 + */
998 +- if (info->intercept == x86_intercept_rdtscp &&
999 +- !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
1000 +- ctxt->exception.vector = UD_VECTOR;
1001 +- ctxt->exception.error_code_valid = false;
1002 +- return X86EMUL_PROPAGATE_FAULT;
1003 +- }
1004 ++ case x86_intercept_rdtscp:
1005 ++ if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
1006 ++ ctxt->exception.vector = UD_VECTOR;
1007 ++ ctxt->exception.error_code_valid = false;
1008 ++ return X86EMUL_PROPAGATE_FAULT;
1009 ++ }
1010 ++ break;
1011 ++
1012 ++ case x86_intercept_in:
1013 ++ case x86_intercept_ins:
1014 ++ case x86_intercept_out:
1015 ++ case x86_intercept_outs:
1016 ++ return vmx_check_intercept_io(vcpu, info);
1017 +
1018 + /* TODO: check more intercepts... */
1019 +- return X86EMUL_CONTINUE;
1020 ++ default:
1021 ++ break;
1022 ++ }
1023 ++
1024 ++ return X86EMUL_UNHANDLEABLE;
1025 + }
1026 +
1027 + #ifdef CONFIG_X86_64
1028 +@@ -7736,7 +7784,7 @@ static __init int hardware_setup(void)
1029 +
1030 + if (nested) {
1031 + nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
1032 +- vmx_capability.ept, enable_apicv);
1033 ++ vmx_capability.ept);
1034 +
1035 + r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
1036 + if (r)
1037 +diff --git a/crypto/hash_info.c b/crypto/hash_info.c
1038 +index c754cb75dd1a..a49ff96bde77 100644
1039 +--- a/crypto/hash_info.c
1040 ++++ b/crypto/hash_info.c
1041 +@@ -26,7 +26,7 @@ const char *const hash_algo_name[HASH_ALGO__LAST] = {
1042 + [HASH_ALGO_TGR_128] = "tgr128",
1043 + [HASH_ALGO_TGR_160] = "tgr160",
1044 + [HASH_ALGO_TGR_192] = "tgr192",
1045 +- [HASH_ALGO_SM3_256] = "sm3-256",
1046 ++ [HASH_ALGO_SM3_256] = "sm3",
1047 + [HASH_ALGO_STREEBOG_256] = "streebog256",
1048 + [HASH_ALGO_STREEBOG_512] = "streebog512",
1049 + };
1050 +diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
1051 +index 9e2f5a05c066..bad2257356fe 100644
1052 +--- a/drivers/acpi/acpica/evevent.c
1053 ++++ b/drivers/acpi/acpica/evevent.c
1054 +@@ -265,4 +265,49 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
1055 + handler) (acpi_gbl_fixed_event_handlers[event].context));
1056 + }
1057 +
1058 ++/*******************************************************************************
1059 ++ *
1060 ++ * FUNCTION: acpi_any_fixed_event_status_set
1061 ++ *
1062 ++ * PARAMETERS: None
1063 ++ *
1064 ++ * RETURN: TRUE or FALSE
1065 ++ *
1066 ++ * DESCRIPTION: Checks the PM status register for active fixed events
1067 ++ *
1068 ++ ******************************************************************************/
1069 ++
1070 ++u32 acpi_any_fixed_event_status_set(void)
1071 ++{
1072 ++ acpi_status status;
1073 ++ u32 in_status;
1074 ++ u32 in_enable;
1075 ++ u32 i;
1076 ++
1077 ++ status = acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &in_enable);
1078 ++ if (ACPI_FAILURE(status)) {
1079 ++ return (FALSE);
1080 ++ }
1081 ++
1082 ++ status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &in_status);
1083 ++ if (ACPI_FAILURE(status)) {
1084 ++ return (FALSE);
1085 ++ }
1086 ++
1087 ++ /*
1088 ++ * Check for all possible Fixed Events and dispatch those that are active
1089 ++ */
1090 ++ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
1091 ++
1092 ++ /* Both the status and enable bits must be on for this event */
1093 ++
1094 ++ if ((in_status & acpi_gbl_fixed_event_info[i].status_bit_mask) &&
1095 ++ (in_enable & acpi_gbl_fixed_event_info[i].enable_bit_mask)) {
1096 ++ return (TRUE);
1097 ++ }
1098 ++ }
1099 ++
1100 ++ return (FALSE);
1101 ++}
1102 ++
1103 + #endif /* !ACPI_REDUCED_HARDWARE */
1104 +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
1105 +index 62348ec2a807..827530dae682 100644
1106 +--- a/drivers/acpi/sleep.c
1107 ++++ b/drivers/acpi/sleep.c
1108 +@@ -992,6 +992,13 @@ static bool acpi_s2idle_wake(void)
1109 + if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
1110 + return true;
1111 +
1112 ++ /*
1113 ++ * If the status bit of any enabled fixed event is set, the
1114 ++ * wakeup is regarded as valid.
1115 ++ */
1116 ++ if (acpi_any_fixed_event_status_set())
1117 ++ return true;
1118 ++
1119 + /*
1120 + * If there are no EC events to process and at least one of the
1121 + * other enabled GPEs is active, the wakeup is regarded as a
1122 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
1123 +index 05c2b32dcc4d..1787e3ad9c44 100644
1124 +--- a/drivers/ata/ahci.c
1125 ++++ b/drivers/ata/ahci.c
1126 +@@ -80,6 +80,7 @@ enum board_ids {
1127 +
1128 + static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
1129 + static void ahci_remove_one(struct pci_dev *dev);
1130 ++static void ahci_shutdown_one(struct pci_dev *dev);
1131 + static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1132 + unsigned long deadline);
1133 + static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
1134 +@@ -593,6 +594,7 @@ static struct pci_driver ahci_pci_driver = {
1135 + .id_table = ahci_pci_tbl,
1136 + .probe = ahci_init_one,
1137 + .remove = ahci_remove_one,
1138 ++ .shutdown = ahci_shutdown_one,
1139 + .driver = {
1140 + .pm = &ahci_pci_pm_ops,
1141 + },
1142 +@@ -1864,6 +1866,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1143 + return 0;
1144 + }
1145 +
1146 ++static void ahci_shutdown_one(struct pci_dev *pdev)
1147 ++{
1148 ++ ata_pci_shutdown_one(pdev);
1149 ++}
1150 ++
1151 + static void ahci_remove_one(struct pci_dev *pdev)
1152 + {
1153 + pm_runtime_get_noresume(&pdev->dev);
1154 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1155 +index 84b183a6424e..581595b35573 100644
1156 +--- a/drivers/ata/libata-core.c
1157 ++++ b/drivers/ata/libata-core.c
1158 +@@ -6762,6 +6762,26 @@ void ata_pci_remove_one(struct pci_dev *pdev)
1159 + ata_host_detach(host);
1160 + }
1161 +
1162 ++void ata_pci_shutdown_one(struct pci_dev *pdev)
1163 ++{
1164 ++ struct ata_host *host = pci_get_drvdata(pdev);
1165 ++ int i;
1166 ++
1167 ++ for (i = 0; i < host->n_ports; i++) {
1168 ++ struct ata_port *ap = host->ports[i];
1169 ++
1170 ++ ap->pflags |= ATA_PFLAG_FROZEN;
1171 ++
1172 ++ /* Disable port interrupts */
1173 ++ if (ap->ops->freeze)
1174 ++ ap->ops->freeze(ap);
1175 ++
1176 ++ /* Stop the port DMA engines */
1177 ++ if (ap->ops->port_stop)
1178 ++ ap->ops->port_stop(ap);
1179 ++ }
1180 ++}
1181 ++
1182 + /* move to PCI subsystem */
1183 + int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1184 + {
1185 +@@ -7382,6 +7402,7 @@ EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
1186 +
1187 + #ifdef CONFIG_PCI
1188 + EXPORT_SYMBOL_GPL(pci_test_config_bits);
1189 ++EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
1190 + EXPORT_SYMBOL_GPL(ata_pci_remove_one);
1191 + #ifdef CONFIG_PM
1192 + EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
1193 +diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
1194 +index 485865fd0412..f19a03b62365 100644
1195 +--- a/drivers/block/floppy.c
1196 ++++ b/drivers/block/floppy.c
1197 +@@ -853,14 +853,17 @@ static void reset_fdc_info(int mode)
1198 + /* selects the fdc and drive, and enables the fdc's input/dma. */
1199 + static void set_fdc(int drive)
1200 + {
1201 ++ unsigned int new_fdc = fdc;
1202 ++
1203 + if (drive >= 0 && drive < N_DRIVE) {
1204 +- fdc = FDC(drive);
1205 ++ new_fdc = FDC(drive);
1206 + current_drive = drive;
1207 + }
1208 +- if (fdc != 1 && fdc != 0) {
1209 ++ if (new_fdc >= N_FDC) {
1210 + pr_info("bad fdc value\n");
1211 + return;
1212 + }
1213 ++ fdc = new_fdc;
1214 + set_dor(fdc, ~0, 8);
1215 + #if N_FDC > 1
1216 + set_dor(1 - fdc, ~8, 0);
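
The floppy fix above is a validate-before-commit pattern: the old code wrote
the candidate index into the global 'fdc' first and range-checked it
afterwards, leaving a bad value behind on the error path. Distilled into a
standalone sketch:

    #include <stdio.h>

    #define N_FDC 2
    static unsigned int fdc;        /* currently selected controller */

    static int set_fdc(unsigned int new_fdc)
    {
            if (new_fdc >= N_FDC) {         /* validate the candidate... */
                    fprintf(stderr, "bad fdc value\n");
                    return -1;              /* ...without touching 'fdc' */
            }
            fdc = new_fdc;                  /* commit only when valid */
            return 0;
    }

    int main(void)
    {
            set_fdc(1);
            set_fdc(9);                     /* rejected; fdc stays 1 */
            printf("fdc = %u\n", fdc);
            return 0;
    }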
1217 +diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
1218 +index 5817dfe5c5d2..2f8026b71933 100644
1219 +--- a/drivers/char/tpm/tpm2-cmd.c
1220 ++++ b/drivers/char/tpm/tpm2-cmd.c
1221 +@@ -831,6 +831,8 @@ static int tpm2_init_bank_info(struct tpm_chip *chip, u32 bank_index)
1222 + return 0;
1223 + }
1224 +
1225 ++ bank->crypto_id = HASH_ALGO__LAST;
1226 ++
1227 + return tpm2_pcr_read(chip, 0, &digest, &bank->digest_size);
1228 + }
1229 +
1230 +diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
1231 +index 66f1b2ac5cde..c27e206a764c 100644
1232 +--- a/drivers/dma/imx-sdma.c
1233 ++++ b/drivers/dma/imx-sdma.c
1234 +@@ -760,8 +760,12 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
1235 + return;
1236 + }
1237 + sdmac->desc = desc = to_sdma_desc(&vd->tx);
1238 +-
1239 +- list_del(&vd->node);
1240 ++ /*
1241 ++ * Do not delete the node in desc_issued list in cyclic mode, otherwise
1242 ++ * the desc allocated will never be freed in vchan_dma_desc_free_list
1243 ++ */
1244 ++ if (!(sdmac->flags & IMX_DMA_SG_LOOP))
1245 ++ list_del(&vd->node);
1246 +
1247 + sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
1248 + sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
1249 +@@ -1067,6 +1071,7 @@ static void sdma_channel_terminate_work(struct work_struct *work)
1250 +
1251 + spin_lock_irqsave(&sdmac->vc.lock, flags);
1252 + vchan_get_all_descriptors(&sdmac->vc, &head);
1253 ++ sdmac->desc = NULL;
1254 + spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1255 + vchan_dma_desc_free_list(&sdmac->vc, &head);
1256 + sdmac->context_loaded = false;
1257 +@@ -1075,19 +1080,11 @@ static void sdma_channel_terminate_work(struct work_struct *work)
1258 + static int sdma_disable_channel_async(struct dma_chan *chan)
1259 + {
1260 + struct sdma_channel *sdmac = to_sdma_chan(chan);
1261 +- unsigned long flags;
1262 +-
1263 +- spin_lock_irqsave(&sdmac->vc.lock, flags);
1264 +
1265 + sdma_disable_channel(chan);
1266 +
1267 +- if (sdmac->desc) {
1268 +- vchan_terminate_vdesc(&sdmac->desc->vd);
1269 +- sdmac->desc = NULL;
1270 ++ if (sdmac->desc)
1271 + schedule_work(&sdmac->terminate_worker);
1272 +- }
1273 +-
1274 +- spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1275 +
1276 + return 0;
1277 + }
1278 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
1279 +index 596722e79a26..2816d0329738 100644
1280 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
1281 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
1282 +@@ -3977,11 +3977,13 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
1283 + {
1284 + uint64_t clock;
1285 +
1286 ++ amdgpu_gfx_off_ctrl(adev, false);
1287 + mutex_lock(&adev->gfx.gpu_clock_mutex);
1288 + WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
1289 + clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
1290 + ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
1291 + mutex_unlock(&adev->gfx.gpu_clock_mutex);
1292 ++ amdgpu_gfx_off_ctrl(adev, true);
1293 + return clock;
1294 + }
1295 +
1296 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1297 +index 0125ea7c4103..d85e1e559c82 100644
1298 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1299 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1300 +@@ -4080,11 +4080,13 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
1301 + {
1302 + uint64_t clock;
1303 +
1304 ++ amdgpu_gfx_off_ctrl(adev, false);
1305 + mutex_lock(&adev->gfx.gpu_clock_mutex);
1306 + WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
1307 + clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
1308 + ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
1309 + mutex_unlock(&adev->gfx.gpu_clock_mutex);
1310 ++ amdgpu_gfx_off_ctrl(adev, true);
1311 + return clock;
1312 + }
1313 +
1314 +diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
1315 +index 4ccfcdf8f16a..80934ca17260 100644
1316 +--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
1317 ++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
1318 +@@ -267,7 +267,12 @@ static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
1319 +
1320 + static u32 soc15_get_xclk(struct amdgpu_device *adev)
1321 + {
1322 +- return adev->clock.spll.reference_freq;
1323 ++ u32 reference_clock = adev->clock.spll.reference_freq;
1324 ++
1325 ++ if (adev->asic_type == CHIP_RAVEN)
1326 ++ return reference_clock / 4;
1327 ++
1328 ++ return reference_clock;
1329 + }
1330 +
1331 +
1332 +diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
1333 +index 8a8d605021f0..0454675a44cb 100644
1334 +--- a/drivers/gpu/drm/bridge/tc358767.c
1335 ++++ b/drivers/gpu/drm/bridge/tc358767.c
1336 +@@ -294,7 +294,7 @@ static inline int tc_poll_timeout(struct tc_data *tc, unsigned int addr,
1337 +
1338 + static int tc_aux_wait_busy(struct tc_data *tc)
1339 + {
1340 +- return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 1000, 100000);
1341 ++ return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 100, 100000);
1342 + }
1343 +
1344 + static int tc_aux_write_data(struct tc_data *tc, const void *data,
1345 +@@ -637,7 +637,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
1346 + if (ret)
1347 + goto err;
1348 +
1349 +- ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
1350 ++ ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 100, 100000);
1351 + if (ret == -ETIMEDOUT) {
1352 + dev_err(tc->dev, "Timeout waiting for PHY to become ready");
1353 + return ret;
1354 +@@ -861,7 +861,7 @@ static int tc_wait_link_training(struct tc_data *tc)
1355 + int ret;
1356 +
1357 + ret = tc_poll_timeout(tc, DP0_LTSTAT, LT_LOOPDONE,
1358 +- LT_LOOPDONE, 1, 1000);
1359 ++ LT_LOOPDONE, 500, 100000);
1360 + if (ret) {
1361 + dev_err(tc->dev, "Link training timeout waiting for LT_LOOPDONE!\n");
1362 + return ret;
1363 +@@ -934,7 +934,7 @@ static int tc_main_link_enable(struct tc_data *tc)
1364 + dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST);
1365 + ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
1366 +
1367 +- ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
1368 ++ ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 500, 100000);
1369 + if (ret) {
1370 + dev_err(dev, "timeout waiting for phy become ready");
1371 + return ret;
1372 +diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
1373 +index 0d21402945ab..3317798945e8 100644
1374 +--- a/drivers/gpu/drm/i915/Kconfig
1375 ++++ b/drivers/gpu/drm/i915/Kconfig
1376 +@@ -75,9 +75,8 @@ config DRM_I915_CAPTURE_ERROR
1377 + help
1378 + This option enables capturing the GPU state when a hang is detected.
1379 + This information is vital for triaging hangs and assists in debugging.
1380 +- Please report any hang to
1381 +- https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
1382 +- for triaging.
1383 ++ Please report any hang for triaging according to:
1384 ++ https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
1385 +
1386 + If in doubt, say "Y".
1387 +
1388 +diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
1389 +index af50f05f4e9d..272503615378 100644
1390 +--- a/drivers/gpu/drm/i915/display/intel_display.c
1391 ++++ b/drivers/gpu/drm/i915/display/intel_display.c
1392 +@@ -10510,7 +10510,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
1393 + u32 base;
1394 +
1395 + if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
1396 +- base = obj->phys_handle->busaddr;
1397 ++ base = sg_dma_address(obj->mm.pages->sgl);
1398 + else
1399 + base = intel_plane_ggtt_offset(plane_state);
1400 +
1401 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
1402 +index 646859fea224..08b35587bc6d 100644
1403 +--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
1404 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
1405 +@@ -240,9 +240,6 @@ struct drm_i915_gem_object {
1406 +
1407 + void *gvt_info;
1408 + };
1409 +-
1410 +- /** for phys allocated objects */
1411 +- struct drm_dma_handle *phys_handle;
1412 + };
1413 +
1414 + static inline struct drm_i915_gem_object *
1415 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
1416 +index 768356908160..0cfe9bd76377 100644
1417 +--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
1418 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
1419 +@@ -21,88 +21,87 @@
1420 + static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
1421 + {
1422 + struct address_space *mapping = obj->base.filp->f_mapping;
1423 +- struct drm_dma_handle *phys;
1424 +- struct sg_table *st;
1425 + struct scatterlist *sg;
1426 +- char *vaddr;
1427 ++ struct sg_table *st;
1428 ++ dma_addr_t dma;
1429 ++ void *vaddr;
1430 ++ void *dst;
1431 + int i;
1432 +- int err;
1433 +
1434 + if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
1435 + return -EINVAL;
1436 +
1437 +- /* Always aligning to the object size, allows a single allocation
1438 ++ /*
1439 ++ * Always aligning to the object size, allows a single allocation
1440 + * to handle all possible callers, and given typical object sizes,
1441 + * the alignment of the buddy allocation will naturally match.
1442 + */
1443 +- phys = drm_pci_alloc(obj->base.dev,
1444 +- roundup_pow_of_two(obj->base.size),
1445 +- roundup_pow_of_two(obj->base.size));
1446 +- if (!phys)
1447 ++ vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
1448 ++ roundup_pow_of_two(obj->base.size),
1449 ++ &dma, GFP_KERNEL);
1450 ++ if (!vaddr)
1451 + return -ENOMEM;
1452 +
1453 +- vaddr = phys->vaddr;
1454 ++ st = kmalloc(sizeof(*st), GFP_KERNEL);
1455 ++ if (!st)
1456 ++ goto err_pci;
1457 ++
1458 ++ if (sg_alloc_table(st, 1, GFP_KERNEL))
1459 ++ goto err_st;
1460 ++
1461 ++ sg = st->sgl;
1462 ++ sg->offset = 0;
1463 ++ sg->length = obj->base.size;
1464 ++
1465 ++ sg_assign_page(sg, (struct page *)vaddr);
1466 ++ sg_dma_address(sg) = dma;
1467 ++ sg_dma_len(sg) = obj->base.size;
1468 ++
1469 ++ dst = vaddr;
1470 + for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
1471 + struct page *page;
1472 +- char *src;
1473 ++ void *src;
1474 +
1475 + page = shmem_read_mapping_page(mapping, i);
1476 +- if (IS_ERR(page)) {
1477 +- err = PTR_ERR(page);
1478 +- goto err_phys;
1479 +- }
1480 ++ if (IS_ERR(page))
1481 ++ goto err_st;
1482 +
1483 + src = kmap_atomic(page);
1484 +- memcpy(vaddr, src, PAGE_SIZE);
1485 +- drm_clflush_virt_range(vaddr, PAGE_SIZE);
1486 ++ memcpy(dst, src, PAGE_SIZE);
1487 ++ drm_clflush_virt_range(dst, PAGE_SIZE);
1488 + kunmap_atomic(src);
1489 +
1490 + put_page(page);
1491 +- vaddr += PAGE_SIZE;
1492 ++ dst += PAGE_SIZE;
1493 + }
1494 +
1495 + intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
1496 +
1497 +- st = kmalloc(sizeof(*st), GFP_KERNEL);
1498 +- if (!st) {
1499 +- err = -ENOMEM;
1500 +- goto err_phys;
1501 +- }
1502 +-
1503 +- if (sg_alloc_table(st, 1, GFP_KERNEL)) {
1504 +- kfree(st);
1505 +- err = -ENOMEM;
1506 +- goto err_phys;
1507 +- }
1508 +-
1509 +- sg = st->sgl;
1510 +- sg->offset = 0;
1511 +- sg->length = obj->base.size;
1512 +-
1513 +- sg_dma_address(sg) = phys->busaddr;
1514 +- sg_dma_len(sg) = obj->base.size;
1515 +-
1516 +- obj->phys_handle = phys;
1517 +-
1518 + __i915_gem_object_set_pages(obj, st, sg->length);
1519 +
1520 + return 0;
1521 +
1522 +-err_phys:
1523 +- drm_pci_free(obj->base.dev, phys);
1524 +-
1525 +- return err;
1526 ++err_st:
1527 ++ kfree(st);
1528 ++err_pci:
1529 ++ dma_free_coherent(&obj->base.dev->pdev->dev,
1530 ++ roundup_pow_of_two(obj->base.size),
1531 ++ vaddr, dma);
1532 ++ return -ENOMEM;
1533 + }
1534 +
1535 + static void
1536 + i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
1537 + struct sg_table *pages)
1538 + {
1539 ++ dma_addr_t dma = sg_dma_address(pages->sgl);
1540 ++ void *vaddr = sg_page(pages->sgl);
1541 ++
1542 + __i915_gem_object_release_shmem(obj, pages, false);
1543 +
1544 + if (obj->mm.dirty) {
1545 + struct address_space *mapping = obj->base.filp->f_mapping;
1546 +- char *vaddr = obj->phys_handle->vaddr;
1547 ++ void *src = vaddr;
1548 + int i;
1549 +
1550 + for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
1551 +@@ -114,15 +113,16 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
1552 + continue;
1553 +
1554 + dst = kmap_atomic(page);
1555 +- drm_clflush_virt_range(vaddr, PAGE_SIZE);
1556 +- memcpy(dst, vaddr, PAGE_SIZE);
1557 ++ drm_clflush_virt_range(src, PAGE_SIZE);
1558 ++ memcpy(dst, src, PAGE_SIZE);
1559 + kunmap_atomic(dst);
1560 +
1561 + set_page_dirty(page);
1562 + if (obj->mm.madv == I915_MADV_WILLNEED)
1563 + mark_page_accessed(page);
1564 + put_page(page);
1565 +- vaddr += PAGE_SIZE;
1566 ++
1567 ++ src += PAGE_SIZE;
1568 + }
1569 + obj->mm.dirty = false;
1570 + }
1571 +@@ -130,7 +130,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
1572 + sg_free_table(pages);
1573 + kfree(pages);
1574 +
1575 +- drm_pci_free(obj->base.dev, obj->phys_handle);
1576 ++ dma_free_coherent(&obj->base.dev->pdev->dev,
1577 ++ roundup_pow_of_two(obj->base.size),
1578 ++ vaddr, dma);
1579 + }
1580 +
1581 + static void phys_release(struct drm_i915_gem_object *obj)
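
The rework above replaces the drm_pci_alloc()/drm_pci_free() handle with a bare dma_alloc_coherent() buffer described by a single-entry sg_table, so the cursor code can fetch the bus address with sg_dma_address() instead of dereferencing obj->phys_handle. A minimal userspace sketch of that allocate-then-describe shape follows; the names and types are illustrative (not the i915 or DMA API), and every failure funnels to one error value, just as the reworked function returns -ENOMEM on all paths.

#include <stdlib.h>

struct sg_entry { void *addr; size_t len; };

static int get_pages_phys(size_t size, void **out_buf,
			  struct sg_entry **out_sg)
{
	void *buf;
	struct sg_entry *sg;

	/* stands in for dma_alloc_coherent(); size is a power of two */
	buf = aligned_alloc(size, size);
	if (!buf)
		return -1;

	sg = malloc(sizeof(*sg));	/* stands in for the sg_table */
	if (!sg)
		goto err_buf;

	sg->addr = buf;			/* one contiguous segment */
	sg->len = size;

	*out_buf = buf;
	*out_sg = sg;
	return 0;

err_buf:
	free(buf);
	return -1;
}

Allocating the backing store first keeps the unwind trivial: each later failure only has to free what came before it, which is exactly what the err_st/err_pci labels do in the patch.
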
1582 +diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
1583 +index 22aab8593abf..926272b5a0ca 100644
1584 +--- a/drivers/gpu/drm/i915/gt/intel_engine.h
1585 ++++ b/drivers/gpu/drm/i915/gt/intel_engine.h
1586 +@@ -250,6 +250,14 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
1587 + return pos & (ring->size - 1);
1588 + }
1589 +
1590 ++static inline int intel_ring_direction(const struct intel_ring *ring,
1591 ++ u32 next, u32 prev)
1592 ++{
1593 ++ typecheck(typeof(ring->size), next);
1594 ++ typecheck(typeof(ring->size), prev);
1595 ++ return (next - prev) << ring->wrap;
1596 ++}
1597 ++
1598 + static inline bool
1599 + intel_ring_offset_valid(const struct intel_ring *ring,
1600 + unsigned int pos)
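
intel_ring_direction() works because ring offsets live modulo the ring size: shifting the u32 difference up by ring->wrap (set to BITS_PER_TYPE(ring->size) - ilog2(size) in the intel_ringbuffer.c hunk below) discards the high bits, so the sign of the result says whether next is ahead of or behind prev even across a wrap. A self-contained userspace model of the arithmetic (GCC/Clang builtins; names are ours):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Ring offsets are modulo the power-of-two ring size, so shifting
 * the wrapped difference to the top of the word makes its sign the
 * direction of travel. */
static int ring_direction(uint32_t size, uint32_t next, uint32_t prev)
{
	uint32_t wrap = 32 - __builtin_ctz(size); /* BITS_PER_TYPE(u32) - ilog2(size) */

	return (int32_t)((next - prev) << wrap);
}

int main(void)
{
	uint32_t size = 4096;

	assert(ring_direction(size, 64, 0) > 0);	/* advanced */
	assert(ring_direction(size, 0, 64) < 0);	/* rewound */
	assert(ring_direction(size, 0, 0) == 0);	/* unchanged */
	/* 16 bytes forward across the wrap point still reads as forward */
	assert(ring_direction(size, 8, size - 8) > 0);
	printf("direction checks pass\n");
	return 0;
}
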
1601 +diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
1602 +index 798e1b024406..c77c9518c58b 100644
1603 +--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
1604 ++++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
1605 +@@ -107,6 +107,7 @@ struct intel_ring {
1606 +
1607 + u32 space;
1608 + u32 size;
1609 ++ u32 wrap;
1610 + u32 effective_size;
1611 + };
1612 +
1613 +diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
1614 +index 4949b5ad860f..66f6d1a897f2 100644
1615 +--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
1616 ++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
1617 +@@ -471,12 +471,6 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
1618 + return desc;
1619 + }
1620 +
1621 +-static void unwind_wa_tail(struct i915_request *rq)
1622 +-{
1623 +- rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
1624 +- assert_ring_tail_valid(rq->ring, rq->tail);
1625 +-}
1626 +-
1627 + static struct i915_request *
1628 + __unwind_incomplete_requests(struct intel_engine_cs *engine)
1629 + {
1630 +@@ -495,7 +489,6 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
1631 + continue; /* XXX */
1632 +
1633 + __i915_request_unsubmit(rq);
1634 +- unwind_wa_tail(rq);
1635 +
1636 + /*
1637 + * Push the request back into the queue for later resubmission.
1638 +@@ -650,13 +643,35 @@ execlists_schedule_out(struct i915_request *rq)
1639 + i915_request_put(rq);
1640 + }
1641 +
1642 +-static u64 execlists_update_context(const struct i915_request *rq)
1643 ++static u64 execlists_update_context(struct i915_request *rq)
1644 + {
1645 + struct intel_context *ce = rq->hw_context;
1646 +- u64 desc;
1647 ++ u64 desc = ce->lrc_desc;
1648 ++ u32 tail, prev;
1649 +
1650 +- ce->lrc_reg_state[CTX_RING_TAIL + 1] =
1651 +- intel_ring_set_tail(rq->ring, rq->tail);
1652 ++ /*
1653 ++ * WaIdleLiteRestore:bdw,skl
1654 ++ *
1655 ++ * We should never submit the context with the same RING_TAIL twice
1656 ++ * just in case we submit an empty ring, which confuses the HW.
1657 ++ *
1658 ++ * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
1659 ++ * the normal request to be able to always advance the RING_TAIL on
1660 ++ * subsequent resubmissions (for lite restore). Should that fail us,
1661 ++	 * and we try to submit the same tail again, force the context
1662 ++ * reload.
1663 ++ *
1664 ++ * If we need to return to a preempted context, we need to skip the
1665 ++ * lite-restore and force it to reload the RING_TAIL. Otherwise, the
1666 ++ * HW has a tendency to ignore us rewinding the TAIL to the end of
1667 ++ * an earlier request.
1668 ++ */
1669 ++ tail = intel_ring_set_tail(rq->ring, rq->tail);
1670 ++ prev = ce->lrc_reg_state[CTX_RING_TAIL + 1];
1671 ++ if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
1672 ++ desc |= CTX_DESC_FORCE_RESTORE;
1673 ++ ce->lrc_reg_state[CTX_RING_TAIL + 1] = tail;
1674 ++ rq->tail = rq->wa_tail;
1675 +
1676 + /*
1677 + * Make sure the context image is complete before we submit it to HW.
1678 +@@ -675,7 +690,6 @@ static u64 execlists_update_context(const struct i915_request *rq)
1679 + */
1680 + mb();
1681 +
1682 +- desc = ce->lrc_desc;
1683 + ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
1684 +
1685 + return desc;
1686 +@@ -919,6 +933,11 @@ last_active(const struct intel_engine_execlists *execlists)
1687 + return *last;
1688 + }
1689 +
1690 ++#define for_each_waiter(p__, rq__) \
1691 ++ list_for_each_entry_lockless(p__, \
1692 ++ &(rq__)->sched.waiters_list, \
1693 ++ wait_link)
1694 ++
1695 + static void defer_request(struct i915_request *rq, struct list_head * const pl)
1696 + {
1697 + LIST_HEAD(list);
1698 +@@ -936,7 +955,7 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
1699 + GEM_BUG_ON(i915_request_is_active(rq));
1700 + list_move_tail(&rq->sched.link, pl);
1701 +
1702 +- list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
1703 ++ for_each_waiter(p, rq) {
1704 + struct i915_request *w =
1705 + container_of(p->waiter, typeof(*w), sched);
1706 +
1707 +@@ -1102,14 +1121,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
1708 + */
1709 + __unwind_incomplete_requests(engine);
1710 +
1711 +- /*
1712 +- * If we need to return to the preempted context, we
1713 +- * need to skip the lite-restore and force it to
1714 +- * reload the RING_TAIL. Otherwise, the HW has a
1715 +- * tendency to ignore us rewinding the TAIL to the
1716 +- * end of an earlier request.
1717 +- */
1718 +- last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
1719 + last = NULL;
1720 + } else if (need_timeslice(engine, last) &&
1721 + !timer_pending(&engine->execlists.timer)) {
1722 +@@ -1150,16 +1161,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
1723 + if (!list_is_last(&last->sched.link,
1724 + &engine->active.requests))
1725 + return;
1726 +-
1727 +- /*
1728 +- * WaIdleLiteRestore:bdw,skl
1729 +- * Apply the wa NOOPs to prevent
1730 +- * ring:HEAD == rq:TAIL as we resubmit the
1731 +- * request. See gen8_emit_fini_breadcrumb() for
1732 +- * where we prepare the padding after the
1733 +- * end of the request.
1734 +- */
1735 +- last->tail = last->wa_tail;
1736 + }
1737 + }
1738 +
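
The net effect of the intel_lrc.c hunks: instead of rewinding rq->tail past the workaround NOOPs on unsubmit and unconditionally forcing a restore on preemption, execlists_update_context() now compares the new tail against the previously programmed one and only sets CTX_DESC_FORCE_RESTORE when the tail fails to advance. A compact model of that decision; the FORCE_RESTORE bit value here is illustrative, not the hardware encoding:

#include <stdint.h>

#define CTX_DESC_FORCE_RESTORE	(1ull << 2)	/* illustrative bit only */

/* A lite restore is only safe when the new TAIL strictly advances
 * past the one already programmed; otherwise flag the descriptor
 * so the HW reloads the context instead of ignoring the rewind. */
static uint64_t update_context(uint64_t desc, uint32_t size,
			       uint32_t tail, uint32_t *prev_tail)
{
	uint32_t wrap = 32 - __builtin_ctz(size);

	if ((int32_t)((tail - *prev_tail) << wrap) <= 0)
		desc |= CTX_DESC_FORCE_RESTORE;
	*prev_tail = tail;
	return desc;
}
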
1739 +diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
1740 +index bacaa7bb8c9a..eee9fcbe0434 100644
1741 +--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
1742 ++++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
1743 +@@ -1312,6 +1312,8 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
1744 + kref_init(&ring->ref);
1745 +
1746 + ring->size = size;
1747 ++ ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);
1748 ++
1749 + /* Workaround an erratum on the i830 which causes a hang if
1750 + * the TAIL pointer points to within the last 2 cachelines
1751 + * of the buffer.
1752 +diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
1753 +index 4b04af569c05..7dc7bb850d0a 100644
1754 +--- a/drivers/gpu/drm/i915/gvt/gtt.c
1755 ++++ b/drivers/gpu/drm/i915/gvt/gtt.c
1756 +@@ -1956,7 +1956,11 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
1757 +
1758 + if (mm->type == INTEL_GVT_MM_PPGTT) {
1759 + list_del(&mm->ppgtt_mm.list);
1760 ++
1761 ++ mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
1762 + list_del(&mm->ppgtt_mm.lru_list);
1763 ++ mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
1764 ++
1765 + invalidate_ppgtt_mm(mm);
1766 + } else {
1767 + vfree(mm->ggtt_mm.virtual_ggtt);
1768 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1769 +index 98305d987ac1..4d561da3dcea 100644
1770 +--- a/drivers/gpu/drm/i915/i915_gem.c
1771 ++++ b/drivers/gpu/drm/i915/i915_gem.c
1772 +@@ -136,7 +136,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
1773 + struct drm_i915_gem_pwrite *args,
1774 + struct drm_file *file)
1775 + {
1776 +- void *vaddr = obj->phys_handle->vaddr + args->offset;
1777 ++ void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
1778 + char __user *user_data = u64_to_user_ptr(args->data_ptr);
1779 +
1780 + /*
1781 +@@ -802,10 +802,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1782 + ret = i915_gem_gtt_pwrite_fast(obj, args);
1783 +
1784 + if (ret == -EFAULT || ret == -ENOSPC) {
1785 +- if (obj->phys_handle)
1786 +- ret = i915_gem_phys_pwrite(obj, args, file);
1787 +- else
1788 ++ if (i915_gem_object_has_struct_page(obj))
1789 + ret = i915_gem_shmem_pwrite(obj, args);
1790 ++ else
1791 ++ ret = i915_gem_phys_pwrite(obj, args, file);
1792 + }
1793 +
1794 + i915_gem_object_unpin_pages(obj);
1795 +diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
1796 +index e284bd76fa86..fe9edbba997c 100644
1797 +--- a/drivers/gpu/drm/i915/i915_gpu_error.c
1798 ++++ b/drivers/gpu/drm/i915/i915_gpu_error.c
1799 +@@ -1768,7 +1768,8 @@ void i915_capture_error_state(struct drm_i915_private *i915,
1800 + if (!xchg(&warned, true) &&
1801 + ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
1802 + pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
1803 +- pr_info("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
1804 ++ pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
1805 ++ pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
1806 + pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
1807 + pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
1808 + pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
1809 +diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
1810 +index 3eba8a2b39c2..0ef205fe5e29 100644
1811 +--- a/drivers/gpu/drm/i915/i915_scheduler.c
1812 ++++ b/drivers/gpu/drm/i915/i915_scheduler.c
1813 +@@ -418,8 +418,6 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
1814 +
1815 + if (!node_signaled(signal)) {
1816 + INIT_LIST_HEAD(&dep->dfs_link);
1817 +- list_add(&dep->wait_link, &signal->waiters_list);
1818 +- list_add(&dep->signal_link, &node->signalers_list);
1819 + dep->signaler = signal;
1820 + dep->waiter = node;
1821 + dep->flags = flags;
1822 +@@ -429,6 +427,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
1823 + !node_started(signal))
1824 + node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
1825 +
1826 ++ /* All set, now publish. Beware the lockless walkers. */
1827 ++ list_add(&dep->signal_link, &node->signalers_list);
1828 ++ list_add_rcu(&dep->wait_link, &signal->waiters_list);
1829 ++
1830 + /*
1831 + * As we do not allow WAIT to preempt inflight requests,
1832 + * once we have executed a request, along with triggering
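
The scheduler hunk is a publish-ordering fix: the dependency must be fully initialised before it becomes reachable, because defer_request() now walks waiters_list locklessly via for_each_waiter(). list_add_rcu() supplies the release semantics; here is a C11 model of the same contract, with a single published pointer standing in for the kernel list:

#include <stdatomic.h>
#include <stddef.h>

struct dep { void *signaler; void *waiter; unsigned int flags; };

static _Atomic(struct dep *) waiters_head;

/* Fill in every field first, then publish with release semantics;
 * list_add_rcu() plays this role in the kernel. */
static void publish_dep(struct dep *dep, void *signaler, void *waiter,
			unsigned int flags)
{
	dep->signaler = signaler;
	dep->waiter = waiter;
	dep->flags = flags;
	atomic_store_explicit(&waiters_head, dep, memory_order_release);
}

/* A lockless walker pairs an acquire load with the release store
 * above and is guaranteed to observe initialised fields. */
static struct dep *first_waiter(void)
{
	return atomic_load_explicit(&waiters_head, memory_order_acquire);
}
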
1833 +diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
1834 +index 16acdf7bdbe6..17cfeef35a24 100644
1835 +--- a/drivers/gpu/drm/i915/i915_utils.c
1836 ++++ b/drivers/gpu/drm/i915/i915_utils.c
1837 +@@ -8,9 +8,8 @@
1838 + #include "i915_drv.h"
1839 + #include "i915_utils.h"
1840 +
1841 +-#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
1842 +-#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
1843 +- "providing the dmesg log by booting with drm.debug=0xf"
1844 ++#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
1845 ++#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
1846 +
1847 + void
1848 + __i915_printk(struct drm_i915_private *dev_priv, const char *level,
1849 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
1850 +index 24ab6249083a..6f420cc73dbd 100644
1851 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
1852 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
1853 +@@ -255,13 +255,13 @@ static const struct dpu_format dpu_format_map[] = {
1854 +
1855 + INTERLEAVED_RGB_FMT(RGB565,
1856 + 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
1857 +- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
1858 ++ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
1859 + false, 2, 0,
1860 + DPU_FETCH_LINEAR, 1),
1861 +
1862 + INTERLEAVED_RGB_FMT(BGR565,
1863 + 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
1864 +- C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
1865 ++ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
1866 + false, 2, 0,
1867 + DPU_FETCH_LINEAR, 1),
1868 +
1869 +diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
1870 +index 5193b6257061..b856e87574fd 100644
1871 +--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
1872 ++++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
1873 +@@ -451,6 +451,8 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
1874 + asyw->clr.ntfy = armw->ntfy.handle != 0;
1875 + asyw->clr.sema = armw->sema.handle != 0;
1876 + asyw->clr.xlut = armw->xlut.handle != 0;
1877 ++ if (asyw->clr.xlut && asyw->visible)
1878 ++ asyw->set.xlut = asyw->xlut.handle != 0;
1879 + asyw->clr.csc = armw->csc.valid;
1880 + if (wndw->func->image_clr)
1881 + asyw->clr.image = armw->image.handle[0] != 0;
1882 +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
1883 +index 763cfca886a7..3107b0738e40 100644
1884 +--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
1885 ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
1886 +@@ -151,7 +151,12 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
1887 + as = mmu->as;
1888 + if (as >= 0) {
1889 + int en = atomic_inc_return(&mmu->as_count);
1890 +- WARN_ON(en >= NUM_JOB_SLOTS);
1891 ++
1892 ++ /*
1893 ++ * AS can be retained by active jobs or a perfcnt context,
1894 ++ * hence the '+ 1' here.
1895 ++ */
1896 ++ WARN_ON(en >= (NUM_JOB_SLOTS + 1));
1897 +
1898 + list_move(&mmu->list, &pfdev->as_lru_list);
1899 + goto out;
1900 +diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
1901 +index 684820448be3..6913578d5aa7 100644
1902 +--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
1903 ++++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
1904 +@@ -73,7 +73,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
1905 + struct panfrost_file_priv *user = file_priv->driver_priv;
1906 + struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
1907 + struct drm_gem_shmem_object *bo;
1908 +- u32 cfg;
1909 ++ u32 cfg, as;
1910 + int ret;
1911 +
1912 + if (user == perfcnt->user)
1913 +@@ -126,12 +126,8 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
1914 +
1915 + perfcnt->user = user;
1916 +
1917 +- /*
1918 +- * Always use address space 0 for now.
1919 +- * FIXME: this needs to be updated when we start using different
1920 +- * address space.
1921 +- */
1922 +- cfg = GPU_PERFCNT_CFG_AS(0) |
1923 ++ as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
1924 ++ cfg = GPU_PERFCNT_CFG_AS(as) |
1925 + GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL);
1926 +
1927 + /*
1928 +@@ -195,6 +191,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
1929 + drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
1930 + perfcnt->buf = NULL;
1931 + panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
1932 ++ panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
1933 + panfrost_gem_mapping_put(perfcnt->mapping);
1934 + perfcnt->mapping = NULL;
1935 + pm_runtime_mark_last_busy(pfdev->dev);
1936 +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
1937 +index b273e421e910..a1a035270cab 100644
1938 +--- a/drivers/infiniband/ulp/isert/ib_isert.c
1939 ++++ b/drivers/infiniband/ulp/isert/ib_isert.c
1940 +@@ -2575,6 +2575,17 @@ isert_wait4logout(struct isert_conn *isert_conn)
1941 + }
1942 + }
1943 +
1944 ++static void
1945 ++isert_wait4cmds(struct iscsi_conn *conn)
1946 ++{
1947 ++ isert_info("iscsi_conn %p\n", conn);
1948 ++
1949 ++ if (conn->sess) {
1950 ++ target_sess_cmd_list_set_waiting(conn->sess->se_sess);
1951 ++ target_wait_for_sess_cmds(conn->sess->se_sess);
1952 ++ }
1953 ++}
1954 ++
1955 + /**
1956 + * isert_put_unsol_pending_cmds() - Drop commands waiting for
1957 + * unsolicited dataout
1958 +@@ -2622,6 +2633,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
1959 +
1960 + ib_drain_qp(isert_conn->qp);
1961 + isert_put_unsol_pending_cmds(conn);
1962 ++ isert_wait4cmds(conn);
1963 + isert_wait4logout(isert_conn);
1964 +
1965 + queue_work(isert_release_wq, &isert_conn->release_work);
1966 +diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
1967 +index c31e7bc4ccbe..e0b3fa2bb7ab 100644
1968 +--- a/drivers/iommu/qcom_iommu.c
1969 ++++ b/drivers/iommu/qcom_iommu.c
1970 +@@ -345,21 +345,19 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
1971 + {
1972 + struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
1973 +
1974 +- if (WARN_ON(qcom_domain->iommu)) /* forgot to detach? */
1975 +- return;
1976 +-
1977 + iommu_put_dma_cookie(domain);
1978 +
1979 +- /* NOTE: unmap can be called after client device is powered off,
1980 +- * for example, with GPUs or anything involving dma-buf. So we
1981 +- * cannot rely on the device_link. Make sure the IOMMU is on to
1982 +- * avoid unclocked accesses in the TLB inv path:
1983 +- */
1984 +- pm_runtime_get_sync(qcom_domain->iommu->dev);
1985 +-
1986 +- free_io_pgtable_ops(qcom_domain->pgtbl_ops);
1987 +-
1988 +- pm_runtime_put_sync(qcom_domain->iommu->dev);
1989 ++ if (qcom_domain->iommu) {
1990 ++ /*
1991 ++ * NOTE: unmap can be called after client device is powered
1992 ++ * off, for example, with GPUs or anything involving dma-buf.
1993 ++ * So we cannot rely on the device_link. Make sure the IOMMU
1994 ++ * is on to avoid unclocked accesses in the TLB inv path:
1995 ++ */
1996 ++ pm_runtime_get_sync(qcom_domain->iommu->dev);
1997 ++ free_io_pgtable_ops(qcom_domain->pgtbl_ops);
1998 ++ pm_runtime_put_sync(qcom_domain->iommu->dev);
1999 ++ }
2000 +
2001 + kfree(qcom_domain);
2002 + }
2003 +@@ -405,7 +403,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
2004 + struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
2005 + unsigned i;
2006 +
2007 +- if (!qcom_domain->iommu)
2008 ++ if (WARN_ON(!qcom_domain->iommu))
2009 + return;
2010 +
2011 + pm_runtime_get_sync(qcom_iommu->dev);
2012 +@@ -418,8 +416,6 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
2013 + ctx->domain = NULL;
2014 + }
2015 + pm_runtime_put_sync(qcom_iommu->dev);
2016 +-
2017 +- qcom_domain->iommu = NULL;
2018 + }
2019 +
2020 + static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
2021 +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
2022 +index c27ed7363768..8c4507838325 100644
2023 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c
2024 ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
2025 +@@ -4713,12 +4713,12 @@ int e1000e_close(struct net_device *netdev)
2026 +
2027 + pm_runtime_get_sync(&pdev->dev);
2028 +
2029 +- if (!test_bit(__E1000_DOWN, &adapter->state)) {
2030 ++ if (netif_device_present(netdev)) {
2031 + e1000e_down(adapter, true);
2032 + e1000_free_irq(adapter);
2033 +
2034 + /* Link status message must follow this format */
2035 +- pr_info("%s NIC Link is Down\n", adapter->netdev->name);
2036 ++ pr_info("%s NIC Link is Down\n", netdev->name);
2037 + }
2038 +
2039 + napi_disable(&adapter->napi);
2040 +@@ -6309,10 +6309,14 @@ static int e1000e_pm_freeze(struct device *dev)
2041 + {
2042 + struct net_device *netdev = dev_get_drvdata(dev);
2043 + struct e1000_adapter *adapter = netdev_priv(netdev);
2044 ++ bool present;
2045 +
2046 ++ rtnl_lock();
2047 ++
2048 ++ present = netif_device_present(netdev);
2049 + netif_device_detach(netdev);
2050 +
2051 +- if (netif_running(netdev)) {
2052 ++ if (present && netif_running(netdev)) {
2053 + int count = E1000_CHECK_RESET_COUNT;
2054 +
2055 + while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
2056 +@@ -6324,6 +6328,8 @@ static int e1000e_pm_freeze(struct device *dev)
2057 + e1000e_down(adapter, false);
2058 + e1000_free_irq(adapter);
2059 + }
2060 ++ rtnl_unlock();
2061 ++
2062 + e1000e_reset_interrupt_capability(adapter);
2063 +
2064 + /* Allow time for pending master requests to run */
2065 +@@ -6571,6 +6577,30 @@ static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state)
2066 + __e1000e_disable_aspm(pdev, state, 1);
2067 + }
2068 +
2069 ++static int e1000e_pm_thaw(struct device *dev)
2070 ++{
2071 ++ struct net_device *netdev = dev_get_drvdata(dev);
2072 ++ struct e1000_adapter *adapter = netdev_priv(netdev);
2073 ++ int rc = 0;
2074 ++
2075 ++ e1000e_set_interrupt_capability(adapter);
2076 ++
2077 ++ rtnl_lock();
2078 ++ if (netif_running(netdev)) {
2079 ++ rc = e1000_request_irq(adapter);
2080 ++ if (rc)
2081 ++ goto err_irq;
2082 ++
2083 ++ e1000e_up(adapter);
2084 ++ }
2085 ++
2086 ++ netif_device_attach(netdev);
2087 ++err_irq:
2088 ++ rtnl_unlock();
2089 ++
2090 ++ return rc;
2091 ++}
2092 ++
2093 + #ifdef CONFIG_PM
2094 + static int __e1000_resume(struct pci_dev *pdev)
2095 + {
2096 +@@ -6638,26 +6668,6 @@ static int __e1000_resume(struct pci_dev *pdev)
2097 + }
2098 +
2099 + #ifdef CONFIG_PM_SLEEP
2100 +-static int e1000e_pm_thaw(struct device *dev)
2101 +-{
2102 +- struct net_device *netdev = dev_get_drvdata(dev);
2103 +- struct e1000_adapter *adapter = netdev_priv(netdev);
2104 +-
2105 +- e1000e_set_interrupt_capability(adapter);
2106 +- if (netif_running(netdev)) {
2107 +- u32 err = e1000_request_irq(adapter);
2108 +-
2109 +- if (err)
2110 +- return err;
2111 +-
2112 +- e1000e_up(adapter);
2113 +- }
2114 +-
2115 +- netif_device_attach(netdev);
2116 +-
2117 +- return 0;
2118 +-}
2119 +-
2120 + static int e1000e_pm_suspend(struct device *dev)
2121 + {
2122 + struct pci_dev *pdev = to_pci_dev(dev);
2123 +@@ -6829,16 +6839,11 @@ static void e1000_netpoll(struct net_device *netdev)
2124 + static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
2125 + pci_channel_state_t state)
2126 + {
2127 +- struct net_device *netdev = pci_get_drvdata(pdev);
2128 +- struct e1000_adapter *adapter = netdev_priv(netdev);
2129 +-
2130 +- netif_device_detach(netdev);
2131 ++ e1000e_pm_freeze(&pdev->dev);
2132 +
2133 + if (state == pci_channel_io_perm_failure)
2134 + return PCI_ERS_RESULT_DISCONNECT;
2135 +
2136 +- if (netif_running(netdev))
2137 +- e1000e_down(adapter, true);
2138 + pci_disable_device(pdev);
2139 +
2140 +	/* Request a slot reset. */
2141 +@@ -6904,10 +6909,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
2142 +
2143 + e1000_init_manageability_pt(adapter);
2144 +
2145 +- if (netif_running(netdev))
2146 +- e1000e_up(adapter);
2147 +-
2148 +- netif_device_attach(netdev);
2149 ++ e1000e_pm_thaw(&pdev->dev);
2150 +
2151 + /* If the controller has AMT, do not set DRV_LOAD until the interface
2152 + * is up. For all other cases, let the f/w know that the h/w is now
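
The e1000e changes hang together: freeze/thaw now run under rtnl_lock, freeze records whether the netdev was present before detaching, and the PCI error handlers reuse the same pair instead of open-coding down/up. A rough model of the locking shape, with a pthread mutex standing in for rtnl_lock and comments marking the driver calls:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
static bool present = true;	/* netif_device_present() */
static bool running = true;	/* netif_running() */

/* Detach and quiesce atomically so a concurrent open/close cannot
 * flip the state between the test and the teardown. */
static void pm_freeze(void)
{
	pthread_mutex_lock(&rtnl);
	bool was_present = present;
	present = false;			/* netif_device_detach() */
	if (was_present && running) {
		/* e1000e_down(adapter, false); e1000_free_irq(adapter); */
	}
	pthread_mutex_unlock(&rtnl);
}

static void pm_thaw(void)
{
	pthread_mutex_lock(&rtnl);
	if (running) {
		/* e1000_request_irq(adapter); e1000e_up(adapter); */
	}
	present = true;				/* netif_device_attach() */
	pthread_mutex_unlock(&rtnl);
}

Checking `present` in freeze also makes it idempotent, which matters now that e1000_io_error_detected() calls it on a device that may already be detached.
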
2153 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
2154 +index 3a975641f902..20b907dc1e29 100644
2155 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
2156 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
2157 +@@ -200,7 +200,7 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
2158 + netdev_err(priv->netdev, err_str);
2159 +
2160 + if (!reporter)
2161 +- return err_ctx->recover(&err_ctx->ctx);
2162 ++ return err_ctx->recover(err_ctx->ctx);
2163 +
2164 + return devlink_health_report(reporter, err_str, err_ctx);
2165 + }
2166 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
2167 +index 7c8796d9743f..a226277b0980 100644
2168 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
2169 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
2170 +@@ -179,6 +179,14 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
2171 + }
2172 + }
2173 +
2174 ++static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
2175 ++{
2176 ++ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
2177 ++ mlx5_wq_ll_reset(&rq->mpwqe.wq);
2178 ++ else
2179 ++ mlx5_wq_cyc_reset(&rq->wqe.wq);
2180 ++}
2181 ++
2182 + /* SW parser related functions */
2183 +
2184 + struct mlx5e_swp_spec {
2185 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2186 +index 29a5a8c894e3..e5e91cbcbc31 100644
2187 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2188 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2189 +@@ -723,6 +723,9 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
2190 + if (!in)
2191 + return -ENOMEM;
2192 +
2193 ++ if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY)
2194 ++ mlx5e_rqwq_reset(rq);
2195 ++
2196 + rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
2197 +
2198 + MLX5_SET(modify_rq_in, in, rq_state, curr_state);
2199 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2200 +index 60fddf8afc99..c6ed4b7f4f97 100644
2201 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2202 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2203 +@@ -2319,25 +2319,17 @@ out:
2204 +
2205 + int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
2206 + {
2207 +- int err = 0;
2208 +-
2209 + if (!esw)
2210 + return -EOPNOTSUPP;
2211 +
2212 + if (!ESW_ALLOWED(esw))
2213 + return -EPERM;
2214 +
2215 +- mutex_lock(&esw->state_lock);
2216 +- if (esw->mode != MLX5_ESWITCH_LEGACY) {
2217 +- err = -EOPNOTSUPP;
2218 +- goto out;
2219 +- }
2220 ++ if (esw->mode != MLX5_ESWITCH_LEGACY)
2221 ++ return -EOPNOTSUPP;
2222 +
2223 + *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
2224 +-
2225 +-out:
2226 +- mutex_unlock(&esw->state_lock);
2227 +- return err;
2228 ++ return 0;
2229 + }
2230 +
2231 + int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
2232 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
2233 +index dd2315ce4441..41e35b341b70 100644
2234 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
2235 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
2236 +@@ -96,6 +96,13 @@ err_db_free:
2237 + return err;
2238 + }
2239 +
2240 ++void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq)
2241 ++{
2242 ++ wq->wqe_ctr = 0;
2243 ++ wq->cur_sz = 0;
2244 ++ mlx5_wq_cyc_update_db_record(wq);
2245 ++}
2246 ++
2247 + int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
2248 + void *qpc, struct mlx5_wq_qp *wq,
2249 + struct mlx5_wq_ctrl *wq_ctrl)
2250 +@@ -194,6 +201,19 @@ err_db_free:
2251 + return err;
2252 + }
2253 +
2254 ++static void mlx5_wq_ll_init_list(struct mlx5_wq_ll *wq)
2255 ++{
2256 ++ struct mlx5_wqe_srq_next_seg *next_seg;
2257 ++ int i;
2258 ++
2259 ++ for (i = 0; i < wq->fbc.sz_m1; i++) {
2260 ++ next_seg = mlx5_wq_ll_get_wqe(wq, i);
2261 ++ next_seg->next_wqe_index = cpu_to_be16(i + 1);
2262 ++ }
2263 ++ next_seg = mlx5_wq_ll_get_wqe(wq, i);
2264 ++ wq->tail_next = &next_seg->next_wqe_index;
2265 ++}
2266 ++
2267 + int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
2268 + void *wqc, struct mlx5_wq_ll *wq,
2269 + struct mlx5_wq_ctrl *wq_ctrl)
2270 +@@ -201,9 +221,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
2271 + u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
2272 + u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);
2273 + struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
2274 +- struct mlx5_wqe_srq_next_seg *next_seg;
2275 + int err;
2276 +- int i;
2277 +
2278 + err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
2279 + if (err) {
2280 +@@ -222,13 +240,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
2281 +
2282 + mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
2283 +
2284 +- for (i = 0; i < fbc->sz_m1; i++) {
2285 +- next_seg = mlx5_wq_ll_get_wqe(wq, i);
2286 +- next_seg->next_wqe_index = cpu_to_be16(i + 1);
2287 +- }
2288 +- next_seg = mlx5_wq_ll_get_wqe(wq, i);
2289 +- wq->tail_next = &next_seg->next_wqe_index;
2290 +-
2291 ++ mlx5_wq_ll_init_list(wq);
2292 + wq_ctrl->mdev = mdev;
2293 +
2294 + return 0;
2295 +@@ -239,6 +251,15 @@ err_db_free:
2296 + return err;
2297 + }
2298 +
2299 ++void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq)
2300 ++{
2301 ++ wq->head = 0;
2302 ++ wq->wqe_ctr = 0;
2303 ++ wq->cur_sz = 0;
2304 ++ mlx5_wq_ll_init_list(wq);
2305 ++ mlx5_wq_ll_update_db_record(wq);
2306 ++}
2307 ++
2308 + void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
2309 + {
2310 + mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
2311 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
2312 +index 55791f71a778..5efc038440df 100644
2313 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
2314 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
2315 +@@ -80,10 +80,12 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
2316 + void *wqc, struct mlx5_wq_cyc *wq,
2317 + struct mlx5_wq_ctrl *wq_ctrl);
2318 + u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
2319 ++void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq);
2320 +
2321 + int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
2322 + void *qpc, struct mlx5_wq_qp *wq,
2323 + struct mlx5_wq_ctrl *wq_ctrl);
2324 ++void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq);
2325 +
2326 + int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
2327 + void *cqc, struct mlx5_cqwq *wq,
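
Tying the mlx5 hunks together: when mlx5e_modify_rq_state() drives the RQ from RST to RDY, the software ring state has to be zeroed to match the hardware, and the linked-list flavour additionally re-links its WQE chain (now factored into mlx5_wq_ll_init_list()). A sketch of that dispatch, with illustrative types rather than the mlx5 structures:

enum wq_type { WQ_CYCLIC, WQ_LINKED_LIST };

struct wq {
	enum wq_type type;
	unsigned int head, wqe_ctr, cur_sz;
};

/* Reset producer/consumer state to match a freshly-RST ring; the
 * linked-list flavour must also rebuild its next_wqe_index chain
 * before the doorbell record is written back. */
static void wq_reset(struct wq *wq)
{
	wq->wqe_ctr = 0;
	wq->cur_sz = 0;
	if (wq->type == WQ_LINKED_LIST) {
		wq->head = 0;
		/* re-link the WQEs, as mlx5_wq_ll_init_list() does */
	}
	/* update the doorbell record last */
}
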
2328 +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
2329 +index 132ade51ee87..aed6354cb271 100644
2330 +--- a/drivers/nvme/host/multipath.c
2331 ++++ b/drivers/nvme/host/multipath.c
2332 +@@ -711,6 +711,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2333 + }
2334 +
2335 + INIT_WORK(&ctrl->ana_work, nvme_ana_work);
2336 ++ kfree(ctrl->ana_log_buf);
2337 + ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
2338 + if (!ctrl->ana_log_buf) {
2339 + error = -ENOMEM;
2340 +diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
2341 +index 74d497d39c5a..c6695354b123 100644
2342 +--- a/drivers/staging/android/ashmem.c
2343 ++++ b/drivers/staging/android/ashmem.c
2344 +@@ -351,8 +351,23 @@ static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
2345 + _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
2346 + }
2347 +
2348 ++static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
2349 ++{
2350 ++ /* do not allow to mmap ashmem backing shmem file directly */
2351 ++ return -EPERM;
2352 ++}
2353 ++
2354 ++static unsigned long
2355 ++ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr,
2356 ++ unsigned long len, unsigned long pgoff,
2357 ++ unsigned long flags)
2358 ++{
2359 ++ return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
2360 ++}
2361 ++
2362 + static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
2363 + {
2364 ++ static struct file_operations vmfile_fops;
2365 + struct ashmem_area *asma = file->private_data;
2366 + int ret = 0;
2367 +
2368 +@@ -393,6 +408,19 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
2369 + }
2370 + vmfile->f_mode |= FMODE_LSEEK;
2371 + asma->file = vmfile;
2372 ++ /*
2373 ++	 * Override the vmfile's mmap operation so that it cannot be
2374 ++	 * remapped, which would create a new vma without the asma
2375 ++	 * permission checks. get_unmapped_area must be overridden as
2376 ++	 * well to avoid the VM_BUG_ON check on f_ops modification.
2377 ++ */
2378 ++ if (!vmfile_fops.mmap) {
2379 ++ vmfile_fops = *vmfile->f_op;
2380 ++ vmfile_fops.mmap = ashmem_vmfile_mmap;
2381 ++ vmfile_fops.get_unmapped_area =
2382 ++ ashmem_vmfile_get_unmapped_area;
2383 ++ }
2384 ++ vmfile->f_op = &vmfile_fops;
2385 + }
2386 + get_file(asma->file);
2387 +
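
The ashmem fix clones the backing shmem file's f_op table once, lazily, and swaps in two entries: mmap is denied outright (a direct mmap of the vmfile would bypass ashmem's permission checks) and get_unmapped_area simply forwards, since the core VM_BUG_ONs if mmap is overridden without it. A userspace model of the one-time clone-and-override, using plain function-pointer structs rather than the kernel types:

#include <stddef.h>

struct file_ops {
	int (*mmap)(void *file, void *vma);
	unsigned long (*get_unmapped_area)(void *file, unsigned long addr,
					   unsigned long len);
};

static int deny_mmap(void *file, void *vma)
{
	return -1;	/* -EPERM in the kernel */
}

static unsigned long forward_get_unmapped_area(void *file,
					       unsigned long addr,
					       unsigned long len)
{
	return addr;	/* defer to the generic allocator */
}

/* Copy the original table on first use, override the two entries,
 * and point the file at the private copy from then on. */
static void wrap_fops(const struct file_ops **slot)
{
	static struct file_ops wrapped;

	if (!wrapped.mmap) {	/* first caller populates the copy */
		wrapped = **slot;
		wrapped.mmap = deny_mmap;
		wrapped.get_unmapped_area = forward_get_unmapped_area;
	}
	*slot = &wrapped;
}

The first-use test mirrors the `if (!vmfile_fops.mmap)` guard in the patch; in the driver this runs with ashmem's mutex held, so the lazy initialisation does not race.
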
2388 +diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
2389 +index 9b19ea9d3fa1..9a3f7c034ab4 100644
2390 +--- a/drivers/staging/greybus/audio_manager.c
2391 ++++ b/drivers/staging/greybus/audio_manager.c
2392 +@@ -92,8 +92,8 @@ void gb_audio_manager_remove_all(void)
2393 +
2394 + list_for_each_entry_safe(module, next, &modules_list, list) {
2395 + list_del(&module->list);
2396 +- kobject_put(&module->kobj);
2397 + ida_simple_remove(&module_id, module->id);
2398 ++ kobject_put(&module->kobj);
2399 + }
2400 +
2401 + is_empty = list_empty(&modules_list);
2402 +diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
2403 +index 9f0418ee7528..630e7d933b10 100644
2404 +--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
2405 ++++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
2406 +@@ -2025,7 +2025,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
2407 + struct ieee_param *param;
2408 + uint ret = 0;
2409 +
2410 +- if (p->length < sizeof(struct ieee_param) || !p->pointer) {
2411 ++ if (!p->pointer || p->length != sizeof(struct ieee_param)) {
2412 + ret = -EINVAL;
2413 + goto out;
2414 + }
2415 +@@ -2812,7 +2812,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
2416 + goto out;
2417 + }
2418 +
2419 +- if (!p->pointer) {
2420 ++ if (!p->pointer || p->length != sizeof(struct ieee_param)) {
2421 + ret = -EINVAL;
2422 + goto out;
2423 + }
2424 +diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
2425 +index b44e902ed338..b6d56cfb0a19 100644
2426 +--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
2427 ++++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
2428 +@@ -476,14 +476,13 @@ int rtl8723bs_xmit_thread(void *context)
2429 + s32 ret;
2430 + struct adapter *padapter;
2431 + struct xmit_priv *pxmitpriv;
2432 +- u8 thread_name[20] = "RTWHALXT";
2433 +-
2434 ++ u8 thread_name[20];
2435 +
2436 + ret = _SUCCESS;
2437 + padapter = context;
2438 + pxmitpriv = &padapter->xmitpriv;
2439 +
2440 +- rtw_sprintf(thread_name, 20, "%s-"ADPT_FMT, thread_name, ADPT_ARG(padapter));
2441 ++ rtw_sprintf(thread_name, 20, "RTWHALXT-" ADPT_FMT, ADPT_ARG(padapter));
2442 + thread_enter(thread_name);
2443 +
2444 + DBG_871X("start "FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
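
The rtl8723bs_xmit fix is worth spelling out: the old rtw_sprintf() passed thread_name as both the destination buffer and a %s source, which is undefined behaviour in every printf family. A minimal demonstration of the bug shape and the fix, with ADPT_FMT/ADPT_ARG replaced by a plain %d for illustration:

#include <stdio.h>

int main(void)
{
	char name[20] = "RTWHALXT";

	/* Broken form removed by the patch: 'name' is both the output
	 * buffer and a %s argument -- undefined behaviour.
	 *
	 * snprintf(name, sizeof(name), "%s-%d", name, 0);
	 */

	/* Fixed form: fold the prefix into the format string. */
	snprintf(name, sizeof(name), "RTWHALXT-%d", 0);
	puts(name);	/* RTWHALXT-0 */
	return 0;
}
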
2445 +diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
2446 +index d1b199e3e5bd..d8d44fd9a92f 100644
2447 +--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
2448 ++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
2449 +@@ -3379,7 +3379,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
2450 +
2451 + /* down(&ieee->wx_sem); */
2452 +
2453 +- if (p->length < sizeof(struct ieee_param) || !p->pointer) {
2454 ++ if (!p->pointer || p->length != sizeof(struct ieee_param)) {
2455 + ret = -EINVAL;
2456 + goto out;
2457 + }
2458 +@@ -4213,7 +4213,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
2459 +
2460 +
2461 + /* if (p->length < sizeof(struct ieee_param) || !p->pointer) { */
2462 +- if (!p->pointer) {
2463 ++ if (!p->pointer || p->length != sizeof(*param)) {
2464 + ret = -EINVAL;
2465 + goto out;
2466 + }
2467 +diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
2468 +index 3b94e80f1d5e..879ceef517fb 100644
2469 +--- a/drivers/staging/vt6656/dpc.c
2470 ++++ b/drivers/staging/vt6656/dpc.c
2471 +@@ -130,7 +130,7 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
2472 +
2473 + vnt_rf_rssi_to_dbm(priv, *rssi, &rx_dbm);
2474 +
2475 +- priv->bb_pre_ed_rssi = (u8)rx_dbm + 1;
2476 ++ priv->bb_pre_ed_rssi = (u8)-rx_dbm + 1;
2477 + priv->current_rssi = priv->bb_pre_ed_rssi;
2478 +
2479 + skb_pull(skb, 8);
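
The vt6656 one-liner is a sign fix: vnt_rf_rssi_to_dbm() yields a negative dBm value, and truncating it straight to u8 stored garbage; negating first stores the magnitude the rest of the driver expects. The arithmetic, assuming a typical -60 dBm reading:

#include <stdio.h>

int main(void)
{
	long rx_dbm = -60;	/* typical received strength in dBm */

	unsigned char bad = (unsigned char)rx_dbm + 1;	 /* 197: truncated negative */
	unsigned char good = (unsigned char)-rx_dbm + 1; /* 61: magnitude + 1 */

	printf("bad=%u good=%u\n", bad, good);
	return 0;
}
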
2480 +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
2481 +index c070cb2a6a5b..d19e051f2bc2 100644
2482 +--- a/drivers/target/iscsi/iscsi_target.c
2483 ++++ b/drivers/target/iscsi/iscsi_target.c
2484 +@@ -1165,9 +1165,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2485 + hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
2486 + conn->cid);
2487 +
2488 +- if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
2489 +- return iscsit_add_reject_cmd(cmd,
2490 +- ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
2491 ++ target_get_sess_cmd(&cmd->se_cmd, true);
2492 +
2493 + cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
2494 + scsilun_to_int(&hdr->lun));
2495 +@@ -2004,9 +2002,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2496 + conn->sess->se_sess, 0, DMA_NONE,
2497 + TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
2498 +
2499 +- if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
2500 +- return iscsit_add_reject_cmd(cmd,
2501 +- ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
2502 ++ target_get_sess_cmd(&cmd->se_cmd, true);
2503 +
2504 + /*
2505 + * TASK_REASSIGN for ERL=2 / connection stays inside of
2506 +@@ -4151,6 +4147,9 @@ int iscsit_close_connection(
2507 + iscsit_stop_nopin_response_timer(conn);
2508 + iscsit_stop_nopin_timer(conn);
2509 +
2510 ++ if (conn->conn_transport->iscsit_wait_conn)
2511 ++ conn->conn_transport->iscsit_wait_conn(conn);
2512 ++
2513 + /*
2514 + * During Connection recovery drop unacknowledged out of order
2515 + * commands for this connection, and prepare the other commands
2516 +@@ -4233,11 +4232,6 @@ int iscsit_close_connection(
2517 + * must wait until they have completed.
2518 + */
2519 + iscsit_check_conn_usage_count(conn);
2520 +- target_sess_cmd_list_set_waiting(sess->se_sess);
2521 +- target_wait_for_sess_cmds(sess->se_sess);
2522 +-
2523 +- if (conn->conn_transport->iscsit_wait_conn)
2524 +- conn->conn_transport->iscsit_wait_conn(conn);
2525 +
2526 + ahash_request_free(conn->conn_tx_hash);
2527 + if (conn->conn_rx_hash) {
2528 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2529 +index eda8b4736c15..d542e26ca56a 100644
2530 +--- a/drivers/target/target_core_transport.c
2531 ++++ b/drivers/target/target_core_transport.c
2532 +@@ -666,6 +666,11 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
2533 +
2534 + target_remove_from_state_list(cmd);
2535 +
2536 ++ /*
2537 ++ * Clear struct se_cmd->se_lun before the handoff to FE.
2538 ++ */
2539 ++ cmd->se_lun = NULL;
2540 ++
2541 + spin_lock_irqsave(&cmd->t_state_lock, flags);
2542 + /*
2543 + * Determine if frontend context caller is requesting the stopping of
2544 +@@ -693,6 +698,17 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
2545 + return cmd->se_tfo->check_stop_free(cmd);
2546 + }
2547 +
2548 ++static void transport_lun_remove_cmd(struct se_cmd *cmd)
2549 ++{
2550 ++ struct se_lun *lun = cmd->se_lun;
2551 ++
2552 ++ if (!lun)
2553 ++ return;
2554 ++
2555 ++ if (cmpxchg(&cmd->lun_ref_active, true, false))
2556 ++ percpu_ref_put(&lun->lun_ref);
2557 ++}
2558 ++
2559 + static void target_complete_failure_work(struct work_struct *work)
2560 + {
2561 + struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2562 +@@ -783,6 +799,8 @@ static void target_handle_abort(struct se_cmd *cmd)
2563 +
2564 + WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
2565 +
2566 ++ transport_lun_remove_cmd(cmd);
2567 ++
2568 + transport_cmd_check_stop_to_fabric(cmd);
2569 + }
2570 +
2571 +@@ -1695,6 +1713,7 @@ static void target_complete_tmr_failure(struct work_struct *work)
2572 + se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
2573 + se_cmd->se_tfo->queue_tm_rsp(se_cmd);
2574 +
2575 ++ transport_lun_remove_cmd(se_cmd);
2576 + transport_cmd_check_stop_to_fabric(se_cmd);
2577 + }
2578 +
2579 +@@ -1885,6 +1904,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
2580 + goto queue_full;
2581 +
2582 + check_stop:
2583 ++ transport_lun_remove_cmd(cmd);
2584 + transport_cmd_check_stop_to_fabric(cmd);
2585 + return;
2586 +
2587 +@@ -2182,6 +2202,7 @@ queue_status:
2588 + transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2589 + return;
2590 + }
2591 ++ transport_lun_remove_cmd(cmd);
2592 + transport_cmd_check_stop_to_fabric(cmd);
2593 + }
2594 +
2595 +@@ -2276,6 +2297,7 @@ static void target_complete_ok_work(struct work_struct *work)
2596 + if (ret)
2597 + goto queue_full;
2598 +
2599 ++ transport_lun_remove_cmd(cmd);
2600 + transport_cmd_check_stop_to_fabric(cmd);
2601 + return;
2602 + }
2603 +@@ -2301,6 +2323,7 @@ static void target_complete_ok_work(struct work_struct *work)
2604 + if (ret)
2605 + goto queue_full;
2606 +
2607 ++ transport_lun_remove_cmd(cmd);
2608 + transport_cmd_check_stop_to_fabric(cmd);
2609 + return;
2610 + }
2611 +@@ -2336,6 +2359,7 @@ queue_rsp:
2612 + if (ret)
2613 + goto queue_full;
2614 +
2615 ++ transport_lun_remove_cmd(cmd);
2616 + transport_cmd_check_stop_to_fabric(cmd);
2617 + return;
2618 + }
2619 +@@ -2371,6 +2395,7 @@ queue_status:
2620 + break;
2621 + }
2622 +
2623 ++ transport_lun_remove_cmd(cmd);
2624 + transport_cmd_check_stop_to_fabric(cmd);
2625 + return;
2626 +
2627 +@@ -2697,6 +2722,9 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2628 + */
2629 + if (cmd->state_active)
2630 + target_remove_from_state_list(cmd);
2631 ++
2632 ++ if (cmd->se_lun)
2633 ++ transport_lun_remove_cmd(cmd);
2634 + }
2635 + if (aborted)
2636 + cmd->free_compl = &compl;
2637 +@@ -2768,9 +2796,6 @@ static void target_release_cmd_kref(struct kref *kref)
2638 + struct completion *abrt_compl = se_cmd->abrt_compl;
2639 + unsigned long flags;
2640 +
2641 +- if (se_cmd->lun_ref_active)
2642 +- percpu_ref_put(&se_cmd->se_lun->lun_ref);
2643 +-
2644 + if (se_sess) {
2645 + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2646 + list_del_init(&se_cmd->se_cmd_list);
2647 +diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
2648 +index c5974c9af841..e53932d27ac5 100644
2649 +--- a/drivers/thunderbolt/switch.c
2650 ++++ b/drivers/thunderbolt/switch.c
2651 +@@ -274,6 +274,12 @@ out:
2652 + return ret;
2653 + }
2654 +
2655 ++static int tb_switch_nvm_no_read(void *priv, unsigned int offset, void *val,
2656 ++ size_t bytes)
2657 ++{
2658 ++ return -EPERM;
2659 ++}
2660 ++
2661 + static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
2662 + size_t bytes)
2663 + {
2664 +@@ -319,6 +325,7 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
2665 + config.read_only = true;
2666 + } else {
2667 + config.name = "nvm_non_active";
2668 ++ config.reg_read = tb_switch_nvm_no_read;
2669 + config.reg_write = tb_switch_nvm_write;
2670 + config.root_only = true;
2671 + }
2672 +diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
2673 +index d1cdd2ab8b4c..d367803e2044 100644
2674 +--- a/drivers/tty/serdev/serdev-ttyport.c
2675 ++++ b/drivers/tty/serdev/serdev-ttyport.c
2676 +@@ -265,7 +265,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
2677 + struct device *parent,
2678 + struct tty_driver *drv, int idx)
2679 + {
2680 +- const struct tty_port_client_operations *old_ops;
2681 + struct serdev_controller *ctrl;
2682 + struct serport *serport;
2683 + int ret;
2684 +@@ -284,7 +283,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
2685 +
2686 + ctrl->ops = &ctrl_ops;
2687 +
2688 +- old_ops = port->client_ops;
2689 + port->client_ops = &client_ops;
2690 + port->client_data = ctrl;
2691 +
2692 +@@ -297,7 +295,7 @@ struct device *serdev_tty_port_register(struct tty_port *port,
2693 +
2694 + err_reset_data:
2695 + port->client_data = NULL;
2696 +- port->client_ops = old_ops;
2697 ++ port->client_ops = &tty_port_default_client_ops;
2698 + serdev_controller_put(ctrl);
2699 +
2700 + return ERR_PTR(ret);
2701 +@@ -312,8 +310,8 @@ int serdev_tty_port_unregister(struct tty_port *port)
2702 + return -ENODEV;
2703 +
2704 + serdev_controller_remove(ctrl);
2705 +- port->client_ops = NULL;
2706 + port->client_data = NULL;
2707 ++ port->client_ops = &tty_port_default_client_ops;
2708 + serdev_controller_put(ctrl);
2709 +
2710 + return 0;
2711 +diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
2712 +index 0438d9a905ce..6ba2efde7252 100644
2713 +--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
2714 ++++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
2715 +@@ -379,7 +379,6 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
2716 + port.port.line = rc;
2717 +
2718 + port.port.irq = irq_of_parse_and_map(np, 0);
2719 +- port.port.irqflags = IRQF_SHARED;
2720 + port.port.handle_irq = aspeed_vuart_handle_irq;
2721 + port.port.iotype = UPIO_MEM;
2722 + port.port.type = PORT_16550A;
2723 +diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
2724 +index e682390ce0de..28bdbd7b4ab2 100644
2725 +--- a/drivers/tty/serial/8250/8250_core.c
2726 ++++ b/drivers/tty/serial/8250/8250_core.c
2727 +@@ -174,7 +174,7 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
2728 + struct hlist_head *h;
2729 + struct hlist_node *n;
2730 + struct irq_info *i;
2731 +- int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
2732 ++ int ret;
2733 +
2734 + mutex_lock(&hash_mutex);
2735 +
2736 +@@ -209,9 +209,8 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
2737 + INIT_LIST_HEAD(&up->list);
2738 + i->head = &up->list;
2739 + spin_unlock_irq(&i->lock);
2740 +- irq_flags |= up->port.irqflags;
2741 + ret = request_irq(up->port.irq, serial8250_interrupt,
2742 +- irq_flags, up->port.name, i);
2743 ++ up->port.irqflags, up->port.name, i);
2744 + if (ret < 0)
2745 + serial_do_unlink(i, up);
2746 + }
2747 +diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
2748 +index 0826cfdbd406..9ba31701a372 100644
2749 +--- a/drivers/tty/serial/8250/8250_of.c
2750 ++++ b/drivers/tty/serial/8250/8250_of.c
2751 +@@ -172,7 +172,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
2752 +
2753 + port->type = type;
2754 + port->uartclk = clk;
2755 +- port->irqflags |= IRQF_SHARED;
2756 +
2757 + if (of_property_read_bool(np, "no-loopback-test"))
2758 + port->flags |= UPF_SKIP_TEST;
2759 +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
2760 +index 8407166610ce..2c65c775bf5a 100644
2761 +--- a/drivers/tty/serial/8250/8250_port.c
2762 ++++ b/drivers/tty/serial/8250/8250_port.c
2763 +@@ -2192,6 +2192,10 @@ int serial8250_do_startup(struct uart_port *port)
2764 + }
2765 + }
2766 +
2767 ++ /* Check if we need to have shared IRQs */
2768 ++ if (port->irq && (up->port.flags & UPF_SHARE_IRQ))
2769 ++ up->port.irqflags |= IRQF_SHARED;
2770 ++
2771 + if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
2772 + unsigned char iir1;
2773 + /*
2774 +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
2775 +index 1ba9bc667e13..8a909d556185 100644
2776 +--- a/drivers/tty/serial/atmel_serial.c
2777 ++++ b/drivers/tty/serial/atmel_serial.c
2778 +@@ -574,7 +574,8 @@ static void atmel_stop_tx(struct uart_port *port)
2779 + atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
2780 +
2781 + if (atmel_uart_is_half_duplex(port))
2782 +- atmel_start_rx(port);
2783 ++ if (!atomic_read(&atmel_port->tasklet_shutdown))
2784 ++ atmel_start_rx(port);
2785 +
2786 + }
2787 +
2788 +diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
2789 +index 9d8c660dc289..22d8705cd5cd 100644
2790 +--- a/drivers/tty/serial/imx.c
2791 ++++ b/drivers/tty/serial/imx.c
2792 +@@ -603,7 +603,7 @@ static void imx_uart_dma_tx(struct imx_port *sport)
2793 +
2794 + sport->tx_bytes = uart_circ_chars_pending(xmit);
2795 +
2796 +- if (xmit->tail < xmit->head) {
2797 ++ if (xmit->tail < xmit->head || xmit->head == 0) {
2798 + sport->dma_tx_nents = 1;
2799 + sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
2800 + } else {
2801 +diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
2802 +index 14c6306bc462..f98a79172ad2 100644
2803 +--- a/drivers/tty/serial/qcom_geni_serial.c
2804 ++++ b/drivers/tty/serial/qcom_geni_serial.c
2805 +@@ -125,6 +125,7 @@ static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop);
2806 + static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop);
2807 + static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port);
2808 + static void qcom_geni_serial_stop_rx(struct uart_port *uport);
2809 ++static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop);
2810 +
2811 + static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
2812 + 32000000, 48000000, 64000000, 80000000,
2813 +@@ -615,7 +616,7 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
2814 + u32 irq_en;
2815 + u32 status;
2816 + struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
2817 +- u32 irq_clear = S_CMD_DONE_EN;
2818 ++ u32 s_irq_status;
2819 +
2820 + irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
2821 + irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
2822 +@@ -631,10 +632,19 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
2823 + return;
2824 +
2825 + geni_se_cancel_s_cmd(&port->se);
2826 +- qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
2827 +- S_GENI_CMD_CANCEL, false);
2828 ++ qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
2829 ++ S_CMD_CANCEL_EN, true);
2830 ++ /*
2831 ++	 * If a timeout occurs, the secondary engine remains active
2832 ++	 * and the abort sequence is executed.
2833 ++ */
2834 ++ s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
2835 ++ /* Flush the Rx buffer */
2836 ++ if (s_irq_status & S_RX_FIFO_LAST_EN)
2837 ++ qcom_geni_serial_handle_rx(uport, true);
2838 ++ writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
2839 ++
2840 + status = readl(uport->membase + SE_GENI_STATUS);
2841 +- writel(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
2842 + if (status & S_GENI_CMD_ACTIVE)
2843 + qcom_geni_serial_abort_rx(uport);
2844 + }
2845 +diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
2846 +index 044c3cbdcfa4..ea80bf872f54 100644
2847 +--- a/drivers/tty/tty_port.c
2848 ++++ b/drivers/tty/tty_port.c
2849 +@@ -52,10 +52,11 @@ static void tty_port_default_wakeup(struct tty_port *port)
2850 + }
2851 + }
2852 +
2853 +-static const struct tty_port_client_operations default_client_ops = {
2854 ++const struct tty_port_client_operations tty_port_default_client_ops = {
2855 + .receive_buf = tty_port_default_receive_buf,
2856 + .write_wakeup = tty_port_default_wakeup,
2857 + };
2858 ++EXPORT_SYMBOL_GPL(tty_port_default_client_ops);
2859 +
2860 + void tty_port_init(struct tty_port *port)
2861 + {
2862 +@@ -68,7 +69,7 @@ void tty_port_init(struct tty_port *port)
2863 + spin_lock_init(&port->lock);
2864 + port->close_delay = (50 * HZ) / 100;
2865 + port->closing_wait = (3000 * HZ) / 100;
2866 +- port->client_ops = &default_client_ops;
2867 ++ port->client_ops = &tty_port_default_client_ops;
2868 + kref_init(&port->kref);
2869 + }
2870 + EXPORT_SYMBOL(tty_port_init);
2871 +diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
2872 +index 78732feaf65b..44d974d4159f 100644
2873 +--- a/drivers/tty/vt/selection.c
2874 ++++ b/drivers/tty/vt/selection.c
2875 +@@ -29,6 +29,8 @@
2876 + #include <linux/console.h>
2877 + #include <linux/tty_flip.h>
2878 +
2879 ++#include <linux/sched/signal.h>
2880 ++
2881 + /* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */
2882 + #define isspace(c) ((c) == ' ')
2883 +
2884 +@@ -350,6 +352,7 @@ int paste_selection(struct tty_struct *tty)
2885 + unsigned int count;
2886 + struct tty_ldisc *ld;
2887 + DECLARE_WAITQUEUE(wait, current);
2888 ++ int ret = 0;
2889 +
2890 + console_lock();
2891 + poke_blanked_console();
2892 +@@ -363,6 +366,10 @@ int paste_selection(struct tty_struct *tty)
2893 + add_wait_queue(&vc->paste_wait, &wait);
2894 + while (sel_buffer && sel_buffer_lth > pasted) {
2895 + set_current_state(TASK_INTERRUPTIBLE);
2896 ++ if (signal_pending(current)) {
2897 ++ ret = -EINTR;
2898 ++ break;
2899 ++ }
2900 + if (tty_throttled(tty)) {
2901 + schedule();
2902 + continue;
2903 +@@ -378,6 +385,6 @@ int paste_selection(struct tty_struct *tty)
2904 +
2905 + tty_buffer_unlock_exclusive(&vc->port);
2906 + tty_ldisc_deref(ld);
2907 +- return 0;
2908 ++ return ret;
2909 + }
2910 + EXPORT_SYMBOL_GPL(paste_selection);
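
paste_selection() previously looped in TASK_INTERRUPTIBLE with no way out: a throttled tty left it sleeping indefinitely with no reaction to signals. The fix is the standard interruptible-wait pattern, re-checking for a pending signal on each pass after arming the wait. A userspace analogue with a condition variable; pthread_cond_wait() stands in for schedule() and the 'cancelled' flag for signal_pending(current):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool cancelled;		/* signal_pending(current) */
static bool throttled = true;	/* tty_throttled(tty) */

/* Re-test the exit condition on every iteration, after taking the
 * lock and before sleeping, and bail out with an error rather than
 * waiting forever. */
static int paste_wait(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	while (throttled) {
		if (cancelled) {
			ret = -1;	/* -EINTR */
			break;
		}
		pthread_cond_wait(&cond, &lock);	/* schedule() */
	}
	pthread_mutex_unlock(&lock);
	return ret;
}
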
2911 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
2912 +index 34aa39d1aed9..3b4ccc2a30c1 100644
2913 +--- a/drivers/tty/vt/vt.c
2914 ++++ b/drivers/tty/vt/vt.c
2915 +@@ -936,10 +936,21 @@ static void flush_scrollback(struct vc_data *vc)
2916 + WARN_CONSOLE_UNLOCKED();
2917 +
2918 + set_origin(vc);
2919 +- if (vc->vc_sw->con_flush_scrollback)
2920 ++ if (vc->vc_sw->con_flush_scrollback) {
2921 + vc->vc_sw->con_flush_scrollback(vc);
2922 +- else
2923 ++ } else if (con_is_visible(vc)) {
2924 ++ /*
2925 ++	 * When no con_flush_scrollback method is provided, the
2926 ++	 * legacy way to flush the scrollback buffer is to use
2927 ++ * a side effect of the con_switch method. We do it only on
2928 ++ * the foreground console as background consoles have no
2929 ++ * scrollback buffers in that case and we obviously don't
2930 ++ * want to switch to them.
2931 ++ */
2932 ++ hide_cursor(vc);
2933 + vc->vc_sw->con_switch(vc);
2934 ++ set_cursor(vc);
2935 ++ }
2936 + }
2937 +
2938 + /*
2939 +diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
2940 +index 8b0ed139592f..ee6c91ef1f6c 100644
2941 +--- a/drivers/tty/vt/vt_ioctl.c
2942 ++++ b/drivers/tty/vt/vt_ioctl.c
2943 +@@ -876,15 +876,20 @@ int vt_ioctl(struct tty_struct *tty,
2944 + return -EINVAL;
2945 +
2946 + for (i = 0; i < MAX_NR_CONSOLES; i++) {
2947 ++ struct vc_data *vcp;
2948 ++
2949 + if (!vc_cons[i].d)
2950 + continue;
2951 + console_lock();
2952 +- if (v.v_vlin)
2953 +- vc_cons[i].d->vc_scan_lines = v.v_vlin;
2954 +- if (v.v_clin)
2955 +- vc_cons[i].d->vc_font.height = v.v_clin;
2956 +- vc_cons[i].d->vc_resize_user = 1;
2957 +- vc_resize(vc_cons[i].d, v.v_cols, v.v_rows);
2958 ++ vcp = vc_cons[i].d;
2959 ++ if (vcp) {
2960 ++ if (v.v_vlin)
2961 ++ vcp->vc_scan_lines = v.v_vlin;
2962 ++ if (v.v_clin)
2963 ++ vcp->vc_font.height = v.v_clin;
2964 ++ vcp->vc_resize_user = 1;
2965 ++ vc_resize(vcp, v.v_cols, v.v_rows);
2966 ++ }
2967 + console_unlock();
2968 + }
2969 + break;
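
The VT_RESIZEX hunk re-reads vc_cons[i].d after taking console_lock(), because the unlocked check at the top of the loop can race with console deallocation. The same check-then-recheck shape in portable C with pthreads (names are illustrative; the cheap unlocked read is only tolerable because any stale non-NULL value is re-validated under the lock):

#include <pthread.h>
#include <stddef.h>

struct console { int rows, cols; };

static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;
static struct console *consoles[16];	/* entries may become NULL at any time */

static void resize_all(int rows, int cols)
{
	for (size_t i = 0; i < 16; i++) {
		if (!consoles[i])		/* cheap unlocked skip, may be stale */
			continue;
		pthread_mutex_lock(&console_lock);
		struct console *c = consoles[i];	/* re-read under the lock */
		if (c) {				/* it may have vanished meanwhile */
			c->rows = rows;
			c->cols = cols;
		}
		pthread_mutex_unlock(&console_lock);
	}
}

int main(void)
{
	static struct console c0;

	consoles[0] = &c0;
	resize_all(25, 80);
	return c0.cols == 80 ? 0 : 1;
}
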
2970 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
2971 +index 3e94259406d7..c68217b7dace 100644
2972 +--- a/drivers/usb/core/config.c
2973 ++++ b/drivers/usb/core/config.c
2974 +@@ -256,6 +256,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
2975 + struct usb_host_interface *ifp, int num_ep,
2976 + unsigned char *buffer, int size)
2977 + {
2978 ++ struct usb_device *udev = to_usb_device(ddev);
2979 + unsigned char *buffer0 = buffer;
2980 + struct usb_endpoint_descriptor *d;
2981 + struct usb_host_endpoint *endpoint;
2982 +@@ -297,6 +298,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
2983 + goto skip_to_next_endpoint_or_interface_descriptor;
2984 + }
2985 +
2986 ++ /* Ignore blacklisted endpoints */
2987 ++ if (udev->quirks & USB_QUIRK_ENDPOINT_BLACKLIST) {
2988 ++ if (usb_endpoint_is_blacklisted(udev, ifp, d)) {
2989 ++ dev_warn(ddev, "config %d interface %d altsetting %d has a blacklisted endpoint with address 0x%X, skipping\n",
2990 ++ cfgno, inum, asnum,
2991 ++ d->bEndpointAddress);
2992 ++ goto skip_to_next_endpoint_or_interface_descriptor;
2993 ++ }
2994 ++ }
2995 ++
2996 + endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
2997 + ++ifp->desc.bNumEndpoints;
2998 +
2999 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
3000 +index 4ac74b354801..f381faa10f15 100644
3001 +--- a/drivers/usb/core/hub.c
3002 ++++ b/drivers/usb/core/hub.c
3003 +@@ -37,7 +37,9 @@
3004 + #include "otg_whitelist.h"
3005 +
3006 + #define USB_VENDOR_GENESYS_LOGIC 0x05e3
3007 ++#define USB_VENDOR_SMSC 0x0424
3008 + #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
3009 ++#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
3010 +
3011 + #define USB_TP_TRANSMISSION_DELAY 40 /* ns */
3012 + #define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
3013 +@@ -1216,11 +1218,6 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
3014 + #ifdef CONFIG_PM
3015 + udev->reset_resume = 1;
3016 + #endif
3017 +- /* Don't set the change_bits when the device
3018 +- * was powered off.
3019 +- */
3020 +- if (test_bit(port1, hub->power_bits))
3021 +- set_bit(port1, hub->change_bits);
3022 +
3023 + } else {
3024 + /* The power session is gone; tell hub_wq */
3025 +@@ -1730,6 +1727,10 @@ static void hub_disconnect(struct usb_interface *intf)
3026 + kfree(hub->buffer);
3027 +
3028 + pm_suspend_ignore_children(&intf->dev, false);
3029 ++
3030 ++ if (hub->quirk_disable_autosuspend)
3031 ++ usb_autopm_put_interface(intf);
3032 ++
3033 + kref_put(&hub->kref, hub_release);
3034 + }
3035 +
3036 +@@ -1862,6 +1863,11 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
3037 + if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND)
3038 + hub->quirk_check_port_auto_suspend = 1;
3039 +
3040 ++ if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) {
3041 ++ hub->quirk_disable_autosuspend = 1;
3042 ++ usb_autopm_get_interface(intf);
3043 ++ }
3044 ++
3045 + if (hub_configure(hub, &desc->endpoint[0].desc) >= 0)
3046 + return 0;
3047 +
3048 +@@ -5484,6 +5490,10 @@ out_hdev_lock:
3049 + }
3050 +
3051 + static const struct usb_device_id hub_id_table[] = {
3052 ++ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS,
3053 ++ .idVendor = USB_VENDOR_SMSC,
3054 ++ .bInterfaceClass = USB_CLASS_HUB,
3055 ++ .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
3056 + { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
3057 + | USB_DEVICE_ID_MATCH_INT_CLASS,
3058 + .idVendor = USB_VENDOR_GENESYS_LOGIC,
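
HUB_QUIRK_DISABLE_AUTOSUSPEND works by holding one extra runtime-PM usage reference for the lifetime of the interface: usb_autopm_get_interface() in hub_probe(), balanced by usb_autopm_put_interface() in hub_disconnect(). A toy sketch of that pairing with a plain counter (all names here are hypothetical):

#include <assert.h>

struct pm_dev { int usage; };		/* usage > 0 means "may not suspend" */

static void pm_get(struct pm_dev *d) { d->usage++; }
static void pm_put(struct pm_dev *d) { assert(d->usage > 0); d->usage--; }

struct hub { struct pm_dev *dev; int quirk_disable_autosuspend; };

static void hub_probe(struct hub *h, int quirks)
{
	if (quirks & 0x02) {		/* HUB_QUIRK_DISABLE_AUTOSUSPEND */
		h->quirk_disable_autosuspend = 1;
		pm_get(h->dev);		/* pin the hub awake for its lifetime */
	}
}

static void hub_disconnect(struct hub *h)
{
	if (h->quirk_disable_autosuspend)
		pm_put(h->dev);		/* drop the reference exactly once */
}

int main(void)
{
	struct pm_dev dev = { 0 };
	struct hub h = { &dev, 0 };

	hub_probe(&h, 0x02);
	hub_disconnect(&h);
	return dev.usage;		/* 0: get and put are balanced */
}
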
3059 +diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
3060 +index a9e24e4b8df1..a97dd1ba964e 100644
3061 +--- a/drivers/usb/core/hub.h
3062 ++++ b/drivers/usb/core/hub.h
3063 +@@ -61,6 +61,7 @@ struct usb_hub {
3064 + unsigned quiescing:1;
3065 + unsigned disconnected:1;
3066 + unsigned in_reset:1;
3067 ++ unsigned quirk_disable_autosuspend:1;
3068 +
3069 + unsigned quirk_check_port_auto_suspend:1;
3070 +
3071 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
3072 +index 6b6413073584..2b24336a72e5 100644
3073 +--- a/drivers/usb/core/quirks.c
3074 ++++ b/drivers/usb/core/quirks.c
3075 +@@ -354,6 +354,10 @@ static const struct usb_device_id usb_quirk_list[] = {
3076 + { USB_DEVICE(0x0904, 0x6103), .driver_info =
3077 + USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
3078 +
3079 ++ /* Sound Devices USBPre2 */
3080 ++ { USB_DEVICE(0x0926, 0x0202), .driver_info =
3081 ++ USB_QUIRK_ENDPOINT_BLACKLIST },
3082 ++
3083 + /* Keytouch QWERTY Panel keyboard */
3084 + { USB_DEVICE(0x0926, 0x3333), .driver_info =
3085 + USB_QUIRK_CONFIG_INTF_STRINGS },
3086 +@@ -445,6 +449,9 @@ static const struct usb_device_id usb_quirk_list[] = {
3087 + /* INTEL VALUE SSD */
3088 + { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
3089 +
3090 ++ /* novation SoundControl XL */
3091 ++ { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
3092 ++
3093 + { } /* terminating entry must be last */
3094 + };
3095 +
3096 +@@ -472,6 +479,39 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
3097 + { } /* terminating entry must be last */
3098 + };
3099 +
3100 ++/*
3101 ++ * Entries for blacklisted endpoints that should be ignored when parsing
3102 ++ * configuration descriptors.
3103 ++ *
3104 ++ * Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST.
3105 ++ */
3106 ++static const struct usb_device_id usb_endpoint_blacklist[] = {
3107 ++ { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
3108 ++ { }
3109 ++};
3110 ++
3111 ++bool usb_endpoint_is_blacklisted(struct usb_device *udev,
3112 ++ struct usb_host_interface *intf,
3113 ++ struct usb_endpoint_descriptor *epd)
3114 ++{
3115 ++ const struct usb_device_id *id;
3116 ++ unsigned int address;
3117 ++
3118 ++ for (id = usb_endpoint_blacklist; id->match_flags; ++id) {
3119 ++ if (!usb_match_device(udev, id))
3120 ++ continue;
3121 ++
3122 ++ if (!usb_match_one_id_intf(udev, intf, id))
3123 ++ continue;
3124 ++
3125 ++ address = id->driver_info;
3126 ++ if (address == epd->bEndpointAddress)
3127 ++ return true;
3128 ++ }
3129 ++
3130 ++ return false;
3131 ++}
3132 ++
3133 + static bool usb_match_any_interface(struct usb_device *udev,
3134 + const struct usb_device_id *id)
3135 + {
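
usb_endpoint_is_blacklisted() is a table walk: match the device, match the interface, then compare driver_info against bEndpointAddress. A self-contained userspace rendering of the same walk; the table entry mirrors the USBPre2 quirk above, while the struct and helper names are invented:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ep_quirk {
	uint16_t vid, pid;
	uint8_t  ifnum;
	uint8_t  ep_address;	/* plays the role of driver_info */
};

static const struct ep_quirk blacklist[] = {
	{ 0x0926, 0x0202, 1, 0x85 },	/* Sound Devices USBPre2, EP 0x85 */
	{ 0 }				/* terminator */
};

static bool endpoint_is_blacklisted(uint16_t vid, uint16_t pid,
				    uint8_t ifnum, uint8_t ep)
{
	for (const struct ep_quirk *q = blacklist; q->vid; q++)
		if (q->vid == vid && q->pid == pid &&
		    q->ifnum == ifnum && q->ep_address == ep)
			return true;
	return false;
}

int main(void)
{
	printf("%d\n", endpoint_is_blacklisted(0x0926, 0x0202, 1, 0x85)); /* 1 */
	printf("%d\n", endpoint_is_blacklisted(0x0926, 0x0202, 0, 0x85)); /* 0 */
	return 0;
}
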
3136 +diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
3137 +index cf4783cf661a..3ad0ee57e859 100644
3138 +--- a/drivers/usb/core/usb.h
3139 ++++ b/drivers/usb/core/usb.h
3140 +@@ -37,6 +37,9 @@ extern void usb_authorize_interface(struct usb_interface *);
3141 + extern void usb_detect_quirks(struct usb_device *udev);
3142 + extern void usb_detect_interface_quirks(struct usb_device *udev);
3143 + extern void usb_release_quirk_list(void);
3144 ++extern bool usb_endpoint_is_blacklisted(struct usb_device *udev,
3145 ++ struct usb_host_interface *intf,
3146 ++ struct usb_endpoint_descriptor *epd);
3147 + extern int usb_remove_device(struct usb_device *udev);
3148 +
3149 + extern int usb_get_device_descriptor(struct usb_device *dev,
3150 +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
3151 +index a9133773b89e..7fd0900a9cb0 100644
3152 +--- a/drivers/usb/dwc2/gadget.c
3153 ++++ b/drivers/usb/dwc2/gadget.c
3154 +@@ -1083,11 +1083,6 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
3155 + else
3156 + packets = 1; /* send one packet if length is zero. */
3157 +
3158 +- if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
3159 +- dev_err(hsotg->dev, "req length > maxpacket*mc\n");
3160 +- return;
3161 +- }
3162 +-
3163 + if (dir_in && index != 0)
3164 + if (hs_ep->isochronous)
3165 + epsize = DXEPTSIZ_MC(packets);
3166 +@@ -1391,6 +1386,13 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
3167 + req->actual = 0;
3168 + req->status = -EINPROGRESS;
3169 +
3170 ++ /* Don't queue ISOC request if length greater than mps*mc */
3171 ++ if (hs_ep->isochronous &&
3172 ++ req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
3173 ++ dev_err(hs->dev, "req length > maxpacket*mc\n");
3174 ++ return -EINVAL;
3175 ++ }
3176 ++
3177 + /* In DDMA mode for ISOC's don't queue request if length greater
3178 + * than descriptor limits.
3179 + */
3180 +@@ -1632,6 +1634,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
3181 + struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
3182 + struct dwc2_hsotg_ep *ep;
3183 + __le16 reply;
3184 ++ u16 status;
3185 + int ret;
3186 +
3187 + dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
3188 +@@ -1643,11 +1646,10 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
3189 +
3190 + switch (ctrl->bRequestType & USB_RECIP_MASK) {
3191 + case USB_RECIP_DEVICE:
3192 +- /*
3193 +- * bit 0 => self powered
3194 +- * bit 1 => remote wakeup
3195 +- */
3196 +- reply = cpu_to_le16(0);
3197 ++ status = 1 << USB_DEVICE_SELF_POWERED;
3198 ++ status |= hsotg->remote_wakeup_allowed <<
3199 ++ USB_DEVICE_REMOTE_WAKEUP;
3200 ++ reply = cpu_to_le16(status);
3201 + break;
3202 +
3203 + case USB_RECIP_INTERFACE:
3204 +@@ -1758,7 +1760,10 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
3205 + case USB_RECIP_DEVICE:
3206 + switch (wValue) {
3207 + case USB_DEVICE_REMOTE_WAKEUP:
3208 +- hsotg->remote_wakeup_allowed = 1;
3209 ++ if (set)
3210 ++ hsotg->remote_wakeup_allowed = 1;
3211 ++ else
3212 ++ hsotg->remote_wakeup_allowed = 0;
3213 + break;
3214 +
3215 + case USB_DEVICE_TEST_MODE:
3216 +@@ -1768,16 +1773,17 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
3217 + return -EINVAL;
3218 +
3219 + hsotg->test_mode = wIndex >> 8;
3220 +- ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
3221 +- if (ret) {
3222 +- dev_err(hsotg->dev,
3223 +- "%s: failed to send reply\n", __func__);
3224 +- return ret;
3225 +- }
3226 + break;
3227 + default:
3228 + return -ENOENT;
3229 + }
3230 ++
3231 ++ ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
3232 ++ if (ret) {
3233 ++ dev_err(hsotg->dev,
3234 ++ "%s: failed to send reply\n", __func__);
3235 ++ return ret;
3236 ++ }
3237 + break;
3238 +
3239 + case USB_RECIP_ENDPOINT:
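
With these dwc2 hunks, GET_STATUS for the device recipient reports real state: bit 0 is always set (self powered) and bit 1 tracks whether SET_FEATURE/CLEAR_FEATURE(REMOTE_WAKEUP) last enabled remote wakeup. The word-building arithmetic can be checked standalone (bit positions restated from the USB 2.0 spec):

#include <stdint.h>
#include <stdio.h>

#define USB_DEVICE_SELF_POWERED  0	/* bit positions per USB 2.0 ch. 9 */
#define USB_DEVICE_REMOTE_WAKEUP 1

static uint16_t get_status_word(int self_powered, int remote_wakeup_allowed)
{
	uint16_t status = 0;

	status |= (uint16_t)(self_powered << USB_DEVICE_SELF_POWERED);
	status |= (uint16_t)(remote_wakeup_allowed << USB_DEVICE_REMOTE_WAKEUP);
	return status;		/* the kernel then converts with cpu_to_le16() */
}

int main(void)
{
	printf("0x%04x\n", get_status_word(1, 0));	/* 0x0001 */
	printf("0x%04x\n", get_status_word(1, 1));	/* 0x0003 */
	return 0;
}
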
3240 +diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
3241 +index 9baabed87d61..f2c97058a00b 100644
3242 +--- a/drivers/usb/dwc3/debug.h
3243 ++++ b/drivers/usb/dwc3/debug.h
3244 +@@ -256,86 +256,77 @@ static inline const char *dwc3_ep_event_string(char *str, size_t size,
3245 + u8 epnum = event->endpoint_number;
3246 + size_t len;
3247 + int status;
3248 +- int ret;
3249 +
3250 +- ret = snprintf(str, size, "ep%d%s: ", epnum >> 1,
3251 ++ len = scnprintf(str, size, "ep%d%s: ", epnum >> 1,
3252 + (epnum & 1) ? "in" : "out");
3253 +- if (ret < 0)
3254 +- return "UNKNOWN";
3255 +
3256 + status = event->status;
3257 +
3258 + switch (event->endpoint_event) {
3259 + case DWC3_DEPEVT_XFERCOMPLETE:
3260 +- len = strlen(str);
3261 +- snprintf(str + len, size - len, "Transfer Complete (%c%c%c)",
3262 ++ len += scnprintf(str + len, size - len,
3263 ++ "Transfer Complete (%c%c%c)",
3264 + status & DEPEVT_STATUS_SHORT ? 'S' : 's',
3265 + status & DEPEVT_STATUS_IOC ? 'I' : 'i',
3266 + status & DEPEVT_STATUS_LST ? 'L' : 'l');
3267 +
3268 +- len = strlen(str);
3269 +-
3270 + if (epnum <= 1)
3271 +- snprintf(str + len, size - len, " [%s]",
3272 ++ scnprintf(str + len, size - len, " [%s]",
3273 + dwc3_ep0_state_string(ep0state));
3274 + break;
3275 + case DWC3_DEPEVT_XFERINPROGRESS:
3276 +- len = strlen(str);
3277 +-
3278 +- snprintf(str + len, size - len, "Transfer In Progress [%d] (%c%c%c)",
3279 ++ scnprintf(str + len, size - len,
3280 ++ "Transfer In Progress [%d] (%c%c%c)",
3281 + event->parameters,
3282 + status & DEPEVT_STATUS_SHORT ? 'S' : 's',
3283 + status & DEPEVT_STATUS_IOC ? 'I' : 'i',
3284 + status & DEPEVT_STATUS_LST ? 'M' : 'm');
3285 + break;
3286 + case DWC3_DEPEVT_XFERNOTREADY:
3287 +- len = strlen(str);
3288 +-
3289 +- snprintf(str + len, size - len, "Transfer Not Ready [%d]%s",
3290 ++ len += scnprintf(str + len, size - len,
3291 ++ "Transfer Not Ready [%d]%s",
3292 + event->parameters,
3293 + status & DEPEVT_STATUS_TRANSFER_ACTIVE ?
3294 + " (Active)" : " (Not Active)");
3295 +
3296 +- len = strlen(str);
3297 +-
3298 + /* Control Endpoints */
3299 + if (epnum <= 1) {
3300 + int phase = DEPEVT_STATUS_CONTROL_PHASE(event->status);
3301 +
3302 + switch (phase) {
3303 + case DEPEVT_STATUS_CONTROL_DATA:
3304 +- snprintf(str + ret, size - ret,
3305 ++ scnprintf(str + len, size - len,
3306 + " [Data Phase]");
3307 + break;
3308 + case DEPEVT_STATUS_CONTROL_STATUS:
3309 +- snprintf(str + ret, size - ret,
3310 ++ scnprintf(str + len, size - len,
3311 + " [Status Phase]");
3312 + }
3313 + }
3314 + break;
3315 + case DWC3_DEPEVT_RXTXFIFOEVT:
3316 +- snprintf(str + ret, size - ret, "FIFO");
3317 ++ scnprintf(str + len, size - len, "FIFO");
3318 + break;
3319 + case DWC3_DEPEVT_STREAMEVT:
3320 + status = event->status;
3321 +
3322 + switch (status) {
3323 + case DEPEVT_STREAMEVT_FOUND:
3324 +- snprintf(str + ret, size - ret, " Stream %d Found",
3325 ++ scnprintf(str + len, size - len, " Stream %d Found",
3326 + event->parameters);
3327 + break;
3328 + case DEPEVT_STREAMEVT_NOTFOUND:
3329 + default:
3330 +- snprintf(str + ret, size - ret, " Stream Not Found");
3331 ++ scnprintf(str + len, size - len, " Stream Not Found");
3332 + break;
3333 + }
3334 +
3335 + break;
3336 + case DWC3_DEPEVT_EPCMDCMPLT:
3337 +- snprintf(str + ret, size - ret, "Endpoint Command Complete");
3338 ++ scnprintf(str + len, size - len, "Endpoint Command Complete");
3339 + break;
3340 + default:
3341 +- snprintf(str, size, "UNKNOWN");
3342 ++ scnprintf(str + len, size - len, "UNKNOWN");
3343 + }
3344 +
3345 + return str;
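
The debug.h rewrite drops the repeated strlen()/snprintf() calls in favor of a running length returned by scnprintf(), which, unlike snprintf(), returns the number of characters actually written, so str + len can never point past the buffer even on truncation. Userspace has no scnprintf(), but a faithful stand-in is a few lines (assumed to match the kernel helper's behavior):

#include <stdarg.h>
#include <stdio.h>

/* Like the kernel's scnprintf: returns chars actually written, not the
 * would-be length, and never more than size - 1. */
static size_t my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (size == 0)
		return 0;
	va_start(ap, fmt);
	n = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (n < 0)
		return 0;
	return (size_t)n < size ? (size_t)n : size - 1;
}

int main(void)
{
	char str[16];
	size_t len;

	len  = my_scnprintf(str, sizeof(str), "ep%d%s: ", 1, "in");
	len += my_scnprintf(str + len, sizeof(str) - len, "Transfer Complete");
	printf("%s (len=%zu)\n", str, len);	/* safely truncated to 15 chars */
	return 0;
}
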
3346 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3347 +index 8b95be897078..e0cb1c2d5675 100644
3348 +--- a/drivers/usb/dwc3/gadget.c
3349 ++++ b/drivers/usb/dwc3/gadget.c
3350 +@@ -2426,7 +2426,8 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
3351 + if (event->status & DEPEVT_STATUS_SHORT && !chain)
3352 + return 1;
3353 +
3354 +- if (event->status & DEPEVT_STATUS_IOC)
3355 ++ if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
3356 ++ (trb->ctrl & DWC3_TRB_CTRL_LST))
3357 + return 1;
3358 +
3359 + return 0;
3360 +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
3361 +index 5ec54b69c29c..0d45d7a4f949 100644
3362 +--- a/drivers/usb/gadget/composite.c
3363 ++++ b/drivers/usb/gadget/composite.c
3364 +@@ -437,12 +437,10 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
3365 + val = CONFIG_USB_GADGET_VBUS_DRAW;
3366 + if (!val)
3367 + return 0;
3368 +- switch (speed) {
3369 +- case USB_SPEED_SUPER:
3370 +- return DIV_ROUND_UP(val, 8);
3371 +- default:
3372 ++ if (speed < USB_SPEED_SUPER)
3373 + return DIV_ROUND_UP(val, 2);
3374 +- }
3375 ++ else
3376 ++ return DIV_ROUND_UP(val, 8);
3377 + }
3378 +
3379 + static int config_buf(struct usb_configuration *config,
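
encode_bMaxPower() converts the configured VBUS draw in mA into descriptor units: 2 mA per unit below SuperSpeed, 8 mA per unit at SuperSpeed and above, rounded up. The arithmetic in isolation (the speed enum values here are simplified stand-ins):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

enum usb_speed { SPEED_HIGH = 3, SPEED_SUPER = 5 };	/* simplified enum */

static unsigned encode_bMaxPower(enum usb_speed speed, unsigned mA)
{
	if (mA == 0)
		return 0;
	return speed < SPEED_SUPER ? DIV_ROUND_UP(mA, 2) : DIV_ROUND_UP(mA, 8);
}

int main(void)
{
	printf("%u\n", encode_bMaxPower(SPEED_HIGH, 500));	/* 250 */
	printf("%u\n", encode_bMaxPower(SPEED_SUPER, 900));	/* 113 */
	return 0;
}
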
3380 +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
3381 +index 7a3a29e5e9d2..af92b2576fe9 100644
3382 +--- a/drivers/usb/host/xhci-hub.c
3383 ++++ b/drivers/usb/host/xhci-hub.c
3384 +@@ -55,6 +55,7 @@ static u8 usb_bos_descriptor [] = {
3385 + static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
3386 + u16 wLength)
3387 + {
3388 ++ struct xhci_port_cap *port_cap = NULL;
3389 + int i, ssa_count;
3390 + u32 temp;
3391 + u16 desc_size, ssp_cap_size, ssa_size = 0;
3392 +@@ -64,16 +65,24 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
3393 + ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size;
3394 +
3395 + /* does xhci support USB 3.1 Enhanced SuperSpeed */
3396 +- if (xhci->usb3_rhub.min_rev >= 0x01) {
3397 ++ for (i = 0; i < xhci->num_port_caps; i++) {
3398 ++ if (xhci->port_caps[i].maj_rev == 0x03 &&
3399 ++ xhci->port_caps[i].min_rev >= 0x01) {
3400 ++ usb3_1 = true;
3401 ++ port_cap = &xhci->port_caps[i];
3402 ++ break;
3403 ++ }
3404 ++ }
3405 ++
3406 ++ if (usb3_1) {
3407 + /* does xhci provide a PSI table for SSA speed attributes? */
3408 +- if (xhci->usb3_rhub.psi_count) {
3409 ++ if (port_cap->psi_count) {
3410 + /* two SSA entries for each unique PSI ID, RX and TX */
3411 +- ssa_count = xhci->usb3_rhub.psi_uid_count * 2;
3412 ++ ssa_count = port_cap->psi_uid_count * 2;
3413 + ssa_size = ssa_count * sizeof(u32);
3414 + ssp_cap_size -= 16; /* skip copying the default SSA */
3415 + }
3416 + desc_size += ssp_cap_size;
3417 +- usb3_1 = true;
3418 + }
3419 + memcpy(buf, &usb_bos_descriptor, min(desc_size, wLength));
3420 +
3421 +@@ -99,7 +108,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
3422 + }
3423 +
3424 + /* If PSI table exists, add the custom speed attributes from it */
3425 +- if (usb3_1 && xhci->usb3_rhub.psi_count) {
3426 ++ if (usb3_1 && port_cap->psi_count) {
3427 + u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp;
3428 + int offset;
3429 +
3430 +@@ -111,7 +120,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
3431 +
3432 + /* attribute count SSAC bits 4:0 and ID count SSIC bits 8:5 */
3433 + bm_attrib = (ssa_count - 1) & 0x1f;
3434 +- bm_attrib |= (xhci->usb3_rhub.psi_uid_count - 1) << 5;
3435 ++ bm_attrib |= (port_cap->psi_uid_count - 1) << 5;
3436 + put_unaligned_le32(bm_attrib, &buf[ssp_cap_base + 4]);
3437 +
3438 + if (wLength < desc_size + ssa_size)
3439 +@@ -124,8 +133,8 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
3440 + * USB 3.1 requires two SSA entries (RX and TX) for every link
3441 + */
3442 + offset = desc_size;
3443 +- for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
3444 +- psi = xhci->usb3_rhub.psi[i];
3445 ++ for (i = 0; i < port_cap->psi_count; i++) {
3446 ++ psi = port_cap->psi[i];
3447 + psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
3448 + psi_exp = XHCI_EXT_PORT_PSIE(psi);
3449 + psi_mant = XHCI_EXT_PORT_PSIM(psi);
3450 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
3451 +index 3b1388fa2f36..884c601bfa15 100644
3452 +--- a/drivers/usb/host/xhci-mem.c
3453 ++++ b/drivers/usb/host/xhci-mem.c
3454 +@@ -1475,9 +1475,15 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
3455 + /* Allow 3 retries for everything but isoc, set CErr = 3 */
3456 + if (!usb_endpoint_xfer_isoc(&ep->desc))
3457 + err_count = 3;
3458 +- /* Some devices get this wrong */
3459 +- if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
3460 +- max_packet = 512;
3461 ++ /* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
3462 ++ if (usb_endpoint_xfer_bulk(&ep->desc)) {
3463 ++ if (udev->speed == USB_SPEED_HIGH)
3464 ++ max_packet = 512;
3465 ++ if (udev->speed == USB_SPEED_FULL) {
3466 ++ max_packet = rounddown_pow_of_two(max_packet);
3467 ++ max_packet = clamp_val(max_packet, 8, 64);
3468 ++ }
3469 ++ }
3470 + /* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
3471 + if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
3472 + avg_trb_len = 8;
3473 +@@ -1909,17 +1915,17 @@ no_bw:
3474 + xhci->usb3_rhub.num_ports = 0;
3475 + xhci->num_active_eps = 0;
3476 + kfree(xhci->usb2_rhub.ports);
3477 +- kfree(xhci->usb2_rhub.psi);
3478 + kfree(xhci->usb3_rhub.ports);
3479 +- kfree(xhci->usb3_rhub.psi);
3480 + kfree(xhci->hw_ports);
3481 + kfree(xhci->rh_bw);
3482 + kfree(xhci->ext_caps);
3483 ++ for (i = 0; i < xhci->num_port_caps; i++)
3484 ++ kfree(xhci->port_caps[i].psi);
3485 ++ kfree(xhci->port_caps);
3486 ++ xhci->num_port_caps = 0;
3487 +
3488 + xhci->usb2_rhub.ports = NULL;
3489 +- xhci->usb2_rhub.psi = NULL;
3490 + xhci->usb3_rhub.ports = NULL;
3491 +- xhci->usb3_rhub.psi = NULL;
3492 + xhci->hw_ports = NULL;
3493 + xhci->rh_bw = NULL;
3494 + xhci->ext_caps = NULL;
3495 +@@ -2120,6 +2126,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
3496 + u8 major_revision, minor_revision;
3497 + struct xhci_hub *rhub;
3498 + struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
3499 ++ struct xhci_port_cap *port_cap;
3500 +
3501 + temp = readl(addr);
3502 + major_revision = XHCI_EXT_PORT_MAJOR(temp);
3503 +@@ -2154,31 +2161,39 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
3504 + /* WTF? "Valid values are ‘1’ to MaxPorts" */
3505 + return;
3506 +
3507 +- rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
3508 +- if (rhub->psi_count) {
3509 +- rhub->psi = kcalloc_node(rhub->psi_count, sizeof(*rhub->psi),
3510 +- GFP_KERNEL, dev_to_node(dev));
3511 +- if (!rhub->psi)
3512 +- rhub->psi_count = 0;
3513 ++ port_cap = &xhci->port_caps[xhci->num_port_caps++];
3514 ++ if (xhci->num_port_caps > max_caps)
3515 ++ return;
3516 ++
3517 ++ port_cap->maj_rev = major_revision;
3518 ++ port_cap->min_rev = minor_revision;
3519 ++ port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
3520 +
3521 +- rhub->psi_uid_count++;
3522 +- for (i = 0; i < rhub->psi_count; i++) {
3523 +- rhub->psi[i] = readl(addr + 4 + i);
3524 ++ if (port_cap->psi_count) {
3525 ++ port_cap->psi = kcalloc_node(port_cap->psi_count,
3526 ++ sizeof(*port_cap->psi),
3527 ++ GFP_KERNEL, dev_to_node(dev));
3528 ++ if (!port_cap->psi)
3529 ++ port_cap->psi_count = 0;
3530 ++
3531 ++ port_cap->psi_uid_count++;
3532 ++ for (i = 0; i < port_cap->psi_count; i++) {
3533 ++ port_cap->psi[i] = readl(addr + 4 + i);
3534 +
3535 + /* count unique ID values, two consecutive entries can
3536 + * have the same ID if the link is asymmetric
3537 + */
3538 +- if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) !=
3539 +- XHCI_EXT_PORT_PSIV(rhub->psi[i - 1])))
3540 +- rhub->psi_uid_count++;
3541 ++ if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
3542 ++ XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
3543 ++ port_cap->psi_uid_count++;
3544 +
3545 + xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
3546 +- XHCI_EXT_PORT_PSIV(rhub->psi[i]),
3547 +- XHCI_EXT_PORT_PSIE(rhub->psi[i]),
3548 +- XHCI_EXT_PORT_PLT(rhub->psi[i]),
3549 +- XHCI_EXT_PORT_PFD(rhub->psi[i]),
3550 +- XHCI_EXT_PORT_LP(rhub->psi[i]),
3551 +- XHCI_EXT_PORT_PSIM(rhub->psi[i]));
3552 ++ XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
3553 ++ XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
3554 ++ XHCI_EXT_PORT_PLT(port_cap->psi[i]),
3555 ++ XHCI_EXT_PORT_PFD(port_cap->psi[i]),
3556 ++ XHCI_EXT_PORT_LP(port_cap->psi[i]),
3557 ++ XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
3558 + }
3559 + }
3560 + /* cache usb2 port capabilities */
3561 +@@ -2213,6 +2228,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
3562 + continue;
3563 + }
3564 + hw_port->rhub = rhub;
3565 ++ hw_port->port_cap = port_cap;
3566 + rhub->num_ports++;
3567 + }
3568 + /* FIXME: Should we disable ports not in the Extended Capabilities? */
3569 +@@ -2303,6 +2319,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
3570 + if (!xhci->ext_caps)
3571 + return -ENOMEM;
3572 +
3573 ++ xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
3574 ++ flags, dev_to_node(dev));
3575 ++ if (!xhci->port_caps)
3576 ++ return -ENOMEM;
3577 ++
3578 + offset = cap_start;
3579 +
3580 + while (offset) {
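
For full-speed bulk endpoints the spec only permits wMaxPacketSize values of 8, 16, 32 or 64, so the xhci-mem.c hunk normalizes whatever the (possibly buggy) device reported: round down to a power of two, then clamp into [8, 64]. The same normalization, standalone (helper functions restate the kernel macros):

#include <stdio.h>

static unsigned rounddown_pow_of_two(unsigned v)
{
	unsigned p = 1;

	while (p * 2 <= v)	/* largest power of two <= v (for v >= 1) */
		p *= 2;
	return p;
}

static unsigned clamp_val(unsigned v, unsigned lo, unsigned hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned bogus[] = { 1023, 96, 64, 9, 4 };

	for (unsigned i = 0; i < sizeof(bogus) / sizeof(bogus[0]); i++) {
		unsigned mp = clamp_val(rounddown_pow_of_two(bogus[i]), 8, 64);
		printf("%4u -> %u\n", bogus[i], mp);
		/* 1023->64, 96->64, 64->64, 9->8, 4->8 */
	}
	return 0;
}
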
3581 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
3582 +index 4917c5b033fa..5e9b537df631 100644
3583 +--- a/drivers/usb/host/xhci-pci.c
3584 ++++ b/drivers/usb/host/xhci-pci.c
3585 +@@ -49,6 +49,7 @@
3586 + #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec
3587 + #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0
3588 + #define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13
3589 ++#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af
3590 +
3591 + #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
3592 + #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
3593 +@@ -187,7 +188,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3594 + pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
3595 + pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
3596 + pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
3597 +- pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
3598 ++ pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI ||
3599 ++ pdev->device == PCI_DEVICE_ID_INTEL_CML_XHCI)) {
3600 + xhci->quirks |= XHCI_PME_STUCK_QUIRK;
3601 + }
3602 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3603 +@@ -302,6 +304,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
3604 + if (!usb_hcd_is_primary_hcd(hcd))
3605 + return 0;
3606 +
3607 ++ if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
3608 ++ xhci_pme_acpi_rtd3_enable(pdev);
3609 ++
3610 + xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
3611 +
3612 + /* Find any debug ports */
3613 +@@ -359,9 +364,6 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
3614 + HCC_MAX_PSA(xhci->hcc_params) >= 4)
3615 + xhci->shared_hcd->can_do_streams = 1;
3616 +
3617 +- if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
3618 +- xhci_pme_acpi_rtd3_enable(dev);
3619 +-
3620 + /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
3621 + pm_runtime_put_noidle(&dev->dev);
3622 +
3623 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
3624 +index 4a2fe56940bd..f7a190fb2353 100644
3625 +--- a/drivers/usb/host/xhci-ring.c
3626 ++++ b/drivers/usb/host/xhci-ring.c
3627 +@@ -2740,6 +2740,42 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
3628 + return 1;
3629 + }
3630 +
3631 ++/*
3632 ++ * Update Event Ring Dequeue Pointer:
3633 ++ * - When all events have finished
3634 ++ * - To avoid "Event Ring Full Error" condition
3635 ++ */
3636 ++static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
3637 ++ union xhci_trb *event_ring_deq)
3638 ++{
3639 ++ u64 temp_64;
3640 ++ dma_addr_t deq;
3641 ++
3642 ++ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
3643 ++ /* If necessary, update the HW's version of the event ring deq ptr. */
3644 ++ if (event_ring_deq != xhci->event_ring->dequeue) {
3645 ++ deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
3646 ++ xhci->event_ring->dequeue);
3647 ++ if (deq == 0)
3648 ++ xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
3649 ++ /*
3650 ++ * Per 4.9.4, Software writes to the ERDP register shall
3651 ++ * always advance the Event Ring Dequeue Pointer value.
3652 ++ */
3653 ++ if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
3654 ++ ((u64) deq & (u64) ~ERST_PTR_MASK))
3655 ++ return;
3656 ++
3657 ++ /* Update HC event ring dequeue pointer */
3658 ++ temp_64 &= ERST_PTR_MASK;
3659 ++ temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
3660 ++ }
3661 ++
3662 ++ /* Clear the event handler busy flag (RW1C) */
3663 ++ temp_64 |= ERST_EHB;
3664 ++ xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
3665 ++}
3666 ++
3667 + /*
3668 + * xHCI spec says we can get an interrupt, and if the HC has an error condition,
3669 + * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
3670 +@@ -2751,9 +2787,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
3671 + union xhci_trb *event_ring_deq;
3672 + irqreturn_t ret = IRQ_NONE;
3673 + unsigned long flags;
3674 +- dma_addr_t deq;
3675 + u64 temp_64;
3676 + u32 status;
3677 ++ int event_loop = 0;
3678 +
3679 + spin_lock_irqsave(&xhci->lock, flags);
3680 + /* Check if the xHC generated the interrupt, or the irq is shared */
3681 +@@ -2807,24 +2843,14 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
3682 + /* FIXME this should be a delayed service routine
3683 + * that clears the EHB.
3684 + */
3685 +- while (xhci_handle_event(xhci) > 0) {}
3686 +-
3687 +- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
3688 +- /* If necessary, update the HW's version of the event ring deq ptr. */
3689 +- if (event_ring_deq != xhci->event_ring->dequeue) {
3690 +- deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
3691 +- xhci->event_ring->dequeue);
3692 +- if (deq == 0)
3693 +- xhci_warn(xhci, "WARN something wrong with SW event "
3694 +- "ring dequeue ptr.\n");
3695 +- /* Update HC event ring dequeue pointer */
3696 +- temp_64 &= ERST_PTR_MASK;
3697 +- temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
3698 ++ while (xhci_handle_event(xhci) > 0) {
3699 ++ if (event_loop++ < TRBS_PER_SEGMENT / 2)
3700 ++ continue;
3701 ++ xhci_update_erst_dequeue(xhci, event_ring_deq);
3702 ++ event_loop = 0;
3703 + }
3704 +
3705 +- /* Clear the event handler busy flag (RW1C); event ring is empty. */
3706 +- temp_64 |= ERST_EHB;
3707 +- xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
3708 ++ xhci_update_erst_dequeue(xhci, event_ring_deq);
3709 + ret = IRQ_HANDLED;
3710 +
3711 + out:
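
The interrupt handler now flushes the event ring dequeue pointer every TRBS_PER_SEGMENT/2 handled events as well as once at the end, so a long burst cannot fill the ring before the controller sees software make progress. The batching shape, simulated in plain C (the event source and the register write are stand-ins):

#include <stdio.h>

#define TRBS_PER_SEGMENT 256

static int events_left = 600;		/* pretend burst of pending events */

static int handle_event(void) { return events_left-- > 0; }

static void update_erst_dequeue(void)
{
	printf("ERDP write (%d events still pending)\n",
	       events_left > 0 ? events_left : 0);
}

int main(void)
{
	int event_loop = 0;

	while (handle_event()) {
		if (event_loop++ < TRBS_PER_SEGMENT / 2)
			continue;
		update_erst_dequeue();	/* mid-burst update, avoids ring full */
		event_loop = 0;
	}
	update_erst_dequeue();		/* final update; kernel also sets EHB */
	return 0;
}
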
3712 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
3713 +index 973d665052a2..98b98a0cd2a8 100644
3714 +--- a/drivers/usb/host/xhci.h
3715 ++++ b/drivers/usb/host/xhci.h
3716 +@@ -1702,12 +1702,20 @@ struct xhci_bus_state {
3717 + * Intel Lynx Point LP xHCI host.
3718 + */
3719 + #define XHCI_MAX_REXIT_TIMEOUT_MS 20
3720 ++struct xhci_port_cap {
3721 ++ u32 *psi; /* array of protocol speed ID entries */
3722 ++ u8 psi_count;
3723 ++ u8 psi_uid_count;
3724 ++ u8 maj_rev;
3725 ++ u8 min_rev;
3726 ++};
3727 +
3728 + struct xhci_port {
3729 + __le32 __iomem *addr;
3730 + int hw_portnum;
3731 + int hcd_portnum;
3732 + struct xhci_hub *rhub;
3733 ++ struct xhci_port_cap *port_cap;
3734 + };
3735 +
3736 + struct xhci_hub {
3737 +@@ -1719,9 +1727,6 @@ struct xhci_hub {
3738 + /* supported protocol extended capability values */
3739 + u8 maj_rev;
3740 + u8 min_rev;
3741 +- u32 *psi; /* array of protocol speed ID entries */
3742 +- u8 psi_count;
3743 +- u8 psi_uid_count;
3744 + };
3745 +
3746 + /* There is one xhci_hcd structure per controller */
3747 +@@ -1880,6 +1885,9 @@ struct xhci_hcd {
3748 + /* cached usb2 extended protocol capabilities */
3749 + u32 *ext_caps;
3750 + unsigned int num_ext_caps;
3751 ++ /* cached extended protocol port capabilities */
3752 ++ struct xhci_port_cap *port_caps;
3753 ++ unsigned int num_port_caps;
3754 + /* Compliance Mode Recovery Data */
3755 + struct timer_list comp_mode_recovery_timer;
3756 + u32 port_status_u0;
3757 +diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
3758 +index dce44fbf031f..dce20301e367 100644
3759 +--- a/drivers/usb/misc/iowarrior.c
3760 ++++ b/drivers/usb/misc/iowarrior.c
3761 +@@ -33,6 +33,14 @@
3762 + #define USB_DEVICE_ID_CODEMERCS_IOWPV2 0x1512
3763 + /* full speed iowarrior */
3764 + #define USB_DEVICE_ID_CODEMERCS_IOW56 0x1503
3765 ++/* fuller speed iowarrior */
3766 ++#define USB_DEVICE_ID_CODEMERCS_IOW28 0x1504
3767 ++#define USB_DEVICE_ID_CODEMERCS_IOW28L 0x1505
3768 ++#define USB_DEVICE_ID_CODEMERCS_IOW100 0x1506
3769 ++
3770 ++/* OEMed devices */
3771 ++#define USB_DEVICE_ID_CODEMERCS_IOW24SAG 0x158a
3772 ++#define USB_DEVICE_ID_CODEMERCS_IOW56AM 0x158b
3773 +
3774 + /* Get a minor range for your devices from the usb maintainer */
3775 + #ifdef CONFIG_USB_DYNAMIC_MINORS
3776 +@@ -133,6 +141,11 @@ static const struct usb_device_id iowarrior_ids[] = {
3777 + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)},
3778 + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV2)},
3779 + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56)},
3780 ++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24SAG)},
3781 ++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56AM)},
3782 ++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28)},
3783 ++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28L)},
3784 ++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW100)},
3785 + {} /* Terminating entry */
3786 + };
3787 + MODULE_DEVICE_TABLE(usb, iowarrior_ids);
3788 +@@ -357,6 +370,7 @@ static ssize_t iowarrior_write(struct file *file,
3789 + }
3790 + switch (dev->product_id) {
3791 + case USB_DEVICE_ID_CODEMERCS_IOW24:
3792 ++ case USB_DEVICE_ID_CODEMERCS_IOW24SAG:
3793 + case USB_DEVICE_ID_CODEMERCS_IOWPV1:
3794 + case USB_DEVICE_ID_CODEMERCS_IOWPV2:
3795 + case USB_DEVICE_ID_CODEMERCS_IOW40:
3796 +@@ -371,6 +385,10 @@ static ssize_t iowarrior_write(struct file *file,
3797 + goto exit;
3798 + break;
3799 + case USB_DEVICE_ID_CODEMERCS_IOW56:
3800 ++ case USB_DEVICE_ID_CODEMERCS_IOW56AM:
3801 ++ case USB_DEVICE_ID_CODEMERCS_IOW28:
3802 ++ case USB_DEVICE_ID_CODEMERCS_IOW28L:
3803 ++ case USB_DEVICE_ID_CODEMERCS_IOW100:
3804 + /* The IOW56 uses asynchronous IO and more urbs */
3805 + if (atomic_read(&dev->write_busy) == MAX_WRITES_IN_FLIGHT) {
3806 + /* Wait until we are below the limit for submitted urbs */
3807 +@@ -493,6 +511,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
3808 + switch (cmd) {
3809 + case IOW_WRITE:
3810 + if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24 ||
3811 ++ dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24SAG ||
3812 + dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV1 ||
3813 + dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV2 ||
3814 + dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW40) {
3815 +@@ -767,7 +786,11 @@ static int iowarrior_probe(struct usb_interface *interface,
3816 + goto error;
3817 + }
3818 +
3819 +- if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
3820 ++ if ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
3821 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
3822 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
3823 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
3824 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)) {
3825 + res = usb_find_last_int_out_endpoint(iface_desc,
3826 + &dev->int_out_endpoint);
3827 + if (res) {
3828 +@@ -780,7 +803,11 @@ static int iowarrior_probe(struct usb_interface *interface,
3829 + /* we have to check the report_size often, so remember it in the endianness suitable for our machine */
3830 + dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
3831 + if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
3832 +- (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56))
3833 ++ ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
3834 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
3835 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
3836 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
3837 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)))
3838 + /* IOWarrior56 has wMaxPacketSize different from report size */
3839 + dev->report_size = 7;
3840 +
3841 +diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
3842 +index 475b9c692827..bb2198496f42 100644
3843 +--- a/drivers/usb/storage/uas.c
3844 ++++ b/drivers/usb/storage/uas.c
3845 +@@ -45,6 +45,7 @@ struct uas_dev_info {
3846 + struct scsi_cmnd *cmnd[MAX_CMNDS];
3847 + spinlock_t lock;
3848 + struct work_struct work;
3849 ++ struct work_struct scan_work; /* for async scanning */
3850 + };
3851 +
3852 + enum {
3853 +@@ -114,6 +115,17 @@ out:
3854 + spin_unlock_irqrestore(&devinfo->lock, flags);
3855 + }
3856 +
3857 ++static void uas_scan_work(struct work_struct *work)
3858 ++{
3859 ++ struct uas_dev_info *devinfo =
3860 ++ container_of(work, struct uas_dev_info, scan_work);
3861 ++ struct Scsi_Host *shost = usb_get_intfdata(devinfo->intf);
3862 ++
3863 ++ dev_dbg(&devinfo->intf->dev, "starting scan\n");
3864 ++ scsi_scan_host(shost);
3865 ++ dev_dbg(&devinfo->intf->dev, "scan complete\n");
3866 ++}
3867 ++
3868 + static void uas_add_work(struct uas_cmd_info *cmdinfo)
3869 + {
3870 + struct scsi_pointer *scp = (void *)cmdinfo;
3871 +@@ -983,6 +995,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
3872 + init_usb_anchor(&devinfo->data_urbs);
3873 + spin_lock_init(&devinfo->lock);
3874 + INIT_WORK(&devinfo->work, uas_do_work);
3875 ++ INIT_WORK(&devinfo->scan_work, uas_scan_work);
3876 +
3877 + result = uas_configure_endpoints(devinfo);
3878 + if (result)
3879 +@@ -999,7 +1012,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
3880 + if (result)
3881 + goto free_streams;
3882 +
3883 +- scsi_scan_host(shost);
3884 ++ /* Submit the delayed_work for SCSI-device scanning */
3885 ++ schedule_work(&devinfo->scan_work);
3886 ++
3887 + return result;
3888 +
3889 + free_streams:
3890 +@@ -1167,6 +1182,12 @@ static void uas_disconnect(struct usb_interface *intf)
3891 + usb_kill_anchored_urbs(&devinfo->data_urbs);
3892 + uas_zap_pending(devinfo, DID_NO_CONNECT);
3893 +
3894 ++ /*
3895 ++ * Prevent SCSI scanning (if it hasn't started yet)
3896 ++ * or wait for the SCSI-scanning routine to stop.
3897 ++ */
3898 ++ cancel_work_sync(&devinfo->scan_work);
3899 ++
3900 + scsi_remove_host(shost);
3901 + uas_free_streams(devinfo);
3902 + scsi_host_put(shost);
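
uas now runs scsi_scan_host() from a work item so probe() returns quickly, and disconnect() calls cancel_work_sync() so teardown never races the scan. A pthread analog of the handshake (illustrative; pthread_join only waits for a started thread, whereas cancel_work_sync() can also cancel a work item that has not run yet):

#include <pthread.h>
#include <stdio.h>

static pthread_t scan_thread;
static int scan_started;

static void *scan_work(void *arg)
{
	(void)arg;
	puts("starting scan");
	/* ... long-running SCSI device scan ... */
	puts("scan complete");
	return NULL;
}

static void probe(void)
{
	/* Kick the scan off asynchronously; probe itself returns at once. */
	scan_started = pthread_create(&scan_thread, NULL, scan_work, NULL) == 0;
}

static void disconnect(void)
{
	/* Like cancel_work_sync(): never tear down while the scan runs. */
	if (scan_started)
		pthread_join(scan_thread, NULL);
	puts("safe to free host structures");
}

int main(void)
{
	probe();
	disconnect();
	return 0;
}
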
3903 +diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
3904 +index 8b9919c26095..456a164364a2 100644
3905 +--- a/drivers/xen/preempt.c
3906 ++++ b/drivers/xen/preempt.c
3907 +@@ -33,7 +33,9 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void)
3908 + * cpu.
3909 + */
3910 + __this_cpu_write(xen_in_preemptible_hcall, false);
3911 +- _cond_resched();
3912 ++ local_irq_enable();
3913 ++ cond_resched();
3914 ++ local_irq_disable();
3915 + __this_cpu_write(xen_in_preemptible_hcall, true);
3916 + }
3917 + }
3918 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
3919 +index b0ccca5d08b5..5cdd1b51285b 100644
3920 +--- a/fs/btrfs/disk-io.c
3921 ++++ b/fs/btrfs/disk-io.c
3922 +@@ -3203,6 +3203,7 @@ retry_root_backup:
3923 + if (IS_ERR(fs_info->fs_root)) {
3924 + err = PTR_ERR(fs_info->fs_root);
3925 + btrfs_warn(fs_info, "failed to read fs tree: %d", err);
3926 ++ fs_info->fs_root = NULL;
3927 + goto fail_qgroup;
3928 + }
3929 +
3930 +@@ -4293,6 +4294,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3931 + cond_resched();
3932 + spin_lock(&delayed_refs->lock);
3933 + }
3934 ++ btrfs_qgroup_destroy_extent_records(trans);
3935 +
3936 + spin_unlock(&delayed_refs->lock);
3937 +
3938 +@@ -4518,7 +4520,6 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
3939 + wake_up(&fs_info->transaction_wait);
3940 +
3941 + btrfs_destroy_delayed_inodes(fs_info);
3942 +- btrfs_assert_delayed_root_empty(fs_info);
3943 +
3944 + btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
3945 + EXTENT_DIRTY);
3946 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3947 +index dc50605ecbda..47ecf7216b3e 100644
3948 +--- a/fs/btrfs/extent-tree.c
3949 ++++ b/fs/btrfs/extent-tree.c
3950 +@@ -4411,6 +4411,8 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
3951 +
3952 + ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
3953 + offset, ins, 1);
3954 ++ if (ret)
3955 ++ btrfs_pin_extent(fs_info, ins->objectid, ins->offset, 1);
3956 + btrfs_put_block_group(block_group);
3957 + return ret;
3958 + }
3959 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3960 +index b83eef445db3..50feb01f27f3 100644
3961 +--- a/fs/btrfs/inode.c
3962 ++++ b/fs/btrfs/inode.c
3963 +@@ -4734,6 +4734,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3964 + u64 bytes_deleted = 0;
3965 + bool be_nice = false;
3966 + bool should_throttle = false;
3967 ++ const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
3968 ++ struct extent_state *cached_state = NULL;
3969 +
3970 + BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
3971 +
3972 +@@ -4750,6 +4752,10 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3973 + return -ENOMEM;
3974 + path->reada = READA_BACK;
3975 +
3976 ++ if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
3977 ++ lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1,
3978 ++ &cached_state);
3979 ++
3980 + /*
3981 + * We want to drop from the next block forward in case this new size is
3982 + * not block aligned since we will be keeping the last block of the
3983 +@@ -4786,7 +4792,6 @@ search_again:
3984 + goto out;
3985 + }
3986 +
3987 +- path->leave_spinning = 1;
3988 + ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3989 + if (ret < 0)
3990 + goto out;
3991 +@@ -4938,7 +4943,6 @@ delete:
3992 + root == fs_info->tree_root)) {
3993 + struct btrfs_ref ref = { 0 };
3994 +
3995 +- btrfs_set_path_blocking(path);
3996 + bytes_deleted += extent_num_bytes;
3997 +
3998 + btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
3999 +@@ -5014,6 +5018,8 @@ out:
4000 + if (!ret && last_size > new_size)
4001 + last_size = new_size;
4002 + btrfs_ordered_update_i_size(inode, last_size, NULL);
4003 ++ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start,
4004 ++ (u64)-1, &cached_state);
4005 + }
4006 +
4007 + btrfs_free_path(path);
4008 +@@ -10464,6 +10470,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
4009 + struct btrfs_root *root = BTRFS_I(inode)->root;
4010 + struct btrfs_key ins;
4011 + u64 cur_offset = start;
4012 ++ u64 clear_offset = start;
4013 + u64 i_size;
4014 + u64 cur_bytes;
4015 + u64 last_alloc = (u64)-1;
4016 +@@ -10498,6 +10505,15 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
4017 + btrfs_end_transaction(trans);
4018 + break;
4019 + }
4020 ++
4021 ++ /*
4022 ++ * We've reserved this space, and thus converted it from
4023 ++ * ->bytes_may_use to ->bytes_reserved. If any error happens
4024 ++ * from here on out, we only need to clear our reservation
4025 ++ * for the remaining unreserved area, so advance our
4026 ++ * clear_offset by our extent size.
4027 ++ */
4028 ++ clear_offset += ins.offset;
4029 + btrfs_dec_block_group_reservations(fs_info, ins.objectid);
4030 +
4031 + last_alloc = ins.offset;
4032 +@@ -10578,9 +10594,9 @@ next:
4033 + if (own_trans)
4034 + btrfs_end_transaction(trans);
4035 + }
4036 +- if (cur_offset < end)
4037 +- btrfs_free_reserved_data_space(inode, NULL, cur_offset,
4038 +- end - cur_offset + 1);
4039 ++ if (clear_offset < end)
4040 ++ btrfs_free_reserved_data_space(inode, NULL, clear_offset,
4041 ++ end - clear_offset + 1);
4042 + return ret;
4043 + }
4044 +
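
The __btrfs_prealloc_file_range() fix separates two cursors: cur_offset tracks how far allocation got, while clear_offset tracks how much of the up-front reservation was actually converted to allocated extents; on failure only [clear_offset, end] is released. The bookkeeping in isolation (sizes invented):

#include <stdio.h>

int main(void)
{
	unsigned long long start = 0, end = 4095;	/* reserved range */
	unsigned long long cur_offset = start, clear_offset = start;
	unsigned long long extents[] = { 1024, 1024 };	/* two allocations succeed */

	for (unsigned i = 0; i < 2; i++) {
		/* allocation succeeded: this chunk became ->bytes_reserved */
		clear_offset += extents[i];
		cur_offset += extents[i];
	}
	/* third allocation fails: free only what was never allocated */
	if (clear_offset < end)
		printf("free_reserved_data_space(start=%llu, len=%llu)\n",
		       clear_offset, end - clear_offset + 1);	/* 2048, 2048 */
	return 0;
}
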
4045 +diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
4046 +index 6240a5a1f2c0..00e1ef4f7979 100644
4047 +--- a/fs/btrfs/ordered-data.c
4048 ++++ b/fs/btrfs/ordered-data.c
4049 +@@ -690,10 +690,15 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
4050 + }
4051 + btrfs_start_ordered_extent(inode, ordered, 1);
4052 + end = ordered->file_offset;
4053 ++ /*
4054 ++ * If the ordered extent had an error, save the error but don't
4055 ++ * exit without waiting first for all other ordered extents in
4056 ++ * the range to complete.
4057 ++ */
4058 + if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
4059 + ret = -EIO;
4060 + btrfs_put_ordered_extent(ordered);
4061 +- if (ret || end == 0 || end == start)
4062 ++ if (end == 0 || end == start)
4063 + break;
4064 + end--;
4065 + }
4066 +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
4067 +index 50517221638a..286c8c11c8d3 100644
4068 +--- a/fs/btrfs/qgroup.c
4069 ++++ b/fs/btrfs/qgroup.c
4070 +@@ -4018,3 +4018,16 @@ out:
4071 + }
4072 + return ret;
4073 + }
4074 ++
4075 ++void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
4076 ++{
4077 ++ struct btrfs_qgroup_extent_record *entry;
4078 ++ struct btrfs_qgroup_extent_record *next;
4079 ++ struct rb_root *root;
4080 ++
4081 ++ root = &trans->delayed_refs.dirty_extent_root;
4082 ++ rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
4083 ++ ulist_free(entry->old_roots);
4084 ++ kfree(entry);
4085 ++ }
4086 ++}
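
btrfs_qgroup_destroy_extent_records() drains a whole rbtree with rbtree_postorder_for_each_entry_safe(), which is free-safe because post-order visits every child before the node that owns it. The same idea on an ordinary binary tree (self-contained; this is not the kernel rbtree API):

#include <stdlib.h>

struct node {
	struct node *left, *right;
	void *payload;			/* stands in for old_roots */
};

/* Post-order walk: both subtrees are released before the node itself,
 * so we never touch freed memory through a parent pointer. */
static void free_tree(struct node *n)
{
	if (!n)
		return;
	free_tree(n->left);
	free_tree(n->right);
	free(n->payload);		/* like ulist_free(entry->old_roots) */
	free(n);			/* like kfree(entry) */
}

int main(void)
{
	struct node *root = calloc(1, sizeof(*root));

	root->left = calloc(1, sizeof(*root));
	root->left->payload = malloc(8);
	free_tree(root);
	return 0;
}
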
4087 +diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
4088 +index 46ba7bd2961c..17e8ac992c50 100644
4089 +--- a/fs/btrfs/qgroup.h
4090 ++++ b/fs/btrfs/qgroup.h
4091 +@@ -414,5 +414,6 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
4092 + u64 last_snapshot);
4093 + int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
4094 + struct btrfs_root *root, struct extent_buffer *eb);
4095 ++void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
4096 +
4097 + #endif
4098 +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
4099 +index ceffec752234..98b6903e3938 100644
4100 +--- a/fs/btrfs/transaction.c
4101 ++++ b/fs/btrfs/transaction.c
4102 +@@ -51,6 +51,8 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
4103 + BUG_ON(!list_empty(&transaction->list));
4104 + WARN_ON(!RB_EMPTY_ROOT(
4105 + &transaction->delayed_refs.href_root.rb_root));
4106 ++ WARN_ON(!RB_EMPTY_ROOT(
4107 ++ &transaction->delayed_refs.dirty_extent_root));
4108 + if (transaction->delayed_refs.pending_csums)
4109 + btrfs_err(transaction->fs_info,
4110 + "pending csums is %llu",
4111 +diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
4112 +index f91db24bbf3b..a064b408d841 100644
4113 +--- a/fs/ecryptfs/crypto.c
4114 ++++ b/fs/ecryptfs/crypto.c
4115 +@@ -311,8 +311,10 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
4116 + struct extent_crypt_result ecr;
4117 + int rc = 0;
4118 +
4119 +- BUG_ON(!crypt_stat || !crypt_stat->tfm
4120 +- || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
4121 ++ if (!crypt_stat || !crypt_stat->tfm
4122 ++ || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
4123 ++ return -EINVAL;
4124 ++
4125 + if (unlikely(ecryptfs_verbosity > 0)) {
4126 + ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
4127 + crypt_stat->key_size);
4128 +diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
4129 +index 216fbe6a4837..4dc09638de8f 100644
4130 +--- a/fs/ecryptfs/keystore.c
4131 ++++ b/fs/ecryptfs/keystore.c
4132 +@@ -1304,7 +1304,7 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat,
4133 + printk(KERN_WARNING "Tag 1 packet contains key larger "
4134 + "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n");
4135 + rc = -EINVAL;
4136 +- goto out;
4137 ++ goto out_free;
4138 + }
4139 + memcpy((*new_auth_tok)->session_key.encrypted_key,
4140 + &data[(*packet_size)], (body_size - (ECRYPTFS_SIG_SIZE + 2)));
4141 +diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
4142 +index d668e60b85b5..c05ca39aa449 100644
4143 +--- a/fs/ecryptfs/messaging.c
4144 ++++ b/fs/ecryptfs/messaging.c
4145 +@@ -379,6 +379,7 @@ int __init ecryptfs_init_messaging(void)
4146 + * ecryptfs_message_buf_len),
4147 + GFP_KERNEL);
4148 + if (!ecryptfs_msg_ctx_arr) {
4149 ++ kfree(ecryptfs_daemon_hash);
4150 + rc = -ENOMEM;
4151 + goto out;
4152 + }
4153 +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
4154 +index 0b202e00d93f..5aba67a504cf 100644
4155 +--- a/fs/ext4/balloc.c
4156 ++++ b/fs/ext4/balloc.c
4157 +@@ -270,6 +270,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
4158 + ext4_group_t ngroups = ext4_get_groups_count(sb);
4159 + struct ext4_group_desc *desc;
4160 + struct ext4_sb_info *sbi = EXT4_SB(sb);
4161 ++ struct buffer_head *bh_p;
4162 +
4163 + if (block_group >= ngroups) {
4164 + ext4_error(sb, "block_group >= groups_count - block_group = %u,"
4165 +@@ -280,7 +281,14 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
4166 +
4167 + group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
4168 + offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
4169 +- if (!sbi->s_group_desc[group_desc]) {
4170 ++ bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
4171 ++ /*
4172 ++ * sbi_array_rcu_deref returns with rcu unlocked; this is ok because
4173 ++ * the pointer we fetched will not be dereferenced through the array
4174 ++ * again. Per the usage in add_new_gdb(), resize only replaces the
4175 ++ * array pointer, not the values, so the fetched value stays valid.
4176 ++ */
4177 ++ if (!bh_p) {
4178 + ext4_error(sb, "Group descriptor not loaded - "
4179 + "block_group = %u, group_desc = %u, desc = %u",
4180 + block_group, group_desc, offset);
4181 +@@ -288,10 +296,10 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
4182 + }
4183 +
4184 + desc = (struct ext4_group_desc *)(
4185 +- (__u8 *)sbi->s_group_desc[group_desc]->b_data +
4186 ++ (__u8 *)bh_p->b_data +
4187 + offset * EXT4_DESC_SIZE(sb));
4188 + if (bh)
4189 +- *bh = sbi->s_group_desc[group_desc];
4190 ++ *bh = bh_p;
4191 + return desc;
4192 + }
4193 +
4194 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
4195 +index e2f65b565c1f..d576addfdd03 100644
4196 +--- a/fs/ext4/ext4.h
4197 ++++ b/fs/ext4/ext4.h
4198 +@@ -1396,7 +1396,7 @@ struct ext4_sb_info {
4199 + loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */
4200 + struct buffer_head * s_sbh; /* Buffer containing the super block */
4201 + struct ext4_super_block *s_es; /* Pointer to the super block in the buffer */
4202 +- struct buffer_head **s_group_desc;
4203 ++ struct buffer_head * __rcu *s_group_desc;
4204 + unsigned int s_mount_opt;
4205 + unsigned int s_mount_opt2;
4206 + unsigned int s_mount_flags;
4207 +@@ -1458,7 +1458,7 @@ struct ext4_sb_info {
4208 + #endif
4209 +
4210 + /* for buddy allocator */
4211 +- struct ext4_group_info ***s_group_info;
4212 ++ struct ext4_group_info ** __rcu *s_group_info;
4213 + struct inode *s_buddy_cache;
4214 + spinlock_t s_md_lock;
4215 + unsigned short *s_mb_offsets;
4216 +@@ -1508,7 +1508,7 @@ struct ext4_sb_info {
4217 + unsigned int s_extent_max_zeroout_kb;
4218 +
4219 + unsigned int s_log_groups_per_flex;
4220 +- struct flex_groups *s_flex_groups;
4221 ++ struct flex_groups * __rcu *s_flex_groups;
4222 + ext4_group_t s_flex_groups_allocated;
4223 +
4224 + /* workqueue for reserved extent conversions (buffered io) */
4225 +@@ -1548,8 +1548,11 @@ struct ext4_sb_info {
4226 + struct ratelimit_state s_warning_ratelimit_state;
4227 + struct ratelimit_state s_msg_ratelimit_state;
4228 +
4229 +- /* Barrier between changing inodes' journal flags and writepages ops. */
4230 +- struct percpu_rw_semaphore s_journal_flag_rwsem;
4231 ++ /*
4232 ++ * Barrier between writepages ops and changing any inode's JOURNAL_DATA
4233 ++ * or EXTENTS flag.
4234 ++ */
4235 ++ struct percpu_rw_semaphore s_writepages_rwsem;
4236 + struct dax_device *s_daxdev;
4237 + };
4238 +
4239 +@@ -1569,6 +1572,23 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
4240 + ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
4241 + }
4242 +
4243 ++/*
4244 ++ * Returns: sbi->field[index]
4245 ++ * Used to access an array element from the following sbi fields which require
4246 ++ * rcu protection to avoid dereferencing an invalid pointer due to reassignment
4247 ++ * - s_group_desc
4248 ++ * - s_group_info
4249 ++ * - s_flex_groups
4250 ++ */
4251 ++#define sbi_array_rcu_deref(sbi, field, index) \
4252 ++({ \
4253 ++ typeof(*((sbi)->field)) _v; \
4254 ++ rcu_read_lock(); \
4255 ++ _v = ((typeof(_v)*)rcu_dereference((sbi)->field))[index]; \
4256 ++ rcu_read_unlock(); \
4257 ++ _v; \
4258 ++})
4259 ++
4260 + /*
4261 + * Inode dynamic state flags
4262 + */
4263 +@@ -2666,6 +2686,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
4264 + extern bool ext4_empty_dir(struct inode *inode);
4265 +
4266 + /* resize.c */
4267 ++extern void ext4_kvfree_array_rcu(void *to_free);
4268 + extern int ext4_group_add(struct super_block *sb,
4269 + struct ext4_new_group_data *input);
4270 + extern int ext4_group_extend(struct super_block *sb,
4271 +@@ -2913,13 +2934,13 @@ static inline
4272 + struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
4273 + ext4_group_t group)
4274 + {
4275 +- struct ext4_group_info ***grp_info;
4276 ++ struct ext4_group_info **grp_info;
4277 + long indexv, indexh;
4278 + BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
4279 +- grp_info = EXT4_SB(sb)->s_group_info;
4280 + indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
4281 + indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
4282 +- return grp_info[indexv][indexh];
4283 ++ grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
4284 ++ return grp_info[indexh];
4285 + }
4286 +
4287 + /*
4288 +@@ -2969,7 +2990,7 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
4289 + !inode_is_locked(inode));
4290 + down_write(&EXT4_I(inode)->i_data_sem);
4291 + if (newsize > EXT4_I(inode)->i_disksize)
4292 +- EXT4_I(inode)->i_disksize = newsize;
4293 ++ WRITE_ONCE(EXT4_I(inode)->i_disksize, newsize);
4294 + up_write(&EXT4_I(inode)->i_data_sem);
4295 + }
4296 +
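
sbi_array_rcu_deref() exists because online resize can replace the whole s_group_desc / s_group_info / s_flex_groups arrays: a reader must snapshot the array pointer once, under protection, and index into that snapshot. A much-simplified single-writer sketch using C11 atomics in place of rcu_dereference()/rcu_assign_pointer(); note that no grace period is modeled, so the free in grow() is annotated as unsafe:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static _Atomic(int *) group_desc;	/* the resizable array */

static int read_entry(size_t index)
{
	/* Like sbi_array_rcu_deref(): snapshot the pointer, then index it. */
	int *snap = atomic_load_explicit(&group_desc, memory_order_acquire);
	return snap[index];
}

static void grow(size_t old_n, size_t new_n)
{
	int *bigger = calloc(new_n, sizeof(*bigger));
	int *old = atomic_load_explicit(&group_desc, memory_order_relaxed);

	memcpy(bigger, old, old_n * sizeof(*old));
	/* Publish the new array before retiring the old one; real RCU
	 * would defer the free until every reader is done with 'old'. */
	atomic_store_explicit(&group_desc, bigger, memory_order_release);
	free(old);	/* UNSAFE without a grace period: illustration only */
}

int main(void)
{
	int *first = calloc(4, sizeof(*first));

	first[2] = 42;
	atomic_store(&group_desc, first);
	printf("%d\n", read_entry(2));	/* 42 */
	grow(4, 8);
	printf("%d\n", read_entry(2));	/* still 42 after the swap */
	free(atomic_load(&group_desc));
	return 0;
}
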
4297 +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
4298 +index 564e2ceb8417..a6288730210e 100644
4299 +--- a/fs/ext4/ialloc.c
4300 ++++ b/fs/ext4/ialloc.c
4301 +@@ -325,11 +325,13 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
4302 +
4303 + percpu_counter_inc(&sbi->s_freeinodes_counter);
4304 + if (sbi->s_log_groups_per_flex) {
4305 +- ext4_group_t f = ext4_flex_group(sbi, block_group);
4306 ++ struct flex_groups *fg;
4307 +
4308 +- atomic_inc(&sbi->s_flex_groups[f].free_inodes);
4309 ++ fg = sbi_array_rcu_deref(sbi, s_flex_groups,
4310 ++ ext4_flex_group(sbi, block_group));
4311 ++ atomic_inc(&fg->free_inodes);
4312 + if (is_directory)
4313 +- atomic_dec(&sbi->s_flex_groups[f].used_dirs);
4314 ++ atomic_dec(&fg->used_dirs);
4315 + }
4316 + BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
4317 + fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
4318 +@@ -365,12 +367,13 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
4319 + int flex_size, struct orlov_stats *stats)
4320 + {
4321 + struct ext4_group_desc *desc;
4322 +- struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
4323 +
4324 + if (flex_size > 1) {
4325 +- stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
4326 +- stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
4327 +- stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
4328 ++ struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
4329 ++ s_flex_groups, g);
4330 ++ stats->free_inodes = atomic_read(&fg->free_inodes);
4331 ++ stats->free_clusters = atomic64_read(&fg->free_clusters);
4332 ++ stats->used_dirs = atomic_read(&fg->used_dirs);
4333 + return;
4334 + }
4335 +
4336 +@@ -1051,7 +1054,8 @@ got:
4337 + if (sbi->s_log_groups_per_flex) {
4338 + ext4_group_t f = ext4_flex_group(sbi, group);
4339 +
4340 +- atomic_inc(&sbi->s_flex_groups[f].used_dirs);
4341 ++ atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
4342 ++ f)->used_dirs);
4343 + }
4344 + }
4345 + if (ext4_has_group_desc_csum(sb)) {
4346 +@@ -1074,7 +1078,8 @@ got:
4347 +
4348 + if (sbi->s_log_groups_per_flex) {
4349 + flex_group = ext4_flex_group(sbi, group);
4350 +- atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
4351 ++ atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
4352 ++ flex_group)->free_inodes);
4353 + }
4354 +
4355 + inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
4356 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
4357 +index 76a38ef5f226..70ef4a714b33 100644
4358 +--- a/fs/ext4/inode.c
4359 ++++ b/fs/ext4/inode.c
4360 +@@ -2573,7 +2573,7 @@ update_disksize:
4361 + * truncate are avoided by checking i_size under i_data_sem.
4362 + */
4363 + disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
4364 +- if (disksize > EXT4_I(inode)->i_disksize) {
4365 ++ if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
4366 + int err2;
4367 + loff_t i_size;
4368 +
4369 +@@ -2734,7 +2734,7 @@ static int ext4_writepages(struct address_space *mapping,
4370 + if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
4371 + return -EIO;
4372 +
4373 +- percpu_down_read(&sbi->s_journal_flag_rwsem);
4374 ++ percpu_down_read(&sbi->s_writepages_rwsem);
4375 + trace_ext4_writepages(inode, wbc);
4376 +
4377 + /*
4378 +@@ -2955,7 +2955,7 @@ unplug:
4379 + out_writepages:
4380 + trace_ext4_writepages_result(inode, wbc, ret,
4381 + nr_to_write - wbc->nr_to_write);
4382 +- percpu_up_read(&sbi->s_journal_flag_rwsem);
4383 ++ percpu_up_read(&sbi->s_writepages_rwsem);
4384 + return ret;
4385 + }
4386 +
4387 +@@ -2970,13 +2970,13 @@ static int ext4_dax_writepages(struct address_space *mapping,
4388 + if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
4389 + return -EIO;
4390 +
4391 +- percpu_down_read(&sbi->s_journal_flag_rwsem);
4392 ++ percpu_down_read(&sbi->s_writepages_rwsem);
4393 + trace_ext4_writepages(inode, wbc);
4394 +
4395 + ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
4396 + trace_ext4_writepages_result(inode, wbc, ret,
4397 + nr_to_write - wbc->nr_to_write);
4398 +- percpu_up_read(&sbi->s_journal_flag_rwsem);
4399 ++ percpu_up_read(&sbi->s_writepages_rwsem);
4400 + return ret;
4401 + }
4402 +
4403 +@@ -6185,7 +6185,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
4404 + }
4405 + }
4406 +
4407 +- percpu_down_write(&sbi->s_journal_flag_rwsem);
4408 ++ percpu_down_write(&sbi->s_writepages_rwsem);
4409 + jbd2_journal_lock_updates(journal);
4410 +
4411 + /*
4412 +@@ -6202,7 +6202,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
4413 + err = jbd2_journal_flush(journal);
4414 + if (err < 0) {
4415 + jbd2_journal_unlock_updates(journal);
4416 +- percpu_up_write(&sbi->s_journal_flag_rwsem);
4417 ++ percpu_up_write(&sbi->s_writepages_rwsem);
4418 + return err;
4419 + }
4420 + ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4421 +@@ -6210,7 +6210,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
4422 + ext4_set_aops(inode);
4423 +
4424 + jbd2_journal_unlock_updates(journal);
4425 +- percpu_up_write(&sbi->s_journal_flag_rwsem);
4426 ++ percpu_up_write(&sbi->s_writepages_rwsem);
4427 +
4428 + if (val)
4429 + up_write(&EXT4_I(inode)->i_mmap_sem);
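
The inode.c hunks rename s_journal_flag_rwsem to s_writepages_rwsem and widen its role: every writepages entry point takes the cheap per-CPU read side, while rare operations that must exclude writeback entirely (the data-journalling toggle here, inode migration in fs/ext4/migrate.c below) take the write side. A minimal sketch of the pattern, with hypothetical function names:

#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore writepages_rwsem;

/* hot path: near-zero cost for readers while no writer is pending */
static void writeback_path(void)
{
	percpu_down_read(&writepages_rwsem);
	/* ... write pages; journalling mode cannot change here ... */
	percpu_up_read(&writepages_rwsem);
}

/* cold path: blocks until every in-flight reader has drained */
static void switch_journalling_mode(void)
{
	percpu_down_write(&writepages_rwsem);
	/* ... safe to swap the inode's address_space operations ... */
	percpu_up_write(&writepages_rwsem);
}

percpu_init_rwsem() must run once before first use, which the super.c hunk below does at mount time. The READ_ONCE() added around i_disksize in the first hunk is the matching annotation for a field that is now read without holding the lock.
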
4430 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
4431 +index a3e2767bdf2f..c76ffc259d19 100644
4432 +--- a/fs/ext4/mballoc.c
4433 ++++ b/fs/ext4/mballoc.c
4434 +@@ -2356,7 +2356,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
4435 + {
4436 + struct ext4_sb_info *sbi = EXT4_SB(sb);
4437 + unsigned size;
4438 +- struct ext4_group_info ***new_groupinfo;
4439 ++ struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
4440 +
4441 + size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
4442 + EXT4_DESC_PER_BLOCK_BITS(sb);
4443 +@@ -2369,13 +2369,16 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
4444 + ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
4445 + return -ENOMEM;
4446 + }
4447 +- if (sbi->s_group_info) {
4448 +- memcpy(new_groupinfo, sbi->s_group_info,
4449 ++ rcu_read_lock();
4450 ++ old_groupinfo = rcu_dereference(sbi->s_group_info);
4451 ++ if (old_groupinfo)
4452 ++ memcpy(new_groupinfo, old_groupinfo,
4453 + sbi->s_group_info_size * sizeof(*sbi->s_group_info));
4454 +- kvfree(sbi->s_group_info);
4455 +- }
4456 +- sbi->s_group_info = new_groupinfo;
4457 ++ rcu_read_unlock();
4458 ++ rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
4459 + sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
4460 ++ if (old_groupinfo)
4461 ++ ext4_kvfree_array_rcu(old_groupinfo);
4462 + ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
4463 + sbi->s_group_info_size);
4464 + return 0;
4465 +@@ -2387,6 +2390,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
4466 + {
4467 + int i;
4468 + int metalen = 0;
4469 ++ int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
4470 + struct ext4_sb_info *sbi = EXT4_SB(sb);
4471 + struct ext4_group_info **meta_group_info;
4472 + struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
4473 +@@ -2405,12 +2409,12 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
4474 + "for a buddy group");
4475 + goto exit_meta_group_info;
4476 + }
4477 +- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
4478 +- meta_group_info;
4479 ++ rcu_read_lock();
4480 ++ rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
4481 ++ rcu_read_unlock();
4482 + }
4483 +
4484 +- meta_group_info =
4485 +- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
4486 ++ meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
4487 + i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
4488 +
4489 + meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
4490 +@@ -2458,8 +2462,13 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
4491 + exit_group_info:
4492 + /* If a meta_group_info table has been allocated, release it now */
4493 + if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
4494 +- kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
4495 +- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
4496 ++ struct ext4_group_info ***group_info;
4497 ++
4498 ++ rcu_read_lock();
4499 ++ group_info = rcu_dereference(sbi->s_group_info);
4500 ++ kfree(group_info[idx]);
4501 ++ group_info[idx] = NULL;
4502 ++ rcu_read_unlock();
4503 + }
4504 + exit_meta_group_info:
4505 + return -ENOMEM;
4506 +@@ -2472,6 +2481,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
4507 + struct ext4_sb_info *sbi = EXT4_SB(sb);
4508 + int err;
4509 + struct ext4_group_desc *desc;
4510 ++ struct ext4_group_info ***group_info;
4511 + struct kmem_cache *cachep;
4512 +
4513 + err = ext4_mb_alloc_groupinfo(sb, ngroups);
4514 +@@ -2507,11 +2517,16 @@ err_freebuddy:
4515 + while (i-- > 0)
4516 + kmem_cache_free(cachep, ext4_get_group_info(sb, i));
4517 + i = sbi->s_group_info_size;
4518 ++ rcu_read_lock();
4519 ++ group_info = rcu_dereference(sbi->s_group_info);
4520 + while (i-- > 0)
4521 +- kfree(sbi->s_group_info[i]);
4522 ++ kfree(group_info[i]);
4523 ++ rcu_read_unlock();
4524 + iput(sbi->s_buddy_cache);
4525 + err_freesgi:
4526 +- kvfree(sbi->s_group_info);
4527 ++ rcu_read_lock();
4528 ++ kvfree(rcu_dereference(sbi->s_group_info));
4529 ++ rcu_read_unlock();
4530 + return -ENOMEM;
4531 + }
4532 +
4533 +@@ -2700,7 +2715,7 @@ int ext4_mb_release(struct super_block *sb)
4534 + ext4_group_t ngroups = ext4_get_groups_count(sb);
4535 + ext4_group_t i;
4536 + int num_meta_group_infos;
4537 +- struct ext4_group_info *grinfo;
4538 ++ struct ext4_group_info *grinfo, ***group_info;
4539 + struct ext4_sb_info *sbi = EXT4_SB(sb);
4540 + struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
4541 +
4542 +@@ -2719,9 +2734,12 @@ int ext4_mb_release(struct super_block *sb)
4543 + num_meta_group_infos = (ngroups +
4544 + EXT4_DESC_PER_BLOCK(sb) - 1) >>
4545 + EXT4_DESC_PER_BLOCK_BITS(sb);
4546 ++ rcu_read_lock();
4547 ++ group_info = rcu_dereference(sbi->s_group_info);
4548 + for (i = 0; i < num_meta_group_infos; i++)
4549 +- kfree(sbi->s_group_info[i]);
4550 +- kvfree(sbi->s_group_info);
4551 ++ kfree(group_info[i]);
4552 ++ kvfree(group_info);
4553 ++ rcu_read_unlock();
4554 + }
4555 + kfree(sbi->s_mb_offsets);
4556 + kfree(sbi->s_mb_maxs);
4557 +@@ -3020,7 +3038,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
4558 + ext4_group_t flex_group = ext4_flex_group(sbi,
4559 + ac->ac_b_ex.fe_group);
4560 + atomic64_sub(ac->ac_b_ex.fe_len,
4561 +- &sbi->s_flex_groups[flex_group].free_clusters);
4562 ++ &sbi_array_rcu_deref(sbi, s_flex_groups,
4563 ++ flex_group)->free_clusters);
4564 + }
4565 +
4566 + err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4567 +@@ -4914,7 +4933,8 @@ do_more:
4568 + if (sbi->s_log_groups_per_flex) {
4569 + ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4570 + atomic64_add(count_clusters,
4571 +- &sbi->s_flex_groups[flex_group].free_clusters);
4572 ++ &sbi_array_rcu_deref(sbi, s_flex_groups,
4573 ++ flex_group)->free_clusters);
4574 + }
4575 +
4576 + /*
4577 +@@ -5071,7 +5091,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
4578 + if (sbi->s_log_groups_per_flex) {
4579 + ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4580 + atomic64_add(clusters_freed,
4581 +- &sbi->s_flex_groups[flex_group].free_clusters);
4582 ++ &sbi_array_rcu_deref(sbi, s_flex_groups,
4583 ++ flex_group)->free_clusters);
4584 + }
4585 +
4586 + ext4_mb_unload_buddy(&e4b);
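
The mballoc.c conversion of s_group_info follows the standard RCU publish-then-defer-free discipline: allocate the enlarged table, copy the old slots while holding only rcu_read_lock(), publish the new table with rcu_assign_pointer(), and free the old one after a grace period. Condensed sketch of the resize step above (size bookkeeping abbreviated; concurrent resizes are excluded by higher-level serialization, RCU only protects the readers):

static int grow_group_info(struct ext4_sb_info *sbi,
			   size_t new_bytes, size_t old_bytes)
{
	struct ext4_group_info ***old, ***new;

	new = kvzalloc(new_bytes, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	rcu_read_lock();
	old = rcu_dereference(sbi->s_group_info);
	if (old)
		memcpy(new, old, old_bytes);
	rcu_read_unlock();

	rcu_assign_pointer(sbi->s_group_info, new);	/* publish */
	if (old)
		ext4_kvfree_array_rcu(old);  /* free after a grace period */
	return 0;
}
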
4587 +diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
4588 +index b1e4d359f73b..be4ee3dcc5cf 100644
4589 +--- a/fs/ext4/migrate.c
4590 ++++ b/fs/ext4/migrate.c
4591 +@@ -427,6 +427,7 @@ static int free_ext_block(handle_t *handle, struct inode *inode)
4592 +
4593 + int ext4_ext_migrate(struct inode *inode)
4594 + {
4595 ++ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4596 + handle_t *handle;
4597 + int retval = 0, i;
4598 + __le32 *i_data;
4599 +@@ -451,6 +452,8 @@ int ext4_ext_migrate(struct inode *inode)
4600 + */
4601 + return retval;
4602 +
4603 ++ percpu_down_write(&sbi->s_writepages_rwsem);
4604 ++
4605 + /*
4606 + * Worst case we can touch the allocation bitmaps, a bgd
4607 + * block, and a block to link in the orphan list. We do need
4608 +@@ -461,7 +464,7 @@ int ext4_ext_migrate(struct inode *inode)
4609 +
4610 + if (IS_ERR(handle)) {
4611 + retval = PTR_ERR(handle);
4612 +- return retval;
4613 ++ goto out_unlock;
4614 + }
4615 + goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
4616 + EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
4617 +@@ -472,7 +475,7 @@ int ext4_ext_migrate(struct inode *inode)
4618 + if (IS_ERR(tmp_inode)) {
4619 + retval = PTR_ERR(tmp_inode);
4620 + ext4_journal_stop(handle);
4621 +- return retval;
4622 ++ goto out_unlock;
4623 + }
4624 + i_size_write(tmp_inode, i_size_read(inode));
4625 + /*
4626 +@@ -514,7 +517,7 @@ int ext4_ext_migrate(struct inode *inode)
4627 + */
4628 + ext4_orphan_del(NULL, tmp_inode);
4629 + retval = PTR_ERR(handle);
4630 +- goto out;
4631 ++ goto out_tmp_inode;
4632 + }
4633 +
4634 + ei = EXT4_I(inode);
4635 +@@ -595,10 +598,11 @@ err_out:
4636 + /* Reset the extent details */
4637 + ext4_ext_tree_init(handle, tmp_inode);
4638 + ext4_journal_stop(handle);
4639 +-out:
4640 ++out_tmp_inode:
4641 + unlock_new_inode(tmp_inode);
4642 + iput(tmp_inode);
4643 +-
4644 ++out_unlock:
4645 ++ percpu_up_write(&sbi->s_writepages_rwsem);
4646 + return retval;
4647 + }
4648 +
4649 +@@ -608,7 +612,8 @@ out:
4650 + int ext4_ind_migrate(struct inode *inode)
4651 + {
4652 + struct ext4_extent_header *eh;
4653 +- struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
4654 ++ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4655 ++ struct ext4_super_block *es = sbi->s_es;
4656 + struct ext4_inode_info *ei = EXT4_I(inode);
4657 + struct ext4_extent *ex;
4658 + unsigned int i, len;
4659 +@@ -632,9 +637,13 @@ int ext4_ind_migrate(struct inode *inode)
4660 + if (test_opt(inode->i_sb, DELALLOC))
4661 + ext4_alloc_da_blocks(inode);
4662 +
4663 ++ percpu_down_write(&sbi->s_writepages_rwsem);
4664 ++
4665 + handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
4666 +- if (IS_ERR(handle))
4667 +- return PTR_ERR(handle);
4668 ++ if (IS_ERR(handle)) {
4669 ++ ret = PTR_ERR(handle);
4670 ++ goto out_unlock;
4671 ++ }
4672 +
4673 + down_write(&EXT4_I(inode)->i_data_sem);
4674 + ret = ext4_ext_check_inode(inode);
4675 +@@ -669,5 +678,7 @@ int ext4_ind_migrate(struct inode *inode)
4676 + errout:
4677 + ext4_journal_stop(handle);
4678 + up_write(&EXT4_I(inode)->i_data_sem);
4679 ++out_unlock:
4680 ++ percpu_up_write(&sbi->s_writepages_rwsem);
4681 + return ret;
4682 + }
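
Besides holding s_writepages_rwsem across the whole migration, the migrate.c hunks straighten the error handling into the usual kernel goto-unwind ladder: each acquired resource gets a label, failures jump to the label that releases everything taken so far, and releases run in reverse acquisition order. The shape in miniature, as a standalone userspace sketch:

#include <stdlib.h>

static int do_work_with_unwind(void)
{
	int ret = -1;
	char *a, *b;

	a = malloc(64);
	if (!a)
		goto out;		/* nothing held yet */

	b = malloc(64);
	if (!b)
		goto out_free_a;	/* only 'a' to undo */

	ret = 0;			/* ... the real work ... */

	free(b);
out_free_a:
	free(a);
out:
	return ret;
}

int main(void)
{
	return do_work_with_unwind();
}
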
4683 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
4684 +index 94d84910dc1e..a564d0289a70 100644
4685 +--- a/fs/ext4/namei.c
4686 ++++ b/fs/ext4/namei.c
4687 +@@ -1507,6 +1507,7 @@ restart:
4688 + /*
4689 + * We deal with the read-ahead logic here.
4690 + */
4691 ++ cond_resched();
4692 + if (ra_ptr >= ra_max) {
4693 + /* Refill the readahead buffer */
4694 + ra_ptr = 0;
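
The one-line namei.c change drops a cond_resched() into the directory-block readahead loop. Searching a very large directory can keep this loop in kernel space for a long stretch; the call gives the scheduler a preemption point on every pass, avoiding soft-lockup reports on non-preemptible kernels. Generic shape, with a hypothetical per-block helper:

/* sketch: yield periodically inside a potentially long scan */
for (block = 0; block < nblocks; block++) {
	cond_resched();		/* no-op unless a reschedule is due */
	process_block(block);	/* hypothetical per-block work */
}
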
4695 +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
4696 +index c0e9aef376a7..080e25f6ef56 100644
4697 +--- a/fs/ext4/resize.c
4698 ++++ b/fs/ext4/resize.c
4699 +@@ -17,6 +17,33 @@
4700 +
4701 + #include "ext4_jbd2.h"
4702 +
4703 ++struct ext4_rcu_ptr {
4704 ++ struct rcu_head rcu;
4705 ++ void *ptr;
4706 ++};
4707 ++
4708 ++static void ext4_rcu_ptr_callback(struct rcu_head *head)
4709 ++{
4710 ++ struct ext4_rcu_ptr *ptr;
4711 ++
4712 ++ ptr = container_of(head, struct ext4_rcu_ptr, rcu);
4713 ++ kvfree(ptr->ptr);
4714 ++ kfree(ptr);
4715 ++}
4716 ++
4717 ++void ext4_kvfree_array_rcu(void *to_free)
4718 ++{
4719 ++ struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
4720 ++
4721 ++ if (ptr) {
4722 ++ ptr->ptr = to_free;
4723 ++ call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
4724 ++ return;
4725 ++ }
4726 ++ synchronize_rcu();
4727 ++ kvfree(to_free);
4728 ++}
4729 ++
4730 + int ext4_resize_begin(struct super_block *sb)
4731 + {
4732 + struct ext4_sb_info *sbi = EXT4_SB(sb);
4733 +@@ -560,8 +587,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
4734 + brelse(gdb);
4735 + goto out;
4736 + }
4737 +- memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
4738 +- gdb->b_size);
4739 ++ memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
4740 ++ s_group_desc, j)->b_data, gdb->b_size);
4741 + set_buffer_uptodate(gdb);
4742 +
4743 + err = ext4_handle_dirty_metadata(handle, NULL, gdb);
4744 +@@ -879,13 +906,15 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
4745 + }
4746 + brelse(dind);
4747 +
4748 +- o_group_desc = EXT4_SB(sb)->s_group_desc;
4749 ++ rcu_read_lock();
4750 ++ o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
4751 + memcpy(n_group_desc, o_group_desc,
4752 + EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
4753 ++ rcu_read_unlock();
4754 + n_group_desc[gdb_num] = gdb_bh;
4755 +- EXT4_SB(sb)->s_group_desc = n_group_desc;
4756 ++ rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
4757 + EXT4_SB(sb)->s_gdb_count++;
4758 +- kvfree(o_group_desc);
4759 ++ ext4_kvfree_array_rcu(o_group_desc);
4760 +
4761 + le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
4762 + err = ext4_handle_dirty_super(handle, sb);
4763 +@@ -929,9 +958,11 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
4764 + return err;
4765 + }
4766 +
4767 +- o_group_desc = EXT4_SB(sb)->s_group_desc;
4768 ++ rcu_read_lock();
4769 ++ o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
4770 + memcpy(n_group_desc, o_group_desc,
4771 + EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
4772 ++ rcu_read_unlock();
4773 + n_group_desc[gdb_num] = gdb_bh;
4774 +
4775 + BUFFER_TRACE(gdb_bh, "get_write_access");
4776 +@@ -942,9 +973,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
4777 + return err;
4778 + }
4779 +
4780 +- EXT4_SB(sb)->s_group_desc = n_group_desc;
4781 ++ rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
4782 + EXT4_SB(sb)->s_gdb_count++;
4783 +- kvfree(o_group_desc);
4784 ++ ext4_kvfree_array_rcu(o_group_desc);
4785 + return err;
4786 + }
4787 +
4788 +@@ -1210,7 +1241,8 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
4789 + * use non-sparse filesystems anymore. This is already checked above.
4790 + */
4791 + if (gdb_off) {
4792 +- gdb_bh = sbi->s_group_desc[gdb_num];
4793 ++ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
4794 ++ gdb_num);
4795 + BUFFER_TRACE(gdb_bh, "get_write_access");
4796 + err = ext4_journal_get_write_access(handle, gdb_bh);
4797 +
4798 +@@ -1292,7 +1324,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
4799 + /*
4800 + * get_write_access() has been called on gdb_bh by ext4_add_new_desc().
4801 + */
4802 +- gdb_bh = sbi->s_group_desc[gdb_num];
4803 ++ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
4804 + /* Update group descriptor block for new group */
4805 + gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
4806 + gdb_off * EXT4_DESC_SIZE(sb));
4807 +@@ -1420,11 +1452,14 @@ static void ext4_update_super(struct super_block *sb,
4808 + percpu_counter_read(&sbi->s_freeclusters_counter));
4809 + if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
4810 + ext4_group_t flex_group;
4811 ++ struct flex_groups *fg;
4812 ++
4813 + flex_group = ext4_flex_group(sbi, group_data[0].group);
4814 ++ fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
4815 + atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
4816 +- &sbi->s_flex_groups[flex_group].free_clusters);
4817 ++ &fg->free_clusters);
4818 + atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
4819 +- &sbi->s_flex_groups[flex_group].free_inodes);
4820 ++ &fg->free_inodes);
4821 + }
4822 +
4823 + /*
4824 +@@ -1519,7 +1554,8 @@ exit_journal:
4825 + for (; gdb_num <= gdb_num_end; gdb_num++) {
4826 + struct buffer_head *gdb_bh;
4827 +
4828 +- gdb_bh = sbi->s_group_desc[gdb_num];
4829 ++ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
4830 ++ gdb_num);
4831 + if (old_gdb == gdb_bh->b_blocknr)
4832 + continue;
4833 + update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
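
The new ext4_kvfree_array_rcu() above wraps an arbitrary kvmalloc'ed buffer in a small heap-allocated carrier holding the rcu_head, so call_rcu() can defer the kvfree() past a grace period; if even that small allocation fails, it falls back to blocking in synchronize_rcu(), which is fine since all callers run in sleepable context. The carrier is needed because these runtime-sized arrays have nowhere to embed an rcu_head of their own (and 5.4 has no kvfree_rcu()). For fixed objects that do embed one, the kernel offers the same deferral directly:

#include <linux/slab.h>
#include <linux/rcupdate.h>

/* hypothetical object with its own embedded rcu_head */
struct foo {
	int data;
	struct rcu_head rcu;
};

static void drop_foo(struct foo *f)
{
	/* kfree(f) runs only after all current RCU readers finish */
	kfree_rcu(f, rcu);
}
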
4834 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
4835 +index 914230e63054..3ca604807839 100644
4836 +--- a/fs/ext4/super.c
4837 ++++ b/fs/ext4/super.c
4838 +@@ -970,6 +970,8 @@ static void ext4_put_super(struct super_block *sb)
4839 + {
4840 + struct ext4_sb_info *sbi = EXT4_SB(sb);
4841 + struct ext4_super_block *es = sbi->s_es;
4842 ++ struct buffer_head **group_desc;
4843 ++ struct flex_groups **flex_groups;
4844 + int aborted = 0;
4845 + int i, err;
4846 +
4847 +@@ -1000,15 +1002,23 @@ static void ext4_put_super(struct super_block *sb)
4848 + if (!sb_rdonly(sb))
4849 + ext4_commit_super(sb, 1);
4850 +
4851 ++ rcu_read_lock();
4852 ++ group_desc = rcu_dereference(sbi->s_group_desc);
4853 + for (i = 0; i < sbi->s_gdb_count; i++)
4854 +- brelse(sbi->s_group_desc[i]);
4855 +- kvfree(sbi->s_group_desc);
4856 +- kvfree(sbi->s_flex_groups);
4857 ++ brelse(group_desc[i]);
4858 ++ kvfree(group_desc);
4859 ++ flex_groups = rcu_dereference(sbi->s_flex_groups);
4860 ++ if (flex_groups) {
4861 ++ for (i = 0; i < sbi->s_flex_groups_allocated; i++)
4862 ++ kvfree(flex_groups[i]);
4863 ++ kvfree(flex_groups);
4864 ++ }
4865 ++ rcu_read_unlock();
4866 + percpu_counter_destroy(&sbi->s_freeclusters_counter);
4867 + percpu_counter_destroy(&sbi->s_freeinodes_counter);
4868 + percpu_counter_destroy(&sbi->s_dirs_counter);
4869 + percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
4870 +- percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
4871 ++ percpu_free_rwsem(&sbi->s_writepages_rwsem);
4872 + #ifdef CONFIG_QUOTA
4873 + for (i = 0; i < EXT4_MAXQUOTAS; i++)
4874 + kfree(get_qf_name(sb, sbi, i));
4875 +@@ -2332,8 +2342,8 @@ done:
4876 + int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
4877 + {
4878 + struct ext4_sb_info *sbi = EXT4_SB(sb);
4879 +- struct flex_groups *new_groups;
4880 +- int size;
4881 ++ struct flex_groups **old_groups, **new_groups;
4882 ++ int size, i;
4883 +
4884 + if (!sbi->s_log_groups_per_flex)
4885 + return 0;
4886 +@@ -2342,22 +2352,37 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
4887 + if (size <= sbi->s_flex_groups_allocated)
4888 + return 0;
4889 +
4890 +- size = roundup_pow_of_two(size * sizeof(struct flex_groups));
4891 +- new_groups = kvzalloc(size, GFP_KERNEL);
4892 ++ new_groups = kvzalloc(roundup_pow_of_two(size *
4893 ++ sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
4894 + if (!new_groups) {
4895 +- ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
4896 +- size / (int) sizeof(struct flex_groups));
4897 ++ ext4_msg(sb, KERN_ERR,
4898 ++ "not enough memory for %d flex group pointers", size);
4899 + return -ENOMEM;
4900 + }
4901 +-
4902 +- if (sbi->s_flex_groups) {
4903 +- memcpy(new_groups, sbi->s_flex_groups,
4904 +- (sbi->s_flex_groups_allocated *
4905 +- sizeof(struct flex_groups)));
4906 +- kvfree(sbi->s_flex_groups);
4907 ++ for (i = sbi->s_flex_groups_allocated; i < size; i++) {
4908 ++ new_groups[i] = kvzalloc(roundup_pow_of_two(
4909 ++ sizeof(struct flex_groups)),
4910 ++ GFP_KERNEL);
4911 ++ if (!new_groups[i]) {
4912 ++ for (i--; i >= sbi->s_flex_groups_allocated; i--)
4913 ++ kvfree(new_groups[i]);
4914 ++ kvfree(new_groups);
4915 ++ ext4_msg(sb, KERN_ERR,
4916 ++ "not enough memory for %d flex groups", size);
4917 ++ return -ENOMEM;
4918 ++ }
4919 + }
4920 +- sbi->s_flex_groups = new_groups;
4921 +- sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
4922 ++ rcu_read_lock();
4923 ++ old_groups = rcu_dereference(sbi->s_flex_groups);
4924 ++ if (old_groups)
4925 ++ memcpy(new_groups, old_groups,
4926 ++ (sbi->s_flex_groups_allocated *
4927 ++ sizeof(struct flex_groups *)));
4928 ++ rcu_read_unlock();
4929 ++ rcu_assign_pointer(sbi->s_flex_groups, new_groups);
4930 ++ sbi->s_flex_groups_allocated = size;
4931 ++ if (old_groups)
4932 ++ ext4_kvfree_array_rcu(old_groups);
4933 + return 0;
4934 + }
4935 +
4936 +@@ -2365,6 +2390,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
4937 + {
4938 + struct ext4_sb_info *sbi = EXT4_SB(sb);
4939 + struct ext4_group_desc *gdp = NULL;
4940 ++ struct flex_groups *fg;
4941 + ext4_group_t flex_group;
4942 + int i, err;
4943 +
4944 +@@ -2382,12 +2408,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
4945 + gdp = ext4_get_group_desc(sb, i, NULL);
4946 +
4947 + flex_group = ext4_flex_group(sbi, i);
4948 +- atomic_add(ext4_free_inodes_count(sb, gdp),
4949 +- &sbi->s_flex_groups[flex_group].free_inodes);
4950 ++ fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
4951 ++ atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
4952 + atomic64_add(ext4_free_group_clusters(sb, gdp),
4953 +- &sbi->s_flex_groups[flex_group].free_clusters);
4954 +- atomic_add(ext4_used_dirs_count(sb, gdp),
4955 +- &sbi->s_flex_groups[flex_group].used_dirs);
4956 ++ &fg->free_clusters);
4957 ++ atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
4958 + }
4959 +
4960 + return 1;
4961 +@@ -2961,7 +2986,7 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
4962 + return 0;
4963 + }
4964 +
4965 +-#if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2)
4966 ++#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
4967 + if (!readonly && (ext4_has_feature_quota(sb) ||
4968 + ext4_has_feature_project(sb))) {
4969 + ext4_msg(sb, KERN_ERR,
4970 +@@ -3586,9 +3611,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
4971 + {
4972 + struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
4973 + char *orig_data = kstrdup(data, GFP_KERNEL);
4974 +- struct buffer_head *bh;
4975 ++ struct buffer_head *bh, **group_desc;
4976 + struct ext4_super_block *es = NULL;
4977 + struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
4978 ++ struct flex_groups **flex_groups;
4979 + ext4_fsblk_t block;
4980 + ext4_fsblk_t sb_block = get_sb_block(&data);
4981 + ext4_fsblk_t logical_sb_block;
4982 +@@ -4242,9 +4268,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
4983 + goto failed_mount;
4984 + }
4985 + }
4986 +- sbi->s_group_desc = kvmalloc_array(db_count,
4987 +- sizeof(struct buffer_head *),
4988 +- GFP_KERNEL);
4989 ++ rcu_assign_pointer(sbi->s_group_desc,
4990 ++ kvmalloc_array(db_count,
4991 ++ sizeof(struct buffer_head *),
4992 ++ GFP_KERNEL));
4993 + if (sbi->s_group_desc == NULL) {
4994 + ext4_msg(sb, KERN_ERR, "not enough memory");
4995 + ret = -ENOMEM;
4996 +@@ -4260,14 +4287,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
4997 + }
4998 +
4999 + for (i = 0; i < db_count; i++) {
5000 ++ struct buffer_head *bh;
5001 ++
5002 + block = descriptor_loc(sb, logical_sb_block, i);
5003 +- sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
5004 +- if (!sbi->s_group_desc[i]) {
5005 ++ bh = sb_bread_unmovable(sb, block);
5006 ++ if (!bh) {
5007 + ext4_msg(sb, KERN_ERR,
5008 + "can't read group descriptor %d", i);
5009 + db_count = i;
5010 + goto failed_mount2;
5011 + }
5012 ++ rcu_read_lock();
5013 ++ rcu_dereference(sbi->s_group_desc)[i] = bh;
5014 ++ rcu_read_unlock();
5015 + }
5016 + sbi->s_gdb_count = db_count;
5017 + if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
5018 +@@ -4553,7 +4585,7 @@ no_journal:
5019 + err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
5020 + GFP_KERNEL);
5021 + if (!err)
5022 +- err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
5023 ++ err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
5024 +
5025 + if (err) {
5026 + ext4_msg(sb, KERN_ERR, "insufficient memory");
5027 +@@ -4641,13 +4673,19 @@ failed_mount7:
5028 + ext4_unregister_li_request(sb);
5029 + failed_mount6:
5030 + ext4_mb_release(sb);
5031 +- if (sbi->s_flex_groups)
5032 +- kvfree(sbi->s_flex_groups);
5033 ++ rcu_read_lock();
5034 ++ flex_groups = rcu_dereference(sbi->s_flex_groups);
5035 ++ if (flex_groups) {
5036 ++ for (i = 0; i < sbi->s_flex_groups_allocated; i++)
5037 ++ kvfree(flex_groups[i]);
5038 ++ kvfree(flex_groups);
5039 ++ }
5040 ++ rcu_read_unlock();
5041 + percpu_counter_destroy(&sbi->s_freeclusters_counter);
5042 + percpu_counter_destroy(&sbi->s_freeinodes_counter);
5043 + percpu_counter_destroy(&sbi->s_dirs_counter);
5044 + percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
5045 +- percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
5046 ++ percpu_free_rwsem(&sbi->s_writepages_rwsem);
5047 + failed_mount5:
5048 + ext4_ext_release(sb);
5049 + ext4_release_system_zone(sb);
5050 +@@ -4676,9 +4714,12 @@ failed_mount3:
5051 + if (sbi->s_mmp_tsk)
5052 + kthread_stop(sbi->s_mmp_tsk);
5053 + failed_mount2:
5054 ++ rcu_read_lock();
5055 ++ group_desc = rcu_dereference(sbi->s_group_desc);
5056 + for (i = 0; i < db_count; i++)
5057 +- brelse(sbi->s_group_desc[i]);
5058 +- kvfree(sbi->s_group_desc);
5059 ++ brelse(group_desc[i]);
5060 ++ kvfree(group_desc);
5061 ++ rcu_read_unlock();
5062 + failed_mount:
5063 + if (sbi->s_chksum_driver)
5064 + crypto_free_shash(sbi->s_chksum_driver);
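
The super.c rework turns s_flex_groups from one flat kvzalloc'ed array of structs into an RCU-published array of per-group pointers. The top-level table stays small (one pointer per flex group), so growing it on resize copies pointers rather than counter blocks, and each element can be freed individually on the teardown paths above. The per-slot allocation with backward unwind, condensed from ext4_alloc_flex_bg_array():

/* sketch: 'done' is sbi->s_flex_groups_allocated; slots below it
 * will be inherited from the old table and are not ours to free */
for (i = done; i < size; i++) {
	new_groups[i] = kvzalloc(sizeof(struct flex_groups), GFP_KERNEL);
	if (!new_groups[i]) {
		while (i-- > done)	/* undo only this call's work */
			kvfree(new_groups[i]);
		kvfree(new_groups);
		return -ENOMEM;
	}
}
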
5065 +diff --git a/fs/io_uring.c b/fs/io_uring.c
5066 +index 709671faaed6..ed9a551882cf 100644
5067 +--- a/fs/io_uring.c
5068 ++++ b/fs/io_uring.c
5069 +@@ -882,11 +882,17 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
5070 + mutex_unlock(&ctx->uring_lock);
5071 + }
5072 +
5073 +-static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
5074 +- long min)
5075 ++static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
5076 ++ long min)
5077 + {
5078 + int iters = 0, ret = 0;
5079 +
5080 ++ /*
5081 ++ * We disallow the app entering submit/complete with polling, but we
5082 ++ * still need to lock the ring to prevent racing with polled issue
5083 ++ * that got punted to a workqueue.
5084 ++ */
5085 ++ mutex_lock(&ctx->uring_lock);
5086 + do {
5087 + int tmin = 0;
5088 +
5089 +@@ -922,21 +928,6 @@ static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
5090 + ret = 0;
5091 + } while (min && !*nr_events && !need_resched());
5092 +
5093 +- return ret;
5094 +-}
5095 +-
5096 +-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
5097 +- long min)
5098 +-{
5099 +- int ret;
5100 +-
5101 +- /*
5102 +- * We disallow the app entering submit/complete with polling, but we
5103 +- * still need to lock the ring to prevent racing with polled issue
5104 +- * that got punted to a workqueue.
5105 +- */
5106 +- mutex_lock(&ctx->uring_lock);
5107 +- ret = __io_iopoll_check(ctx, nr_events, min);
5108 + mutex_unlock(&ctx->uring_lock);
5109 + return ret;
5110 + }
5111 +@@ -2721,7 +2712,7 @@ static int io_sq_thread(void *data)
5112 + */
5113 + mutex_lock(&ctx->uring_lock);
5114 + if (!list_empty(&ctx->poll_list))
5115 +- __io_iopoll_check(ctx, &nr_events, 0);
5116 ++ io_iopoll_getevents(ctx, &nr_events, 0);
5117 + else
5118 + inflight = 0;
5119 + mutex_unlock(&ctx->uring_lock);
5120 +@@ -2740,16 +2731,6 @@ static int io_sq_thread(void *data)
5121 +
5122 + to_submit = io_sqring_entries(ctx);
5123 + if (!to_submit) {
5124 +- /*
5125 +- * We're polling. If we're within the defined idle
5126 +- * period, then let us spin without work before going
5127 +- * to sleep.
5128 +- */
5129 +- if (inflight || !time_after(jiffies, timeout)) {
5130 +- cond_resched();
5131 +- continue;
5132 +- }
5133 +-
5134 + /*
5135 + * Drop cur_mm before scheduling, we can't hold it for
5136 + * long periods (or over schedule()). Do this before
5137 +@@ -2762,6 +2743,16 @@ static int io_sq_thread(void *data)
5138 + cur_mm = NULL;
5139 + }
5140 +
5141 ++ /*
5142 ++ * We're polling. If we're within the defined idle
5143 ++ * period, then let us spin without work before going
5144 ++ * to sleep.
5145 ++ */
5146 ++ if (inflight || !time_after(jiffies, timeout)) {
5147 ++ cond_resched();
5148 ++ continue;
5149 ++ }
5150 ++
5151 + prepare_to_wait(&ctx->sqo_wait, &wait,
5152 + TASK_INTERRUPTIBLE);
5153 +
5154 +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
5155 +index 3930c68a9c20..b17f05ae6011 100644
5156 +--- a/fs/jbd2/transaction.c
5157 ++++ b/fs/jbd2/transaction.c
5158 +@@ -865,8 +865,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
5159 + char *frozen_buffer = NULL;
5160 + unsigned long start_lock, time_lock;
5161 +
5162 +- if (is_handle_aborted(handle))
5163 +- return -EROFS;
5164 + journal = transaction->t_journal;
5165 +
5166 + jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
5167 +@@ -1118,6 +1116,9 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
5168 + struct journal_head *jh;
5169 + int rc;
5170 +
5171 ++ if (is_handle_aborted(handle))
5172 ++ return -EROFS;
5173 ++
5174 + if (jbd2_write_access_granted(handle, bh, false))
5175 + return 0;
5176 +
5177 +@@ -1255,6 +1256,9 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
5178 + struct journal_head *jh;
5179 + char *committed_data = NULL;
5180 +
5181 ++ if (is_handle_aborted(handle))
5182 ++ return -EROFS;
5183 ++
5184 + if (jbd2_write_access_granted(handle, bh, true))
5185 + return 0;
5186 +
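
The jbd2 change hoists the is_handle_aborted() test out of do_get_write_access() and into the two exported entry points, ahead of jbd2_write_access_granted(): the fast path that returns early when access was already granted used to bypass the aborted check entirely, so buffers could still be handed out against a dead journal. Guard-first ordering, sketched with a hypothetical slow-path name:

int get_write_access(handle_t *handle, struct buffer_head *bh)
{
	if (is_handle_aborted(handle))
		return -EROFS;		/* now checked unconditionally */

	if (jbd2_write_access_granted(handle, bh, false))
		return 0;		/* fast path */

	return slow_path_get_access(handle, bh);	/* hypothetical */
}
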
5187 +diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
5188 +index d1fdf26ccb33..4010c42e40bd 100644
5189 +--- a/include/acpi/acpixf.h
5190 ++++ b/include/acpi/acpixf.h
5191 +@@ -749,6 +749,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
5192 + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
5193 + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
5194 + ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void))
5195 ++ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_fixed_event_status_set(void))
5196 +
5197 + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
5198 + acpi_get_gpe_device(u32 gpe_index,
5199 +diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
5200 +index 94f047a8a845..d7c403d0dd27 100644
5201 +--- a/include/linux/intel-svm.h
5202 ++++ b/include/linux/intel-svm.h
5203 +@@ -122,7 +122,7 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
5204 + BUG();
5205 + }
5206 +
5207 +-static int intel_svm_is_pasid_valid(struct device *dev, int pasid)
5208 ++static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid)
5209 + {
5210 + return -EINVAL;
5211 + }
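
The intel-svm.h fix adds the missing `inline` to a stub defined in a header. A plain `static` function in a header is instantiated in every translation unit that includes it, drawing -Wunused-function warnings (and dead copies) wherever it goes uncalled; `static inline` permits the compiler to drop unreferenced copies silently. In miniature:

/* some_header.h (sketch) */

/* static int stub(void) { return -1; }
 *   ^ one unused copy plus a warning in every .c that includes this */

static inline int stub(void)	/* silently discarded when unused */
{
	return -1;
}
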
5212 +diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
5213 +index aba5ada373d6..e85f714a623e 100644
5214 +--- a/include/linux/irqdomain.h
5215 ++++ b/include/linux/irqdomain.h
5216 +@@ -191,7 +191,7 @@ enum {
5217 + IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
5218 +
5219 + /* Irq domain name was allocated in __irq_domain_add() */
5220 +- IRQ_DOMAIN_NAME_ALLOCATED = (1 << 6),
5221 ++ IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1),
5222 +
5223 + /* Irq domain is an IPI domain with virq per cpu */
5224 + IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2),
5225 +diff --git a/include/linux/libata.h b/include/linux/libata.h
5226 +index fa0c3dae2094..c44e4cfbcb16 100644
5227 +--- a/include/linux/libata.h
5228 ++++ b/include/linux/libata.h
5229 +@@ -1220,6 +1220,7 @@ struct pci_bits {
5230 + };
5231 +
5232 + extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
5233 ++extern void ata_pci_shutdown_one(struct pci_dev *pdev);
5234 + extern void ata_pci_remove_one(struct pci_dev *pdev);
5235 +
5236 + #ifdef CONFIG_PM
5237 +diff --git a/include/linux/tty.h b/include/linux/tty.h
5238 +index bfa4e2ee94a9..bd5fe0e907e8 100644
5239 +--- a/include/linux/tty.h
5240 ++++ b/include/linux/tty.h
5241 +@@ -225,6 +225,8 @@ struct tty_port_client_operations {
5242 + void (*write_wakeup)(struct tty_port *port);
5243 + };
5244 +
5245 ++extern const struct tty_port_client_operations tty_port_default_client_ops;
5246 ++
5247 + struct tty_port {
5248 + struct tty_bufhead buf; /* Locked internally */
5249 + struct tty_struct *tty; /* Back pointer */
5250 +diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
5251 +index a1be64c9940f..22c1f579afe3 100644
5252 +--- a/include/linux/usb/quirks.h
5253 ++++ b/include/linux/usb/quirks.h
5254 +@@ -69,4 +69,7 @@
5255 + /* Hub needs extra delay after resetting its port. */
5256 + #define USB_QUIRK_HUB_SLOW_RESET BIT(14)
5257 +
5258 ++/* device has blacklisted endpoints */
5259 ++#define USB_QUIRK_ENDPOINT_BLACKLIST BIT(15)
5260 ++
5261 + #endif /* __LINUX_USB_QUIRKS_H */
5262 +diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
5263 +index 533f56733ba8..b71b5c4f418c 100644
5264 +--- a/include/scsi/iscsi_proto.h
5265 ++++ b/include/scsi/iscsi_proto.h
5266 +@@ -627,7 +627,6 @@ struct iscsi_reject {
5267 + #define ISCSI_REASON_BOOKMARK_INVALID 9
5268 + #define ISCSI_REASON_BOOKMARK_NO_RESOURCES 10
5269 + #define ISCSI_REASON_NEGOTIATION_RESET 11
5270 +-#define ISCSI_REASON_WAITING_FOR_LOGOUT 12
5271 +
5272 + /* Max. number of Key=Value pairs in a text message */
5273 + #define MAX_KEY_VALUE_PAIRS 8192
5274 +diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
5275 +index 40ab20439fee..a36b7227a15a 100644
5276 +--- a/include/sound/rawmidi.h
5277 ++++ b/include/sound/rawmidi.h
5278 +@@ -77,9 +77,9 @@ struct snd_rawmidi_substream {
5279 + struct list_head list; /* list of all substream for given stream */
5280 + int stream; /* direction */
5281 + int number; /* substream number */
5282 +- unsigned int opened: 1, /* open flag */
5283 +- append: 1, /* append flag (merge more streams) */
5284 +- active_sensing: 1; /* send active sensing when close */
5285 ++ bool opened; /* open flag */
5286 ++ bool append; /* append flag (merge more streams) */
5287 ++ bool active_sensing; /* send active sensing when close */
5288 + int use_count; /* use counter (for output) */
5289 + size_t bytes;
5290 + struct snd_rawmidi *rmidi;
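
The rawmidi.h change splits three 1-bit bitfields into plain bools. Adjacent bitfields share one storage unit, so updating any of them is a read-modify-write of the whole word, and two contexts flipping different flags without a common lock can silently undo each other's store. Separate bool members give each flag its own addressable location, which may be written independently:

/* racy: all three flags live in one word; concurrent updates to
 * different flags can clobber one another via the shared RMW */
struct flags_packed {
	unsigned int opened:1, append:1, active_sensing:1;
};

/* safe: distinct memory locations, independent stores */
struct flags_split {
	bool opened;
	bool append;
	bool active_sensing;
};
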
5291 +diff --git a/ipc/sem.c b/ipc/sem.c
5292 +index ec97a7072413..fe12ea8dd2b3 100644
5293 +--- a/ipc/sem.c
5294 ++++ b/ipc/sem.c
5295 +@@ -2368,11 +2368,9 @@ void exit_sem(struct task_struct *tsk)
5296 + ipc_assert_locked_object(&sma->sem_perm);
5297 + list_del(&un->list_id);
5298 +
5299 +- /* we are the last process using this ulp, acquiring ulp->lock
5300 +- * isn't required. Besides that, we are also protected against
5301 +- * IPC_RMID as we hold sma->sem_perm lock now
5302 +- */
5303 ++ spin_lock(&ulp->lock);
5304 + list_del_rcu(&un->list_proc);
5305 ++ spin_unlock(&ulp->lock);
5306 +
5307 + /* perform adjustments registered in un */
5308 + for (i = 0; i < sma->sem_nsems; i++) {
5309 +diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
5310 +index 5b9da0954a27..3668a0bc18ec 100644
5311 +--- a/kernel/bpf/offload.c
5312 ++++ b/kernel/bpf/offload.c
5313 +@@ -321,7 +321,7 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
5314 +
5315 + ulen = info->jited_prog_len;
5316 + info->jited_prog_len = aux->offload->jited_len;
5317 +- if (info->jited_prog_len & ulen) {
5318 ++ if (info->jited_prog_len && ulen) {
5319 + uinsns = u64_to_user_ptr(info->jited_prog_insns);
5320 + ulen = min_t(u32, info->jited_prog_len, ulen);
5321 + if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
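
The one-character bpf/offload.c fix swaps bitwise AND for logical AND. `info->jited_prog_len & ulen` evaluates to zero whenever the two lengths happen to share no set bits, so the copy-out of the JITed image could be skipped for perfectly valid length pairs. Standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int jited_len = 4, user_len = 3;

	/* bitwise: 0b100 & 0b011 == 0, branch wrongly skipped */
	printf("len & ulen  = %u\n", jited_len & user_len);

	/* logical: both non-zero, branch taken as intended */
	printf("len && ulen = %d\n", jited_len && user_len);
	return 0;
}
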
5322 +diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
5323 +index 3924fbe829d4..c9d8eb7f5c02 100644
5324 +--- a/kernel/irq/internals.h
5325 ++++ b/kernel/irq/internals.h
5326 +@@ -128,8 +128,6 @@ static inline void unregister_handler_proc(unsigned int irq,
5327 +
5328 + extern bool irq_can_set_affinity_usr(unsigned int irq);
5329 +
5330 +-extern int irq_select_affinity_usr(unsigned int irq);
5331 +-
5332 + extern void irq_set_thread_affinity(struct irq_desc *desc);
5333 +
5334 + extern int irq_do_set_affinity(struct irq_data *data,
5335 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
5336 +index 1753486b440c..55b080101a20 100644
5337 +--- a/kernel/irq/manage.c
5338 ++++ b/kernel/irq/manage.c
5339 +@@ -442,23 +442,9 @@ int irq_setup_affinity(struct irq_desc *desc)
5340 + {
5341 + return irq_select_affinity(irq_desc_get_irq(desc));
5342 + }
5343 +-#endif
5344 ++#endif /* CONFIG_AUTO_IRQ_AFFINITY */
5345 ++#endif /* CONFIG_SMP */
5346 +
5347 +-/*
5348 +- * Called when a bogus affinity is set via /proc/irq
5349 +- */
5350 +-int irq_select_affinity_usr(unsigned int irq)
5351 +-{
5352 +- struct irq_desc *desc = irq_to_desc(irq);
5353 +- unsigned long flags;
5354 +- int ret;
5355 +-
5356 +- raw_spin_lock_irqsave(&desc->lock, flags);
5357 +- ret = irq_setup_affinity(desc);
5358 +- raw_spin_unlock_irqrestore(&desc->lock, flags);
5359 +- return ret;
5360 +-}
5361 +-#endif
5362 +
5363 + /**
5364 + * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
5365 +diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
5366 +index cfc4f088a0e7..f5958c55406f 100644
5367 +--- a/kernel/irq/proc.c
5368 ++++ b/kernel/irq/proc.c
5369 +@@ -111,6 +111,28 @@ static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
5370 + return show_irq_affinity(AFFINITY_LIST, m);
5371 + }
5372 +
5373 ++#ifndef CONFIG_AUTO_IRQ_AFFINITY
5374 ++static inline int irq_select_affinity_usr(unsigned int irq)
5375 ++{
5376 ++ /*
5377 ++ * If the interrupt is started up already then this fails. The
5378 ++ * interrupt is assigned to an online CPU already. There is no
5379 ++ * point to move it around randomly. Tell user space that the
5380 ++ * selected mask is bogus.
5381 ++ *
5382 ++ * If not then any change to the affinity is pointless because the
5383 ++ * startup code invokes irq_setup_affinity() which will select
5384 ++ * a online CPU anyway.
5385 ++ */
5386 ++ return -EINVAL;
5387 ++}
5388 ++#else
5389 ++/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
5390 ++static inline int irq_select_affinity_usr(unsigned int irq)
5391 ++{
5392 ++ return irq_select_affinity(irq);
5393 ++}
5394 ++#endif
5395 +
5396 + static ssize_t write_irq_affinity(int type, struct file *file,
5397 + const char __user *buffer, size_t count, loff_t *pos)
5398 +diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
5399 +index ce8f6748678a..9154e745f097 100644
5400 +--- a/kernel/sched/psi.c
5401 ++++ b/kernel/sched/psi.c
5402 +@@ -1199,6 +1199,9 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
5403 + if (static_branch_likely(&psi_disabled))
5404 + return -EOPNOTSUPP;
5405 +
5406 ++ if (!nbytes)
5407 ++ return -EINVAL;
5408 ++
5409 + buf_size = min(nbytes, sizeof(buf));
5410 + if (copy_from_user(buf, user_buf, buf_size))
5411 + return -EFAULT;
5412 +diff --git a/lib/stackdepot.c b/lib/stackdepot.c
5413 +index ed717dd08ff3..81c69c08d1d1 100644
5414 +--- a/lib/stackdepot.c
5415 ++++ b/lib/stackdepot.c
5416 +@@ -83,15 +83,19 @@ static bool init_stack_slab(void **prealloc)
5417 + return true;
5418 + if (stack_slabs[depot_index] == NULL) {
5419 + stack_slabs[depot_index] = *prealloc;
5420 ++ *prealloc = NULL;
5421 + } else {
5422 +- stack_slabs[depot_index + 1] = *prealloc;
5423 ++ /* If this is the last depot slab, do not touch the next one. */
5424 ++ if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
5425 ++ stack_slabs[depot_index + 1] = *prealloc;
5426 ++ *prealloc = NULL;
5427 ++ }
5428 + /*
5429 + * This smp_store_release pairs with smp_load_acquire() from
5430 + * |next_slab_inited| above and in stack_depot_save().
5431 + */
5432 + smp_store_release(&next_slab_inited, 1);
5433 + }
5434 +- *prealloc = NULL;
5435 + return true;
5436 + }
5437 +
5438 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
5439 +index b5b4e310fe70..ae9044bc9f80 100644
5440 +--- a/mm/memcontrol.c
5441 ++++ b/mm/memcontrol.c
5442 +@@ -418,8 +418,10 @@ int memcg_expand_shrinker_maps(int new_id)
5443 + if (mem_cgroup_is_root(memcg))
5444 + continue;
5445 + ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
5446 +- if (ret)
5447 ++ if (ret) {
5448 ++ mem_cgroup_iter_break(NULL, memcg);
5449 + goto unlock;
5450 ++ }
5451 + }
5452 + unlock:
5453 + if (!ret)
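
The memcontrol.c fix plugs a reference leak: the file-local for_each_mem_cgroup() walk holds a css reference on the cgroup currently being visited and drops it when advancing, so bailing out mid-walk leaves one reference pinned. mem_cgroup_iter_break() exists to release exactly that:

/* sketch: leaving a memcg walk early must break the iterator;
 * expand_one() stands in for memcg_expand_one_shrinker_map() above */
for_each_mem_cgroup(memcg) {
	ret = expand_one(memcg);
	if (ret) {
		mem_cgroup_iter_break(NULL, memcg); /* drop held css ref */
		break;
	}
}
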
5454 +diff --git a/mm/mmap.c b/mm/mmap.c
5455 +index 4390dbea4aa5..514cc19c5916 100644
5456 +--- a/mm/mmap.c
5457 ++++ b/mm/mmap.c
5458 +@@ -195,8 +195,6 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
5459 + bool downgraded = false;
5460 + LIST_HEAD(uf);
5461 +
5462 +- brk = untagged_addr(brk);
5463 +-
5464 + if (down_write_killable(&mm->mmap_sem))
5465 + return -EINTR;
5466 +
5467 +@@ -1583,8 +1581,6 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
5468 + struct file *file = NULL;
5469 + unsigned long retval;
5470 +
5471 +- addr = untagged_addr(addr);
5472 +-
5473 + if (!(flags & MAP_ANONYMOUS)) {
5474 + audit_mmap_fd(fd, flags);
5475 + file = fget(fd);
5476 +diff --git a/mm/mremap.c b/mm/mremap.c
5477 +index 1fc8a29fbe3f..1d98281f7204 100644
5478 +--- a/mm/mremap.c
5479 ++++ b/mm/mremap.c
5480 +@@ -607,7 +607,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
5481 + LIST_HEAD(uf_unmap);
5482 +
5483 + addr = untagged_addr(addr);
5484 +- new_addr = untagged_addr(new_addr);
5485 +
5486 + if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
5487 + return ret;
5488 +diff --git a/mm/sparse.c b/mm/sparse.c
5489 +index 69b41b6046a5..a5e5c1c3a2a8 100644
5490 +--- a/mm/sparse.c
5491 ++++ b/mm/sparse.c
5492 +@@ -884,7 +884,7 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
5493 + * Poison uninitialized struct pages in order to catch invalid flags
5494 + * combinations.
5495 + */
5496 +- page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
5497 ++ page_init_poison(memmap, sizeof(struct page) * nr_pages);
5498 +
5499 + ms = __nr_to_section(section_nr);
5500 + set_section_nid(section_nr, nid);
5501 +diff --git a/mm/vmscan.c b/mm/vmscan.c
5502 +index e7f10c4b40f0..7fde5f904c8d 100644
5503 +--- a/mm/vmscan.c
5504 ++++ b/mm/vmscan.c
5505 +@@ -2530,10 +2530,13 @@ out:
5506 + /*
5507 + * Scan types proportional to swappiness and
5508 + * their relative recent reclaim efficiency.
5509 +- * Make sure we don't miss the last page
5510 +- * because of a round-off error.
5511 ++ * Make sure we don't miss the last page on
5512 ++ * the offlined memory cgroups because of a
5513 ++ * round-off error.
5514 + */
5515 +- scan = DIV64_U64_ROUND_UP(scan * fraction[file],
5516 ++ scan = mem_cgroup_online(memcg) ?
5517 ++ div64_u64(scan * fraction[file], denominator) :
5518 ++ DIV64_U64_ROUND_UP(scan * fraction[file],
5519 + denominator);
5520 + break;
5521 + case SCAN_FILE:
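
The vmscan change restricts the round-up to offlined memory cgroups. Rounding the proportional scan target up guarantees an offlined group's last pages eventually get reclaimed, but for online groups it inflated targets whose exact share was a small fraction of a page from 0 to 1, so barely-used or protected LRUs were scanned anyway; truncating division restores that. The numeric difference, with a plain-C stand-in for the kernel macro:

#include <stdio.h>

#define DIV64_U64_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long scan = 1, fraction = 1, denominator = 1000000;

	/* exact share is one millionth of a page: effectively zero */
	printf("truncated: %llu\n", scan * fraction / denominator);
	printf("round-up : %llu\n",
	       DIV64_U64_ROUND_UP(scan * fraction, denominator));
	return 0;	/* prints 0, then 1 */
}
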
5522 +diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
5523 +index ced3fc8fad7c..6520d9ec1297 100644
5524 +--- a/net/netfilter/xt_hashlimit.c
5525 ++++ b/net/netfilter/xt_hashlimit.c
5526 +@@ -851,6 +851,8 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
5527 + return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 3);
5528 + }
5529 +
5530 ++#define HASHLIMIT_MAX_SIZE 1048576
5531 ++
5532 + static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
5533 + struct xt_hashlimit_htable **hinfo,
5534 + struct hashlimit_cfg3 *cfg,
5535 +@@ -861,6 +863,14 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
5536 +
5537 + if (cfg->gc_interval == 0 || cfg->expire == 0)
5538 + return -EINVAL;
5539 ++ if (cfg->size > HASHLIMIT_MAX_SIZE) {
5540 ++ cfg->size = HASHLIMIT_MAX_SIZE;
5541 ++ pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
5542 ++ }
5543 ++ if (cfg->max > HASHLIMIT_MAX_SIZE) {
5544 ++ cfg->max = HASHLIMIT_MAX_SIZE;
5545 ++ pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
5546 ++ }
5547 + if (par->family == NFPROTO_IPV4) {
5548 + if (cfg->srcmask > 32 || cfg->dstmask > 32)
5549 + return -EINVAL;
5550 +diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
5551 +index dbdbc4f18b5e..c9f34b0a11df 100644
5552 +--- a/net/rxrpc/call_object.c
5553 ++++ b/net/rxrpc/call_object.c
5554 +@@ -562,11 +562,11 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
5555 + }
5556 +
5557 + /*
5558 +- * Final call destruction under RCU.
5559 ++ * Final call destruction - but must be done in process context.
5560 + */
5561 +-static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
5562 ++static void rxrpc_destroy_call(struct work_struct *work)
5563 + {
5564 +- struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
5565 ++ struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
5566 + struct rxrpc_net *rxnet = call->rxnet;
5567 +
5568 + rxrpc_put_connection(call->conn);
5569 +@@ -578,6 +578,22 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
5570 + wake_up_var(&rxnet->nr_calls);
5571 + }
5572 +
5573 ++/*
5574 ++ * Final call destruction under RCU.
5575 ++ */
5576 ++static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
5577 ++{
5578 ++ struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
5579 ++
5580 ++ if (in_softirq()) {
5581 ++ INIT_WORK(&call->processor, rxrpc_destroy_call);
5582 ++ if (!rxrpc_queue_work(&call->processor))
5583 ++ BUG();
5584 ++ } else {
5585 ++ rxrpc_destroy_call(&call->processor);
5586 ++ }
5587 ++}
5588 ++
5589 + /*
5590 + * clean up a call
5591 + */
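
RCU callbacks execute in softirq context, where the teardown this destructor performs must not run (the new comment notes it needs process context). The rxrpc fix therefore splits destruction: the RCU callback only decides where to run, punting the real work to a workqueue when invoked from softirq and calling it directly otherwise. Generic shape of the pattern, with hypothetical names (the patch reuses the call's existing work item and its own queueing helper):

/* sketch: process-context teardown bounced out of softirq */
static void obj_destroy(struct work_struct *work)
{
	struct obj *o = container_of(work, struct obj, work);

	teardown_in_process_context(o);	/* hypothetical */
	kfree(o);
}

static void obj_rcu_free(struct rcu_head *rcu)
{
	struct obj *o = container_of(rcu, struct obj, rcu);

	if (in_softirq()) {
		INIT_WORK(&o->work, obj_destroy);
		schedule_work(&o->work);   /* defer to process context */
	} else {
		obj_destroy(&o->work);
	}
}
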
5592 +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
5593 +index 6d9592f0ae1d..cc93157fa950 100644
5594 +--- a/sound/core/seq/seq_clientmgr.c
5595 ++++ b/sound/core/seq/seq_clientmgr.c
5596 +@@ -580,7 +580,7 @@ static int update_timestamp_of_queue(struct snd_seq_event *event,
5597 + event->queue = queue;
5598 + event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
5599 + if (real_time) {
5600 +- event->time.time = snd_seq_timer_get_cur_time(q->timer);
5601 ++ event->time.time = snd_seq_timer_get_cur_time(q->timer, true);
5602 + event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
5603 + } else {
5604 + event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
5605 +@@ -1659,7 +1659,7 @@ static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
5606 + tmr = queue->timer;
5607 + status->events = queue->tickq->cells + queue->timeq->cells;
5608 +
5609 +- status->time = snd_seq_timer_get_cur_time(tmr);
5610 ++ status->time = snd_seq_timer_get_cur_time(tmr, true);
5611 + status->tick = snd_seq_timer_get_cur_tick(tmr);
5612 +
5613 + status->running = tmr->running;
5614 +diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
5615 +index caf68bf42f13..71a6ea62c3be 100644
5616 +--- a/sound/core/seq/seq_queue.c
5617 ++++ b/sound/core/seq/seq_queue.c
5618 +@@ -238,6 +238,8 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
5619 + {
5620 + unsigned long flags;
5621 + struct snd_seq_event_cell *cell;
5622 ++ snd_seq_tick_time_t cur_tick;
5623 ++ snd_seq_real_time_t cur_time;
5624 +
5625 + if (q == NULL)
5626 + return;
5627 +@@ -254,17 +256,18 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
5628 +
5629 + __again:
5630 + /* Process tick queue... */
5631 ++ cur_tick = snd_seq_timer_get_cur_tick(q->timer);
5632 + for (;;) {
5633 +- cell = snd_seq_prioq_cell_out(q->tickq,
5634 +- &q->timer->tick.cur_tick);
5635 ++ cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
5636 + if (!cell)
5637 + break;
5638 + snd_seq_dispatch_event(cell, atomic, hop);
5639 + }
5640 +
5641 + /* Process time queue... */
5642 ++ cur_time = snd_seq_timer_get_cur_time(q->timer, false);
5643 + for (;;) {
5644 +- cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
5645 ++ cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
5646 + if (!cell)
5647 + break;
5648 + snd_seq_dispatch_event(cell, atomic, hop);
5649 +@@ -392,6 +395,7 @@ int snd_seq_queue_check_access(int queueid, int client)
5650 + int snd_seq_queue_set_owner(int queueid, int client, int locked)
5651 + {
5652 + struct snd_seq_queue *q = queueptr(queueid);
5653 ++ unsigned long flags;
5654 +
5655 + if (q == NULL)
5656 + return -EINVAL;
5657 +@@ -401,8 +405,10 @@ int snd_seq_queue_set_owner(int queueid, int client, int locked)
5658 + return -EPERM;
5659 + }
5660 +
5661 ++ spin_lock_irqsave(&q->owner_lock, flags);
5662 + q->locked = locked ? 1 : 0;
5663 + q->owner = client;
5664 ++ spin_unlock_irqrestore(&q->owner_lock, flags);
5665 + queue_access_unlock(q);
5666 + queuefree(q);
5667 +
5668 +@@ -539,15 +545,17 @@ void snd_seq_queue_client_termination(int client)
5669 + unsigned long flags;
5670 + int i;
5671 + struct snd_seq_queue *q;
5672 ++ bool matched;
5673 +
5674 + for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
5675 + if ((q = queueptr(i)) == NULL)
5676 + continue;
5677 + spin_lock_irqsave(&q->owner_lock, flags);
5678 +- if (q->owner == client)
5679 ++ matched = (q->owner == client);
5680 ++ if (matched)
5681 + q->klocked = 1;
5682 + spin_unlock_irqrestore(&q->owner_lock, flags);
5683 +- if (q->owner == client) {
5684 ++ if (matched) {
5685 + if (q->timer->running)
5686 + snd_seq_timer_stop(q->timer);
5687 + snd_seq_timer_reset(q->timer);
5688 +@@ -739,6 +747,8 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
5689 + int i, bpm;
5690 + struct snd_seq_queue *q;
5691 + struct snd_seq_timer *tmr;
5692 ++ bool locked;
5693 ++ int owner;
5694 +
5695 + for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
5696 + if ((q = queueptr(i)) == NULL)
5697 +@@ -750,9 +760,14 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
5698 + else
5699 + bpm = 0;
5700 +
5701 ++ spin_lock_irq(&q->owner_lock);
5702 ++ locked = q->locked;
5703 ++ owner = q->owner;
5704 ++ spin_unlock_irq(&q->owner_lock);
5705 ++
5706 + snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
5707 +- snd_iprintf(buffer, "owned by client : %d\n", q->owner);
5708 +- snd_iprintf(buffer, "lock status : %s\n", q->locked ? "Locked" : "Free");
5709 ++ snd_iprintf(buffer, "owned by client : %d\n", owner);
5710 ++ snd_iprintf(buffer, "lock status : %s\n", locked ? "Locked" : "Free");
5711 + snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
5712 + snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
5713 + snd_iprintf(buffer, "timer state : %s\n", tmr->running ? "Running" : "Stopped");
5714 +diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
5715 +index 3bc6095df44d..0b43fc5fe349 100644
5716 +--- a/sound/core/seq/seq_timer.c
5717 ++++ b/sound/core/seq/seq_timer.c
5718 +@@ -422,14 +422,15 @@ int snd_seq_timer_continue(struct snd_seq_timer *tmr)
5719 + }
5720 +
5721 + /* return current 'real' time. use timeofday() to get better granularity. */
5722 +-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
5723 ++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
5724 ++ bool adjust_ktime)
5725 + {
5726 + snd_seq_real_time_t cur_time;
5727 + unsigned long flags;
5728 +
5729 + spin_lock_irqsave(&tmr->lock, flags);
5730 + cur_time = tmr->cur_time;
5731 +- if (tmr->running) {
5732 ++ if (adjust_ktime && tmr->running) {
5733 + struct timespec64 tm;
5734 +
5735 + ktime_get_ts64(&tm);
5736 +@@ -446,7 +447,13 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
5737 + high PPQ values) */
5738 + snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr)
5739 + {
5740 +- return tmr->tick.cur_tick;
5741 ++ snd_seq_tick_time_t cur_tick;
5742 ++ unsigned long flags;
5743 ++
5744 ++ spin_lock_irqsave(&tmr->lock, flags);
5745 ++ cur_tick = tmr->tick.cur_tick;
5746 ++ spin_unlock_irqrestore(&tmr->lock, flags);
5747 ++ return cur_tick;
5748 + }
5749 +
5750 +
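
The ALSA sequencer hunks share one theme: fields such as q->owner, q->locked and the timer's cur_time/cur_tick were read while another context could be mid-update under the owning spinlock. The fixes make each accessor take the lock, snapshot the state into locals, and work from the copies, exactly as the reworked snd_seq_timer_get_cur_tick() above now does. Generic getter shape:

/* sketch: copy shared state under the lock, use the copy outside */
static struct obj_state snapshot_state(struct obj *o)
{
	struct obj_state s;
	unsigned long flags;

	spin_lock_irqsave(&o->lock, flags);
	s = o->state;		/* consistent copy */
	spin_unlock_irqrestore(&o->lock, flags);
	return s;
}

The new adjust_ktime flag serves the same end: queue dispatch samples the time once, unadjusted, and processes events against that snapshot instead of re-reading the live timer for every cell.
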
5751 +diff --git a/sound/core/seq/seq_timer.h b/sound/core/seq/seq_timer.h
5752 +index 66c3e344eae3..4bec57df8158 100644
5753 +--- a/sound/core/seq/seq_timer.h
5754 ++++ b/sound/core/seq/seq_timer.h
5755 +@@ -120,7 +120,8 @@ int snd_seq_timer_set_tempo_ppq(struct snd_seq_timer *tmr, int tempo, int ppq);
5756 + int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr, snd_seq_tick_time_t position);
5757 + int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr, snd_seq_real_time_t position);
5758 + int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew, unsigned int base);
5759 +-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr);
5760 ++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
5761 ++ bool adjust_ktime);
5762 + snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr);
5763 +
5764 + extern int seq_default_timer_class;
5765 +diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
5766 +index 886cb7811bd6..2efee794cac6 100644
5767 +--- a/sound/hda/hdmi_chmap.c
5768 ++++ b/sound/hda/hdmi_chmap.c
5769 +@@ -250,7 +250,7 @@ void snd_hdac_print_channel_allocation(int spk_alloc, char *buf, int buflen)
5770 +
5771 + for (i = 0, j = 0; i < ARRAY_SIZE(cea_speaker_allocation_names); i++) {
5772 + if (spk_alloc & (1 << i))
5773 +- j += snprintf(buf + j, buflen - j, " %s",
5774 ++ j += scnprintf(buf + j, buflen - j, " %s",
5775 + cea_speaker_allocation_names[i]);
5776 + }
5777 + buf[j] = '\0'; /* necessary when j == 0 */
5778 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
5779 +index a2fb19129219..6cb72336433a 100644
5780 +--- a/sound/pci/hda/hda_codec.c
5781 ++++ b/sound/pci/hda/hda_codec.c
5782 +@@ -4019,7 +4019,7 @@ void snd_print_pcm_bits(int pcm, char *buf, int buflen)
5783 +
5784 + for (i = 0, j = 0; i < ARRAY_SIZE(bits); i++)
5785 + if (pcm & (AC_SUPPCM_BITS_8 << i))
5786 +- j += snprintf(buf + j, buflen - j, " %d", bits[i]);
5787 ++ j += scnprintf(buf + j, buflen - j, " %d", bits[i]);
5788 +
5789 + buf[j] = '\0'; /* necessary when j == 0 */
5790 + }
5791 +diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
5792 +index d081fb2880a0..82cf1da2ff12 100644
5793 +--- a/sound/pci/hda/hda_eld.c
5794 ++++ b/sound/pci/hda/hda_eld.c
5795 +@@ -360,7 +360,7 @@ static void hdmi_print_pcm_rates(int pcm, char *buf, int buflen)
5796 +
5797 + for (i = 0, j = 0; i < ARRAY_SIZE(alsa_rates); i++)
5798 + if (pcm & (1 << i))
5799 +- j += snprintf(buf + j, buflen - j, " %d",
5800 ++ j += scnprintf(buf + j, buflen - j, " %d",
5801 + alsa_rates[i]);
5802 +
5803 + buf[j] = '\0'; /* necessary when j == 0 */
5804 +diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
5805 +index fcc34417cbce..6dbe99131bc4 100644
5806 +--- a/sound/pci/hda/hda_sysfs.c
5807 ++++ b/sound/pci/hda/hda_sysfs.c
5808 +@@ -222,7 +222,7 @@ static ssize_t init_verbs_show(struct device *dev,
5809 + int i, len = 0;
5810 + mutex_lock(&codec->user_mutex);
5811 + snd_array_for_each(&codec->init_verbs, i, v) {
5812 +- len += snprintf(buf + len, PAGE_SIZE - len,
5813 ++ len += scnprintf(buf + len, PAGE_SIZE - len,
5814 + "0x%02x 0x%03x 0x%04x\n",
5815 + v->nid, v->verb, v->param);
5816 + }
5817 +@@ -272,7 +272,7 @@ static ssize_t hints_show(struct device *dev,
5818 + int i, len = 0;
5819 + mutex_lock(&codec->user_mutex);
5820 + snd_array_for_each(&codec->hints, i, hint) {
5821 +- len += snprintf(buf + len, PAGE_SIZE - len,
5822 ++ len += scnprintf(buf + len, PAGE_SIZE - len,
5823 + "%s = %s\n", hint->key, hint->val);
5824 + }
5825 + mutex_unlock(&codec->user_mutex);
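
The four sound/ hunks above are one bug class: snprintf() returns the length the output would have needed, so once a print is truncated, `j += snprintf(buf + j, buflen - j, ...)` pushes j past buflen, after which `buf + j` points out of bounds and `buflen - j` underflows to a huge size_t. scnprintf() returns the number of characters actually stored. A userspace stand-in for the kernel helper shows the difference:

#include <stdarg.h>
#include <stdio.h>

/* stand-in for the kernel's scnprintf(): report what really fit */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);
	return i >= (int)size ? (int)size - 1 : i;
}

int main(void)
{
	char buf[8];
	int a = snprintf(buf, sizeof(buf), "0123456789");
	int b = my_scnprintf(buf, sizeof(buf), "0123456789");

	/* prints "snprintf=10 scnprintf=7": only 7 chars plus NUL fit */
	printf("snprintf=%d scnprintf=%d\n", a, b);
	return 0;
}
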
5826 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5827 +index f162e607fc6c..4f78b40831d8 100644
5828 +--- a/sound/pci/hda/patch_realtek.c
5829 ++++ b/sound/pci/hda/patch_realtek.c
5830 +@@ -2447,7 +2447,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5831 + SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
5832 + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
5833 + SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
5834 ++ SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
5835 + SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
5836 ++ SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
5837 + SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
5838 + SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
5839 + SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
5840 +diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
5841 +index d1dc8e6366dc..71f2d42188c4 100644
5842 +--- a/sound/soc/atmel/Kconfig
5843 ++++ b/sound/soc/atmel/Kconfig
5844 +@@ -10,11 +10,11 @@ config SND_ATMEL_SOC
5845 + if SND_ATMEL_SOC
5846 +
5847 + config SND_ATMEL_SOC_PDC
5848 +- tristate
5849 ++ bool
5850 + depends on HAS_DMA
5851 +
5852 + config SND_ATMEL_SOC_DMA
5853 +- tristate
5854 ++ bool
5855 + select SND_SOC_GENERIC_DMAENGINE_PCM
5856 +
5857 + config SND_ATMEL_SOC_SSC
5858 +diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
5859 +index 1f6890ed3738..c7d2989791be 100644
5860 +--- a/sound/soc/atmel/Makefile
5861 ++++ b/sound/soc/atmel/Makefile
5862 +@@ -6,8 +6,14 @@ snd-soc-atmel_ssc_dai-objs := atmel_ssc_dai.o
5863 + snd-soc-atmel-i2s-objs := atmel-i2s.o
5864 + snd-soc-mchp-i2s-mcc-objs := mchp-i2s-mcc.o
5865 +
5866 +-obj-$(CONFIG_SND_ATMEL_SOC_PDC) += snd-soc-atmel-pcm-pdc.o
5867 +-obj-$(CONFIG_SND_ATMEL_SOC_DMA) += snd-soc-atmel-pcm-dma.o
5868 ++# pdc and dma need to both be built-in if any user of
5869 ++# ssc is built-in.
5870 ++ifdef CONFIG_SND_ATMEL_SOC_PDC
5871 ++obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel-pcm-pdc.o
5872 ++endif
5873 ++ifdef CONFIG_SND_ATMEL_SOC_DMA
5874 ++obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel-pcm-dma.o
5875 ++endif
5876 + obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o
5877 + obj-$(CONFIG_SND_ATMEL_SOC_I2S) += snd-soc-atmel-i2s.o
5878 + obj-$(CONFIG_SND_MCHP_SOC_I2S_MCC) += snd-soc-mchp-i2s-mcc.o
5879 +diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
5880 +index b517e4bc1b87..41b83ecaf008 100644
5881 +--- a/sound/soc/fsl/fsl_sai.c
5882 ++++ b/sound/soc/fsl/fsl_sai.c
5883 +@@ -1019,12 +1019,24 @@ static int fsl_sai_probe(struct platform_device *pdev)
5884 + ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
5885 + &fsl_sai_dai, 1);
5886 + if (ret)
5887 +- return ret;
5888 ++ goto err_pm_disable;
5889 +
5890 +- if (sai->soc_data->use_imx_pcm)
5891 +- return imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
5892 +- else
5893 +- return devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
5894 ++ if (sai->soc_data->use_imx_pcm) {
5895 ++ ret = imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
5896 ++ if (ret)
5897 ++ goto err_pm_disable;
5898 ++ } else {
5899 ++ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
5900 ++ if (ret)
5901 ++ goto err_pm_disable;
5902 ++ }
5903 ++
5904 ++ return ret;
5905 ++
5906 ++err_pm_disable:
5907 ++ pm_runtime_disable(&pdev->dev);
5908 ++
5909 ++ return ret;
5910 + }
5911 +
5912 + static int fsl_sai_remove(struct platform_device *pdev)
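
The fsl_sai hunk rewrites the tail of probe so that every failure after pm_runtime_enable() funnels through a single err_pm_disable label; previously the early `return ret` paths left runtime PM enabled on a device whose probe had failed. A userspace sketch of this goto-unwind pattern, with stubs standing in for the kernel calls:

/* Sketch of the probe unwind pattern used above: once the equivalent of
 * pm_runtime_enable() has run, every later failure must jump to a label
 * that undoes it instead of returning directly. Stubs stand in for the
 * kernel APIs. */
#include <stdio.h>

static int step_ok(const char *what)   { printf("%s\n", what); return 0; }
static int step_fail(const char *what) { printf("%s (fails)\n", what); return -1; }
static void pm_runtime_disable_stub(void) { printf("pm_runtime_disable\n"); }

static int probe(void)
{
	int ret;

	step_ok("pm_runtime_enable");

	ret = step_ok("register component");
	if (ret)
		goto err_pm_disable;

	ret = step_fail("register PCM");	/* simulate the late failure */
	if (ret)
		goto err_pm_disable;

	return 0;

err_pm_disable:
	pm_runtime_disable_stub();		/* balance pm_runtime_enable() */
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}
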
5913 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
5914 +index b6378f025836..935b5375ecc5 100644
5915 +--- a/sound/soc/soc-dapm.c
5916 ++++ b/sound/soc/soc-dapm.c
5917 +@@ -3888,9 +3888,6 @@ snd_soc_dai_link_event_pre_pmu(struct snd_soc_dapm_widget *w,
5918 + runtime->rate = params_rate(params);
5919 +
5920 + out:
5921 +- if (ret < 0)
5922 +- kfree(runtime);
5923 +-
5924 + kfree(params);
5925 + return ret;
5926 + }
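
The soc-dapm hunk drops a kfree(runtime) from the error path. By this point in snd_soc_dai_link_event_pre_pmu() the runtime appears to have already been published to the substream, so freeing it locally leaves that longer-lived owner with a dangling pointer and a later use-after-free or double free. A minimal sketch of the ownership rule, with illustrative names rather than the ASoC ones:

/* Sketch of the ownership rule behind the fix above: once an allocation
 * is published into a longer-lived object, the local error path must not
 * free it, or the owner is left holding a dangling pointer. */
#include <stdio.h>
#include <stdlib.h>

struct owner { int *buf; };

static int setup(struct owner *o)
{
	int *buf = malloc(sizeof(*buf));

	if (!buf)
		return -1;

	o->buf = buf;	/* ownership transferred to 'o' here */

	if (/* a later step fails */ 1) {
		/* WRONG: free(buf) here would leave o->buf dangling.
		 * Right: report the error and let the owner's teardown
		 * free o->buf exactly once. */
		return -1;
	}
	return 0;
}

static void teardown(struct owner *o)
{
	free(o->buf);
	o->buf = NULL;
}

int main(void)
{
	struct owner o = { 0 };

	if (setup(&o))
		printf("setup failed; owner still valid\n");
	teardown(&o);
	return 0;
}
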
5927 +diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
5928 +index 1923b0c36bce..3f645200d3a5 100644
5929 +--- a/sound/soc/sof/intel/hda-dai.c
5930 ++++ b/sound/soc/sof/intel/hda-dai.c
5931 +@@ -443,6 +443,10 @@ struct snd_soc_dai_driver skl_dai[] = {
5932 + .name = "iDisp3 Pin",
5933 + .ops = &hda_link_dai_ops,
5934 + },
5935 ++{
5936 ++ .name = "iDisp4 Pin",
5937 ++ .ops = &hda_link_dai_ops,
5938 ++},
5939 + {
5940 + .name = "Analog CPU DAI",
5941 + .ops = &hda_link_dai_ops,
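
The hda-dai hunk extends the static skl_dai[] table with an "iDisp4 Pin" entry for hardware exposing a fourth HDMI/DP display pin; without an entry of that name, a topology referencing the fourth pin has no CPU DAI to bind to. A simplified sketch of the name-based match, with illustrative structures:

/* Sketch of matching a topology DAI name against a driver table, which
 * is why the missing "iDisp4 Pin" entry mattered: with no entry, the
 * 4th display pin cannot bind. Simplified structs, not the ASoC ones. */
#include <stdio.h>
#include <string.h>

struct dai_driver { const char *name; };

static const struct dai_driver skl_dai_sketch[] = {
	{ "iDisp1 Pin" }, { "iDisp2 Pin" }, { "iDisp3 Pin" },
	{ "iDisp4 Pin" },	/* the entry the patch adds */
	{ "Analog CPU DAI" },
};

static const struct dai_driver *find_dai(const char *name)
{
	for (size_t i = 0; i < sizeof(skl_dai_sketch) / sizeof(skl_dai_sketch[0]); i++)
		if (!strcmp(skl_dai_sketch[i].name, name))
			return &skl_dai_sketch[i];
	return NULL;
}

int main(void)
{
	printf("iDisp4 Pin: %s\n", find_dai("iDisp4 Pin") ? "found" : "missing");
	return 0;
}
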
5942 +diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
5943 +index 55798bc8eae2..686561df8e13 100644
5944 +--- a/sound/soc/sunxi/sun8i-codec.c
5945 ++++ b/sound/soc/sunxi/sun8i-codec.c
5946 +@@ -80,6 +80,7 @@
5947 +
5948 + #define SUN8I_SYS_SR_CTRL_AIF1_FS_MASK GENMASK(15, 12)
5949 + #define SUN8I_SYS_SR_CTRL_AIF2_FS_MASK GENMASK(11, 8)
5950 ++#define SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK GENMASK(3, 2)
5951 + #define SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_MASK GENMASK(5, 4)
5952 + #define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK GENMASK(8, 6)
5953 + #define SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK GENMASK(12, 9)
5954 +@@ -241,7 +242,7 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
5955 + return -EINVAL;
5956 + }
5957 + regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
5958 +- BIT(SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT),
5959 ++ SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK,
5960 + value << SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT);
5961 +
5962 + return 0;
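
The sun8i-codec hunk fixes a classic update-bits bug: AIF1_DATA_FMT is a two-bit field at bits 3:2, but the old mask was BIT(SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT), a single bit, so the other bit of the field could never be cleared and a stale format setting survived. The new GENMASK(3, 2) mask covers the whole field. A userspace sketch of the difference, with BIT/GENMASK re-derived locally:

/* Sketch of the update-bits bug: a 2-bit register field at bits 3:2
 * updated with a 1-bit mask leaves the other bit of the field stale. */
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & ~((1u << (l)) - 1))

#define FMT_SHIFT 2

static unsigned int update_bits(unsigned int reg, unsigned int mask,
				unsigned int val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int reg  = GENMASK(3, 2);	/* field currently 0b11 */
	unsigned int want = 1u << FMT_SHIFT;	/* new format: 0b01 */

	/* Buggy: mask is one bit, so bit 3 is never cleared -> prints 0xc. */
	printf("BIT mask:     0x%x\n", update_bits(reg, BIT(FMT_SHIFT), want));

	/* Fixed: mask covers the whole field -> prints 0x4. */
	printf("GENMASK mask: 0x%x\n", update_bits(reg, GENMASK(3, 2), want));
	return 0;
}
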
5963 +diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
5964 +index 07f5b462c2ef..aa43e0bd210c 100644
5965 +--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
5966 ++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
5967 +@@ -3,6 +3,11 @@
5968 +
5969 + #include "test_progs.h"
5970 +
5971 ++#define TCP_REPAIR 19 /* TCP sock is under repair right now */
5972 ++
5973 ++#define TCP_REPAIR_ON 1
5974 ++#define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */
5975 ++
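
The defines above mirror the TCP_REPAIR UAPI constants locally so the selftest builds against older system headers. connected_socket_v4(), which follows, uses repair mode to obtain an established TCP socket without a live peer: with TCP_REPAIR on, connect() just installs the destination instead of performing a handshake. A standalone sketch of the same trick; it requires CAP_NET_ADMIN, and IPPROTO_TCP equals SOL_TCP on Linux:

/* Sketch of the TCP_REPAIR trick the selftest uses: with repair mode
 * on, connect() records the destination without a real handshake,
 * yielding an "established" socket with no peer. Needs CAP_NET_ADMIN. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef TCP_REPAIR
#define TCP_REPAIR 19
#endif

int main(void)
{
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_port = htons(80) };
	int on = 1;			/* TCP_REPAIR_ON */
	int off_no_wp = -1;		/* TCP_REPAIR_OFF_NO_WP */
	int s = socket(AF_INET, SOCK_STREAM, 0);

	if (s < 0) {
		perror("socket");
		return 1;
	}
	inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

	if (setsockopt(s, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on)) < 0) {
		perror("TCP_REPAIR on (needs CAP_NET_ADMIN)");
		return 1;
	}
	if (connect(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("connect");

	/* Leave repair mode without sending window probes. */
	setsockopt(s, IPPROTO_TCP, TCP_REPAIR, &off_no_wp, sizeof(off_no_wp));
	close(s);
	return 0;
}
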
5976 + static int connected_socket_v4(void)
5977 + {
5978 + struct sockaddr_in addr = {