From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.5 commit in: /
Date: Fri, 28 Feb 2020 18:31:35 +0000 (UTC)
Message-Id: 1582914678.73ca5fd154594c0936d64b6e648d3083d1826fe2.mpagano@gentoo
commit:     73ca5fd154594c0936d64b6e648d3083d1826fe2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 28 18:31:18 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Feb 28 18:31:18 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=73ca5fd1

Linux patch 5.5.7

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1006_linux-5.5.7.patch | 6813 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6817 insertions(+)

16 diff --git a/0000_README b/0000_README
17 index ff99e11..7611ed2 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -67,6 +67,10 @@ Patch: 1005_linux-5.5.6.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.5.6
23
24 +Patch: 1006_linux-5.5.7.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.5.7
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1006_linux-5.5.7.patch b/1006_linux-5.5.7.patch
33 new file mode 100644
34 index 0000000..345bb5c
35 --- /dev/null
36 +++ b/1006_linux-5.5.7.patch
37 @@ -0,0 +1,6813 @@
38 +diff --git a/Documentation/arm64/tagged-address-abi.rst b/Documentation/arm64/tagged-address-abi.rst
39 +index d4a85d535bf9..4a9d9c794ee5 100644
40 +--- a/Documentation/arm64/tagged-address-abi.rst
41 ++++ b/Documentation/arm64/tagged-address-abi.rst
42 +@@ -44,8 +44,15 @@ The AArch64 Tagged Address ABI has two stages of relaxation depending
43 + how the user addresses are used by the kernel:
44 +
45 + 1. User addresses not accessed by the kernel but used for address space
46 +- management (e.g. ``mmap()``, ``mprotect()``, ``madvise()``). The use
47 +- of valid tagged pointers in this context is always allowed.
48 ++ management (e.g. ``mprotect()``, ``madvise()``). The use of valid
49 ++ tagged pointers in this context is allowed with the exception of
50 ++ ``brk()``, ``mmap()`` and the ``new_address`` argument to
51 ++ ``mremap()`` as these have the potential to alias with existing
52 ++ user addresses.
53 ++
54 ++ NOTE: This behaviour changed in v5.6 and so some earlier kernels may
55 ++ incorrectly accept valid tagged pointers for the ``brk()``,
56 ++ ``mmap()`` and ``mremap()`` system calls.
57 +
58 + 2. User addresses accessed by the kernel (e.g. ``write()``). This ABI
59 + relaxation is disabled by default and the application thread needs to
60 +diff --git a/MAINTAINERS b/MAINTAINERS
61 +index 44bc9d7f04a4..e73a47a881b0 100644
62 +--- a/MAINTAINERS
63 ++++ b/MAINTAINERS
64 +@@ -8302,7 +8302,7 @@ M: Joonas Lahtinen <joonas.lahtinen@×××××××××××.com>
65 + M: Rodrigo Vivi <rodrigo.vivi@×××××.com>
66 + L: intel-gfx@×××××××××××××××××.org
67 + W: https://01.org/linuxgraphics/
68 +-B: https://01.org/linuxgraphics/documentation/how-report-bugs
69 ++B: https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
70 + C: irc://chat.freenode.net/intel-gfx
71 + Q: http://patchwork.freedesktop.org/project/intel-gfx/
72 + T: git git://anongit.freedesktop.org/drm-intel
73 +diff --git a/Makefile b/Makefile
74 +index 7fb236f30926..0f64b92fa39a 100644
75 +--- a/Makefile
76 ++++ b/Makefile
77 +@@ -1,7 +1,7 @@
78 + # SPDX-License-Identifier: GPL-2.0
79 + VERSION = 5
80 + PATCHLEVEL = 5
81 +-SUBLEVEL = 6
82 ++SUBLEVEL = 7
83 + EXTRAVERSION =
84 + NAME = Kleptomaniac Octopus
85 +
86 +diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
87 +index 73834996c4b6..5de132100b6d 100644
88 +--- a/arch/arm64/include/asm/lse.h
89 ++++ b/arch/arm64/include/asm/lse.h
90 +@@ -6,7 +6,7 @@
91 +
92 + #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
93 +
94 +-#define __LSE_PREAMBLE ".arch armv8-a+lse\n"
95 ++#define __LSE_PREAMBLE ".arch_extension lse\n"
96 +
97 + #include <linux/compiler_types.h>
98 + #include <linux/export.h>
99 +diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
100 +index a4f9ca5479b0..4d94676e5a8b 100644
101 +--- a/arch/arm64/include/asm/memory.h
102 ++++ b/arch/arm64/include/asm/memory.h
103 +@@ -213,7 +213,7 @@ static inline unsigned long kaslr_offset(void)
104 + ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
105 +
106 + #define untagged_addr(addr) ({ \
107 +- u64 __addr = (__force u64)addr; \
108 ++ u64 __addr = (__force u64)(addr); \
109 + __addr &= __untagged_addr(__addr); \
110 + (__force __typeof__(addr))__addr; \
111 + })
112 +diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
113 +index 5accda2767be..a3301bab9231 100644
114 +--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
115 ++++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
116 +@@ -1,5 +1,6 @@
117 + // SPDX-License-Identifier: GPL-2.0
118 + #include <dt-bindings/clock/jz4740-cgu.h>
119 ++#include <dt-bindings/clock/ingenic,tcu.h>
120 +
121 + / {
122 + #address-cells = <1>;
123 +@@ -45,14 +46,6 @@
124 + #clock-cells = <1>;
125 + };
126 +
127 +- watchdog: watchdog@10002000 {
128 +- compatible = "ingenic,jz4740-watchdog";
129 +- reg = <0x10002000 0x10>;
130 +-
131 +- clocks = <&cgu JZ4740_CLK_RTC>;
132 +- clock-names = "rtc";
133 +- };
134 +-
135 + tcu: timer@10002000 {
136 + compatible = "ingenic,jz4740-tcu", "simple-mfd";
137 + reg = <0x10002000 0x1000>;
138 +@@ -73,6 +66,14 @@
139 +
140 + interrupt-parent = <&intc>;
141 + interrupts = <23 22 21>;
142 ++
143 ++ watchdog: watchdog@0 {
144 ++ compatible = "ingenic,jz4740-watchdog";
145 ++ reg = <0x0 0xc>;
146 ++
147 ++ clocks = <&tcu TCU_CLK_WDT>;
148 ++ clock-names = "wdt";
149 ++ };
150 + };
151 +
152 + rtc_dev: rtc@10003000 {
153 +diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
154 +index f928329b034b..bb89653d16a3 100644
155 +--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi
156 ++++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
157 +@@ -1,5 +1,6 @@
158 + // SPDX-License-Identifier: GPL-2.0
159 + #include <dt-bindings/clock/jz4780-cgu.h>
160 ++#include <dt-bindings/clock/ingenic,tcu.h>
161 + #include <dt-bindings/dma/jz4780-dma.h>
162 +
163 + / {
164 +@@ -67,6 +68,14 @@
165 +
166 + interrupt-parent = <&intc>;
167 + interrupts = <27 26 25>;
168 ++
169 ++ watchdog: watchdog@0 {
170 ++ compatible = "ingenic,jz4780-watchdog";
171 ++ reg = <0x0 0xc>;
172 ++
173 ++ clocks = <&tcu TCU_CLK_WDT>;
174 ++ clock-names = "wdt";
175 ++ };
176 + };
177 +
178 + rtc_dev: rtc@10003000 {
179 +@@ -348,14 +357,6 @@
180 + status = "disabled";
181 + };
182 +
183 +- watchdog: watchdog@10002000 {
184 +- compatible = "ingenic,jz4780-watchdog";
185 +- reg = <0x10002000 0x10>;
186 +-
187 +- clocks = <&cgu JZ4780_CLK_RTCLK>;
188 +- clock-names = "rtc";
189 +- };
190 +-
191 + nemc: nemc@13410000 {
192 + compatible = "ingenic,jz4780-nemc";
193 + reg = <0x13410000 0x10000>;
194 +diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
195 +index 7f1fd41e3065..9b97c6091c5c 100644
196 +--- a/arch/powerpc/include/asm/page.h
197 ++++ b/arch/powerpc/include/asm/page.h
198 +@@ -295,8 +295,13 @@ static inline bool pfn_valid(unsigned long pfn)
199 + /*
200 + * Some number of bits at the level of the page table that points to
201 + * a hugepte are used to encode the size. This masks those bits.
202 ++ * On 8xx, HW assistance requires 4k alignment for the hugepte.
203 + */
204 ++#ifdef CONFIG_PPC_8xx
205 ++#define HUGEPD_SHIFT_MASK 0xfff
206 ++#else
207 + #define HUGEPD_SHIFT_MASK 0x3f
208 ++#endif
209 +
210 + #ifndef __ASSEMBLY__
211 +
212 +diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
213 +index a1eaffe868de..7b048cee767c 100644
214 +--- a/arch/powerpc/kernel/eeh_driver.c
215 ++++ b/arch/powerpc/kernel/eeh_driver.c
216 +@@ -1184,6 +1184,17 @@ void eeh_handle_special_event(void)
217 + eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
218 + eeh_handle_normal_event(pe);
219 + } else {
220 ++ eeh_for_each_pe(pe, tmp_pe)
221 ++ eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
222 ++ edev->mode &= ~EEH_DEV_NO_HANDLER;
223 ++
224 ++ /* Notify all devices to be down */
225 ++ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
226 ++ eeh_set_channel_state(pe, pci_channel_io_perm_failure);
227 ++ eeh_pe_report(
228 ++ "error_detected(permanent failure)", pe,
229 ++ eeh_report_failure, NULL);
230 ++
231 + pci_lock_rescan_remove();
232 + list_for_each_entry(hose, &hose_list, list_node) {
233 + phb_pe = eeh_phb_pe_get(hose);
234 +@@ -1192,16 +1203,6 @@ void eeh_handle_special_event(void)
235 + (phb_pe->state & EEH_PE_RECOVERING))
236 + continue;
237 +
238 +- eeh_for_each_pe(pe, tmp_pe)
239 +- eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
240 +- edev->mode &= ~EEH_DEV_NO_HANDLER;
241 +-
242 +- /* Notify all devices to be down */
243 +- eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
244 +- eeh_set_channel_state(pe, pci_channel_io_perm_failure);
245 +- eeh_pe_report(
246 +- "error_detected(permanent failure)", pe,
247 +- eeh_report_failure, NULL);
248 + bus = eeh_pe_bus_get(phb_pe);
249 + if (!bus) {
250 + pr_err("%s: Cannot find PCI bus for "
251 +diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
252 +index 59bb4f4ae316..13f699256258 100644
253 +--- a/arch/powerpc/kernel/entry_32.S
254 ++++ b/arch/powerpc/kernel/entry_32.S
255 +@@ -778,7 +778,7 @@ fast_exception_return:
256 + 1: lis r3,exc_exit_restart_end@ha
257 + addi r3,r3,exc_exit_restart_end@l
258 + cmplw r12,r3
259 +-#if CONFIG_PPC_BOOK3S_601
260 ++#ifdef CONFIG_PPC_BOOK3S_601
261 + bge 2b
262 + #else
263 + bge 3f
264 +@@ -786,7 +786,7 @@ fast_exception_return:
265 + lis r4,exc_exit_restart@ha
266 + addi r4,r4,exc_exit_restart@l
267 + cmplw r12,r4
268 +-#if CONFIG_PPC_BOOK3S_601
269 ++#ifdef CONFIG_PPC_BOOK3S_601
270 + blt 2b
271 + #else
272 + blt 3f
273 +diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
274 +index 19f583e18402..98d8b6832fcb 100644
275 +--- a/arch/powerpc/kernel/head_8xx.S
276 ++++ b/arch/powerpc/kernel/head_8xx.S
277 +@@ -289,7 +289,7 @@ InstructionTLBMiss:
278 + * set. All other Linux PTE bits control the behavior
279 + * of the MMU.
280 + */
281 +- rlwimi r10, r10, 0, 0x0f00 /* Clear bits 20-23 */
282 ++ rlwinm r10, r10, 0, ~0x0f00 /* Clear bits 20-23 */
283 + rlwimi r10, r10, 4, 0x0400 /* Copy _PAGE_EXEC into bit 21 */
284 + ori r10, r10, RPN_PATTERN | 0x200 /* Set 22 and 24-27 */
285 + mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
286 +diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
287 +index e6c30cee6abf..d215f9554553 100644
288 +--- a/arch/powerpc/kernel/signal.c
289 ++++ b/arch/powerpc/kernel/signal.c
290 +@@ -200,14 +200,27 @@ unsigned long get_tm_stackpointer(struct task_struct *tsk)
291 + * normal/non-checkpointed stack pointer.
292 + */
293 +
294 ++ unsigned long ret = tsk->thread.regs->gpr[1];
295 ++
296 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
297 + BUG_ON(tsk != current);
298 +
299 + if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
300 ++ preempt_disable();
301 + tm_reclaim_current(TM_CAUSE_SIGNAL);
302 + if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
303 +- return tsk->thread.ckpt_regs.gpr[1];
304 ++ ret = tsk->thread.ckpt_regs.gpr[1];
305 ++
306 ++ /*
307 ++ * If we treclaim, we must clear the current thread's TM bits
308 ++ * before re-enabling preemption. Otherwise we might be
309 ++ * preempted and have the live MSR[TS] changed behind our back
310 ++ * (tm_recheckpoint_new_task() would recheckpoint). Besides, we
311 ++ * enter the signal handler in non-transactional state.
312 ++ */
313 ++ tsk->thread.regs->msr &= ~MSR_TS_MASK;
314 ++ preempt_enable();
315 + }
316 + #endif
317 +- return tsk->thread.regs->gpr[1];
318 ++ return ret;
319 + }
320 +diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
321 +index 98600b276f76..1b090a76b444 100644
322 +--- a/arch/powerpc/kernel/signal_32.c
323 ++++ b/arch/powerpc/kernel/signal_32.c
324 +@@ -489,19 +489,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
325 + */
326 + static int save_tm_user_regs(struct pt_regs *regs,
327 + struct mcontext __user *frame,
328 +- struct mcontext __user *tm_frame, int sigret)
329 ++ struct mcontext __user *tm_frame, int sigret,
330 ++ unsigned long msr)
331 + {
332 +- unsigned long msr = regs->msr;
333 +-
334 + WARN_ON(tm_suspend_disabled);
335 +
336 +- /* Remove TM bits from thread's MSR. The MSR in the sigcontext
337 +- * just indicates to userland that we were doing a transaction, but we
338 +- * don't want to return in transactional state. This also ensures
339 +- * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
340 +- */
341 +- regs->msr &= ~MSR_TS_MASK;
342 +-
343 + /* Save both sets of general registers */
344 + if (save_general_regs(&current->thread.ckpt_regs, frame)
345 + || save_general_regs(regs, tm_frame))
346 +@@ -912,6 +904,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
347 + int sigret;
348 + unsigned long tramp;
349 + struct pt_regs *regs = tsk->thread.regs;
350 ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
351 ++ /* Save the thread's msr before get_tm_stackpointer() changes it */
352 ++ unsigned long msr = regs->msr;
353 ++#endif
354 +
355 + BUG_ON(tsk != current);
356 +
357 +@@ -944,13 +940,13 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
358 +
359 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
360 + tm_frame = &rt_sf->uc_transact.uc_mcontext;
361 +- if (MSR_TM_ACTIVE(regs->msr)) {
362 ++ if (MSR_TM_ACTIVE(msr)) {
363 + if (__put_user((unsigned long)&rt_sf->uc_transact,
364 + &rt_sf->uc.uc_link) ||
365 + __put_user((unsigned long)tm_frame,
366 + &rt_sf->uc_transact.uc_regs))
367 + goto badframe;
368 +- if (save_tm_user_regs(regs, frame, tm_frame, sigret))
369 ++ if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
370 + goto badframe;
371 + }
372 + else
373 +@@ -1369,6 +1365,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
374 + int sigret;
375 + unsigned long tramp;
376 + struct pt_regs *regs = tsk->thread.regs;
377 ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
378 ++ /* Save the thread's msr before get_tm_stackpointer() changes it */
379 ++ unsigned long msr = regs->msr;
380 ++#endif
381 +
382 + BUG_ON(tsk != current);
383 +
384 +@@ -1402,9 +1402,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
385 +
386 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
387 + tm_mctx = &frame->mctx_transact;
388 +- if (MSR_TM_ACTIVE(regs->msr)) {
389 ++ if (MSR_TM_ACTIVE(msr)) {
390 + if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
391 +- sigret))
392 ++ sigret, msr))
393 + goto badframe;
394 + }
395 + else
396 +diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
397 +index 117515564ec7..84ed2e77ef9c 100644
398 +--- a/arch/powerpc/kernel/signal_64.c
399 ++++ b/arch/powerpc/kernel/signal_64.c
400 +@@ -192,7 +192,8 @@ static long setup_sigcontext(struct sigcontext __user *sc,
401 + static long setup_tm_sigcontexts(struct sigcontext __user *sc,
402 + struct sigcontext __user *tm_sc,
403 + struct task_struct *tsk,
404 +- int signr, sigset_t *set, unsigned long handler)
405 ++ int signr, sigset_t *set, unsigned long handler,
406 ++ unsigned long msr)
407 + {
408 + /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
409 + * process never used altivec yet (MSR_VEC is zero in pt_regs of
410 +@@ -207,12 +208,11 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
411 + elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
412 + #endif
413 + struct pt_regs *regs = tsk->thread.regs;
414 +- unsigned long msr = tsk->thread.regs->msr;
415 + long err = 0;
416 +
417 + BUG_ON(tsk != current);
418 +
419 +- BUG_ON(!MSR_TM_ACTIVE(regs->msr));
420 ++ BUG_ON(!MSR_TM_ACTIVE(msr));
421 +
422 + WARN_ON(tm_suspend_disabled);
423 +
424 +@@ -222,13 +222,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
425 + */
426 + msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
427 +
428 +- /* Remove TM bits from thread's MSR. The MSR in the sigcontext
429 +- * just indicates to userland that we were doing a transaction, but we
430 +- * don't want to return in transactional state. This also ensures
431 +- * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
432 +- */
433 +- regs->msr &= ~MSR_TS_MASK;
434 +-
435 + #ifdef CONFIG_ALTIVEC
436 + err |= __put_user(v_regs, &sc->v_regs);
437 + err |= __put_user(tm_v_regs, &tm_sc->v_regs);
438 +@@ -824,6 +817,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
439 + unsigned long newsp = 0;
440 + long err = 0;
441 + struct pt_regs *regs = tsk->thread.regs;
442 ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
443 ++ /* Save the thread's msr before get_tm_stackpointer() changes it */
444 ++ unsigned long msr = regs->msr;
445 ++#endif
446 +
447 + BUG_ON(tsk != current);
448 +
449 +@@ -841,7 +838,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
450 + err |= __put_user(0, &frame->uc.uc_flags);
451 + err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
452 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
453 +- if (MSR_TM_ACTIVE(regs->msr)) {
454 ++ if (MSR_TM_ACTIVE(msr)) {
455 + /* The ucontext_t passed to userland points to the second
456 + * ucontext_t (for transactional state) with its uc_link ptr.
457 + */
458 +@@ -849,7 +846,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
459 + err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
460 + &frame->uc_transact.uc_mcontext,
461 + tsk, ksig->sig, NULL,
462 +- (unsigned long)ksig->ka.sa.sa_handler);
463 ++ (unsigned long)ksig->ka.sa.sa_handler,
464 ++ msr);
465 + } else
466 + #endif
467 + {
468 +diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
469 +index 73d4873fc7f8..33b3461d91e8 100644
470 +--- a/arch/powerpc/mm/hugetlbpage.c
471 ++++ b/arch/powerpc/mm/hugetlbpage.c
472 +@@ -53,20 +53,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
473 + if (pshift >= pdshift) {
474 + cachep = PGT_CACHE(PTE_T_ORDER);
475 + num_hugepd = 1 << (pshift - pdshift);
476 ++ new = NULL;
477 + } else if (IS_ENABLED(CONFIG_PPC_8xx)) {
478 +- cachep = PGT_CACHE(PTE_INDEX_SIZE);
479 ++ cachep = NULL;
480 + num_hugepd = 1;
481 ++ new = pte_alloc_one(mm);
482 + } else {
483 + cachep = PGT_CACHE(pdshift - pshift);
484 + num_hugepd = 1;
485 ++ new = NULL;
486 + }
487 +
488 +- if (!cachep) {
489 ++ if (!cachep && !new) {
490 + WARN_ONCE(1, "No page table cache created for hugetlb tables");
491 + return -ENOMEM;
492 + }
493 +
494 +- new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
495 ++ if (cachep)
496 ++ new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
497 +
498 + BUG_ON(pshift > HUGEPD_SHIFT_MASK);
499 + BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
500 +@@ -97,7 +101,10 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
501 + if (i < num_hugepd) {
502 + for (i = i - 1 ; i >= 0; i--, hpdp--)
503 + *hpdp = __hugepd(0);
504 +- kmem_cache_free(cachep, new);
505 ++ if (cachep)
506 ++ kmem_cache_free(cachep, new);
507 ++ else
508 ++ pte_free(mm, new);
509 + } else {
510 + kmemleak_ignore(new);
511 + }
512 +@@ -324,8 +331,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
513 + if (shift >= pdshift)
514 + hugepd_free(tlb, hugepte);
515 + else if (IS_ENABLED(CONFIG_PPC_8xx))
516 +- pgtable_free_tlb(tlb, hugepte,
517 +- get_hugepd_cache_index(PTE_INDEX_SIZE));
518 ++ pgtable_free_tlb(tlb, hugepte, 0);
519 + else
520 + pgtable_free_tlb(tlb, hugepte,
521 + get_hugepd_cache_index(pdshift - shift));
522 +@@ -639,12 +645,13 @@ static int __init hugetlbpage_init(void)
523 + * if we have pdshift and shift value same, we don't
524 + * use pgt cache for hugepd.
525 + */
526 +- if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
527 +- pgtable_cache_add(PTE_INDEX_SIZE);
528 +- else if (pdshift > shift)
529 +- pgtable_cache_add(pdshift - shift);
530 +- else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
531 ++ if (pdshift > shift) {
532 ++ if (!IS_ENABLED(CONFIG_PPC_8xx))
533 ++ pgtable_cache_add(pdshift - shift);
534 ++ } else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
535 ++ IS_ENABLED(CONFIG_PPC_8xx)) {
536 + pgtable_cache_add(PTE_T_ORDER);
537 ++ }
538 +
539 + configured = true;
540 + }
541 +diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
542 +index 5d12352545c5..5591243d673e 100644
543 +--- a/arch/s390/boot/kaslr.c
544 ++++ b/arch/s390/boot/kaslr.c
545 +@@ -75,7 +75,7 @@ static unsigned long get_random(unsigned long limit)
546 + *(unsigned long *) prng.parm_block ^= seed;
547 + for (i = 0; i < 16; i++) {
548 + cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
549 +- (char *) entropy, (char *) entropy,
550 ++ (u8 *) entropy, (u8 *) entropy,
551 + sizeof(entropy));
552 + memcpy(prng.parm_block, entropy, sizeof(entropy));
553 + }
554 +diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
555 +index 85e944f04c70..1019efd85b9d 100644
556 +--- a/arch/s390/include/asm/page.h
557 ++++ b/arch/s390/include/asm/page.h
558 +@@ -42,7 +42,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end);
559 +
560 + static inline void storage_key_init_range(unsigned long start, unsigned long end)
561 + {
562 +- if (PAGE_DEFAULT_KEY)
563 ++ if (PAGE_DEFAULT_KEY != 0)
564 + __storage_key_init_range(start, end);
565 + }
566 +
567 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
568 +index 178e4e1a47f5..7425c83fd343 100644
569 +--- a/arch/x86/include/asm/kvm_host.h
570 ++++ b/arch/x86/include/asm/kvm_host.h
571 +@@ -1115,7 +1115,7 @@ struct kvm_x86_ops {
572 + void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
573 + void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
574 + void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
575 +- void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
576 ++ int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
577 + int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
578 + int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
579 + int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
580 +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
581 +index 084e98da04a7..717660f82f8f 100644
582 +--- a/arch/x86/include/asm/msr-index.h
583 ++++ b/arch/x86/include/asm/msr-index.h
584 +@@ -512,6 +512,8 @@
585 + #define MSR_K7_HWCR 0xc0010015
586 + #define MSR_K7_HWCR_SMMLOCK_BIT 0
587 + #define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
588 ++#define MSR_K7_HWCR_IRPERF_EN_BIT 30
589 ++#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
590 + #define MSR_K7_FID_VID_CTL 0xc0010041
591 + #define MSR_K7_FID_VID_STATUS 0xc0010042
592 +
593 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
594 +index 62c30279be77..c3f4dd4ae155 100644
595 +--- a/arch/x86/kernel/cpu/amd.c
596 ++++ b/arch/x86/kernel/cpu/amd.c
597 +@@ -28,6 +28,7 @@
598 +
599 + static const int amd_erratum_383[];
600 + static const int amd_erratum_400[];
601 ++static const int amd_erratum_1054[];
602 + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
603 +
604 + /*
605 +@@ -978,6 +979,15 @@ static void init_amd(struct cpuinfo_x86 *c)
606 + /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
607 + if (!cpu_has(c, X86_FEATURE_XENPV))
608 + set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
609 ++
610 ++ /*
611 ++ * Turn on the Instructions Retired free counter on machines not
612 ++ * susceptible to erratum #1054 "Instructions Retired Performance
613 ++ * Counter May Be Inaccurate".
614 ++ */
615 ++ if (cpu_has(c, X86_FEATURE_IRPERF) &&
616 ++ !cpu_has_amd_erratum(c, amd_erratum_1054))
617 ++ msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
618 + }
619 +
620 + #ifdef CONFIG_X86_32
621 +@@ -1105,6 +1115,10 @@ static const int amd_erratum_400[] =
622 + static const int amd_erratum_383[] =
623 + AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
624 +
625 ++/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
626 ++static const int amd_erratum_1054[] =
627 ++ AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
628 ++
629 +
630 + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
631 + {
632 +diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
633 +index d6cf5c18a7e0..f031c651dd32 100644
634 +--- a/arch/x86/kernel/cpu/mce/amd.c
635 ++++ b/arch/x86/kernel/cpu/mce/amd.c
636 +@@ -1161,9 +1161,12 @@ static const struct sysfs_ops threshold_ops = {
637 + .store = store,
638 + };
639 +
640 ++static void threshold_block_release(struct kobject *kobj);
641 ++
642 + static struct kobj_type threshold_ktype = {
643 + .sysfs_ops = &threshold_ops,
644 + .default_attrs = default_attrs,
645 ++ .release = threshold_block_release,
646 + };
647 +
648 + static const char *get_name(unsigned int bank, struct threshold_block *b)
649 +@@ -1196,8 +1199,9 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
650 + return buf_mcatype;
651 + }
652 +
653 +-static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
654 +- unsigned int block, u32 address)
655 ++static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
656 ++ unsigned int bank, unsigned int block,
657 ++ u32 address)
658 + {
659 + struct threshold_block *b = NULL;
660 + u32 low, high;
661 +@@ -1241,16 +1245,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
662 +
663 + INIT_LIST_HEAD(&b->miscj);
664 +
665 +- if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
666 +- list_add(&b->miscj,
667 +- &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
668 +- } else {
669 +- per_cpu(threshold_banks, cpu)[bank]->blocks = b;
670 +- }
671 ++ if (tb->blocks)
672 ++ list_add(&b->miscj, &tb->blocks->miscj);
673 ++ else
674 ++ tb->blocks = b;
675 +
676 +- err = kobject_init_and_add(&b->kobj, &threshold_ktype,
677 +- per_cpu(threshold_banks, cpu)[bank]->kobj,
678 +- get_name(bank, b));
679 ++ err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
680 + if (err)
681 + goto out_free;
682 + recurse:
683 +@@ -1258,7 +1258,7 @@ recurse:
684 + if (!address)
685 + return 0;
686 +
687 +- err = allocate_threshold_blocks(cpu, bank, block, address);
688 ++ err = allocate_threshold_blocks(cpu, tb, bank, block, address);
689 + if (err)
690 + goto out_free;
691 +
692 +@@ -1343,8 +1343,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
693 + goto out_free;
694 + }
695 +
696 +- per_cpu(threshold_banks, cpu)[bank] = b;
697 +-
698 + if (is_shared_bank(bank)) {
699 + refcount_set(&b->cpus, 1);
700 +
701 +@@ -1355,9 +1353,13 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
702 + }
703 + }
704 +
705 +- err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
706 +- if (!err)
707 +- goto out;
708 ++ err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
709 ++ if (err)
710 ++ goto out_free;
711 ++
712 ++ per_cpu(threshold_banks, cpu)[bank] = b;
713 ++
714 ++ return 0;
715 +
716 + out_free:
717 + kfree(b);
718 +@@ -1366,8 +1368,12 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
719 + return err;
720 + }
721 +
722 +-static void deallocate_threshold_block(unsigned int cpu,
723 +- unsigned int bank)
724 ++static void threshold_block_release(struct kobject *kobj)
725 ++{
726 ++ kfree(to_block(kobj));
727 ++}
728 ++
729 ++static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
730 + {
731 + struct threshold_block *pos = NULL;
732 + struct threshold_block *tmp = NULL;
733 +@@ -1377,13 +1383,11 @@ static void deallocate_threshold_block(unsigned int cpu,
734 + return;
735 +
736 + list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
737 +- kobject_put(&pos->kobj);
738 + list_del(&pos->miscj);
739 +- kfree(pos);
740 ++ kobject_put(&pos->kobj);
741 + }
742 +
743 +- kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
744 +- per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
745 ++ kobject_put(&head->blocks->kobj);
746 + }
747 +
748 + static void __threshold_remove_blocks(struct threshold_bank *b)
749 +diff --git a/arch/x86/kernel/ima_arch.c b/arch/x86/kernel/ima_arch.c
750 +index 4d4f5d9faac3..23054909c8dd 100644
751 +--- a/arch/x86/kernel/ima_arch.c
752 ++++ b/arch/x86/kernel/ima_arch.c
753 +@@ -10,8 +10,6 @@ extern struct boot_params boot_params;
754 +
755 + static enum efi_secureboot_mode get_sb_mode(void)
756 + {
757 +- efi_char16_t efi_SecureBoot_name[] = L"SecureBoot";
758 +- efi_char16_t efi_SetupMode_name[] = L"SecureBoot";
759 + efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
760 + efi_status_t status;
761 + unsigned long size;
762 +@@ -25,7 +23,7 @@ static enum efi_secureboot_mode get_sb_mode(void)
763 + }
764 +
765 + /* Get variable contents into buffer */
766 +- status = efi.get_variable(efi_SecureBoot_name, &efi_variable_guid,
767 ++ status = efi.get_variable(L"SecureBoot", &efi_variable_guid,
768 + NULL, &size, &secboot);
769 + if (status == EFI_NOT_FOUND) {
770 + pr_info("ima: secureboot mode disabled\n");
771 +@@ -38,7 +36,7 @@ static enum efi_secureboot_mode get_sb_mode(void)
772 + }
773 +
774 + size = sizeof(setupmode);
775 +- status = efi.get_variable(efi_SetupMode_name, &efi_variable_guid,
776 ++ status = efi.get_variable(L"SetupMode", &efi_variable_guid,
777 + NULL, &size, &setupmode);
778 +
779 + if (status != EFI_SUCCESS) /* ignore unknown SetupMode */
780 +diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
781 +index 8ecd48d31800..5ddcaacef291 100644
782 +--- a/arch/x86/kvm/irq_comm.c
783 ++++ b/arch/x86/kvm/irq_comm.c
784 +@@ -416,7 +416,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
785 +
786 + kvm_set_msi_irq(vcpu->kvm, entry, &irq);
787 +
788 +- if (irq.level && kvm_apic_match_dest(vcpu, NULL, 0,
789 ++ if (irq.trig_mode && kvm_apic_match_dest(vcpu, NULL, 0,
790 + irq.dest_id, irq.dest_mode))
791 + __set_bit(irq.vector, ioapic_handled_vectors);
792 + }
793 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
794 +index 3323115f52d5..f05123acaa64 100644
795 +--- a/arch/x86/kvm/lapic.c
796 ++++ b/arch/x86/kvm/lapic.c
797 +@@ -630,9 +630,11 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
798 + static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
799 + {
800 + u8 val;
801 +- if (pv_eoi_get_user(vcpu, &val) < 0)
802 ++ if (pv_eoi_get_user(vcpu, &val) < 0) {
803 + printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
804 + (unsigned long long)vcpu->arch.pv_eoi.msr_val);
805 ++ return false;
806 ++ }
807 + return val & 0x1;
808 + }
809 +
810 +@@ -1049,11 +1051,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
811 + apic->regs + APIC_TMR);
812 + }
813 +
814 +- if (vcpu->arch.apicv_active)
815 +- kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
816 +- else {
817 ++ if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
818 + kvm_lapic_set_irr(vector, apic);
819 +-
820 + kvm_make_request(KVM_REQ_EVENT, vcpu);
821 + kvm_vcpu_kick(vcpu);
822 + }
823 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
824 +index 8b0620f3aed6..aace3b6ca2f7 100644
825 +--- a/arch/x86/kvm/svm.c
826 ++++ b/arch/x86/kvm/svm.c
827 +@@ -5160,8 +5160,11 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
828 + return;
829 + }
830 +
831 +-static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
832 ++static int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
833 + {
834 ++ if (!vcpu->arch.apicv_active)
835 ++ return -1;
836 ++
837 + kvm_lapic_set_irr(vec, vcpu->arch.apic);
838 + smp_mb__after_atomic();
839 +
840 +@@ -5173,6 +5176,8 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
841 + put_cpu();
842 + } else
843 + kvm_vcpu_wake_up(vcpu);
844 ++
845 ++ return 0;
846 + }
847 +
848 + static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
849 +diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
850 +index 283bdb7071af..f486e2606247 100644
851 +--- a/arch/x86/kvm/vmx/capabilities.h
852 ++++ b/arch/x86/kvm/vmx/capabilities.h
853 +@@ -12,6 +12,7 @@ extern bool __read_mostly enable_ept;
854 + extern bool __read_mostly enable_unrestricted_guest;
855 + extern bool __read_mostly enable_ept_ad_bits;
856 + extern bool __read_mostly enable_pml;
857 ++extern bool __read_mostly enable_apicv;
858 + extern int __read_mostly pt_mode;
859 +
860 + #define PT_MODE_SYSTEM 0
861 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
862 +index 3babe5e29429..af5a36dfc88a 100644
863 +--- a/arch/x86/kvm/vmx/nested.c
864 ++++ b/arch/x86/kvm/vmx/nested.c
865 +@@ -5304,24 +5304,17 @@ fail:
866 + return 1;
867 + }
868 +
869 +-
870 +-static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
871 +- struct vmcs12 *vmcs12)
872 ++/*
873 ++ * Return true if an IO instruction with the specified port and size should cause
874 ++ * a VM-exit into L1.
875 ++ */
876 ++bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
877 ++ int size)
878 + {
879 +- unsigned long exit_qualification;
880 ++ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
881 + gpa_t bitmap, last_bitmap;
882 +- unsigned int port;
883 +- int size;
884 + u8 b;
885 +
886 +- if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
887 +- return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
888 +-
889 +- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
890 +-
891 +- port = exit_qualification >> 16;
892 +- size = (exit_qualification & 7) + 1;
893 +-
894 + last_bitmap = (gpa_t)-1;
895 + b = -1;
896 +
897 +@@ -5348,6 +5341,24 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
898 + return false;
899 + }
900 +
901 ++static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
902 ++ struct vmcs12 *vmcs12)
903 ++{
904 ++ unsigned long exit_qualification;
905 ++ unsigned short port;
906 ++ int size;
907 ++
908 ++ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
909 ++ return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
910 ++
911 ++ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
912 ++
913 ++ port = exit_qualification >> 16;
914 ++ size = (exit_qualification & 7) + 1;
915 ++
916 ++ return nested_vmx_check_io_bitmaps(vcpu, port, size);
917 ++}
918 ++
919 + /*
920 + * Return 1 if we should exit from L2 to L1 to handle an MSR access access,
921 + * rather than handle it ourselves in L0. I.e., check whether L1 expressed
922 +@@ -5968,8 +5979,7 @@ void nested_vmx_set_vmcs_shadowing_bitmap(void)
923 + * bit in the high half is on if the corresponding bit in the control field
924 + * may be on. See also vmx_control_verify().
925 + */
926 +-void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
927 +- bool apicv)
928 ++void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
929 + {
930 + /*
931 + * Note that as a general rule, the high half of the MSRs (bits in
932 +@@ -5996,7 +6006,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
933 + PIN_BASED_EXT_INTR_MASK |
934 + PIN_BASED_NMI_EXITING |
935 + PIN_BASED_VIRTUAL_NMIS |
936 +- (apicv ? PIN_BASED_POSTED_INTR : 0);
937 ++ (enable_apicv ? PIN_BASED_POSTED_INTR : 0);
938 + msrs->pinbased_ctls_high |=
939 + PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
940 + PIN_BASED_VMX_PREEMPTION_TIMER;
941 +diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
942 +index fc874d4ead0f..e1c7faed7df4 100644
943 +--- a/arch/x86/kvm/vmx/nested.h
944 ++++ b/arch/x86/kvm/vmx/nested.h
945 +@@ -17,8 +17,7 @@ enum nvmx_vmentry_status {
946 + };
947 +
948 + void vmx_leave_nested(struct kvm_vcpu *vcpu);
949 +-void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
950 +- bool apicv);
951 ++void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
952 + void nested_vmx_hardware_unsetup(void);
953 + __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
954 + void nested_vmx_set_vmcs_shadowing_bitmap(void);
955 +@@ -34,6 +33,8 @@ int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
956 + int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
957 + u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
958 + void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu);
959 ++bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
960 ++ int size);
961 +
962 + static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
963 + {
964 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
965 +index c0d837c37f34..be438bc7cfa3 100644
966 +--- a/arch/x86/kvm/vmx/vmx.c
967 ++++ b/arch/x86/kvm/vmx/vmx.c
968 +@@ -95,7 +95,7 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
969 + static bool __read_mostly fasteoi = 1;
970 + module_param(fasteoi, bool, S_IRUGO);
971 +
972 +-static bool __read_mostly enable_apicv = 1;
973 ++bool __read_mostly enable_apicv = 1;
974 + module_param(enable_apicv, bool, S_IRUGO);
975 +
976 + /*
977 +@@ -3848,24 +3848,29 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
978 + * 2. If target vcpu isn't running(root mode), kick it to pick up the
979 + * interrupt from PIR in next vmentry.
980 + */
981 +-static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
982 ++static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
983 + {
984 + struct vcpu_vmx *vmx = to_vmx(vcpu);
985 + int r;
986 +
987 + r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
988 + if (!r)
989 +- return;
990 ++ return 0;
991 ++
992 ++ if (!vcpu->arch.apicv_active)
993 ++ return -1;
994 +
995 + if (pi_test_and_set_pir(vector, &vmx->pi_desc))
996 +- return;
997 ++ return 0;
998 +
999 + /* If a previous notification has sent the IPI, nothing to do. */
1000 + if (pi_test_and_set_on(&vmx->pi_desc))
1001 +- return;
1002 ++ return 0;
1003 +
1004 + if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
1005 + kvm_vcpu_kick(vcpu);
1006 ++
1007 ++ return 0;
1008 + }
1009 +
1010 + /*
1011 +@@ -6803,8 +6808,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
1012 +
1013 + if (nested)
1014 + nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
1015 +- vmx_capability.ept,
1016 +- kvm_vcpu_apicv_active(&vmx->vcpu));
1017 ++ vmx_capability.ept);
1018 + else
1019 + memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs));
1020 +
1021 +@@ -6884,8 +6888,7 @@ static int __init vmx_check_processor_compat(void)
1022 + if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0)
1023 + return -EIO;
1024 + if (nested)
1025 +- nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept,
1026 +- enable_apicv);
1027 ++ nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept);
1028 + if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
1029 + printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
1030 + smp_processor_id());
1031 +@@ -7146,6 +7149,39 @@ static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
1032 + to_vmx(vcpu)->req_immediate_exit = true;
1033 + }
1034 +
1035 ++static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
1036 ++ struct x86_instruction_info *info)
1037 ++{
1038 ++ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1039 ++ unsigned short port;
1040 ++ bool intercept;
1041 ++ int size;
1042 ++
1043 ++ if (info->intercept == x86_intercept_in ||
1044 ++ info->intercept == x86_intercept_ins) {
1045 ++ port = info->src_val;
1046 ++ size = info->dst_bytes;
1047 ++ } else {
1048 ++ port = info->dst_val;
1049 ++ size = info->src_bytes;
1050 ++ }
1051 ++
1052 ++ /*
1053 ++ * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
1054 ++ * VM-exits depend on the 'unconditional IO exiting' VM-execution
1055 ++ * control.
1056 ++ *
1057 ++ * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
1058 ++ */
1059 ++ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
1060 ++ intercept = nested_cpu_has(vmcs12,
1061 ++ CPU_BASED_UNCOND_IO_EXITING);
1062 ++ else
1063 ++ intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
1064 ++
1065 ++ return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
1066 ++}
1067 ++
1068 + static int vmx_check_intercept(struct kvm_vcpu *vcpu,
1069 + struct x86_instruction_info *info,
1070 + enum x86_intercept_stage stage)
1071 +@@ -7153,19 +7189,31 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
1072 + struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1073 + struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
1074 +
1075 ++ switch (info->intercept) {
1076 + /*
1077 + * RDPID causes #UD if disabled through secondary execution controls.
1078 + * Because it is marked as EmulateOnUD, we need to intercept it here.
1079 + */
1080 +- if (info->intercept == x86_intercept_rdtscp &&
1081 +- !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
1082 +- ctxt->exception.vector = UD_VECTOR;
1083 +- ctxt->exception.error_code_valid = false;
1084 +- return X86EMUL_PROPAGATE_FAULT;
1085 +- }
1086 ++ case x86_intercept_rdtscp:
1087 ++ if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
1088 ++ ctxt->exception.vector = UD_VECTOR;
1089 ++ ctxt->exception.error_code_valid = false;
1090 ++ return X86EMUL_PROPAGATE_FAULT;
1091 ++ }
1092 ++ break;
1093 ++
1094 ++ case x86_intercept_in:
1095 ++ case x86_intercept_ins:
1096 ++ case x86_intercept_out:
1097 ++ case x86_intercept_outs:
1098 ++ return vmx_check_intercept_io(vcpu, info);
1099 +
1100 + /* TODO: check more intercepts... */
1101 +- return X86EMUL_CONTINUE;
1102 ++ default:
1103 ++ break;
1104 ++ }
1105 ++
1106 ++ return X86EMUL_UNHANDLEABLE;
1107 + }
1108 +
1109 + #ifdef CONFIG_X86_64
1110 +@@ -7747,7 +7795,7 @@ static __init int hardware_setup(void)
1111 +
1112 + if (nested) {
1113 + nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
1114 +- vmx_capability.ept, enable_apicv);
1115 ++ vmx_capability.ept);
1116 +
1117 + r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
1118 + if (r)
1119 +diff --git a/crypto/hash_info.c b/crypto/hash_info.c
1120 +index c754cb75dd1a..a49ff96bde77 100644
1121 +--- a/crypto/hash_info.c
1122 ++++ b/crypto/hash_info.c
1123 +@@ -26,7 +26,7 @@ const char *const hash_algo_name[HASH_ALGO__LAST] = {
1124 + [HASH_ALGO_TGR_128] = "tgr128",
1125 + [HASH_ALGO_TGR_160] = "tgr160",
1126 + [HASH_ALGO_TGR_192] = "tgr192",
1127 +- [HASH_ALGO_SM3_256] = "sm3-256",
1128 ++ [HASH_ALGO_SM3_256] = "sm3",
1129 + [HASH_ALGO_STREEBOG_256] = "streebog256",
1130 + [HASH_ALGO_STREEBOG_512] = "streebog512",
1131 + };
1132 +diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
1133 +index 9e2f5a05c066..bad2257356fe 100644
1134 +--- a/drivers/acpi/acpica/evevent.c
1135 ++++ b/drivers/acpi/acpica/evevent.c
1136 +@@ -265,4 +265,49 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
1137 + handler) (acpi_gbl_fixed_event_handlers[event].context));
1138 + }
1139 +
1140 ++/*******************************************************************************
1141 ++ *
1142 ++ * FUNCTION: acpi_any_fixed_event_status_set
1143 ++ *
1144 ++ * PARAMETERS: None
1145 ++ *
1146 ++ * RETURN: TRUE or FALSE
1147 ++ *
1148 ++ * DESCRIPTION: Checks the PM status register for active fixed events
1149 ++ *
1150 ++ ******************************************************************************/
1151 ++
1152 ++u32 acpi_any_fixed_event_status_set(void)
1153 ++{
1154 ++ acpi_status status;
1155 ++ u32 in_status;
1156 ++ u32 in_enable;
1157 ++ u32 i;
1158 ++
1159 ++ status = acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &in_enable);
1160 ++ if (ACPI_FAILURE(status)) {
1161 ++ return (FALSE);
1162 ++ }
1163 ++
1164 ++ status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &in_status);
1165 ++ if (ACPI_FAILURE(status)) {
1166 ++ return (FALSE);
1167 ++ }
1168 ++
1169 ++ /*
1170 ++ * Check for all possible Fixed Events and dispatch those that are active
1171 ++ */
1172 ++ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
1173 ++
1174 ++ /* Both the status and enable bits must be on for this event */
1175 ++
1176 ++ if ((in_status & acpi_gbl_fixed_event_info[i].status_bit_mask) &&
1177 ++ (in_enable & acpi_gbl_fixed_event_info[i].enable_bit_mask)) {
1178 ++ return (TRUE);
1179 ++ }
1180 ++ }
1181 ++
1182 ++ return (FALSE);
1183 ++}
1184 ++
1185 + #endif /* !ACPI_REDUCED_HARDWARE */
1186 +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
1187 +index 5672fa8cb300..ce59a3f32eac 100644
1188 +--- a/drivers/acpi/sleep.c
1189 ++++ b/drivers/acpi/sleep.c
1190 +@@ -1002,6 +1002,13 @@ static bool acpi_s2idle_wake(void)
1191 + if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
1192 + return true;
1193 +
1194 ++ /*
1195 ++ * If the status bit of any enabled fixed event is set, the
1196 ++ * wakeup is regarded as valid.
1197 ++ */
1198 ++ if (acpi_any_fixed_event_status_set())
1199 ++ return true;
1200 ++
1201 + /*
1202 + * If there are no EC events to process and at least one of the
1203 + * other enabled GPEs is active, the wakeup is regarded as a
1204 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
1205 +index 4bfd1b14b390..11ea1aff40db 100644
1206 +--- a/drivers/ata/ahci.c
1207 ++++ b/drivers/ata/ahci.c
1208 +@@ -81,6 +81,7 @@ enum board_ids {
1209 +
1210 + static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
1211 + static void ahci_remove_one(struct pci_dev *dev);
1212 ++static void ahci_shutdown_one(struct pci_dev *dev);
1213 + static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1214 + unsigned long deadline);
1215 + static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
1216 +@@ -606,6 +607,7 @@ static struct pci_driver ahci_pci_driver = {
1217 + .id_table = ahci_pci_tbl,
1218 + .probe = ahci_init_one,
1219 + .remove = ahci_remove_one,
1220 ++ .shutdown = ahci_shutdown_one,
1221 + .driver = {
1222 + .pm = &ahci_pci_pm_ops,
1223 + },
1224 +@@ -1877,6 +1879,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1225 + return 0;
1226 + }
1227 +
1228 ++static void ahci_shutdown_one(struct pci_dev *pdev)
1229 ++{
1230 ++ ata_pci_shutdown_one(pdev);
1231 ++}
1232 ++
1233 + static void ahci_remove_one(struct pci_dev *pdev)
1234 + {
1235 + pm_runtime_get_noresume(&pdev->dev);
1236 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1237 +index 6f4ab5c5b52d..42c8728f6117 100644
1238 +--- a/drivers/ata/libata-core.c
1239 ++++ b/drivers/ata/libata-core.c
1240 +@@ -6767,6 +6767,26 @@ void ata_pci_remove_one(struct pci_dev *pdev)
1241 + ata_host_detach(host);
1242 + }
1243 +
1244 ++void ata_pci_shutdown_one(struct pci_dev *pdev)
1245 ++{
1246 ++ struct ata_host *host = pci_get_drvdata(pdev);
1247 ++ int i;
1248 ++
1249 ++ for (i = 0; i < host->n_ports; i++) {
1250 ++ struct ata_port *ap = host->ports[i];
1251 ++
1252 ++ ap->pflags |= ATA_PFLAG_FROZEN;
1253 ++
1254 ++ /* Disable port interrupts */
1255 ++ if (ap->ops->freeze)
1256 ++ ap->ops->freeze(ap);
1257 ++
1258 ++ /* Stop the port DMA engines */
1259 ++ if (ap->ops->port_stop)
1260 ++ ap->ops->port_stop(ap);
1261 ++ }
1262 ++}
1263 ++
1264 + /* move to PCI subsystem */
1265 + int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1266 + {
1267 +@@ -7387,6 +7407,7 @@ EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
1268 +
1269 + #ifdef CONFIG_PCI
1270 + EXPORT_SYMBOL_GPL(pci_test_config_bits);
1271 ++EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
1272 + EXPORT_SYMBOL_GPL(ata_pci_remove_one);
1273 + #ifdef CONFIG_PM
1274 + EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
1275 +diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
1276 +index 485865fd0412..f19a03b62365 100644
1277 +--- a/drivers/block/floppy.c
1278 ++++ b/drivers/block/floppy.c
1279 +@@ -853,14 +853,17 @@ static void reset_fdc_info(int mode)
1280 + /* selects the fdc and drive, and enables the fdc's input/dma. */
1281 + static void set_fdc(int drive)
1282 + {
1283 ++ unsigned int new_fdc = fdc;
1284 ++
1285 + if (drive >= 0 && drive < N_DRIVE) {
1286 +- fdc = FDC(drive);
1287 ++ new_fdc = FDC(drive);
1288 + current_drive = drive;
1289 + }
1290 +- if (fdc != 1 && fdc != 0) {
1291 ++ if (new_fdc >= N_FDC) {
1292 + pr_info("bad fdc value\n");
1293 + return;
1294 + }
1295 ++ fdc = new_fdc;
1296 + set_dor(fdc, ~0, 8);
1297 + #if N_FDC > 1
1298 + set_dor(1 - fdc, ~8, 0);
1299 +diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
1300 +index 5a0d99d4fec0..9567e5197f74 100644
1301 +--- a/drivers/char/tpm/Makefile
1302 ++++ b/drivers/char/tpm/Makefile
1303 +@@ -21,9 +21,11 @@ tpm-$(CONFIG_EFI) += eventlog/efi.o
1304 + tpm-$(CONFIG_OF) += eventlog/of.o
1305 + obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
1306 + obj-$(CONFIG_TCG_TIS) += tpm_tis.o
1307 +-obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi_mod.o
1308 +-tpm_tis_spi_mod-y := tpm_tis_spi.o
1309 +-tpm_tis_spi_mod-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
1310 ++
1311 ++obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
1312 ++tpm_tis_spi-y := tpm_tis_spi_main.o
1313 ++tpm_tis_spi-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
1314 ++
1315 + obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
1316 + obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
1317 + obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
1318 +diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
1319 +index 13696deceae8..760329598b99 100644
1320 +--- a/drivers/char/tpm/tpm2-cmd.c
1321 ++++ b/drivers/char/tpm/tpm2-cmd.c
1322 +@@ -525,6 +525,8 @@ static int tpm2_init_bank_info(struct tpm_chip *chip, u32 bank_index)
1323 + return 0;
1324 + }
1325 +
1326 ++ bank->crypto_id = HASH_ALGO__LAST;
1327 ++
1328 + return tpm2_pcr_read(chip, 0, &digest, &bank->digest_size);
1329 + }
1330 +
1331 +diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
1332 +deleted file mode 100644
1333 +index d1754fd6c573..000000000000
1334 +--- a/drivers/char/tpm/tpm_tis_spi.c
1335 ++++ /dev/null
1336 +@@ -1,298 +0,0 @@
1337 +-// SPDX-License-Identifier: GPL-2.0-only
1338 +-/*
1339 +- * Copyright (C) 2015 Infineon Technologies AG
1340 +- * Copyright (C) 2016 STMicroelectronics SAS
1341 +- *
1342 +- * Authors:
1343 +- * Peter Huewe <peter.huewe@××××××××.com>
1344 +- * Christophe Ricard <christophe-h.ricard@××.com>
1345 +- *
1346 +- * Maintained by: <tpmdd-devel@×××××××××××××××××.net>
1347 +- *
1348 +- * Device driver for TCG/TCPA TPM (trusted platform module).
1349 +- * Specifications at www.trustedcomputinggroup.org
1350 +- *
1351 +- * This device driver implements the TPM interface as defined in
1352 +- * the TCG TPM Interface Spec version 1.3, revision 27 via _raw/native
1353 +- * SPI access_.
1354 +- *
1355 +- * It is based on the original tpm_tis device driver from Leendert van
1356 +- * Dorn and Kyleen Hall and Jarko Sakkinnen.
1357 +- */
1358 +-
1359 +-#include <linux/acpi.h>
1360 +-#include <linux/completion.h>
1361 +-#include <linux/init.h>
1362 +-#include <linux/interrupt.h>
1363 +-#include <linux/kernel.h>
1364 +-#include <linux/module.h>
1365 +-#include <linux/slab.h>
1366 +-
1367 +-#include <linux/of_device.h>
1368 +-#include <linux/spi/spi.h>
1369 +-#include <linux/tpm.h>
1370 +-
1371 +-#include "tpm.h"
1372 +-#include "tpm_tis_core.h"
1373 +-#include "tpm_tis_spi.h"
1374 +-
1375 +-#define MAX_SPI_FRAMESIZE 64
1376 +-
1377 +-/*
1378 +- * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
1379 +- * keep trying to read from the device until MISO goes high indicating the
1380 +- * wait state has ended.
1381 +- *
1382 +- * [1] https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
1383 +- */
1384 +-static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
1385 +- struct spi_transfer *spi_xfer)
1386 +-{
1387 +- struct spi_message m;
1388 +- int ret, i;
1389 +-
1390 +- if ((phy->iobuf[3] & 0x01) == 0) {
1391 +- // handle SPI wait states
1392 +- phy->iobuf[0] = 0;
1393 +-
1394 +- for (i = 0; i < TPM_RETRY; i++) {
1395 +- spi_xfer->len = 1;
1396 +- spi_message_init(&m);
1397 +- spi_message_add_tail(spi_xfer, &m);
1398 +- ret = spi_sync_locked(phy->spi_device, &m);
1399 +- if (ret < 0)
1400 +- return ret;
1401 +- if (phy->iobuf[0] & 0x01)
1402 +- break;
1403 +- }
1404 +-
1405 +- if (i == TPM_RETRY)
1406 +- return -ETIMEDOUT;
1407 +- }
1408 +-
1409 +- return 0;
1410 +-}
1411 +-
1412 +-int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
1413 +- u8 *in, const u8 *out)
1414 +-{
1415 +- struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
1416 +- int ret = 0;
1417 +- struct spi_message m;
1418 +- struct spi_transfer spi_xfer;
1419 +- u8 transfer_len;
1420 +-
1421 +- spi_bus_lock(phy->spi_device->master);
1422 +-
1423 +- while (len) {
1424 +- transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
1425 +-
1426 +- phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
1427 +- phy->iobuf[1] = 0xd4;
1428 +- phy->iobuf[2] = addr >> 8;
1429 +- phy->iobuf[3] = addr;
1430 +-
1431 +- memset(&spi_xfer, 0, sizeof(spi_xfer));
1432 +- spi_xfer.tx_buf = phy->iobuf;
1433 +- spi_xfer.rx_buf = phy->iobuf;
1434 +- spi_xfer.len = 4;
1435 +- spi_xfer.cs_change = 1;
1436 +-
1437 +- spi_message_init(&m);
1438 +- spi_message_add_tail(&spi_xfer, &m);
1439 +- ret = spi_sync_locked(phy->spi_device, &m);
1440 +- if (ret < 0)
1441 +- goto exit;
1442 +-
1443 +- ret = phy->flow_control(phy, &spi_xfer);
1444 +- if (ret < 0)
1445 +- goto exit;
1446 +-
1447 +- spi_xfer.cs_change = 0;
1448 +- spi_xfer.len = transfer_len;
1449 +- spi_xfer.delay_usecs = 5;
1450 +-
1451 +- if (in) {
1452 +- spi_xfer.tx_buf = NULL;
1453 +- } else if (out) {
1454 +- spi_xfer.rx_buf = NULL;
1455 +- memcpy(phy->iobuf, out, transfer_len);
1456 +- out += transfer_len;
1457 +- }
1458 +-
1459 +- spi_message_init(&m);
1460 +- spi_message_add_tail(&spi_xfer, &m);
1461 +- reinit_completion(&phy->ready);
1462 +- ret = spi_sync_locked(phy->spi_device, &m);
1463 +- if (ret < 0)
1464 +- goto exit;
1465 +-
1466 +- if (in) {
1467 +- memcpy(in, phy->iobuf, transfer_len);
1468 +- in += transfer_len;
1469 +- }
1470 +-
1471 +- len -= transfer_len;
1472 +- }
1473 +-
1474 +-exit:
1475 +- spi_bus_unlock(phy->spi_device->master);
1476 +- return ret;
1477 +-}
1478 +-
1479 +-static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
1480 +- u16 len, u8 *result)
1481 +-{
1482 +- return tpm_tis_spi_transfer(data, addr, len, result, NULL);
1483 +-}
1484 +-
1485 +-static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
1486 +- u16 len, const u8 *value)
1487 +-{
1488 +- return tpm_tis_spi_transfer(data, addr, len, NULL, value);
1489 +-}
1490 +-
1491 +-int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
1492 +-{
1493 +- __le16 result_le;
1494 +- int rc;
1495 +-
1496 +- rc = data->phy_ops->read_bytes(data, addr, sizeof(u16),
1497 +- (u8 *)&result_le);
1498 +- if (!rc)
1499 +- *result = le16_to_cpu(result_le);
1500 +-
1501 +- return rc;
1502 +-}
1503 +-
1504 +-int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
1505 +-{
1506 +- __le32 result_le;
1507 +- int rc;
1508 +-
1509 +- rc = data->phy_ops->read_bytes(data, addr, sizeof(u32),
1510 +- (u8 *)&result_le);
1511 +- if (!rc)
1512 +- *result = le32_to_cpu(result_le);
1513 +-
1514 +- return rc;
1515 +-}
1516 +-
1517 +-int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
1518 +-{
1519 +- __le32 value_le;
1520 +- int rc;
1521 +-
1522 +- value_le = cpu_to_le32(value);
1523 +- rc = data->phy_ops->write_bytes(data, addr, sizeof(u32),
1524 +- (u8 *)&value_le);
1525 +-
1526 +- return rc;
1527 +-}
1528 +-
1529 +-int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
1530 +- int irq, const struct tpm_tis_phy_ops *phy_ops)
1531 +-{
1532 +- phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
1533 +- if (!phy->iobuf)
1534 +- return -ENOMEM;
1535 +-
1536 +- phy->spi_device = spi;
1537 +-
1538 +- return tpm_tis_core_init(&spi->dev, &phy->priv, irq, phy_ops, NULL);
1539 +-}
1540 +-
1541 +-static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
1542 +- .read_bytes = tpm_tis_spi_read_bytes,
1543 +- .write_bytes = tpm_tis_spi_write_bytes,
1544 +- .read16 = tpm_tis_spi_read16,
1545 +- .read32 = tpm_tis_spi_read32,
1546 +- .write32 = tpm_tis_spi_write32,
1547 +-};
1548 +-
1549 +-static int tpm_tis_spi_probe(struct spi_device *dev)
1550 +-{
1551 +- struct tpm_tis_spi_phy *phy;
1552 +- int irq;
1553 +-
1554 +- phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
1555 +- GFP_KERNEL);
1556 +- if (!phy)
1557 +- return -ENOMEM;
1558 +-
1559 +- phy->flow_control = tpm_tis_spi_flow_control;
1560 +-
1561 +- /* If the SPI device has an IRQ then use that */
1562 +- if (dev->irq > 0)
1563 +- irq = dev->irq;
1564 +- else
1565 +- irq = -1;
1566 +-
1567 +- init_completion(&phy->ready);
1568 +- return tpm_tis_spi_init(dev, phy, irq, &tpm_spi_phy_ops);
1569 +-}
1570 +-
1571 +-typedef int (*tpm_tis_spi_probe_func)(struct spi_device *);
1572 +-
1573 +-static int tpm_tis_spi_driver_probe(struct spi_device *spi)
1574 +-{
1575 +- const struct spi_device_id *spi_dev_id = spi_get_device_id(spi);
1576 +- tpm_tis_spi_probe_func probe_func;
1577 +-
1578 +- probe_func = of_device_get_match_data(&spi->dev);
1579 +- if (!probe_func && spi_dev_id)
1580 +- probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
1581 +- if (!probe_func)
1582 +- return -ENODEV;
1583 +-
1584 +- return probe_func(spi);
1585 +-}
1586 +-
1587 +-static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume);
1588 +-
1589 +-static int tpm_tis_spi_remove(struct spi_device *dev)
1590 +-{
1591 +- struct tpm_chip *chip = spi_get_drvdata(dev);
1592 +-
1593 +- tpm_chip_unregister(chip);
1594 +- tpm_tis_remove(chip);
1595 +- return 0;
1596 +-}
1597 +-
1598 +-static const struct spi_device_id tpm_tis_spi_id[] = {
1599 +- { "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
1600 +- { "cr50", (unsigned long)cr50_spi_probe },
1601 +- {}
1602 +-};
1603 +-MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);
1604 +-
1605 +-static const struct of_device_id of_tis_spi_match[] = {
1606 +- { .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe },
1607 +- { .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe },
1608 +- { .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe },
1609 +- { .compatible = "google,cr50", .data = cr50_spi_probe },
1610 +- {}
1611 +-};
1612 +-MODULE_DEVICE_TABLE(of, of_tis_spi_match);
1613 +-
1614 +-static const struct acpi_device_id acpi_tis_spi_match[] = {
1615 +- {"SMO0768", 0},
1616 +- {}
1617 +-};
1618 +-MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match);
1619 +-
1620 +-static struct spi_driver tpm_tis_spi_driver = {
1621 +- .driver = {
1622 +- .name = "tpm_tis_spi",
1623 +- .pm = &tpm_tis_pm,
1624 +- .of_match_table = of_match_ptr(of_tis_spi_match),
1625 +- .acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
1626 +- },
1627 +- .probe = tpm_tis_spi_driver_probe,
1628 +- .remove = tpm_tis_spi_remove,
1629 +- .id_table = tpm_tis_spi_id,
1630 +-};
1631 +-module_spi_driver(tpm_tis_spi_driver);
1632 +-
1633 +-MODULE_DESCRIPTION("TPM Driver for native SPI access");
1634 +-MODULE_LICENSE("GPL");
1635 +diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
1636 +new file mode 100644
1637 +index 000000000000..d1754fd6c573
1638 +--- /dev/null
1639 ++++ b/drivers/char/tpm/tpm_tis_spi_main.c
1640 +@@ -0,0 +1,298 @@
1641 ++// SPDX-License-Identifier: GPL-2.0-only
1642 ++/*
1643 ++ * Copyright (C) 2015 Infineon Technologies AG
1644 ++ * Copyright (C) 2016 STMicroelectronics SAS
1645 ++ *
1646 ++ * Authors:
1647 ++ * Peter Huewe <peter.huewe@××××××××.com>
1648 ++ * Christophe Ricard <christophe-h.ricard@××.com>
1649 ++ *
1650 ++ * Maintained by: <tpmdd-devel@×××××××××××××××××.net>
1651 ++ *
1652 ++ * Device driver for TCG/TCPA TPM (trusted platform module).
1653 ++ * Specifications at www.trustedcomputinggroup.org
1654 ++ *
1655 ++ * This device driver implements the TPM interface as defined in
1656 ++ * the TCG TPM Interface Spec version 1.3, revision 27 via _raw/native
1657 ++ * SPI access_.
1658 ++ *
1659 ++ * It is based on the original tpm_tis device driver from Leendert van
1660 ++ * Dorn and Kylene Hall and Jarkko Sakkinen.
1661 ++ */
1662 ++
1663 ++#include <linux/acpi.h>
1664 ++#include <linux/completion.h>
1665 ++#include <linux/init.h>
1666 ++#include <linux/interrupt.h>
1667 ++#include <linux/kernel.h>
1668 ++#include <linux/module.h>
1669 ++#include <linux/slab.h>
1670 ++
1671 ++#include <linux/of_device.h>
1672 ++#include <linux/spi/spi.h>
1673 ++#include <linux/tpm.h>
1674 ++
1675 ++#include "tpm.h"
1676 ++#include "tpm_tis_core.h"
1677 ++#include "tpm_tis_spi.h"
1678 ++
1679 ++#define MAX_SPI_FRAMESIZE 64
1680 ++
1681 ++/*
1682 ++ * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
1683 ++ * keep trying to read from the device until MISO goes high indicating the
1684 ++ * wait state has ended.
1685 ++ *
1686 ++ * [1] https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
1687 ++ */
1688 ++static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
1689 ++ struct spi_transfer *spi_xfer)
1690 ++{
1691 ++ struct spi_message m;
1692 ++ int ret, i;
1693 ++
1694 ++ if ((phy->iobuf[3] & 0x01) == 0) {
1695 ++ // handle SPI wait states
1696 ++ phy->iobuf[0] = 0;
1697 ++
1698 ++ for (i = 0; i < TPM_RETRY; i++) {
1699 ++ spi_xfer->len = 1;
1700 ++ spi_message_init(&m);
1701 ++ spi_message_add_tail(spi_xfer, &m);
1702 ++ ret = spi_sync_locked(phy->spi_device, &m);
1703 ++ if (ret < 0)
1704 ++ return ret;
1705 ++ if (phy->iobuf[0] & 0x01)
1706 ++ break;
1707 ++ }
1708 ++
1709 ++ if (i == TPM_RETRY)
1710 ++ return -ETIMEDOUT;
1711 ++ }
1712 ++
1713 ++ return 0;
1714 ++}
1715 ++
1716 ++int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
1717 ++ u8 *in, const u8 *out)
1718 ++{
1719 ++ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
1720 ++ int ret = 0;
1721 ++ struct spi_message m;
1722 ++ struct spi_transfer spi_xfer;
1723 ++ u8 transfer_len;
1724 ++
1725 ++ spi_bus_lock(phy->spi_device->master);
1726 ++
1727 ++ while (len) {
1728 ++ transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
1729 ++
1730 ++ phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
1731 ++ phy->iobuf[1] = 0xd4;
1732 ++ phy->iobuf[2] = addr >> 8;
1733 ++ phy->iobuf[3] = addr;
1734 ++
1735 ++ memset(&spi_xfer, 0, sizeof(spi_xfer));
1736 ++ spi_xfer.tx_buf = phy->iobuf;
1737 ++ spi_xfer.rx_buf = phy->iobuf;
1738 ++ spi_xfer.len = 4;
1739 ++ spi_xfer.cs_change = 1;
1740 ++
1741 ++ spi_message_init(&m);
1742 ++ spi_message_add_tail(&spi_xfer, &m);
1743 ++ ret = spi_sync_locked(phy->spi_device, &m);
1744 ++ if (ret < 0)
1745 ++ goto exit;
1746 ++
1747 ++ ret = phy->flow_control(phy, &spi_xfer);
1748 ++ if (ret < 0)
1749 ++ goto exit;
1750 ++
1751 ++ spi_xfer.cs_change = 0;
1752 ++ spi_xfer.len = transfer_len;
1753 ++ spi_xfer.delay_usecs = 5;
1754 ++
1755 ++ if (in) {
1756 ++ spi_xfer.tx_buf = NULL;
1757 ++ } else if (out) {
1758 ++ spi_xfer.rx_buf = NULL;
1759 ++ memcpy(phy->iobuf, out, transfer_len);
1760 ++ out += transfer_len;
1761 ++ }
1762 ++
1763 ++ spi_message_init(&m);
1764 ++ spi_message_add_tail(&spi_xfer, &m);
1765 ++ reinit_completion(&phy->ready);
1766 ++ ret = spi_sync_locked(phy->spi_device, &m);
1767 ++ if (ret < 0)
1768 ++ goto exit;
1769 ++
1770 ++ if (in) {
1771 ++ memcpy(in, phy->iobuf, transfer_len);
1772 ++ in += transfer_len;
1773 ++ }
1774 ++
1775 ++ len -= transfer_len;
1776 ++ }
1777 ++
1778 ++exit:
1779 ++ spi_bus_unlock(phy->spi_device->master);
1780 ++ return ret;
1781 ++}
1782 ++
1783 ++static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
1784 ++ u16 len, u8 *result)
1785 ++{
1786 ++ return tpm_tis_spi_transfer(data, addr, len, result, NULL);
1787 ++}
1788 ++
1789 ++static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
1790 ++ u16 len, const u8 *value)
1791 ++{
1792 ++ return tpm_tis_spi_transfer(data, addr, len, NULL, value);
1793 ++}
1794 ++
1795 ++int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
1796 ++{
1797 ++ __le16 result_le;
1798 ++ int rc;
1799 ++
1800 ++ rc = data->phy_ops->read_bytes(data, addr, sizeof(u16),
1801 ++ (u8 *)&result_le);
1802 ++ if (!rc)
1803 ++ *result = le16_to_cpu(result_le);
1804 ++
1805 ++ return rc;
1806 ++}
1807 ++
1808 ++int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
1809 ++{
1810 ++ __le32 result_le;
1811 ++ int rc;
1812 ++
1813 ++ rc = data->phy_ops->read_bytes(data, addr, sizeof(u32),
1814 ++ (u8 *)&result_le);
1815 ++ if (!rc)
1816 ++ *result = le32_to_cpu(result_le);
1817 ++
1818 ++ return rc;
1819 ++}
1820 ++
1821 ++int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
1822 ++{
1823 ++ __le32 value_le;
1824 ++ int rc;
1825 ++
1826 ++ value_le = cpu_to_le32(value);
1827 ++ rc = data->phy_ops->write_bytes(data, addr, sizeof(u32),
1828 ++ (u8 *)&value_le);
1829 ++
1830 ++ return rc;
1831 ++}
1832 ++
1833 ++int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
1834 ++ int irq, const struct tpm_tis_phy_ops *phy_ops)
1835 ++{
1836 ++ phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
1837 ++ if (!phy->iobuf)
1838 ++ return -ENOMEM;
1839 ++
1840 ++ phy->spi_device = spi;
1841 ++
1842 ++ return tpm_tis_core_init(&spi->dev, &phy->priv, irq, phy_ops, NULL);
1843 ++}
1844 ++
1845 ++static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
1846 ++ .read_bytes = tpm_tis_spi_read_bytes,
1847 ++ .write_bytes = tpm_tis_spi_write_bytes,
1848 ++ .read16 = tpm_tis_spi_read16,
1849 ++ .read32 = tpm_tis_spi_read32,
1850 ++ .write32 = tpm_tis_spi_write32,
1851 ++};
1852 ++
1853 ++static int tpm_tis_spi_probe(struct spi_device *dev)
1854 ++{
1855 ++ struct tpm_tis_spi_phy *phy;
1856 ++ int irq;
1857 ++
1858 ++ phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
1859 ++ GFP_KERNEL);
1860 ++ if (!phy)
1861 ++ return -ENOMEM;
1862 ++
1863 ++ phy->flow_control = tpm_tis_spi_flow_control;
1864 ++
1865 ++ /* If the SPI device has an IRQ then use that */
1866 ++ if (dev->irq > 0)
1867 ++ irq = dev->irq;
1868 ++ else
1869 ++ irq = -1;
1870 ++
1871 ++ init_completion(&phy->ready);
1872 ++ return tpm_tis_spi_init(dev, phy, irq, &tpm_spi_phy_ops);
1873 ++}
1874 ++
1875 ++typedef int (*tpm_tis_spi_probe_func)(struct spi_device *);
1876 ++
1877 ++static int tpm_tis_spi_driver_probe(struct spi_device *spi)
1878 ++{
1879 ++ const struct spi_device_id *spi_dev_id = spi_get_device_id(spi);
1880 ++ tpm_tis_spi_probe_func probe_func;
1881 ++
1882 ++ probe_func = of_device_get_match_data(&spi->dev);
1883 ++ if (!probe_func && spi_dev_id)
1884 ++ probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
1885 ++ if (!probe_func)
1886 ++ return -ENODEV;
1887 ++
1888 ++ return probe_func(spi);
1889 ++}
1890 ++
1891 ++static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume);
1892 ++
1893 ++static int tpm_tis_spi_remove(struct spi_device *dev)
1894 ++{
1895 ++ struct tpm_chip *chip = spi_get_drvdata(dev);
1896 ++
1897 ++ tpm_chip_unregister(chip);
1898 ++ tpm_tis_remove(chip);
1899 ++ return 0;
1900 ++}
1901 ++
1902 ++static const struct spi_device_id tpm_tis_spi_id[] = {
1903 ++ { "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
1904 ++ { "cr50", (unsigned long)cr50_spi_probe },
1905 ++ {}
1906 ++};
1907 ++MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);
1908 ++
1909 ++static const struct of_device_id of_tis_spi_match[] = {
1910 ++ { .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe },
1911 ++ { .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe },
1912 ++ { .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe },
1913 ++ { .compatible = "google,cr50", .data = cr50_spi_probe },
1914 ++ {}
1915 ++};
1916 ++MODULE_DEVICE_TABLE(of, of_tis_spi_match);
1917 ++
1918 ++static const struct acpi_device_id acpi_tis_spi_match[] = {
1919 ++ {"SMO0768", 0},
1920 ++ {}
1921 ++};
1922 ++MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match);
1923 ++
1924 ++static struct spi_driver tpm_tis_spi_driver = {
1925 ++ .driver = {
1926 ++ .name = "tpm_tis_spi",
1927 ++ .pm = &tpm_tis_pm,
1928 ++ .of_match_table = of_match_ptr(of_tis_spi_match),
1929 ++ .acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
1930 ++ },
1931 ++ .probe = tpm_tis_spi_driver_probe,
1932 ++ .remove = tpm_tis_spi_remove,
1933 ++ .id_table = tpm_tis_spi_id,
1934 ++};
1935 ++module_spi_driver(tpm_tis_spi_driver);
1936 ++
1937 ++MODULE_DESCRIPTION("TPM Driver for native SPI access");
1938 ++MODULE_LICENSE("GPL");
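
The transfer loop above is easier to follow once the wire format is spelled out: every frame starts with a 4-byte header whose first byte carries the read flag (bit 7) and the transfer length minus one, whose second byte is the fixed 0xd4 prefix, and whose last two bytes hold the register offset big-endian; longer payloads are cut into MAX_SPI_FRAMESIZE chunks against the same offset. A minimal user-space sketch of that header construction and chunking (the helper name and the sample offset are illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

#define MAX_SPI_FRAMESIZE 64 /* per-frame limit, as in the driver */

/* Build the 4-byte header sent before each data frame. */
static void build_header(uint8_t hdr[4], int is_read, uint16_t addr,
                         unsigned int transfer_len)
{
	hdr[0] = (is_read ? 0x80 : 0x00) | (transfer_len - 1);
	hdr[1] = 0xd4;        /* fixed prefix, as in the driver */
	hdr[2] = addr >> 8;   /* register offset, big-endian */
	hdr[3] = addr & 0xff;
}

int main(void)
{
	uint16_t addr = 0x0f80;  /* hypothetical register offset */
	unsigned int len = 150;  /* deliberately > MAX_SPI_FRAMESIZE */
	uint8_t hdr[4];

	while (len) {
		unsigned int chunk =
			len < MAX_SPI_FRAMESIZE ? len : MAX_SPI_FRAMESIZE;

		build_header(hdr, 1, addr, chunk);
		printf("frame: %02x %02x %02x %02x, %u data bytes\n",
		       hdr[0], hdr[1], hdr[2], hdr[3], chunk);
		len -= chunk;
	}
	return 0;
}
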
1939 +diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
1940 +index 66f1b2ac5cde..c27e206a764c 100644
1941 +--- a/drivers/dma/imx-sdma.c
1942 ++++ b/drivers/dma/imx-sdma.c
1943 +@@ -760,8 +760,12 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
1944 + return;
1945 + }
1946 + sdmac->desc = desc = to_sdma_desc(&vd->tx);
1947 +-
1948 +- list_del(&vd->node);
1949 ++ /*
1950 ++ * Do not delete the node from the desc_issued list in cyclic mode;
1951 ++ * otherwise the allocated desc will never be freed in vchan_dma_desc_free_list
1952 ++ */
1953 ++ if (!(sdmac->flags & IMX_DMA_SG_LOOP))
1954 ++ list_del(&vd->node);
1955 +
1956 + sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
1957 + sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
1958 +@@ -1067,6 +1071,7 @@ static void sdma_channel_terminate_work(struct work_struct *work)
1959 +
1960 + spin_lock_irqsave(&sdmac->vc.lock, flags);
1961 + vchan_get_all_descriptors(&sdmac->vc, &head);
1962 ++ sdmac->desc = NULL;
1963 + spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1964 + vchan_dma_desc_free_list(&sdmac->vc, &head);
1965 + sdmac->context_loaded = false;
1966 +@@ -1075,19 +1080,11 @@ static void sdma_channel_terminate_work(struct work_struct *work)
1967 + static int sdma_disable_channel_async(struct dma_chan *chan)
1968 + {
1969 + struct sdma_channel *sdmac = to_sdma_chan(chan);
1970 +- unsigned long flags;
1971 +-
1972 +- spin_lock_irqsave(&sdmac->vc.lock, flags);
1973 +
1974 + sdma_disable_channel(chan);
1975 +
1976 +- if (sdmac->desc) {
1977 +- vchan_terminate_vdesc(&sdmac->desc->vd);
1978 +- sdmac->desc = NULL;
1979 ++ if (sdmac->desc)
1980 + schedule_work(&sdmac->terminate_worker);
1981 +- }
1982 +-
1983 +- spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1984 +
1985 + return 0;
1986 + }
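
The list_del() change above is about descriptor lifetime: a cyclic descriptor is reused for every period, so it has to stay on the issued list until the terminate worker frees whatever is still listed. A toy model of that ownership rule, with a plain singly linked list standing in for the vchan machinery:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct desc {
	struct desc *next;   /* stand-in for the vchan issued list */
	bool cyclic;
};

static struct desc *issued;

/* start_desc(): only non-cyclic descriptors leave the list, because
 * terminate-time cleanup frees whatever is still listed */
static struct desc *start_desc(void)
{
	struct desc *d = issued;

	if (d && !d->cyclic)
		issued = d->next;
	return d;
}

static void terminate_all(void)
{
	while (issued) {
		struct desc *d = issued;

		issued = d->next;
		free(d);
		printf("freed one descriptor\n");
	}
}

int main(void)
{
	struct desc *d = calloc(1, sizeof(*d));

	d->cyclic = true;
	issued = d;

	start_desc();      /* cyclic: stays on the list */
	terminate_all();   /* ... so it is freed here, not leaked */
	return 0;
}
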
1987 +diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
1988 +index 92ce6d85802c..4cc0e630ab79 100644
1989 +--- a/drivers/fsi/Kconfig
1990 ++++ b/drivers/fsi/Kconfig
1991 +@@ -55,6 +55,7 @@ config FSI_MASTER_AST_CF
1992 +
1993 + config FSI_MASTER_ASPEED
1994 + tristate "FSI ASPEED master"
1995 ++ depends on HAS_IOMEM
1996 + help
1997 + This option enables a FSI master that is present behind an OPB bridge
1998 + in the AST2600.
1999 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
2000 +index ba9e53a1abc3..d9b8e3298d78 100644
2001 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
2002 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
2003 +@@ -3909,11 +3909,13 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
2004 + {
2005 + uint64_t clock;
2006 +
2007 ++ amdgpu_gfx_off_ctrl(adev, false);
2008 + mutex_lock(&adev->gfx.gpu_clock_mutex);
2009 + WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
2010 + clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
2011 + ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
2012 + mutex_unlock(&adev->gfx.gpu_clock_mutex);
2013 ++ amdgpu_gfx_off_ctrl(adev, true);
2014 + return clock;
2015 + }
2016 +
2017 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
2018 +index 97105a5bb246..085b84322e92 100644
2019 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
2020 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
2021 +@@ -3852,6 +3852,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
2022 + {
2023 + uint64_t clock;
2024 +
2025 ++ amdgpu_gfx_off_ctrl(adev, false);
2026 + mutex_lock(&adev->gfx.gpu_clock_mutex);
2027 + if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
2028 + uint32_t tmp, lsb, msb, i = 0;
2029 +@@ -3870,6 +3871,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
2030 + ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
2031 + }
2032 + mutex_unlock(&adev->gfx.gpu_clock_mutex);
2033 ++ amdgpu_gfx_off_ctrl(adev, true);
2034 + return clock;
2035 + }
2036 +
2037 +diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
2038 +index 04ea7cd69295..624e223175c2 100644
2039 +--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
2040 ++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
2041 +@@ -270,7 +270,12 @@ static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
2042 +
2043 + static u32 soc15_get_xclk(struct amdgpu_device *adev)
2044 + {
2045 +- return adev->clock.spll.reference_freq;
2046 ++ u32 reference_clock = adev->clock.spll.reference_freq;
2047 ++
2048 ++ if (adev->asic_type == CHIP_RAVEN)
2049 ++ return reference_clock / 4;
2050 ++
2051 ++ return reference_clock;
2052 + }
2053 +
2054 +
2055 +diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
2056 +index 8029478ffebb..b0b0ccbb059d 100644
2057 +--- a/drivers/gpu/drm/bridge/tc358767.c
2058 ++++ b/drivers/gpu/drm/bridge/tc358767.c
2059 +@@ -297,7 +297,7 @@ static inline int tc_poll_timeout(struct tc_data *tc, unsigned int addr,
2060 +
2061 + static int tc_aux_wait_busy(struct tc_data *tc)
2062 + {
2063 +- return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 1000, 100000);
2064 ++ return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 100, 100000);
2065 + }
2066 +
2067 + static int tc_aux_write_data(struct tc_data *tc, const void *data,
2068 +@@ -640,7 +640,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
2069 + if (ret)
2070 + goto err;
2071 +
2072 +- ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
2073 ++ ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 100, 100000);
2074 + if (ret == -ETIMEDOUT) {
2075 + dev_err(tc->dev, "Timeout waiting for PHY to become ready");
2076 + return ret;
2077 +@@ -876,7 +876,7 @@ static int tc_wait_link_training(struct tc_data *tc)
2078 + int ret;
2079 +
2080 + ret = tc_poll_timeout(tc, DP0_LTSTAT, LT_LOOPDONE,
2081 +- LT_LOOPDONE, 1, 1000);
2082 ++ LT_LOOPDONE, 500, 100000);
2083 + if (ret) {
2084 + dev_err(tc->dev, "Link training timeout waiting for LT_LOOPDONE!\n");
2085 + return ret;
2086 +@@ -949,7 +949,7 @@ static int tc_main_link_enable(struct tc_data *tc)
2087 + dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST);
2088 + ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
2089 +
2090 +- ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
2091 ++ ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 500, 100000);
2092 + if (ret) {
2093 + dev_err(dev, "timeout waiting for phy become ready");
2094 + return ret;
2095 +diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
2096 +index ba9595960bbe..907c4471f591 100644
2097 +--- a/drivers/gpu/drm/i915/Kconfig
2098 ++++ b/drivers/gpu/drm/i915/Kconfig
2099 +@@ -75,9 +75,8 @@ config DRM_I915_CAPTURE_ERROR
2100 + help
2101 + This option enables capturing the GPU state when a hang is detected.
2102 + This information is vital for triaging hangs and assists in debugging.
2103 +- Please report any hang to
2104 +- https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
2105 +- for triaging.
2106 ++ Please report any hang for triaging according to:
2107 ++ https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
2108 +
2109 + If in doubt, say "Y".
2110 +
2111 +diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
2112 +index 2a27fb5d7dc6..1488822398fe 100644
2113 +--- a/drivers/gpu/drm/i915/display/intel_ddi.c
2114 ++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
2115 +@@ -4227,7 +4227,9 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
2116 + void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
2117 + struct intel_crtc_state *crtc_state)
2118 + {
2119 +- if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
2120 ++ if (IS_ELKHARTLAKE(dev_priv) && crtc_state->port_clock > 594000)
2121 ++ crtc_state->min_voltage_level = 3;
2122 ++ else if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
2123 + crtc_state->min_voltage_level = 1;
2124 + else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
2125 + crtc_state->min_voltage_level = 2;
2126 +diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
2127 +index 301897791627..b670239a293b 100644
2128 +--- a/drivers/gpu/drm/i915/display/intel_display.c
2129 ++++ b/drivers/gpu/drm/i915/display/intel_display.c
2130 +@@ -10731,7 +10731,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
2131 + u32 base;
2132 +
2133 + if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
2134 +- base = obj->phys_handle->busaddr;
2135 ++ base = sg_dma_address(obj->mm.pages->sgl);
2136 + else
2137 + base = intel_plane_ggtt_offset(plane_state);
2138 +
2139 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
2140 +index 42385277c684..f3d608df1c4d 100644
2141 +--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
2142 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
2143 +@@ -484,6 +484,22 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
2144 + if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
2145 + return -ENODEV;
2146 +
2147 ++ /*
2148 ++ * If the cancel fails, we then need to reset, cleanly!
2149 ++ *
2150 ++ * If the per-engine reset fails, all hope is lost! We resort
2151 ++ * to a full GPU reset in that unlikely case, but realistically
2152 ++ * if the engine could not reset, the full reset does not fare
2153 ++ * much better. The damage has been done.
2154 ++ *
2155 ++ * However, if we cannot reset an engine by itself, we cannot
2156 ++ * clean up a hanging persistent context without causing
2157 ++ * collateral damage, and we should not pretend we can by
2158 ++ * exposing the interface.
2159 ++ */
2160 ++ if (!intel_has_reset_engine(&ctx->i915->gt))
2161 ++ return -ENODEV;
2162 ++
2163 + i915_gem_context_clear_persistence(ctx);
2164 + }
2165 +
2166 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
2167 +index e3f3944fbd90..1078a76d6d84 100644
2168 +--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
2169 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
2170 +@@ -260,9 +260,6 @@ struct drm_i915_gem_object {
2171 +
2172 + void *gvt_info;
2173 + };
2174 +-
2175 +- /** for phys allocated objects */
2176 +- struct drm_dma_handle *phys_handle;
2177 + };
2178 +
2179 + static inline struct drm_i915_gem_object *
2180 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
2181 +index 8043ff63d73f..5e2e0109c9ba 100644
2182 +--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
2183 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
2184 +@@ -22,88 +22,87 @@
2185 + static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
2186 + {
2187 + struct address_space *mapping = obj->base.filp->f_mapping;
2188 +- struct drm_dma_handle *phys;
2189 +- struct sg_table *st;
2190 + struct scatterlist *sg;
2191 +- char *vaddr;
2192 ++ struct sg_table *st;
2193 ++ dma_addr_t dma;
2194 ++ void *vaddr;
2195 ++ void *dst;
2196 + int i;
2197 +- int err;
2198 +
2199 + if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
2200 + return -EINVAL;
2201 +
2202 +- /* Always aligning to the object size, allows a single allocation
2203 ++ /*
2204 ++ * Always aligning to the object size allows a single allocation
2205 + * to handle all possible callers, and given typical object sizes,
2206 + * the alignment of the buddy allocation will naturally match.
2207 + */
2208 +- phys = drm_pci_alloc(obj->base.dev,
2209 +- roundup_pow_of_two(obj->base.size),
2210 +- roundup_pow_of_two(obj->base.size));
2211 +- if (!phys)
2212 ++ vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
2213 ++ roundup_pow_of_two(obj->base.size),
2214 ++ &dma, GFP_KERNEL);
2215 ++ if (!vaddr)
2216 + return -ENOMEM;
2217 +
2218 +- vaddr = phys->vaddr;
2219 ++ st = kmalloc(sizeof(*st), GFP_KERNEL);
2220 ++ if (!st)
2221 ++ goto err_pci;
2222 ++
2223 ++ if (sg_alloc_table(st, 1, GFP_KERNEL))
2224 ++ goto err_st;
2225 ++
2226 ++ sg = st->sgl;
2227 ++ sg->offset = 0;
2228 ++ sg->length = obj->base.size;
2229 ++
2230 ++ sg_assign_page(sg, (struct page *)vaddr);
2231 ++ sg_dma_address(sg) = dma;
2232 ++ sg_dma_len(sg) = obj->base.size;
2233 ++
2234 ++ dst = vaddr;
2235 + for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
2236 + struct page *page;
2237 +- char *src;
2238 ++ void *src;
2239 +
2240 + page = shmem_read_mapping_page(mapping, i);
2241 +- if (IS_ERR(page)) {
2242 +- err = PTR_ERR(page);
2243 +- goto err_phys;
2244 +- }
2245 ++ if (IS_ERR(page))
2246 ++ goto err_st;
2247 +
2248 + src = kmap_atomic(page);
2249 +- memcpy(vaddr, src, PAGE_SIZE);
2250 +- drm_clflush_virt_range(vaddr, PAGE_SIZE);
2251 ++ memcpy(dst, src, PAGE_SIZE);
2252 ++ drm_clflush_virt_range(dst, PAGE_SIZE);
2253 + kunmap_atomic(src);
2254 +
2255 + put_page(page);
2256 +- vaddr += PAGE_SIZE;
2257 ++ dst += PAGE_SIZE;
2258 + }
2259 +
2260 + intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
2261 +
2262 +- st = kmalloc(sizeof(*st), GFP_KERNEL);
2263 +- if (!st) {
2264 +- err = -ENOMEM;
2265 +- goto err_phys;
2266 +- }
2267 +-
2268 +- if (sg_alloc_table(st, 1, GFP_KERNEL)) {
2269 +- kfree(st);
2270 +- err = -ENOMEM;
2271 +- goto err_phys;
2272 +- }
2273 +-
2274 +- sg = st->sgl;
2275 +- sg->offset = 0;
2276 +- sg->length = obj->base.size;
2277 +-
2278 +- sg_dma_address(sg) = phys->busaddr;
2279 +- sg_dma_len(sg) = obj->base.size;
2280 +-
2281 +- obj->phys_handle = phys;
2282 +-
2283 + __i915_gem_object_set_pages(obj, st, sg->length);
2284 +
2285 + return 0;
2286 +
2287 +-err_phys:
2288 +- drm_pci_free(obj->base.dev, phys);
2289 +-
2290 +- return err;
2291 ++err_st:
2292 ++ kfree(st);
2293 ++err_pci:
2294 ++ dma_free_coherent(&obj->base.dev->pdev->dev,
2295 ++ roundup_pow_of_two(obj->base.size),
2296 ++ vaddr, dma);
2297 ++ return -ENOMEM;
2298 + }
2299 +
2300 + static void
2301 + i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
2302 + struct sg_table *pages)
2303 + {
2304 ++ dma_addr_t dma = sg_dma_address(pages->sgl);
2305 ++ void *vaddr = sg_page(pages->sgl);
2306 ++
2307 + __i915_gem_object_release_shmem(obj, pages, false);
2308 +
2309 + if (obj->mm.dirty) {
2310 + struct address_space *mapping = obj->base.filp->f_mapping;
2311 +- char *vaddr = obj->phys_handle->vaddr;
2312 ++ void *src = vaddr;
2313 + int i;
2314 +
2315 + for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
2316 +@@ -115,15 +114,16 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
2317 + continue;
2318 +
2319 + dst = kmap_atomic(page);
2320 +- drm_clflush_virt_range(vaddr, PAGE_SIZE);
2321 +- memcpy(dst, vaddr, PAGE_SIZE);
2322 ++ drm_clflush_virt_range(src, PAGE_SIZE);
2323 ++ memcpy(dst, src, PAGE_SIZE);
2324 + kunmap_atomic(dst);
2325 +
2326 + set_page_dirty(page);
2327 + if (obj->mm.madv == I915_MADV_WILLNEED)
2328 + mark_page_accessed(page);
2329 + put_page(page);
2330 +- vaddr += PAGE_SIZE;
2331 ++
2332 ++ src += PAGE_SIZE;
2333 + }
2334 + obj->mm.dirty = false;
2335 + }
2336 +@@ -131,7 +131,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
2337 + sg_free_table(pages);
2338 + kfree(pages);
2339 +
2340 +- drm_pci_free(obj->base.dev, obj->phys_handle);
2341 ++ dma_free_coherent(&obj->base.dev->pdev->dev,
2342 ++ roundup_pow_of_two(obj->base.size),
2343 ++ vaddr, dma);
2344 + }
2345 +
2346 + static void phys_release(struct drm_i915_gem_object *obj)
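
The rewritten get_pages path above allocates one contiguous buffer with dma_alloc_coherent(), describes it with a single-entry sg_table, and stages every shmem page into it before use. A toy model of the staging loop, with heap memory standing in for the shmem pages and the coherent buffer:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096
#define NPAGES 4

int main(void)
{
	char *pages[NPAGES];          /* stand-ins for shmem pages */
	char *vaddr = malloc((size_t)NPAGES * PAGE_SIZE);
	char *dst = vaddr;
	int i;

	if (!vaddr)
		return 1;

	for (i = 0; i < NPAGES; i++) {
		pages[i] = malloc(PAGE_SIZE);
		memset(pages[i], 'a' + i, PAGE_SIZE);
	}

	/* mirror of the staging loop: copy page i, then advance dst */
	for (i = 0; i < NPAGES; i++) {
		memcpy(dst, pages[i], PAGE_SIZE);
		dst += PAGE_SIZE;
	}

	printf("staged pages read back: %c %c %c %c\n",
	       vaddr[0], vaddr[PAGE_SIZE], vaddr[2 * PAGE_SIZE],
	       vaddr[3 * PAGE_SIZE]);

	for (i = 0; i < NPAGES; i++)
		free(pages[i]);
	free(vaddr);
	return 0;
}
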
2347 +diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
2348 +index d925a1035c9d..0d80472c0f29 100644
2349 +--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
2350 ++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
2351 +@@ -1157,7 +1157,7 @@ static u64 execlists_update_context(struct i915_request *rq)
2352 + {
2353 + struct intel_context *ce = rq->hw_context;
2354 + u64 desc = ce->lrc_desc;
2355 +- u32 tail;
2356 ++ u32 tail, prev;
2357 +
2358 + /*
2359 + * WaIdleLiteRestore:bdw,skl
2360 +@@ -1170,9 +1170,15 @@ static u64 execlists_update_context(struct i915_request *rq)
2361 + * subsequent resubmissions (for lite restore). Should that fail us,
2362 + * and we try and submit the same tail again, force the context
2363 + * reload.
2364 ++ *
2365 ++ * If we need to return to a preempted context, we need to skip the
2366 ++ * lite-restore and force it to reload the RING_TAIL. Otherwise, the
2367 ++ * HW has a tendency to ignore us rewinding the TAIL to the end of
2368 ++ * an earlier request.
2369 + */
2370 + tail = intel_ring_set_tail(rq->ring, rq->tail);
2371 +- if (unlikely(ce->lrc_reg_state[CTX_RING_TAIL] == tail))
2372 ++ prev = ce->lrc_reg_state[CTX_RING_TAIL];
2373 ++ if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
2374 + desc |= CTX_DESC_FORCE_RESTORE;
2375 + ce->lrc_reg_state[CTX_RING_TAIL] = tail;
2376 + rq->tail = rq->wa_tail;
2377 +@@ -1427,6 +1433,11 @@ last_active(const struct intel_engine_execlists *execlists)
2378 + return *last;
2379 + }
2380 +
2381 ++#define for_each_waiter(p__, rq__) \
2382 ++ list_for_each_entry_lockless(p__, \
2383 ++ &(rq__)->sched.waiters_list, \
2384 ++ wait_link)
2385 ++
2386 + static void defer_request(struct i915_request *rq, struct list_head * const pl)
2387 + {
2388 + LIST_HEAD(list);
2389 +@@ -1444,7 +1455,7 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
2390 + GEM_BUG_ON(i915_request_is_active(rq));
2391 + list_move_tail(&rq->sched.link, pl);
2392 +
2393 +- list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
2394 ++ for_each_waiter(p, rq) {
2395 + struct i915_request *w =
2396 + container_of(p->waiter, typeof(*w), sched);
2397 +
2398 +@@ -1651,14 +1662,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
2399 + */
2400 + __unwind_incomplete_requests(engine);
2401 +
2402 +- /*
2403 +- * If we need to return to the preempted context, we
2404 +- * need to skip the lite-restore and force it to
2405 +- * reload the RING_TAIL. Otherwise, the HW has a
2406 +- * tendency to ignore us rewinding the TAIL to the
2407 +- * end of an earlier request.
2408 +- */
2409 +- last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
2410 + last = NULL;
2411 + } else if (need_timeslice(engine, last) &&
2412 + timer_expired(&engine->execlists.timer)) {
2413 +diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
2414 +index 374b28f13ca0..6ff803f397c4 100644
2415 +--- a/drivers/gpu/drm/i915/gt/intel_ring.c
2416 ++++ b/drivers/gpu/drm/i915/gt/intel_ring.c
2417 +@@ -145,6 +145,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
2418 +
2419 + kref_init(&ring->ref);
2420 + ring->size = size;
2421 ++ ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);
2422 +
2423 + /*
2424 + * Workaround an erratum on the i830 which causes a hang if
2425 +diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
2426 +index ea2839d9e044..5bdce24994aa 100644
2427 +--- a/drivers/gpu/drm/i915/gt/intel_ring.h
2428 ++++ b/drivers/gpu/drm/i915/gt/intel_ring.h
2429 +@@ -56,6 +56,14 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
2430 + return pos & (ring->size - 1);
2431 + }
2432 +
2433 ++static inline int intel_ring_direction(const struct intel_ring *ring,
2434 ++ u32 next, u32 prev)
2435 ++{
2436 ++ typecheck(typeof(ring->size), next);
2437 ++ typecheck(typeof(ring->size), prev);
2438 ++ return (next - prev) << ring->wrap;
2439 ++}
2440 ++
2441 + static inline bool
2442 + intel_ring_offset_valid(const struct intel_ring *ring,
2443 + unsigned int pos)
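
intel_ring_direction() reduces to modular arithmetic: ring->wrap, set in intel_ring.c above to BITS_PER_TYPE(size) - ilog2(size), shifts the offset difference so that the ring's top offset bit lands in the machine sign bit, making "prev is ahead of next" come out negative. A stand-alone check of that arithmetic for an assumed 4096-byte ring:

#include <stdint.h>
#include <stdio.h>

/* wrap = BITS_PER_TYPE(u32) - ilog2(size); for a 4096-byte ring
 * that is 32 - 12 = 20 */
#define RING_WRAP (32 - 12)

static int ring_direction(uint32_t next, uint32_t prev)
{
	/* shift the offset difference up so bit 11 of the ring offset
	 * lands in the sign bit: behind => negative, ahead => positive */
	return (int32_t)((next - prev) << RING_WRAP);
}

int main(void)
{
	/* only the sign of the result matters */
	printf("moved forward:       %d\n", ring_direction(0x100, 0x080));
	printf("rewound (preempted): %d\n", ring_direction(0x080, 0x100));
	printf("forward across wrap: %d\n", ring_direction(0x010, 0xff0));
	return 0;
}
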
2444 +diff --git a/drivers/gpu/drm/i915/gt/intel_ring_types.h b/drivers/gpu/drm/i915/gt/intel_ring_types.h
2445 +index d9f17f38e0cc..3cd7fec7fd8d 100644
2446 +--- a/drivers/gpu/drm/i915/gt/intel_ring_types.h
2447 ++++ b/drivers/gpu/drm/i915/gt/intel_ring_types.h
2448 +@@ -45,6 +45,7 @@ struct intel_ring {
2449 +
2450 + u32 space;
2451 + u32 size;
2452 ++ u32 wrap;
2453 + u32 effective_size;
2454 + };
2455 +
2456 +diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
2457 +index 83f549d203a0..a635cf832d69 100644
2458 +--- a/drivers/gpu/drm/i915/gt/mock_engine.c
2459 ++++ b/drivers/gpu/drm/i915/gt/mock_engine.c
2460 +@@ -59,11 +59,26 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
2461 + ring->vaddr = (void *)(ring + 1);
2462 + atomic_set(&ring->pin_count, 1);
2463 +
2464 ++ ring->vma = i915_vma_alloc();
2465 ++ if (!ring->vma) {
2466 ++ kfree(ring);
2467 ++ return NULL;
2468 ++ }
2469 ++ i915_active_init(&ring->vma->active, NULL, NULL);
2470 ++
2471 + intel_ring_update_space(ring);
2472 +
2473 + return ring;
2474 + }
2475 +
2476 ++static void mock_ring_free(struct intel_ring *ring)
2477 ++{
2478 ++ i915_active_fini(&ring->vma->active);
2479 ++ i915_vma_free(ring->vma);
2480 ++
2481 ++ kfree(ring);
2482 ++}
2483 ++
2484 + static struct i915_request *first_request(struct mock_engine *engine)
2485 + {
2486 + return list_first_entry_or_null(&engine->hw_queue,
2487 +@@ -121,7 +136,7 @@ static void mock_context_destroy(struct kref *ref)
2488 + GEM_BUG_ON(intel_context_is_pinned(ce));
2489 +
2490 + if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
2491 +- kfree(ce->ring);
2492 ++ mock_ring_free(ce->ring);
2493 + mock_timeline_unpin(ce->timeline);
2494 + }
2495 +
2496 +diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
2497 +index 4b04af569c05..7dc7bb850d0a 100644
2498 +--- a/drivers/gpu/drm/i915/gvt/gtt.c
2499 ++++ b/drivers/gpu/drm/i915/gvt/gtt.c
2500 +@@ -1956,7 +1956,11 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
2501 +
2502 + if (mm->type == INTEL_GVT_MM_PPGTT) {
2503 + list_del(&mm->ppgtt_mm.list);
2504 ++
2505 ++ mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2506 + list_del(&mm->ppgtt_mm.lru_list);
2507 ++ mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2508 ++
2509 + invalidate_ppgtt_mm(mm);
2510 + } else {
2511 + vfree(mm->ggtt_mm.virtual_ggtt);
2512 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
2513 +index 905890e3ac24..3f07948ea4da 100644
2514 +--- a/drivers/gpu/drm/i915/i915_gem.c
2515 ++++ b/drivers/gpu/drm/i915/i915_gem.c
2516 +@@ -154,7 +154,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
2517 + struct drm_i915_gem_pwrite *args,
2518 + struct drm_file *file)
2519 + {
2520 +- void *vaddr = obj->phys_handle->vaddr + args->offset;
2521 ++ void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
2522 + char __user *user_data = u64_to_user_ptr(args->data_ptr);
2523 +
2524 + /*
2525 +@@ -800,10 +800,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
2526 + ret = i915_gem_gtt_pwrite_fast(obj, args);
2527 +
2528 + if (ret == -EFAULT || ret == -ENOSPC) {
2529 +- if (obj->phys_handle)
2530 +- ret = i915_gem_phys_pwrite(obj, args, file);
2531 +- else
2532 ++ if (i915_gem_object_has_struct_page(obj))
2533 + ret = i915_gem_shmem_pwrite(obj, args);
2534 ++ else
2535 ++ ret = i915_gem_phys_pwrite(obj, args, file);
2536 + }
2537 +
2538 + i915_gem_object_unpin_pages(obj);
2539 +diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
2540 +index 3c85cb0ee99f..354845800085 100644
2541 +--- a/drivers/gpu/drm/i915/i915_gpu_error.c
2542 ++++ b/drivers/gpu/drm/i915/i915_gpu_error.c
2543 +@@ -1820,7 +1820,8 @@ void i915_capture_error_state(struct drm_i915_private *i915,
2544 + if (!xchg(&warned, true) &&
2545 + ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
2546 + pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
2547 +- pr_info("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
2548 ++ pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
2549 ++ pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
2550 + pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
2551 + pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
2552 + pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
2553 +diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
2554 +index 247a9671bca5..e954fa6109c5 100644
2555 +--- a/drivers/gpu/drm/i915/i915_scheduler.c
2556 ++++ b/drivers/gpu/drm/i915/i915_scheduler.c
2557 +@@ -415,8 +415,6 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
2558 +
2559 + if (!node_signaled(signal)) {
2560 + INIT_LIST_HEAD(&dep->dfs_link);
2561 +- list_add(&dep->wait_link, &signal->waiters_list);
2562 +- list_add(&dep->signal_link, &node->signalers_list);
2563 + dep->signaler = signal;
2564 + dep->waiter = node;
2565 + dep->flags = flags;
2566 +@@ -426,6 +424,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
2567 + !node_started(signal))
2568 + node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
2569 +
2570 ++ /* All set, now publish. Beware the lockless walkers. */
2571 ++ list_add(&dep->signal_link, &node->signalers_list);
2572 ++ list_add_rcu(&dep->wait_link, &signal->waiters_list);
2573 ++
2574 + /*
2575 + * As we do not allow WAIT to preempt inflight requests,
2576 + * once we have executed a request, along with triggering
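
The scheduler hunk reorders __i915_sched_node_add_dependency() so every field of the dep is written before the node is published with list_add_rcu(), because for_each_waiter() in intel_lrc.c now walks waiters_list locklessly. The same initialise-then-publish discipline in miniature, using a C11 release/acquire pair as the publish step (all names are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct dep {
	int signaler, waiter, flags;
};

static _Atomic(struct dep *) published;

static void *reader(void *arg)
{
	struct dep *d;

	/* lockless walker: may run before or after the publish */
	while (!(d = atomic_load_explicit(&published,
					  memory_order_acquire)))
		;
	/* acquire pairs with release: all fields are visible */
	printf("saw dep %d -> %d (flags %d)\n",
	       d->signaler, d->waiter, d->flags);
	return NULL;
}

int main(void)
{
	static struct dep d;
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);

	d.signaler = 1;   /* 1. initialise every field ... */
	d.waiter = 2;
	d.flags = 3;
	atomic_store_explicit(&published, &d,   /* 2. ... then publish */
			      memory_order_release);

	pthread_join(t, NULL);
	return 0;
}
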
2577 +diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
2578 +index 0348c6d0ef5f..412135a07d5d 100644
2579 +--- a/drivers/gpu/drm/i915/i915_utils.c
2580 ++++ b/drivers/gpu/drm/i915/i915_utils.c
2581 +@@ -8,9 +8,8 @@
2582 + #include "i915_drv.h"
2583 + #include "i915_utils.h"
2584 +
2585 +-#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
2586 +-#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
2587 +- "providing the dmesg log by booting with drm.debug=0xf"
2588 ++#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
2589 ++#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
2590 +
2591 + void
2592 + __i915_printk(struct drm_i915_private *dev_priv, const char *level,
2593 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
2594 +index 24ab6249083a..6f420cc73dbd 100644
2595 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
2596 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
2597 +@@ -255,13 +255,13 @@ static const struct dpu_format dpu_format_map[] = {
2598 +
2599 + INTERLEAVED_RGB_FMT(RGB565,
2600 + 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
2601 +- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
2602 ++ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
2603 + false, 2, 0,
2604 + DPU_FETCH_LINEAR, 1),
2605 +
2606 + INTERLEAVED_RGB_FMT(BGR565,
2607 + 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
2608 +- C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
2609 ++ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
2610 + false, 2, 0,
2611 + DPU_FETCH_LINEAR, 1),
2612 +
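
The two 16-bit table entries above had their red and blue orderings transposed. For reference, DRM's RGB565 packs red in bits [15:11], green in [10:5] and blue in [4:0], and BGR565 swaps red and blue; a short packing helper makes the layouts concrete:

#include <stdint.h>
#include <stdio.h>

/* DRM_FORMAT_RGB565: [15:11] R, [10:5] G, [4:0] B (little endian) */
static uint16_t pack_rgb565(uint8_t r, uint8_t g, uint8_t b)
{
	return ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);
}

/* DRM_FORMAT_BGR565: same widths with R and B swapped */
static uint16_t pack_bgr565(uint8_t r, uint8_t g, uint8_t b)
{
	return ((b >> 3) << 11) | ((g >> 2) << 5) | (r >> 3);
}

int main(void)
{
	/* pure red comes out as 0xf800 in RGB565 but 0x001f in BGR565 */
	printf("RGB565(red)=0x%04x BGR565(red)=0x%04x\n",
	       pack_rgb565(255, 0, 0), pack_bgr565(255, 0, 0));
	return 0;
}
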
2613 +diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
2614 +index 5193b6257061..b856e87574fd 100644
2615 +--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
2616 ++++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
2617 +@@ -451,6 +451,8 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
2618 + asyw->clr.ntfy = armw->ntfy.handle != 0;
2619 + asyw->clr.sema = armw->sema.handle != 0;
2620 + asyw->clr.xlut = armw->xlut.handle != 0;
2621 ++ if (asyw->clr.xlut && asyw->visible)
2622 ++ asyw->set.xlut = asyw->xlut.handle != 0;
2623 + asyw->clr.csc = armw->csc.valid;
2624 + if (wndw->func->image_clr)
2625 + asyw->clr.image = armw->image.handle[0] != 0;
2626 +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
2627 +index 763cfca886a7..3107b0738e40 100644
2628 +--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
2629 ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
2630 +@@ -151,7 +151,12 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
2631 + as = mmu->as;
2632 + if (as >= 0) {
2633 + int en = atomic_inc_return(&mmu->as_count);
2634 +- WARN_ON(en >= NUM_JOB_SLOTS);
2635 ++
2636 ++ /*
2637 ++ * AS can be retained by active jobs or a perfcnt context,
2638 ++ * hence the '+ 1' here.
2639 ++ */
2640 ++ WARN_ON(en >= (NUM_JOB_SLOTS + 1));
2641 +
2642 + list_move(&mmu->list, &pfdev->as_lru_list);
2643 + goto out;
2644 +diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
2645 +index 684820448be3..6913578d5aa7 100644
2646 +--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
2647 ++++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
2648 +@@ -73,7 +73,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
2649 + struct panfrost_file_priv *user = file_priv->driver_priv;
2650 + struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
2651 + struct drm_gem_shmem_object *bo;
2652 +- u32 cfg;
2653 ++ u32 cfg, as;
2654 + int ret;
2655 +
2656 + if (user == perfcnt->user)
2657 +@@ -126,12 +126,8 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
2658 +
2659 + perfcnt->user = user;
2660 +
2661 +- /*
2662 +- * Always use address space 0 for now.
2663 +- * FIXME: this needs to be updated when we start using different
2664 +- * address space.
2665 +- */
2666 +- cfg = GPU_PERFCNT_CFG_AS(0) |
2667 ++ as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
2668 ++ cfg = GPU_PERFCNT_CFG_AS(as) |
2669 + GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL);
2670 +
2671 + /*
2672 +@@ -195,6 +191,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
2673 + drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
2674 + perfcnt->buf = NULL;
2675 + panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
2676 ++ panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
2677 + panfrost_gem_mapping_put(perfcnt->mapping);
2678 + perfcnt->mapping = NULL;
2679 + pm_runtime_mark_last_busy(pfdev->dev);
2680 +diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
2681 +index 4cf25458f0b9..0db8ef4fd6e1 100644
2682 +--- a/drivers/hwmon/acpi_power_meter.c
2683 ++++ b/drivers/hwmon/acpi_power_meter.c
2684 +@@ -355,7 +355,9 @@ static ssize_t show_str(struct device *dev,
2685 + struct acpi_device *acpi_dev = to_acpi_device(dev);
2686 + struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
2687 + acpi_string val;
2688 ++ int ret;
2689 +
2690 ++ mutex_lock(&resource->lock);
2691 + switch (attr->index) {
2692 + case 0:
2693 + val = resource->model_number;
2694 +@@ -372,8 +374,9 @@ static ssize_t show_str(struct device *dev,
2695 + val = "";
2696 + break;
2697 + }
2698 +-
2699 +- return sprintf(buf, "%s\n", val);
2700 ++ ret = sprintf(buf, "%s\n", val);
2701 ++ mutex_unlock(&resource->lock);
2702 ++ return ret;
2703 + }
2704 +
2705 + static ssize_t show_val(struct device *dev,
2706 +@@ -817,11 +820,12 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
2707 +
2708 + resource = acpi_driver_data(device);
2709 +
2710 +- mutex_lock(&resource->lock);
2711 + switch (event) {
2712 + case METER_NOTIFY_CONFIG:
2713 ++ mutex_lock(&resource->lock);
2714 + free_capabilities(resource);
2715 + res = read_capabilities(resource);
2716 ++ mutex_unlock(&resource->lock);
2717 + if (res)
2718 + break;
2719 +
2720 +@@ -830,15 +834,12 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
2721 + break;
2722 + case METER_NOTIFY_TRIP:
2723 + sysfs_notify(&device->dev.kobj, NULL, POWER_AVERAGE_NAME);
2724 +- update_meter(resource);
2725 + break;
2726 + case METER_NOTIFY_CAP:
2727 + sysfs_notify(&device->dev.kobj, NULL, POWER_CAP_NAME);
2728 +- update_cap(resource);
2729 + break;
2730 + case METER_NOTIFY_INTERVAL:
2731 + sysfs_notify(&device->dev.kobj, NULL, POWER_AVG_INTERVAL_NAME);
2732 +- update_avg_interval(resource);
2733 + break;
2734 + case METER_NOTIFY_CAPPING:
2735 + sysfs_notify(&device->dev.kobj, NULL, POWER_ALARM_NAME);
2736 +@@ -848,7 +849,6 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
2737 + WARN(1, "Unexpected event %d\n", event);
2738 + break;
2739 + }
2740 +- mutex_unlock(&resource->lock);
2741 +
2742 + acpi_bus_generate_netlink_event(ACPI_POWER_METER_CLASS,
2743 + dev_name(&device->dev), event, 0);
2744 +@@ -912,8 +912,8 @@ static int acpi_power_meter_remove(struct acpi_device *device)
2745 + resource = acpi_driver_data(device);
2746 + hwmon_device_unregister(resource->hwmon_dev);
2747 +
2748 +- free_capabilities(resource);
2749 + remove_attrs(resource);
2750 ++ free_capabilities(resource);
2751 +
2752 + kfree(resource);
2753 + return 0;
2754 +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
2755 +index b273e421e910..a1a035270cab 100644
2756 +--- a/drivers/infiniband/ulp/isert/ib_isert.c
2757 ++++ b/drivers/infiniband/ulp/isert/ib_isert.c
2758 +@@ -2575,6 +2575,17 @@ isert_wait4logout(struct isert_conn *isert_conn)
2759 + }
2760 + }
2761 +
2762 ++static void
2763 ++isert_wait4cmds(struct iscsi_conn *conn)
2764 ++{
2765 ++ isert_info("iscsi_conn %p\n", conn);
2766 ++
2767 ++ if (conn->sess) {
2768 ++ target_sess_cmd_list_set_waiting(conn->sess->se_sess);
2769 ++ target_wait_for_sess_cmds(conn->sess->se_sess);
2770 ++ }
2771 ++}
2772 ++
2773 + /**
2774 + * isert_put_unsol_pending_cmds() - Drop commands waiting for
2775 + * unsolicited dataout
2776 +@@ -2622,6 +2633,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
2777 +
2778 + ib_drain_qp(isert_conn->qp);
2779 + isert_put_unsol_pending_cmds(conn);
2780 ++ isert_wait4cmds(conn);
2781 + isert_wait4logout(isert_conn);
2782 +
2783 + queue_work(isert_release_wq, &isert_conn->release_work);
2784 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
2785 +index dfedbb04f647..e7fc9e928788 100644
2786 +--- a/drivers/iommu/intel-iommu.c
2787 ++++ b/drivers/iommu/intel-iommu.c
2788 +@@ -732,6 +732,11 @@ static int iommu_dummy(struct device *dev)
2789 + return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2790 + }
2791 +
2792 ++static bool attach_deferred(struct device *dev)
2793 ++{
2794 ++ return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
2795 ++}
2796 ++
2797 + /**
2798 + * is_downstream_to_pci_bridge - test if a device belongs to the PCI
2799 + * sub-hierarchy of a candidate PCI-PCI bridge
2800 +@@ -2424,8 +2429,7 @@ static struct dmar_domain *find_domain(struct device *dev)
2801 + {
2802 + struct device_domain_info *info;
2803 +
2804 +- if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO ||
2805 +- dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO))
2806 ++ if (unlikely(attach_deferred(dev) || iommu_dummy(dev)))
2807 + return NULL;
2808 +
2809 + /* No lock here, assumes no domain exit in normal case */
2810 +@@ -2436,18 +2440,14 @@ static struct dmar_domain *find_domain(struct device *dev)
2811 + return NULL;
2812 + }
2813 +
2814 +-static struct dmar_domain *deferred_attach_domain(struct device *dev)
2815 ++static void do_deferred_attach(struct device *dev)
2816 + {
2817 +- if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) {
2818 +- struct iommu_domain *domain;
2819 +-
2820 +- dev->archdata.iommu = NULL;
2821 +- domain = iommu_get_domain_for_dev(dev);
2822 +- if (domain)
2823 +- intel_iommu_attach_device(domain, dev);
2824 +- }
2825 ++ struct iommu_domain *domain;
2826 +
2827 +- return find_domain(dev);
2828 ++ dev->archdata.iommu = NULL;
2829 ++ domain = iommu_get_domain_for_dev(dev);
2830 ++ if (domain)
2831 ++ intel_iommu_attach_device(domain, dev);
2832 + }
2833 +
2834 + static inline struct device_domain_info *
2835 +@@ -2799,7 +2799,7 @@ static int identity_mapping(struct device *dev)
2836 + struct device_domain_info *info;
2837 +
2838 + info = dev->archdata.iommu;
2839 +- if (info && info != DUMMY_DEVICE_DOMAIN_INFO && info != DEFER_DEVICE_DOMAIN_INFO)
2840 ++ if (info)
2841 + return (info->domain == si_domain);
2842 +
2843 + return 0;
2844 +@@ -3470,6 +3470,9 @@ static bool iommu_need_mapping(struct device *dev)
2845 + if (iommu_dummy(dev))
2846 + return false;
2847 +
2848 ++ if (unlikely(attach_deferred(dev)))
2849 ++ do_deferred_attach(dev);
2850 ++
2851 + ret = identity_mapping(dev);
2852 + if (ret) {
2853 + u64 dma_mask = *dev->dma_mask;
2854 +@@ -3518,7 +3521,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
2855 +
2856 + BUG_ON(dir == DMA_NONE);
2857 +
2858 +- domain = deferred_attach_domain(dev);
2859 ++ domain = find_domain(dev);
2860 + if (!domain)
2861 + return DMA_MAPPING_ERROR;
2862 +
2863 +@@ -3738,7 +3741,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
2864 + if (!iommu_need_mapping(dev))
2865 + return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
2866 +
2867 +- domain = deferred_attach_domain(dev);
2868 ++ domain = find_domain(dev);
2869 + if (!domain)
2870 + return 0;
2871 +
2872 +@@ -3833,7 +3836,11 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
2873 + int prot = 0;
2874 + int ret;
2875 +
2876 +- domain = deferred_attach_domain(dev);
2877 ++ if (unlikely(attach_deferred(dev)))
2878 ++ do_deferred_attach(dev);
2879 ++
2880 ++ domain = find_domain(dev);
2881 ++
2882 + if (WARN_ON(dir == DMA_NONE || !domain))
2883 + return DMA_MAPPING_ERROR;
2884 +
2885 +@@ -5989,7 +5996,7 @@ intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
2886 + static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
2887 + struct device *dev)
2888 + {
2889 +- return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
2890 ++ return attach_deferred(dev);
2891 + }
2892 +
2893 + const struct iommu_ops intel_iommu_ops = {
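
A note on the helpers introduced above: dev->archdata.iommu is an overloaded field that may hold NULL, a real info pointer, or one of two sentinel values, so attach_deferred() and iommu_dummy() keep those comparisons in one place. A toy model of the sentinel-pointer pattern (the defines and structs are stand-ins for the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct info { int domain; };
struct device { struct info *iommu; };

/* stand-ins for the kernel's sentinel defines */
#define DUMMY_DEVICE_DOMAIN_INFO ((struct info *)(-1))
#define DEFER_DEVICE_DOMAIN_INFO ((struct info *)(-2))

static bool iommu_dummy(const struct device *dev)
{
	return dev->iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

static bool attach_deferred(const struct device *dev)
{
	return dev->iommu == DEFER_DEVICE_DOMAIN_INFO;
}

/* only dereference the field once the sentinels are excluded */
static bool has_real_info(const struct device *dev)
{
	return dev->iommu && !iommu_dummy(dev) && !attach_deferred(dev);
}

int main(void)
{
	struct info real = { .domain = 42 };
	struct device a = { .iommu = &real };
	struct device b = { .iommu = DEFER_DEVICE_DOMAIN_INFO };

	printf("a: real=%d deferred=%d\n",
	       has_real_info(&a), attach_deferred(&a));
	printf("b: real=%d deferred=%d\n",
	       has_real_info(&b), attach_deferred(&b));
	return 0;
}
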
2894 +diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
2895 +index 52f38292df5b..c3de46acf50a 100644
2896 +--- a/drivers/iommu/qcom_iommu.c
2897 ++++ b/drivers/iommu/qcom_iommu.c
2898 +@@ -345,21 +345,19 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
2899 + {
2900 + struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
2901 +
2902 +- if (WARN_ON(qcom_domain->iommu)) /* forgot to detach? */
2903 +- return;
2904 +-
2905 + iommu_put_dma_cookie(domain);
2906 +
2907 +- /* NOTE: unmap can be called after client device is powered off,
2908 +- * for example, with GPUs or anything involving dma-buf. So we
2909 +- * cannot rely on the device_link. Make sure the IOMMU is on to
2910 +- * avoid unclocked accesses in the TLB inv path:
2911 +- */
2912 +- pm_runtime_get_sync(qcom_domain->iommu->dev);
2913 +-
2914 +- free_io_pgtable_ops(qcom_domain->pgtbl_ops);
2915 +-
2916 +- pm_runtime_put_sync(qcom_domain->iommu->dev);
2917 ++ if (qcom_domain->iommu) {
2918 ++ /*
2919 ++ * NOTE: unmap can be called after client device is powered
2920 ++ * off, for example, with GPUs or anything involving dma-buf.
2921 ++ * So we cannot rely on the device_link. Make sure the IOMMU
2922 ++ * is on to avoid unclocked accesses in the TLB inv path:
2923 ++ */
2924 ++ pm_runtime_get_sync(qcom_domain->iommu->dev);
2925 ++ free_io_pgtable_ops(qcom_domain->pgtbl_ops);
2926 ++ pm_runtime_put_sync(qcom_domain->iommu->dev);
2927 ++ }
2928 +
2929 + kfree(qcom_domain);
2930 + }
2931 +@@ -405,7 +403,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
2932 + struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
2933 + unsigned i;
2934 +
2935 +- if (!qcom_domain->iommu)
2936 ++ if (WARN_ON(!qcom_domain->iommu))
2937 + return;
2938 +
2939 + pm_runtime_get_sync(qcom_iommu->dev);
2940 +@@ -418,8 +416,6 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
2941 + ctx->domain = NULL;
2942 + }
2943 + pm_runtime_put_sync(qcom_iommu->dev);
2944 +-
2945 +- qcom_domain->iommu = NULL;
2946 + }
2947 +
2948 + static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
2949 +diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
2950 +index 8b2b9e254d28..f4015a5fb5c0 100644
2951 +--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
2952 ++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
2953 +@@ -1078,8 +1078,6 @@ construct_skb:
2954 + skb = ice_build_skb(rx_ring, rx_buf, &xdp);
2955 + else
2956 + skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
2957 +- } else {
2958 +- skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
2959 + }
2960 + /* exit if we failed to retrieve a buffer */
2961 + if (!skb) {
2962 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
2963 +index 3a975641f902..20b907dc1e29 100644
2964 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
2965 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
2966 +@@ -200,7 +200,7 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
2967 + netdev_err(priv->netdev, err_str);
2968 +
2969 + if (!reporter)
2970 +- return err_ctx->recover(&err_ctx->ctx);
2971 ++ return err_ctx->recover(err_ctx->ctx);
2972 +
2973 + return devlink_health_report(reporter, err_str, err_ctx);
2974 + }
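
The one-character fix above is worth dwelling on: ctx is already a void *, so passing &err_ctx->ctx handed the recover callback a pointer to the pointer, which it would then misinterpret as the object itself. A small demonstration of the difference (the struct mirrors the shape of the mlx5e error context, not its real definition):

#include <stdio.h>

struct err_ctx {
	int (*recover)(void *ctx);
	void *ctx;
};

static int recover(void *ctx)
{
	/* the callback expects the object itself, not &ctx */
	printf("recovering channel %d\n", *(int *)ctx);
	return 0;
}

int main(void)
{
	int channel = 7;
	struct err_ctx e = { .recover = recover, .ctx = &channel };

	/* correct: pass the stored pointer through unchanged */
	return e.recover(e.ctx);
	/* the bug was e.recover(&e.ctx): a void ** in void * clothing,
	 * so the callback would reinterpret the pointer's own bytes */
}
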
2975 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
2976 +index 7c8796d9743f..a226277b0980 100644
2977 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
2978 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
2979 +@@ -179,6 +179,14 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
2980 + }
2981 + }
2982 +
2983 ++static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
2984 ++{
2985 ++ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
2986 ++ mlx5_wq_ll_reset(&rq->mpwqe.wq);
2987 ++ else
2988 ++ mlx5_wq_cyc_reset(&rq->wqe.wq);
2989 ++}
2990 ++
2991 + /* SW parser related functions */
2992 +
2993 + struct mlx5e_swp_spec {
2994 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2995 +index 4997b8a51994..5d9cfac67236 100644
2996 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2997 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2998 +@@ -721,6 +721,9 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
2999 + if (!in)
3000 + return -ENOMEM;
3001 +
3002 ++ if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY)
3003 ++ mlx5e_rqwq_reset(rq);
3004 ++
3005 + rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
3006 +
3007 + MLX5_SET(modify_rq_in, in, rq_state, curr_state);
3008 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
3009 +index 3df3604e8929..07282c679dcd 100644
3010 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
3011 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
3012 +@@ -456,12 +456,16 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
3013 +
3014 + static int esw_legacy_enable(struct mlx5_eswitch *esw)
3015 + {
3016 +- int ret;
3017 ++ struct mlx5_vport *vport;
3018 ++ int ret, i;
3019 +
3020 + ret = esw_create_legacy_table(esw);
3021 + if (ret)
3022 + return ret;
3023 +
3024 ++ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
3025 ++ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
3026 ++
3027 + ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
3028 + if (ret)
3029 + esw_destroy_legacy_table(esw);
3030 +@@ -2449,25 +2453,17 @@ out:
3031 +
3032 + int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
3033 + {
3034 +- int err = 0;
3035 +-
3036 + if (!esw)
3037 + return -EOPNOTSUPP;
3038 +
3039 + if (!ESW_ALLOWED(esw))
3040 + return -EPERM;
3041 +
3042 +- mutex_lock(&esw->state_lock);
3043 +- if (esw->mode != MLX5_ESWITCH_LEGACY) {
3044 +- err = -EOPNOTSUPP;
3045 +- goto out;
3046 +- }
3047 ++ if (esw->mode != MLX5_ESWITCH_LEGACY)
3048 ++ return -EOPNOTSUPP;
3049 +
3050 + *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
3051 +-
3052 +-out:
3053 +- mutex_unlock(&esw->state_lock);
3054 +- return err;
3055 ++ return 0;
3056 + }
3057 +
3058 + int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
3059 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
3060 +index 3e6412783078..dfefc6250f23 100644
3061 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
3062 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
3063 +@@ -1377,7 +1377,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
3064 + return -EINVAL;
3065 + }
3066 +
3067 +- mlx5_eswitch_disable(esw, true);
3068 ++ mlx5_eswitch_disable(esw, false);
3069 + mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
3070 + err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
3071 + if (err) {
3072 +@@ -2271,7 +2271,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
3073 + {
3074 + int err, err1;
3075 +
3076 +- mlx5_eswitch_disable(esw, true);
3077 ++ mlx5_eswitch_disable(esw, false);
3078 + err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
3079 + if (err) {
3080 + NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
3081 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
3082 +index 02f7e4a39578..01f075fac276 100644
3083 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
3084 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
3085 +@@ -94,6 +94,13 @@ void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides)
3086 + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false);
3087 + }
3088 +
3089 ++void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq)
3090 ++{
3091 ++ wq->wqe_ctr = 0;
3092 ++ wq->cur_sz = 0;
3093 ++ mlx5_wq_cyc_update_db_record(wq);
3094 ++}
3095 ++
3096 + int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
3097 + void *qpc, struct mlx5_wq_qp *wq,
3098 + struct mlx5_wq_ctrl *wq_ctrl)
3099 +@@ -192,6 +199,19 @@ err_db_free:
3100 + return err;
3101 + }
3102 +
3103 ++static void mlx5_wq_ll_init_list(struct mlx5_wq_ll *wq)
3104 ++{
3105 ++ struct mlx5_wqe_srq_next_seg *next_seg;
3106 ++ int i;
3107 ++
3108 ++ for (i = 0; i < wq->fbc.sz_m1; i++) {
3109 ++ next_seg = mlx5_wq_ll_get_wqe(wq, i);
3110 ++ next_seg->next_wqe_index = cpu_to_be16(i + 1);
3111 ++ }
3112 ++ next_seg = mlx5_wq_ll_get_wqe(wq, i);
3113 ++ wq->tail_next = &next_seg->next_wqe_index;
3114 ++}
3115 ++
3116 + int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
3117 + void *wqc, struct mlx5_wq_ll *wq,
3118 + struct mlx5_wq_ctrl *wq_ctrl)
3119 +@@ -199,9 +219,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
3120 + u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
3121 + u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);
3122 + struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
3123 +- struct mlx5_wqe_srq_next_seg *next_seg;
3124 + int err;
3125 +- int i;
3126 +
3127 + err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
3128 + if (err) {
3129 +@@ -220,13 +238,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
3130 +
3131 + mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
3132 +
3133 +- for (i = 0; i < fbc->sz_m1; i++) {
3134 +- next_seg = mlx5_wq_ll_get_wqe(wq, i);
3135 +- next_seg->next_wqe_index = cpu_to_be16(i + 1);
3136 +- }
3137 +- next_seg = mlx5_wq_ll_get_wqe(wq, i);
3138 +- wq->tail_next = &next_seg->next_wqe_index;
3139 +-
3140 ++ mlx5_wq_ll_init_list(wq);
3141 + wq_ctrl->mdev = mdev;
3142 +
3143 + return 0;
3144 +@@ -237,6 +249,15 @@ err_db_free:
3145 + return err;
3146 + }
3147 +
3148 ++void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq)
3149 ++{
3150 ++ wq->head = 0;
3151 ++ wq->wqe_ctr = 0;
3152 ++ wq->cur_sz = 0;
3153 ++ mlx5_wq_ll_init_list(wq);
3154 ++ mlx5_wq_ll_update_db_record(wq);
3155 ++}
3156 ++
3157 + void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
3158 + {
3159 + mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
3160 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
3161 +index d9a94bc223c0..4cadc336593f 100644
3162 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
3163 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
3164 +@@ -80,6 +80,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
3165 + void *wqc, struct mlx5_wq_cyc *wq,
3166 + struct mlx5_wq_ctrl *wq_ctrl);
3167 + void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides);
3168 ++void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq);
3169 +
3170 + int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
3171 + void *qpc, struct mlx5_wq_qp *wq,
3172 +@@ -92,6 +93,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
3173 + int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
3174 + void *wqc, struct mlx5_wq_ll *wq,
3175 + struct mlx5_wq_ctrl *wq_ctrl);
3176 ++void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq);
3177 +
3178 + void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
3179 +
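/*
 * Taken together, the mlx5 hunks above add a software-side reset for both
 * work-queue flavours and invoke it on the RST -> RDY transition, so the
 * driver's producer counters match the freshly reset hardware queue. For
 * the cyclic WQ the reset amounts to zeroing the counters and republishing
 * the doorbell; a minimal sketch of that idea, with hypothetical toy_* names:
 */
struct toy_wq_cyc {
	u16 wqe_ctr;	/* SW producer counter */
	u16 cur_sz;	/* WQEs currently posted */
	__be32 *db;	/* doorbell record read by HW */
};

static void toy_wq_cyc_reset(struct toy_wq_cyc *wq)
{
	wq->wqe_ctr = 0;
	wq->cur_sz = 0;
	*wq->db = cpu_to_be32(wq->wqe_ctr);	/* publish "ring is empty" to HW */
}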
3180 +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
3181 +index 797c18337d96..a11900cf3a36 100644
3182 +--- a/drivers/nvme/host/multipath.c
3183 ++++ b/drivers/nvme/host/multipath.c
3184 +@@ -715,6 +715,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
3185 + }
3186 +
3187 + INIT_WORK(&ctrl->ana_work, nvme_ana_work);
3188 ++ kfree(ctrl->ana_log_buf);
3189 + ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
3190 + if (!ctrl->ana_log_buf) {
3191 + error = -ENOMEM;
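/*
 * nvme_mpath_init() can run again after a controller reset, so freeing the
 * old ana_log_buf before the fresh kmalloc() prevents re-initialisation from
 * leaking the previous buffer; kfree(NULL) is a no-op, which keeps the first
 * call safe. A minimal sketch of the free-before-realloc pattern, with a
 * hypothetical example_ctrl structure:
 */
struct example_ctrl {
	void *log_buf;
};

static int example_reinit_log(struct example_ctrl *ctrl, size_t size)
{
	kfree(ctrl->log_buf);		/* drop any buffer from a prior init */
	ctrl->log_buf = kmalloc(size, GFP_KERNEL);
	if (!ctrl->log_buf)
		return -ENOMEM;
	return 0;
}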
3192 +diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
3193 +index 74d497d39c5a..c6695354b123 100644
3194 +--- a/drivers/staging/android/ashmem.c
3195 ++++ b/drivers/staging/android/ashmem.c
3196 +@@ -351,8 +351,23 @@ static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
3197 + _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
3198 + }
3199 +
3200 ++static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
3201 ++{
3202 ++ /* do not allow the ashmem backing shmem file to be mmapped directly */
3203 ++ return -EPERM;
3204 ++}
3205 ++
3206 ++static unsigned long
3207 ++ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr,
3208 ++ unsigned long len, unsigned long pgoff,
3209 ++ unsigned long flags)
3210 ++{
3211 ++ return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
3212 ++}
3213 ++
3214 + static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
3215 + {
3216 ++ static struct file_operations vmfile_fops;
3217 + struct ashmem_area *asma = file->private_data;
3218 + int ret = 0;
3219 +
3220 +@@ -393,6 +408,19 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
3221 + }
3222 + vmfile->f_mode |= FMODE_LSEEK;
3223 + asma->file = vmfile;
3224 ++ /*
3225 ++ * override mmap operation of the vmfile so that it can't be
3226 ++ * remapped which would lead to creation of a new vma with no
3227 ++ * asma permission checks. Have to override get_unmapped_area
3228 ++ * as well to prevent VM_BUG_ON check for f_ops modification.
3229 ++ */
3230 ++ if (!vmfile_fops.mmap) {
3231 ++ vmfile_fops = *vmfile->f_op;
3232 ++ vmfile_fops.mmap = ashmem_vmfile_mmap;
3233 ++ vmfile_fops.get_unmapped_area =
3234 ++ ashmem_vmfile_get_unmapped_area;
3235 ++ }
3236 ++ vmfile->f_op = &vmfile_fops;
3237 + }
3238 + get_file(asma->file);
3239 +
3240 +diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
3241 +index 9b19ea9d3fa1..9a3f7c034ab4 100644
3242 +--- a/drivers/staging/greybus/audio_manager.c
3243 ++++ b/drivers/staging/greybus/audio_manager.c
3244 +@@ -92,8 +92,8 @@ void gb_audio_manager_remove_all(void)
3245 +
3246 + list_for_each_entry_safe(module, next, &modules_list, list) {
3247 + list_del(&module->list);
3248 +- kobject_put(&module->kobj);
3249 + ida_simple_remove(&module_id, module->id);
3250 ++ kobject_put(&module->kobj);
3251 + }
3252 +
3253 + is_empty = list_empty(&modules_list);
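/*
 * The greybus reordering matters because kobject_put() may drop the last
 * reference and free the containing module, after which module->id must not
 * be read. General teardown shape, with a hypothetical example_module: take
 * what you need from the object, release external resources, and only then
 * drop the possibly-final reference:
 */
struct example_module {
	struct kobject kobj;
	int id;
};

static void example_remove(struct example_module *m, struct ida *ids)
{
	int id = m->id;			/* capture before the final put */

	ida_simple_remove(ids, id);
	kobject_put(&m->kobj);		/* may free m; no accesses after this */
}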
3254 +diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
3255 +index 47f4cc6a19a9..df945a059cf6 100644
3256 +--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
3257 ++++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
3258 +@@ -2011,7 +2011,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
3259 + struct ieee_param *param;
3260 + uint ret = 0;
3261 +
3262 +- if (p->length < sizeof(struct ieee_param) || !p->pointer) {
3263 ++ if (!p->pointer || p->length != sizeof(struct ieee_param)) {
3264 + ret = -EINVAL;
3265 + goto out;
3266 + }
3267 +@@ -2798,7 +2798,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
3268 + goto out;
3269 + }
3270 +
3271 +- if (!p->pointer) {
3272 ++ if (!p->pointer || p->length != sizeof(struct ieee_param)) {
3273 + ret = -EINVAL;
3274 + goto out;
3275 + }
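/*
 * Both rtl8188eu hunks tighten the same precondition: the ioctl payload must
 * be exactly one struct ieee_param and the user pointer non-NULL before
 * anything is copied from userspace. A sketch of the validate-then-copy
 * pattern (dispatch left abstract; example_wext_ioctl is hypothetical):
 */
static int example_wext_ioctl(struct iw_point *p)
{
	struct ieee_param *param;

	if (!p->pointer || p->length != sizeof(struct ieee_param))
		return -EINVAL;

	param = memdup_user(p->pointer, p->length);	/* bounded by the check */
	if (IS_ERR(param))
		return PTR_ERR(param);
	/* ... dispatch on the request ... */
	kfree(param);
	return 0;
}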
3276 +diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
3277 +index b44e902ed338..b6d56cfb0a19 100644
3278 +--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
3279 ++++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
3280 +@@ -476,14 +476,13 @@ int rtl8723bs_xmit_thread(void *context)
3281 + s32 ret;
3282 + struct adapter *padapter;
3283 + struct xmit_priv *pxmitpriv;
3284 +- u8 thread_name[20] = "RTWHALXT";
3285 +-
3286 ++ u8 thread_name[20];
3287 +
3288 + ret = _SUCCESS;
3289 + padapter = context;
3290 + pxmitpriv = &padapter->xmitpriv;
3291 +
3292 +- rtw_sprintf(thread_name, 20, "%s-"ADPT_FMT, thread_name, ADPT_ARG(padapter));
3293 ++ rtw_sprintf(thread_name, 20, "RTWHALXT-" ADPT_FMT, ADPT_ARG(padapter));
3294 + thread_enter(thread_name);
3295 +
3296 + DBG_871X("start "FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
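/*
 * The rtl8723bs change above also removes an aliasing bug: thread_name was
 * both the destination buffer and a "%s" argument of the same formatted
 * print, which is undefined behaviour. The safe form bakes the literal
 * prefix into the format string; sketched with plain snprintf():
 */
static void example_thread_name(char *buf, size_t len, int idx)
{
	/* broken: buf would be read via "%s" while being overwritten */
	/* snprintf(buf, len, "%s-%d", buf, idx); */

	/* fixed: no self-reference */
	snprintf(buf, len, "RTWHALXT-%d", idx);
}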
3297 +diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
3298 +index db6528a01229..2ac0d84f090e 100644
3299 +--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
3300 ++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
3301 +@@ -3373,7 +3373,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
3302 +
3303 + /* down(&ieee->wx_sem); */
3304 +
3305 +- if (p->length < sizeof(struct ieee_param) || !p->pointer) {
3306 ++ if (!p->pointer || p->length != sizeof(struct ieee_param)) {
3307 + ret = -EINVAL;
3308 + goto out;
3309 + }
3310 +@@ -4207,7 +4207,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
3311 +
3312 +
3313 + /* if (p->length < sizeof(struct ieee_param) || !p->pointer) { */
3314 +- if (!p->pointer) {
3315 ++ if (!p->pointer || p->length != sizeof(*param)) {
3316 + ret = -EINVAL;
3317 + goto out;
3318 + }
3319 +diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
3320 +index 3b94e80f1d5e..879ceef517fb 100644
3321 +--- a/drivers/staging/vt6656/dpc.c
3322 ++++ b/drivers/staging/vt6656/dpc.c
3323 +@@ -130,7 +130,7 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
3324 +
3325 + vnt_rf_rssi_to_dbm(priv, *rssi, &rx_dbm);
3326 +
3327 +- priv->bb_pre_ed_rssi = (u8)rx_dbm + 1;
3328 ++ priv->bb_pre_ed_rssi = (u8)-rx_dbm + 1;
3329 + priv->current_rssi = priv->bb_pre_ed_rssi;
3330 +
3331 + skb_pull(skb, 8);
3332 +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
3333 +index b94ed4e30770..09e55ea0bf5d 100644
3334 +--- a/drivers/target/iscsi/iscsi_target.c
3335 ++++ b/drivers/target/iscsi/iscsi_target.c
3336 +@@ -1165,9 +1165,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3337 + hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
3338 + conn->cid);
3339 +
3340 +- if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
3341 +- return iscsit_add_reject_cmd(cmd,
3342 +- ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
3343 ++ target_get_sess_cmd(&cmd->se_cmd, true);
3344 +
3345 + cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
3346 + scsilun_to_int(&hdr->lun));
3347 +@@ -2004,9 +2002,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3348 + conn->sess->se_sess, 0, DMA_NONE,
3349 + TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
3350 +
3351 +- if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
3352 +- return iscsit_add_reject_cmd(cmd,
3353 +- ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
3354 ++ target_get_sess_cmd(&cmd->se_cmd, true);
3355 +
3356 + /*
3357 + * TASK_REASSIGN for ERL=2 / connection stays inside of
3358 +@@ -4149,6 +4145,9 @@ int iscsit_close_connection(
3359 + iscsit_stop_nopin_response_timer(conn);
3360 + iscsit_stop_nopin_timer(conn);
3361 +
3362 ++ if (conn->conn_transport->iscsit_wait_conn)
3363 ++ conn->conn_transport->iscsit_wait_conn(conn);
3364 ++
3365 + /*
3366 + * During Connection recovery drop unacknowledged out of order
3367 + * commands for this connection, and prepare the other commands
3368 +@@ -4231,11 +4230,6 @@ int iscsit_close_connection(
3369 + * must wait until they have completed.
3370 + */
3371 + iscsit_check_conn_usage_count(conn);
3372 +- target_sess_cmd_list_set_waiting(sess->se_sess);
3373 +- target_wait_for_sess_cmds(sess->se_sess);
3374 +-
3375 +- if (conn->conn_transport->iscsit_wait_conn)
3376 +- conn->conn_transport->iscsit_wait_conn(conn);
3377 +
3378 + ahash_request_free(conn->conn_tx_hash);
3379 + if (conn->conn_rx_hash) {
3380 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
3381 +index ea482d4b1f00..0ae9e60fc4d5 100644
3382 +--- a/drivers/target/target_core_transport.c
3383 ++++ b/drivers/target/target_core_transport.c
3384 +@@ -666,6 +666,11 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
3385 +
3386 + target_remove_from_state_list(cmd);
3387 +
3388 ++ /*
3389 ++ * Clear struct se_cmd->se_lun before the handoff to FE.
3390 ++ */
3391 ++ cmd->se_lun = NULL;
3392 ++
3393 + spin_lock_irqsave(&cmd->t_state_lock, flags);
3394 + /*
3395 + * Determine if frontend context caller is requesting the stopping of
3396 +@@ -693,6 +698,17 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
3397 + return cmd->se_tfo->check_stop_free(cmd);
3398 + }
3399 +
3400 ++static void transport_lun_remove_cmd(struct se_cmd *cmd)
3401 ++{
3402 ++ struct se_lun *lun = cmd->se_lun;
3403 ++
3404 ++ if (!lun)
3405 ++ return;
3406 ++
3407 ++ if (cmpxchg(&cmd->lun_ref_active, true, false))
3408 ++ percpu_ref_put(&lun->lun_ref);
3409 ++}
3410 ++
3411 + static void target_complete_failure_work(struct work_struct *work)
3412 + {
3413 + struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3414 +@@ -783,6 +799,8 @@ static void target_handle_abort(struct se_cmd *cmd)
3415 +
3416 + WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
3417 +
3418 ++ transport_lun_remove_cmd(cmd);
3419 ++
3420 + transport_cmd_check_stop_to_fabric(cmd);
3421 + }
3422 +
3423 +@@ -1708,6 +1726,7 @@ static void target_complete_tmr_failure(struct work_struct *work)
3424 + se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
3425 + se_cmd->se_tfo->queue_tm_rsp(se_cmd);
3426 +
3427 ++ transport_lun_remove_cmd(se_cmd);
3428 + transport_cmd_check_stop_to_fabric(se_cmd);
3429 + }
3430 +
3431 +@@ -1898,6 +1917,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
3432 + goto queue_full;
3433 +
3434 + check_stop:
3435 ++ transport_lun_remove_cmd(cmd);
3436 + transport_cmd_check_stop_to_fabric(cmd);
3437 + return;
3438 +
3439 +@@ -2195,6 +2215,7 @@ queue_status:
3440 + transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
3441 + return;
3442 + }
3443 ++ transport_lun_remove_cmd(cmd);
3444 + transport_cmd_check_stop_to_fabric(cmd);
3445 + }
3446 +
3447 +@@ -2289,6 +2310,7 @@ static void target_complete_ok_work(struct work_struct *work)
3448 + if (ret)
3449 + goto queue_full;
3450 +
3451 ++ transport_lun_remove_cmd(cmd);
3452 + transport_cmd_check_stop_to_fabric(cmd);
3453 + return;
3454 + }
3455 +@@ -2314,6 +2336,7 @@ static void target_complete_ok_work(struct work_struct *work)
3456 + if (ret)
3457 + goto queue_full;
3458 +
3459 ++ transport_lun_remove_cmd(cmd);
3460 + transport_cmd_check_stop_to_fabric(cmd);
3461 + return;
3462 + }
3463 +@@ -2349,6 +2372,7 @@ queue_rsp:
3464 + if (ret)
3465 + goto queue_full;
3466 +
3467 ++ transport_lun_remove_cmd(cmd);
3468 + transport_cmd_check_stop_to_fabric(cmd);
3469 + return;
3470 + }
3471 +@@ -2384,6 +2408,7 @@ queue_status:
3472 + break;
3473 + }
3474 +
3475 ++ transport_lun_remove_cmd(cmd);
3476 + transport_cmd_check_stop_to_fabric(cmd);
3477 + return;
3478 +
3479 +@@ -2710,6 +2735,9 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
3480 + */
3481 + if (cmd->state_active)
3482 + target_remove_from_state_list(cmd);
3483 ++
3484 ++ if (cmd->se_lun)
3485 ++ transport_lun_remove_cmd(cmd);
3486 + }
3487 + if (aborted)
3488 + cmd->free_compl = &compl;
3489 +@@ -2781,9 +2809,6 @@ static void target_release_cmd_kref(struct kref *kref)
3490 + struct completion *abrt_compl = se_cmd->abrt_compl;
3491 + unsigned long flags;
3492 +
3493 +- if (se_cmd->lun_ref_active)
3494 +- percpu_ref_put(&se_cmd->se_lun->lun_ref);
3495 +-
3496 + if (se_sess) {
3497 + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3498 + list_del_init(&se_cmd->se_cmd_list);
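/*
 * transport_lun_remove_cmd() is now reachable from several completion paths,
 * so the cmpxchg() above makes the percpu_ref_put() happen exactly once:
 * only the caller that wins the true -> false transition on lun_ref_active
 * drops the reference; later callers see false and do nothing. A generic
 * release-once sketch:
 */
static void example_release_once(bool *active, struct percpu_ref *ref)
{
	if (cmpxchg(active, true, false))	/* returns the previous value */
		percpu_ref_put(ref);
}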
3499 +diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
3500 +index ca86a8e09c77..43bfeb886614 100644
3501 +--- a/drivers/thunderbolt/switch.c
3502 ++++ b/drivers/thunderbolt/switch.c
3503 +@@ -274,6 +274,12 @@ out:
3504 + return ret;
3505 + }
3506 +
3507 ++static int tb_switch_nvm_no_read(void *priv, unsigned int offset, void *val,
3508 ++ size_t bytes)
3509 ++{
3510 ++ return -EPERM;
3511 ++}
3512 ++
3513 + static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
3514 + size_t bytes)
3515 + {
3516 +@@ -319,6 +325,7 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
3517 + config.read_only = true;
3518 + } else {
3519 + config.name = "nvm_non_active";
3520 ++ config.reg_read = tb_switch_nvm_no_read;
3521 + config.reg_write = tb_switch_nvm_write;
3522 + config.root_only = true;
3523 + }
3524 +diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
3525 +index d1cdd2ab8b4c..d367803e2044 100644
3526 +--- a/drivers/tty/serdev/serdev-ttyport.c
3527 ++++ b/drivers/tty/serdev/serdev-ttyport.c
3528 +@@ -265,7 +265,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
3529 + struct device *parent,
3530 + struct tty_driver *drv, int idx)
3531 + {
3532 +- const struct tty_port_client_operations *old_ops;
3533 + struct serdev_controller *ctrl;
3534 + struct serport *serport;
3535 + int ret;
3536 +@@ -284,7 +283,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
3537 +
3538 + ctrl->ops = &ctrl_ops;
3539 +
3540 +- old_ops = port->client_ops;
3541 + port->client_ops = &client_ops;
3542 + port->client_data = ctrl;
3543 +
3544 +@@ -297,7 +295,7 @@ struct device *serdev_tty_port_register(struct tty_port *port,
3545 +
3546 + err_reset_data:
3547 + port->client_data = NULL;
3548 +- port->client_ops = old_ops;
3549 ++ port->client_ops = &tty_port_default_client_ops;
3550 + serdev_controller_put(ctrl);
3551 +
3552 + return ERR_PTR(ret);
3553 +@@ -312,8 +310,8 @@ int serdev_tty_port_unregister(struct tty_port *port)
3554 + return -ENODEV;
3555 +
3556 + serdev_controller_remove(ctrl);
3557 +- port->client_ops = NULL;
3558 + port->client_data = NULL;
3559 ++ port->client_ops = &tty_port_default_client_ops;
3560 + serdev_controller_put(ctrl);
3561 +
3562 + return 0;
3563 +diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
3564 +index 6e67fd89445a..0ed5404f35d6 100644
3565 +--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
3566 ++++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
3567 +@@ -449,7 +449,6 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
3568 + port.port.line = rc;
3569 +
3570 + port.port.irq = irq_of_parse_and_map(np, 0);
3571 +- port.port.irqflags = IRQF_SHARED;
3572 + port.port.handle_irq = aspeed_vuart_handle_irq;
3573 + port.port.iotype = UPIO_MEM;
3574 + port.port.type = PORT_16550A;
3575 +diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
3576 +index e682390ce0de..28bdbd7b4ab2 100644
3577 +--- a/drivers/tty/serial/8250/8250_core.c
3578 ++++ b/drivers/tty/serial/8250/8250_core.c
3579 +@@ -174,7 +174,7 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
3580 + struct hlist_head *h;
3581 + struct hlist_node *n;
3582 + struct irq_info *i;
3583 +- int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
3584 ++ int ret;
3585 +
3586 + mutex_lock(&hash_mutex);
3587 +
3588 +@@ -209,9 +209,8 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
3589 + INIT_LIST_HEAD(&up->list);
3590 + i->head = &up->list;
3591 + spin_unlock_irq(&i->lock);
3592 +- irq_flags |= up->port.irqflags;
3593 + ret = request_irq(up->port.irq, serial8250_interrupt,
3594 +- irq_flags, up->port.name, i);
3595 ++ up->port.irqflags, up->port.name, i);
3596 + if (ret < 0)
3597 + serial_do_unlink(i, up);
3598 + }
3599 +diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
3600 +index 92fbf46ce3bd..3205c83577e0 100644
3601 +--- a/drivers/tty/serial/8250/8250_of.c
3602 ++++ b/drivers/tty/serial/8250/8250_of.c
3603 +@@ -202,7 +202,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
3604 +
3605 + port->type = type;
3606 + port->uartclk = clk;
3607 +- port->irqflags |= IRQF_SHARED;
3608 +
3609 + if (of_property_read_bool(np, "no-loopback-test"))
3610 + port->flags |= UPF_SKIP_TEST;
3611 +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
3612 +index 90655910b0c7..5741b3822cf6 100644
3613 +--- a/drivers/tty/serial/8250/8250_port.c
3614 ++++ b/drivers/tty/serial/8250/8250_port.c
3615 +@@ -2178,6 +2178,10 @@ int serial8250_do_startup(struct uart_port *port)
3616 + }
3617 + }
3618 +
3619 ++ /* Check if we need to have shared IRQs */
3620 ++ if (port->irq && (up->port.flags & UPF_SHARE_IRQ))
3621 ++ up->port.irqflags |= IRQF_SHARED;
3622 ++
3623 + if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
3624 + unsigned char iir1;
3625 + /*
3626 +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
3627 +index 1ba9bc667e13..8a909d556185 100644
3628 +--- a/drivers/tty/serial/atmel_serial.c
3629 ++++ b/drivers/tty/serial/atmel_serial.c
3630 +@@ -574,7 +574,8 @@ static void atmel_stop_tx(struct uart_port *port)
3631 + atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
3632 +
3633 + if (atmel_uart_is_half_duplex(port))
3634 +- atmel_start_rx(port);
3635 ++ if (!atomic_read(&atmel_port->tasklet_shutdown))
3636 ++ atmel_start_rx(port);
3637 +
3638 + }
3639 +
3640 +diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
3641 +index dd3120c5db2b..0357fad48247 100644
3642 +--- a/drivers/tty/serial/imx.c
3643 ++++ b/drivers/tty/serial/imx.c
3644 +@@ -603,7 +603,7 @@ static void imx_uart_dma_tx(struct imx_port *sport)
3645 +
3646 + sport->tx_bytes = uart_circ_chars_pending(xmit);
3647 +
3648 +- if (xmit->tail < xmit->head) {
3649 ++ if (xmit->tail < xmit->head || xmit->head == 0) {
3650 + sport->dma_tx_nents = 1;
3651 + sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
3652 + } else {
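/*
 * In the circular TX buffer the pending bytes normally form one region,
 * [tail, head), or two when wrapped, [tail, end) plus [0, head). The imx fix
 * covers the corner case where head has wrapped to exactly 0: the data is
 * then the single region [tail, end), and the old two-entry path would have
 * built a zero-length second scatterlist entry. Sketch of the region count,
 * assuming the usual struct circ_buf layout:
 */
static int example_tx_regions(const struct circ_buf *xmit)
{
	if (xmit->tail < xmit->head || xmit->head == 0)
		return 1;	/* [tail, head), or [tail, end) when head == 0 */
	return 2;		/* [tail, end) plus [0, head) */
}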
3653 +diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
3654 +index ff63728a95f4..ebace5ad175c 100644
3655 +--- a/drivers/tty/serial/qcom_geni_serial.c
3656 ++++ b/drivers/tty/serial/qcom_geni_serial.c
3657 +@@ -128,6 +128,7 @@ static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop);
3658 + static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop);
3659 + static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port);
3660 + static void qcom_geni_serial_stop_rx(struct uart_port *uport);
3661 ++static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop);
3662 +
3663 + static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
3664 + 32000000, 48000000, 64000000, 80000000,
3665 +@@ -618,7 +619,7 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
3666 + u32 irq_en;
3667 + u32 status;
3668 + struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
3669 +- u32 irq_clear = S_CMD_DONE_EN;
3670 ++ u32 s_irq_status;
3671 +
3672 + irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
3673 + irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
3674 +@@ -634,10 +635,19 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
3675 + return;
3676 +
3677 + geni_se_cancel_s_cmd(&port->se);
3678 +- qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
3679 +- S_GENI_CMD_CANCEL, false);
3680 ++ qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
3681 ++ S_CMD_CANCEL_EN, true);
3682 ++ /*
3683 ++ * If a timeout occurs, the secondary engine remains active
3684 ++ * and the abort sequence is executed.
3685 ++ */
3686 ++ s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
3687 ++ /* Flush the Rx buffer */
3688 ++ if (s_irq_status & S_RX_FIFO_LAST_EN)
3689 ++ qcom_geni_serial_handle_rx(uport, true);
3690 ++ writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
3691 ++
3692 + status = readl(uport->membase + SE_GENI_STATUS);
3693 +- writel(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
3694 + if (status & S_GENI_CMD_ACTIVE)
3695 + qcom_geni_serial_abort_rx(uport);
3696 + }
3697 +diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
3698 +index 044c3cbdcfa4..ea80bf872f54 100644
3699 +--- a/drivers/tty/tty_port.c
3700 ++++ b/drivers/tty/tty_port.c
3701 +@@ -52,10 +52,11 @@ static void tty_port_default_wakeup(struct tty_port *port)
3702 + }
3703 + }
3704 +
3705 +-static const struct tty_port_client_operations default_client_ops = {
3706 ++const struct tty_port_client_operations tty_port_default_client_ops = {
3707 + .receive_buf = tty_port_default_receive_buf,
3708 + .write_wakeup = tty_port_default_wakeup,
3709 + };
3710 ++EXPORT_SYMBOL_GPL(tty_port_default_client_ops);
3711 +
3712 + void tty_port_init(struct tty_port *port)
3713 + {
3714 +@@ -68,7 +69,7 @@ void tty_port_init(struct tty_port *port)
3715 + spin_lock_init(&port->lock);
3716 + port->close_delay = (50 * HZ) / 100;
3717 + port->closing_wait = (3000 * HZ) / 100;
3718 +- port->client_ops = &default_client_ops;
3719 ++ port->client_ops = &tty_port_default_client_ops;
3720 + kref_init(&port->kref);
3721 + }
3722 + EXPORT_SYMBOL(tty_port_init);
3723 +diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
3724 +index 78732feaf65b..44d974d4159f 100644
3725 +--- a/drivers/tty/vt/selection.c
3726 ++++ b/drivers/tty/vt/selection.c
3727 +@@ -29,6 +29,8 @@
3728 + #include <linux/console.h>
3729 + #include <linux/tty_flip.h>
3730 +
3731 ++#include <linux/sched/signal.h>
3732 ++
3733 + /* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */
3734 + #define isspace(c) ((c) == ' ')
3735 +
3736 +@@ -350,6 +352,7 @@ int paste_selection(struct tty_struct *tty)
3737 + unsigned int count;
3738 + struct tty_ldisc *ld;
3739 + DECLARE_WAITQUEUE(wait, current);
3740 ++ int ret = 0;
3741 +
3742 + console_lock();
3743 + poke_blanked_console();
3744 +@@ -363,6 +366,10 @@ int paste_selection(struct tty_struct *tty)
3745 + add_wait_queue(&vc->paste_wait, &wait);
3746 + while (sel_buffer && sel_buffer_lth > pasted) {
3747 + set_current_state(TASK_INTERRUPTIBLE);
3748 ++ if (signal_pending(current)) {
3749 ++ ret = -EINTR;
3750 ++ break;
3751 ++ }
3752 + if (tty_throttled(tty)) {
3753 + schedule();
3754 + continue;
3755 +@@ -378,6 +385,6 @@ int paste_selection(struct tty_struct *tty)
3756 +
3757 + tty_buffer_unlock_exclusive(&vc->port);
3758 + tty_ldisc_deref(ld);
3759 +- return 0;
3760 ++ return ret;
3761 + }
3762 + EXPORT_SYMBOL_GPL(paste_selection);
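/*
 * The paste_selection() change follows the canonical interruptible-wait
 * shape: mark the task TASK_INTERRUPTIBLE, then test signal_pending() before
 * every schedule(), so a signal can no longer leave the loop stuck behind a
 * throttled tty. Skeleton of the pattern; done(), must_wait() and do_work()
 * are hypothetical stand-ins for the loop's real condition and body:
 */
add_wait_queue(&wq, &wait);
while (!done()) {
	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current)) {
		ret = -EINTR;
		break;
	}
	if (must_wait()) {
		schedule();
		continue;
	}
	do_work();
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&wq, &wait);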
3763 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
3764 +index 34aa39d1aed9..3b4ccc2a30c1 100644
3765 +--- a/drivers/tty/vt/vt.c
3766 ++++ b/drivers/tty/vt/vt.c
3767 +@@ -936,10 +936,21 @@ static void flush_scrollback(struct vc_data *vc)
3768 + WARN_CONSOLE_UNLOCKED();
3769 +
3770 + set_origin(vc);
3771 +- if (vc->vc_sw->con_flush_scrollback)
3772 ++ if (vc->vc_sw->con_flush_scrollback) {
3773 + vc->vc_sw->con_flush_scrollback(vc);
3774 +- else
3775 ++ } else if (con_is_visible(vc)) {
3776 ++ /*
3777 ++ * When no con_flush_scrollback method is provided, the
3778 ++ * legacy way to flush the scrollback buffer is to use
3779 ++ * a side effect of the con_switch method. We do it only on
3780 ++ * the foreground console as background consoles have no
3781 ++ * scrollback buffers in that case and we obviously don't
3782 ++ * want to switch to them.
3783 ++ */
3784 ++ hide_cursor(vc);
3785 + vc->vc_sw->con_switch(vc);
3786 ++ set_cursor(vc);
3787 ++ }
3788 + }
3789 +
3790 + /*
3791 +diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
3792 +index 8b0ed139592f..ee6c91ef1f6c 100644
3793 +--- a/drivers/tty/vt/vt_ioctl.c
3794 ++++ b/drivers/tty/vt/vt_ioctl.c
3795 +@@ -876,15 +876,20 @@ int vt_ioctl(struct tty_struct *tty,
3796 + return -EINVAL;
3797 +
3798 + for (i = 0; i < MAX_NR_CONSOLES; i++) {
3799 ++ struct vc_data *vcp;
3800 ++
3801 + if (!vc_cons[i].d)
3802 + continue;
3803 + console_lock();
3804 +- if (v.v_vlin)
3805 +- vc_cons[i].d->vc_scan_lines = v.v_vlin;
3806 +- if (v.v_clin)
3807 +- vc_cons[i].d->vc_font.height = v.v_clin;
3808 +- vc_cons[i].d->vc_resize_user = 1;
3809 +- vc_resize(vc_cons[i].d, v.v_cols, v.v_rows);
3810 ++ vcp = vc_cons[i].d;
3811 ++ if (vcp) {
3812 ++ if (v.v_vlin)
3813 ++ vcp->vc_scan_lines = v.v_vlin;
3814 ++ if (v.v_clin)
3815 ++ vcp->vc_font.height = v.v_clin;
3816 ++ vcp->vc_resize_user = 1;
3817 ++ vc_resize(vcp, v.v_cols, v.v_rows);
3818 ++ }
3819 + console_unlock();
3820 + }
3821 + break;
3822 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
3823 +index 26bc05e48d8a..7df22bcefa9d 100644
3824 +--- a/drivers/usb/core/config.c
3825 ++++ b/drivers/usb/core/config.c
3826 +@@ -256,6 +256,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
3827 + struct usb_host_interface *ifp, int num_ep,
3828 + unsigned char *buffer, int size)
3829 + {
3830 ++ struct usb_device *udev = to_usb_device(ddev);
3831 + unsigned char *buffer0 = buffer;
3832 + struct usb_endpoint_descriptor *d;
3833 + struct usb_host_endpoint *endpoint;
3834 +@@ -297,6 +298,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
3835 + goto skip_to_next_endpoint_or_interface_descriptor;
3836 + }
3837 +
3838 ++ /* Ignore blacklisted endpoints */
3839 ++ if (udev->quirks & USB_QUIRK_ENDPOINT_BLACKLIST) {
3840 ++ if (usb_endpoint_is_blacklisted(udev, ifp, d)) {
3841 ++ dev_warn(ddev, "config %d interface %d altsetting %d has a blacklisted endpoint with address 0x%X, skipping\n",
3842 ++ cfgno, inum, asnum,
3843 ++ d->bEndpointAddress);
3844 ++ goto skip_to_next_endpoint_or_interface_descriptor;
3845 ++ }
3846 ++ }
3847 ++
3848 + endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
3849 + ++ifp->desc.bNumEndpoints;
3850 +
3851 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
3852 +index 3405b146edc9..1d212f82c69b 100644
3853 +--- a/drivers/usb/core/hub.c
3854 ++++ b/drivers/usb/core/hub.c
3855 +@@ -38,7 +38,9 @@
3856 + #include "otg_whitelist.h"
3857 +
3858 + #define USB_VENDOR_GENESYS_LOGIC 0x05e3
3859 ++#define USB_VENDOR_SMSC 0x0424
3860 + #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
3861 ++#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
3862 +
3863 + #define USB_TP_TRANSMISSION_DELAY 40 /* ns */
3864 + #define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
3865 +@@ -1217,11 +1219,6 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
3866 + #ifdef CONFIG_PM
3867 + udev->reset_resume = 1;
3868 + #endif
3869 +- /* Don't set the change_bits when the device
3870 +- * was powered off.
3871 +- */
3872 +- if (test_bit(port1, hub->power_bits))
3873 +- set_bit(port1, hub->change_bits);
3874 +
3875 + } else {
3876 + /* The power session is gone; tell hub_wq */
3877 +@@ -1731,6 +1728,10 @@ static void hub_disconnect(struct usb_interface *intf)
3878 + kfree(hub->buffer);
3879 +
3880 + pm_suspend_ignore_children(&intf->dev, false);
3881 ++
3882 ++ if (hub->quirk_disable_autosuspend)
3883 ++ usb_autopm_put_interface(intf);
3884 ++
3885 + kref_put(&hub->kref, hub_release);
3886 + }
3887 +
3888 +@@ -1863,6 +1864,11 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
3889 + if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND)
3890 + hub->quirk_check_port_auto_suspend = 1;
3891 +
3892 ++ if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) {
3893 ++ hub->quirk_disable_autosuspend = 1;
3894 ++ usb_autopm_get_interface(intf);
3895 ++ }
3896 ++
3897 + if (hub_configure(hub, &desc->endpoint[0].desc) >= 0)
3898 + return 0;
3899 +
3900 +@@ -5599,6 +5605,10 @@ out_hdev_lock:
3901 + }
3902 +
3903 + static const struct usb_device_id hub_id_table[] = {
3904 ++ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS,
3905 ++ .idVendor = USB_VENDOR_SMSC,
3906 ++ .bInterfaceClass = USB_CLASS_HUB,
3907 ++ .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
3908 + { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
3909 + | USB_DEVICE_ID_MATCH_INT_CLASS,
3910 + .idVendor = USB_VENDOR_GENESYS_LOGIC,
3911 +diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
3912 +index a9e24e4b8df1..a97dd1ba964e 100644
3913 +--- a/drivers/usb/core/hub.h
3914 ++++ b/drivers/usb/core/hub.h
3915 +@@ -61,6 +61,7 @@ struct usb_hub {
3916 + unsigned quiescing:1;
3917 + unsigned disconnected:1;
3918 + unsigned in_reset:1;
3919 ++ unsigned quirk_disable_autosuspend:1;
3920 +
3921 + unsigned quirk_check_port_auto_suspend:1;
3922 +
3923 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
3924 +index 6b6413073584..2b24336a72e5 100644
3925 +--- a/drivers/usb/core/quirks.c
3926 ++++ b/drivers/usb/core/quirks.c
3927 +@@ -354,6 +354,10 @@ static const struct usb_device_id usb_quirk_list[] = {
3928 + { USB_DEVICE(0x0904, 0x6103), .driver_info =
3929 + USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
3930 +
3931 ++ /* Sound Devices USBPre2 */
3932 ++ { USB_DEVICE(0x0926, 0x0202), .driver_info =
3933 ++ USB_QUIRK_ENDPOINT_BLACKLIST },
3934 ++
3935 + /* Keytouch QWERTY Panel keyboard */
3936 + { USB_DEVICE(0x0926, 0x3333), .driver_info =
3937 + USB_QUIRK_CONFIG_INTF_STRINGS },
3938 +@@ -445,6 +449,9 @@ static const struct usb_device_id usb_quirk_list[] = {
3939 + /* INTEL VALUE SSD */
3940 + { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
3941 +
3942 ++ /* novation SoundControl XL */
3943 ++ { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
3944 ++
3945 + { } /* terminating entry must be last */
3946 + };
3947 +
3948 +@@ -472,6 +479,39 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
3949 + { } /* terminating entry must be last */
3950 + };
3951 +
3952 ++/*
3953 ++ * Entries for blacklisted endpoints that should be ignored when parsing
3954 ++ * configuration descriptors.
3955 ++ *
3956 ++ * Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST.
3957 ++ */
3958 ++static const struct usb_device_id usb_endpoint_blacklist[] = {
3959 ++ { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
3960 ++ { }
3961 ++};
3962 ++
3963 ++bool usb_endpoint_is_blacklisted(struct usb_device *udev,
3964 ++ struct usb_host_interface *intf,
3965 ++ struct usb_endpoint_descriptor *epd)
3966 ++{
3967 ++ const struct usb_device_id *id;
3968 ++ unsigned int address;
3969 ++
3970 ++ for (id = usb_endpoint_blacklist; id->match_flags; ++id) {
3971 ++ if (!usb_match_device(udev, id))
3972 ++ continue;
3973 ++
3974 ++ if (!usb_match_one_id_intf(udev, intf, id))
3975 ++ continue;
3976 ++
3977 ++ address = id->driver_info;
3978 ++ if (address == epd->bEndpointAddress)
3979 ++ return true;
3980 ++ }
3981 ++
3982 ++ return false;
3983 ++}
3984 ++
3985 + static bool usb_match_any_interface(struct usb_device *udev,
3986 + const struct usb_device_id *id)
3987 + {
3988 +diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
3989 +index cf4783cf661a..3ad0ee57e859 100644
3990 +--- a/drivers/usb/core/usb.h
3991 ++++ b/drivers/usb/core/usb.h
3992 +@@ -37,6 +37,9 @@ extern void usb_authorize_interface(struct usb_interface *);
3993 + extern void usb_detect_quirks(struct usb_device *udev);
3994 + extern void usb_detect_interface_quirks(struct usb_device *udev);
3995 + extern void usb_release_quirk_list(void);
3996 ++extern bool usb_endpoint_is_blacklisted(struct usb_device *udev,
3997 ++ struct usb_host_interface *intf,
3998 ++ struct usb_endpoint_descriptor *epd);
3999 + extern int usb_remove_device(struct usb_device *udev);
4000 +
4001 + extern int usb_get_device_descriptor(struct usb_device *dev,
4002 +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
4003 +index a9133773b89e..7fd0900a9cb0 100644
4004 +--- a/drivers/usb/dwc2/gadget.c
4005 ++++ b/drivers/usb/dwc2/gadget.c
4006 +@@ -1083,11 +1083,6 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
4007 + else
4008 + packets = 1; /* send one packet if length is zero. */
4009 +
4010 +- if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
4011 +- dev_err(hsotg->dev, "req length > maxpacket*mc\n");
4012 +- return;
4013 +- }
4014 +-
4015 + if (dir_in && index != 0)
4016 + if (hs_ep->isochronous)
4017 + epsize = DXEPTSIZ_MC(packets);
4018 +@@ -1391,6 +1386,13 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
4019 + req->actual = 0;
4020 + req->status = -EINPROGRESS;
4021 +
4022 ++ /* Don't queue ISOC request if length greater than mps*mc */
4023 ++ if (hs_ep->isochronous &&
4024 ++ req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
4025 ++ dev_err(hs->dev, "req length > maxpacket*mc\n");
4026 ++ return -EINVAL;
4027 ++ }
4028 ++
4029 + /* In DDMA mode for ISOC's don't queue request if length greater
4030 + * than descriptor limits.
4031 + */
4032 +@@ -1632,6 +1634,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
4033 + struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
4034 + struct dwc2_hsotg_ep *ep;
4035 + __le16 reply;
4036 ++ u16 status;
4037 + int ret;
4038 +
4039 + dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
4040 +@@ -1643,11 +1646,10 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
4041 +
4042 + switch (ctrl->bRequestType & USB_RECIP_MASK) {
4043 + case USB_RECIP_DEVICE:
4044 +- /*
4045 +- * bit 0 => self powered
4046 +- * bit 1 => remote wakeup
4047 +- */
4048 +- reply = cpu_to_le16(0);
4049 ++ status = 1 << USB_DEVICE_SELF_POWERED;
4050 ++ status |= hsotg->remote_wakeup_allowed <<
4051 ++ USB_DEVICE_REMOTE_WAKEUP;
4052 ++ reply = cpu_to_le16(status);
4053 + break;
4054 +
4055 + case USB_RECIP_INTERFACE:
4056 +@@ -1758,7 +1760,10 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
4057 + case USB_RECIP_DEVICE:
4058 + switch (wValue) {
4059 + case USB_DEVICE_REMOTE_WAKEUP:
4060 +- hsotg->remote_wakeup_allowed = 1;
4061 ++ if (set)
4062 ++ hsotg->remote_wakeup_allowed = 1;
4063 ++ else
4064 ++ hsotg->remote_wakeup_allowed = 0;
4065 + break;
4066 +
4067 + case USB_DEVICE_TEST_MODE:
4068 +@@ -1768,16 +1773,17 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
4069 + return -EINVAL;
4070 +
4071 + hsotg->test_mode = wIndex >> 8;
4072 +- ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
4073 +- if (ret) {
4074 +- dev_err(hsotg->dev,
4075 +- "%s: failed to send reply\n", __func__);
4076 +- return ret;
4077 +- }
4078 + break;
4079 + default:
4080 + return -ENOENT;
4081 + }
4082 ++
4083 ++ ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
4084 ++ if (ret) {
4085 ++ dev_err(hsotg->dev,
4086 ++ "%s: failed to send reply\n", __func__);
4087 ++ return ret;
4088 ++ }
4089 + break;
4090 +
4091 + case USB_RECIP_ENDPOINT:
4092 +diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
4093 +index e56beb9d1e36..4a13ceaf4093 100644
4094 +--- a/drivers/usb/dwc3/debug.h
4095 ++++ b/drivers/usb/dwc3/debug.h
4096 +@@ -256,86 +256,77 @@ static inline const char *dwc3_ep_event_string(char *str, size_t size,
4097 + u8 epnum = event->endpoint_number;
4098 + size_t len;
4099 + int status;
4100 +- int ret;
4101 +
4102 +- ret = snprintf(str, size, "ep%d%s: ", epnum >> 1,
4103 ++ len = scnprintf(str, size, "ep%d%s: ", epnum >> 1,
4104 + (epnum & 1) ? "in" : "out");
4105 +- if (ret < 0)
4106 +- return "UNKNOWN";
4107 +
4108 + status = event->status;
4109 +
4110 + switch (event->endpoint_event) {
4111 + case DWC3_DEPEVT_XFERCOMPLETE:
4112 +- len = strlen(str);
4113 +- snprintf(str + len, size - len, "Transfer Complete (%c%c%c)",
4114 ++ len += scnprintf(str + len, size - len,
4115 ++ "Transfer Complete (%c%c%c)",
4116 + status & DEPEVT_STATUS_SHORT ? 'S' : 's',
4117 + status & DEPEVT_STATUS_IOC ? 'I' : 'i',
4118 + status & DEPEVT_STATUS_LST ? 'L' : 'l');
4119 +
4120 +- len = strlen(str);
4121 +-
4122 + if (epnum <= 1)
4123 +- snprintf(str + len, size - len, " [%s]",
4124 ++ scnprintf(str + len, size - len, " [%s]",
4125 + dwc3_ep0_state_string(ep0state));
4126 + break;
4127 + case DWC3_DEPEVT_XFERINPROGRESS:
4128 +- len = strlen(str);
4129 +-
4130 +- snprintf(str + len, size - len, "Transfer In Progress [%d] (%c%c%c)",
4131 ++ scnprintf(str + len, size - len,
4132 ++ "Transfer In Progress [%d] (%c%c%c)",
4133 + event->parameters,
4134 + status & DEPEVT_STATUS_SHORT ? 'S' : 's',
4135 + status & DEPEVT_STATUS_IOC ? 'I' : 'i',
4136 + status & DEPEVT_STATUS_LST ? 'M' : 'm');
4137 + break;
4138 + case DWC3_DEPEVT_XFERNOTREADY:
4139 +- len = strlen(str);
4140 +-
4141 +- snprintf(str + len, size - len, "Transfer Not Ready [%d]%s",
4142 ++ len += scnprintf(str + len, size - len,
4143 ++ "Transfer Not Ready [%d]%s",
4144 + event->parameters,
4145 + status & DEPEVT_STATUS_TRANSFER_ACTIVE ?
4146 + " (Active)" : " (Not Active)");
4147 +
4148 +- len = strlen(str);
4149 +-
4150 + /* Control Endpoints */
4151 + if (epnum <= 1) {
4152 + int phase = DEPEVT_STATUS_CONTROL_PHASE(event->status);
4153 +
4154 + switch (phase) {
4155 + case DEPEVT_STATUS_CONTROL_DATA:
4156 +- snprintf(str + ret, size - ret,
4157 ++ scnprintf(str + len, size - len,
4158 + " [Data Phase]");
4159 + break;
4160 + case DEPEVT_STATUS_CONTROL_STATUS:
4161 +- snprintf(str + ret, size - ret,
4162 ++ scnprintf(str + len, size - len,
4163 + " [Status Phase]");
4164 + }
4165 + }
4166 + break;
4167 + case DWC3_DEPEVT_RXTXFIFOEVT:
4168 +- snprintf(str + ret, size - ret, "FIFO");
4169 ++ scnprintf(str + len, size - len, "FIFO");
4170 + break;
4171 + case DWC3_DEPEVT_STREAMEVT:
4172 + status = event->status;
4173 +
4174 + switch (status) {
4175 + case DEPEVT_STREAMEVT_FOUND:
4176 +- snprintf(str + ret, size - ret, " Stream %d Found",
4177 ++ scnprintf(str + len, size - len, " Stream %d Found",
4178 + event->parameters);
4179 + break;
4180 + case DEPEVT_STREAMEVT_NOTFOUND:
4181 + default:
4182 +- snprintf(str + ret, size - ret, " Stream Not Found");
4183 ++ scnprintf(str + len, size - len, " Stream Not Found");
4184 + break;
4185 + }
4186 +
4187 + break;
4188 + case DWC3_DEPEVT_EPCMDCMPLT:
4189 +- snprintf(str + ret, size - ret, "Endpoint Command Complete");
4190 ++ scnprintf(str + len, size - len, "Endpoint Command Complete");
4191 + break;
4192 + default:
4193 +- snprintf(str, size, "UNKNOWN");
4194 ++ scnprintf(str + len, size - len, "UNKNOWN");
4195 + }
4196 +
4197 + return str;
4198 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
4199 +index 8b95be897078..e0cb1c2d5675 100644
4200 +--- a/drivers/usb/dwc3/gadget.c
4201 ++++ b/drivers/usb/dwc3/gadget.c
4202 +@@ -2426,7 +2426,8 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
4203 + if (event->status & DEPEVT_STATUS_SHORT && !chain)
4204 + return 1;
4205 +
4206 +- if (event->status & DEPEVT_STATUS_IOC)
4207 ++ if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
4208 ++ (trb->ctrl & DWC3_TRB_CTRL_LST))
4209 + return 1;
4210 +
4211 + return 0;
4212 +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
4213 +index 3b4f67000315..cd303a3ea680 100644
4214 +--- a/drivers/usb/gadget/composite.c
4215 ++++ b/drivers/usb/gadget/composite.c
4216 +@@ -437,12 +437,10 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
4217 + val = CONFIG_USB_GADGET_VBUS_DRAW;
4218 + if (!val)
4219 + return 0;
4220 +- switch (speed) {
4221 +- case USB_SPEED_SUPER:
4222 +- return DIV_ROUND_UP(val, 8);
4223 +- default:
4224 ++ if (speed < USB_SPEED_SUPER)
4225 + return DIV_ROUND_UP(val, 2);
4226 +- }
4227 ++ else
4228 ++ return DIV_ROUND_UP(val, 8);
4229 + }
4230 +
4231 + static int config_buf(struct usb_configuration *config,
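/*
 * bMaxPower units depend on speed: 2 mA per unit up through high speed and
 * 8 mA per unit at SuperSpeed and above. Comparing with "< USB_SPEED_SUPER"
 * instead of matching only USB_SPEED_SUPER also encodes SUPER_PLUS in 8 mA
 * units. Worked example for a 500 mA budget:
 *   high speed:  DIV_ROUND_UP(500, 2) = 250
 *   SuperSpeed:  DIV_ROUND_UP(500, 8) = 63   (63 * 8 = 504 mA, >= the draw)
 */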
4232 +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
4233 +index 7a3a29e5e9d2..af92b2576fe9 100644
4234 +--- a/drivers/usb/host/xhci-hub.c
4235 ++++ b/drivers/usb/host/xhci-hub.c
4236 +@@ -55,6 +55,7 @@ static u8 usb_bos_descriptor [] = {
4237 + static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
4238 + u16 wLength)
4239 + {
4240 ++ struct xhci_port_cap *port_cap = NULL;
4241 + int i, ssa_count;
4242 + u32 temp;
4243 + u16 desc_size, ssp_cap_size, ssa_size = 0;
4244 +@@ -64,16 +65,24 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
4245 + ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size;
4246 +
4247 + /* does xhci support USB 3.1 Enhanced SuperSpeed */
4248 +- if (xhci->usb3_rhub.min_rev >= 0x01) {
4249 ++ for (i = 0; i < xhci->num_port_caps; i++) {
4250 ++ if (xhci->port_caps[i].maj_rev == 0x03 &&
4251 ++ xhci->port_caps[i].min_rev >= 0x01) {
4252 ++ usb3_1 = true;
4253 ++ port_cap = &xhci->port_caps[i];
4254 ++ break;
4255 ++ }
4256 ++ }
4257 ++
4258 ++ if (usb3_1) {
4259 + /* does xhci provide a PSI table for SSA speed attributes? */
4260 +- if (xhci->usb3_rhub.psi_count) {
4261 ++ if (port_cap->psi_count) {
4262 + /* two SSA entries for each unique PSI ID, RX and TX */
4263 +- ssa_count = xhci->usb3_rhub.psi_uid_count * 2;
4264 ++ ssa_count = port_cap->psi_uid_count * 2;
4265 + ssa_size = ssa_count * sizeof(u32);
4266 + ssp_cap_size -= 16; /* skip copying the default SSA */
4267 + }
4268 + desc_size += ssp_cap_size;
4269 +- usb3_1 = true;
4270 + }
4271 + memcpy(buf, &usb_bos_descriptor, min(desc_size, wLength));
4272 +
4273 +@@ -99,7 +108,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
4274 + }
4275 +
4276 + /* If PSI table exists, add the custom speed attributes from it */
4277 +- if (usb3_1 && xhci->usb3_rhub.psi_count) {
4278 ++ if (usb3_1 && port_cap->psi_count) {
4279 + u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp;
4280 + int offset;
4281 +
4282 +@@ -111,7 +120,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
4283 +
4284 + /* attribute count SSAC bits 4:0 and ID count SSIC bits 8:5 */
4285 + bm_attrib = (ssa_count - 1) & 0x1f;
4286 +- bm_attrib |= (xhci->usb3_rhub.psi_uid_count - 1) << 5;
4287 ++ bm_attrib |= (port_cap->psi_uid_count - 1) << 5;
4288 + put_unaligned_le32(bm_attrib, &buf[ssp_cap_base + 4]);
4289 +
4290 + if (wLength < desc_size + ssa_size)
4291 +@@ -124,8 +133,8 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
4292 + * USB 3.1 requires two SSA entries (RX and TX) for every link
4293 + */
4294 + offset = desc_size;
4295 +- for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
4296 +- psi = xhci->usb3_rhub.psi[i];
4297 ++ for (i = 0; i < port_cap->psi_count; i++) {
4298 ++ psi = port_cap->psi[i];
4299 + psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
4300 + psi_exp = XHCI_EXT_PORT_PSIE(psi);
4301 + psi_mant = XHCI_EXT_PORT_PSIM(psi);
4302 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
4303 +index 3b1388fa2f36..884c601bfa15 100644
4304 +--- a/drivers/usb/host/xhci-mem.c
4305 ++++ b/drivers/usb/host/xhci-mem.c
4306 +@@ -1475,9 +1475,15 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
4307 + /* Allow 3 retries for everything but isoc, set CErr = 3 */
4308 + if (!usb_endpoint_xfer_isoc(&ep->desc))
4309 + err_count = 3;
4310 +- /* Some devices get this wrong */
4311 +- if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
4312 +- max_packet = 512;
4313 ++ /* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
4314 ++ if (usb_endpoint_xfer_bulk(&ep->desc)) {
4315 ++ if (udev->speed == USB_SPEED_HIGH)
4316 ++ max_packet = 512;
4317 ++ if (udev->speed == USB_SPEED_FULL) {
4318 ++ max_packet = rounddown_pow_of_two(max_packet);
4319 ++ max_packet = clamp_val(max_packet, 8, 64);
4320 ++ }
4321 ++ }
4322 + /* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
4323 + if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
4324 + avg_trb_len = 8;
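/*
 * USB 2.0 permits only 8, 16, 32 or 64 byte wMaxPacketSize for full-speed
 * bulk endpoints, so rounding down to a power of two and clamping maps any
 * noncompliant descriptor value onto a legal size, e.g.:
 *   56  -> rounddown_pow_of_two() -> 32
 *   512 -> rounddown_pow_of_two() -> 512 -> clamp_val(..., 8, 64) -> 64
 */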
4325 +@@ -1909,17 +1915,17 @@ no_bw:
4326 + xhci->usb3_rhub.num_ports = 0;
4327 + xhci->num_active_eps = 0;
4328 + kfree(xhci->usb2_rhub.ports);
4329 +- kfree(xhci->usb2_rhub.psi);
4330 + kfree(xhci->usb3_rhub.ports);
4331 +- kfree(xhci->usb3_rhub.psi);
4332 + kfree(xhci->hw_ports);
4333 + kfree(xhci->rh_bw);
4334 + kfree(xhci->ext_caps);
4335 ++ for (i = 0; i < xhci->num_port_caps; i++)
4336 ++ kfree(xhci->port_caps[i].psi);
4337 ++ kfree(xhci->port_caps);
4338 ++ xhci->num_port_caps = 0;
4339 +
4340 + xhci->usb2_rhub.ports = NULL;
4341 +- xhci->usb2_rhub.psi = NULL;
4342 + xhci->usb3_rhub.ports = NULL;
4343 +- xhci->usb3_rhub.psi = NULL;
4344 + xhci->hw_ports = NULL;
4345 + xhci->rh_bw = NULL;
4346 + xhci->ext_caps = NULL;
4347 +@@ -2120,6 +2126,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
4348 + u8 major_revision, minor_revision;
4349 + struct xhci_hub *rhub;
4350 + struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
4351 ++ struct xhci_port_cap *port_cap;
4352 +
4353 + temp = readl(addr);
4354 + major_revision = XHCI_EXT_PORT_MAJOR(temp);
4355 +@@ -2154,31 +2161,39 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
4356 + /* WTF? "Valid values are ‘1’ to MaxPorts" */
4357 + return;
4358 +
4359 +- rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
4360 +- if (rhub->psi_count) {
4361 +- rhub->psi = kcalloc_node(rhub->psi_count, sizeof(*rhub->psi),
4362 +- GFP_KERNEL, dev_to_node(dev));
4363 +- if (!rhub->psi)
4364 +- rhub->psi_count = 0;
4365 ++ port_cap = &xhci->port_caps[xhci->num_port_caps++];
4366 ++ if (xhci->num_port_caps > max_caps)
4367 ++ return;
4368 ++
4369 ++ port_cap->maj_rev = major_revision;
4370 ++ port_cap->min_rev = minor_revision;
4371 ++ port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
4372 +
4373 +- rhub->psi_uid_count++;
4374 +- for (i = 0; i < rhub->psi_count; i++) {
4375 +- rhub->psi[i] = readl(addr + 4 + i);
4376 ++ if (port_cap->psi_count) {
4377 ++ port_cap->psi = kcalloc_node(port_cap->psi_count,
4378 ++ sizeof(*port_cap->psi),
4379 ++ GFP_KERNEL, dev_to_node(dev));
4380 ++ if (!port_cap->psi)
4381 ++ port_cap->psi_count = 0;
4382 ++
4383 ++ port_cap->psi_uid_count++;
4384 ++ for (i = 0; i < port_cap->psi_count; i++) {
4385 ++ port_cap->psi[i] = readl(addr + 4 + i);
4386 +
4387 + /* count unique ID values, two consecutive entries can
4388 + * have the same ID if the link is asymmetric
4389 + */
4390 +- if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) !=
4391 +- XHCI_EXT_PORT_PSIV(rhub->psi[i - 1])))
4392 +- rhub->psi_uid_count++;
4393 ++ if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
4394 ++ XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
4395 ++ port_cap->psi_uid_count++;
4396 +
4397 + xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
4398 +- XHCI_EXT_PORT_PSIV(rhub->psi[i]),
4399 +- XHCI_EXT_PORT_PSIE(rhub->psi[i]),
4400 +- XHCI_EXT_PORT_PLT(rhub->psi[i]),
4401 +- XHCI_EXT_PORT_PFD(rhub->psi[i]),
4402 +- XHCI_EXT_PORT_LP(rhub->psi[i]),
4403 +- XHCI_EXT_PORT_PSIM(rhub->psi[i]));
4404 ++ XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
4405 ++ XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
4406 ++ XHCI_EXT_PORT_PLT(port_cap->psi[i]),
4407 ++ XHCI_EXT_PORT_PFD(port_cap->psi[i]),
4408 ++ XHCI_EXT_PORT_LP(port_cap->psi[i]),
4409 ++ XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
4410 + }
4411 + }
4412 + /* cache usb2 port capabilities */
4413 +@@ -2213,6 +2228,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
4414 + continue;
4415 + }
4416 + hw_port->rhub = rhub;
4417 ++ hw_port->port_cap = port_cap;
4418 + rhub->num_ports++;
4419 + }
4420 + /* FIXME: Should we disable ports not in the Extended Capabilities? */
4421 +@@ -2303,6 +2319,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
4422 + if (!xhci->ext_caps)
4423 + return -ENOMEM;
4424 +
4425 ++ xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
4426 ++ flags, dev_to_node(dev));
4427 ++ if (!xhci->port_caps)
4428 ++ return -ENOMEM;
4429 ++
4430 + offset = cap_start;
4431 +
4432 + while (offset) {
4433 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
4434 +index 4917c5b033fa..5e9b537df631 100644
4435 +--- a/drivers/usb/host/xhci-pci.c
4436 ++++ b/drivers/usb/host/xhci-pci.c
4437 +@@ -49,6 +49,7 @@
4438 + #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec
4439 + #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0
4440 + #define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13
4441 ++#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af
4442 +
4443 + #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
4444 + #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
4445 +@@ -187,7 +188,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
4446 + pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
4447 + pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
4448 + pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
4449 +- pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
4450 ++ pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI ||
4451 ++ pdev->device == PCI_DEVICE_ID_INTEL_CML_XHCI)) {
4452 + xhci->quirks |= XHCI_PME_STUCK_QUIRK;
4453 + }
4454 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
4455 +@@ -302,6 +304,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
4456 + if (!usb_hcd_is_primary_hcd(hcd))
4457 + return 0;
4458 +
4459 ++ if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
4460 ++ xhci_pme_acpi_rtd3_enable(pdev);
4461 ++
4462 + xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
4463 +
4464 + /* Find any debug ports */
4465 +@@ -359,9 +364,6 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
4466 + HCC_MAX_PSA(xhci->hcc_params) >= 4)
4467 + xhci->shared_hcd->can_do_streams = 1;
4468 +
4469 +- if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
4470 +- xhci_pme_acpi_rtd3_enable(dev);
4471 +-
4472 + /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
4473 + pm_runtime_put_noidle(&dev->dev);
4474 +
4475 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
4476 +index 13d8838cd552..3ecee10fdcdc 100644
4477 +--- a/drivers/usb/host/xhci.h
4478 ++++ b/drivers/usb/host/xhci.h
4479 +@@ -1702,12 +1702,20 @@ struct xhci_bus_state {
4480 + * Intel Lynx Point LP xHCI host.
4481 + */
4482 + #define XHCI_MAX_REXIT_TIMEOUT_MS 20
4483 ++struct xhci_port_cap {
4484 ++ u32 *psi; /* array of protocol speed ID entries */
4485 ++ u8 psi_count;
4486 ++ u8 psi_uid_count;
4487 ++ u8 maj_rev;
4488 ++ u8 min_rev;
4489 ++};
4490 +
4491 + struct xhci_port {
4492 + __le32 __iomem *addr;
4493 + int hw_portnum;
4494 + int hcd_portnum;
4495 + struct xhci_hub *rhub;
4496 ++ struct xhci_port_cap *port_cap;
4497 + };
4498 +
4499 + struct xhci_hub {
4500 +@@ -1719,9 +1727,6 @@ struct xhci_hub {
4501 + /* supported protocol extended capability values */
4502 + u8 maj_rev;
4503 + u8 min_rev;
4504 +- u32 *psi; /* array of protocol speed ID entries */
4505 +- u8 psi_count;
4506 +- u8 psi_uid_count;
4507 + };
4508 +
4509 + /* There is one xhci_hcd structure per controller */
4510 +@@ -1880,6 +1885,9 @@ struct xhci_hcd {
4511 + /* cached usb2 extended protocol capabilities */
4512 + u32 *ext_caps;
4513 + unsigned int num_ext_caps;
4514 ++ /* cached extended protocol port capabilities */
4515 ++ struct xhci_port_cap *port_caps;
4516 ++ unsigned int num_port_caps;
4517 + /* Compliance Mode Recovery Data */
4518 + struct timer_list comp_mode_recovery_timer;
4519 + u32 port_status_u0;
4520 +diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
4521 +index dce44fbf031f..dce20301e367 100644
4522 +--- a/drivers/usb/misc/iowarrior.c
4523 ++++ b/drivers/usb/misc/iowarrior.c
4524 +@@ -33,6 +33,14 @@
4525 + #define USB_DEVICE_ID_CODEMERCS_IOWPV2 0x1512
4526 + /* full speed iowarrior */
4527 + #define USB_DEVICE_ID_CODEMERCS_IOW56 0x1503
4528 ++/* fuller speed iowarrior */
4529 ++#define USB_DEVICE_ID_CODEMERCS_IOW28 0x1504
4530 ++#define USB_DEVICE_ID_CODEMERCS_IOW28L 0x1505
4531 ++#define USB_DEVICE_ID_CODEMERCS_IOW100 0x1506
4532 ++
4533 ++/* OEMed devices */
4534 ++#define USB_DEVICE_ID_CODEMERCS_IOW24SAG 0x158a
4535 ++#define USB_DEVICE_ID_CODEMERCS_IOW56AM 0x158b
4536 +
4537 + /* Get a minor range for your devices from the usb maintainer */
4538 + #ifdef CONFIG_USB_DYNAMIC_MINORS
4539 +@@ -133,6 +141,11 @@ static const struct usb_device_id iowarrior_ids[] = {
4540 + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)},
4541 + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV2)},
4542 + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56)},
4543 ++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24SAG)},
4544 ++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56AM)},
4545 ++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28)},
4546 ++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28L)},
4547 ++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW100)},
4548 + {} /* Terminating entry */
4549 + };
4550 + MODULE_DEVICE_TABLE(usb, iowarrior_ids);
4551 +@@ -357,6 +370,7 @@ static ssize_t iowarrior_write(struct file *file,
4552 + }
4553 + switch (dev->product_id) {
4554 + case USB_DEVICE_ID_CODEMERCS_IOW24:
4555 ++ case USB_DEVICE_ID_CODEMERCS_IOW24SAG:
4556 + case USB_DEVICE_ID_CODEMERCS_IOWPV1:
4557 + case USB_DEVICE_ID_CODEMERCS_IOWPV2:
4558 + case USB_DEVICE_ID_CODEMERCS_IOW40:
4559 +@@ -371,6 +385,10 @@ static ssize_t iowarrior_write(struct file *file,
4560 + goto exit;
4561 + break;
4562 + case USB_DEVICE_ID_CODEMERCS_IOW56:
4563 ++ case USB_DEVICE_ID_CODEMERCS_IOW56AM:
4564 ++ case USB_DEVICE_ID_CODEMERCS_IOW28:
4565 ++ case USB_DEVICE_ID_CODEMERCS_IOW28L:
4566 ++ case USB_DEVICE_ID_CODEMERCS_IOW100:
4567 + /* These devices use asynchronous IO and more urbs */
4568 + if (atomic_read(&dev->write_busy) == MAX_WRITES_IN_FLIGHT) {
4569 + /* Wait until we are below the limit for submitted urbs */
4570 +@@ -493,6 +511,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
4571 + switch (cmd) {
4572 + case IOW_WRITE:
4573 + if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24 ||
4574 ++ dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24SAG ||
4575 + dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV1 ||
4576 + dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV2 ||
4577 + dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW40) {
4578 +@@ -767,7 +786,11 @@ static int iowarrior_probe(struct usb_interface *interface,
4579 + goto error;
4580 + }
4581 +
4582 +- if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
4583 ++ if ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
4584 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
4585 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
4586 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
4587 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)) {
4588 + res = usb_find_last_int_out_endpoint(iface_desc,
4589 + &dev->int_out_endpoint);
4590 + if (res) {
4591 +@@ -780,7 +803,11 @@ static int iowarrior_probe(struct usb_interface *interface,
4592 + /* we have to check the report_size often, so remember it in the endianness suitable for our machine */
4593 + dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
4594 + if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
4595 +- (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56))
4596 ++ ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
4597 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
4598 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
4599 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
4600 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)))
4601 + /* IOWarrior56 has wMaxPacketSize different from report size */
4602 + dev->report_size = 7;
4603 +
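
The iowarrior hunks repeat the same five product IDs in three places (write, ioctl guard, and probe). A hypothetical predicate, not part of the patch, shows how the list could be kept in one spot:

static bool iowarrior_big_product(u16 product_id)
{
	switch (product_id) {
	case USB_DEVICE_ID_CODEMERCS_IOW56:
	case USB_DEVICE_ID_CODEMERCS_IOW56AM:
	case USB_DEVICE_ID_CODEMERCS_IOW28:
	case USB_DEVICE_ID_CODEMERCS_IOW28L:
	case USB_DEVICE_ID_CODEMERCS_IOW100:
		return true;	/* asynchronous IO, larger report path */
	default:
		return false;
	}
}
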
4604 +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
4605 +index d3f420f3a083..c5ecdcd51ffc 100644
4606 +--- a/drivers/usb/serial/ch341.c
4607 ++++ b/drivers/usb/serial/ch341.c
4608 +@@ -205,6 +205,16 @@ static int ch341_get_divisor(speed_t speed)
4609 + 16 * speed - 16 * CH341_CLKRATE / (clk_div * (div + 1)))
4610 + div++;
4611 +
4612 ++ /*
4613 ++ * Prefer lower base clock (fact = 0) if even divisor.
4614 ++ *
4615 ++ * Note that this makes the receiver more tolerant to errors.
4616 ++ */
4617 ++ if (fact == 1 && div % 2 == 0) {
4618 ++ div /= 2;
4619 ++ fact = 0;
4620 ++ }
4621 ++
4622 + return (0x100 - div) << 8 | fact << 2 | ps;
4623 + }
4624 +
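
The ch341 hunk prefers fact = 0 whenever the computed divisor is even. A sketch of that step in isolation, assuming (as the code implies) that fact = 1 selects a base clock twice as fast as fact = 0, so halving an even divisor while clearing fact keeps the same baud rate on the slower, more error-tolerant clock:

static void ch341_prefer_low_clock(unsigned int *div, unsigned int *fact)
{
	/* e.g. fact = 1, div = 6 becomes fact = 0, div = 3: same rate */
	if (*fact == 1 && *div % 2 == 0) {
		*div /= 2;
		*fact = 0;
	}
}
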
4625 +diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
4626 +index 95bba3ba6ac6..3670fda02c34 100644
4627 +--- a/drivers/usb/storage/uas.c
4628 ++++ b/drivers/usb/storage/uas.c
4629 +@@ -45,6 +45,7 @@ struct uas_dev_info {
4630 + struct scsi_cmnd *cmnd[MAX_CMNDS];
4631 + spinlock_t lock;
4632 + struct work_struct work;
4633 ++ struct work_struct scan_work; /* for async scanning */
4634 + };
4635 +
4636 + enum {
4637 +@@ -114,6 +115,17 @@ out:
4638 + spin_unlock_irqrestore(&devinfo->lock, flags);
4639 + }
4640 +
4641 ++static void uas_scan_work(struct work_struct *work)
4642 ++{
4643 ++ struct uas_dev_info *devinfo =
4644 ++ container_of(work, struct uas_dev_info, scan_work);
4645 ++ struct Scsi_Host *shost = usb_get_intfdata(devinfo->intf);
4646 ++
4647 ++ dev_dbg(&devinfo->intf->dev, "starting scan\n");
4648 ++ scsi_scan_host(shost);
4649 ++ dev_dbg(&devinfo->intf->dev, "scan complete\n");
4650 ++}
4651 ++
4652 + static void uas_add_work(struct uas_cmd_info *cmdinfo)
4653 + {
4654 + struct scsi_pointer *scp = (void *)cmdinfo;
4655 +@@ -982,6 +994,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
4656 + init_usb_anchor(&devinfo->data_urbs);
4657 + spin_lock_init(&devinfo->lock);
4658 + INIT_WORK(&devinfo->work, uas_do_work);
4659 ++ INIT_WORK(&devinfo->scan_work, uas_scan_work);
4660 +
4661 + result = uas_configure_endpoints(devinfo);
4662 + if (result)
4663 +@@ -998,7 +1011,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
4664 + if (result)
4665 + goto free_streams;
4666 +
4667 +- scsi_scan_host(shost);
4668 ++ /* Submit the work item for async SCSI-device scanning */
4669 ++ schedule_work(&devinfo->scan_work);
4670 ++
4671 + return result;
4672 +
4673 + free_streams:
4674 +@@ -1166,6 +1181,12 @@ static void uas_disconnect(struct usb_interface *intf)
4675 + usb_kill_anchored_urbs(&devinfo->data_urbs);
4676 + uas_zap_pending(devinfo, DID_NO_CONNECT);
4677 +
4678 ++ /*
4679 ++ * Prevent SCSI scanning (if it hasn't started yet)
4680 ++ * or wait for the SCSI-scanning routine to stop.
4681 ++ */
4682 ++ cancel_work_sync(&devinfo->scan_work);
4683 ++
4684 + scsi_remove_host(shost);
4685 + uas_free_streams(devinfo);
4686 + scsi_host_put(shost);
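
The uas hunks move scsi_scan_host(), which can block for a long time on slow devices, out of probe() and into a work item; disconnect() must then cancel or wait for that work before tearing the host down. The lifecycle, reduced to a generic sketch (names are illustrative, not the driver's):

#include <linux/workqueue.h>
#include <scsi/scsi_host.h>

struct my_dev {
	struct work_struct scan_work;
	struct Scsi_Host *shost;
};

static void my_scan_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, scan_work);

	scsi_scan_host(dev->shost);	/* may sleep for a long time */
}

/*
 * probe():      INIT_WORK(&dev->scan_work, my_scan_work);
 *               schedule_work(&dev->scan_work);
 * disconnect(): cancel_work_sync(&dev->scan_work);  before removing
 *               scsi_remove_host(dev->shost);       the host
 */
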
4687 +diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
4688 +index 8b9919c26095..456a164364a2 100644
4689 +--- a/drivers/xen/preempt.c
4690 ++++ b/drivers/xen/preempt.c
4691 +@@ -33,7 +33,9 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void)
4692 + * cpu.
4693 + */
4694 + __this_cpu_write(xen_in_preemptible_hcall, false);
4695 +- _cond_resched();
4696 ++ local_irq_enable();
4697 ++ cond_resched();
4698 ++ local_irq_disable();
4699 + __this_cpu_write(xen_in_preemptible_hcall, true);
4700 + }
4701 + }
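
The xen/preempt.c hunk exists because cond_resched() must not be called with interrupts disabled, and xen_maybe_preempt_hcall() runs in exactly that state; the fix opens an explicit IRQ window around the voluntary preemption point (and drops the internal _cond_resched() spelling). The shape of the pattern:

static void preemptible_window(void)
{
	/* caller runs with IRQs off and can tolerate them on here */
	local_irq_enable();
	cond_resched();		/* may switch tasks */
	local_irq_disable();	/* restore the caller's expected state */
}
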
4702 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
4703 +index c1e47db439e2..3bb4bc2c9fd1 100644
4704 +--- a/fs/btrfs/disk-io.c
4705 ++++ b/fs/btrfs/disk-io.c
4706 +@@ -3200,6 +3200,7 @@ int __cold open_ctree(struct super_block *sb,
4707 + if (IS_ERR(fs_info->fs_root)) {
4708 + err = PTR_ERR(fs_info->fs_root);
4709 + btrfs_warn(fs_info, "failed to read fs tree: %d", err);
4710 ++ fs_info->fs_root = NULL;
4711 + goto fail_qgroup;
4712 + }
4713 +
4714 +@@ -4272,6 +4273,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4715 + cond_resched();
4716 + spin_lock(&delayed_refs->lock);
4717 + }
4718 ++ btrfs_qgroup_destroy_extent_records(trans);
4719 +
4720 + spin_unlock(&delayed_refs->lock);
4721 +
4722 +@@ -4497,7 +4499,6 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4723 + wake_up(&fs_info->transaction_wait);
4724 +
4725 + btrfs_destroy_delayed_inodes(fs_info);
4726 +- btrfs_assert_delayed_root_empty(fs_info);
4727 +
4728 + btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4729 + EXTENT_DIRTY);
4730 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
4731 +index 274318e9114e..f50341ce5d44 100644
4732 +--- a/fs/btrfs/extent-tree.c
4733 ++++ b/fs/btrfs/extent-tree.c
4734 +@@ -4430,6 +4430,8 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4735 +
4736 + ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
4737 + offset, ins, 1);
4738 ++ if (ret)
4739 ++ btrfs_pin_extent(fs_info, ins->objectid, ins->offset, 1);
4740 + btrfs_put_block_group(block_group);
4741 + return ret;
4742 + }
4743 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
4744 +index 537b4c563f09..e6901744a5be 100644
4745 +--- a/fs/btrfs/inode.c
4746 ++++ b/fs/btrfs/inode.c
4747 +@@ -4757,6 +4757,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4748 + u64 bytes_deleted = 0;
4749 + bool be_nice = false;
4750 + bool should_throttle = false;
4751 ++ const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
4752 ++ struct extent_state *cached_state = NULL;
4753 +
4754 + BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4755 +
4756 +@@ -4773,6 +4775,10 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4757 + return -ENOMEM;
4758 + path->reada = READA_BACK;
4759 +
4760 ++ if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4761 ++ lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1,
4762 ++ &cached_state);
4763 ++
4764 + /*
4765 + * We want to drop from the next block forward in case this new size is
4766 + * not block aligned since we will be keeping the last block of the
4767 +@@ -4809,7 +4815,6 @@ search_again:
4768 + goto out;
4769 + }
4770 +
4771 +- path->leave_spinning = 1;
4772 + ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4773 + if (ret < 0)
4774 + goto out;
4775 +@@ -4961,7 +4966,6 @@ delete:
4776 + root == fs_info->tree_root)) {
4777 + struct btrfs_ref ref = { 0 };
4778 +
4779 +- btrfs_set_path_blocking(path);
4780 + bytes_deleted += extent_num_bytes;
4781 +
4782 + btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
4783 +@@ -5037,6 +5041,8 @@ out:
4784 + if (!ret && last_size > new_size)
4785 + last_size = new_size;
4786 + btrfs_ordered_update_i_size(inode, last_size, NULL);
4787 ++ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start,
4788 ++ (u64)-1, &cached_state);
4789 + }
4790 +
4791 + btrfs_free_path(path);
4792 +@@ -10481,6 +10487,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
4793 + struct btrfs_root *root = BTRFS_I(inode)->root;
4794 + struct btrfs_key ins;
4795 + u64 cur_offset = start;
4796 ++ u64 clear_offset = start;
4797 + u64 i_size;
4798 + u64 cur_bytes;
4799 + u64 last_alloc = (u64)-1;
4800 +@@ -10515,6 +10522,15 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
4801 + btrfs_end_transaction(trans);
4802 + break;
4803 + }
4804 ++
4805 ++ /*
4806 ++ * We've reserved this space, and thus converted it from
4807 ++ * ->bytes_may_use to ->bytes_reserved. Any error that happens
4808 ++ * from here on out we will only need to clear our reservation
4809 ++ * for the remaining unreserved area, so advance our
4810 ++ * clear_offset by our extent size.
4811 ++ */
4812 ++ clear_offset += ins.offset;
4813 + btrfs_dec_block_group_reservations(fs_info, ins.objectid);
4814 +
4815 + last_alloc = ins.offset;
4816 +@@ -10594,9 +10610,9 @@ next:
4817 + if (own_trans)
4818 + btrfs_end_transaction(trans);
4819 + }
4820 +- if (cur_offset < end)
4821 +- btrfs_free_reserved_data_space(inode, NULL, cur_offset,
4822 +- end - cur_offset + 1);
4823 ++ if (clear_offset < end)
4824 ++ btrfs_free_reserved_data_space(inode, NULL, clear_offset,
4825 ++ end - clear_offset + 1);
4826 + return ret;
4827 + }
4828 +
4829 +diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
4830 +index fb09bc2f8e4d..0596117202a2 100644
4831 +--- a/fs/btrfs/ordered-data.c
4832 ++++ b/fs/btrfs/ordered-data.c
4833 +@@ -686,10 +686,15 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
4834 + }
4835 + btrfs_start_ordered_extent(inode, ordered, 1);
4836 + end = ordered->file_offset;
4837 ++ /*
4838 ++ * If the ordered extent had an error, save the error but don't
4839 ++ * exit without waiting first for all other ordered extents in
4840 ++ * the range to complete.
4841 ++ */
4842 + if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
4843 + ret = -EIO;
4844 + btrfs_put_ordered_extent(ordered);
4845 +- if (ret || end == 0 || end == start)
4846 ++ if (end == 0 || end == start)
4847 + break;
4848 + end--;
4849 + }
4850 +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
4851 +index 39fc8c3d3a75..410b791f28a5 100644
4852 +--- a/fs/btrfs/qgroup.c
4853 ++++ b/fs/btrfs/qgroup.c
4854 +@@ -4016,3 +4016,16 @@ out:
4855 + }
4856 + return ret;
4857 + }
4858 ++
4859 ++void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
4860 ++{
4861 ++ struct btrfs_qgroup_extent_record *entry;
4862 ++ struct btrfs_qgroup_extent_record *next;
4863 ++ struct rb_root *root;
4864 ++
4865 ++ root = &trans->delayed_refs.dirty_extent_root;
4866 ++ rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
4867 ++ ulist_free(entry->old_roots);
4868 ++ kfree(entry);
4869 ++ }
4870 ++}
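
btrfs_qgroup_destroy_extent_records() above relies on rbtree_postorder_for_each_entry_safe(), which visits nodes in post-order so each one can be freed without rb_erase() or rebalancing; this is only valid when the whole tree is being discarded, as it is during transaction cleanup. The generic form, with hypothetical types:

#include <linux/rbtree.h>
#include <linux/slab.h>

struct rec {
	struct rb_node node;
	void *payload;
};

static void destroy_tree(struct rb_root *root)
{
	struct rec *rec, *next;

	rbtree_postorder_for_each_entry_safe(rec, next, root, node) {
		kfree(rec->payload);
		kfree(rec);	/* no rb_erase(): the tree is dropped whole */
	}
	*root = RB_ROOT;
}
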
4871 +diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
4872 +index 236f12224d52..1bc654459469 100644
4873 +--- a/fs/btrfs/qgroup.h
4874 ++++ b/fs/btrfs/qgroup.h
4875 +@@ -414,5 +414,6 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
4876 + u64 last_snapshot);
4877 + int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
4878 + struct btrfs_root *root, struct extent_buffer *eb);
4879 ++void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
4880 +
4881 + #endif
4882 +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
4883 +index 33dcc88b428a..beb6c69cd1e5 100644
4884 +--- a/fs/btrfs/transaction.c
4885 ++++ b/fs/btrfs/transaction.c
4886 +@@ -121,6 +121,8 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
4887 + BUG_ON(!list_empty(&transaction->list));
4888 + WARN_ON(!RB_EMPTY_ROOT(
4889 + &transaction->delayed_refs.href_root.rb_root));
4890 ++ WARN_ON(!RB_EMPTY_ROOT(
4891 ++ &transaction->delayed_refs.dirty_extent_root));
4892 + if (transaction->delayed_refs.pending_csums)
4893 + btrfs_err(transaction->fs_info,
4894 + "pending csums is %llu",
4895 +diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
4896 +index f91db24bbf3b..a064b408d841 100644
4897 +--- a/fs/ecryptfs/crypto.c
4898 ++++ b/fs/ecryptfs/crypto.c
4899 +@@ -311,8 +311,10 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
4900 + struct extent_crypt_result ecr;
4901 + int rc = 0;
4902 +
4903 +- BUG_ON(!crypt_stat || !crypt_stat->tfm
4904 +- || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
4905 ++ if (!crypt_stat || !crypt_stat->tfm
4906 ++ || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
4907 ++ return -EINVAL;
4908 ++
4909 + if (unlikely(ecryptfs_verbosity > 0)) {
4910 + ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
4911 + crypt_stat->key_size);
4912 +diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
4913 +index 216fbe6a4837..4dc09638de8f 100644
4914 +--- a/fs/ecryptfs/keystore.c
4915 ++++ b/fs/ecryptfs/keystore.c
4916 +@@ -1304,7 +1304,7 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat,
4917 + printk(KERN_WARNING "Tag 1 packet contains key larger "
4918 + "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n");
4919 + rc = -EINVAL;
4920 +- goto out;
4921 ++ goto out_free;
4922 + }
4923 + memcpy((*new_auth_tok)->session_key.encrypted_key,
4924 + &data[(*packet_size)], (body_size - (ECRYPTFS_SIG_SIZE + 2)));
4925 +diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
4926 +index d668e60b85b5..c05ca39aa449 100644
4927 +--- a/fs/ecryptfs/messaging.c
4928 ++++ b/fs/ecryptfs/messaging.c
4929 +@@ -379,6 +379,7 @@ int __init ecryptfs_init_messaging(void)
4930 + * ecryptfs_message_buf_len),
4931 + GFP_KERNEL);
4932 + if (!ecryptfs_msg_ctx_arr) {
4933 ++ kfree(ecryptfs_daemon_hash);
4934 + rc = -ENOMEM;
4935 + goto out;
4936 + }
4937 +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
4938 +index 0b202e00d93f..5aba67a504cf 100644
4939 +--- a/fs/ext4/balloc.c
4940 ++++ b/fs/ext4/balloc.c
4941 +@@ -270,6 +270,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
4942 + ext4_group_t ngroups = ext4_get_groups_count(sb);
4943 + struct ext4_group_desc *desc;
4944 + struct ext4_sb_info *sbi = EXT4_SB(sb);
4945 ++ struct buffer_head *bh_p;
4946 +
4947 + if (block_group >= ngroups) {
4948 + ext4_error(sb, "block_group >= groups_count - block_group = %u,"
4949 +@@ -280,7 +281,14 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
4950 +
4951 + group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
4952 + offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
4953 +- if (!sbi->s_group_desc[group_desc]) {
4954 ++ bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
4955 ++ /*
4956 ++ * sbi_array_rcu_deref returns with rcu unlocked; this is ok since
4957 ++ * the pointer being dereferenced won't be dereferenced again. By
4958 ++ * looking at the usage in add_new_gdb() the value isn't modified,
4959 ++ * just the pointer, and so it remains valid.
4960 ++ */
4961 ++ if (!bh_p) {
4962 + ext4_error(sb, "Group descriptor not loaded - "
4963 + "block_group = %u, group_desc = %u, desc = %u",
4964 + block_group, group_desc, offset);
4965 +@@ -288,10 +296,10 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
4966 + }
4967 +
4968 + desc = (struct ext4_group_desc *)(
4969 +- (__u8 *)sbi->s_group_desc[group_desc]->b_data +
4970 ++ (__u8 *)bh_p->b_data +
4971 + offset * EXT4_DESC_SIZE(sb));
4972 + if (bh)
4973 +- *bh = sbi->s_group_desc[group_desc];
4974 ++ *bh = bh_p;
4975 + return desc;
4976 + }
4977 +
4978 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
4979 +index 1fd6c1e2ce2a..7a14e553d58f 100644
4980 +--- a/fs/ext4/ext4.h
4981 ++++ b/fs/ext4/ext4.h
4982 +@@ -1401,7 +1401,7 @@ struct ext4_sb_info {
4983 + loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */
4984 + struct buffer_head * s_sbh; /* Buffer containing the super block */
4985 + struct ext4_super_block *s_es; /* Pointer to the super block in the buffer */
4986 +- struct buffer_head **s_group_desc;
4987 ++ struct buffer_head * __rcu *s_group_desc;
4988 + unsigned int s_mount_opt;
4989 + unsigned int s_mount_opt2;
4990 + unsigned int s_mount_flags;
4991 +@@ -1463,7 +1463,7 @@ struct ext4_sb_info {
4992 + #endif
4993 +
4994 + /* for buddy allocator */
4995 +- struct ext4_group_info ***s_group_info;
4996 ++ struct ext4_group_info ** __rcu *s_group_info;
4997 + struct inode *s_buddy_cache;
4998 + spinlock_t s_md_lock;
4999 + unsigned short *s_mb_offsets;
5000 +@@ -1513,7 +1513,7 @@ struct ext4_sb_info {
5001 + unsigned int s_extent_max_zeroout_kb;
5002 +
5003 + unsigned int s_log_groups_per_flex;
5004 +- struct flex_groups *s_flex_groups;
5005 ++ struct flex_groups * __rcu *s_flex_groups;
5006 + ext4_group_t s_flex_groups_allocated;
5007 +
5008 + /* workqueue for reserved extent conversions (buffered io) */
5009 +@@ -1553,8 +1553,11 @@ struct ext4_sb_info {
5010 + struct ratelimit_state s_warning_ratelimit_state;
5011 + struct ratelimit_state s_msg_ratelimit_state;
5012 +
5013 +- /* Barrier between changing inodes' journal flags and writepages ops. */
5014 +- struct percpu_rw_semaphore s_journal_flag_rwsem;
5015 ++ /*
5016 ++ * Barrier between writepages ops and changing any inode's JOURNAL_DATA
5017 ++ * or EXTENTS flag.
5018 ++ */
5019 ++ struct percpu_rw_semaphore s_writepages_rwsem;
5020 + struct dax_device *s_daxdev;
5021 + };
5022 +
5023 +@@ -1574,6 +1577,23 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
5024 + ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
5025 + }
5026 +
5027 ++/*
5028 ++ * Returns: sbi->field[index]
5029 ++ * Used to access an array element from the following sbi fields which require
5030 ++ * rcu protection to avoid dereferencing an invalid pointer due to reassignment
5031 ++ * - s_group_desc
5032 ++ * - s_group_info
5033 ++ * - s_flex_groups
5034 ++ */
5035 ++#define sbi_array_rcu_deref(sbi, field, index) \
5036 ++({ \
5037 ++ typeof(*((sbi)->field)) _v; \
5038 ++ rcu_read_lock(); \
5039 ++ _v = ((typeof(_v)*)rcu_dereference((sbi)->field))[index]; \
5040 ++ rcu_read_unlock(); \
5041 ++ _v; \
5042 ++})
5043 ++
5044 + /*
5045 + * Inode dynamic state flags
5046 + */
5047 +@@ -2669,6 +2689,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
5048 + extern bool ext4_empty_dir(struct inode *inode);
5049 +
5050 + /* resize.c */
5051 ++extern void ext4_kvfree_array_rcu(void *to_free);
5052 + extern int ext4_group_add(struct super_block *sb,
5053 + struct ext4_new_group_data *input);
5054 + extern int ext4_group_extend(struct super_block *sb,
5055 +@@ -2916,13 +2937,13 @@ static inline
5056 + struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
5057 + ext4_group_t group)
5058 + {
5059 +- struct ext4_group_info ***grp_info;
5060 ++ struct ext4_group_info **grp_info;
5061 + long indexv, indexh;
5062 + BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
5063 +- grp_info = EXT4_SB(sb)->s_group_info;
5064 + indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
5065 + indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
5066 +- return grp_info[indexv][indexh];
5067 ++ grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
5068 ++ return grp_info[indexh];
5069 + }
5070 +
5071 + /*
5072 +@@ -2972,7 +2993,7 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
5073 + !inode_is_locked(inode));
5074 + down_write(&EXT4_I(inode)->i_data_sem);
5075 + if (newsize > EXT4_I(inode)->i_disksize)
5076 +- EXT4_I(inode)->i_disksize = newsize;
5077 ++ WRITE_ONCE(EXT4_I(inode)->i_disksize, newsize);
5078 + up_write(&EXT4_I(inode)->i_data_sem);
5079 + }
5080 +
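
The sbi_array_rcu_deref() macro added above encapsulates a common pattern: the arrays it guards are replaced wholesale on resize, so a reader takes rcu_read_lock() only long enough to fetch one element, and the element stays valid afterwards because only the array pointer is swapped and the old array is freed after a grace period. Usage, sketched with a hypothetical wrapper:

static struct buffer_head *get_gd_bh(struct ext4_sb_info *sbi,
				     unsigned long idx)
{
	/* rcu_read_lock()/rcu_read_unlock() happen inside the macro */
	return sbi_array_rcu_deref(sbi, s_group_desc, idx);
}
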
5081 +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
5082 +index 8ca4a23129aa..7db0c8814f2e 100644
5083 +--- a/fs/ext4/ialloc.c
5084 ++++ b/fs/ext4/ialloc.c
5085 +@@ -325,11 +325,13 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
5086 +
5087 + percpu_counter_inc(&sbi->s_freeinodes_counter);
5088 + if (sbi->s_log_groups_per_flex) {
5089 +- ext4_group_t f = ext4_flex_group(sbi, block_group);
5090 ++ struct flex_groups *fg;
5091 +
5092 +- atomic_inc(&sbi->s_flex_groups[f].free_inodes);
5093 ++ fg = sbi_array_rcu_deref(sbi, s_flex_groups,
5094 ++ ext4_flex_group(sbi, block_group));
5095 ++ atomic_inc(&fg->free_inodes);
5096 + if (is_directory)
5097 +- atomic_dec(&sbi->s_flex_groups[f].used_dirs);
5098 ++ atomic_dec(&fg->used_dirs);
5099 + }
5100 + BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
5101 + fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
5102 +@@ -365,12 +367,13 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
5103 + int flex_size, struct orlov_stats *stats)
5104 + {
5105 + struct ext4_group_desc *desc;
5106 +- struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
5107 +
5108 + if (flex_size > 1) {
5109 +- stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
5110 +- stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
5111 +- stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
5112 ++ struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
5113 ++ s_flex_groups, g);
5114 ++ stats->free_inodes = atomic_read(&fg->free_inodes);
5115 ++ stats->free_clusters = atomic64_read(&fg->free_clusters);
5116 ++ stats->used_dirs = atomic_read(&fg->used_dirs);
5117 + return;
5118 + }
5119 +
5120 +@@ -1051,7 +1054,8 @@ got:
5121 + if (sbi->s_log_groups_per_flex) {
5122 + ext4_group_t f = ext4_flex_group(sbi, group);
5123 +
5124 +- atomic_inc(&sbi->s_flex_groups[f].used_dirs);
5125 ++ atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
5126 ++ f)->used_dirs);
5127 + }
5128 + }
5129 + if (ext4_has_group_desc_csum(sb)) {
5130 +@@ -1074,7 +1078,8 @@ got:
5131 +
5132 + if (sbi->s_log_groups_per_flex) {
5133 + flex_group = ext4_flex_group(sbi, group);
5134 +- atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
5135 ++ atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
5136 ++ flex_group)->free_inodes);
5137 + }
5138 +
5139 + inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
5140 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
5141 +index 25191201ccdc..74a941e920cf 100644
5142 +--- a/fs/ext4/inode.c
5143 ++++ b/fs/ext4/inode.c
5144 +@@ -2466,7 +2466,7 @@ update_disksize:
5145 + * truncate are avoided by checking i_size under i_data_sem.
5146 + */
5147 + disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
5148 +- if (disksize > EXT4_I(inode)->i_disksize) {
5149 ++ if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
5150 + int err2;
5151 + loff_t i_size;
5152 +
5153 +@@ -2627,7 +2627,7 @@ static int ext4_writepages(struct address_space *mapping,
5154 + if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5155 + return -EIO;
5156 +
5157 +- percpu_down_read(&sbi->s_journal_flag_rwsem);
5158 ++ percpu_down_read(&sbi->s_writepages_rwsem);
5159 + trace_ext4_writepages(inode, wbc);
5160 +
5161 + /*
5162 +@@ -2848,7 +2848,7 @@ unplug:
5163 + out_writepages:
5164 + trace_ext4_writepages_result(inode, wbc, ret,
5165 + nr_to_write - wbc->nr_to_write);
5166 +- percpu_up_read(&sbi->s_journal_flag_rwsem);
5167 ++ percpu_up_read(&sbi->s_writepages_rwsem);
5168 + return ret;
5169 + }
5170 +
5171 +@@ -2863,13 +2863,13 @@ static int ext4_dax_writepages(struct address_space *mapping,
5172 + if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5173 + return -EIO;
5174 +
5175 +- percpu_down_read(&sbi->s_journal_flag_rwsem);
5176 ++ percpu_down_read(&sbi->s_writepages_rwsem);
5177 + trace_ext4_writepages(inode, wbc);
5178 +
5179 + ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
5180 + trace_ext4_writepages_result(inode, wbc, ret,
5181 + nr_to_write - wbc->nr_to_write);
5182 +- percpu_up_read(&sbi->s_journal_flag_rwsem);
5183 ++ percpu_up_read(&sbi->s_writepages_rwsem);
5184 + return ret;
5185 + }
5186 +
5187 +@@ -5830,7 +5830,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
5188 + }
5189 + }
5190 +
5191 +- percpu_down_write(&sbi->s_journal_flag_rwsem);
5192 ++ percpu_down_write(&sbi->s_writepages_rwsem);
5193 + jbd2_journal_lock_updates(journal);
5194 +
5195 + /*
5196 +@@ -5847,7 +5847,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
5197 + err = jbd2_journal_flush(journal);
5198 + if (err < 0) {
5199 + jbd2_journal_unlock_updates(journal);
5200 +- percpu_up_write(&sbi->s_journal_flag_rwsem);
5201 ++ percpu_up_write(&sbi->s_writepages_rwsem);
5202 + return err;
5203 + }
5204 + ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5205 +@@ -5855,7 +5855,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
5206 + ext4_set_aops(inode);
5207 +
5208 + jbd2_journal_unlock_updates(journal);
5209 +- percpu_up_write(&sbi->s_journal_flag_rwsem);
5210 ++ percpu_up_write(&sbi->s_writepages_rwsem);
5211 +
5212 + if (val)
5213 + up_write(&EXT4_I(inode)->i_mmap_sem);
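
The i_disksize changes pair a WRITE_ONCE() under i_data_sem with a READ_ONCE() on the lockless writeback path; without the annotations the compiler may tear or reload the plain access, making the unlocked comparison unreliable. The pairing, schematically (a sketch, not the ext4 functions themselves):

/* writer: serialized by i_data_sem */
static void set_disksize(struct ext4_inode_info *ei, loff_t newsize)
{
	down_write(&ei->i_data_sem);
	if (newsize > ei->i_disksize)	/* plain read is safe under the lock */
		WRITE_ONCE(ei->i_disksize, newsize);
	up_write(&ei->i_data_sem);
}

/* lockless fast path: callers re-check under the lock before acting */
static bool disksize_may_grow(struct ext4_inode_info *ei, loff_t disksize)
{
	return disksize > READ_ONCE(ei->i_disksize);
}
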
5214 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
5215 +index a3e2767bdf2f..c76ffc259d19 100644
5216 +--- a/fs/ext4/mballoc.c
5217 ++++ b/fs/ext4/mballoc.c
5218 +@@ -2356,7 +2356,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
5219 + {
5220 + struct ext4_sb_info *sbi = EXT4_SB(sb);
5221 + unsigned size;
5222 +- struct ext4_group_info ***new_groupinfo;
5223 ++ struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
5224 +
5225 + size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
5226 + EXT4_DESC_PER_BLOCK_BITS(sb);
5227 +@@ -2369,13 +2369,16 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
5228 + ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
5229 + return -ENOMEM;
5230 + }
5231 +- if (sbi->s_group_info) {
5232 +- memcpy(new_groupinfo, sbi->s_group_info,
5233 ++ rcu_read_lock();
5234 ++ old_groupinfo = rcu_dereference(sbi->s_group_info);
5235 ++ if (old_groupinfo)
5236 ++ memcpy(new_groupinfo, old_groupinfo,
5237 + sbi->s_group_info_size * sizeof(*sbi->s_group_info));
5238 +- kvfree(sbi->s_group_info);
5239 +- }
5240 +- sbi->s_group_info = new_groupinfo;
5241 ++ rcu_read_unlock();
5242 ++ rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
5243 + sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
5244 ++ if (old_groupinfo)
5245 ++ ext4_kvfree_array_rcu(old_groupinfo);
5246 + ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
5247 + sbi->s_group_info_size);
5248 + return 0;
5249 +@@ -2387,6 +2390,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
5250 + {
5251 + int i;
5252 + int metalen = 0;
5253 ++ int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
5254 + struct ext4_sb_info *sbi = EXT4_SB(sb);
5255 + struct ext4_group_info **meta_group_info;
5256 + struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
5257 +@@ -2405,12 +2409,12 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
5258 + "for a buddy group");
5259 + goto exit_meta_group_info;
5260 + }
5261 +- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
5262 +- meta_group_info;
5263 ++ rcu_read_lock();
5264 ++ rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
5265 ++ rcu_read_unlock();
5266 + }
5267 +
5268 +- meta_group_info =
5269 +- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
5270 ++ meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
5271 + i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
5272 +
5273 + meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
5274 +@@ -2458,8 +2462,13 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
5275 + exit_group_info:
5276 + /* If a meta_group_info table has been allocated, release it now */
5277 + if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
5278 +- kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
5279 +- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
5280 ++ struct ext4_group_info ***group_info;
5281 ++
5282 ++ rcu_read_lock();
5283 ++ group_info = rcu_dereference(sbi->s_group_info);
5284 ++ kfree(group_info[idx]);
5285 ++ group_info[idx] = NULL;
5286 ++ rcu_read_unlock();
5287 + }
5288 + exit_meta_group_info:
5289 + return -ENOMEM;
5290 +@@ -2472,6 +2481,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
5291 + struct ext4_sb_info *sbi = EXT4_SB(sb);
5292 + int err;
5293 + struct ext4_group_desc *desc;
5294 ++ struct ext4_group_info ***group_info;
5295 + struct kmem_cache *cachep;
5296 +
5297 + err = ext4_mb_alloc_groupinfo(sb, ngroups);
5298 +@@ -2507,11 +2517,16 @@ err_freebuddy:
5299 + while (i-- > 0)
5300 + kmem_cache_free(cachep, ext4_get_group_info(sb, i));
5301 + i = sbi->s_group_info_size;
5302 ++ rcu_read_lock();
5303 ++ group_info = rcu_dereference(sbi->s_group_info);
5304 + while (i-- > 0)
5305 +- kfree(sbi->s_group_info[i]);
5306 ++ kfree(group_info[i]);
5307 ++ rcu_read_unlock();
5308 + iput(sbi->s_buddy_cache);
5309 + err_freesgi:
5310 +- kvfree(sbi->s_group_info);
5311 ++ rcu_read_lock();
5312 ++ kvfree(rcu_dereference(sbi->s_group_info));
5313 ++ rcu_read_unlock();
5314 + return -ENOMEM;
5315 + }
5316 +
5317 +@@ -2700,7 +2715,7 @@ int ext4_mb_release(struct super_block *sb)
5318 + ext4_group_t ngroups = ext4_get_groups_count(sb);
5319 + ext4_group_t i;
5320 + int num_meta_group_infos;
5321 +- struct ext4_group_info *grinfo;
5322 ++ struct ext4_group_info *grinfo, ***group_info;
5323 + struct ext4_sb_info *sbi = EXT4_SB(sb);
5324 + struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
5325 +
5326 +@@ -2719,9 +2734,12 @@ int ext4_mb_release(struct super_block *sb)
5327 + num_meta_group_infos = (ngroups +
5328 + EXT4_DESC_PER_BLOCK(sb) - 1) >>
5329 + EXT4_DESC_PER_BLOCK_BITS(sb);
5330 ++ rcu_read_lock();
5331 ++ group_info = rcu_dereference(sbi->s_group_info);
5332 + for (i = 0; i < num_meta_group_infos; i++)
5333 +- kfree(sbi->s_group_info[i]);
5334 +- kvfree(sbi->s_group_info);
5335 ++ kfree(group_info[i]);
5336 ++ kvfree(group_info);
5337 ++ rcu_read_unlock();
5338 + }
5339 + kfree(sbi->s_mb_offsets);
5340 + kfree(sbi->s_mb_maxs);
5341 +@@ -3020,7 +3038,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
5342 + ext4_group_t flex_group = ext4_flex_group(sbi,
5343 + ac->ac_b_ex.fe_group);
5344 + atomic64_sub(ac->ac_b_ex.fe_len,
5345 +- &sbi->s_flex_groups[flex_group].free_clusters);
5346 ++ &sbi_array_rcu_deref(sbi, s_flex_groups,
5347 ++ flex_group)->free_clusters);
5348 + }
5349 +
5350 + err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
5351 +@@ -4914,7 +4933,8 @@ do_more:
5352 + if (sbi->s_log_groups_per_flex) {
5353 + ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
5354 + atomic64_add(count_clusters,
5355 +- &sbi->s_flex_groups[flex_group].free_clusters);
5356 ++ &sbi_array_rcu_deref(sbi, s_flex_groups,
5357 ++ flex_group)->free_clusters);
5358 + }
5359 +
5360 + /*
5361 +@@ -5071,7 +5091,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
5362 + if (sbi->s_log_groups_per_flex) {
5363 + ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
5364 + atomic64_add(clusters_freed,
5365 +- &sbi->s_flex_groups[flex_group].free_clusters);
5366 ++ &sbi_array_rcu_deref(sbi, s_flex_groups,
5367 ++ flex_group)->free_clusters);
5368 + }
5369 +
5370 + ext4_mb_unload_buddy(&e4b);
5371 +diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
5372 +index 89725fa42573..fb6520f37135 100644
5373 +--- a/fs/ext4/migrate.c
5374 ++++ b/fs/ext4/migrate.c
5375 +@@ -407,6 +407,7 @@ static int free_ext_block(handle_t *handle, struct inode *inode)
5376 +
5377 + int ext4_ext_migrate(struct inode *inode)
5378 + {
5379 ++ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5380 + handle_t *handle;
5381 + int retval = 0, i;
5382 + __le32 *i_data;
5383 +@@ -431,6 +432,8 @@ int ext4_ext_migrate(struct inode *inode)
5384 + */
5385 + return retval;
5386 +
5387 ++ percpu_down_write(&sbi->s_writepages_rwsem);
5388 ++
5389 + /*
5390 + * Worst case we can touch the allocation bitmaps, a bgd
5391 + * block, and a block to link in the orphan list. We do need
5392 +@@ -441,7 +444,7 @@ int ext4_ext_migrate(struct inode *inode)
5393 +
5394 + if (IS_ERR(handle)) {
5395 + retval = PTR_ERR(handle);
5396 +- return retval;
5397 ++ goto out_unlock;
5398 + }
5399 + goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
5400 + EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
5401 +@@ -452,7 +455,7 @@ int ext4_ext_migrate(struct inode *inode)
5402 + if (IS_ERR(tmp_inode)) {
5403 + retval = PTR_ERR(tmp_inode);
5404 + ext4_journal_stop(handle);
5405 +- return retval;
5406 ++ goto out_unlock;
5407 + }
5408 + i_size_write(tmp_inode, i_size_read(inode));
5409 + /*
5410 +@@ -494,7 +497,7 @@ int ext4_ext_migrate(struct inode *inode)
5411 + */
5412 + ext4_orphan_del(NULL, tmp_inode);
5413 + retval = PTR_ERR(handle);
5414 +- goto out;
5415 ++ goto out_tmp_inode;
5416 + }
5417 +
5418 + ei = EXT4_I(inode);
5419 +@@ -576,10 +579,11 @@ err_out:
5420 + ext4_ext_tree_init(handle, tmp_inode);
5421 + out_stop:
5422 + ext4_journal_stop(handle);
5423 +-out:
5424 ++out_tmp_inode:
5425 + unlock_new_inode(tmp_inode);
5426 + iput(tmp_inode);
5427 +-
5428 ++out_unlock:
5429 ++ percpu_up_write(&sbi->s_writepages_rwsem);
5430 + return retval;
5431 + }
5432 +
5433 +@@ -589,7 +593,8 @@ out:
5434 + int ext4_ind_migrate(struct inode *inode)
5435 + {
5436 + struct ext4_extent_header *eh;
5437 +- struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
5438 ++ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5439 ++ struct ext4_super_block *es = sbi->s_es;
5440 + struct ext4_inode_info *ei = EXT4_I(inode);
5441 + struct ext4_extent *ex;
5442 + unsigned int i, len;
5443 +@@ -613,9 +618,13 @@ int ext4_ind_migrate(struct inode *inode)
5444 + if (test_opt(inode->i_sb, DELALLOC))
5445 + ext4_alloc_da_blocks(inode);
5446 +
5447 ++ percpu_down_write(&sbi->s_writepages_rwsem);
5448 ++
5449 + handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
5450 +- if (IS_ERR(handle))
5451 +- return PTR_ERR(handle);
5452 ++ if (IS_ERR(handle)) {
5453 ++ ret = PTR_ERR(handle);
5454 ++ goto out_unlock;
5455 ++ }
5456 +
5457 + down_write(&EXT4_I(inode)->i_data_sem);
5458 + ret = ext4_ext_check_inode(inode);
5459 +@@ -650,5 +659,7 @@ int ext4_ind_migrate(struct inode *inode)
5460 + errout:
5461 + ext4_journal_stop(handle);
5462 + up_write(&EXT4_I(inode)->i_data_sem);
5463 ++out_unlock:
5464 ++ percpu_up_write(&sbi->s_writepages_rwsem);
5465 + return ret;
5466 + }
5467 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
5468 +index deb9f7a02976..ee615a93af6e 100644
5469 +--- a/fs/ext4/namei.c
5470 ++++ b/fs/ext4/namei.c
5471 +@@ -1507,6 +1507,7 @@ restart:
5472 + /*
5473 + * We deal with the read-ahead logic here.
5474 + */
5475 ++ cond_resched();
5476 + if (ra_ptr >= ra_max) {
5477 + /* Refill the readahead buffer */
5478 + ra_ptr = 0;
5479 +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
5480 +index a8c0f2b5b6e1..f178af1dffe0 100644
5481 +--- a/fs/ext4/resize.c
5482 ++++ b/fs/ext4/resize.c
5483 +@@ -17,6 +17,33 @@
5484 +
5485 + #include "ext4_jbd2.h"
5486 +
5487 ++struct ext4_rcu_ptr {
5488 ++ struct rcu_head rcu;
5489 ++ void *ptr;
5490 ++};
5491 ++
5492 ++static void ext4_rcu_ptr_callback(struct rcu_head *head)
5493 ++{
5494 ++ struct ext4_rcu_ptr *ptr;
5495 ++
5496 ++ ptr = container_of(head, struct ext4_rcu_ptr, rcu);
5497 ++ kvfree(ptr->ptr);
5498 ++ kfree(ptr);
5499 ++}
5500 ++
5501 ++void ext4_kvfree_array_rcu(void *to_free)
5502 ++{
5503 ++ struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
5504 ++
5505 ++ if (ptr) {
5506 ++ ptr->ptr = to_free;
5507 ++ call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
5508 ++ return;
5509 ++ }
5510 ++ synchronize_rcu();
5511 ++ kvfree(to_free);
5512 ++}
5513 ++
5514 + int ext4_resize_begin(struct super_block *sb)
5515 + {
5516 + struct ext4_sb_info *sbi = EXT4_SB(sb);
5517 +@@ -542,8 +569,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
5518 + brelse(gdb);
5519 + goto out;
5520 + }
5521 +- memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
5522 +- gdb->b_size);
5523 ++ memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
5524 ++ s_group_desc, j)->b_data, gdb->b_size);
5525 + set_buffer_uptodate(gdb);
5526 +
5527 + err = ext4_handle_dirty_metadata(handle, NULL, gdb);
5528 +@@ -861,13 +888,15 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
5529 + }
5530 + brelse(dind);
5531 +
5532 +- o_group_desc = EXT4_SB(sb)->s_group_desc;
5533 ++ rcu_read_lock();
5534 ++ o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
5535 + memcpy(n_group_desc, o_group_desc,
5536 + EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
5537 ++ rcu_read_unlock();
5538 + n_group_desc[gdb_num] = gdb_bh;
5539 +- EXT4_SB(sb)->s_group_desc = n_group_desc;
5540 ++ rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
5541 + EXT4_SB(sb)->s_gdb_count++;
5542 +- kvfree(o_group_desc);
5543 ++ ext4_kvfree_array_rcu(o_group_desc);
5544 +
5545 + le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
5546 + err = ext4_handle_dirty_super(handle, sb);
5547 +@@ -911,9 +940,11 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
5548 + return err;
5549 + }
5550 +
5551 +- o_group_desc = EXT4_SB(sb)->s_group_desc;
5552 ++ rcu_read_lock();
5553 ++ o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
5554 + memcpy(n_group_desc, o_group_desc,
5555 + EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
5556 ++ rcu_read_unlock();
5557 + n_group_desc[gdb_num] = gdb_bh;
5558 +
5559 + BUFFER_TRACE(gdb_bh, "get_write_access");
5560 +@@ -924,9 +955,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
5561 + return err;
5562 + }
5563 +
5564 +- EXT4_SB(sb)->s_group_desc = n_group_desc;
5565 ++ rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
5566 + EXT4_SB(sb)->s_gdb_count++;
5567 +- kvfree(o_group_desc);
5568 ++ ext4_kvfree_array_rcu(o_group_desc);
5569 + return err;
5570 + }
5571 +
5572 +@@ -1190,7 +1221,8 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
5573 + * use non-sparse filesystems anymore. This is already checked above.
5574 + */
5575 + if (gdb_off) {
5576 +- gdb_bh = sbi->s_group_desc[gdb_num];
5577 ++ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
5578 ++ gdb_num);
5579 + BUFFER_TRACE(gdb_bh, "get_write_access");
5580 + err = ext4_journal_get_write_access(handle, gdb_bh);
5581 +
5582 +@@ -1272,7 +1304,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
5583 + /*
5584 + * get_write_access() has been called on gdb_bh by ext4_add_new_desc().
5585 + */
5586 +- gdb_bh = sbi->s_group_desc[gdb_num];
5587 ++ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
5588 + /* Update group descriptor block for new group */
5589 + gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
5590 + gdb_off * EXT4_DESC_SIZE(sb));
5591 +@@ -1400,11 +1432,14 @@ static void ext4_update_super(struct super_block *sb,
5592 + percpu_counter_read(&sbi->s_freeclusters_counter));
5593 + if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
5594 + ext4_group_t flex_group;
5595 ++ struct flex_groups *fg;
5596 ++
5597 + flex_group = ext4_flex_group(sbi, group_data[0].group);
5598 ++ fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
5599 + atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
5600 +- &sbi->s_flex_groups[flex_group].free_clusters);
5601 ++ &fg->free_clusters);
5602 + atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
5603 +- &sbi->s_flex_groups[flex_group].free_inodes);
5604 ++ &fg->free_inodes);
5605 + }
5606 +
5607 + /*
5608 +@@ -1499,7 +1534,8 @@ exit_journal:
5609 + for (; gdb_num <= gdb_num_end; gdb_num++) {
5610 + struct buffer_head *gdb_bh;
5611 +
5612 +- gdb_bh = sbi->s_group_desc[gdb_num];
5613 ++ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
5614 ++ gdb_num);
5615 + if (old_gdb == gdb_bh->b_blocknr)
5616 + continue;
5617 + update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
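
ext4_kvfree_array_rcu(), added at the top of resize.c above, lets the grow-the-array paths publish a new pointer immediately and defer freeing the old one past an RCU grace period, falling back to synchronize_rcu() plus kvfree() when the small bookkeeping allocation fails. The writer-side sequence those paths follow, distilled into a sketch (error handling elided, names illustrative):

static int grow_desc_array(struct ext4_sb_info *sbi,
			   struct buffer_head *new_bh, int count)
{
	struct buffer_head **o_desc, **n_desc;

	n_desc = kvmalloc_array(count + 1, sizeof(*n_desc), GFP_KERNEL);
	if (!n_desc)
		return -ENOMEM;

	rcu_read_lock();
	o_desc = rcu_dereference(sbi->s_group_desc);
	memcpy(n_desc, o_desc, count * sizeof(*o_desc));
	rcu_read_unlock();

	n_desc[count] = new_bh;
	rcu_assign_pointer(sbi->s_group_desc, n_desc);	/* publish */
	ext4_kvfree_array_rcu(o_desc);	/* reclaim after a grace period */
	return 0;
}
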
5618 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
5619 +index c51d7ef2e467..12806be10a18 100644
5620 +--- a/fs/ext4/super.c
5621 ++++ b/fs/ext4/super.c
5622 +@@ -970,6 +970,8 @@ static void ext4_put_super(struct super_block *sb)
5623 + {
5624 + struct ext4_sb_info *sbi = EXT4_SB(sb);
5625 + struct ext4_super_block *es = sbi->s_es;
5626 ++ struct buffer_head **group_desc;
5627 ++ struct flex_groups **flex_groups;
5628 + int aborted = 0;
5629 + int i, err;
5630 +
5631 +@@ -1000,15 +1002,23 @@ static void ext4_put_super(struct super_block *sb)
5632 + if (!sb_rdonly(sb))
5633 + ext4_commit_super(sb, 1);
5634 +
5635 ++ rcu_read_lock();
5636 ++ group_desc = rcu_dereference(sbi->s_group_desc);
5637 + for (i = 0; i < sbi->s_gdb_count; i++)
5638 +- brelse(sbi->s_group_desc[i]);
5639 +- kvfree(sbi->s_group_desc);
5640 +- kvfree(sbi->s_flex_groups);
5641 ++ brelse(group_desc[i]);
5642 ++ kvfree(group_desc);
5643 ++ flex_groups = rcu_dereference(sbi->s_flex_groups);
5644 ++ if (flex_groups) {
5645 ++ for (i = 0; i < sbi->s_flex_groups_allocated; i++)
5646 ++ kvfree(flex_groups[i]);
5647 ++ kvfree(flex_groups);
5648 ++ }
5649 ++ rcu_read_unlock();
5650 + percpu_counter_destroy(&sbi->s_freeclusters_counter);
5651 + percpu_counter_destroy(&sbi->s_freeinodes_counter);
5652 + percpu_counter_destroy(&sbi->s_dirs_counter);
5653 + percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
5654 +- percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
5655 ++ percpu_free_rwsem(&sbi->s_writepages_rwsem);
5656 + #ifdef CONFIG_QUOTA
5657 + for (i = 0; i < EXT4_MAXQUOTAS; i++)
5658 + kfree(get_qf_name(sb, sbi, i));
5659 +@@ -2335,8 +2345,8 @@ done:
5660 + int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
5661 + {
5662 + struct ext4_sb_info *sbi = EXT4_SB(sb);
5663 +- struct flex_groups *new_groups;
5664 +- int size;
5665 ++ struct flex_groups **old_groups, **new_groups;
5666 ++ int size, i;
5667 +
5668 + if (!sbi->s_log_groups_per_flex)
5669 + return 0;
5670 +@@ -2345,22 +2355,37 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
5671 + if (size <= sbi->s_flex_groups_allocated)
5672 + return 0;
5673 +
5674 +- size = roundup_pow_of_two(size * sizeof(struct flex_groups));
5675 +- new_groups = kvzalloc(size, GFP_KERNEL);
5676 ++ new_groups = kvzalloc(roundup_pow_of_two(size *
5677 ++ sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
5678 + if (!new_groups) {
5679 +- ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
5680 +- size / (int) sizeof(struct flex_groups));
5681 ++ ext4_msg(sb, KERN_ERR,
5682 ++ "not enough memory for %d flex group pointers", size);
5683 + return -ENOMEM;
5684 + }
5685 +-
5686 +- if (sbi->s_flex_groups) {
5687 +- memcpy(new_groups, sbi->s_flex_groups,
5688 +- (sbi->s_flex_groups_allocated *
5689 +- sizeof(struct flex_groups)));
5690 +- kvfree(sbi->s_flex_groups);
5691 ++ for (i = sbi->s_flex_groups_allocated; i < size; i++) {
5692 ++ new_groups[i] = kvzalloc(roundup_pow_of_two(
5693 ++ sizeof(struct flex_groups)),
5694 ++ GFP_KERNEL);
5695 ++ if (!new_groups[i]) {
5696 ++ for (i--; i >= sbi->s_flex_groups_allocated; i--)
5697 ++ kvfree(new_groups[i]);
5698 ++ kvfree(new_groups);
5699 ++ ext4_msg(sb, KERN_ERR,
5700 ++ "not enough memory for %d flex groups", size);
5701 ++ return -ENOMEM;
5702 ++ }
5703 + }
5704 +- sbi->s_flex_groups = new_groups;
5705 +- sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
5706 ++ rcu_read_lock();
5707 ++ old_groups = rcu_dereference(sbi->s_flex_groups);
5708 ++ if (old_groups)
5709 ++ memcpy(new_groups, old_groups,
5710 ++ (sbi->s_flex_groups_allocated *
5711 ++ sizeof(struct flex_groups *)));
5712 ++ rcu_read_unlock();
5713 ++ rcu_assign_pointer(sbi->s_flex_groups, new_groups);
5714 ++ sbi->s_flex_groups_allocated = size;
5715 ++ if (old_groups)
5716 ++ ext4_kvfree_array_rcu(old_groups);
5717 + return 0;
5718 + }
5719 +
5720 +@@ -2368,6 +2393,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
5721 + {
5722 + struct ext4_sb_info *sbi = EXT4_SB(sb);
5723 + struct ext4_group_desc *gdp = NULL;
5724 ++ struct flex_groups *fg;
5725 + ext4_group_t flex_group;
5726 + int i, err;
5727 +
5728 +@@ -2385,12 +2411,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
5729 + gdp = ext4_get_group_desc(sb, i, NULL);
5730 +
5731 + flex_group = ext4_flex_group(sbi, i);
5732 +- atomic_add(ext4_free_inodes_count(sb, gdp),
5733 +- &sbi->s_flex_groups[flex_group].free_inodes);
5734 ++ fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
5735 ++ atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
5736 + atomic64_add(ext4_free_group_clusters(sb, gdp),
5737 +- &sbi->s_flex_groups[flex_group].free_clusters);
5738 +- atomic_add(ext4_used_dirs_count(sb, gdp),
5739 +- &sbi->s_flex_groups[flex_group].used_dirs);
5740 ++ &fg->free_clusters);
5741 ++ atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
5742 + }
5743 +
5744 + return 1;
5745 +@@ -2964,7 +2989,7 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
5746 + return 0;
5747 + }
5748 +
5749 +-#if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2)
5750 ++#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
5751 + if (!readonly && (ext4_has_feature_quota(sb) ||
5752 + ext4_has_feature_project(sb))) {
5753 + ext4_msg(sb, KERN_ERR,
5754 +@@ -3589,9 +3614,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
5755 + {
5756 + struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
5757 + char *orig_data = kstrdup(data, GFP_KERNEL);
5758 +- struct buffer_head *bh;
5759 ++ struct buffer_head *bh, **group_desc;
5760 + struct ext4_super_block *es = NULL;
5761 + struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
5762 ++ struct flex_groups **flex_groups;
5763 + ext4_fsblk_t block;
5764 + ext4_fsblk_t sb_block = get_sb_block(&data);
5765 + ext4_fsblk_t logical_sb_block;
5766 +@@ -4245,9 +4271,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
5767 + goto failed_mount;
5768 + }
5769 + }
5770 +- sbi->s_group_desc = kvmalloc_array(db_count,
5771 +- sizeof(struct buffer_head *),
5772 +- GFP_KERNEL);
5773 ++ rcu_assign_pointer(sbi->s_group_desc,
5774 ++ kvmalloc_array(db_count,
5775 ++ sizeof(struct buffer_head *),
5776 ++ GFP_KERNEL));
5777 + if (sbi->s_group_desc == NULL) {
5778 + ext4_msg(sb, KERN_ERR, "not enough memory");
5779 + ret = -ENOMEM;
5780 +@@ -4263,14 +4290,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
5781 + }
5782 +
5783 + for (i = 0; i < db_count; i++) {
5784 ++ struct buffer_head *bh;
5785 ++
5786 + block = descriptor_loc(sb, logical_sb_block, i);
5787 +- sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
5788 +- if (!sbi->s_group_desc[i]) {
5789 ++ bh = sb_bread_unmovable(sb, block);
5790 ++ if (!bh) {
5791 + ext4_msg(sb, KERN_ERR,
5792 + "can't read group descriptor %d", i);
5793 + db_count = i;
5794 + goto failed_mount2;
5795 + }
5796 ++ rcu_read_lock();
5797 ++ rcu_dereference(sbi->s_group_desc)[i] = bh;
5798 ++ rcu_read_unlock();
5799 + }
5800 + sbi->s_gdb_count = db_count;
5801 + if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
5802 +@@ -4549,7 +4581,7 @@ no_journal:
5803 + err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
5804 + GFP_KERNEL);
5805 + if (!err)
5806 +- err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
5807 ++ err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
5808 +
5809 + if (err) {
5810 + ext4_msg(sb, KERN_ERR, "insufficient memory");
5811 +@@ -4637,13 +4669,19 @@ failed_mount7:
5812 + ext4_unregister_li_request(sb);
5813 + failed_mount6:
5814 + ext4_mb_release(sb);
5815 +- if (sbi->s_flex_groups)
5816 +- kvfree(sbi->s_flex_groups);
5817 ++ rcu_read_lock();
5818 ++ flex_groups = rcu_dereference(sbi->s_flex_groups);
5819 ++ if (flex_groups) {
5820 ++ for (i = 0; i < sbi->s_flex_groups_allocated; i++)
5821 ++ kvfree(flex_groups[i]);
5822 ++ kvfree(flex_groups);
5823 ++ }
5824 ++ rcu_read_unlock();
5825 + percpu_counter_destroy(&sbi->s_freeclusters_counter);
5826 + percpu_counter_destroy(&sbi->s_freeinodes_counter);
5827 + percpu_counter_destroy(&sbi->s_dirs_counter);
5828 + percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
5829 +- percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
5830 ++ percpu_free_rwsem(&sbi->s_writepages_rwsem);
5831 + failed_mount5:
5832 + ext4_ext_release(sb);
5833 + ext4_release_system_zone(sb);
5834 +@@ -4672,9 +4710,12 @@ failed_mount3:
5835 + if (sbi->s_mmp_tsk)
5836 + kthread_stop(sbi->s_mmp_tsk);
5837 + failed_mount2:
5838 ++ rcu_read_lock();
5839 ++ group_desc = rcu_dereference(sbi->s_group_desc);
5840 + for (i = 0; i < db_count; i++)
5841 +- brelse(sbi->s_group_desc[i]);
5842 +- kvfree(sbi->s_group_desc);
5843 ++ brelse(group_desc[i]);
5844 ++ kvfree(group_desc);
5845 ++ rcu_read_unlock();
5846 + failed_mount:
5847 + if (sbi->s_chksum_driver)
5848 + crypto_free_shash(sbi->s_chksum_driver);
5849 +diff --git a/fs/io_uring.c b/fs/io_uring.c
5850 +index 6ae692b02980..678c62782ba3 100644
5851 +--- a/fs/io_uring.c
5852 ++++ b/fs/io_uring.c
5853 +@@ -1286,11 +1286,17 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
5854 + mutex_unlock(&ctx->uring_lock);
5855 + }
5856 +
5857 +-static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
5858 +- long min)
5859 ++static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
5860 ++ long min)
5861 + {
5862 + int iters = 0, ret = 0;
5863 +
5864 ++ /*
5865 ++ * We disallow the app entering submit/complete with polling, but we
5866 ++ * still need to lock the ring to prevent racing with polled issue
5867 ++ * that got punted to a workqueue.
5868 ++ */
5869 ++ mutex_lock(&ctx->uring_lock);
5870 + do {
5871 + int tmin = 0;
5872 +
5873 +@@ -1326,21 +1332,6 @@ static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
5874 + ret = 0;
5875 + } while (min && !*nr_events && !need_resched());
5876 +
5877 +- return ret;
5878 +-}
5879 +-
5880 +-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
5881 +- long min)
5882 +-{
5883 +- int ret;
5884 +-
5885 +- /*
5886 +- * We disallow the app entering submit/complete with polling, but we
5887 +- * still need to lock the ring to prevent racing with polled issue
5888 +- * that got punted to a workqueue.
5889 +- */
5890 +- mutex_lock(&ctx->uring_lock);
5891 +- ret = __io_iopoll_check(ctx, nr_events, min);
5892 + mutex_unlock(&ctx->uring_lock);
5893 + return ret;
5894 + }
5895 +@@ -3884,7 +3875,7 @@ static int io_sq_thread(void *data)
5896 + */
5897 + mutex_lock(&ctx->uring_lock);
5898 + if (!list_empty(&ctx->poll_list))
5899 +- __io_iopoll_check(ctx, &nr_events, 0);
5900 ++ io_iopoll_getevents(ctx, &nr_events, 0);
5901 + else
5902 + inflight = 0;
5903 + mutex_unlock(&ctx->uring_lock);
5904 +@@ -3908,6 +3899,18 @@ static int io_sq_thread(void *data)
5905 + * to enter the kernel to reap and flush events.
5906 + */
5907 + if (!to_submit || ret == -EBUSY) {
5908 ++ /*
5909 ++ * Drop cur_mm before scheduling, we can't hold it for
5910 ++ * long periods (or over schedule()). Do this before
5911 ++ * adding ourselves to the waitqueue, as the unuse/drop
5912 ++ * may sleep.
5913 ++ */
5914 ++ if (cur_mm) {
5915 ++ unuse_mm(cur_mm);
5916 ++ mmput(cur_mm);
5917 ++ cur_mm = NULL;
5918 ++ }
5919 ++
5920 + /*
5921 + * We're polling. If we're within the defined idle
5922 + * period, then let us spin without work before going
5923 +@@ -3922,18 +3925,6 @@ static int io_sq_thread(void *data)
5924 + continue;
5925 + }
5926 +
5927 +- /*
5928 +- * Drop cur_mm before scheduling, we can't hold it for
5929 +- * long periods (or over schedule()). Do this before
5930 +- * adding ourselves to the waitqueue, as the unuse/drop
5931 +- * may sleep.
5932 +- */
5933 +- if (cur_mm) {
5934 +- unuse_mm(cur_mm);
5935 +- mmput(cur_mm);
5936 +- cur_mm = NULL;
5937 +- }
5938 +-
5939 + prepare_to_wait(&ctx->sqo_wait, &wait,
5940 + TASK_INTERRUPTIBLE);
5941 +
5942 +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
5943 +index 0603dfa9ad90..ab1078e85a58 100644
5944 +--- a/fs/jbd2/transaction.c
5945 ++++ b/fs/jbd2/transaction.c
5946 +@@ -936,8 +936,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
5947 + char *frozen_buffer = NULL;
5948 + unsigned long start_lock, time_lock;
5949 +
5950 +- if (is_handle_aborted(handle))
5951 +- return -EROFS;
5952 + journal = transaction->t_journal;
5953 +
5954 + jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
5955 +@@ -1189,6 +1187,9 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
5956 + struct journal_head *jh;
5957 + int rc;
5958 +
5959 ++ if (is_handle_aborted(handle))
5960 ++ return -EROFS;
5961 ++
5962 + if (jbd2_write_access_granted(handle, bh, false))
5963 + return 0;
5964 +
5965 +@@ -1326,6 +1327,9 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
5966 + struct journal_head *jh;
5967 + char *committed_data = NULL;
5968 +
5969 ++ if (is_handle_aborted(handle))
5970 ++ return -EROFS;
5971 ++
5972 + if (jbd2_write_access_granted(handle, bh, true))
5973 + return 0;
5974 +
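
The jbd2 hunks move the is_handle_aborted() check from do_get_write_access() up into the exported entry points, apparently because the jbd2_write_access_granted() fast path could return success on an aborted handle before the old check was ever reached. The corrected ordering as a guard-clause sketch (function name hypothetical):

int get_write_access_sketch(handle_t *handle, struct buffer_head *bh)
{
	if (is_handle_aborted(handle))
		return -EROFS;		/* fail before any fast path */

	if (jbd2_write_access_granted(handle, bh, false))
		return 0;		/* fast path no longer bypasses
					 * the abort check */

	/* ... slow path: do_get_write_access() ... */
	return 0;
}
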
5975 +diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
5976 +index 11fdb0cc9a83..546e6adfeced 100644
5977 +--- a/include/acpi/acpixf.h
5978 ++++ b/include/acpi/acpixf.h
5979 +@@ -753,6 +753,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
5980 + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
5981 + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
5982 + ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void))
5983 ++ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_fixed_event_status_set(void))
5984 +
5985 + ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
5986 + acpi_get_gpe_device(u32 gpe_index,
5987 +diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
5988 +index 94f047a8a845..d7c403d0dd27 100644
5989 +--- a/include/linux/intel-svm.h
5990 ++++ b/include/linux/intel-svm.h
5991 +@@ -122,7 +122,7 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
5992 + BUG();
5993 + }
5994 +
5995 +-static int intel_svm_is_pasid_valid(struct device *dev, int pasid)
5996 ++static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid)
5997 + {
5998 + return -EINVAL;
5999 + }
6000 +diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
6001 +index 4da8df57618a..6b7b35b5394e 100644
6002 +--- a/include/linux/irqdomain.h
6003 ++++ b/include/linux/irqdomain.h
6004 +@@ -192,7 +192,7 @@ enum {
6005 + IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
6006 +
6007 + /* Irq domain name was allocated in __irq_domain_add() */
6008 +- IRQ_DOMAIN_NAME_ALLOCATED = (1 << 6),
6009 ++ IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1),
6010 +
6011 + /* Irq domain is an IPI domain with virq per cpu */
6012 + IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2),
6013 +diff --git a/include/linux/libata.h b/include/linux/libata.h
6014 +index 2dbde119721d..bff539918d82 100644
6015 +--- a/include/linux/libata.h
6016 ++++ b/include/linux/libata.h
6017 +@@ -1221,6 +1221,7 @@ struct pci_bits {
6018 + };
6019 +
6020 + extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
6021 ++extern void ata_pci_shutdown_one(struct pci_dev *pdev);
6022 + extern void ata_pci_remove_one(struct pci_dev *pdev);
6023 +
6024 + #ifdef CONFIG_PM
6025 +diff --git a/include/linux/tty.h b/include/linux/tty.h
6026 +index bfa4e2ee94a9..bd5fe0e907e8 100644
6027 +--- a/include/linux/tty.h
6028 ++++ b/include/linux/tty.h
6029 +@@ -225,6 +225,8 @@ struct tty_port_client_operations {
6030 + void (*write_wakeup)(struct tty_port *port);
6031 + };
6032 +
6033 ++extern const struct tty_port_client_operations tty_port_default_client_ops;
6034 ++
6035 + struct tty_port {
6036 + struct tty_bufhead buf; /* Locked internally */
6037 + struct tty_struct *tty; /* Back pointer */
6038 +diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
6039 +index a1be64c9940f..22c1f579afe3 100644
6040 +--- a/include/linux/usb/quirks.h
6041 ++++ b/include/linux/usb/quirks.h
6042 +@@ -69,4 +69,7 @@
6043 + /* Hub needs extra delay after resetting its port. */
6044 + #define USB_QUIRK_HUB_SLOW_RESET BIT(14)
6045 +
6046 ++/* device has blacklisted endpoints */
6047 ++#define USB_QUIRK_ENDPOINT_BLACKLIST BIT(15)
6048 ++
6049 + #endif /* __LINUX_USB_QUIRKS_H */
6050 +diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
6051 +index 533f56733ba8..b71b5c4f418c 100644
6052 +--- a/include/scsi/iscsi_proto.h
6053 ++++ b/include/scsi/iscsi_proto.h
6054 +@@ -627,7 +627,6 @@ struct iscsi_reject {
6055 + #define ISCSI_REASON_BOOKMARK_INVALID 9
6056 + #define ISCSI_REASON_BOOKMARK_NO_RESOURCES 10
6057 + #define ISCSI_REASON_NEGOTIATION_RESET 11
6058 +-#define ISCSI_REASON_WAITING_FOR_LOGOUT 12
6059 +
6060 + /* Max. number of Key=Value pairs in a text message */
6061 + #define MAX_KEY_VALUE_PAIRS 8192
6062 +diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
6063 +index 40ab20439fee..a36b7227a15a 100644
6064 +--- a/include/sound/rawmidi.h
6065 ++++ b/include/sound/rawmidi.h
6066 +@@ -77,9 +77,9 @@ struct snd_rawmidi_substream {
6067 + struct list_head list; /* list of all substream for given stream */
6068 + int stream; /* direction */
6069 + int number; /* substream number */
6070 +- unsigned int opened: 1, /* open flag */
6071 +- append: 1, /* append flag (merge more streams) */
6072 +- active_sensing: 1; /* send active sensing when close */
6073 ++ bool opened; /* open flag */
6074 ++ bool append; /* append flag (merge more streams) */
6075 ++ bool active_sensing; /* send active sensing when close */
6076 + int use_count; /* use counter (for output) */
6077 + size_t bytes;
6078 + struct snd_rawmidi *rmidi;
6079 +diff --git a/ipc/sem.c b/ipc/sem.c
6080 +index ec97a7072413..fe12ea8dd2b3 100644
6081 +--- a/ipc/sem.c
6082 ++++ b/ipc/sem.c
6083 +@@ -2368,11 +2368,9 @@ void exit_sem(struct task_struct *tsk)
6084 + ipc_assert_locked_object(&sma->sem_perm);
6085 + list_del(&un->list_id);
6086 +
6087 +- /* we are the last process using this ulp, acquiring ulp->lock
6088 +- * isn't required. Besides that, we are also protected against
6089 +- * IPC_RMID as we hold sma->sem_perm lock now
6090 +- */
6091 ++ spin_lock(&ulp->lock);
6092 + list_del_rcu(&un->list_proc);
6093 ++ spin_unlock(&ulp->lock);
6094 +
6095 + /* perform adjustments registered in un */
6096 + for (i = 0; i < sma->sem_nsems; i++) {
6097 +diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
6098 +index 5b9da0954a27..3668a0bc18ec 100644
6099 +--- a/kernel/bpf/offload.c
6100 ++++ b/kernel/bpf/offload.c
6101 +@@ -321,7 +321,7 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
6102 +
6103 + ulen = info->jited_prog_len;
6104 + info->jited_prog_len = aux->offload->jited_len;
6105 +- if (info->jited_prog_len & ulen) {
6106 ++ if (info->jited_prog_len && ulen) {
6107 + uinsns = u64_to_user_ptr(info->jited_prog_insns);
6108 + ulen = min_t(u32, info->jited_prog_len, ulen);
6109 + if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
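
Editor's note: the one-character change above (`&` to `&&`) replaces a bitwise test with the logical one that was meant. Both operands are buffer lengths, so the copy should run whenever both are non-zero, not whenever they happen to share a set bit. A standalone illustration with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int jited_len = 4096; /* length reported by the driver */
	unsigned int ulen = 2048;      /* buffer size given by user space */

	/* Buggy: 4096 & 2048 == 0, so the copy is silently skipped. */
	if (jited_len & ulen)
		printf("bitwise: would copy\n");
	else
		printf("bitwise: skipped despite both lengths being non-zero\n");

	/* Fixed: both operands are non-zero, so the copy runs. */
	if (jited_len && ulen)
		printf("logical: would copy %u bytes\n",
		       jited_len < ulen ? jited_len : ulen);
	return 0;
}
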
6110 +diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
6111 +index 6af7ae83c4ad..32ec69cdba54 100644
6112 +--- a/kernel/dma/direct.c
6113 ++++ b/kernel/dma/direct.c
6114 +@@ -472,28 +472,26 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
6115 + }
6116 + #endif /* CONFIG_MMU */
6117 +
6118 +-/*
6119 +- * Because 32-bit DMA masks are so common we expect every architecture to be
6120 +- * able to satisfy them - either by not supporting more physical memory, or by
6121 +- * providing a ZONE_DMA32. If neither is the case, the architecture needs to
6122 +- * use an IOMMU instead of the direct mapping.
6123 +- */
6124 + int dma_direct_supported(struct device *dev, u64 mask)
6125 + {
6126 +- u64 min_mask;
6127 +-
6128 +- if (IS_ENABLED(CONFIG_ZONE_DMA))
6129 +- min_mask = DMA_BIT_MASK(zone_dma_bits);
6130 +- else
6131 +- min_mask = DMA_BIT_MASK(32);
6132 ++ u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;
6133 +
6134 +- min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);
6135 ++ /*
6136 ++ * Because 32-bit DMA masks are so common we expect every architecture
6137 ++ * to be able to satisfy them - either by not supporting more physical
6138 ++ * memory, or by providing a ZONE_DMA32. If neither is the case, the
6139 ++ * architecture needs to use an IOMMU instead of the direct mapping.
6140 ++ */
6141 ++ if (mask >= DMA_BIT_MASK(32))
6142 ++ return 1;
6143 +
6144 + /*
6145 + * This check needs to be against the actual bit mask value, so
6146 + * use __phys_to_dma() here so that the SME encryption mask isn't
6147 + * part of the check.
6148 + */
6149 ++ if (IS_ENABLED(CONFIG_ZONE_DMA))
6150 ++ min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
6151 + return mask >= __phys_to_dma(dev, min_mask);
6152 + }
6153 +
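
Editor's note: the reordered dma_direct_supported() accepts any mask of 32 bits or wider unconditionally and only compares narrower masks against addressable memory. A rough userspace model of that accept/reject decision (hypothetical memory size; the ZONE_DMA clamp and the __phys_to_dma() encryption handling are deliberately left out):

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) ((n) == 64 ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t max_pfn = 1ULL << 20;    /* pretend 4 GiB of RAM ... */
	unsigned int page_shift = 12;     /* ... with 4 KiB pages */
	uint64_t min_mask = (max_pfn - 1) << page_shift;
	uint64_t masks[] = { DMA_BIT_MASK(24), DMA_BIT_MASK(32),
			     DMA_BIT_MASK(64) };

	for (int i = 0; i < 3; i++) {
		/* 32-bit masks are always expected to work; narrower
		 * ones must cover the highest physical page. */
		int ok = masks[i] >= DMA_BIT_MASK(32) ||
			 masks[i] >= min_mask;
		printf("mask %#018llx -> %s\n",
		       (unsigned long long)masks[i],
		       ok ? "supported" : "rejected");
	}
	return 0;
}
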
6154 +diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
6155 +index 3924fbe829d4..c9d8eb7f5c02 100644
6156 +--- a/kernel/irq/internals.h
6157 ++++ b/kernel/irq/internals.h
6158 +@@ -128,8 +128,6 @@ static inline void unregister_handler_proc(unsigned int irq,
6159 +
6160 + extern bool irq_can_set_affinity_usr(unsigned int irq);
6161 +
6162 +-extern int irq_select_affinity_usr(unsigned int irq);
6163 +-
6164 + extern void irq_set_thread_affinity(struct irq_desc *desc);
6165 +
6166 + extern int irq_do_set_affinity(struct irq_data *data,
6167 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
6168 +index 1753486b440c..55b080101a20 100644
6169 +--- a/kernel/irq/manage.c
6170 ++++ b/kernel/irq/manage.c
6171 +@@ -442,23 +442,9 @@ int irq_setup_affinity(struct irq_desc *desc)
6172 + {
6173 + return irq_select_affinity(irq_desc_get_irq(desc));
6174 + }
6175 +-#endif
6176 ++#endif /* CONFIG_AUTO_IRQ_AFFINITY */
6177 ++#endif /* CONFIG_SMP */
6178 +
6179 +-/*
6180 +- * Called when a bogus affinity is set via /proc/irq
6181 +- */
6182 +-int irq_select_affinity_usr(unsigned int irq)
6183 +-{
6184 +- struct irq_desc *desc = irq_to_desc(irq);
6185 +- unsigned long flags;
6186 +- int ret;
6187 +-
6188 +- raw_spin_lock_irqsave(&desc->lock, flags);
6189 +- ret = irq_setup_affinity(desc);
6190 +- raw_spin_unlock_irqrestore(&desc->lock, flags);
6191 +- return ret;
6192 +-}
6193 +-#endif
6194 +
6195 + /**
6196 + * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
6197 +diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
6198 +index cfc4f088a0e7..f5958c55406f 100644
6199 +--- a/kernel/irq/proc.c
6200 ++++ b/kernel/irq/proc.c
6201 +@@ -111,6 +111,28 @@ static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
6202 + return show_irq_affinity(AFFINITY_LIST, m);
6203 + }
6204 +
6205 ++#ifndef CONFIG_AUTO_IRQ_AFFINITY
6206 ++static inline int irq_select_affinity_usr(unsigned int irq)
6207 ++{
6208 ++ /*
6209 ++ * If the interrupt is started up already then this fails. The
6210 ++ * interrupt is assigned to an online CPU already. There is no
6211 ++ * point in moving it around randomly. Tell user space that the
6212 ++ * selected mask is bogus.
6213 ++ *
6214 ++ * If not then any change to the affinity is pointless because the
6215 ++ * startup code invokes irq_setup_affinity() which will select
6216 ++ * an online CPU anyway.

6217 ++ */
6218 ++ return -EINVAL;
6219 ++}
6220 ++#else
6221 ++/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
6222 ++static inline int irq_select_affinity_usr(unsigned int irq)
6223 ++{
6224 ++ return irq_select_affinity(irq);
6225 ++}
6226 ++#endif
6227 +
6228 + static ssize_t write_irq_affinity(int type, struct file *file,
6229 + const char __user *buffer, size_t count, loff_t *pos)
6230 +diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
6231 +index ce8f6748678a..9154e745f097 100644
6232 +--- a/kernel/sched/psi.c
6233 ++++ b/kernel/sched/psi.c
6234 +@@ -1199,6 +1199,9 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
6235 + if (static_branch_likely(&psi_disabled))
6236 + return -EOPNOTSUPP;
6237 +
6238 ++ if (!nbytes)
6239 ++ return -EINVAL;
6240 ++
6241 + buf_size = min(nbytes, sizeof(buf));
6242 + if (copy_from_user(buf, user_buf, buf_size))
6243 + return -EFAULT;
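
Editor's note: the two added lines guard psi_write() against a zero-length write. Without them buf_size becomes 0, and the NUL termination the function performs afterwards (buf[buf_size - 1], just past the quoted context) would write to buf[-1]. The same guard in plain C, with memcpy() standing in for copy_from_user() and schematic names:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define BUF_SIZE 32

static ssize_t psi_write_sketch(const char *user_buf, size_t nbytes)
{
	char buf[BUF_SIZE];
	size_t buf_size;

	if (!nbytes)
		return -EINVAL; /* the added guard */

	buf_size = nbytes < sizeof(buf) ? nbytes : sizeof(buf);
	memcpy(buf, user_buf, buf_size);
	buf[buf_size - 1] = '\0'; /* hits buf[-1] without the guard */
	return (ssize_t)nbytes;
}

int main(void)
{
	printf("zero-length write -> %zd\n", psi_write_sketch("", 0));
	return 0;
}
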
6244 +diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c
6245 +index 6d83cafebc69..ad0699ce702f 100644
6246 +--- a/lib/crypto/chacha20poly1305.c
6247 ++++ b/lib/crypto/chacha20poly1305.c
6248 +@@ -235,6 +235,9 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src,
6249 + __le64 lens[2];
6250 + } b __aligned(16);
6251 +
6252 ++ if (WARN_ON(src_len > INT_MAX))
6253 ++ return false;
6254 ++
6255 + chacha_load_key(b.k, key);
6256 +
6257 + b.iv[0] = 0;
6258 +diff --git a/lib/stackdepot.c b/lib/stackdepot.c
6259 +index ed717dd08ff3..81c69c08d1d1 100644
6260 +--- a/lib/stackdepot.c
6261 ++++ b/lib/stackdepot.c
6262 +@@ -83,15 +83,19 @@ static bool init_stack_slab(void **prealloc)
6263 + return true;
6264 + if (stack_slabs[depot_index] == NULL) {
6265 + stack_slabs[depot_index] = *prealloc;
6266 ++ *prealloc = NULL;
6267 + } else {
6268 +- stack_slabs[depot_index + 1] = *prealloc;
6269 ++ /* If this is the last depot slab, do not touch the next one. */
6270 ++ if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
6271 ++ stack_slabs[depot_index + 1] = *prealloc;
6272 ++ *prealloc = NULL;
6273 ++ }
6274 + /*
6275 + * This smp_store_release pairs with smp_load_acquire() from
6276 + * |next_slab_inited| above and in stack_depot_save().
6277 + */
6278 + smp_store_release(&next_slab_inited, 1);
6279 + }
6280 +- *prealloc = NULL;
6281 + return true;
6282 + }
6283 +
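
Editor's note: two things change in init_stack_slab() above. The slot one past the current slab is only written when it actually exists, and *prealloc is cleared only on the branch that consumed it, so an unused preallocated page stays with the caller instead of leaking (or worse, being stored out of bounds). A condensed model of the fixed flow, with made-up constants:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_SLABS 4 /* stands in for STACK_ALLOC_MAX_SLABS */

static void *slabs[MAX_SLABS];
static int depot_index;

static bool init_slab(void **prealloc)
{
	if (slabs[depot_index] == NULL) {
		slabs[depot_index] = *prealloc;
		*prealloc = NULL;                   /* consumed */
	} else if (depot_index + 1 < MAX_SLABS) {
		slabs[depot_index + 1] = *prealloc;
		*prealloc = NULL;                   /* consumed */
	}
	/* On the last slab, *prealloc is left for the caller to free. */
	return true;
}

int main(void)
{
	static char page_a[64], page_b[64];
	void *prealloc = page_b;

	depot_index = MAX_SLABS - 1;
	slabs[depot_index] = page_a; /* current slot already in use */
	init_slab(&prealloc);
	printf("prealloc still owned by caller: %s\n",
	       prealloc ? "yes" : "no");
	return 0;
}
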
6284 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
6285 +index 27c231bf4565..eda490113372 100644
6286 +--- a/mm/memcontrol.c
6287 ++++ b/mm/memcontrol.c
6288 +@@ -409,8 +409,10 @@ int memcg_expand_shrinker_maps(int new_id)
6289 + if (mem_cgroup_is_root(memcg))
6290 + continue;
6291 + ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
6292 +- if (ret)
6293 ++ if (ret) {
6294 ++ mem_cgroup_iter_break(NULL, memcg);
6295 + goto unlock;
6296 ++ }
6297 + }
6298 + unlock:
6299 + if (!ret)
6300 +diff --git a/mm/mmap.c b/mm/mmap.c
6301 +index 71e4ffc83bcd..cb2c79a3e914 100644
6302 +--- a/mm/mmap.c
6303 ++++ b/mm/mmap.c
6304 +@@ -195,8 +195,6 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
6305 + bool downgraded = false;
6306 + LIST_HEAD(uf);
6307 +
6308 +- brk = untagged_addr(brk);
6309 +-
6310 + if (down_write_killable(&mm->mmap_sem))
6311 + return -EINTR;
6312 +
6313 +@@ -1561,8 +1559,6 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
6314 + struct file *file = NULL;
6315 + unsigned long retval;
6316 +
6317 +- addr = untagged_addr(addr);
6318 +-
6319 + if (!(flags & MAP_ANONYMOUS)) {
6320 + audit_mmap_fd(fd, flags);
6321 + file = fget(fd);
6322 +diff --git a/mm/mremap.c b/mm/mremap.c
6323 +index 122938dcec15..af363063ea23 100644
6324 +--- a/mm/mremap.c
6325 ++++ b/mm/mremap.c
6326 +@@ -607,7 +607,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
6327 + LIST_HEAD(uf_unmap);
6328 +
6329 + addr = untagged_addr(addr);
6330 +- new_addr = untagged_addr(new_addr);
6331 +
6332 + if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
6333 + return ret;
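
Editor's note: the three mm/ hunks drop untagged_addr() from brk(), ksys_mmap_pgoff() and the new_address argument of mremap(), so those inputs are no longer stripped of an arm64 top-byte tag before validation. For reference, untagged_addr() on arm64 is (to my understanding) a sign-extension from bit 55; a userspace model of the stripping that these paths no longer perform, illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Model of arm64 untagged_addr(): sign-extend from bit 55, clearing a
 * tag kept in the top byte. Arithmetic right shift on a signed type is
 * assumed, as on all mainstream compilers. */
static uint64_t untag(uint64_t addr)
{
	return (uint64_t)((int64_t)(addr << 8) >> 8);
}

int main(void)
{
	uint64_t tagged = 0x5a00ffff12345000ULL; /* tag 0x5a in top byte */

	printf("%#llx -> %#llx\n",
	       (unsigned long long)tagged,
	       (unsigned long long)untag(tagged));
	/* After this patch, the affected syscalls see the tagged value
	 * as-is and treat it as an ordinary (likely invalid) address. */
	return 0;
}
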
6334 +diff --git a/mm/sparse.c b/mm/sparse.c
6335 +index 3918fc3eaef1..29d92e7f55c4 100644
6336 +--- a/mm/sparse.c
6337 ++++ b/mm/sparse.c
6338 +@@ -886,7 +886,7 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
6339 + * Poison uninitialized struct pages in order to catch invalid flags
6340 + * combinations.
6341 + */
6342 +- page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
6343 ++ page_init_poison(memmap, sizeof(struct page) * nr_pages);
6344 +
6345 + ms = __nr_to_section(section_nr);
6346 + set_section_nid(section_nr, nid);
6347 +diff --git a/mm/vmscan.c b/mm/vmscan.c
6348 +index 572fb17c6273..af4b2b3d4e0d 100644
6349 +--- a/mm/vmscan.c
6350 ++++ b/mm/vmscan.c
6351 +@@ -2429,10 +2429,13 @@ out:
6352 + /*
6353 + * Scan types proportional to swappiness and
6354 + * their relative recent reclaim efficiency.
6355 +- * Make sure we don't miss the last page
6356 +- * because of a round-off error.
6357 ++ * Make sure we don't miss the last page on
6358 ++ * the offlined memory cgroups because of a
6359 ++ * round-off error.
6360 + */
6361 +- scan = DIV64_U64_ROUND_UP(scan * fraction[file],
6362 ++ scan = mem_cgroup_online(memcg) ?
6363 ++ div64_u64(scan * fraction[file], denominator) :
6364 ++ DIV64_U64_ROUND_UP(scan * fraction[file],
6365 + denominator);
6366 + break;
6367 + case SCAN_FILE:
6368 +diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
6369 +index ced3fc8fad7c..6520d9ec1297 100644
6370 +--- a/net/netfilter/xt_hashlimit.c
6371 ++++ b/net/netfilter/xt_hashlimit.c
6372 +@@ -851,6 +851,8 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
6373 + return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 3);
6374 + }
6375 +
6376 ++#define HASHLIMIT_MAX_SIZE 1048576
6377 ++
6378 + static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
6379 + struct xt_hashlimit_htable **hinfo,
6380 + struct hashlimit_cfg3 *cfg,
6381 +@@ -861,6 +863,14 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
6382 +
6383 + if (cfg->gc_interval == 0 || cfg->expire == 0)
6384 + return -EINVAL;
6385 ++ if (cfg->size > HASHLIMIT_MAX_SIZE) {
6386 ++ cfg->size = HASHLIMIT_MAX_SIZE;
6387 ++ pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
6388 ++ }
6389 ++ if (cfg->max > HASHLIMIT_MAX_SIZE) {
6390 ++ cfg->max = HASHLIMIT_MAX_SIZE;
6391 ++ pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
6392 ++ }
6393 + if (par->family == NFPROTO_IPV4) {
6394 + if (cfg->srcmask > 32 || cfg->dstmask > 32)
6395 + return -EINVAL;
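
Editor's note: the new checks cap the user-supplied size and max at HASHLIMIT_MAX_SIZE instead of failing outright, logging a ratelimited note when they truncate, so an oversized value can no longer request an enormous hashtable allocation while existing rulesets still load. The clamp-and-warn shape, standalone with schematic names:

#include <stdio.h>

#define HASHLIMIT_MAX_SIZE 1048576

/* Clamp an untrusted configuration value and tell the admin about it
 * rather than rejecting the whole ruleset. */
static unsigned int clamp_cfg(const char *name, unsigned int val)
{
	if (val > HASHLIMIT_MAX_SIZE) {
		fprintf(stderr, "%s too large, truncated to %u\n",
			name, HASHLIMIT_MAX_SIZE);
		return HASHLIMIT_MAX_SIZE;
	}
	return val;
}

int main(void)
{
	unsigned int size = clamp_cfg("size", 1u << 31);
	unsigned int max = clamp_cfg("max", 4096);

	printf("size=%u max=%u\n", size, max);
	return 0;
}
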
6396 +diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
6397 +index dbdbc4f18b5e..c9f34b0a11df 100644
6398 +--- a/net/rxrpc/call_object.c
6399 ++++ b/net/rxrpc/call_object.c
6400 +@@ -562,11 +562,11 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
6401 + }
6402 +
6403 + /*
6404 +- * Final call destruction under RCU.
6405 ++ * Final call destruction - but must be done in process context.
6406 + */
6407 +-static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
6408 ++static void rxrpc_destroy_call(struct work_struct *work)
6409 + {
6410 +- struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
6411 ++ struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
6412 + struct rxrpc_net *rxnet = call->rxnet;
6413 +
6414 + rxrpc_put_connection(call->conn);
6415 +@@ -578,6 +578,22 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
6416 + wake_up_var(&rxnet->nr_calls);
6417 + }
6418 +
6419 ++/*
6420 ++ * Final call destruction under RCU.
6421 ++ */
6422 ++static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
6423 ++{
6424 ++ struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
6425 ++
6426 ++ if (in_softirq()) {
6427 ++ INIT_WORK(&call->processor, rxrpc_destroy_call);
6428 ++ if (!rxrpc_queue_work(&call->processor))
6429 ++ BUG();
6430 ++ } else {
6431 ++ rxrpc_destroy_call(&call->processor);
6432 ++ }
6433 ++}
6434 ++
6435 + /*
6436 + * clean up a call
6437 + */
6438 +diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
6439 +index 34085d146fa2..7a228681f89f 100755
6440 +--- a/scripts/get_maintainer.pl
6441 ++++ b/scripts/get_maintainer.pl
6442 +@@ -932,10 +932,6 @@ sub get_maintainers {
6443 + }
6444 + }
6445 +
6446 +- foreach my $fix (@fixes) {
6447 +- vcs_add_commit_signers($fix, "blamed_fixes");
6448 +- }
6449 +-
6450 + foreach my $email (@email_to, @list_to) {
6451 + $email->[0] = deduplicate_email($email->[0]);
6452 + }
6453 +@@ -974,6 +970,10 @@ sub get_maintainers {
6454 + }
6455 + }
6456 +
6457 ++ foreach my $fix (@fixes) {
6458 ++ vcs_add_commit_signers($fix, "blamed_fixes");
6459 ++ }
6460 ++
6461 + my @to = ();
6462 + if ($email || $email_list) {
6463 + if ($email) {
6464 +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
6465 +index 6d9592f0ae1d..cc93157fa950 100644
6466 +--- a/sound/core/seq/seq_clientmgr.c
6467 ++++ b/sound/core/seq/seq_clientmgr.c
6468 +@@ -580,7 +580,7 @@ static int update_timestamp_of_queue(struct snd_seq_event *event,
6469 + event->queue = queue;
6470 + event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
6471 + if (real_time) {
6472 +- event->time.time = snd_seq_timer_get_cur_time(q->timer);
6473 ++ event->time.time = snd_seq_timer_get_cur_time(q->timer, true);
6474 + event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
6475 + } else {
6476 + event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
6477 +@@ -1659,7 +1659,7 @@ static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
6478 + tmr = queue->timer;
6479 + status->events = queue->tickq->cells + queue->timeq->cells;
6480 +
6481 +- status->time = snd_seq_timer_get_cur_time(tmr);
6482 ++ status->time = snd_seq_timer_get_cur_time(tmr, true);
6483 + status->tick = snd_seq_timer_get_cur_tick(tmr);
6484 +
6485 + status->running = tmr->running;
6486 +diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
6487 +index caf68bf42f13..71a6ea62c3be 100644
6488 +--- a/sound/core/seq/seq_queue.c
6489 ++++ b/sound/core/seq/seq_queue.c
6490 +@@ -238,6 +238,8 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
6491 + {
6492 + unsigned long flags;
6493 + struct snd_seq_event_cell *cell;
6494 ++ snd_seq_tick_time_t cur_tick;
6495 ++ snd_seq_real_time_t cur_time;
6496 +
6497 + if (q == NULL)
6498 + return;
6499 +@@ -254,17 +256,18 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
6500 +
6501 + __again:
6502 + /* Process tick queue... */
6503 ++ cur_tick = snd_seq_timer_get_cur_tick(q->timer);
6504 + for (;;) {
6505 +- cell = snd_seq_prioq_cell_out(q->tickq,
6506 +- &q->timer->tick.cur_tick);
6507 ++ cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
6508 + if (!cell)
6509 + break;
6510 + snd_seq_dispatch_event(cell, atomic, hop);
6511 + }
6512 +
6513 + /* Process time queue... */
6514 ++ cur_time = snd_seq_timer_get_cur_time(q->timer, false);
6515 + for (;;) {
6516 +- cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
6517 ++ cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
6518 + if (!cell)
6519 + break;
6520 + snd_seq_dispatch_event(cell, atomic, hop);
6521 +@@ -392,6 +395,7 @@ int snd_seq_queue_check_access(int queueid, int client)
6522 + int snd_seq_queue_set_owner(int queueid, int client, int locked)
6523 + {
6524 + struct snd_seq_queue *q = queueptr(queueid);
6525 ++ unsigned long flags;
6526 +
6527 + if (q == NULL)
6528 + return -EINVAL;
6529 +@@ -401,8 +405,10 @@ int snd_seq_queue_set_owner(int queueid, int client, int locked)
6530 + return -EPERM;
6531 + }
6532 +
6533 ++ spin_lock_irqsave(&q->owner_lock, flags);
6534 + q->locked = locked ? 1 : 0;
6535 + q->owner = client;
6536 ++ spin_unlock_irqrestore(&q->owner_lock, flags);
6537 + queue_access_unlock(q);
6538 + queuefree(q);
6539 +
6540 +@@ -539,15 +545,17 @@ void snd_seq_queue_client_termination(int client)
6541 + unsigned long flags;
6542 + int i;
6543 + struct snd_seq_queue *q;
6544 ++ bool matched;
6545 +
6546 + for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
6547 + if ((q = queueptr(i)) == NULL)
6548 + continue;
6549 + spin_lock_irqsave(&q->owner_lock, flags);
6550 +- if (q->owner == client)
6551 ++ matched = (q->owner == client);
6552 ++ if (matched)
6553 + q->klocked = 1;
6554 + spin_unlock_irqrestore(&q->owner_lock, flags);
6555 +- if (q->owner == client) {
6556 ++ if (matched) {
6557 + if (q->timer->running)
6558 + snd_seq_timer_stop(q->timer);
6559 + snd_seq_timer_reset(q->timer);
6560 +@@ -739,6 +747,8 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
6561 + int i, bpm;
6562 + struct snd_seq_queue *q;
6563 + struct snd_seq_timer *tmr;
6564 ++ bool locked;
6565 ++ int owner;
6566 +
6567 + for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
6568 + if ((q = queueptr(i)) == NULL)
6569 +@@ -750,9 +760,14 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
6570 + else
6571 + bpm = 0;
6572 +
6573 ++ spin_lock_irq(&q->owner_lock);
6574 ++ locked = q->locked;
6575 ++ owner = q->owner;
6576 ++ spin_unlock_irq(&q->owner_lock);
6577 ++
6578 + snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
6579 +- snd_iprintf(buffer, "owned by client : %d\n", q->owner);
6580 +- snd_iprintf(buffer, "lock status : %s\n", q->locked ? "Locked" : "Free");
6581 ++ snd_iprintf(buffer, "owned by client : %d\n", owner);
6582 ++ snd_iprintf(buffer, "lock status : %s\n", locked ? "Locked" : "Free");
6583 + snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
6584 + snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
6585 + snd_iprintf(buffer, "timer state : %s\n", tmr->running ? "Running" : "Stopped");
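
Editor's note: the seq_queue.c hunks all apply one pattern: q->owner and q->locked are only touched under q->owner_lock, and code needing a stable view (the matched flag, the proc dump) snapshots the fields under the lock and then acts on the locals rather than re-reading them after the unlock. The pattern in miniature, using a pthread mutex in place of the kernel spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t owner_lock;
	int owner;
	bool locked;
};

/* Snapshot-under-lock: copy the shared fields out while holding the
 * lock; a second, unlocked read could see a concurrent owner change. */
static void snapshot(struct queue *q, int *owner, bool *locked)
{
	pthread_mutex_lock(&q->owner_lock);
	*owner = q->owner;
	*locked = q->locked;
	pthread_mutex_unlock(&q->owner_lock);
}

int main(void)
{
	struct queue q = {
		.owner_lock = PTHREAD_MUTEX_INITIALIZER,
		.owner = 42,
		.locked = true,
	};
	int owner;
	bool locked;

	snapshot(&q, &owner, &locked);
	printf("owner %d, %s\n", owner, locked ? "Locked" : "Free");
	return 0;
}
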
6586 +diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
6587 +index be59b59c9be4..1645e4142e30 100644
6588 +--- a/sound/core/seq/seq_timer.c
6589 ++++ b/sound/core/seq/seq_timer.c
6590 +@@ -428,14 +428,15 @@ int snd_seq_timer_continue(struct snd_seq_timer *tmr)
6591 + }
6592 +
6593 + /* return current 'real' time. use timeofday() to get better granularity. */
6594 +-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
6595 ++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
6596 ++ bool adjust_ktime)
6597 + {
6598 + snd_seq_real_time_t cur_time;
6599 + unsigned long flags;
6600 +
6601 + spin_lock_irqsave(&tmr->lock, flags);
6602 + cur_time = tmr->cur_time;
6603 +- if (tmr->running) {
6604 ++ if (adjust_ktime && tmr->running) {
6605 + struct timespec64 tm;
6606 +
6607 + ktime_get_ts64(&tm);
6608 +@@ -452,7 +453,13 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
6609 + high PPQ values) */
6610 + snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr)
6611 + {
6612 +- return tmr->tick.cur_tick;
6613 ++ snd_seq_tick_time_t cur_tick;
6614 ++ unsigned long flags;
6615 ++
6616 ++ spin_lock_irqsave(&tmr->lock, flags);
6617 ++ cur_tick = tmr->tick.cur_tick;
6618 ++ spin_unlock_irqrestore(&tmr->lock, flags);
6619 ++ return cur_tick;
6620 + }
6621 +
6622 +
6623 +diff --git a/sound/core/seq/seq_timer.h b/sound/core/seq/seq_timer.h
6624 +index 66c3e344eae3..4bec57df8158 100644
6625 +--- a/sound/core/seq/seq_timer.h
6626 ++++ b/sound/core/seq/seq_timer.h
6627 +@@ -120,7 +120,8 @@ int snd_seq_timer_set_tempo_ppq(struct snd_seq_timer *tmr, int tempo, int ppq);
6628 + int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr, snd_seq_tick_time_t position);
6629 + int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr, snd_seq_real_time_t position);
6630 + int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew, unsigned int base);
6631 +-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr);
6632 ++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
6633 ++ bool adjust_ktime);
6634 + snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr);
6635 +
6636 + extern int seq_default_timer_class;
6637 +diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
6638 +index 886cb7811bd6..2efee794cac6 100644
6639 +--- a/sound/hda/hdmi_chmap.c
6640 ++++ b/sound/hda/hdmi_chmap.c
6641 +@@ -250,7 +250,7 @@ void snd_hdac_print_channel_allocation(int spk_alloc, char *buf, int buflen)
6642 +
6643 + for (i = 0, j = 0; i < ARRAY_SIZE(cea_speaker_allocation_names); i++) {
6644 + if (spk_alloc & (1 << i))
6645 +- j += snprintf(buf + j, buflen - j, " %s",
6646 ++ j += scnprintf(buf + j, buflen - j, " %s",
6647 + cea_speaker_allocation_names[i]);
6648 + }
6649 + buf[j] = '\0'; /* necessary when j == 0 */
6650 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
6651 +index a2fb19129219..6cb72336433a 100644
6652 +--- a/sound/pci/hda/hda_codec.c
6653 ++++ b/sound/pci/hda/hda_codec.c
6654 +@@ -4019,7 +4019,7 @@ void snd_print_pcm_bits(int pcm, char *buf, int buflen)
6655 +
6656 + for (i = 0, j = 0; i < ARRAY_SIZE(bits); i++)
6657 + if (pcm & (AC_SUPPCM_BITS_8 << i))
6658 +- j += snprintf(buf + j, buflen - j, " %d", bits[i]);
6659 ++ j += scnprintf(buf + j, buflen - j, " %d", bits[i]);
6660 +
6661 + buf[j] = '\0'; /* necessary when j == 0 */
6662 + }
6663 +diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
6664 +index d081fb2880a0..82cf1da2ff12 100644
6665 +--- a/sound/pci/hda/hda_eld.c
6666 ++++ b/sound/pci/hda/hda_eld.c
6667 +@@ -360,7 +360,7 @@ static void hdmi_print_pcm_rates(int pcm, char *buf, int buflen)
6668 +
6669 + for (i = 0, j = 0; i < ARRAY_SIZE(alsa_rates); i++)
6670 + if (pcm & (1 << i))
6671 +- j += snprintf(buf + j, buflen - j, " %d",
6672 ++ j += scnprintf(buf + j, buflen - j, " %d",
6673 + alsa_rates[i]);
6674 +
6675 + buf[j] = '\0'; /* necessary when j == 0 */
6676 +diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
6677 +index fcc34417cbce..6dbe99131bc4 100644
6678 +--- a/sound/pci/hda/hda_sysfs.c
6679 ++++ b/sound/pci/hda/hda_sysfs.c
6680 +@@ -222,7 +222,7 @@ static ssize_t init_verbs_show(struct device *dev,
6681 + int i, len = 0;
6682 + mutex_lock(&codec->user_mutex);
6683 + snd_array_for_each(&codec->init_verbs, i, v) {
6684 +- len += snprintf(buf + len, PAGE_SIZE - len,
6685 ++ len += scnprintf(buf + len, PAGE_SIZE - len,
6686 + "0x%02x 0x%03x 0x%04x\n",
6687 + v->nid, v->verb, v->param);
6688 + }
6689 +@@ -272,7 +272,7 @@ static ssize_t hints_show(struct device *dev,
6690 + int i, len = 0;
6691 + mutex_lock(&codec->user_mutex);
6692 + snd_array_for_each(&codec->hints, i, hint) {
6693 +- len += snprintf(buf + len, PAGE_SIZE - len,
6694 ++ len += scnprintf(buf + len, PAGE_SIZE - len,
6695 + "%s = %s\n", hint->key, hint->val);
6696 + }
6697 + mutex_unlock(&codec->user_mutex);
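
Editor's note: the snprintf-to-scnprintf conversions above and below all fix the same append-loop bug. snprintf() returns the length the output would have had, so after a truncated write the accumulated offset can exceed the buffer length, the next size argument underflows, and the next destination pointer lands past the buffer. scnprintf() (a kernel helper in lib/vsprintf.c) returns what was actually stored. A userspace stand-in, assumed behaviour mirrored from the kernel version:

#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for scnprintf(): returns the number of characters actually
 * written, never more than size - 1. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (ret < 0)
		return 0;
	if ((size_t)ret >= size)
		return size ? (int)(size - 1) : 0;
	return ret;
}

int main(void)
{
	char buf[8];
	int j = 0;

	/* With plain snprintf, the first call would return 10 and leave
	 * j past the 8-byte buffer, so sizeof(buf) - j underflows on the
	 * next call. The stand-in caps the advance at what was stored. */
	j += my_scnprintf(buf + j, sizeof(buf) - j, "%s", "0123456789");
	j += my_scnprintf(buf + j, sizeof(buf) - j, "%s", "abc");
	printf("stored %d bytes: \"%s\"\n", j, buf);
	return 0;
}
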
6698 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6699 +index c6b1581c6ffa..7ba3ef6b673d 100644
6700 +--- a/sound/pci/hda/patch_realtek.c
6701 ++++ b/sound/pci/hda/patch_realtek.c
6702 +@@ -2447,7 +2447,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
6703 + SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
6704 + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
6705 + SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
6706 ++ SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
6707 + SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
6708 ++ SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
6709 + SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
6710 + SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
6711 + SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
6712 +diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
6713 +index d1dc8e6366dc..71f2d42188c4 100644
6714 +--- a/sound/soc/atmel/Kconfig
6715 ++++ b/sound/soc/atmel/Kconfig
6716 +@@ -10,11 +10,11 @@ config SND_ATMEL_SOC
6717 + if SND_ATMEL_SOC
6718 +
6719 + config SND_ATMEL_SOC_PDC
6720 +- tristate
6721 ++ bool
6722 + depends on HAS_DMA
6723 +
6724 + config SND_ATMEL_SOC_DMA
6725 +- tristate
6726 ++ bool
6727 + select SND_SOC_GENERIC_DMAENGINE_PCM
6728 +
6729 + config SND_ATMEL_SOC_SSC
6730 +diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
6731 +index 1f6890ed3738..c7d2989791be 100644
6732 +--- a/sound/soc/atmel/Makefile
6733 ++++ b/sound/soc/atmel/Makefile
6734 +@@ -6,8 +6,14 @@ snd-soc-atmel_ssc_dai-objs := atmel_ssc_dai.o
6735 + snd-soc-atmel-i2s-objs := atmel-i2s.o
6736 + snd-soc-mchp-i2s-mcc-objs := mchp-i2s-mcc.o
6737 +
6738 +-obj-$(CONFIG_SND_ATMEL_SOC_PDC) += snd-soc-atmel-pcm-pdc.o
6739 +-obj-$(CONFIG_SND_ATMEL_SOC_DMA) += snd-soc-atmel-pcm-dma.o
6740 ++# pdc and dma need to both be built-in if any user of
6741 ++# ssc is built-in.
6742 ++ifdef CONFIG_SND_ATMEL_SOC_PDC
6743 ++obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel-pcm-pdc.o
6744 ++endif
6745 ++ifdef CONFIG_SND_ATMEL_SOC_DMA
6746 ++obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel-pcm-dma.o
6747 ++endif
6748 + obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o
6749 + obj-$(CONFIG_SND_ATMEL_SOC_I2S) += snd-soc-atmel-i2s.o
6750 + obj-$(CONFIG_SND_MCHP_SOC_I2S_MCC) += snd-soc-mchp-i2s-mcc.o
6751 +diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
6752 +index b517e4bc1b87..41b83ecaf008 100644
6753 +--- a/sound/soc/fsl/fsl_sai.c
6754 ++++ b/sound/soc/fsl/fsl_sai.c
6755 +@@ -1019,12 +1019,24 @@ static int fsl_sai_probe(struct platform_device *pdev)
6756 + ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
6757 + &fsl_sai_dai, 1);
6758 + if (ret)
6759 +- return ret;
6760 ++ goto err_pm_disable;
6761 +
6762 +- if (sai->soc_data->use_imx_pcm)
6763 +- return imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
6764 +- else
6765 +- return devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
6766 ++ if (sai->soc_data->use_imx_pcm) {
6767 ++ ret = imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
6768 ++ if (ret)
6769 ++ goto err_pm_disable;
6770 ++ } else {
6771 ++ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
6772 ++ if (ret)
6773 ++ goto err_pm_disable;
6774 ++ }
6775 ++
6776 ++ return ret;
6777 ++
6778 ++err_pm_disable:
6779 ++ pm_runtime_disable(&pdev->dev);
6780 ++
6781 ++ return ret;
6782 + }
6783 +
6784 + static int fsl_sai_remove(struct platform_device *pdev)
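
Editor's note: the fsl_sai probe rework routes every failure after runtime PM is enabled (earlier in probe, outside this hunk) through a single err_pm_disable label, where the old code returned directly from the two registration paths and left runtime PM enabled. The canonical goto-unwind shape, reduced to a standalone sketch with schematic names; register_pcm() is rigged to fail:

#include <stdio.h>

static int enable(void) { return 0; }
static void disable(void) { }
static int register_component(void) { return 0; }
static int register_pcm(void) { return -1; /* simulated failure */ }

static int probe(void)
{
	int ret;

	ret = enable();
	if (ret)
		return ret;

	ret = register_component();
	if (ret)
		goto err_disable;

	ret = register_pcm();
	if (ret)
		goto err_disable;

	return 0;

err_disable:
	disable(); /* every post-enable failure unwinds here */
	return ret;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}
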
6785 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
6786 +index b6378f025836..935b5375ecc5 100644
6787 +--- a/sound/soc/soc-dapm.c
6788 ++++ b/sound/soc/soc-dapm.c
6789 +@@ -3888,9 +3888,6 @@ snd_soc_dai_link_event_pre_pmu(struct snd_soc_dapm_widget *w,
6790 + runtime->rate = params_rate(params);
6791 +
6792 + out:
6793 +- if (ret < 0)
6794 +- kfree(runtime);
6795 +-
6796 + kfree(params);
6797 + return ret;
6798 + }
6799 +diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
6800 +index 1923b0c36bce..3f645200d3a5 100644
6801 +--- a/sound/soc/sof/intel/hda-dai.c
6802 ++++ b/sound/soc/sof/intel/hda-dai.c
6803 +@@ -443,6 +443,10 @@ struct snd_soc_dai_driver skl_dai[] = {
6804 + .name = "iDisp3 Pin",
6805 + .ops = &hda_link_dai_ops,
6806 + },
6807 ++{
6808 ++ .name = "iDisp4 Pin",
6809 ++ .ops = &hda_link_dai_ops,
6810 ++},
6811 + {
6812 + .name = "Analog CPU DAI",
6813 + .ops = &hda_link_dai_ops,
6814 +diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
6815 +index 55798bc8eae2..686561df8e13 100644
6816 +--- a/sound/soc/sunxi/sun8i-codec.c
6817 ++++ b/sound/soc/sunxi/sun8i-codec.c
6818 +@@ -80,6 +80,7 @@
6819 +
6820 + #define SUN8I_SYS_SR_CTRL_AIF1_FS_MASK GENMASK(15, 12)
6821 + #define SUN8I_SYS_SR_CTRL_AIF2_FS_MASK GENMASK(11, 8)
6822 ++#define SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK GENMASK(3, 2)
6823 + #define SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_MASK GENMASK(5, 4)
6824 + #define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK GENMASK(8, 6)
6825 + #define SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK GENMASK(12, 9)
6826 +@@ -241,7 +242,7 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
6827 + return -EINVAL;
6828 + }
6829 + regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
6830 +- BIT(SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT),
6831 ++ SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK,
6832 + value << SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT);
6833 +
6834 + return 0;
6835 +diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
6836 +index 07f5b462c2ef..aa43e0bd210c 100644
6837 +--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
6838 ++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
6839 +@@ -3,6 +3,11 @@
6840 +
6841 + #include "test_progs.h"
6842 +
6843 ++#define TCP_REPAIR 19 /* TCP sock is under repair right now */
6844 ++
6845 ++#define TCP_REPAIR_ON 1
6846 ++#define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */
6847 ++
6848 + static int connected_socket_v4(void)
6849 + {
6850 + struct sockaddr_in addr = {