
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Fri, 28 Feb 2020 16:38:24
Message-Id: 1582907884.95211ef5ab2b6b97467a0a274eeb89815029df2e.mpagano@gentoo
commit: 95211ef5ab2b6b97467a0a274eeb89815029df2e
Author: Mike Pagano <mpagano@gentoo.org>
AuthorDate: Fri Feb 28 16:38:04 2020 +0000
Commit: Mike Pagano <mpagano@gentoo.org>
CommitDate: Fri Feb 28 16:38:04 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=95211ef5ab2b6b97467a0a274eeb89815029df2e

Linux patch 4.19.107

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

0000_README | 4 +
1106_linux-4.19.107.patch | 4497 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 4501 insertions(+)

diff --git a/0000_README b/0000_README
index 3213eab..7d48aad 100644
--- a/0000_README
+++ b/0000_README
@@ -463,6 +463,10 @@ Patch: 1105_linux-4.19.106.patch
 From: https://www.kernel.org
 Desc: Linux 4.19.106

+Patch: 1106_linux-4.19.107.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.107
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1106_linux-4.19.107.patch b/1106_linux-4.19.107.patch
new file mode 100644
index 0000000..4d8e950
--- /dev/null
+++ b/1106_linux-4.19.107.patch
@@ -0,0 +1,4497 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index d735500d3dad..b9f9da0b886f 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -7340,7 +7340,7 @@ M: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+ M: Rodrigo Vivi <rodrigo.vivi@intel.com>
+ L: intel-gfx@lists.freedesktop.org
+ W: https://01.org/linuxgraphics/
+-B: https://01.org/linuxgraphics/documentation/how-report-bugs
++B: https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
+ C: irc://chat.freenode.net/intel-gfx
+ Q: http://patchwork.freedesktop.org/project/intel-gfx/
+ T: git git://anongit.freedesktop.org/drm-intel
+diff --git a/Makefile b/Makefile
+index c010fd4a3286..69e2527a6968 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 106
++SUBLEVEL = 107
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
+index b3e8db376ecd..57b3745f7f1b 100644
+--- a/arch/powerpc/kernel/signal.c
++++ b/arch/powerpc/kernel/signal.c
+@@ -200,14 +200,27 @@ unsigned long get_tm_stackpointer(struct task_struct *tsk)
+ * normal/non-checkpointed stack pointer.
+ */
+
++ unsigned long ret = tsk->thread.regs->gpr[1];
++
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BUG_ON(tsk != current);
+
+ if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
++ preempt_disable();
+ tm_reclaim_current(TM_CAUSE_SIGNAL);
+ if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
+- return tsk->thread.ckpt_regs.gpr[1];
++ ret = tsk->thread.ckpt_regs.gpr[1];
++
++ /*
++ * If we treclaim, we must clear the current thread's TM bits
++ * before re-enabling preemption. Otherwise we might be
++ * preempted and have the live MSR[TS] changed behind our back
++ * (tm_recheckpoint_new_task() would recheckpoint). Besides, we
++ * enter the signal handler in non-transactional state.
++ */
++ tsk->thread.regs->msr &= ~MSR_TS_MASK;
++ preempt_enable();
+ }
+ #endif
+- return tsk->thread.regs->gpr[1];
++ return ret;
+ }
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 906b05c2adae..06b4b828d258 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -493,19 +493,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
+ */
+ static int save_tm_user_regs(struct pt_regs *regs,
+ struct mcontext __user *frame,
+- struct mcontext __user *tm_frame, int sigret)
++ struct mcontext __user *tm_frame, int sigret,
++ unsigned long msr)
+ {
+- unsigned long msr = regs->msr;
+-
+ WARN_ON(tm_suspend_disabled);
+
+- /* Remove TM bits from thread's MSR. The MSR in the sigcontext
+- * just indicates to userland that we were doing a transaction, but we
+- * don't want to return in transactional state. This also ensures
+- * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
+- */
+- regs->msr &= ~MSR_TS_MASK;
+-
+ /* Save both sets of general registers */
+ if (save_general_regs(&current->thread.ckpt_regs, frame)
+ || save_general_regs(regs, tm_frame))
+@@ -916,6 +908,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+ int sigret;
+ unsigned long tramp;
+ struct pt_regs *regs = tsk->thread.regs;
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++ /* Save the thread's msr before get_tm_stackpointer() changes it */
++ unsigned long msr = regs->msr;
++#endif
+
+ BUG_ON(tsk != current);
+
+@@ -948,13 +944,13 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ tm_frame = &rt_sf->uc_transact.uc_mcontext;
+- if (MSR_TM_ACTIVE(regs->msr)) {
++ if (MSR_TM_ACTIVE(msr)) {
+ if (__put_user((unsigned long)&rt_sf->uc_transact,
+ &rt_sf->uc.uc_link) ||
+ __put_user((unsigned long)tm_frame,
+ &rt_sf->uc_transact.uc_regs))
+ goto badframe;
+- if (save_tm_user_regs(regs, frame, tm_frame, sigret))
++ if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
+ goto badframe;
+ }
+ else
+@@ -1365,6 +1361,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
+ int sigret;
+ unsigned long tramp;
+ struct pt_regs *regs = tsk->thread.regs;
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++ /* Save the thread's msr before get_tm_stackpointer() changes it */
++ unsigned long msr = regs->msr;
++#endif
+
+ BUG_ON(tsk != current);
+
+@@ -1398,9 +1398,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ tm_mctx = &frame->mctx_transact;
+- if (MSR_TM_ACTIVE(regs->msr)) {
++ if (MSR_TM_ACTIVE(msr)) {
+ if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
+- sigret))
++ sigret, msr))
+ goto badframe;
+ }
+ else
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index b5933d7219db..b088b0700d0d 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -196,7 +196,8 @@ static long setup_sigcontext(struct sigcontext __user *sc,
+ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
+ struct sigcontext __user *tm_sc,
+ struct task_struct *tsk,
+- int signr, sigset_t *set, unsigned long handler)
++ int signr, sigset_t *set, unsigned long handler,
++ unsigned long msr)
+ {
+ /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
+ * process never used altivec yet (MSR_VEC is zero in pt_regs of
+@@ -211,12 +212,11 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
+ elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
+ #endif
+ struct pt_regs *regs = tsk->thread.regs;
+- unsigned long msr = tsk->thread.regs->msr;
+ long err = 0;
+
+ BUG_ON(tsk != current);
+
+- BUG_ON(!MSR_TM_ACTIVE(regs->msr));
++ BUG_ON(!MSR_TM_ACTIVE(msr));
+
+ WARN_ON(tm_suspend_disabled);
+
+@@ -226,13 +226,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
+ */
+ msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
+
+- /* Remove TM bits from thread's MSR. The MSR in the sigcontext
+- * just indicates to userland that we were doing a transaction, but we
+- * don't want to return in transactional state. This also ensures
+- * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
+- */
+- regs->msr &= ~MSR_TS_MASK;
+-
+ #ifdef CONFIG_ALTIVEC
+ err |= __put_user(v_regs, &sc->v_regs);
+ err |= __put_user(tm_v_regs, &tm_sc->v_regs);
+@@ -803,6 +796,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+ unsigned long newsp = 0;
+ long err = 0;
+ struct pt_regs *regs = tsk->thread.regs;
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++ /* Save the thread's msr before get_tm_stackpointer() changes it */
++ unsigned long msr = regs->msr;
++#endif
+
+ BUG_ON(tsk != current);
+
+@@ -820,7 +817,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+- if (MSR_TM_ACTIVE(regs->msr)) {
++ if (MSR_TM_ACTIVE(msr)) {
+ /* The ucontext_t passed to userland points to the second
+ * ucontext_t (for transactional state) with its uc_link ptr.
+ */
+@@ -828,7 +825,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+ err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
+ &frame->uc_transact.uc_mcontext,
+ tsk, ksig->sig, NULL,
+- (unsigned long)ksig->ka.sa.sa_handler);
++ (unsigned long)ksig->ka.sa.sa_handler,
++ msr);
+ } else
+ #endif
+ {
+diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
+index ac3c86b21d79..349b1c1ef779 100644
+--- a/arch/s390/include/asm/page.h
++++ b/arch/s390/include/asm/page.h
+@@ -42,7 +42,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end);
+
+ static inline void storage_key_init_range(unsigned long start, unsigned long end)
+ {
+- if (PAGE_DEFAULT_KEY)
++ if (PAGE_DEFAULT_KEY != 0)
+ __storage_key_init_range(start, end);
+ }
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 21a58fcc3dd4..067288d4ef6e 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1040,7 +1040,7 @@ struct kvm_x86_ops {
+ void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
+ void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
+ void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
+- void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
++ int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+ int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
+ int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
+ int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 0f4feee6d082..d2c25a13e1ce 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -455,6 +455,8 @@
+ #define MSR_K7_HWCR 0xc0010015
+ #define MSR_K7_HWCR_SMMLOCK_BIT 0
+ #define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
++#define MSR_K7_HWCR_IRPERF_EN_BIT 30
++#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
+ #define MSR_K7_FID_VID_CTL 0xc0010041
+ #define MSR_K7_FID_VID_STATUS 0xc0010042
+
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 75715fa0e822..120769955687 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -25,6 +25,7 @@
+
+ static const int amd_erratum_383[];
+ static const int amd_erratum_400[];
++static const int amd_erratum_1054[];
+ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
+
+ /*
+@@ -983,6 +984,15 @@ static void init_amd(struct cpuinfo_x86 *c)
+ /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
+ if (!cpu_has(c, X86_FEATURE_XENPV))
+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
++
++ /*
++ * Turn on the Instructions Retired free counter on machines not
++ * susceptible to erratum #1054 "Instructions Retired Performance
++ * Counter May Be Inaccurate".
++ */
++ if (cpu_has(c, X86_FEATURE_IRPERF) &&
++ !cpu_has_amd_erratum(c, amd_erratum_1054))
++ msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
+ }
+
+ #ifdef CONFIG_X86_32
+@@ -1110,6 +1120,10 @@ static const int amd_erratum_400[] =
+ static const int amd_erratum_383[] =
+ AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
+
++/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
++static const int amd_erratum_1054[] =
++ AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
++
+
+ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+ {
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+index da0b6967349a..f878d24ff3c1 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+@@ -1117,9 +1117,12 @@ static const struct sysfs_ops threshold_ops = {
+ .store = store,
+ };
+
++static void threshold_block_release(struct kobject *kobj);
++
+ static struct kobj_type threshold_ktype = {
+ .sysfs_ops = &threshold_ops,
+ .default_attrs = default_attrs,
++ .release = threshold_block_release,
+ };
+
+ static const char *get_name(unsigned int bank, struct threshold_block *b)
+@@ -1152,8 +1155,9 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
+ return buf_mcatype;
+ }
+
+-static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
+- unsigned int block, u32 address)
++static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
++ unsigned int bank, unsigned int block,
++ u32 address)
+ {
+ struct threshold_block *b = NULL;
+ u32 low, high;
+@@ -1197,16 +1201,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
+
+ INIT_LIST_HEAD(&b->miscj);
+
+- if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
+- list_add(&b->miscj,
+- &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
+- } else {
+- per_cpu(threshold_banks, cpu)[bank]->blocks = b;
+- }
++ if (tb->blocks)
++ list_add(&b->miscj, &tb->blocks->miscj);
++ else
++ tb->blocks = b;
+
+- err = kobject_init_and_add(&b->kobj, &threshold_ktype,
+- per_cpu(threshold_banks, cpu)[bank]->kobj,
+- get_name(bank, b));
++ err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
+ if (err)
+ goto out_free;
+ recurse:
+@@ -1214,7 +1214,7 @@ recurse:
+ if (!address)
+ return 0;
+
+- err = allocate_threshold_blocks(cpu, bank, block, address);
++ err = allocate_threshold_blocks(cpu, tb, bank, block, address);
+ if (err)
+ goto out_free;
+
+@@ -1299,8 +1299,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ goto out_free;
+ }
+
+- per_cpu(threshold_banks, cpu)[bank] = b;
+-
+ if (is_shared_bank(bank)) {
+ refcount_set(&b->cpus, 1);
+
+@@ -1311,9 +1309,13 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ }
+ }
+
+- err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
+- if (!err)
+- goto out;
++ err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
++ if (err)
++ goto out_free;
++
++ per_cpu(threshold_banks, cpu)[bank] = b;
++
++ return 0;
+
+ out_free:
+ kfree(b);
+@@ -1322,8 +1324,12 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ return err;
+ }
+
+-static void deallocate_threshold_block(unsigned int cpu,
+- unsigned int bank)
++static void threshold_block_release(struct kobject *kobj)
++{
++ kfree(to_block(kobj));
++}
++
++static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
+ {
+ struct threshold_block *pos = NULL;
+ struct threshold_block *tmp = NULL;
+@@ -1333,13 +1339,11 @@ static void deallocate_threshold_block(unsigned int cpu,
+ return;
+
+ list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
+- kobject_put(&pos->kobj);
+ list_del(&pos->miscj);
+- kfree(pos);
++ kobject_put(&pos->kobj);
+ }
+
+- kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
+- per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
++ kobject_put(&head->blocks->kobj);
+ }
+
+ static void __threshold_remove_blocks(struct threshold_bank *b)
+diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
+index 3cc3b2d130a0..4d000aea05e0 100644
+--- a/arch/x86/kvm/irq_comm.c
++++ b/arch/x86/kvm/irq_comm.c
+@@ -427,7 +427,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
+
+ kvm_set_msi_irq(vcpu->kvm, entry, &irq);
+
+- if (irq.level && kvm_apic_match_dest(vcpu, NULL, 0,
++ if (irq.trig_mode && kvm_apic_match_dest(vcpu, NULL, 0,
+ irq.dest_id, irq.dest_mode))
+ __set_bit(irq.vector, ioapic_handled_vectors);
+ }
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 05905961ecca..8c6392534d14 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -633,9 +633,11 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
+ static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
+ {
+ u8 val;
+- if (pv_eoi_get_user(vcpu, &val) < 0)
++ if (pv_eoi_get_user(vcpu, &val) < 0) {
+ apic_debug("Can't read EOI MSR value: 0x%llx\n",
+ (unsigned long long)vcpu->arch.pv_eoi.msr_val);
++ return false;
++ }
+ return val & 0x1;
+ }
+
+@@ -1060,11 +1062,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
+ apic_clear_vector(vector, apic->regs + APIC_TMR);
+ }
+
+- if (vcpu->arch.apicv_active)
+- kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
+- else {
++ if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
+ kvm_lapic_set_irr(vector, apic);
+-
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ kvm_vcpu_kick(vcpu);
+ }
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 7657dcd72134..0219693bf08e 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -5140,8 +5140,11 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+ return;
+ }
+
+-static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
++static int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+ {
++ if (!vcpu->arch.apicv_active)
++ return -1;
++
+ kvm_lapic_set_irr(vec, vcpu->arch.apic);
+ smp_mb__after_atomic();
+
+@@ -5150,6 +5153,8 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+ kvm_cpu_get_apicid(vcpu->cpu));
+ else
+ kvm_vcpu_wake_up(vcpu);
++
++ return 0;
+ }
+
+ static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index aead984d89ad..9c48484dbe23 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -5725,6 +5725,26 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
+ (ss.selector & SEGMENT_RPL_MASK));
+ }
+
++static bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu,
++ unsigned int port, int size);
++static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
++ struct vmcs12 *vmcs12)
++{
++ unsigned long exit_qualification;
++ unsigned short port;
++ int size;
++
++ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
++ return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
++
++ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
++
++ port = exit_qualification >> 16;
++ size = (exit_qualification & 7) + 1;
++
++ return nested_vmx_check_io_bitmaps(vcpu, port, size);
++}
++
+ /*
+ * Check if guest state is valid. Returns true if valid, false if
+ * not.
+@@ -6264,24 +6284,29 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
+ * 2. If target vcpu isn't running(root mode), kick it to pick up the
+ * interrupt from PIR in next vmentry.
+ */
+-static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
++static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int r;
+
+ r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
+ if (!r)
+- return;
++ return 0;
++
++ if (!vcpu->arch.apicv_active)
++ return -1;
+
+ if (pi_test_and_set_pir(vector, &vmx->pi_desc))
+- return;
++ return 0;
+
+ /* If a previous notification has sent the IPI, nothing to do. */
+ if (pi_test_and_set_on(&vmx->pi_desc))
+- return;
++ return 0;
+
+ if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
+ kvm_vcpu_kick(vcpu);
++
++ return 0;
+ }
+
+ /*
+@@ -9469,23 +9494,17 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
+ static const int kvm_vmx_max_exit_handlers =
+ ARRAY_SIZE(kvm_vmx_exit_handlers);
+
+-static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
+- struct vmcs12 *vmcs12)
++/*
++ * Return true if an IO instruction with the specified port and size should cause
++ * a VM-exit into L1.
++ */
++bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
++ int size)
+ {
+- unsigned long exit_qualification;
++ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ gpa_t bitmap, last_bitmap;
+- unsigned int port;
+- int size;
+ u8 b;
+
+- if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
+- return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
+-
+- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+-
+- port = exit_qualification >> 16;
+- size = (exit_qualification & 7) + 1;
+-
+ last_bitmap = (gpa_t)-1;
+ b = -1;
+
+@@ -13675,6 +13694,39 @@ static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
+ to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
+ }
+
++static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
++ struct x86_instruction_info *info)
++{
++ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
++ unsigned short port;
++ bool intercept;
++ int size;
++
++ if (info->intercept == x86_intercept_in ||
++ info->intercept == x86_intercept_ins) {
++ port = info->src_val;
++ size = info->dst_bytes;
++ } else {
++ port = info->dst_val;
++ size = info->src_bytes;
++ }
++
++ /*
++ * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
++ * VM-exits depend on the 'unconditional IO exiting' VM-execution
++ * control.
++ *
++ * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
++ */
++ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
++ intercept = nested_cpu_has(vmcs12,
++ CPU_BASED_UNCOND_IO_EXITING);
++ else
++ intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
++
++ return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
++}
++
+ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+ struct x86_instruction_info *info,
+ enum x86_intercept_stage stage)
+@@ -13682,19 +13734,31 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+
++ switch (info->intercept) {
+ /*
+ * RDPID causes #UD if disabled through secondary execution controls.
+ * Because it is marked as EmulateOnUD, we need to intercept it here.
+ */
+- if (info->intercept == x86_intercept_rdtscp &&
+- !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
+- ctxt->exception.vector = UD_VECTOR;
+- ctxt->exception.error_code_valid = false;
+- return X86EMUL_PROPAGATE_FAULT;
+- }
++ case x86_intercept_rdtscp:
++ if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
++ ctxt->exception.vector = UD_VECTOR;
++ ctxt->exception.error_code_valid = false;
++ return X86EMUL_PROPAGATE_FAULT;
++ }
++ break;
++
++ case x86_intercept_in:
++ case x86_intercept_ins:
++ case x86_intercept_out:
++ case x86_intercept_outs:
++ return vmx_check_intercept_io(vcpu, info);
+
+ /* TODO: check more intercepts... */
+- return X86EMUL_CONTINUE;
++ default:
++ break;
++ }
++
++ return X86EMUL_UNHANDLEABLE;
+ }
+
+ #ifdef CONFIG_X86_64
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index fa1c5a442957..bbc8710704e2 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -96,6 +96,7 @@ enum board_ids {
+
+ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+ static void ahci_remove_one(struct pci_dev *dev);
++static void ahci_shutdown_one(struct pci_dev *dev);
+ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
+@@ -609,6 +610,7 @@ static struct pci_driver ahci_pci_driver = {
+ .id_table = ahci_pci_tbl,
+ .probe = ahci_init_one,
+ .remove = ahci_remove_one,
++ .shutdown = ahci_shutdown_one,
+ .driver = {
+ .pm = &ahci_pci_pm_ops,
+ },
+@@ -1897,6 +1899,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ return 0;
+ }
+
++static void ahci_shutdown_one(struct pci_dev *pdev)
++{
++ ata_pci_shutdown_one(pdev);
++}
++
+ static void ahci_remove_one(struct pci_dev *pdev)
+ {
+ pm_runtime_get_noresume(&pdev->dev);
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index b45b6f7722ce..75d582ca917f 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -6780,6 +6780,26 @@ void ata_pci_remove_one(struct pci_dev *pdev)
+ ata_host_detach(host);
+ }
+
++void ata_pci_shutdown_one(struct pci_dev *pdev)
++{
++ struct ata_host *host = pci_get_drvdata(pdev);
++ int i;
++
++ for (i = 0; i < host->n_ports; i++) {
++ struct ata_port *ap = host->ports[i];
++
++ ap->pflags |= ATA_PFLAG_FROZEN;
++
++ /* Disable port interrupts */
++ if (ap->ops->freeze)
++ ap->ops->freeze(ap);
++
++ /* Stop the port DMA engines */
++ if (ap->ops->port_stop)
++ ap->ops->port_stop(ap);
++ }
++}
++
+ /* move to PCI subsystem */
+ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
+ {
+@@ -7400,6 +7420,7 @@ EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
+
+ #ifdef CONFIG_PCI
+ EXPORT_SYMBOL_GPL(pci_test_config_bits);
++EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
+ EXPORT_SYMBOL_GPL(ata_pci_remove_one);
+ #ifdef CONFIG_PM
+ EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index e71589e244fb..bf222c4b2f82 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -852,14 +852,17 @@ static void reset_fdc_info(int mode)
+ /* selects the fdc and drive, and enables the fdc's input/dma. */
+ static void set_fdc(int drive)
+ {
++ unsigned int new_fdc = fdc;
++
+ if (drive >= 0 && drive < N_DRIVE) {
+- fdc = FDC(drive);
++ new_fdc = FDC(drive);
+ current_drive = drive;
+ }
+- if (fdc != 1 && fdc != 0) {
++ if (new_fdc >= N_FDC) {
+ pr_info("bad fdc value\n");
+ return;
+ }
++ fdc = new_fdc;
+ set_dor(fdc, ~0, 8);
+ #if N_FDC > 1
+ set_dor(1 - fdc, ~8, 0);
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 28b110cd3977..53e822793d46 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1609,9 +1609,8 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
+ print_once = true;
+ #endif
+ if (__ratelimit(&unseeded_warning))
+- printk_deferred(KERN_NOTICE "random: %s called from %pS "
+- "with crng_init=%d\n", func_name, caller,
+- crng_init);
++ pr_notice("random: %s called from %pS with crng_init=%d\n",
++ func_name, caller, crng_init);
+ }
+
+ /*
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index d66a7fdff898..ceb82e74f5b4 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -738,8 +738,12 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
+ return;
+ }
+ sdmac->desc = desc = to_sdma_desc(&vd->tx);
+-
+- list_del(&vd->node);
++ /*
++ * Do not delete the node in desc_issued list in cyclic mode, otherwise
++ * the desc allocated will never be freed in vchan_dma_desc_free_list
++ */
++ if (!(sdmac->flags & IMX_DMA_SG_LOOP))
++ list_del(&vd->node);
+
+ sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
+ sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
+@@ -1040,6 +1044,7 @@ static void sdma_channel_terminate_work(struct work_struct *work)
+
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ vchan_get_all_descriptors(&sdmac->vc, &head);
++ sdmac->desc = NULL;
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ vchan_dma_desc_free_list(&sdmac->vc, &head);
+ }
+@@ -1047,19 +1052,11 @@ static void sdma_channel_terminate_work(struct work_struct *work)
+ static int sdma_disable_channel_async(struct dma_chan *chan)
+ {
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+- unsigned long flags;
+-
+- spin_lock_irqsave(&sdmac->vc.lock, flags);
+
+ sdma_disable_channel(chan);
+
+- if (sdmac->desc) {
+- vchan_terminate_vdesc(&sdmac->desc->vd);
+- sdmac->desc = NULL;
++ if (sdmac->desc)
+ schedule_work(&sdmac->terminate_worker);
+- }
+-
+- spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 83f2717fcf81..9e74f4304313 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -205,7 +205,12 @@ static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
+
+ static u32 soc15_get_xclk(struct amdgpu_device *adev)
+ {
+- return adev->clock.spll.reference_freq;
++ u32 reference_clock = adev->clock.spll.reference_freq;
++
++ if (adev->asic_type == CHIP_RAVEN)
++ return reference_clock / 4;
++
++ return reference_clock;
+ }
+
+
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+index b3db4553098d..d343ae66c64f 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+@@ -405,6 +405,8 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
+ asyw->clr.ntfy = armw->ntfy.handle != 0;
+ asyw->clr.sema = armw->sema.handle != 0;
+ asyw->clr.xlut = armw->xlut.handle != 0;
++ if (asyw->clr.xlut && asyw->visible)
++ asyw->set.xlut = asyw->xlut.handle != 0;
+ if (wndw->func->image_clr)
+ asyw->clr.image = armw->image.handle[0] != 0;
+ }
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 9899f7e155a5..f39670c5c25c 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2584,6 +2584,17 @@ isert_wait4logout(struct isert_conn *isert_conn)
+ }
+ }
+
++static void
++isert_wait4cmds(struct iscsi_conn *conn)
++{
++ isert_info("iscsi_conn %p\n", conn);
++
++ if (conn->sess) {
++ target_sess_cmd_list_set_waiting(conn->sess->se_sess);
++ target_wait_for_sess_cmds(conn->sess->se_sess);
++ }
++}
++
+ /**
+ * isert_put_unsol_pending_cmds() - Drop commands waiting for
+ * unsolicitate dataout
+@@ -2631,6 +2642,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+
+ ib_drain_qp(isert_conn->qp);
+ isert_put_unsol_pending_cmds(conn);
++ isert_wait4cmds(conn);
+ isert_wait4logout(isert_conn);
+
+ queue_work(isert_release_wq, &isert_conn->release_work);
+diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
+index ee70e9921cf1..9a6ed5eeaad1 100644
+--- a/drivers/iommu/qcom_iommu.c
++++ b/drivers/iommu/qcom_iommu.c
+@@ -333,21 +333,19 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
+ {
+ struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+
+- if (WARN_ON(qcom_domain->iommu)) /* forgot to detach? */
+- return;
+-
+ iommu_put_dma_cookie(domain);
+
+- /* NOTE: unmap can be called after client device is powered off,
+- * for example, with GPUs or anything involving dma-buf. So we
+- * cannot rely on the device_link. Make sure the IOMMU is on to
+- * avoid unclocked accesses in the TLB inv path:
+- */
+- pm_runtime_get_sync(qcom_domain->iommu->dev);
+-
+- free_io_pgtable_ops(qcom_domain->pgtbl_ops);
+-
+- pm_runtime_put_sync(qcom_domain->iommu->dev);
++ if (qcom_domain->iommu) {
++ /*
++ * NOTE: unmap can be called after client device is powered
++ * off, for example, with GPUs or anything involving dma-buf.
++ * So we cannot rely on the device_link. Make sure the IOMMU
++ * is on to avoid unclocked accesses in the TLB inv path:
++ */
++ pm_runtime_get_sync(qcom_domain->iommu->dev);
++ free_io_pgtable_ops(qcom_domain->pgtbl_ops);
++ pm_runtime_put_sync(qcom_domain->iommu->dev);
++ }
+
+ kfree(qcom_domain);
+ }
+@@ -392,7 +390,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
+ struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ unsigned i;
+
+- if (!qcom_domain->iommu)
++ if (WARN_ON(!qcom_domain->iommu))
+ return;
+
+ pm_runtime_get_sync(qcom_iommu->dev);
+@@ -405,8 +403,6 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
+ ctx->domain = NULL;
+ }
+ pm_runtime_put_sync(qcom_iommu->dev);
+-
+- qcom_domain->iommu = NULL;
+ }
+
+ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 838ee58d80cd..e8bc25aed44c 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -569,6 +569,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+ }
+
+ INIT_WORK(&ctrl->ana_work, nvme_ana_work);
++ kfree(ctrl->ana_log_buf);
+ ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
+ if (!ctrl->ana_log_buf) {
+ error = -ENOMEM;
+diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
+index be815330ed95..e3df4bf521b5 100644
+--- a/drivers/staging/android/ashmem.c
++++ b/drivers/staging/android/ashmem.c
+@@ -350,8 +350,23 @@ static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
+ _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
+ }
+
++static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ /* do not allow to mmap ashmem backing shmem file directly */
++ return -EPERM;
++}
++
++static unsigned long
++ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr,
++ unsigned long len, unsigned long pgoff,
++ unsigned long flags)
++{
++ return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
++}
++
+ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+ {
++ static struct file_operations vmfile_fops;
+ struct ashmem_area *asma = file->private_data;
+ int ret = 0;
+
+@@ -392,6 +407,19 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+ }
+ vmfile->f_mode |= FMODE_LSEEK;
+ asma->file = vmfile;
++ /*
++ * override mmap operation of the vmfile so that it can't be
++ * remapped which would lead to creation of a new vma with no
++ * asma permission checks. Have to override get_unmapped_area
++ * as well to prevent VM_BUG_ON check for f_ops modification.
++ */
++ if (!vmfile_fops.mmap) {
++ vmfile_fops = *vmfile->f_op;
++ vmfile_fops.mmap = ashmem_vmfile_mmap;
++ vmfile_fops.get_unmapped_area =
++ ashmem_vmfile_get_unmapped_area;
++ }
++ vmfile->f_op = &vmfile_fops;
+ }
+ get_file(asma->file);
+
+diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
+index d44b070d8862..0f5c68edf2c1 100644
+--- a/drivers/staging/greybus/audio_manager.c
++++ b/drivers/staging/greybus/audio_manager.c
+@@ -89,8 +89,8 @@ void gb_audio_manager_remove_all(void)
+
+ list_for_each_entry_safe(module, next, &modules_list, list) {
+ list_del(&module->list);
+- kobject_put(&module->kobj);
+ ida_simple_remove(&module_id, module->id);
++ kobject_put(&module->kobj);
+ }
+
+ is_empty = list_empty(&modules_list);
+diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+index 2db4444267a7..0003f0c38038 100644
+--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+@@ -2026,7 +2026,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
+ struct ieee_param *param;
+ uint ret = 0;
+
+- if (p->length < sizeof(struct ieee_param) || !p->pointer) {
++ if (!p->pointer || p->length != sizeof(struct ieee_param)) {
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -2819,7 +2819,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
+ goto out;
+ }
+
+- if (!p->pointer) {
++ if (!p->pointer || p->length != sizeof(struct ieee_param)) {
+ ret = -EINVAL;
+ goto out;
+ }
+diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+index 10b3f9733bad..4a27c3927da9 100644
+--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
++++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+@@ -478,14 +478,13 @@ int rtl8723bs_xmit_thread(void *context)
+ s32 ret;
+ struct adapter *padapter;
+ struct xmit_priv *pxmitpriv;
+- u8 thread_name[20] = "RTWHALXT";
+-
++ u8 thread_name[20];
+
+ ret = _SUCCESS;
+ padapter = context;
+ pxmitpriv = &padapter->xmitpriv;
+
+- rtw_sprintf(thread_name, 20, "%s-"ADPT_FMT, thread_name, ADPT_ARG(padapter));
++ rtw_sprintf(thread_name, 20, "RTWHALXT-" ADPT_FMT, ADPT_ARG(padapter));
+ thread_enter(thread_name);
+
+ DBG_871X("start "FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
+diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+index 4f120e72c7d2..466d25ccc4bb 100644
+--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+@@ -3400,7 +3400,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
+
+ /* down(&ieee->wx_sem); */
+
+- if (p->length < sizeof(struct ieee_param) || !p->pointer) {
++ if (!p->pointer || p->length != sizeof(struct ieee_param)) {
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -4236,7 +4236,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
+
+
+ /* if (p->length < sizeof(struct ieee_param) || !p->pointer) { */
+- if (!p->pointer) {
++ if (!p->pointer || p->length != sizeof(*param)) {
+ ret = -EINVAL;
+ goto out;
+ }
+diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
+index 3b94e80f1d5e..879ceef517fb 100644
+--- a/drivers/staging/vt6656/dpc.c
++++ b/drivers/staging/vt6656/dpc.c
+@@ -130,7 +130,7 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
+
+ vnt_rf_rssi_to_dbm(priv, *rssi, &rx_dbm);
+
+- priv->bb_pre_ed_rssi = (u8)rx_dbm + 1;
++ priv->bb_pre_ed_rssi = (u8)-rx_dbm + 1;
+ priv->current_rssi = priv->bb_pre_ed_rssi;
+
+ skb_pull(skb, 8);
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 14bd54d0e79d..03e9cb156df9 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1157,9 +1157,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
+ conn->cid);
+
+- if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+- return iscsit_add_reject_cmd(cmd,
+- ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
++ target_get_sess_cmd(&cmd->se_cmd, true);
+
+ cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
+ scsilun_to_int(&hdr->lun));
+@@ -2000,9 +1998,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ conn->sess->se_sess, 0, DMA_NONE,
+ TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+
+- if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+- return iscsit_add_reject_cmd(cmd,
+- ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
++ target_get_sess_cmd(&cmd->se_cmd, true);
+
+ /*
+ * TASK_REASSIGN for ERL=2 / connection stays inside of
+@@ -4123,6 +4119,9 @@ int iscsit_close_connection(
+ iscsit_stop_nopin_response_timer(conn);
+ iscsit_stop_nopin_timer(conn);
+
++ if (conn->conn_transport->iscsit_wait_conn)
++ conn->conn_transport->iscsit_wait_conn(conn);
++
+ /*
+ * During Connection recovery drop unacknowledged out of order
+ * commands for this connection, and prepare the other commands
+@@ -4205,11 +4204,6 @@ int iscsit_close_connection(
+ * must wait until they have completed.
+ */
+ iscsit_check_conn_usage_count(conn);
+- target_sess_cmd_list_set_waiting(sess->se_sess);
+- target_wait_for_sess_cmds(sess->se_sess);
+-
+- if (conn->conn_transport->iscsit_wait_conn)
+- conn->conn_transport->iscsit_wait_conn(conn);
+
+ ahash_request_free(conn->conn_tx_hash);
+ if (conn->conn_rx_hash) {
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index 678bf3365947..42d90ceec279 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -264,6 +264,12 @@ static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
+ return ret;
+ }
+
++static int tb_switch_nvm_no_read(void *priv, unsigned int offset, void *val,
++ size_t bytes)
++{
++ return -EPERM;
++}
++
+ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+ {
+@@ -309,6 +315,7 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
+ config.read_only = true;
+ } else {
+ config.name = "nvm_non_active";
++ config.reg_read = tb_switch_nvm_no_read;
+ config.reg_write = tb_switch_nvm_write;
+ config.root_only = true;
+ }
+diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
+index fa1672993b4c..048a7bcae5f9 100644
+--- a/drivers/tty/serdev/serdev-ttyport.c
++++ b/drivers/tty/serdev/serdev-ttyport.c
+@@ -265,7 +265,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *parent,
+ struct tty_driver *drv, int idx)
+ {
+- const struct tty_port_client_operations *old_ops;
+ struct serdev_controller *ctrl;
+ struct serport *serport;
+ int ret;
+@@ -284,7 +283,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
+
+ ctrl->ops = &ctrl_ops;
+
+- old_ops = port->client_ops;
+ port->client_ops = &client_ops;
+ port->client_data = ctrl;
+
+@@ -297,7 +295,7 @@ struct device *serdev_tty_port_register(struct tty_port *port,
+
+ err_reset_data:
+ port->client_data = NULL;
+- port->client_ops = old_ops;
++ port->client_ops = &tty_port_default_client_ops;
+ serdev_controller_put(ctrl);
+
+ return ERR_PTR(ret);
+@@ -312,8 +310,8 @@ int serdev_tty_port_unregister(struct tty_port *port)
+ return -ENODEV;
+
+ serdev_controller_remove(ctrl);
+- port->client_ops = NULL;
+ port->client_data = NULL;
++ port->client_ops = &tty_port_default_client_ops;
+ serdev_controller_put(ctrl);
+
+ return 0;
+diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
+index 435bec40dee6..2d5c3643e6a5 100644
+--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
++++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
+@@ -375,7 +375,6 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
+ port.port.line = rc;
+
+ port.port.irq = irq_of_parse_and_map(np, 0);
+- port.port.irqflags = IRQF_SHARED;
+ port.port.handle_irq = aspeed_vuart_handle_irq;
+ port.port.iotype = UPIO_MEM;
+ port.port.type = PORT_16550A;
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index 69aaee5d7fe1..b9567ef843fc 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -177,7 +177,7 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
+ struct hlist_head *h;
+ struct hlist_node *n;
+ struct irq_info *i;
+- int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
++ int ret;
+
+ mutex_lock(&hash_mutex);
+
+@@ -212,9 +212,8 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
+ INIT_LIST_HEAD(&up->list);
+ i->head = &up->list;
+ spin_unlock_irq(&i->lock);
+- irq_flags |= up->port.irqflags;
+ ret = request_irq(up->port.irq, serial8250_interrupt,
+- irq_flags, up->port.name, i);
++ up->port.irqflags, up->port.name, i);
+ if (ret < 0)
+ serial_do_unlink(i, up);
+ }
+diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
+index 2488de1c4bc4..8fedc075fb1e 100644
+--- a/drivers/tty/serial/8250/8250_of.c
++++ b/drivers/tty/serial/8250/8250_of.c
+@@ -171,7 +171,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
+
+ port->type = type;
+ port->uartclk = clk;
+- port->irqflags |= IRQF_SHARED;
+
+ if (of_property_read_bool(np, "no-loopback-test"))
+ port->flags |= UPF_SKIP_TEST;
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index aa4de6907f77..5a04d4ddca73 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -2253,6 +2253,10 @@ int serial8250_do_startup(struct uart_port *port)
+ }
+ }
+
++ /* Check if we need to have shared IRQs */
++ if (port->irq && (up->port.flags & UPF_SHARE_IRQ))
++ up->port.irqflags |= IRQF_SHARED;
++
+ if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
+ unsigned char iir1;
+ /*
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index f34520e9ad6e..936d401f20b9 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -490,7 +490,8 @@ static void atmel_stop_tx(struct uart_port *port)
+ atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
+
+ if (atmel_uart_is_half_duplex(port))
+- atmel_start_rx(port);
++ if (!atomic_read(&atmel_port->tasklet_shutdown))
++ atmel_start_rx(port);
+
+ }
+
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 672e97978279..4066cb2b79cb 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -608,7 +608,7 @@ static void imx_uart_dma_tx(struct imx_port *sport)
+
+ sport->tx_bytes = uart_circ_chars_pending(xmit);
+
+- if (xmit->tail < xmit->head) {
++ if (xmit->tail < xmit->head || xmit->head == 0) {
+ sport->dma_tx_nents = 1;
+ sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
+ } else {
+diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
+index b3f7d1a1e97f..4458419f053b 100644
+--- a/drivers/tty/serial/qcom_geni_serial.c
++++ b/drivers/tty/serial/qcom_geni_serial.c
+@@ -85,7 +85,7 @@
+ #define DEF_FIFO_DEPTH_WORDS 16
+ #define DEF_TX_WM 2
+ #define DEF_FIFO_WIDTH_BITS 32
+-#define UART_CONSOLE_RX_WM 2
++#define UART_RX_WM 2
+ #define MAX_LOOPBACK_CFG 3
+
+ #ifdef CONFIG_CONSOLE_POLL
+@@ -101,10 +101,6 @@ struct qcom_geni_serial_port {
+ u32 tx_fifo_depth;
+ u32 tx_fifo_width;
+ u32 rx_fifo_depth;
+- u32 tx_wm;
+- u32 rx_wm;
+- u32 rx_rfr;
+- enum geni_se_xfer_mode xfer_mode;
+ bool setup;
+ int (*handle_rx)(struct uart_port *uport, u32 bytes, bool drop);
+ unsigned int baud;
+@@ -125,6 +121,7 @@ static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop);
+ static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop);
+ static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port);
+ static void qcom_geni_serial_stop_rx(struct uart_port *uport);
++static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop);
+
+ static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
+ 32000000, 48000000, 64000000, 80000000,
+@@ -226,7 +223,7 @@ static unsigned int qcom_geni_serial_get_mctrl(struct uart_port *uport)
+ if (uart_console(uport)) {
+ mctrl |= TIOCM_CTS;
+ } else {
+- geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS);
++ geni_ios = readl(uport->membase + SE_GENI_IOS);
+ if (!(geni_ios & IO2_DATA_IN))
+ mctrl |= TIOCM_CTS;
+ }
+@@ -244,7 +241,7 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport,
+
+ if (!(mctrl & TIOCM_RTS))
+ uart_manual_rfr = UART_MANUAL_RFR_EN | UART_RFR_NOT_READY;
+- writel_relaxed(uart_manual_rfr, uport->membase + SE_UART_MANUAL_RFR);
++ writel(uart_manual_rfr, uport->membase + SE_UART_MANUAL_RFR);
+ }
+
+ static const char *qcom_geni_serial_get_type(struct uart_port *uport)
+@@ -273,9 +270,6 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
+ unsigned int fifo_bits;
+ unsigned long timeout_us = 20000;
+
+- /* Ensure polling is not re-ordered before the prior writes/reads */
+- mb();
+-
+ if (uport->private_data) {
+ port = to_dev_port(uport, uport);
+ baud = port->baud;
+@@ -295,7 +289,7 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
+ */
+ timeout_us = DIV_ROUND_UP(timeout_us, 10) * 10;
+ while (timeout_us) {
+- reg = readl_relaxed(uport->membase + offset);
++ reg = readl(uport->membase + offset);
+ if ((bool)(reg & field) == set)
+ return true;
+ udelay(10);
+@@ -308,7 +302,7 @@ static void qcom_geni_serial_setup_tx(struct uart_port *uport, u32 xmit_size)
+ {
+ u32 m_cmd;
+
+- writel_relaxed(xmit_size, uport->membase + SE_UART_TX_TRANS_LEN);
++ writel(xmit_size, uport->membase + SE_UART_TX_TRANS_LEN);
+ m_cmd = UART_START_TX << M_OPCODE_SHFT;
+ writel(m_cmd, uport->membase + SE_GENI_M_CMD0);
+ }
+@@ -321,13 +315,13 @@ static void qcom_geni_serial_poll_tx_done(struct uart_port *uport)
+ done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_DONE_EN, true);
+ if (!done) {
+- writel_relaxed(M_GENI_CMD_ABORT, uport->membase +
++ writel(M_GENI_CMD_ABORT, uport->membase +
+ SE_GENI_M_CMD_CTRL_REG);
+ irq_clear |= M_CMD_ABORT_EN;
+ qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_ABORT_EN, true);
+ }
+- writel_relaxed(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR);
++ writel(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ }
+
+ static void qcom_geni_serial_abort_rx(struct uart_port *uport)
+@@ -337,8 +331,8 @@ static void qcom_geni_serial_abort_rx(struct uart_port *uport)
+ writel(S_GENI_CMD_ABORT, uport->membase + SE_GENI_S_CMD_CTRL_REG);
+ qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
+ S_GENI_CMD_ABORT, false);
+- writel_relaxed(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
+- writel_relaxed(FORCE_DEFAULT, uport->membase + GENI_FORCE_DEFAULT_REG);
++ writel(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
++ writel(FORCE_DEFAULT, uport->membase + GENI_FORCE_DEFAULT_REG);
+ }
+
+ #ifdef CONFIG_CONSOLE_POLL
+@@ -347,19 +341,13 @@ static int qcom_geni_serial_get_char(struct uart_port *uport)
+ u32 rx_fifo;
+ u32 status;
+
+- status = readl_relaxed(uport->membase + SE_GENI_M_IRQ_STATUS);
+- writel_relaxed(status, uport->membase + SE_GENI_M_IRQ_CLEAR);
+-
+- status = readl_relaxed(uport->membase + SE_GENI_S_IRQ_STATUS);
+- writel_relaxed(status, uport->membase + SE_GENI_S_IRQ_CLEAR);
++ status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
++ writel(status, uport->membase + SE_GENI_M_IRQ_CLEAR);
+
+- /*
+- * Ensure the writes to clear interrupts is not re-ordered after
+- * reading the data.
+- */
+- mb();
++ status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
++ writel(status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+
+- status = readl_relaxed(uport->membase + SE_GENI_RX_FIFO_STATUS);
++ status = readl(uport->membase + SE_GENI_RX_FIFO_STATUS);
+ if (!(status & RX_FIFO_WC_MSK))
+ return NO_POLL_CHAR;
+
+@@ -370,15 +358,12 @@ static int qcom_geni_serial_get_char(struct uart_port *uport)
+ static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
+ unsigned char c)
+ {
+- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+-
+- writel_relaxed(port->tx_wm, uport->membase + SE_GENI_TX_WATERMARK_REG);
++ writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ qcom_geni_serial_setup_tx(uport, 1);
+ WARN_ON(!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_TX_FIFO_WATERMARK_EN, true));
+- writel_relaxed(c, uport->membase + SE_GENI_TX_FIFOn);
+- writel_relaxed(M_TX_FIFO_WATERMARK_EN, uport->membase +
+- SE_GENI_M_IRQ_CLEAR);
++ writel(c, uport->membase + SE_GENI_TX_FIFOn);
++ writel(M_TX_FIFO_WATERMARK_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ qcom_geni_serial_poll_tx_done(uport);
+ }
+ #endif
+@@ -386,7 +371,7 @@ static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
+ #ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
+ static void qcom_geni_serial_wr_char(struct uart_port *uport, int ch)
+ {
+- writel_relaxed(ch, uport->membase + SE_GENI_TX_FIFOn);
++ writel(ch, uport->membase + SE_GENI_TX_FIFOn);
+ }
+
+ static void
+@@ -405,7 +390,7 @@ __qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
+ bytes_to_send++;
+ }
+
+- writel_relaxed(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
++ writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ qcom_geni_serial_setup_tx(uport, bytes_to_send);
+ for (i = 0; i < count; ) {
+ size_t chars_to_write = 0;
+@@ -423,7 +408,7 @@ __qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
+ chars_to_write = min_t(size_t, count - i, avail / 2);
+ uart_console_write(uport, s + i, chars_to_write,
+ qcom_geni_serial_wr_char);
+- writel_relaxed(M_TX_FIFO_WATERMARK_EN, uport->membase +
++ writel(M_TX_FIFO_WATERMARK_EN, uport->membase +
+ SE_GENI_M_IRQ_CLEAR);
+ i += chars_to_write;
+ }
+@@ -438,6 +423,7 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
+ bool locked = true;
+ unsigned long flags;
+ u32 geni_status;
++ u32 irq_en;
+
+ WARN_ON(co->index < 0 || co->index >= GENI_UART_CONS_PORTS);
+
+@@ -451,7 +437,7 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
+ else
+ spin_lock_irqsave(&uport->lock, flags);
+
+- geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS);
++ geni_status = readl(uport->membase + SE_GENI_STATUS);
+
+ /* Cancel the current write to log the fault */
+ if (!locked) {
+@@ -461,17 +447,22 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
+ geni_se_abort_m_cmd(&port->se);
+ qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_ABORT_EN, true);
+- writel_relaxed(M_CMD_ABORT_EN, uport->membase +
++ writel(M_CMD_ABORT_EN, uport->membase +
+ SE_GENI_M_IRQ_CLEAR);
+ }
+- writel_relaxed(M_CMD_CANCEL_EN, uport->membase +
+- SE_GENI_M_IRQ_CLEAR);
++ writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ } else if ((geni_status & M_GENI_CMD_ACTIVE) && !port->tx_remaining) {
+ /*
+ * It seems we can't interrupt existing transfers if all data
+ * has been sent, in which case we need to look for done first.
+ */
+ qcom_geni_serial_poll_tx_done(uport);
++
++ if (uart_circ_chars_pending(&uport->state->xmit)) {
++ irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
++ writel(irq_en | M_TX_FIFO_WATERMARK_EN,
++ uport->membase + SE_GENI_M_IRQ_EN);
++ }
+ }
+
+ __qcom_geni_serial_console_write(uport, s, count);
+@@ -556,29 +547,20 @@ static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop)
+ static void qcom_geni_serial_start_tx(struct uart_port *uport)
+ {
+ u32 irq_en;
+- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ u32 status;
+
+- if (port->xfer_mode == GENI_SE_FIFO) {
+- /*
+- * readl ensures reading & writing of IRQ_EN register
+- * is not re-ordered before checking the status of the
+- * Serial Engine.
+- */
+- status = readl(uport->membase + SE_GENI_STATUS);
+- if (status & M_GENI_CMD_ACTIVE)
+- return;
++ status = readl(uport->membase + SE_GENI_STATUS);
++ if (status & M_GENI_CMD_ACTIVE)
++ return;
+
+- if (!qcom_geni_serial_tx_empty(uport))
+- return;
++ if (!qcom_geni_serial_tx_empty(uport))
++ return;
+
+- irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
1559 +- irq_en |= M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN;
1560 ++ irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
1561 ++ irq_en |= M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN;
1562 +
1563 +- writel_relaxed(port->tx_wm, uport->membase +
1564 +- SE_GENI_TX_WATERMARK_REG);
1565 +- writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
1566 +- }
1567 ++ writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
1568 ++ writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
1569 + }
1570 +
1571 + static void qcom_geni_serial_stop_tx(struct uart_port *uport)
1572 +@@ -587,35 +569,24 @@ static void qcom_geni_serial_stop_tx(struct uart_port *uport)
1573 + u32 status;
1574 + struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
1575 +
1576 +- irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
1577 +- irq_en &= ~M_CMD_DONE_EN;
1578 +- if (port->xfer_mode == GENI_SE_FIFO) {
1579 +- irq_en &= ~M_TX_FIFO_WATERMARK_EN;
1580 +- writel_relaxed(0, uport->membase +
1581 +- SE_GENI_TX_WATERMARK_REG);
1582 +- }
1583 +- writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
1584 +- status = readl_relaxed(uport->membase + SE_GENI_STATUS);
1585 ++ irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
1586 ++ irq_en &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN);
1587 ++ writel(0, uport->membase + SE_GENI_TX_WATERMARK_REG);
1588 ++ writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
1589 ++ status = readl(uport->membase + SE_GENI_STATUS);
1590 + /* Possible stop tx is called multiple times. */
1591 + if (!(status & M_GENI_CMD_ACTIVE))
1592 + return;
1593 +
1594 +- /*
1595 +- * Ensure cancel command write is not re-ordered before checking
1596 +- * the status of the Primary Sequencer.
1597 +- */
1598 +- mb();
1599 +-
1600 + geni_se_cancel_m_cmd(&port->se);
1601 + if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
1602 + M_CMD_CANCEL_EN, true)) {
1603 + geni_se_abort_m_cmd(&port->se);
1604 + qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
1605 + M_CMD_ABORT_EN, true);
1606 +- writel_relaxed(M_CMD_ABORT_EN, uport->membase +
1607 +- SE_GENI_M_IRQ_CLEAR);
1608 ++ writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
1609 + }
1610 +- writel_relaxed(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
1611 ++ writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
1612 + }
1613 +
1614 + static void qcom_geni_serial_start_rx(struct uart_port *uport)
1615 +@@ -624,27 +595,19 @@ static void qcom_geni_serial_start_rx(struct uart_port *uport)
1616 + u32 status;
1617 + struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
1618 +
1619 +- status = readl_relaxed(uport->membase + SE_GENI_STATUS);
1620 ++ status = readl(uport->membase + SE_GENI_STATUS);
1621 + if (status & S_GENI_CMD_ACTIVE)
1622 + qcom_geni_serial_stop_rx(uport);
1623 +
1624 +- /*
1625 +- * Ensure setup command write is not re-ordered before checking
1626 +- * the status of the Secondary Sequencer.
1627 +- */
1628 +- mb();
1629 +-
1630 + geni_se_setup_s_cmd(&port->se, UART_START_READ, 0);
1631 +
1632 +- if (port->xfer_mode == GENI_SE_FIFO) {
1633 +- irq_en = readl_relaxed(uport->membase + SE_GENI_S_IRQ_EN);
1634 +- irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
1635 +- writel_relaxed(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
1636 ++ irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
1637 ++ irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
1638 ++ writel(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
1639 +
1640 +- irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
1641 +- irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
1642 +- writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
1643 +- }
1644 ++ irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
1645 ++ irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
1646 ++ writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
1647 + }
1648 +
1649 + static void qcom_geni_serial_stop_rx(struct uart_port *uport)
1650 +@@ -652,34 +615,35 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
1651 + u32 irq_en;
1652 + u32 status;
1653 + struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
1654 +- u32 irq_clear = S_CMD_DONE_EN;
1655 ++ u32 s_irq_status;
1656 +
1657 +- if (port->xfer_mode == GENI_SE_FIFO) {
1658 +- irq_en = readl_relaxed(uport->membase + SE_GENI_S_IRQ_EN);
1659 +- irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
1660 +- writel_relaxed(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
1661 ++ irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
1662 ++ irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
1663 ++ writel(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
1664 +
1665 +- irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
1666 +- irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
1667 +- writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
1668 +- }
1669 ++ irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
1670 ++ irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
1671 ++ writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
1672 +
1673 +- status = readl_relaxed(uport->membase + SE_GENI_STATUS);
1674 ++ status = readl(uport->membase + SE_GENI_STATUS);
1675 + /* Possible stop rx is called multiple times. */
1676 + if (!(status & S_GENI_CMD_ACTIVE))
1677 + return;
1678 +
1679 ++ geni_se_cancel_s_cmd(&port->se);
1680 ++ qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
1681 ++ S_CMD_CANCEL_EN, true);
1682 + /*
1683 +- * Ensure cancel command write is not re-ordered before checking
1684 +- * the status of the Secondary Sequencer.
1685 ++ * If timeout occurs secondary engine remains active
1686 ++ * and Abort sequence is executed.
1687 + */
1688 +- mb();
1689 ++ s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
1690 ++ /* Flush the Rx buffer */
1691 ++ if (s_irq_status & S_RX_FIFO_LAST_EN)
1692 ++ qcom_geni_serial_handle_rx(uport, true);
1693 ++ writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
1694 +
1695 +- geni_se_cancel_s_cmd(&port->se);
1696 +- qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
1697 +- S_GENI_CMD_CANCEL, false);
1698 +- status = readl_relaxed(uport->membase + SE_GENI_STATUS);
1699 +- writel_relaxed(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
1700 ++ status = readl(uport->membase + SE_GENI_STATUS);
1701 + if (status & S_GENI_CMD_ACTIVE)
1702 + qcom_geni_serial_abort_rx(uport);
1703 + }
1704 +@@ -693,7 +657,7 @@ static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop)
1705 + u32 total_bytes;
1706 + struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
1707 +
1708 +- status = readl_relaxed(uport->membase + SE_GENI_RX_FIFO_STATUS);
1709 ++ status = readl(uport->membase + SE_GENI_RX_FIFO_STATUS);
1710 + word_cnt = status & RX_FIFO_WC_MSK;
1711 + last_word_partial = status & RX_LAST;
1712 + last_word_byte_cnt = (status & RX_LAST_BYTE_VALID_MSK) >>
1713 +@@ -719,10 +683,11 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
1714 + size_t pending;
1715 + int i;
1716 + u32 status;
1717 ++ u32 irq_en;
1718 + unsigned int chunk;
1719 + int tail;
1720 +
1721 +- status = readl_relaxed(uport->membase + SE_GENI_TX_FIFO_STATUS);
1722 ++ status = readl(uport->membase + SE_GENI_TX_FIFO_STATUS);
1723 +
1724 + /* Complete the current tx command before taking newly added data */
1725 + if (active)
1726 +@@ -747,6 +712,11 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
1727 + if (!port->tx_remaining) {
1728 + qcom_geni_serial_setup_tx(uport, pending);
1729 + port->tx_remaining = pending;
1730 ++
1731 ++ irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
1732 ++ if (!(irq_en & M_TX_FIFO_WATERMARK_EN))
1733 ++ writel(irq_en | M_TX_FIFO_WATERMARK_EN,
1734 ++ uport->membase + SE_GENI_M_IRQ_EN);
1735 + }
1736 +
1737 + remaining = chunk;
1738 +@@ -770,7 +740,23 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
1739 + }
1740 +
1741 + xmit->tail = tail & (UART_XMIT_SIZE - 1);
1742 ++
1743 ++ /*
1744 ++ * The tx fifo watermark is level triggered and latched. Though we had
1745 ++ * cleared it in qcom_geni_serial_isr it will have already reasserted
1746 ++ * so we must clear it again here after our writes.
1747 ++ */
1748 ++ writel(M_TX_FIFO_WATERMARK_EN,
1749 ++ uport->membase + SE_GENI_M_IRQ_CLEAR);
1750 ++
1751 + out_write_wakeup:
1752 ++ if (!port->tx_remaining) {
1753 ++ irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
1754 ++ if (irq_en & M_TX_FIFO_WATERMARK_EN)
1755 ++ writel(irq_en & ~M_TX_FIFO_WATERMARK_EN,
1756 ++ uport->membase + SE_GENI_M_IRQ_EN);
1757 ++ }
1758 ++
1759 + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1760 + uart_write_wakeup(uport);
1761 + }
1762 +@@ -791,12 +777,12 @@ static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
1763 + return IRQ_NONE;
1764 +
1765 + spin_lock_irqsave(&uport->lock, flags);
1766 +- m_irq_status = readl_relaxed(uport->membase + SE_GENI_M_IRQ_STATUS);
1767 +- s_irq_status = readl_relaxed(uport->membase + SE_GENI_S_IRQ_STATUS);
1768 +- geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS);
1769 +- m_irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
1770 +- writel_relaxed(m_irq_status, uport->membase + SE_GENI_M_IRQ_CLEAR);
1771 +- writel_relaxed(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
1772 ++ m_irq_status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
1773 ++ s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
1774 ++ geni_status = readl(uport->membase + SE_GENI_STATUS);
1775 ++ m_irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
1776 ++ writel(m_irq_status, uport->membase + SE_GENI_M_IRQ_CLEAR);
1777 ++ writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
1778 +
1779 + if (WARN_ON(m_irq_status & M_ILLEGAL_CMD_EN))
1780 + goto out_unlock;
1781 +@@ -806,8 +792,7 @@ static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
1782 + tty_insert_flip_char(tport, 0, TTY_OVERRUN);
1783 + }
1784 +
1785 +- if (m_irq_status & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN) &&
1786 +- m_irq_en & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
1787 ++ if (m_irq_status & m_irq_en & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
1788 + qcom_geni_serial_handle_tx(uport, m_irq_status & M_CMD_DONE_EN,
1789 + geni_status & M_GENI_CMD_ACTIVE);
1790 +
1791 +@@ -842,17 +827,6 @@ static void get_tx_fifo_size(struct qcom_geni_serial_port *port)
1792 + (port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE;
1793 + }
1794 +
1795 +-static void set_rfr_wm(struct qcom_geni_serial_port *port)
1796 +-{
1797 +- /*
1798 +- * Set RFR (Flow off) to FIFO_DEPTH - 2.
1799 +- * RX WM level at 10% RX_FIFO_DEPTH.
1800 +- * TX WM level at 10% TX_FIFO_DEPTH.
1801 +- */
1802 +- port->rx_rfr = port->rx_fifo_depth - 2;
1803 +- port->rx_wm = UART_CONSOLE_RX_WM;
1804 +- port->tx_wm = DEF_TX_WM;
1805 +-}
1806 +
1807 + static void qcom_geni_serial_shutdown(struct uart_port *uport)
1808 + {
1809 +@@ -891,21 +865,19 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
1810 +
1811 + get_tx_fifo_size(port);
1812 +
1813 +- set_rfr_wm(port);
1814 +- writel_relaxed(rxstale, uport->membase + SE_UART_RX_STALE_CNT);
1815 ++ writel(rxstale, uport->membase + SE_UART_RX_STALE_CNT);
1816 + /*
1817 + * Make an unconditional cancel on the main sequencer to reset
1818 + * it else we could end up in data loss scenarios.
1819 + */
1820 +- port->xfer_mode = GENI_SE_FIFO;
1821 + if (uart_console(uport))
1822 + qcom_geni_serial_poll_tx_done(uport);
1823 + geni_se_config_packing(&port->se, BITS_PER_BYTE, port->tx_bytes_pw,
1824 + false, true, false);
1825 + geni_se_config_packing(&port->se, BITS_PER_BYTE, port->rx_bytes_pw,
1826 + false, false, true);
1827 +- geni_se_init(&port->se, port->rx_wm, port->rx_rfr);
1828 +- geni_se_select_mode(&port->se, port->xfer_mode);
1829 ++ geni_se_init(&port->se, UART_RX_WM, port->rx_fifo_depth - 2);
1830 ++ geni_se_select_mode(&port->se, GENI_SE_FIFO);
1831 + if (!uart_console(uport)) {
1832 + port->rx_fifo = devm_kcalloc(uport->dev,
1833 + port->rx_fifo_depth, sizeof(u32), GFP_KERNEL);
1834 +@@ -996,10 +968,10 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
1835 + ser_clk_cfg |= clk_div << CLK_DIV_SHFT;
1836 +
1837 + /* parity */
1838 +- tx_trans_cfg = readl_relaxed(uport->membase + SE_UART_TX_TRANS_CFG);
1839 +- tx_parity_cfg = readl_relaxed(uport->membase + SE_UART_TX_PARITY_CFG);
1840 +- rx_trans_cfg = readl_relaxed(uport->membase + SE_UART_RX_TRANS_CFG);
1841 +- rx_parity_cfg = readl_relaxed(uport->membase + SE_UART_RX_PARITY_CFG);
1842 ++ tx_trans_cfg = readl(uport->membase + SE_UART_TX_TRANS_CFG);
1843 ++ tx_parity_cfg = readl(uport->membase + SE_UART_TX_PARITY_CFG);
1844 ++ rx_trans_cfg = readl(uport->membase + SE_UART_RX_TRANS_CFG);
1845 ++ rx_parity_cfg = readl(uport->membase + SE_UART_RX_PARITY_CFG);
1846 + if (termios->c_cflag & PARENB) {
1847 + tx_trans_cfg |= UART_TX_PAR_EN;
1848 + rx_trans_cfg |= UART_RX_PAR_EN;
1849 +@@ -1055,17 +1027,17 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
1850 + uart_update_timeout(uport, termios->c_cflag, baud);
1851 +
1852 + if (!uart_console(uport))
1853 +- writel_relaxed(port->loopback,
1854 ++ writel(port->loopback,
1855 + uport->membase + SE_UART_LOOPBACK_CFG);
1856 +- writel_relaxed(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
1857 +- writel_relaxed(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
1858 +- writel_relaxed(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
1859 +- writel_relaxed(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
1860 +- writel_relaxed(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
1861 +- writel_relaxed(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
1862 +- writel_relaxed(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
1863 +- writel_relaxed(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG);
1864 +- writel_relaxed(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG);
1865 ++ writel(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
1866 ++ writel(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
1867 ++ writel(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
1868 ++ writel(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
1869 ++ writel(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
1870 ++ writel(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
1871 ++ writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
1872 ++ writel(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG);
1873 ++ writel(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG);
1874 + out_restart_rx:
1875 + qcom_geni_serial_start_rx(uport);
1876 + }
1877 +@@ -1156,13 +1128,13 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
1878 + geni_se_init(&se, DEF_FIFO_DEPTH_WORDS / 2, DEF_FIFO_DEPTH_WORDS - 2);
1879 + geni_se_select_mode(&se, GENI_SE_FIFO);
1880 +
1881 +- writel_relaxed(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
1882 +- writel_relaxed(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
1883 +- writel_relaxed(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
1884 +- writel_relaxed(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
1885 +- writel_relaxed(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
1886 +- writel_relaxed(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
1887 +- writel_relaxed(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
1888 ++ writel(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
1889 ++ writel(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
1890 ++ writel(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
1891 ++ writel(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
1892 ++ writel(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
1893 ++ writel(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
1894 ++ writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
1895 +
1896 + dev->con->write = qcom_geni_serial_earlycon_write;
1897 + dev->con->setup = NULL;
1898 +diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
1899 +index c699d41a2a48..fbacb00c2601 100644
1900 +--- a/drivers/tty/tty_port.c
1901 ++++ b/drivers/tty/tty_port.c
1902 +@@ -52,10 +52,11 @@ static void tty_port_default_wakeup(struct tty_port *port)
1903 + }
1904 + }
1905 +
1906 +-static const struct tty_port_client_operations default_client_ops = {
1907 ++const struct tty_port_client_operations tty_port_default_client_ops = {
1908 + .receive_buf = tty_port_default_receive_buf,
1909 + .write_wakeup = tty_port_default_wakeup,
1910 + };
1911 ++EXPORT_SYMBOL_GPL(tty_port_default_client_ops);
1912 +
1913 + void tty_port_init(struct tty_port *port)
1914 + {
1915 +@@ -68,7 +69,7 @@ void tty_port_init(struct tty_port *port)
1916 + spin_lock_init(&port->lock);
1917 + port->close_delay = (50 * HZ) / 100;
1918 + port->closing_wait = (3000 * HZ) / 100;
1919 +- port->client_ops = &default_client_ops;
1920 ++ port->client_ops = &tty_port_default_client_ops;
1921 + kref_init(&port->kref);
1922 + }
1923 + EXPORT_SYMBOL(tty_port_init);
1924 +diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
1925 +index 07496c711d7d..3ac4fe549c2e 100644
1926 +--- a/drivers/tty/vt/selection.c
1927 ++++ b/drivers/tty/vt/selection.c
1928 +@@ -27,6 +27,8 @@
1929 + #include <linux/console.h>
1930 + #include <linux/tty_flip.h>
1931 +
1932 ++#include <linux/sched/signal.h>
1933 ++
1934 + /* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */
1935 + #define isspace(c) ((c) == ' ')
1936 +
1937 +@@ -337,6 +339,7 @@ int paste_selection(struct tty_struct *tty)
1938 + unsigned int count;
1939 + struct tty_ldisc *ld;
1940 + DECLARE_WAITQUEUE(wait, current);
1941 ++ int ret = 0;
1942 +
1943 + console_lock();
1944 + poke_blanked_console();
1945 +@@ -350,6 +353,10 @@ int paste_selection(struct tty_struct *tty)
1946 + add_wait_queue(&vc->paste_wait, &wait);
1947 + while (sel_buffer && sel_buffer_lth > pasted) {
1948 + set_current_state(TASK_INTERRUPTIBLE);
1949 ++ if (signal_pending(current)) {
1950 ++ ret = -EINTR;
1951 ++ break;
1952 ++ }
1953 + if (tty_throttled(tty)) {
1954 + schedule();
1955 + continue;
1956 +@@ -365,5 +372,5 @@ int paste_selection(struct tty_struct *tty)
1957 +
1958 + tty_buffer_unlock_exclusive(&vc->port);
1959 + tty_ldisc_deref(ld);
1960 +- return 0;
1961 ++ return ret;
1962 + }
1963 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
1964 +index d673e3592662..ddaecb1bd9fd 100644
1965 +--- a/drivers/tty/vt/vt.c
1966 ++++ b/drivers/tty/vt/vt.c
1967 +@@ -936,10 +936,21 @@ static void flush_scrollback(struct vc_data *vc)
1968 + WARN_CONSOLE_UNLOCKED();
1969 +
1970 + set_origin(vc);
1971 +- if (vc->vc_sw->con_flush_scrollback)
1972 ++ if (vc->vc_sw->con_flush_scrollback) {
1973 + vc->vc_sw->con_flush_scrollback(vc);
1974 +- else
1975 ++ } else if (con_is_visible(vc)) {
1976 ++ /*
1977 ++ * When no con_flush_scrollback method is provided then the
1978 ++ * legacy way for flushing the scrollback buffer is to use
1979 ++ * a side effect of the con_switch method. We do it only on
1980 ++ * the foreground console as background consoles have no
1981 ++ * scrollback buffers in that case and we obviously don't
1982 ++ * want to switch to them.
1983 ++ */
1984 ++ hide_cursor(vc);
1985 + vc->vc_sw->con_switch(vc);
1986 ++ set_cursor(vc);
1987 ++ }
1988 + }
1989 +
1990 + /*
1991 +diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
1992 +index 73cdc0d633dd..2bb6de89b029 100644
1993 +--- a/drivers/tty/vt/vt_ioctl.c
1994 ++++ b/drivers/tty/vt/vt_ioctl.c
1995 +@@ -876,15 +876,20 @@ int vt_ioctl(struct tty_struct *tty,
1996 + return -EINVAL;
1997 +
1998 + for (i = 0; i < MAX_NR_CONSOLES; i++) {
1999 ++ struct vc_data *vcp;
2000 ++
2001 + if (!vc_cons[i].d)
2002 + continue;
2003 + console_lock();
2004 +- if (v.v_vlin)
2005 +- vc_cons[i].d->vc_scan_lines = v.v_vlin;
2006 +- if (v.v_clin)
2007 +- vc_cons[i].d->vc_font.height = v.v_clin;
2008 +- vc_cons[i].d->vc_resize_user = 1;
2009 +- vc_resize(vc_cons[i].d, v.v_cols, v.v_rows);
2010 ++ vcp = vc_cons[i].d;
2011 ++ if (vcp) {
2012 ++ if (v.v_vlin)
2013 ++ vcp->vc_scan_lines = v.v_vlin;
2014 ++ if (v.v_clin)
2015 ++ vcp->vc_font.height = v.v_clin;
2016 ++ vcp->vc_resize_user = 1;
2017 ++ vc_resize(vcp, v.v_cols, v.v_rows);
2018 ++ }
2019 + console_unlock();
2020 + }
2021 + break;
2022 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
2023 +index 0bf0e62bede3..2025261e97a1 100644
2024 +--- a/drivers/usb/core/config.c
2025 ++++ b/drivers/usb/core/config.c
2026 +@@ -256,6 +256,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
2027 + struct usb_host_interface *ifp, int num_ep,
2028 + unsigned char *buffer, int size)
2029 + {
2030 ++ struct usb_device *udev = to_usb_device(ddev);
2031 + unsigned char *buffer0 = buffer;
2032 + struct usb_endpoint_descriptor *d;
2033 + struct usb_host_endpoint *endpoint;
2034 +@@ -297,6 +298,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
2035 + goto skip_to_next_endpoint_or_interface_descriptor;
2036 + }
2037 +
2038 ++ /* Ignore blacklisted endpoints */
2039 ++ if (udev->quirks & USB_QUIRK_ENDPOINT_BLACKLIST) {
2040 ++ if (usb_endpoint_is_blacklisted(udev, ifp, d)) {
2041 ++ dev_warn(ddev, "config %d interface %d altsetting %d has a blacklisted endpoint with address 0x%X, skipping\n",
2042 ++ cfgno, inum, asnum,
2043 ++ d->bEndpointAddress);
2044 ++ goto skip_to_next_endpoint_or_interface_descriptor;
2045 ++ }
2046 ++ }
2047 ++
2048 + endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
2049 + ++ifp->desc.bNumEndpoints;
2050 +
2051 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2052 +index 6ab4ca1d9ae1..27486b0a027a 100644
2053 +--- a/drivers/usb/core/hub.c
2054 ++++ b/drivers/usb/core/hub.c
2055 +@@ -36,7 +36,9 @@
2056 + #include "otg_whitelist.h"
2057 +
2058 + #define USB_VENDOR_GENESYS_LOGIC 0x05e3
2059 ++#define USB_VENDOR_SMSC 0x0424
2060 + #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
2061 ++#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
2062 +
2063 + #define USB_TP_TRANSMISSION_DELAY 40 /* ns */
2064 + #define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
2065 +@@ -1190,11 +1192,6 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
2066 + #ifdef CONFIG_PM
2067 + udev->reset_resume = 1;
2068 + #endif
2069 +- /* Don't set the change_bits when the device
2070 +- * was powered off.
2071 +- */
2072 +- if (test_bit(port1, hub->power_bits))
2073 +- set_bit(port1, hub->change_bits);
2074 +
2075 + } else {
2076 + /* The power session is gone; tell hub_wq */
2077 +@@ -1700,6 +1697,10 @@ static void hub_disconnect(struct usb_interface *intf)
2078 + kfree(hub->buffer);
2079 +
2080 + pm_suspend_ignore_children(&intf->dev, false);
2081 ++
2082 ++ if (hub->quirk_disable_autosuspend)
2083 ++ usb_autopm_put_interface(intf);
2084 ++
2085 + kref_put(&hub->kref, hub_release);
2086 + }
2087 +
2088 +@@ -1830,6 +1831,11 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
2089 + if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND)
2090 + hub->quirk_check_port_auto_suspend = 1;
2091 +
2092 ++ if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) {
2093 ++ hub->quirk_disable_autosuspend = 1;
2094 ++ usb_autopm_get_interface(intf);
2095 ++ }
2096 ++
2097 + if (hub_configure(hub, &desc->endpoint[0].desc) >= 0)
2098 + return 0;
2099 +
2100 +@@ -5410,6 +5416,10 @@ out_hdev_lock:
2101 + }
2102 +
2103 + static const struct usb_device_id hub_id_table[] = {
2104 ++ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS,
2105 ++ .idVendor = USB_VENDOR_SMSC,
2106 ++ .bInterfaceClass = USB_CLASS_HUB,
2107 ++ .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
2108 + { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
2109 + | USB_DEVICE_ID_MATCH_INT_CLASS,
2110 + .idVendor = USB_VENDOR_GENESYS_LOGIC,
2111 +diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
2112 +index 4accfb63f7dc..d0bbbd76ba8e 100644
2113 +--- a/drivers/usb/core/hub.h
2114 ++++ b/drivers/usb/core/hub.h
2115 +@@ -61,6 +61,7 @@ struct usb_hub {
2116 + unsigned quiescing:1;
2117 + unsigned disconnected:1;
2118 + unsigned in_reset:1;
2119 ++ unsigned quirk_disable_autosuspend:1;
2120 +
2121 + unsigned quirk_check_port_auto_suspend:1;
2122 +
2123 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
2124 +index 6b6413073584..2b24336a72e5 100644
2125 +--- a/drivers/usb/core/quirks.c
2126 ++++ b/drivers/usb/core/quirks.c
2127 +@@ -354,6 +354,10 @@ static const struct usb_device_id usb_quirk_list[] = {
2128 + { USB_DEVICE(0x0904, 0x6103), .driver_info =
2129 + USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
2130 +
2131 ++ /* Sound Devices USBPre2 */
2132 ++ { USB_DEVICE(0x0926, 0x0202), .driver_info =
2133 ++ USB_QUIRK_ENDPOINT_BLACKLIST },
2134 ++
2135 + /* Keytouch QWERTY Panel keyboard */
2136 + { USB_DEVICE(0x0926, 0x3333), .driver_info =
2137 + USB_QUIRK_CONFIG_INTF_STRINGS },
2138 +@@ -445,6 +449,9 @@ static const struct usb_device_id usb_quirk_list[] = {
2139 + /* INTEL VALUE SSD */
2140 + { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
2141 +
2142 ++ /* novation SoundControl XL */
2143 ++ { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
2144 ++
2145 + { } /* terminating entry must be last */
2146 + };
2147 +
2148 +@@ -472,6 +479,39 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
2149 + { } /* terminating entry must be last */
2150 + };
2151 +
2152 ++/*
2153 ++ * Entries for blacklisted endpoints that should be ignored when parsing
2154 ++ * configuration descriptors.
2155 ++ *
2156 ++ * Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST.
2157 ++ */
2158 ++static const struct usb_device_id usb_endpoint_blacklist[] = {
2159 ++ { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
2160 ++ { }
2161 ++};
2162 ++
2163 ++bool usb_endpoint_is_blacklisted(struct usb_device *udev,
2164 ++ struct usb_host_interface *intf,
2165 ++ struct usb_endpoint_descriptor *epd)
2166 ++{
2167 ++ const struct usb_device_id *id;
2168 ++ unsigned int address;
2169 ++
2170 ++ for (id = usb_endpoint_blacklist; id->match_flags; ++id) {
2171 ++ if (!usb_match_device(udev, id))
2172 ++ continue;
2173 ++
2174 ++ if (!usb_match_one_id_intf(udev, intf, id))
2175 ++ continue;
2176 ++
2177 ++ address = id->driver_info;
2178 ++ if (address == epd->bEndpointAddress)
2179 ++ return true;
2180 ++ }
2181 ++
2182 ++ return false;
2183 ++}
2184 ++
2185 + static bool usb_match_any_interface(struct usb_device *udev,
2186 + const struct usb_device_id *id)
2187 + {
2188 +diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
2189 +index d95a5358f73d..c0df5a468d78 100644
2190 +--- a/drivers/usb/core/usb.h
2191 ++++ b/drivers/usb/core/usb.h
2192 +@@ -37,6 +37,9 @@ extern void usb_authorize_interface(struct usb_interface *);
2193 + extern void usb_detect_quirks(struct usb_device *udev);
2194 + extern void usb_detect_interface_quirks(struct usb_device *udev);
2195 + extern void usb_release_quirk_list(void);
2196 ++extern bool usb_endpoint_is_blacklisted(struct usb_device *udev,
2197 ++ struct usb_host_interface *intf,
2198 ++ struct usb_endpoint_descriptor *epd);
2199 + extern int usb_remove_device(struct usb_device *udev);
2200 +
2201 + extern int usb_get_device_descriptor(struct usb_device *dev,
2202 +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
2203 +index 17f3e7b4d4fe..d8424834902d 100644
2204 +--- a/drivers/usb/dwc2/gadget.c
2205 ++++ b/drivers/usb/dwc2/gadget.c
2206 +@@ -1004,11 +1004,6 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
2207 + else
2208 + packets = 1; /* send one packet if length is zero. */
2209 +
2210 +- if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
2211 +- dev_err(hsotg->dev, "req length > maxpacket*mc\n");
2212 +- return;
2213 +- }
2214 +-
2215 + if (dir_in && index != 0)
2216 + if (hs_ep->isochronous)
2217 + epsize = DXEPTSIZ_MC(packets);
2218 +@@ -1312,6 +1307,13 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
2219 + req->actual = 0;
2220 + req->status = -EINPROGRESS;
2221 +
2222 ++ /* Don't queue ISOC request if length greater than mps*mc */
2223 ++ if (hs_ep->isochronous &&
2224 ++ req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
2225 ++ dev_err(hs->dev, "req length > maxpacket*mc\n");
2226 ++ return -EINVAL;
2227 ++ }
2228 ++
2229 + /* In DDMA mode for ISOC's don't queue request if length greater
2230 + * than descriptor limits.
2231 + */
2232 +@@ -1542,6 +1544,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
2233 + struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
2234 + struct dwc2_hsotg_ep *ep;
2235 + __le16 reply;
2236 ++ u16 status;
2237 + int ret;
2238 +
2239 + dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
2240 +@@ -1553,11 +1556,10 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
2241 +
2242 + switch (ctrl->bRequestType & USB_RECIP_MASK) {
2243 + case USB_RECIP_DEVICE:
2244 +- /*
2245 +- * bit 0 => self powered
2246 +- * bit 1 => remote wakeup
2247 +- */
2248 +- reply = cpu_to_le16(0);
2249 ++ status = 1 << USB_DEVICE_SELF_POWERED;
2250 ++ status |= hsotg->remote_wakeup_allowed <<
2251 ++ USB_DEVICE_REMOTE_WAKEUP;
2252 ++ reply = cpu_to_le16(status);
2253 + break;
2254 +
2255 + case USB_RECIP_INTERFACE:
2256 +@@ -1668,7 +1670,10 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
2257 + case USB_RECIP_DEVICE:
2258 + switch (wValue) {
2259 + case USB_DEVICE_REMOTE_WAKEUP:
2260 +- hsotg->remote_wakeup_allowed = 1;
2261 ++ if (set)
2262 ++ hsotg->remote_wakeup_allowed = 1;
2263 ++ else
2264 ++ hsotg->remote_wakeup_allowed = 0;
2265 + break;
2266 +
2267 + case USB_DEVICE_TEST_MODE:
2268 +@@ -1678,16 +1683,17 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
2269 + return -EINVAL;
2270 +
2271 + hsotg->test_mode = wIndex >> 8;
2272 +- ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
2273 +- if (ret) {
2274 +- dev_err(hsotg->dev,
2275 +- "%s: failed to send reply\n", __func__);
2276 +- return ret;
2277 +- }
2278 + break;
2279 + default:
2280 + return -ENOENT;
2281 + }
2282 ++
2283 ++ ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
2284 ++ if (ret) {
2285 ++ dev_err(hsotg->dev,
2286 ++ "%s: failed to send reply\n", __func__);
2287 ++ return ret;
2288 ++ }
2289 + break;
2290 +
2291 + case USB_RECIP_ENDPOINT:
2292 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2293 +index a6e682a000fc..430cfd620854 100644
2294 +--- a/drivers/usb/dwc3/gadget.c
2295 ++++ b/drivers/usb/dwc3/gadget.c
2296 +@@ -2224,7 +2224,8 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
2297 + if (event->status & DEPEVT_STATUS_SHORT && !chain)
2298 + return 1;
2299 +
2300 +- if (event->status & DEPEVT_STATUS_IOC)
2301 ++ if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
2302 ++ (trb->ctrl & DWC3_TRB_CTRL_LST))
2303 + return 1;
2304 +
2305 + return 0;
2306 +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
2307 +index 33115e19756c..fea7c7e0143f 100644
2308 +--- a/drivers/usb/gadget/composite.c
2309 ++++ b/drivers/usb/gadget/composite.c
2310 +@@ -437,12 +437,10 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
2311 + val = CONFIG_USB_GADGET_VBUS_DRAW;
2312 + if (!val)
2313 + return 0;
2314 +- switch (speed) {
2315 +- case USB_SPEED_SUPER:
2316 +- return DIV_ROUND_UP(val, 8);
2317 +- default:
2318 ++ if (speed < USB_SPEED_SUPER)
2319 + return DIV_ROUND_UP(val, 2);
2320 +- }
2321 ++ else
2322 ++ return DIV_ROUND_UP(val, 8);
2323 + }
2324 +
2325 + static int config_buf(struct usb_configuration *config,
2326 +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
2327 +index 9772c0de59b7..a024230f00e2 100644
2328 +--- a/drivers/usb/host/xhci-hub.c
2329 ++++ b/drivers/usb/host/xhci-hub.c
2330 +@@ -55,6 +55,7 @@ static u8 usb_bos_descriptor [] = {
2331 + static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
2332 + u16 wLength)
2333 + {
2334 ++ struct xhci_port_cap *port_cap = NULL;
2335 + int i, ssa_count;
2336 + u32 temp;
2337 + u16 desc_size, ssp_cap_size, ssa_size = 0;
2338 +@@ -64,16 +65,24 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
2339 + ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size;
2340 +
2341 + /* does xhci support USB 3.1 Enhanced SuperSpeed */
2342 +- if (xhci->usb3_rhub.min_rev >= 0x01) {
2343 ++ for (i = 0; i < xhci->num_port_caps; i++) {
2344 ++ if (xhci->port_caps[i].maj_rev == 0x03 &&
2345 ++ xhci->port_caps[i].min_rev >= 0x01) {
2346 ++ usb3_1 = true;
2347 ++ port_cap = &xhci->port_caps[i];
2348 ++ break;
2349 ++ }
2350 ++ }
2351 ++
2352 ++ if (usb3_1) {
2353 + /* does xhci provide a PSI table for SSA speed attributes? */
2354 +- if (xhci->usb3_rhub.psi_count) {
2355 ++ if (port_cap->psi_count) {
2356 + /* two SSA entries for each unique PSI ID, RX and TX */
2357 +- ssa_count = xhci->usb3_rhub.psi_uid_count * 2;
2358 ++ ssa_count = port_cap->psi_uid_count * 2;
2359 + ssa_size = ssa_count * sizeof(u32);
2360 + ssp_cap_size -= 16; /* skip copying the default SSA */
2361 + }
2362 + desc_size += ssp_cap_size;
2363 +- usb3_1 = true;
2364 + }
2365 + memcpy(buf, &usb_bos_descriptor, min(desc_size, wLength));
2366 +
2367 +@@ -99,7 +108,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
2368 + }
2369 +
2370 + /* If PSI table exists, add the custom speed attributes from it */
2371 +- if (usb3_1 && xhci->usb3_rhub.psi_count) {
2372 ++ if (usb3_1 && port_cap->psi_count) {
2373 + u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp;
2374 + int offset;
2375 +
2376 +@@ -111,7 +120,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
2377 +
2378 + /* attribute count SSAC bits 4:0 and ID count SSIC bits 8:5 */
2379 + bm_attrib = (ssa_count - 1) & 0x1f;
2380 +- bm_attrib |= (xhci->usb3_rhub.psi_uid_count - 1) << 5;
2381 ++ bm_attrib |= (port_cap->psi_uid_count - 1) << 5;
2382 + put_unaligned_le32(bm_attrib, &buf[ssp_cap_base + 4]);
2383 +
2384 + if (wLength < desc_size + ssa_size)
2385 +@@ -124,8 +133,8 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
2386 + * USB 3.1 requires two SSA entries (RX and TX) for every link
2387 + */
2388 + offset = desc_size;
2389 +- for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
2390 +- psi = xhci->usb3_rhub.psi[i];
2391 ++ for (i = 0; i < port_cap->psi_count; i++) {
2392 ++ psi = port_cap->psi[i];
2393 + psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
2394 + psi_exp = XHCI_EXT_PORT_PSIE(psi);
2395 + psi_mant = XHCI_EXT_PORT_PSIM(psi);
2396 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
2397 +index 82ce6d8b708d..9e87c282a743 100644
2398 +--- a/drivers/usb/host/xhci-mem.c
2399 ++++ b/drivers/usb/host/xhci-mem.c
2400 +@@ -1475,9 +1475,15 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
2401 + /* Allow 3 retries for everything but isoc, set CErr = 3 */
2402 + if (!usb_endpoint_xfer_isoc(&ep->desc))
2403 + err_count = 3;
2404 +- /* Some devices get this wrong */
2405 +- if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
2406 +- max_packet = 512;
2407 ++ /* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
2408 ++ if (usb_endpoint_xfer_bulk(&ep->desc)) {
2409 ++ if (udev->speed == USB_SPEED_HIGH)
2410 ++ max_packet = 512;
2411 ++ if (udev->speed == USB_SPEED_FULL) {
2412 ++ max_packet = rounddown_pow_of_two(max_packet);
2413 ++ max_packet = clamp_val(max_packet, 8, 64);
2414 ++ }
2415 ++ }
2416 + /* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
2417 + if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
2418 + avg_trb_len = 8;
2419 +@@ -1909,17 +1915,17 @@ no_bw:
2420 + xhci->usb3_rhub.num_ports = 0;
2421 + xhci->num_active_eps = 0;
2422 + kfree(xhci->usb2_rhub.ports);
2423 +- kfree(xhci->usb2_rhub.psi);
2424 + kfree(xhci->usb3_rhub.ports);
2425 +- kfree(xhci->usb3_rhub.psi);
2426 + kfree(xhci->hw_ports);
2427 + kfree(xhci->rh_bw);
2428 + kfree(xhci->ext_caps);
2429 ++ for (i = 0; i < xhci->num_port_caps; i++)
2430 ++ kfree(xhci->port_caps[i].psi);
2431 ++ kfree(xhci->port_caps);
2432 ++ xhci->num_port_caps = 0;
2433 +
2434 + xhci->usb2_rhub.ports = NULL;
2435 +- xhci->usb2_rhub.psi = NULL;
2436 + xhci->usb3_rhub.ports = NULL;
2437 +- xhci->usb3_rhub.psi = NULL;
2438 + xhci->hw_ports = NULL;
2439 + xhci->rh_bw = NULL;
2440 + xhci->ext_caps = NULL;
2441 +@@ -2120,6 +2126,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2442 + u8 major_revision, minor_revision;
2443 + struct xhci_hub *rhub;
2444 + struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2445 ++ struct xhci_port_cap *port_cap;
2446 +
2447 + temp = readl(addr);
2448 + major_revision = XHCI_EXT_PORT_MAJOR(temp);
2449 +@@ -2154,31 +2161,39 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2450 + /* WTF? "Valid values are ‘1’ to MaxPorts" */
2451 + return;
2452 +
2453 +- rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
2454 +- if (rhub->psi_count) {
2455 +- rhub->psi = kcalloc_node(rhub->psi_count, sizeof(*rhub->psi),
2456 +- GFP_KERNEL, dev_to_node(dev));
2457 +- if (!rhub->psi)
2458 +- rhub->psi_count = 0;
2459 ++ port_cap = &xhci->port_caps[xhci->num_port_caps++];
2460 ++ if (xhci->num_port_caps > max_caps)
2461 ++ return;
2462 ++
2463 ++ port_cap->maj_rev = major_revision;
2464 ++ port_cap->min_rev = minor_revision;
2465 ++ port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
2466 +
2467 +- rhub->psi_uid_count++;
2468 +- for (i = 0; i < rhub->psi_count; i++) {
2469 +- rhub->psi[i] = readl(addr + 4 + i);
2470 ++ if (port_cap->psi_count) {
2471 ++ port_cap->psi = kcalloc_node(port_cap->psi_count,
2472 ++ sizeof(*port_cap->psi),
2473 ++ GFP_KERNEL, dev_to_node(dev));
2474 ++ if (!port_cap->psi)
2475 ++ port_cap->psi_count = 0;
2476 ++
2477 ++ port_cap->psi_uid_count++;
2478 ++ for (i = 0; i < port_cap->psi_count; i++) {
2479 ++ port_cap->psi[i] = readl(addr + 4 + i);
2480 +
2481 + /* count unique ID values, two consecutive entries can
2482 + * have the same ID if link is assymetric
2483 + */
2484 +- if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) !=
2485 +- XHCI_EXT_PORT_PSIV(rhub->psi[i - 1])))
2486 +- rhub->psi_uid_count++;
2487 ++ if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
2488 ++ XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
2489 ++ port_cap->psi_uid_count++;
2490 +
2491 + xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
2492 +- XHCI_EXT_PORT_PSIV(rhub->psi[i]),
2493 +- XHCI_EXT_PORT_PSIE(rhub->psi[i]),
2494 +- XHCI_EXT_PORT_PLT(rhub->psi[i]),
2495 +- XHCI_EXT_PORT_PFD(rhub->psi[i]),
2496 +- XHCI_EXT_PORT_LP(rhub->psi[i]),
2497 +- XHCI_EXT_PORT_PSIM(rhub->psi[i]));
2498 ++ XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
2499 ++ XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
2500 ++ XHCI_EXT_PORT_PLT(port_cap->psi[i]),
2501 ++ XHCI_EXT_PORT_PFD(port_cap->psi[i]),
2502 ++ XHCI_EXT_PORT_LP(port_cap->psi[i]),
2503 ++ XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
2504 + }
2505 + }
2506 + /* cache usb2 port capabilities */
2507 +@@ -2225,6 +2240,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2508 + continue;
2509 + }
2510 + hw_port->rhub = rhub;
2511 ++ hw_port->port_cap = port_cap;
2512 + rhub->num_ports++;
2513 + }
2514 + /* FIXME: Should we disable ports not in the Extended Capabilities? */
2515 +@@ -2315,6 +2331,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2516 + if (!xhci->ext_caps)
2517 + return -ENOMEM;
2518 +
2519 ++ xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
2520 ++ flags, dev_to_node(dev));
2521 ++ if (!xhci->port_caps)
2522 ++ return -ENOMEM;
2523 ++
2524 + offset = cap_start;
2525 +
2526 + while (offset) {
2527 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
2528 +index 075c49cfe60f..58cf551a1246 100644
2529 +--- a/drivers/usb/host/xhci-pci.c
2530 ++++ b/drivers/usb/host/xhci-pci.c
2531 +@@ -41,6 +41,7 @@
2532 + #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
2533 + #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
2534 + #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
2535 ++#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af
2536 +
2537 + #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
2538 + #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
2539 +@@ -179,7 +180,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
2540 + pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
2541 + pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
2542 + pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
2543 +- pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
2544 ++ pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI ||
2545 ++ pdev->device == PCI_DEVICE_ID_INTEL_CML_XHCI)) {
2546 + xhci->quirks |= XHCI_PME_STUCK_QUIRK;
2547 + }
2548 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
2549 +@@ -283,6 +285,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
2550 + if (!usb_hcd_is_primary_hcd(hcd))
2551 + return 0;
2552 +
2553 ++ if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
2554 ++ xhci_pme_acpi_rtd3_enable(pdev);
2555 ++
2556 + xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
2557 +
2558 + /* Find any debug ports */
2559 +@@ -340,9 +345,6 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
2560 + HCC_MAX_PSA(xhci->hcc_params) >= 4)
2561 + xhci->shared_hcd->can_do_streams = 1;
2562 +
2563 +- if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
2564 +- xhci_pme_acpi_rtd3_enable(dev);
2565 +-
2566 + /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
2567 + pm_runtime_put_noidle(&dev->dev);
2568 +
2569 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
2570 +index 98b67605d3cf..509a7fce8f05 100644
2571 +--- a/drivers/usb/host/xhci-ring.c
2572 ++++ b/drivers/usb/host/xhci-ring.c
2573 +@@ -2692,6 +2692,42 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
2574 + return 1;
2575 + }
2576 +
2577 ++/*
2578 ++ * Update Event Ring Dequeue Pointer:
2579 ++ * - When all events have finished
2580 ++ * - To avoid "Event Ring Full Error" condition
2581 ++ */
2582 ++static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
2583 ++ union xhci_trb *event_ring_deq)
2584 ++{
2585 ++ u64 temp_64;
2586 ++ dma_addr_t deq;
2587 ++
2588 ++ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2589 ++ /* If necessary, update the HW's version of the event ring deq ptr. */
2590 ++ if (event_ring_deq != xhci->event_ring->dequeue) {
2591 ++ deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2592 ++ xhci->event_ring->dequeue);
2593 ++ if (deq == 0)
2594 ++ xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
2595 ++ /*
2596 ++ * Per 4.9.4, Software writes to the ERDP register shall
2597 ++ * always advance the Event Ring Dequeue Pointer value.
2598 ++ */
2599 ++ if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
2600 ++ ((u64) deq & (u64) ~ERST_PTR_MASK))
2601 ++ return;
2602 ++
2603 ++ /* Update HC event ring dequeue pointer */
2604 ++ temp_64 &= ERST_PTR_MASK;
2605 ++ temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2606 ++ }
2607 ++
2608 ++ /* Clear the event handler busy flag (RW1C) */
2609 ++ temp_64 |= ERST_EHB;
2610 ++ xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
2611 ++}
2612 ++
2613 + /*
2614 + * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2615 + * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
2616 +@@ -2703,9 +2739,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
2617 + union xhci_trb *event_ring_deq;
2618 + irqreturn_t ret = IRQ_NONE;
2619 + unsigned long flags;
2620 +- dma_addr_t deq;
2621 + u64 temp_64;
2622 + u32 status;
2623 ++ int event_loop = 0;
2624 +
2625 + spin_lock_irqsave(&xhci->lock, flags);
2626 + /* Check if the xHC generated the interrupt, or the irq is shared */
2627 +@@ -2759,24 +2795,14 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
2628 + /* FIXME this should be a delayed service routine
2629 + * that clears the EHB.
2630 + */
2631 +- while (xhci_handle_event(xhci) > 0) {}
2632 +-
2633 +- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2634 +- /* If necessary, update the HW's version of the event ring deq ptr. */
2635 +- if (event_ring_deq != xhci->event_ring->dequeue) {
2636 +- deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2637 +- xhci->event_ring->dequeue);
2638 +- if (deq == 0)
2639 +- xhci_warn(xhci, "WARN something wrong with SW event "
2640 +- "ring dequeue ptr.\n");
2641 +- /* Update HC event ring dequeue pointer */
2642 +- temp_64 &= ERST_PTR_MASK;
2643 +- temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2644 ++ while (xhci_handle_event(xhci) > 0) {
2645 ++ if (event_loop++ < TRBS_PER_SEGMENT / 2)
2646 ++ continue;
2647 ++ xhci_update_erst_dequeue(xhci, event_ring_deq);
2648 ++ event_loop = 0;
2649 + }
2650 +
2651 +- /* Clear the event handler busy flag (RW1C); event ring is empty. */
2652 +- temp_64 |= ERST_EHB;
2653 +- xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
2654 ++ xhci_update_erst_dequeue(xhci, event_ring_deq);
2655 + ret = IRQ_HANDLED;
2656 +
2657 + out:
2658 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
2659 +index 9b33031cf6fc..4dedc822237f 100644
2660 +--- a/drivers/usb/host/xhci.h
2661 ++++ b/drivers/usb/host/xhci.h
2662 +@@ -1704,11 +1704,21 @@ static inline unsigned int hcd_index(struct usb_hcd *hcd)
2663 + else
2664 + return 1;
2665 + }
2666 ++
2667 ++struct xhci_port_cap {
2668 ++ u32 *psi; /* array of protocol speed ID entries */
2669 ++ u8 psi_count;
2670 ++ u8 psi_uid_count;
2671 ++ u8 maj_rev;
2672 ++ u8 min_rev;
2673 ++};
2674 ++
2675 + struct xhci_port {
2676 + __le32 __iomem *addr;
2677 + int hw_portnum;
2678 + int hcd_portnum;
2679 + struct xhci_hub *rhub;
2680 ++ struct xhci_port_cap *port_cap;
2681 + };
2682 +
2683 + struct xhci_hub {
2684 +@@ -1718,9 +1728,6 @@ struct xhci_hub {
2685 + /* supported prococol extended capabiliy values */
2686 + u8 maj_rev;
2687 + u8 min_rev;
2688 +- u32 *psi; /* array of protocol speed ID entries */
2689 +- u8 psi_count;
2690 +- u8 psi_uid_count;
2691 + };
2692 +
2693 + /* There is one xhci_hcd structure per controller */
2694 +@@ -1882,6 +1889,9 @@ struct xhci_hcd {
2695 + /* cached usb2 extened protocol capabilites */
2696 + u32 *ext_caps;
2697 + unsigned int num_ext_caps;
2698 ++ /* cached extended protocol port capabilities */
2699 ++ struct xhci_port_cap *port_caps;
2700 ++ unsigned int num_port_caps;
2701 + /* Compliance Mode Recovery Data */
2702 + struct timer_list comp_mode_recovery_timer;
2703 + u32 port_status_u0;
2704 +diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
2705 +index 2d9d9490cdd4..92875a264b14 100644
2706 +--- a/drivers/usb/misc/iowarrior.c
2707 ++++ b/drivers/usb/misc/iowarrior.c
2708 +@@ -33,6 +33,14 @@
2709 + #define USB_DEVICE_ID_CODEMERCS_IOWPV2 0x1512
2710 + /* full speed iowarrior */
2711 + #define USB_DEVICE_ID_CODEMERCS_IOW56 0x1503
2712 ++/* fuller speed iowarrior */
2713 ++#define USB_DEVICE_ID_CODEMERCS_IOW28 0x1504
2714 ++#define USB_DEVICE_ID_CODEMERCS_IOW28L 0x1505
2715 ++#define USB_DEVICE_ID_CODEMERCS_IOW100 0x1506
2716 ++
2717 ++/* OEMed devices */
2718 ++#define USB_DEVICE_ID_CODEMERCS_IOW24SAG 0x158a
2719 ++#define USB_DEVICE_ID_CODEMERCS_IOW56AM 0x158b
2720 +
2721 + /* Get a minor range for your devices from the usb maintainer */
2722 + #ifdef CONFIG_USB_DYNAMIC_MINORS
2723 +@@ -137,6 +145,11 @@ static const struct usb_device_id iowarrior_ids[] = {
2724 + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)},
2725 + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV2)},
2726 + {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56)},
2727 ++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24SAG)},
2728 ++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56AM)},
2729 ++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28)},
2730 ++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28L)},
2731 ++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW100)},
2732 + {} /* Terminating entry */
2733 + };
2734 + MODULE_DEVICE_TABLE(usb, iowarrior_ids);
2735 +@@ -361,6 +374,7 @@ static ssize_t iowarrior_write(struct file *file,
2736 + }
2737 + switch (dev->product_id) {
2738 + case USB_DEVICE_ID_CODEMERCS_IOW24:
2739 ++ case USB_DEVICE_ID_CODEMERCS_IOW24SAG:
2740 + case USB_DEVICE_ID_CODEMERCS_IOWPV1:
2741 + case USB_DEVICE_ID_CODEMERCS_IOWPV2:
2742 + case USB_DEVICE_ID_CODEMERCS_IOW40:
2743 +@@ -375,6 +389,10 @@ static ssize_t iowarrior_write(struct file *file,
2744 + goto exit;
2745 + break;
2746 + case USB_DEVICE_ID_CODEMERCS_IOW56:
2747 ++ case USB_DEVICE_ID_CODEMERCS_IOW56AM:
2748 ++ case USB_DEVICE_ID_CODEMERCS_IOW28:
2749 ++ case USB_DEVICE_ID_CODEMERCS_IOW28L:
2750 ++ case USB_DEVICE_ID_CODEMERCS_IOW100:
2751 + /* The IOW56 uses asynchronous IO and more urbs */
2752 + if (atomic_read(&dev->write_busy) == MAX_WRITES_IN_FLIGHT) {
2753 + /* Wait until we are below the limit for submitted urbs */
2754 +@@ -499,6 +517,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
2755 + switch (cmd) {
2756 + case IOW_WRITE:
2757 + if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24 ||
2758 ++ dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24SAG ||
2759 + dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV1 ||
2760 + dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV2 ||
2761 + dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW40) {
2762 +@@ -782,7 +801,11 @@ static int iowarrior_probe(struct usb_interface *interface,
2763 + goto error;
2764 + }
2765 +
2766 +- if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
2767 ++ if ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
2768 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
2769 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
2770 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
2771 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)) {
2772 + res = usb_find_last_int_out_endpoint(iface_desc,
2773 + &dev->int_out_endpoint);
2774 + if (res) {
2775 +@@ -795,7 +818,11 @@ static int iowarrior_probe(struct usb_interface *interface,
2776 + /* we have to check the report_size often, so remember it in the endianness suitable for our machine */
2777 + dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
2778 + if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
2779 +- (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56))
2780 ++ ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
2781 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
2782 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
2783 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
2784 ++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)))
2785 + /* IOWarrior56 has wMaxPacketSize different from report size */
2786 + dev->report_size = 7;
2787 +
2788 +diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
2789 +index 1c6eb3a8741e..62ca8e29da48 100644
2790 +--- a/drivers/usb/storage/uas.c
2791 ++++ b/drivers/usb/storage/uas.c
2792 +@@ -45,6 +45,7 @@ struct uas_dev_info {
2793 + struct scsi_cmnd *cmnd[MAX_CMNDS];
2794 + spinlock_t lock;
2795 + struct work_struct work;
2796 ++ struct work_struct scan_work; /* for async scanning */
2797 + };
2798 +
2799 + enum {
2800 +@@ -114,6 +115,17 @@ out:
2801 + spin_unlock_irqrestore(&devinfo->lock, flags);
2802 + }
2803 +
2804 ++static void uas_scan_work(struct work_struct *work)
2805 ++{
2806 ++ struct uas_dev_info *devinfo =
2807 ++ container_of(work, struct uas_dev_info, scan_work);
2808 ++ struct Scsi_Host *shost = usb_get_intfdata(devinfo->intf);
2809 ++
2810 ++ dev_dbg(&devinfo->intf->dev, "starting scan\n");
2811 ++ scsi_scan_host(shost);
2812 ++ dev_dbg(&devinfo->intf->dev, "scan complete\n");
2813 ++}
2814 ++
2815 + static void uas_add_work(struct uas_cmd_info *cmdinfo)
2816 + {
2817 + struct scsi_pointer *scp = (void *)cmdinfo;
2818 +@@ -989,6 +1001,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
2819 + init_usb_anchor(&devinfo->data_urbs);
2820 + spin_lock_init(&devinfo->lock);
2821 + INIT_WORK(&devinfo->work, uas_do_work);
2822 ++ INIT_WORK(&devinfo->scan_work, uas_scan_work);
2823 +
2824 + result = uas_configure_endpoints(devinfo);
2825 + if (result)
2826 +@@ -1005,7 +1018,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
2827 + if (result)
2828 + goto free_streams;
2829 +
2830 +- scsi_scan_host(shost);
2831 ++ /* Submit the delayed_work for SCSI-device scanning */
2832 ++ schedule_work(&devinfo->scan_work);
2833 ++
2834 + return result;
2835 +
2836 + free_streams:
2837 +@@ -1173,6 +1188,12 @@ static void uas_disconnect(struct usb_interface *intf)
2838 + usb_kill_anchored_urbs(&devinfo->data_urbs);
2839 + uas_zap_pending(devinfo, DID_NO_CONNECT);
2840 +
2841 ++ /*
2842 ++ * Prevent SCSI scanning (if it hasn't started yet)
2843 ++ * or wait for the SCSI-scanning routine to stop.
2844 ++ */
2845 ++ cancel_work_sync(&devinfo->scan_work);
2846 ++
2847 + scsi_remove_host(shost);
2848 + uas_free_streams(devinfo);
2849 + scsi_host_put(shost);
2850 +diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
2851 +index 08cb419eb4e6..5f6b77ea34fb 100644
2852 +--- a/drivers/xen/preempt.c
2853 ++++ b/drivers/xen/preempt.c
2854 +@@ -37,7 +37,9 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void)
2855 + * cpu.
2856 + */
2857 + __this_cpu_write(xen_in_preemptible_hcall, false);
2858 +- _cond_resched();
2859 ++ local_irq_enable();
2860 ++ cond_resched();
2861 ++ local_irq_disable();
2862 + __this_cpu_write(xen_in_preemptible_hcall, true);
2863 + }
2864 + }
2865 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
2866 +index ea45112a98be..b5039b16de93 100644
2867 +--- a/fs/btrfs/disk-io.c
2868 ++++ b/fs/btrfs/disk-io.c
2869 +@@ -3153,6 +3153,7 @@ retry_root_backup:
2870 + if (IS_ERR(fs_info->fs_root)) {
2871 + err = PTR_ERR(fs_info->fs_root);
2872 + btrfs_warn(fs_info, "failed to read fs tree: %d", err);
2873 ++ fs_info->fs_root = NULL;
2874 + goto fail_qgroup;
2875 + }
2876 +
2877 +@@ -4468,7 +4469,6 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
2878 + wake_up(&fs_info->transaction_wait);
2879 +
2880 + btrfs_destroy_delayed_inodes(fs_info);
2881 +- btrfs_assert_delayed_root_empty(fs_info);
2882 +
2883 + btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
2884 + EXTENT_DIRTY);
2885 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2886 +index 4ea9dd93a545..dec508a28ffa 100644
2887 +--- a/fs/btrfs/inode.c
2888 ++++ b/fs/btrfs/inode.c
2889 +@@ -10348,6 +10348,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
2890 + struct btrfs_root *root = BTRFS_I(inode)->root;
2891 + struct btrfs_key ins;
2892 + u64 cur_offset = start;
2893 ++ u64 clear_offset = start;
2894 + u64 i_size;
2895 + u64 cur_bytes;
2896 + u64 last_alloc = (u64)-1;
2897 +@@ -10382,6 +10383,15 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
2898 + btrfs_end_transaction(trans);
2899 + break;
2900 + }
2901 ++
2902 ++ /*
2903 ++ * We've reserved this space, and thus converted it from
2904 ++ * ->bytes_may_use to ->bytes_reserved. For any error that happens
2905 ++ * from here on out, we only need to clear our reservation
2906 ++ * for the remaining unreserved area, so advance our
2907 ++ * clear_offset by our extent size.
2908 ++ */
2909 ++ clear_offset += ins.offset;
2910 + btrfs_dec_block_group_reservations(fs_info, ins.objectid);
2911 +
2912 + last_alloc = ins.offset;
2913 +@@ -10462,9 +10472,9 @@ next:
2914 + if (own_trans)
2915 + btrfs_end_transaction(trans);
2916 + }
2917 +- if (cur_offset < end)
2918 +- btrfs_free_reserved_data_space(inode, NULL, cur_offset,
2919 +- end - cur_offset + 1);
2920 ++ if (clear_offset < end)
2921 ++ btrfs_free_reserved_data_space(inode, NULL, clear_offset,
2922 ++ end - clear_offset + 1);
2923 + return ret;
2924 + }
2925 +
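The clear_offset bookkeeping above exists because space already converted from ->bytes_may_use to ->bytes_reserved must never be released twice; on error only the still-unconverted tail is freed. A toy userspace ledger showing the same arithmetic (all values made up):

	#include <stdio.h>

	int main(void)
	{
		long start = 0, end = 99;	/* byte range reserved up front */
		long cur = start, clear = start;
		long step = 30;			/* extent size per iteration */

		while (cur <= end) {
			if (cur >= 60)		/* simulate an allocation failure */
				break;
			clear += step;		/* converted: never release again */
			cur += step;
		}
		if (clear < end)		/* free only the unconverted tail */
			printf("release [%ld, %ld] (%ld bytes)\n",
			       clear, end, end - clear + 1);
		return 0;
	}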
2926 +diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
2927 +index 0c4ef208b8b9..0f6d53ec78ed 100644
2928 +--- a/fs/btrfs/ordered-data.c
2929 ++++ b/fs/btrfs/ordered-data.c
2930 +@@ -712,10 +712,15 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
2931 + }
2932 + btrfs_start_ordered_extent(inode, ordered, 1);
2933 + end = ordered->file_offset;
2934 ++ /*
2935 ++ * If the ordered extent had an error, save the error but don't
2936 ++ * exit without first waiting for all other ordered extents in
2937 ++ * the range to complete.
2938 ++ */
2939 + if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
2940 + ret = -EIO;
2941 + btrfs_put_ordered_extent(ordered);
2942 +- if (ret || end == 0 || end == start)
2943 ++ if (end == 0 || end == start)
2944 + break;
2945 + end--;
2946 + }
2947 +diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
2948 +index 708f931c36f1..8e5353bd72cf 100644
2949 +--- a/fs/ecryptfs/crypto.c
2950 ++++ b/fs/ecryptfs/crypto.c
2951 +@@ -325,8 +325,10 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
2952 + struct extent_crypt_result ecr;
2953 + int rc = 0;
2954 +
2955 +- BUG_ON(!crypt_stat || !crypt_stat->tfm
2956 +- || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
2957 ++ if (!crypt_stat || !crypt_stat->tfm
2958 ++ || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
2959 ++ return -EINVAL;
2960 ++
2961 + if (unlikely(ecryptfs_verbosity > 0)) {
2962 + ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
2963 + crypt_stat->key_size);
2964 +diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
2965 +index e74fe84d0886..250cb23ae69f 100644
2966 +--- a/fs/ecryptfs/keystore.c
2967 ++++ b/fs/ecryptfs/keystore.c
2968 +@@ -1318,7 +1318,7 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat,
2969 + printk(KERN_WARNING "Tag 1 packet contains key larger "
2970 + "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n");
2971 + rc = -EINVAL;
2972 +- goto out;
2973 ++ goto out_free;
2974 + }
2975 + memcpy((*new_auth_tok)->session_key.encrypted_key,
2976 + &data[(*packet_size)], (body_size - (ECRYPTFS_SIG_SIZE + 2)));
2977 +diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
2978 +index 9fdd5bcf4564..aa3ddb48ebac 100644
2979 +--- a/fs/ecryptfs/messaging.c
2980 ++++ b/fs/ecryptfs/messaging.c
2981 +@@ -392,6 +392,7 @@ int __init ecryptfs_init_messaging(void)
2982 + * ecryptfs_message_buf_len),
2983 + GFP_KERNEL);
2984 + if (!ecryptfs_msg_ctx_arr) {
2985 ++ kfree(ecryptfs_daemon_hash);
2986 + rc = -ENOMEM;
2987 + goto out;
2988 + }
2989 +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
2990 +index e5d6ee61ff48..f9645de9d04c 100644
2991 +--- a/fs/ext4/balloc.c
2992 ++++ b/fs/ext4/balloc.c
2993 +@@ -270,6 +270,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
2994 + ext4_group_t ngroups = ext4_get_groups_count(sb);
2995 + struct ext4_group_desc *desc;
2996 + struct ext4_sb_info *sbi = EXT4_SB(sb);
2997 ++ struct buffer_head *bh_p;
2998 +
2999 + if (block_group >= ngroups) {
3000 + ext4_error(sb, "block_group >= groups_count - block_group = %u,"
3001 +@@ -280,7 +281,14 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
3002 +
3003 + group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
3004 + offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
3005 +- if (!sbi->s_group_desc[group_desc]) {
3006 ++ bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
3007 ++ /*
3008 ++ * sbi_array_rcu_deref returns with rcu unlocked; this is OK because
3009 ++ * the pointer being dereferenced won't be dereferenced again. Looking
3010 ++ * at the usage in add_new_gdb(), the value isn't modified, just the
3011 ++ * pointer, and so it remains valid.
3012 ++ */
3013 ++ if (!bh_p) {
3014 + ext4_error(sb, "Group descriptor not loaded - "
3015 + "block_group = %u, group_desc = %u, desc = %u",
3016 + block_group, group_desc, offset);
3017 +@@ -288,10 +296,10 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
3018 + }
3019 +
3020 + desc = (struct ext4_group_desc *)(
3021 +- (__u8 *)sbi->s_group_desc[group_desc]->b_data +
3022 ++ (__u8 *)bh_p->b_data +
3023 + offset * EXT4_DESC_SIZE(sb));
3024 + if (bh)
3025 +- *bh = sbi->s_group_desc[group_desc];
3026 ++ *bh = bh_p;
3027 + return desc;
3028 + }
3029 +
3030 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3031 +index 5c0e06645b1e..0a4461ac4225 100644
3032 +--- a/fs/ext4/ext4.h
3033 ++++ b/fs/ext4/ext4.h
3034 +@@ -1372,7 +1372,7 @@ struct ext4_sb_info {
3035 + loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */
3036 + struct buffer_head * s_sbh; /* Buffer containing the super block */
3037 + struct ext4_super_block *s_es; /* Pointer to the super block in the buffer */
3038 +- struct buffer_head **s_group_desc;
3039 ++ struct buffer_head * __rcu *s_group_desc;
3040 + unsigned int s_mount_opt;
3041 + unsigned int s_mount_opt2;
3042 + unsigned int s_mount_flags;
3043 +@@ -1430,7 +1430,7 @@ struct ext4_sb_info {
3044 + #endif
3045 +
3046 + /* for buddy allocator */
3047 +- struct ext4_group_info ***s_group_info;
3048 ++ struct ext4_group_info ** __rcu *s_group_info;
3049 + struct inode *s_buddy_cache;
3050 + spinlock_t s_md_lock;
3051 + unsigned short *s_mb_offsets;
3052 +@@ -1480,7 +1480,7 @@ struct ext4_sb_info {
3053 + unsigned int s_extent_max_zeroout_kb;
3054 +
3055 + unsigned int s_log_groups_per_flex;
3056 +- struct flex_groups *s_flex_groups;
3057 ++ struct flex_groups * __rcu *s_flex_groups;
3058 + ext4_group_t s_flex_groups_allocated;
3059 +
3060 + /* workqueue for reserved extent conversions (buffered io) */
3061 +@@ -1520,8 +1520,11 @@ struct ext4_sb_info {
3062 + struct ratelimit_state s_warning_ratelimit_state;
3063 + struct ratelimit_state s_msg_ratelimit_state;
3064 +
3065 +- /* Barrier between changing inodes' journal flags and writepages ops. */
3066 +- struct percpu_rw_semaphore s_journal_flag_rwsem;
3067 ++ /*
3068 ++ * Barrier between writepages ops and changing any inode's JOURNAL_DATA
3069 ++ * or EXTENTS flag.
3070 ++ */
3071 ++ struct percpu_rw_semaphore s_writepages_rwsem;
3072 + struct dax_device *s_daxdev;
3073 + };
3074 +
3075 +@@ -1541,6 +1544,23 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
3076 + ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
3077 + }
3078 +
3079 ++/*
3080 ++ * Returns: sbi->field[index]
3081 ++ * Used to access an array element from the following sbi fields which require
3082 ++ * rcu protection to avoid dereferencing an invalid pointer due to reassignment
3083 ++ * - s_group_desc
3084 ++ * - s_group_info
3085 ++ * - s_flex_groups
3086 ++ */
3087 ++#define sbi_array_rcu_deref(sbi, field, index) \
3088 ++({ \
3089 ++ typeof(*((sbi)->field)) _v; \
3090 ++ rcu_read_lock(); \
3091 ++ _v = ((typeof(_v)*)rcu_dereference((sbi)->field))[index]; \
3092 ++ rcu_read_unlock(); \
3093 ++ _v; \
3094 ++})
3095 ++
3096 + /*
3097 + * Inode dynamic state flags
3098 + */
3099 +@@ -2564,6 +2584,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
3100 + extern bool ext4_empty_dir(struct inode *inode);
3101 +
3102 + /* resize.c */
3103 ++extern void ext4_kvfree_array_rcu(void *to_free);
3104 + extern int ext4_group_add(struct super_block *sb,
3105 + struct ext4_new_group_data *input);
3106 + extern int ext4_group_extend(struct super_block *sb,
3107 +@@ -2811,13 +2832,13 @@ static inline
3108 + struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
3109 + ext4_group_t group)
3110 + {
3111 +- struct ext4_group_info ***grp_info;
3112 ++ struct ext4_group_info **grp_info;
3113 + long indexv, indexh;
3114 + BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
3115 +- grp_info = EXT4_SB(sb)->s_group_info;
3116 + indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
3117 + indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
3118 +- return grp_info[indexv][indexh];
3119 ++ grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
3120 ++ return grp_info[indexh];
3121 + }
3122 +
3123 + /*
3124 +@@ -2867,7 +2888,7 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
3125 + !inode_is_locked(inode));
3126 + down_write(&EXT4_I(inode)->i_data_sem);
3127 + if (newsize > EXT4_I(inode)->i_disksize)
3128 +- EXT4_I(inode)->i_disksize = newsize;
3129 ++ WRITE_ONCE(EXT4_I(inode)->i_disksize, newsize);
3130 + up_write(&EXT4_I(inode)->i_data_sem);
3131 + }
3132 +
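The sbi_array_rcu_deref() macro added above is a GCC statement expression: the block executes with rcu_read_lock() held and the final _v becomes the macro's value. The same shape in plain userspace C, with a pthread mutex standing in for RCU (illustrative only; gcc is assumed, since statement expressions are a GNU extension):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
	static int table[4] = { 10, 20, 30, 40 };

	/* bracket the array read with the lock and yield the element as
	 * the macro's value, as sbi_array_rcu_deref does with RCU */
	#define locked_read(arr, idx) \
	({ \
		int _v; \
		pthread_mutex_lock(&m); \
		_v = (arr)[idx]; \
		pthread_mutex_unlock(&m); \
		_v; \
	})

	int main(void)
	{
		printf("%d\n", locked_read(table, 2));	/* prints 30 */
		return 0;
	}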
3133 +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
3134 +index 091a18a51c99..dafa7e4aaecb 100644
3135 +--- a/fs/ext4/ialloc.c
3136 ++++ b/fs/ext4/ialloc.c
3137 +@@ -330,11 +330,13 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
3138 +
3139 + percpu_counter_inc(&sbi->s_freeinodes_counter);
3140 + if (sbi->s_log_groups_per_flex) {
3141 +- ext4_group_t f = ext4_flex_group(sbi, block_group);
3142 ++ struct flex_groups *fg;
3143 +
3144 +- atomic_inc(&sbi->s_flex_groups[f].free_inodes);
3145 ++ fg = sbi_array_rcu_deref(sbi, s_flex_groups,
3146 ++ ext4_flex_group(sbi, block_group));
3147 ++ atomic_inc(&fg->free_inodes);
3148 + if (is_directory)
3149 +- atomic_dec(&sbi->s_flex_groups[f].used_dirs);
3150 ++ atomic_dec(&fg->used_dirs);
3151 + }
3152 + BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
3153 + fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
3154 +@@ -370,12 +372,13 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
3155 + int flex_size, struct orlov_stats *stats)
3156 + {
3157 + struct ext4_group_desc *desc;
3158 +- struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
3159 +
3160 + if (flex_size > 1) {
3161 +- stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
3162 +- stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
3163 +- stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
3164 ++ struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
3165 ++ s_flex_groups, g);
3166 ++ stats->free_inodes = atomic_read(&fg->free_inodes);
3167 ++ stats->free_clusters = atomic64_read(&fg->free_clusters);
3168 ++ stats->used_dirs = atomic_read(&fg->used_dirs);
3169 + return;
3170 + }
3171 +
3172 +@@ -1056,7 +1059,8 @@ got:
3173 + if (sbi->s_log_groups_per_flex) {
3174 + ext4_group_t f = ext4_flex_group(sbi, group);
3175 +
3176 +- atomic_inc(&sbi->s_flex_groups[f].used_dirs);
3177 ++ atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
3178 ++ f)->used_dirs);
3179 + }
3180 + }
3181 + if (ext4_has_group_desc_csum(sb)) {
3182 +@@ -1079,7 +1083,8 @@ got:
3183 +
3184 + if (sbi->s_log_groups_per_flex) {
3185 + flex_group = ext4_flex_group(sbi, group);
3186 +- atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
3187 ++ atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
3188 ++ flex_group)->free_inodes);
3189 + }
3190 +
3191 + inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
3192 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3193 +index 8e535bb34d5f..23b4b1745a39 100644
3194 +--- a/fs/ext4/inode.c
3195 ++++ b/fs/ext4/inode.c
3196 +@@ -2569,7 +2569,7 @@ update_disksize:
3197 + * truncate are avoided by checking i_size under i_data_sem.
3198 + */
3199 + disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
3200 +- if (disksize > EXT4_I(inode)->i_disksize) {
3201 ++ if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
3202 + int err2;
3203 + loff_t i_size;
3204 +
3205 +@@ -2730,7 +2730,7 @@ static int ext4_writepages(struct address_space *mapping,
3206 + if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
3207 + return -EIO;
3208 +
3209 +- percpu_down_read(&sbi->s_journal_flag_rwsem);
3210 ++ percpu_down_read(&sbi->s_writepages_rwsem);
3211 + trace_ext4_writepages(inode, wbc);
3212 +
3213 + /*
3214 +@@ -2950,7 +2950,7 @@ unplug:
3215 + out_writepages:
3216 + trace_ext4_writepages_result(inode, wbc, ret,
3217 + nr_to_write - wbc->nr_to_write);
3218 +- percpu_up_read(&sbi->s_journal_flag_rwsem);
3219 ++ percpu_up_read(&sbi->s_writepages_rwsem);
3220 + return ret;
3221 + }
3222 +
3223 +@@ -2965,13 +2965,13 @@ static int ext4_dax_writepages(struct address_space *mapping,
3224 + if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
3225 + return -EIO;
3226 +
3227 +- percpu_down_read(&sbi->s_journal_flag_rwsem);
3228 ++ percpu_down_read(&sbi->s_writepages_rwsem);
3229 + trace_ext4_writepages(inode, wbc);
3230 +
3231 + ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
3232 + trace_ext4_writepages_result(inode, wbc, ret,
3233 + nr_to_write - wbc->nr_to_write);
3234 +- percpu_up_read(&sbi->s_journal_flag_rwsem);
3235 ++ percpu_up_read(&sbi->s_writepages_rwsem);
3236 + return ret;
3237 + }
3238 +
3239 +@@ -6207,7 +6207,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
3240 + }
3241 + }
3242 +
3243 +- percpu_down_write(&sbi->s_journal_flag_rwsem);
3244 ++ percpu_down_write(&sbi->s_writepages_rwsem);
3245 + jbd2_journal_lock_updates(journal);
3246 +
3247 + /*
3248 +@@ -6224,7 +6224,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
3249 + err = jbd2_journal_flush(journal);
3250 + if (err < 0) {
3251 + jbd2_journal_unlock_updates(journal);
3252 +- percpu_up_write(&sbi->s_journal_flag_rwsem);
3253 ++ percpu_up_write(&sbi->s_writepages_rwsem);
3254 + return err;
3255 + }
3256 + ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
3257 +@@ -6232,7 +6232,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
3258 + ext4_set_aops(inode);
3259 +
3260 + jbd2_journal_unlock_updates(journal);
3261 +- percpu_up_write(&sbi->s_journal_flag_rwsem);
3262 ++ percpu_up_write(&sbi->s_writepages_rwsem);
3263 +
3264 + if (val)
3265 + up_write(&EXT4_I(inode)->i_mmap_sem);
3266 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
3267 +index cc229f3357f7..71121fcf9e8c 100644
3268 +--- a/fs/ext4/mballoc.c
3269 ++++ b/fs/ext4/mballoc.c
3270 +@@ -2356,7 +2356,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
3271 + {
3272 + struct ext4_sb_info *sbi = EXT4_SB(sb);
3273 + unsigned size;
3274 +- struct ext4_group_info ***new_groupinfo;
3275 ++ struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
3276 +
3277 + size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
3278 + EXT4_DESC_PER_BLOCK_BITS(sb);
3279 +@@ -2369,13 +2369,16 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
3280 + ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
3281 + return -ENOMEM;
3282 + }
3283 +- if (sbi->s_group_info) {
3284 +- memcpy(new_groupinfo, sbi->s_group_info,
3285 ++ rcu_read_lock();
3286 ++ old_groupinfo = rcu_dereference(sbi->s_group_info);
3287 ++ if (old_groupinfo)
3288 ++ memcpy(new_groupinfo, old_groupinfo,
3289 + sbi->s_group_info_size * sizeof(*sbi->s_group_info));
3290 +- kvfree(sbi->s_group_info);
3291 +- }
3292 +- sbi->s_group_info = new_groupinfo;
3293 ++ rcu_read_unlock();
3294 ++ rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
3295 + sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
3296 ++ if (old_groupinfo)
3297 ++ ext4_kvfree_array_rcu(old_groupinfo);
3298 + ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
3299 + sbi->s_group_info_size);
3300 + return 0;
3301 +@@ -2387,6 +2390,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
3302 + {
3303 + int i;
3304 + int metalen = 0;
3305 ++ int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
3306 + struct ext4_sb_info *sbi = EXT4_SB(sb);
3307 + struct ext4_group_info **meta_group_info;
3308 + struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3309 +@@ -2405,12 +2409,12 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
3310 + "for a buddy group");
3311 + goto exit_meta_group_info;
3312 + }
3313 +- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
3314 +- meta_group_info;
3315 ++ rcu_read_lock();
3316 ++ rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
3317 ++ rcu_read_unlock();
3318 + }
3319 +
3320 +- meta_group_info =
3321 +- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
3322 ++ meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
3323 + i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
3324 +
3325 + meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
3326 +@@ -2458,8 +2462,13 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
3327 + exit_group_info:
3328 + /* If a meta_group_info table has been allocated, release it now */
3329 + if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3330 +- kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
3331 +- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
3332 ++ struct ext4_group_info ***group_info;
3333 ++
3334 ++ rcu_read_lock();
3335 ++ group_info = rcu_dereference(sbi->s_group_info);
3336 ++ kfree(group_info[idx]);
3337 ++ group_info[idx] = NULL;
3338 ++ rcu_read_unlock();
3339 + }
3340 + exit_meta_group_info:
3341 + return -ENOMEM;
3342 +@@ -2472,6 +2481,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
3343 + struct ext4_sb_info *sbi = EXT4_SB(sb);
3344 + int err;
3345 + struct ext4_group_desc *desc;
3346 ++ struct ext4_group_info ***group_info;
3347 + struct kmem_cache *cachep;
3348 +
3349 + err = ext4_mb_alloc_groupinfo(sb, ngroups);
3350 +@@ -2506,11 +2516,16 @@ err_freebuddy:
3351 + while (i-- > 0)
3352 + kmem_cache_free(cachep, ext4_get_group_info(sb, i));
3353 + i = sbi->s_group_info_size;
3354 ++ rcu_read_lock();
3355 ++ group_info = rcu_dereference(sbi->s_group_info);
3356 + while (i-- > 0)
3357 +- kfree(sbi->s_group_info[i]);
3358 ++ kfree(group_info[i]);
3359 ++ rcu_read_unlock();
3360 + iput(sbi->s_buddy_cache);
3361 + err_freesgi:
3362 +- kvfree(sbi->s_group_info);
3363 ++ rcu_read_lock();
3364 ++ kvfree(rcu_dereference(sbi->s_group_info));
3365 ++ rcu_read_unlock();
3366 + return -ENOMEM;
3367 + }
3368 +
3369 +@@ -2699,7 +2714,7 @@ int ext4_mb_release(struct super_block *sb)
3370 + ext4_group_t ngroups = ext4_get_groups_count(sb);
3371 + ext4_group_t i;
3372 + int num_meta_group_infos;
3373 +- struct ext4_group_info *grinfo;
3374 ++ struct ext4_group_info *grinfo, ***group_info;
3375 + struct ext4_sb_info *sbi = EXT4_SB(sb);
3376 + struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3377 +
3378 +@@ -2717,9 +2732,12 @@ int ext4_mb_release(struct super_block *sb)
3379 + num_meta_group_infos = (ngroups +
3380 + EXT4_DESC_PER_BLOCK(sb) - 1) >>
3381 + EXT4_DESC_PER_BLOCK_BITS(sb);
3382 ++ rcu_read_lock();
3383 ++ group_info = rcu_dereference(sbi->s_group_info);
3384 + for (i = 0; i < num_meta_group_infos; i++)
3385 +- kfree(sbi->s_group_info[i]);
3386 +- kvfree(sbi->s_group_info);
3387 ++ kfree(group_info[i]);
3388 ++ kvfree(group_info);
3389 ++ rcu_read_unlock();
3390 + }
3391 + kfree(sbi->s_mb_offsets);
3392 + kfree(sbi->s_mb_maxs);
3393 +@@ -3018,7 +3036,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3394 + ext4_group_t flex_group = ext4_flex_group(sbi,
3395 + ac->ac_b_ex.fe_group);
3396 + atomic64_sub(ac->ac_b_ex.fe_len,
3397 +- &sbi->s_flex_groups[flex_group].free_clusters);
3398 ++ &sbi_array_rcu_deref(sbi, s_flex_groups,
3399 ++ flex_group)->free_clusters);
3400 + }
3401 +
3402 + err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3403 +@@ -4912,7 +4931,8 @@ do_more:
3404 + if (sbi->s_log_groups_per_flex) {
3405 + ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
3406 + atomic64_add(count_clusters,
3407 +- &sbi->s_flex_groups[flex_group].free_clusters);
3408 ++ &sbi_array_rcu_deref(sbi, s_flex_groups,
3409 ++ flex_group)->free_clusters);
3410 + }
3411 +
3412 + if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
3413 +@@ -5061,7 +5081,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
3414 + if (sbi->s_log_groups_per_flex) {
3415 + ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
3416 + atomic64_add(clusters_freed,
3417 +- &sbi->s_flex_groups[flex_group].free_clusters);
3418 ++ &sbi_array_rcu_deref(sbi, s_flex_groups,
3419 ++ flex_group)->free_clusters);
3420 + }
3421 +
3422 + ext4_mb_unload_buddy(&e4b);
3423 +diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
3424 +index a98bfca9c463..bec4ad787c7d 100644
3425 +--- a/fs/ext4/migrate.c
3426 ++++ b/fs/ext4/migrate.c
3427 +@@ -427,6 +427,7 @@ static int free_ext_block(handle_t *handle, struct inode *inode)
3428 +
3429 + int ext4_ext_migrate(struct inode *inode)
3430 + {
3431 ++ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3432 + handle_t *handle;
3433 + int retval = 0, i;
3434 + __le32 *i_data;
3435 +@@ -451,6 +452,8 @@ int ext4_ext_migrate(struct inode *inode)
3436 + */
3437 + return retval;
3438 +
3439 ++ percpu_down_write(&sbi->s_writepages_rwsem);
3440 ++
3441 + /*
3442 + * Worst case we can touch the allocation bitmaps, a bgd
3443 + * block, and a block to link in the orphan list. We do need
3444 +@@ -461,7 +464,7 @@ int ext4_ext_migrate(struct inode *inode)
3445 +
3446 + if (IS_ERR(handle)) {
3447 + retval = PTR_ERR(handle);
3448 +- return retval;
3449 ++ goto out_unlock;
3450 + }
3451 + goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
3452 + EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
3453 +@@ -472,7 +475,7 @@ int ext4_ext_migrate(struct inode *inode)
3454 + if (IS_ERR(tmp_inode)) {
3455 + retval = PTR_ERR(tmp_inode);
3456 + ext4_journal_stop(handle);
3457 +- return retval;
3458 ++ goto out_unlock;
3459 + }
3460 + i_size_write(tmp_inode, i_size_read(inode));
3461 + /*
3462 +@@ -514,7 +517,7 @@ int ext4_ext_migrate(struct inode *inode)
3463 + */
3464 + ext4_orphan_del(NULL, tmp_inode);
3465 + retval = PTR_ERR(handle);
3466 +- goto out;
3467 ++ goto out_tmp_inode;
3468 + }
3469 +
3470 + ei = EXT4_I(inode);
3471 +@@ -595,10 +598,11 @@ err_out:
3472 + /* Reset the extent details */
3473 + ext4_ext_tree_init(handle, tmp_inode);
3474 + ext4_journal_stop(handle);
3475 +-out:
3476 ++out_tmp_inode:
3477 + unlock_new_inode(tmp_inode);
3478 + iput(tmp_inode);
3479 +-
3480 ++out_unlock:
3481 ++ percpu_up_write(&sbi->s_writepages_rwsem);
3482 + return retval;
3483 + }
3484 +
3485 +@@ -608,7 +612,8 @@ out:
3486 + int ext4_ind_migrate(struct inode *inode)
3487 + {
3488 + struct ext4_extent_header *eh;
3489 +- struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
3490 ++ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3491 ++ struct ext4_super_block *es = sbi->s_es;
3492 + struct ext4_inode_info *ei = EXT4_I(inode);
3493 + struct ext4_extent *ex;
3494 + unsigned int i, len;
3495 +@@ -632,9 +637,13 @@ int ext4_ind_migrate(struct inode *inode)
3496 + if (test_opt(inode->i_sb, DELALLOC))
3497 + ext4_alloc_da_blocks(inode);
3498 +
3499 ++ percpu_down_write(&sbi->s_writepages_rwsem);
3500 ++
3501 + handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
3502 +- if (IS_ERR(handle))
3503 +- return PTR_ERR(handle);
3504 ++ if (IS_ERR(handle)) {
3505 ++ ret = PTR_ERR(handle);
3506 ++ goto out_unlock;
3507 ++ }
3508 +
3509 + down_write(&EXT4_I(inode)->i_data_sem);
3510 + ret = ext4_ext_check_inode(inode);
3511 +@@ -669,5 +678,7 @@ int ext4_ind_migrate(struct inode *inode)
3512 + errout:
3513 + ext4_journal_stop(handle);
3514 + up_write(&EXT4_I(inode)->i_data_sem);
3515 ++out_unlock:
3516 ++ percpu_up_write(&sbi->s_writepages_rwsem);
3517 + return ret;
3518 + }
3519 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
3520 +index 4608d0d3b7f9..a8f2e3549bb9 100644
3521 +--- a/fs/ext4/namei.c
3522 ++++ b/fs/ext4/namei.c
3523 +@@ -1431,6 +1431,7 @@ restart:
3524 + /*
3525 + * We deal with the read-ahead logic here.
3526 + */
3527 ++ cond_resched();
3528 + if (ra_ptr >= ra_max) {
3529 + /* Refill the readahead buffer */
3530 + ra_ptr = 0;
3531 +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
3532 +index 4d5c0fc9d23a..ef552d93708e 100644
3533 +--- a/fs/ext4/resize.c
3534 ++++ b/fs/ext4/resize.c
3535 +@@ -17,6 +17,33 @@
3536 +
3537 + #include "ext4_jbd2.h"
3538 +
3539 ++struct ext4_rcu_ptr {
3540 ++ struct rcu_head rcu;
3541 ++ void *ptr;
3542 ++};
3543 ++
3544 ++static void ext4_rcu_ptr_callback(struct rcu_head *head)
3545 ++{
3546 ++ struct ext4_rcu_ptr *ptr;
3547 ++
3548 ++ ptr = container_of(head, struct ext4_rcu_ptr, rcu);
3549 ++ kvfree(ptr->ptr);
3550 ++ kfree(ptr);
3551 ++}
3552 ++
3553 ++void ext4_kvfree_array_rcu(void *to_free)
3554 ++{
3555 ++ struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
3556 ++
3557 ++ if (ptr) {
3558 ++ ptr->ptr = to_free;
3559 ++ call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
3560 ++ return;
3561 ++ }
3562 ++ synchronize_rcu();
3563 ++ kvfree(to_free);
3564 ++}
3565 ++
3566 + int ext4_resize_begin(struct super_block *sb)
3567 + {
3568 + struct ext4_sb_info *sbi = EXT4_SB(sb);
3569 +@@ -560,8 +587,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
3570 + brelse(gdb);
3571 + goto out;
3572 + }
3573 +- memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
3574 +- gdb->b_size);
3575 ++ memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
3576 ++ s_group_desc, j)->b_data, gdb->b_size);
3577 + set_buffer_uptodate(gdb);
3578 +
3579 + err = ext4_handle_dirty_metadata(handle, NULL, gdb);
3580 +@@ -879,13 +906,15 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
3581 + }
3582 + brelse(dind);
3583 +
3584 +- o_group_desc = EXT4_SB(sb)->s_group_desc;
3585 ++ rcu_read_lock();
3586 ++ o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
3587 + memcpy(n_group_desc, o_group_desc,
3588 + EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
3589 ++ rcu_read_unlock();
3590 + n_group_desc[gdb_num] = gdb_bh;
3591 +- EXT4_SB(sb)->s_group_desc = n_group_desc;
3592 ++ rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
3593 + EXT4_SB(sb)->s_gdb_count++;
3594 +- kvfree(o_group_desc);
3595 ++ ext4_kvfree_array_rcu(o_group_desc);
3596 +
3597 + le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
3598 + err = ext4_handle_dirty_super(handle, sb);
3599 +@@ -929,9 +958,11 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
3600 + return err;
3601 + }
3602 +
3603 +- o_group_desc = EXT4_SB(sb)->s_group_desc;
3604 ++ rcu_read_lock();
3605 ++ o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
3606 + memcpy(n_group_desc, o_group_desc,
3607 + EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
3608 ++ rcu_read_unlock();
3609 + n_group_desc[gdb_num] = gdb_bh;
3610 +
3611 + BUFFER_TRACE(gdb_bh, "get_write_access");
3612 +@@ -942,9 +973,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
3613 + return err;
3614 + }
3615 +
3616 +- EXT4_SB(sb)->s_group_desc = n_group_desc;
3617 ++ rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
3618 + EXT4_SB(sb)->s_gdb_count++;
3619 +- kvfree(o_group_desc);
3620 ++ ext4_kvfree_array_rcu(o_group_desc);
3621 + return err;
3622 + }
3623 +
3624 +@@ -1210,7 +1241,8 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
3625 + * use non-sparse filesystems anymore. This is already checked above.
3626 + */
3627 + if (gdb_off) {
3628 +- gdb_bh = sbi->s_group_desc[gdb_num];
3629 ++ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
3630 ++ gdb_num);
3631 + BUFFER_TRACE(gdb_bh, "get_write_access");
3632 + err = ext4_journal_get_write_access(handle, gdb_bh);
3633 +
3634 +@@ -1292,7 +1324,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
3635 + /*
3636 + * get_write_access() has been called on gdb_bh by ext4_add_new_desc().
3637 + */
3638 +- gdb_bh = sbi->s_group_desc[gdb_num];
3639 ++ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
3640 + /* Update group descriptor block for new group */
3641 + gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
3642 + gdb_off * EXT4_DESC_SIZE(sb));
3643 +@@ -1420,11 +1452,14 @@ static void ext4_update_super(struct super_block *sb,
3644 + percpu_counter_read(&sbi->s_freeclusters_counter));
3645 + if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
3646 + ext4_group_t flex_group;
3647 ++ struct flex_groups *fg;
3648 ++
3649 + flex_group = ext4_flex_group(sbi, group_data[0].group);
3650 ++ fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
3651 + atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
3652 +- &sbi->s_flex_groups[flex_group].free_clusters);
3653 ++ &fg->free_clusters);
3654 + atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
3655 +- &sbi->s_flex_groups[flex_group].free_inodes);
3656 ++ &fg->free_inodes);
3657 + }
3658 +
3659 + /*
3660 +@@ -1519,7 +1554,8 @@ exit_journal:
3661 + for (; gdb_num <= gdb_num_end; gdb_num++) {
3662 + struct buffer_head *gdb_bh;
3663 +
3664 +- gdb_bh = sbi->s_group_desc[gdb_num];
3665 ++ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
3666 ++ gdb_num);
3667 + if (old_gdb == gdb_bh->b_blocknr)
3668 + continue;
3669 + update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
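ext4_kvfree_array_rcu(), introduced at the top of this resize.c diff, frees a replaced pointer array only after an RCU grace period: the pointer rides in a small wrapper handed to call_rcu(), with synchronize_rcu() plus a direct free as the out-of-memory fallback. A userspace analogue built on liburcu (the liburcu API is assumed here; link with -lurcu):

	#include <urcu.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct rcu_wrapper {
		struct rcu_head rcu;
		void *ptr;
	};

	static void free_wrapper(struct rcu_head *head)
	{
		struct rcu_wrapper *w =
			caa_container_of(head, struct rcu_wrapper, rcu);

		free(w->ptr);	/* grace period elapsed: no readers remain */
		free(w);
	}

	/* free 'to_free' once all current RCU readers are done */
	static void free_array_rcu(void *to_free)
	{
		struct rcu_wrapper *w = calloc(1, sizeof(*w));

		if (w) {
			w->ptr = to_free;
			call_rcu(&w->rcu, free_wrapper);
			return;
		}
		synchronize_rcu();	/* no memory: block for a grace period */
		free(to_free);
	}

	int main(void)
	{
		rcu_register_thread();
		free_array_rcu(malloc(64));
		rcu_barrier();		/* drain pending call_rcu callbacks */
		rcu_unregister_thread();
		return 0;
	}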
3670 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3671 +index e080e90178a0..cb797489b2d8 100644
3672 +--- a/fs/ext4/super.c
3673 ++++ b/fs/ext4/super.c
3674 +@@ -969,6 +969,8 @@ static void ext4_put_super(struct super_block *sb)
3675 + {
3676 + struct ext4_sb_info *sbi = EXT4_SB(sb);
3677 + struct ext4_super_block *es = sbi->s_es;
3678 ++ struct buffer_head **group_desc;
3679 ++ struct flex_groups **flex_groups;
3680 + int aborted = 0;
3681 + int i, err;
3682 +
3683 +@@ -999,15 +1001,23 @@ static void ext4_put_super(struct super_block *sb)
3684 + if (!sb_rdonly(sb))
3685 + ext4_commit_super(sb, 1);
3686 +
3687 ++ rcu_read_lock();
3688 ++ group_desc = rcu_dereference(sbi->s_group_desc);
3689 + for (i = 0; i < sbi->s_gdb_count; i++)
3690 +- brelse(sbi->s_group_desc[i]);
3691 +- kvfree(sbi->s_group_desc);
3692 +- kvfree(sbi->s_flex_groups);
3693 ++ brelse(group_desc[i]);
3694 ++ kvfree(group_desc);
3695 ++ flex_groups = rcu_dereference(sbi->s_flex_groups);
3696 ++ if (flex_groups) {
3697 ++ for (i = 0; i < sbi->s_flex_groups_allocated; i++)
3698 ++ kvfree(flex_groups[i]);
3699 ++ kvfree(flex_groups);
3700 ++ }
3701 ++ rcu_read_unlock();
3702 + percpu_counter_destroy(&sbi->s_freeclusters_counter);
3703 + percpu_counter_destroy(&sbi->s_freeinodes_counter);
3704 + percpu_counter_destroy(&sbi->s_dirs_counter);
3705 + percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
3706 +- percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
3707 ++ percpu_free_rwsem(&sbi->s_writepages_rwsem);
3708 + #ifdef CONFIG_QUOTA
3709 + for (i = 0; i < EXT4_MAXQUOTAS; i++)
3710 + kfree(get_qf_name(sb, sbi, i));
3711 +@@ -2287,8 +2297,8 @@ done:
3712 + int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
3713 + {
3714 + struct ext4_sb_info *sbi = EXT4_SB(sb);
3715 +- struct flex_groups *new_groups;
3716 +- int size;
3717 ++ struct flex_groups **old_groups, **new_groups;
3718 ++ int size, i;
3719 +
3720 + if (!sbi->s_log_groups_per_flex)
3721 + return 0;
3722 +@@ -2297,22 +2307,37 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
3723 + if (size <= sbi->s_flex_groups_allocated)
3724 + return 0;
3725 +
3726 +- size = roundup_pow_of_two(size * sizeof(struct flex_groups));
3727 +- new_groups = kvzalloc(size, GFP_KERNEL);
3728 ++ new_groups = kvzalloc(roundup_pow_of_two(size *
3729 ++ sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
3730 + if (!new_groups) {
3731 +- ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
3732 +- size / (int) sizeof(struct flex_groups));
3733 ++ ext4_msg(sb, KERN_ERR,
3734 ++ "not enough memory for %d flex group pointers", size);
3735 + return -ENOMEM;
3736 + }
3737 +-
3738 +- if (sbi->s_flex_groups) {
3739 +- memcpy(new_groups, sbi->s_flex_groups,
3740 +- (sbi->s_flex_groups_allocated *
3741 +- sizeof(struct flex_groups)));
3742 +- kvfree(sbi->s_flex_groups);
3743 ++ for (i = sbi->s_flex_groups_allocated; i < size; i++) {
3744 ++ new_groups[i] = kvzalloc(roundup_pow_of_two(
3745 ++ sizeof(struct flex_groups)),
3746 ++ GFP_KERNEL);
3747 ++ if (!new_groups[i]) {
3748 ++ for (i--; i >= sbi->s_flex_groups_allocated; i--)
3749 ++ kvfree(new_groups[i]);
3750 ++ kvfree(new_groups);
3751 ++ ext4_msg(sb, KERN_ERR,
3752 ++ "not enough memory for %d flex groups", size);
3753 ++ return -ENOMEM;
3754 ++ }
3755 + }
3756 +- sbi->s_flex_groups = new_groups;
3757 +- sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
3758 ++ rcu_read_lock();
3759 ++ old_groups = rcu_dereference(sbi->s_flex_groups);
3760 ++ if (old_groups)
3761 ++ memcpy(new_groups, old_groups,
3762 ++ (sbi->s_flex_groups_allocated *
3763 ++ sizeof(struct flex_groups *)));
3764 ++ rcu_read_unlock();
3765 ++ rcu_assign_pointer(sbi->s_flex_groups, new_groups);
3766 ++ sbi->s_flex_groups_allocated = size;
3767 ++ if (old_groups)
3768 ++ ext4_kvfree_array_rcu(old_groups);
3769 + return 0;
3770 + }
3771 +
3772 +@@ -2320,6 +2345,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
3773 + {
3774 + struct ext4_sb_info *sbi = EXT4_SB(sb);
3775 + struct ext4_group_desc *gdp = NULL;
3776 ++ struct flex_groups *fg;
3777 + ext4_group_t flex_group;
3778 + int i, err;
3779 +
3780 +@@ -2337,12 +2363,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
3781 + gdp = ext4_get_group_desc(sb, i, NULL);
3782 +
3783 + flex_group = ext4_flex_group(sbi, i);
3784 +- atomic_add(ext4_free_inodes_count(sb, gdp),
3785 +- &sbi->s_flex_groups[flex_group].free_inodes);
3786 ++ fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
3787 ++ atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
3788 + atomic64_add(ext4_free_group_clusters(sb, gdp),
3789 +- &sbi->s_flex_groups[flex_group].free_clusters);
3790 +- atomic_add(ext4_used_dirs_count(sb, gdp),
3791 +- &sbi->s_flex_groups[flex_group].used_dirs);
3792 ++ &fg->free_clusters);
3793 ++ atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
3794 + }
3795 +
3796 + return 1;
3797 +@@ -2923,7 +2948,7 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
3798 + return 0;
3799 + }
3800 +
3801 +-#if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2)
3802 ++#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
3803 + if (!readonly && (ext4_has_feature_quota(sb) ||
3804 + ext4_has_feature_project(sb))) {
3805 + ext4_msg(sb, KERN_ERR,
3806 +@@ -3548,9 +3573,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3807 + {
3808 + struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
3809 + char *orig_data = kstrdup(data, GFP_KERNEL);
3810 +- struct buffer_head *bh;
3811 ++ struct buffer_head *bh, **group_desc;
3812 + struct ext4_super_block *es = NULL;
3813 + struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
3814 ++ struct flex_groups **flex_groups;
3815 + ext4_fsblk_t block;
3816 + ext4_fsblk_t sb_block = get_sb_block(&data);
3817 + ext4_fsblk_t logical_sb_block;
3818 +@@ -4166,9 +4192,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3819 + goto failed_mount;
3820 + }
3821 + }
3822 +- sbi->s_group_desc = kvmalloc_array(db_count,
3823 +- sizeof(struct buffer_head *),
3824 +- GFP_KERNEL);
3825 ++ rcu_assign_pointer(sbi->s_group_desc,
3826 ++ kvmalloc_array(db_count,
3827 ++ sizeof(struct buffer_head *),
3828 ++ GFP_KERNEL));
3829 + if (sbi->s_group_desc == NULL) {
3830 + ext4_msg(sb, KERN_ERR, "not enough memory");
3831 + ret = -ENOMEM;
3832 +@@ -4184,14 +4211,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3833 + }
3834 +
3835 + for (i = 0; i < db_count; i++) {
3836 ++ struct buffer_head *bh;
3837 ++
3838 + block = descriptor_loc(sb, logical_sb_block, i);
3839 +- sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
3840 +- if (!sbi->s_group_desc[i]) {
3841 ++ bh = sb_bread_unmovable(sb, block);
3842 ++ if (!bh) {
3843 + ext4_msg(sb, KERN_ERR,
3844 + "can't read group descriptor %d", i);
3845 + db_count = i;
3846 + goto failed_mount2;
3847 + }
3848 ++ rcu_read_lock();
3849 ++ rcu_dereference(sbi->s_group_desc)[i] = bh;
3850 ++ rcu_read_unlock();
3851 + }
3852 + sbi->s_gdb_count = db_count;
3853 + if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
3854 +@@ -4463,7 +4495,7 @@ no_journal:
3855 + err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
3856 + GFP_KERNEL);
3857 + if (!err)
3858 +- err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
3859 ++ err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
3860 +
3861 + if (err) {
3862 + ext4_msg(sb, KERN_ERR, "insufficient memory");
3863 +@@ -4551,13 +4583,19 @@ failed_mount7:
3864 + ext4_unregister_li_request(sb);
3865 + failed_mount6:
3866 + ext4_mb_release(sb);
3867 +- if (sbi->s_flex_groups)
3868 +- kvfree(sbi->s_flex_groups);
3869 ++ rcu_read_lock();
3870 ++ flex_groups = rcu_dereference(sbi->s_flex_groups);
3871 ++ if (flex_groups) {
3872 ++ for (i = 0; i < sbi->s_flex_groups_allocated; i++)
3873 ++ kvfree(flex_groups[i]);
3874 ++ kvfree(flex_groups);
3875 ++ }
3876 ++ rcu_read_unlock();
3877 + percpu_counter_destroy(&sbi->s_freeclusters_counter);
3878 + percpu_counter_destroy(&sbi->s_freeinodes_counter);
3879 + percpu_counter_destroy(&sbi->s_dirs_counter);
3880 + percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
3881 +- percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
3882 ++ percpu_free_rwsem(&sbi->s_writepages_rwsem);
3883 + failed_mount5:
3884 + ext4_ext_release(sb);
3885 + ext4_release_system_zone(sb);
3886 +@@ -4588,9 +4626,12 @@ failed_mount3:
3887 + if (sbi->s_mmp_tsk)
3888 + kthread_stop(sbi->s_mmp_tsk);
3889 + failed_mount2:
3890 ++ rcu_read_lock();
3891 ++ group_desc = rcu_dereference(sbi->s_group_desc);
3892 + for (i = 0; i < db_count; i++)
3893 +- brelse(sbi->s_group_desc[i]);
3894 +- kvfree(sbi->s_group_desc);
3895 ++ brelse(group_desc[i]);
3896 ++ kvfree(group_desc);
3897 ++ rcu_read_unlock();
3898 + failed_mount:
3899 + if (sbi->s_chksum_driver)
3900 + crypto_free_shash(sbi->s_chksum_driver);
3901 +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
3902 +index 97ffe12a2262..04ffef9cea8c 100644
3903 +--- a/fs/jbd2/transaction.c
3904 ++++ b/fs/jbd2/transaction.c
3905 +@@ -831,8 +831,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
3906 + char *frozen_buffer = NULL;
3907 + unsigned long start_lock, time_lock;
3908 +
3909 +- if (is_handle_aborted(handle))
3910 +- return -EROFS;
3911 + journal = transaction->t_journal;
3912 +
3913 + jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
3914 +@@ -1084,6 +1082,9 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
3915 + struct journal_head *jh;
3916 + int rc;
3917 +
3918 ++ if (is_handle_aborted(handle))
3919 ++ return -EROFS;
3920 ++
3921 + if (jbd2_write_access_granted(handle, bh, false))
3922 + return 0;
3923 +
3924 +@@ -1221,6 +1222,9 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
3925 + struct journal_head *jh;
3926 + char *committed_data = NULL;
3927 +
3928 ++ if (is_handle_aborted(handle))
3929 ++ return -EROFS;
3930 ++
3931 + if (jbd2_write_access_granted(handle, bh, true))
3932 + return 0;
3933 +
3934 +diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
3935 +index 99bc5b3ae26e..733eaf95e207 100644
3936 +--- a/include/linux/intel-svm.h
3937 ++++ b/include/linux/intel-svm.h
3938 +@@ -130,7 +130,7 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
3939 + BUG();
3940 + }
3941 +
3942 +-static int intel_svm_is_pasid_valid(struct device *dev, int pasid)
3943 ++static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid)
3944 + {
3945 + return -EINVAL;
3946 + }
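The intel-svm.h one-liner adds the 'inline' missing from a header-defined stub: a plain static function in a header leaves every includer with its own unused copy and a -Wunused-function warning. The pattern in isolation (feature name hypothetical):

	#include <errno.h>
	#include <stdio.h>

	/* header-style fallback stub: 'static inline' gives each includer
	 * a definition without "defined but not used" warnings */
	static inline int feature_is_pasid_valid(const void *dev, int pasid)
	{
		(void)dev;
		(void)pasid;
		return -EINVAL;
	}

	int main(void)
	{
		printf("%d\n", feature_is_pasid_valid(NULL, 1));
		return 0;
	}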
3947 +diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
3948 +index 8301f1df0682..092445543258 100644
3949 +--- a/include/linux/irqdomain.h
3950 ++++ b/include/linux/irqdomain.h
3951 +@@ -188,7 +188,7 @@ enum {
3952 + IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
3953 +
3954 + /* Irq domain name was allocated in __irq_domain_add() */
3955 +- IRQ_DOMAIN_NAME_ALLOCATED = (1 << 6),
3956 ++ IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1),
3957 +
3958 + /* Irq domain is an IPI domain with virq per cpu */
3959 + IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2),
3960 +diff --git a/include/linux/libata.h b/include/linux/libata.h
3961 +index aff09d0b3545..75a916d7ab2a 100644
3962 +--- a/include/linux/libata.h
3963 ++++ b/include/linux/libata.h
3964 +@@ -1236,6 +1236,7 @@ struct pci_bits {
3965 + };
3966 +
3967 + extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
3968 ++extern void ata_pci_shutdown_one(struct pci_dev *pdev);
3969 + extern void ata_pci_remove_one(struct pci_dev *pdev);
3970 +
3971 + #ifdef CONFIG_PM
3972 +diff --git a/include/linux/tty.h b/include/linux/tty.h
3973 +index 76db046f09ab..248a137112e8 100644
3974 +--- a/include/linux/tty.h
3975 ++++ b/include/linux/tty.h
3976 +@@ -225,6 +225,8 @@ struct tty_port_client_operations {
3977 + void (*write_wakeup)(struct tty_port *port);
3978 + };
3979 +
3980 ++extern const struct tty_port_client_operations tty_port_default_client_ops;
3981 ++
3982 + struct tty_port {
3983 + struct tty_bufhead buf; /* Locked internally */
3984 + struct tty_struct *tty; /* Back pointer */
3985 +diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
3986 +index a1be64c9940f..22c1f579afe3 100644
3987 +--- a/include/linux/usb/quirks.h
3988 ++++ b/include/linux/usb/quirks.h
3989 +@@ -69,4 +69,7 @@
3990 + /* Hub needs extra delay after resetting its port. */
3991 + #define USB_QUIRK_HUB_SLOW_RESET BIT(14)
3992 +
3993 ++/* device has blacklisted endpoints */
3994 ++#define USB_QUIRK_ENDPOINT_BLACKLIST BIT(15)
3995 ++
3996 + #endif /* __LINUX_USB_QUIRKS_H */
3997 +diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
3998 +index f0a01a54bd15..df156f1d50b2 100644
3999 +--- a/include/scsi/iscsi_proto.h
4000 ++++ b/include/scsi/iscsi_proto.h
4001 +@@ -638,7 +638,6 @@ struct iscsi_reject {
4002 + #define ISCSI_REASON_BOOKMARK_INVALID 9
4003 + #define ISCSI_REASON_BOOKMARK_NO_RESOURCES 10
4004 + #define ISCSI_REASON_NEGOTIATION_RESET 11
4005 +-#define ISCSI_REASON_WAITING_FOR_LOGOUT 12
4006 +
4007 + /* Max. number of Key=Value pairs in a text message */
4008 + #define MAX_KEY_VALUE_PAIRS 8192
4009 +diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
4010 +index 6665cb29e1a2..c2a71fd8dfaf 100644
4011 +--- a/include/sound/rawmidi.h
4012 ++++ b/include/sound/rawmidi.h
4013 +@@ -92,9 +92,9 @@ struct snd_rawmidi_substream {
4014 + struct list_head list; /* list of all substream for given stream */
4015 + int stream; /* direction */
4016 + int number; /* substream number */
4017 +- unsigned int opened: 1, /* open flag */
4018 +- append: 1, /* append flag (merge more streams) */
4019 +- active_sensing: 1; /* send active sensing when close */
4020 ++ bool opened; /* open flag */
4021 ++ bool append; /* append flag (merge more streams) */
4022 ++ bool active_sensing; /* send active sensing when close */
4023 + int use_count; /* use counter (for output) */
4024 + size_t bytes;
4025 + struct snd_rawmidi *rmidi;
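Splitting the rawmidi flags out of a shared bitfield word is a concurrency fix: storing to one bitfield is a read-modify-write of the whole storage unit, so two CPUs updating neighbouring bits can silently undo each other. A sketch of the two layouts (exact sizes vary by ABI):

	#include <stdbool.h>
	#include <stdio.h>

	/* adjacent bitfields share one word: writing ->opened rewrites
	 * the word and can clobber a racing update of ->append */
	struct packed_flags {
		unsigned int opened:1, append:1, active_sensing:1;
	};

	/* one bool per flag gives each its own memory location, so
	 * independent writers no longer touch each other's flag */
	struct split_flags {
		bool opened;
		bool append;
		bool active_sensing;
	};

	int main(void)
	{
		printf("packed=%zu split=%zu\n",
		       sizeof(struct packed_flags), sizeof(struct split_flags));
		return 0;
	}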
4026 +diff --git a/ipc/sem.c b/ipc/sem.c
4027 +index 26f8e37fcdcb..2bf535dd0b93 100644
4028 +--- a/ipc/sem.c
4029 ++++ b/ipc/sem.c
4030 +@@ -2345,11 +2345,9 @@ void exit_sem(struct task_struct *tsk)
4031 + ipc_assert_locked_object(&sma->sem_perm);
4032 + list_del(&un->list_id);
4033 +
4034 +- /* we are the last process using this ulp, acquiring ulp->lock
4035 +- * isn't required. Besides that, we are also protected against
4036 +- * IPC_RMID as we hold sma->sem_perm lock now
4037 +- */
4038 ++ spin_lock(&ulp->lock);
4039 + list_del_rcu(&un->list_proc);
4040 ++ spin_unlock(&ulp->lock);
4041 +
4042 + /* perform adjustments registered in un */
4043 + for (i = 0; i < sma->sem_nsems; i++) {
4044 +diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
4045 +index 86477f3894e5..66e13aace241 100644
4046 +--- a/kernel/bpf/offload.c
4047 ++++ b/kernel/bpf/offload.c
4048 +@@ -289,7 +289,7 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
4049 +
4050 + ulen = info->jited_prog_len;
4051 + info->jited_prog_len = aux->offload->jited_len;
4052 +- if (info->jited_prog_len & ulen) {
4053 ++ if (info->jited_prog_len && ulen) {
4054 + uinsns = u64_to_user_ptr(info->jited_prog_insns);
4055 + ulen = min_t(u32, info->jited_prog_len, ulen);
4056 + if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
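The offload.c hunk is a one-character logic fix: '&' bitwise-ANDs the two lengths, so the copy was silently skipped whenever they shared no set bits (e.g. 4 & 3 == 0); '&&' tests that both are non-zero. A two-line demonstration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int jited_len = 4, ulen = 3;

		printf("bitwise: %u\n", jited_len & ulen);	/* 0: copy skipped */
		printf("logical: %d\n", jited_len && ulen);	/* 1: copy runs */
		return 0;
	}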
4057 +diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
4058 +index ea57f3d397fe..3f4618510d05 100644
4059 +--- a/kernel/irq/internals.h
4060 ++++ b/kernel/irq/internals.h
4061 +@@ -126,8 +126,6 @@ static inline void unregister_handler_proc(unsigned int irq,
4062 +
4063 + extern bool irq_can_set_affinity_usr(unsigned int irq);
4064 +
4065 +-extern int irq_select_affinity_usr(unsigned int irq);
4066 +-
4067 + extern void irq_set_thread_affinity(struct irq_desc *desc);
4068 +
4069 + extern int irq_do_set_affinity(struct irq_data *data,
4070 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
4071 +index 23bcfa71077f..eb69b805f908 100644
4072 +--- a/kernel/irq/manage.c
4073 ++++ b/kernel/irq/manage.c
4074 +@@ -441,23 +441,9 @@ int irq_setup_affinity(struct irq_desc *desc)
4075 + {
4076 + return irq_select_affinity(irq_desc_get_irq(desc));
4077 + }
4078 +-#endif
4079 ++#endif /* CONFIG_AUTO_IRQ_AFFINITY */
4080 ++#endif /* CONFIG_SMP */
4081 +
4082 +-/*
4083 +- * Called when a bogus affinity is set via /proc/irq
4084 +- */
4085 +-int irq_select_affinity_usr(unsigned int irq)
4086 +-{
4087 +- struct irq_desc *desc = irq_to_desc(irq);
4088 +- unsigned long flags;
4089 +- int ret;
4090 +-
4091 +- raw_spin_lock_irqsave(&desc->lock, flags);
4092 +- ret = irq_setup_affinity(desc);
4093 +- raw_spin_unlock_irqrestore(&desc->lock, flags);
4094 +- return ret;
4095 +-}
4096 +-#endif
4097 +
4098 + /**
4099 + * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
4100 +diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
4101 +index da9addb8d655..e8c655b7a430 100644
4102 +--- a/kernel/irq/proc.c
4103 ++++ b/kernel/irq/proc.c
4104 +@@ -115,6 +115,28 @@ static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
4105 + return show_irq_affinity(AFFINITY_LIST, m);
4106 + }
4107 +
4108 ++#ifndef CONFIG_AUTO_IRQ_AFFINITY
4109 ++static inline int irq_select_affinity_usr(unsigned int irq)
4110 ++{
4111 ++ /*
4112 ++ * If the interrupt is already started up then this fails. The
4113 ++ * interrupt is already assigned to an online CPU. There is no
4114 ++ * point in moving it around randomly. Tell user space that the
4115 ++ * selected mask is bogus.
4116 ++ *
4117 ++ * If not, then any change to the affinity is pointless because
4118 ++ * the startup code invokes irq_setup_affinity(), which will
4119 ++ * select an online CPU anyway.
4120 ++ */
4121 ++ return -EINVAL;
4122 ++}
4123 ++#else
4124 ++/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
4125 ++static inline int irq_select_affinity_usr(unsigned int irq)
4126 ++{
4127 ++ return irq_select_affinity(irq);
4128 ++}
4129 ++#endif
4130 +
4131 + static ssize_t write_irq_affinity(int type, struct file *file,
4132 + const char __user *buffer, size_t count, loff_t *pos)
4133 +diff --git a/lib/stackdepot.c b/lib/stackdepot.c
4134 +index e513459a5601..3376a3291186 100644
4135 +--- a/lib/stackdepot.c
4136 ++++ b/lib/stackdepot.c
4137 +@@ -92,15 +92,19 @@ static bool init_stack_slab(void **prealloc)
4138 + return true;
4139 + if (stack_slabs[depot_index] == NULL) {
4140 + stack_slabs[depot_index] = *prealloc;
4141 ++ *prealloc = NULL;
4142 + } else {
4143 +- stack_slabs[depot_index + 1] = *prealloc;
4144 ++ /* If this is the last depot slab, do not touch the next one. */
4145 ++ if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
4146 ++ stack_slabs[depot_index + 1] = *prealloc;
4147 ++ *prealloc = NULL;
4148 ++ }
4149 + /*
4150 + * This smp_store_release pairs with smp_load_acquire() from
4151 + * |next_slab_inited| above and in depot_save_stack().
4152 + */
4153 + smp_store_release(&next_slab_inited, 1);
4154 + }
4155 +- *prealloc = NULL;
4156 + return true;
4157 + }
4158 +
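The stackdepot fix does two things: it only parks the preallocated slab in the *next* slot when that slot exists, and it consumes *prealloc only when the buffer was actually stored. A toy model of the guarded hand-off (sizes made up):

	#include <stdio.h>

	#define MAX_SLABS 4
	static void *slabs[MAX_SLABS];

	/* store the preallocated buffer, consuming it only when placed */
	static void install(int idx, void **prealloc)
	{
		if (!slabs[idx]) {
			slabs[idx] = *prealloc;
			*prealloc = NULL;
		} else if (idx + 1 < MAX_SLABS) {	/* guard the last slot */
			slabs[idx + 1] = *prealloc;
			*prealloc = NULL;
		}
		/* at the final, already-filled slab the caller keeps the
		 * buffer instead of the old write past the array end */
	}

	int main(void)
	{
		int a, b;
		void *p = &a;

		slabs[MAX_SLABS - 1] = &b;	/* last slot already in use */
		install(MAX_SLABS - 1, &p);
		printf("prealloc %s\n", p ? "kept" : "consumed");
		return 0;
	}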
4159 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4160 +index 3a3d109dce21..0f7ff204083e 100644
4161 +--- a/mm/memcontrol.c
4162 ++++ b/mm/memcontrol.c
4163 +@@ -419,8 +419,10 @@ int memcg_expand_shrinker_maps(int new_id)
4164 + if (mem_cgroup_is_root(memcg))
4165 + continue;
4166 + ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
4167 +- if (ret)
4168 ++ if (ret) {
4169 ++ mem_cgroup_iter_break(NULL, memcg);
4170 + goto unlock;
4171 ++ }
4172 + }
4173 + unlock:
4174 + if (!ret)
4175 +diff --git a/mm/vmscan.c b/mm/vmscan.c
4176 +index b37610c0eac6..bc2ecd43251a 100644
4177 +--- a/mm/vmscan.c
4178 ++++ b/mm/vmscan.c
4179 +@@ -2446,10 +2446,13 @@ out:
4180 + /*
4181 + * Scan types proportional to swappiness and
4182 + * their relative recent reclaim efficiency.
4183 +- * Make sure we don't miss the last page
4184 +- * because of a round-off error.
4185 ++ * Make sure we don't miss the last page on
4186 ++ * the offlined memory cgroups because of a
4187 ++ * round-off error.
4188 + */
4189 +- scan = DIV64_U64_ROUND_UP(scan * fraction[file],
4190 ++ scan = mem_cgroup_online(memcg) ?
4191 ++ div64_u64(scan * fraction[file], denominator) :
4192 ++ DIV64_U64_ROUND_UP(scan * fraction[file],
4193 + denominator);
4194 + break;
4195 + case SCAN_FILE:
4196 +diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
4197 +index 1ad4017f9b73..0c2dc6def86d 100644
4198 +--- a/net/netfilter/xt_hashlimit.c
4199 ++++ b/net/netfilter/xt_hashlimit.c
4200 +@@ -845,6 +845,8 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
4201 + return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 3);
4202 + }
4203 +
4204 ++#define HASHLIMIT_MAX_SIZE 1048576
4205 ++
4206 + static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
4207 + struct xt_hashlimit_htable **hinfo,
4208 + struct hashlimit_cfg3 *cfg,
4209 +@@ -855,6 +857,14 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
4210 +
4211 + if (cfg->gc_interval == 0 || cfg->expire == 0)
4212 + return -EINVAL;
4213 ++ if (cfg->size > HASHLIMIT_MAX_SIZE) {
4214 ++ cfg->size = HASHLIMIT_MAX_SIZE;
4215 ++ pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
4216 ++ }
4217 ++ if (cfg->max > HASHLIMIT_MAX_SIZE) {
4218 ++ cfg->max = HASHLIMIT_MAX_SIZE;
4219 ++ pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
4220 ++ }
4221 + if (par->family == NFPROTO_IPV4) {
4222 + if (cfg->srcmask > 32 || cfg->dstmask > 32)
4223 + return -EINVAL;
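Rather than rejecting oversized user-supplied table parameters and breaking existing rulesets, the hashlimit check clamps them to a ceiling and logs the truncation. The clamp-and-warn shape on its own (limit value borrowed from the hunk, the rest hypothetical):

	#include <stdio.h>

	#define HASHLIMIT_MAX_SIZE 1048576u

	/* cap an untrusted size instead of failing the whole setup */
	static unsigned int clamp_size(unsigned int v, const char *name)
	{
		if (v > HASHLIMIT_MAX_SIZE) {
			fprintf(stderr, "%s too large, truncated to %u\n",
				name, HASHLIMIT_MAX_SIZE);
			return HASHLIMIT_MAX_SIZE;
		}
		return v;
	}

	int main(void)
	{
		printf("%u\n", clamp_size(1u << 30, "size"));
		return 0;
	}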
4224 +diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
4225 +index 17fdfce1625f..964c4e45de11 100644
4226 +--- a/net/rxrpc/call_object.c
4227 ++++ b/net/rxrpc/call_object.c
4228 +@@ -647,11 +647,11 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
4229 + }
4230 +
4231 + /*
4232 +- * Final call destruction under RCU.
4233 ++ * Final call destruction - but must be done in process context.
4234 + */
4235 +-static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
4236 ++static void rxrpc_destroy_call(struct work_struct *work)
4237 + {
4238 +- struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
4239 ++ struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
4240 + struct rxrpc_net *rxnet = call->rxnet;
4241 +
4242 + rxrpc_put_connection(call->conn);
4243 +@@ -663,6 +663,22 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
4244 + wake_up_var(&rxnet->nr_calls);
4245 + }
4246 +
4247 ++/*
4248 ++ * Final call destruction under RCU.
4249 ++ */
4250 ++static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
4251 ++{
4252 ++ struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
4253 ++
4254 ++ if (in_softirq()) {
4255 ++ INIT_WORK(&call->processor, rxrpc_destroy_call);
4256 ++ if (!rxrpc_queue_work(&call->processor))
4257 ++ BUG();
4258 ++ } else {
4259 ++ rxrpc_destroy_call(&call->processor);
4260 ++ }
4261 ++}
4262 ++
4263 + /*
4264 + * clean up a call
4265 + */
4266 +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
4267 +index bd3d68e0489d..aaf9c419c3dd 100644
4268 +--- a/sound/core/seq/seq_clientmgr.c
4269 ++++ b/sound/core/seq/seq_clientmgr.c
4270 +@@ -563,7 +563,7 @@ static int update_timestamp_of_queue(struct snd_seq_event *event,
4271 + event->queue = queue;
4272 + event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
4273 + if (real_time) {
4274 +- event->time.time = snd_seq_timer_get_cur_time(q->timer);
4275 ++ event->time.time = snd_seq_timer_get_cur_time(q->timer, true);
4276 + event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
4277 + } else {
4278 + event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
4279 +@@ -1642,7 +1642,7 @@ static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
4280 + tmr = queue->timer;
4281 + status->events = queue->tickq->cells + queue->timeq->cells;
4282 +
4283 +- status->time = snd_seq_timer_get_cur_time(tmr);
4284 ++ status->time = snd_seq_timer_get_cur_time(tmr, true);
4285 + status->tick = snd_seq_timer_get_cur_tick(tmr);
4286 +
4287 + status->running = tmr->running;
4288 +diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
4289 +index 3b3ac96f1f5f..28b4dd45b8d1 100644
4290 +--- a/sound/core/seq/seq_queue.c
4291 ++++ b/sound/core/seq/seq_queue.c
4292 +@@ -251,6 +251,8 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
4293 + {
4294 + unsigned long flags;
4295 + struct snd_seq_event_cell *cell;
4296 ++ snd_seq_tick_time_t cur_tick;
4297 ++ snd_seq_real_time_t cur_time;
4298 +
4299 + if (q == NULL)
4300 + return;
4301 +@@ -267,17 +269,18 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
4302 +
4303 + __again:
4304 + /* Process tick queue... */
4305 ++ cur_tick = snd_seq_timer_get_cur_tick(q->timer);
4306 + for (;;) {
4307 +- cell = snd_seq_prioq_cell_out(q->tickq,
4308 +- &q->timer->tick.cur_tick);
4309 ++ cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
4310 + if (!cell)
4311 + break;
4312 + snd_seq_dispatch_event(cell, atomic, hop);
4313 + }
4314 +
4315 + /* Process time queue... */
4316 ++ cur_time = snd_seq_timer_get_cur_time(q->timer, false);
4317 + for (;;) {
4318 +- cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
4319 ++ cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
4320 + if (!cell)
4321 + break;
4322 + snd_seq_dispatch_event(cell, atomic, hop);
4323 +@@ -405,6 +408,7 @@ int snd_seq_queue_check_access(int queueid, int client)
4324 + int snd_seq_queue_set_owner(int queueid, int client, int locked)
4325 + {
4326 + struct snd_seq_queue *q = queueptr(queueid);
4327 ++ unsigned long flags;
4328 +
4329 + if (q == NULL)
4330 + return -EINVAL;
4331 +@@ -414,8 +418,10 @@ int snd_seq_queue_set_owner(int queueid, int client, int locked)
4332 + return -EPERM;
4333 + }
4334 +
4335 ++ spin_lock_irqsave(&q->owner_lock, flags);
4336 + q->locked = locked ? 1 : 0;
4337 + q->owner = client;
4338 ++ spin_unlock_irqrestore(&q->owner_lock, flags);
4339 + queue_access_unlock(q);
4340 + queuefree(q);
4341 +
4342 +@@ -552,15 +558,17 @@ void snd_seq_queue_client_termination(int client)
4343 + unsigned long flags;
4344 + int i;
4345 + struct snd_seq_queue *q;
4346 ++ bool matched;
4347 +
4348 + for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
4349 + if ((q = queueptr(i)) == NULL)
4350 + continue;
4351 + spin_lock_irqsave(&q->owner_lock, flags);
4352 +- if (q->owner == client)
4353 ++ matched = (q->owner == client);
4354 ++ if (matched)
4355 + q->klocked = 1;
4356 + spin_unlock_irqrestore(&q->owner_lock, flags);
4357 +- if (q->owner == client) {
4358 ++ if (matched) {
4359 + if (q->timer->running)
4360 + snd_seq_timer_stop(q->timer);
4361 + snd_seq_timer_reset(q->timer);
4362 +@@ -752,6 +760,8 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
4363 + int i, bpm;
4364 + struct snd_seq_queue *q;
4365 + struct snd_seq_timer *tmr;
4366 ++ bool locked;
4367 ++ int owner;
4368 +
4369 + for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
4370 + if ((q = queueptr(i)) == NULL)
4371 +@@ -763,9 +773,14 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
4372 + else
4373 + bpm = 0;
4374 +
4375 ++ spin_lock_irq(&q->owner_lock);
4376 ++ locked = q->locked;
4377 ++ owner = q->owner;
4378 ++ spin_unlock_irq(&q->owner_lock);
4379 ++
4380 + snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
4381 +- snd_iprintf(buffer, "owned by client : %d\n", q->owner);
4382 +- snd_iprintf(buffer, "lock status : %s\n", q->locked ? "Locked" : "Free");
4383 ++ snd_iprintf(buffer, "owned by client : %d\n", owner);
4384 ++ snd_iprintf(buffer, "lock status : %s\n", locked ? "Locked" : "Free");
4385 + snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
4386 + snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
4387 + snd_iprintf(buffer, "timer state : %s\n", tmr->running ? "Running" : "Stopped");
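Every seq_queue.c hunk above closes the same class of race: state guarded by q->owner_lock (or by the timer's own lock) was being read or written without it, or re-read after it was dropped. The dispatch loops now compare events against one-shot snapshots (cur_tick, cur_time) instead of repeatedly dereferencing the live timer fields; snd_seq_queue_set_owner() takes the lock around the owner/locked update; and the new `matched` bool is the standard fix for a check-then-act race — decide once under the lock and act on the remembered answer, rather than re-reading q->owner after unlock, when another CPU may already have changed it. A minimal sketch of that idiom, with illustrative struct and field names:

    #include <linux/spinlock.h>

    struct owned_queue {
        spinlock_t lock;
        int owner;
        int klocked;
    };

    /* Racy: the second, unlocked read of q->owner can disagree with
     * the first one (time-of-check to time-of-use).
     */
    static void terminate_racy(struct owned_queue *q, int client)
    {
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        if (q->owner == client)
            q->klocked = 1;
        spin_unlock_irqrestore(&q->lock, flags);

        if (q->owner == client) {   /* may observe a new owner now */
            /* ... tear down ... */
        }
    }

    /* Fixed: decide once under the lock, act on the snapshot. */
    static void terminate_fixed(struct owned_queue *q, int client)
    {
        unsigned long flags;
        bool matched;

        spin_lock_irqsave(&q->lock, flags);
        matched = (q->owner == client);
        if (matched)
            q->klocked = 1;
        spin_unlock_irqrestore(&q->lock, flags);

        if (matched) {
            /* ... tear down, consistent with the marking above ... */
        }
    }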
4388 +diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
4389 +index aed8e1c1f02f..3da44a4f9257 100644
4390 +--- a/sound/core/seq/seq_timer.c
4391 ++++ b/sound/core/seq/seq_timer.c
4392 +@@ -437,14 +437,15 @@ int snd_seq_timer_continue(struct snd_seq_timer *tmr)
4393 + }
4394 +
4395 + /* return current 'real' time. use timeofday() to get better granularity. */
4396 +-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
4397 ++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
4398 ++ bool adjust_ktime)
4399 + {
4400 + snd_seq_real_time_t cur_time;
4401 + unsigned long flags;
4402 +
4403 + spin_lock_irqsave(&tmr->lock, flags);
4404 + cur_time = tmr->cur_time;
4405 +- if (tmr->running) {
4406 ++ if (adjust_ktime && tmr->running) {
4407 + struct timespec64 tm;
4408 +
4409 + ktime_get_ts64(&tm);
4410 +@@ -461,7 +462,13 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
4411 + high PPQ values) */
4412 + snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr)
4413 + {
4414 +- return tmr->tick.cur_tick;
4415 ++ snd_seq_tick_time_t cur_tick;
4416 ++ unsigned long flags;
4417 ++
4418 ++ spin_lock_irqsave(&tmr->lock, flags);
4419 ++ cur_tick = tmr->tick.cur_tick;
4420 ++ spin_unlock_irqrestore(&tmr->lock, flags);
4421 ++ return cur_tick;
4422 + }
4423 +
4424 +
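Two fixes in seq_timer.c: snd_seq_timer_get_cur_tick() previously returned tmr->tick.cur_tick with no locking at all, racing against the timer interrupt that advances it, and now reads it under tmr->lock; and snd_seq_timer_get_cur_time() grows an adjust_ktime flag so that the ktime_get_ts64() extrapolation — the lock-protected base time plus however long the timer has been running since its last sync — is only paid for where it matters. The seq_clientmgr.c call sites earlier in this patch pass true because they report timestamps to user space; the seq_queue.c dispatch loop passes false and compares queued events against the raw base time. A simplified conceptual sketch (the struct and field names are illustrative, not the driver's):

    #include <linux/spinlock.h>
    #include <linux/timekeeping.h>

    struct my_tmr {
        spinlock_t lock;
        bool running;
        u64 cur_ns;          /* base time, advanced by the timer irq */
        u64 last_update_ns;  /* monotonic time of the last advance  */
    };

    static u64 get_cur_ns(struct my_tmr *t, bool adjust_ktime)
    {
        unsigned long flags;
        u64 ns;

        spin_lock_irqsave(&t->lock, flags);
        ns = t->cur_ns;
        if (adjust_ktime && t->running)
            /* extrapolate: base + time elapsed since last sync */
            ns += ktime_get_ns() - t->last_update_ns;
        spin_unlock_irqrestore(&t->lock, flags);
        return ns;
    }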
4425 +diff --git a/sound/core/seq/seq_timer.h b/sound/core/seq/seq_timer.h
4426 +index 62f390671096..44f52f5963db 100644
4427 +--- a/sound/core/seq/seq_timer.h
4428 ++++ b/sound/core/seq/seq_timer.h
4429 +@@ -135,7 +135,8 @@ int snd_seq_timer_set_tempo_ppq(struct snd_seq_timer *tmr, int tempo, int ppq);
4430 + int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr, snd_seq_tick_time_t position);
4431 + int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr, snd_seq_real_time_t position);
4432 + int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew, unsigned int base);
4433 +-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr);
4434 ++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
4435 ++ bool adjust_ktime);
4436 + snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr);
4437 +
4438 + extern int seq_default_timer_class;
4439 +diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
4440 +index f21633cd9b38..acbe61b8db7b 100644
4441 +--- a/sound/hda/hdmi_chmap.c
4442 ++++ b/sound/hda/hdmi_chmap.c
4443 +@@ -249,7 +249,7 @@ void snd_hdac_print_channel_allocation(int spk_alloc, char *buf, int buflen)
4444 +
4445 + for (i = 0, j = 0; i < ARRAY_SIZE(cea_speaker_allocation_names); i++) {
4446 + if (spk_alloc & (1 << i))
4447 +- j += snprintf(buf + j, buflen - j, " %s",
4448 ++ j += scnprintf(buf + j, buflen - j, " %s",
4449 + cea_speaker_allocation_names[i]);
4450 + }
4451 + buf[j] = '\0'; /* necessary when j == 0 */
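This hunk — and the matching ones in hda_codec.c, hda_eld.c and hda_sysfs.c below — repairs the same accumulation bug: snprintf() returns the length the output would have had with unlimited space, so on truncation `j += snprintf(buf + j, buflen - j, ...)` pushes j past buflen, after which `buf[j] = '\0'` writes out of bounds and the next iteration computes a pointer beyond the buffer plus an underflowed (huge unsigned) size. scnprintf() returns the number of characters actually stored, excluding the trailing NUL, so j can never leave the buffer. Illustratively:

    char buf[8];
    int j = 0;

    /* snprintf reports the would-be length 10 even though only
     * 7 chars + NUL fit, so j overshoots the buffer...
     */
    j += snprintf(buf + j, sizeof(buf) - j, "0123456789");  /* j == 10 */
    /* ...and a following call would use buf + 10 with a size of
     * (size_t)(8 - 10), i.e. an enormous unsigned value.
     */

    /* scnprintf reports what was actually stored. */
    j = 0;
    j += scnprintf(buf + j, sizeof(buf) - j, "0123456789"); /* j == 7 */
    buf[j] = '\0';                                          /* in bounds */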
4452 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
4453 +index 82b0dc9f528f..f3a6b1d869d8 100644
4454 +--- a/sound/pci/hda/hda_codec.c
4455 ++++ b/sound/pci/hda/hda_codec.c
4456 +@@ -4019,7 +4019,7 @@ void snd_print_pcm_bits(int pcm, char *buf, int buflen)
4457 +
4458 + for (i = 0, j = 0; i < ARRAY_SIZE(bits); i++)
4459 + if (pcm & (AC_SUPPCM_BITS_8 << i))
4460 +- j += snprintf(buf + j, buflen - j, " %d", bits[i]);
4461 ++ j += scnprintf(buf + j, buflen - j, " %d", bits[i]);
4462 +
4463 + buf[j] = '\0'; /* necessary when j == 0 */
4464 + }
4465 +diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
4466 +index ba7fe9b6655c..864cc8c9ada0 100644
4467 +--- a/sound/pci/hda/hda_eld.c
4468 ++++ b/sound/pci/hda/hda_eld.c
4469 +@@ -373,7 +373,7 @@ static void hdmi_print_pcm_rates(int pcm, char *buf, int buflen)
4470 +
4471 + for (i = 0, j = 0; i < ARRAY_SIZE(alsa_rates); i++)
4472 + if (pcm & (1 << i))
4473 +- j += snprintf(buf + j, buflen - j, " %d",
4474 ++ j += scnprintf(buf + j, buflen - j, " %d",
4475 + alsa_rates[i]);
4476 +
4477 + buf[j] = '\0'; /* necessary when j == 0 */
4478 +diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
4479 +index 6ec79c58d48d..6535155e992d 100644
4480 +--- a/sound/pci/hda/hda_sysfs.c
4481 ++++ b/sound/pci/hda/hda_sysfs.c
4482 +@@ -221,7 +221,7 @@ static ssize_t init_verbs_show(struct device *dev,
4483 + int i, len = 0;
4484 + mutex_lock(&codec->user_mutex);
4485 + snd_array_for_each(&codec->init_verbs, i, v) {
4486 +- len += snprintf(buf + len, PAGE_SIZE - len,
4487 ++ len += scnprintf(buf + len, PAGE_SIZE - len,
4488 + "0x%02x 0x%03x 0x%04x\n",
4489 + v->nid, v->verb, v->param);
4490 + }
4491 +@@ -271,7 +271,7 @@ static ssize_t hints_show(struct device *dev,
4492 + int i, len = 0;
4493 + mutex_lock(&codec->user_mutex);
4494 + snd_array_for_each(&codec->hints, i, hint) {
4495 +- len += snprintf(buf + len, PAGE_SIZE - len,
4496 ++ len += scnprintf(buf + len, PAGE_SIZE - len,
4497 + "%s = %s\n", hint->key, hint->val);
4498 + }
4499 + mutex_unlock(&codec->user_mutex);
4500 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4501 +index a8a47e1596dd..94fffc0675a7 100644
4502 +--- a/sound/pci/hda/patch_realtek.c
4503 ++++ b/sound/pci/hda/patch_realtek.c
4504 +@@ -2442,7 +2442,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
4505 + SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
4506 + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
4507 + SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
4508 ++ SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
4509 + SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
4510 ++ SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
4511 + SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
4512 + SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
4513 + SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
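The patch_realtek.c hunk wires two more machines to an existing fixup by PCI subsystem ID: SND_PCI_QUIRK(vendor, device, name, fixup) matches the codec's subsystem vendor/device pair — 0x1462 is Micro-Star International — and the new GP63 and GP65 entries reuse ALC1220_FIXUP_CLEVO_P950, whose workaround these boards apparently share. Adding another machine is a one-line table entry; the device ID below is invented purely for illustration:

    /* Hypothetical entry, not in the patch: 0x1462:0x9999 is made up. */
    SND_PCI_QUIRK(0x1462, 0x9999, "MSI-Example", ALC1220_FIXUP_CLEVO_P950),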
4514 +diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
4515 +index a3db6a68dfe6..8bcdeb281770 100644
4516 +--- a/sound/soc/sunxi/sun8i-codec.c
4517 ++++ b/sound/soc/sunxi/sun8i-codec.c
4518 +@@ -89,6 +89,7 @@
4519 +
4520 + #define SUN8I_SYS_SR_CTRL_AIF1_FS_MASK GENMASK(15, 12)
4521 + #define SUN8I_SYS_SR_CTRL_AIF2_FS_MASK GENMASK(11, 8)
4522 ++#define SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK GENMASK(3, 2)
4523 + #define SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_MASK GENMASK(5, 4)
4524 + #define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK GENMASK(8, 6)
4525 + #define SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK GENMASK(12, 9)
4526 +@@ -250,7 +251,7 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
4527 + return -EINVAL;
4528 + }
4529 + regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
4530 +- BIT(SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT),
4531 ++ SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK,
4532 + value << SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT);
4533 +
4534 + return 0;
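The final hunk fixes a mask/shift mix-up in the sun8i codec: SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT is the field's bit position, so BIT(SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT) covered only one bit of the two-bit DAI data-format field at bits 3:2. regmap_update_bits() only writes bits inside the mask it is given, so any format value needing bit 3 was silently truncated; the new GENMASK(3, 2) mask spans the whole field. A reduced sketch of the failure mode (the register and regmap names are illustrative):

    #define FMT_SHIFT       2
    #define FMT_MASK_WRONG  BIT(FMT_SHIFT)  /* 0b0100: misses bit 3  */
    #define FMT_MASK_RIGHT  GENMASK(3, 2)   /* 0b1100: whole field   */

    /* Writing field value 0x3 (0b11 -> 0b1100 after the shift): */
    regmap_update_bits(map, REG, FMT_MASK_WRONG, 0x3 << FMT_SHIFT);
    /* only bit 2 is updated; bit 3 keeps its previous contents    */

    regmap_update_bits(map, REG, FMT_MASK_RIGHT, 0x3 << FMT_SHIFT);
    /* both bits of the field take the new value                   */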