Gentoo Archives: gentoo-commits

From: Thomas Deutschmann <whissi@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Fri, 29 Nov 2019 21:22:01
Message-Id: 1575062472.800e4600a238a8f9088cb796b6079295c3881562.whissi@gentoo
commit: 800e4600a238a8f9088cb796b6079295c3881562
Author: Thomas Deutschmann <whissi <AT> whissi <DOT> de>
AuthorDate: Fri Nov 29 21:21:12 2019 +0000
Commit: Thomas Deutschmann <whissi <AT> gentoo <DOT> org>
CommitDate: Fri Nov 29 21:21:12 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=800e4600a238a8f9088cb796b6079295c3881562

Linux patch 5.4.1

Signed-off-by: Thomas Deutschmann <whissi <AT> whissi <DOT> de>

 1000_linux-5.4.1.patch | 3504 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 3504 insertions(+)

diff --git a/1000_linux-5.4.1.patch b/1000_linux-5.4.1.patch
new file mode 100644
index 0000000..4437c5b
--- /dev/null
+++ b/1000_linux-5.4.1.patch
@@ -0,0 +1,3504 @@
+diff --git a/Documentation/admin-guide/hw-vuln/mds.rst b/Documentation/admin-guide/hw-vuln/mds.rst
+index e3a796c0d3a2..2d19c9f4c1fe 100644
+--- a/Documentation/admin-guide/hw-vuln/mds.rst
++++ b/Documentation/admin-guide/hw-vuln/mds.rst
+@@ -265,8 +265,11 @@ time with the option "mds=". The valid arguments for this option are:
+
+ ============ =============================================================
+
+-Not specifying this option is equivalent to "mds=full".
+-
++Not specifying this option is equivalent to "mds=full". For processors
++that are affected by both TAA (TSX Asynchronous Abort) and MDS,
++specifying just "mds=off" without an accompanying "tsx_async_abort=off"
++will have no effect as the same mitigation is used for both
++vulnerabilities.
+
+ Mitigation selection guide
+ --------------------------
+diff --git a/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
+index fddbd7579c53..af6865b822d2 100644
+--- a/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
++++ b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
+@@ -174,7 +174,10 @@ the option "tsx_async_abort=". The valid arguments for this option are:
+ CPU is not vulnerable to cross-thread TAA attacks.
+ ============ =============================================================
+
+-Not specifying this option is equivalent to "tsx_async_abort=full".
++Not specifying this option is equivalent to "tsx_async_abort=full". For
++processors that are affected by both TAA and MDS, specifying just
++"tsx_async_abort=off" without an accompanying "mds=off" will have no
++effect as the same mitigation is used for both vulnerabilities.
+
+ The kernel command line also allows to control the TSX feature using the
+ parameter "tsx=" on CPUs which support TSX control. MSR_IA32_TSX_CTRL is used
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 8dee8f68fe15..9983ac73b66d 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2473,6 +2473,12 @@
+ SMT on vulnerable CPUs
+ off - Unconditionally disable MDS mitigation
+
++ On TAA-affected machines, mds=off can be prevented by
++ an active TAA mitigation as both vulnerabilities are
++ mitigated with the same mechanism so in order to disable
++ this mitigation, you need to specify tsx_async_abort=off
++ too.
++
+ Not specifying this option is equivalent to
+ mds=full.
+
+@@ -4931,6 +4937,11 @@
+ vulnerable to cross-thread TAA attacks.
+ off - Unconditionally disable TAA mitigation
+
++ On MDS-affected machines, tsx_async_abort=off can be
++ prevented by an active MDS mitigation as both vulnerabilities
++ are mitigated with the same mechanism so in order to disable
++ this mitigation, you need to specify mds=off too.
++
+ Not specifying this option is equivalent to
+ tsx_async_abort=full. On CPUs which are MDS affected
+ and deploy MDS mitigation, TAA mitigation is not
+diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
+index ae661e65354e..f9499b20d840 100644
+--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
++++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
+@@ -81,6 +81,12 @@ Optional properties:
+ Definition: Name of external front end module used. Some valid FEM names
+ for example: "microsemi-lx5586", "sky85703-11"
+ and "sky85803" etc.
++- qcom,snoc-host-cap-8bit-quirk:
++ Usage: Optional
++ Value type: <empty>
++ Definition: Quirk specifying that the firmware expects the 8bit version
++ of the host capability QMI request
++
+
+ Example (to supply PCI based wifi block details):
+
+diff --git a/Makefile b/Makefile
+index d4d36c61940b..641a62423fd6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
+index 8561498e653c..d84d1417ddb6 100644
+--- a/arch/powerpc/include/asm/asm-prototypes.h
++++ b/arch/powerpc/include/asm/asm-prototypes.h
+@@ -152,9 +152,12 @@ void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+ /* Patch sites */
+ extern s32 patch__call_flush_count_cache;
+ extern s32 patch__flush_count_cache_return;
++extern s32 patch__flush_link_stack_return;
++extern s32 patch__call_kvm_flush_link_stack;
+ extern s32 patch__memset_nocache, patch__memcpy_nocache;
+
+ extern long flush_count_cache;
++extern long kvm_flush_link_stack;
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
+diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
+index 759597bf0fd8..ccf44c135389 100644
+--- a/arch/powerpc/include/asm/security_features.h
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -81,6 +81,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
+ // Software required to flush count cache on context switch
+ #define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull
+
++// Software required to flush link stack on context switch
++#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull
++
+
+ // Features enabled by default
+ #define SEC_FTR_DEFAULT \
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index 6467bdab8d40..3fd3ef352e3f 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -537,6 +537,7 @@ flush_count_cache:
+ /* Save LR into r9 */
+ mflr r9
+
++ // Flush the link stack
+ .rept 64
+ bl .+4
+ .endr
+@@ -546,6 +547,11 @@ flush_count_cache:
+ .balign 32
+ /* Restore LR */
+ 1: mtlr r9
++
++ // If we're just flushing the link stack, return here
++3: nop
++ patch_site 3b patch__flush_link_stack_return
++
+ li r9,0x7fff
+ mtctr r9
+
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index 7cfcb294b11c..bd91dceb7010 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -24,6 +24,7 @@ enum count_cache_flush_type {
+ COUNT_CACHE_FLUSH_HW = 0x4,
+ };
+ static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
++static bool link_stack_flush_enabled;
+
+ bool barrier_nospec_enabled;
+ static bool no_nospec;
+@@ -212,11 +213,19 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
+
+ if (ccd)
+ seq_buf_printf(&s, "Indirect branch cache disabled");
++
++ if (link_stack_flush_enabled)
++ seq_buf_printf(&s, ", Software link stack flush");
++
+ } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
+ seq_buf_printf(&s, "Mitigation: Software count cache flush");
+
+ if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
+ seq_buf_printf(&s, " (hardware accelerated)");
++
++ if (link_stack_flush_enabled)
++ seq_buf_printf(&s, ", Software link stack flush");
++
+ } else if (btb_flush_enabled) {
+ seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
+ } else {
+@@ -377,18 +386,49 @@ static __init int stf_barrier_debugfs_init(void)
+ device_initcall(stf_barrier_debugfs_init);
+ #endif /* CONFIG_DEBUG_FS */
+
++static void no_count_cache_flush(void)
++{
++ count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
++ pr_info("count-cache-flush: software flush disabled.\n");
++}
++
+ static void toggle_count_cache_flush(bool enable)
+ {
+- if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
++ if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) &&
++ !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK))
++ enable = false;
++
++ if (!enable) {
+ patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
+- count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+- pr_info("count-cache-flush: software flush disabled.\n");
++#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
++ patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP);
++#endif
++ pr_info("link-stack-flush: software flush disabled.\n");
++ link_stack_flush_enabled = false;
++ no_count_cache_flush();
+ return;
+ }
+
++ // This enables the branch from _switch to flush_count_cache
+ patch_branch_site(&patch__call_flush_count_cache,
+ (u64)&flush_count_cache, BRANCH_SET_LINK);
+
++#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
++ // This enables the branch from guest_exit_cont to kvm_flush_link_stack
++ patch_branch_site(&patch__call_kvm_flush_link_stack,
++ (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
++#endif
++
++ pr_info("link-stack-flush: software flush enabled.\n");
++ link_stack_flush_enabled = true;
++
++ // If we just need to flush the link stack, patch an early return
++ if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
++ patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR);
++ no_count_cache_flush();
++ return;
++ }
++
+ if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
+ count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
+ pr_info("count-cache-flush: full software flush sequence enabled.\n");
+@@ -407,11 +447,20 @@ void setup_count_cache_flush(void)
+ if (no_spectrev2 || cpu_mitigations_off()) {
+ if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
+ security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
+- pr_warn("Spectre v2 mitigations not under software control, can't disable\n");
++ pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");
+
+ enable = false;
+ }
+
++ /*
++ * There's no firmware feature flag/hypervisor bit to tell us we need to
++ * flush the link stack on context switch. So we set it here if we see
++ * either of the Spectre v2 mitigations that aim to protect userspace.
++ */
++ if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
++ security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
++ security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
++
+ toggle_count_cache_flush(enable);
+ }
+
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index faebcbb8c4db..0496e66aaa56 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -11,6 +11,7 @@
+ */
+
+ #include <asm/ppc_asm.h>
++#include <asm/code-patching-asm.h>
+ #include <asm/kvm_asm.h>
+ #include <asm/reg.h>
+ #include <asm/mmu.h>
+@@ -1487,6 +1488,13 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
+ 1:
+ #endif /* CONFIG_KVM_XICS */
+
++ /*
++ * Possibly flush the link stack here, before we do a blr in
++ * guest_exit_short_path.
++ */
++1: nop
++ patch_site 1b patch__call_kvm_flush_link_stack
++
+ /* If we came in through the P9 short path, go back out to C now */
+ lwz r0, STACK_SLOT_SHORT_PATH(r1)
+ cmpwi r0, 0
+@@ -1963,6 +1971,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ mtlr r0
+ blr
+
++.balign 32
++.global kvm_flush_link_stack
++kvm_flush_link_stack:
++ /* Save LR into r0 */
++ mflr r0
++
++ /* Flush the link stack. On Power8 it's up to 32 entries in size. */
++ .rept 32
++ bl .+4
++ .endr
++
++ /* And on Power9 it's up to 64. */
++BEGIN_FTR_SECTION
++ .rept 32
++ bl .+4
++ .endr
++END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
++
++ /* Restore LR */
++ mtlr r0
++ blr
++
+ kvmppc_guest_external:
+ /* External interrupt, first check for host_ipi. If this is
+ * set, we know the host wants us out so let's do it now
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index f83ca5aa8b77..f07baf0388bc 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -172,7 +172,7 @@
+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+ .if \no_user_check == 0
+ /* coming from usermode? */
+- testl $SEGMENT_RPL_MASK, PT_CS(%esp)
++ testl $USER_SEGMENT_RPL_MASK, PT_CS(%esp)
+ jz .Lend_\@
+ .endif
+ /* On user-cr3? */
+@@ -205,64 +205,76 @@
+ #define CS_FROM_ENTRY_STACK (1 << 31)
+ #define CS_FROM_USER_CR3 (1 << 30)
+ #define CS_FROM_KERNEL (1 << 29)
++#define CS_FROM_ESPFIX (1 << 28)
+
+ .macro FIXUP_FRAME
+ /*
+ * The high bits of the CS dword (__csh) are used for CS_FROM_*.
+ * Clear them in case hardware didn't do this for us.
+ */
+- andl $0x0000ffff, 3*4(%esp)
++ andl $0x0000ffff, 4*4(%esp)
+
+ #ifdef CONFIG_VM86
+- testl $X86_EFLAGS_VM, 4*4(%esp)
++ testl $X86_EFLAGS_VM, 5*4(%esp)
+ jnz .Lfrom_usermode_no_fixup_\@
+ #endif
+- testl $SEGMENT_RPL_MASK, 3*4(%esp)
++ testl $USER_SEGMENT_RPL_MASK, 4*4(%esp)
+ jnz .Lfrom_usermode_no_fixup_\@
+
+- orl $CS_FROM_KERNEL, 3*4(%esp)
++ orl $CS_FROM_KERNEL, 4*4(%esp)
+
+ /*
+ * When we're here from kernel mode; the (exception) stack looks like:
+ *
+- * 5*4(%esp) - <previous context>
+- * 4*4(%esp) - flags
+- * 3*4(%esp) - cs
+- * 2*4(%esp) - ip
+- * 1*4(%esp) - orig_eax
+- * 0*4(%esp) - gs / function
++ * 6*4(%esp) - <previous context>
++ * 5*4(%esp) - flags
++ * 4*4(%esp) - cs
++ * 3*4(%esp) - ip
++ * 2*4(%esp) - orig_eax
++ * 1*4(%esp) - gs / function
++ * 0*4(%esp) - fs
+ *
+ * Lets build a 5 entry IRET frame after that, such that struct pt_regs
+ * is complete and in particular regs->sp is correct. This gives us
+- * the original 5 enties as gap:
++ * the original 6 enties as gap:
+ *
+- * 12*4(%esp) - <previous context>
+- * 11*4(%esp) - gap / flags
+- * 10*4(%esp) - gap / cs
+- * 9*4(%esp) - gap / ip
+- * 8*4(%esp) - gap / orig_eax
+- * 7*4(%esp) - gap / gs / function
+- * 6*4(%esp) - ss
+- * 5*4(%esp) - sp
+- * 4*4(%esp) - flags
+- * 3*4(%esp) - cs
+- * 2*4(%esp) - ip
+- * 1*4(%esp) - orig_eax
+- * 0*4(%esp) - gs / function
++ * 14*4(%esp) - <previous context>
++ * 13*4(%esp) - gap / flags
++ * 12*4(%esp) - gap / cs
++ * 11*4(%esp) - gap / ip
++ * 10*4(%esp) - gap / orig_eax
++ * 9*4(%esp) - gap / gs / function
++ * 8*4(%esp) - gap / fs
++ * 7*4(%esp) - ss
++ * 6*4(%esp) - sp
++ * 5*4(%esp) - flags
++ * 4*4(%esp) - cs
++ * 3*4(%esp) - ip
++ * 2*4(%esp) - orig_eax
++ * 1*4(%esp) - gs / function
++ * 0*4(%esp) - fs
+ */
+
+ pushl %ss # ss
+ pushl %esp # sp (points at ss)
+- addl $6*4, (%esp) # point sp back at the previous context
+- pushl 6*4(%esp) # flags
+- pushl 6*4(%esp) # cs
+- pushl 6*4(%esp) # ip
+- pushl 6*4(%esp) # orig_eax
+- pushl 6*4(%esp) # gs / function
++ addl $7*4, (%esp) # point sp back at the previous context
++ pushl 7*4(%esp) # flags
++ pushl 7*4(%esp) # cs
++ pushl 7*4(%esp) # ip
++ pushl 7*4(%esp) # orig_eax
++ pushl 7*4(%esp) # gs / function
++ pushl 7*4(%esp) # fs
+ .Lfrom_usermode_no_fixup_\@:
+ .endm
+
+ .macro IRET_FRAME
++ /*
++ * We're called with %ds, %es, %fs, and %gs from the interrupted
++ * frame, so we shouldn't use them. Also, we may be in ESPFIX
++ * mode and therefore have a nonzero SS base and an offset ESP,
++ * so any attempt to access the stack needs to use SS. (except for
++ * accesses through %esp, which automatically use SS.)
++ */
+ testl $CS_FROM_KERNEL, 1*4(%esp)
+ jz .Lfinished_frame_\@
+
+@@ -276,31 +288,40 @@
+ movl 5*4(%esp), %eax # (modified) regs->sp
+
+ movl 4*4(%esp), %ecx # flags
+- movl %ecx, -4(%eax)
++ movl %ecx, %ss:-1*4(%eax)
+
+ movl 3*4(%esp), %ecx # cs
+ andl $0x0000ffff, %ecx
+- movl %ecx, -8(%eax)
++ movl %ecx, %ss:-2*4(%eax)
+
+ movl 2*4(%esp), %ecx # ip
+- movl %ecx, -12(%eax)
++ movl %ecx, %ss:-3*4(%eax)
+
+ movl 1*4(%esp), %ecx # eax
+- movl %ecx, -16(%eax)
++ movl %ecx, %ss:-4*4(%eax)
+
+ popl %ecx
+- lea -16(%eax), %esp
++ lea -4*4(%eax), %esp
+ popl %eax
+ .Lfinished_frame_\@:
+ .endm
+
+-.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0
++.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
+ cld
+ .if \skip_gs == 0
+ PUSH_GS
+ .endif
+- FIXUP_FRAME
+ pushl %fs
++
++ pushl %eax
++ movl $(__KERNEL_PERCPU), %eax
++ movl %eax, %fs
++.if \unwind_espfix > 0
++ UNWIND_ESPFIX_STACK
++.endif
++ popl %eax
++
++ FIXUP_FRAME
+ pushl %es
+ pushl %ds
+ pushl \pt_regs_ax
+@@ -313,8 +334,6 @@
+ movl $(__USER_DS), %edx
+ movl %edx, %ds
+ movl %edx, %es
+- movl $(__KERNEL_PERCPU), %edx
+- movl %edx, %fs
+ .if \skip_gs == 0
+ SET_KERNEL_GS %edx
+ .endif
+@@ -324,8 +343,8 @@
+ .endif
+ .endm
+
+-.macro SAVE_ALL_NMI cr3_reg:req
+- SAVE_ALL
++.macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0
++ SAVE_ALL unwind_espfix=\unwind_espfix
+
+ BUG_IF_WRONG_CR3
+
+@@ -357,6 +376,7 @@
+ 2: popl %es
+ 3: popl %fs
+ POP_GS \pop
++ IRET_FRAME
+ .pushsection .fixup, "ax"
+ 4: movl $0, (%esp)
+ jmp 1b
+@@ -395,7 +415,8 @@
+
+ .macro CHECK_AND_APPLY_ESPFIX
+ #ifdef CONFIG_X86_ESPFIX32
+-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
++#define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8)
++#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET
+
+ ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX
+
+@@ -1075,7 +1096,6 @@ restore_all:
+ /* Restore user state */
+ RESTORE_REGS pop=4 # skip orig_eax/error_code
+ .Lirq_return:
+- IRET_FRAME
+ /*
+ * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
+ * when returning from IPI handler and when returning from
+@@ -1128,30 +1148,43 @@ ENDPROC(entry_INT80_32)
+ * We can't call C functions using the ESPFIX stack. This code reads
+ * the high word of the segment base from the GDT and swiches to the
+ * normal stack and adjusts ESP with the matching offset.
++ *
++ * We might be on user CR3 here, so percpu data is not mapped and we can't
++ * access the GDT through the percpu segment. Instead, use SGDT to find
++ * the cpu_entry_area alias of the GDT.
+ */
+ #ifdef CONFIG_X86_ESPFIX32
+ /* fixup the stack */
+- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
+- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
++ pushl %ecx
++ subl $2*4, %esp
++ sgdt (%esp)
++ movl 2(%esp), %ecx /* GDT address */
++ /*
++ * Careful: ECX is a linear pointer, so we need to force base
++ * zero. %cs is the only known-linear segment we have right now.
++ */
++ mov %cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al /* bits 16..23 */
++ mov %cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah /* bits 24..31 */
+ shl $16, %eax
++ addl $2*4, %esp
++ popl %ecx
+ addl %esp, %eax /* the adjusted stack pointer */
+ pushl $__KERNEL_DS
+ pushl %eax
+ lss (%esp), %esp /* switch to the normal stack segment */
+ #endif
+ .endm
++
+ .macro UNWIND_ESPFIX_STACK
++ /* It's safe to clobber %eax, all other regs need to be preserved */
+ #ifdef CONFIG_X86_ESPFIX32
+ movl %ss, %eax
+ /* see if on espfix stack */
+ cmpw $__ESPFIX_SS, %ax
+- jne 27f
+- movl $__KERNEL_DS, %eax
+- movl %eax, %ds
+- movl %eax, %es
++ jne .Lno_fixup_\@
+ /* switch to normal stack */
+ FIXUP_ESPFIX_STACK
+-27:
++.Lno_fixup_\@:
+ #endif
+ .endm
+
+@@ -1341,11 +1374,6 @@ END(spurious_interrupt_bug)
+
+ #ifdef CONFIG_XEN_PV
+ ENTRY(xen_hypervisor_callback)
+- pushl $-1 /* orig_ax = -1 => not a system call */
+- SAVE_ALL
+- ENCODE_FRAME_POINTER
+- TRACE_IRQS_OFF
+-
+ /*
+ * Check to see if we got the event in the critical
+ * region in xen_iret_direct, after we've reenabled
+@@ -1353,16 +1381,17 @@ ENTRY(xen_hypervisor_callback)
+ * iret instruction's behaviour where it delivers a
+ * pending interrupt when enabling interrupts:
+ */
+- movl PT_EIP(%esp), %eax
+- cmpl $xen_iret_start_crit, %eax
++ cmpl $xen_iret_start_crit, (%esp)
+ jb 1f
+- cmpl $xen_iret_end_crit, %eax
++ cmpl $xen_iret_end_crit, (%esp)
+ jae 1f
+-
+- jmp xen_iret_crit_fixup
+-
+-ENTRY(xen_do_upcall)
+-1: mov %esp, %eax
++ call xen_iret_crit_fixup
++1:
++ pushl $-1 /* orig_ax = -1 => not a system call */
++ SAVE_ALL
++ ENCODE_FRAME_POINTER
++ TRACE_IRQS_OFF
++ mov %esp, %eax
+ call xen_evtchn_do_upcall
+ #ifndef CONFIG_PREEMPTION
+ call xen_maybe_preempt_hcall
+@@ -1449,10 +1478,9 @@ END(page_fault)
+
+ common_exception_read_cr2:
+ /* the function address is in %gs's slot on the stack */
+- SAVE_ALL switch_stacks=1 skip_gs=1
++ SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
+
+ ENCODE_FRAME_POINTER
+- UNWIND_ESPFIX_STACK
+
+ /* fixup %gs */
+ GS_TO_REG %ecx
+@@ -1474,9 +1502,8 @@ END(common_exception_read_cr2)
+
+ common_exception:
+ /* the function address is in %gs's slot on the stack */
+- SAVE_ALL switch_stacks=1 skip_gs=1
++ SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
+ ENCODE_FRAME_POINTER
+- UNWIND_ESPFIX_STACK
+
+ /* fixup %gs */
+ GS_TO_REG %ecx
+@@ -1515,6 +1542,10 @@ ENTRY(nmi)
+ ASM_CLAC
+
+ #ifdef CONFIG_X86_ESPFIX32
++ /*
++ * ESPFIX_SS is only ever set on the return to user path
++ * after we've switched to the entry stack.
++ */
+ pushl %eax
+ movl %ss, %eax
+ cmpw $__ESPFIX_SS, %ax
+@@ -1550,6 +1581,11 @@ ENTRY(nmi)
+ movl %ebx, %esp
+
+ .Lnmi_return:
++#ifdef CONFIG_X86_ESPFIX32
++ testl $CS_FROM_ESPFIX, PT_CS(%esp)
++ jnz .Lnmi_from_espfix
++#endif
++
+ CHECK_AND_APPLY_ESPFIX
+ RESTORE_ALL_NMI cr3_reg=%edi pop=4
+ jmp .Lirq_return
+@@ -1557,23 +1593,42 @@ ENTRY(nmi)
+ #ifdef CONFIG_X86_ESPFIX32
+ .Lnmi_espfix_stack:
+ /*
+- * create the pointer to lss back
++ * Create the pointer to LSS back
+ */
+ pushl %ss
+ pushl %esp
+ addl $4, (%esp)
+- /* copy the iret frame of 12 bytes */
+- .rept 3
+- pushl 16(%esp)
+- .endr
+- pushl %eax
+- SAVE_ALL_NMI cr3_reg=%edi
++
++ /* Copy the (short) IRET frame */
++ pushl 4*4(%esp) # flags
++ pushl 4*4(%esp) # cs
++ pushl 4*4(%esp) # ip
++
++ pushl %eax # orig_ax
++
++ SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1
+ ENCODE_FRAME_POINTER
+- FIXUP_ESPFIX_STACK # %eax == %esp
++
++ /* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */
++ xorl $(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp)
++
+ xorl %edx, %edx # zero error code
+- call do_nmi
++ movl %esp, %eax # pt_regs pointer
++ jmp .Lnmi_from_sysenter_stack
++
++.Lnmi_from_espfix:
+ RESTORE_ALL_NMI cr3_reg=%edi
+- lss 12+4(%esp), %esp # back to espfix stack
++ /*
++ * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to
++ * fix up the gap and long frame:
++ *
++ * 3 - original frame (exception)
++ * 2 - ESPFIX block (above)
++ * 6 - gap (FIXUP_FRAME)
++ * 5 - long frame (FIXUP_FRAME)
++ * 1 - orig_ax
++ */
++ lss (1+5+6)*4(%esp), %esp # back to espfix stack
+ jmp .Lirq_return
+ #endif
+ END(nmi)
+diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
+index 8348f7d69fd5..ea866c7bf31d 100644
+--- a/arch/x86/include/asm/cpu_entry_area.h
++++ b/arch/x86/include/asm/cpu_entry_area.h
+@@ -78,8 +78,12 @@ struct cpu_entry_area {
+
+ /*
+ * The GDT is just below entry_stack and thus serves (on x86_64) as
+- * a a read-only guard page.
++ * a read-only guard page. On 32-bit the GDT must be writeable, so
++ * it needs an extra guard page.
+ */
++#ifdef CONFIG_X86_32
++ char guard_entry_stack[PAGE_SIZE];
++#endif
+ struct entry_stack_page entry_stack_page;
+
+ /*
+@@ -94,7 +98,6 @@ struct cpu_entry_area {
+ */
+ struct cea_exception_stacks estacks;
+ #endif
+-#ifdef CONFIG_CPU_SUP_INTEL
+ /*
+ * Per CPU debug store for Intel performance monitoring. Wastes a
+ * full page at the moment.
+@@ -105,11 +108,13 @@ struct cpu_entry_area {
+ * Reserve enough fixmap PTEs.
+ */
+ struct debug_store_buffers cpu_debug_buffers;
+-#endif
+ };
+
+-#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area))
+-#define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
++#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area))
++#define CPU_ENTRY_AREA_ARRAY_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
++
++/* Total size includes the readonly IDT mapping page as well: */
++#define CPU_ENTRY_AREA_TOTAL_SIZE (CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
+
+ DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+ DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
+@@ -117,13 +122,14 @@ DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
+ extern void setup_cpu_entry_areas(void);
+ extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
+
++/* Single page reserved for the readonly IDT mapping: */
+ #define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE
+ #define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
+
+ #define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT)
+
+ #define CPU_ENTRY_AREA_MAP_SIZE \
+- (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)
++ (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
+
+ extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
+
+diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
+index b0bc0fff5f1f..1636eb8e5a5b 100644
+--- a/arch/x86/include/asm/pgtable_32_types.h
++++ b/arch/x86/include/asm/pgtable_32_types.h
+@@ -44,11 +44,11 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
+ * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c
+ * to avoid include recursion hell
+ */
+-#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40)
++#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 39)
+
+-#define CPU_ENTRY_AREA_BASE \
+- ((FIXADDR_TOT_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) \
+- & PMD_MASK)
++/* The +1 is for the readonly IDT page: */
++#define CPU_ENTRY_AREA_BASE \
++ ((FIXADDR_TOT_START - PAGE_SIZE*(CPU_ENTRY_AREA_PAGES+1)) & PMD_MASK)
+
+ #define LDT_BASE_ADDR \
+ ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
+diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
+index ac3892920419..6669164abadc 100644
+--- a/arch/x86/include/asm/segment.h
++++ b/arch/x86/include/asm/segment.h
+@@ -31,6 +31,18 @@
+ */
+ #define SEGMENT_RPL_MASK 0x3
+
++/*
++ * When running on Xen PV, the actual privilege level of the kernel is 1,
++ * not 0. Testing the Requested Privilege Level in a segment selector to
++ * determine whether the context is user mode or kernel mode with
++ * SEGMENT_RPL_MASK is wrong because the PV kernel's privilege level
++ * matches the 0x3 mask.
++ *
++ * Testing with USER_SEGMENT_RPL_MASK is valid for both native and Xen PV
++ * kernels because privilege level 2 is never used.
++ */
++#define USER_SEGMENT_RPL_MASK 0x2
++
+ /* User mode is privilege level 3: */
+ #define USER_RPL 0x3
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 4c7b0fa15a19..8bf64899f56a 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -39,6 +39,7 @@ static void __init spectre_v2_select_mitigation(void);
+ static void __init ssb_select_mitigation(void);
+ static void __init l1tf_select_mitigation(void);
+ static void __init mds_select_mitigation(void);
++static void __init mds_print_mitigation(void);
+ static void __init taa_select_mitigation(void);
+
+ /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
+@@ -108,6 +109,12 @@ void __init check_bugs(void)
+ mds_select_mitigation();
+ taa_select_mitigation();
+
++ /*
++ * As MDS and TAA mitigations are inter-related, print MDS
++ * mitigation until after TAA mitigation selection is done.
++ */
++ mds_print_mitigation();
++
+ arch_smt_update();
+
+ #ifdef CONFIG_X86_32
+@@ -245,6 +252,12 @@ static void __init mds_select_mitigation(void)
+ (mds_nosmt || cpu_mitigations_auto_nosmt()))
+ cpu_smt_disable(false);
+ }
++}
++
++static void __init mds_print_mitigation(void)
++{
++ if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
++ return;
+
+ pr_info("%s\n", mds_strings[mds_mitigation]);
+ }
+@@ -304,8 +317,12 @@ static void __init taa_select_mitigation(void)
+ return;
+ }
+
+- /* TAA mitigation is turned off on the cmdline (tsx_async_abort=off) */
+- if (taa_mitigation == TAA_MITIGATION_OFF)
++ /*
++ * TAA mitigation via VERW is turned off if both
++ * tsx_async_abort=off and mds=off are specified.
++ */
++ if (taa_mitigation == TAA_MITIGATION_OFF &&
++ mds_mitigation == MDS_MITIGATION_OFF)
+ goto out;
+
+ if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
+@@ -339,6 +356,15 @@ static void __init taa_select_mitigation(void)
+ if (taa_nosmt || cpu_mitigations_auto_nosmt())
+ cpu_smt_disable(false);
+
++ /*
++ * Update MDS mitigation, if necessary, as the mds_user_clear is
++ * now enabled for TAA mitigation.
++ */
++ if (mds_mitigation == MDS_MITIGATION_OFF &&
++ boot_cpu_has_bug(X86_BUG_MDS)) {
++ mds_mitigation = MDS_MITIGATION_FULL;
++ mds_select_mitigation();
++ }
+ out:
+ pr_info("%s\n", taa_strings[taa_mitigation]);
+ }
+diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
+index 0b8cedb20d6d..d5c9b13bafdf 100644
+--- a/arch/x86/kernel/doublefault.c
++++ b/arch/x86/kernel/doublefault.c
+@@ -65,6 +65,9 @@ struct x86_hw_tss doublefault_tss __cacheline_aligned = {
+ .ss = __KERNEL_DS,
+ .ds = __USER_DS,
+ .fs = __KERNEL_PERCPU,
++#ifndef CONFIG_X86_32_LAZY_GS
++ .gs = __KERNEL_STACK_CANARY,
++#endif
+
+ .__cr3 = __pa_nodebug(swapper_pg_dir),
+ };
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index 30f9cb2c0b55..2e6a0676c1f4 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -571,6 +571,16 @@ ENTRY(initial_page_table)
+ # error "Kernel PMDs should be 1, 2 or 3"
+ # endif
+ .align PAGE_SIZE /* needs to be page-sized too */
++
++#ifdef CONFIG_PAGE_TABLE_ISOLATION
++ /*
++ * PTI needs another page so sync_initial_pagetable() works correctly
++ * and does not scribble over the data which is placed behind the
++ * actual initial_page_table. See clone_pgd_range().
++ */
++ .fill 1024, 4, 0
++#endif
++
+ #endif
+
+ .data
+diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
+index 752ad11d6868..d9643647a9ce 100644
+--- a/arch/x86/mm/cpu_entry_area.c
++++ b/arch/x86/mm/cpu_entry_area.c
+@@ -178,7 +178,9 @@ static __init void setup_cpu_entry_area_ptes(void)
+ #ifdef CONFIG_X86_32
+ unsigned long start, end;
+
+- BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
++ /* The +1 is for the readonly IDT: */
++ BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
++ BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
+ BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
+
+ start = CPU_ENTRY_AREA_BASE;
+diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
+index b02a36b2c14f..a42015b305f4 100644
+--- a/arch/x86/tools/gen-insn-attr-x86.awk
++++ b/arch/x86/tools/gen-insn-attr-x86.awk
+@@ -69,7 +69,7 @@ BEGIN {
+
+ lprefix1_expr = "\\((66|!F3)\\)"
+ lprefix2_expr = "\\(F3\\)"
+- lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
++ lprefix3_expr = "\\((F2|!F3|66&F2)\\)"
+ lprefix_expr = "\\((66|F2|F3)\\)"
+ max_lprefix = 4
+
+@@ -257,7 +257,7 @@ function convert_operands(count,opnd, i,j,imm,mod)
+ return add_flags(imm, mod)
+ }
+
+-/^[0-9a-f]+\:/ {
++/^[0-9a-f]+:/ {
+ if (NR == 1)
+ next
+ # get index
+diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
+index c15db060a242..cd177772fe4d 100644
+--- a/arch/x86/xen/xen-asm_32.S
++++ b/arch/x86/xen/xen-asm_32.S
+@@ -126,10 +126,9 @@ hyper_iret:
+ .globl xen_iret_start_crit, xen_iret_end_crit
+
+ /*
+- * This is called by xen_hypervisor_callback in entry.S when it sees
++ * This is called by xen_hypervisor_callback in entry_32.S when it sees
+ * that the EIP at the time of interrupt was between
+- * xen_iret_start_crit and xen_iret_end_crit. We're passed the EIP in
+- * %eax so we can do a more refined determination of what to do.
++ * xen_iret_start_crit and xen_iret_end_crit.
+ *
+ * The stack format at this point is:
+ * ----------------
+@@ -138,70 +137,46 @@ hyper_iret:
+ * eflags } outer exception info
+ * cs }
+ * eip }
+- * ---------------- <- edi (copy dest)
+- * eax : outer eax if it hasn't been restored
+ * ----------------
+- * eflags } nested exception info
+- * cs } (no ss/esp because we're nested
+- * eip } from the same ring)
+- * orig_eax }<- esi (copy src)
+- * - - - - - - - -
+- * fs }
+- * es }
+- * ds } SAVE_ALL state
+- * eax }
+- * : :
+- * ebx }<- esp
++ * eax : outer eax if it hasn't been restored
+ * ----------------
++ * eflags }
++ * cs } nested exception info
++ * eip }
++ * return address : (into xen_hypervisor_callback)
+ *
+- * In order to deliver the nested exception properly, we need to shift
+- * everything from the return addr up to the error code so it sits
+- * just under the outer exception info. This means that when we
+- * handle the exception, we do it in the context of the outer
+- * exception rather than starting a new one.
++ * In order to deliver the nested exception properly, we need to discard the
++ * nested exception frame such that when we handle the exception, we do it
++ * in the context of the outer exception rather than starting a new one.
+ *
+- * The only caveat is that if the outer eax hasn't been restored yet
+- * (ie, it's still on stack), we need to insert its value into the
+- * SAVE_ALL state before going on, since it's usermode state which we
+- * eventually need to restore.
++ * The only caveat is that if the outer eax hasn't been restored yet (i.e.
++ * it's still on stack), we need to restore its value here.
+ */
+ ENTRY(xen_iret_crit_fixup)
+ /*
+ * Paranoia: Make sure we're really coming from kernel space.
+ * One could imagine a case where userspace jumps into the
+ * critical range address, but just before the CPU delivers a
+- * GP, it decides to deliver an interrupt instead. Unlikely?
+- * Definitely. Easy to avoid? Yes. The Intel documents
+- * explicitly say that the reported EIP for a bad jump is the
+- * jump instruction itself, not the destination, but some
+- * virtual environments get this wrong.
++ * PF, it decides to deliver an interrupt instead. Unlikely?
++ * Definitely. Easy to avoid? Yes.
+ */
+- movl PT_CS(%esp), %ecx
+- andl $SEGMENT_RPL_MASK, %ecx
+- cmpl $USER_RPL, %ecx
+- je 2f
+-
+- lea PT_ORIG_EAX(%esp), %esi
+- lea PT_EFLAGS(%esp), %edi
++ testb $2, 2*4(%esp) /* nested CS */
++ jnz 2f
+
+ /*
+ * If eip is before iret_restore_end then stack
+ * hasn't been restored yet.
+ */
+- cmp $iret_restore_end, %eax
++ cmpl $iret_restore_end, 1*4(%esp)
+ jae 1f
+
+- movl 0+4(%edi), %eax /* copy EAX (just above top of frame) */
+- movl %eax, PT_EAX(%esp)
++ movl 4*4(%esp), %eax /* load outer EAX */
++ ret $4*4 /* discard nested EIP, CS, and EFLAGS as
++ * well as the just restored EAX */
+
+- lea ESP_OFFSET(%edi), %edi /* move dest up over saved regs */
+-
+- /* set up the copy */
+-1: std
+- mov $PT_EIP / 4, %ecx /* saved regs up to orig_eax */
+- rep movsl
+- cld
+-
+- lea 4(%edi), %esp /* point esp to new frame */
+-2: jmp xen_do_upcall
++1:
++ ret $3*4 /* discard nested EIP, CS, and EFLAGS */
+
++2:
++ ret
++END(xen_iret_crit_fixup)
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 19e75999bb15..57532465fb83 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -1032,14 +1032,15 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
+ sockfd_put(sock);
+ return -ENOMEM;
+ }
++
++ config->socks = socks;
++
+ nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
+ if (!nsock) {
+ sockfd_put(sock);
+ return -ENOMEM;
+ }
+
+- config->socks = socks;
+-
+ nsock->fallback_index = -1;
+ nsock->dead = false;
+ mutex_init(&nsock->tx_lock);
+diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
+index fe2e307009f4..cf4a56095817 100644
+--- a/drivers/bluetooth/hci_bcsp.c
++++ b/drivers/bluetooth/hci_bcsp.c
+@@ -591,6 +591,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
+ if (*ptr == 0xc0) {
+ BT_ERR("Short BCSP packet");
+ kfree_skb(bcsp->rx_skb);
++ bcsp->rx_skb = NULL;
+ bcsp->rx_state = BCSP_W4_PKT_START;
+ bcsp->rx_count = 0;
+ } else
+@@ -606,6 +607,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
+ bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
+ BT_ERR("Error in BCSP hdr checksum");
+ kfree_skb(bcsp->rx_skb);
++ bcsp->rx_skb = NULL;
+ bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
+ bcsp->rx_count = 0;
+ continue;
+@@ -630,6 +632,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
+ bscp_get_crc(bcsp));
+
+ kfree_skb(bcsp->rx_skb);
++ bcsp->rx_skb = NULL;
+ bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
+ bcsp->rx_count = 0;
+ continue;
+diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
+index 285706618f8a..d9a4c6c691e0 100644
+--- a/drivers/bluetooth/hci_ll.c
++++ b/drivers/bluetooth/hci_ll.c
+@@ -621,13 +621,6 @@ static int ll_setup(struct hci_uart *hu)
+
+ serdev_device_set_flow_control(serdev, true);
+
+- if (hu->oper_speed)
+- speed = hu->oper_speed;
+- else if (hu->proto->oper_speed)
+- speed = hu->proto->oper_speed;
+- else
+- speed = 0;
+-
+ do {
+ /* Reset the Bluetooth device */
+ gpiod_set_value_cansleep(lldev->enable_gpio, 0);
+@@ -639,20 +632,6 @@ static int ll_setup(struct hci_uart *hu)
+ return err;
+ }
+
+- if (speed) {
+- __le32 speed_le = cpu_to_le32(speed);
+- struct sk_buff *skb;
+-
+- skb = __hci_cmd_sync(hu->hdev,
+- HCI_VS_UPDATE_UART_HCI_BAUDRATE,
+- sizeof(speed_le), &speed_le,
+- HCI_INIT_TIMEOUT);
+- if (!IS_ERR(skb)) {
+- kfree_skb(skb);
+- serdev_device_set_baudrate(serdev, speed);
+- }
+- }
+-
+ err = download_firmware(lldev);
+ if (!err)
+ break;
+@@ -677,7 +656,25 @@ static int ll_setup(struct hci_uart *hu)
+ }
+
+ /* Operational speed if any */
++ if (hu->oper_speed)
++ speed = hu->oper_speed;
++ else if (hu->proto->oper_speed)
++ speed = hu->proto->oper_speed;
++ else
++ speed = 0;
++
++ if (speed) {
++ __le32 speed_le = cpu_to_le32(speed);
++ struct sk_buff *skb;
+
++ skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE,
++ sizeof(speed_le), &speed_le,
++ HCI_INIT_TIMEOUT);
++ if (!IS_ERR(skb)) {
++ kfree_skb(skb);
++ serdev_device_set_baudrate(serdev, speed);
++ }
++ }
+
+ return 0;
+ }
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 48a224a6b178..bc19d6c16aaa 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -933,6 +933,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
+ struct freq_attr *fattr = to_attr(attr);
+ ssize_t ret;
+
++ if (!fattr->show)
++ return -EIO;
++
+ down_read(&policy->rwsem);
+ ret = fattr->show(policy, buf);
+ up_read(&policy->rwsem);
+@@ -947,6 +950,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
+ struct freq_attr *fattr = to_attr(attr);
+ ssize_t ret = -EINVAL;
+
++ if (!fattr->store)
++ return -EIO;
++
+ /*
+ * cpus_read_trylock() is used here to work around a circular lock
+ * dependency problem with respect to the cpufreq_register_driver().
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index f87f6495652f..eb9782fc93fe 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -2700,21 +2700,18 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ }
+
+ ret = -ENOMEM;
+- cc->io_queue = alloc_workqueue("kcryptd_io/%s",
+- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
+- 1, devname);
++ cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
+ if (!cc->io_queue) {
+ ti->error = "Couldn't create kcryptd io queue";
+ goto bad;
+ }
+
+ if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
+- cc->crypt_queue = alloc_workqueue("kcryptd/%s",
+- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
++ cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
+ 1, devname);
+ else
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s",
+- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
++ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ num_online_cpus(), devname);
+ if (!cc->crypt_queue) {
+ ti->error = "Couldn't create kcryptd queue";
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 299c7b1c9718..8a62c920bb65 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -191,7 +191,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
+
+ out_free_pages:
+ while (--j >= 0)
+- resync_free_pages(&rps[j * 2]);
++ resync_free_pages(&rps[j]);
+
+ j = 0;
+ out_free_bio:
+diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
+index 003319d7816d..31f78d6a05a4 100644
+--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
++++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
+@@ -796,7 +796,11 @@ static int vivid_thread_vid_cap(void *data)
+ if (kthread_should_stop())
+ break;
+
+- mutex_lock(&dev->mutex);
++ if (!mutex_trylock(&dev->mutex)) {
++ schedule_timeout_uninterruptible(1);
++ continue;
++ }
++
+ cur_jiffies = jiffies;
+ if (dev->cap_seq_resync) {
+ dev->jiffies_vid_cap = cur_jiffies;
+@@ -956,8 +960,6 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
+
+ /* shutdown control thread */
+ vivid_grab_controls(dev, false);
+- mutex_unlock(&dev->mutex);
+ kthread_stop(dev->kthread_vid_cap);
+ dev->kthread_vid_cap = NULL;
+- mutex_lock(&dev->mutex);
+ }
+diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
+index ce5bcda2348c..1e165a6a2207 100644
+--- a/drivers/media/platform/vivid/vivid-kthread-out.c
++++ b/drivers/media/platform/vivid/vivid-kthread-out.c
+@@ -143,7 +143,11 @@ static int vivid_thread_vid_out(void *data)
+ if (kthread_should_stop())
+ break;
+
+- mutex_lock(&dev->mutex);
++ if (!mutex_trylock(&dev->mutex)) {
++ schedule_timeout_uninterruptible(1);
++ continue;
++ }
++
+ cur_jiffies = jiffies;
+ if (dev->out_seq_resync) {
+ dev->jiffies_vid_out = cur_jiffies;
+@@ -301,8 +305,6 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
+
+ /* shutdown control thread */
+ vivid_grab_controls(dev, false);
+- mutex_unlock(&dev->mutex);
+ kthread_stop(dev->kthread_vid_out);
+ dev->kthread_vid_out = NULL;
+- mutex_lock(&dev->mutex);
+ }
+diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
+index 9acc709b0740..2b7522e16efc 100644
+--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
++++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
+@@ -141,7 +141,11 @@ static int vivid_thread_sdr_cap(void *data)
+ if (kthread_should_stop())
+ break;
+
+- mutex_lock(&dev->mutex);
++ if (!mutex_trylock(&dev->mutex)) {
++ schedule_timeout_uninterruptible(1);
++ continue;
++ }
++
+ cur_jiffies = jiffies;
+ if (dev->sdr_cap_seq_resync) {
+ dev->jiffies_sdr_cap = cur_jiffies;
+@@ -303,10 +307,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
+ }
+
+ /* shutdown control thread */
+- mutex_unlock(&dev->mutex);
+ kthread_stop(dev->kthread_sdr_cap);
+ dev->kthread_sdr_cap = NULL;
+- mutex_lock(&dev->mutex);
+ }
+
+ static void sdr_cap_buf_request_complete(struct vb2_buffer *vb)
+diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
+index 8cbaa0c998ed..2d030732feac 100644
+--- a/drivers/media/platform/vivid/vivid-vid-cap.c
++++ b/drivers/media/platform/vivid/vivid-vid-cap.c
+@@ -223,9 +223,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
+ if (vb2_is_streaming(&dev->vb_vid_out_q))
+ dev->can_loop_video = vivid_vid_can_loop(dev);
+
+- if (dev->kthread_vid_cap)
+- return 0;
+-
+ dev->vid_cap_seq_count = 0;
+ dprintk(dev, 1, "%s\n", __func__);
+ for (i = 0; i < VIDEO_MAX_FRAME; i++)
+diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
+index 148b663a6075..a0364ac497f9 100644
+--- a/drivers/media/platform/vivid/vivid-vid-out.c
++++ b/drivers/media/platform/vivid/vivid-vid-out.c
+@@ -161,9 +161,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
+ if (vb2_is_streaming(&dev->vb_vid_cap_q))
+ dev->can_loop_video = vivid_vid_can_loop(dev);
+
+- if (dev->kthread_vid_out)
+- return 0;
+-
+ dev->vid_out_seq_count = 0;
+ dprintk(dev, 1, "%s\n", __func__);
+ if (dev->start_streaming_error) {
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index 37a850421fbb..c683a244b9fa 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -1598,8 +1598,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
+
+ /* send touchscreen events through input subsystem if touchpad data */
+- if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 &&
+- buf[7] == 0x86) {
++ if (ictx->touch && len == 8 && buf[7] == 0x86) {
+ imon_touch_event(ictx, buf);
+ return;
+
1390 +diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
1391 +index 3fc9829a9233..f9616158bcf4 100644
1392 +--- a/drivers/media/rc/mceusb.c
1393 ++++ b/drivers/media/rc/mceusb.c
1394 +@@ -564,7 +564,7 @@ static int mceusb_cmd_datasize(u8 cmd, u8 subcmd)
1395 + datasize = 4;
1396 + break;
1397 + case MCE_CMD_G_REVISION:
1398 +- datasize = 2;
1399 ++ datasize = 4;
1400 + break;
1401 + case MCE_RSP_EQWAKESUPPORT:
1402 + case MCE_RSP_GETWAKESOURCE:
1403 +@@ -600,14 +600,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
1404 + char *inout;
1405 + u8 cmd, subcmd, *data;
1406 + struct device *dev = ir->dev;
1407 +- int start, skip = 0;
1408 + u32 carrier, period;
1409 +
1410 +- /* skip meaningless 0xb1 0x60 header bytes on orig receiver */
1411 +- if (ir->flags.microsoft_gen1 && !out && !offset)
1412 +- skip = 2;
1413 +-
1414 +- if (len <= skip)
1415 ++ if (offset < 0 || offset >= buf_len)
1416 + return;
1417 +
1418 + dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)",
1419 +@@ -616,11 +611,32 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
1420 +
1421 + inout = out ? "Request" : "Got";
1422 +
1423 +- start = offset + skip;
1424 +- cmd = buf[start] & 0xff;
1425 +- subcmd = buf[start + 1] & 0xff;
1426 +- data = buf + start + 2;
1427 ++ cmd = buf[offset];
1428 ++ subcmd = (offset + 1 < buf_len) ? buf[offset + 1] : 0;
1429 ++ data = &buf[offset] + 2;
1430 ++
1431 ++ /* Trace meaningless 0xb1 0x60 header bytes on original receiver */
1432 ++ if (ir->flags.microsoft_gen1 && !out && !offset) {
1433 ++ dev_dbg(dev, "MCE gen 1 header");
1434 ++ return;
1435 ++ }
1436 ++
1437 ++ /* Trace IR data header or trailer */
1438 ++ if (cmd != MCE_CMD_PORT_IR &&
1439 ++ (cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA) {
1440 ++ if (cmd == MCE_IRDATA_TRAILER)
1441 ++ dev_dbg(dev, "End of raw IR data");
1442 ++ else
1443 ++ dev_dbg(dev, "Raw IR data, %d pulse/space samples",
1444 ++ cmd & MCE_PACKET_LENGTH_MASK);
1445 ++ return;
1446 ++ }
1447 ++
1448 ++ /* Unexpected end of buffer? */
1449 ++ if (offset + len > buf_len)
1450 ++ return;
1451 +
1452 ++ /* Decode MCE command/response */
1453 + switch (cmd) {
1454 + case MCE_CMD_NULL:
1455 + if (subcmd == MCE_CMD_NULL)
1456 +@@ -644,7 +660,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
1457 + dev_dbg(dev, "Get hw/sw rev?");
1458 + else
1459 + dev_dbg(dev, "hw/sw rev %*ph",
1460 +- 4, &buf[start + 2]);
1461 ++ 4, &buf[offset + 2]);
1462 + break;
1463 + case MCE_CMD_RESUME:
1464 + dev_dbg(dev, "Device resume requested");
1465 +@@ -746,13 +762,6 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
1466 + default:
1467 + break;
1468 + }
1469 +-
1470 +- if (cmd == MCE_IRDATA_TRAILER)
1471 +- dev_dbg(dev, "End of raw IR data");
1472 +- else if ((cmd != MCE_CMD_PORT_IR) &&
1473 +- ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA))
1474 +- dev_dbg(dev, "Raw IR data, %d pulse/space samples",
1475 +- cmd & MCE_PACKET_LENGTH_MASK);
1476 + #endif
1477 + }
1478 +
1479 +@@ -1136,32 +1145,62 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
1480 + }
1481 +
1482 + /*
1483 ++ * Handle PORT_SYS/IR command response received from the MCE device.
1484 ++ *
1485 ++ * Assumes single response with all its data (not truncated)
1486 ++ * in buf_in[]. The response itself determines its total length
1487 ++ * (mceusb_cmd_datasize() + 2) and hence the minimum size of buf_in[].
1488 ++ *
1489 + * We don't do anything but print debug spew for many of the command bits
1490 + * we receive from the hardware, but some of them are useful information
1491 + * we want to store so that we can use them.
1492 + */
1493 +-static void mceusb_handle_command(struct mceusb_dev *ir, int index)
1494 ++static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in)
1495 + {
1496 ++ u8 cmd = buf_in[0];
1497 ++ u8 subcmd = buf_in[1];
1498 ++ u8 *hi = &buf_in[2]; /* read only when required */
1499 ++ u8 *lo = &buf_in[3]; /* read only when required */
1500 + struct ir_raw_event rawir = {};
1501 +- u8 hi = ir->buf_in[index + 1] & 0xff;
1502 +- u8 lo = ir->buf_in[index + 2] & 0xff;
1503 + u32 carrier_cycles;
1504 + u32 cycles_fix;
1505 +
1506 +- switch (ir->buf_in[index]) {
1507 +- /* the one and only 5-byte return value command */
1508 +- case MCE_RSP_GETPORTSTATUS:
1509 +- if ((ir->buf_in[index + 4] & 0xff) == 0x00)
1510 +- ir->txports_cabled |= 1 << hi;
1511 +- break;
1512 ++ if (cmd == MCE_CMD_PORT_SYS) {
1513 ++ switch (subcmd) {
1514 ++ /* the one and only 5-byte return value command */
1515 ++ case MCE_RSP_GETPORTSTATUS:
1516 ++ if (buf_in[5] == 0)
1517 ++ ir->txports_cabled |= 1 << *hi;
1518 ++ break;
1519 ++
1520 ++ /* 1-byte return value commands */
1521 ++ case MCE_RSP_EQEMVER:
1522 ++ ir->emver = *hi;
1523 ++ break;
1524 ++
1525 ++ /* No return value commands */
1526 ++ case MCE_RSP_CMD_ILLEGAL:
1527 ++ ir->need_reset = true;
1528 ++ break;
1529 ++
1530 ++ default:
1531 ++ break;
1532 ++ }
1533 ++
1534 ++ return;
1535 ++ }
1536 +
1537 ++ if (cmd != MCE_CMD_PORT_IR)
1538 ++ return;
1539 ++
1540 ++ switch (subcmd) {
1541 + /* 2-byte return value commands */
1542 + case MCE_RSP_EQIRTIMEOUT:
1543 +- ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT);
1544 ++ ir->rc->timeout = US_TO_NS((*hi << 8 | *lo) * MCE_TIME_UNIT);
1545 + break;
1546 + case MCE_RSP_EQIRNUMPORTS:
1547 +- ir->num_txports = hi;
1548 +- ir->num_rxports = lo;
1549 ++ ir->num_txports = *hi;
1550 ++ ir->num_rxports = *lo;
1551 + break;
1552 + case MCE_RSP_EQIRRXCFCNT:
1553 + /*
1554 +@@ -1174,7 +1213,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
1555 + */
1556 + if (ir->carrier_report_enabled && ir->learning_active &&
1557 + ir->pulse_tunit > 0) {
1558 +- carrier_cycles = (hi << 8 | lo);
1559 ++ carrier_cycles = (*hi << 8 | *lo);
1560 + /*
1561 + * Adjust carrier cycle count by adding
1562 + * 1 missed count per pulse "on"
1563 +@@ -1192,24 +1231,24 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
1564 + break;
1565 +
1566 + /* 1-byte return value commands */
1567 +- case MCE_RSP_EQEMVER:
1568 +- ir->emver = hi;
1569 +- break;
1570 + case MCE_RSP_EQIRTXPORTS:
1571 +- ir->tx_mask = hi;
1572 ++ ir->tx_mask = *hi;
1573 + break;
1574 + case MCE_RSP_EQIRRXPORTEN:
1575 +- ir->learning_active = ((hi & 0x02) == 0x02);
1576 +- if (ir->rxports_active != hi) {
1577 ++ ir->learning_active = ((*hi & 0x02) == 0x02);
1578 ++ if (ir->rxports_active != *hi) {
1579 + dev_info(ir->dev, "%s-range (0x%x) receiver active",
1580 +- ir->learning_active ? "short" : "long", hi);
1581 +- ir->rxports_active = hi;
1582 ++ ir->learning_active ? "short" : "long", *hi);
1583 ++ ir->rxports_active = *hi;
1584 + }
1585 + break;
1586 ++
1587 ++ /* No return value commands */
1588 + case MCE_RSP_CMD_ILLEGAL:
1589 + case MCE_RSP_TX_TIMEOUT:
1590 + ir->need_reset = true;
1591 + break;
1592 ++
1593 + default:
1594 + break;
1595 + }
1596 +@@ -1235,7 +1274,8 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
1597 + ir->rem = mceusb_cmd_datasize(ir->cmd, ir->buf_in[i]);
1598 + mceusb_dev_printdata(ir, ir->buf_in, buf_len, i - 1,
1599 + ir->rem + 2, false);
1600 +- mceusb_handle_command(ir, i);
1601 ++ if (i + ir->rem < buf_len)
1602 ++ mceusb_handle_command(ir, &ir->buf_in[i - 1]);
1603 + ir->parser_state = CMD_DATA;
1604 + break;
1605 + case PARSE_IRDATA:
1606 +@@ -1264,15 +1304,22 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
1607 + ir->rem--;
1608 + break;
1609 + case CMD_HEADER:
1610 +- /* decode mce packets of the form (84),AA,BB,CC,DD */
1611 +- /* IR data packets can span USB messages - rem */
1612 + ir->cmd = ir->buf_in[i];
1613 + if ((ir->cmd == MCE_CMD_PORT_IR) ||
1614 + ((ir->cmd & MCE_PORT_MASK) !=
1615 + MCE_COMMAND_IRDATA)) {
1616 ++ /*
1617 ++ * got PORT_SYS, PORT_IR, or unknown
1618 ++ * command response prefix
1619 ++ */
1620 + ir->parser_state = SUBCMD;
1621 + continue;
1622 + }
1623 ++ /*
1624 ++ * got IR data prefix (0x80 + num_bytes)
1625 ++ * decode MCE packets of the form {0x83, AA, BB, CC}
1626 ++ * IR data packets can span USB messages
1627 ++ */
1628 + ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK);
1629 + mceusb_dev_printdata(ir, ir->buf_in, buf_len,
1630 + i, ir->rem + 1, false);
1631 +@@ -1296,6 +1343,14 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
1632 + if (ir->parser_state != CMD_HEADER && !ir->rem)
1633 + ir->parser_state = CMD_HEADER;
1634 + }
1635 ++
1636 ++ /*
1637 ++ * Accept IR data spanning multiple rx buffers.
1638 ++ * Reject MCE command response spanning multiple rx buffers.
1639 ++ */
1640 ++ if (ir->parser_state != PARSE_IRDATA || !ir->rem)
1641 ++ ir->parser_state = CMD_HEADER;
1642 ++
1643 + if (event) {
1644 + dev_dbg(ir->dev, "processed IR data");
1645 + ir_raw_event_handle(ir->rc);
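The rework above replaces index-based peeking into ir->buf_in[] with a pointer to one complete response, and the caller now only invokes the handler when "i + ir->rem < buf_len" proves the whole response (command, subcommand and payload) is inside the buffer. A minimal userspace sketch of the layout the handler assumes; the command/subcommand constants and the 50 us time unit are assumptions for illustration, not values taken from this patch:

    #include <stdio.h>
    #include <stdint.h>

    #define CMD_PORT_IR     0x9f  /* assumed value of MCE_CMD_PORT_IR */
    #define RSP_EQIRTIMEOUT 0x0c  /* assumed value of MCE_RSP_EQIRTIMEOUT */
    #define TIME_UNIT_US    50    /* assumed value of MCE_TIME_UNIT */

    /* buf_in[] holds one complete response: cmd, subcmd, then payload. */
    static void handle_command(const uint8_t *buf_in)
    {
            uint8_t cmd    = buf_in[0];
            uint8_t subcmd = buf_in[1];
            uint8_t hi     = buf_in[2]; /* read only when required */
            uint8_t lo     = buf_in[3]; /* read only when required */

            if (cmd == CMD_PORT_IR && subcmd == RSP_EQIRTIMEOUT)
                    printf("IR timeout: %u us\n",
                           (unsigned int)((hi << 8 | lo) * TIME_UNIT_US));
    }

    int main(void)
    {
            const uint8_t resp[] = { CMD_PORT_IR, RSP_EQIRTIMEOUT, 0x00, 0x20 };

            handle_command(resp); /* prints "IR timeout: 1600 us" */
            return 0;
    }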
1646 +diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
1647 +index 1826ff825c2e..1a801dc286f8 100644
1648 +--- a/drivers/media/usb/b2c2/flexcop-usb.c
1649 ++++ b/drivers/media/usb/b2c2/flexcop-usb.c
1650 +@@ -538,6 +538,9 @@ static int flexcop_usb_probe(struct usb_interface *intf,
1651 + struct flexcop_device *fc = NULL;
1652 + int ret;
1653 +
1654 ++ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
1655 ++ return -ENODEV;
1656 ++
1657 + if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) {
1658 + err("out of memory\n");
1659 + return -ENOMEM;
1660 +diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
1661 +index f02fa0a67aa4..fac19ec46089 100644
1662 +--- a/drivers/media/usb/dvb-usb/cxusb.c
1663 ++++ b/drivers/media/usb/dvb-usb/cxusb.c
1664 +@@ -521,7 +521,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d)
1665 + {
1666 + u8 ircode[4];
1667 +
1668 +- cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4);
1669 ++ if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0)
1670 ++ return 0;
1671 +
1672 + if (ircode[2] || ircode[3])
1673 + rc_keydown(d->rc_dev, RC_PROTO_NEC,
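The fix refuses to interpret ircode[] when the control transfer failed; previously a failed cxusb_ctrl_msg() left the array uninitialized, so stale stack bytes could be reported as a remote key press. A small userspace sketch of the pattern, with a stub standing in for the USB control message:

    #include <stdio.h>

    /* Stand-in for cxusb_ctrl_msg(); here it always fails with an I/O error. */
    static int ctrl_msg_stub(unsigned char *buf, int len)
    {
            (void)buf; (void)len;
            return -1;
    }

    static int rc_query(void)
    {
            unsigned char ircode[4];

            if (ctrl_msg_stub(ircode, sizeof(ircode)) < 0)
                    return 0; /* report "no key" instead of reading garbage */

            return ircode[2] || ircode[3];
    }

    int main(void)
    {
            printf("key pressed: %d\n", rc_query());
            return 0;
    }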
1674 +diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
1675 +index cdc66adda755..93d36aab824f 100644
1676 +--- a/drivers/media/usb/usbvision/usbvision-video.c
1677 ++++ b/drivers/media/usb/usbvision/usbvision-video.c
1678 +@@ -314,6 +314,10 @@ static int usbvision_v4l2_open(struct file *file)
1679 + if (mutex_lock_interruptible(&usbvision->v4l2_lock))
1680 + return -ERESTARTSYS;
1681 +
1682 ++ if (usbvision->remove_pending) {
1683 ++ err_code = -ENODEV;
1684 ++ goto unlock;
1685 ++ }
1686 + if (usbvision->user) {
1687 + err_code = -EBUSY;
1688 + } else {
1689 +@@ -377,6 +381,7 @@ unlock:
1690 + static int usbvision_v4l2_close(struct file *file)
1691 + {
1692 + struct usb_usbvision *usbvision = video_drvdata(file);
1693 ++ int r;
1694 +
1695 + PDEBUG(DBG_IO, "close");
1696 +
1697 +@@ -391,9 +396,10 @@ static int usbvision_v4l2_close(struct file *file)
1698 + usbvision_scratch_free(usbvision);
1699 +
1700 + usbvision->user--;
1701 ++ r = usbvision->remove_pending;
1702 + mutex_unlock(&usbvision->v4l2_lock);
1703 +
1704 +- if (usbvision->remove_pending) {
1705 ++ if (r) {
1706 + printk(KERN_INFO "%s: Final disconnect\n", __func__);
1707 + usbvision_release(usbvision);
1708 + return 0;
1709 +@@ -453,6 +459,9 @@ static int vidioc_querycap(struct file *file, void *priv,
1710 + {
1711 + struct usb_usbvision *usbvision = video_drvdata(file);
1712 +
1713 ++ if (!usbvision->dev)
1714 ++ return -ENODEV;
1715 ++
1716 + strscpy(vc->driver, "USBVision", sizeof(vc->driver));
1717 + strscpy(vc->card,
1718 + usbvision_device_data[usbvision->dev_model].model_string,
1719 +@@ -1061,6 +1070,11 @@ static int usbvision_radio_open(struct file *file)
1720 +
1721 + if (mutex_lock_interruptible(&usbvision->v4l2_lock))
1722 + return -ERESTARTSYS;
1723 ++
1724 ++ if (usbvision->remove_pending) {
1725 ++ err_code = -ENODEV;
1726 ++ goto out;
1727 ++ }
1728 + err_code = v4l2_fh_open(file);
1729 + if (err_code)
1730 + goto out;
1731 +@@ -1093,21 +1107,24 @@ out:
1732 + static int usbvision_radio_close(struct file *file)
1733 + {
1734 + struct usb_usbvision *usbvision = video_drvdata(file);
1735 ++ int r;
1736 +
1737 + PDEBUG(DBG_IO, "");
1738 +
1739 + mutex_lock(&usbvision->v4l2_lock);
1740 + /* Set packet size to 0 */
1741 + usbvision->iface_alt = 0;
1742 +- usb_set_interface(usbvision->dev, usbvision->iface,
1743 +- usbvision->iface_alt);
1744 ++ if (usbvision->dev)
1745 ++ usb_set_interface(usbvision->dev, usbvision->iface,
1746 ++ usbvision->iface_alt);
1747 +
1748 + usbvision_audio_off(usbvision);
1749 + usbvision->radio = 0;
1750 + usbvision->user--;
1751 ++ r = usbvision->remove_pending;
1752 + mutex_unlock(&usbvision->v4l2_lock);
1753 +
1754 +- if (usbvision->remove_pending) {
1755 ++ if (r) {
1756 + printk(KERN_INFO "%s: Final disconnect\n", __func__);
1757 + v4l2_fh_release(file);
1758 + usbvision_release(usbvision);
1759 +@@ -1539,6 +1556,7 @@ err_usb:
1760 + static void usbvision_disconnect(struct usb_interface *intf)
1761 + {
1762 + struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf));
1763 ++ int u;
1764 +
1765 + PDEBUG(DBG_PROBE, "");
1766 +
1767 +@@ -1555,13 +1573,14 @@ static void usbvision_disconnect(struct usb_interface *intf)
1768 + v4l2_device_disconnect(&usbvision->v4l2_dev);
1769 + usbvision_i2c_unregister(usbvision);
1770 + usbvision->remove_pending = 1; /* Now all ISO data will be ignored */
1771 ++ u = usbvision->user;
1772 +
1773 + usb_put_dev(usbvision->dev);
1774 + usbvision->dev = NULL; /* USB device is no more */
1775 +
1776 + mutex_unlock(&usbvision->v4l2_lock);
1777 +
1778 +- if (usbvision->user) {
1779 ++ if (u) {
1780 + printk(KERN_INFO "%s: In use, disconnect pending\n",
1781 + __func__);
1782 + wake_up_interruptible(&usbvision->wait_frame);
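All of the usbvision hunks above follow one pattern: any flag consulted after mutex_unlock() (remove_pending, user) is first copied into a local variable while the lock is still held, because the disconnect path may change or free the state the moment the lock drops. A minimal pthread analogue of that pattern, with a simplified device state (the users == 0 condition is a simplification, not the driver's exact logic):

    #include <pthread.h>
    #include <stdbool.h>

    struct dev_state {
            pthread_mutex_t lock;
            bool remove_pending;
            int users;
    };

    /* Returns true when the caller must perform the final release. */
    static bool device_close(struct dev_state *st)
    {
            bool do_release;

            pthread_mutex_lock(&st->lock);
            st->users--;
            do_release = st->remove_pending && st->users == 0;
            pthread_mutex_unlock(&st->lock);

            /* st->remove_pending is deliberately not re-read here: after
             * the unlock, disconnect may already be tearing the state
             * down, so only the snapshot taken under the lock is safe. */
            return do_release;
    }

    int main(void)
    {
            struct dev_state st = { PTHREAD_MUTEX_INITIALIZER, true, 1 };

            return device_close(&st) ? 0 : 1; /* last user does the release */
    }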
1783 +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
1784 +index 66ee168ddc7e..428235ca2635 100644
1785 +--- a/drivers/media/usb/uvc/uvc_driver.c
1786 ++++ b/drivers/media/usb/uvc/uvc_driver.c
1787 +@@ -2151,6 +2151,20 @@ static int uvc_probe(struct usb_interface *intf,
1788 + sizeof(dev->name) - len);
1789 + }
1790 +
1791 ++ /* Initialize the media device. */
1792 ++#ifdef CONFIG_MEDIA_CONTROLLER
1793 ++ dev->mdev.dev = &intf->dev;
1794 ++ strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
1795 ++ if (udev->serial)
1796 ++ strscpy(dev->mdev.serial, udev->serial,
1797 ++ sizeof(dev->mdev.serial));
1798 ++ usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
1799 ++ dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
1800 ++ media_device_init(&dev->mdev);
1801 ++
1802 ++ dev->vdev.mdev = &dev->mdev;
1803 ++#endif
1804 ++
1805 + /* Parse the Video Class control descriptor. */
1806 + if (uvc_parse_control(dev) < 0) {
1807 + uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC "
1808 +@@ -2171,19 +2185,7 @@ static int uvc_probe(struct usb_interface *intf,
1809 + "linux-uvc-devel mailing list.\n");
1810 + }
1811 +
1812 +- /* Initialize the media device and register the V4L2 device. */
1813 +-#ifdef CONFIG_MEDIA_CONTROLLER
1814 +- dev->mdev.dev = &intf->dev;
1815 +- strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
1816 +- if (udev->serial)
1817 +- strscpy(dev->mdev.serial, udev->serial,
1818 +- sizeof(dev->mdev.serial));
1819 +- usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
1820 +- dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
1821 +- media_device_init(&dev->mdev);
1822 +-
1823 +- dev->vdev.mdev = &dev->mdev;
1824 +-#endif
1825 ++ /* Register the V4L2 device. */
1826 + if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
1827 + goto error;
1828 +
1829 +diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
1830 +index a0b4d265c6eb..347bb92e4130 100644
1831 +--- a/drivers/net/wireless/ath/ath10k/pci.c
1832 ++++ b/drivers/net/wireless/ath/ath10k/pci.c
1833 +@@ -3490,7 +3490,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
1834 + struct ath10k_pci *ar_pci;
1835 + enum ath10k_hw_rev hw_rev;
1836 + struct ath10k_bus_params bus_params = {};
1837 +- bool pci_ps;
1838 ++ bool pci_ps, is_qca988x = false;
1839 + int (*pci_soft_reset)(struct ath10k *ar);
1840 + int (*pci_hard_reset)(struct ath10k *ar);
1841 + u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
1842 +@@ -3500,6 +3500,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
1843 + case QCA988X_2_0_DEVICE_ID:
1844 + hw_rev = ATH10K_HW_QCA988X;
1845 + pci_ps = false;
1846 ++ is_qca988x = true;
1847 + pci_soft_reset = ath10k_pci_warm_reset;
1848 + pci_hard_reset = ath10k_pci_qca988x_chip_reset;
1849 + targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
1850 +@@ -3619,25 +3620,34 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
1851 + goto err_deinit_irq;
1852 + }
1853 +
1854 ++ bus_params.dev_type = ATH10K_DEV_TYPE_LL;
1855 ++ bus_params.link_can_suspend = true;
1856 ++ /* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that
1857 ++ * fall off the bus during chip_reset. These chips have the same pci
1858 ++ * device id as the QCA9880 BR4A or 2R4E, hence this explicit check.
1859 ++ */
1860 ++ if (is_qca988x) {
1861 ++ bus_params.chip_id =
1862 ++ ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
1863 ++ if (bus_params.chip_id != 0xffffffff) {
1864 ++ if (!ath10k_pci_chip_is_supported(pdev->device,
1865 ++ bus_params.chip_id))
1866 ++ goto err_unsupported;
1867 ++ }
1868 ++ }
1869 ++
1870 + ret = ath10k_pci_chip_reset(ar);
1871 + if (ret) {
1872 + ath10k_err(ar, "failed to reset chip: %d\n", ret);
1873 + goto err_free_irq;
1874 + }
1875 +
1876 +- bus_params.dev_type = ATH10K_DEV_TYPE_LL;
1877 +- bus_params.link_can_suspend = true;
1878 + bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
1879 +- if (bus_params.chip_id == 0xffffffff) {
1880 +- ath10k_err(ar, "failed to get chip id\n");
1881 +- goto err_free_irq;
1882 +- }
1883 ++ if (bus_params.chip_id == 0xffffffff)
1884 ++ goto err_unsupported;
1885 +
1886 +- if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
1887 +- ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
1888 +- pdev->device, bus_params.chip_id);
1889 ++ if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id))
1890 + goto err_free_irq;
1891 +- }
1892 +
1893 + ret = ath10k_core_register(ar, &bus_params);
1894 + if (ret) {
1895 +@@ -3647,6 +3657,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
1896 +
1897 + return 0;
1898 +
1899 ++err_unsupported:
1900 ++ ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
1901 ++ pdev->device, bus_params.chip_id);
1902 ++
1903 + err_free_irq:
1904 + ath10k_pci_free_irq(ar);
1905 + ath10k_pci_rx_retry_sync(ar);
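The reordering above exists because a QCA9880-AR1A v1 drops off the bus during chip_reset and would read back as 0xffffffff afterwards, so QCA988x boards are now identified before the reset. A sketch of the resulting probe flow, with stand-ins for the hardware accessors; the chip-id value, the 0x003c device id and the shared err_unsupported label are illustrative simplifications of the diff:

    #include <stdio.h>

    /* Stand-ins for the hardware accessors (values illustrative). */
    static unsigned int read_chip_id(void) { return 0x043222ff; }
    static int chip_supported(unsigned int dev, unsigned int id)
    {
            (void)dev; (void)id;
            return 1;
    }
    static int chip_reset(void) { return 0; }

    static int probe_sketch(unsigned int device, int is_qca988x)
    {
            unsigned int chip_id = 0xffffffff;

            if (is_qca988x) {
                    chip_id = read_chip_id(); /* before the reset */
                    if (chip_id != 0xffffffff &&
                        !chip_supported(device, chip_id))
                            goto err_unsupported;
            }

            if (chip_reset())
                    return -1;

            chip_id = read_chip_id(); /* after the reset, for all chips */
            if (chip_id == 0xffffffff ||
                !chip_supported(device, chip_id))
                    goto err_unsupported;

            return 0;

    err_unsupported:
            fprintf(stderr, "device %04x with chip_id %08x isn't supported\n",
                    device, chip_id);
            return -1;
    }

    int main(void)
    {
            return probe_sketch(0x003c, 1); /* 0x003c: assumed QCA988x id */
    }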
1906 +diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
1907 +index 3b63b6257c43..545ac1f06997 100644
1908 +--- a/drivers/net/wireless/ath/ath10k/qmi.c
1909 ++++ b/drivers/net/wireless/ath/ath10k/qmi.c
1910 +@@ -581,22 +581,29 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
1911 + {
1912 + struct wlfw_host_cap_resp_msg_v01 resp = {};
1913 + struct wlfw_host_cap_req_msg_v01 req = {};
1914 ++ struct qmi_elem_info *req_ei;
1915 + struct ath10k *ar = qmi->ar;
1916 ++ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1917 + struct qmi_txn txn;
1918 + int ret;
1919 +
1920 + req.daemon_support_valid = 1;
1921 + req.daemon_support = 0;
1922 +
1923 +- ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
1924 +- wlfw_host_cap_resp_msg_v01_ei, &resp);
1925 ++ ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_host_cap_resp_msg_v01_ei,
1926 ++ &resp);
1927 + if (ret < 0)
1928 + goto out;
1929 +
1930 ++ if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags))
1931 ++ req_ei = wlfw_host_cap_8bit_req_msg_v01_ei;
1932 ++ else
1933 ++ req_ei = wlfw_host_cap_req_msg_v01_ei;
1934 ++
1935 + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
1936 + QMI_WLFW_HOST_CAP_REQ_V01,
1937 + WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
1938 +- wlfw_host_cap_req_msg_v01_ei, &req);
1939 ++ req_ei, &req);
1940 + if (ret < 0) {
1941 + qmi_txn_cancel(&txn);
1942 + ath10k_err(ar, "failed to send host capability request: %d\n", ret);
1943 +diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
1944 +index 1fe05c6218c3..86fcf4e1de5f 100644
1945 +--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
1946 ++++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
1947 +@@ -1988,6 +1988,28 @@ struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
1948 + {}
1949 + };
1950 +
1951 ++struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[] = {
1952 ++ {
1953 ++ .data_type = QMI_OPT_FLAG,
1954 ++ .elem_len = 1,
1955 ++ .elem_size = sizeof(u8),
1956 ++ .array_type = NO_ARRAY,
1957 ++ .tlv_type = 0x10,
1958 ++ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
1959 ++ daemon_support_valid),
1960 ++ },
1961 ++ {
1962 ++ .data_type = QMI_UNSIGNED_1_BYTE,
1963 ++ .elem_len = 1,
1964 ++ .elem_size = sizeof(u8),
1965 ++ .array_type = NO_ARRAY,
1966 ++ .tlv_type = 0x10,
1967 ++ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
1968 ++ daemon_support),
1969 ++ },
1970 ++ {}
1971 ++};
1972 ++
1973 + struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
1974 + {
1975 + .data_type = QMI_STRUCT,
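The new element array describes the same two fields as the stock one but forces daemon_support onto the wire as a single byte under TLV type 0x10, for firmware that rejects the default encoding. Assuming the standard QMI framing (1-byte type, 2-byte little-endian length, then the value), the quirked request payload would look like this; a userspace illustration, not driver code:

    #include <stdio.h>
    #include <stdint.h>

    /* Encode one QMI TLV: 1-byte type, 2-byte little-endian length, value. */
    static size_t put_tlv_u8(uint8_t *out, uint8_t type, uint8_t val)
    {
            out[0] = type;
            out[1] = 1; /* length, LSB */
            out[2] = 0; /* length, MSB */
            out[3] = val;
            return 4;
    }

    int main(void)
    {
            uint8_t msg[4];
            size_t i, n = put_tlv_u8(msg, 0x10, 0); /* daemon_support = 0 */

            for (i = 0; i < n; i++)
                    printf("%02x ", msg[i]);
            printf("\n"); /* 10 01 00 00 */
            return 0;
    }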
1976 +diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
1977 +index bca1186e1560..4d107e1364a8 100644
1978 +--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
1979 ++++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
1980 +@@ -575,6 +575,7 @@ struct wlfw_host_cap_req_msg_v01 {
1981 +
1982 + #define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189
1983 + extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
1984 ++extern struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[];
1985 +
1986 + struct wlfw_host_cap_resp_msg_v01 {
1987 + struct qmi_response_type_v01 resp;
1988 +diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
1989 +index b491361e6ed4..fc15a0037f0e 100644
1990 +--- a/drivers/net/wireless/ath/ath10k/snoc.c
1991 ++++ b/drivers/net/wireless/ath/ath10k/snoc.c
1992 +@@ -1261,6 +1261,15 @@ out:
1993 + return ret;
1994 + }
1995 +
1996 ++static void ath10k_snoc_quirks_init(struct ath10k *ar)
1997 ++{
1998 ++ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1999 ++ struct device *dev = &ar_snoc->dev->dev;
2000 ++
2001 ++ if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk"))
2002 ++ set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags);
2003 ++}
2004 ++
2005 + int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
2006 + {
2007 + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
2008 +@@ -1678,6 +1687,8 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
2009 + ar->ce_priv = &ar_snoc->ce;
2010 + msa_size = drv_data->msa_size;
2011 +
2012 ++ ath10k_snoc_quirks_init(ar);
2013 ++
2014 + ret = ath10k_snoc_resource_init(ar);
2015 + if (ret) {
2016 + ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
2017 +diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
2018 +index d62f53501fbb..9db823e46314 100644
2019 +--- a/drivers/net/wireless/ath/ath10k/snoc.h
2020 ++++ b/drivers/net/wireless/ath/ath10k/snoc.h
2021 +@@ -63,6 +63,7 @@ enum ath10k_snoc_flags {
2022 + ATH10K_SNOC_FLAG_REGISTERED,
2023 + ATH10K_SNOC_FLAG_UNREGISTERING,
2024 + ATH10K_SNOC_FLAG_RECOVERY,
2025 ++ ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK,
2026 + };
2027 +
2028 + struct ath10k_snoc {
2029 +diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
2030 +index e1420f67f776..9ebe74ee4aef 100644
2031 +--- a/drivers/net/wireless/ath/ath10k/usb.c
2032 ++++ b/drivers/net/wireless/ath/ath10k/usb.c
2033 +@@ -38,6 +38,10 @@ ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe)
2034 + struct ath10k_urb_context *urb_context = NULL;
2035 + unsigned long flags;
2036 +
2037 ++ /* bail if this pipe is not initialized */
2038 ++ if (!pipe->ar_usb)
2039 ++ return NULL;
2040 ++
2041 + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
2042 + if (!list_empty(&pipe->urb_list_head)) {
2043 + urb_context = list_first_entry(&pipe->urb_list_head,
2044 +@@ -55,6 +59,10 @@ static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe,
2045 + {
2046 + unsigned long flags;
2047 +
2048 ++ /* bail if this pipe is not initialized */
2049 ++ if (!pipe->ar_usb)
2050 ++ return;
2051 ++
2052 + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
2053 +
2054 + pipe->urb_cnt++;
2055 +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
2056 +index 2b29bf4730f6..b4885a700296 100644
2057 +--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
2058 ++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
2059 +@@ -4183,7 +4183,7 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
2060 +
2061 + static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
2062 + {
2063 +- u32 data, ko, kg;
2064 ++ u32 data = 0, ko, kg;
2065 +
2066 + if (!AR_SREV_9462_20_OR_LATER(ah))
2067 + return;
2068 +diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
2069 +index 04bc488385e6..4af012968cb6 100644
2070 +--- a/drivers/staging/comedi/drivers/usbduxfast.c
2071 ++++ b/drivers/staging/comedi/drivers/usbduxfast.c
2072 +@@ -1,6 +1,6 @@
2073 + // SPDX-License-Identifier: GPL-2.0+
2074 + /*
2075 +- * Copyright (C) 2004-2014 Bernd Porr, mail@××××××××××××.uk
2076 ++ * Copyright (C) 2004-2019 Bernd Porr, mail@××××××××××××.uk
2077 + */
2078 +
2079 + /*
2080 +@@ -8,7 +8,7 @@
2081 + * Description: University of Stirling USB DAQ & INCITE Technology Limited
2082 + * Devices: [ITL] USB-DUX-FAST (usbduxfast)
2083 + * Author: Bernd Porr <mail@××××××××××××.uk>
2084 +- * Updated: 10 Oct 2014
2085 ++ * Updated: 16 Nov 2019
2086 + * Status: stable
2087 + */
2088 +
2089 +@@ -22,6 +22,7 @@
2090 + *
2091 + *
2092 + * Revision history:
2093 ++ * 1.0: Fixed a rounding error in usbduxfast_ai_cmdtest
2094 + * 0.9: Dropping the first data packet which seems to be from the last transfer.
2095 + * Buffer overflows in the FX2 are handed over to comedi.
2096 + * 0.92: Dropping now 4 packets. The quad buffer has to be emptied.
2097 +@@ -350,6 +351,7 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
2098 + struct comedi_cmd *cmd)
2099 + {
2100 + int err = 0;
2101 ++ int err2 = 0;
2102 + unsigned int steps;
2103 + unsigned int arg;
2104 +
2105 +@@ -399,11 +401,16 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
2106 + */
2107 + steps = (cmd->convert_arg * 30) / 1000;
2108 + if (cmd->chanlist_len != 1)
2109 +- err |= comedi_check_trigger_arg_min(&steps,
2110 +- MIN_SAMPLING_PERIOD);
2111 +- err |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
2112 +- arg = (steps * 1000) / 30;
2113 +- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
2114 ++ err2 |= comedi_check_trigger_arg_min(&steps,
2115 ++ MIN_SAMPLING_PERIOD);
2116 ++ else
2117 ++ err2 |= comedi_check_trigger_arg_min(&steps, 1);
2118 ++ err2 |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
2119 ++ if (err2) {
2120 ++ err |= err2;
2121 ++ arg = (steps * 1000) / 30;
2122 ++ err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
2123 ++ }
2124 +
2125 + if (cmd->stop_src == TRIG_COUNT)
2126 + err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
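The "rounding error" named in the revision history comes from the truncating round trip between nanoseconds and 30 MHz sequencer steps: the old code unconditionally rewrote convert_arg with the truncated value, and with chanlist_len == 1 there was no lower bound on steps, so a legal period could be walked down to zero across cmdtest passes. A worked example of the arithmetic, with the convert_arg value chosen purely for illustration:

    #include <stdio.h>

    int main(void)
    {
            /* convert_arg is in ns; one sequencer step is 1/30 us. */
            unsigned int convert_arg = 34; /* example single-channel value */
            int pass;

            for (pass = 0; pass < 3; pass++) {
                    unsigned int steps = (convert_arg * 30) / 1000;
                    unsigned int arg = (steps * 1000) / 30;

                    printf("pass %d: convert_arg=%u -> steps=%u -> arg=%u\n",
                           pass, convert_arg, steps, arg);
                    convert_arg = arg; /* old code always rewrote it */
            }
            /* Output: 34 -> 33, 33 -> 0, 0 -> 0. The fix clamps steps
             * to >= 1 for single-channel commands and only rewrites
             * convert_arg when a limit check (err2) actually failed. */
            return 0;
    }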
2127 +diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
2128 +index ac92725458b5..ba1eaabc7796 100644
2129 +--- a/drivers/usb/misc/appledisplay.c
2130 ++++ b/drivers/usb/misc/appledisplay.c
2131 +@@ -164,7 +164,12 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
2132 + 0,
2133 + pdata->msgdata, 2,
2134 + ACD_USB_TIMEOUT);
2135 +- brightness = pdata->msgdata[1];
2136 ++ if (retval < 2) {
2137 ++ if (retval >= 0)
2138 ++ retval = -EMSGSIZE;
2139 ++ } else {
2140 ++ brightness = pdata->msgdata[1];
2141 ++ }
2142 + mutex_unlock(&pdata->sysfslock);
2143 +
2144 + if (retval < 0)
2145 +@@ -299,6 +304,7 @@ error:
2146 + if (pdata) {
2147 + if (pdata->urb) {
2148 + usb_kill_urb(pdata->urb);
2149 ++ cancel_delayed_work_sync(&pdata->work);
2150 + if (pdata->urbdata)
2151 + usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
2152 + pdata->urbdata, pdata->urb->transfer_dma);
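usb_control_msg() returns the number of bytes actually transferred or a negative errno, so a 1-byte answer used to let a stale msgdata[1] through as the brightness. The added check maps any short read to -EMSGSIZE before the byte is trusted; a small userspace rendering of just that logic:

    #include <stdio.h>
    #include <errno.h>

    static int parse_brightness(int retval, const unsigned char *msgdata)
    {
            if (retval < 2) /* short read or transfer error */
                    return retval >= 0 ? -EMSGSIZE : retval;
            return msgdata[1];
    }

    int main(void)
    {
            const unsigned char buf[2] = { 0x00, 0x7f };

            printf("%d\n", parse_brightness(2, buf));      /* 127 */
            printf("%d\n", parse_brightness(1, buf));      /* -EMSGSIZE */
            printf("%d\n", parse_brightness(-EPIPE, buf)); /* passthrough */
            return 0;
    }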
2153 +diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
2154 +index 34e6cd6f40d3..87067c3d6109 100644
2155 +--- a/drivers/usb/misc/chaoskey.c
2156 ++++ b/drivers/usb/misc/chaoskey.c
2157 +@@ -384,13 +384,17 @@ static int _chaoskey_fill(struct chaoskey *dev)
2158 + !dev->reading,
2159 + (started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) );
2160 +
2161 +- if (result < 0)
2162 ++ if (result < 0) {
2163 ++ usb_kill_urb(dev->urb);
2164 + goto out;
2165 ++ }
2166 +
2167 +- if (result == 0)
2168 ++ if (result == 0) {
2169 + result = -ETIMEDOUT;
2170 +- else
2171 ++ usb_kill_urb(dev->urb);
2172 ++ } else {
2173 + result = dev->valid;
2174 ++ }
2175 + out:
2176 + /* Let the device go back to sleep eventually */
2177 + usb_autopm_put_interface(dev->interface);
2178 +@@ -526,7 +530,21 @@ static int chaoskey_suspend(struct usb_interface *interface,
2179 +
2180 + static int chaoskey_resume(struct usb_interface *interface)
2181 + {
2182 ++ struct chaoskey *dev;
2183 ++ struct usb_device *udev = interface_to_usbdev(interface);
2184 ++
2185 + usb_dbg(interface, "resume");
2186 ++ dev = usb_get_intfdata(interface);
2187 ++
2188 ++ /*
2189 ++ * We may have lost power.
2190 ++	 * In that case, a device that needs a long time
2191 ++	 * for its first requests needs the extended timeout
2192 ++	 * again.
2193 ++ */
2194 ++ if (le16_to_cpu(udev->descriptor.idVendor) == ALEA_VENDOR_ID)
2195 ++ dev->reads_started = false;
2196 ++
2197 + return 0;
2198 + }
2199 + #else
2200 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2201 +index 979bef9bfb6b..f5143eedbc48 100644
2202 +--- a/drivers/usb/serial/cp210x.c
2203 ++++ b/drivers/usb/serial/cp210x.c
2204 +@@ -125,6 +125,7 @@ static const struct usb_device_id id_table[] = {
2205 + { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
2206 + { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
2207 + { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
2208 ++ { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
2209 + { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
2210 + { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
2211 + { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
2212 +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
2213 +index 18110225d506..2ec4eeacebc7 100644
2214 +--- a/drivers/usb/serial/mos7720.c
2215 ++++ b/drivers/usb/serial/mos7720.c
2216 +@@ -1833,10 +1833,6 @@ static int mos7720_startup(struct usb_serial *serial)
2217 + product = le16_to_cpu(serial->dev->descriptor.idProduct);
2218 + dev = serial->dev;
2219 +
2220 +- /* setting configuration feature to one */
2221 +- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
2222 +- (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
2223 +-
2224 + if (product == MOSCHIP_DEVICE_ID_7715) {
2225 + struct urb *urb = serial->port[0]->interrupt_in_urb;
2226 +
2227 +diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
2228 +index a698d46ba773..ab4bf8d6d7df 100644
2229 +--- a/drivers/usb/serial/mos7840.c
2230 ++++ b/drivers/usb/serial/mos7840.c
2231 +@@ -119,11 +119,15 @@
2232 + /* This driver also supports
2233 + * ATEN UC2324 device using Moschip MCS7840
2234 + * ATEN UC2322 device using Moschip MCS7820
2235 ++ * MOXA UPort 2210 device using Moschip MCS7820
2236 + */
2237 + #define USB_VENDOR_ID_ATENINTL 0x0557
2238 + #define ATENINTL_DEVICE_ID_UC2324 0x2011
2239 + #define ATENINTL_DEVICE_ID_UC2322 0x7820
2240 +
2241 ++#define USB_VENDOR_ID_MOXA 0x110a
2242 ++#define MOXA_DEVICE_ID_2210 0x2210
2243 ++
2244 + /* Interrupt Routine Defines */
2245 +
2246 + #define SERIAL_IIR_RLS 0x06
2247 +@@ -195,6 +199,7 @@ static const struct usb_device_id id_table[] = {
2248 + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
2249 + {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
2250 + {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
2251 ++ {USB_DEVICE(USB_VENDOR_ID_MOXA, MOXA_DEVICE_ID_2210)},
2252 + {} /* terminating entry */
2253 + };
2254 + MODULE_DEVICE_TABLE(usb, id_table);
2255 +@@ -2020,6 +2025,7 @@ static int mos7840_probe(struct usb_serial *serial,
2256 + const struct usb_device_id *id)
2257 + {
2258 + u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
2259 ++ u16 vid = le16_to_cpu(serial->dev->descriptor.idVendor);
2260 + u8 *buf;
2261 + int device_type;
2262 +
2263 +@@ -2030,6 +2036,11 @@ static int mos7840_probe(struct usb_serial *serial,
2264 + goto out;
2265 + }
2266 +
2267 ++ if (vid == USB_VENDOR_ID_MOXA && product == MOXA_DEVICE_ID_2210) {
2268 ++ device_type = MOSCHIP_DEVICE_ID_7820;
2269 ++ goto out;
2270 ++ }
2271 ++
2272 + buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
2273 + if (!buf)
2274 + return -ENOMEM;
2275 +@@ -2279,11 +2290,6 @@ out:
2276 + goto error;
2277 + } else
2278 + dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status);
2279 +-
2280 +- /* setting configuration feature to one */
2281 +- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
2282 +- 0x03, 0x00, 0x01, 0x00, NULL, 0x00,
2283 +- MOS_WDR_TIMEOUT);
2284 + }
2285 + return 0;
2286 + error:
2287 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2288 +index 06ab016be0b6..e9491d400a24 100644
2289 +--- a/drivers/usb/serial/option.c
2290 ++++ b/drivers/usb/serial/option.c
2291 +@@ -197,6 +197,7 @@ static void option_instat_callback(struct urb *urb);
2292 + #define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
2293 +
2294 + #define DELL_PRODUCT_5821E 0x81d7
2295 ++#define DELL_PRODUCT_5821E_ESIM 0x81e0
2296 +
2297 + #define KYOCERA_VENDOR_ID 0x0c88
2298 + #define KYOCERA_PRODUCT_KPC650 0x17da
2299 +@@ -1044,6 +1045,8 @@ static const struct usb_device_id option_ids[] = {
2300 + { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
2301 + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E),
2302 + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
2303 ++ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
2304 ++ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
2305 + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
2306 + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
2307 + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
2308 +@@ -1990,6 +1993,10 @@ static const struct usb_device_id option_ids[] = {
2309 + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
2310 + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
2311 + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
2312 ++ { USB_DEVICE(0x0489, 0xe0b4), /* Foxconn T77W968 */
2313 ++ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
2314 ++ { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
2315 ++ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
2316 + { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
2317 + .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
2318 + { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
2319 +diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig
2320 +index 2f86b28fa3da..7bbae7a08642 100644
2321 +--- a/drivers/usb/usbip/Kconfig
2322 ++++ b/drivers/usb/usbip/Kconfig
2323 +@@ -4,6 +4,7 @@ config USBIP_CORE
2324 + tristate "USB/IP support"
2325 + depends on NET
2326 + select USB_COMMON
2327 ++ select SGL_ALLOC
2328 + ---help---
2329 + This enables pushing USB packets over IP to allow remote
2330 + machines direct access to USB devices. It provides the
2331 +diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
2332 +index 66edfeea68fe..e2b019532234 100644
2333 +--- a/drivers/usb/usbip/stub_rx.c
2334 ++++ b/drivers/usb/usbip/stub_rx.c
2335 +@@ -470,18 +470,50 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
2336 + if (pipe == -1)
2337 + return;
2338 +
2339 ++ /*
2340 ++ * Smatch reported the error case where use_sg is true and buf_len is 0.
2341 ++	 * In this case, it adds SDEV_EVENT_ERROR_MALLOC and stub_priv will be
2342 ++	 * released by the stub event handler and the connection will be shut down.
2343 ++ */
2344 + priv = stub_priv_alloc(sdev, pdu);
2345 + if (!priv)
2346 + return;
2347 +
2348 + buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length;
2349 +
2350 ++ if (use_sg && !buf_len) {
2351 ++ dev_err(&udev->dev, "sg buffer with zero length\n");
2352 ++ goto err_malloc;
2353 ++ }
2354 ++
2355 + /* allocate urb transfer buffer, if needed */
2356 + if (buf_len) {
2357 + if (use_sg) {
2358 + sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents);
2359 + if (!sgl)
2360 + goto err_malloc;
2361 ++
2362 ++ /* Check if the server's HCD supports SG */
2363 ++ if (!udev->bus->sg_tablesize) {
2364 ++ /*
2365 ++ * If the server's HCD doesn't support SG, break
2366 ++ * a single SG request into several URBs and map
2367 ++			 * each SG list entry to the corresponding URB
2368 ++			 * buffer. The previously allocated SG list is
2369 ++			 * stored in priv->sgl (if the server's HCD
2370 ++			 * supports SG, the SG list is stored only in
2371 ++			 * urb->sg) and is used as an indicator that
2372 ++			 * the server split a single SG request into
2373 ++			 * several URBs. Later, priv->sgl is used by
2374 ++			 * stub_complete() and stub_send_ret_submit() to
2375 ++			 * reassemble the divided URBs.
2376 ++ */
2377 ++ support_sg = 0;
2378 ++ num_urbs = nents;
2379 ++ priv->completed_urbs = 0;
2380 ++ pdu->u.cmd_submit.transfer_flags &=
2381 ++ ~URB_DMA_MAP_SG;
2382 ++ }
2383 + } else {
2384 + buffer = kzalloc(buf_len, GFP_KERNEL);
2385 + if (!buffer)
2386 +@@ -489,24 +521,6 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
2387 + }
2388 + }
2389 +
2390 +- /* Check if the server's HCD supports SG */
2391 +- if (use_sg && !udev->bus->sg_tablesize) {
2392 +- /*
2393 +- * If the server's HCD doesn't support SG, break a single SG
2394 +- * request into several URBs and map each SG list entry to
2395 +- * corresponding URB buffer. The previously allocated SG
2396 +- * list is stored in priv->sgl (If the server's HCD support SG,
2397 +- * SG list is stored only in urb->sg) and it is used as an
2398 +- * indicator that the server split single SG request into
2399 +- * several URBs. Later, priv->sgl is used by stub_complete() and
2400 +- * stub_send_ret_submit() to reassemble the divied URBs.
2401 +- */
2402 +- support_sg = 0;
2403 +- num_urbs = nents;
2404 +- priv->completed_urbs = 0;
2405 +- pdu->u.cmd_submit.transfer_flags &= ~URB_DMA_MAP_SG;
2406 +- }
2407 +-
2408 + /* allocate urb array */
2409 + priv->num_urbs = num_urbs;
2410 + priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL);
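When the server's HCD has no SG support (sg_tablesize == 0), the single client request is split into nents URBs, one per SG entry, and priv->sgl is kept so the completion path can reassemble them. The actual entry sizes are whatever sgl_alloc() chose; the sketch below assumes page-sized entries purely to show the split:

    #include <stdio.h>

    int main(void)
    {
            const unsigned long long SEG = 4096; /* assumed SG entry size */
            unsigned long long buf_len = 10000;  /* example transfer size */
            unsigned int i, nents = (buf_len + SEG - 1) / SEG;

            printf("buf_len=%llu -> %u URBs\n", buf_len, nents);
            for (i = 0; i < nents; i++) {
                    unsigned long long chunk =
                            (i == nents - 1) ? buf_len - i * SEG : SEG;
                    printf("  urb[%u]: %llu bytes\n", i, chunk);
            }
            return 0;
    }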
2411 +diff --git a/fs/exec.c b/fs/exec.c
2412 +index 555e93c7dec8..c27231234764 100644
2413 +--- a/fs/exec.c
2414 ++++ b/fs/exec.c
2415 +@@ -1015,7 +1015,7 @@ static int exec_mmap(struct mm_struct *mm)
2416 + /* Notify parent that we're no longer interested in the old VM */
2417 + tsk = current;
2418 + old_mm = current->mm;
2419 +- mm_release(tsk, old_mm);
2420 ++ exec_mm_release(tsk, old_mm);
2421 +
2422 + if (old_mm) {
2423 + sync_mm_rss(old_mm);
2424 +diff --git a/include/linux/compat.h b/include/linux/compat.h
2425 +index 16dafd9f4b86..c4c389c7e1b4 100644
2426 +--- a/include/linux/compat.h
2427 ++++ b/include/linux/compat.h
2428 +@@ -410,8 +410,6 @@ struct compat_kexec_segment;
2429 + struct compat_mq_attr;
2430 + struct compat_msgbuf;
2431 +
2432 +-extern void compat_exit_robust_list(struct task_struct *curr);
2433 +-
2434 + #define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
2435 +
2436 + #define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
2437 +diff --git a/include/linux/futex.h b/include/linux/futex.h
2438 +index ccaef0097785..5cc3fed27d4c 100644
2439 +--- a/include/linux/futex.h
2440 ++++ b/include/linux/futex.h
2441 +@@ -2,7 +2,9 @@
2442 + #ifndef _LINUX_FUTEX_H
2443 + #define _LINUX_FUTEX_H
2444 +
2445 ++#include <linux/sched.h>
2446 + #include <linux/ktime.h>
2447 ++
2448 + #include <uapi/linux/futex.h>
2449 +
2450 + struct inode;
2451 +@@ -48,15 +50,35 @@ union futex_key {
2452 + #define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
2453 +
2454 + #ifdef CONFIG_FUTEX
2455 +-extern void exit_robust_list(struct task_struct *curr);
2456 ++enum {
2457 ++ FUTEX_STATE_OK,
2458 ++ FUTEX_STATE_EXITING,
2459 ++ FUTEX_STATE_DEAD,
2460 ++};
2461 +
2462 +-long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2463 +- u32 __user *uaddr2, u32 val2, u32 val3);
2464 +-#else
2465 +-static inline void exit_robust_list(struct task_struct *curr)
2466 ++static inline void futex_init_task(struct task_struct *tsk)
2467 + {
2468 ++ tsk->robust_list = NULL;
2469 ++#ifdef CONFIG_COMPAT
2470 ++ tsk->compat_robust_list = NULL;
2471 ++#endif
2472 ++ INIT_LIST_HEAD(&tsk->pi_state_list);
2473 ++ tsk->pi_state_cache = NULL;
2474 ++ tsk->futex_state = FUTEX_STATE_OK;
2475 ++ mutex_init(&tsk->futex_exit_mutex);
2476 + }
2477 +
2478 ++void futex_exit_recursive(struct task_struct *tsk);
2479 ++void futex_exit_release(struct task_struct *tsk);
2480 ++void futex_exec_release(struct task_struct *tsk);
2481 ++
2482 ++long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2483 ++ u32 __user *uaddr2, u32 val2, u32 val3);
2484 ++#else
2485 ++static inline void futex_init_task(struct task_struct *tsk) { }
2486 ++static inline void futex_exit_recursive(struct task_struct *tsk) { }
2487 ++static inline void futex_exit_release(struct task_struct *tsk) { }
2488 ++static inline void futex_exec_release(struct task_struct *tsk) { }
2489 + static inline long do_futex(u32 __user *uaddr, int op, u32 val,
2490 + ktime_t *timeout, u32 __user *uaddr2,
2491 + u32 val2, u32 val3)
2492 +@@ -65,12 +87,4 @@ static inline long do_futex(u32 __user *uaddr, int op, u32 val,
2493 + }
2494 + #endif
2495 +
2496 +-#ifdef CONFIG_FUTEX_PI
2497 +-extern void exit_pi_state_list(struct task_struct *curr);
2498 +-#else
2499 +-static inline void exit_pi_state_list(struct task_struct *curr)
2500 +-{
2501 +-}
2502 +-#endif
2503 +-
2504 + #endif
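futex_state and futex_exit_mutex together form a small state machine (FUTEX_STATE_OK -> EXITING -> DEAD): the exit path holds the mutex across the robust-list/PI cleanup, so a waiter that got -EBUSY only has to acquire and release the same mutex to know the cleanup has finished. A pthread sketch of the idea; the function names and ordering are a simplification, the real implementation is in the kernel/futex.c hunks further down:

    #include <pthread.h>
    #include <stdio.h>

    enum { STATE_OK, STATE_EXITING, STATE_DEAD };

    struct task {
            pthread_mutex_t futex_exit_mutex;
            int futex_state;
    };

    /* Exit side: publish EXITING, run the cleanup, then publish DEAD,
     * all while holding futex_exit_mutex. */
    static void futex_exit_release_sketch(struct task *tsk)
    {
            pthread_mutex_lock(&tsk->futex_exit_mutex);
            tsk->futex_state = STATE_EXITING;
            /* ... robust-list and PI-state cleanup would run here ... */
            tsk->futex_state = STATE_DEAD;
            pthread_mutex_unlock(&tsk->futex_exit_mutex);
    }

    /* Waiter side (wait_for_owner_exiting): blocking on the mutex is
     * enough; once acquired, the owner's cleanup is over. */
    static void wait_for_owner_exiting_sketch(struct task *exiting)
    {
            pthread_mutex_lock(&exiting->futex_exit_mutex);
            pthread_mutex_unlock(&exiting->futex_exit_mutex);
    }

    int main(void)
    {
            struct task t = { PTHREAD_MUTEX_INITIALIZER, STATE_OK };

            futex_exit_release_sketch(&t);
            wait_for_owner_exiting_sketch(&t);
            printf("owner state: %d (DEAD)\n", t.futex_state);
            return 0;
    }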
2505 +diff --git a/include/linux/sched.h b/include/linux/sched.h
2506 +index 67a1d86981a9..775503573ed7 100644
2507 +--- a/include/linux/sched.h
2508 ++++ b/include/linux/sched.h
2509 +@@ -1054,6 +1054,8 @@ struct task_struct {
2510 + #endif
2511 + struct list_head pi_state_list;
2512 + struct futex_pi_state *pi_state_cache;
2513 ++ struct mutex futex_exit_mutex;
2514 ++ unsigned int futex_state;
2515 + #endif
2516 + #ifdef CONFIG_PERF_EVENTS
2517 + struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
2518 +@@ -1442,7 +1444,6 @@ extern struct pid *cad_pid;
2519 + */
2520 + #define PF_IDLE 0x00000002 /* I am an IDLE thread */
2521 + #define PF_EXITING 0x00000004 /* Getting shut down */
2522 +-#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
2523 + #define PF_VCPU 0x00000010 /* I'm a virtual CPU */
2524 + #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
2525 + #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
2526 +diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
2527 +index e6770012db18..c49257a3b510 100644
2528 +--- a/include/linux/sched/mm.h
2529 ++++ b/include/linux/sched/mm.h
2530 +@@ -117,8 +117,10 @@ extern struct mm_struct *get_task_mm(struct task_struct *task);
2531 + * succeeds.
2532 + */
2533 + extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2534 +-/* Remove the current tasks stale references to the old mm_struct */
2535 +-extern void mm_release(struct task_struct *, struct mm_struct *);
2536 ++/* Remove the current task's stale references to the old mm_struct on exit() */
2537 ++extern void exit_mm_release(struct task_struct *, struct mm_struct *);
2538 ++/* Remove the current task's stale references to the old mm_struct on exec() */
2539 ++extern void exec_mm_release(struct task_struct *, struct mm_struct *);
2540 +
2541 + #ifdef CONFIG_MEMCG
2542 + extern void mm_update_next_owner(struct mm_struct *mm);
2543 +diff --git a/kernel/exit.c b/kernel/exit.c
2544 +index a46a50d67002..d351fd09e739 100644
2545 +--- a/kernel/exit.c
2546 ++++ b/kernel/exit.c
2547 +@@ -437,7 +437,7 @@ static void exit_mm(void)
2548 + struct mm_struct *mm = current->mm;
2549 + struct core_state *core_state;
2550 +
2551 +- mm_release(current, mm);
2552 ++ exit_mm_release(current, mm);
2553 + if (!mm)
2554 + return;
2555 + sync_mm_rss(mm);
2556 +@@ -746,32 +746,12 @@ void __noreturn do_exit(long code)
2557 + */
2558 + if (unlikely(tsk->flags & PF_EXITING)) {
2559 + pr_alert("Fixing recursive fault but reboot is needed!\n");
2560 +- /*
2561 +- * We can do this unlocked here. The futex code uses
2562 +- * this flag just to verify whether the pi state
2563 +- * cleanup has been done or not. In the worst case it
2564 +- * loops once more. We pretend that the cleanup was
2565 +- * done as there is no way to return. Either the
2566 +- * OWNER_DIED bit is set by now or we push the blocked
2567 +- * task into the wait for ever nirwana as well.
2568 +- */
2569 +- tsk->flags |= PF_EXITPIDONE;
2570 ++ futex_exit_recursive(tsk);
2571 + set_current_state(TASK_UNINTERRUPTIBLE);
2572 + schedule();
2573 + }
2574 +
2575 + exit_signals(tsk); /* sets PF_EXITING */
2576 +- /*
2577 +- * Ensure that all new tsk->pi_lock acquisitions must observe
2578 +- * PF_EXITING. Serializes against futex.c:attach_to_pi_owner().
2579 +- */
2580 +- smp_mb();
2581 +- /*
2582 +- * Ensure that we must observe the pi_state in exit_mm() ->
2583 +- * mm_release() -> exit_pi_state_list().
2584 +- */
2585 +- raw_spin_lock_irq(&tsk->pi_lock);
2586 +- raw_spin_unlock_irq(&tsk->pi_lock);
2587 +
2588 + if (unlikely(in_atomic())) {
2589 + pr_info("note: %s[%d] exited with preempt_count %d\n",
2590 +@@ -846,12 +826,6 @@ void __noreturn do_exit(long code)
2591 + * Make sure we are holding no locks:
2592 + */
2593 + debug_check_no_locks_held();
2594 +- /*
2595 +- * We can do this unlocked here. The futex code uses this flag
2596 +- * just to verify whether the pi state cleanup has been done
2597 +- * or not. In the worst case it loops once more.
2598 +- */
2599 +- tsk->flags |= PF_EXITPIDONE;
2600 +
2601 + if (tsk->io_context)
2602 + exit_io_context(tsk);
2603 +diff --git a/kernel/fork.c b/kernel/fork.c
2604 +index 13b38794efb5..6cabc124378c 100644
2605 +--- a/kernel/fork.c
2606 ++++ b/kernel/fork.c
2607 +@@ -1283,24 +1283,8 @@ static int wait_for_vfork_done(struct task_struct *child,
2608 + * restoring the old one. . .
2609 + * Eric Biederman 10 January 1998
2610 + */
2611 +-void mm_release(struct task_struct *tsk, struct mm_struct *mm)
2612 ++static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
2613 + {
2614 +- /* Get rid of any futexes when releasing the mm */
2615 +-#ifdef CONFIG_FUTEX
2616 +- if (unlikely(tsk->robust_list)) {
2617 +- exit_robust_list(tsk);
2618 +- tsk->robust_list = NULL;
2619 +- }
2620 +-#ifdef CONFIG_COMPAT
2621 +- if (unlikely(tsk->compat_robust_list)) {
2622 +- compat_exit_robust_list(tsk);
2623 +- tsk->compat_robust_list = NULL;
2624 +- }
2625 +-#endif
2626 +- if (unlikely(!list_empty(&tsk->pi_state_list)))
2627 +- exit_pi_state_list(tsk);
2628 +-#endif
2629 +-
2630 + uprobe_free_utask(tsk);
2631 +
2632 + /* Get rid of any cached register state */
2633 +@@ -1333,6 +1317,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
2634 + complete_vfork_done(tsk);
2635 + }
2636 +
2637 ++void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
2638 ++{
2639 ++ futex_exit_release(tsk);
2640 ++ mm_release(tsk, mm);
2641 ++}
2642 ++
2643 ++void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
2644 ++{
2645 ++ futex_exec_release(tsk);
2646 ++ mm_release(tsk, mm);
2647 ++}
2648 ++
2649 + /**
2650 + * dup_mm() - duplicates an existing mm structure
2651 + * @tsk: the task_struct with which the new mm will be associated.
2652 +@@ -2062,14 +2058,8 @@ static __latent_entropy struct task_struct *copy_process(
2653 + #ifdef CONFIG_BLOCK
2654 + p->plug = NULL;
2655 + #endif
2656 +-#ifdef CONFIG_FUTEX
2657 +- p->robust_list = NULL;
2658 +-#ifdef CONFIG_COMPAT
2659 +- p->compat_robust_list = NULL;
2660 +-#endif
2661 +- INIT_LIST_HEAD(&p->pi_state_list);
2662 +- p->pi_state_cache = NULL;
2663 +-#endif
2664 ++ futex_init_task(p);
2665 ++
2666 + /*
2667 + * sigaltstack should be cleared when sharing the same VM
2668 + */
2669 +diff --git a/kernel/futex.c b/kernel/futex.c
2670 +index bd18f60e4c6c..afbf928d6a6b 100644
2671 +--- a/kernel/futex.c
2672 ++++ b/kernel/futex.c
2673 +@@ -325,6 +325,12 @@ static inline bool should_fail_futex(bool fshared)
2674 + }
2675 + #endif /* CONFIG_FAIL_FUTEX */
2676 +
2677 ++#ifdef CONFIG_COMPAT
2678 ++static void compat_exit_robust_list(struct task_struct *curr);
2679 ++#else
2680 ++static inline void compat_exit_robust_list(struct task_struct *curr) { }
2681 ++#endif
2682 ++
2683 + static inline void futex_get_mm(union futex_key *key)
2684 + {
2685 + mmgrab(key->private.mm);
2686 +@@ -890,7 +896,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
2687 + * Kernel cleans up PI-state, but userspace is likely hosed.
2688 + * (Robust-futex cleanup is separate and might save the day for userspace.)
2689 + */
2690 +-void exit_pi_state_list(struct task_struct *curr)
2691 ++static void exit_pi_state_list(struct task_struct *curr)
2692 + {
2693 + struct list_head *next, *head = &curr->pi_state_list;
2694 + struct futex_pi_state *pi_state;
2695 +@@ -960,7 +966,8 @@ void exit_pi_state_list(struct task_struct *curr)
2696 + }
2697 + raw_spin_unlock_irq(&curr->pi_lock);
2698 + }
2699 +-
2700 ++#else
2701 ++static inline void exit_pi_state_list(struct task_struct *curr) { }
2702 + #endif
2703 +
2704 + /*
2705 +@@ -1169,16 +1176,47 @@ out_error:
2706 + return ret;
2707 + }
2708 +
2709 ++/**
2710 ++ * wait_for_owner_exiting - Block until the owner has exited
2711 ++ * @exiting: Pointer to the exiting task
2712 ++ *
2713 ++ * Caller must hold a refcount on @exiting.
2714 ++ */
2715 ++static void wait_for_owner_exiting(int ret, struct task_struct *exiting)
2716 ++{
2717 ++ if (ret != -EBUSY) {
2718 ++ WARN_ON_ONCE(exiting);
2719 ++ return;
2720 ++ }
2721 ++
2722 ++ if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
2723 ++ return;
2724 ++
2725 ++ mutex_lock(&exiting->futex_exit_mutex);
2726 ++ /*
2727 ++	 * No point in doing state checking here. If the waiter got here
2728 ++	 * while the task was in exec()->futex_exec_release() then it can
2729 ++	 * have any FUTEX_STATE_* value when the waiter has acquired the
2730 ++	 * mutex: OK if it is still running, EXITING or DEAD if it has
2731 ++	 * reached exit() already. Highly unlikely and not a problem. Just
2732 ++	 * one more round through the futex maze.
2733 ++ */
2734 ++ mutex_unlock(&exiting->futex_exit_mutex);
2735 ++
2736 ++ put_task_struct(exiting);
2737 ++}
2738 ++
2739 + static int handle_exit_race(u32 __user *uaddr, u32 uval,
2740 + struct task_struct *tsk)
2741 + {
2742 + u32 uval2;
2743 +
2744 + /*
2745 +- * If PF_EXITPIDONE is not yet set, then try again.
2746 ++ * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the
2747 ++ * caller that the alleged owner is busy.
2748 + */
2749 +- if (tsk && !(tsk->flags & PF_EXITPIDONE))
2750 +- return -EAGAIN;
2751 ++ if (tsk && tsk->futex_state != FUTEX_STATE_DEAD)
2752 ++ return -EBUSY;
2753 +
2754 + /*
2755 + * Reread the user space value to handle the following situation:
2756 +@@ -1196,8 +1234,9 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval,
2757 + * *uaddr = 0xC0000000; tsk = get_task(PID);
2758 + * } if (!tsk->flags & PF_EXITING) {
2759 + * ... attach();
2760 +- * tsk->flags |= PF_EXITPIDONE; } else {
2761 +- * if (!(tsk->flags & PF_EXITPIDONE))
2762 ++ * tsk->futex_state = } else {
2763 ++ * FUTEX_STATE_DEAD; if (tsk->futex_state !=
2764 ++ * FUTEX_STATE_DEAD)
2765 + * return -EAGAIN;
2766 + * return -ESRCH; <--- FAIL
2767 + * }
2768 +@@ -1228,7 +1267,8 @@ static int handle_exit_race(u32 __user *uaddr, u32 uval,
2769 + * it after doing proper sanity checks.
2770 + */
2771 + static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
2772 +- struct futex_pi_state **ps)
2773 ++ struct futex_pi_state **ps,
2774 ++ struct task_struct **exiting)
2775 + {
2776 + pid_t pid = uval & FUTEX_TID_MASK;
2777 + struct futex_pi_state *pi_state;
2778 +@@ -1253,22 +1293,33 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
2779 + }
2780 +
2781 + /*
2782 +- * We need to look at the task state flags to figure out,
2783 +- * whether the task is exiting. To protect against the do_exit
2784 +- * change of the task flags, we do this protected by
2785 +- * p->pi_lock:
2786 ++ * We need to look at the task state to figure out, whether the
2787 ++ * task is exiting. To protect against the change of the task state
2788 ++ * in futex_exit_release(), we do this protected by p->pi_lock:
2789 + */
2790 + raw_spin_lock_irq(&p->pi_lock);
2791 +- if (unlikely(p->flags & PF_EXITING)) {
2792 ++ if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
2793 + /*
2794 +- * The task is on the way out. When PF_EXITPIDONE is
2795 +- * set, we know that the task has finished the
2796 +- * cleanup:
2797 ++ * The task is on the way out. When the futex state is
2798 ++ * FUTEX_STATE_DEAD, we know that the task has finished
2799 ++ * the cleanup:
2800 + */
2801 + int ret = handle_exit_race(uaddr, uval, p);
2802 +
2803 + raw_spin_unlock_irq(&p->pi_lock);
2804 +- put_task_struct(p);
2805 ++ /*
2806 ++ * If the owner task is between FUTEX_STATE_EXITING and
2807 ++ * FUTEX_STATE_DEAD then store the task pointer and keep
2808 ++ * the reference on the task struct. The calling code will
2809 ++ * drop all locks, wait for the task to reach
2810 ++ * FUTEX_STATE_DEAD and then drop the refcount. This is
2811 ++ * required to prevent a live lock when the current task
2812 ++ * preempted the exiting task between the two states.
2813 ++ */
2814 ++ if (ret == -EBUSY)
2815 ++ *exiting = p;
2816 ++ else
2817 ++ put_task_struct(p);
2818 + return ret;
2819 + }
2820 +
2821 +@@ -1307,7 +1358,8 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
2822 +
2823 + static int lookup_pi_state(u32 __user *uaddr, u32 uval,
2824 + struct futex_hash_bucket *hb,
2825 +- union futex_key *key, struct futex_pi_state **ps)
2826 ++ union futex_key *key, struct futex_pi_state **ps,
2827 ++ struct task_struct **exiting)
2828 + {
2829 + struct futex_q *top_waiter = futex_top_waiter(hb, key);
2830 +
2831 +@@ -1322,7 +1374,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
2832 + * We are the first waiter - try to look up the owner based on
2833 + * @uval and attach to it.
2834 + */
2835 +- return attach_to_pi_owner(uaddr, uval, key, ps);
2836 ++ return attach_to_pi_owner(uaddr, uval, key, ps, exiting);
2837 + }
2838 +
2839 + static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
2840 +@@ -1350,6 +1402,8 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
2841 + * lookup
2842 + * @task: the task to perform the atomic lock work for. This will
2843 + * be "current" except in the case of requeue pi.
2844 ++ * @exiting: Pointer to store the task pointer of the owner task
2845 ++ * which is in the middle of exiting
2846 + * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
2847 + *
2848 + * Return:
2849 +@@ -1358,11 +1412,17 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
2850 + * - <0 - error
2851 + *
2852 + * The hb->lock and futex_key refs shall be held by the caller.
2853 ++ *
2854 ++ * @exiting is only set when the return value is -EBUSY. If so, this holds
2855 ++ * a refcount on the exiting task on return and the caller needs to drop it
2856 ++ * after waiting for the exit to complete.
2857 + */
2858 + static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
2859 + union futex_key *key,
2860 + struct futex_pi_state **ps,
2861 +- struct task_struct *task, int set_waiters)
2862 ++ struct task_struct *task,
2863 ++ struct task_struct **exiting,
2864 ++ int set_waiters)
2865 + {
2866 + u32 uval, newval, vpid = task_pid_vnr(task);
2867 + struct futex_q *top_waiter;
2868 +@@ -1432,7 +1492,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
2869 + * attach to the owner. If that fails, no harm done, we only
2870 + * set the FUTEX_WAITERS bit in the user space variable.
2871 + */
2872 +- return attach_to_pi_owner(uaddr, newval, key, ps);
2873 ++ return attach_to_pi_owner(uaddr, newval, key, ps, exiting);
2874 + }
2875 +
2876 + /**
2877 +@@ -1850,6 +1910,8 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
2878 + * @key1: the from futex key
2879 + * @key2: the to futex key
2880 + * @ps: address to store the pi_state pointer
2881 ++ * @exiting: Pointer to store the task pointer of the owner task
2882 ++ * which is in the middle of exiting
2883 + * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
2884 + *
2885 + * Try and get the lock on behalf of the top waiter if we can do it atomically.
2886 +@@ -1857,16 +1919,20 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
2887 + * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
2888 + * hb1 and hb2 must be held by the caller.
2889 + *
2890 ++ * @exiting is only set when the return value is -EBUSY. If so, this holds
2891 ++ * a refcount on the exiting task on return and the caller needs to drop it
2892 ++ * after waiting for the exit to complete.
2893 ++ *
2894 + * Return:
2895 + * - 0 - failed to acquire the lock atomically;
2896 + * - >0 - acquired the lock, return value is vpid of the top_waiter
2897 + * - <0 - error
2898 + */
2899 +-static int futex_proxy_trylock_atomic(u32 __user *pifutex,
2900 +- struct futex_hash_bucket *hb1,
2901 +- struct futex_hash_bucket *hb2,
2902 +- union futex_key *key1, union futex_key *key2,
2903 +- struct futex_pi_state **ps, int set_waiters)
2904 ++static int
2905 ++futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
2906 ++ struct futex_hash_bucket *hb2, union futex_key *key1,
2907 ++ union futex_key *key2, struct futex_pi_state **ps,
2908 ++ struct task_struct **exiting, int set_waiters)
2909 + {
2910 + struct futex_q *top_waiter = NULL;
2911 + u32 curval;
2912 +@@ -1903,7 +1969,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
2913 + */
2914 + vpid = task_pid_vnr(top_waiter->task);
2915 + ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
2916 +- set_waiters);
2917 ++ exiting, set_waiters);
2918 + if (ret == 1) {
2919 + requeue_pi_wake_futex(top_waiter, key2, hb2);
2920 + return vpid;
2921 +@@ -2032,6 +2098,8 @@ retry_private:
2922 + }
2923 +
2924 + if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
2925 ++ struct task_struct *exiting = NULL;
2926 ++
2927 + /*
2928 + * Attempt to acquire uaddr2 and wake the top waiter. If we
2929 + * intend to requeue waiters, force setting the FUTEX_WAITERS
2930 +@@ -2039,7 +2107,8 @@ retry_private:
2931 + * faults rather in the requeue loop below.
2932 + */
2933 + ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
2934 +- &key2, &pi_state, nr_requeue);
2935 ++ &key2, &pi_state,
2936 ++ &exiting, nr_requeue);
2937 +
2938 + /*
2939 + * At this point the top_waiter has either taken uaddr2 or is
2940 +@@ -2066,7 +2135,8 @@ retry_private:
2941 + * If that call succeeds then we have pi_state and an
2942 + * initial refcount on it.
2943 + */
2944 +- ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
2945 ++ ret = lookup_pi_state(uaddr2, ret, hb2, &key2,
2946 ++ &pi_state, &exiting);
2947 + }
2948 +
2949 + switch (ret) {
2950 +@@ -2084,17 +2154,24 @@ retry_private:
2951 + if (!ret)
2952 + goto retry;
2953 + goto out;
2954 ++ case -EBUSY:
2955 + case -EAGAIN:
2956 + /*
2957 + * Two reasons for this:
2958 +- * - Owner is exiting and we just wait for the
2959 ++ * - EBUSY: Owner is exiting and we just wait for the
2960 + * exit to complete.
2961 +- * - The user space value changed.
2962 ++ * - EAGAIN: The user space value changed.
2963 + */
2964 + double_unlock_hb(hb1, hb2);
2965 + hb_waiters_dec(hb2);
2966 + put_futex_key(&key2);
2967 + put_futex_key(&key1);
2968 ++ /*
2969 ++ * Handle the case where the owner is in the middle of
2970 ++ * exiting. Wait for the exit to complete otherwise
2971 ++ * this task might loop forever, aka. live lock.
2972 ++ */
2973 ++ wait_for_owner_exiting(ret, exiting);
2974 + cond_resched();
2975 + goto retry;
2976 + default:
2977 +@@ -2801,6 +2878,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
2978 + {
2979 + struct hrtimer_sleeper timeout, *to;
2980 + struct futex_pi_state *pi_state = NULL;
2981 ++ struct task_struct *exiting = NULL;
2982 + struct rt_mutex_waiter rt_waiter;
2983 + struct futex_hash_bucket *hb;
2984 + struct futex_q q = futex_q_init;
2985 +@@ -2822,7 +2900,8 @@ retry:
2986 + retry_private:
2987 + hb = queue_lock(&q);
2988 +
2989 +- ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
2990 ++ ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
2991 ++ &exiting, 0);
2992 + if (unlikely(ret)) {
2993 + /*
2994 + * Atomic work succeeded and we got the lock,
2995 +@@ -2835,15 +2914,22 @@ retry_private:
2996 + goto out_unlock_put_key;
2997 + case -EFAULT:
2998 + goto uaddr_faulted;
2999 ++ case -EBUSY:
3000 + case -EAGAIN:
3001 + /*
3002 + * Two reasons for this:
3003 +- * - Task is exiting and we just wait for the
3004 ++ * - EBUSY: Task is exiting and we just wait for the
3005 + * exit to complete.
3006 +- * - The user space value changed.
3007 ++ * - EAGAIN: The user space value changed.
3008 + */
3009 + queue_unlock(hb);
3010 + put_futex_key(&q.key);
3011 ++ /*
3012 ++ * Handle the case where the owner is in the middle of
3014 ++			 * exiting. Wait for the exit to complete; otherwise
3015 ++			 * this task might loop forever, i.e. live-lock.
3015 ++ */
3016 ++ wait_for_owner_exiting(ret, exiting);
3017 + cond_resched();
3018 + goto retry;
3019 + default:
3020 +@@ -3452,11 +3538,16 @@ err_unlock:
3021 + return ret;
3022 + }
3023 +
3024 ++/* Constants for the pending_op argument of handle_futex_death */
3025 ++#define HANDLE_DEATH_PENDING true
3026 ++#define HANDLE_DEATH_LIST false
3027 ++
3028 + /*
3029 + * Process a futex-list entry, check whether it's owned by the
3030 + * dying task, and do notification if so:
3031 + */
3032 +-static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
3033 ++static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
3034 ++ bool pi, bool pending_op)
3035 + {
3036 + u32 uval, uninitialized_var(nval), mval;
3037 + int err;
3038 +@@ -3469,6 +3560,42 @@ retry:
3039 + if (get_user(uval, uaddr))
3040 + return -1;
3041 +
3042 ++ /*
3043 ++ * Special case for regular (non PI) futexes. The unlock path in
3044 ++ * user space has two race scenarios:
3045 ++ *
3046 ++ * 1. The unlock path releases the user space futex value and
3047 ++ * before it can execute the futex() syscall to wake up
3048 ++ * waiters it is killed.
3049 ++ *
3050 ++ * 2. A woken up waiter is killed before it can acquire the
3051 ++ * futex in user space.
3052 ++ *
3053 ++ * In both cases the TID validation below prevents a wakeup of
3054 ++ * potential waiters which can cause these waiters to block
3055 ++ * forever.
3056 ++ *
3057 ++ * In both cases the following conditions are met:
3058 ++ *
3059 ++ * 1) task->robust_list->list_op_pending != NULL
3060 ++ * @pending_op == true
3061 ++ * 2) User space futex value == 0
3062 ++ * 3) Regular futex: @pi == false
3063 ++ *
3064 ++ * If these conditions are met, it is safe to attempt waking up a
3065 ++ * potential waiter without touching the user space futex value and
3066 ++ * trying to set the OWNER_DIED bit. The user space futex value is
3067 ++ * uncontended and the rest of the user space mutex state is
3068 ++ * consistent, so a woken waiter will just take over the
3069 ++ * uncontended futex. Setting the OWNER_DIED bit would create
3070 ++ * inconsistent state and malfunction of the user space owner died
3071 ++ * handling.
3072 ++ */
3073 ++ if (pending_op && !pi && !uval) {
3074 ++ futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
3075 ++ return 0;
3076 ++ }
3077 ++
3078 + if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
3079 + return 0;
3080 +
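The comment above describes the user-space side abstractly; concretely, a glibc-style robust mutex unlock does roughly the following (an illustrative sketch with made-up names, not code from this patch):

/* Illustrative user-space sketch -- not part of this patch. */
#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

struct robust_mutex {
        _Atomic unsigned int futex;     /* 0 = unlocked, else owner TID */
        struct robust_list list;        /* linkage for the robust list */
};

extern struct robust_list_head head;   /* registered via set_robust_list() */

static void robust_mutex_unlock(struct robust_mutex *m)
{
        /* Publish the pending operation for the kernel's death handler. */
        head.list_op_pending = &m->list;

        /* (Unlinking m from the robust list is elided.) */

        /* Release the futex word. */
        atomic_store_explicit(&m->futex, 0, memory_order_release);

        /*
         * Race 1 from the comment above: a kill landing here leaves the
         * word at 0 with waiters unwoken. The new fast path
         * (pending_op && !pi && !uval) wakes one waiter regardless.
         */
        syscall(SYS_futex, &m->futex, FUTEX_WAKE, 1, NULL, NULL, 0);

        head.list_op_pending = NULL;
}

Race 2 is the mirror image: a woken waiter is killed before it stores its TID, so the futex word is likewise 0 and the same fast path applies.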
3081 +@@ -3547,7 +3674,7 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
3082 + *
3083 + * We silently return on any sign of list-walking problem.
3084 + */
3085 +-void exit_robust_list(struct task_struct *curr)
3086 ++static void exit_robust_list(struct task_struct *curr)
3087 + {
3088 + struct robust_list_head __user *head = curr->robust_list;
3089 + struct robust_list __user *entry, *next_entry, *pending;
3090 +@@ -3588,10 +3715,11 @@ void exit_robust_list(struct task_struct *curr)
3091 + * A pending lock might already be on the list, so
3092 + * don't process it twice:
3093 + */
3094 +- if (entry != pending)
3095 ++ if (entry != pending) {
3096 + if (handle_futex_death((void __user *)entry + futex_offset,
3097 +- curr, pi))
3098 ++ curr, pi, HANDLE_DEATH_LIST))
3099 + return;
3100 ++ }
3101 + if (rc)
3102 + return;
3103 + entry = next_entry;
3104 +@@ -3605,9 +3733,118 @@ void exit_robust_list(struct task_struct *curr)
3105 + cond_resched();
3106 + }
3107 +
3108 +- if (pending)
3109 ++ if (pending) {
3110 + handle_futex_death((void __user *)pending + futex_offset,
3111 +- curr, pip);
3112 ++ curr, pip, HANDLE_DEATH_PENDING);
3113 ++ }
3114 ++}
3115 ++
3116 ++static void futex_cleanup(struct task_struct *tsk)
3117 ++{
3118 ++ if (unlikely(tsk->robust_list)) {
3119 ++ exit_robust_list(tsk);
3120 ++ tsk->robust_list = NULL;
3121 ++ }
3122 ++
3123 ++#ifdef CONFIG_COMPAT
3124 ++ if (unlikely(tsk->compat_robust_list)) {
3125 ++ compat_exit_robust_list(tsk);
3126 ++ tsk->compat_robust_list = NULL;
3127 ++ }
3128 ++#endif
3129 ++
3130 ++ if (unlikely(!list_empty(&tsk->pi_state_list)))
3131 ++ exit_pi_state_list(tsk);
3132 ++}
3133 ++
3134 ++/**
3135 ++ * futex_exit_recursive - Set the task's futex state to FUTEX_STATE_DEAD
3136 ++ * @tsk: task to set the state on
3137 ++ *
3138 ++ * Set the futex exit state of the task lockless. The futex waiter code
3139 ++ * observes that state when a task is exiting and loops until the task has
3140 ++ * actually finished the futex cleanup. The worst case for this is that the
3141 ++ * waiter runs through the wait loop until the state becomes visible.
3142 ++ *
3143 ++ * This is called from the recursive fault handling path in do_exit().
3144 ++ *
3145 ++ * This is best effort. Either the futex exit code has run already or
3146 ++ * not. If the OWNER_DIED bit has been set on the futex then the waiter can
3147 ++ * take it over. If not, the problem is pushed back to user space. If the
3148 ++ * futex exit code did not run yet, then an already queued waiter might
3149 ++ * block forever, but there is nothing which can be done about that.
3150 ++ */
3151 ++void futex_exit_recursive(struct task_struct *tsk)
3152 ++{
3153 ++ /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
3154 ++ if (tsk->futex_state == FUTEX_STATE_EXITING)
3155 ++ mutex_unlock(&tsk->futex_exit_mutex);
3156 ++ tsk->futex_state = FUTEX_STATE_DEAD;
3157 ++}
3158 ++
3159 ++static void futex_cleanup_begin(struct task_struct *tsk)
3160 ++{
3161 ++ /*
3162 ++ * Prevent various race issues against a concurrent incoming waiter
3163 ++ * including live locks by forcing the waiter to block on
3164 ++ * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
3165 ++ * attach_to_pi_owner().
3166 ++ */
3167 ++ mutex_lock(&tsk->futex_exit_mutex);
3168 ++
3169 ++ /*
3170 ++ * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
3171 ++ *
3172 ++ * This ensures that all subsequent checks of tsk->futex_state in
3173 ++ * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
3174 ++ * tsk->pi_lock held.
3175 ++ *
3176 ++ * It guarantees also that a pi_state which was queued right before
3177 ++ * the state change under tsk->pi_lock by a concurrent waiter must
3178 ++ * be observed in exit_pi_state_list().
3179 ++ */
3180 ++ raw_spin_lock_irq(&tsk->pi_lock);
3181 ++ tsk->futex_state = FUTEX_STATE_EXITING;
3182 ++ raw_spin_unlock_irq(&tsk->pi_lock);
3183 ++}
3184 ++
3185 ++static void futex_cleanup_end(struct task_struct *tsk, int state)
3186 ++{
3187 ++ /*
3188 ++ * Lockless store. The only side effect is that an observer might
3189 ++ * take another loop until it becomes visible.
3190 ++ */
3191 ++ tsk->futex_state = state;
3192 ++ /*
3193 ++ * Drop the exit protection. This unblocks waiters which observed
3194 ++ * FUTEX_STATE_EXITING to reevaluate the state.
3195 ++ */
3196 ++ mutex_unlock(&tsk->futex_exit_mutex);
3197 ++}
3198 ++
3199 ++void futex_exec_release(struct task_struct *tsk)
3200 ++{
3201 ++ /*
3202 ++ * The state handling is done for consistency, but in the case of
3203 ++ * exec() there is no way to prevent further damage as the PID stays
3204 ++ * the same. But for the unlikely and arguably buggy case that a
3205 ++ * futex is held on exec(), this provides at least as much state
3206 ++ * consistency protection as is possible.
3207 ++ */
3208 ++ futex_cleanup_begin(tsk);
3209 ++ futex_cleanup(tsk);
3210 ++ /*
3211 ++ * Reset the state to FUTEX_STATE_OK. The task is alive and about
3212 ++ * to exec a new binary.
3213 ++ */
3214 ++ futex_cleanup_end(tsk, FUTEX_STATE_OK);
3215 ++}
3216 ++
3217 ++void futex_exit_release(struct task_struct *tsk)
3218 ++{
3219 ++ futex_cleanup_begin(tsk);
3220 ++ futex_cleanup(tsk);
3221 ++ futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
3222 + }
3223 +
3224 + long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
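The callers of these two hooks are added elsewhere in this patch (upstream, in kernel/fork.c, as part of splitting mm_release() for the exit and exec paths). Per the upstream series their shape is roughly:

void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
        futex_exit_release(tsk);        /* ends in FUTEX_STATE_DEAD */
        mm_release(tsk, mm);
}

void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
        futex_exec_release(tsk);        /* resets to FUTEX_STATE_OK */
        mm_release(tsk, mm);
}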
3225 +@@ -3737,7 +3974,7 @@ static void __user *futex_uaddr(struct robust_list __user *entry,
3226 + *
3227 + * We silently return on any sign of list-walking problem.
3228 + */
3229 +-void compat_exit_robust_list(struct task_struct *curr)
3230 ++static void compat_exit_robust_list(struct task_struct *curr)
3231 + {
3232 + struct compat_robust_list_head __user *head = curr->compat_robust_list;
3233 + struct robust_list __user *entry, *next_entry, *pending;
3234 +@@ -3784,7 +4021,8 @@ void compat_exit_robust_list(struct task_struct *curr)
3235 + if (entry != pending) {
3236 + void __user *uaddr = futex_uaddr(entry, futex_offset);
3237 +
3238 +- if (handle_futex_death(uaddr, curr, pi))
3239 ++ if (handle_futex_death(uaddr, curr, pi,
3240 ++ HANDLE_DEATH_LIST))
3241 + return;
3242 + }
3243 + if (rc)
3244 +@@ -3803,7 +4041,7 @@ void compat_exit_robust_list(struct task_struct *curr)
3245 + if (pending) {
3246 + void __user *uaddr = futex_uaddr(pending, futex_offset);
3247 +
3248 +- handle_futex_death(uaddr, curr, pip);
3249 ++ handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
3250 + }
3251 + }
3252 +
3253 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
3254 +index 78bd2e3722c7..d14f6684737d 100644
3255 +--- a/sound/pci/hda/patch_hdmi.c
3256 ++++ b/sound/pci/hda/patch_hdmi.c
3257 +@@ -3454,26 +3454,6 @@ static int nvhdmi_chmap_validate(struct hdac_chmap *chmap,
3258 + return 0;
3259 + }
3260 +
3261 +-/* map from pin NID to port; port is 0-based */
3262 +-/* for Nvidia: assume widget NID starting from 4, with step 1 (4, 5, 6, ...) */
3263 +-static int nvhdmi_pin2port(void *audio_ptr, int pin_nid)
3264 +-{
3265 +- return pin_nid - 4;
3266 +-}
3267 +-
3268 +-/* reverse-map from port to pin NID: see above */
3269 +-static int nvhdmi_port2pin(struct hda_codec *codec, int port)
3270 +-{
3271 +- return port + 4;
3272 +-}
3273 +-
3274 +-static const struct drm_audio_component_audio_ops nvhdmi_audio_ops = {
3275 +- .pin2port = nvhdmi_pin2port,
3276 +- .pin_eld_notify = generic_acomp_pin_eld_notify,
3277 +- .master_bind = generic_acomp_master_bind,
3278 +- .master_unbind = generic_acomp_master_unbind,
3279 +-};
3280 +-
3281 + static int patch_nvhdmi(struct hda_codec *codec)
3282 + {
3283 + struct hdmi_spec *spec;
3284 +@@ -3492,8 +3472,6 @@ static int patch_nvhdmi(struct hda_codec *codec)
3285 +
3286 + codec->link_down_at_suspend = 1;
3287 +
3288 +- generic_acomp_init(codec, &nvhdmi_audio_ops, nvhdmi_port2pin);
3289 +-
3290 + return 0;
3291 + }
3292 +
3293 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
3294 +index 45eee5cc312e..6cd4ff09c5ee 100644
3295 +--- a/sound/usb/mixer.c
3296 ++++ b/sound/usb/mixer.c
3297 +@@ -2930,6 +2930,9 @@ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer,
3298 + continue;
3299 +
3300 + iface = usb_ifnum_to_if(dev, intf);
3301 ++ if (!iface)
3302 ++ continue;
3303 ++
3304 + num = iface->num_altsetting;
3305 +
3306 + if (num < 2)
3307 +diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
3308 +index 7d460b1f1735..94b903d95afa 100644
3309 +--- a/sound/usb/mixer_scarlett_gen2.c
3310 ++++ b/sound/usb/mixer_scarlett_gen2.c
3311 +@@ -261,34 +261,34 @@ static const struct scarlett2_device_info s6i6_gen2_info = {
3312 + },
3313 +
3314 + .ports = {
3315 +- {
3316 ++ [SCARLETT2_PORT_TYPE_NONE] = {
3317 + .id = 0x000,
3318 + .num = { 1, 0, 8, 8, 8 },
3319 + .src_descr = "Off",
3320 + .src_num_offset = 0,
3321 + },
3322 +- {
3323 ++ [SCARLETT2_PORT_TYPE_ANALOGUE] = {
3324 + .id = 0x080,
3325 + .num = { 4, 4, 4, 4, 4 },
3326 + .src_descr = "Analogue %d",
3327 + .src_num_offset = 1,
3328 + .dst_descr = "Analogue Output %02d Playback"
3329 + },
3330 +- {
3331 ++ [SCARLETT2_PORT_TYPE_SPDIF] = {
3332 + .id = 0x180,
3333 + .num = { 2, 2, 2, 2, 2 },
3334 + .src_descr = "S/PDIF %d",
3335 + .src_num_offset = 1,
3336 + .dst_descr = "S/PDIF Output %d Playback"
3337 + },
3338 +- {
3339 ++ [SCARLETT2_PORT_TYPE_MIX] = {
3340 + .id = 0x300,
3341 + .num = { 10, 18, 18, 18, 18 },
3342 + .src_descr = "Mix %c",
3343 + .src_num_offset = 65,
3344 + .dst_descr = "Mixer Input %02d Capture"
3345 + },
3346 +- {
3347 ++ [SCARLETT2_PORT_TYPE_PCM] = {
3348 + .id = 0x600,
3349 + .num = { 6, 6, 6, 6, 6 },
3350 + .src_descr = "PCM %d",
3351 +@@ -317,44 +317,44 @@ static const struct scarlett2_device_info s18i8_gen2_info = {
3352 + },
3353 +
3354 + .ports = {
3355 +- {
3356 ++ [SCARLETT2_PORT_TYPE_NONE] = {
3357 + .id = 0x000,
3358 + .num = { 1, 0, 8, 8, 4 },
3359 + .src_descr = "Off",
3360 + .src_num_offset = 0,
3361 + },
3362 +- {
3363 ++ [SCARLETT2_PORT_TYPE_ANALOGUE] = {
3364 + .id = 0x080,
3365 + .num = { 8, 6, 6, 6, 6 },
3366 + .src_descr = "Analogue %d",
3367 + .src_num_offset = 1,
3368 + .dst_descr = "Analogue Output %02d Playback"
3369 + },
3370 +- {
3371 ++ [SCARLETT2_PORT_TYPE_SPDIF] = {
3372 ++ .id = 0x180,
3373 + /* S/PDIF outputs aren't available at 192KHz
3374 + * but are included in the USB mux I/O
3375 + * assignment message anyway
3376 + */
3377 +- .id = 0x180,
3378 + .num = { 2, 2, 2, 2, 2 },
3379 + .src_descr = "S/PDIF %d",
3380 + .src_num_offset = 1,
3381 + .dst_descr = "S/PDIF Output %d Playback"
3382 + },
3383 +- {
3384 ++ [SCARLETT2_PORT_TYPE_ADAT] = {
3385 + .id = 0x200,
3386 + .num = { 8, 0, 0, 0, 0 },
3387 + .src_descr = "ADAT %d",
3388 + .src_num_offset = 1,
3389 + },
3390 +- {
3391 ++ [SCARLETT2_PORT_TYPE_MIX] = {
3392 + .id = 0x300,
3393 + .num = { 10, 18, 18, 18, 18 },
3394 + .src_descr = "Mix %c",
3395 + .src_num_offset = 65,
3396 + .dst_descr = "Mixer Input %02d Capture"
3397 + },
3398 +- {
3399 ++ [SCARLETT2_PORT_TYPE_PCM] = {
3400 + .id = 0x600,
3401 + .num = { 20, 18, 18, 14, 10 },
3402 + .src_descr = "PCM %d",
3403 +@@ -387,20 +387,20 @@ static const struct scarlett2_device_info s18i20_gen2_info = {
3404 + },
3405 +
3406 + .ports = {
3407 +- {
3408 ++ [SCARLETT2_PORT_TYPE_NONE] = {
3409 + .id = 0x000,
3410 + .num = { 1, 0, 8, 8, 6 },
3411 + .src_descr = "Off",
3412 + .src_num_offset = 0,
3413 + },
3414 +- {
3415 ++ [SCARLETT2_PORT_TYPE_ANALOGUE] = {
3416 + .id = 0x080,
3417 + .num = { 8, 10, 10, 10, 10 },
3418 + .src_descr = "Analogue %d",
3419 + .src_num_offset = 1,
3420 + .dst_descr = "Analogue Output %02d Playback"
3421 + },
3422 +- {
3423 ++ [SCARLETT2_PORT_TYPE_SPDIF] = {
3424 + /* S/PDIF outputs aren't available at 192KHz
3425 + * but are included in the USB mux I/O
3426 + * assignment message anyway
3427 +@@ -411,21 +411,21 @@ static const struct scarlett2_device_info s18i20_gen2_info = {
3428 + .src_num_offset = 1,
3429 + .dst_descr = "S/PDIF Output %d Playback"
3430 + },
3431 +- {
3432 ++ [SCARLETT2_PORT_TYPE_ADAT] = {
3433 + .id = 0x200,
3434 + .num = { 8, 8, 8, 4, 0 },
3435 + .src_descr = "ADAT %d",
3436 + .src_num_offset = 1,
3437 + .dst_descr = "ADAT Output %d Playback"
3438 + },
3439 +- {
3440 ++ [SCARLETT2_PORT_TYPE_MIX] = {
3441 + .id = 0x300,
3442 + .num = { 10, 18, 18, 18, 18 },
3443 + .src_descr = "Mix %c",
3444 + .src_num_offset = 65,
3445 + .dst_descr = "Mixer Input %02d Capture"
3446 + },
3447 +- {
3448 ++ [SCARLETT2_PORT_TYPE_PCM] = {
3449 + .id = 0x600,
3450 + .num = { 20, 18, 18, 14, 10 },
3451 + .src_descr = "PCM %d",
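The mixer_scarlett_gen2 change is mechanical but worth spelling out: the port tables switch from positional to designated array initializers, pinning each entry to its SCARLETT2_PORT_TYPE_* index. That matters because the tables differ per device; the 6i6 table above has no ADAT entry, so with positional initialization its later entries landed one slot too early. In miniature, with hypothetical names:

/* Hypothetical miniature of the idiom -- not code from this patch. */
enum port_type { PORT_NONE, PORT_ANALOGUE, PORT_ADAT, PORT_PCM,
                 NUM_PORT_TYPES };

struct port_info { int id; };

/* Positional: omitting PORT_ADAT shifts PCM into the ADAT slot. */
static const struct port_info bad_ports[NUM_PORT_TYPES] = {
        { 0x000 }, { 0x080 }, { 0x600 },        /* 0x600 lands on PORT_ADAT */
};

/* Designated: entries land on their named index; gaps stay zeroed. */
static const struct port_info good_ports[NUM_PORT_TYPES] = {
        [PORT_NONE]     = { .id = 0x000 },
        [PORT_ANALOGUE] = { .id = 0x080 },
        [PORT_PCM]      = { .id = 0x600 },
};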
3452 +diff --git a/tools/arch/x86/tools/gen-insn-attr-x86.awk b/tools/arch/x86/tools/gen-insn-attr-x86.awk
3453 +index b02a36b2c14f..a42015b305f4 100644
3454 +--- a/tools/arch/x86/tools/gen-insn-attr-x86.awk
3455 ++++ b/tools/arch/x86/tools/gen-insn-attr-x86.awk
3456 +@@ -69,7 +69,7 @@ BEGIN {
3457 +
3458 + lprefix1_expr = "\\((66|!F3)\\)"
3459 + lprefix2_expr = "\\(F3\\)"
3460 +- lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
3461 ++ lprefix3_expr = "\\((F2|!F3|66&F2)\\)"
3462 + lprefix_expr = "\\((66|F2|F3)\\)"
3463 + max_lprefix = 4
3464 +
3465 +@@ -257,7 +257,7 @@ function convert_operands(count,opnd, i,j,imm,mod)
3466 + return add_flags(imm, mod)
3467 + }
3468 +
3469 +-/^[0-9a-f]+\:/ {
3470 ++/^[0-9a-f]+:/ {
3471 + if (NR == 1)
3472 + next
3473 + # get index
3474 +diff --git a/tools/testing/selftests/x86/mov_ss_trap.c b/tools/testing/selftests/x86/mov_ss_trap.c
3475 +index 3c3a022654f3..6da0ac3f0135 100644
3476 +--- a/tools/testing/selftests/x86/mov_ss_trap.c
3477 ++++ b/tools/testing/selftests/x86/mov_ss_trap.c
3478 +@@ -257,7 +257,8 @@ int main()
3479 + err(1, "sigaltstack");
3480 + sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND | SA_ONSTACK);
3481 + nr = SYS_getpid;
3482 +- asm volatile ("mov %[ss], %%ss; SYSENTER" : "+a" (nr)
3483 ++ /* Clear EBP first to make sure we segfault cleanly. */
3484 ++ asm volatile ("xorl %%ebp, %%ebp; mov %[ss], %%ss; SYSENTER" : "+a" (nr)
3485 + : [ss] "m" (ss) : "flags", "rcx"
3486 + #ifdef __x86_64__
3487 + , "r11"
3488 +diff --git a/tools/testing/selftests/x86/sigreturn.c b/tools/testing/selftests/x86/sigreturn.c
3489 +index 3e49a7873f3e..57c4f67f16ef 100644
3490 +--- a/tools/testing/selftests/x86/sigreturn.c
3491 ++++ b/tools/testing/selftests/x86/sigreturn.c
3492 +@@ -451,6 +451,19 @@ static void sigusr1(int sig, siginfo_t *info, void *ctx_void)
3493 + ctx->uc_mcontext.gregs[REG_SP] = (unsigned long)0x8badf00d5aadc0deULL;
3494 + ctx->uc_mcontext.gregs[REG_CX] = 0;
3495 +
3496 ++#ifdef __i386__
3497 ++ /*
3498 ++ * Make sure the kernel doesn't inadvertently use DS or ES-relative
3499 ++ * accesses in a region where user DS or ES is loaded.
3500 ++ *
3501 ++ * Skip this for 64-bit builds because long mode doesn't care about
3502 ++ * DS and ES and skipping it increases test coverage a little bit,
3503 ++ * since 64-bit kernels can still run the 32-bit build.
3504 ++ */
3505 ++ ctx->uc_mcontext.gregs[REG_DS] = 0;
3506 ++ ctx->uc_mcontext.gregs[REG_ES] = 0;
3507 ++#endif
3508 ++
3509 + memcpy(&requested_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
3510 + requested_regs[REG_CX] = *ssptr(ctx); /* The asm code does this. */
3511 +
3512 +diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c
3513 +index 2813aa821c82..d1d8ba2a4a40 100644
3514 +--- a/tools/usb/usbip/libsrc/usbip_host_common.c
3515 ++++ b/tools/usb/usbip/libsrc/usbip_host_common.c
3516 +@@ -57,7 +57,7 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
3517 + }
3518 +
3519 + value = atoi(status);
3520 +-
3521 ++ close(fd);
3522 + return value;
3523 + }
3524 +
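Finally, the usbip hunk plugs a file-descriptor leak: read_attr_usbip_status() opens a sysfs attribute but, before this fix, never closed it on the success path, so every status query leaked one descriptor. Reduced to its shape (an illustrative simplification, not the literal function):

/* Illustrative reduction of the pattern -- not the literal function. */
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

static int32_t read_attr_status(const char *path)
{
        char status[2] = { 0 };
        int fd;
        int32_t value;

        fd = open(path, O_RDONLY);
        if (fd < 0)
                return -1;

        if (read(fd, status, 1) < 0) {
                close(fd);
                return -1;
        }

        value = atoi(status);
        close(fd);      /* the fix: also close on the success path */
        return value;
}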