commit: 57acb6027fa35bec9bb58c8f2fa8a716595c302b
Author: Thomas Deutschmann <whissi <AT> whissi <DOT> de>
AuthorDate: Fri Nov 29 21:37:56 2019 +0000
Commit: Thomas Deutschmann <whissi <AT> gentoo <DOT> org>
CommitDate: Fri Nov 29 21:37:56 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=57acb602

Linux patch 5.3.14

Signed-off-by: Thomas Deutschmann <whissi <AT> whissi.de>

 1013_linux-5.3.14.patch | 4004 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 4004 insertions(+)

diff --git a/1013_linux-5.3.14.patch b/1013_linux-5.3.14.patch
new file mode 100644
index 0000000..038253d
--- /dev/null
+++ b/1013_linux-5.3.14.patch
@@ -0,0 +1,4004 @@
+diff --git a/Documentation/admin-guide/hw-vuln/mds.rst b/Documentation/admin-guide/hw-vuln/mds.rst
+index e3a796c0d3a2..2d19c9f4c1fe 100644
+--- a/Documentation/admin-guide/hw-vuln/mds.rst
++++ b/Documentation/admin-guide/hw-vuln/mds.rst
+@@ -265,8 +265,11 @@ time with the option "mds=". The valid arguments for this option are:
+
+ ============ =============================================================
+
+-Not specifying this option is equivalent to "mds=full".
+-
++Not specifying this option is equivalent to "mds=full". For processors
++that are affected by both TAA (TSX Asynchronous Abort) and MDS,
++specifying just "mds=off" without an accompanying "tsx_async_abort=off"
++will have no effect as the same mitigation is used for both
++vulnerabilities.
+
+ Mitigation selection guide
+ --------------------------
+diff --git a/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
+index fddbd7579c53..af6865b822d2 100644
+--- a/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
++++ b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
+@@ -174,7 +174,10 @@ the option "tsx_async_abort=". The valid arguments for this option are:
+ CPU is not vulnerable to cross-thread TAA attacks.
+ ============ =============================================================
+
+-Not specifying this option is equivalent to "tsx_async_abort=full".
++Not specifying this option is equivalent to "tsx_async_abort=full". For
++processors that are affected by both TAA and MDS, specifying just
++"tsx_async_abort=off" without an accompanying "mds=off" will have no
++effect as the same mitigation is used for both vulnerabilities.
+
+ The kernel command line also allows to control the TSX feature using the
+ parameter "tsx=" on CPUs which support TSX control. MSR_IA32_TSX_CTRL is used
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 49d1719177ea..c4894b716fbe 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2449,6 +2449,12 @@
+ SMT on vulnerable CPUs
+ off - Unconditionally disable MDS mitigation
+
++ On TAA-affected machines, mds=off can be prevented by
++ an active TAA mitigation as both vulnerabilities are
++ mitigated with the same mechanism so in order to disable
++ this mitigation, you need to specify tsx_async_abort=off
++ too.
++
+ Not specifying this option is equivalent to
+ mds=full.
+
+@@ -4896,6 +4902,11 @@
+ vulnerable to cross-thread TAA attacks.
+ off - Unconditionally disable TAA mitigation
+
++ On MDS-affected machines, tsx_async_abort=off can be
++ prevented by an active MDS mitigation as both vulnerabilities
++ are mitigated with the same mechanism so in order to disable
++ this mitigation, you need to specify mds=off too.
++
+ Not specifying this option is equivalent to
+ tsx_async_abort=full. On CPUs which are MDS affected
+ and deploy MDS mitigation, TAA mitigation is not
+diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
+index ae661e65354e..f9499b20d840 100644
+--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
++++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
+@@ -81,6 +81,12 @@ Optional properties:
+ Definition: Name of external front end module used. Some valid FEM names
+ for example: "microsemi-lx5586", "sky85703-11"
+ and "sky85803" etc.
++- qcom,snoc-host-cap-8bit-quirk:
++ Usage: Optional
++ Value type: <empty>
++ Definition: Quirk specifying that the firmware expects the 8bit version
++ of the host capability QMI request
++
+
+ Example (to supply PCI based wifi block details):
+
+diff --git a/Makefile b/Makefile
+index f9d3d58ae801..1e5933d6dc97 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 3
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index d5e0b908f0ba..25da9b2d9610 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -1197,6 +1197,9 @@ void __init adjust_lowmem_bounds(void)
+ phys_addr_t block_start = reg->base;
+ phys_addr_t block_end = reg->base + reg->size;
+
++ if (memblock_is_nomap(reg))
++ continue;
++
+ if (reg->base < vmalloc_limit) {
+ if (block_end > lowmem_limit)
+ /*
+diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
+index ec1c97a8e8cb..baaafc9b9d88 100644
+--- a/arch/powerpc/include/asm/asm-prototypes.h
++++ b/arch/powerpc/include/asm/asm-prototypes.h
+@@ -140,9 +140,12 @@ void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+ /* Patch sites */
+ extern s32 patch__call_flush_count_cache;
+ extern s32 patch__flush_count_cache_return;
++extern s32 patch__flush_link_stack_return;
++extern s32 patch__call_kvm_flush_link_stack;
+ extern s32 patch__memset_nocache, patch__memcpy_nocache;
+
+ extern long flush_count_cache;
++extern long kvm_flush_link_stack;
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
+diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
+index 759597bf0fd8..ccf44c135389 100644
+--- a/arch/powerpc/include/asm/security_features.h
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -81,6 +81,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
+ // Software required to flush count cache on context switch
+ #define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull
+
++// Software required to flush link stack on context switch
++#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull
++
+
+ // Features enabled by default
+ #define SEC_FTR_DEFAULT \
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index 0a0b5310f54a..81d61770f9c2 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -546,6 +546,7 @@ flush_count_cache:
+ /* Save LR into r9 */
+ mflr r9
+
++ // Flush the link stack
+ .rept 64
+ bl .+4
+ .endr
+@@ -555,6 +556,11 @@ flush_count_cache:
+ .balign 32
+ /* Restore LR */
+ 1: mtlr r9
++
++ // If we're just flushing the link stack, return here
++3: nop
++ patch_site 3b patch__flush_link_stack_return
++
+ li r9,0x7fff
+ mtctr r9
+
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index e1c9cf079503..bd91dceb7010 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -24,11 +24,12 @@ enum count_cache_flush_type {
+ COUNT_CACHE_FLUSH_HW = 0x4,
+ };
+ static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
++static bool link_stack_flush_enabled;
+
+ bool barrier_nospec_enabled;
+ static bool no_nospec;
+ static bool btb_flush_enabled;
+-#ifdef CONFIG_PPC_FSL_BOOK3E
++#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
+ static bool no_spectrev2;
+ #endif
+
+@@ -114,7 +115,7 @@ static __init int security_feature_debugfs_init(void)
+ device_initcall(security_feature_debugfs_init);
+ #endif /* CONFIG_DEBUG_FS */
+
+-#ifdef CONFIG_PPC_FSL_BOOK3E
++#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
+ static int __init handle_nospectre_v2(char *p)
+ {
+ no_spectrev2 = true;
+@@ -122,6 +123,9 @@ static int __init handle_nospectre_v2(char *p)
+ return 0;
+ }
+ early_param("nospectre_v2", handle_nospectre_v2);
++#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */
++
++#ifdef CONFIG_PPC_FSL_BOOK3E
+ void setup_spectre_v2(void)
+ {
+ if (no_spectrev2 || cpu_mitigations_off())
+@@ -209,11 +213,19 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
+
+ if (ccd)
+ seq_buf_printf(&s, "Indirect branch cache disabled");
++
++ if (link_stack_flush_enabled)
++ seq_buf_printf(&s, ", Software link stack flush");
++
+ } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
+ seq_buf_printf(&s, "Mitigation: Software count cache flush");
+
+ if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
+ seq_buf_printf(&s, " (hardware accelerated)");
++
++ if (link_stack_flush_enabled)
++ seq_buf_printf(&s, ", Software link stack flush");
++
+ } else if (btb_flush_enabled) {
+ seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
+ } else {
+@@ -374,18 +386,49 @@ static __init int stf_barrier_debugfs_init(void)
+ device_initcall(stf_barrier_debugfs_init);
+ #endif /* CONFIG_DEBUG_FS */
+
++static void no_count_cache_flush(void)
++{
++ count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
++ pr_info("count-cache-flush: software flush disabled.\n");
++}
++
+ static void toggle_count_cache_flush(bool enable)
+ {
+- if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
++ if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) &&
++ !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK))
++ enable = false;
++
++ if (!enable) {
+ patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
+- count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+- pr_info("count-cache-flush: software flush disabled.\n");
++#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
++ patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP);
++#endif
++ pr_info("link-stack-flush: software flush disabled.\n");
++ link_stack_flush_enabled = false;
++ no_count_cache_flush();
+ return;
+ }
+
++ // This enables the branch from _switch to flush_count_cache
+ patch_branch_site(&patch__call_flush_count_cache,
+ (u64)&flush_count_cache, BRANCH_SET_LINK);
+
++#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
++ // This enables the branch from guest_exit_cont to kvm_flush_link_stack
++ patch_branch_site(&patch__call_kvm_flush_link_stack,
++ (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
++#endif
++
++ pr_info("link-stack-flush: software flush enabled.\n");
++ link_stack_flush_enabled = true;
++
++ // If we just need to flush the link stack, patch an early return
++ if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
++ patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR);
++ no_count_cache_flush();
++ return;
++ }
++
+ if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
+ count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
+ pr_info("count-cache-flush: full software flush sequence enabled.\n");
+@@ -399,7 +442,26 @@ static void toggle_count_cache_flush(bool enable)
+
+ void setup_count_cache_flush(void)
+ {
+- toggle_count_cache_flush(true);
++ bool enable = true;
++
++ if (no_spectrev2 || cpu_mitigations_off()) {
++ if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
++ security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
++ pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");
++
++ enable = false;
++ }
++
++ /*
++ * There's no firmware feature flag/hypervisor bit to tell us we need to
++ * flush the link stack on context switch. So we set it here if we see
++ * either of the Spectre v2 mitigations that aim to protect userspace.
++ */
++ if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
++ security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
++ security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
++
++ toggle_count_cache_flush(enable);
+ }
+
+ #ifdef CONFIG_DEBUG_FS
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 07181d0dfcb7..0ba1d7abb798 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -11,6 +11,7 @@
+ */
+
+ #include <asm/ppc_asm.h>
++#include <asm/code-patching-asm.h>
+ #include <asm/kvm_asm.h>
+ #include <asm/reg.h>
+ #include <asm/mmu.h>
+@@ -1458,6 +1459,13 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
+ 1:
+ #endif /* CONFIG_KVM_XICS */
+
++ /*
++ * Possibly flush the link stack here, before we do a blr in
++ * guest_exit_short_path.
++ */
++1: nop
++ patch_site 1b patch__call_kvm_flush_link_stack
++
+ /* If we came in through the P9 short path, go back out to C now */
+ lwz r0, STACK_SLOT_SHORT_PATH(r1)
+ cmpwi r0, 0
+@@ -1933,6 +1941,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ mtlr r0
+ blr
+
++.balign 32
++.global kvm_flush_link_stack
++kvm_flush_link_stack:
++ /* Save LR into r0 */
++ mflr r0
++
++ /* Flush the link stack. On Power8 it's up to 32 entries in size. */
++ .rept 32
++ bl .+4
++ .endr
++
++ /* And on Power9 it's up to 64. */
++BEGIN_FTR_SECTION
++ .rept 32
++ bl .+4
++ .endr
++END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
++
++ /* Restore LR */
++ mtlr r0
++ blr
++
+ kvmppc_guest_external:
+ /* External interrupt, first check for host_ipi. If this is
+ * set, we know the host wants us out so let's do it now
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index 4f86928246e7..1153e510cedd 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -172,7 +172,7 @@
+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+ .if \no_user_check == 0
+ /* coming from usermode? */
+- testl $SEGMENT_RPL_MASK, PT_CS(%esp)
++ testl $USER_SEGMENT_RPL_MASK, PT_CS(%esp)
+ jz .Lend_\@
+ .endif
+ /* On user-cr3? */
+@@ -205,64 +205,76 @@
+ #define CS_FROM_ENTRY_STACK (1 << 31)
+ #define CS_FROM_USER_CR3 (1 << 30)
+ #define CS_FROM_KERNEL (1 << 29)
++#define CS_FROM_ESPFIX (1 << 28)
+
+ .macro FIXUP_FRAME
+ /*
+ * The high bits of the CS dword (__csh) are used for CS_FROM_*.
+ * Clear them in case hardware didn't do this for us.
+ */
+- andl $0x0000ffff, 3*4(%esp)
++ andl $0x0000ffff, 4*4(%esp)
+
+ #ifdef CONFIG_VM86
+- testl $X86_EFLAGS_VM, 4*4(%esp)
++ testl $X86_EFLAGS_VM, 5*4(%esp)
+ jnz .Lfrom_usermode_no_fixup_\@
+ #endif
+- testl $SEGMENT_RPL_MASK, 3*4(%esp)
++ testl $USER_SEGMENT_RPL_MASK, 4*4(%esp)
+ jnz .Lfrom_usermode_no_fixup_\@
+
+- orl $CS_FROM_KERNEL, 3*4(%esp)
++ orl $CS_FROM_KERNEL, 4*4(%esp)
+
+ /*
+ * When we're here from kernel mode; the (exception) stack looks like:
+ *
+- * 5*4(%esp) - <previous context>
+- * 4*4(%esp) - flags
+- * 3*4(%esp) - cs
+- * 2*4(%esp) - ip
+- * 1*4(%esp) - orig_eax
+- * 0*4(%esp) - gs / function
++ * 6*4(%esp) - <previous context>
++ * 5*4(%esp) - flags
++ * 4*4(%esp) - cs
++ * 3*4(%esp) - ip
++ * 2*4(%esp) - orig_eax
++ * 1*4(%esp) - gs / function
++ * 0*4(%esp) - fs
+ *
+ * Lets build a 5 entry IRET frame after that, such that struct pt_regs
+ * is complete and in particular regs->sp is correct. This gives us
+- * the original 5 enties as gap:
++ * the original 6 enties as gap:
+ *
+- * 12*4(%esp) - <previous context>
+- * 11*4(%esp) - gap / flags
+- * 10*4(%esp) - gap / cs
+- * 9*4(%esp) - gap / ip
+- * 8*4(%esp) - gap / orig_eax
+- * 7*4(%esp) - gap / gs / function
+- * 6*4(%esp) - ss
+- * 5*4(%esp) - sp
+- * 4*4(%esp) - flags
+- * 3*4(%esp) - cs
+- * 2*4(%esp) - ip
+- * 1*4(%esp) - orig_eax
+- * 0*4(%esp) - gs / function
++ * 14*4(%esp) - <previous context>
++ * 13*4(%esp) - gap / flags
++ * 12*4(%esp) - gap / cs
++ * 11*4(%esp) - gap / ip
++ * 10*4(%esp) - gap / orig_eax
++ * 9*4(%esp) - gap / gs / function
++ * 8*4(%esp) - gap / fs
++ * 7*4(%esp) - ss
++ * 6*4(%esp) - sp
++ * 5*4(%esp) - flags
++ * 4*4(%esp) - cs
++ * 3*4(%esp) - ip
++ * 2*4(%esp) - orig_eax
++ * 1*4(%esp) - gs / function
++ * 0*4(%esp) - fs
+ */
+
+ pushl %ss # ss
+ pushl %esp # sp (points at ss)
+- addl $6*4, (%esp) # point sp back at the previous context
+- pushl 6*4(%esp) # flags
+- pushl 6*4(%esp) # cs
+- pushl 6*4(%esp) # ip
+- pushl 6*4(%esp) # orig_eax
+- pushl 6*4(%esp) # gs / function
++ addl $7*4, (%esp) # point sp back at the previous context
++ pushl 7*4(%esp) # flags
++ pushl 7*4(%esp) # cs
++ pushl 7*4(%esp) # ip
++ pushl 7*4(%esp) # orig_eax
++ pushl 7*4(%esp) # gs / function
++ pushl 7*4(%esp) # fs
+ .Lfrom_usermode_no_fixup_\@:
+ .endm
+
+ .macro IRET_FRAME
++ /*
++ * We're called with %ds, %es, %fs, and %gs from the interrupted
++ * frame, so we shouldn't use them. Also, we may be in ESPFIX
++ * mode and therefore have a nonzero SS base and an offset ESP,
++ * so any attempt to access the stack needs to use SS. (except for
++ * accesses through %esp, which automatically use SS.)
++ */
+ testl $CS_FROM_KERNEL, 1*4(%esp)
+ jz .Lfinished_frame_\@
+
+@@ -276,31 +288,40 @@
+ movl 5*4(%esp), %eax # (modified) regs->sp
+
+ movl 4*4(%esp), %ecx # flags
+- movl %ecx, -4(%eax)
++ movl %ecx, %ss:-1*4(%eax)
+
+ movl 3*4(%esp), %ecx # cs
+ andl $0x0000ffff, %ecx
+- movl %ecx, -8(%eax)
++ movl %ecx, %ss:-2*4(%eax)
+
+ movl 2*4(%esp), %ecx # ip
+- movl %ecx, -12(%eax)
++ movl %ecx, %ss:-3*4(%eax)
+
+ movl 1*4(%esp), %ecx # eax
+- movl %ecx, -16(%eax)
++ movl %ecx, %ss:-4*4(%eax)
+
+ popl %ecx
+- lea -16(%eax), %esp
++ lea -4*4(%eax), %esp
+ popl %eax
+ .Lfinished_frame_\@:
+ .endm
+
+-.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0
++.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
+ cld
+ .if \skip_gs == 0
+ PUSH_GS
+ .endif
+- FIXUP_FRAME
+ pushl %fs
++
++ pushl %eax
++ movl $(__KERNEL_PERCPU), %eax
++ movl %eax, %fs
++.if \unwind_espfix > 0
++ UNWIND_ESPFIX_STACK
++.endif
++ popl %eax
++
++ FIXUP_FRAME
+ pushl %es
+ pushl %ds
+ pushl \pt_regs_ax
+@@ -313,8 +334,6 @@
+ movl $(__USER_DS), %edx
+ movl %edx, %ds
+ movl %edx, %es
+- movl $(__KERNEL_PERCPU), %edx
+- movl %edx, %fs
+ .if \skip_gs == 0
+ SET_KERNEL_GS %edx
+ .endif
+@@ -324,8 +343,8 @@
+ .endif
+ .endm
+
+-.macro SAVE_ALL_NMI cr3_reg:req
+- SAVE_ALL
++.macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0
++ SAVE_ALL unwind_espfix=\unwind_espfix
+
+ BUG_IF_WRONG_CR3
+
+@@ -357,6 +376,7 @@
+ 2: popl %es
+ 3: popl %fs
+ POP_GS \pop
++ IRET_FRAME
+ .pushsection .fixup, "ax"
+ 4: movl $0, (%esp)
+ jmp 1b
+@@ -395,7 +415,8 @@
+
+ .macro CHECK_AND_APPLY_ESPFIX
+ #ifdef CONFIG_X86_ESPFIX32
+-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
++#define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8)
++#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET
+
+ ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX
+
+@@ -1075,7 +1096,6 @@ restore_all:
+ /* Restore user state */
+ RESTORE_REGS pop=4 # skip orig_eax/error_code
+ .Lirq_return:
+- IRET_FRAME
+ /*
+ * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
+ * when returning from IPI handler and when returning from
+@@ -1128,30 +1148,43 @@ ENDPROC(entry_INT80_32)
+ * We can't call C functions using the ESPFIX stack. This code reads
+ * the high word of the segment base from the GDT and swiches to the
+ * normal stack and adjusts ESP with the matching offset.
++ *
++ * We might be on user CR3 here, so percpu data is not mapped and we can't
++ * access the GDT through the percpu segment. Instead, use SGDT to find
++ * the cpu_entry_area alias of the GDT.
+ */
+ #ifdef CONFIG_X86_ESPFIX32
+ /* fixup the stack */
+- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
+- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
++ pushl %ecx
++ subl $2*4, %esp
++ sgdt (%esp)
++ movl 2(%esp), %ecx /* GDT address */
++ /*
++ * Careful: ECX is a linear pointer, so we need to force base
++ * zero. %cs is the only known-linear segment we have right now.
++ */
++ mov %cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al /* bits 16..23 */
++ mov %cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah /* bits 24..31 */
+ shl $16, %eax
++ addl $2*4, %esp
++ popl %ecx
+ addl %esp, %eax /* the adjusted stack pointer */
+ pushl $__KERNEL_DS
+ pushl %eax
+ lss (%esp), %esp /* switch to the normal stack segment */
+ #endif
+ .endm
++
+ .macro UNWIND_ESPFIX_STACK
++ /* It's safe to clobber %eax, all other regs need to be preserved */
+ #ifdef CONFIG_X86_ESPFIX32
+ movl %ss, %eax
+ /* see if on espfix stack */
+ cmpw $__ESPFIX_SS, %ax
+- jne 27f
+- movl $__KERNEL_DS, %eax
+- movl %eax, %ds
+- movl %eax, %es
++ jne .Lno_fixup_\@
+ /* switch to normal stack */
+ FIXUP_ESPFIX_STACK
+-27:
++.Lno_fixup_\@:
+ #endif
+ .endm
+
+@@ -1341,11 +1374,6 @@ END(spurious_interrupt_bug)
+
+ #ifdef CONFIG_XEN_PV
+ ENTRY(xen_hypervisor_callback)
+- pushl $-1 /* orig_ax = -1 => not a system call */
+- SAVE_ALL
+- ENCODE_FRAME_POINTER
+- TRACE_IRQS_OFF
+-
+ /*
+ * Check to see if we got the event in the critical
+ * region in xen_iret_direct, after we've reenabled
+@@ -1353,16 +1381,17 @@ ENTRY(xen_hypervisor_callback)
+ * iret instruction's behaviour where it delivers a
+ * pending interrupt when enabling interrupts:
+ */
+- movl PT_EIP(%esp), %eax
+- cmpl $xen_iret_start_crit, %eax
++ cmpl $xen_iret_start_crit, (%esp)
+ jb 1f
+- cmpl $xen_iret_end_crit, %eax
++ cmpl $xen_iret_end_crit, (%esp)
+ jae 1f
+-
+- jmp xen_iret_crit_fixup
+-
+-ENTRY(xen_do_upcall)
+-1: mov %esp, %eax
++ call xen_iret_crit_fixup
++1:
++ pushl $-1 /* orig_ax = -1 => not a system call */
++ SAVE_ALL
++ ENCODE_FRAME_POINTER
++ TRACE_IRQS_OFF
++ mov %esp, %eax
+ call xen_evtchn_do_upcall
+ #ifndef CONFIG_PREEMPT
+ call xen_maybe_preempt_hcall
+@@ -1449,10 +1478,9 @@ END(page_fault)
+
+ common_exception_read_cr2:
+ /* the function address is in %gs's slot on the stack */
+- SAVE_ALL switch_stacks=1 skip_gs=1
++ SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
+
+ ENCODE_FRAME_POINTER
+- UNWIND_ESPFIX_STACK
+
+ /* fixup %gs */
+ GS_TO_REG %ecx
+@@ -1474,9 +1502,8 @@ END(common_exception_read_cr2)
+
+ common_exception:
+ /* the function address is in %gs's slot on the stack */
+- SAVE_ALL switch_stacks=1 skip_gs=1
++ SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
+ ENCODE_FRAME_POINTER
+- UNWIND_ESPFIX_STACK
+
+ /* fixup %gs */
+ GS_TO_REG %ecx
+@@ -1515,6 +1542,10 @@ ENTRY(nmi)
+ ASM_CLAC
+
+ #ifdef CONFIG_X86_ESPFIX32
++ /*
++ * ESPFIX_SS is only ever set on the return to user path
++ * after we've switched to the entry stack.
++ */
+ pushl %eax
+ movl %ss, %eax
+ cmpw $__ESPFIX_SS, %ax
+@@ -1550,6 +1581,11 @@ ENTRY(nmi)
+ movl %ebx, %esp
+
+ .Lnmi_return:
++#ifdef CONFIG_X86_ESPFIX32
++ testl $CS_FROM_ESPFIX, PT_CS(%esp)
++ jnz .Lnmi_from_espfix
++#endif
++
+ CHECK_AND_APPLY_ESPFIX
+ RESTORE_ALL_NMI cr3_reg=%edi pop=4
+ jmp .Lirq_return
+@@ -1557,23 +1593,42 @@ ENTRY(nmi)
+ #ifdef CONFIG_X86_ESPFIX32
+ .Lnmi_espfix_stack:
+ /*
+- * create the pointer to lss back
++ * Create the pointer to LSS back
+ */
+ pushl %ss
+ pushl %esp
+ addl $4, (%esp)
+- /* copy the iret frame of 12 bytes */
+- .rept 3
+- pushl 16(%esp)
+- .endr
+- pushl %eax
+- SAVE_ALL_NMI cr3_reg=%edi
++
++ /* Copy the (short) IRET frame */
++ pushl 4*4(%esp) # flags
++ pushl 4*4(%esp) # cs
++ pushl 4*4(%esp) # ip
++
++ pushl %eax # orig_ax
++
++ SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1
+ ENCODE_FRAME_POINTER
+- FIXUP_ESPFIX_STACK # %eax == %esp
++
++ /* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */
++ xorl $(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp)
++
+ xorl %edx, %edx # zero error code
+- call do_nmi
++ movl %esp, %eax # pt_regs pointer
++ jmp .Lnmi_from_sysenter_stack
++
++.Lnmi_from_espfix:
+ RESTORE_ALL_NMI cr3_reg=%edi
+- lss 12+4(%esp), %esp # back to espfix stack
++ /*
++ * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to
++ * fix up the gap and long frame:
++ *
++ * 3 - original frame (exception)
++ * 2 - ESPFIX block (above)
++ * 6 - gap (FIXUP_FRAME)
++ * 5 - long frame (FIXUP_FRAME)
++ * 1 - orig_ax
++ */
++ lss (1+5+6)*4(%esp), %esp # back to espfix stack
+ jmp .Lirq_return
+ #endif
+ END(nmi)
+diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
+index cff3f3f3bfe0..6e9c9af3255a 100644
+--- a/arch/x86/include/asm/cpu_entry_area.h
++++ b/arch/x86/include/asm/cpu_entry_area.h
+@@ -78,8 +78,12 @@ struct cpu_entry_area {
+
+ /*
+ * The GDT is just below entry_stack and thus serves (on x86_64) as
+- * a a read-only guard page.
++ * a read-only guard page. On 32-bit the GDT must be writeable, so
++ * it needs an extra guard page.
+ */
++#ifdef CONFIG_X86_32
++ char guard_entry_stack[PAGE_SIZE];
++#endif
+ struct entry_stack_page entry_stack_page;
+
+ /*
+@@ -94,7 +98,6 @@ struct cpu_entry_area {
+ */
+ struct cea_exception_stacks estacks;
+ #endif
+-#ifdef CONFIG_CPU_SUP_INTEL
+ /*
+ * Per CPU debug store for Intel performance monitoring. Wastes a
+ * full page at the moment.
+@@ -105,11 +108,13 @@ struct cpu_entry_area {
+ * Reserve enough fixmap PTEs.
+ */
+ struct debug_store_buffers cpu_debug_buffers;
+-#endif
+ };
+
+-#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area))
+-#define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
++#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area))
++#define CPU_ENTRY_AREA_ARRAY_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
++
++/* Total size includes the readonly IDT mapping page as well: */
++#define CPU_ENTRY_AREA_TOTAL_SIZE (CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
+
+ DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+ DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
+@@ -117,13 +122,14 @@ DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
+ extern void setup_cpu_entry_areas(void);
+ extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
+
++/* Single page reserved for the readonly IDT mapping: */
+ #define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE
+ #define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
+
+ #define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT)
+
+ #define CPU_ENTRY_AREA_MAP_SIZE \
+- (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)
++ (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
+
+ extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
+
+diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
+index b0bc0fff5f1f..1636eb8e5a5b 100644
+--- a/arch/x86/include/asm/pgtable_32_types.h
++++ b/arch/x86/include/asm/pgtable_32_types.h
+@@ -44,11 +44,11 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
+ * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c
+ * to avoid include recursion hell
+ */
+-#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40)
++#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 39)
+
+-#define CPU_ENTRY_AREA_BASE \
+- ((FIXADDR_TOT_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) \
+- & PMD_MASK)
++/* The +1 is for the readonly IDT page: */
++#define CPU_ENTRY_AREA_BASE \
++ ((FIXADDR_TOT_START - PAGE_SIZE*(CPU_ENTRY_AREA_PAGES+1)) & PMD_MASK)
+
+ #define LDT_BASE_ADDR \
+ ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
+diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
+index ac3892920419..6669164abadc 100644
+--- a/arch/x86/include/asm/segment.h
++++ b/arch/x86/include/asm/segment.h
+@@ -31,6 +31,18 @@
+ */
+ #define SEGMENT_RPL_MASK 0x3
+
++/*
++ * When running on Xen PV, the actual privilege level of the kernel is 1,
++ * not 0. Testing the Requested Privilege Level in a segment selector to
++ * determine whether the context is user mode or kernel mode with
++ * SEGMENT_RPL_MASK is wrong because the PV kernel's privilege level
++ * matches the 0x3 mask.
++ *
++ * Testing with USER_SEGMENT_RPL_MASK is valid for both native and Xen PV
++ * kernels because privilege level 2 is never used.
++ */
++#define USER_SEGMENT_RPL_MASK 0x2
++
+ /* User mode is privilege level 3: */
+ #define USER_RPL 0x3
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 9b7586204cd2..cc5b535d2448 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -39,6 +39,7 @@ static void __init spectre_v2_select_mitigation(void);
+ static void __init ssb_select_mitigation(void);
+ static void __init l1tf_select_mitigation(void);
+ static void __init mds_select_mitigation(void);
++static void __init mds_print_mitigation(void);
+ static void __init taa_select_mitigation(void);
+
+ /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
+@@ -108,6 +109,12 @@ void __init check_bugs(void)
+ mds_select_mitigation();
+ taa_select_mitigation();
+
++ /*
++ * As MDS and TAA mitigations are inter-related, print MDS
++ * mitigation until after TAA mitigation selection is done.
++ */
++ mds_print_mitigation();
++
+ arch_smt_update();
+
+ #ifdef CONFIG_X86_32
+@@ -245,6 +252,12 @@ static void __init mds_select_mitigation(void)
+ (mds_nosmt || cpu_mitigations_auto_nosmt()))
+ cpu_smt_disable(false);
+ }
++}
++
++static void __init mds_print_mitigation(void)
++{
++ if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
++ return;
+
+ pr_info("%s\n", mds_strings[mds_mitigation]);
+ }
+@@ -304,8 +317,12 @@ static void __init taa_select_mitigation(void)
+ return;
+ }
+
+- /* TAA mitigation is turned off on the cmdline (tsx_async_abort=off) */
+- if (taa_mitigation == TAA_MITIGATION_OFF)
++ /*
++ * TAA mitigation via VERW is turned off if both
++ * tsx_async_abort=off and mds=off are specified.
++ */
++ if (taa_mitigation == TAA_MITIGATION_OFF &&
++ mds_mitigation == MDS_MITIGATION_OFF)
+ goto out;
+
+ if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
+@@ -339,6 +356,15 @@ static void __init taa_select_mitigation(void)
+ if (taa_nosmt || cpu_mitigations_auto_nosmt())
+ cpu_smt_disable(false);
+
++ /*
++ * Update MDS mitigation, if necessary, as the mds_user_clear is
++ * now enabled for TAA mitigation.
++ */
++ if (mds_mitigation == MDS_MITIGATION_OFF &&
++ boot_cpu_has_bug(X86_BUG_MDS)) {
++ mds_mitigation = MDS_MITIGATION_FULL;
++ mds_select_mitigation();
++ }
+ out:
+ pr_info("%s\n", taa_strings[taa_mitigation]);
+ }
+diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
+index 0b8cedb20d6d..d5c9b13bafdf 100644
+--- a/arch/x86/kernel/doublefault.c
++++ b/arch/x86/kernel/doublefault.c
+@@ -65,6 +65,9 @@ struct x86_hw_tss doublefault_tss __cacheline_aligned = {
+ .ss = __KERNEL_DS,
+ .ds = __USER_DS,
+ .fs = __KERNEL_PERCPU,
++#ifndef CONFIG_X86_32_LAZY_GS
++ .gs = __KERNEL_STACK_CANARY,
++#endif
+
+ .__cr3 = __pa_nodebug(swapper_pg_dir),
+ };
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index 30f9cb2c0b55..2e6a0676c1f4 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -571,6 +571,16 @@ ENTRY(initial_page_table)
+ # error "Kernel PMDs should be 1, 2 or 3"
+ # endif
+ .align PAGE_SIZE /* needs to be page-sized too */
+
++#ifdef CONFIG_PAGE_TABLE_ISOLATION
++ /*
++ * PTI needs another page so sync_initial_pagetable() works correctly
++ * and does not scribble over the data which is placed behind the
++ * actual initial_page_table. See clone_pgd_range().
++ */
++ .fill 1024, 4, 0
++#endif
++
+ #endif
+
+ .data
+diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
+index 752ad11d6868..d9643647a9ce 100644
+--- a/arch/x86/mm/cpu_entry_area.c
++++ b/arch/x86/mm/cpu_entry_area.c
+@@ -178,7 +178,9 @@ static __init void setup_cpu_entry_area_ptes(void)
+ #ifdef CONFIG_X86_32
+ unsigned long start, end;
+
+- BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
++ /* The +1 is for the readonly IDT: */
++ BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
++ BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
+ BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
+
+ start = CPU_ENTRY_AREA_BASE;
+diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
+index b02a36b2c14f..a42015b305f4 100644
+--- a/arch/x86/tools/gen-insn-attr-x86.awk
++++ b/arch/x86/tools/gen-insn-attr-x86.awk
+@@ -69,7 +69,7 @@ BEGIN {
+
+ lprefix1_expr = "\\((66|!F3)\\)"
+ lprefix2_expr = "\\(F3\\)"
+- lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
++ lprefix3_expr = "\\((F2|!F3|66&F2)\\)"
+ lprefix_expr = "\\((66|F2|F3)\\)"
+ max_lprefix = 4
+
+@@ -257,7 +257,7 @@ function convert_operands(count,opnd, i,j,imm,mod)
+ return add_flags(imm, mod)
+ }
+
+-/^[0-9a-f]+\:/ {
++/^[0-9a-f]+:/ {
+ if (NR == 1)
+ next
+ # get index
+diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
+index c15db060a242..cd177772fe4d 100644
+--- a/arch/x86/xen/xen-asm_32.S
++++ b/arch/x86/xen/xen-asm_32.S
+@@ -126,10 +126,9 @@ hyper_iret:
+ .globl xen_iret_start_crit, xen_iret_end_crit
+
+ /*
+- * This is called by xen_hypervisor_callback in entry.S when it sees
++ * This is called by xen_hypervisor_callback in entry_32.S when it sees
+ * that the EIP at the time of interrupt was between
+- * xen_iret_start_crit and xen_iret_end_crit. We're passed the EIP in
+- * %eax so we can do a more refined determination of what to do.
++ * xen_iret_start_crit and xen_iret_end_crit.
+ *
+ * The stack format at this point is:
+ * ----------------
+@@ -138,70 +137,46 @@ hyper_iret:
+ * eflags } outer exception info
+ * cs }
+ * eip }
+- * ---------------- <- edi (copy dest)
+- * eax : outer eax if it hasn't been restored
+ * ----------------
+- * eflags } nested exception info
+- * cs } (no ss/esp because we're nested
+- * eip } from the same ring)
+- * orig_eax }<- esi (copy src)
+- * - - - - - - - -
+- * fs }
+- * es }
+- * ds } SAVE_ALL state
+- * eax }
+- * : :
+- * ebx }<- esp
++ * eax : outer eax if it hasn't been restored
+ * ----------------
++ * eflags }
++ * cs } nested exception info
++ * eip }
++ * return address : (into xen_hypervisor_callback)
+ *
+- * In order to deliver the nested exception properly, we need to shift
+- * everything from the return addr up to the error code so it sits
+- * just under the outer exception info. This means that when we
+- * handle the exception, we do it in the context of the outer
+- * exception rather than starting a new one.
++ * In order to deliver the nested exception properly, we need to discard the
++ * nested exception frame such that when we handle the exception, we do it
++ * in the context of the outer exception rather than starting a new one.
+ *
+- * The only caveat is that if the outer eax hasn't been restored yet
+- * (ie, it's still on stack), we need to insert its value into the
+- * SAVE_ALL state before going on, since it's usermode state which we
+- * eventually need to restore.
++ * The only caveat is that if the outer eax hasn't been restored yet (i.e.
++ * it's still on stack), we need to restore its value here.
+ */
+ ENTRY(xen_iret_crit_fixup)
+ /*
+ * Paranoia: Make sure we're really coming from kernel space.
+ * One could imagine a case where userspace jumps into the
+ * critical range address, but just before the CPU delivers a
+- * GP, it decides to deliver an interrupt instead. Unlikely?
+- * Definitely. Easy to avoid? Yes. The Intel documents
+- * explicitly say that the reported EIP for a bad jump is the
+- * jump instruction itself, not the destination, but some
+- * virtual environments get this wrong.
++ * PF, it decides to deliver an interrupt instead. Unlikely?
++ * Definitely. Easy to avoid? Yes.
+ */
+- movl PT_CS(%esp), %ecx
+- andl $SEGMENT_RPL_MASK, %ecx
+- cmpl $USER_RPL, %ecx
+- je 2f
+-
+- lea PT_ORIG_EAX(%esp), %esi
+- lea PT_EFLAGS(%esp), %edi
++ testb $2, 2*4(%esp) /* nested CS */
++ jnz 2f
+
+ /*
+ * If eip is before iret_restore_end then stack
+ * hasn't been restored yet.
+ */
+- cmp $iret_restore_end, %eax
++ cmpl $iret_restore_end, 1*4(%esp)
+ jae 1f
+
+- movl 0+4(%edi), %eax /* copy EAX (just above top of frame) */
+- movl %eax, PT_EAX(%esp)
++ movl 4*4(%esp), %eax /* load outer EAX */
++ ret $4*4 /* discard nested EIP, CS, and EFLAGS as
++ * well as the just restored EAX */
+
+- lea ESP_OFFSET(%edi), %edi /* move dest up over saved regs */
+-
+- /* set up the copy */
+-1: std
+- mov $PT_EIP / 4, %ecx /* saved regs up to orig_eax */
+- rep movsl
+- cld
+-
+- lea 4(%edi), %esp /* point esp to new frame */
+-2: jmp xen_do_upcall
++1:
++ ret $3*4 /* discard nested EIP, CS, and EFLAGS */
+
++2:
++ ret
++END(xen_iret_crit_fixup)
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 5f9d12ce91e5..f4140f077324 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -956,6 +956,7 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
+ if (sock->ops->shutdown == sock_no_shutdown) {
+ dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
+ *err = -EINVAL;
++ sockfd_put(sock);
+ return NULL;
+ }
+
+@@ -994,14 +995,15 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
+ sockfd_put(sock);
+ return -ENOMEM;
+ }
++
++ config->socks = socks;
++
+ nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
+ if (!nsock) {
+ sockfd_put(sock);
+ return -ENOMEM;
+ }
+
+- config->socks = socks;
+-
+ nsock->fallback_index = -1;
+ nsock->dead = false;
+ mutex_init(&nsock->tx_lock);
+diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
+index fe2e307009f4..cf4a56095817 100644
+--- a/drivers/bluetooth/hci_bcsp.c
++++ b/drivers/bluetooth/hci_bcsp.c
+@@ -591,6 +591,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
+ if (*ptr == 0xc0) {
+ BT_ERR("Short BCSP packet");
+ kfree_skb(bcsp->rx_skb);
++ bcsp->rx_skb = NULL;
+ bcsp->rx_state = BCSP_W4_PKT_START;
+ bcsp->rx_count = 0;
+ } else
+@@ -606,6 +607,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
+ bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
+ BT_ERR("Error in BCSP hdr checksum");
+ kfree_skb(bcsp->rx_skb);
++ bcsp->rx_skb = NULL;
+ bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
+ bcsp->rx_count = 0;
+ continue;
+@@ -630,6 +632,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
+ bscp_get_crc(bcsp));
+
+ kfree_skb(bcsp->rx_skb);
++ bcsp->rx_skb = NULL;
+ bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
+ bcsp->rx_count = 0;
+ continue;
+diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
+index 285706618f8a..d9a4c6c691e0 100644
+--- a/drivers/bluetooth/hci_ll.c
++++ b/drivers/bluetooth/hci_ll.c
+@@ -621,13 +621,6 @@ static int ll_setup(struct hci_uart *hu)
+
+ serdev_device_set_flow_control(serdev, true);
+
+- if (hu->oper_speed)
+- speed = hu->oper_speed;
+- else if (hu->proto->oper_speed)
+- speed = hu->proto->oper_speed;
+- else
+- speed = 0;
+-
+ do {
+ /* Reset the Bluetooth device */
+ gpiod_set_value_cansleep(lldev->enable_gpio, 0);
+@@ -639,20 +632,6 @@ static int ll_setup(struct hci_uart *hu)
+ return err;
+ }
+
+- if (speed) {
+- __le32 speed_le = cpu_to_le32(speed);
+- struct sk_buff *skb;
+-
+- skb = __hci_cmd_sync(hu->hdev,
+- HCI_VS_UPDATE_UART_HCI_BAUDRATE,
+- sizeof(speed_le), &speed_le,
+- HCI_INIT_TIMEOUT);
+- if (!IS_ERR(skb)) {
+- kfree_skb(skb);
+- serdev_device_set_baudrate(serdev, speed);
+- }
+- }
+-
+ err = download_firmware(lldev);
+ if (!err)
+ break;
+@@ -677,7 +656,25 @@ static int ll_setup(struct hci_uart *hu)
+ }
+
+ /* Operational speed if any */
++ if (hu->oper_speed)
++ speed = hu->oper_speed;
++ else if (hu->proto->oper_speed)
++ speed = hu->proto->oper_speed;
++ else
++ speed = 0;
++
++ if (speed) {
++ __le32 speed_le = cpu_to_le32(speed);
++ struct sk_buff *skb;
+
++ skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE,
++ sizeof(speed_le), &speed_le,
++ HCI_INIT_TIMEOUT);
++ if (!IS_ERR(skb)) {
++ kfree_skb(skb);
++ serdev_device_set_baudrate(serdev, speed);
++ }
++ }
+
+ return 0;
+ }
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index 7270e7b69262..3259426f01dc 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -1325,24 +1325,24 @@ static void set_console_size(struct port *port, u16 rows, u16 cols)
+ port->cons.ws.ws_col = cols;
+ }
+
+-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
++static int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+ {
+ struct port_buffer *buf;
+- unsigned int nr_added_bufs;
++ int nr_added_bufs;
+ int ret;
+
+ nr_added_bufs = 0;
+ do {
+ buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
+ if (!buf)
+- break;
++ return -ENOMEM;
+
+ spin_lock_irq(lock);
+ ret = add_inbuf(vq, buf);
+ if (ret < 0) {
+ spin_unlock_irq(lock);
+ free_buf(buf, true);
+- break;
++ return ret;
+ }
+ nr_added_bufs++;
+ spin_unlock_irq(lock);
+@@ -1362,7 +1362,6 @@ static int add_port(struct ports_device *portdev, u32 id)
+ char debugfs_name[16];
+ struct port *port;
+ dev_t devt;
+- unsigned int nr_added_bufs;
+ int err;
+
+ port = kmalloc(sizeof(*port), GFP_KERNEL);
+@@ -1421,11 +1420,13 @@ static int add_port(struct ports_device *portdev, u32 id)
+ spin_lock_init(&port->outvq_lock);
+ init_waitqueue_head(&port->waitqueue);
+
+- /* Fill the in_vq with buffers so the host can send us data. */
+- nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
+- if (!nr_added_bufs) {
++ /* We can safely ignore ENOSPC because it means
++ * the queue already has buffers. Buffers are removed
++ * only by virtcons_remove(), not by unplug_port()
++ */
++ err = fill_queue(port->in_vq, &port->inbuf_lock);
++ if (err < 0 && err != -ENOSPC) {
+ dev_err(port->dev, "Error allocating inbufs\n");
+- err = -ENOMEM;
+ goto free_device;
+ }
+
+@@ -2059,14 +2060,11 @@ static int virtcons_probe(struct virtio_device *vdev)
+ INIT_WORK(&portdev->control_work, &control_work_handler);
+
+ if (multiport) {
+- unsigned int nr_added_bufs;
+-
+ spin_lock_init(&portdev->c_ivq_lock);
+ spin_lock_init(&portdev->c_ovq_lock);
+
+- nr_added_bufs = fill_queue(portdev->c_ivq,
+- &portdev->c_ivq_lock);
+- if (!nr_added_bufs) {
++ err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
++ if (err < 0) {
+ dev_err(&vdev->dev,
+ "Error allocating buffers for control queue\n");
+ /*
+@@ -2077,7 +2075,7 @@ static int virtcons_probe(struct virtio_device *vdev)
+ VIRTIO_CONSOLE_DEVICE_READY, 0);
+ /* Device was functional: we need full cleanup. */
+ virtcons_remove(vdev);
+- return -ENOMEM;
++ return err;
+ }
+ } else {
+ /*
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index f970f87ce86e..9b6a674f83de 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -933,6 +933,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
+ struct freq_attr *fattr = to_attr(attr);
+ ssize_t ret;
+
++ if (!fattr->show)
++ return -EIO;
++
+ down_read(&policy->rwsem);
+ ret = fattr->show(policy, buf);
+ up_read(&policy->rwsem);
+@@ -947,6 +950,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
+ struct freq_attr *fattr = to_attr(attr);
+ ssize_t ret = -EINVAL;
+
++ if (!fattr->store)
++ return -EIO;
++
+ /*
+ * cpus_read_trylock() is used here to work around a circular lock
+ * dependency problem with respect to the cpufreq_register_driver().
+diff --git a/drivers/gpio/gpio-bd70528.c b/drivers/gpio/gpio-bd70528.c
+index fd85605d2dab..01e122c3a9f1 100644
+--- a/drivers/gpio/gpio-bd70528.c
++++ b/drivers/gpio/gpio-bd70528.c
+@@ -25,13 +25,13 @@ static int bd70528_set_debounce(struct bd70528_gpio *bdgpio,
+ case 0:
+ val = BD70528_DEBOUNCE_DISABLE;
+ break;
+- case 1 ... 15:
++ case 1 ... 15000:
+ val = BD70528_DEBOUNCE_15MS;
+ break;
+- case 16 ... 30:
++ case 15001 ... 30000:
+ val = BD70528_DEBOUNCE_30MS;
+ break;
+- case 31 ... 50:
++ case 30001 ... 50000:
+ val = BD70528_DEBOUNCE_50MS;
+ break;
+ default:
+diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
+index 06e8caaafa81..4ead063bfe38 100644
+--- a/drivers/gpio/gpio-max77620.c
++++ b/drivers/gpio/gpio-max77620.c
+@@ -192,13 +192,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
+ case 0:
+ val = MAX77620_CNFG_GPIO_DBNC_None;
+ break;
+- case 1000 ... 8000:
++ case 1 ... 8000:
+ val = MAX77620_CNFG_GPIO_DBNC_8ms;
+ break;
+- case 9000 ... 16000:
++ case 8001 ... 16000:
+ val = MAX77620_CNFG_GPIO_DBNC_16ms;
+ break;
+- case 17000 ... 32000:
++ case 16001 ... 32000:
+ val = MAX77620_CNFG_GPIO_DBNC_32ms;
+ break;
+ default:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 56b4c241a14b..65f6619f0c0c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -635,15 +635,19 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ return -ENOMEM;
+ alloc_size = info->read_mmr_reg.count * sizeof(*regs);
+
+- for (i = 0; i < info->read_mmr_reg.count; i++)
++ amdgpu_gfx_off_ctrl(adev, false);
++ for (i = 0; i < info->read_mmr_reg.count; i++) {
+ if (amdgpu_asic_read_register(adev, se_num, sh_num,
+ info->read_mmr_reg.dword_offset + i,
+ &regs[i])) {
+ DRM_DEBUG_KMS("unallowed offset %#x\n",
+ info->read_mmr_reg.dword_offset + i);
+ kfree(regs);
++ amdgpu_gfx_off_ctrl(adev, true);
+ return -EFAULT;
+ }
++ }
++ amdgpu_gfx_off_ctrl(adev, true);
+ n = copy_to_user(out, regs, min(size, alloc_size));
+ kfree(regs);
+ return n ? -EFAULT : 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index c066e1d3f981..75faa56f243a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -596,8 +596,13 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
+ case CHIP_VEGA20:
+ break;
+ case CHIP_RAVEN:
+- if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+- &&((adev->gfx.rlc_fw_version != 106 &&
++ /* Disable GFXOFF on original raven. There are combinations
++ * of sbios and platforms that are not stable.
++ */
++ if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8))
++ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
++ else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
++ &&((adev->gfx.rlc_fw_version != 106 &&
+ adev->gfx.rlc_fw_version < 531) ||
+ (adev->gfx.rlc_fw_version == 53815) ||
+ (adev->gfx.rlc_feature_version < 1) ||
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 3c1084de5d59..ec62747b4bbb 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -3477,18 +3477,31 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
+
+ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
+ {
++ struct amdgpu_device *adev = hwmgr->adev;
+ int i;
+ u32 tmp = 0;
+
+ if (!query)
+ return -EINVAL;
+
+- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
+- tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+- *query = tmp;
++ /*
++ * PPSMC_MSG_GetCurrPkgPwr is not supported on:
++ * - Hawaii
++ * - Bonaire
++ * - Fiji
++ * - Tonga
++ */
++ if ((adev->asic_type != CHIP_HAWAII) &&
++ (adev->asic_type != CHIP_BONAIRE) &&
++ (adev->asic_type != CHIP_FIJI) &&
++ (adev->asic_type != CHIP_TONGA)) {
++ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
++ tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
++ *query = tmp;
+
+- if (tmp != 0)
+- return 0;
++ if (tmp != 0)
++ return 0;
++ }
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index dae45b6a35b7..5c8c11deb857 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -2519,6 +2519,9 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
+ * the highest stride limits of them all.
+ */
+ crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
++ if (!crtc)
++ return 0;
++
+ plane = to_intel_plane(crtc->base.primary);
+
+ return plane->max_stride(plane, pixel_format, modifier,
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+index cd30e83c3205..33046a3aef06 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
1509 |
++++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c |
1510 |
+@@ -663,8 +663,28 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, |
1511 |
+ i915_gem_gtt_finish_pages(obj, pages); |
1512 |
+ |
1513 |
+ for_each_sgt_page(page, sgt_iter, pages) { |
1514 |
+- if (obj->mm.dirty) |
1515 |
++ if (obj->mm.dirty && trylock_page(page)) { |
1516 |
++ /* |
1517 |
++ * As this may not be anonymous memory (e.g. shmem) |
1518 |
++ * but exist on a real mapping, we have to lock |
1519 |
++ * the page in order to dirty it -- holding |
1520 |
++ * the page reference is not sufficient to |
1521 |
++ * prevent the inode from being truncated. |
1522 |
++ * Play safe and take the lock. |
1523 |
++ * |
1524 |
++ * However...! |
1525 |
++ * |
1526 |
++ * The mmu-notifier can be invalidated for a |
1527 |
++ * migrate_page, that is already holding the lock |
1528 |
++ * on the page. Such a try_to_unmap() will result |
1529 |
++ * in us calling put_pages() and so recursively try |
1530 |
++ * to lock the page. We avoid that deadlock with |
1531 |
++ * a trylock_page() and in exchange we risk missing |
1532 |
++ * some page dirtying. |
1533 |
++ */ |
1534 |
+ set_page_dirty(page); |
1535 |
++ unlock_page(page); |
1536 |
++ } |
1537 |
+ |
1538 |
+ mark_page_accessed(page); |
1539 |
+ put_page(page); |
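
[Editor's note: the comment added by this hunk carries the whole argument: a bare page reference does not prevent truncation, so the page lock is needed to dirty the page safely, yet an mmu-notifier caller may already hold that lock, and blocking in lock_page() there would deadlock. trylock_page() trades an occasionally missed dirty bit for safety. The pattern in isolation:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Dirty a page only when its lock can be taken without blocking; the
 * lock may already be held by an mmu-notifier caller, so an
 * unconditional lock_page() here could deadlock. */
static void mark_dirty_if_safe(struct page *page, bool dirty)
{
	if (dirty && trylock_page(page)) {
		set_page_dirty(page);	/* safe under the page lock */
		unlock_page(page);
	}
	/* on contention we deliberately skip: a missed dirty mark is
	 * the price of avoiding the recursive-lock deadlock */
}
]
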
1540 |
+diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c |
1541 |
+index 8fe46ee920a0..c599d9db01ac 100644 |
1542 |
+--- a/drivers/gpu/drm/i915/i915_pmu.c |
1543 |
++++ b/drivers/gpu/drm/i915/i915_pmu.c |
1544 |
+@@ -833,8 +833,8 @@ create_event_attributes(struct drm_i915_private *i915) |
1545 |
+ const char *name; |
1546 |
+ const char *unit; |
1547 |
+ } events[] = { |
1548 |
+- __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"), |
1549 |
+- __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"), |
1550 |
++ __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"), |
1551 |
++ __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"), |
1552 |
+ __event(I915_PMU_INTERRUPTS, "interrupts", NULL), |
1553 |
+ __event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"), |
1554 |
+ }; |
1555 |
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c |
1556 |
+index d5216bcc4649..e8446c3cad11 100644 |
1557 |
+--- a/drivers/md/dm-crypt.c |
1558 |
++++ b/drivers/md/dm-crypt.c |
1559 |
+@@ -2911,21 +2911,18 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) |
1560 |
+ } |
1561 |
+ |
1562 |
+ ret = -ENOMEM; |
1563 |
+- cc->io_queue = alloc_workqueue("kcryptd_io/%s", |
1564 |
+- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, |
1565 |
+- 1, devname); |
1566 |
++ cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname); |
1567 |
+ if (!cc->io_queue) { |
1568 |
+ ti->error = "Couldn't create kcryptd io queue"; |
1569 |
+ goto bad; |
1570 |
+ } |
1571 |
+ |
1572 |
+ if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) |
1573 |
+- cc->crypt_queue = alloc_workqueue("kcryptd/%s", |
1574 |
+- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, |
1575 |
++ cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, |
1576 |
+ 1, devname); |
1577 |
+ else |
1578 |
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s", |
1579 |
+- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, |
1580 |
++ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, |
1581 |
+ num_online_cpus(), devname); |
1582 |
+ if (!cc->crypt_queue) { |
1583 |
+ ti->error = "Couldn't create kcryptd queue"; |
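
[Editor's note: the dm-crypt hunk drops WQ_HIGHPRI | WQ_CPU_INTENSIVE from the kcryptd queues (high-priority CPU-intensive workers were starving the rest of the system) while keeping WQ_MEM_RECLAIM, which a block target needs so its queue can make progress under memory pressure. A minimal sketch of the allocation:

#include <linux/errno.h>
#include <linux/workqueue.h>

/* An ordered (max_active = 1) per-device queue that can still make
 * forward progress during memory reclaim. */
static struct workqueue_struct *io_queue;

static int create_io_queue(const char *devname)
{
	io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1,
				   devname);
	return io_queue ? 0 : -ENOMEM;
}
]
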
1584 |
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c |
1585 |
+index 8a1354a08a1a..c0c653e35fbb 100644 |
1586 |
+--- a/drivers/md/raid10.c |
1587 |
++++ b/drivers/md/raid10.c |
1588 |
+@@ -191,7 +191,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) |
1589 |
+ |
1590 |
+ out_free_pages: |
1591 |
+ while (--j >= 0) |
1592 |
+- resync_free_pages(&rps[j * 2]); |
1593 |
++ resync_free_pages(&rps[j]); |
1594 |
+ |
1595 |
+ j = 0; |
1596 |
+ out_free_bio: |
1597 |
+diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c |
1598 |
+index 003319d7816d..31f78d6a05a4 100644 |
1599 |
+--- a/drivers/media/platform/vivid/vivid-kthread-cap.c |
1600 |
++++ b/drivers/media/platform/vivid/vivid-kthread-cap.c |
1601 |
+@@ -796,7 +796,11 @@ static int vivid_thread_vid_cap(void *data) |
1602 |
+ if (kthread_should_stop()) |
1603 |
+ break; |
1604 |
+ |
1605 |
+- mutex_lock(&dev->mutex); |
1606 |
++ if (!mutex_trylock(&dev->mutex)) { |
1607 |
++ schedule_timeout_uninterruptible(1); |
1608 |
++ continue; |
1609 |
++ } |
1610 |
++ |
1611 |
+ cur_jiffies = jiffies; |
1612 |
+ if (dev->cap_seq_resync) { |
1613 |
+ dev->jiffies_vid_cap = cur_jiffies; |
1614 |
+@@ -956,8 +960,6 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming) |
1615 |
+ |
1616 |
+ /* shutdown control thread */ |
1617 |
+ vivid_grab_controls(dev, false); |
1618 |
+- mutex_unlock(&dev->mutex); |
1619 |
+ kthread_stop(dev->kthread_vid_cap); |
1620 |
+ dev->kthread_vid_cap = NULL; |
1621 |
+- mutex_lock(&dev->mutex); |
1622 |
+ } |
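
[Editor's note: this vivid hunk, and the matching ones for vid-out and sdr-cap below, invert the locking between worker and stopper: the stop path now holds dev->mutex across kthread_stop(), so the worker must poll with mutex_trylock() and sleep a jiffy on contention, re-checking kthread_should_stop() each pass. In outline:

#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/sched.h>

/* Worker loop that never blocks on the mutex its stop path holds
 * across kthread_stop(). */
static int worker_thread(void *data)
{
	struct mutex *lock = data;

	for (;;) {
		if (kthread_should_stop())
			break;
		if (!mutex_trylock(lock)) {
			/* lock owner may be waiting in kthread_stop() */
			schedule_timeout_uninterruptible(1);
			continue;
		}
		/* ... produce one frame ... */
		mutex_unlock(lock);
	}
	return 0;
}
]
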
1623 |
+diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c |
1624 |
+index ce5bcda2348c..1e165a6a2207 100644 |
1625 |
+--- a/drivers/media/platform/vivid/vivid-kthread-out.c |
1626 |
++++ b/drivers/media/platform/vivid/vivid-kthread-out.c |
1627 |
+@@ -143,7 +143,11 @@ static int vivid_thread_vid_out(void *data) |
1628 |
+ if (kthread_should_stop()) |
1629 |
+ break; |
1630 |
+ |
1631 |
+- mutex_lock(&dev->mutex); |
1632 |
++ if (!mutex_trylock(&dev->mutex)) { |
1633 |
++ schedule_timeout_uninterruptible(1); |
1634 |
++ continue; |
1635 |
++ } |
1636 |
++ |
1637 |
+ cur_jiffies = jiffies; |
1638 |
+ if (dev->out_seq_resync) { |
1639 |
+ dev->jiffies_vid_out = cur_jiffies; |
1640 |
+@@ -301,8 +305,6 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming) |
1641 |
+ |
1642 |
+ /* shutdown control thread */ |
1643 |
+ vivid_grab_controls(dev, false); |
1644 |
+- mutex_unlock(&dev->mutex); |
1645 |
+ kthread_stop(dev->kthread_vid_out); |
1646 |
+ dev->kthread_vid_out = NULL; |
1647 |
+- mutex_lock(&dev->mutex); |
1648 |
+ } |
1649 |
+diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c |
1650 |
+index 9acc709b0740..2b7522e16efc 100644 |
1651 |
+--- a/drivers/media/platform/vivid/vivid-sdr-cap.c |
1652 |
++++ b/drivers/media/platform/vivid/vivid-sdr-cap.c |
1653 |
+@@ -141,7 +141,11 @@ static int vivid_thread_sdr_cap(void *data) |
1654 |
+ if (kthread_should_stop()) |
1655 |
+ break; |
1656 |
+ |
1657 |
+- mutex_lock(&dev->mutex); |
1658 |
++ if (!mutex_trylock(&dev->mutex)) { |
1659 |
++ schedule_timeout_uninterruptible(1); |
1660 |
++ continue; |
1661 |
++ } |
1662 |
++ |
1663 |
+ cur_jiffies = jiffies; |
1664 |
+ if (dev->sdr_cap_seq_resync) { |
1665 |
+ dev->jiffies_sdr_cap = cur_jiffies; |
1666 |
+@@ -303,10 +307,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq) |
1667 |
+ } |
1668 |
+ |
1669 |
+ /* shutdown control thread */ |
1670 |
+- mutex_unlock(&dev->mutex); |
1671 |
+ kthread_stop(dev->kthread_sdr_cap); |
1672 |
+ dev->kthread_sdr_cap = NULL; |
1673 |
+- mutex_lock(&dev->mutex); |
1674 |
+ } |
1675 |
+ |
1676 |
+ static void sdr_cap_buf_request_complete(struct vb2_buffer *vb) |
1677 |
+diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c |
1678 |
+index 8cbaa0c998ed..2d030732feac 100644 |
1679 |
+--- a/drivers/media/platform/vivid/vivid-vid-cap.c |
1680 |
++++ b/drivers/media/platform/vivid/vivid-vid-cap.c |
1681 |
+@@ -223,9 +223,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count) |
1682 |
+ if (vb2_is_streaming(&dev->vb_vid_out_q)) |
1683 |
+ dev->can_loop_video = vivid_vid_can_loop(dev); |
1684 |
+ |
1685 |
+- if (dev->kthread_vid_cap) |
1686 |
+- return 0; |
1687 |
+- |
1688 |
+ dev->vid_cap_seq_count = 0; |
1689 |
+ dprintk(dev, 1, "%s\n", __func__); |
1690 |
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) |
1691 |
+diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c |
1692 |
+index 148b663a6075..a0364ac497f9 100644 |
1693 |
+--- a/drivers/media/platform/vivid/vivid-vid-out.c |
1694 |
++++ b/drivers/media/platform/vivid/vivid-vid-out.c |
1695 |
+@@ -161,9 +161,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count) |
1696 |
+ if (vb2_is_streaming(&dev->vb_vid_cap_q)) |
1697 |
+ dev->can_loop_video = vivid_vid_can_loop(dev); |
1698 |
+ |
1699 |
+- if (dev->kthread_vid_out) |
1700 |
+- return 0; |
1701 |
+- |
1702 |
+ dev->vid_out_seq_count = 0; |
1703 |
+ dprintk(dev, 1, "%s\n", __func__); |
1704 |
+ if (dev->start_streaming_error) { |
1705 |
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c |
1706 |
+index 37a850421fbb..c683a244b9fa 100644 |
1707 |
+--- a/drivers/media/rc/imon.c |
1708 |
++++ b/drivers/media/rc/imon.c |
1709 |
+@@ -1598,8 +1598,7 @@ static void imon_incoming_packet(struct imon_context *ictx, |
1710 |
+ spin_unlock_irqrestore(&ictx->kc_lock, flags); |
1711 |
+ |
1712 |
+ /* send touchscreen events through input subsystem if touchpad data */ |
1713 |
+- if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 && |
1714 |
+- buf[7] == 0x86) { |
1715 |
++ if (ictx->touch && len == 8 && buf[7] == 0x86) { |
1716 |
+ imon_touch_event(ictx, buf); |
1717 |
+ return; |
1718 |
+ |
1719 |
+diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c |
1720 |
+index 9929fcdec74d..b59a4a6d4d34 100644 |
1721 |
+--- a/drivers/media/rc/mceusb.c |
1722 |
++++ b/drivers/media/rc/mceusb.c |
1723 |
+@@ -562,7 +562,7 @@ static int mceusb_cmd_datasize(u8 cmd, u8 subcmd) |
1724 |
+ datasize = 4; |
1725 |
+ break; |
1726 |
+ case MCE_CMD_G_REVISION: |
1727 |
+- datasize = 2; |
1728 |
++ datasize = 4; |
1729 |
+ break; |
1730 |
+ case MCE_RSP_EQWAKESUPPORT: |
1731 |
+ case MCE_RSP_GETWAKESOURCE: |
1732 |
+@@ -598,14 +598,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, |
1733 |
+ char *inout; |
1734 |
+ u8 cmd, subcmd, *data; |
1735 |
+ struct device *dev = ir->dev; |
1736 |
+- int start, skip = 0; |
1737 |
+ u32 carrier, period; |
1738 |
+ |
1739 |
+- /* skip meaningless 0xb1 0x60 header bytes on orig receiver */ |
1740 |
+- if (ir->flags.microsoft_gen1 && !out && !offset) |
1741 |
+- skip = 2; |
1742 |
+- |
1743 |
+- if (len <= skip) |
1744 |
++ if (offset < 0 || offset >= buf_len) |
1745 |
+ return; |
1746 |
+ |
1747 |
+ dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)", |
1748 |
+@@ -614,11 +609,32 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, |
1749 |
+ |
1750 |
+ inout = out ? "Request" : "Got"; |
1751 |
+ |
1752 |
+- start = offset + skip; |
1753 |
+- cmd = buf[start] & 0xff; |
1754 |
+- subcmd = buf[start + 1] & 0xff; |
1755 |
+- data = buf + start + 2; |
1756 |
++ cmd = buf[offset]; |
1757 |
++ subcmd = (offset + 1 < buf_len) ? buf[offset + 1] : 0; |
1758 |
++ data = &buf[offset] + 2; |
1759 |
++ |
1760 |
++ /* Trace meaningless 0xb1 0x60 header bytes on original receiver */ |
1761 |
++ if (ir->flags.microsoft_gen1 && !out && !offset) { |
1762 |
++ dev_dbg(dev, "MCE gen 1 header"); |
1763 |
++ return; |
1764 |
++ } |
1765 |
++ |
1766 |
++ /* Trace IR data header or trailer */ |
1767 |
++ if (cmd != MCE_CMD_PORT_IR && |
1768 |
++ (cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA) { |
1769 |
++ if (cmd == MCE_IRDATA_TRAILER) |
1770 |
++ dev_dbg(dev, "End of raw IR data"); |
1771 |
++ else |
1772 |
++ dev_dbg(dev, "Raw IR data, %d pulse/space samples", |
1773 |
++ cmd & MCE_PACKET_LENGTH_MASK); |
1774 |
++ return; |
1775 |
++ } |
1776 |
++ |
1777 |
++ /* Unexpected end of buffer? */ |
1778 |
++ if (offset + len > buf_len) |
1779 |
++ return; |
1780 |
+ |
1781 |
++ /* Decode MCE command/response */ |
1782 |
+ switch (cmd) { |
1783 |
+ case MCE_CMD_NULL: |
1784 |
+ if (subcmd == MCE_CMD_NULL) |
1785 |
+@@ -642,7 +658,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, |
1786 |
+ dev_dbg(dev, "Get hw/sw rev?"); |
1787 |
+ else |
1788 |
+ dev_dbg(dev, "hw/sw rev %*ph", |
1789 |
+- 4, &buf[start + 2]); |
1790 |
++ 4, &buf[offset + 2]); |
1791 |
+ break; |
1792 |
+ case MCE_CMD_RESUME: |
1793 |
+ dev_dbg(dev, "Device resume requested"); |
1794 |
+@@ -744,13 +760,6 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, |
1795 |
+ default: |
1796 |
+ break; |
1797 |
+ } |
1798 |
+- |
1799 |
+- if (cmd == MCE_IRDATA_TRAILER) |
1800 |
+- dev_dbg(dev, "End of raw IR data"); |
1801 |
+- else if ((cmd != MCE_CMD_PORT_IR) && |
1802 |
+- ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA)) |
1803 |
+- dev_dbg(dev, "Raw IR data, %d pulse/space samples", |
1804 |
+- cmd & MCE_PACKET_LENGTH_MASK); |
1805 |
+ #endif |
1806 |
+ } |
1807 |
+ |
1808 |
+@@ -1127,32 +1136,62 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable) |
1809 |
+ } |
1810 |
+ |
1811 |
+ /* |
1812 |
++ * Handle PORT_SYS/IR command response received from the MCE device. |
1813 |
++ * |
1814 |
++ * Assumes single response with all its data (not truncated) |
1815 |
++ * in buf_in[]. The response itself determines its total length |
1816 |
++ * (mceusb_cmd_datasize() + 2) and hence the minimum size of buf_in[]. |
1817 |
++ * |
1818 |
+ * We don't do anything but print debug spew for many of the command bits |
1819 |
+ * we receive from the hardware, but some of them are useful information |
1820 |
+ * we want to store so that we can use them. |
1821 |
+ */ |
1822 |
+-static void mceusb_handle_command(struct mceusb_dev *ir, int index) |
1823 |
++static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in) |
1824 |
+ { |
1825 |
++ u8 cmd = buf_in[0]; |
1826 |
++ u8 subcmd = buf_in[1]; |
1827 |
++ u8 *hi = &buf_in[2]; /* read only when required */ |
1828 |
++ u8 *lo = &buf_in[3]; /* read only when required */ |
1829 |
+ struct ir_raw_event rawir = {}; |
1830 |
+- u8 hi = ir->buf_in[index + 1] & 0xff; |
1831 |
+- u8 lo = ir->buf_in[index + 2] & 0xff; |
1832 |
+ u32 carrier_cycles; |
1833 |
+ u32 cycles_fix; |
1834 |
+ |
1835 |
+- switch (ir->buf_in[index]) { |
1836 |
+- /* the one and only 5-byte return value command */ |
1837 |
+- case MCE_RSP_GETPORTSTATUS: |
1838 |
+- if ((ir->buf_in[index + 4] & 0xff) == 0x00) |
1839 |
+- ir->txports_cabled |= 1 << hi; |
1840 |
+- break; |
1841 |
++ if (cmd == MCE_CMD_PORT_SYS) { |
1842 |
++ switch (subcmd) { |
1843 |
++ /* the one and only 5-byte return value command */ |
1844 |
++ case MCE_RSP_GETPORTSTATUS: |
1845 |
++ if (buf_in[5] == 0) |
1846 |
++ ir->txports_cabled |= 1 << *hi; |
1847 |
++ break; |
1848 |
++ |
1849 |
++ /* 1-byte return value commands */ |
1850 |
++ case MCE_RSP_EQEMVER: |
1851 |
++ ir->emver = *hi; |
1852 |
++ break; |
1853 |
++ |
1854 |
++ /* No return value commands */ |
1855 |
++ case MCE_RSP_CMD_ILLEGAL: |
1856 |
++ ir->need_reset = true; |
1857 |
++ break; |
1858 |
++ |
1859 |
++ default: |
1860 |
++ break; |
1861 |
++ } |
1862 |
++ |
1863 |
++ return; |
1864 |
++ } |
1865 |
+ |
1866 |
++ if (cmd != MCE_CMD_PORT_IR) |
1867 |
++ return; |
1868 |
++ |
1869 |
++ switch (subcmd) { |
1870 |
+ /* 2-byte return value commands */ |
1871 |
+ case MCE_RSP_EQIRTIMEOUT: |
1872 |
+- ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT); |
1873 |
++ ir->rc->timeout = US_TO_NS((*hi << 8 | *lo) * MCE_TIME_UNIT); |
1874 |
+ break; |
1875 |
+ case MCE_RSP_EQIRNUMPORTS: |
1876 |
+- ir->num_txports = hi; |
1877 |
+- ir->num_rxports = lo; |
1878 |
++ ir->num_txports = *hi; |
1879 |
++ ir->num_rxports = *lo; |
1880 |
+ break; |
1881 |
+ case MCE_RSP_EQIRRXCFCNT: |
1882 |
+ /* |
1883 |
+@@ -1165,7 +1204,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index) |
1884 |
+ */ |
1885 |
+ if (ir->carrier_report_enabled && ir->learning_active && |
1886 |
+ ir->pulse_tunit > 0) { |
1887 |
+- carrier_cycles = (hi << 8 | lo); |
1888 |
++ carrier_cycles = (*hi << 8 | *lo); |
1889 |
+ /* |
1890 |
+ * Adjust carrier cycle count by adding |
1891 |
+ * 1 missed count per pulse "on" |
1892 |
+@@ -1183,24 +1222,24 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index) |
1893 |
+ break; |
1894 |
+ |
1895 |
+ /* 1-byte return value commands */ |
1896 |
+- case MCE_RSP_EQEMVER: |
1897 |
+- ir->emver = hi; |
1898 |
+- break; |
1899 |
+ case MCE_RSP_EQIRTXPORTS: |
1900 |
+- ir->tx_mask = hi; |
1901 |
++ ir->tx_mask = *hi; |
1902 |
+ break; |
1903 |
+ case MCE_RSP_EQIRRXPORTEN: |
1904 |
+- ir->learning_active = ((hi & 0x02) == 0x02); |
1905 |
+- if (ir->rxports_active != hi) { |
1906 |
++ ir->learning_active = ((*hi & 0x02) == 0x02); |
1907 |
++ if (ir->rxports_active != *hi) { |
1908 |
+ dev_info(ir->dev, "%s-range (0x%x) receiver active", |
1909 |
+- ir->learning_active ? "short" : "long", hi); |
1910 |
+- ir->rxports_active = hi; |
1911 |
++ ir->learning_active ? "short" : "long", *hi); |
1912 |
++ ir->rxports_active = *hi; |
1913 |
+ } |
1914 |
+ break; |
1915 |
++ |
1916 |
++ /* No return value commands */ |
1917 |
+ case MCE_RSP_CMD_ILLEGAL: |
1918 |
+ case MCE_RSP_TX_TIMEOUT: |
1919 |
+ ir->need_reset = true; |
1920 |
+ break; |
1921 |
++ |
1922 |
+ default: |
1923 |
+ break; |
1924 |
+ } |
1925 |
+@@ -1226,7 +1265,8 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len) |
1926 |
+ ir->rem = mceusb_cmd_datasize(ir->cmd, ir->buf_in[i]); |
1927 |
+ mceusb_dev_printdata(ir, ir->buf_in, buf_len, i - 1, |
1928 |
+ ir->rem + 2, false); |
1929 |
+- mceusb_handle_command(ir, i); |
1930 |
++ if (i + ir->rem < buf_len) |
1931 |
++ mceusb_handle_command(ir, &ir->buf_in[i - 1]); |
1932 |
+ ir->parser_state = CMD_DATA; |
1933 |
+ break; |
1934 |
+ case PARSE_IRDATA: |
1935 |
+@@ -1255,15 +1295,22 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len) |
1936 |
+ ir->rem--; |
1937 |
+ break; |
1938 |
+ case CMD_HEADER: |
1939 |
+- /* decode mce packets of the form (84),AA,BB,CC,DD */ |
1940 |
+- /* IR data packets can span USB messages - rem */ |
1941 |
+ ir->cmd = ir->buf_in[i]; |
1942 |
+ if ((ir->cmd == MCE_CMD_PORT_IR) || |
1943 |
+ ((ir->cmd & MCE_PORT_MASK) != |
1944 |
+ MCE_COMMAND_IRDATA)) { |
1945 |
++ /* |
1946 |
++ * got PORT_SYS, PORT_IR, or unknown |
1947 |
++ * command response prefix |
1948 |
++ */ |
1949 |
+ ir->parser_state = SUBCMD; |
1950 |
+ continue; |
1951 |
+ } |
1952 |
++ /* |
1953 |
++ * got IR data prefix (0x80 + num_bytes) |
1954 |
++ * decode MCE packets of the form {0x83, AA, BB, CC} |
1955 |
++ * IR data packets can span USB messages |
1956 |
++ */ |
1957 |
+ ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK); |
1958 |
+ mceusb_dev_printdata(ir, ir->buf_in, buf_len, |
1959 |
+ i, ir->rem + 1, false); |
1960 |
+@@ -1287,6 +1334,14 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len) |
1961 |
+ if (ir->parser_state != CMD_HEADER && !ir->rem) |
1962 |
+ ir->parser_state = CMD_HEADER; |
1963 |
+ } |
1964 |
++ |
1965 |
++ /* |
1966 |
++ * Accept IR data spanning multiple rx buffers. |
1967 |
++ * Reject MCE command response spanning multiple rx buffers. |
1968 |
++ */ |
1969 |
++ if (ir->parser_state != PARSE_IRDATA || !ir->rem) |
1970 |
++ ir->parser_state = CMD_HEADER; |
1971 |
++ |
1972 |
+ if (event) { |
1973 |
+ dev_dbg(ir->dev, "processed IR data"); |
1974 |
+ ir_raw_event_handle(ir->rc); |
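
[Editor's note: the mceusb rework is, at heart, bounds checking: cmd/subcmd are only read once the offset is proven to lie inside the receive buffer, and command responses that would span USB messages are rejected (the new tail of mceusb_process_ir_data resets the parser unless it is mid IR data). The central guard, as a sketch:

#include <linux/errno.h>
#include <linux/types.h>

/* Read a cmd/subcmd byte pair from an rx buffer only after proving
 * the indices lie inside it. */
static int parse_cmd(const u8 *buf, int buf_len, int offset,
		     u8 *cmd, u8 *subcmd)
{
	if (offset < 0 || offset >= buf_len)
		return -EINVAL;		/* cmd byte out of range */
	*cmd = buf[offset];
	/* subcmd may legitimately be absent at the end of the buffer */
	*subcmd = (offset + 1 < buf_len) ? buf[offset + 1] : 0;
	return 0;
}
]
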
1975 |
+diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c |
1976 |
+index 1826ff825c2e..1a801dc286f8 100644 |
1977 |
+--- a/drivers/media/usb/b2c2/flexcop-usb.c |
1978 |
++++ b/drivers/media/usb/b2c2/flexcop-usb.c |
1979 |
+@@ -538,6 +538,9 @@ static int flexcop_usb_probe(struct usb_interface *intf, |
1980 |
+ struct flexcop_device *fc = NULL; |
1981 |
+ int ret; |
1982 |
+ |
1983 |
++ if (intf->cur_altsetting->desc.bNumEndpoints < 1) |
1984 |
++ return -ENODEV; |
1985 |
++ |
1986 |
+ if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) { |
1987 |
+ err("out of memory\n"); |
1988 |
+ return -ENOMEM; |
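
[Editor's note: the flexcop-usb one-liner is the standard probe() defence: matching the USB ID table proves nothing about the descriptors a (possibly malicious) device presents, so the endpoint count must be checked before endpoint[0] is touched. Roughly:

#include <linux/usb.h>

/* Refuse to probe when the active altsetting advertises no endpoints. */
static int my_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	if (intf->cur_altsetting->desc.bNumEndpoints < 1)
		return -ENODEV;

	/* ... only now is intf->cur_altsetting->endpoint[0] safe ... */
	return 0;
}
]
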
1989 |
+diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c |
1990 |
+index bac0778f7def..e3d58f5247ae 100644 |
1991 |
+--- a/drivers/media/usb/dvb-usb/cxusb.c |
1992 |
++++ b/drivers/media/usb/dvb-usb/cxusb.c |
1993 |
+@@ -542,7 +542,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d) |
1994 |
+ { |
1995 |
+ u8 ircode[4]; |
1996 |
+ |
1997 |
+- cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4); |
1998 |
++ if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0) |
1999 |
++ return 0; |
2000 |
+ |
2001 |
+ if (ircode[2] || ircode[3]) |
2002 |
+ rc_keydown(d->rc_dev, RC_PROTO_NEC, |
2003 |
+diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c |
2004 |
+index 93750af82d98..044d18e9b7ec 100644 |
2005 |
+--- a/drivers/media/usb/usbvision/usbvision-video.c |
2006 |
++++ b/drivers/media/usb/usbvision/usbvision-video.c |
2007 |
+@@ -314,6 +314,10 @@ static int usbvision_v4l2_open(struct file *file) |
2008 |
+ if (mutex_lock_interruptible(&usbvision->v4l2_lock)) |
2009 |
+ return -ERESTARTSYS; |
2010 |
+ |
2011 |
++ if (usbvision->remove_pending) { |
2012 |
++ err_code = -ENODEV; |
2013 |
++ goto unlock; |
2014 |
++ } |
2015 |
+ if (usbvision->user) { |
2016 |
+ err_code = -EBUSY; |
2017 |
+ } else { |
2018 |
+@@ -377,6 +381,7 @@ unlock: |
2019 |
+ static int usbvision_v4l2_close(struct file *file) |
2020 |
+ { |
2021 |
+ struct usb_usbvision *usbvision = video_drvdata(file); |
2022 |
++ int r; |
2023 |
+ |
2024 |
+ PDEBUG(DBG_IO, "close"); |
2025 |
+ |
2026 |
+@@ -391,9 +396,10 @@ static int usbvision_v4l2_close(struct file *file) |
2027 |
+ usbvision_scratch_free(usbvision); |
2028 |
+ |
2029 |
+ usbvision->user--; |
2030 |
++ r = usbvision->remove_pending; |
2031 |
+ mutex_unlock(&usbvision->v4l2_lock); |
2032 |
+ |
2033 |
+- if (usbvision->remove_pending) { |
2034 |
++ if (r) { |
2035 |
+ printk(KERN_INFO "%s: Final disconnect\n", __func__); |
2036 |
+ usbvision_release(usbvision); |
2037 |
+ return 0; |
2038 |
+@@ -453,6 +459,9 @@ static int vidioc_querycap(struct file *file, void *priv, |
2039 |
+ { |
2040 |
+ struct usb_usbvision *usbvision = video_drvdata(file); |
2041 |
+ |
2042 |
++ if (!usbvision->dev) |
2043 |
++ return -ENODEV; |
2044 |
++ |
2045 |
+ strscpy(vc->driver, "USBVision", sizeof(vc->driver)); |
2046 |
+ strscpy(vc->card, |
2047 |
+ usbvision_device_data[usbvision->dev_model].model_string, |
2048 |
+@@ -1073,6 +1082,11 @@ static int usbvision_radio_open(struct file *file) |
2049 |
+ |
2050 |
+ if (mutex_lock_interruptible(&usbvision->v4l2_lock)) |
2051 |
+ return -ERESTARTSYS; |
2052 |
++ |
2053 |
++ if (usbvision->remove_pending) { |
2054 |
++ err_code = -ENODEV; |
2055 |
++ goto out; |
2056 |
++ } |
2057 |
+ err_code = v4l2_fh_open(file); |
2058 |
+ if (err_code) |
2059 |
+ goto out; |
2060 |
+@@ -1105,21 +1119,24 @@ out: |
2061 |
+ static int usbvision_radio_close(struct file *file) |
2062 |
+ { |
2063 |
+ struct usb_usbvision *usbvision = video_drvdata(file); |
2064 |
++ int r; |
2065 |
+ |
2066 |
+ PDEBUG(DBG_IO, ""); |
2067 |
+ |
2068 |
+ mutex_lock(&usbvision->v4l2_lock); |
2069 |
+ /* Set packet size to 0 */ |
2070 |
+ usbvision->iface_alt = 0; |
2071 |
+- usb_set_interface(usbvision->dev, usbvision->iface, |
2072 |
+- usbvision->iface_alt); |
2073 |
++ if (usbvision->dev) |
2074 |
++ usb_set_interface(usbvision->dev, usbvision->iface, |
2075 |
++ usbvision->iface_alt); |
2076 |
+ |
2077 |
+ usbvision_audio_off(usbvision); |
2078 |
+ usbvision->radio = 0; |
2079 |
+ usbvision->user--; |
2080 |
++ r = usbvision->remove_pending; |
2081 |
+ mutex_unlock(&usbvision->v4l2_lock); |
2082 |
+ |
2083 |
+- if (usbvision->remove_pending) { |
2084 |
++ if (r) { |
2085 |
+ printk(KERN_INFO "%s: Final disconnect\n", __func__); |
2086 |
+ v4l2_fh_release(file); |
2087 |
+ usbvision_release(usbvision); |
2088 |
+@@ -1551,6 +1568,7 @@ err_usb: |
2089 |
+ static void usbvision_disconnect(struct usb_interface *intf) |
2090 |
+ { |
2091 |
+ struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf)); |
2092 |
++ int u; |
2093 |
+ |
2094 |
+ PDEBUG(DBG_PROBE, ""); |
2095 |
+ |
2096 |
+@@ -1567,13 +1585,14 @@ static void usbvision_disconnect(struct usb_interface *intf) |
2097 |
+ v4l2_device_disconnect(&usbvision->v4l2_dev); |
2098 |
+ usbvision_i2c_unregister(usbvision); |
2099 |
+ usbvision->remove_pending = 1; /* Now all ISO data will be ignored */ |
2100 |
++ u = usbvision->user; |
2101 |
+ |
2102 |
+ usb_put_dev(usbvision->dev); |
2103 |
+ usbvision->dev = NULL; /* USB device is no more */ |
2104 |
+ |
2105 |
+ mutex_unlock(&usbvision->v4l2_lock); |
2106 |
+ |
2107 |
+- if (usbvision->user) { |
2108 |
++ if (u) { |
2109 |
+ printk(KERN_INFO "%s: In use, disconnect pending\n", |
2110 |
+ __func__); |
2111 |
+ wake_up_interruptible(&usbvision->wait_frame); |
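
[Editor's note: all the usbvision hunks apply one rule: remove_pending and user are only stable while v4l2_lock is held, so each path snapshots the field into a local before unlocking and acts on the copy, instead of re-reading it after disconnect() may have run. The shape of it, as a sketch:

#include <linux/mutex.h>

/* Snapshot shared state under the lock and act on the copy afterwards,
 * so a concurrent disconnect cannot change the decision between
 * unlock and use. */
static void close_path(struct mutex *lock, const int *remove_pending)
{
	int pending;

	mutex_lock(lock);
	/* ... normal teardown under the lock ... */
	pending = *remove_pending;	/* stable only while locked */
	mutex_unlock(lock);

	if (pending) {
		/* final disconnect: release the device exactly once */
	}
}
]
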
2112 |
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c |
2113 |
+index 66ee168ddc7e..428235ca2635 100644 |
2114 |
+--- a/drivers/media/usb/uvc/uvc_driver.c |
2115 |
++++ b/drivers/media/usb/uvc/uvc_driver.c |
2116 |
+@@ -2151,6 +2151,20 @@ static int uvc_probe(struct usb_interface *intf, |
2117 |
+ sizeof(dev->name) - len); |
2118 |
+ } |
2119 |
+ |
2120 |
++ /* Initialize the media device. */ |
2121 |
++#ifdef CONFIG_MEDIA_CONTROLLER |
2122 |
++ dev->mdev.dev = &intf->dev; |
2123 |
++ strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model)); |
2124 |
++ if (udev->serial) |
2125 |
++ strscpy(dev->mdev.serial, udev->serial, |
2126 |
++ sizeof(dev->mdev.serial)); |
2127 |
++ usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info)); |
2128 |
++ dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); |
2129 |
++ media_device_init(&dev->mdev); |
2130 |
++ |
2131 |
++ dev->vdev.mdev = &dev->mdev; |
2132 |
++#endif |
2133 |
++ |
2134 |
+ /* Parse the Video Class control descriptor. */ |
2135 |
+ if (uvc_parse_control(dev) < 0) { |
2136 |
+ uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC " |
2137 |
+@@ -2171,19 +2185,7 @@ static int uvc_probe(struct usb_interface *intf, |
2138 |
+ "linux-uvc-devel mailing list.\n"); |
2139 |
+ } |
2140 |
+ |
2141 |
+- /* Initialize the media device and register the V4L2 device. */ |
2142 |
+-#ifdef CONFIG_MEDIA_CONTROLLER |
2143 |
+- dev->mdev.dev = &intf->dev; |
2144 |
+- strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model)); |
2145 |
+- if (udev->serial) |
2146 |
+- strscpy(dev->mdev.serial, udev->serial, |
2147 |
+- sizeof(dev->mdev.serial)); |
2148 |
+- usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info)); |
2149 |
+- dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); |
2150 |
+- media_device_init(&dev->mdev); |
2151 |
+- |
2152 |
+- dev->vdev.mdev = &dev->mdev; |
2153 |
+-#endif |
2154 |
++ /* Register the V4L2 device. */ |
2155 |
+ if (v4l2_device_register(&intf->dev, &dev->vdev) < 0) |
2156 |
+ goto error; |
2157 |
+ |
2158 |
+diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c |
2159 |
+index 0a9a7ee2a866..f4889431f9b7 100644 |
2160 |
+--- a/drivers/net/ethernet/google/gve/gve_tx.c |
2161 |
++++ b/drivers/net/ethernet/google/gve/gve_tx.c |
2162 |
+@@ -393,12 +393,13 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc, |
2163 |
+ static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses, |
2164 |
+ u64 iov_offset, u64 iov_len) |
2165 |
+ { |
2166 |
++ u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE; |
2167 |
++ u64 first_page = iov_offset / PAGE_SIZE; |
2168 |
+ dma_addr_t dma; |
2169 |
+- u64 addr; |
2170 |
++ u64 page; |
2171 |
+ |
2172 |
+- for (addr = iov_offset; addr < iov_offset + iov_len; |
2173 |
+- addr += PAGE_SIZE) { |
2174 |
+- dma = page_buses[addr / PAGE_SIZE]; |
2175 |
++ for (page = first_page; page <= last_page; page++) { |
2176 |
++ dma = page_buses[page]; |
2177 |
+ dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE); |
2178 |
+ } |
2179 |
+ } |
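
[Editor's note: the gve fix changes the loop bounds so a fragment ending mid-page still syncs its final page: iterate page indices from iov_offset / PAGE_SIZE through (iov_offset + iov_len - 1) / PAGE_SIZE inclusive, instead of striding by PAGE_SIZE from an unaligned start offset. The same arithmetic in isolation (assumes len > 0):

#include <asm/page.h>
#include <linux/types.h>

/* Visit every page index touched by [off, off + len), len > 0.
 * E.g. off = PAGE_SIZE - 4, len = 8 straddles two pages; the old
 * stride-by-PAGE_SIZE loop from the unaligned start visited only one. */
static void for_each_touched_page(u64 off, u64 len, void (*sync)(u64 page))
{
	u64 first_page = off / PAGE_SIZE;
	u64 last_page = (off + len - 1) / PAGE_SIZE;
	u64 page;

	for (page = first_page; page <= last_page; page++)
		sync(page);
}
]
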
2180 |
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c |
2181 |
+index 94c59939a8cf..e639a365ac2d 100644 |
2182 |
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c |
2183 |
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c |
2184 |
+@@ -1745,6 +1745,7 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, |
2185 |
+ err = mlx4_en_get_flow(dev, cmd, cmd->fs.location); |
2186 |
+ break; |
2187 |
+ case ETHTOOL_GRXCLSRLALL: |
2188 |
++ cmd->data = MAX_NUM_OF_FS_RULES; |
2189 |
+ while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) { |
2190 |
+ err = mlx4_en_get_flow(dev, cmd, i); |
2191 |
+ if (!err) |
2192 |
+@@ -1811,6 +1812,7 @@ static int mlx4_en_set_channels(struct net_device *dev, |
2193 |
+ struct mlx4_en_dev *mdev = priv->mdev; |
2194 |
+ struct mlx4_en_port_profile new_prof; |
2195 |
+ struct mlx4_en_priv *tmp; |
2196 |
++ int total_tx_count; |
2197 |
+ int port_up = 0; |
2198 |
+ int xdp_count; |
2199 |
+ int err = 0; |
2200 |
+@@ -1825,13 +1827,12 @@ static int mlx4_en_set_channels(struct net_device *dev, |
2201 |
+ |
2202 |
+ mutex_lock(&mdev->state_lock); |
2203 |
+ xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0; |
2204 |
+- if (channel->tx_count * priv->prof->num_up + xdp_count > |
2205 |
+- priv->mdev->profile.max_num_tx_rings_p_up * priv->prof->num_up) { |
2206 |
++ total_tx_count = channel->tx_count * priv->prof->num_up + xdp_count; |
2207 |
++ if (total_tx_count > MAX_TX_RINGS) { |
2208 |
+ err = -EINVAL; |
2209 |
+ en_err(priv, |
2210 |
+ "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n", |
2211 |
+- channel->tx_count * priv->prof->num_up + xdp_count, |
2212 |
+- MAX_TX_RINGS); |
2213 |
++ total_tx_count, MAX_TX_RINGS); |
2214 |
+ goto out; |
2215 |
+ } |
2216 |
+ |
2217 |
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
2218 |
+index c1438ae52a11..ba4f195a36d6 100644 |
2219 |
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
2220 |
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
2221 |
+@@ -91,6 +91,7 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc) |
2222 |
+ struct mlx4_en_dev *mdev = priv->mdev; |
2223 |
+ struct mlx4_en_port_profile new_prof; |
2224 |
+ struct mlx4_en_priv *tmp; |
2225 |
++ int total_count; |
2226 |
+ int port_up = 0; |
2227 |
+ int err = 0; |
2228 |
+ |
2229 |
+@@ -104,6 +105,14 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc) |
2230 |
+ MLX4_EN_NUM_UP_HIGH; |
2231 |
+ new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up * |
2232 |
+ new_prof.num_up; |
2233 |
++ total_count = new_prof.tx_ring_num[TX] + new_prof.tx_ring_num[TX_XDP]; |
2234 |
++ if (total_count > MAX_TX_RINGS) { |
2235 |
++ err = -EINVAL; |
2236 |
++ en_err(priv, |
2237 |
++ "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n", |
2238 |
++ total_count, MAX_TX_RINGS); |
2239 |
++ goto out; |
2240 |
++ } |
2241 |
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true); |
2242 |
+ if (err) |
2243 |
+ goto out; |
2244 |
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c |
2245 |
+index 310f65ef5446..d41c520ce0a8 100644 |
2246 |
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c |
2247 |
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c |
2248 |
+@@ -232,12 +232,15 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, |
2249 |
+ if (max_encap_size < ipv4_encap_size) { |
2250 |
+ mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", |
2251 |
+ ipv4_encap_size, max_encap_size); |
2252 |
+- return -EOPNOTSUPP; |
2253 |
++ err = -EOPNOTSUPP; |
2254 |
++ goto out; |
2255 |
+ } |
2256 |
+ |
2257 |
+ encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL); |
2258 |
+- if (!encap_header) |
2259 |
+- return -ENOMEM; |
2260 |
++ if (!encap_header) { |
2261 |
++ err = -ENOMEM; |
2262 |
++ goto out; |
2263 |
++ } |
2264 |
+ |
2265 |
+ /* used by mlx5e_detach_encap to lookup a neigh hash table |
2266 |
+ * entry in the neigh hash table when a user deletes a rule |
2267 |
+@@ -348,12 +351,15 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, |
2268 |
+ if (max_encap_size < ipv6_encap_size) { |
2269 |
+ mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", |
2270 |
+ ipv6_encap_size, max_encap_size); |
2271 |
+- return -EOPNOTSUPP; |
2272 |
++ err = -EOPNOTSUPP; |
2273 |
++ goto out; |
2274 |
+ } |
2275 |
+ |
2276 |
+ encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL); |
2277 |
+- if (!encap_header) |
2278 |
+- return -ENOMEM; |
2279 |
++ if (!encap_header) { |
2280 |
++ err = -ENOMEM; |
2281 |
++ goto out; |
2282 |
++ } |
2283 |
+ |
2284 |
+ /* used by mlx5e_detach_encap to lookup a neigh hash table |
2285 |
+ * entry in the neigh hash table when a user deletes a rule |
2286 |
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c |
2287 |
+index a9bb8e2b34a7..8d4856860365 100644 |
2288 |
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c |
2289 |
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c |
2290 |
+@@ -708,9 +708,9 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev, |
2291 |
+ |
2292 |
+ static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings, |
2293 |
+ u32 eth_proto_cap, |
2294 |
+- u8 connector_type) |
2295 |
++ u8 connector_type, bool ext) |
2296 |
+ { |
2297 |
+- if (!connector_type || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) { |
2298 |
++ if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) { |
2299 |
+ if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR) |
2300 |
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_SR) |
2301 |
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) |
2302 |
+@@ -842,9 +842,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = { |
2303 |
+ [MLX5E_PORT_OTHER] = PORT_OTHER, |
2304 |
+ }; |
2305 |
+ |
2306 |
+-static u8 get_connector_port(u32 eth_proto, u8 connector_type) |
2307 |
++static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext) |
2308 |
+ { |
2309 |
+- if (connector_type && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER) |
2310 |
++ if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER) |
2311 |
+ return ptys2connector_type[connector_type]; |
2312 |
+ |
2313 |
+ if (eth_proto & |
2314 |
+@@ -945,9 +945,9 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, |
2315 |
+ eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; |
2316 |
+ |
2317 |
+ link_ksettings->base.port = get_connector_port(eth_proto_oper, |
2318 |
+- connector_type); |
2319 |
++ connector_type, ext); |
2320 |
+ ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin, |
2321 |
+- connector_type); |
2322 |
++ connector_type, ext); |
2323 |
+ get_lp_advertising(mdev, eth_proto_lp, link_ksettings); |
2324 |
+ |
2325 |
+ if (an_status == MLX5_AN_COMPLETE) |
2326 |
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c |
2327 |
+index 1f3891fde2eb..a3b2ce112508 100644 |
2328 |
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c |
2329 |
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c |
2330 |
+@@ -2044,7 +2044,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, |
2331 |
+ |
2332 |
+ unlock: |
2333 |
+ mutex_unlock(&esw->state_lock); |
2334 |
+- return 0; |
2335 |
++ return err; |
2336 |
+ } |
2337 |
+ |
2338 |
+ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, |
2339 |
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c |
2340 |
+index 3e99799bdb40..a6a64531bc43 100644 |
2341 |
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c |
2342 |
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c |
2343 |
+@@ -549,7 +549,7 @@ static void del_sw_flow_group(struct fs_node *node) |
2344 |
+ |
2345 |
+ rhashtable_destroy(&fg->ftes_hash); |
2346 |
+ ida_destroy(&fg->fte_allocator); |
2347 |
+- if (ft->autogroup.active) |
2348 |
++ if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size) |
2349 |
+ ft->autogroup.num_groups--; |
2350 |
+ err = rhltable_remove(&ft->fgs_hash, |
2351 |
+ &fg->hash, |
2352 |
+@@ -1095,6 +1095,8 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, |
2353 |
+ |
2354 |
+ ft->autogroup.active = true; |
2355 |
+ ft->autogroup.required_groups = max_num_groups; |
2356 |
++ /* We reserve space for flow groups in addition to max types */ |
2357 |
++ ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1); |
2358 |
+ |
2359 |
+ return ft; |
2360 |
+ } |
2361 |
+@@ -1297,8 +1299,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft |
2362 |
+ return ERR_PTR(-ENOENT); |
2363 |
+ |
2364 |
+ if (ft->autogroup.num_groups < ft->autogroup.required_groups) |
2365 |
+- /* We save place for flow groups in addition to max types */ |
2366 |
+- group_size = ft->max_fte / (ft->autogroup.required_groups + 1); |
2367 |
++ group_size = ft->autogroup.group_size; |
2368 |
+ |
2369 |
+ /* ft->max_fte == ft->autogroup.max_types */ |
2370 |
+ if (group_size == 0) |
2371 |
+@@ -1325,7 +1326,8 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft |
2372 |
+ if (IS_ERR(fg)) |
2373 |
+ goto out; |
2374 |
+ |
2375 |
+- ft->autogroup.num_groups++; |
2376 |
++ if (group_size == ft->autogroup.group_size) |
2377 |
++ ft->autogroup.num_groups++; |
2378 |
+ |
2379 |
+ out: |
2380 |
+ return fg; |
2381 |
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h |
2382 |
+index c1252d6be0ef..80906aff21d7 100644 |
2383 |
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h |
2384 |
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h |
2385 |
+@@ -137,6 +137,7 @@ struct mlx5_flow_table { |
2386 |
+ struct { |
2387 |
+ bool active; |
2388 |
+ unsigned int required_groups; |
2389 |
++ unsigned int group_size; |
2390 |
+ unsigned int num_groups; |
2391 |
+ } autogroup; |
2392 |
+ /* Protect fwd_rules */ |
2393 |
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c |
2394 |
+index fda4964c5cf4..5e2b56305a3a 100644 |
2395 |
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c |
2396 |
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c |
2397 |
+@@ -1552,6 +1552,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = { |
2398 |
+ { PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF}, /* ConnectX-6 VF */ |
2399 |
+ { PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */ |
2400 |
+ { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ |
2401 |
++ { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */ |
2402 |
+ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ |
2403 |
+ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ |
2404 |
+ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ |
2405 |
+diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c |
2406 |
+index 67990406cba2..29e95d0a6ad1 100644 |
2407 |
+--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c |
2408 |
++++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c |
2409 |
+@@ -66,6 +66,8 @@ retry: |
2410 |
+ return err; |
2411 |
+ |
2412 |
+ if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) { |
2413 |
++ fsm_state_err = min_t(enum mlxfw_fsm_state_err, |
2414 |
++ fsm_state_err, MLXFW_FSM_STATE_ERR_MAX); |
2415 |
+ pr_err("Firmware flash failed: %s\n", |
2416 |
+ mlxfw_fsm_state_err_str[fsm_state_err]); |
2417 |
+ NL_SET_ERR_MSG_MOD(extack, "Firmware flash failed"); |
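
[Editor's note: mlxfw indexes a string table with a firmware-reported enum, so a newer firmware could report a value past the end of the array; the hunk clamps the value to the table's sentinel before indexing. The generic form of the idiom, with invented names:

#include <linux/kernel.h>

enum my_err { MY_ERR_OK, MY_ERR_FOO, MY_ERR_MAX };

static const char * const my_err_str[] = {
	[MY_ERR_OK]  = "ok",
	[MY_ERR_FOO] = "foo failed",
	[MY_ERR_MAX] = "unknown error",	/* sentinel for newer values */
};

/* Clamp before indexing so an externally supplied value cannot read
 * past the string table. */
static const char *my_err_to_str(enum my_err err)
{
	err = min_t(enum my_err, err, MY_ERR_MAX);
	return my_err_str[err];
}
]
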
2418 |
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c |
2419 |
+index e618be7ce6c6..7b7e50d25d25 100644 |
2420 |
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c |
2421 |
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c |
2422 |
+@@ -994,7 +994,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev) |
2423 |
+ if (d) |
2424 |
+ return l3mdev_fib_table(d) ? : RT_TABLE_MAIN; |
2425 |
+ else |
2426 |
+- return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN; |
2427 |
++ return RT_TABLE_MAIN; |
2428 |
+ } |
2429 |
+ |
2430 |
+ static struct mlxsw_sp_rif * |
2431 |
+@@ -1598,27 +1598,10 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp, |
2432 |
+ { |
2433 |
+ struct mlxsw_sp_ipip_entry *ipip_entry = |
2434 |
+ mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); |
2435 |
+- enum mlxsw_sp_l3proto ul_proto; |
2436 |
+- union mlxsw_sp_l3addr saddr; |
2437 |
+- u32 ul_tb_id; |
2438 |
+ |
2439 |
+ if (!ipip_entry) |
2440 |
+ return 0; |
2441 |
+ |
2442 |
+- /* For flat configuration cases, moving overlay to a different VRF might |
2443 |
+- * cause local address conflict, and the conflicting tunnels need to be |
2444 |
+- * demoted. |
2445 |
+- */ |
2446 |
+- ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev); |
2447 |
+- ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto; |
2448 |
+- saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev); |
2449 |
+- if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto, |
2450 |
+- saddr, ul_tb_id, |
2451 |
+- ipip_entry)) { |
2452 |
+- mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry); |
2453 |
+- return 0; |
2454 |
+- } |
2455 |
+- |
2456 |
+ return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry, |
2457 |
+ true, false, false, extack); |
2458 |
+ } |
2459 |
+diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c |
2460 |
+index 02ed6d1b716c..af15a737c675 100644 |
2461 |
+--- a/drivers/net/ethernet/sfc/ptp.c |
2462 |
++++ b/drivers/net/ethernet/sfc/ptp.c |
2463 |
+@@ -1531,7 +1531,8 @@ void efx_ptp_remove(struct efx_nic *efx) |
2464 |
+ (void)efx_ptp_disable(efx); |
2465 |
+ |
2466 |
+ cancel_work_sync(&efx->ptp_data->work); |
2467 |
+- cancel_work_sync(&efx->ptp_data->pps_work); |
2468 |
++ if (efx->ptp_data->pps_workwq) |
2469 |
++ cancel_work_sync(&efx->ptp_data->pps_work); |
2470 |
+ |
2471 |
+ skb_queue_purge(&efx->ptp_data->rxq); |
2472 |
+ skb_queue_purge(&efx->ptp_data->txq); |
2473 |
+diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c |
2474 |
+index bd04fe762056..2a79c7a7e920 100644 |
2475 |
+--- a/drivers/net/phy/mdio_bus.c |
2476 |
++++ b/drivers/net/phy/mdio_bus.c |
2477 |
+@@ -68,11 +68,12 @@ static int mdiobus_register_reset(struct mdio_device *mdiodev) |
2478 |
+ if (mdiodev->dev.of_node) |
2479 |
+ reset = devm_reset_control_get_exclusive(&mdiodev->dev, |
2480 |
+ "phy"); |
2481 |
+- if (PTR_ERR(reset) == -ENOENT || |
2482 |
+- PTR_ERR(reset) == -ENOTSUPP) |
2483 |
+- reset = NULL; |
2484 |
+- else if (IS_ERR(reset)) |
2485 |
+- return PTR_ERR(reset); |
2486 |
++ if (IS_ERR(reset)) { |
2487 |
++ if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOTSUPP) |
2488 |
++ reset = NULL; |
2489 |
++ else |
2490 |
++ return PTR_ERR(reset); |
2491 |
++ } |
2492 |
+ |
2493 |
+ mdiodev->reset_ctrl = reset; |
2494 |
+ |
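
[Editor's note: the mdio_bus restructure is the optional-resource idiom done properly: PTR_ERR() is only decoded after IS_ERR() confirms the pointer encodes an errno, -ENOENT/-ENOTSUPP mean "no reset line, carry on", and any other error propagates. A sketch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

/* Fetch an optional reset line: absence is not an error. */
static int get_optional_reset(struct device *dev,
			      struct reset_control **out)
{
	struct reset_control *reset;

	reset = devm_reset_control_get_exclusive(dev, "phy");
	if (IS_ERR(reset)) {
		if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOTSUPP)
			reset = NULL;		/* no reset line: fine */
		else
			return PTR_ERR(reset);	/* real error: propagate */
	}
	*out = reset;
	return 0;
}
]
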
2495 |
+diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c |
2496 |
+index a0b4d265c6eb..347bb92e4130 100644 |
2497 |
+--- a/drivers/net/wireless/ath/ath10k/pci.c |
2498 |
++++ b/drivers/net/wireless/ath/ath10k/pci.c |
2499 |
+@@ -3490,7 +3490,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, |
2500 |
+ struct ath10k_pci *ar_pci; |
2501 |
+ enum ath10k_hw_rev hw_rev; |
2502 |
+ struct ath10k_bus_params bus_params = {}; |
2503 |
+- bool pci_ps; |
2504 |
++ bool pci_ps, is_qca988x = false; |
2505 |
+ int (*pci_soft_reset)(struct ath10k *ar); |
2506 |
+ int (*pci_hard_reset)(struct ath10k *ar); |
2507 |
+ u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr); |
2508 |
+@@ -3500,6 +3500,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, |
2509 |
+ case QCA988X_2_0_DEVICE_ID: |
2510 |
+ hw_rev = ATH10K_HW_QCA988X; |
2511 |
+ pci_ps = false; |
2512 |
++ is_qca988x = true; |
2513 |
+ pci_soft_reset = ath10k_pci_warm_reset; |
2514 |
+ pci_hard_reset = ath10k_pci_qca988x_chip_reset; |
2515 |
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr; |
2516 |
+@@ -3619,25 +3620,34 @@ static int ath10k_pci_probe(struct pci_dev *pdev, |
2517 |
+ goto err_deinit_irq; |
2518 |
+ } |
2519 |
+ |
2520 |
++ bus_params.dev_type = ATH10K_DEV_TYPE_LL; |
2521 |
++ bus_params.link_can_suspend = true; |
2522 |
++ /* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that |
2523 |
++ * fall off the bus during chip_reset. These chips have the same pci |
2524 |
++ * device id as the QCA9880 BR4A or 2R4E. So that's why the check. |
2525 |
++ */ |
2526 |
++ if (is_qca988x) { |
2527 |
++ bus_params.chip_id = |
2528 |
++ ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); |
2529 |
++ if (bus_params.chip_id != 0xffffffff) { |
2530 |
++ if (!ath10k_pci_chip_is_supported(pdev->device, |
2531 |
++ bus_params.chip_id)) |
2532 |
++ goto err_unsupported; |
2533 |
++ } |
2534 |
++ } |
2535 |
++ |
2536 |
+ ret = ath10k_pci_chip_reset(ar); |
2537 |
+ if (ret) { |
2538 |
+ ath10k_err(ar, "failed to reset chip: %d\n", ret); |
2539 |
+ goto err_free_irq; |
2540 |
+ } |
2541 |
+ |
2542 |
+- bus_params.dev_type = ATH10K_DEV_TYPE_LL; |
2543 |
+- bus_params.link_can_suspend = true; |
2544 |
+ bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); |
2545 |
+- if (bus_params.chip_id == 0xffffffff) { |
2546 |
+- ath10k_err(ar, "failed to get chip id\n"); |
2547 |
+- goto err_free_irq; |
2548 |
+- } |
2549 |
++ if (bus_params.chip_id == 0xffffffff) |
2550 |
++ goto err_unsupported; |
2551 |
+ |
2552 |
+- if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) { |
2553 |
+- ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", |
2554 |
+- pdev->device, bus_params.chip_id); |
2555 |
++ if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) |
2556 |
+ goto err_free_irq; |
2557 |
+- } |
2558 |
+ |
2559 |
+ ret = ath10k_core_register(ar, &bus_params); |
2560 |
+ if (ret) { |
2561 |
+@@ -3647,6 +3657,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev, |
2562 |
+ |
2563 |
+ return 0; |
2564 |
+ |
2565 |
++err_unsupported: |
2566 |
++ ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", |
2567 |
++ pdev->device, bus_params.chip_id); |
2568 |
++ |
2569 |
+ err_free_irq: |
2570 |
+ ath10k_pci_free_irq(ar); |
2571 |
+ ath10k_pci_rx_retry_sync(ar); |
2572 |
+diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c |
2573 |
+index 3b63b6257c43..545ac1f06997 100644 |
2574 |
+--- a/drivers/net/wireless/ath/ath10k/qmi.c |
2575 |
++++ b/drivers/net/wireless/ath/ath10k/qmi.c |
2576 |
+@@ -581,22 +581,29 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi) |
2577 |
+ { |
2578 |
+ struct wlfw_host_cap_resp_msg_v01 resp = {}; |
2579 |
+ struct wlfw_host_cap_req_msg_v01 req = {}; |
2580 |
++ struct qmi_elem_info *req_ei; |
2581 |
+ struct ath10k *ar = qmi->ar; |
2582 |
++ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); |
2583 |
+ struct qmi_txn txn; |
2584 |
+ int ret; |
2585 |
+ |
2586 |
+ req.daemon_support_valid = 1; |
2587 |
+ req.daemon_support = 0; |
2588 |
+ |
2589 |
+- ret = qmi_txn_init(&qmi->qmi_hdl, &txn, |
2590 |
+- wlfw_host_cap_resp_msg_v01_ei, &resp); |
2591 |
++ ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_host_cap_resp_msg_v01_ei, |
2592 |
++ &resp); |
2593 |
+ if (ret < 0) |
2594 |
+ goto out; |
2595 |
+ |
2596 |
++ if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags)) |
2597 |
++ req_ei = wlfw_host_cap_8bit_req_msg_v01_ei; |
2598 |
++ else |
2599 |
++ req_ei = wlfw_host_cap_req_msg_v01_ei; |
2600 |
++ |
2601 |
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, |
2602 |
+ QMI_WLFW_HOST_CAP_REQ_V01, |
2603 |
+ WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN, |
2604 |
+- wlfw_host_cap_req_msg_v01_ei, &req); |
2605 |
++ req_ei, &req); |
2606 |
+ if (ret < 0) { |
2607 |
+ qmi_txn_cancel(&txn); |
2608 |
+ ath10k_err(ar, "failed to send host capability request: %d\n", ret); |
2609 |
+diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c |
2610 |
+index 1fe05c6218c3..86fcf4e1de5f 100644 |
2611 |
+--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c |
2612 |
++++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c |
2613 |
+@@ -1988,6 +1988,28 @@ struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = { |
2614 |
+ {} |
2615 |
+ }; |
2616 |
+ |
2617 |
++struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[] = { |
2618 |
++ { |
2619 |
++ .data_type = QMI_OPT_FLAG, |
2620 |
++ .elem_len = 1, |
2621 |
++ .elem_size = sizeof(u8), |
2622 |
++ .array_type = NO_ARRAY, |
2623 |
++ .tlv_type = 0x10, |
2624 |
++ .offset = offsetof(struct wlfw_host_cap_req_msg_v01, |
2625 |
++ daemon_support_valid), |
2626 |
++ }, |
2627 |
++ { |
2628 |
++ .data_type = QMI_UNSIGNED_1_BYTE, |
2629 |
++ .elem_len = 1, |
2630 |
++ .elem_size = sizeof(u8), |
2631 |
++ .array_type = NO_ARRAY, |
2632 |
++ .tlv_type = 0x10, |
2633 |
++ .offset = offsetof(struct wlfw_host_cap_req_msg_v01, |
2634 |
++ daemon_support), |
2635 |
++ }, |
2636 |
++ {} |
2637 |
++}; |
2638 |
++ |
2639 |
+ struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = { |
2640 |
+ { |
2641 |
+ .data_type = QMI_STRUCT, |
2642 |
+diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h |
2643 |
+index bca1186e1560..4d107e1364a8 100644 |
2644 |
+--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h |
2645 |
++++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h |
2646 |
+@@ -575,6 +575,7 @@ struct wlfw_host_cap_req_msg_v01 { |
2647 |
+ |
2648 |
+ #define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189 |
2649 |
+ extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[]; |
2650 |
++extern struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[]; |
2651 |
+ |
2652 |
+ struct wlfw_host_cap_resp_msg_v01 { |
2653 |
+ struct qmi_response_type_v01 resp; |
2654 |
+diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c |
2655 |
+index b491361e6ed4..fc15a0037f0e 100644 |
2656 |
+--- a/drivers/net/wireless/ath/ath10k/snoc.c |
2657 |
++++ b/drivers/net/wireless/ath/ath10k/snoc.c |
2658 |
+@@ -1261,6 +1261,15 @@ out: |
2659 |
+ return ret; |
2660 |
+ } |
2661 |
+ |
2662 |
++static void ath10k_snoc_quirks_init(struct ath10k *ar) |
2663 |
++{ |
2664 |
++ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); |
2665 |
++ struct device *dev = &ar_snoc->dev->dev; |
2666 |
++ |
2667 |
++ if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk")) |
2668 |
++ set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags); |
2669 |
++} |
2670 |
++ |
2671 |
+ int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type) |
2672 |
+ { |
2673 |
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); |
2674 |
+@@ -1678,6 +1687,8 @@ static int ath10k_snoc_probe(struct platform_device *pdev) |
2675 |
+ ar->ce_priv = &ar_snoc->ce; |
2676 |
+ msa_size = drv_data->msa_size; |
2677 |
+ |
2678 |
++ ath10k_snoc_quirks_init(ar); |
2679 |
++ |
2680 |
+ ret = ath10k_snoc_resource_init(ar); |
2681 |
+ if (ret) { |
2682 |
+ ath10k_warn(ar, "failed to initialize resource: %d\n", ret); |
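
[Editor's note: the snoc probe half of this ath10k change reads a boolean DT property once and records it as a flag bit; the QMI code earlier in the patch then tests the bit to choose wlfw_host_cap_8bit_req_msg_v01_ei over the default element table. The probe side, reduced to a sketch with an illustrative bit name:

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/of.h>

#define MY_QUIRK_8BIT_HOST_CAP	0	/* flag bit number, illustrative */

/* Turn a boolean DT property into a driver flag bit that later code
 * can check with test_bit(). */
static void quirks_init(struct device *dev, unsigned long *flags)
{
	if (of_property_read_bool(dev->of_node,
				  "qcom,snoc-host-cap-8bit-quirk"))
		set_bit(MY_QUIRK_8BIT_HOST_CAP, flags);
}
]
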
2683 |
+diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h |
2684 |
+index d62f53501fbb..9db823e46314 100644 |
2685 |
+--- a/drivers/net/wireless/ath/ath10k/snoc.h |
2686 |
++++ b/drivers/net/wireless/ath/ath10k/snoc.h |
2687 |
+@@ -63,6 +63,7 @@ enum ath10k_snoc_flags { |
2688 |
+ ATH10K_SNOC_FLAG_REGISTERED, |
2689 |
+ ATH10K_SNOC_FLAG_UNREGISTERING, |
2690 |
+ ATH10K_SNOC_FLAG_RECOVERY, |
2691 |
++ ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, |
2692 |
+ }; |
2693 |
+ |
2694 |
+ struct ath10k_snoc { |
2695 |
+diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c |
2696 |
+index e1420f67f776..9ebe74ee4aef 100644 |
2697 |
+--- a/drivers/net/wireless/ath/ath10k/usb.c |
2698 |
++++ b/drivers/net/wireless/ath/ath10k/usb.c |
2699 |
+@@ -38,6 +38,10 @@ ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe) |
2700 |
+ struct ath10k_urb_context *urb_context = NULL; |
2701 |
+ unsigned long flags; |
2702 |
+ |
2703 |
++ /* bail if this pipe is not initialized */ |
2704 |
++ if (!pipe->ar_usb) |
2705 |
++ return NULL; |
2706 |
++ |
2707 |
+ spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); |
2708 |
+ if (!list_empty(&pipe->urb_list_head)) { |
2709 |
+ urb_context = list_first_entry(&pipe->urb_list_head, |
2710 |
+@@ -55,6 +59,10 @@ static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe, |
2711 |
+ { |
2712 |
+ unsigned long flags; |
2713 |
+ |
2714 |
++ /* bail if this pipe is not initialized */ |
2715 |
++ if (!pipe->ar_usb) |
2716 |
++ return; |
2717 |
++ |
2718 |
+ spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); |
2719 |
+ |
2720 |
+ pipe->urb_cnt++; |
2721 |
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c |
2722 |
+index 2b29bf4730f6..b4885a700296 100644 |
2723 |
+--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c |
2724 |
++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c |
2725 |
+@@ -4183,7 +4183,7 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah) |
2726 |
+ |
2727 |
+ static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah) |
2728 |
+ { |
2729 |
+- u32 data, ko, kg; |
2730 |
++ u32 data = 0, ko, kg; |
2731 |
+ |
2732 |
+ if (!AR_SREV_9462_20_OR_LATER(ah)) |
2733 |
+ return; |
2734 |
+diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c |
2735 |
+index 145ddf3f0a45..604dba4f18af 100644 |
2736 |
+--- a/drivers/nfc/port100.c |
2737 |
++++ b/drivers/nfc/port100.c |
2738 |
+@@ -783,7 +783,7 @@ static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out, |
2739 |
+ |
2740 |
+ rc = port100_submit_urb_for_ack(dev, GFP_KERNEL); |
2741 |
+ if (rc) |
2742 |
+- usb_unlink_urb(dev->out_urb); |
2743 |
++ usb_kill_urb(dev->out_urb); |
2744 |
+ |
2745 |
+ exit: |
2746 |
+ mutex_unlock(&dev->out_urb_lock); |
2747 |
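
The port100 hunk above swaps usb_unlink_urb() for usb_kill_urb(): unlink only requests asynchronous cancellation and returns immediately, while kill also sleeps until the URB's completion handler has finished, so the caller can safely tear down state afterwards. A loose userspace analogy using POSIX threads (a sketch only; URBs are not threads, but the cancel versus cancel-and-wait distinction is the same):

    /* Build with: cc -pthread demo.c */
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static void *urb_in_flight(void *arg)
    {
        (void)arg;
        sleep(1);                   /* stands in for the transfer */
        puts("completion ran");
        return NULL;
    }

    int main(void)
    {
        pthread_t urb;

        pthread_create(&urb, NULL, urb_in_flight, NULL);
        pthread_cancel(urb);        /* "usb_unlink_urb": request only */
        pthread_join(urb, NULL);    /* "usb_kill_urb" additionally waits here */
        puts("now safe to free what the handler used");
        return 0;
    }
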
+diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c |
2748 |
+index 04bc488385e6..4af012968cb6 100644 |
2749 |
+--- a/drivers/staging/comedi/drivers/usbduxfast.c |
2750 |
++++ b/drivers/staging/comedi/drivers/usbduxfast.c |
2751 |
+@@ -1,6 +1,6 @@ |
2752 |
+ // SPDX-License-Identifier: GPL-2.0+ |
2753 |
+ /* |
2754 |
+- * Copyright (C) 2004-2014 Bernd Porr, mail@××××××××××××.uk |
2755 |
++ * Copyright (C) 2004-2019 Bernd Porr, mail@××××××××××××.uk |
2756 |
+ */ |
2757 |
+ |
2758 |
+ /* |
2759 |
+@@ -8,7 +8,7 @@ |
2760 |
+ * Description: University of Stirling USB DAQ & INCITE Technology Limited |
2761 |
+ * Devices: [ITL] USB-DUX-FAST (usbduxfast) |
2762 |
+ * Author: Bernd Porr <mail@××××××××××××.uk> |
2763 |
+- * Updated: 10 Oct 2014 |
2764 |
++ * Updated: 16 Nov 2019 |
2765 |
+ * Status: stable |
2766 |
+ */ |
2767 |
+ |
2768 |
+@@ -22,6 +22,7 @@ |
2769 |
+ * |
2770 |
+ * |
2771 |
+ * Revision history: |
2772 |
++ * 1.0: Fixed a rounding error in usbduxfast_ai_cmdtest |
2773 |
+ * 0.9: Dropping the first data packet which seems to be from the last transfer. |
2774 |
+ * Buffer overflows in the FX2 are handed over to comedi. |
2775 |
+ * 0.92: Dropping now 4 packets. The quad buffer has to be emptied. |
2776 |
+@@ -350,6 +351,7 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev, |
2777 |
+ struct comedi_cmd *cmd) |
2778 |
+ { |
2779 |
+ int err = 0; |
2780 |
++ int err2 = 0; |
2781 |
+ unsigned int steps; |
2782 |
+ unsigned int arg; |
2783 |
+ |
2784 |
+@@ -399,11 +401,16 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev, |
2785 |
+ */ |
2786 |
+ steps = (cmd->convert_arg * 30) / 1000; |
2787 |
+ if (cmd->chanlist_len != 1) |
2788 |
+- err |= comedi_check_trigger_arg_min(&steps, |
2789 |
+- MIN_SAMPLING_PERIOD); |
2790 |
+- err |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD); |
2791 |
+- arg = (steps * 1000) / 30; |
2792 |
+- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg); |
2793 |
++ err2 |= comedi_check_trigger_arg_min(&steps, |
2794 |
++ MIN_SAMPLING_PERIOD); |
2795 |
++ else |
2796 |
++ err2 |= comedi_check_trigger_arg_min(&steps, 1); |
2797 |
++ err2 |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD); |
2798 |
++ if (err2) { |
2799 |
++ err |= err2; |
2800 |
++ arg = (steps * 1000) / 30; |
2801 |
++ err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg); |
2802 |
++ } |
2803 |
+ |
2804 |
+ if (cmd->stop_src == TRIG_COUNT) |
2805 |
+ err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1); |
2806 |
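
The cmdtest hunk above converts the requested convert period to 30 MHz clock ticks and back with integer division, so any period that is not a multiple of 1000/30 ns is truncated; the rewritten code therefore only rewrites cmd->convert_arg when a range check actually clamped the tick count, and enforces a minimum of one tick for single-channel lists. A minimal standalone sketch of the truncation, reusing the hunk's arithmetic (not driver code):

    #include <stdio.h>

    int main(void)
    {
        unsigned int convert_arg = 125;                 /* requested period in ns */
        unsigned int steps = (convert_arg * 30) / 1000; /* 30 MHz ticks, truncated */
        unsigned int back = (steps * 1000) / 30;        /* ticks converted back to ns */

        /* prints "125 ns -> 3 ticks -> 100 ns": the round trip loses 25 ns */
        printf("%u ns -> %u ticks -> %u ns\n", convert_arg, steps, back);
        return 0;
    }
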
+diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c |
2807 |
+index ac92725458b5..ba1eaabc7796 100644 |
2808 |
+--- a/drivers/usb/misc/appledisplay.c |
2809 |
++++ b/drivers/usb/misc/appledisplay.c |
2810 |
+@@ -164,7 +164,12 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd) |
2811 |
+ 0, |
2812 |
+ pdata->msgdata, 2, |
2813 |
+ ACD_USB_TIMEOUT); |
2814 |
+- brightness = pdata->msgdata[1]; |
2815 |
++ if (retval < 2) { |
2816 |
++ if (retval >= 0) |
2817 |
++ retval = -EMSGSIZE; |
2818 |
++ } else { |
2819 |
++ brightness = pdata->msgdata[1]; |
2820 |
++ } |
2821 |
+ mutex_unlock(&pdata->sysfslock); |
2822 |
+ |
2823 |
+ if (retval < 0) |
2824 |
+@@ -299,6 +304,7 @@ error: |
2825 |
+ if (pdata) { |
2826 |
+ if (pdata->urb) { |
2827 |
+ usb_kill_urb(pdata->urb); |
2828 |
++ cancel_delayed_work_sync(&pdata->work); |
2829 |
+ if (pdata->urbdata) |
2830 |
+ usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN, |
2831 |
+ pdata->urbdata, pdata->urb->transfer_dma); |
2832 |
+diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c |
2833 |
+index 34e6cd6f40d3..87067c3d6109 100644 |
2834 |
+--- a/drivers/usb/misc/chaoskey.c |
2835 |
++++ b/drivers/usb/misc/chaoskey.c |
2836 |
+@@ -384,13 +384,17 @@ static int _chaoskey_fill(struct chaoskey *dev) |
2837 |
+ !dev->reading, |
2838 |
+ (started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) ); |
2839 |
+ |
2840 |
+- if (result < 0) |
2841 |
++ if (result < 0) { |
2842 |
++ usb_kill_urb(dev->urb); |
2843 |
+ goto out; |
2844 |
++ } |
2845 |
+ |
2846 |
+- if (result == 0) |
2847 |
++ if (result == 0) { |
2848 |
+ result = -ETIMEDOUT; |
2849 |
+- else |
2850 |
++ usb_kill_urb(dev->urb); |
2851 |
++ } else { |
2852 |
+ result = dev->valid; |
2853 |
++ } |
2854 |
+ out: |
2855 |
+ /* Let the device go back to sleep eventually */ |
2856 |
+ usb_autopm_put_interface(dev->interface); |
2857 |
+@@ -526,7 +530,21 @@ static int chaoskey_suspend(struct usb_interface *interface, |
2858 |
+ |
2859 |
+ static int chaoskey_resume(struct usb_interface *interface) |
2860 |
+ { |
2861 |
++ struct chaoskey *dev; |
2862 |
++ struct usb_device *udev = interface_to_usbdev(interface); |
2863 |
++ |
2864 |
+ usb_dbg(interface, "resume"); |
2865 |
++ dev = usb_get_intfdata(interface); |
2866 |
++ |
2867 |
++ /* |
2868 |
++ * We may have lost power. |
2869 |
++ * In that case, a device that needs a long time |
2870 |
++ * for its first requests needs the extended |
2871 |
++ * timeout again. |
2872 |
++ */ |
2873 |
++ if (le16_to_cpu(udev->descriptor.idVendor) == ALEA_VENDOR_ID) |
2874 |
++ dev->reads_started = false; |
2875 |
++ |
2876 |
+ return 0; |
2877 |
+ } |
2878 |
+ #else |
2879 |
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c |
2880 |
+index 979bef9bfb6b..f5143eedbc48 100644 |
2881 |
+--- a/drivers/usb/serial/cp210x.c |
2882 |
++++ b/drivers/usb/serial/cp210x.c |
2883 |
+@@ -125,6 +125,7 @@ static const struct usb_device_id id_table[] = { |
2884 |
+ { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ |
2885 |
+ { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ |
2886 |
+ { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ |
2887 |
++ { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */ |
2888 |
+ { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */ |
2889 |
+ { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ |
2890 |
+ { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ |
2891 |
+diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c |
2892 |
+index 18110225d506..2ec4eeacebc7 100644 |
2893 |
+--- a/drivers/usb/serial/mos7720.c |
2894 |
++++ b/drivers/usb/serial/mos7720.c |
2895 |
+@@ -1833,10 +1833,6 @@ static int mos7720_startup(struct usb_serial *serial) |
2896 |
+ product = le16_to_cpu(serial->dev->descriptor.idProduct); |
2897 |
+ dev = serial->dev; |
2898 |
+ |
2899 |
+- /* setting configuration feature to one */ |
2900 |
+- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), |
2901 |
+- (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000); |
2902 |
+- |
2903 |
+ if (product == MOSCHIP_DEVICE_ID_7715) { |
2904 |
+ struct urb *urb = serial->port[0]->interrupt_in_urb; |
2905 |
+ |
2906 |
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c |
2907 |
+index a698d46ba773..ab4bf8d6d7df 100644 |
2908 |
+--- a/drivers/usb/serial/mos7840.c |
2909 |
++++ b/drivers/usb/serial/mos7840.c |
2910 |
+@@ -119,11 +119,15 @@ |
2911 |
+ /* This driver also supports |
2912 |
+ * ATEN UC2324 device using Moschip MCS7840 |
2913 |
+ * ATEN UC2322 device using Moschip MCS7820 |
2914 |
++ * MOXA UPort 2210 device using Moschip MCS7820 |
2915 |
+ */ |
2916 |
+ #define USB_VENDOR_ID_ATENINTL 0x0557 |
2917 |
+ #define ATENINTL_DEVICE_ID_UC2324 0x2011 |
2918 |
+ #define ATENINTL_DEVICE_ID_UC2322 0x7820 |
2919 |
+ |
2920 |
++#define USB_VENDOR_ID_MOXA 0x110a |
2921 |
++#define MOXA_DEVICE_ID_2210 0x2210 |
2922 |
++ |
2923 |
+ /* Interrupt Routine Defines */ |
2924 |
+ |
2925 |
+ #define SERIAL_IIR_RLS 0x06 |
2926 |
+@@ -195,6 +199,7 @@ static const struct usb_device_id id_table[] = { |
2927 |
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, |
2928 |
+ {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, |
2929 |
+ {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, |
2930 |
++ {USB_DEVICE(USB_VENDOR_ID_MOXA, MOXA_DEVICE_ID_2210)}, |
2931 |
+ {} /* terminating entry */ |
2932 |
+ }; |
2933 |
+ MODULE_DEVICE_TABLE(usb, id_table); |
2934 |
+@@ -2020,6 +2025,7 @@ static int mos7840_probe(struct usb_serial *serial, |
2935 |
+ const struct usb_device_id *id) |
2936 |
+ { |
2937 |
+ u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); |
2938 |
++ u16 vid = le16_to_cpu(serial->dev->descriptor.idVendor); |
2939 |
+ u8 *buf; |
2940 |
+ int device_type; |
2941 |
+ |
2942 |
+@@ -2030,6 +2036,11 @@ static int mos7840_probe(struct usb_serial *serial, |
2943 |
+ goto out; |
2944 |
+ } |
2945 |
+ |
2946 |
++ if (vid == USB_VENDOR_ID_MOXA && product == MOXA_DEVICE_ID_2210) { |
2947 |
++ device_type = MOSCHIP_DEVICE_ID_7820; |
2948 |
++ goto out; |
2949 |
++ } |
2950 |
++ |
2951 |
+ buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL); |
2952 |
+ if (!buf) |
2953 |
+ return -ENOMEM; |
2954 |
+@@ -2279,11 +2290,6 @@ out: |
2955 |
+ goto error; |
2956 |
+ } else |
2957 |
+ dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status); |
2958 |
+- |
2959 |
+- /* setting configuration feature to one */ |
2960 |
+- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), |
2961 |
+- 0x03, 0x00, 0x01, 0x00, NULL, 0x00, |
2962 |
+- MOS_WDR_TIMEOUT); |
2963 |
+ } |
2964 |
+ return 0; |
2965 |
+ error: |
2966 |
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
2967 |
+index 06ab016be0b6..e9491d400a24 100644 |
2968 |
+--- a/drivers/usb/serial/option.c |
2969 |
++++ b/drivers/usb/serial/option.c |
2970 |
+@@ -197,6 +197,7 @@ static void option_instat_callback(struct urb *urb); |
2971 |
+ #define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */ |
2972 |
+ |
2973 |
+ #define DELL_PRODUCT_5821E 0x81d7 |
2974 |
++#define DELL_PRODUCT_5821E_ESIM 0x81e0 |
2975 |
+ |
2976 |
+ #define KYOCERA_VENDOR_ID 0x0c88 |
2977 |
+ #define KYOCERA_PRODUCT_KPC650 0x17da |
2978 |
+@@ -1044,6 +1045,8 @@ static const struct usb_device_id option_ids[] = { |
2979 |
+ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) }, |
2980 |
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E), |
2981 |
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, |
2982 |
++ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM), |
2983 |
++ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, |
2984 |
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ |
2985 |
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, |
2986 |
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, |
2987 |
+@@ -1990,6 +1993,10 @@ static const struct usb_device_id option_ids[] = { |
2988 |
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) }, |
2989 |
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) }, |
2990 |
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) }, |
2991 |
++ { USB_DEVICE(0x0489, 0xe0b4), /* Foxconn T77W968 */ |
2992 |
++ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, |
2993 |
++ { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */ |
2994 |
++ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, |
2995 |
+ { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */ |
2996 |
+ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, |
2997 |
+ { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */ |
2998 |
+diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig |
2999 |
+index 2f86b28fa3da..7bbae7a08642 100644 |
3000 |
+--- a/drivers/usb/usbip/Kconfig |
3001 |
++++ b/drivers/usb/usbip/Kconfig |
3002 |
+@@ -4,6 +4,7 @@ config USBIP_CORE |
3003 |
+ tristate "USB/IP support" |
3004 |
+ depends on NET |
3005 |
+ select USB_COMMON |
3006 |
++ select SGL_ALLOC |
3007 |
+ ---help--- |
3008 |
+ This enables pushing USB packets over IP to allow remote |
3009 |
+ machines direct access to USB devices. It provides the |
3010 |
+diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c |
3011 |
+index 66edfeea68fe..e2b019532234 100644 |
3012 |
+--- a/drivers/usb/usbip/stub_rx.c |
3013 |
++++ b/drivers/usb/usbip/stub_rx.c |
3014 |
+@@ -470,18 +470,50 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, |
3015 |
+ if (pipe == -1) |
3016 |
+ return; |
3017 |
+ |
3018 |
++ /* |
3019 |
++ * Smatch reported the error case where use_sg is true and buf_len is 0. |
3020 |
++ * In this case, it adds SDEV_EVENT_ERROR_MALLOC, and stub_priv will be |
3021 |
++ * released by the stub event handler and the connection will be shut down. |
3022 |
++ */ |
3023 |
+ priv = stub_priv_alloc(sdev, pdu); |
3024 |
+ if (!priv) |
3025 |
+ return; |
3026 |
+ |
3027 |
+ buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length; |
3028 |
+ |
3029 |
++ if (use_sg && !buf_len) { |
3030 |
++ dev_err(&udev->dev, "sg buffer with zero length\n"); |
3031 |
++ goto err_malloc; |
3032 |
++ } |
3033 |
++ |
3034 |
+ /* allocate urb transfer buffer, if needed */ |
3035 |
+ if (buf_len) { |
3036 |
+ if (use_sg) { |
3037 |
+ sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents); |
3038 |
+ if (!sgl) |
3039 |
+ goto err_malloc; |
3040 |
++ |
3041 |
++ /* Check if the server's HCD supports SG */ |
3042 |
++ if (!udev->bus->sg_tablesize) { |
3043 |
++ /* |
3044 |
++ * If the server's HCD doesn't support SG, break |
3045 |
++ * a single SG request into several URBs and map |
3046 |
++ * each SG list entry to the corresponding URB |
3047 |
++ * buffer. The previously allocated SG list is |
3048 |
++ * stored in priv->sgl (if the server's HCD |
3049 |
++ * supports SG, the SG list is stored only in |
3050 |
++ * urb->sg) and it is used as an indicator that |
3051 |
++ * the server split a single SG request into |
3052 |
++ * several URBs. Later, priv->sgl is used by |
3053 |
++ * stub_complete() and stub_send_ret_submit() to |
3054 |
++ * reassemble the divided URBs. |
3055 |
++ */ |
3056 |
++ support_sg = 0; |
3057 |
++ num_urbs = nents; |
3058 |
++ priv->completed_urbs = 0; |
3059 |
++ pdu->u.cmd_submit.transfer_flags &= |
3060 |
++ ~URB_DMA_MAP_SG; |
3061 |
++ } |
3062 |
+ } else { |
3063 |
+ buffer = kzalloc(buf_len, GFP_KERNEL); |
3064 |
+ if (!buffer) |
3065 |
+@@ -489,24 +521,6 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, |
3066 |
+ } |
3067 |
+ } |
3068 |
+ |
3069 |
+- /* Check if the server's HCD supports SG */ |
3070 |
+- if (use_sg && !udev->bus->sg_tablesize) { |
3071 |
+- /* |
3072 |
+- * If the server's HCD doesn't support SG, break a single SG |
3073 |
+- * request into several URBs and map each SG list entry to |
3074 |
+- * corresponding URB buffer. The previously allocated SG |
3075 |
+- * list is stored in priv->sgl (If the server's HCD support SG, |
3076 |
+- * SG list is stored only in urb->sg) and it is used as an |
3077 |
+- * indicator that the server split single SG request into |
3078 |
+- * several URBs. Later, priv->sgl is used by stub_complete() and |
3079 |
+- * stub_send_ret_submit() to reassemble the divied URBs. |
3080 |
+- */ |
3081 |
+- support_sg = 0; |
3082 |
+- num_urbs = nents; |
3083 |
+- priv->completed_urbs = 0; |
3084 |
+- pdu->u.cmd_submit.transfer_flags &= ~URB_DMA_MAP_SG; |
3085 |
+- } |
3086 |
+- |
3087 |
+ /* allocate urb array */ |
3088 |
+ priv->num_urbs = num_urbs; |
3089 |
+ priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL); |
3090 |
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c |
3091 |
+index 6a50e1d0529c..d91fe6dd172c 100644 |
3092 |
+--- a/drivers/vhost/vsock.c |
3093 |
++++ b/drivers/vhost/vsock.c |
3094 |
+@@ -102,7 +102,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, |
3095 |
+ struct iov_iter iov_iter; |
3096 |
+ unsigned out, in; |
3097 |
+ size_t nbytes; |
3098 |
+- size_t len; |
3099 |
++ size_t iov_len, payload_len; |
3100 |
+ int head; |
3101 |
+ |
3102 |
+ spin_lock_bh(&vsock->send_pkt_list_lock); |
3103 |
+@@ -147,8 +147,24 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, |
3104 |
+ break; |
3105 |
+ } |
3106 |
+ |
3107 |
+- len = iov_length(&vq->iov[out], in); |
3108 |
+- iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len); |
3109 |
++ iov_len = iov_length(&vq->iov[out], in); |
3110 |
++ if (iov_len < sizeof(pkt->hdr)) { |
3111 |
++ virtio_transport_free_pkt(pkt); |
3112 |
++ vq_err(vq, "Buffer len [%zu] too small\n", iov_len); |
3113 |
++ break; |
3114 |
++ } |
3115 |
++ |
3116 |
++ iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len); |
3117 |
++ payload_len = pkt->len - pkt->off; |
3118 |
++ |
3119 |
++ /* If the packet is greater than the space available in the |
3120 |
++ * buffer, we split it using multiple buffers. |
3121 |
++ */ |
3122 |
++ if (payload_len > iov_len - sizeof(pkt->hdr)) |
3123 |
++ payload_len = iov_len - sizeof(pkt->hdr); |
3124 |
++ |
3125 |
++ /* Set the correct length in the header */ |
3126 |
++ pkt->hdr.len = cpu_to_le32(payload_len); |
3127 |
+ |
3128 |
+ nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter); |
3129 |
+ if (nbytes != sizeof(pkt->hdr)) { |
3130 |
+@@ -157,33 +173,47 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, |
3131 |
+ break; |
3132 |
+ } |
3133 |
+ |
3134 |
+- nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter); |
3135 |
+- if (nbytes != pkt->len) { |
3136 |
++ nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len, |
3137 |
++ &iov_iter); |
3138 |
++ if (nbytes != payload_len) { |
3139 |
+ virtio_transport_free_pkt(pkt); |
3140 |
+ vq_err(vq, "Faulted on copying pkt buf\n"); |
3141 |
+ break; |
3142 |
+ } |
3143 |
+ |
3144 |
+- vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len); |
3145 |
++ vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len); |
3146 |
+ added = true; |
3147 |
+ |
3148 |
+- if (pkt->reply) { |
3149 |
+- int val; |
3150 |
+- |
3151 |
+- val = atomic_dec_return(&vsock->queued_replies); |
3152 |
+- |
3153 |
+- /* Do we have resources to resume tx processing? */ |
3154 |
+- if (val + 1 == tx_vq->num) |
3155 |
+- restart_tx = true; |
3156 |
+- } |
3157 |
+- |
3158 |
+ /* Deliver to monitoring devices all correctly transmitted |
3159 |
+ * packets. |
3160 |
+ */ |
3161 |
+ virtio_transport_deliver_tap_pkt(pkt); |
3162 |
+ |
3163 |
+- total_len += pkt->len; |
3164 |
+- virtio_transport_free_pkt(pkt); |
3165 |
++ pkt->off += payload_len; |
3166 |
++ total_len += payload_len; |
3167 |
++ |
3168 |
++ /* If we didn't send all the payload we can requeue the packet |
3169 |
++ * to send it with the next available buffer. |
3170 |
++ */ |
3171 |
++ if (pkt->off < pkt->len) { |
3172 |
++ spin_lock_bh(&vsock->send_pkt_list_lock); |
3173 |
++ list_add(&pkt->list, &vsock->send_pkt_list); |
3174 |
++ spin_unlock_bh(&vsock->send_pkt_list_lock); |
3175 |
++ } else { |
3176 |
++ if (pkt->reply) { |
3177 |
++ int val; |
3178 |
++ |
3179 |
++ val = atomic_dec_return(&vsock->queued_replies); |
3180 |
++ |
3181 |
++ /* Do we have resources to resume tx |
3182 |
++ * processing? |
3183 |
++ */ |
3184 |
++ if (val + 1 == tx_vq->num) |
3185 |
++ restart_tx = true; |
3186 |
++ } |
3187 |
++ |
3188 |
++ virtio_transport_free_pkt(pkt); |
3189 |
++ } |
3190 |
+ } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len))); |
3191 |
+ if (added) |
3192 |
+ vhost_signal(&vsock->dev, vq); |
3193 |
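
The loop rewritten above copies at most iov_len - sizeof(pkt->hdr) bytes per guest buffer, patches the header length to match, advances pkt->off, and requeues the packet until the whole payload has gone out. A standalone model of that splitting logic (a simplified stand-in struct, not the kernel's virtio_vsock_pkt):

    #include <stdio.h>

    struct pkt {
        size_t len;     /* total payload length */
        size_t off;     /* how much has been sent so far */
    };

    /* Copy as much of the remaining payload as one buffer can hold. */
    static size_t send_chunk(struct pkt *p, size_t buf_space)
    {
        size_t payload_len = p->len - p->off;

        if (payload_len > buf_space)
            payload_len = buf_space;    /* split across buffers */
        p->off += payload_len;
        return payload_len;
    }

    int main(void)
    {
        struct pkt p = { .len = 10, .off = 0 };

        /* 10 payload bytes through 4-byte buffers: chunks of 4, 4, 2 */
        while (p.off < p.len)
            printf("sent %zu, off now %zu\n", send_chunk(&p, 4), p.off);
        return 0;
    }
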
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c |
3194 |
+index 226fbb995fb0..b9f8355947d5 100644 |
3195 |
+--- a/drivers/virtio/virtio_balloon.c |
3196 |
++++ b/drivers/virtio/virtio_balloon.c |
3197 |
+@@ -820,7 +820,7 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker, |
3198 |
+ unsigned long count; |
3199 |
+ |
3200 |
+ count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE; |
3201 |
+- count += vb->num_free_page_blocks >> VIRTIO_BALLOON_FREE_PAGE_ORDER; |
3202 |
++ count += vb->num_free_page_blocks << VIRTIO_BALLOON_FREE_PAGE_ORDER; |
3203 |
+ |
3204 |
+ return count; |
3205 |
+ } |
3206 |
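
In the shrinker hunk above, num_free_page_blocks counts blocks of 2^VIRTIO_BALLOON_FREE_PAGE_ORDER pages each, so converting blocks to pages needs a left shift; the old right shift divided instead of multiplied and made the shrinker under-report reclaimable pages. A one-file illustration (the order below is an assumed example value, not necessarily the kernel's):

    #include <stdio.h>

    #define FREE_PAGE_ORDER 10  /* assumed: 2^10 = 1024 pages per block */

    int main(void)
    {
        unsigned long blocks = 3;

        printf("blocks << order = %lu pages (correct)\n",
               blocks << FREE_PAGE_ORDER);  /* 3072 */
        printf("blocks >> order = %lu pages (the old bug)\n",
               blocks >> FREE_PAGE_ORDER);  /* 0 */
        return 0;
    }
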
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c |
3207 |
+index a8041e451e9e..867c7ebd3f10 100644 |
3208 |
+--- a/drivers/virtio/virtio_ring.c |
3209 |
++++ b/drivers/virtio/virtio_ring.c |
3210 |
+@@ -583,7 +583,7 @@ unmap_release: |
3211 |
+ kfree(desc); |
3212 |
+ |
3213 |
+ END_USE(vq); |
3214 |
+- return -EIO; |
3215 |
++ return -ENOMEM; |
3216 |
+ } |
3217 |
+ |
3218 |
+ static bool virtqueue_kick_prepare_split(struct virtqueue *_vq) |
3219 |
+@@ -1085,7 +1085,7 @@ unmap_release: |
3220 |
+ kfree(desc); |
3221 |
+ |
3222 |
+ END_USE(vq); |
3223 |
+- return -EIO; |
3224 |
++ return -ENOMEM; |
3225 |
+ } |
3226 |
+ |
3227 |
+ static inline int virtqueue_add_packed(struct virtqueue *_vq, |
3228 |
+diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c |
3229 |
+index d8507972ee13..90c830e3758e 100644 |
3230 |
+--- a/fs/ocfs2/xattr.c |
3231 |
++++ b/fs/ocfs2/xattr.c |
3232 |
+@@ -1490,6 +1490,18 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc, |
3233 |
+ return loc->xl_ops->xlo_check_space(loc, xi); |
3234 |
+ } |
3235 |
+ |
3236 |
++static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash) |
3237 |
++{ |
3238 |
++ loc->xl_ops->xlo_add_entry(loc, name_hash); |
3239 |
++ loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash); |
3240 |
++ /* |
3241 |
++ * We can't leave the new entry's xe_name_offset at zero or |
3242 |
++ * add_namevalue() will go nuts. We set it to the size of our |
3243 |
++ * storage so that it can never be less than any other entry. |
3244 |
++ */ |
3245 |
++ loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size); |
3246 |
++} |
3247 |
++ |
3248 |
+ static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc, |
3249 |
+ struct ocfs2_xattr_info *xi) |
3250 |
+ { |
3251 |
+@@ -2121,31 +2133,29 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc, |
3252 |
+ if (rc) |
3253 |
+ goto out; |
3254 |
+ |
3255 |
+- if (!loc->xl_entry) { |
3256 |
+- rc = -EINVAL; |
3257 |
+- goto out; |
3258 |
+- } |
3259 |
+- |
3260 |
+- if (ocfs2_xa_can_reuse_entry(loc, xi)) { |
3261 |
+- orig_value_size = loc->xl_entry->xe_value_size; |
3262 |
+- rc = ocfs2_xa_reuse_entry(loc, xi, ctxt); |
3263 |
+- if (rc) |
3264 |
+- goto out; |
3265 |
+- goto alloc_value; |
3266 |
+- } |
3267 |
++ if (loc->xl_entry) { |
3268 |
++ if (ocfs2_xa_can_reuse_entry(loc, xi)) { |
3269 |
++ orig_value_size = loc->xl_entry->xe_value_size; |
3270 |
++ rc = ocfs2_xa_reuse_entry(loc, xi, ctxt); |
3271 |
++ if (rc) |
3272 |
++ goto out; |
3273 |
++ goto alloc_value; |
3274 |
++ } |
3275 |
+ |
3276 |
+- if (!ocfs2_xattr_is_local(loc->xl_entry)) { |
3277 |
+- orig_clusters = ocfs2_xa_value_clusters(loc); |
3278 |
+- rc = ocfs2_xa_value_truncate(loc, 0, ctxt); |
3279 |
+- if (rc) { |
3280 |
+- mlog_errno(rc); |
3281 |
+- ocfs2_xa_cleanup_value_truncate(loc, |
3282 |
+- "overwriting", |
3283 |
+- orig_clusters); |
3284 |
+- goto out; |
3285 |
++ if (!ocfs2_xattr_is_local(loc->xl_entry)) { |
3286 |
++ orig_clusters = ocfs2_xa_value_clusters(loc); |
3287 |
++ rc = ocfs2_xa_value_truncate(loc, 0, ctxt); |
3288 |
++ if (rc) { |
3289 |
++ mlog_errno(rc); |
3290 |
++ ocfs2_xa_cleanup_value_truncate(loc, |
3291 |
++ "overwriting", |
3292 |
++ orig_clusters); |
3293 |
++ goto out; |
3294 |
++ } |
3295 |
+ } |
3296 |
+- } |
3297 |
+- ocfs2_xa_wipe_namevalue(loc); |
3298 |
++ ocfs2_xa_wipe_namevalue(loc); |
3299 |
++ } else |
3300 |
++ ocfs2_xa_add_entry(loc, name_hash); |
3301 |
+ |
3302 |
+ /* |
3303 |
+ * If we get here, we have a blank entry. Fill it. We grow our |
3304 |
+diff --git a/include/net/tls.h b/include/net/tls.h |
3305 |
+index bd1ef1a915e9..9bf04a74a6cb 100644 |
3306 |
+--- a/include/net/tls.h |
3307 |
++++ b/include/net/tls.h |
3308 |
+@@ -364,6 +364,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx); |
3309 |
+ void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx); |
3310 |
+ void tls_sw_strparser_done(struct tls_context *tls_ctx); |
3311 |
+ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); |
3312 |
++int tls_sw_sendpage_locked(struct sock *sk, struct page *page, |
3313 |
++ int offset, size_t size, int flags); |
3314 |
+ int tls_sw_sendpage(struct sock *sk, struct page *page, |
3315 |
+ int offset, size_t size, int flags); |
3316 |
+ void tls_sw_cancel_work_tx(struct tls_context *tls_ctx); |
3317 |
+diff --git a/kernel/fork.c b/kernel/fork.c |
3318 |
+index 8bbd39585301..eafb81c99921 100644 |
3319 |
+--- a/kernel/fork.c |
3320 |
++++ b/kernel/fork.c |
3321 |
+@@ -1713,11 +1713,11 @@ static void pidfd_show_fdinfo(struct seq_file *m, struct file *f) |
3322 |
+ /* |
3323 |
+ * Poll support for process exit notification. |
3324 |
+ */ |
3325 |
+-static unsigned int pidfd_poll(struct file *file, struct poll_table_struct *pts) |
3326 |
++static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts) |
3327 |
+ { |
3328 |
+ struct task_struct *task; |
3329 |
+ struct pid *pid = file->private_data; |
3330 |
+- int poll_flags = 0; |
3331 |
++ __poll_t poll_flags = 0; |
3332 |
+ |
3333 |
+ poll_wait(file, &pid->wait_pidfd, pts); |
3334 |
+ |
3335 |
+@@ -1729,7 +1729,7 @@ static unsigned int pidfd_poll(struct file *file, struct poll_table_struct *pts) |
3336 |
+ * group, then poll(2) should block, similar to the wait(2) family. |
3337 |
+ */ |
3338 |
+ if (!task || (task->exit_state && thread_group_empty(task))) |
3339 |
+- poll_flags = POLLIN | POLLRDNORM; |
3340 |
++ poll_flags = EPOLLIN | EPOLLRDNORM; |
3341 |
+ rcu_read_unlock(); |
3342 |
+ |
3343 |
+ return poll_flags; |
3344 |
+diff --git a/kernel/futex.c b/kernel/futex.c |
3345 |
+index 6d50728ef2e7..ff7035567f9f 100644 |
3346 |
+--- a/kernel/futex.c |
3347 |
++++ b/kernel/futex.c |
3348 |
+@@ -3454,11 +3454,16 @@ err_unlock: |
3349 |
+ return ret; |
3350 |
+ } |
3351 |
+ |
3352 |
++/* Constants for the pending_op argument of handle_futex_death */ |
3353 |
++#define HANDLE_DEATH_PENDING true |
3354 |
++#define HANDLE_DEATH_LIST false |
3355 |
++ |
3356 |
+ /* |
3357 |
+ * Process a futex-list entry, check whether it's owned by the |
3358 |
+ * dying task, and do notification if so: |
3359 |
+ */ |
3360 |
+-static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) |
3361 |
++static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, |
3362 |
++ bool pi, bool pending_op) |
3363 |
+ { |
3364 |
+ u32 uval, uninitialized_var(nval), mval; |
3365 |
+ int err; |
3366 |
+@@ -3471,6 +3476,42 @@ retry: |
3367 |
+ if (get_user(uval, uaddr)) |
3368 |
+ return -1; |
3369 |
+ |
3370 |
++ /* |
3371 |
++ * Special case for regular (non PI) futexes. The unlock path in |
3372 |
++ * user space has two race scenarios: |
3373 |
++ * |
3374 |
++ * 1. The unlock path releases the user space futex value and |
3375 |
++ * before it can execute the futex() syscall to wake up |
3376 |
++ * waiters it is killed. |
3377 |
++ * |
3378 |
++ * 2. A woken up waiter is killed before it can acquire the |
3379 |
++ * futex in user space. |
3380 |
++ * |
3381 |
++ * In both cases the TID validation below prevents a wakeup of |
3382 |
++ * potential waiters which can cause these waiters to block |
3383 |
++ * forever. |
3384 |
++ * |
3385 |
++ * In both cases the following conditions are met: |
3386 |
++ * |
3387 |
++ * 1) task->robust_list->list_op_pending != NULL |
3388 |
++ * @pending_op == true |
3389 |
++ * 2) User space futex value == 0 |
3390 |
++ * 3) Regular futex: @pi == false |
3391 |
++ * |
3392 |
++ * If these conditions are met, it is safe to attempt waking up a |
3393 |
++ * potential waiter without touching the user space futex value and |
3394 |
++ * trying to set the OWNER_DIED bit. The user space futex value is |
3395 |
++ * uncontended and the rest of the user space mutex state is |
3396 |
++ * consistent, so a woken waiter will just take over the |
3397 |
++ * uncontended futex. Setting the OWNER_DIED bit would create |
3398 |
++ * inconsistent state and malfunction of the user space owner died |
3399 |
++ * handling. |
3400 |
++ */ |
3401 |
++ if (pending_op && !pi && !uval) { |
3402 |
++ futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); |
3403 |
++ return 0; |
3404 |
++ } |
3405 |
++ |
3406 |
+ if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr)) |
3407 |
+ return 0; |
3408 |
+ |
3409 |
+@@ -3590,10 +3631,11 @@ void exit_robust_list(struct task_struct *curr) |
3410 |
+ * A pending lock might already be on the list, so |
3411 |
+ * don't process it twice: |
3412 |
+ */ |
3413 |
+- if (entry != pending) |
3414 |
++ if (entry != pending) { |
3415 |
+ if (handle_futex_death((void __user *)entry + futex_offset, |
3416 |
+- curr, pi)) |
3417 |
++ curr, pi, HANDLE_DEATH_LIST)) |
3418 |
+ return; |
3419 |
++ } |
3420 |
+ if (rc) |
3421 |
+ return; |
3422 |
+ entry = next_entry; |
3423 |
+@@ -3607,9 +3649,10 @@ void exit_robust_list(struct task_struct *curr) |
3424 |
+ cond_resched(); |
3425 |
+ } |
3426 |
+ |
3427 |
+- if (pending) |
3428 |
++ if (pending) { |
3429 |
+ handle_futex_death((void __user *)pending + futex_offset, |
3430 |
+- curr, pip); |
3431 |
++ curr, pip, HANDLE_DEATH_PENDING); |
3432 |
++ } |
3433 |
+ } |
3434 |
+ |
3435 |
+ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, |
3436 |
+@@ -3786,7 +3829,8 @@ void compat_exit_robust_list(struct task_struct *curr) |
3437 |
+ if (entry != pending) { |
3438 |
+ void __user *uaddr = futex_uaddr(entry, futex_offset); |
3439 |
+ |
3440 |
+- if (handle_futex_death(uaddr, curr, pi)) |
3441 |
++ if (handle_futex_death(uaddr, curr, pi, |
3442 |
++ HANDLE_DEATH_LIST)) |
3443 |
+ return; |
3444 |
+ } |
3445 |
+ if (rc) |
3446 |
+@@ -3805,7 +3849,7 @@ void compat_exit_robust_list(struct task_struct *curr) |
3447 |
+ if (pending) { |
3448 |
+ void __user *uaddr = futex_uaddr(pending, futex_offset); |
3449 |
+ |
3450 |
+- handle_futex_death(uaddr, curr, pip); |
3451 |
++ handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING); |
3452 |
+ } |
3453 |
+ } |
3454 |
+ |
3455 |
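
The new pending_op special case above fires only for an uncontended (value 0), non-PI futex named in list_op_pending: waking a waiter is safe there, while setting OWNER_DIED would corrupt otherwise consistent user-space mutex state. A standalone sketch of just that decision logic (the wake itself and the kernel's atomics are elided):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FUTEX_TID_MASK 0x3fffffff

    static const char *robust_action(uint32_t uval, uint32_t dead_tid,
                                     bool pi, bool pending_op)
    {
        /* dying task was mid lock/unlock on an uncontended non-PI futex */
        if (pending_op && !pi && !uval)
            return "wake one waiter, leave the futex word untouched";

        if ((uval & FUTEX_TID_MASK) != dead_tid)
            return "not owned by the dying task: skip";

        return "set OWNER_DIED in the futex word and wake a waiter";
    }

    int main(void)
    {
        /* the race the fix targets: value already released, op pending */
        puts(robust_action(0, 1234, false, true));
        /* ordinary robust-list entry still owned by the dying task */
        puts(robust_action(1234, 1234, false, false));
        return 0;
    }
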
+diff --git a/mm/ksm.c b/mm/ksm.c |
3456 |
+index 3dc4346411e4..4d5998ca31ae 100644 |
3457 |
+--- a/mm/ksm.c |
3458 |
++++ b/mm/ksm.c |
3459 |
+@@ -885,13 +885,13 @@ static int remove_stable_node(struct stable_node *stable_node) |
3460 |
+ return 0; |
3461 |
+ } |
3462 |
+ |
3463 |
+- if (WARN_ON_ONCE(page_mapped(page))) { |
3464 |
+- /* |
3465 |
+- * This should not happen: but if it does, just refuse to let |
3466 |
+- * merge_across_nodes be switched - there is no need to panic. |
3467 |
+- */ |
3468 |
+- err = -EBUSY; |
3469 |
+- } else { |
3470 |
++ /* |
3471 |
++ * Page could be still mapped if this races with __mmput() running in |
3472 |
++ * between ksm_exit() and exit_mmap(). Just refuse to let |
3473 |
++ * merge_across_nodes/max_page_sharing be switched. |
3474 |
++ */ |
3475 |
++ err = -EBUSY; |
3476 |
++ if (!page_mapped(page)) { |
3477 |
+ /* |
3478 |
+ * The stable node did not yet appear stale to get_ksm_page(), |
3479 |
+ * since that allows for an unmapped ksm page to be recognized |
3480 |
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c |
3481 |
+index f363fed0db4f..8431897acb54 100644 |
3482 |
+--- a/mm/memory_hotplug.c |
3483 |
++++ b/mm/memory_hotplug.c |
3484 |
+@@ -331,7 +331,7 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, |
3485 |
+ unsigned long end_pfn) |
3486 |
+ { |
3487 |
+ for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) { |
3488 |
+- if (unlikely(!pfn_valid(start_pfn))) |
3489 |
++ if (unlikely(!pfn_to_online_page(start_pfn))) |
3490 |
+ continue; |
3491 |
+ |
3492 |
+ if (unlikely(pfn_to_nid(start_pfn) != nid)) |
3493 |
+@@ -356,7 +356,7 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, |
3494 |
+ /* pfn is the end pfn of a memory section. */ |
3495 |
+ pfn = end_pfn - 1; |
3496 |
+ for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) { |
3497 |
+- if (unlikely(!pfn_valid(pfn))) |
3498 |
++ if (unlikely(!pfn_to_online_page(pfn))) |
3499 |
+ continue; |
3500 |
+ |
3501 |
+ if (unlikely(pfn_to_nid(pfn) != nid)) |
3502 |
+@@ -415,7 +415,7 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, |
3503 |
+ */ |
3504 |
+ pfn = zone_start_pfn; |
3505 |
+ for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) { |
3506 |
+- if (unlikely(!pfn_valid(pfn))) |
3507 |
++ if (unlikely(!pfn_to_online_page(pfn))) |
3508 |
+ continue; |
3509 |
+ |
3510 |
+ if (page_zone(pfn_to_page(pfn)) != zone) |
3511 |
+@@ -471,6 +471,16 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn, |
3512 |
+ struct pglist_data *pgdat = zone->zone_pgdat; |
3513 |
+ unsigned long flags; |
3514 |
+ |
3515 |
++#ifdef CONFIG_ZONE_DEVICE |
3516 |
++ /* |
3517 |
++ * Zone shrinking code cannot properly deal with ZONE_DEVICE. So |
3518 |
++ * we will not try to shrink the zones - which is okay as |
3519 |
++ * set_zone_contiguous() cannot deal with ZONE_DEVICE either way. |
3520 |
++ */ |
3521 |
++ if (zone_idx(zone) == ZONE_DEVICE) |
3522 |
++ return; |
3523 |
++#endif |
3524 |
++ |
3525 |
+ pgdat_resize_lock(zone->zone_pgdat, &flags); |
3526 |
+ shrink_zone_span(zone, start_pfn, start_pfn + nr_pages); |
3527 |
+ update_pgdat_span(pgdat); |
3528 |
+diff --git a/mm/slub.c b/mm/slub.c |
3529 |
+index d2445dd1c7ed..f24ea152cdbb 100644 |
3530 |
+--- a/mm/slub.c |
3531 |
++++ b/mm/slub.c |
3532 |
+@@ -2648,6 +2648,17 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, |
3533 |
+ return p; |
3534 |
+ } |
3535 |
+ |
3536 |
++/* |
3537 |
++ * If the object has been wiped upon free, make sure it's fully initialized by |
3538 |
++ * zeroing out freelist pointer. |
3539 |
++ */ |
3540 |
++static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, |
3541 |
++ void *obj) |
3542 |
++{ |
3543 |
++ if (unlikely(slab_want_init_on_free(s)) && obj) |
3544 |
++ memset((void *)((char *)obj + s->offset), 0, sizeof(void *)); |
3545 |
++} |
3546 |
++ |
3547 |
+ /* |
3548 |
+ * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) |
3549 |
+ * have the fastpath folded into their functions. So no function call |
3550 |
+@@ -2736,12 +2747,8 @@ redo: |
3551 |
+ prefetch_freepointer(s, next_object); |
3552 |
+ stat(s, ALLOC_FASTPATH); |
3553 |
+ } |
3554 |
+- /* |
3555 |
+- * If the object has been wiped upon free, make sure it's fully |
3556 |
+- * initialized by zeroing out freelist pointer. |
3557 |
+- */ |
3558 |
+- if (unlikely(slab_want_init_on_free(s)) && object) |
3559 |
+- memset(object + s->offset, 0, sizeof(void *)); |
3560 |
++ |
3561 |
++ maybe_wipe_obj_freeptr(s, object); |
3562 |
+ |
3563 |
+ if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object) |
3564 |
+ memset(object, 0, s->object_size); |
3565 |
+@@ -3155,10 +3162,13 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, |
3566 |
+ goto error; |
3567 |
+ |
3568 |
+ c = this_cpu_ptr(s->cpu_slab); |
3569 |
++ maybe_wipe_obj_freeptr(s, p[i]); |
3570 |
++ |
3571 |
+ continue; /* goto for-loop */ |
3572 |
+ } |
3573 |
+ c->freelist = get_freepointer(s, object); |
3574 |
+ p[i] = object; |
3575 |
++ maybe_wipe_obj_freeptr(s, p[i]); |
3576 |
+ } |
3577 |
+ c->tid = next_tid(c->tid); |
3578 |
+ local_irq_enable(); |
3579 |
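
maybe_wipe_obj_freeptr() exists because init_on_free zeroes an object when it is freed, but SLUB then stores its freelist pointer inside the free object at s->offset, so that one word is dirty again by the time the object is reallocated. A userspace model of the idea (offset and contents are illustrative, not SLUB's real layout):

    #include <stdio.h>
    #include <string.h>

    #define OBJ_SIZE       32
    #define FREEPTR_OFFSET 0    /* assumed: freelist pointer at object start */

    int main(void)
    {
        unsigned char obj[OBJ_SIZE];
        void *next_free = (void *)0x1000;   /* arbitrary example pointer */

        memset(obj, 0, sizeof(obj));        /* init_on_free: wiped at free */
        memcpy(obj + FREEPTR_OFFSET, &next_free,
               sizeof(next_free));          /* allocator links the free object */

        /* the fix: re-zero the freelist word before handing the object out */
        memset(obj + FREEPTR_OFFSET, 0, sizeof(void *));
        printf("first byte after alloc: %#x\n", obj[0]);   /* 0 */
        return 0;
    }
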
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c |
3580 |
+index 868a768f7300..60987be7fdaa 100644 |
3581 |
+--- a/net/core/rtnetlink.c |
3582 |
++++ b/net/core/rtnetlink.c |
3583 |
+@@ -2195,6 +2195,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) |
3584 |
+ if (tb[IFLA_VF_MAC]) { |
3585 |
+ struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); |
3586 |
+ |
3587 |
++ if (ivm->vf >= INT_MAX) |
3588 |
++ return -EINVAL; |
3589 |
+ err = -EOPNOTSUPP; |
3590 |
+ if (ops->ndo_set_vf_mac) |
3591 |
+ err = ops->ndo_set_vf_mac(dev, ivm->vf, |
3592 |
+@@ -2206,6 +2208,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) |
3593 |
+ if (tb[IFLA_VF_VLAN]) { |
3594 |
+ struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); |
3595 |
+ |
3596 |
++ if (ivv->vf >= INT_MAX) |
3597 |
++ return -EINVAL; |
3598 |
+ err = -EOPNOTSUPP; |
3599 |
+ if (ops->ndo_set_vf_vlan) |
3600 |
+ err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, |
3601 |
+@@ -2238,6 +2242,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) |
3602 |
+ if (len == 0) |
3603 |
+ return -EINVAL; |
3604 |
+ |
3605 |
++ if (ivvl[0]->vf >= INT_MAX) |
3606 |
++ return -EINVAL; |
3607 |
+ err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan, |
3608 |
+ ivvl[0]->qos, ivvl[0]->vlan_proto); |
3609 |
+ if (err < 0) |
3610 |
+@@ -2248,6 +2254,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) |
3611 |
+ struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); |
3612 |
+ struct ifla_vf_info ivf; |
3613 |
+ |
3614 |
++ if (ivt->vf >= INT_MAX) |
3615 |
++ return -EINVAL; |
3616 |
+ err = -EOPNOTSUPP; |
3617 |
+ if (ops->ndo_get_vf_config) |
3618 |
+ err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); |
3619 |
+@@ -2266,6 +2274,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) |
3620 |
+ if (tb[IFLA_VF_RATE]) { |
3621 |
+ struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); |
3622 |
+ |
3623 |
++ if (ivt->vf >= INT_MAX) |
3624 |
++ return -EINVAL; |
3625 |
+ err = -EOPNOTSUPP; |
3626 |
+ if (ops->ndo_set_vf_rate) |
3627 |
+ err = ops->ndo_set_vf_rate(dev, ivt->vf, |
3628 |
+@@ -2278,6 +2288,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) |
3629 |
+ if (tb[IFLA_VF_SPOOFCHK]) { |
3630 |
+ struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); |
3631 |
+ |
3632 |
++ if (ivs->vf >= INT_MAX) |
3633 |
++ return -EINVAL; |
3634 |
+ err = -EOPNOTSUPP; |
3635 |
+ if (ops->ndo_set_vf_spoofchk) |
3636 |
+ err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, |
3637 |
+@@ -2289,6 +2301,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) |
3638 |
+ if (tb[IFLA_VF_LINK_STATE]) { |
3639 |
+ struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); |
3640 |
+ |
3641 |
++ if (ivl->vf >= INT_MAX) |
3642 |
++ return -EINVAL; |
3643 |
+ err = -EOPNOTSUPP; |
3644 |
+ if (ops->ndo_set_vf_link_state) |
3645 |
+ err = ops->ndo_set_vf_link_state(dev, ivl->vf, |
3646 |
+@@ -2302,6 +2316,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) |
3647 |
+ |
3648 |
+ err = -EOPNOTSUPP; |
3649 |
+ ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); |
3650 |
++ if (ivrssq_en->vf >= INT_MAX) |
3651 |
++ return -EINVAL; |
3652 |
+ if (ops->ndo_set_vf_rss_query_en) |
3653 |
+ err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, |
3654 |
+ ivrssq_en->setting); |
3655 |
+@@ -2312,6 +2328,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) |
3656 |
+ if (tb[IFLA_VF_TRUST]) { |
3657 |
+ struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]); |
3658 |
+ |
3659 |
++ if (ivt->vf >= INT_MAX) |
3660 |
++ return -EINVAL; |
3661 |
+ err = -EOPNOTSUPP; |
3662 |
+ if (ops->ndo_set_vf_trust) |
3663 |
+ err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting); |
3664 |
+@@ -2322,15 +2340,18 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) |
3665 |
+ if (tb[IFLA_VF_IB_NODE_GUID]) { |
3666 |
+ struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]); |
3667 |
+ |
3668 |
++ if (ivt->vf >= INT_MAX) |
3669 |
++ return -EINVAL; |
3670 |
+ if (!ops->ndo_set_vf_guid) |
3671 |
+ return -EOPNOTSUPP; |
3672 |
+- |
3673 |
+ return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID); |
3674 |
+ } |
3675 |
+ |
3676 |
+ if (tb[IFLA_VF_IB_PORT_GUID]) { |
3677 |
+ struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]); |
3678 |
+ |
3679 |
++ if (ivt->vf >= INT_MAX) |
3680 |
++ return -EINVAL; |
3681 |
+ if (!ops->ndo_set_vf_guid) |
3682 |
+ return -EOPNOTSUPP; |
3683 |
+ |
3684 |
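
Every hunk above adds the same guard because the VF index arrives from the netlink attribute as a u32 while the ndo_set_vf_* driver hooks all take an int; without the check, an index at or above INT_MAX becomes a negative int inside the driver. A small standalone illustration (the helper is hypothetical, standing in for any ndo hook):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for ops->ndo_set_vf_mac() and friends, which take an int */
    static int fake_ndo_set_vf(int vf)
    {
        return vf < 0 ? -1 : 0;     /* a negative index is nonsense */
    }

    int main(void)
    {
        uint32_t vf = 0xffffffffu;  /* value taken from the netlink message */

        if (vf >= INT_MAX) {        /* the check the patch adds */
            puts("rejected with -EINVAL");
            return 0;
        }
        /* unreachable here, but without the check this wraps to vf = -1 */
        return fake_ndo_set_vf((int)vf);
    }
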
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c |
3685 |
+index 0b980e841927..c45b7d738cd1 100644 |
3686 |
+--- a/net/ipv4/sysctl_net_ipv4.c |
3687 |
++++ b/net/ipv4/sysctl_net_ipv4.c |
3688 |
+@@ -1028,7 +1028,7 @@ static struct ctl_table ipv4_net_table[] = { |
3689 |
+ .mode = 0644, |
3690 |
+ .proc_handler = proc_fib_multipath_hash_policy, |
3691 |
+ .extra1 = SYSCTL_ZERO, |
3692 |
+- .extra2 = SYSCTL_ONE, |
3693 |
++ .extra2 = &two, |
3694 |
+ }, |
3695 |
+ #endif |
3696 |
+ { |
3697 |
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c |
3698 |
+index 2b25a0de0364..56c8c990b6f2 100644 |
3699 |
+--- a/net/ipv6/route.c |
3700 |
++++ b/net/ipv6/route.c |
3701 |
+@@ -634,7 +634,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh) |
3702 |
+ * Router Reachability Probe MUST be rate-limited |
3703 |
+ * to no more than one per minute. |
3704 |
+ */ |
3705 |
+- if (fib6_nh->fib_nh_gw_family) |
3706 |
++ if (!fib6_nh->fib_nh_gw_family) |
3707 |
+ return; |
3708 |
+ |
3709 |
+ nh_gw = &fib6_nh->fib_nh_gw6; |
3710 |
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c |
3711 |
+index cdfaa79382a2..b5bc631b96b7 100644 |
3712 |
+--- a/net/sched/act_pedit.c |
3713 |
++++ b/net/sched/act_pedit.c |
3714 |
+@@ -43,7 +43,7 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla, |
3715 |
+ int err = -EINVAL; |
3716 |
+ int rem; |
3717 |
+ |
3718 |
+- if (!nla || !n) |
3719 |
++ if (!nla) |
3720 |
+ return NULL; |
3721 |
+ |
3722 |
+ keys_ex = kcalloc(n, sizeof(*k), GFP_KERNEL); |
3723 |
+@@ -170,6 +170,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, |
3724 |
+ } |
3725 |
+ |
3726 |
+ parm = nla_data(pattr); |
3727 |
++ if (!parm->nkeys) { |
3728 |
++ NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed"); |
3729 |
++ return -EINVAL; |
3730 |
++ } |
3731 |
+ ksize = parm->nkeys * sizeof(struct tc_pedit_key); |
3732 |
+ if (nla_len(pattr) < sizeof(*parm) + ksize) { |
3733 |
+ NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid"); |
3734 |
+@@ -183,12 +187,6 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, |
3735 |
+ index = parm->index; |
3736 |
+ err = tcf_idr_check_alloc(tn, &index, a, bind); |
3737 |
+ if (!err) { |
3738 |
+- if (!parm->nkeys) { |
3739 |
+- tcf_idr_cleanup(tn, index); |
3740 |
+- NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed"); |
3741 |
+- ret = -EINVAL; |
3742 |
+- goto out_free; |
3743 |
+- } |
3744 |
+ ret = tcf_idr_create(tn, index, est, a, |
3745 |
+ &act_pedit_ops, bind, false); |
3746 |
+ if (ret) { |
3747 |
+diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c |
3748 |
+index 2f83a79f76aa..d55669e14741 100644 |
3749 |
+--- a/net/sched/act_tunnel_key.c |
3750 |
++++ b/net/sched/act_tunnel_key.c |
3751 |
+@@ -135,6 +135,10 @@ static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst, |
3752 |
+ if (opt_len < 0) |
3753 |
+ return opt_len; |
3754 |
+ opts_len += opt_len; |
3755 |
++ if (opts_len > IP_TUNNEL_OPTS_MAX) { |
3756 |
++ NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size"); |
3757 |
++ return -EINVAL; |
3758 |
++ } |
3759 |
+ if (dst) { |
3760 |
+ dst_len -= opt_len; |
3761 |
+ dst += opt_len; |
3762 |
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c |
3763 |
+index 76bebe516194..92c0766d7f4f 100644 |
3764 |
+--- a/net/sched/sch_taprio.c |
3765 |
++++ b/net/sched/sch_taprio.c |
3766 |
+@@ -842,7 +842,7 @@ static int taprio_parse_mqprio_opt(struct net_device *dev, |
3767 |
+ } |
3768 |
+ |
3769 |
+ /* Verify priority mapping uses valid tcs */ |
3770 |
+- for (i = 0; i < TC_BITMASK + 1; i++) { |
3771 |
++ for (i = 0; i <= TC_BITMASK; i++) { |
3772 |
+ if (qopt->prio_tc_map[i] >= qopt->num_tc) { |
3773 |
+ NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping"); |
3774 |
+ return -EINVAL; |
3775 |
+@@ -1014,6 +1014,26 @@ static void setup_txtime(struct taprio_sched *q, |
3776 |
+ } |
3777 |
+ } |
3778 |
+ |
3779 |
++static int taprio_mqprio_cmp(const struct net_device *dev, |
3780 |
++ const struct tc_mqprio_qopt *mqprio) |
3781 |
++{ |
3782 |
++ int i; |
3783 |
++ |
3784 |
++ if (!mqprio || mqprio->num_tc != dev->num_tc) |
3785 |
++ return -1; |
3786 |
++ |
3787 |
++ for (i = 0; i < mqprio->num_tc; i++) |
3788 |
++ if (dev->tc_to_txq[i].count != mqprio->count[i] || |
3789 |
++ dev->tc_to_txq[i].offset != mqprio->offset[i]) |
3790 |
++ return -1; |
3791 |
++ |
3792 |
++ for (i = 0; i <= TC_BITMASK; i++) |
3793 |
++ if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i]) |
3794 |
++ return -1; |
3795 |
++ |
3796 |
++ return 0; |
3797 |
++} |
3798 |
++ |
3799 |
+ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, |
3800 |
+ struct netlink_ext_ack *extack) |
3801 |
+ { |
3802 |
+@@ -1065,6 +1085,10 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, |
3803 |
+ admin = rcu_dereference(q->admin_sched); |
3804 |
+ rcu_read_unlock(); |
3805 |
+ |
3806 |
++ /* no changes - no new mqprio settings */ |
3807 |
++ if (!taprio_mqprio_cmp(dev, mqprio)) |
3808 |
++ mqprio = NULL; |
3809 |
++ |
3810 |
+ if (mqprio && (oper || admin)) { |
3811 |
+ NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported"); |
3812 |
+ err = -ENOTSUPP; |
3813 |
+@@ -1132,7 +1156,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, |
3814 |
+ mqprio->offset[i]); |
3815 |
+ |
3816 |
+ /* Always use supplied priority mappings */ |
3817 |
+- for (i = 0; i < TC_BITMASK + 1; i++) |
3818 |
++ for (i = 0; i <= TC_BITMASK; i++) |
3819 |
+ netdev_set_prio_tc_map(dev, i, |
3820 |
+ mqprio->prio_tc_map[i]); |
3821 |
+ } |
3822 |
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c |
3823 |
+index 9313dd51023a..ac2dfe36022d 100644 |
3824 |
+--- a/net/tls/tls_main.c |
3825 |
++++ b/net/tls/tls_main.c |
3826 |
+@@ -852,6 +852,7 @@ static int __init tls_register(void) |
3827 |
+ { |
3828 |
+ tls_sw_proto_ops = inet_stream_ops; |
3829 |
+ tls_sw_proto_ops.splice_read = tls_sw_splice_read; |
3830 |
++ tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked; |
3831 |
+ |
3832 |
+ #ifdef CONFIG_TLS_DEVICE |
3833 |
+ tls_device_init(); |
3834 |
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c |
3835 |
+index 881f06f465f8..41b2bdc05ba3 100644 |
3836 |
+--- a/net/tls/tls_sw.c |
3837 |
++++ b/net/tls/tls_sw.c |
3838 |
+@@ -1204,6 +1204,17 @@ sendpage_end: |
3839 |
+ return copied ? copied : ret; |
3840 |
+ } |
3841 |
+ |
3842 |
++int tls_sw_sendpage_locked(struct sock *sk, struct page *page, |
3843 |
++ int offset, size_t size, int flags) |
3844 |
++{ |
3845 |
++ if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | |
3846 |
++ MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY | |
3847 |
++ MSG_NO_SHARED_FRAGS)) |
3848 |
++ return -ENOTSUPP; |
3849 |
++ |
3850 |
++ return tls_sw_do_sendpage(sk, page, offset, size, flags); |
3851 |
++} |
3852 |
++ |
3853 |
+ int tls_sw_sendpage(struct sock *sk, struct page *page, |
3854 |
+ int offset, size_t size, int flags) |
3855 |
+ { |
3856 |
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c |
3857 |
+index 058d59fceddd..279d838784e5 100644 |
3858 |
+--- a/net/vmw_vsock/virtio_transport_common.c |
3859 |
++++ b/net/vmw_vsock/virtio_transport_common.c |
3860 |
+@@ -91,8 +91,17 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque) |
3861 |
+ struct virtio_vsock_pkt *pkt = opaque; |
3862 |
+ struct af_vsockmon_hdr *hdr; |
3863 |
+ struct sk_buff *skb; |
3864 |
++ size_t payload_len; |
3865 |
++ void *payload_buf; |
3866 |
+ |
3867 |
+- skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + pkt->len, |
3868 |
++ /* A packet could be split to fit the RX buffer, so we can retrieve |
3869 |
++ * the payload length from the header and the buffer pointer, taking |
3870 |
++ * the offset in the original packet into account. |
3871 |
++ */ |
3872 |
++ payload_len = le32_to_cpu(pkt->hdr.len); |
3873 |
++ payload_buf = pkt->buf + pkt->off; |
3874 |
++ |
3875 |
++ skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + payload_len, |
3876 |
+ GFP_ATOMIC); |
3877 |
+ if (!skb) |
3878 |
+ return NULL; |
3879 |
+@@ -132,8 +141,8 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque) |
3880 |
+ |
3881 |
+ skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr)); |
3882 |
+ |
3883 |
+- if (pkt->len) { |
3884 |
+- skb_put_data(skb, pkt->buf, pkt->len); |
3885 |
++ if (payload_len) { |
3886 |
++ skb_put_data(skb, payload_buf, payload_len); |
3887 |
+ } |
3888 |
+ |
3889 |
+ return skb; |
3890 |
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c |
3891 |
+index 90cd59a1869a..bd1cffb2ab50 100644 |
3892 |
+--- a/sound/usb/mixer.c |
3893 |
++++ b/sound/usb/mixer.c |
3894 |
+@@ -2930,6 +2930,9 @@ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer, |
3895 |
+ continue; |
3896 |
+ |
3897 |
+ iface = usb_ifnum_to_if(dev, intf); |
3898 |
++ if (!iface) |
3899 |
++ continue; |
3900 |
++ |
3901 |
+ num = iface->num_altsetting; |
3902 |
+ |
3903 |
+ if (num < 2) |
3904 |
+diff --git a/tools/gpio/Build b/tools/gpio/Build |
3905 |
+index 620c1937d957..4141f35837db 100644 |
3906 |
+--- a/tools/gpio/Build |
3907 |
++++ b/tools/gpio/Build |
3908 |
+@@ -1,3 +1,4 @@ |
3909 |
++gpio-utils-y += gpio-utils.o |
3910 |
+ lsgpio-y += lsgpio.o gpio-utils.o |
3911 |
+ gpio-hammer-y += gpio-hammer.o gpio-utils.o |
3912 |
+ gpio-event-mon-y += gpio-event-mon.o gpio-utils.o |
3913 |
+diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile |
3914 |
+index 1178d302757e..6080de58861f 100644 |
3915 |
+--- a/tools/gpio/Makefile |
3916 |
++++ b/tools/gpio/Makefile |
3917 |
+@@ -35,11 +35,15 @@ $(OUTPUT)include/linux/gpio.h: ../../include/uapi/linux/gpio.h |
3918 |
+ |
3919 |
+ prepare: $(OUTPUT)include/linux/gpio.h |
3920 |
+ |
3921 |
+GPIO_UTILS_IN := $(OUTPUT)gpio-utils-in.o |
3922 |
++$(GPIO_UTILS_IN): prepare FORCE |
3923 |
++ $(Q)$(MAKE) $(build)=gpio-utils |
3924 |
++ |
3925 |
+ # |
3926 |
+ # lsgpio |
3927 |
+ # |
3928 |
+ LSGPIO_IN := $(OUTPUT)lsgpio-in.o |
3929 |
+-$(LSGPIO_IN): prepare FORCE |
3930 |
++$(LSGPIO_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o |
3931 |
+ $(Q)$(MAKE) $(build)=lsgpio |
3932 |
+ $(OUTPUT)lsgpio: $(LSGPIO_IN) |
3933 |
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@ |
3934 |
+@@ -48,7 +52,7 @@ $(OUTPUT)lsgpio: $(LSGPIO_IN) |
3935 |
+ # gpio-hammer |
3936 |
+ # |
3937 |
+ GPIO_HAMMER_IN := $(OUTPUT)gpio-hammer-in.o |
3938 |
+-$(GPIO_HAMMER_IN): prepare FORCE |
3939 |
++$(GPIO_HAMMER_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o |
3940 |
+ $(Q)$(MAKE) $(build)=gpio-hammer |
3941 |
+ $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN) |
3942 |
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@ |
3943 |
+@@ -57,7 +61,7 @@ $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN) |
3944 |
+ # gpio-event-mon |
3945 |
+ # |
3946 |
+ GPIO_EVENT_MON_IN := $(OUTPUT)gpio-event-mon-in.o |
3947 |
+-$(GPIO_EVENT_MON_IN): prepare FORCE |
3948 |
++$(GPIO_EVENT_MON_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o |
3949 |
+ $(Q)$(MAKE) $(build)=gpio-event-mon |
3950 |
+ $(OUTPUT)gpio-event-mon: $(GPIO_EVENT_MON_IN) |
3951 |
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@ |
3952 |
+diff --git a/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk |
3953 |
+index b02a36b2c14f..a42015b305f4 100644 |
3954 |
+--- a/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk |
3955 |
++++ b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk |
3956 |
+@@ -69,7 +69,7 @@ BEGIN { |
3957 |
+ |
3958 |
+ lprefix1_expr = "\\((66|!F3)\\)" |
3959 |
+ lprefix2_expr = "\\(F3\\)" |
3960 |
+- lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)" |
3961 |
++ lprefix3_expr = "\\((F2|!F3|66&F2)\\)" |
3962 |
+ lprefix_expr = "\\((66|F2|F3)\\)" |
3963 |
+ max_lprefix = 4 |
3964 |
+ |
3965 |
+@@ -257,7 +257,7 @@ function convert_operands(count,opnd, i,j,imm,mod) |
3966 |
+ return add_flags(imm, mod) |
3967 |
+ } |
3968 |
+ |
3969 |
+-/^[0-9a-f]+\:/ { |
3970 |
++/^[0-9a-f]+:/ { |
3971 |
+ if (NR == 1) |
3972 |
+ next |
3973 |
+ # get index |
3974 |
+diff --git a/tools/testing/selftests/x86/mov_ss_trap.c b/tools/testing/selftests/x86/mov_ss_trap.c |
3975 |
+index 3c3a022654f3..6da0ac3f0135 100644 |
3976 |
+--- a/tools/testing/selftests/x86/mov_ss_trap.c |
3977 |
++++ b/tools/testing/selftests/x86/mov_ss_trap.c |
3978 |
+@@ -257,7 +257,8 @@ int main() |
3979 |
+ err(1, "sigaltstack"); |
3980 |
+ sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND | SA_ONSTACK); |
3981 |
+ nr = SYS_getpid; |
3982 |
+- asm volatile ("mov %[ss], %%ss; SYSENTER" : "+a" (nr) |
3983 |
++ /* Clear EBP first to make sure we segfault cleanly. */ |
3984 |
++ asm volatile ("xorl %%ebp, %%ebp; mov %[ss], %%ss; SYSENTER" : "+a" (nr) |
3985 |
+ : [ss] "m" (ss) : "flags", "rcx" |
3986 |
+ #ifdef __x86_64__ |
3987 |
+ , "r11" |
3988 |
+diff --git a/tools/testing/selftests/x86/sigreturn.c b/tools/testing/selftests/x86/sigreturn.c |
3989 |
+index 3e49a7873f3e..57c4f67f16ef 100644 |
3990 |
+--- a/tools/testing/selftests/x86/sigreturn.c |
3991 |
++++ b/tools/testing/selftests/x86/sigreturn.c |
3992 |
+@@ -451,6 +451,19 @@ static void sigusr1(int sig, siginfo_t *info, void *ctx_void) |
3993 |
+ ctx->uc_mcontext.gregs[REG_SP] = (unsigned long)0x8badf00d5aadc0deULL; |
3994 |
+ ctx->uc_mcontext.gregs[REG_CX] = 0; |
3995 |
+ |
3996 |
++#ifdef __i386__ |
3997 |
++ /* |
3998 |
++ * Make sure the kernel doesn't inadvertently use DS or ES-relative |
3999 |
++ * accesses in a region where user DS or ES is loaded. |
4000 |
++ * |
4001 |
++ * Skip this for 64-bit builds because long mode doesn't care about |
4002 |
++ * DS and ES and skipping it increases test coverage a little bit, |
4003 |
++ * since 64-bit kernels can still run the 32-bit build. |
4004 |
++ */ |
4005 |
++ ctx->uc_mcontext.gregs[REG_DS] = 0; |
4006 |
++ ctx->uc_mcontext.gregs[REG_ES] = 0; |
4007 |
++#endif |
4008 |
++ |
4009 |
+ memcpy(&requested_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t)); |
4010 |
+ requested_regs[REG_CX] = *ssptr(ctx); /* The asm code does this. */ |
4011 |
+ |
4012 |
+diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c |
4013 |
+index 2813aa821c82..d1d8ba2a4a40 100644 |
4014 |
+--- a/tools/usb/usbip/libsrc/usbip_host_common.c |
4015 |
++++ b/tools/usb/usbip/libsrc/usbip_host_common.c |
4016 |
+@@ -57,7 +57,7 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev) |
4017 |
+ } |
4018 |
+ |
4019 |
+ value = atoi(status); |
4020 |
+- |
4021 |
++ close(fd); |
4022 |
+ return value; |
4023 |
+ } |
4024 |
+ |