commit:     71d470c013d9ba1388d32e220ff5cf87fb8eb6cd
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Jun 6 11:00:51 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Jun 6 11:00:51 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=71d470c0

Linux patch 5.18.2

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1001_linux-5.18.2.patch | 2830 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2834 insertions(+)

diff --git a/0000_README b/0000_README
index 62ab5b31..561c7140 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch: 1000_linux-5.18.1.patch
 From: http://www.kernel.org
 Desc: Linux 5.18.1
 
+Patch: 1001_linux-5.18.2.patch
+From: http://www.kernel.org
+Desc: Linux 5.18.2
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-5.18.2.patch b/1001_linux-5.18.2.patch
new file mode 100644
index 00000000..609efb82
--- /dev/null
+++ b/1001_linux-5.18.2.patch
@@ -0,0 +1,2830 @@
+diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
+index fb496b2ebfd38..92d3432460d75 100644
+--- a/Documentation/process/submitting-patches.rst
++++ b/Documentation/process/submitting-patches.rst
+@@ -77,7 +77,7 @@ as you intend it to.
+ 
+ The maintainer will thank you if you write your patch description in a
+ form which can be easily pulled into Linux's source code management
+-system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`.
++system, ``git``, as a "commit log". See :ref:`the_canonical_patch_format`.
+ 
+ Solve only one problem per patch. If your description starts to get
+ long, that's a sign that you probably need to split up your patch.
+diff --git a/Makefile b/Makefile
+index 2bb168acb8f43..6b1d606a92f6f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 18
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Superb Owl
+ 
+diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi
+index c8f1c324a6c26..26f2be2d9faa2 100644
+--- a/arch/arm/boot/dts/s5pv210-aries.dtsi
++++ b/arch/arm/boot/dts/s5pv210-aries.dtsi
+@@ -895,7 +895,7 @@
+ 		device-wakeup-gpios = <&gpg3 4 GPIO_ACTIVE_HIGH>;
+ 		interrupt-parent = <&gph2>;
+ 		interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+-		interrupt-names = "host-wake";
++		interrupt-names = "host-wakeup";
+ 	};
+ };
+ 
+diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
+index 45c993dd05f5e..36f2314c58e5f 100644
+--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
++++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
+@@ -361,13 +361,15 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
+ static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
+ 		struct kvm *kvm, unsigned long *gfn)
+ {
+-	struct kvmppc_uvmem_slot *p;
++	struct kvmppc_uvmem_slot *p = NULL, *iter;
+ 	bool ret = false;
+ 	unsigned long i;
+ 
+-	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list)
+-		if (*gfn >= p->base_pfn && *gfn < p->base_pfn + p->nr_pfns)
++	list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
++		if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
++			p = iter;
+ 			break;
++		}
+ 	if (!p)
+ 		return ret;
+ 	/*
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index f78e2b3501a19..35f222aa66bfc 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -382,6 +382,103 @@ do { \
+ 
+ #endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+ 
++#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
++#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \
++	bool success; \
++	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
++	__typeof__(*(_ptr)) __old = *_old; \
++	__typeof__(*(_ptr)) __new = (_new); \
++	asm_volatile_goto("\n" \
++		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
++		     _ASM_EXTABLE_UA(1b, %l[label]) \
++		     : CC_OUT(z) (success), \
++		       [ptr] "+m" (*_ptr), \
++		       [old] "+a" (__old) \
++		     : [new] ltype (__new) \
++		     : "memory" \
++		     : label); \
++	if (unlikely(!success)) \
++		*_old = __old; \
++	likely(success); })
++
++#ifdef CONFIG_X86_32
++#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \
++	bool success; \
++	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
++	__typeof__(*(_ptr)) __old = *_old; \
++	__typeof__(*(_ptr)) __new = (_new); \
++	asm_volatile_goto("\n" \
++		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
++		     _ASM_EXTABLE_UA(1b, %l[label]) \
++		     : CC_OUT(z) (success), \
++		       "+A" (__old), \
++		       [ptr] "+m" (*_ptr) \
++		     : "b" ((u32)__new), \
++		       "c" ((u32)((u64)__new >> 32)) \
++		     : "memory" \
++		     : label); \
++	if (unlikely(!success)) \
++		*_old = __old; \
++	likely(success); })
++#endif // CONFIG_X86_32
++#else // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
++#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \
++	int __err = 0; \
++	bool success; \
++	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
++	__typeof__(*(_ptr)) __old = *_old; \
++	__typeof__(*(_ptr)) __new = (_new); \
++	asm volatile("\n" \
++		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
++		     CC_SET(z) \
++		     "2:\n" \
++		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, \
++					   %[errout]) \
++		     : CC_OUT(z) (success), \
++		       [errout] "+r" (__err), \
++		       [ptr] "+m" (*_ptr), \
++		       [old] "+a" (__old) \
++		     : [new] ltype (__new) \
++		     : "memory", "cc"); \
++	if (unlikely(__err)) \
++		goto label; \
++	if (unlikely(!success)) \
++		*_old = __old; \
++	likely(success); })
++
++#ifdef CONFIG_X86_32
++/*
++ * Unlike the normal CMPXCHG, hardcode ECX for both success/fail and error.
++ * There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are
++ * hardcoded by CMPXCHG8B, leaving only ESI and EDI.  If the compiler uses
++ * both ESI and EDI for the memory operand, compilation will fail if the error
++ * is an input+output as there will be no register available for input.
++ */
++#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \
++	int __result; \
++	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
++	__typeof__(*(_ptr)) __old = *_old; \
++	__typeof__(*(_ptr)) __new = (_new); \
++	asm volatile("\n" \
++		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
++		     "mov $0, %%ecx\n\t" \
++		     "setz %%cl\n" \
++		     "2:\n" \
++		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %%ecx) \
++		     : [result]"=c" (__result), \
++		       "+A" (__old), \
++		       [ptr] "+m" (*_ptr) \
++		     : "b" ((u32)__new), \
++		       "c" ((u32)((u64)__new >> 32)) \
++		     : "memory", "cc"); \
++	if (unlikely(__result < 0)) \
++		goto label; \
++	if (unlikely(!__result)) \
++		*_old = __old; \
++	likely(__result); })
++#endif // CONFIG_X86_32
++#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
++
+ /* FIXME: this hack is definitely wrong -AK */
+ struct __large_struct { unsigned long buf[100]; };
+ #define __m(x) (*(struct __large_struct __user *)(x))
+@@ -474,6 +571,51 @@ do { \
+ } while (0)
+ #endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+ 
++extern void __try_cmpxchg_user_wrong_size(void);
++
++#ifndef CONFIG_X86_32
++#define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label) \
++	__try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
++#endif
++
++/*
++ * Force the pointer to u<size> to match the size expected by the asm helper.
++ * clang/LLVM compiles all cases and only discards the unused paths after
++ * processing errors, which breaks i386 if the pointer is an 8-byte value.
++ */
++#define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({ \
++	bool __ret; \
++	__chk_user_ptr(_ptr); \
++	switch (sizeof(*(_ptr))) { \
++	case 1:	__ret = __try_cmpxchg_user_asm("b", "q", \
++					       (__force u8 *)(_ptr), (_oldp), \
++					       (_nval), _label); \
++		break; \
++	case 2:	__ret = __try_cmpxchg_user_asm("w", "r", \
++					       (__force u16 *)(_ptr), (_oldp), \
++					       (_nval), _label); \
++		break; \
++	case 4:	__ret = __try_cmpxchg_user_asm("l", "r", \
++					       (__force u32 *)(_ptr), (_oldp), \
++					       (_nval), _label); \
++		break; \
++	case 8:	__ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\
++						 (_nval), _label); \
++		break; \
++	default: __try_cmpxchg_user_wrong_size(); \
++	} \
++	__ret; })
++
++/* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */
++#define __try_cmpxchg_user(_ptr, _oldp, _nval, _label)	({ \
++	int __ret = -EFAULT; \
++	__uaccess_begin_nospec(); \
++	__ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label); \
++_label: \
++	__uaccess_end(); \
++	__ret; \
++	})
++
+ /*
+  * We want the unsafe accessors to always be inlined and use
+  * the error labels - thus the macro games.
+diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
+index 7c63a1911fae9..3c24e6124d955 100644
+--- a/arch/x86/kernel/cpu/sgx/encl.c
++++ b/arch/x86/kernel/cpu/sgx/encl.c
+@@ -12,6 +12,92 @@
+ #include "encls.h"
+ #include "sgx.h"
+ 
++#define PCMDS_PER_PAGE (PAGE_SIZE / sizeof(struct sgx_pcmd))
++/*
++ * 32 PCMD entries share a PCMD page. PCMD_FIRST_MASK is used to
++ * determine the page index associated with the first PCMD entry
++ * within a PCMD page.
++ */
++#define PCMD_FIRST_MASK GENMASK(4, 0)
++
++/**
++ * reclaimer_writing_to_pcmd() - Query if any enclave page associated with
++ *                               a PCMD page is in process of being reclaimed.
++ * @encl:        Enclave to which PCMD page belongs
++ * @start_addr:  Address of enclave page using first entry within the PCMD page
++ *
++ * When an enclave page is reclaimed some Paging Crypto MetaData (PCMD) is
++ * stored. The PCMD data of a reclaimed enclave page contains enough
++ * information for the processor to verify the page at the time
++ * it is loaded back into the Enclave Page Cache (EPC).
++ *
++ * The backing storage to which enclave pages are reclaimed is laid out as
++ * follows:
++ * Encrypted enclave pages:SECS page:PCMD pages
++ *
++ * Each PCMD page contains the PCMD metadata of
++ * PAGE_SIZE/sizeof(struct sgx_pcmd) enclave pages.
++ *
++ * A PCMD page can only be truncated if it is (a) empty, and (b) not in the
++ * process of getting data (and thus soon being non-empty). (b) is tested with
++ * a check if an enclave page sharing the PCMD page is in the process of being
++ * reclaimed.
++ *
++ * The reclaimer sets the SGX_ENCL_PAGE_BEING_RECLAIMED flag when it
++ * intends to reclaim that enclave page - it means that the PCMD page
++ * associated with that enclave page is about to get some data and thus
++ * even if the PCMD page is empty, it should not be truncated.
++ *
++ * Context: Enclave mutex (&sgx_encl->lock) must be held.
++ * Return: 1 if the reclaimer is about to write to the PCMD page
++ *         0 if the reclaimer has no intention to write to the PCMD page
++ */
++static int reclaimer_writing_to_pcmd(struct sgx_encl *encl,
++				     unsigned long start_addr)
++{
++	int reclaimed = 0;
++	int i;
++
++	/*
++	 * PCMD_FIRST_MASK is based on number of PCMD entries within
++	 * PCMD page being 32.
++	 */
++	BUILD_BUG_ON(PCMDS_PER_PAGE != 32);
++
++	for (i = 0; i < PCMDS_PER_PAGE; i++) {
++		struct sgx_encl_page *entry;
++		unsigned long addr;
++
++		addr = start_addr + i * PAGE_SIZE;
++
++		/*
++		 * Stop when reaching the SECS page - it does not
++		 * have a page_array entry and its reclaim is
++		 * started and completed with enclave mutex held so
++		 * it does not use the SGX_ENCL_PAGE_BEING_RECLAIMED
++		 * flag.
++		 */
++		if (addr == encl->base + encl->size)
++			break;
++
++		entry = xa_load(&encl->page_array, PFN_DOWN(addr));
++		if (!entry)
++			continue;
++
++		/*
++		 * VA page slot ID uses same bit as the flag so it is important
++		 * to ensure that the page is not already in backing store.
++		 */
++		if (entry->epc_page &&
++		    (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)) {
++			reclaimed = 1;
++			break;
++		}
++	}
++
++	return reclaimed;
++}
++
+ /*
+  * Calculate byte offset of a PCMD struct associated with an enclave page. PCMD's
+  * follow right after the EPC data in the backing storage. In addition to the
+@@ -47,6 +133,7 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
+ 	unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
+ 	struct sgx_encl *encl = encl_page->encl;
+ 	pgoff_t page_index, page_pcmd_off;
++	unsigned long pcmd_first_page;
+ 	struct sgx_pageinfo pginfo;
+ 	struct sgx_backing b;
+ 	bool pcmd_page_empty;
+@@ -58,6 +145,11 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
+ 	else
+ 		page_index = PFN_DOWN(encl->size);
+ 
++	/*
++	 * Address of enclave page using the first entry within the PCMD page.
++	 */
++	pcmd_first_page = PFN_PHYS(page_index & ~PCMD_FIRST_MASK) + encl->base;
++
+ 	page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
+ 
+ 	ret = sgx_encl_get_backing(encl, page_index, &b);
+@@ -84,6 +176,7 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
+ 	}
+ 
+ 	memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd));
++	set_page_dirty(b.pcmd);
+ 
+ 	/*
+ 	 * The area for the PCMD in the page was zeroed above.  Check if the
+@@ -94,12 +187,20 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
+ 	kunmap_atomic(pcmd_page);
+ 	kunmap_atomic((void *)(unsigned long)pginfo.contents);
+ 
+-	sgx_encl_put_backing(&b, false);
++	get_page(b.pcmd);
++	sgx_encl_put_backing(&b);
+ 
+ 	sgx_encl_truncate_backing_page(encl, page_index);
+ 
+-	if (pcmd_page_empty)
++	if (pcmd_page_empty && !reclaimer_writing_to_pcmd(encl, pcmd_first_page)) {
+ 		sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off));
++		pcmd_page = kmap_atomic(b.pcmd);
++		if (memchr_inv(pcmd_page, 0, PAGE_SIZE))
++			pr_warn("PCMD page not empty after truncate.\n");
++		kunmap_atomic(pcmd_page);
++	}
++
++	put_page(b.pcmd);
+ 
+ 	return ret;
+ }
+@@ -645,15 +746,9 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
+ /**
+  * sgx_encl_put_backing() - Unpin the backing storage
+  * @backing:	data for accessing backing storage for the page
+- * @do_write:	mark pages dirty
+  */
+-void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write)
++void sgx_encl_put_backing(struct sgx_backing *backing)
+ {
+-	if (do_write) {
+-		set_page_dirty(backing->pcmd);
+-		set_page_dirty(backing->contents);
+-	}
+-
+ 	put_page(backing->pcmd);
+ 	put_page(backing->contents);
+ }
+diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
+index fec43ca65065b..d44e7372151f0 100644
+--- a/arch/x86/kernel/cpu/sgx/encl.h
++++ b/arch/x86/kernel/cpu/sgx/encl.h
+@@ -107,7 +107,7 @@ void sgx_encl_release(struct kref *ref);
+ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
+ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
+ 			 struct sgx_backing *backing);
+-void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write);
++void sgx_encl_put_backing(struct sgx_backing *backing);
+ int sgx_encl_test_and_clear_young(struct mm_struct *mm,
+ 				  struct sgx_encl_page *page);
+ 
+diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
+index 8e4bc6453d263..ab4ec54bbdd94 100644
+--- a/arch/x86/kernel/cpu/sgx/main.c
++++ b/arch/x86/kernel/cpu/sgx/main.c
+@@ -191,6 +191,8 @@ static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot,
+ 			  backing->pcmd_offset;
+ 
+ 	ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot);
++	set_page_dirty(backing->pcmd);
++	set_page_dirty(backing->contents);
+ 
+ 	kunmap_atomic((void *)(unsigned long)(pginfo.metadata -
+ 					      backing->pcmd_offset));
+@@ -308,6 +310,7 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
+ 	sgx_encl_ewb(epc_page, backing);
+ 	encl_page->epc_page = NULL;
+ 	encl->secs_child_cnt--;
++	sgx_encl_put_backing(backing);
+ 
+ 	if (!encl->secs_child_cnt && test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) {
+ 		ret = sgx_encl_get_backing(encl, PFN_DOWN(encl->size),
+@@ -320,7 +323,7 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
+ 		sgx_encl_free_epc_page(encl->secs.epc_page);
+ 		encl->secs.epc_page = NULL;
+ 
+-		sgx_encl_put_backing(&secs_backing, true);
++		sgx_encl_put_backing(&secs_backing);
+ 	}
+ 
+ out:
+@@ -379,11 +382,14 @@ static void sgx_reclaim_pages(void)
+ 			goto skip;
+ 
+ 		page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
++
++		mutex_lock(&encl_page->encl->lock);
+ 		ret = sgx_encl_get_backing(encl_page->encl, page_index, &backing[i]);
+-		if (ret)
++		if (ret) {
++			mutex_unlock(&encl_page->encl->lock);
+ 			goto skip;
++		}
+ 
+-		mutex_lock(&encl_page->encl->lock);
+ 		encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED;
+ 		mutex_unlock(&encl_page->encl->lock);
+ 		continue;
+@@ -411,7 +417,6 @@ skip:
+ 
+ 		encl_page = epc_page->owner;
+ 		sgx_reclaimer_write(epc_page, &backing[i]);
+-		sgx_encl_put_backing(&backing[i], true);
+ 
+ 		kref_put(&encl_page->encl->refcount, sgx_encl_release);
+ 		epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index e28ab0ecc5378..0fdc807ae13f8 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -14,6 +14,8 @@
+ #include <asm/traps.h>
+ #include <asm/irq_regs.h>
+ 
++#include <uapi/asm/kvm.h>
++
+ #include <linux/hardirq.h>
+ #include <linux/pkeys.h>
+ #include <linux/vmalloc.h>
+@@ -232,7 +234,20 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
+ 	gfpu->fpstate		= fpstate;
+ 	gfpu->xfeatures		= fpu_user_cfg.default_features;
+ 	gfpu->perm		= fpu_user_cfg.default_features;
+-	gfpu->uabi_size		= fpu_user_cfg.default_size;
++
++	/*
++	 * KVM sets the FP+SSE bits in the XSAVE header when copying FPU state
++	 * to userspace, even when XSAVE is unsupported, so that restoring FPU
++	 * state on a different CPU that does support XSAVE can cleanly load
++	 * the incoming state using its natural XSAVE.  In other words, KVM's
++	 * uABI size may be larger than this host's default size.  Conversely,
++	 * the default size should never be larger than KVM's base uABI size;
++	 * all features that can expand the uABI size must be opt-in.
++	 */
++	gfpu->uabi_size		= sizeof(struct kvm_xsave);
++	if (WARN_ON_ONCE(fpu_user_cfg.default_size > gfpu->uabi_size))
++		gfpu->uabi_size = fpu_user_cfg.default_size;
++
+ 	fpu_init_guest_permissions(gfpu);
+ 
+ 	return true;
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 8b1c45c9cda87..1a55bf700f926 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -191,7 +191,7 @@ void kvm_async_pf_task_wake(u32 token)
+ {
+ 	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
+ 	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
+-	struct kvm_task_sleep_node *n;
++	struct kvm_task_sleep_node *n, *dummy = NULL;
+ 
+ 	if (token == ~0) {
+ 		apf_task_wake_all();
+@@ -203,28 +203,41 @@ again:
+ 	n = _find_apf_task(b, token);
+ 	if (!n) {
+ 		/*
+-		 * async PF was not yet handled.
+-		 * Add dummy entry for the token.
++		 * Async #PF not yet handled, add a dummy entry for the token.
++		 * Allocating the token must be down outside of the raw lock
++		 * as the allocator is preemptible on PREEMPT_RT kernels.
+ 		 */
+-		n = kzalloc(sizeof(*n), GFP_ATOMIC);
+-		if (!n) {
++		if (!dummy) {
++			raw_spin_unlock(&b->lock);
++			dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC);
++
+ 			/*
+-			 * Allocation failed! Busy wait while other cpu
+-			 * handles async PF.
++			 * Continue looping on allocation failure, eventually
++			 * the async #PF will be handled and allocating a new
++			 * node will be unnecessary.
++			 */
++			if (!dummy)
++				cpu_relax();
++
++			/*
++			 * Recheck for async #PF completion before enqueueing
++			 * the dummy token to avoid duplicate list entries.
+ 			 */
+-			raw_spin_unlock(&b->lock);
+-			cpu_relax();
+ 			goto again;
+ 		}
+-		n->token = token;
+-		n->cpu = smp_processor_id();
+-		init_swait_queue_head(&n->wq);
+-		hlist_add_head(&n->link, &b->list);
++		dummy->token = token;
++		dummy->cpu = smp_processor_id();
++		init_swait_queue_head(&dummy->wq);
++		hlist_add_head(&dummy->link, &b->list);
++		dummy = NULL;
+ 	} else {
+ 		apf_task_wake_one(n);
+ 	}
+ 	raw_spin_unlock(&b->lock);
+-	return;
++
++	/* A dummy token might be allocated and ultimately not used.  */
++	if (dummy)
++		kfree(dummy);
+ }
+ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
+ 
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 45e1573f8f1d3..cf48ac96ceecb 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -1843,17 +1843,14 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
+ 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
+ 		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
+ 
+-static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
++static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+ 			 struct list_head *invalid_list)
+ {
+ 	int ret = vcpu->arch.mmu->sync_page(vcpu, sp);
+ 
+-	if (ret < 0) {
++	if (ret < 0)
+ 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
+-		return false;
+-	}
+-
+-	return !!ret;
++	return ret;
+ }
+ 
+ static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
+@@ -1975,7 +1972,7 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu,
+ 
+ 		for_each_sp(pages, sp, parents, i) {
+ 			kvm_unlink_unsync_page(vcpu->kvm, sp);
+-			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
++			flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0;
+ 			mmu_pages_clear_parents(&parents);
+ 		}
+ 		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
+@@ -2016,6 +2013,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
+ 	struct hlist_head *sp_list;
+ 	unsigned quadrant;
+ 	struct kvm_mmu_page *sp;
++	int ret;
+ 	int collisions = 0;
+ 	LIST_HEAD(invalid_list);
+ 
+@@ -2068,11 +2066,13 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
+ 			 * If the sync fails, the page is zapped.  If so, break
+ 			 * in order to rebuild it.
+ 			 */
+-			if (!kvm_sync_page(vcpu, sp, &invalid_list))
++			ret = kvm_sync_page(vcpu, sp, &invalid_list);
++			if (ret < 0)
+ 				break;
+ 
+ 			WARN_ON(!list_empty(&invalid_list));
+-			kvm_flush_remote_tlbs(vcpu->kvm);
++			if (ret > 0)
++				kvm_flush_remote_tlbs(vcpu->kvm);
+ 		}
+ 
+ 		__clear_sp_write_flooding_count(sp);
+diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
+index 01fee5f67ac37..beb3ce8d94eb3 100644
+--- a/arch/x86/kvm/mmu/paging_tmpl.h
++++ b/arch/x86/kvm/mmu/paging_tmpl.h
+@@ -144,42 +144,6 @@ static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
+ 	       FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
+ }
+ 
+-static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+-			       pt_element_t __user *ptep_user, unsigned index,
+-			       pt_element_t orig_pte, pt_element_t new_pte)
+-{
+-	signed char r;
+-
+-	if (!user_access_begin(ptep_user, sizeof(pt_element_t)))
+-		return -EFAULT;
+-
+-#ifdef CMPXCHG
+-	asm volatile("1:" LOCK_PREFIX CMPXCHG " %[new], %[ptr]\n"
+-		     "setnz %b[r]\n"
+-		     "2:"
+-		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
+-		     : [ptr] "+m" (*ptep_user),
+-		       [old] "+a" (orig_pte),
+-		       [r] "=q" (r)
+-		     : [new] "r" (new_pte)
+-		     : "memory");
+-#else
+-	asm volatile("1:" LOCK_PREFIX "cmpxchg8b %[ptr]\n"
+-		     "setnz %b[r]\n"
+-		     "2:"
+-		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
+-		     : [ptr] "+m" (*ptep_user),
+-		       [old] "+A" (orig_pte),
+-		       [r] "=q" (r)
+-		     : [new_lo] "b" ((u32)new_pte),
+-		       [new_hi] "c" ((u32)(new_pte >> 32))
+-		     : "memory");
+-#endif
+-
+-	user_access_end();
+-	return r;
+-}
+-
+ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
+ 				  struct kvm_mmu_page *sp, u64 *spte,
+ 				  u64 gpte)
+@@ -278,7 +242,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
+ 		if (unlikely(!walker->pte_writable[level - 1]))
+ 			continue;
+ 
+-		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
++		ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault);
+ 		if (ret)
+ 			return ret;
+ 
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index 96bab464967f2..1a9b60cb6bcb8 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -819,9 +819,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
+ 	struct kvm_host_map map;
+ 	int rc;
+ 
+-	/* Triple faults in L2 should never escape. */
+-	WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
+-
+ 	rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
+ 	if (rc) {
+ 		if (rc == -EINVAL)
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 7c392873626fd..4b7d490c0b639 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -688,7 +688,7 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ 		if (params.len > SEV_FW_BLOB_MAX_SIZE)
+ 			return -EINVAL;
+ 
+-		blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
++		blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
+ 		if (!blob)
+ 			return -ENOMEM;
+ 
+@@ -808,7 +808,7 @@ static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
+ 	if (!IS_ALIGNED(dst_paddr, 16) ||
+ 	    !IS_ALIGNED(paddr,     16) ||
+ 	    !IS_ALIGNED(size,      16)) {
+-		tpage = (void *)alloc_page(GFP_KERNEL);
++		tpage = (void *)alloc_page(GFP_KERNEL | __GFP_ZERO);
+ 		if (!tpage)
+ 			return -ENOMEM;
+ 
+@@ -1094,7 +1094,7 @@ static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ 		if (params.len > SEV_FW_BLOB_MAX_SIZE)
+ 			return -EINVAL;
+ 
+-		blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
++		blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
+ 		if (!blob)
+ 			return -ENOMEM;
+ 
+@@ -1176,7 +1176,7 @@ static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ 		return -EINVAL;
+ 
+ 	/* allocate the memory to hold the session data blob */
+-	session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT);
++	session_data = kzalloc(params.session_len, GFP_KERNEL_ACCOUNT);
+ 	if (!session_data)
+ 		return -ENOMEM;
+ 
+@@ -1300,11 +1300,11 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ 
+ 	/* allocate memory for header and transport buffer */
+ 	ret = -ENOMEM;
+-	hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
++	hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
+ 	if (!hdr)
+ 		goto e_unpin;
+ 
+-	trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
++	trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
+ 	if (!trans_data)
+ 		goto e_free_hdr;
+ 
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 856c875638833..880d0b0c9315b 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -4518,9 +4518,6 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
+ 	/* trying to cancel vmlaunch/vmresume is a bug */
+ 	WARN_ON_ONCE(vmx->nested.nested_run_pending);
+ 
+-	/* Similarly, triple faults in L2 should never escape. */
+-	WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
+-
+ 	if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
+ 		/*
+ 		 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 610355b9ccceb..982df9c000d31 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7856,7 +7856,7 @@ static unsigned int vmx_handle_intel_pt_intr(void)
+ 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
+ 
+ 	/* '0' on failure so that the !PT case can use a RET0 static call. */
+-	if (!kvm_arch_pmi_in_guest(vcpu))
++	if (!vcpu || !kvm_handling_nmi_from_guest(vcpu))
+ 		return 0;
+ 
+ 	kvm_make_request(KVM_REQ_PMI, vcpu);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 4790f0d7d40b8..39c571224ac28 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7229,15 +7229,8 @@ static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
+ 				   exception, &write_emultor);
+ }
+ 
+-#define CMPXCHG_TYPE(t, ptr, old, new) \
+-	(cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
+-
+-#ifdef CONFIG_X86_64
+-#  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
+-#else
+-#  define CMPXCHG64(ptr, old, new) \
+-	(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
+-#endif
++#define emulator_try_cmpxchg_user(t, ptr, old, new) \
++	(__try_cmpxchg_user((t __user *)(ptr), (t *)(old), *(t *)(new), efault ## t))
+ 
+ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
+ 				     unsigned long addr,
+@@ -7246,12 +7239,11 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
+ 				     unsigned int bytes,
+ 				     struct x86_exception *exception)
+ {
+-	struct kvm_host_map map;
+ 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ 	u64 page_line_mask;
++	unsigned long hva;
+ 	gpa_t gpa;
+-	char *kaddr;
+-	bool exchanged;
++	int r;
+ 
+ 	/* guests cmpxchg8b have to be emulated atomically */
+ 	if (bytes > 8 || (bytes & (bytes - 1)))
+@@ -7275,31 +7267,32 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
+ 	if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask))
+ 		goto emul_write;
+ 
+-	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
++	hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa));
++	if (kvm_is_error_hva(hva))
+ 		goto emul_write;
+ 
+-	kaddr = map.hva + offset_in_page(gpa);
++	hva += offset_in_page(gpa);
+ 
+ 	switch (bytes) {
+ 	case 1:
+-		exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
++		r = emulator_try_cmpxchg_user(u8, hva, old, new);
+ 		break;
+ 	case 2:
+-		exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
++		r = emulator_try_cmpxchg_user(u16, hva, old, new);
+ 		break;
+ 	case 4:
+-		exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
++		r = emulator_try_cmpxchg_user(u32, hva, old, new);
+ 		break;
+ 	case 8:
+-		exchanged = CMPXCHG64(kaddr, old, new);
++		r = emulator_try_cmpxchg_user(u64, hva, old, new);
+ 		break;
+ 	default:
+ 		BUG();
+ 	}
+ 
+-	kvm_vcpu_unmap(vcpu, &map, true);
+-
+-	if (!exchanged)
++	if (r < 0)
++		goto emul_write;
++	if (r)
+ 		return X86EMUL_CMPXCHG_FAILED;
+ 
+ 	kvm_page_track_write(vcpu, gpa, new, bytes);
+@@ -8251,7 +8244,7 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
+ }
+ EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
+ 
+-static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
++static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, int *r)
+ {
+ 	if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
+ 	    (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
+@@ -8320,25 +8313,23 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
+ }
+ 
+ /*
+- * Decode to be emulated instruction. Return EMULATION_OK if success.
++ * Decode an instruction for emulation.  The caller is responsible for handling
++ * code breakpoints.  Note, manually detecting code breakpoints is unnecessary
++ * (and wrong) when emulating on an intercepted fault-like exception[*], as
++ * code breakpoints have higher priority and thus have already been done by
++ * hardware.
++ *
++ * [*] Except #MC, which is higher priority, but KVM should never emulate in
++ *     response to a machine check.
+  */
+ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
+ 				    void *insn, int insn_len)
+ {
+-	int r = EMULATION_OK;
+ 	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
++	int r;
+ 
+ 	init_emulate_ctxt(vcpu);
+ 
+-	/*
+-	 * We will reenter on the same instruction since we do not set
+-	 * complete_userspace_io. This does not handle watchpoints yet,
+-	 * those would be handled in the emulate_ops.
+-	 */
+-	if (!(emulation_type & EMULTYPE_SKIP) &&
+-	    kvm_vcpu_check_breakpoint(vcpu, &r))
+-		return r;
+-
+ 	r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
+ 
+ 	trace_kvm_emulate_insn_start(vcpu);
+@@ -8371,6 +8362,15 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ 	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
+ 		kvm_clear_exception_queue(vcpu);
+ 
++		/*
++		 * Return immediately if RIP hits a code breakpoint, such #DBs
++		 * are fault-like and are higher priority than any faults on
++		 * the code fetch itself.
++		 */
++		if (!(emulation_type & EMULTYPE_SKIP) &&
++		    kvm_vcpu_check_code_breakpoint(vcpu, &r))
++			return r;
++
+ 		r = x86_decode_emulated_instruction(vcpu, emulation_type,
+ 						    insn, insn_len);
+ 		if (r != EMULATION_OK)  {
+@@ -11747,20 +11747,15 @@ static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
+ 	vcpu_put(vcpu);
+ }
+ 
+-static void kvm_free_vcpus(struct kvm *kvm)
++static void kvm_unload_vcpu_mmus(struct kvm *kvm)
+ {
+ 	unsigned long i;
+ 	struct kvm_vcpu *vcpu;
+ 
+-	/*
+-	 * Unpin any mmu pages first.
+-	 */
+ 	kvm_for_each_vcpu(i, vcpu, kvm) {
+ 		kvm_clear_async_pf_completion_queue(vcpu);
+ 		kvm_unload_vcpu_mmu(vcpu);
+ 	}
+-
+-	kvm_destroy_vcpus(kvm);
+ }
+ 
+ void kvm_arch_sync_events(struct kvm *kvm)
+@@ -11866,11 +11861,12 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
+ 		__x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
+ 		mutex_unlock(&kvm->slots_lock);
+ 	}
++	kvm_unload_vcpu_mmus(kvm);
+ 	static_call_cond(kvm_x86_vm_destroy)(kvm);
+ 	kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
+ 	kvm_pic_destroy(kvm);
+ 	kvm_ioapic_destroy(kvm);
+-	kvm_free_vcpus(kvm);
++	kvm_destroy_vcpus(kvm);
+ 	kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
+ 	kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
+ 	kvm_mmu_uninit_vm(kvm);
+diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
+index b32ffcaad9adf..f3c6b5e15e75b 100644
+--- a/crypto/ecrdsa.c
++++ b/crypto/ecrdsa.c
+@@ -113,15 +113,15 @@ static int ecrdsa_verify(struct akcipher_request *req)
+ 
+ 	/* Step 1: verify that 0 < r < q, 0 < s < q */
+ 	if (vli_is_zero(r, ndigits) ||
+-	    vli_cmp(r, ctx->curve->n, ndigits) == 1 ||
++	    vli_cmp(r, ctx->curve->n, ndigits) >= 0 ||
+ 	    vli_is_zero(s, ndigits) ||
+-	    vli_cmp(s, ctx->curve->n, ndigits) == 1)
++	    vli_cmp(s, ctx->curve->n, ndigits) >= 0)
+ 		return -EKEYREJECTED;
+ 
+ 	/* Step 2: calculate hash (h) of the message (passed as input) */
+ 	/* Step 3: calculate e = h \mod q */
+ 	vli_from_le64(e, digest, ndigits);
+-	if (vli_cmp(e, ctx->curve->n, ndigits) == 1)
++	if (vli_cmp(e, ctx->curve->n, ndigits) >= 0)
+ 		vli_sub(e, e, ctx->curve->n, ndigits);
+ 	if (vli_is_zero(e, ndigits))
+ 		e[0] = 1;
+@@ -137,7 +137,7 @@ static int ecrdsa_verify(struct akcipher_request *req)
+ 	/* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */
+ 	ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key,
+ 			      ctx->curve);
+-	if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1)
++	if (vli_cmp(cc.x, ctx->curve->n, ndigits) >= 0)
+ 		vli_sub(cc.x, cc.x, ctx->curve->n, ndigits);
+ 
+ 	/* Step 7: if R == r signature is valid */
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index f6e91fb432a3b..eab34e24d9446 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -696,9 +696,9 @@ static int qca_close(struct hci_uart *hu)
+ 	skb_queue_purge(&qca->tx_wait_q);
+ 	skb_queue_purge(&qca->txq);
+ 	skb_queue_purge(&qca->rx_memdump_q);
+-	del_timer(&qca->tx_idle_timer);
+-	del_timer(&qca->wake_retrans_timer);
+ 	destroy_workqueue(qca->workqueue);
++	del_timer_sync(&qca->tx_idle_timer);
++	del_timer_sync(&qca->wake_retrans_timer);
+ 	qca->hu = NULL;
+ 
+ 	kfree_skb(qca->rx_skb);
+diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
+index 4704fa553098b..04a3e23a4afc7 100644
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -400,7 +400,16 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
+ 	if (!rc) {
+ 		out = (struct tpm2_get_cap_out *)
+ 			&buf.data[TPM_HEADER_SIZE];
+-		*value = be32_to_cpu(out->value);
++		/*
++		 * To prevent failing boot up of some systems, Infineon TPM2.0
++		 * returns SUCCESS on TPM2_Startup in field upgrade mode. Also
++		 * the TPM2_Getcapability command returns a zero length list
++		 * in field upgrade mode.
++		 */
++		if (be32_to_cpu(out->property_cnt) > 0)
++			*value = be32_to_cpu(out->value);
++		else
++			rc = -ENODATA;
+ 	}
+ 	tpm_buf_destroy(&buf);
+ 	return rc;
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index 3af4c07a9342f..d3989b257f422 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -681,6 +681,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ 	if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
+ 				ibmvtpm->rtce_buf != NULL,
+ 				HZ)) {
++		rc = -ENODEV;
+ 		dev_err(dev, "CRQ response timed out\n");
+ 		goto init_irq_cleanup;
+ 	}
+diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
+index ca0361b2dbb07..f87aa2169e5f5 100644
+--- a/drivers/crypto/caam/ctrl.c
++++ b/drivers/crypto/caam/ctrl.c
+@@ -609,6 +609,13 @@ static bool check_version(struct fsl_mc_version *mc_version, u32 major,
+ }
+ #endif
+ 
++static bool needs_entropy_delay_adjustment(void)
++{
++	if (of_machine_is_compatible("fsl,imx6sx"))
++		return true;
++	return false;
++}
++
+ /* Probe routine for CAAM top (controller) level */
+ static int caam_probe(struct platform_device *pdev)
+ {
+@@ -855,6 +862,8 @@ static int caam_probe(struct platform_device *pdev)
+ 			 * Also, if a handle was instantiated, do not change
+ 			 * the TRNG parameters.
+ 			 */
++			if (needs_entropy_delay_adjustment())
++				ent_delay = 12000;
+ 			if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+ 				dev_info(dev,
+ 					 "Entropy delay = %u\n",
+@@ -871,6 +880,15 @@ static int caam_probe(struct platform_device *pdev)
+ 			 */
+ 			ret = instantiate_rng(dev, inst_handles,
+ 					      gen_sk);
++			/*
++			 * Entropy delay is determined via TRNG characterization.
++			 * TRNG characterization is run across different voltages
++			 * and temperatures.
++			 * If worst case value for ent_dly is identified,
++			 * the loop can be skipped for that platform.
++			 */
++			if (needs_entropy_delay_adjustment())
++				break;
+ 			if (ret == -EAGAIN)
+ 				/*
+ 				 * if here, the loop will rerun,
+diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
+index a03c6cf723312..dfa7ee41c5e9c 100644
+--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
++++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
+@@ -152,9 +152,9 @@ struct adf_pfvf_ops {
+ 	int (*enable_comms)(struct adf_accel_dev *accel_dev);
+ 	u32 (*get_pf2vf_offset)(u32 i);
+ 	u32 (*get_vf2pf_offset)(u32 i);
+-	u32 (*get_vf2pf_sources)(void __iomem *pmisc_addr);
+ 	void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
+ 	void (*disable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
++	u32 (*disable_pending_vf2pf_interrupts)(void __iomem *pmisc_addr);
+ 	int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ 			u32 pfvf_offset, struct mutex *csr_lock);
+ 	struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
+diff --git a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
+index 1a9072aac2ca9..def4cc8e1039a 100644
+--- a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
++++ b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
+@@ -13,6 +13,7 @@
+ #include "adf_pfvf_utils.h"
+ 
+ /* VF2PF interrupts */
++#define ADF_GEN2_VF_MSK			0xFFFF
+ #define ADF_GEN2_ERR_REG_VF2PF(vf_src)	(((vf_src) & 0x01FFFE00) >> 9)
+ #define ADF_GEN2_ERR_MSK_VF2PF(vf_mask)	(((vf_mask) & 0xFFFF) << 9)
+ 
+@@ -50,23 +51,6 @@ static u32 adf_gen2_vf_get_pfvf_offset(u32 i)
+ 	return ADF_GEN2_VF_PF2VF_OFFSET;
+ }
+ 
+-static u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_addr)
+-{
+-	u32 errsou3, errmsk3, vf_int_mask;
+-
+-	/* Get the interrupt sources triggered by VFs */
+-	errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
+-	vf_int_mask = ADF_GEN2_ERR_REG_VF2PF(errsou3);
+-
+-	/* To avoid adding duplicate entries to work queue, clear
+-	 * vf_int_mask_sets bits that are already masked in ERRMSK register.
+-	 */
+-	errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
+-	vf_int_mask &= ~ADF_GEN2_ERR_REG_VF2PF(errmsk3);
+-
+-	return vf_int_mask;
+-}
+-
+ static void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr,
+ 					     u32 vf_mask)
+ {
+@@ -89,6 +73,44 @@ static void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr,
+ 	}
+ }
+ 
++static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
++{
++	u32 sources, disabled, pending;
++	u32 errsou3, errmsk3;
++
++	/* Get the interrupt sources triggered by VFs */
++	errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
++	sources = ADF_GEN2_ERR_REG_VF2PF(errsou3);
++
++	if (!sources)
++		return 0;
++
++	/* Get the already disabled interrupts */
++	errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
++	disabled = ADF_GEN2_ERR_REG_VF2PF(errmsk3);
++
++	pending = sources & ~disabled;
++	if (!pending)
++		return 0;
++
++	/* Due to HW limitations, when disabling the interrupts, we can't
++	 * just disable the requested sources, as this would lead to missed
++	 * interrupts if ERRSOU3 changes just before writing to ERRMSK3.
++	 * To work around it, disable all and re-enable only the sources that
++	 * are not in vf_mask and were not already disabled. Re-enabling will
++	 * trigger a new interrupt for the sources that have changed in the
++	 * meantime, if any.
++	 */
++	errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
++	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
++
++	errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
++	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
++
++	/* Return the sources of the (new) interrupt(s) */
++	return pending;
++}
++
+ static u32 gen2_csr_get_int_bit(enum gen2_csr_pos offset)
+ {
+ 	return ADF_PFVF_INT << offset;
+@@ -362,9 +384,9 @@ void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+ 	pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
+ 	pfvf_ops->get_pf2vf_offset = adf_gen2_pf_get_pfvf_offset;
+ 	pfvf_ops->get_vf2pf_offset = adf_gen2_pf_get_pfvf_offset;
+-	pfvf_ops->get_vf2pf_sources = adf_gen2_get_vf2pf_sources;
+ 	pfvf_ops->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
+ 	pfvf_ops->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts;
++	pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen2_disable_pending_vf2pf_interrupts;
+ 	pfvf_ops->send_msg = adf_gen2_pf2vf_send;
+ 	pfvf_ops->recv_msg = adf_gen2_vf2pf_recv;
+ }
+diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
+index d80d493a77568..40fdab857f959 100644
+--- a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
++++ b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
+@@ -15,6 +15,7 @@
+ /* VF2PF interrupt source registers */
+ #define ADF_4XXX_VM2PF_SOU		0x41A180
+ #define ADF_4XXX_VM2PF_MSK		0x41A1C0
++#define ADF_GEN4_VF_MSK			0xFFFF
+ 
+ #define ADF_PFVF_GEN4_MSGTYPE_SHIFT	2
+ #define ADF_PFVF_GEN4_MSGTYPE_MASK	0x3F
+@@ -36,16 +37,6 @@ static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
+ 	return ADF_4XXX_VM2PF_OFFSET(i);
+ }
+ 
+-static u32 adf_gen4_get_vf2pf_sources(void __iomem *pmisc_addr)
+-{
+-	u32 sou, mask;
+-
+-	sou = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU);
+-	mask = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK);
+-
+-	return sou & ~mask;
+-}
+-
+ static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr,
+ 					     u32 vf_mask)
+ {
+@@ -64,6 +55,37 @@ static void adf_gen4_disable_vf2pf_interrupts(void __iomem *pmisc_addr,
+ 	ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
+ }
+ 
++static u32 adf_gen4_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
++{
++	u32 sources, disabled, pending;
++
++	/* Get the interrupt sources triggered by VFs */
++	sources = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU);
++	if (!sources)
++		return 0;
++
++	/* Get the already disabled interrupts */
++	disabled = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK);
++
++	pending = sources & ~disabled;
++	if (!pending)
++		return 0;
++
++	/* Due to HW limitations, when disabling the interrupts, we can't
++	 * just disable the requested sources, as this would lead to missed
++	 * interrupts if VM2PF_SOU changes just before writing to VM2PF_MSK.
++	 * To work around it, disable all and re-enable only the sources that
++	 * are not in vf_mask and were not already disabled. Re-enabling will
++	 * trigger a new interrupt for the sources that have changed in the
++	 * meantime, if any.
++	 */
++	ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK);
++	ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, disabled | sources);
++
++	/* Return the sources of the (new) interrupt(s) */
++	return pending;
++}
++
+ static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev,
+ 			      struct pfvf_message msg, u32 pfvf_offset,
+ 			      struct mutex *csr_lock)
+@@ -115,9 +137,9 @@ void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+ 	pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
+ 	pfvf_ops->get_pf2vf_offset = adf_gen4_pf_get_pf2vf_offset;
+ 	pfvf_ops->get_vf2pf_offset = adf_gen4_pf_get_vf2pf_offset;
+-	pfvf_ops->get_vf2pf_sources = adf_gen4_get_vf2pf_sources;
+ 	pfvf_ops->enable_vf2pf_interrupts = adf_gen4_enable_vf2pf_interrupts;
+ 	pfvf_ops->disable_vf2pf_interrupts = adf_gen4_disable_vf2pf_interrupts;
++	pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen4_disable_pending_vf2pf_interrupts;
+ 	pfvf_ops->send_msg = adf_gen4_pfvf_send;
+ 	pfvf_ops->recv_msg = adf_gen4_pfvf_recv;
+ }
+diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
+index a35149f8bf1ee..23f7fff32c642 100644
+--- a/drivers/crypto/qat/qat_common/adf_isr.c
++++ b/drivers/crypto/qat/qat_common/adf_isr.c
+@@ -76,32 +76,29 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+ 	spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+ }
+ 
+-static void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
+-					     u32 vf_mask)
++static u32 adf_disable_pending_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
+ {
+ 	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
++	u32 pending;
+ 
+ 	spin_lock(&accel_dev->pf.vf2pf_ints_lock);
+-	GET_PFVF_OPS(accel_dev)->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
++	pending = GET_PFVF_OPS(accel_dev)->disable_pending_vf2pf_interrupts(pmisc_addr);
+ 	spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
++
++	return pending;
+ }
+ 
+ static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
+ {
+-	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ 	bool irq_handled = false;
+ 	unsigned long vf_mask;
+ 
+-	/* Get the interrupt sources triggered by VFs */
+-	vf_mask = GET_PFVF_OPS(accel_dev)->get_vf2pf_sources(pmisc_addr);
+-
++	/* Get the interrupt sources triggered by VFs, except for those already disabled */
++	vf_mask = adf_disable_pending_vf2pf_interrupts(accel_dev);
+ 	if (vf_mask) {
+ 		struct adf_accel_vf_info *vf_info;
+ 		int i;
+ 
+-		/* Disable VF2PF interrupts for VFs with pending ints */
+-		adf_disable_vf2pf_interrupts_irq(accel_dev, vf_mask);
+-
+ 		/*
+ 		 * Handle VF2PF interrupt unless the VF is malicious and
+ 		 * is attempting to flood the host OS with VF2PF interrupts.
+diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+index 09599fe4d2f3f..1e7bed8b011fe 100644
+--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
++++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+@@ -7,6 +7,8 @@
+ #include "adf_dh895xcc_hw_data.h"
+ #include "icp_qat_hw.h"
+
++#define ADF_DH895XCC_VF_MSK 0xFFFFFFFF
++
+ /* Worker thread to service arbiter mappings */
+ static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
+ 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+@@ -114,29 +116,6 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
+ ADF_DH895XCC_SMIA1_MASK);
+ }
+
+-static u32 get_vf2pf_sources(void __iomem *pmisc_bar)
+-{
+- u32 errsou3, errmsk3, errsou5, errmsk5, vf_int_mask;
+-
+- /* Get the interrupt sources triggered by VFs */
+- errsou3 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU3);
+- vf_int_mask = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3);
+-
+- /* To avoid adding duplicate entries to work queue, clear
+- * vf_int_mask_sets bits that are already masked in ERRMSK register.
+- */
+- errmsk3 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK3);
+- vf_int_mask &= ~ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3);
+-
+- /* Do the same for ERRSOU5 */
+- errsou5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU5);
+- errmsk5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK5);
+- vf_int_mask |= ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
+- vf_int_mask &= ~ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);
+-
+- return vf_int_mask;
+-}
+-
+ static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+ {
+ /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
+@@ -150,7 +129,6 @@ static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+ if (vf_mask >> 16) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
+ & ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
+-
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
+ }
+ }
+@@ -173,6 +151,54 @@ static void disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+ }
+ }
+
++static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
++{
++ u32 sources, pending, disabled;
++ u32 errsou3, errmsk3;
++ u32 errsou5, errmsk5;
++
++ /* Get the interrupt sources triggered by VFs */
++ errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
++ errsou5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU5);
++ sources = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3)
++ | ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
++
++ if (!sources)
++ return 0;
++
++ /* Get the already disabled interrupts */
++ errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
++ errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5);
++ disabled = ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3)
++ | ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);
++
++ pending = sources & ~disabled;
++ if (!pending)
++ return 0;
++
++ /* Due to HW limitations, when disabling the interrupts, we can't
++ * just disable the requested sources, as this would lead to missed
++ * interrupts if sources changes just before writing to ERRMSK3 and
++ * ERRMSK5.
++ * To work around it, disable all and re-enable only the sources that
++ * are not in vf_mask and were not already disabled. Re-enabling will
++ * trigger a new interrupt for the sources that have changed in the
++ * meantime, if any.
++ */
++ errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
++ errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
++ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
++ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
++
++ errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
++ errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
++ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
++ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
++
++ /* Return the sources of the (new) interrupt(s) */
++ return pending;
++}
++
+ static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
+ {
+ adf_gen2_cfg_iov_thds(accel_dev, enable,
+@@ -220,9 +246,9 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+ hw_data->disable_iov = adf_disable_sriov;
+
+ adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+- hw_data->pfvf_ops.get_vf2pf_sources = get_vf2pf_sources;
+ hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
+ hw_data->pfvf_ops.disable_vf2pf_interrupts = disable_vf2pf_interrupts;
++ hw_data->pfvf_ops.disable_pending_vf2pf_interrupts = disable_pending_vf2pf_interrupts;
+ adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ }
+
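[On this generation the 32 VF bits do not live in one CSR: the low half sits in ERRMSK3 and the high half in ERRMSK5, which is why the function above applies the _L/_U accessor pairs symmetrically. A hypothetical pair of helpers along these lines shows the split; the bit placement is illustrative only, the real ADF_DH895XCC_ERR_MSK_VF2PF_* definitions live in the QAT register headers:

#include <stdint.h>

/* Illustrative only: fold one 32-bit per-VF mask into two register
 * fields, as the _L/_U macro pairs above do. */
#define VF2PF_L(vf_mask)  (((vf_mask) & 0xFFFFu) << 9)   /* VFs 0..15  */
#define VF2PF_U(vf_mask)  (((vf_mask) >> 16) & 0xFFFFu)  /* VFs 16..31 */

static void mask_all_vfs(uint32_t *errmsk3, uint32_t *errmsk5)
{
	*errmsk3 |= VF2PF_L(0xFFFFFFFFu);
	*errmsk5 |= VF2PF_U(0xFFFFFFFFu);
}

static void unmask_except(uint32_t *errmsk3, uint32_t *errmsk5,
			  uint32_t keep_disabled)
{
	/* clear every mask bit not named in keep_disabled */
	*errmsk3 &= VF2PF_L(keep_disabled);
	*errmsk5 &= VF2PF_U(keep_disabled);
}
]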
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 9333f732cda8e..5167d63010b99 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -2859,7 +2859,7 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
+ }
+
+ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
+- u16 wm[8])
++ u16 wm[])
+ {
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 053853a891c50..c297c63f3ec5c 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -768,6 +768,7 @@
+ #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
+ #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
+ #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
++#define USB_DEVICE_ID_LENOVO_X12_TAB 0x60fe
+ #define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E 0x600e
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 99eabfb4145b5..6bb3890b0f2c9 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2034,6 +2034,12 @@ static const struct hid_device_id mt_devices[] = {
+ USB_VENDOR_ID_LENOVO,
+ USB_DEVICE_ID_LENOVO_X1_TAB3) },
+
++ /* Lenovo X12 TAB Gen 1 */
++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
++ USB_VENDOR_ID_LENOVO,
++ USB_DEVICE_ID_LENOVO_X12_TAB) },
++
+ /* MosArt panels */
+ { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
+ MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
+@@ -2178,6 +2184,9 @@ static const struct hid_device_id mt_devices[] = {
+ { .driver_data = MT_CLS_GOOGLE,
+ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
+ USB_DEVICE_ID_GOOGLE_TOUCH_ROSE) },
++ { .driver_data = MT_CLS_GOOGLE,
++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_GOOGLE,
++ USB_DEVICE_ID_GOOGLE_WHISKERS) },
+
+ /* Generic MT device */
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
+diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
+index c16157ee8c520..6078fa0c0d488 100644
+--- a/drivers/i2c/busses/i2c-ismt.c
++++ b/drivers/i2c/busses/i2c-ismt.c
+@@ -528,6 +528,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
+
+ case I2C_SMBUS_BLOCK_PROC_CALL:
+ dev_dbg(dev, "I2C_SMBUS_BLOCK_PROC_CALL\n");
++ if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
++ return -EINVAL;
++
+ dma_size = I2C_SMBUS_BLOCK_MAX;
+ desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, 1);
+ desc->wr_len_cmd = data->block[0] + 1;
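[The added check closes a buffer overrun: data->block[0] is caller-controlled and wr_len_cmd is derived from it, while the DMA buffer is only I2C_SMBUS_BLOCK_MAX bytes. A reduced sketch of the invariant being enforced (names and the 32-byte constant follow the SMBus convention; the descriptor setup is omitted):

#include <errno.h>
#include <stdint.h>
#include <string.h>

#define SMBUS_BLOCK_MAX 32	/* mirrors I2C_SMBUS_BLOCK_MAX */

/* block[0] is the length byte supplied by the caller, block[1..] the
 * payload.  Without the range check, len + 1 bytes would be copied
 * into a 32-byte DMA buffer. */
static int fill_dma(uint8_t *dma_buf, const uint8_t *block)
{
	uint8_t len = block[0];

	if (len > SMBUS_BLOCK_MAX)
		return -EINVAL;	/* reject before touching the buffer */

	memcpy(dma_buf, block, len + 1);
	return 0;
}
]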
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index fb80539865d7c..159c6806c19b8 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -3439,6 +3439,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
+ return DM_MAPIO_SUBMITTED;
+ }
+
++static char hex2asc(unsigned char c)
++{
++ return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
++}
++
+ static void crypt_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
+ {
+@@ -3457,9 +3462,12 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
+ if (cc->key_size > 0) {
+ if (cc->key_string)
+ DMEMIT(":%u:%s", cc->key_size, cc->key_string);
+- else
+- for (i = 0; i < cc->key_size; i++)
+- DMEMIT("%02x", cc->key[i]);
++ else {
++ for (i = 0; i < cc->key_size; i++) {
++ DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
++ hex2asc(cc->key[i] & 0xf));
++ }
++ }
+ } else
+ DMEMIT("-");
+
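[hex2asc() converts a nibble to its ASCII hex digit without branching or a lookup table, sidestepping the per-byte format-string machinery and data-dependent paths of "%02x" when the key is printed. For c <= 9, (9 - c) stays small, the shift yields 0, and the result is '0' + c; for c >= 10 the unsigned subtraction wraps, the shift leaves low bits all-ones, and the 0x27 mask adds 39, landing on 'a'..'f' (since 'a' == '0' + 10 + 39). A standalone self-test:

#include <assert.h>
#include <stdio.h>

/* Same helper as in the hunk above: branch-free nibble to ASCII. */
static char hex2asc(unsigned char c)
{
	return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
}

int main(void)
{
	const char *ref = "0123456789abcdef";

	for (unsigned char c = 0; c < 16; c++)
		assert(hex2asc(c) == ref[c]);
	printf("hex2asc ok\n");
	return 0;
}
]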
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 36ae30b73a6e0..3d5a0ce123c90 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -4494,8 +4494,6 @@ try_smaller_buffer:
+ }
+
+ if (should_write_sb) {
+- int r;
+-
+ init_journal(ic, 0, ic->journal_sections, 0);
+ r = dm_integrity_failed(ic);
+ if (unlikely(r)) {
+diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
+index 0e039a8c0bf2e..a3f2050b9c9b4 100644
+--- a/drivers/md/dm-stats.c
++++ b/drivers/md/dm-stats.c
+@@ -225,6 +225,7 @@ void dm_stats_cleanup(struct dm_stats *stats)
+ atomic_read(&shared->in_flight[READ]),
+ atomic_read(&shared->in_flight[WRITE]));
+ }
++ cond_resched();
+ }
+ dm_stat_free(&s->rcu_head);
+ }
+@@ -330,6 +331,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
+ for (ni = 0; ni < n_entries; ni++) {
+ atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
+ atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
++ cond_resched();
+ }
+
+ if (s->n_histogram_entries) {
+@@ -342,6 +344,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
+ for (ni = 0; ni < n_entries; ni++) {
+ s->stat_shared[ni].tmp.histogram = hi;
+ hi += s->n_histogram_entries + 1;
++ cond_resched();
+ }
+ }
+
+@@ -362,6 +365,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
+ for (ni = 0; ni < n_entries; ni++) {
+ p[ni].histogram = hi;
+ hi += s->n_histogram_entries + 1;
++ cond_resched();
+ }
+ }
+ }
+@@ -497,6 +501,7 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
+ }
+ DMEMIT("\n");
+ }
++ cond_resched();
+ }
+ mutex_unlock(&stats->mutex);
+
+@@ -774,6 +779,7 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
+ local_irq_enable();
+ }
+ }
++ cond_resched();
+ }
+ }
+
+@@ -889,6 +895,8 @@ static int dm_stats_print(struct dm_stats *stats, int id,
+
+ if (unlikely(sz + 1 >= maxlen))
+ goto buffer_overflow;
++
++ cond_resched();
+ }
+
+ if (clear)
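[The sprinkled cond_resched() calls keep these loops, whose trip counts scale with user-configured region sizes, from monopolizing a CPU on non-preemptible kernels. The pattern is generic; a minimal kernel-context sketch (not compilable outside the kernel tree):

#include <linux/sched.h>

/* A loop with a user-controlled trip count must not hog the CPU on
 * !CONFIG_PREEMPT kernels, so give the scheduler a chance once per
 * iteration.  cond_resched() is nearly free when no other task is
 * runnable, and it may sleep, so this is valid in process context
 * only, never under a spinlock or in IRQ context. */
static void zero_many(unsigned long *buf, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		buf[i] = 0;
		cond_resched();
	}
}
]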
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 80133aae0db37..d6dbd47492a85 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -1312,6 +1312,7 @@ bad:
+
+ static struct target_type verity_target = {
+ .name = "verity",
++ .features = DM_TARGET_IMMUTABLE,
+ .version = {1, 8, 0},
+ .module = THIS_MODULE,
+ .ctr = verity_ctr,
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 351d341a1ffa4..d6ce5a09fd358 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -686,17 +686,17 @@ int raid5_calc_degraded(struct r5conf *conf)
+ return degraded;
+ }
+
+-static int has_failed(struct r5conf *conf)
++static bool has_failed(struct r5conf *conf)
+ {
+- int degraded;
++ int degraded = conf->mddev->degraded;
+
+- if (conf->mddev->reshape_position == MaxSector)
+- return conf->mddev->degraded > conf->max_degraded;
++ if (test_bit(MD_BROKEN, &conf->mddev->flags))
++ return true;
+
+- degraded = raid5_calc_degraded(conf);
+- if (degraded > conf->max_degraded)
+- return 1;
+- return 0;
++ if (conf->mddev->reshape_position != MaxSector)
++ degraded = raid5_calc_degraded(conf);
++
++ return degraded > conf->max_degraded;
+ }
+
+ struct stripe_head *
+@@ -2863,34 +2863,31 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
+ unsigned long flags;
+ pr_debug("raid456: error called\n");
+
++ pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n",
++ mdname(mddev), bdevname(rdev->bdev, b));
++
+ spin_lock_irqsave(&conf->device_lock, flags);
++ set_bit(Faulty, &rdev->flags);
++ clear_bit(In_sync, &rdev->flags);
++ mddev->degraded = raid5_calc_degraded(conf);
+
+- if (test_bit(In_sync, &rdev->flags) &&
+- mddev->degraded == conf->max_degraded) {
+- /*
+- * Don't allow to achieve failed state
+- * Don't try to recover this device
+- */
++ if (has_failed(conf)) {
++ set_bit(MD_BROKEN, &conf->mddev->flags);
+ conf->recovery_disabled = mddev->recovery_disabled;
+- spin_unlock_irqrestore(&conf->device_lock, flags);
+- return;
++
++ pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n",
++ mdname(mddev), mddev->degraded, conf->raid_disks);
++ } else {
++ pr_crit("md/raid:%s: Operation continuing on %d devices.\n",
++ mdname(mddev), conf->raid_disks - mddev->degraded);
+ }
+
+- set_bit(Faulty, &rdev->flags);
+- clear_bit(In_sync, &rdev->flags);
+- mddev->degraded = raid5_calc_degraded(conf);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+
+ set_bit(Blocked, &rdev->flags);
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+- pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
+- "md/raid:%s: Operation continuing on %d devices.\n",
+- mdname(mddev),
+- bdevname(rdev->bdev, b),
+- mdname(mddev),
+- conf->raid_disks - mddev->degraded);
+ r5c_update_on_rdev_error(mddev, rdev);
+ }
+
+diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
+index be3f6ea555597..84279a6808730 100644
+--- a/drivers/media/i2c/imx412.c
++++ b/drivers/media/i2c/imx412.c
+@@ -1011,7 +1011,7 @@ static int imx412_power_on(struct device *dev)
+ struct imx412 *imx412 = to_imx412(sd);
+ int ret;
+
+- gpiod_set_value_cansleep(imx412->reset_gpio, 1);
++ gpiod_set_value_cansleep(imx412->reset_gpio, 0);
+
+ ret = clk_prepare_enable(imx412->inclk);
+ if (ret) {
+@@ -1024,7 +1024,7 @@ static int imx412_power_on(struct device *dev)
+ return 0;
+
+ error_reset:
+- gpiod_set_value_cansleep(imx412->reset_gpio, 0);
++ gpiod_set_value_cansleep(imx412->reset_gpio, 1);
+
+ return ret;
+ }
+@@ -1040,10 +1040,10 @@ static int imx412_power_off(struct device *dev)
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct imx412 *imx412 = to_imx412(sd);
+
+- gpiod_set_value_cansleep(imx412->reset_gpio, 0);
+-
+ clk_disable_unprepare(imx412->inclk);
+
++ gpiod_set_value_cansleep(imx412->reset_gpio, 1);
++
+ return 0;
+ }
+
+diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
+index cea7b2e2ce969..53764f3c0c7e4 100644
+--- a/drivers/net/ipa/ipa_endpoint.c
++++ b/drivers/net/ipa/ipa_endpoint.c
+@@ -130,9 +130,10 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
+ */
+ if (data->endpoint.config.aggregation) {
+ limit += SZ_1K * aggr_byte_limit_max(ipa->version);
+- if (buffer_size > limit) {
++ if (buffer_size - NET_SKB_PAD > limit) {
+ dev_err(dev, "RX buffer size too large for aggregated RX endpoint %u (%u > %u)\n",
+- data->endpoint_id, buffer_size, limit);
++ data->endpoint_id,
++ buffer_size - NET_SKB_PAD, limit);
+
+ return false;
+ }
+@@ -739,6 +740,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
+ if (endpoint->data->aggregation) {
+ if (!endpoint->toward_ipa) {
+ const struct ipa_endpoint_rx_data *rx_data;
++ u32 buffer_size;
+ bool close_eof;
+ u32 limit;
+
+@@ -746,7 +748,8 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
+ val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
+ val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
+
+- limit = ipa_aggr_size_kb(rx_data->buffer_size);
++ buffer_size = rx_data->buffer_size;
++ limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD);
+ val |= aggr_byte_limit_encoded(version, limit);
+
+ limit = IPA_AGGR_TIME_LIMIT;
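[Both IPA hunks subtract NET_SKB_PAD because the configured buffer_size includes socket-buffer headroom, while the hardware aggregation limit must be programmed against the usable payload area only. In rough numbers (NET_SKB_PAD is typically 64 but is arch and config dependent, and aggr_size_kb() below is a simplified stand-in for the driver's rounding helper):

#include <stdio.h>

#define SZ_1K       1024u
#define NET_SKB_PAD 64u	/* typical value; not universal */

/* simplified stand-in for ipa_aggr_size_kb(): whole KB that fit */
static unsigned int aggr_size_kb(unsigned int rx_buffer_size)
{
	return rx_buffer_size / SZ_1K;
}

int main(void)
{
	unsigned int buffer_size = 8192;

	/* before the fix, headroom was wrongly counted as payload */
	printf("naive limit:  %u KB\n", aggr_size_kb(buffer_size));
	printf("actual limit: %u KB\n",
	       aggr_size_kb(buffer_size - NET_SKB_PAD));
	return 0;
}
]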
+diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
+index 03f1423071749..9f42f25fab920 100644
+--- a/fs/exfat/balloc.c
++++ b/fs/exfat/balloc.c
+@@ -148,7 +148,9 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync)
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+- WARN_ON(clu < EXFAT_FIRST_CLUSTER);
++ if (!is_valid_cluster(sbi, clu))
++ return -EINVAL;
++
+ ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
+ i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
+ b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
+@@ -166,7 +168,9 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_mount_options *opts = &sbi->options;
+
+- WARN_ON(clu < EXFAT_FIRST_CLUSTER);
++ if (!is_valid_cluster(sbi, clu))
++ return;
++
+ ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
+ i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
+ b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
+diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
+index c6800b8809203..42d06c68d5c5e 100644
+--- a/fs/exfat/exfat_fs.h
++++ b/fs/exfat/exfat_fs.h
+@@ -381,6 +381,12 @@ static inline int exfat_sector_to_cluster(struct exfat_sb_info *sbi,
+ EXFAT_RESERVED_CLUSTERS;
+ }
+
++static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
++ unsigned int clus)
++{
++ return clus >= EXFAT_FIRST_CLUSTER && clus < sbi->num_clusters;
++}
++
+ /* super.c */
+ int exfat_set_volume_dirty(struct super_block *sb);
+ int exfat_clear_volume_dirty(struct super_block *sb);
+diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
+index a3464e56a7e16..421c273531049 100644
+--- a/fs/exfat/fatent.c
++++ b/fs/exfat/fatent.c
+@@ -81,12 +81,6 @@ int exfat_ent_set(struct super_block *sb, unsigned int loc,
+ return 0;
+ }
+
+-static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
+- unsigned int clus)
+-{
+- return clus >= EXFAT_FIRST_CLUSTER && clus < sbi->num_clusters;
+-}
+-
+ int exfat_ent_get(struct super_block *sb, unsigned int loc,
+ unsigned int *content)
+ {
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 7eefa16ed381b..8f8cd6e2d4dbc 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -841,6 +841,7 @@ static inline bool nfs_error_is_fatal_on_server(int err)
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
++ case -ENOMEM:
+ return false;
+ }
+ return nfs_error_is_fatal(err);
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 234e852fcdfad..d6e1f95ccfd8a 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -7330,16 +7330,12 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
+ if (sop->so_is_open_owner || !same_owner_str(sop, owner))
+ continue;
+
+- /* see if there are still any locks associated with it */
+- lo = lockowner(sop);
+- list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
+- if (check_for_locks(stp->st_stid.sc_file, lo)) {
+- status = nfserr_locks_held;
+- spin_unlock(&clp->cl_lock);
+- return status;
+- }
++ if (atomic_read(&sop->so_count) != 1) {
++ spin_unlock(&clp->cl_lock);
++ return nfserr_locks_held;
+ }
+
++ lo = lockowner(sop);
+ nfs4_get_stateowner(sop);
+ break;
+ }
+diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
+index 278dcf5024102..b2b54c4553f91 100644
+--- a/fs/ntfs3/super.c
++++ b/fs/ntfs3/super.c
+@@ -668,9 +668,11 @@ static u32 format_size_gb(const u64 bytes, u32 *mb)
+
+ static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
+ {
+- return boot->sectors_per_clusters <= 0x80
+- ? boot->sectors_per_clusters
+- : (1u << (0 - boot->sectors_per_clusters));
++ if (boot->sectors_per_clusters <= 0x80)
++ return boot->sectors_per_clusters;
++ if (boot->sectors_per_clusters >= 0xf4) /* limit shift to 2MB max */
++ return 1U << (0 - boot->sectors_per_clusters);
++ return -EINVAL;
+ }
+
+ /*
+@@ -713,6 +715,8 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+
+ /* cluster size: 512, 1K, 2K, 4K, ... 2M */
+ sct_per_clst = true_sectors_per_clst(boot);
++ if ((int)sct_per_clst < 0)
++ goto out;
+ if (!is_power_of_2(sct_per_clst))
+ goto out;
+
+diff --git a/fs/pipe.c b/fs/pipe.c
+index e140ea150bbb1..74ae9fafd25a1 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -653,7 +653,7 @@ pipe_poll(struct file *filp, poll_table *wait)
+ unsigned int head, tail;
+
+ /* Epoll has some historical nasty semantics, this enables them */
+- pipe->poll_usage = 1;
++ WRITE_ONCE(pipe->poll_usage, true);
+
+ /*
+ * Reading pipe state only -- no need for acquiring the semaphore.
+@@ -1245,30 +1245,33 @@ unsigned int round_pipe_size(unsigned long size)
+
+ /*
+ * Resize the pipe ring to a number of slots.
++ *
++ * Note the pipe can be reduced in capacity, but only if the current
++ * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
++ * returned instead.
+ */
+ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
+ {
+ struct pipe_buffer *bufs;
+ unsigned int head, tail, mask, n;
+
+- /*
+- * We can shrink the pipe, if arg is greater than the ring occupancy.
+- * Since we don't expect a lot of shrink+grow operations, just free and
+- * allocate again like we would do for growing. If the pipe currently
+- * contains more buffers than arg, then return busy.
+- */
+- mask = pipe->ring_size - 1;
+- head = pipe->head;
+- tail = pipe->tail;
+- n = pipe_occupancy(pipe->head, pipe->tail);
+- if (nr_slots < n)
+- return -EBUSY;
+-
+ bufs = kcalloc(nr_slots, sizeof(*bufs),
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
+ if (unlikely(!bufs))
+ return -ENOMEM;
+
++ spin_lock_irq(&pipe->rd_wait.lock);
++ mask = pipe->ring_size - 1;
++ head = pipe->head;
++ tail = pipe->tail;
++
++ n = pipe_occupancy(head, tail);
++ if (nr_slots < n) {
++ spin_unlock_irq(&pipe->rd_wait.lock);
++ kfree(bufs);
++ return -EBUSY;
++ }
++
+ /*
+ * The pipe array wraps around, so just start the new one at zero
+ * and adjust the indices.
+@@ -1300,6 +1303,8 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
+ pipe->tail = tail;
+ pipe->head = head;
+
++ spin_unlock_irq(&pipe->rd_wait.lock);
++
+ /* This might have made more room for writers */
+ wake_up_interruptible(&pipe->wr_wait);
+ return 0;
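[The reordered function is an instance of a common locking pattern: do the sleeping allocation first, then take the lock, validate, and either commit or back out. That way pipe->head and pipe->tail can no longer move between the occupancy check and the swap of the buffer array. The same shape in miniature, with a pthread mutex standing in for pipe->rd_wait.lock:

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct ring {
	pthread_mutex_t lock;
	void **slots;
	unsigned int nr_slots, occupancy;
};

static int ring_resize(struct ring *r, unsigned int nr_slots)
{
	/* allocate outside the lock: allocation may sleep */
	void **bufs = calloc(nr_slots, sizeof(*bufs));

	if (!bufs)
		return -ENOMEM;

	pthread_mutex_lock(&r->lock);
	if (nr_slots < r->occupancy) {	/* state may have changed */
		pthread_mutex_unlock(&r->lock);
		free(bufs);		/* back out the speculation */
		return -EBUSY;
	}
	/* ... copy live entries, then commit ... */
	free(r->slots);
	r->slots = bufs;
	r->nr_slots = nr_slots;
	pthread_mutex_unlock(&r->lock);
	return 0;
}
]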
+diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
+index 493e632584970..7ea18d4da84b8 100644
+--- a/include/linux/bpf_local_storage.h
++++ b/include/linux/bpf_local_storage.h
+@@ -143,9 +143,9 @@ void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
+
+ bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem,
+- bool uncharge_omem);
++ bool uncharge_omem, bool use_trace_rcu);
+
+-void bpf_selem_unlink(struct bpf_local_storage_elem *selem);
++void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu);
+
+ void bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *selem);
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+index c00c618ef290d..cb0fd633a6106 100644
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -71,7 +71,7 @@ struct pipe_inode_info {
+ unsigned int files;
+ unsigned int r_counter;
+ unsigned int w_counter;
+- unsigned int poll_usage;
++ bool poll_usage;
+ struct page *tmp_page;
+ struct fasync_struct *fasync_readers;
+ struct fasync_struct *fasync_writers;
+diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
+index 13807ea94cd2b..2d524782f53b7 100644
+--- a/include/net/netfilter/nf_conntrack_core.h
++++ b/include/net/netfilter/nf_conntrack_core.h
+@@ -58,8 +58,13 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
+ int ret = NF_ACCEPT;
+
+ if (ct) {
+- if (!nf_ct_is_confirmed(ct))
++ if (!nf_ct_is_confirmed(ct)) {
+ ret = __nf_conntrack_confirm(skb);
++
++ if (ret == NF_ACCEPT)
++ ct = (struct nf_conn *)skb_nfct(skb);
++ }
++
+ if (likely(ret == NF_ACCEPT))
+ nf_ct_deliver_cached_events(ct);
+ }
+diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
+index 96be8d518885c..10424a1cda51d 100644
+--- a/kernel/bpf/bpf_inode_storage.c
++++ b/kernel/bpf/bpf_inode_storage.c
+@@ -90,7 +90,7 @@ void bpf_inode_storage_free(struct inode *inode)
+ */
+ bpf_selem_unlink_map(selem);
+ free_inode_storage = bpf_selem_unlink_storage_nolock(
+- local_storage, selem, false);
++ local_storage, selem, false, false);
+ }
+ raw_spin_unlock_bh(&local_storage->lock);
+ rcu_read_unlock();
+@@ -149,7 +149,7 @@ static int inode_storage_delete(struct inode *inode, struct bpf_map *map)
+ if (!sdata)
+ return -ENOENT;
+
+- bpf_selem_unlink(SELEM(sdata));
++ bpf_selem_unlink(SELEM(sdata), true);
+
+ return 0;
+ }
+diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
+index 01aa2b51ec4df..8ce40fd869f6a 100644
+--- a/kernel/bpf/bpf_local_storage.c
++++ b/kernel/bpf/bpf_local_storage.c
+@@ -106,7 +106,7 @@ static void bpf_selem_free_rcu(struct rcu_head *rcu)
+ */
+ bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem,
+- bool uncharge_mem)
++ bool uncharge_mem, bool use_trace_rcu)
+ {
+ struct bpf_local_storage_map *smap;
+ bool free_local_storage;
+@@ -150,11 +150,16 @@ bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
+ SDATA(selem))
+ RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
+
+- call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
++ if (use_trace_rcu)
++ call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
++ else
++ kfree_rcu(selem, rcu);
++
+ return free_local_storage;
+ }
+
+-static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
++static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
++ bool use_trace_rcu)
+ {
+ struct bpf_local_storage *local_storage;
+ bool free_local_storage = false;
+@@ -169,12 +174,16 @@ static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
+ raw_spin_lock_irqsave(&local_storage->lock, flags);
+ if (likely(selem_linked_to_storage(selem)))
+ free_local_storage = bpf_selem_unlink_storage_nolock(
+- local_storage, selem, true);
++ local_storage, selem, true, use_trace_rcu);
+ raw_spin_unlock_irqrestore(&local_storage->lock, flags);
+
+- if (free_local_storage)
+- call_rcu_tasks_trace(&local_storage->rcu,
++ if (free_local_storage) {
++ if (use_trace_rcu)
++ call_rcu_tasks_trace(&local_storage->rcu,
+ bpf_local_storage_free_rcu);
++ else
++ kfree_rcu(local_storage, rcu);
++ }
+ }
+
+ void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
+@@ -214,14 +223,14 @@ void bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ raw_spin_unlock_irqrestore(&b->lock, flags);
+ }
+
+-void bpf_selem_unlink(struct bpf_local_storage_elem *selem)
++void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu)
+ {
+ /* Always unlink from map before unlinking from local_storage
+ * because selem will be freed after successfully unlinked from
+ * the local_storage.
+ */
+ bpf_selem_unlink_map(selem);
+- __bpf_selem_unlink_storage(selem);
++ __bpf_selem_unlink_storage(selem, use_trace_rcu);
+ }
+
+ struct bpf_local_storage_data *
+@@ -466,7 +475,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
+ if (old_sdata) {
+ bpf_selem_unlink_map(SELEM(old_sdata));
+ bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
+- false);
++ false, true);
+ }
+
+ unlock:
+@@ -548,7 +557,7 @@ void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
+ migrate_disable();
+ __this_cpu_inc(*busy_counter);
+ }
+- bpf_selem_unlink(selem);
++ bpf_selem_unlink(selem, false);
+ if (busy_counter) {
+ __this_cpu_dec(*busy_counter);
+ migrate_enable();
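[The new use_trace_rcu flag lets each caller pick the reclamation flavor: deletion paths that can race with sleepable BPF programs keep the heavier call_rcu_tasks_trace() grace period, while teardown paths where no such program can still reach the element (map free, owner destruction) fall back to plain kfree_rcu(). A condensed kernel-context view of the choice, with the surrounding unlink logic elided:

/* Sketch only: the tasks-trace grace period additionally waits for
 * sleepable BPF programs that may still hold a pointer to the
 * element; plain RCU is enough once no such program can reach it. */
static void selem_free(struct bpf_local_storage_elem *selem,
		       bool use_trace_rcu)
{
	if (use_trace_rcu)
		call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
	else
		kfree_rcu(selem, rcu);
}
]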
+diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
+index 6638a0ecc3d21..57904263a710f 100644
+--- a/kernel/bpf/bpf_task_storage.c
++++ b/kernel/bpf/bpf_task_storage.c
+@@ -102,7 +102,7 @@ void bpf_task_storage_free(struct task_struct *task)
+ */
+ bpf_selem_unlink_map(selem);
+ free_task_storage = bpf_selem_unlink_storage_nolock(
+- local_storage, selem, false);
++ local_storage, selem, false, false);
+ }
+ raw_spin_unlock_irqrestore(&local_storage->lock, flags);
+ bpf_task_storage_unlock();
+@@ -192,7 +192,7 @@ static int task_storage_delete(struct task_struct *task, struct bpf_map *map)
+ if (!sdata)
+ return -ENOENT;
+
+- bpf_selem_unlink(SELEM(sdata));
++ bpf_selem_unlink(SELEM(sdata), true);
+
+ return 0;
+ }
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 13e9dbeeedf36..05e701f0da81d 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -873,7 +873,7 @@ static size_t select_bpf_prog_pack_size(void)
+ return size;
+ }
+
+-static struct bpf_prog_pack *alloc_new_pack(void)
++static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
+ {
+ struct bpf_prog_pack *pack;
+
+@@ -886,6 +886,7 @@ static struct bpf_prog_pack *alloc_new_pack(void)
+ kfree(pack);
+ return NULL;
+ }
++ bpf_fill_ill_insns(pack->ptr, bpf_prog_pack_size);
+ bitmap_zero(pack->bitmap, bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE);
+ list_add_tail(&pack->list, &pack_list);
+
+@@ -895,7 +896,7 @@ static struct bpf_prog_pack *alloc_new_pack(void)
+ return pack;
+ }
+
+-static void *bpf_prog_pack_alloc(u32 size)
++static void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
+ {
+ unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
+ struct bpf_prog_pack *pack;
+@@ -910,6 +911,7 @@ static void *bpf_prog_pack_alloc(u32 size)
+ size = round_up(size, PAGE_SIZE);
+ ptr = module_alloc(size);
+ if (ptr) {
++ bpf_fill_ill_insns(ptr, size);
+ set_vm_flush_reset_perms(ptr);
+ set_memory_ro((unsigned long)ptr, size / PAGE_SIZE);
+ set_memory_x((unsigned long)ptr, size / PAGE_SIZE);
+@@ -923,7 +925,7 @@ static void *bpf_prog_pack_alloc(u32 size)
+ goto found_free_area;
+ }
+
+- pack = alloc_new_pack();
++ pack = alloc_new_pack(bpf_fill_ill_insns);
+ if (!pack)
+ goto out;
+
+@@ -1102,7 +1104,7 @@ bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
+
+ if (bpf_jit_charge_modmem(size))
+ return NULL;
+- ro_header = bpf_prog_pack_alloc(size);
++ ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
+ if (!ro_header) {
+ bpf_jit_uncharge_modmem(size);
+ return NULL;
+@@ -1434,6 +1436,16 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
+ insn = clone->insnsi;
+
+ for (i = 0; i < insn_cnt; i++, insn++) {
++ if (bpf_pseudo_func(insn)) {
++ /* ld_imm64 with an address of bpf subprog is not
++ * a user controlled constant. Don't randomize it,
++ * since it will conflict with jit_subprogs() logic.
++ */
++ insn++;
++ i++;
++ continue;
++ }
++
+ /* We temporarily need to hold the original ld64 insn
+ * so that we can still access the first part in the
+ * second blinding run.
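[bpf_fill_ill_insns is the arch-provided bpf_jit_fill_hole_t callback; threading it into pack allocation means freshly mapped executable memory is poisoned with trapping instructions before any program is written into it, so a stray jump into unused space hits an illegal opcode instead of leftover bytes. On x86 the filler amounts to a memset of 0xcc; a minimal sketch of that idea:

#include <string.h>

/* Sketch of an arch's bpf_jit_fill_hole_t callback.  0xcc is the x86
 * INT3 breakpoint opcode, so any execution of a "hole" traps
 * immediately rather than running whatever the allocator left there. */
static void jit_fill_hole(void *area, unsigned int size)
{
	memset(area, 0xcc, size);
}
]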
+diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
+index 34725bfa1e97b..5c6c96d0e634d 100644
+--- a/kernel/bpf/stackmap.c
++++ b/kernel/bpf/stackmap.c
+@@ -100,7 +100,6 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
+ return ERR_PTR(-E2BIG);
+
+ cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
+- cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
+ smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
+ if (!smap)
+ return ERR_PTR(-ENOMEM);
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index ada97751ae1b2..5d8bfb5ef239d 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -411,7 +411,7 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
+ {
+ enum bpf_tramp_prog_type kind;
+ int err = 0;
+- int cnt;
++ int cnt = 0, i;
+
+ kind = bpf_attach_type_to_tramp(prog);
+ mutex_lock(&tr->mutex);
+@@ -422,7 +422,10 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
+ err = -EBUSY;
+ goto out;
+ }
+- cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
++
++ for (i = 0; i < BPF_TRAMP_MAX; i++)
++ cnt += tr->progs_cnt[i];
++
+ if (kind == BPF_TRAMP_REPLACE) {
+ /* Cannot attach extension if fentry/fexit are in use. */
+ if (cnt) {
+@@ -500,16 +503,19 @@ out:
+
+ void bpf_trampoline_put(struct bpf_trampoline *tr)
+ {
++ int i;
++
+ if (!tr)
+ return;
+ mutex_lock(&trampoline_mutex);
+ if (!refcount_dec_and_test(&tr->refcnt))
+ goto out;
+ WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
+- if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
+- goto out;
+- if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
+- goto out;
++
++ for (i = 0; i < BPF_TRAMP_MAX; i++)
++ if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
++ goto out;
++
+ /* This code will be executed even when the last bpf_tramp_image
+ * is alive. All progs are detached from the trampoline and the
+ * trampoline image is patched with jmp into epilogue to skip
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index d175b70067b30..9c1a02b82ecd0 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -4861,6 +4861,11 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
+ return check_packet_access(env, regno, reg->off, access_size,
+ zero_size_allowed);
+ case PTR_TO_MAP_KEY:
++ if (meta && meta->raw_mode) {
++ verbose(env, "R%d cannot write into %s\n", regno,
++ reg_type_str(env, reg->type));
++ return -EACCES;
++ }
+ return check_mem_region_access(env, regno, reg->off, access_size,
+ reg->map_ptr->key_size, false);
+ case PTR_TO_MAP_VALUE:
+@@ -4871,13 +4876,23 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
+ return check_map_access(env, regno, reg->off, access_size,
+ zero_size_allowed);
+ case PTR_TO_MEM:
++ if (type_is_rdonly_mem(reg->type)) {
++ if (meta && meta->raw_mode) {
++ verbose(env, "R%d cannot write into %s\n", regno,
++ reg_type_str(env, reg->type));
++ return -EACCES;
++ }
++ }
+ return check_mem_region_access(env, regno, reg->off,
+ access_size, reg->mem_size,
+ zero_size_allowed);
+ case PTR_TO_BUF:
+ if (type_is_rdonly_mem(reg->type)) {
+- if (meta && meta->raw_mode)
++ if (meta && meta->raw_mode) {
++ verbose(env, "R%d cannot write into %s\n", regno,
++ reg_type_str(env, reg->type));
+ return -EACCES;
++ }
+
+ max_access = &env->prog->aux->max_rdonly_access;
+ } else {
+@@ -4919,8 +4934,7 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
+ * out. Only upper bounds can be learned because retval is an
+ * int type and negative retvals are allowed.
+ */
+- if (meta)
+- meta->msize_max_value = reg->umax_value;
++ meta->msize_max_value = reg->umax_value;
+
+ /* The register is SCALAR_VALUE; the access check
+ * happens using its boundaries.
+@@ -4963,24 +4977,33 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
+ int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+ u32 regno, u32 mem_size)
+ {
++ bool may_be_null = type_may_be_null(reg->type);
++ struct bpf_reg_state saved_reg;
++ struct bpf_call_arg_meta meta;
++ int err;
++
+ if (register_is_null(reg))
+ return 0;
+
+- if (type_may_be_null(reg->type)) {
+- /* Assuming that the register contains a value check if the memory
+- * access is safe. Temporarily save and restore the register's state as
+- * the conversion shouldn't be visible to a caller.
+- */
+- const struct bpf_reg_state saved_reg = *reg;
+- int rv;
+-
++ memset(&meta, 0, sizeof(meta));
++ /* Assuming that the register contains a value check if the memory
++ * access is safe. Temporarily save and restore the register's state as
++ * the conversion shouldn't be visible to a caller.
++ */
++ if (may_be_null) {
++ saved_reg = *reg;
+ mark_ptr_not_null_reg(reg);
+- rv = check_helper_mem_access(env, regno, mem_size, true, NULL);
+- *reg = saved_reg;
+- return rv;
+ }
+
+- return check_helper_mem_access(env, regno, mem_size, true, NULL);
++ err = check_helper_mem_access(env, regno, mem_size, true, &meta);
++ /* Check access for BPF_WRITE */
++ meta.raw_mode = true;
++ err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);
++
++ if (may_be_null)
++ *reg = saved_reg;
++
++ return err;
+ }
+
+ int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+@@ -4989,16 +5012,22 @@ int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state
+ struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
+ bool may_be_null = type_may_be_null(mem_reg->type);
+ struct bpf_reg_state saved_reg;
++ struct bpf_call_arg_meta meta;
+ int err;
+
+ WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);
+
++ memset(&meta, 0, sizeof(meta));
++
+ if (may_be_null) {
+ saved_reg = *mem_reg;
+ mark_ptr_not_null_reg(mem_reg);
+ }
+
+- err = check_mem_size_reg(env, reg, regno, true, NULL);
++ err = check_mem_size_reg(env, reg, regno, true, &meta);
++ /* Check access for BPF_WRITE */
++ meta.raw_mode = true;
++ err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);
+
+ if (may_be_null)
+ *mem_reg = saved_reg;
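[Both helpers now run the access check twice, once in read mode and once with meta.raw_mode set, which check_helper_mem_access() treats as a write; that is what makes the new "cannot write into" cases reachable for read-only map keys and rdonly PTR_TO_MEM. The save/mark/restore dance around the double check is a small pattern of its own. A standalone sketch of the shape, not the verifier's actual types:

/* To validate a maybe-NULL pointer, temporarily pretend it is
 * non-NULL, run the checks in both directions, then restore the
 * snapshot so callers never observe the speculation. */
struct reg_state { int type; };

#define FLAG_MAYBE_NULL 1	/* illustrative flag bit */

static int check_both_directions(struct reg_state *reg, int size,
				 int (*check)(struct reg_state *, int, int))
{
	struct reg_state saved = *reg;	/* snapshot caller-visible state */
	int err;

	reg->type &= ~FLAG_MAYBE_NULL;
	err = check(reg, size, 0 /* read */);
	if (!err)
		err = check(reg, size, 1 /* write */);

	*reg = saved;			/* speculation must not leak out */
	return err;
}
]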
+diff --git a/lib/assoc_array.c b/lib/assoc_array.c
+index 079c72e26493e..ca0b4f360c1a0 100644
+--- a/lib/assoc_array.c
++++ b/lib/assoc_array.c
+@@ -1461,6 +1461,7 @@ int assoc_array_gc(struct assoc_array *array,
+ struct assoc_array_ptr *cursor, *ptr;
+ struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
+ unsigned long nr_leaves_on_tree;
++ bool retained;
+ int keylen, slot, nr_free, next_slot, i;
+
+ pr_devel("-->%s()\n", __func__);
+@@ -1536,6 +1537,7 @@ continue_node:
+ goto descend;
+ }
+
++retry_compress:
+ pr_devel("-- compress node %p --\n", new_n);
+
+ /* Count up the number of empty slots in this node and work out the
+@@ -1553,6 +1555,7 @@ continue_node:
+ pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
+
+ /* See what we can fold in */
++ retained = false;
+ next_slot = 0;
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ struct assoc_array_shortcut *s;
+@@ -1602,9 +1605,14 @@ continue_node:
+ pr_devel("[%d] retain node %lu/%d [nx %d]\n",
+ slot, child->nr_leaves_on_branch, nr_free + 1,
+ next_slot);
++ retained = true;
+ }
+ }
+
++ if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
++ pr_devel("internal nodes remain despite enough space, retrying\n");
++ goto retry_compress;
++ }
+ pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
+
+ nr_leaves_on_tree = new_n->nr_leaves_on_branch;
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index 9152fbde33b50..5d5fc04385b8d 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -1718,11 +1718,40 @@ static enum fullness_group putback_zspage(struct size_class *class,
+ */
+ static void lock_zspage(struct zspage *zspage)
+ {
+- struct page *page = get_first_page(zspage);
++ struct page *curr_page, *page;
+
+- do {
+- lock_page(page);
+- } while ((page = get_next_page(page)) != NULL);
++ /*
++ * Pages we haven't locked yet can be migrated off the list while we're
++ * trying to lock them, so we need to be careful and only attempt to
++ * lock each page under migrate_read_lock(). Otherwise, the page we lock
++ * may no longer belong to the zspage. This means that we may wait for
++ * the wrong page to unlock, so we must take a reference to the page
++ * prior to waiting for it to unlock outside migrate_read_lock().
++ */
++ while (1) {
++ migrate_read_lock(zspage);
++ page = get_first_page(zspage);
++ if (trylock_page(page))
++ break;
++ get_page(page);
++ migrate_read_unlock(zspage);
++ wait_on_page_locked(page);
++ put_page(page);
++ }
++
++ curr_page = page;
++ while ((page = get_next_page(curr_page))) {
++ if (trylock_page(page)) {
++ curr_page = page;
++ } else {
++ get_page(page);
++ migrate_read_unlock(zspage);
++ wait_on_page_locked(page);
++ put_page(page);
++ migrate_read_lock(zspage);
++ }
++ }
++ migrate_read_unlock(zspage);
+ }
+
+ static int zs_init_fs_context(struct fs_context *fc)
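[lock_zspage() now follows the classic "trylock under a short-lived lock, wait outside it" shape: the migrate lock pins the page list only while picking a page and attempting the trylock; on failure the code takes a reference, drops the migrate lock, sleeps until the page unlocks, and retries. The reference is what keeps the waited-on page alive while the migrate lock is not held. The same shape in miniature (user-space sketch; the atomic refcount models get_page()/put_page()):

#include <pthread.h>
#include <stdatomic.h>

struct obj {
	pthread_mutex_t inner;
	atomic_int refs;
};

static void lock_obj(pthread_mutex_t *outer, struct obj *o)
{
	for (;;) {
		pthread_mutex_lock(outer);
		if (pthread_mutex_trylock(&o->inner) == 0) {
			pthread_mutex_unlock(outer);
			return;				/* locked, done */
		}
		atomic_fetch_add(&o->refs, 1);		/* "get_page()" */
		pthread_mutex_unlock(outer);
		pthread_mutex_lock(&o->inner);		/* wait outside outer */
		pthread_mutex_unlock(&o->inner);
		atomic_fetch_sub(&o->refs, 1);		/* "put_page()" */
	}
}
]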
+diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
+index e3ac363805203..83d7641ef67b0 100644
+--- a/net/core/bpf_sk_storage.c
++++ b/net/core/bpf_sk_storage.c
+@@ -40,7 +40,7 @@ static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
+ if (!sdata)
+ return -ENOENT;
+
+- bpf_selem_unlink(SELEM(sdata));
++ bpf_selem_unlink(SELEM(sdata), true);
+
+ return 0;
+ }
+@@ -75,8 +75,8 @@ void bpf_sk_storage_free(struct sock *sk)
+ * sk_storage.
+ */
+ bpf_selem_unlink_map(selem);
+- free_sk_storage = bpf_selem_unlink_storage_nolock(sk_storage,
+- selem, true);
++ free_sk_storage = bpf_selem_unlink_storage_nolock(
++ sk_storage, selem, true, false);
+ }
+ raw_spin_unlock_bh(&sk_storage->lock);
+ rcu_read_unlock();
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 64470a727ef77..966796b345e78 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1687,7 +1687,7 @@ BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
+
+ if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
+ return -EINVAL;
+- if (unlikely(offset > 0xffff))
++ if (unlikely(offset > INT_MAX))
+ return -EFAULT;
+ if (unlikely(bpf_try_make_writable(skb, offset + len)))
+ return -EFAULT;
+@@ -1722,7 +1722,7 @@ BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
+ {
+ void *ptr;
+
+- if (unlikely(offset > 0xffff))
++ if (unlikely(offset > INT_MAX))
+ goto err_clear;
+
+ ptr = skb_header_pointer(skb, offset, len, to);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index a096b9fbbbdff..b6a9208130051 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -222,12 +222,18 @@ err_register:
+ }
+
+ static void nft_netdev_unregister_hooks(struct net *net,
+- struct list_head *hook_list)
++ struct list_head *hook_list,
++ bool release_netdev)
+ {
+- struct nft_hook *hook;
++ struct nft_hook *hook, *next;
+
+- list_for_each_entry(hook, hook_list, list)
++ list_for_each_entry_safe(hook, next, hook_list, list) {
+ nf_unregister_net_hook(net, &hook->ops);
++ if (release_netdev) {
++ list_del(&hook->list);
++ kfree_rcu(hook, rcu);
++ }
++ }
+ }
+
+ static int nf_tables_register_hook(struct net *net,
+@@ -253,9 +259,10 @@ static int nf_tables_register_hook(struct net *net,
+ return nf_register_net_hook(net, &basechain->ops);
+ }
+
+-static void nf_tables_unregister_hook(struct net *net,
+- const struct nft_table *table,
+- struct nft_chain *chain)
++static void __nf_tables_unregister_hook(struct net *net,
++ const struct nft_table *table,
++ struct nft_chain *chain,
++ bool release_netdev)
+ {
+ struct nft_base_chain *basechain;
+ const struct nf_hook_ops *ops;
+@@ -270,11 +277,19 @@ static void nf_tables_unregister_hook(struct net *net,
+ return basechain->type->ops_unregister(net, ops);
+
+ if (nft_base_chain_netdev(table->family, basechain->ops.hooknum))
+- nft_netdev_unregister_hooks(net, &basechain->hook_list);
++ nft_netdev_unregister_hooks(net, &basechain->hook_list,
++ release_netdev);
+ else
+ nf_unregister_net_hook(net, &basechain->ops);
+ }
+
++static void nf_tables_unregister_hook(struct net *net,
++ const struct nft_table *table,
++ struct nft_chain *chain)
++{
++ return __nf_tables_unregister_hook(net, table, chain, false);
++}
++
+ static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *trans)
+ {
+ struct nftables_pernet *nft_net = nft_pernet(net);
+@@ -2873,27 +2888,31 @@ static struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
+
+ err = nf_tables_expr_parse(ctx, nla, &expr_info);
+ if (err < 0)
+- goto err1;
++ goto err_expr_parse;
++
++ err = -EOPNOTSUPP;
++ if (!(expr_info.ops->type->flags & NFT_EXPR_STATEFUL))
++ goto err_expr_stateful;
+
+ err = -ENOMEM;
+ expr = kzalloc(expr_info.ops->size, GFP_KERNEL_ACCOUNT);
+ if (expr == NULL)
+- goto err2;
++ goto err_expr_stateful;
+
+ err = nf_tables_newexpr(ctx, &expr_info, expr);
+ if (err < 0)
+- goto err3;
++ goto err_expr_new;
+
+ return expr;
+-err3:
++err_expr_new:
+ kfree(expr);
+-err2:
++err_expr_stateful:
+ owner = expr_info.ops->type->owner;
+ if (expr_info.ops->type->release_ops)
+ expr_info.ops->type->release_ops(expr_info.ops);
+
+ module_put(owner);
+-err1:
++err_expr_parse:
+ return ERR_PTR(err);
+ }
+
+@@ -4242,6 +4261,9 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
+ u32 len;
+ int err;
+
++ if (desc->field_count >= ARRAY_SIZE(desc->field_len))
++ return -E2BIG;
++
+ err = nla_parse_nested_deprecated(tb, NFTA_SET_FIELD_MAX, attr,
+ nft_concat_policy, NULL);
+ if (err < 0)
+@@ -4251,9 +4273,8 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
+ return -EINVAL;
+
+ len = ntohl(nla_get_be32(tb[NFTA_SET_FIELD_LEN]));
+- 
+- if (len * BITS_PER_BYTE / 32 > NFT_REG32_COUNT)
+- return -E2BIG;
++ if (!len || len > U8_MAX)
++ return -EINVAL;
+
+ desc->field_len[desc->field_count++] = len;
+
+@@ -4264,7 +4285,8 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
+ const struct nlattr *nla)
+ {
+ struct nlattr *attr;
+- int rem, err;
++ u32 num_regs = 0;
++ int rem, err, i;
+
+ nla_for_each_nested(attr, nla, rem) {
+ if (nla_type(attr) != NFTA_LIST_ELEM)
+@@ -4275,6 +4297,12 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
+ return err;
+ }
+
++ for (i = 0; i < desc->field_count; i++)
++ num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
++
++ if (num_regs > NFT_REG32_COUNT)
++ return -E2BIG;
++
+ return 0;
+ }
+
+@@ -5413,9 +5441,6 @@ struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx,
+ return expr;
+
+ err = -EOPNOTSUPP;
+- if (!(expr->ops->type->flags & NFT_EXPR_STATEFUL))
+- goto err_set_elem_expr;
+-
+ if (expr->ops->type->flags & NFT_EXPR_GC) {
+ if (set->flags & NFT_SET_TIMEOUT)
+ goto err_set_elem_expr;
+@@ -7291,13 +7316,25 @@ static void nft_unregister_flowtable_hook(struct net *net,
+ FLOW_BLOCK_UNBIND);
+ }
+
+-static void nft_unregister_flowtable_net_hooks(struct net *net,
+- struct list_head *hook_list)
++static void __nft_unregister_flowtable_net_hooks(struct net *net,
++ struct list_head *hook_list,
++ bool release_netdev)
+ {
+- struct nft_hook *hook;
++ struct nft_hook *hook, *next;
+
+- list_for_each_entry(hook, hook_list, list)
++ list_for_each_entry_safe(hook, next, hook_list, list) {
+ nf_unregister_net_hook(net, &hook->ops);
++ if (release_netdev) {
++ list_del(&hook->list);
++ kfree_rcu(hook);
++ }
++ }
++}
++
++static void nft_unregister_flowtable_net_hooks(struct net *net,
++ struct list_head *hook_list)
++{
++ __nft_unregister_flowtable_net_hooks(net, hook_list, false);
+ }
+
+ static int nft_register_flowtable_net_hooks(struct net *net,
+@@ -9741,9 +9778,10 @@ static void __nft_release_hook(struct net *net, struct nft_table *table)
+ struct nft_chain *chain;
+
+ list_for_each_entry(chain, &table->chains, list)
+- nf_tables_unregister_hook(net, table, chain);
++ __nf_tables_unregister_hook(net, table, chain, true);
+ list_for_each_entry(flowtable, &table->flowtables, list)
+- nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list);
++ __nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list,
++ true);
+ }
+
+ static void __nft_release_hooks(struct net *net)
+@@ -9882,7 +9920,11 @@ static int __net_init nf_tables_init_net(struct net *net)
+
+ static void __net_exit nf_tables_pre_exit_net(struct net *net)
+ {
++ struct nftables_pernet *nft_net = nft_pernet(net);
++
++ mutex_lock(&nft_net->commit_mutex);
+ __nft_release_hooks(net);
++ mutex_unlock(&nft_net->commit_mutex);
+ }
+
+ static void __net_exit nf_tables_exit_net(struct net *net)
+diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
+index 04ea8b9bf2028..981addb2d0515 100644
+--- a/net/netfilter/nft_limit.c
++++ b/net/netfilter/nft_limit.c
+@@ -213,6 +213,8 @@ static int nft_limit_pkts_clone(struct nft_expr *dst, const struct nft_expr *src
+ 	struct nft_limit_priv_pkts *priv_dst = nft_expr_priv(dst);
+ 	struct nft_limit_priv_pkts *priv_src = nft_expr_priv(src);
+ 
++	priv_dst->cost = priv_src->cost;
++
+ 	return nft_limit_clone(&priv_dst->limit, &priv_src->limit);
+ }
+ 
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index 4dfe76416794f..33db334e65566 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -572,6 +572,17 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip,
+ 		/* continue processing */
+ 	}
+ 
++	/* FIXME - TEAC devices require the immediate interface setup */
++	if (USB_ID_VENDOR(chip->usb_id) == 0x0644) {
++		bool cur_base_48k = (rate % 48000 == 0);
++		bool prev_base_48k = (prev_rate % 48000 == 0);
++		if (cur_base_48k != prev_base_48k) {
++			usb_set_interface(chip->dev, fmt->iface, fmt->altsetting);
++			if (chip->quirk_flags & QUIRK_FLAG_IFACE_DELAY)
++				msleep(50);
++		}
++	}
++
+ validation:
+ 	/* validate clock after rate change */
+ 	if (!uac_clock_source_is_valid(chip, fmt, clock))
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 6d699065e81a2..b470404a5376c 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -439,16 +439,21 @@ static int configure_endpoints(struct snd_usb_audio *chip,
+ 		/* stop any running stream beforehand */
+ 		if (stop_endpoints(subs, false))
+ 			sync_pending_stops(subs);
++		if (subs->sync_endpoint) {
++			err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
++			if (err < 0)
++				return err;
++		}
+ 		err = snd_usb_endpoint_configure(chip, subs->data_endpoint);
+ 		if (err < 0)
+ 			return err;
+ 		snd_usb_set_format_quirk(subs, subs->cur_audiofmt);
+-	}
+-
+-	if (subs->sync_endpoint) {
+-		err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
+-		if (err < 0)
+-			return err;
++	} else {
++		if (subs->sync_endpoint) {
++			err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
++			if (err < 0)
++				return err;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 40a5e3eb4ef26..78eb41b621d63 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -2672,6 +2672,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 				.altset_idx = 1,
+ 				.attributes = 0,
+ 				.endpoint = 0x82,
++				.ep_idx = 1,
+ 				.ep_attr = USB_ENDPOINT_XFER_ISOC,
+ 				.datainterval = 1,
+ 				.maxpacksize = 0x0126,
+@@ -2875,6 +2876,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 				.altset_idx = 1,
+ 				.attributes = 0x4,
+ 				.endpoint = 0x81,
++				.ep_idx = 1,
+ 				.ep_attr = USB_ENDPOINT_XFER_ISOC |
+ 					   USB_ENDPOINT_SYNC_ASYNC,
+ 				.maxpacksize = 0x130,
+@@ -3391,6 +3393,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 				.altset_idx = 1,
+ 				.attributes = 0,
+ 				.endpoint = 0x03,
++				.ep_idx = 1,
+ 				.rates = SNDRV_PCM_RATE_96000,
+ 				.ep_attr = USB_ENDPOINT_XFER_ISOC |
+ 					   USB_ENDPOINT_SYNC_ASYNC,
+diff --git a/tools/memory-model/README b/tools/memory-model/README
+index 9edd402704c4f..dab38904206a0 100644
+--- a/tools/memory-model/README
++++ b/tools/memory-model/README
+@@ -54,7 +54,8 @@ klitmus7 Compatibility Table
+ 	     -- 4.14  7.48 --
+ 	4.15 -- 4.19  7.49 --
+ 	4.20 -- 5.5   7.54 --
+-	5.6  --       7.56 --
++	5.6  -- 5.16  7.56 --
++	5.17 --       7.56.1 --
+ 	============  ==========
+ 
+ 