From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.15 commit in: /
Date: Sat, 03 Feb 2018 21:20:18
Message-Id: 1517692797.cd10231abdae4f2e8ddd323db3031b9d5778b320.mpagano@gentoo
1 commit: cd10231abdae4f2e8ddd323db3031b9d5778b320
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sat Feb 3 21:19:57 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sat Feb 3 21:19:57 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cd10231a
7
8 Linux patch 4.15.1
9
10 0000_README | 4 +
11 1000_linux-4.15.1.patch | 1808 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 1812 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 01553d4..da07a38 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -43,6 +43,10 @@ EXPERIMENTAL
19 Individual Patch Descriptions:
20 --------------------------------------------------------------------------
21
22 +Patch: 1000_linux-4.15.1.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.15.1
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1000_linux-4.15.1.patch b/1000_linux-4.15.1.patch
31 new file mode 100644
32 index 0000000..7a10ddd
33 --- /dev/null
34 +++ b/1000_linux-4.15.1.patch
35 @@ -0,0 +1,1808 @@
36 +diff --git a/Makefile b/Makefile
37 +index c8b8e902d5a4..af101b556ba0 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,7 +1,7 @@
41 + # SPDX-License-Identifier: GPL-2.0
42 + VERSION = 4
43 + PATCHLEVEL = 15
44 +-SUBLEVEL = 0
45 ++SUBLEVEL = 1
46 + EXTRAVERSION =
47 + NAME = Fearless Coyote
48 +
49 +diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
50 +index 3d09e3aca18d..12e8484a8ee7 100644
51 +--- a/arch/x86/crypto/aesni-intel_asm.S
52 ++++ b/arch/x86/crypto/aesni-intel_asm.S
53 +@@ -90,30 +90,6 @@ SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
54 + ALL_F: .octa 0xffffffffffffffffffffffffffffffff
55 + .octa 0x00000000000000000000000000000000
56 +
57 +-.section .rodata
58 +-.align 16
59 +-.type aad_shift_arr, @object
60 +-.size aad_shift_arr, 272
61 +-aad_shift_arr:
62 +- .octa 0xffffffffffffffffffffffffffffffff
63 +- .octa 0xffffffffffffffffffffffffffffff0C
64 +- .octa 0xffffffffffffffffffffffffffff0D0C
65 +- .octa 0xffffffffffffffffffffffffff0E0D0C
66 +- .octa 0xffffffffffffffffffffffff0F0E0D0C
67 +- .octa 0xffffffffffffffffffffff0C0B0A0908
68 +- .octa 0xffffffffffffffffffff0D0C0B0A0908
69 +- .octa 0xffffffffffffffffff0E0D0C0B0A0908
70 +- .octa 0xffffffffffffffff0F0E0D0C0B0A0908
71 +- .octa 0xffffffffffffff0C0B0A090807060504
72 +- .octa 0xffffffffffff0D0C0B0A090807060504
73 +- .octa 0xffffffffff0E0D0C0B0A090807060504
74 +- .octa 0xffffffff0F0E0D0C0B0A090807060504
75 +- .octa 0xffffff0C0B0A09080706050403020100
76 +- .octa 0xffff0D0C0B0A09080706050403020100
77 +- .octa 0xff0E0D0C0B0A09080706050403020100
78 +- .octa 0x0F0E0D0C0B0A09080706050403020100
79 +-
80 +-
81 + .text
82 +
83 +
84 +@@ -257,6 +233,37 @@ aad_shift_arr:
85 + pxor \TMP1, \GH # result is in TMP1
86 + .endm
87 +
88 ++# Reads DLEN bytes starting at DPTR and stores in XMMDst
89 ++# where 0 < DLEN < 16
90 ++# Clobbers %rax, DLEN and XMM1
91 ++.macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst
92 ++ cmp $8, \DLEN
93 ++ jl _read_lt8_\@
94 ++ mov (\DPTR), %rax
95 ++ MOVQ_R64_XMM %rax, \XMMDst
96 ++ sub $8, \DLEN
97 ++ jz _done_read_partial_block_\@
98 ++ xor %eax, %eax
99 ++_read_next_byte_\@:
100 ++ shl $8, %rax
101 ++ mov 7(\DPTR, \DLEN, 1), %al
102 ++ dec \DLEN
103 ++ jnz _read_next_byte_\@
104 ++ MOVQ_R64_XMM %rax, \XMM1
105 ++ pslldq $8, \XMM1
106 ++ por \XMM1, \XMMDst
107 ++ jmp _done_read_partial_block_\@
108 ++_read_lt8_\@:
109 ++ xor %eax, %eax
110 ++_read_next_byte_lt8_\@:
111 ++ shl $8, %rax
112 ++ mov -1(\DPTR, \DLEN, 1), %al
113 ++ dec \DLEN
114 ++ jnz _read_next_byte_lt8_\@
115 ++ MOVQ_R64_XMM %rax, \XMMDst
116 ++_done_read_partial_block_\@:
117 ++.endm
118 ++
119 + /*
120 + * if a = number of total plaintext bytes
121 + * b = floor(a/16)
122 +@@ -273,62 +280,30 @@ aad_shift_arr:
123 + XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
124 + MOVADQ SHUF_MASK(%rip), %xmm14
125 + mov arg7, %r10 # %r10 = AAD
126 +- mov arg8, %r12 # %r12 = aadLen
127 +- mov %r12, %r11
128 ++ mov arg8, %r11 # %r11 = aadLen
129 + pxor %xmm\i, %xmm\i
130 + pxor \XMM2, \XMM2
131 +
132 + cmp $16, %r11
133 +- jl _get_AAD_rest8\num_initial_blocks\operation
134 ++ jl _get_AAD_rest\num_initial_blocks\operation
135 + _get_AAD_blocks\num_initial_blocks\operation:
136 + movdqu (%r10), %xmm\i
137 + PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
138 + pxor %xmm\i, \XMM2
139 + GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
140 + add $16, %r10
141 +- sub $16, %r12
142 + sub $16, %r11
143 + cmp $16, %r11
144 + jge _get_AAD_blocks\num_initial_blocks\operation
145 +
146 + movdqu \XMM2, %xmm\i
147 ++
148 ++ /* read the last <16B of AAD */
149 ++_get_AAD_rest\num_initial_blocks\operation:
150 + cmp $0, %r11
151 + je _get_AAD_done\num_initial_blocks\operation
152 +
153 +- pxor %xmm\i,%xmm\i
154 +-
155 +- /* read the last <16B of AAD. since we have at least 4B of
156 +- data right after the AAD (the ICV, and maybe some CT), we can
157 +- read 4B/8B blocks safely, and then get rid of the extra stuff */
158 +-_get_AAD_rest8\num_initial_blocks\operation:
159 +- cmp $4, %r11
160 +- jle _get_AAD_rest4\num_initial_blocks\operation
161 +- movq (%r10), \TMP1
162 +- add $8, %r10
163 +- sub $8, %r11
164 +- pslldq $8, \TMP1
165 +- psrldq $8, %xmm\i
166 +- pxor \TMP1, %xmm\i
167 +- jmp _get_AAD_rest8\num_initial_blocks\operation
168 +-_get_AAD_rest4\num_initial_blocks\operation:
169 +- cmp $0, %r11
170 +- jle _get_AAD_rest0\num_initial_blocks\operation
171 +- mov (%r10), %eax
172 +- movq %rax, \TMP1
173 +- add $4, %r10
174 +- sub $4, %r10
175 +- pslldq $12, \TMP1
176 +- psrldq $4, %xmm\i
177 +- pxor \TMP1, %xmm\i
178 +-_get_AAD_rest0\num_initial_blocks\operation:
179 +- /* finalize: shift out the extra bytes we read, and align
180 +- left. since pslldq can only shift by an immediate, we use
181 +- vpshufb and an array of shuffle masks */
182 +- movq %r12, %r11
183 +- salq $4, %r11
184 +- movdqu aad_shift_arr(%r11), \TMP1
185 +- PSHUFB_XMM \TMP1, %xmm\i
186 +-_get_AAD_rest_final\num_initial_blocks\operation:
187 ++ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i
188 + PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
189 + pxor \XMM2, %xmm\i
190 + GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
191 +@@ -532,62 +507,30 @@ _initial_blocks_done\num_initial_blocks\operation:
192 + XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
193 + MOVADQ SHUF_MASK(%rip), %xmm14
194 + mov arg7, %r10 # %r10 = AAD
195 +- mov arg8, %r12 # %r12 = aadLen
196 +- mov %r12, %r11
197 ++ mov arg8, %r11 # %r11 = aadLen
198 + pxor %xmm\i, %xmm\i
199 + pxor \XMM2, \XMM2
200 +
201 + cmp $16, %r11
202 +- jl _get_AAD_rest8\num_initial_blocks\operation
203 ++ jl _get_AAD_rest\num_initial_blocks\operation
204 + _get_AAD_blocks\num_initial_blocks\operation:
205 + movdqu (%r10), %xmm\i
206 + PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
207 + pxor %xmm\i, \XMM2
208 + GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
209 + add $16, %r10
210 +- sub $16, %r12
211 + sub $16, %r11
212 + cmp $16, %r11
213 + jge _get_AAD_blocks\num_initial_blocks\operation
214 +
215 + movdqu \XMM2, %xmm\i
216 ++
217 ++ /* read the last <16B of AAD */
218 ++_get_AAD_rest\num_initial_blocks\operation:
219 + cmp $0, %r11
220 + je _get_AAD_done\num_initial_blocks\operation
221 +
222 +- pxor %xmm\i,%xmm\i
223 +-
224 +- /* read the last <16B of AAD. since we have at least 4B of
225 +- data right after the AAD (the ICV, and maybe some PT), we can
226 +- read 4B/8B blocks safely, and then get rid of the extra stuff */
227 +-_get_AAD_rest8\num_initial_blocks\operation:
228 +- cmp $4, %r11
229 +- jle _get_AAD_rest4\num_initial_blocks\operation
230 +- movq (%r10), \TMP1
231 +- add $8, %r10
232 +- sub $8, %r11
233 +- pslldq $8, \TMP1
234 +- psrldq $8, %xmm\i
235 +- pxor \TMP1, %xmm\i
236 +- jmp _get_AAD_rest8\num_initial_blocks\operation
237 +-_get_AAD_rest4\num_initial_blocks\operation:
238 +- cmp $0, %r11
239 +- jle _get_AAD_rest0\num_initial_blocks\operation
240 +- mov (%r10), %eax
241 +- movq %rax, \TMP1
242 +- add $4, %r10
243 +- sub $4, %r10
244 +- pslldq $12, \TMP1
245 +- psrldq $4, %xmm\i
246 +- pxor \TMP1, %xmm\i
247 +-_get_AAD_rest0\num_initial_blocks\operation:
248 +- /* finalize: shift out the extra bytes we read, and align
249 +- left. since pslldq can only shift by an immediate, we use
250 +- vpshufb and an array of shuffle masks */
251 +- movq %r12, %r11
252 +- salq $4, %r11
253 +- movdqu aad_shift_arr(%r11), \TMP1
254 +- PSHUFB_XMM \TMP1, %xmm\i
255 +-_get_AAD_rest_final\num_initial_blocks\operation:
256 ++ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i
257 + PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
258 + pxor \XMM2, %xmm\i
259 + GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
260 +@@ -1386,14 +1329,6 @@ _esb_loop_\@:
261 + *
262 + * AAD Format with 64-bit Extended Sequence Number
263 + *
264 +-* aadLen:
265 +-* from the definition of the spec, aadLen can only be 8 or 12 bytes.
266 +-* The code supports 16 too but for other sizes, the code will fail.
267 +-*
268 +-* TLen:
269 +-* from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
270 +-* For other sizes, the code will fail.
271 +-*
272 + * poly = x^128 + x^127 + x^126 + x^121 + 1
273 + *
274 + *****************************************************************************/
275 +@@ -1487,19 +1422,16 @@ _zero_cipher_left_decrypt:
276 + PSHUFB_XMM %xmm10, %xmm0
277 +
278 + ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn)
279 +- sub $16, %r11
280 +- add %r13, %r11
281 +- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
282 +- lea SHIFT_MASK+16(%rip), %r12
283 +- sub %r13, %r12
284 +-# adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
285 +-# (%r13 is the number of bytes in plaintext mod 16)
286 +- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
287 +- PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
288 +
289 ++ lea (%arg3,%r11,1), %r10
290 ++ mov %r13, %r12
291 ++ READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
292 ++
293 ++ lea ALL_F+16(%rip), %r12
294 ++ sub %r13, %r12
295 + movdqa %xmm1, %xmm2
296 + pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
297 +- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
298 ++ movdqu (%r12), %xmm1
299 + # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
300 + pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
301 + pand %xmm1, %xmm2
302 +@@ -1508,9 +1440,6 @@ _zero_cipher_left_decrypt:
303 +
304 + pxor %xmm2, %xmm8
305 + GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
306 +- # GHASH computation for the last <16 byte block
307 +- sub %r13, %r11
308 +- add $16, %r11
309 +
310 + # output %r13 bytes
311 + MOVQ_R64_XMM %xmm0, %rax
312 +@@ -1664,14 +1593,6 @@ ENDPROC(aesni_gcm_dec)
313 + *
314 + * AAD Format with 64-bit Extended Sequence Number
315 + *
316 +-* aadLen:
317 +-* from the definition of the spec, aadLen can only be 8 or 12 bytes.
318 +-* The code supports 16 too but for other sizes, the code will fail.
319 +-*
320 +-* TLen:
321 +-* from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
322 +-* For other sizes, the code will fail.
323 +-*
324 + * poly = x^128 + x^127 + x^126 + x^121 + 1
325 + ***************************************************************************/
326 + ENTRY(aesni_gcm_enc)
327 +@@ -1764,19 +1685,16 @@ _zero_cipher_left_encrypt:
328 + movdqa SHUF_MASK(%rip), %xmm10
329 + PSHUFB_XMM %xmm10, %xmm0
330 +
331 +-
332 + ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn)
333 +- sub $16, %r11
334 +- add %r13, %r11
335 +- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
336 +- lea SHIFT_MASK+16(%rip), %r12
337 ++
338 ++ lea (%arg3,%r11,1), %r10
339 ++ mov %r13, %r12
340 ++ READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
341 ++
342 ++ lea ALL_F+16(%rip), %r12
343 + sub %r13, %r12
344 +- # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
345 +- # (%r13 is the number of bytes in plaintext mod 16)
346 +- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
347 +- PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
348 + pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
349 +- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
350 ++ movdqu (%r12), %xmm1
351 + # get the appropriate mask to mask out top 16-r13 bytes of xmm0
352 + pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
353 + movdqa SHUF_MASK(%rip), %xmm10
354 +@@ -1785,9 +1703,6 @@ _zero_cipher_left_encrypt:
355 + pxor %xmm0, %xmm8
356 + GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
357 + # GHASH computation for the last <16 byte block
358 +- sub %r13, %r11
359 +- add $16, %r11
360 +-
361 + movdqa SHUF_MASK(%rip), %xmm10
362 + PSHUFB_XMM %xmm10, %xmm0
363 +
364 +diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
365 +index 3bf3dcf29825..34cf1c1f8c98 100644
366 +--- a/arch/x86/crypto/aesni-intel_glue.c
367 ++++ b/arch/x86/crypto/aesni-intel_glue.c
368 +@@ -690,8 +690,8 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
369 + rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
370 + }
371 +
372 +-static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
373 +- unsigned int key_len)
374 ++static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key,
375 ++ unsigned int key_len)
376 + {
377 + struct cryptd_aead **ctx = crypto_aead_ctx(parent);
378 + struct cryptd_aead *cryptd_tfm = *ctx;
379 +@@ -716,8 +716,8 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead,
380 +
381 + /* This is the Integrity Check Value (aka the authentication tag length and can
382 + * be 8, 12 or 16 bytes long. */
383 +-static int rfc4106_set_authsize(struct crypto_aead *parent,
384 +- unsigned int authsize)
385 ++static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent,
386 ++ unsigned int authsize)
387 + {
388 + struct cryptd_aead **ctx = crypto_aead_ctx(parent);
389 + struct cryptd_aead *cryptd_tfm = *ctx;
390 +@@ -824,7 +824,7 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
391 + if (sg_is_last(req->src) &&
392 + (!PageHighMem(sg_page(req->src)) ||
393 + req->src->offset + req->src->length <= PAGE_SIZE) &&
394 +- sg_is_last(req->dst) &&
395 ++ sg_is_last(req->dst) && req->dst->length &&
396 + (!PageHighMem(sg_page(req->dst)) ||
397 + req->dst->offset + req->dst->length <= PAGE_SIZE)) {
398 + one_entry_in_sg = 1;
399 +@@ -929,7 +929,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
400 + aes_ctx);
401 + }
402 +
403 +-static int rfc4106_encrypt(struct aead_request *req)
404 ++static int gcmaes_wrapper_encrypt(struct aead_request *req)
405 + {
406 + struct crypto_aead *tfm = crypto_aead_reqtfm(req);
407 + struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
408 +@@ -945,7 +945,7 @@ static int rfc4106_encrypt(struct aead_request *req)
409 + return crypto_aead_encrypt(req);
410 + }
411 +
412 +-static int rfc4106_decrypt(struct aead_request *req)
413 ++static int gcmaes_wrapper_decrypt(struct aead_request *req)
414 + {
415 + struct crypto_aead *tfm = crypto_aead_reqtfm(req);
416 + struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
417 +@@ -1117,7 +1117,7 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
418 + {
419 + __be32 counter = cpu_to_be32(1);
420 + struct crypto_aead *tfm = crypto_aead_reqtfm(req);
421 +- struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
422 ++ struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
423 + void *aes_ctx = &(ctx->aes_key_expanded);
424 + u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
425 +
426 +@@ -1128,6 +1128,30 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
427 + aes_ctx);
428 + }
429 +
430 ++static int generic_gcmaes_init(struct crypto_aead *aead)
431 ++{
432 ++ struct cryptd_aead *cryptd_tfm;
433 ++ struct cryptd_aead **ctx = crypto_aead_ctx(aead);
434 ++
435 ++ cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni",
436 ++ CRYPTO_ALG_INTERNAL,
437 ++ CRYPTO_ALG_INTERNAL);
438 ++ if (IS_ERR(cryptd_tfm))
439 ++ return PTR_ERR(cryptd_tfm);
440 ++
441 ++ *ctx = cryptd_tfm;
442 ++ crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
443 ++
444 ++ return 0;
445 ++}
446 ++
447 ++static void generic_gcmaes_exit(struct crypto_aead *aead)
448 ++{
449 ++ struct cryptd_aead **ctx = crypto_aead_ctx(aead);
450 ++
451 ++ cryptd_free_aead(*ctx);
452 ++}
453 ++
454 + static struct aead_alg aesni_aead_algs[] = { {
455 + .setkey = common_rfc4106_set_key,
456 + .setauthsize = common_rfc4106_set_authsize,
457 +@@ -1147,10 +1171,10 @@ static struct aead_alg aesni_aead_algs[] = { {
458 + }, {
459 + .init = rfc4106_init,
460 + .exit = rfc4106_exit,
461 +- .setkey = rfc4106_set_key,
462 +- .setauthsize = rfc4106_set_authsize,
463 +- .encrypt = rfc4106_encrypt,
464 +- .decrypt = rfc4106_decrypt,
465 ++ .setkey = gcmaes_wrapper_set_key,
466 ++ .setauthsize = gcmaes_wrapper_set_authsize,
467 ++ .encrypt = gcmaes_wrapper_encrypt,
468 ++ .decrypt = gcmaes_wrapper_decrypt,
469 + .ivsize = GCM_RFC4106_IV_SIZE,
470 + .maxauthsize = 16,
471 + .base = {
472 +@@ -1169,14 +1193,32 @@ static struct aead_alg aesni_aead_algs[] = { {
473 + .decrypt = generic_gcmaes_decrypt,
474 + .ivsize = GCM_AES_IV_SIZE,
475 + .maxauthsize = 16,
476 ++ .base = {
477 ++ .cra_name = "__generic-gcm-aes-aesni",
478 ++ .cra_driver_name = "__driver-generic-gcm-aes-aesni",
479 ++ .cra_priority = 0,
480 ++ .cra_flags = CRYPTO_ALG_INTERNAL,
481 ++ .cra_blocksize = 1,
482 ++ .cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
483 ++ .cra_alignmask = AESNI_ALIGN - 1,
484 ++ .cra_module = THIS_MODULE,
485 ++ },
486 ++}, {
487 ++ .init = generic_gcmaes_init,
488 ++ .exit = generic_gcmaes_exit,
489 ++ .setkey = gcmaes_wrapper_set_key,
490 ++ .setauthsize = gcmaes_wrapper_set_authsize,
491 ++ .encrypt = gcmaes_wrapper_encrypt,
492 ++ .decrypt = gcmaes_wrapper_decrypt,
493 ++ .ivsize = GCM_AES_IV_SIZE,
494 ++ .maxauthsize = 16,
495 + .base = {
496 + .cra_name = "gcm(aes)",
497 + .cra_driver_name = "generic-gcm-aesni",
498 + .cra_priority = 400,
499 + .cra_flags = CRYPTO_ALG_ASYNC,
500 + .cra_blocksize = 1,
501 +- .cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
502 +- .cra_alignmask = AESNI_ALIGN - 1,
503 ++ .cra_ctxsize = sizeof(struct cryptd_aead *),
504 + .cra_module = THIS_MODULE,
505 + },
506 + } };
507 +diff --git a/crypto/Kconfig b/crypto/Kconfig
508 +index f7911963bb79..9327fbfccf5a 100644
509 +--- a/crypto/Kconfig
510 ++++ b/crypto/Kconfig
511 +@@ -130,7 +130,7 @@ config CRYPTO_DH
512 +
513 + config CRYPTO_ECDH
514 + tristate "ECDH algorithm"
515 +- select CRYTPO_KPP
516 ++ select CRYPTO_KPP
517 + select CRYPTO_RNG_DEFAULT
518 + help
519 + Generic implementation of the ECDH algorithm
520 +diff --git a/crypto/af_alg.c b/crypto/af_alg.c
521 +index 35d4dcea381f..5231f421ad00 100644
522 +--- a/crypto/af_alg.c
523 ++++ b/crypto/af_alg.c
524 +@@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent);
525 +
526 + static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
527 + {
528 +- const u32 forbidden = CRYPTO_ALG_INTERNAL;
529 ++ const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
530 + struct sock *sk = sock->sk;
531 + struct alg_sock *ask = alg_sk(sk);
532 + struct sockaddr_alg *sa = (void *)uaddr;
533 +@@ -158,6 +158,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
534 + void *private;
535 + int err;
536 +
537 ++ /* If caller uses non-allowed flag, return error. */
538 ++ if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
539 ++ return -EINVAL;
540 ++
541 + if (sock->state == SS_CONNECTED)
542 + return -EINVAL;
543 +
544 +@@ -176,9 +180,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
545 + if (IS_ERR(type))
546 + return PTR_ERR(type);
547 +
548 +- private = type->bind(sa->salg_name,
549 +- sa->salg_feat & ~forbidden,
550 +- sa->salg_mask & ~forbidden);
551 ++ private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
552 + if (IS_ERR(private)) {
553 + module_put(type->owner);
554 + return PTR_ERR(private);
555 +diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
556 +index 7e8ed96236ce..a68be626017c 100644
557 +--- a/crypto/sha3_generic.c
558 ++++ b/crypto/sha3_generic.c
559 +@@ -18,6 +18,7 @@
560 + #include <linux/types.h>
561 + #include <crypto/sha3.h>
562 + #include <asm/byteorder.h>
563 ++#include <asm/unaligned.h>
564 +
565 + #define KECCAK_ROUNDS 24
566 +
567 +@@ -149,7 +150,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
568 + unsigned int i;
569 +
570 + for (i = 0; i < sctx->rsizw; i++)
571 +- sctx->st[i] ^= ((u64 *) src)[i];
572 ++ sctx->st[i] ^= get_unaligned_le64(src + 8 * i);
573 + keccakf(sctx->st);
574 +
575 + done += sctx->rsiz;
576 +@@ -174,7 +175,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out)
577 + sctx->buf[sctx->rsiz - 1] |= 0x80;
578 +
579 + for (i = 0; i < sctx->rsizw; i++)
580 +- sctx->st[i] ^= ((u64 *) sctx->buf)[i];
581 ++ sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i);
582 +
583 + keccakf(sctx->st);
584 +
585 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
586 +index a7ecfde66b7b..ec0917fb7cca 100644
587 +--- a/drivers/android/binder.c
588 ++++ b/drivers/android/binder.c
589 +@@ -4302,6 +4302,18 @@ static int binder_thread_release(struct binder_proc *proc,
590 + if (t)
591 + spin_lock(&t->lock);
592 + }
593 ++
594 ++ /*
595 ++ * If this thread used poll, make sure we remove the waitqueue
596 ++ * from any epoll data structures holding it with POLLFREE.
597 ++ * waitqueue_active() is safe to use here because we're holding
598 ++ * the inner lock.
599 ++ */
600 ++ if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
601 ++ waitqueue_active(&thread->wait)) {
602 ++ wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
603 ++ }
604 ++
605 + binder_inner_proc_unlock(thread->proc);
606 +
607 + if (send_reply)
608 +diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
609 +index 6f6f745605af..bef8c65af8fc 100644
610 +--- a/drivers/android/binder_alloc.c
611 ++++ b/drivers/android/binder_alloc.c
612 +@@ -666,7 +666,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
613 + goto err_already_mapped;
614 + }
615 +
616 +- area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
617 ++ area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
618 + if (area == NULL) {
619 + ret = -ENOMEM;
620 + failure_string = "get_vm_area";
621 +diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
622 +index 71664b22ec9d..e0e6461b9200 100644
623 +--- a/drivers/bluetooth/hci_serdev.c
624 ++++ b/drivers/bluetooth/hci_serdev.c
625 +@@ -303,6 +303,7 @@ int hci_uart_register_device(struct hci_uart *hu,
626 + hci_set_drvdata(hdev, hu);
627 +
628 + INIT_WORK(&hu->write_work, hci_uart_write_work);
629 ++ percpu_init_rwsem(&hu->proto_lock);
630 +
631 + /* Only when vendor specific setup callback is provided, consider
632 + * the manufacturer information valid. This avoids filling in the
633 +diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
634 +index 0c5a5820b06e..da9d040bccc2 100644
635 +--- a/drivers/crypto/inside-secure/safexcel_hash.c
636 ++++ b/drivers/crypto/inside-secure/safexcel_hash.c
637 +@@ -34,6 +34,8 @@ struct safexcel_ahash_req {
638 + bool hmac;
639 + bool needs_inv;
640 +
641 ++ int nents;
642 ++
643 + u8 state_sz; /* expected sate size, only set once */
644 + u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
645 +
646 +@@ -152,8 +154,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
647 + memcpy(areq->result, sreq->state,
648 + crypto_ahash_digestsize(ahash));
649 +
650 +- dma_unmap_sg(priv->dev, areq->src,
651 +- sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
652 ++ if (sreq->nents) {
653 ++ dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
654 ++ sreq->nents = 0;
655 ++ }
656 +
657 + safexcel_free_context(priv, async, sreq->state_sz);
658 +
659 +@@ -178,7 +182,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
660 + struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
661 + struct safexcel_result_desc *rdesc;
662 + struct scatterlist *sg;
663 +- int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
664 ++ int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
665 +
666 + queued = len = req->len - req->processed;
667 + if (queued < crypto_ahash_blocksize(ahash))
668 +@@ -186,17 +190,31 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
669 + else
670 + cache_len = queued - areq->nbytes;
671 +
672 +- /*
673 +- * If this is not the last request and the queued data does not fit
674 +- * into full blocks, cache it for the next send() call.
675 +- */
676 +- extra = queued & (crypto_ahash_blocksize(ahash) - 1);
677 +- if (!req->last_req && extra) {
678 +- sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
679 +- req->cache_next, extra, areq->nbytes - extra);
680 +-
681 +- queued -= extra;
682 +- len -= extra;
683 ++ if (!req->last_req) {
684 ++ /* If this is not the last request and the queued data does not
685 ++ * fit into full blocks, cache it for the next send() call.
686 ++ */
687 ++ extra = queued & (crypto_ahash_blocksize(ahash) - 1);
688 ++ if (!extra)
689 ++ /* If this is not the last request and the queued data
690 ++ * is a multiple of a block, cache the last one for now.
691 ++ */
692 ++ extra = queued - crypto_ahash_blocksize(ahash);
693 ++
694 ++ if (extra) {
695 ++ sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
696 ++ req->cache_next, extra,
697 ++ areq->nbytes - extra);
698 ++
699 ++ queued -= extra;
700 ++ len -= extra;
701 ++
702 ++ if (!queued) {
703 ++ *commands = 0;
704 ++ *results = 0;
705 ++ return 0;
706 ++ }
707 ++ }
708 + }
709 +
710 + spin_lock_bh(&priv->ring[ring].egress_lock);
711 +@@ -234,15 +252,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
712 + }
713 +
714 + /* Now handle the current ahash request buffer(s) */
715 +- nents = dma_map_sg(priv->dev, areq->src,
716 +- sg_nents_for_len(areq->src, areq->nbytes),
717 +- DMA_TO_DEVICE);
718 +- if (!nents) {
719 ++ req->nents = dma_map_sg(priv->dev, areq->src,
720 ++ sg_nents_for_len(areq->src, areq->nbytes),
721 ++ DMA_TO_DEVICE);
722 ++ if (!req->nents) {
723 + ret = -ENOMEM;
724 + goto cdesc_rollback;
725 + }
726 +
727 +- for_each_sg(areq->src, sg, nents, i) {
728 ++ for_each_sg(areq->src, sg, req->nents, i) {
729 + int sglen = sg_dma_len(sg);
730 +
731 + /* Do not overflow the request */
732 +diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
733 +index 2b4c39fdfa91..86210f75d233 100644
734 +--- a/drivers/firmware/efi/Kconfig
735 ++++ b/drivers/firmware/efi/Kconfig
736 +@@ -159,7 +159,10 @@ config RESET_ATTACK_MITIGATION
737 + using the TCG Platform Reset Attack Mitigation specification. This
738 + protects against an attacker forcibly rebooting the system while it
739 + still contains secrets in RAM, booting another OS and extracting the
740 +- secrets.
741 ++ secrets. This should only be enabled when userland is configured to
742 ++ clear the MemoryOverwriteRequest flag on clean shutdown after secrets
743 ++ have been evicted, since otherwise it will trigger even on clean
744 ++ reboots.
745 +
746 + endmenu
747 +
748 +diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
749 +index 5fad89dfab7e..3ae7c1876bf4 100644
750 +--- a/drivers/gpio/gpio-ath79.c
751 ++++ b/drivers/gpio/gpio-ath79.c
752 +@@ -324,3 +324,6 @@ static struct platform_driver ath79_gpio_driver = {
753 + };
754 +
755 + module_platform_driver(ath79_gpio_driver);
756 ++
757 ++MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X GPIO API support");
758 ++MODULE_LICENSE("GPL v2");
759 +diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
760 +index 98c7ff2a76e7..8d62db447ec1 100644
761 +--- a/drivers/gpio/gpio-iop.c
762 ++++ b/drivers/gpio/gpio-iop.c
763 +@@ -58,3 +58,7 @@ static int __init iop3xx_gpio_init(void)
764 + return platform_driver_register(&iop3xx_gpio_driver);
765 + }
766 + arch_initcall(iop3xx_gpio_init);
767 ++
768 ++MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors");
769 ++MODULE_AUTHOR("Lennert Buytenhek <buytenh@××××××××××.org>");
770 ++MODULE_LICENSE("GPL");
771 +diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
772 +index e6e5cca624a7..a365feff2377 100644
773 +--- a/drivers/gpio/gpio-stmpe.c
774 ++++ b/drivers/gpio/gpio-stmpe.c
775 +@@ -190,6 +190,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
776 + };
777 + int i, j;
778 +
779 ++ /*
780 ++ * STMPE1600: to be able to get IRQ from pins,
781 ++ * a read must be done on GPMR register, or a write in
782 ++ * GPSR or GPCR registers
783 ++ */
784 ++ if (stmpe->partnum == STMPE1600) {
785 ++ stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
786 ++ stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
787 ++ }
788 ++
789 + for (i = 0; i < CACHE_NR_REGS; i++) {
790 + /* STMPE801 and STMPE1600 don't have RE and FE registers */
791 + if ((stmpe->partnum == STMPE801 ||
792 +@@ -227,21 +237,11 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d)
793 + {
794 + struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
795 + struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
796 +- struct stmpe *stmpe = stmpe_gpio->stmpe;
797 + int offset = d->hwirq;
798 + int regoffset = offset / 8;
799 + int mask = BIT(offset % 8);
800 +
801 + stmpe_gpio->regs[REG_IE][regoffset] |= mask;
802 +-
803 +- /*
804 +- * STMPE1600 workaround: to be able to get IRQ from pins,
805 +- * a read must be done on GPMR register, or a write in
806 +- * GPSR or GPCR registers
807 +- */
808 +- if (stmpe->partnum == STMPE1600)
809 +- stmpe_reg_read(stmpe,
810 +- stmpe->regs[STMPE_IDX_GPMR_LSB + regoffset]);
811 + }
812 +
813 + static void stmpe_dbg_show_one(struct seq_file *s,
814 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
815 +index 14532d9576e4..f6efcf94f6ad 100644
816 +--- a/drivers/gpio/gpiolib.c
817 ++++ b/drivers/gpio/gpiolib.c
818 +@@ -732,6 +732,9 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
819 + struct gpioevent_data ge;
820 + int ret, level;
821 +
822 ++ /* Do not leak kernel stack to userspace */
823 ++ memset(&ge, 0, sizeof(ge));
824 ++
825 + ge.timestamp = ktime_get_real_ns();
826 + level = gpiod_get_value_cansleep(le->desc);
827 +
828 +diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
829 +index ee71ad9b6cc1..76531796bd3c 100644
830 +--- a/drivers/hid/wacom_sys.c
831 ++++ b/drivers/hid/wacom_sys.c
832 +@@ -2347,23 +2347,23 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
833 + int i;
834 + unsigned long flags;
835 +
836 +- spin_lock_irqsave(&remote->remote_lock, flags);
837 +- remote->remotes[index].registered = false;
838 +- spin_unlock_irqrestore(&remote->remote_lock, flags);
839 ++ for (i = 0; i < WACOM_MAX_REMOTES; i++) {
840 ++ if (remote->remotes[i].serial == serial) {
841 +
842 +- if (remote->remotes[index].battery.battery)
843 +- devres_release_group(&wacom->hdev->dev,
844 +- &remote->remotes[index].battery.bat_desc);
845 ++ spin_lock_irqsave(&remote->remote_lock, flags);
846 ++ remote->remotes[i].registered = false;
847 ++ spin_unlock_irqrestore(&remote->remote_lock, flags);
848 +
849 +- if (remote->remotes[index].group.name)
850 +- devres_release_group(&wacom->hdev->dev,
851 +- &remote->remotes[index]);
852 ++ if (remote->remotes[i].battery.battery)
853 ++ devres_release_group(&wacom->hdev->dev,
854 ++ &remote->remotes[i].battery.bat_desc);
855 ++
856 ++ if (remote->remotes[i].group.name)
857 ++ devres_release_group(&wacom->hdev->dev,
858 ++ &remote->remotes[i]);
859 +
860 +- for (i = 0; i < WACOM_MAX_REMOTES; i++) {
861 +- if (remote->remotes[i].serial == serial) {
862 + remote->remotes[i].serial = 0;
863 + remote->remotes[i].group.name = NULL;
864 +- remote->remotes[i].registered = false;
865 + remote->remotes[i].battery.battery = NULL;
866 + wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN;
867 + }
868 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
869 +index 16af6886e828..7dbff253c05c 100644
870 +--- a/drivers/hid/wacom_wac.c
871 ++++ b/drivers/hid/wacom_wac.c
872 +@@ -1924,7 +1924,6 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
873 + struct wacom_features *features = &wacom_wac->features;
874 + unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
875 + int i;
876 +- bool is_touch_on = value;
877 + bool do_report = false;
878 +
879 + /*
880 +@@ -1969,16 +1968,17 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
881 + break;
882 +
883 + case WACOM_HID_WD_MUTE_DEVICE:
884 +- if (wacom_wac->shared->touch_input && value) {
885 +- wacom_wac->shared->is_touch_on = !wacom_wac->shared->is_touch_on;
886 +- is_touch_on = wacom_wac->shared->is_touch_on;
887 +- }
888 +-
889 +- /* fall through*/
890 + case WACOM_HID_WD_TOUCHONOFF:
891 + if (wacom_wac->shared->touch_input) {
892 ++ bool *is_touch_on = &wacom_wac->shared->is_touch_on;
893 ++
894 ++ if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value)
895 ++ *is_touch_on = !(*is_touch_on);
896 ++ else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)
897 ++ *is_touch_on = value;
898 ++
899 + input_report_switch(wacom_wac->shared->touch_input,
900 +- SW_MUTE_DEVICE, !is_touch_on);
901 ++ SW_MUTE_DEVICE, !(*is_touch_on));
902 + input_sync(wacom_wac->shared->touch_input);
903 + }
904 + break;
905 +diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
906 +index c9d96f935dba..cecf1e5b244c 100644
907 +--- a/drivers/iio/adc/stm32-adc.c
908 ++++ b/drivers/iio/adc/stm32-adc.c
909 +@@ -1315,6 +1315,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
910 + {
911 + struct stm32_adc *adc = iio_priv(indio_dev);
912 + unsigned int watermark = STM32_DMA_BUFFER_SIZE / 2;
913 ++ unsigned int rx_buf_sz = STM32_DMA_BUFFER_SIZE;
914 +
915 + /*
916 + * dma cyclic transfers are used, buffer is split into two periods.
917 +@@ -1323,7 +1324,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
918 + * - one buffer (period) driver can push with iio_trigger_poll().
919 + */
920 + watermark = min(watermark, val * (unsigned)(sizeof(u16)));
921 +- adc->rx_buf_sz = watermark * 2;
922 ++ adc->rx_buf_sz = min(rx_buf_sz, watermark * 2 * adc->num_conv);
923 +
924 + return 0;
925 + }
926 +diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
927 +index 97bce8345c6a..fbe2431f5b81 100644
928 +--- a/drivers/iio/chemical/ccs811.c
929 ++++ b/drivers/iio/chemical/ccs811.c
930 +@@ -96,7 +96,6 @@ static const struct iio_chan_spec ccs811_channels[] = {
931 + .channel2 = IIO_MOD_CO2,
932 + .modified = 1,
933 + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
934 +- BIT(IIO_CHAN_INFO_OFFSET) |
935 + BIT(IIO_CHAN_INFO_SCALE),
936 + .scan_index = 0,
937 + .scan_type = {
938 +@@ -255,24 +254,18 @@ static int ccs811_read_raw(struct iio_dev *indio_dev,
939 + switch (chan->channel2) {
940 + case IIO_MOD_CO2:
941 + *val = 0;
942 +- *val2 = 12834;
943 ++ *val2 = 100;
944 + return IIO_VAL_INT_PLUS_MICRO;
945 + case IIO_MOD_VOC:
946 + *val = 0;
947 +- *val2 = 84246;
948 +- return IIO_VAL_INT_PLUS_MICRO;
949 ++ *val2 = 100;
950 ++ return IIO_VAL_INT_PLUS_NANO;
951 + default:
952 + return -EINVAL;
953 + }
954 + default:
955 + return -EINVAL;
956 + }
957 +- case IIO_CHAN_INFO_OFFSET:
958 +- if (!(chan->type == IIO_CONCENTRATION &&
959 +- chan->channel2 == IIO_MOD_CO2))
960 +- return -EINVAL;
961 +- *val = -400;
962 +- return IIO_VAL_INT;
963 + default:
964 + return -EINVAL;
965 + }
966 +diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
967 +index 141ea228aac6..f5954981e9ee 100644
968 +--- a/drivers/input/rmi4/rmi_driver.c
969 ++++ b/drivers/input/rmi4/rmi_driver.c
970 +@@ -41,6 +41,13 @@ void rmi_free_function_list(struct rmi_device *rmi_dev)
971 +
972 + rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n");
973 +
974 ++ /* Doing it in the reverse order so F01 will be removed last */
975 ++ list_for_each_entry_safe_reverse(fn, tmp,
976 ++ &data->function_list, node) {
977 ++ list_del(&fn->node);
978 ++ rmi_unregister_function(fn);
979 ++ }
980 ++
981 + devm_kfree(&rmi_dev->dev, data->irq_memory);
982 + data->irq_memory = NULL;
983 + data->irq_status = NULL;
984 +@@ -50,13 +57,6 @@ void rmi_free_function_list(struct rmi_device *rmi_dev)
985 +
986 + data->f01_container = NULL;
987 + data->f34_container = NULL;
988 +-
989 +- /* Doing it in the reverse order so F01 will be removed last */
990 +- list_for_each_entry_safe_reverse(fn, tmp,
991 +- &data->function_list, node) {
992 +- list_del(&fn->node);
993 +- rmi_unregister_function(fn);
994 +- }
995 + }
996 +
997 + static int reset_one_function(struct rmi_function *fn)
998 +diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c
999 +index ad71a5e768dc..7ccbb370a9a8 100644
1000 +--- a/drivers/input/rmi4/rmi_f03.c
1001 ++++ b/drivers/input/rmi4/rmi_f03.c
1002 +@@ -32,6 +32,7 @@ struct f03_data {
1003 + struct rmi_function *fn;
1004 +
1005 + struct serio *serio;
1006 ++ bool serio_registered;
1007 +
1008 + unsigned int overwrite_buttons;
1009 +
1010 +@@ -138,6 +139,37 @@ static int rmi_f03_initialize(struct f03_data *f03)
1011 + return 0;
1012 + }
1013 +
1014 ++static int rmi_f03_pt_open(struct serio *serio)
1015 ++{
1016 ++ struct f03_data *f03 = serio->port_data;
1017 ++ struct rmi_function *fn = f03->fn;
1018 ++ const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE;
1019 ++ const u16 data_addr = fn->fd.data_base_addr + RMI_F03_OB_OFFSET;
1020 ++ u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE];
1021 ++ int error;
1022 ++
1023 ++ /*
1024 ++ * Consume any pending data. Some devices like to spam with
1025 ++ * 0xaa 0x00 announcements which may confuse us as we try to
1026 ++ * probe the device.
1027 ++ */
1028 ++ error = rmi_read_block(fn->rmi_dev, data_addr, &obs, ob_len);
1029 ++ if (!error)
1030 ++ rmi_dbg(RMI_DEBUG_FN, &fn->dev,
1031 ++ "%s: Consumed %*ph (%d) from PS2 guest\n",
1032 ++ __func__, ob_len, obs, ob_len);
1033 ++
1034 ++ return fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask);
1035 ++}
1036 ++
1037 ++static void rmi_f03_pt_close(struct serio *serio)
1038 ++{
1039 ++ struct f03_data *f03 = serio->port_data;
1040 ++ struct rmi_function *fn = f03->fn;
1041 ++
1042 ++ fn->rmi_dev->driver->clear_irq_bits(fn->rmi_dev, fn->irq_mask);
1043 ++}
1044 ++
1045 + static int rmi_f03_register_pt(struct f03_data *f03)
1046 + {
1047 + struct serio *serio;
1048 +@@ -148,6 +180,8 @@ static int rmi_f03_register_pt(struct f03_data *f03)
1049 +
1050 + serio->id.type = SERIO_PS_PSTHRU;
1051 + serio->write = rmi_f03_pt_write;
1052 ++ serio->open = rmi_f03_pt_open;
1053 ++ serio->close = rmi_f03_pt_close;
1054 + serio->port_data = f03;
1055 +
1056 + strlcpy(serio->name, "Synaptics RMI4 PS/2 pass-through",
1057 +@@ -184,17 +218,27 @@ static int rmi_f03_probe(struct rmi_function *fn)
1058 + f03->device_count);
1059 +
1060 + dev_set_drvdata(dev, f03);
1061 +-
1062 +- error = rmi_f03_register_pt(f03);
1063 +- if (error)
1064 +- return error;
1065 +-
1066 + return 0;
1067 + }
1068 +
1069 + static int rmi_f03_config(struct rmi_function *fn)
1070 + {
1071 +- fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask);
1072 ++ struct f03_data *f03 = dev_get_drvdata(&fn->dev);
1073 ++ int error;
1074 ++
1075 ++ if (!f03->serio_registered) {
1076 ++ error = rmi_f03_register_pt(f03);
1077 ++ if (error)
1078 ++ return error;
1079 ++
1080 ++ f03->serio_registered = true;
1081 ++ } else {
1082 ++ /*
1083 ++ * We must be re-configuring the sensor, just enable
1084 ++ * interrupts for this function.
1085 ++ */
1086 ++ fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask);
1087 ++ }
1088 +
1089 + return 0;
1090 + }
1091 +@@ -204,7 +248,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
1092 + struct rmi_device *rmi_dev = fn->rmi_dev;
1093 + struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
1094 + struct f03_data *f03 = dev_get_drvdata(&fn->dev);
1095 +- u16 data_addr = fn->fd.data_base_addr;
1096 ++ const u16 data_addr = fn->fd.data_base_addr + RMI_F03_OB_OFFSET;
1097 + const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE;
1098 + u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE];
1099 + u8 ob_status;
1100 +@@ -226,8 +270,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
1101 + drvdata->attn_data.size -= ob_len;
1102 + } else {
1103 + /* Grab all of the data registers, and check them for data */
1104 +- error = rmi_read_block(fn->rmi_dev, data_addr + RMI_F03_OB_OFFSET,
1105 +- &obs, ob_len);
1106 ++ error = rmi_read_block(fn->rmi_dev, data_addr, &obs, ob_len);
1107 + if (error) {
1108 + dev_err(&fn->dev,
1109 + "%s: Failed to read F03 output buffers: %d\n",
1110 +@@ -266,7 +309,8 @@ static void rmi_f03_remove(struct rmi_function *fn)
1111 + {
1112 + struct f03_data *f03 = dev_get_drvdata(&fn->dev);
1113 +
1114 +- serio_unregister_port(f03->serio);
1115 ++ if (f03->serio_registered)
1116 ++ serio_unregister_port(f03->serio);
1117 + }
1118 +
1119 + struct rmi_function_handler rmi_f03_handler = {
1120 +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
1121 +index f4f17552c9b8..4a0ccda4d04b 100644
1122 +--- a/drivers/misc/mei/pci-me.c
1123 ++++ b/drivers/misc/mei/pci-me.c
1124 +@@ -238,8 +238,11 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1125 + */
1126 + mei_me_set_pm_domain(dev);
1127 +
1128 +- if (mei_pg_is_enabled(dev))
1129 ++ if (mei_pg_is_enabled(dev)) {
1130 + pm_runtime_put_noidle(&pdev->dev);
1131 ++ if (hw->d0i3_supported)
1132 ++ pm_runtime_allow(&pdev->dev);
1133 ++ }
1134 +
1135 + dev_dbg(&pdev->dev, "initialization successful.\n");
1136 +
1137 +diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
1138 +index 57fb7ae31412..49cb3e1f8bd0 100644
1139 +--- a/drivers/mtd/nand/denali_pci.c
1140 ++++ b/drivers/mtd/nand/denali_pci.c
1141 +@@ -125,3 +125,7 @@ static struct pci_driver denali_pci_driver = {
1142 + .remove = denali_pci_remove,
1143 + };
1144 + module_pci_driver(denali_pci_driver);
1145 ++
1146 ++MODULE_DESCRIPTION("PCI driver for Denali NAND controller");
1147 ++MODULE_AUTHOR("Intel Corporation and its suppliers");
1148 ++MODULE_LICENSE("GPL v2");
1149 +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
1150 +index c208753ff5b7..c69a5b3ae8c8 100644
1151 +--- a/drivers/net/ethernet/intel/igb/igb_main.c
1152 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c
1153 +@@ -3676,7 +3676,7 @@ static int __igb_close(struct net_device *netdev, bool suspending)
1154 +
1155 + int igb_close(struct net_device *netdev)
1156 + {
1157 +- if (netif_device_present(netdev))
1158 ++ if (netif_device_present(netdev) || netdev->dismantle)
1159 + return __igb_close(netdev, false);
1160 + return 0;
1161 + }
1162 +diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
1163 +index 7549c7f74a3c..c03e96e6a041 100644
1164 +--- a/drivers/power/reset/zx-reboot.c
1165 ++++ b/drivers/power/reset/zx-reboot.c
1166 +@@ -82,3 +82,7 @@ static struct platform_driver zx_reboot_driver = {
1167 + },
1168 + };
1169 + module_platform_driver(zx_reboot_driver);
1170 ++
1171 ++MODULE_DESCRIPTION("ZTE SoCs reset driver");
1172 ++MODULE_AUTHOR("Jun Nie <jun.nie@××××××.org>");
1173 ++MODULE_LICENSE("GPL v2");
1174 +diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
1175 +index af3e4d3f9735..7173ae53c526 100644
1176 +--- a/drivers/scsi/aacraid/aachba.c
1177 ++++ b/drivers/scsi/aacraid/aachba.c
1178 +@@ -913,8 +913,15 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
1179 + memset(str, ' ', sizeof(*str));
1180 +
1181 + if (sup_adap_info->adapter_type_text[0]) {
1182 +- char *cp = sup_adap_info->adapter_type_text;
1183 + int c;
1184 ++ char *cp;
1185 ++ char *cname = kmemdup(sup_adap_info->adapter_type_text,
1186 ++ sizeof(sup_adap_info->adapter_type_text),
1187 ++ GFP_ATOMIC);
1188 ++ if (!cname)
1189 ++ return;
1190 ++
1191 ++ cp = cname;
1192 + if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
1193 + inqstrcpy("SMC", str->vid);
1194 + else {
1195 +@@ -923,7 +930,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
1196 + ++cp;
1197 + c = *cp;
1198 + *cp = '\0';
1199 +- inqstrcpy(sup_adap_info->adapter_type_text, str->vid);
1200 ++ inqstrcpy(cname, str->vid);
1201 + *cp = c;
1202 + while (*cp && *cp != ' ')
1203 + ++cp;
1204 +@@ -937,8 +944,8 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
1205 + cp[sizeof(str->pid)] = '\0';
1206 + }
1207 + inqstrcpy (cp, str->pid);
1208 +- if (c)
1209 +- cp[sizeof(str->pid)] = c;
1210 ++
1211 ++ kfree(cname);
1212 + } else {
1213 + struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
1214 +
1215 +diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
1216 +index 80a8cb26cdea..d9b20dada109 100644
1217 +--- a/drivers/scsi/aacraid/commsup.c
1218 ++++ b/drivers/scsi/aacraid/commsup.c
1219 +@@ -1643,14 +1643,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
1220 + out:
1221 + aac->in_reset = 0;
1222 + scsi_unblock_requests(host);
1223 +- /*
1224 +- * Issue bus rescan to catch any configuration that might have
1225 +- * occurred
1226 +- */
1227 +- if (!retval) {
1228 +- dev_info(&aac->pdev->dev, "Issuing bus rescan\n");
1229 +- scsi_scan_host(host);
1230 +- }
1231 ++
1232 + if (jafo) {
1233 + spin_lock_irq(host->host_lock);
1234 + }
1235 +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
1236 +index 3b3d1d050cac..40fc7a590e81 100644
1237 +--- a/drivers/scsi/storvsc_drv.c
1238 ++++ b/drivers/scsi/storvsc_drv.c
1239 +@@ -1834,8 +1834,10 @@ static int storvsc_probe(struct hv_device *device,
1240 + fc_host_node_name(host) = stor_device->node_name;
1241 + fc_host_port_name(host) = stor_device->port_name;
1242 + stor_device->rport = fc_remote_port_add(host, 0, &ids);
1243 +- if (!stor_device->rport)
1244 ++ if (!stor_device->rport) {
1245 ++ ret = -ENOMEM;
1246 + goto err_out4;
1247 ++ }
1248 + }
1249 + #endif
1250 + return 0;
1251 +diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
1252 +index 79ddefe4180d..40390d31a93b 100644
1253 +--- a/drivers/spi/spi-imx.c
1254 ++++ b/drivers/spi/spi-imx.c
1255 +@@ -1668,12 +1668,23 @@ static int spi_imx_remove(struct platform_device *pdev)
1256 + {
1257 + struct spi_master *master = platform_get_drvdata(pdev);
1258 + struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
1259 ++ int ret;
1260 +
1261 + spi_bitbang_stop(&spi_imx->bitbang);
1262 +
1263 ++ ret = clk_enable(spi_imx->clk_per);
1264 ++ if (ret)
1265 ++ return ret;
1266 ++
1267 ++ ret = clk_enable(spi_imx->clk_ipg);
1268 ++ if (ret) {
1269 ++ clk_disable(spi_imx->clk_per);
1270 ++ return ret;
1271 ++ }
1272 ++
1273 + writel(0, spi_imx->base + MXC_CSPICTRL);
1274 +- clk_unprepare(spi_imx->clk_ipg);
1275 +- clk_unprepare(spi_imx->clk_per);
1276 ++ clk_disable_unprepare(spi_imx->clk_ipg);
1277 ++ clk_disable_unprepare(spi_imx->clk_per);
1278 + spi_imx_sdma_exit(spi_imx);
1279 + spi_master_put(master);
1280 +
1281 +diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c
1282 +index ee85cbf7c9ae..980ff42128b4 100644
1283 +--- a/drivers/staging/ccree/ssi_cipher.c
1284 ++++ b/drivers/staging/ccree/ssi_cipher.c
1285 +@@ -908,6 +908,7 @@ static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req)
1286 + scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
1287 + (req->nbytes - ivsize), ivsize, 0);
1288 + req_ctx->is_giv = false;
1289 ++ req_ctx->backup_info = NULL;
1290 +
1291 + return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT);
1292 + }
1293 +diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c
1294 +index 1a3c481fa92a..ce16c3440319 100644
1295 +--- a/drivers/staging/ccree/ssi_driver.c
1296 ++++ b/drivers/staging/ccree/ssi_driver.c
1297 +@@ -117,7 +117,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
1298 + irr &= ~SSI_COMP_IRQ_MASK;
1299 + complete_request(drvdata);
1300 + }
1301 +-#ifdef CC_SUPPORT_FIPS
1302 ++#ifdef CONFIG_CRYPTO_FIPS
1303 + /* TEE FIPS interrupt */
1304 + if (likely((irr & SSI_GPR0_IRQ_MASK) != 0)) {
1305 + /* Mask interrupt - will be unmasked in Deferred service handler */
1306 +diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
1307 +index 8024843521ab..7b256d716251 100644
1308 +--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
1309 ++++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
1310 +@@ -826,14 +826,15 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm
1311 + return conn;
1312 +
1313 + failed_2:
1314 +- kiblnd_destroy_conn(conn, true);
1315 ++ kiblnd_destroy_conn(conn);
1316 ++ LIBCFS_FREE(conn, sizeof(*conn));
1317 + failed_1:
1318 + LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
1319 + failed_0:
1320 + return NULL;
1321 + }
1322 +
1323 +-void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
1324 ++void kiblnd_destroy_conn(struct kib_conn *conn)
1325 + {
1326 + struct rdma_cm_id *cmid = conn->ibc_cmid;
1327 + struct kib_peer *peer = conn->ibc_peer;
1328 +@@ -896,8 +897,6 @@ void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
1329 + rdma_destroy_id(cmid);
1330 + atomic_dec(&net->ibn_nconns);
1331 + }
1332 +-
1333 +- LIBCFS_FREE(conn, sizeof(*conn));
1334 + }
1335 +
1336 + int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why)
1337 +diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
1338 +index 171eced213f8..b18911d09e9a 100644
1339 +--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
1340 ++++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
1341 +@@ -1016,7 +1016,7 @@ int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why);
1342 + struct kib_conn *kiblnd_create_conn(struct kib_peer *peer,
1343 + struct rdma_cm_id *cmid,
1344 + int state, int version);
1345 +-void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn);
1346 ++void kiblnd_destroy_conn(struct kib_conn *conn);
1347 + void kiblnd_close_conn(struct kib_conn *conn, int error);
1348 + void kiblnd_close_conn_locked(struct kib_conn *conn, int error);
1349 +
1350 +diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
1351 +index 40e3af5d8b04..2f25642ea1a6 100644
1352 +--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
1353 ++++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
1354 +@@ -3314,11 +3314,13 @@ kiblnd_connd(void *arg)
1355 + spin_unlock_irqrestore(lock, flags);
1356 + dropped_lock = 1;
1357 +
1358 +- kiblnd_destroy_conn(conn, !peer);
1359 ++ kiblnd_destroy_conn(conn);
1360 +
1361 + spin_lock_irqsave(lock, flags);
1362 +- if (!peer)
1363 ++ if (!peer) {
1364 ++ kfree(conn);
1365 + continue;
1366 ++ }
1367 +
1368 + conn->ibc_peer = peer;
1369 + if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE)
1370 +diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
1371 +index 5bb0c42c88dd..7070203e3157 100644
1372 +--- a/drivers/tty/serial/8250/8250_dw.c
1373 ++++ b/drivers/tty/serial/8250/8250_dw.c
1374 +@@ -252,31 +252,25 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
1375 + struct ktermios *old)
1376 + {
1377 + unsigned int baud = tty_termios_baud_rate(termios);
1378 +- unsigned int target_rate, min_rate, max_rate;
1379 + struct dw8250_data *d = p->private_data;
1380 + long rate;
1381 +- int i, ret;
1382 ++ int ret;
1383 +
1384 + if (IS_ERR(d->clk) || !old)
1385 + goto out;
1386 +
1387 +- /* Find a clk rate within +/-1.6% of an integer multiple of baudx16 */
1388 +- target_rate = baud * 16;
1389 +- min_rate = target_rate - (target_rate >> 6);
1390 +- max_rate = target_rate + (target_rate >> 6);
1391 +-
1392 +- for (i = 1; i <= UART_DIV_MAX; i++) {
1393 +- rate = clk_round_rate(d->clk, i * target_rate);
1394 +- if (rate >= i * min_rate && rate <= i * max_rate)
1395 +- break;
1396 +- }
1397 +- if (i <= UART_DIV_MAX) {
1398 +- clk_disable_unprepare(d->clk);
1399 ++ clk_disable_unprepare(d->clk);
1400 ++ rate = clk_round_rate(d->clk, baud * 16);
1401 ++ if (rate < 0)
1402 ++ ret = rate;
1403 ++ else if (rate == 0)
1404 ++ ret = -ENOENT;
1405 ++ else
1406 + ret = clk_set_rate(d->clk, rate);
1407 +- clk_prepare_enable(d->clk);
1408 +- if (!ret)
1409 +- p->uartclk = rate;
1410 +- }
1411 ++ clk_prepare_enable(d->clk);
1412 ++
1413 ++ if (!ret)
1414 ++ p->uartclk = rate;
1415 +
1416 + out:
1417 + p->status &= ~UPSTAT_AUTOCTS;
1418 +diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
1419 +index 1e67a7e4a5fd..160b8906d9b9 100644
1420 +--- a/drivers/tty/serial/8250/8250_of.c
1421 ++++ b/drivers/tty/serial/8250/8250_of.c
1422 +@@ -136,8 +136,11 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
1423 + }
1424 +
1425 + info->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL);
1426 +- if (IS_ERR(info->rst))
1427 ++ if (IS_ERR(info->rst)) {
1428 ++ ret = PTR_ERR(info->rst);
1429 + goto err_dispose;
1430 ++ }
1431 ++
1432 + ret = reset_control_deassert(info->rst);
1433 + if (ret)
1434 + goto err_dispose;
1435 +diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
1436 +index 45ef506293ae..28d88ccf5a0c 100644
1437 +--- a/drivers/tty/serial/8250/8250_uniphier.c
1438 ++++ b/drivers/tty/serial/8250/8250_uniphier.c
1439 +@@ -250,12 +250,13 @@ static int uniphier_uart_probe(struct platform_device *pdev)
1440 + up.dl_read = uniphier_serial_dl_read;
1441 + up.dl_write = uniphier_serial_dl_write;
1442 +
1443 +- priv->line = serial8250_register_8250_port(&up);
1444 +- if (priv->line < 0) {
1445 ++ ret = serial8250_register_8250_port(&up);
1446 ++ if (ret < 0) {
1447 + dev_err(dev, "failed to register 8250 port\n");
1448 + clk_disable_unprepare(priv->clk);
1449 + return ret;
1450 + }
1451 ++ priv->line = ret;
1452 +
1453 + platform_set_drvdata(pdev, priv);
1454 +
1455 +diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
1456 +index e4b3d9123a03..e9145ed0d6e7 100644
1457 +--- a/drivers/tty/serial/imx.c
1458 ++++ b/drivers/tty/serial/imx.c
1459 +@@ -2238,12 +2238,14 @@ static void serial_imx_enable_wakeup(struct imx_port *sport, bool on)
1460 + val &= ~UCR3_AWAKEN;
1461 + writel(val, sport->port.membase + UCR3);
1462 +
1463 +- val = readl(sport->port.membase + UCR1);
1464 +- if (on)
1465 +- val |= UCR1_RTSDEN;
1466 +- else
1467 +- val &= ~UCR1_RTSDEN;
1468 +- writel(val, sport->port.membase + UCR1);
1469 ++ if (sport->have_rtscts) {
1470 ++ val = readl(sport->port.membase + UCR1);
1471 ++ if (on)
1472 ++ val |= UCR1_RTSDEN;
1473 ++ else
1474 ++ val &= ~UCR1_RTSDEN;
1475 ++ writel(val, sport->port.membase + UCR1);
1476 ++ }
1477 + }
1478 +
1479 + static int imx_serial_port_suspend_noirq(struct device *dev)
1480 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
1481 +index dc60aeea87d8..4b506f2d3522 100644
1482 +--- a/drivers/tty/tty_io.c
1483 ++++ b/drivers/tty/tty_io.c
1484 +@@ -1323,6 +1323,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
1485 + "%s: %s driver does not set tty->port. This will crash the kernel later. Fix the driver!\n",
1486 + __func__, tty->driver->name);
1487 +
1488 ++ retval = tty_ldisc_lock(tty, 5 * HZ);
1489 ++ if (retval)
1490 ++ goto err_release_lock;
1491 + tty->port->itty = tty;
1492 +
1493 + /*
1494 +@@ -1333,6 +1336,7 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
1495 + retval = tty_ldisc_setup(tty, tty->link);
1496 + if (retval)
1497 + goto err_release_tty;
1498 ++ tty_ldisc_unlock(tty);
1499 + /* Return the tty locked so that it cannot vanish under the caller */
1500 + return tty;
1501 +
1502 +@@ -1345,9 +1349,11 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
1503 +
1504 + /* call the tty release_tty routine to clean out this slot */
1505 + err_release_tty:
1506 +- tty_unlock(tty);
1507 ++ tty_ldisc_unlock(tty);
1508 + tty_info_ratelimited(tty, "ldisc open failed (%d), clearing slot %d\n",
1509 + retval, idx);
1510 ++err_release_lock:
1511 ++ tty_unlock(tty);
1512 + release_tty(tty, idx);
1513 + return ERR_PTR(retval);
1514 + }
1515 +diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
1516 +index 24ec5c7e6b20..4e7946c0484b 100644
1517 +--- a/drivers/tty/tty_ldisc.c
1518 ++++ b/drivers/tty/tty_ldisc.c
1519 +@@ -337,7 +337,7 @@ static inline void __tty_ldisc_unlock(struct tty_struct *tty)
1520 + ldsem_up_write(&tty->ldisc_sem);
1521 + }
1522 +
1523 +-static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
1524 ++int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
1525 + {
1526 + int ret;
1527 +
1528 +@@ -348,7 +348,7 @@ static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
1529 + return 0;
1530 + }
1531 +
1532 +-static void tty_ldisc_unlock(struct tty_struct *tty)
1533 ++void tty_ldisc_unlock(struct tty_struct *tty)
1534 + {
1535 + clear_bit(TTY_LDISC_HALTED, &tty->flags);
1536 + __tty_ldisc_unlock(tty);
1537 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1538 +index 8e0636c963a7..18bbe3fedb8b 100644
1539 +--- a/drivers/usb/class/cdc-acm.c
1540 ++++ b/drivers/usb/class/cdc-acm.c
1541 +@@ -425,7 +425,7 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
1542 +
1543 + res = usb_submit_urb(acm->read_urbs[index], mem_flags);
1544 + if (res) {
1545 +- if (res != -EPERM) {
1546 ++ if (res != -EPERM && res != -ENODEV) {
1547 + dev_err(&acm->data->dev,
1548 + "urb %d failed submission with %d\n",
1549 + index, res);
1550 +@@ -1752,6 +1752,9 @@ static const struct usb_device_id acm_ids[] = {
1551 + { USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */
1552 + .driver_info = SINGLE_RX_URB, /* firmware bug */
1553 + },
1554 ++ { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
1555 ++ .driver_info = SINGLE_RX_URB,
1556 ++ },
1557 + { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
1558 + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1559 + },
1560 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
1561 +index b6cf5ab5a0a1..f9bd351637cd 100644
1562 +--- a/drivers/usb/gadget/function/f_fs.c
1563 ++++ b/drivers/usb/gadget/function/f_fs.c
1564 +@@ -3700,7 +3700,8 @@ static void ffs_closed(struct ffs_data *ffs)
1565 + ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
1566 + ffs_dev_unlock();
1567 +
1568 +- unregister_gadget_item(ci);
1569 ++ if (test_bit(FFS_FL_BOUND, &ffs->flags))
1570 ++ unregister_gadget_item(ci);
1571 + return;
1572 + done:
1573 + ffs_dev_unlock();
1574 +diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
1575 +index 1b3efb14aec7..ac0541529499 100644
1576 +--- a/drivers/usb/gadget/udc/core.c
1577 ++++ b/drivers/usb/gadget/udc/core.c
1578 +@@ -912,7 +912,7 @@ int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
1579 + return 0;
1580 +
1581 + /* "high bandwidth" works only at high speed */
1582 +- if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp(desc) & (3<<11))
1583 ++ if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp_mult(desc) > 1)
1584 + return 0;
1585 +
1586 + switch (type) {
1587 +diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
1588 +index a8d5f2e4878d..c66b93664d54 100644
1589 +--- a/drivers/usb/serial/Kconfig
1590 ++++ b/drivers/usb/serial/Kconfig
1591 +@@ -63,6 +63,7 @@ config USB_SERIAL_SIMPLE
1592 + - Google USB serial devices
1593 + - HP4x calculators
1594 + - a number of Motorola phones
1595 ++ - Motorola Tetra devices
1596 + - Novatel Wireless GPS receivers
1597 + - Siemens USB/MPI adapter.
1598 + - ViVOtech ViVOpay USB device.
1599 +diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
1600 +index 219265ce3711..17283f4b4779 100644
1601 +--- a/drivers/usb/serial/io_edgeport.c
1602 ++++ b/drivers/usb/serial/io_edgeport.c
1603 +@@ -2282,7 +2282,6 @@ static int write_cmd_usb(struct edgeport_port *edge_port,
1604 + /* something went wrong */
1605 + dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n",
1606 + __func__, status);
1607 +- usb_kill_urb(urb);
1608 + usb_free_urb(urb);
1609 + atomic_dec(&CmdUrbs);
1610 + return status;
1611 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1612 +index b6320e3be429..5db8ed517e0e 100644
1613 +--- a/drivers/usb/serial/option.c
1614 ++++ b/drivers/usb/serial/option.c
1615 +@@ -380,6 +380,9 @@ static void option_instat_callback(struct urb *urb);
1616 + #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
1617 + #define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01
1618 +
1619 ++/* Fujisoft products */
1620 ++#define FUJISOFT_PRODUCT_FS040U 0x9b02
1621 ++
1622 + /* iBall 3.5G connect wireless modem */
1623 + #define IBALL_3_5G_CONNECT 0x9605
1624 +
1625 +@@ -1894,6 +1897,8 @@ static const struct usb_device_id option_ids[] = {
1626 + { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
1627 + .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
1628 + },
1629 ++ {USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
1630 ++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist},
1631 + { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
1632 + { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
1633 + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1634 +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
1635 +index 57ae832a49ff..46dd09da2434 100644
1636 +--- a/drivers/usb/serial/pl2303.c
1637 ++++ b/drivers/usb/serial/pl2303.c
1638 +@@ -38,6 +38,7 @@ static const struct usb_device_id id_table[] = {
1639 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
1640 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
1641 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) },
1642 ++ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_CHILITAG) },
1643 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) },
1644 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) },
1645 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
1646 +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
1647 +index f98fd84890de..fcd72396a7b6 100644
1648 +--- a/drivers/usb/serial/pl2303.h
1649 ++++ b/drivers/usb/serial/pl2303.h
1650 +@@ -12,6 +12,7 @@
1651 + #define PL2303_PRODUCT_ID_DCU11 0x1234
1652 + #define PL2303_PRODUCT_ID_PHAROS 0xaaa0
1653 + #define PL2303_PRODUCT_ID_RSAQ3 0xaaa2
1654 ++#define PL2303_PRODUCT_ID_CHILITAG 0xaaa8
1655 + #define PL2303_PRODUCT_ID_ALDIGA 0x0611
1656 + #define PL2303_PRODUCT_ID_MMX 0x0612
1657 + #define PL2303_PRODUCT_ID_GPRS 0x0609
1658 +diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
1659 +index 74172fe158df..4ef79e29cb26 100644
1660 +--- a/drivers/usb/serial/usb-serial-simple.c
1661 ++++ b/drivers/usb/serial/usb-serial-simple.c
1662 +@@ -77,6 +77,11 @@ DEVICE(vivopay, VIVOPAY_IDS);
1663 + { USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */
1664 + DEVICE(moto_modem, MOTO_IDS);
1665 +
1666 ++/* Motorola Tetra driver */
1667 ++#define MOTOROLA_TETRA_IDS() \
1668 ++ { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
1669 ++DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
1670 ++
1671 + /* Novatel Wireless GPS driver */
1672 + #define NOVATEL_IDS() \
1673 + { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */
1674 +@@ -107,6 +112,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
1675 + &google_device,
1676 + &vivopay_device,
1677 + &moto_modem_device,
1678 ++ &motorola_tetra_device,
1679 + &novatel_gps_device,
1680 + &hp4x_device,
1681 + &suunto_device,
1682 +@@ -122,6 +128,7 @@ static const struct usb_device_id id_table[] = {
1683 + GOOGLE_IDS(),
1684 + VIVOPAY_IDS(),
1685 + MOTO_IDS(),
1686 ++ MOTOROLA_TETRA_IDS(),
1687 + NOVATEL_IDS(),
1688 + HP4X_IDS(),
1689 + SUUNTO_IDS(),
1690 +diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
1691 +index 5d04c40ee40a..3b1b9695177a 100644
1692 +--- a/drivers/usb/storage/uas.c
1693 ++++ b/drivers/usb/storage/uas.c
1694 +@@ -1076,20 +1076,19 @@ static int uas_post_reset(struct usb_interface *intf)
1695 + return 0;
1696 +
1697 + err = uas_configure_endpoints(devinfo);
1698 +- if (err) {
1699 ++ if (err && err != ENODEV)
1700 + shost_printk(KERN_ERR, shost,
1701 + "%s: alloc streams error %d after reset",
1702 + __func__, err);
1703 +- return 1;
1704 +- }
1705 +
1706 ++ /* we must unblock the host in every case lest we deadlock */
1707 + spin_lock_irqsave(shost->host_lock, flags);
1708 + scsi_report_bus_reset(shost, 0);
1709 + spin_unlock_irqrestore(shost->host_lock, flags);
1710 +
1711 + scsi_unblock_requests(shost);
1712 +
1713 +- return 0;
1714 ++ return err ? 1 : 0;
1715 + }
1716 +
1717 + static int uas_suspend(struct usb_interface *intf, pm_message_t message)
1718 +diff --git a/include/linux/tty.h b/include/linux/tty.h
1719 +index 7ac8ba208b1f..0a6c71e0ad01 100644
1720 +--- a/include/linux/tty.h
1721 ++++ b/include/linux/tty.h
1722 +@@ -405,6 +405,8 @@ extern const char *tty_name(const struct tty_struct *tty);
1723 + extern struct tty_struct *tty_kopen(dev_t device);
1724 + extern void tty_kclose(struct tty_struct *tty);
1725 + extern int tty_dev_name_to_number(const char *name, dev_t *number);
1726 ++extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout);
1727 ++extern void tty_ldisc_unlock(struct tty_struct *tty);
1728 + #else
1729 + static inline void tty_kref_put(struct tty_struct *tty)
1730 + { }
1731 +diff --git a/lib/test_firmware.c b/lib/test_firmware.c
1732 +index 64a4c76cba2b..e7008688769b 100644
1733 +--- a/lib/test_firmware.c
1734 ++++ b/lib/test_firmware.c
1735 +@@ -371,6 +371,7 @@ static ssize_t config_num_requests_store(struct device *dev,
1736 + if (test_fw_config->reqs) {
1737 + pr_err("Must call release_all_firmware prior to changing config\n");
1738 + rc = -EINVAL;
1739 ++ mutex_unlock(&test_fw_mutex);
1740 + goto out;
1741 + }
1742 + mutex_unlock(&test_fw_mutex);
1743 +diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
1744 +index ee4613fa5840..f19f4841a97a 100644
1745 +--- a/security/integrity/ima/ima_policy.c
1746 ++++ b/security/integrity/ima/ima_policy.c
1747 +@@ -743,7 +743,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
1748 + case Opt_fsuuid:
1749 + ima_log_string(ab, "fsuuid", args[0].from);
1750 +
1751 +- if (uuid_is_null(&entry->fsuuid)) {
1752 ++ if (!uuid_is_null(&entry->fsuuid)) {
1753 + result = -EINVAL;
1754 + break;
1755 + }
1756 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1757 +index 9aafc6c86132..1750e00c5bb4 100644
1758 +--- a/sound/pci/hda/patch_realtek.c
1759 ++++ b/sound/pci/hda/patch_realtek.c
1760 +@@ -3154,11 +3154,13 @@ static void alc256_shutup(struct hda_codec *codec)
1761 + if (hp_pin_sense)
1762 + msleep(85);
1763 +
1764 ++ /* 3k pull low control for Headset jack. */
1765 ++ /* NOTE: call this before clearing the pin, otherwise codec stalls */
1766 ++ alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
1767 ++
1768 + snd_hda_codec_write(codec, hp_pin, 0,
1769 + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
1770 +
1771 +- alc_update_coef_idx(codec, 0x46, 0, 3 << 12); /* 3k pull low control for Headset jack. */
1772 +-
1773 + if (hp_pin_sense)
1774 + msleep(100);
1775 +
1776 +diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
1777 +index 1c14c2595158..4b36323ea64b 100644
1778 +--- a/tools/gpio/gpio-event-mon.c
1779 ++++ b/tools/gpio/gpio-event-mon.c
1780 +@@ -23,6 +23,7 @@
1781 + #include <getopt.h>
1782 + #include <inttypes.h>
1783 + #include <sys/ioctl.h>
1784 ++#include <sys/types.h>
1785 + #include <linux/gpio.h>
1786 +
1787 + int monitor_device(const char *device_name,
1788 +diff --git a/tools/usb/usbip/src/usbip_bind.c b/tools/usb/usbip/src/usbip_bind.c
1789 +index fa46141ae68b..e121cfb1746a 100644
1790 +--- a/tools/usb/usbip/src/usbip_bind.c
1791 ++++ b/tools/usb/usbip/src/usbip_bind.c
1792 +@@ -144,6 +144,7 @@ static int bind_device(char *busid)
1793 + int rc;
1794 + struct udev *udev;
1795 + struct udev_device *dev;
1796 ++ const char *devpath;
1797 +
1798 + /* Check whether the device with this bus ID exists. */
1799 + udev = udev_new();
1800 +@@ -152,8 +153,16 @@ static int bind_device(char *busid)
1801 + err("device with the specified bus ID does not exist");
1802 + return -1;
1803 + }
1804 ++ devpath = udev_device_get_devpath(dev);
1805 + udev_unref(udev);
1806 +
1807 ++ /* If the device is already attached to vhci_hcd - bail out */
1808 ++ if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
1809 ++ err("bind loop detected: device: %s is attached to %s\n",
1810 ++ devpath, USBIP_VHCI_DRV_NAME);
1811 ++ return -1;
1812 ++ }
1813 ++
1814 + rc = unbind_other(busid);
1815 + if (rc == UNBIND_ST_FAILED) {
1816 + err("could not unbind driver from device on busid %s", busid);
1817 +diff --git a/tools/usb/usbip/src/usbip_list.c b/tools/usb/usbip/src/usbip_list.c
1818 +index f1b38e866dd7..d65a9f444174 100644
1819 +--- a/tools/usb/usbip/src/usbip_list.c
1820 ++++ b/tools/usb/usbip/src/usbip_list.c
1821 +@@ -187,6 +187,7 @@ static int list_devices(bool parsable)
1822 + const char *busid;
1823 + char product_name[128];
1824 + int ret = -1;
1825 ++ const char *devpath;
1826 +
1827 + /* Create libudev context. */
1828 + udev = udev_new();
1829 +@@ -209,6 +210,14 @@ static int list_devices(bool parsable)
1830 + path = udev_list_entry_get_name(dev_list_entry);
1831 + dev = udev_device_new_from_syspath(udev, path);
1832 +
1833 ++ /* Ignore devices attached to vhci_hcd */
1834 ++ devpath = udev_device_get_devpath(dev);
1835 ++ if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
1836 ++ dbg("Skip the device %s already attached to %s\n",
1837 ++ devpath, USBIP_VHCI_DRV_NAME);
1838 ++ continue;
1839 ++ }
1840 ++
1841 + /* Get device information. */
1842 + idVendor = udev_device_get_sysattr_value(dev, "idVendor");
1843 + idProduct = udev_device_get_sysattr_value(dev, "idProduct");