
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Thu, 18 Feb 2016 00:20:43
Message-Id: 1455754846.91978da2903783b7f76fcfb76c00fcad910ab011.mpagano@gentoo
commit:     91978da2903783b7f76fcfb76c00fcad910ab011
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Feb 18 00:20:46 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Feb 18 00:20:46 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=91978da2

Linux patch 4.4.2

 0000_README            |    4 +
 1001_linux-4.4.2.patch | 5320 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5324 insertions(+)

diff --git a/0000_README b/0000_README
index c18256b..de28467 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch: 1000_linux-4.4.1.patch
 From: http://www.kernel.org
 Desc: Linux 4.4.1
 
+Patch: 1001_linux-4.4.2.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.2
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
diff --git a/1001_linux-4.4.2.patch b/1001_linux-4.4.2.patch
new file mode 100644
index 0000000..054aeb7
--- /dev/null
+++ b/1001_linux-4.4.2.patch
@@ -0,0 +1,5320 @@
36 +diff --git a/Makefile b/Makefile
37 +index c6a265b52c93..e7a2958eb771 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 4
43 +-SUBLEVEL = 1
44 ++SUBLEVEL = 2
45 + EXTRAVERSION =
46 + NAME = Blurry Fish Butt
47 +
48 +diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h
49 +index 7d56a9ccb752..a65d888716c4 100644
50 +--- a/arch/parisc/include/asm/hugetlb.h
51 ++++ b/arch/parisc/include/asm/hugetlb.h
52 +@@ -54,24 +54,12 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
53 + return pte_wrprotect(pte);
54 + }
55 +
56 +-static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
57 +- unsigned long addr, pte_t *ptep)
58 +-{
59 +- pte_t old_pte = *ptep;
60 +- set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
61 +-}
62 ++void huge_ptep_set_wrprotect(struct mm_struct *mm,
63 ++ unsigned long addr, pte_t *ptep);
64 +
65 +-static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
66 ++int huge_ptep_set_access_flags(struct vm_area_struct *vma,
67 + unsigned long addr, pte_t *ptep,
68 +- pte_t pte, int dirty)
69 +-{
70 +- int changed = !pte_same(*ptep, pte);
71 +- if (changed) {
72 +- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
73 +- flush_tlb_page(vma, addr);
74 +- }
75 +- return changed;
76 +-}
77 ++ pte_t pte, int dirty);
78 +
79 + static inline pte_t huge_ptep_get(pte_t *ptep)
80 + {
81 +diff --git a/arch/parisc/include/uapi/asm/siginfo.h b/arch/parisc/include/uapi/asm/siginfo.h
82 +index d7034728f377..1c75565d984b 100644
83 +--- a/arch/parisc/include/uapi/asm/siginfo.h
84 ++++ b/arch/parisc/include/uapi/asm/siginfo.h
85 +@@ -1,6 +1,10 @@
86 + #ifndef _PARISC_SIGINFO_H
87 + #define _PARISC_SIGINFO_H
88 +
89 ++#if defined(__LP64__)
90 ++#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
91 ++#endif
92 ++
93 + #include <asm-generic/siginfo.h>
94 +
95 + #undef NSIGTRAP
96 +diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
97 +index f6fdc77a72bd..54ba39262b82 100644
98 +--- a/arch/parisc/mm/hugetlbpage.c
99 ++++ b/arch/parisc/mm/hugetlbpage.c
100 +@@ -105,15 +105,13 @@ static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long ad
101 + addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;
102 +
103 + for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
104 +- mtsp(mm->context, 1);
105 +- pdtlb(addr);
106 +- if (unlikely(split_tlb))
107 +- pitlb(addr);
108 ++ purge_tlb_entries(mm, addr);
109 + addr += (1UL << REAL_HPAGE_SHIFT);
110 + }
111 + }
112 +
113 +-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
114 ++/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
115 ++static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
116 + pte_t *ptep, pte_t entry)
117 + {
118 + unsigned long addr_start;
119 +@@ -123,14 +121,9 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
120 + addr_start = addr;
121 +
122 + for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
123 +- /* Directly write pte entry. We could call set_pte_at(mm, addr, ptep, entry)
124 +- * instead, but then we get double locking on pa_tlb_lock. */
125 +- *ptep = entry;
126 ++ set_pte(ptep, entry);
127 + ptep++;
128 +
129 +- /* Drop the PAGE_SIZE/non-huge tlb entry */
130 +- purge_tlb_entries(mm, addr);
131 +-
132 + addr += PAGE_SIZE;
133 + pte_val(entry) += PAGE_SIZE;
134 + }
135 +@@ -138,18 +131,61 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
136 + purge_tlb_entries_huge(mm, addr_start);
137 + }
138 +
139 ++void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
140 ++ pte_t *ptep, pte_t entry)
141 ++{
142 ++ unsigned long flags;
143 ++
144 ++ purge_tlb_start(flags);
145 ++ __set_huge_pte_at(mm, addr, ptep, entry);
146 ++ purge_tlb_end(flags);
147 ++}
148 ++
149 +
150 + pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
151 + pte_t *ptep)
152 + {
153 ++ unsigned long flags;
154 + pte_t entry;
155 +
156 ++ purge_tlb_start(flags);
157 + entry = *ptep;
158 +- set_huge_pte_at(mm, addr, ptep, __pte(0));
159 ++ __set_huge_pte_at(mm, addr, ptep, __pte(0));
160 ++ purge_tlb_end(flags);
161 +
162 + return entry;
163 + }
164 +
165 ++
166 ++void huge_ptep_set_wrprotect(struct mm_struct *mm,
167 ++ unsigned long addr, pte_t *ptep)
168 ++{
169 ++ unsigned long flags;
170 ++ pte_t old_pte;
171 ++
172 ++ purge_tlb_start(flags);
173 ++ old_pte = *ptep;
174 ++ __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
175 ++ purge_tlb_end(flags);
176 ++}
177 ++
178 ++int huge_ptep_set_access_flags(struct vm_area_struct *vma,
179 ++ unsigned long addr, pte_t *ptep,
180 ++ pte_t pte, int dirty)
181 ++{
182 ++ unsigned long flags;
183 ++ int changed;
184 ++
185 ++ purge_tlb_start(flags);
186 ++ changed = !pte_same(*ptep, pte);
187 ++ if (changed) {
188 ++ __set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
189 ++ }
190 ++ purge_tlb_end(flags);
191 ++ return changed;
192 ++}
193 ++
194 ++
195 + int pmd_huge(pmd_t pmd)
196 + {
197 + return 0;
198 +diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S
199 +index 712b13047b41..3a33124e9112 100644
200 +--- a/arch/x86/crypto/chacha20-ssse3-x86_64.S
201 ++++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S
202 +@@ -157,7 +157,9 @@ ENTRY(chacha20_4block_xor_ssse3)
203 + # done with the slightly better performing SSSE3 byte shuffling,
204 + # 7/12-bit word rotation uses traditional shift+OR.
205 +
206 +- sub $0x40,%rsp
207 ++ mov %rsp,%r11
208 ++ sub $0x80,%rsp
209 ++ and $~63,%rsp
210 +
211 + # x0..15[0-3] = s0..3[0..3]
212 + movq 0x00(%rdi),%xmm1
213 +@@ -620,6 +622,6 @@ ENTRY(chacha20_4block_xor_ssse3)
214 + pxor %xmm1,%xmm15
215 + movdqu %xmm15,0xf0(%rsi)
216 +
217 +- add $0x40,%rsp
218 ++ mov %r11,%rsp
219 + ret
220 + ENDPROC(chacha20_4block_xor_ssse3)
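The chacha20-ssse3 change above stops using a fixed "sub $0x40,%rsp": it saves %rsp in %r11, reserves 0x80 bytes, and masks the stack pointer with ~63 so the on-stack scratch area is 64-byte aligned regardless of the caller's alignment, restoring the original pointer from %r11 on exit. The equivalent pointer arithmetic as a standalone C sketch (illustrative only, not part of the patch; the example stack value is arbitrary):

#include <assert.h>
#include <stdint.h>

/* Round an address down to a 64-byte boundary, like "and $~63,%rsp". */
static uintptr_t align_down_64(uintptr_t sp)
{
	return sp & ~(uintptr_t)63;
}

int main(void)
{
	uintptr_t sp = 0x7ffd1234;		/* arbitrary example stack pointer */
	uintptr_t aligned = align_down_64(sp - 0x80);

	/* Aligning down only lowers the pointer, so the full 0x80-byte
	 * reservation (more than the 0x40 the old code used) still sits
	 * between the aligned pointer and the caller's frame. */
	assert((aligned & 63) == 0);
	assert(sp - aligned >= 0x80);
	return 0;
}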
221 +diff --git a/block/blk-merge.c b/block/blk-merge.c
222 +index e01405a3e8b3..b966db8f3556 100644
223 +--- a/block/blk-merge.c
224 ++++ b/block/blk-merge.c
225 +@@ -68,6 +68,18 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q,
226 + return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
227 + }
228 +
229 ++static inline unsigned get_max_io_size(struct request_queue *q,
230 ++ struct bio *bio)
231 ++{
232 ++ unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
233 ++ unsigned mask = queue_logical_block_size(q) - 1;
234 ++
235 ++ /* aligned to logical block size */
236 ++ sectors &= ~(mask >> 9);
237 ++
238 ++ return sectors;
239 ++}
240 ++
241 + static struct bio *blk_bio_segment_split(struct request_queue *q,
242 + struct bio *bio,
243 + struct bio_set *bs,
244 +@@ -79,11 +91,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
245 + unsigned front_seg_size = bio->bi_seg_front_size;
246 + bool do_split = true;
247 + struct bio *new = NULL;
248 ++ const unsigned max_sectors = get_max_io_size(q, bio);
249 +
250 + bio_for_each_segment(bv, bio, iter) {
251 +- if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
252 +- goto split;
253 +-
254 + /*
255 + * If the queue doesn't support SG gaps and adding this
256 + * offset would create a gap, disallow it.
257 +@@ -91,6 +101,21 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
258 + if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
259 + goto split;
260 +
261 ++ if (sectors + (bv.bv_len >> 9) > max_sectors) {
262 ++ /*
263 ++ * Consider this a new segment if we're splitting in
264 ++ * the middle of this vector.
265 ++ */
266 ++ if (nsegs < queue_max_segments(q) &&
267 ++ sectors < max_sectors) {
268 ++ nsegs++;
269 ++ sectors = max_sectors;
270 ++ }
271 ++ if (sectors)
272 ++ goto split;
273 ++ /* Make this single bvec as the 1st segment */
274 ++ }
275 ++
276 + if (bvprvp && blk_queue_cluster(q)) {
277 + if (seg_size + bv.bv_len > queue_max_segment_size(q))
278 + goto new_segment;
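For the new get_max_io_size() helper above: queue_logical_block_size() is in bytes, so (mask >> 9) converts the byte mask into a mask of 512-byte sectors, and clearing those bits rounds the per-bio limit down to a whole number of logical blocks. A small worked example in C, assuming a 4096-byte logical block size and a made-up sector limit (both are assumptions for illustration, not values from the patch):

#include <stdio.h>

int main(void)
{
	unsigned sectors = 1029;	/* hypothetical limit from blk_max_size_offset() */
	unsigned lbs = 4096;		/* assumed logical block size in bytes */
	unsigned mask = lbs - 1;	/* 0xfff */

	sectors &= ~(mask >> 9);	/* clear low 3 bits: round down to a multiple of 8 sectors */
	printf("%u\n", sectors);	/* prints 1024, i.e. a whole number of 4 KiB blocks */
	return 0;
}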
279 +diff --git a/crypto/af_alg.c b/crypto/af_alg.c
280 +index a8e7aa3e257b..f5e18c2a4852 100644
281 +--- a/crypto/af_alg.c
282 ++++ b/crypto/af_alg.c
283 +@@ -76,6 +76,8 @@ int af_alg_register_type(const struct af_alg_type *type)
284 + goto unlock;
285 +
286 + type->ops->owner = THIS_MODULE;
287 ++ if (type->ops_nokey)
288 ++ type->ops_nokey->owner = THIS_MODULE;
289 + node->type = type;
290 + list_add(&node->list, &alg_types);
291 + err = 0;
292 +@@ -125,6 +127,26 @@ int af_alg_release(struct socket *sock)
293 + }
294 + EXPORT_SYMBOL_GPL(af_alg_release);
295 +
296 ++void af_alg_release_parent(struct sock *sk)
297 ++{
298 ++ struct alg_sock *ask = alg_sk(sk);
299 ++ unsigned int nokey = ask->nokey_refcnt;
300 ++ bool last = nokey && !ask->refcnt;
301 ++
302 ++ sk = ask->parent;
303 ++ ask = alg_sk(sk);
304 ++
305 ++ lock_sock(sk);
306 ++ ask->nokey_refcnt -= nokey;
307 ++ if (!last)
308 ++ last = !--ask->refcnt;
309 ++ release_sock(sk);
310 ++
311 ++ if (last)
312 ++ sock_put(sk);
313 ++}
314 ++EXPORT_SYMBOL_GPL(af_alg_release_parent);
315 ++
316 + static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
317 + {
318 + const u32 forbidden = CRYPTO_ALG_INTERNAL;
319 +@@ -133,6 +155,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
320 + struct sockaddr_alg *sa = (void *)uaddr;
321 + const struct af_alg_type *type;
322 + void *private;
323 ++ int err;
324 +
325 + if (sock->state == SS_CONNECTED)
326 + return -EINVAL;
327 +@@ -160,16 +183,22 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
328 + return PTR_ERR(private);
329 + }
330 +
331 ++ err = -EBUSY;
332 + lock_sock(sk);
333 ++ if (ask->refcnt | ask->nokey_refcnt)
334 ++ goto unlock;
335 +
336 + swap(ask->type, type);
337 + swap(ask->private, private);
338 +
339 ++ err = 0;
340 ++
341 ++unlock:
342 + release_sock(sk);
343 +
344 + alg_do_release(type, private);
345 +
346 +- return 0;
347 ++ return err;
348 + }
349 +
350 + static int alg_setkey(struct sock *sk, char __user *ukey,
351 +@@ -202,11 +231,15 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
352 + struct sock *sk = sock->sk;
353 + struct alg_sock *ask = alg_sk(sk);
354 + const struct af_alg_type *type;
355 +- int err = -ENOPROTOOPT;
356 ++ int err = -EBUSY;
357 +
358 + lock_sock(sk);
359 ++ if (ask->refcnt)
360 ++ goto unlock;
361 ++
362 + type = ask->type;
363 +
364 ++ err = -ENOPROTOOPT;
365 + if (level != SOL_ALG || !type)
366 + goto unlock;
367 +
368 +@@ -238,6 +271,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
369 + struct alg_sock *ask = alg_sk(sk);
370 + const struct af_alg_type *type;
371 + struct sock *sk2;
372 ++ unsigned int nokey;
373 + int err;
374 +
375 + lock_sock(sk);
376 +@@ -257,20 +291,29 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
377 + security_sk_clone(sk, sk2);
378 +
379 + err = type->accept(ask->private, sk2);
380 +- if (err) {
381 +- sk_free(sk2);
382 ++
383 ++ nokey = err == -ENOKEY;
384 ++ if (nokey && type->accept_nokey)
385 ++ err = type->accept_nokey(ask->private, sk2);
386 ++
387 ++ if (err)
388 + goto unlock;
389 +- }
390 +
391 + sk2->sk_family = PF_ALG;
392 +
393 +- sock_hold(sk);
394 ++ if (nokey || !ask->refcnt++)
395 ++ sock_hold(sk);
396 ++ ask->nokey_refcnt += nokey;
397 + alg_sk(sk2)->parent = sk;
398 + alg_sk(sk2)->type = type;
399 ++ alg_sk(sk2)->nokey_refcnt = nokey;
400 +
401 + newsock->ops = type->ops;
402 + newsock->state = SS_CONNECTED;
403 +
404 ++ if (nokey)
405 ++ newsock->ops = type->ops_nokey;
406 ++
407 + err = 0;
408 +
409 + unlock:
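The refcnt/nokey_refcnt accounting added to af_alg.c above is what drives the later algif_hash/algif_skcipher *_nokey paths: bind() and setsockopt() start returning EBUSY once request sockets hold a reference, and request sockets accepted before a key is set get the ops_nokey vtable, whose handlers fail with ENOKEY for algorithms that require a key. A minimal userspace sketch of the intended sequence (illustrative only; the algorithm choice and key bytes are placeholders and error handling is omitted):

#include <linux/if_alg.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SOL_ALG
#define SOL_ALG 279			/* older libc headers may not define it */
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "hmac(sha256)",	/* a keyed algorithm */
	};
	static const unsigned char key[32];	/* placeholder key */
	char digest[32];
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));

	/* Without this, send()/read() on opfd fail with ENOKEY after this patch. */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));

	opfd = accept(tfmfd, NULL, 0);		/* request socket */
	send(opfd, "hello", 5, 0);
	read(opfd, digest, sizeof(digest));

	close(opfd);
	close(tfmfd);
	return 0;
}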
410 +diff --git a/crypto/ahash.c b/crypto/ahash.c
411 +index 9c1dc8d6106a..d19b52324cf5 100644
412 +--- a/crypto/ahash.c
413 ++++ b/crypto/ahash.c
414 +@@ -451,6 +451,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
415 + struct ahash_alg *alg = crypto_ahash_alg(hash);
416 +
417 + hash->setkey = ahash_nosetkey;
418 ++ hash->has_setkey = false;
419 + hash->export = ahash_no_export;
420 + hash->import = ahash_no_import;
421 +
422 +@@ -463,8 +464,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
423 + hash->finup = alg->finup ?: ahash_def_finup;
424 + hash->digest = alg->digest;
425 +
426 +- if (alg->setkey)
427 ++ if (alg->setkey) {
428 + hash->setkey = alg->setkey;
429 ++ hash->has_setkey = true;
430 ++ }
431 + if (alg->export)
432 + hash->export = alg->export;
433 + if (alg->import)
434 +diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
435 +index b4c24fe3dcfb..68a5ceaa04c8 100644
436 +--- a/crypto/algif_hash.c
437 ++++ b/crypto/algif_hash.c
438 +@@ -34,6 +34,11 @@ struct hash_ctx {
439 + struct ahash_request req;
440 + };
441 +
442 ++struct algif_hash_tfm {
443 ++ struct crypto_ahash *hash;
444 ++ bool has_key;
445 ++};
446 ++
447 + static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
448 + size_t ignored)
449 + {
450 +@@ -49,7 +54,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
451 +
452 + lock_sock(sk);
453 + if (!ctx->more) {
454 +- err = crypto_ahash_init(&ctx->req);
455 ++ err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
456 ++ &ctx->completion);
457 + if (err)
458 + goto unlock;
459 + }
460 +@@ -120,6 +126,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
461 + } else {
462 + if (!ctx->more) {
463 + err = crypto_ahash_init(&ctx->req);
464 ++ err = af_alg_wait_for_completion(err, &ctx->completion);
465 + if (err)
466 + goto unlock;
467 + }
468 +@@ -235,19 +242,151 @@ static struct proto_ops algif_hash_ops = {
469 + .accept = hash_accept,
470 + };
471 +
472 ++static int hash_check_key(struct socket *sock)
473 ++{
474 ++ int err = 0;
475 ++ struct sock *psk;
476 ++ struct alg_sock *pask;
477 ++ struct algif_hash_tfm *tfm;
478 ++ struct sock *sk = sock->sk;
479 ++ struct alg_sock *ask = alg_sk(sk);
480 ++
481 ++ lock_sock(sk);
482 ++ if (ask->refcnt)
483 ++ goto unlock_child;
484 ++
485 ++ psk = ask->parent;
486 ++ pask = alg_sk(ask->parent);
487 ++ tfm = pask->private;
488 ++
489 ++ err = -ENOKEY;
490 ++ lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
491 ++ if (!tfm->has_key)
492 ++ goto unlock;
493 ++
494 ++ if (!pask->refcnt++)
495 ++ sock_hold(psk);
496 ++
497 ++ ask->refcnt = 1;
498 ++ sock_put(psk);
499 ++
500 ++ err = 0;
501 ++
502 ++unlock:
503 ++ release_sock(psk);
504 ++unlock_child:
505 ++ release_sock(sk);
506 ++
507 ++ return err;
508 ++}
509 ++
510 ++static int hash_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
511 ++ size_t size)
512 ++{
513 ++ int err;
514 ++
515 ++ err = hash_check_key(sock);
516 ++ if (err)
517 ++ return err;
518 ++
519 ++ return hash_sendmsg(sock, msg, size);
520 ++}
521 ++
522 ++static ssize_t hash_sendpage_nokey(struct socket *sock, struct page *page,
523 ++ int offset, size_t size, int flags)
524 ++{
525 ++ int err;
526 ++
527 ++ err = hash_check_key(sock);
528 ++ if (err)
529 ++ return err;
530 ++
531 ++ return hash_sendpage(sock, page, offset, size, flags);
532 ++}
533 ++
534 ++static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
535 ++ size_t ignored, int flags)
536 ++{
537 ++ int err;
538 ++
539 ++ err = hash_check_key(sock);
540 ++ if (err)
541 ++ return err;
542 ++
543 ++ return hash_recvmsg(sock, msg, ignored, flags);
544 ++}
545 ++
546 ++static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
547 ++ int flags)
548 ++{
549 ++ int err;
550 ++
551 ++ err = hash_check_key(sock);
552 ++ if (err)
553 ++ return err;
554 ++
555 ++ return hash_accept(sock, newsock, flags);
556 ++}
557 ++
558 ++static struct proto_ops algif_hash_ops_nokey = {
559 ++ .family = PF_ALG,
560 ++
561 ++ .connect = sock_no_connect,
562 ++ .socketpair = sock_no_socketpair,
563 ++ .getname = sock_no_getname,
564 ++ .ioctl = sock_no_ioctl,
565 ++ .listen = sock_no_listen,
566 ++ .shutdown = sock_no_shutdown,
567 ++ .getsockopt = sock_no_getsockopt,
568 ++ .mmap = sock_no_mmap,
569 ++ .bind = sock_no_bind,
570 ++ .setsockopt = sock_no_setsockopt,
571 ++ .poll = sock_no_poll,
572 ++
573 ++ .release = af_alg_release,
574 ++ .sendmsg = hash_sendmsg_nokey,
575 ++ .sendpage = hash_sendpage_nokey,
576 ++ .recvmsg = hash_recvmsg_nokey,
577 ++ .accept = hash_accept_nokey,
578 ++};
579 ++
580 + static void *hash_bind(const char *name, u32 type, u32 mask)
581 + {
582 +- return crypto_alloc_ahash(name, type, mask);
583 ++ struct algif_hash_tfm *tfm;
584 ++ struct crypto_ahash *hash;
585 ++
586 ++ tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
587 ++ if (!tfm)
588 ++ return ERR_PTR(-ENOMEM);
589 ++
590 ++ hash = crypto_alloc_ahash(name, type, mask);
591 ++ if (IS_ERR(hash)) {
592 ++ kfree(tfm);
593 ++ return ERR_CAST(hash);
594 ++ }
595 ++
596 ++ tfm->hash = hash;
597 ++
598 ++ return tfm;
599 + }
600 +
601 + static void hash_release(void *private)
602 + {
603 +- crypto_free_ahash(private);
604 ++ struct algif_hash_tfm *tfm = private;
605 ++
606 ++ crypto_free_ahash(tfm->hash);
607 ++ kfree(tfm);
608 + }
609 +
610 + static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
611 + {
612 +- return crypto_ahash_setkey(private, key, keylen);
613 ++ struct algif_hash_tfm *tfm = private;
614 ++ int err;
615 ++
616 ++ err = crypto_ahash_setkey(tfm->hash, key, keylen);
617 ++ tfm->has_key = !err;
618 ++
619 ++ return err;
620 + }
621 +
622 + static void hash_sock_destruct(struct sock *sk)
623 +@@ -261,12 +400,14 @@ static void hash_sock_destruct(struct sock *sk)
624 + af_alg_release_parent(sk);
625 + }
626 +
627 +-static int hash_accept_parent(void *private, struct sock *sk)
628 ++static int hash_accept_parent_nokey(void *private, struct sock *sk)
629 + {
630 + struct hash_ctx *ctx;
631 + struct alg_sock *ask = alg_sk(sk);
632 +- unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private);
633 +- unsigned ds = crypto_ahash_digestsize(private);
634 ++ struct algif_hash_tfm *tfm = private;
635 ++ struct crypto_ahash *hash = tfm->hash;
636 ++ unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
637 ++ unsigned ds = crypto_ahash_digestsize(hash);
638 +
639 + ctx = sock_kmalloc(sk, len, GFP_KERNEL);
640 + if (!ctx)
641 +@@ -286,7 +427,7 @@ static int hash_accept_parent(void *private, struct sock *sk)
642 +
643 + ask->private = ctx;
644 +
645 +- ahash_request_set_tfm(&ctx->req, private);
646 ++ ahash_request_set_tfm(&ctx->req, hash);
647 + ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
648 + af_alg_complete, &ctx->completion);
649 +
650 +@@ -295,12 +436,24 @@ static int hash_accept_parent(void *private, struct sock *sk)
651 + return 0;
652 + }
653 +
654 ++static int hash_accept_parent(void *private, struct sock *sk)
655 ++{
656 ++ struct algif_hash_tfm *tfm = private;
657 ++
658 ++ if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash))
659 ++ return -ENOKEY;
660 ++
661 ++ return hash_accept_parent_nokey(private, sk);
662 ++}
663 ++
664 + static const struct af_alg_type algif_type_hash = {
665 + .bind = hash_bind,
666 + .release = hash_release,
667 + .setkey = hash_setkey,
668 + .accept = hash_accept_parent,
669 ++ .accept_nokey = hash_accept_parent_nokey,
670 + .ops = &algif_hash_ops,
671 ++ .ops_nokey = &algif_hash_ops_nokey,
672 + .name = "hash",
673 + .owner = THIS_MODULE
674 + };
675 +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
676 +index 634b4d1ab681..f5e9f9310b48 100644
677 +--- a/crypto/algif_skcipher.c
678 ++++ b/crypto/algif_skcipher.c
679 +@@ -31,6 +31,11 @@ struct skcipher_sg_list {
680 + struct scatterlist sg[0];
681 + };
682 +
683 ++struct skcipher_tfm {
684 ++ struct crypto_skcipher *skcipher;
685 ++ bool has_key;
686 ++};
687 ++
688 + struct skcipher_ctx {
689 + struct list_head tsgl;
690 + struct af_alg_sgl rsgl;
691 +@@ -60,18 +65,10 @@ struct skcipher_async_req {
692 + struct skcipher_async_rsgl first_sgl;
693 + struct list_head list;
694 + struct scatterlist *tsg;
695 +- char iv[];
696 ++ atomic_t *inflight;
697 ++ struct skcipher_request req;
698 + };
699 +
700 +-#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
701 +- crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))
702 +-
703 +-#define GET_REQ_SIZE(ctx) \
704 +- crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))
705 +-
706 +-#define GET_IV_SIZE(ctx) \
707 +- crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))
708 +-
709 + #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
710 + sizeof(struct scatterlist) - 1)
711 +
712 +@@ -97,15 +94,12 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
713 +
714 + static void skcipher_async_cb(struct crypto_async_request *req, int err)
715 + {
716 +- struct sock *sk = req->data;
717 +- struct alg_sock *ask = alg_sk(sk);
718 +- struct skcipher_ctx *ctx = ask->private;
719 +- struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
720 ++ struct skcipher_async_req *sreq = req->data;
721 + struct kiocb *iocb = sreq->iocb;
722 +
723 +- atomic_dec(&ctx->inflight);
724 ++ atomic_dec(sreq->inflight);
725 + skcipher_free_async_sgls(sreq);
726 +- kfree(req);
727 ++ kzfree(sreq);
728 + iocb->ki_complete(iocb, err, err);
729 + }
730 +
731 +@@ -301,8 +295,11 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
732 + {
733 + struct sock *sk = sock->sk;
734 + struct alg_sock *ask = alg_sk(sk);
735 ++ struct sock *psk = ask->parent;
736 ++ struct alg_sock *pask = alg_sk(psk);
737 + struct skcipher_ctx *ctx = ask->private;
738 +- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
739 ++ struct skcipher_tfm *skc = pask->private;
740 ++ struct crypto_skcipher *tfm = skc->skcipher;
741 + unsigned ivsize = crypto_skcipher_ivsize(tfm);
742 + struct skcipher_sg_list *sgl;
743 + struct af_alg_control con = {};
744 +@@ -387,7 +384,8 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
745 +
746 + sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
747 + sg = sgl->sg;
748 +- sg_unmark_end(sg + sgl->cur);
749 ++ if (sgl->cur)
750 ++ sg_unmark_end(sg + sgl->cur - 1);
751 + do {
752 + i = sgl->cur;
753 + plen = min_t(int, len, PAGE_SIZE);
754 +@@ -503,37 +501,43 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
755 + {
756 + struct sock *sk = sock->sk;
757 + struct alg_sock *ask = alg_sk(sk);
758 ++ struct sock *psk = ask->parent;
759 ++ struct alg_sock *pask = alg_sk(psk);
760 + struct skcipher_ctx *ctx = ask->private;
761 ++ struct skcipher_tfm *skc = pask->private;
762 ++ struct crypto_skcipher *tfm = skc->skcipher;
763 + struct skcipher_sg_list *sgl;
764 + struct scatterlist *sg;
765 + struct skcipher_async_req *sreq;
766 + struct skcipher_request *req;
767 + struct skcipher_async_rsgl *last_rsgl = NULL;
768 +- unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
769 +- unsigned int reqlen = sizeof(struct skcipher_async_req) +
770 +- GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
771 ++ unsigned int txbufs = 0, len = 0, tx_nents;
772 ++ unsigned int reqsize = crypto_skcipher_reqsize(tfm);
773 ++ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
774 + int err = -ENOMEM;
775 + bool mark = false;
776 ++ char *iv;
777 +
778 +- lock_sock(sk);
779 +- req = kmalloc(reqlen, GFP_KERNEL);
780 +- if (unlikely(!req))
781 +- goto unlock;
782 ++ sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
783 ++ if (unlikely(!sreq))
784 ++ goto out;
785 +
786 +- sreq = GET_SREQ(req, ctx);
787 ++ req = &sreq->req;
788 ++ iv = (char *)(req + 1) + reqsize;
789 + sreq->iocb = msg->msg_iocb;
790 +- memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
791 + INIT_LIST_HEAD(&sreq->list);
792 ++ sreq->inflight = &ctx->inflight;
793 ++
794 ++ lock_sock(sk);
795 ++ tx_nents = skcipher_all_sg_nents(ctx);
796 + sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
797 +- if (unlikely(!sreq->tsg)) {
798 +- kfree(req);
799 ++ if (unlikely(!sreq->tsg))
800 + goto unlock;
801 +- }
802 + sg_init_table(sreq->tsg, tx_nents);
803 +- memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
804 +- skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
805 +- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
806 +- skcipher_async_cb, sk);
807 ++ memcpy(iv, ctx->iv, ivsize);
808 ++ skcipher_request_set_tfm(req, tfm);
809 ++ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
810 ++ skcipher_async_cb, sreq);
811 +
812 + while (iov_iter_count(&msg->msg_iter)) {
813 + struct skcipher_async_rsgl *rsgl;
814 +@@ -609,20 +613,22 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
815 + sg_mark_end(sreq->tsg + txbufs - 1);
816 +
817 + skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
818 +- len, sreq->iv);
819 ++ len, iv);
820 + err = ctx->enc ? crypto_skcipher_encrypt(req) :
821 + crypto_skcipher_decrypt(req);
822 + if (err == -EINPROGRESS) {
823 + atomic_inc(&ctx->inflight);
824 + err = -EIOCBQUEUED;
825 ++ sreq = NULL;
826 + goto unlock;
827 + }
828 + free:
829 + skcipher_free_async_sgls(sreq);
830 +- kfree(req);
831 + unlock:
832 + skcipher_wmem_wakeup(sk);
833 + release_sock(sk);
834 ++ kzfree(sreq);
835 ++out:
836 + return err;
837 + }
838 +
839 +@@ -631,9 +637,12 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
840 + {
841 + struct sock *sk = sock->sk;
842 + struct alg_sock *ask = alg_sk(sk);
843 ++ struct sock *psk = ask->parent;
844 ++ struct alg_sock *pask = alg_sk(psk);
845 + struct skcipher_ctx *ctx = ask->private;
846 +- unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
847 +- &ctx->req));
848 ++ struct skcipher_tfm *skc = pask->private;
849 ++ struct crypto_skcipher *tfm = skc->skcipher;
850 ++ unsigned bs = crypto_skcipher_blocksize(tfm);
851 + struct skcipher_sg_list *sgl;
852 + struct scatterlist *sg;
853 + int err = -EAGAIN;
854 +@@ -642,13 +651,6 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
855 +
856 + lock_sock(sk);
857 + while (msg_data_left(msg)) {
858 +- sgl = list_first_entry(&ctx->tsgl,
859 +- struct skcipher_sg_list, list);
860 +- sg = sgl->sg;
861 +-
862 +- while (!sg->length)
863 +- sg++;
864 +-
865 + if (!ctx->used) {
866 + err = skcipher_wait_for_data(sk, flags);
867 + if (err)
868 +@@ -669,6 +671,13 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
869 + if (!used)
870 + goto free;
871 +
872 ++ sgl = list_first_entry(&ctx->tsgl,
873 ++ struct skcipher_sg_list, list);
874 ++ sg = sgl->sg;
875 ++
876 ++ while (!sg->length)
877 ++ sg++;
878 ++
879 + skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
880 + ctx->iv);
881 +
882 +@@ -748,19 +757,139 @@ static struct proto_ops algif_skcipher_ops = {
883 + .poll = skcipher_poll,
884 + };
885 +
886 ++static int skcipher_check_key(struct socket *sock)
887 ++{
888 ++ int err = 0;
889 ++ struct sock *psk;
890 ++ struct alg_sock *pask;
891 ++ struct skcipher_tfm *tfm;
892 ++ struct sock *sk = sock->sk;
893 ++ struct alg_sock *ask = alg_sk(sk);
894 ++
895 ++ lock_sock(sk);
896 ++ if (ask->refcnt)
897 ++ goto unlock_child;
898 ++
899 ++ psk = ask->parent;
900 ++ pask = alg_sk(ask->parent);
901 ++ tfm = pask->private;
902 ++
903 ++ err = -ENOKEY;
904 ++ lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
905 ++ if (!tfm->has_key)
906 ++ goto unlock;
907 ++
908 ++ if (!pask->refcnt++)
909 ++ sock_hold(psk);
910 ++
911 ++ ask->refcnt = 1;
912 ++ sock_put(psk);
913 ++
914 ++ err = 0;
915 ++
916 ++unlock:
917 ++ release_sock(psk);
918 ++unlock_child:
919 ++ release_sock(sk);
920 ++
921 ++ return err;
922 ++}
923 ++
924 ++static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
925 ++ size_t size)
926 ++{
927 ++ int err;
928 ++
929 ++ err = skcipher_check_key(sock);
930 ++ if (err)
931 ++ return err;
932 ++
933 ++ return skcipher_sendmsg(sock, msg, size);
934 ++}
935 ++
936 ++static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
937 ++ int offset, size_t size, int flags)
938 ++{
939 ++ int err;
940 ++
941 ++ err = skcipher_check_key(sock);
942 ++ if (err)
943 ++ return err;
944 ++
945 ++ return skcipher_sendpage(sock, page, offset, size, flags);
946 ++}
947 ++
948 ++static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
949 ++ size_t ignored, int flags)
950 ++{
951 ++ int err;
952 ++
953 ++ err = skcipher_check_key(sock);
954 ++ if (err)
955 ++ return err;
956 ++
957 ++ return skcipher_recvmsg(sock, msg, ignored, flags);
958 ++}
959 ++
960 ++static struct proto_ops algif_skcipher_ops_nokey = {
961 ++ .family = PF_ALG,
962 ++
963 ++ .connect = sock_no_connect,
964 ++ .socketpair = sock_no_socketpair,
965 ++ .getname = sock_no_getname,
966 ++ .ioctl = sock_no_ioctl,
967 ++ .listen = sock_no_listen,
968 ++ .shutdown = sock_no_shutdown,
969 ++ .getsockopt = sock_no_getsockopt,
970 ++ .mmap = sock_no_mmap,
971 ++ .bind = sock_no_bind,
972 ++ .accept = sock_no_accept,
973 ++ .setsockopt = sock_no_setsockopt,
974 ++
975 ++ .release = af_alg_release,
976 ++ .sendmsg = skcipher_sendmsg_nokey,
977 ++ .sendpage = skcipher_sendpage_nokey,
978 ++ .recvmsg = skcipher_recvmsg_nokey,
979 ++ .poll = skcipher_poll,
980 ++};
981 ++
982 + static void *skcipher_bind(const char *name, u32 type, u32 mask)
983 + {
984 +- return crypto_alloc_skcipher(name, type, mask);
985 ++ struct skcipher_tfm *tfm;
986 ++ struct crypto_skcipher *skcipher;
987 ++
988 ++ tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
989 ++ if (!tfm)
990 ++ return ERR_PTR(-ENOMEM);
991 ++
992 ++ skcipher = crypto_alloc_skcipher(name, type, mask);
993 ++ if (IS_ERR(skcipher)) {
994 ++ kfree(tfm);
995 ++ return ERR_CAST(skcipher);
996 ++ }
997 ++
998 ++ tfm->skcipher = skcipher;
999 ++
1000 ++ return tfm;
1001 + }
1002 +
1003 + static void skcipher_release(void *private)
1004 + {
1005 +- crypto_free_skcipher(private);
1006 ++ struct skcipher_tfm *tfm = private;
1007 ++
1008 ++ crypto_free_skcipher(tfm->skcipher);
1009 ++ kfree(tfm);
1010 + }
1011 +
1012 + static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
1013 + {
1014 +- return crypto_skcipher_setkey(private, key, keylen);
1015 ++ struct skcipher_tfm *tfm = private;
1016 ++ int err;
1017 ++
1018 ++ err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
1019 ++ tfm->has_key = !err;
1020 ++
1021 ++ return err;
1022 + }
1023 +
1024 + static void skcipher_wait(struct sock *sk)
1025 +@@ -788,24 +917,26 @@ static void skcipher_sock_destruct(struct sock *sk)
1026 + af_alg_release_parent(sk);
1027 + }
1028 +
1029 +-static int skcipher_accept_parent(void *private, struct sock *sk)
1030 ++static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
1031 + {
1032 + struct skcipher_ctx *ctx;
1033 + struct alg_sock *ask = alg_sk(sk);
1034 +- unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(private);
1035 ++ struct skcipher_tfm *tfm = private;
1036 ++ struct crypto_skcipher *skcipher = tfm->skcipher;
1037 ++ unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);
1038 +
1039 + ctx = sock_kmalloc(sk, len, GFP_KERNEL);
1040 + if (!ctx)
1041 + return -ENOMEM;
1042 +
1043 +- ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(private),
1044 ++ ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
1045 + GFP_KERNEL);
1046 + if (!ctx->iv) {
1047 + sock_kfree_s(sk, ctx, len);
1048 + return -ENOMEM;
1049 + }
1050 +
1051 +- memset(ctx->iv, 0, crypto_skcipher_ivsize(private));
1052 ++ memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));
1053 +
1054 + INIT_LIST_HEAD(&ctx->tsgl);
1055 + ctx->len = len;
1056 +@@ -818,8 +949,9 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
1057 +
1058 + ask->private = ctx;
1059 +
1060 +- skcipher_request_set_tfm(&ctx->req, private);
1061 +- skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1062 ++ skcipher_request_set_tfm(&ctx->req, skcipher);
1063 ++ skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
1064 ++ CRYPTO_TFM_REQ_MAY_BACKLOG,
1065 + af_alg_complete, &ctx->completion);
1066 +
1067 + sk->sk_destruct = skcipher_sock_destruct;
1068 +@@ -827,12 +959,24 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
1069 + return 0;
1070 + }
1071 +
1072 ++static int skcipher_accept_parent(void *private, struct sock *sk)
1073 ++{
1074 ++ struct skcipher_tfm *tfm = private;
1075 ++
1076 ++ if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
1077 ++ return -ENOKEY;
1078 ++
1079 ++ return skcipher_accept_parent_nokey(private, sk);
1080 ++}
1081 ++
1082 + static const struct af_alg_type algif_type_skcipher = {
1083 + .bind = skcipher_bind,
1084 + .release = skcipher_release,
1085 + .setkey = skcipher_setkey,
1086 + .accept = skcipher_accept_parent,
1087 ++ .accept_nokey = skcipher_accept_parent_nokey,
1088 + .ops = &algif_skcipher_ops,
1089 ++ .ops_nokey = &algif_skcipher_ops_nokey,
1090 + .name = "skcipher",
1091 + .owner = THIS_MODULE
1092 + };
1093 +diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
1094 +index 06f1b60f02b2..4c0a0e271876 100644
1095 +--- a/crypto/crc32c_generic.c
1096 ++++ b/crypto/crc32c_generic.c
1097 +@@ -172,4 +172,3 @@ MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
1098 + MODULE_LICENSE("GPL");
1099 + MODULE_ALIAS_CRYPTO("crc32c");
1100 + MODULE_ALIAS_CRYPTO("crc32c-generic");
1101 +-MODULE_SOFTDEP("pre: crc32c");
1102 +diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
1103 +index 237f3795cfaa..43fe85f20d57 100644
1104 +--- a/crypto/crypto_user.c
1105 ++++ b/crypto/crypto_user.c
1106 +@@ -499,6 +499,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1107 + if (link->dump == NULL)
1108 + return -EINVAL;
1109 +
1110 ++ down_read(&crypto_alg_sem);
1111 + list_for_each_entry(alg, &crypto_alg_list, cra_list)
1112 + dump_alloc += CRYPTO_REPORT_MAXSIZE;
1113 +
1114 +@@ -508,8 +509,11 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1115 + .done = link->done,
1116 + .min_dump_alloc = dump_alloc,
1117 + };
1118 +- return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
1119 ++ err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
1120 + }
1121 ++ up_read(&crypto_alg_sem);
1122 ++
1123 ++ return err;
1124 + }
1125 +
1126 + err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
1127 +diff --git a/crypto/shash.c b/crypto/shash.c
1128 +index ecb1e3d39bf0..359754591653 100644
1129 +--- a/crypto/shash.c
1130 ++++ b/crypto/shash.c
1131 +@@ -354,9 +354,10 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
1132 + crt->final = shash_async_final;
1133 + crt->finup = shash_async_finup;
1134 + crt->digest = shash_async_digest;
1135 ++ crt->setkey = shash_async_setkey;
1136 ++
1137 ++ crt->has_setkey = alg->setkey != shash_no_setkey;
1138 +
1139 +- if (alg->setkey)
1140 +- crt->setkey = shash_async_setkey;
1141 + if (alg->export)
1142 + crt->export = shash_async_export;
1143 + if (alg->import)
1144 +diff --git a/crypto/skcipher.c b/crypto/skcipher.c
1145 +index 7591928be7ca..d199c0b1751c 100644
1146 +--- a/crypto/skcipher.c
1147 ++++ b/crypto/skcipher.c
1148 +@@ -118,6 +118,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
1149 + skcipher->decrypt = skcipher_decrypt_blkcipher;
1150 +
1151 + skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
1152 ++ skcipher->has_setkey = calg->cra_blkcipher.max_keysize;
1153 +
1154 + return 0;
1155 + }
1156 +@@ -210,6 +211,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
1157 + skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1158 + skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
1159 + sizeof(struct ablkcipher_request);
1160 ++ skcipher->has_setkey = calg->cra_ablkcipher.max_keysize;
1161 +
1162 + return 0;
1163 + }
1164 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
1165 +index cdfbcc54821f..99921aa0daca 100644
1166 +--- a/drivers/ata/ahci.c
1167 ++++ b/drivers/ata/ahci.c
1168 +@@ -264,6 +264,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
1169 + { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
1170 + { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
1171 + { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
1172 ++ { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
1173 ++ { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
1174 ++ { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
1175 ++ { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
1176 ++ { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
1177 ++ { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
1178 ++ { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
1179 ++ { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
1180 ++ { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
1181 ++ { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
1182 ++ { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
1183 ++ { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
1184 ++ { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
1185 ++ { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
1186 ++ { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
1187 ++ { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
1188 ++ { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
1189 ++ { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
1190 ++ { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
1191 ++ { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
1192 + { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
1193 + { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
1194 + { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
1195 +diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
1196 +index 4665512dae44..1f225cc1827f 100644
1197 +--- a/drivers/ata/libahci.c
1198 ++++ b/drivers/ata/libahci.c
1199 +@@ -495,8 +495,8 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
1200 + }
1201 + }
1202 +
1203 +- /* fabricate port_map from cap.nr_ports */
1204 +- if (!port_map) {
1205 ++ /* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
1206 ++ if (!port_map && vers < 0x10300) {
1207 + port_map = (1 << ahci_nr_ports(cap)) - 1;
1208 + dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
1209 +
1210 +diff --git a/drivers/base/platform.c b/drivers/base/platform.c
1211 +index 1dd6d3bf1098..176b59f5bc47 100644
1212 +--- a/drivers/base/platform.c
1213 ++++ b/drivers/base/platform.c
1214 +@@ -513,10 +513,15 @@ static int platform_drv_probe(struct device *_dev)
1215 + return ret;
1216 +
1217 + ret = dev_pm_domain_attach(_dev, true);
1218 +- if (ret != -EPROBE_DEFER && drv->probe) {
1219 +- ret = drv->probe(dev);
1220 +- if (ret)
1221 +- dev_pm_domain_detach(_dev, true);
1222 ++ if (ret != -EPROBE_DEFER) {
1223 ++ if (drv->probe) {
1224 ++ ret = drv->probe(dev);
1225 ++ if (ret)
1226 ++ dev_pm_domain_detach(_dev, true);
1227 ++ } else {
1228 ++ /* don't fail if just dev_pm_domain_attach failed */
1229 ++ ret = 0;
1230 ++ }
1231 + }
1232 +
1233 + if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
1234 +diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
1235 +index 5cb13ca3a3ac..c53617752b93 100644
1236 +--- a/drivers/block/zram/zcomp.c
1237 ++++ b/drivers/block/zram/zcomp.c
1238 +@@ -76,7 +76,7 @@ static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
1239 + */
1240 + static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
1241 + {
1242 +- struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
1243 ++ struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_NOIO);
1244 + if (!zstrm)
1245 + return NULL;
1246 +
1247 +@@ -85,7 +85,7 @@ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
1248 + * allocate 2 pages. 1 for compressed data, plus 1 extra for the
1249 + * case when compressed size is larger than the original one
1250 + */
1251 +- zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
1252 ++ zstrm->buffer = (void *)__get_free_pages(GFP_NOIO | __GFP_ZERO, 1);
1253 + if (!zstrm->private || !zstrm->buffer) {
1254 + zcomp_strm_free(comp, zstrm);
1255 + zstrm = NULL;
1256 +diff --git a/drivers/block/zram/zcomp_lz4.c b/drivers/block/zram/zcomp_lz4.c
1257 +index f2afb7e988c3..dd6083124276 100644
1258 +--- a/drivers/block/zram/zcomp_lz4.c
1259 ++++ b/drivers/block/zram/zcomp_lz4.c
1260 +@@ -10,17 +10,36 @@
1261 + #include <linux/kernel.h>
1262 + #include <linux/slab.h>
1263 + #include <linux/lz4.h>
1264 ++#include <linux/vmalloc.h>
1265 ++#include <linux/mm.h>
1266 +
1267 + #include "zcomp_lz4.h"
1268 +
1269 + static void *zcomp_lz4_create(void)
1270 + {
1271 +- return kzalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
1272 ++ void *ret;
1273 ++
1274 ++ /*
1275 ++ * This function can be called in swapout/fs write path
1276 ++ * so we can't use GFP_FS|IO. And it assumes we already
1277 ++ * have at least one stream in zram initialization so we
1278 ++ * don't do best effort to allocate more stream in here.
1279 ++ * A default stream will work well without further multiple
1280 ++ * streams. That's why we use NORETRY | NOWARN.
1281 ++ */
1282 ++ ret = kzalloc(LZ4_MEM_COMPRESS, GFP_NOIO | __GFP_NORETRY |
1283 ++ __GFP_NOWARN);
1284 ++ if (!ret)
1285 ++ ret = __vmalloc(LZ4_MEM_COMPRESS,
1286 ++ GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN |
1287 ++ __GFP_ZERO | __GFP_HIGHMEM,
1288 ++ PAGE_KERNEL);
1289 ++ return ret;
1290 + }
1291 +
1292 + static void zcomp_lz4_destroy(void *private)
1293 + {
1294 +- kfree(private);
1295 ++ kvfree(private);
1296 + }
1297 +
1298 + static int zcomp_lz4_compress(const unsigned char *src, unsigned char *dst,
1299 +diff --git a/drivers/block/zram/zcomp_lzo.c b/drivers/block/zram/zcomp_lzo.c
1300 +index da1bc47d588e..edc549920fa0 100644
1301 +--- a/drivers/block/zram/zcomp_lzo.c
1302 ++++ b/drivers/block/zram/zcomp_lzo.c
1303 +@@ -10,17 +10,36 @@
1304 + #include <linux/kernel.h>
1305 + #include <linux/slab.h>
1306 + #include <linux/lzo.h>
1307 ++#include <linux/vmalloc.h>
1308 ++#include <linux/mm.h>
1309 +
1310 + #include "zcomp_lzo.h"
1311 +
1312 + static void *lzo_create(void)
1313 + {
1314 +- return kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
1315 ++ void *ret;
1316 ++
1317 ++ /*
1318 ++ * This function can be called in swapout/fs write path
1319 ++ * so we can't use GFP_FS|IO. And it assumes we already
1320 ++ * have at least one stream in zram initialization so we
1321 ++ * don't do best effort to allocate more stream in here.
1322 ++ * A default stream will work well without further multiple
1323 ++ * streams. That's why we use NORETRY | NOWARN.
1324 ++ */
1325 ++ ret = kzalloc(LZO1X_MEM_COMPRESS, GFP_NOIO | __GFP_NORETRY |
1326 ++ __GFP_NOWARN);
1327 ++ if (!ret)
1328 ++ ret = __vmalloc(LZO1X_MEM_COMPRESS,
1329 ++ GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN |
1330 ++ __GFP_ZERO | __GFP_HIGHMEM,
1331 ++ PAGE_KERNEL);
1332 ++ return ret;
1333 + }
1334 +
1335 + static void lzo_destroy(void *private)
1336 + {
1337 +- kfree(private);
1338 ++ kvfree(private);
1339 + }
1340 +
1341 + static int lzo_compress(const unsigned char *src, unsigned char *dst,
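Both zcomp backends above (lz4 and lzo) move to the same workspace-allocation pattern: kzalloc() with GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN so the swap-out/writeback path can never recurse into I/O, a __vmalloc() fallback with the same restrictions, and kvfree() on teardown. Condensed into one standalone sketch (the helper names and the size parameter are illustrative, not from the patch):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/*
 * Allocate a compressor workspace on a path that may be reached from
 * swap-out or filesystem writeback: no FS/IO reclaim, no retries, no
 * warning, and fall back to vmalloc space if contiguous pages are scarce.
 */
static void *zcomp_workspace_alloc(size_t size)
{
	void *ws = kzalloc(size, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);

	if (!ws)
		ws = __vmalloc(size, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN |
			       __GFP_ZERO | __GFP_HIGHMEM, PAGE_KERNEL);
	return ws;
}

static void zcomp_workspace_free(void *ws)
{
	kvfree(ws);	/* frees either kmalloc or vmalloc memory */
}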
1342 +diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
1343 +index 47915d736f8d..370c2f76016d 100644
1344 +--- a/drivers/block/zram/zram_drv.c
1345 ++++ b/drivers/block/zram/zram_drv.c
1346 +@@ -1325,7 +1325,6 @@ static int zram_remove(struct zram *zram)
1347 +
1348 + pr_info("Removed device: %s\n", zram->disk->disk_name);
1349 +
1350 +- idr_remove(&zram_index_idr, zram->disk->first_minor);
1351 + blk_cleanup_queue(zram->disk->queue);
1352 + del_gendisk(zram->disk);
1353 + put_disk(zram->disk);
1354 +@@ -1367,10 +1366,12 @@ static ssize_t hot_remove_store(struct class *class,
1355 + mutex_lock(&zram_index_mutex);
1356 +
1357 + zram = idr_find(&zram_index_idr, dev_id);
1358 +- if (zram)
1359 ++ if (zram) {
1360 + ret = zram_remove(zram);
1361 +- else
1362 ++ idr_remove(&zram_index_idr, dev_id);
1363 ++ } else {
1364 + ret = -ENODEV;
1365 ++ }
1366 +
1367 + mutex_unlock(&zram_index_mutex);
1368 + return ret ? ret : count;
1369 +diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
1370 +index 660d8c06540b..3178f84d2757 100644
1371 +--- a/drivers/crypto/atmel-sha.c
1372 ++++ b/drivers/crypto/atmel-sha.c
1373 +@@ -783,7 +783,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
1374 + dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
1375 + SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
1376 +
1377 +- clk_disable_unprepare(dd->iclk);
1378 ++ clk_disable(dd->iclk);
1379 +
1380 + if (req->base.complete)
1381 + req->base.complete(&req->base, err);
1382 +@@ -796,7 +796,7 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
1383 + {
1384 + int err;
1385 +
1386 +- err = clk_prepare_enable(dd->iclk);
1387 ++ err = clk_enable(dd->iclk);
1388 + if (err)
1389 + return err;
1390 +
1391 +@@ -823,7 +823,7 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
1392 + dev_info(dd->dev,
1393 + "version: 0x%x\n", dd->hw_version);
1394 +
1395 +- clk_disable_unprepare(dd->iclk);
1396 ++ clk_disable(dd->iclk);
1397 + }
1398 +
1399 + static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
1400 +@@ -1411,6 +1411,10 @@ static int atmel_sha_probe(struct platform_device *pdev)
1401 + goto res_err;
1402 + }
1403 +
1404 ++ err = clk_prepare(sha_dd->iclk);
1405 ++ if (err)
1406 ++ goto res_err;
1407 ++
1408 + atmel_sha_hw_version_init(sha_dd);
1409 +
1410 + atmel_sha_get_cap(sha_dd);
1411 +@@ -1422,12 +1426,12 @@ static int atmel_sha_probe(struct platform_device *pdev)
1412 + if (IS_ERR(pdata)) {
1413 + dev_err(&pdev->dev, "platform data not available\n");
1414 + err = PTR_ERR(pdata);
1415 +- goto res_err;
1416 ++ goto iclk_unprepare;
1417 + }
1418 + }
1419 + if (!pdata->dma_slave) {
1420 + err = -ENXIO;
1421 +- goto res_err;
1422 ++ goto iclk_unprepare;
1423 + }
1424 + err = atmel_sha_dma_init(sha_dd, pdata);
1425 + if (err)
1426 +@@ -1458,6 +1462,8 @@ err_algs:
1427 + if (sha_dd->caps.has_dma)
1428 + atmel_sha_dma_cleanup(sha_dd);
1429 + err_sha_dma:
1430 ++iclk_unprepare:
1431 ++ clk_unprepare(sha_dd->iclk);
1432 + res_err:
1433 + tasklet_kill(&sha_dd->done_task);
1434 + sha_dd_err:
1435 +@@ -1484,12 +1490,7 @@ static int atmel_sha_remove(struct platform_device *pdev)
1436 + if (sha_dd->caps.has_dma)
1437 + atmel_sha_dma_cleanup(sha_dd);
1438 +
1439 +- iounmap(sha_dd->io_base);
1440 +-
1441 +- clk_put(sha_dd->iclk);
1442 +-
1443 +- if (sha_dd->irq >= 0)
1444 +- free_irq(sha_dd->irq, sha_dd);
1445 ++ clk_unprepare(sha_dd->iclk);
1446 +
1447 + return 0;
1448 + }
1449 +diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
1450 +index 8abb4bc548cc..69d4a1326fee 100644
1451 +--- a/drivers/crypto/caam/ctrl.c
1452 ++++ b/drivers/crypto/caam/ctrl.c
1453 +@@ -534,8 +534,8 @@ static int caam_probe(struct platform_device *pdev)
1454 + * long pointers in master configuration register
1455 + */
1456 + clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
1457 +- MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ?
1458 +- MCFGR_LONG_PTR : 0));
1459 ++ MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE |
1460 ++ (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
1461 +
1462 + /*
1463 + * Read the Compile Time paramters and SCFGR to determine
1464 +diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
1465 +index 0643e3366e33..c0656e7f37b5 100644
1466 +--- a/drivers/crypto/marvell/cesa.c
1467 ++++ b/drivers/crypto/marvell/cesa.c
1468 +@@ -306,7 +306,7 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
1469 + return -ENOMEM;
1470 +
1471 + dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
1472 +- if (!dma->cache_pool)
1473 ++ if (!dma->padding_pool)
1474 + return -ENOMEM;
1475 +
1476 + cesa->dma = dma;
1477 +diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
1478 +index eab6fe227fa0..107cd2a41cae 100644
1479 +--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
1480 ++++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
1481 +@@ -39,6 +39,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
1482 + .import = sun4i_hash_import_md5,
1483 + .halg = {
1484 + .digestsize = MD5_DIGEST_SIZE,
1485 ++ .statesize = sizeof(struct md5_state),
1486 + .base = {
1487 + .cra_name = "md5",
1488 + .cra_driver_name = "md5-sun4i-ss",
1489 +@@ -66,6 +67,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
1490 + .import = sun4i_hash_import_sha1,
1491 + .halg = {
1492 + .digestsize = SHA1_DIGEST_SIZE,
1493 ++ .statesize = sizeof(struct sha1_state),
1494 + .base = {
1495 + .cra_name = "sha1",
1496 + .cra_driver_name = "sha1-sun4i-ss",
1497 +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
1498 +index 3d664d01305e..2b8ff18d3713 100644
1499 +--- a/drivers/hid/hid-multitouch.c
1500 ++++ b/drivers/hid/hid-multitouch.c
1501 +@@ -357,8 +357,19 @@ static void mt_feature_mapping(struct hid_device *hdev,
1502 + break;
1503 + }
1504 +
1505 +- td->inputmode = field->report->id;
1506 +- td->inputmode_index = usage->usage_index;
1507 ++ if (td->inputmode < 0) {
1508 ++ td->inputmode = field->report->id;
1509 ++ td->inputmode_index = usage->usage_index;
1510 ++ } else {
1511 ++ /*
1512 ++ * Some elan panels wrongly declare 2 input mode
1513 ++ * features, and silently ignore when we set the
1514 ++ * value in the second field. Skip the second feature
1515 ++ * and hope for the best.
1516 ++ */
1517 ++ dev_info(&hdev->dev,
1518 ++ "Ignoring the extra HID_DG_INPUTMODE\n");
1519 ++ }
1520 +
1521 + break;
1522 + case HID_DG_CONTACTMAX:
1523 +diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
1524 +index 36712e9f56c2..5dd426fee8cc 100644
1525 +--- a/drivers/hid/usbhid/hid-core.c
1526 ++++ b/drivers/hid/usbhid/hid-core.c
1527 +@@ -477,8 +477,6 @@ static void hid_ctrl(struct urb *urb)
1528 + struct usbhid_device *usbhid = hid->driver_data;
1529 + int unplug = 0, status = urb->status;
1530 +
1531 +- spin_lock(&usbhid->lock);
1532 +-
1533 + switch (status) {
1534 + case 0: /* success */
1535 + if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_IN)
1536 +@@ -498,6 +496,8 @@ static void hid_ctrl(struct urb *urb)
1537 + hid_warn(urb->dev, "ctrl urb status %d received\n", status);
1538 + }
1539 +
1540 ++ spin_lock(&usbhid->lock);
1541 ++
1542 + if (unplug) {
1543 + usbhid->ctrltail = usbhid->ctrlhead;
1544 + } else {
1545 +diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
1546 +index 7df97777662d..dad768caa9c5 100644
1547 +--- a/drivers/iommu/io-pgtable-arm.c
1548 ++++ b/drivers/iommu/io-pgtable-arm.c
1549 +@@ -405,17 +405,18 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
1550 + arm_lpae_iopte *start, *end;
1551 + unsigned long table_size;
1552 +
1553 +- /* Only leaf entries at the last level */
1554 +- if (lvl == ARM_LPAE_MAX_LEVELS - 1)
1555 +- return;
1556 +-
1557 + if (lvl == ARM_LPAE_START_LVL(data))
1558 + table_size = data->pgd_size;
1559 + else
1560 + table_size = 1UL << data->pg_shift;
1561 +
1562 + start = ptep;
1563 +- end = (void *)ptep + table_size;
1564 ++
1565 ++ /* Only leaf entries at the last level */
1566 ++ if (lvl == ARM_LPAE_MAX_LEVELS - 1)
1567 ++ end = ptep;
1568 ++ else
1569 ++ end = (void *)ptep + table_size;
1570 +
1571 + while (ptep != end) {
1572 + arm_lpae_iopte pte = *ptep++;
1573 +diff --git a/drivers/md/md.c b/drivers/md/md.c
1574 +index 61aacab424cf..b1e1f6b95782 100644
1575 +--- a/drivers/md/md.c
1576 ++++ b/drivers/md/md.c
1577 +@@ -2017,28 +2017,32 @@ int md_integrity_register(struct mddev *mddev)
1578 + }
1579 + EXPORT_SYMBOL(md_integrity_register);
1580 +
1581 +-/* Disable data integrity if non-capable/non-matching disk is being added */
1582 +-void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
1583 ++/*
1584 ++ * Attempt to add an rdev, but only if it is consistent with the current
1585 ++ * integrity profile
1586 ++ */
1587 ++int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
1588 + {
1589 + struct blk_integrity *bi_rdev;
1590 + struct blk_integrity *bi_mddev;
1591 ++ char name[BDEVNAME_SIZE];
1592 +
1593 + if (!mddev->gendisk)
1594 +- return;
1595 ++ return 0;
1596 +
1597 + bi_rdev = bdev_get_integrity(rdev->bdev);
1598 + bi_mddev = blk_get_integrity(mddev->gendisk);
1599 +
1600 + if (!bi_mddev) /* nothing to do */
1601 +- return;
1602 +- if (rdev->raid_disk < 0) /* skip spares */
1603 +- return;
1604 +- if (bi_rdev && blk_integrity_compare(mddev->gendisk,
1605 +- rdev->bdev->bd_disk) >= 0)
1606 +- return;
1607 +- WARN_ON_ONCE(!mddev->suspended);
1608 +- printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
1609 +- blk_integrity_unregister(mddev->gendisk);
1610 ++ return 0;
1611 ++
1612 ++ if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
1613 ++ printk(KERN_NOTICE "%s: incompatible integrity profile for %s\n",
1614 ++ mdname(mddev), bdevname(rdev->bdev, name));
1615 ++ return -ENXIO;
1616 ++ }
1617 ++
1618 ++ return 0;
1619 + }
1620 + EXPORT_SYMBOL(md_integrity_add_rdev);
1621 +
1622 +diff --git a/drivers/md/md.h b/drivers/md/md.h
1623 +index ca0b643fe3c1..dfa57b41541b 100644
1624 +--- a/drivers/md/md.h
1625 ++++ b/drivers/md/md.h
1626 +@@ -657,7 +657,7 @@ extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
1627 + extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
1628 + extern int md_check_no_bitmap(struct mddev *mddev);
1629 + extern int md_integrity_register(struct mddev *mddev);
1630 +-extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
1631 ++extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
1632 + extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
1633 +
1634 + extern void mddev_init(struct mddev *mddev);
1635 +diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
1636 +index 7331a80d89f1..0a72ab6e6c20 100644
1637 +--- a/drivers/md/multipath.c
1638 ++++ b/drivers/md/multipath.c
1639 +@@ -257,6 +257,9 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1640 + disk_stack_limits(mddev->gendisk, rdev->bdev,
1641 + rdev->data_offset << 9);
1642 +
1643 ++ err = md_integrity_add_rdev(rdev, mddev);
1644 ++ if (err)
1645 ++ break;
1646 + spin_lock_irq(&conf->device_lock);
1647 + mddev->degraded--;
1648 + rdev->raid_disk = path;
1649 +@@ -264,9 +267,6 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1650 + spin_unlock_irq(&conf->device_lock);
1651 + rcu_assign_pointer(p->rdev, rdev);
1652 + err = 0;
1653 +- mddev_suspend(mddev);
1654 +- md_integrity_add_rdev(rdev, mddev);
1655 +- mddev_resume(mddev);
1656 + break;
1657 + }
1658 +
1659 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
1660 +index e2169ff6e0f0..c4b913409226 100644
1661 +--- a/drivers/md/raid1.c
1662 ++++ b/drivers/md/raid1.c
1663 +@@ -1589,6 +1589,9 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1664 + if (mddev->recovery_disabled == conf->recovery_disabled)
1665 + return -EBUSY;
1666 +
1667 ++ if (md_integrity_add_rdev(rdev, mddev))
1668 ++ return -ENXIO;
1669 ++
1670 + if (rdev->raid_disk >= 0)
1671 + first = last = rdev->raid_disk;
1672 +
1673 +@@ -1632,9 +1635,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1674 + break;
1675 + }
1676 + }
1677 +- mddev_suspend(mddev);
1678 +- md_integrity_add_rdev(rdev, mddev);
1679 +- mddev_resume(mddev);
1680 + if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1681 + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1682 + print_conf(conf);
1683 +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
1684 +index 84e597e1c489..ce959b4ae4df 100644
1685 +--- a/drivers/md/raid10.c
1686 ++++ b/drivers/md/raid10.c
1687 +@@ -1698,6 +1698,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1688 + if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
1689 + return -EINVAL;
1690 +
1691 ++ if (md_integrity_add_rdev(rdev, mddev))
1692 ++ return -ENXIO;
1693 ++
1694 + if (rdev->raid_disk >= 0)
1695 + first = last = rdev->raid_disk;
1696 +
1697 +@@ -1739,9 +1742,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1698 + rcu_assign_pointer(p->rdev, rdev);
1699 + break;
1700 + }
1701 +- mddev_suspend(mddev);
1702 +- md_integrity_add_rdev(rdev, mddev);
1703 +- mddev_resume(mddev);
1704 + if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1705 + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1706 +
1707 +diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
1708 +index 728d2cc8a3e7..175a76114953 100644
1709 +--- a/drivers/media/i2c/ir-kbd-i2c.c
1710 ++++ b/drivers/media/i2c/ir-kbd-i2c.c
1711 +@@ -478,7 +478,6 @@ static const struct i2c_device_id ir_kbd_id[] = {
1712 + { "ir_rx_z8f0811_hdpvr", 0 },
1713 + { }
1714 + };
1715 +-MODULE_DEVICE_TABLE(i2c, ir_kbd_id);
1716 +
1717 + static struct i2c_driver ir_kbd_driver = {
1718 + .driver = {
1719 +diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
1720 +index 1d2c310ce838..94f816244407 100644
1721 +--- a/drivers/media/pci/saa7134/saa7134-alsa.c
1722 ++++ b/drivers/media/pci/saa7134/saa7134-alsa.c
1723 +@@ -1211,6 +1211,8 @@ static int alsa_device_init(struct saa7134_dev *dev)
1724 +
1725 + static int alsa_device_exit(struct saa7134_dev *dev)
1726 + {
1727 ++ if (!snd_saa7134_cards[dev->nr])
1728 ++ return 1;
1729 +
1730 + snd_card_free(snd_saa7134_cards[dev->nr]);
1731 + snd_saa7134_cards[dev->nr] = NULL;
1732 +@@ -1260,7 +1262,8 @@ static void saa7134_alsa_exit(void)
1733 + int idx;
1734 +
1735 + for (idx = 0; idx < SNDRV_CARDS; idx++) {
1736 +- snd_card_free(snd_saa7134_cards[idx]);
1737 ++ if (snd_saa7134_cards[idx])
1738 ++ snd_card_free(snd_saa7134_cards[idx]);
1739 + }
1740 +
1741 + saa7134_dmasound_init = NULL;
1742 +diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
1743 +index ece544efccc3..3ff583f165cd 100644
1744 +--- a/drivers/mtd/nand/nand_base.c
1745 ++++ b/drivers/mtd/nand/nand_base.c
1746 +@@ -3995,6 +3995,9 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
1747 + return ret;
1748 + }
1749 +
1750 ++ if (!mtd->name && mtd->dev.parent)
1751 ++ mtd->name = dev_name(mtd->dev.parent);
1752 ++
1753 + /* Set the default functions */
1754 + nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
1755 +
1756 +diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
1757 +index a62bf0a65c32..5be34118e0af 100644
1758 +--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
1759 ++++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
1760 +@@ -351,7 +351,6 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
1761 + case COUNTRY_CODE_SPAIN:
1762 + case COUNTRY_CODE_FRANCE:
1763 + case COUNTRY_CODE_ISRAEL:
1764 +- case COUNTRY_CODE_WORLD_WIDE_13:
1765 + return &rtl_regdom_12_13;
1766 + case COUNTRY_CODE_MKK:
1767 + case COUNTRY_CODE_MKK1:
1768 +@@ -360,6 +359,7 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
1769 + return &rtl_regdom_14_60_64;
1770 + case COUNTRY_CODE_GLOBAL_DOMAIN:
1771 + return &rtl_regdom_14;
1772 ++ case COUNTRY_CODE_WORLD_WIDE_13:
1773 + case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL:
1774 + return &rtl_regdom_12_13_5g_all;
1775 + default:
1776 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
1777 +index 142bdff4ed60..4159f9b14db6 100644
1778 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
1779 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
1780 +@@ -95,8 +95,6 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
1781 + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1782 +
1783 + rtl8821ae_bt_reg_init(hw);
1784 +- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
1785 +- rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear;
1786 + rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
1787 +
1788 + rtlpriv->dm.dm_initialgain_enable = 1;
1789 +@@ -168,12 +166,15 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
1790 + rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
1791 + rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
1792 + rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
1793 +- rtlpci->msi_support = rtlpriv->cfg->mod_params->int_clear;
1794 ++ rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear;
1795 ++ rtlpriv->cfg->mod_params->sw_crypto =
1796 ++ rtlpriv->cfg->mod_params->sw_crypto;
1797 ++ rtlpriv->cfg->mod_params->disable_watchdog =
1798 ++ rtlpriv->cfg->mod_params->disable_watchdog;
1799 + if (rtlpriv->cfg->mod_params->disable_watchdog)
1800 + pr_info("watchdog disabled\n");
1801 + rtlpriv->psc.reg_fwctrl_lps = 3;
1802 + rtlpriv->psc.reg_max_lps_awakeintvl = 5;
1803 +- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
1804 +
1805 + /* for ASPM, you can close aspm through
1806 + * set const_support_pciaspm = 0
1807 +diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
1808 +index 0305729d0986..10cf3747694d 100644
1809 +--- a/drivers/net/wireless/ti/wlcore/io.h
1810 ++++ b/drivers/net/wireless/ti/wlcore/io.h
1811 +@@ -207,19 +207,23 @@ static inline int __must_check wlcore_write_reg(struct wl1271 *wl, int reg,
1812 +
1813 + static inline void wl1271_power_off(struct wl1271 *wl)
1814 + {
1815 +- int ret;
1816 ++ int ret = 0;
1817 +
1818 + if (!test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags))
1819 + return;
1820 +
1821 +- ret = wl->if_ops->power(wl->dev, false);
1822 ++ if (wl->if_ops->power)
1823 ++ ret = wl->if_ops->power(wl->dev, false);
1824 + if (!ret)
1825 + clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
1826 + }
1827 +
1828 + static inline int wl1271_power_on(struct wl1271 *wl)
1829 + {
1830 +- int ret = wl->if_ops->power(wl->dev, true);
1831 ++ int ret = 0;
1832 ++
1833 ++ if (wl->if_ops->power)
1834 ++ ret = wl->if_ops->power(wl->dev, true);
1835 + if (ret == 0)
1836 + set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
1837 +
1838 +diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
1839 +index 236b41090827..44f059f7f34e 100644
1840 +--- a/drivers/net/wireless/ti/wlcore/spi.c
1841 ++++ b/drivers/net/wireless/ti/wlcore/spi.c
1842 +@@ -73,7 +73,10 @@
1843 + */
1844 + #define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
1845 +
1846 +-#define WSPI_MAX_NUM_OF_CHUNKS (SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
1847 ++/* Maximum number of SPI write chunks */
1848 ++#define WSPI_MAX_NUM_OF_CHUNKS \
1849 ++ ((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1)
1850 ++
1851 +
1852 + struct wl12xx_spi_glue {
1853 + struct device *dev;
1854 +@@ -268,9 +271,10 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
1855 + void *buf, size_t len, bool fixed)
1856 + {
1857 + struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
1858 +- struct spi_transfer t[2 * (WSPI_MAX_NUM_OF_CHUNKS + 1)];
1859 ++ /* SPI write buffers - 2 for each chunk */
1860 ++ struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
1861 + struct spi_message m;
1862 +- u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
1863 ++ u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; /* 1 command per chunk */
1864 + u32 *cmd;
1865 + u32 chunk_len;
1866 + int i;
1867 +diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
1868 +index d3346d23963b..89b3befc7155 100644
1869 +--- a/drivers/pci/bus.c
1870 ++++ b/drivers/pci/bus.c
1871 +@@ -140,6 +140,8 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
1872 + type_mask |= IORESOURCE_TYPE_BITS;
1873 +
1874 + pci_bus_for_each_resource(bus, r, i) {
1875 ++ resource_size_t min_used = min;
1876 ++
1877 + if (!r)
1878 + continue;
1879 +
1880 +@@ -163,12 +165,12 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
1881 + * overrides "min".
1882 + */
1883 + if (avail.start)
1884 +- min = avail.start;
1885 ++ min_used = avail.start;
1886 +
1887 + max = avail.end;
1888 +
1889 + /* Ok, try it out.. */
1890 +- ret = allocate_resource(r, res, size, min, max,
1891 ++ ret = allocate_resource(r, res, size, min_used, max,
1892 + align, alignf, alignf_data);
1893 + if (ret == 0)
1894 + return 0;
1895 +diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
1896 +index 8c3688046c02..923607bdabc5 100644
1897 +--- a/drivers/pci/host/pci-dra7xx.c
1898 ++++ b/drivers/pci/host/pci-dra7xx.c
1899 +@@ -302,7 +302,8 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
1900 + }
1901 +
1902 + ret = devm_request_irq(&pdev->dev, pp->irq,
1903 +- dra7xx_pcie_msi_irq_handler, IRQF_SHARED,
1904 ++ dra7xx_pcie_msi_irq_handler,
1905 ++ IRQF_SHARED | IRQF_NO_THREAD,
1906 + "dra7-pcie-msi", pp);
1907 + if (ret) {
1908 + dev_err(&pdev->dev, "failed to request irq\n");
1909 +diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
1910 +index 01095e1160a4..d997d22d4231 100644
1911 +--- a/drivers/pci/host/pci-exynos.c
1912 ++++ b/drivers/pci/host/pci-exynos.c
1913 +@@ -522,7 +522,8 @@ static int __init exynos_add_pcie_port(struct pcie_port *pp,
1914 +
1915 + ret = devm_request_irq(&pdev->dev, pp->msi_irq,
1916 + exynos_pcie_msi_irq_handler,
1917 +- IRQF_SHARED, "exynos-pcie", pp);
1918 ++ IRQF_SHARED | IRQF_NO_THREAD,
1919 ++ "exynos-pcie", pp);
1920 + if (ret) {
1921 + dev_err(&pdev->dev, "failed to request msi irq\n");
1922 + return ret;
1923 +diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
1924 +index 22e8224126fd..9ce7cd148c86 100644
1925 +--- a/drivers/pci/host/pci-imx6.c
1926 ++++ b/drivers/pci/host/pci-imx6.c
1927 +@@ -537,7 +537,8 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp,
1928 +
1929 + ret = devm_request_irq(&pdev->dev, pp->msi_irq,
1930 + imx6_pcie_msi_handler,
1931 +- IRQF_SHARED, "mx6-pcie-msi", pp);
1932 ++ IRQF_SHARED | IRQF_NO_THREAD,
1933 ++ "mx6-pcie-msi", pp);
1934 + if (ret) {
1935 + dev_err(&pdev->dev, "failed to request MSI irq\n");
1936 + return ret;
1937 +diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
1938 +index 3018ae52e092..30323114c53c 100644
1939 +--- a/drivers/pci/host/pci-tegra.c
1940 ++++ b/drivers/pci/host/pci-tegra.c
1941 +@@ -1288,7 +1288,7 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1942 +
1943 + msi->irq = err;
1944 +
1945 +- err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
1946 ++ err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
1947 + tegra_msi_irq_chip.name, pcie);
1948 + if (err < 0) {
1949 + dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
1950 +diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
1951 +index f4fa6c537448..414c33686621 100644
1952 +--- a/drivers/pci/host/pcie-rcar.c
1953 ++++ b/drivers/pci/host/pcie-rcar.c
1954 +@@ -720,14 +720,16 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
1955 +
1956 + /* Two irqs are for MSI, but they are also used for non-MSI irqs */
1957 + err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
1958 +- IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
1959 ++ IRQF_SHARED | IRQF_NO_THREAD,
1960 ++ rcar_msi_irq_chip.name, pcie);
1961 + if (err < 0) {
1962 + dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
1963 + goto err;
1964 + }
1965 +
1966 + err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
1967 +- IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
1968 ++ IRQF_SHARED | IRQF_NO_THREAD,
1969 ++ rcar_msi_irq_chip.name, pcie);
1970 + if (err < 0) {
1971 + dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
1972 + goto err;
1973 +diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
1974 +index b95b7563c052..a6cd8233e8c0 100644
1975 +--- a/drivers/pci/host/pcie-spear13xx.c
1976 ++++ b/drivers/pci/host/pcie-spear13xx.c
1977 +@@ -279,7 +279,8 @@ static int spear13xx_add_pcie_port(struct pcie_port *pp,
1978 + return -ENODEV;
1979 + }
1980 + ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
1981 +- IRQF_SHARED, "spear1340-pcie", pp);
1982 ++ IRQF_SHARED | IRQF_NO_THREAD,
1983 ++ "spear1340-pcie", pp);
1984 + if (ret) {
1985 + dev_err(dev, "failed to request irq %d\n", pp->irq);
1986 + return ret;
1987 +diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
1988 +index 3c7a0d580b1e..4cfa46360d12 100644
1989 +--- a/drivers/pci/host/pcie-xilinx.c
1990 ++++ b/drivers/pci/host/pcie-xilinx.c
1991 +@@ -781,7 +781,8 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
1992 +
1993 + port->irq = irq_of_parse_and_map(node, 0);
1994 + err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
1995 +- IRQF_SHARED, "xilinx-pcie", port);
1996 ++ IRQF_SHARED | IRQF_NO_THREAD,
1997 ++ "xilinx-pcie", port);
1998 + if (err) {
1999 + dev_err(dev, "unable to request irq %d\n", port->irq);
2000 + return err;
2001 +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
2002 +index e49c2bce551d..cf000b331eed 100644
2003 +--- a/drivers/tty/n_tty.c
2004 ++++ b/drivers/tty/n_tty.c
2005 +@@ -258,16 +258,13 @@ static void n_tty_check_throttle(struct tty_struct *tty)
2006 +
2007 + static void n_tty_check_unthrottle(struct tty_struct *tty)
2008 + {
2009 +- if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
2010 +- tty->link->ldisc->ops->write_wakeup == n_tty_write_wakeup) {
2011 ++ if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
2012 + if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
2013 + return;
2014 + if (!tty->count)
2015 + return;
2016 + n_tty_kick_worker(tty);
2017 +- n_tty_write_wakeup(tty->link);
2018 +- if (waitqueue_active(&tty->link->write_wait))
2019 +- wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT);
2020 ++ tty_wakeup(tty->link);
2021 + return;
2022 + }
2023 +
2024 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
2025 +index bcc8e1e8bb72..7cef54334b12 100644
2026 +--- a/drivers/tty/tty_io.c
2027 ++++ b/drivers/tty/tty_io.c
2028 +@@ -1462,13 +1462,13 @@ static int tty_reopen(struct tty_struct *tty)
2029 + {
2030 + struct tty_driver *driver = tty->driver;
2031 +
2032 +- if (!tty->count)
2033 +- return -EIO;
2034 +-
2035 + if (driver->type == TTY_DRIVER_TYPE_PTY &&
2036 + driver->subtype == PTY_TYPE_MASTER)
2037 + return -EIO;
2038 +
2039 ++ if (!tty->count)
2040 ++ return -EAGAIN;
2041 ++
2042 + if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
2043 + return -EBUSY;
2044 +
2045 +@@ -2069,7 +2069,12 @@ retry_open:
2046 +
2047 + if (tty) {
2048 + mutex_unlock(&tty_mutex);
2049 +- tty_lock(tty);
2050 ++ retval = tty_lock_interruptible(tty);
2051 ++ if (retval) {
2052 ++ if (retval == -EINTR)
2053 ++ retval = -ERESTARTSYS;
2054 ++ goto err_unref;
2055 ++ }
2056 + /* safe to drop the kref from tty_driver_lookup_tty() */
2057 + tty_kref_put(tty);
2058 + retval = tty_reopen(tty);
2059 +@@ -2087,7 +2092,11 @@ retry_open:
2060 +
2061 + if (IS_ERR(tty)) {
2062 + retval = PTR_ERR(tty);
2063 +- goto err_file;
2064 ++ if (retval != -EAGAIN || signal_pending(current))
2065 ++ goto err_file;
2066 ++ tty_free_file(filp);
2067 ++ schedule();
2068 ++ goto retry_open;
2069 + }
2070 +
2071 + tty_add_file(tty, filp);
2072 +@@ -2156,6 +2165,7 @@ retry_open:
2073 + return 0;
2074 + err_unlock:
2075 + mutex_unlock(&tty_mutex);
2076 ++err_unref:
2077 + /* after locks to avoid deadlock */
2078 + if (!IS_ERR_OR_NULL(driver))
2079 + tty_driver_kref_put(driver);
2080 +@@ -2653,6 +2663,28 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
2081 + }
2082 +
2083 + /**
2084 ++ * tiocgetd - get line discipline
2085 ++ * @tty: tty device
2086 ++ * @p: pointer to user data
2087 ++ *
2088 ++ * Retrieves the line discipline id directly from the ldisc.
2089 ++ *
2090 ++ * Locking: waits for ldisc reference (in case the line discipline
2091 ++ * is changing or the tty is being hungup)
2092 ++ */
2093 ++
2094 ++static int tiocgetd(struct tty_struct *tty, int __user *p)
2095 ++{
2096 ++ struct tty_ldisc *ld;
2097 ++ int ret;
2098 ++
2099 ++ ld = tty_ldisc_ref_wait(tty);
2100 ++ ret = put_user(ld->ops->num, p);
2101 ++ tty_ldisc_deref(ld);
2102 ++ return ret;
2103 ++}
2104 ++
2105 ++/**
2106 + * send_break - performed time break
2107 + * @tty: device to break on
2108 + * @duration: timeout in mS
2109 +@@ -2878,7 +2910,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2110 + case TIOCGSID:
2111 + return tiocgsid(tty, real_tty, p);
2112 + case TIOCGETD:
2113 +- return put_user(tty->ldisc->ops->num, (int __user *)p);
2114 ++ return tiocgetd(tty, p);
2115 + case TIOCSETD:
2116 + return tiocsetd(tty, p);
2117 + case TIOCVHANGUP:
2118 +diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
2119 +index 0efcf713b756..d09293bc0e04 100644
2120 +--- a/drivers/tty/tty_mutex.c
2121 ++++ b/drivers/tty/tty_mutex.c
2122 +@@ -22,6 +22,14 @@ void __lockfunc tty_lock(struct tty_struct *tty)
2123 + }
2124 + EXPORT_SYMBOL(tty_lock);
2125 +
2126 ++int tty_lock_interruptible(struct tty_struct *tty)
2127 ++{
2128 ++ if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
2129 ++ return -EIO;
2130 ++ tty_kref_get(tty);
2131 ++ return mutex_lock_interruptible(&tty->legacy_mutex);
2132 ++}
2133 ++
2134 + void __lockfunc tty_unlock(struct tty_struct *tty)
2135 + {
2136 + if (tty->magic != TTY_MAGIC) {
2137 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
2138 +index 26ca4f910cb0..e4c70dce3e7c 100644
2139 +--- a/drivers/usb/class/cdc-acm.c
2140 ++++ b/drivers/usb/class/cdc-acm.c
2141 +@@ -428,7 +428,8 @@ static void acm_read_bulk_callback(struct urb *urb)
2142 + set_bit(rb->index, &acm->read_urbs_free);
2143 + dev_dbg(&acm->data->dev, "%s - non-zero urb status: %d\n",
2144 + __func__, status);
2145 +- return;
2146 ++ if ((status != -ENOENT) || (urb->actual_length == 0))
2147 ++ return;
2148 + }
2149 +
2150 + usb_mark_last_busy(acm->dev);
2151 +@@ -1404,6 +1405,8 @@ made_compressed_probe:
2152 + usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
2153 + NULL, acm->writesize, acm_write_bulk, snd);
2154 + snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2155 ++ if (quirks & SEND_ZERO_PACKET)
2156 ++ snd->urb->transfer_flags |= URB_ZERO_PACKET;
2157 + snd->instance = acm;
2158 + }
2159 +
2160 +@@ -1861,6 +1864,10 @@ static const struct usb_device_id acm_ids[] = {
2161 + { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
2162 + USB_CDC_ACM_PROTO_AT_CDMA) },
2163 +
2164 ++ { USB_DEVICE(0x1519, 0x0452), /* Intel 7260 modem */
2165 ++ .driver_info = SEND_ZERO_PACKET,
2166 ++ },
2167 ++
2168 + { }
2169 + };
2170 +
2171 +diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
2172 +index dd9af38e7cda..ccfaba9ab4e4 100644
2173 +--- a/drivers/usb/class/cdc-acm.h
2174 ++++ b/drivers/usb/class/cdc-acm.h
2175 +@@ -134,3 +134,4 @@ struct acm {
2176 + #define IGNORE_DEVICE BIT(5)
2177 + #define QUIRK_CONTROL_LINE_STATE BIT(6)
2178 + #define CLEAR_HALT_CONDITIONS BIT(7)
2179 ++#define SEND_ZERO_PACKET BIT(8)
2180 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2181 +index 8683436788c3..1560f3f3e756 100644
2182 +--- a/drivers/usb/core/hub.c
2183 ++++ b/drivers/usb/core/hub.c
2184 +@@ -5386,7 +5386,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
2185 + }
2186 +
2187 + bos = udev->bos;
2188 +- udev->bos = NULL;
2189 +
2190 + for (i = 0; i < SET_CONFIG_TRIES; ++i) {
2191 +
2192 +@@ -5479,8 +5478,11 @@ done:
2193 + usb_set_usb2_hardware_lpm(udev, 1);
2194 + usb_unlocked_enable_lpm(udev);
2195 + usb_enable_ltm(udev);
2196 +- usb_release_bos_descriptor(udev);
2197 +- udev->bos = bos;
2198 ++ /* release the new BOS descriptor allocated by hub_port_init() */
2199 ++ if (udev->bos != bos) {
2200 ++ usb_release_bos_descriptor(udev);
2201 ++ udev->bos = bos;
2202 ++ }
2203 + return 0;
2204 +
2205 + re_enumerate:
2206 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
2207 +index c62109091d12..c2d65206ec6c 100644
2208 +--- a/drivers/usb/host/xhci-pci.c
2209 ++++ b/drivers/usb/host/xhci-pci.c
2210 +@@ -28,7 +28,9 @@
2211 + #include "xhci.h"
2212 + #include "xhci-trace.h"
2213 +
2214 +-#define PORT2_SSIC_CONFIG_REG2 0x883c
2215 ++#define SSIC_PORT_NUM 2
2216 ++#define SSIC_PORT_CFG2 0x880c
2217 ++#define SSIC_PORT_CFG2_OFFSET 0x30
2218 + #define PROG_DONE (1 << 30)
2219 + #define SSIC_PORT_UNUSED (1 << 31)
2220 +
2221 +@@ -45,6 +47,7 @@
2222 + #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
2223 + #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
2224 + #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
2225 ++#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
2226 +
2227 + static const char hcd_name[] = "xhci_hcd";
2228 +
2229 +@@ -152,7 +155,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
2230 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
2231 + (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
2232 + pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
2233 +- pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
2234 ++ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
2235 ++ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
2236 + xhci->quirks |= XHCI_PME_STUCK_QUIRK;
2237 + }
2238 + if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
2239 +@@ -322,28 +326,36 @@ static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
2240 + struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
2241 + u32 val;
2242 + void __iomem *reg;
2243 ++ int i;
2244 +
2245 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
2246 + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
2247 +
2248 +- reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
2249 +-
2250 +- /* Notify SSIC that SSIC profile programming is not done */
2251 +- val = readl(reg) & ~PROG_DONE;
2252 +- writel(val, reg);
2253 +-
2254 +- /* Mark SSIC port as unused(suspend) or used(resume) */
2255 +- val = readl(reg);
2256 +- if (suspend)
2257 +- val |= SSIC_PORT_UNUSED;
2258 +- else
2259 +- val &= ~SSIC_PORT_UNUSED;
2260 +- writel(val, reg);
2261 +-
2262 +- /* Notify SSIC that SSIC profile programming is done */
2263 +- val = readl(reg) | PROG_DONE;
2264 +- writel(val, reg);
2265 +- readl(reg);
2266 ++ for (i = 0; i < SSIC_PORT_NUM; i++) {
2267 ++ reg = (void __iomem *) xhci->cap_regs +
2268 ++ SSIC_PORT_CFG2 +
2269 ++ i * SSIC_PORT_CFG2_OFFSET;
2270 ++
2271 ++ /*
2272 ++ * Notify SSIC that SSIC profile programming
2273 ++ * is not done.
2274 ++ */
2275 ++ val = readl(reg) & ~PROG_DONE;
2276 ++ writel(val, reg);
2277 ++
2278 ++ /* Mark SSIC port as unused(suspend) or used(resume) */
2279 ++ val = readl(reg);
2280 ++ if (suspend)
2281 ++ val |= SSIC_PORT_UNUSED;
2282 ++ else
2283 ++ val &= ~SSIC_PORT_UNUSED;
2284 ++ writel(val, reg);
2285 ++
2286 ++ /* Notify SSIC that SSIC profile programming is done */
2287 ++ val = readl(reg) | PROG_DONE;
2288 ++ writel(val, reg);
2289 ++ readl(reg);
2290 ++ }
2291 + }
2292 +
2293 + reg = (void __iomem *) xhci->cap_regs + 0x80a4;
2294 +diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
2295 +index 0d19a6d61a71..970a30e155cb 100644
2296 +--- a/drivers/usb/phy/phy-msm-usb.c
2297 ++++ b/drivers/usb/phy/phy-msm-usb.c
2298 +@@ -1599,6 +1599,8 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
2299 + &motg->id.nb);
2300 + if (ret < 0) {
2301 + dev_err(&pdev->dev, "register ID notifier failed\n");
2302 ++ extcon_unregister_notifier(motg->vbus.extcon,
2303 ++ EXTCON_USB, &motg->vbus.nb);
2304 + return ret;
2305 + }
2306 +
2307 +@@ -1660,15 +1662,6 @@ static int msm_otg_probe(struct platform_device *pdev)
2308 + if (!motg)
2309 + return -ENOMEM;
2310 +
2311 +- pdata = dev_get_platdata(&pdev->dev);
2312 +- if (!pdata) {
2313 +- if (!np)
2314 +- return -ENXIO;
2315 +- ret = msm_otg_read_dt(pdev, motg);
2316 +- if (ret)
2317 +- return ret;
2318 +- }
2319 +-
2320 + motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
2321 + GFP_KERNEL);
2322 + if (!motg->phy.otg)
2323 +@@ -1710,6 +1703,15 @@ static int msm_otg_probe(struct platform_device *pdev)
2324 + if (!motg->regs)
2325 + return -ENOMEM;
2326 +
2327 ++ pdata = dev_get_platdata(&pdev->dev);
2328 ++ if (!pdata) {
2329 ++ if (!np)
2330 ++ return -ENXIO;
2331 ++ ret = msm_otg_read_dt(pdev, motg);
2332 ++ if (ret)
2333 ++ return ret;
2334 ++ }
2335 ++
2336 + /*
2337 + * NOTE: The PHYs can be multiplexed between the chipidea controller
2338 + * and the dwc3 controller, using a single bit. It is important that
2339 +@@ -1717,8 +1719,10 @@ static int msm_otg_probe(struct platform_device *pdev)
2340 + */
2341 + if (motg->phy_number) {
2342 + phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4);
2343 +- if (!phy_select)
2344 +- return -ENOMEM;
2345 ++ if (!phy_select) {
2346 ++ ret = -ENOMEM;
2347 ++ goto unregister_extcon;
2348 ++ }
2349 + /* Enable second PHY with the OTG port */
2350 + writel(0x1, phy_select);
2351 + }
2352 +@@ -1728,7 +1732,8 @@ static int msm_otg_probe(struct platform_device *pdev)
2353 + motg->irq = platform_get_irq(pdev, 0);
2354 + if (motg->irq < 0) {
2355 + dev_err(&pdev->dev, "platform_get_irq failed\n");
2356 +- return motg->irq;
2357 ++ ret = motg->irq;
2358 ++ goto unregister_extcon;
2359 + }
2360 +
2361 + regs[0].supply = "vddcx";
2362 +@@ -1737,7 +1742,7 @@ static int msm_otg_probe(struct platform_device *pdev)
2363 +
2364 + ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
2365 + if (ret)
2366 +- return ret;
2367 ++ goto unregister_extcon;
2368 +
2369 + motg->vddcx = regs[0].consumer;
2370 + motg->v3p3 = regs[1].consumer;
2371 +@@ -1834,6 +1839,12 @@ disable_clks:
2372 + clk_disable_unprepare(motg->clk);
2373 + if (!IS_ERR(motg->core_clk))
2374 + clk_disable_unprepare(motg->core_clk);
2375 ++unregister_extcon:
2376 ++ extcon_unregister_notifier(motg->id.extcon,
2377 ++ EXTCON_USB_HOST, &motg->id.nb);
2378 ++ extcon_unregister_notifier(motg->vbus.extcon,
2379 ++ EXTCON_USB, &motg->vbus.nb);
2380 ++
2381 + return ret;
2382 + }
2383 +
2384 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2385 +index 59b2126b21a3..1dd9919081f8 100644
2386 +--- a/drivers/usb/serial/cp210x.c
2387 ++++ b/drivers/usb/serial/cp210x.c
2388 +@@ -98,6 +98,7 @@ static const struct usb_device_id id_table[] = {
2389 + { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
2390 + { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
2391 + { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
2392 ++ { USB_DEVICE(0x10C4, 0x81D7) }, /* IAI Corp. RCB-CV-USB USB to RS485 Adaptor */
2393 + { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
2394 + { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
2395 + { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
2396 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2397 +index a5a0376bbd48..8c660ae401d8 100644
2398 +--- a/drivers/usb/serial/ftdi_sio.c
2399 ++++ b/drivers/usb/serial/ftdi_sio.c
2400 +@@ -824,6 +824,7 @@ static const struct usb_device_id id_table_combined[] = {
2401 + { USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID),
2402 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
2403 + { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
2404 ++ { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_SCU18) },
2405 + { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
2406 +
2407 + /* Papouch devices based on FTDI chip */
2408 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2409 +index 67c6d4469730..a84df2513994 100644
2410 +--- a/drivers/usb/serial/ftdi_sio_ids.h
2411 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
2412 +@@ -615,6 +615,7 @@
2413 + */
2414 + #define RATOC_VENDOR_ID 0x0584
2415 + #define RATOC_PRODUCT_ID_USB60F 0xb020
2416 ++#define RATOC_PRODUCT_ID_SCU18 0xb03a
2417 +
2418 + /*
2419 + * Infineon Technologies
2420 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2421 +index f2280606b73c..db86e512e0fc 100644
2422 +--- a/drivers/usb/serial/option.c
2423 ++++ b/drivers/usb/serial/option.c
2424 +@@ -268,6 +268,8 @@ static void option_instat_callback(struct urb *urb);
2425 + #define TELIT_PRODUCT_CC864_SINGLE 0x1006
2426 + #define TELIT_PRODUCT_DE910_DUAL 0x1010
2427 + #define TELIT_PRODUCT_UE910_V2 0x1012
2428 ++#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
2429 ++#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
2430 + #define TELIT_PRODUCT_LE920 0x1200
2431 + #define TELIT_PRODUCT_LE910 0x1201
2432 +
2433 +@@ -615,6 +617,16 @@ static const struct option_blacklist_info telit_le920_blacklist = {
2434 + .reserved = BIT(1) | BIT(5),
2435 + };
2436 +
2437 ++static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
2438 ++ .sendsetup = BIT(2),
2439 ++ .reserved = BIT(0) | BIT(1) | BIT(3),
2440 ++};
2441 ++
2442 ++static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
2443 ++ .sendsetup = BIT(0),
2444 ++ .reserved = BIT(1) | BIT(2) | BIT(3),
2445 ++};
2446 ++
2447 + static const struct usb_device_id option_ids[] = {
2448 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
2449 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
2450 +@@ -1160,6 +1172,10 @@ static const struct usb_device_id option_ids[] = {
2451 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
2452 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
2453 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
2454 ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
2455 ++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
2456 ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
2457 ++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
2458 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
2459 + .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
2460 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
2461 +@@ -1679,7 +1695,7 @@ static const struct usb_device_id option_ids[] = {
2462 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
2463 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
2464 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2465 +- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
2466 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
2467 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
2468 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2469 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
2470 +diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
2471 +index 60afb39eb73c..337a0be89fcf 100644
2472 +--- a/drivers/usb/serial/visor.c
2473 ++++ b/drivers/usb/serial/visor.c
2474 +@@ -544,6 +544,11 @@ static int treo_attach(struct usb_serial *serial)
2475 + (serial->num_interrupt_in == 0))
2476 + return 0;
2477 +
2478 ++ if (serial->num_bulk_in < 2 || serial->num_interrupt_in < 2) {
2479 ++ dev_err(&serial->interface->dev, "missing endpoints\n");
2480 ++ return -ENODEV;
2481 ++ }
2482 ++
2483 + /*
2484 + * It appears that Treos and Kyoceras want to use the
2485 + * 1st bulk in endpoint to communicate with the 2nd bulk out endpoint,
2486 +@@ -597,8 +602,10 @@ static int clie_5_attach(struct usb_serial *serial)
2487 + */
2488 +
2489 + /* some sanity check */
2490 +- if (serial->num_ports < 2)
2491 +- return -1;
2492 ++ if (serial->num_bulk_out < 2) {
2493 ++ dev_err(&serial->interface->dev, "missing bulk out endpoints\n");
2494 ++ return -ENODEV;
2495 ++ }
2496 +
2497 + /* port 0 now uses the modified endpoint Address */
2498 + port = serial->port[0];
2499 +diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
2500 +index c5882b36e558..9a16d1e75a49 100644
2501 +--- a/fs/ext4/crypto_key.c
2502 ++++ b/fs/ext4/crypto_key.c
2503 +@@ -213,9 +213,11 @@ retry:
2504 + res = -ENOKEY;
2505 + goto out;
2506 + }
2507 ++ down_read(&keyring_key->sem);
2508 + ukp = user_key_payload(keyring_key);
2509 + if (ukp->datalen != sizeof(struct ext4_encryption_key)) {
2510 + res = -EINVAL;
2511 ++ up_read(&keyring_key->sem);
2512 + goto out;
2513 + }
2514 + master_key = (struct ext4_encryption_key *)ukp->data;
2515 +@@ -226,10 +228,12 @@ retry:
2516 + "ext4: key size incorrect: %d\n",
2517 + master_key->size);
2518 + res = -ENOKEY;
2519 ++ up_read(&keyring_key->sem);
2520 + goto out;
2521 + }
2522 + res = ext4_derive_key_aes(ctx.nonce, master_key->raw,
2523 + raw_key);
2524 ++ up_read(&keyring_key->sem);
2525 + if (res)
2526 + goto out;
2527 + got_key:
2528 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2529 +index 89818036f035..343b0f1f15b1 100644
2530 +--- a/fs/nfs/nfs4proc.c
2531 ++++ b/fs/nfs/nfs4proc.c
2532 +@@ -8054,7 +8054,6 @@ static void nfs4_layoutreturn_release(void *calldata)
2533 + pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
2534 + pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
2535 + pnfs_clear_layoutreturn_waitbit(lo);
2536 +- lo->plh_block_lgets--;
2537 + spin_unlock(&lo->plh_inode->i_lock);
2538 + pnfs_free_lseg_list(&freeme);
2539 + pnfs_put_layout_hdr(lrp->args.layout);
2540 +diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
2541 +index 84f2f8079466..4e2162b355db 100644
2542 +--- a/fs/ocfs2/dlm/dlmmaster.c
2543 ++++ b/fs/ocfs2/dlm/dlmmaster.c
2544 +@@ -2519,6 +2519,11 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2545 + spin_lock(&dlm->master_lock);
2546 + ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2547 + namelen, target, dlm->node_num);
2548 ++ /* get an extra reference on the mle.
2549 ++ * otherwise the assert_master from the new
2550 ++ * master will destroy this.
2551 ++ */
2552 ++ dlm_get_mle_inuse(mle);
2553 + spin_unlock(&dlm->master_lock);
2554 + spin_unlock(&dlm->spinlock);
2555 +
2556 +@@ -2554,6 +2559,7 @@ fail:
2557 + if (mle_added) {
2558 + dlm_mle_detach_hb_events(dlm, mle);
2559 + dlm_put_mle(mle);
2560 ++ dlm_put_mle_inuse(mle);
2561 + } else if (mle) {
2562 + kmem_cache_free(dlm_mle_cache, mle);
2563 + mle = NULL;
2564 +@@ -2571,17 +2577,6 @@ fail:
2565 + * ensure that all assert_master work is flushed. */
2566 + flush_workqueue(dlm->dlm_worker);
2567 +
2568 +- /* get an extra reference on the mle.
2569 +- * otherwise the assert_master from the new
2570 +- * master will destroy this.
2571 +- * also, make sure that all callers of dlm_get_mle
2572 +- * take both dlm->spinlock and dlm->master_lock */
2573 +- spin_lock(&dlm->spinlock);
2574 +- spin_lock(&dlm->master_lock);
2575 +- dlm_get_mle_inuse(mle);
2576 +- spin_unlock(&dlm->master_lock);
2577 +- spin_unlock(&dlm->spinlock);
2578 +-
2579 + /* notify new node and send all lock state */
2580 + /* call send_one_lockres with migration flag.
2581 + * this serves as notice to the target node that a
2582 +@@ -3312,6 +3307,15 @@ top:
2583 + mle->new_master != dead_node)
2584 + continue;
2585 +
2586 ++ if (mle->new_master == dead_node && mle->inuse) {
2587 ++ mlog(ML_NOTICE, "%s: target %u died during "
2588 ++ "migration from %u, the MLE is "
2589 ++ "still keep used, ignore it!\n",
2590 ++ dlm->name, dead_node,
2591 ++ mle->master);
2592 ++ continue;
2593 ++ }
2594 ++
2595 + /* If we have reached this point, this mle needs to be
2596 + * removed from the list and freed. */
2597 + dlm_clean_migration_mle(dlm, mle);
2598 +diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
2599 +index 9e4f862d20fe..42f0cae93a0a 100644
2600 +--- a/fs/ocfs2/dlm/dlmrecovery.c
2601 ++++ b/fs/ocfs2/dlm/dlmrecovery.c
2602 +@@ -2360,6 +2360,8 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2603 + break;
2604 + }
2605 + }
2606 ++ dlm_lockres_clear_refmap_bit(dlm, res,
2607 ++ dead_node);
2608 + spin_unlock(&res->spinlock);
2609 + continue;
2610 + }
2611 +diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
2612 +index 20276e340339..b002acf50203 100644
2613 +--- a/fs/ocfs2/dlmglue.c
2614 ++++ b/fs/ocfs2/dlmglue.c
2615 +@@ -1390,6 +1390,7 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
2616 + unsigned int gen;
2617 + int noqueue_attempted = 0;
2618 + int dlm_locked = 0;
2619 ++ int kick_dc = 0;
2620 +
2621 + if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
2622 + mlog_errno(-EINVAL);
2623 +@@ -1524,7 +1525,12 @@ update_holders:
2624 + unlock:
2625 + lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
2626 +
2627 ++ /* ocfs2_unblock_lock reques on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
2628 ++ kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);
2629 ++
2630 + spin_unlock_irqrestore(&lockres->l_lock, flags);
2631 ++ if (kick_dc)
2632 ++ ocfs2_wake_downconvert_thread(osb);
2633 + out:
2634 + /*
2635 + * This is helping work around a lock inversion between the page lock
2636 +diff --git a/include/crypto/hash.h b/include/crypto/hash.h
2637 +index 3d69c93d50e8..6361892ea737 100644
2638 +--- a/include/crypto/hash.h
2639 ++++ b/include/crypto/hash.h
2640 +@@ -204,6 +204,7 @@ struct crypto_ahash {
2641 + unsigned int keylen);
2642 +
2643 + unsigned int reqsize;
2644 ++ bool has_setkey;
2645 + struct crypto_tfm base;
2646 + };
2647 +
2648 +@@ -375,6 +376,11 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
2649 + int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2650 + unsigned int keylen);
2651 +
2652 ++static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
2653 ++{
2654 ++ return tfm->has_setkey;
2655 ++}
2656 ++
2657 + /**
2658 + * crypto_ahash_finup() - update and finalize message digest
2659 + * @req: reference to the ahash_request handle that holds all information
2660 +diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
2661 +index 018afb264ac2..a2bfd7843f18 100644
2662 +--- a/include/crypto/if_alg.h
2663 ++++ b/include/crypto/if_alg.h
2664 +@@ -30,6 +30,9 @@ struct alg_sock {
2665 +
2666 + struct sock *parent;
2667 +
2668 ++ unsigned int refcnt;
2669 ++ unsigned int nokey_refcnt;
2670 ++
2671 + const struct af_alg_type *type;
2672 + void *private;
2673 + };
2674 +@@ -50,9 +53,11 @@ struct af_alg_type {
2675 + void (*release)(void *private);
2676 + int (*setkey)(void *private, const u8 *key, unsigned int keylen);
2677 + int (*accept)(void *private, struct sock *sk);
2678 ++ int (*accept_nokey)(void *private, struct sock *sk);
2679 + int (*setauthsize)(void *private, unsigned int authsize);
2680 +
2681 + struct proto_ops *ops;
2682 ++ struct proto_ops *ops_nokey;
2683 + struct module *owner;
2684 + char name[14];
2685 + };
2686 +@@ -67,6 +72,7 @@ int af_alg_register_type(const struct af_alg_type *type);
2687 + int af_alg_unregister_type(const struct af_alg_type *type);
2688 +
2689 + int af_alg_release(struct socket *sock);
2690 ++void af_alg_release_parent(struct sock *sk);
2691 + int af_alg_accept(struct sock *sk, struct socket *newsock);
2692 +
2693 + int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
2694 +@@ -83,11 +89,6 @@ static inline struct alg_sock *alg_sk(struct sock *sk)
2695 + return (struct alg_sock *)sk;
2696 + }
2697 +
2698 +-static inline void af_alg_release_parent(struct sock *sk)
2699 +-{
2700 +- sock_put(alg_sk(sk)->parent);
2701 +-}
2702 +-
2703 + static inline void af_alg_init_completion(struct af_alg_completion *completion)
2704 + {
2705 + init_completion(&completion->completion);
2706 +diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
2707 +index d8dd41fb034f..fd8742a40ff3 100644
2708 +--- a/include/crypto/skcipher.h
2709 ++++ b/include/crypto/skcipher.h
2710 +@@ -61,6 +61,8 @@ struct crypto_skcipher {
2711 + unsigned int ivsize;
2712 + unsigned int reqsize;
2713 +
2714 ++ bool has_setkey;
2715 ++
2716 + struct crypto_tfm base;
2717 + };
2718 +
2719 +@@ -305,6 +307,11 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
2720 + return tfm->setkey(tfm, key, keylen);
2721 + }
2722 +
2723 ++static inline bool crypto_skcipher_has_setkey(struct crypto_skcipher *tfm)
2724 ++{
2725 ++ return tfm->has_setkey;
2726 ++}
2727 ++
2728 + /**
2729 + * crypto_skcipher_reqtfm() - obtain cipher handle from request
2730 + * @req: skcipher_request out of which the cipher handle is to be obtained
2731 +diff --git a/include/linux/console.h b/include/linux/console.h
2732 +index bd194343c346..ea731af2451e 100644
2733 +--- a/include/linux/console.h
2734 ++++ b/include/linux/console.h
2735 +@@ -150,6 +150,7 @@ extern int console_trylock(void);
2736 + extern void console_unlock(void);
2737 + extern void console_conditional_schedule(void);
2738 + extern void console_unblank(void);
2739 ++extern void console_flush_on_panic(void);
2740 + extern struct tty_driver *console_device(int *);
2741 + extern void console_stop(struct console *);
2742 + extern void console_start(struct console *);
2743 +diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
2744 +index 76dd4f0da5ca..2ead22dd74a0 100644
2745 +--- a/include/linux/hrtimer.h
2746 ++++ b/include/linux/hrtimer.h
2747 +@@ -87,7 +87,8 @@ enum hrtimer_restart {
2748 + * @function: timer expiry callback function
2749 + * @base: pointer to the timer base (per cpu and per clock)
2750 + * @state: state information (See bit values above)
2751 +- * @start_pid: timer statistics field to store the pid of the task which
2752 ++ * @is_rel: Set if the timer was armed relative
2753 ++ * @start_pid: timer statistics field to store the pid of the task which
2754 + * started the timer
2755 + * @start_site: timer statistics field to store the site where the timer
2756 + * was started
2757 +@@ -101,7 +102,8 @@ struct hrtimer {
2758 + ktime_t _softexpires;
2759 + enum hrtimer_restart (*function)(struct hrtimer *);
2760 + struct hrtimer_clock_base *base;
2761 +- unsigned long state;
2762 ++ u8 state;
2763 ++ u8 is_rel;
2764 + #ifdef CONFIG_TIMER_STATS
2765 + int start_pid;
2766 + void *start_site;
2767 +@@ -321,6 +323,27 @@ static inline void clock_was_set_delayed(void) { }
2768 +
2769 + #endif
2770 +
2771 ++static inline ktime_t
2772 ++__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
2773 ++{
2774 ++ ktime_t rem = ktime_sub(timer->node.expires, now);
2775 ++
2776 ++ /*
2777 ++ * Adjust relative timers for the extra we added in
2778 ++ * hrtimer_start_range_ns() to prevent short timeouts.
2779 ++ */
2780 ++ if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
2781 ++ rem.tv64 -= hrtimer_resolution;
2782 ++ return rem;
2783 ++}
2784 ++
2785 ++static inline ktime_t
2786 ++hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
2787 ++{
2788 ++ return __hrtimer_expires_remaining_adjusted(timer,
2789 ++ timer->base->get_time());
2790 ++}
2791 ++
2792 + extern void clock_was_set(void);
2793 + #ifdef CONFIG_TIMERFD
2794 + extern void timerfd_clock_was_set(void);
2795 +@@ -390,7 +413,12 @@ static inline void hrtimer_restart(struct hrtimer *timer)
2796 + }
2797 +
2798 + /* Query timers: */
2799 +-extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
2800 ++extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
2801 ++
2802 ++static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
2803 ++{
2804 ++ return __hrtimer_get_remaining(timer, false);
2805 ++}
2806 +
2807 + extern u64 hrtimer_get_next_event(void);
2808 +
2809 +diff --git a/include/linux/tty.h b/include/linux/tty.h
2810 +index 5e31f1b99037..6b6e811f4575 100644
2811 +--- a/include/linux/tty.h
2812 ++++ b/include/linux/tty.h
2813 +@@ -654,6 +654,7 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
2814 + /* tty_mutex.c */
2815 + /* functions for preparation of BKL removal */
2816 + extern void __lockfunc tty_lock(struct tty_struct *tty);
2817 ++extern int tty_lock_interruptible(struct tty_struct *tty);
2818 + extern void __lockfunc tty_unlock(struct tty_struct *tty);
2819 + extern void __lockfunc tty_lock_slave(struct tty_struct *tty);
2820 + extern void __lockfunc tty_unlock_slave(struct tty_struct *tty);
2821 +diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
2822 +index f6cbef78db62..3b91ad5d5115 100644
2823 +--- a/include/sound/rawmidi.h
2824 ++++ b/include/sound/rawmidi.h
2825 +@@ -167,6 +167,10 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
2826 + int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count);
2827 + int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
2828 + unsigned char *buffer, int count);
2829 ++int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
2830 ++ unsigned char *buffer, int count);
2831 ++int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
2832 ++ int count);
2833 +
2834 + /* main midi functions */
2835 +
2836 +diff --git a/kernel/panic.c b/kernel/panic.c
2837 +index 4b150bc0c6c1..41e2b54f36b5 100644
2838 +--- a/kernel/panic.c
2839 ++++ b/kernel/panic.c
2840 +@@ -157,8 +157,7 @@ void panic(const char *fmt, ...)
2841 + * panic() is not being callled from OOPS.
2842 + */
2843 + debug_locks_off();
2844 +- console_trylock();
2845 +- console_unlock();
2846 ++ console_flush_on_panic();
2847 +
2848 + if (!panic_blink)
2849 + panic_blink = no_blink;
2850 +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
2851 +index 2ce8826f1053..c048e34b177f 100644
2852 +--- a/kernel/printk/printk.c
2853 ++++ b/kernel/printk/printk.c
2854 +@@ -2233,13 +2233,24 @@ void console_unlock(void)
2855 + static u64 seen_seq;
2856 + unsigned long flags;
2857 + bool wake_klogd = false;
2858 +- bool retry;
2859 ++ bool do_cond_resched, retry;
2860 +
2861 + if (console_suspended) {
2862 + up_console_sem();
2863 + return;
2864 + }
2865 +
2866 ++ /*
2867 ++ * Console drivers are called under logbuf_lock, so
2868 ++ * @console_may_schedule should be cleared before; however, we may
2869 ++ * end up dumping a lot of lines, for example, if called from
2870 ++ * console registration path, and should invoke cond_resched()
2871 ++ * between lines if allowable. Not doing so can cause a very long
2872 ++ * scheduling stall on a slow console leading to RCU stall and
2873 ++ * softlockup warnings which exacerbate the issue with more
2874 ++ * messages practically incapacitating the system.
2875 ++ */
2876 ++ do_cond_resched = console_may_schedule;
2877 + console_may_schedule = 0;
2878 +
2879 + /* flush buffered message fragment immediately to console */
2880 +@@ -2311,6 +2322,9 @@ skip:
2881 + call_console_drivers(level, ext_text, ext_len, text, len);
2882 + start_critical_timings();
2883 + local_irq_restore(flags);
2884 ++
2885 ++ if (do_cond_resched)
2886 ++ cond_resched();
2887 + }
2888 + console_locked = 0;
2889 +
2890 +@@ -2378,6 +2392,25 @@ void console_unblank(void)
2891 + console_unlock();
2892 + }
2893 +
2894 ++/**
2895 ++ * console_flush_on_panic - flush console content on panic
2896 ++ *
2897 ++ * Immediately output all pending messages no matter what.
2898 ++ */
2899 ++void console_flush_on_panic(void)
2900 ++{
2901 ++ /*
2902 ++ * If someone else is holding the console lock, trylock will fail
2903 ++ * and may_schedule may be set. Ignore and proceed to unlock so
2904 ++ * that messages are flushed out. As this can be called from any
2905 ++ * context and we don't want to get preempted while flushing,
2906 ++ * ensure may_schedule is cleared.
2907 ++ */
2908 ++ console_trylock();
2909 ++ console_may_schedule = 0;
2910 ++ console_unlock();
2911 ++}
2912 ++
2913 + /*
2914 + * Return the console tty driver structure and its associated index
2915 + */
2916 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2917 +index 732e993b564b..eb70592f03f6 100644
2918 +--- a/kernel/sched/core.c
2919 ++++ b/kernel/sched/core.c
2920 +@@ -6738,7 +6738,7 @@ static void sched_init_numa(void)
2921 +
2922 + sched_domains_numa_masks[i][j] = mask;
2923 +
2924 +- for (k = 0; k < nr_node_ids; k++) {
2925 ++ for_each_node(k) {
2926 + if (node_distance(j, k) > sched_domains_numa_distance[i])
2927 + continue;
2928 +
2929 +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
2930 +index 435b8850dd80..fa909f9fd559 100644
2931 +--- a/kernel/time/hrtimer.c
2932 ++++ b/kernel/time/hrtimer.c
2933 +@@ -897,10 +897,10 @@ static int enqueue_hrtimer(struct hrtimer *timer,
2934 + */
2935 + static void __remove_hrtimer(struct hrtimer *timer,
2936 + struct hrtimer_clock_base *base,
2937 +- unsigned long newstate, int reprogram)
2938 ++ u8 newstate, int reprogram)
2939 + {
2940 + struct hrtimer_cpu_base *cpu_base = base->cpu_base;
2941 +- unsigned int state = timer->state;
2942 ++ u8 state = timer->state;
2943 +
2944 + timer->state = newstate;
2945 + if (!(state & HRTIMER_STATE_ENQUEUED))
2946 +@@ -930,7 +930,7 @@ static inline int
2947 + remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
2948 + {
2949 + if (hrtimer_is_queued(timer)) {
2950 +- unsigned long state = timer->state;
2951 ++ u8 state = timer->state;
2952 + int reprogram;
2953 +
2954 + /*
2955 +@@ -954,6 +954,22 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool rest
2956 + return 0;
2957 + }
2958 +
2959 ++static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
2960 ++ const enum hrtimer_mode mode)
2961 ++{
2962 ++#ifdef CONFIG_TIME_LOW_RES
2963 ++ /*
2964 ++ * CONFIG_TIME_LOW_RES indicates that the system has no way to return
2965 ++ * granular time values. For relative timers we add hrtimer_resolution
2966 ++ * (i.e. one jiffie) to prevent short timeouts.
2967 ++ */
2968 ++ timer->is_rel = mode & HRTIMER_MODE_REL;
2969 ++ if (timer->is_rel)
2970 ++ tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
2971 ++#endif
2972 ++ return tim;
2973 ++}
2974 ++
2975 + /**
2976 + * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
2977 + * @timer: the timer to be added
2978 +@@ -974,19 +990,10 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
2979 + /* Remove an active timer from the queue: */
2980 + remove_hrtimer(timer, base, true);
2981 +
2982 +- if (mode & HRTIMER_MODE_REL) {
2983 ++ if (mode & HRTIMER_MODE_REL)
2984 + tim = ktime_add_safe(tim, base->get_time());
2985 +- /*
2986 +- * CONFIG_TIME_LOW_RES is a temporary way for architectures
2987 +- * to signal that they simply return xtime in
2988 +- * do_gettimeoffset(). In this case we want to round up by
2989 +- * resolution when starting a relative timer, to avoid short
2990 +- * timeouts. This will go away with the GTOD framework.
2991 +- */
2992 +-#ifdef CONFIG_TIME_LOW_RES
2993 +- tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
2994 +-#endif
2995 +- }
2996 ++
2997 ++ tim = hrtimer_update_lowres(timer, tim, mode);
2998 +
2999 + hrtimer_set_expires_range_ns(timer, tim, delta_ns);
3000 +
3001 +@@ -1074,19 +1081,23 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
3002 + /**
3003 + * hrtimer_get_remaining - get remaining time for the timer
3004 + * @timer: the timer to read
3005 ++ * @adjust: adjust relative timers when CONFIG_TIME_LOW_RES=y
3006 + */
3007 +-ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
3008 ++ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
3009 + {
3010 + unsigned long flags;
3011 + ktime_t rem;
3012 +
3013 + lock_hrtimer_base(timer, &flags);
3014 +- rem = hrtimer_expires_remaining(timer);
3015 ++ if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
3016 ++ rem = hrtimer_expires_remaining_adjusted(timer);
3017 ++ else
3018 ++ rem = hrtimer_expires_remaining(timer);
3019 + unlock_hrtimer_base(timer, &flags);
3020 +
3021 + return rem;
3022 + }
3023 +-EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
3024 ++EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);
3025 +
3026 + #ifdef CONFIG_NO_HZ_COMMON
3027 + /**
3028 +@@ -1220,6 +1231,14 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
3029 + fn = timer->function;
3030 +
3031 + /*
3032 ++ * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
3033 ++ * timer is restarted with a period then it becomes an absolute
3034 ++ * timer. If its not restarted it does not matter.
3035 ++ */
3036 ++ if (IS_ENABLED(CONFIG_TIME_LOW_RES))
3037 ++ timer->is_rel = false;
3038 ++
3039 ++ /*
3040 + * Because we run timers from hardirq context, there is no chance
3041 + * they get migrated to another cpu, therefore its safe to unlock
3042 + * the timer base.
3043 +diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
3044 +index f75e35b60149..ba7d8b288bb3 100644
3045 +--- a/kernel/time/timer_list.c
3046 ++++ b/kernel/time/timer_list.c
3047 +@@ -69,7 +69,7 @@ print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
3048 + print_name_offset(m, taddr);
3049 + SEQ_printf(m, ", ");
3050 + print_name_offset(m, timer->function);
3051 +- SEQ_printf(m, ", S:%02lx", timer->state);
3052 ++ SEQ_printf(m, ", S:%02x", timer->state);
3053 + #ifdef CONFIG_TIMER_STATS
3054 + SEQ_printf(m, ", ");
3055 + print_name_offset(m, timer->start_site);
3056 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
3057 +index 87fb9801bd9e..d9293402ee68 100644
3058 +--- a/kernel/trace/trace.c
3059 ++++ b/kernel/trace/trace.c
3060 +@@ -1751,7 +1751,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
3061 + {
3062 + __buffer_unlock_commit(buffer, event);
3063 +
3064 +- ftrace_trace_stack(tr, buffer, flags, 6, pc, regs);
3065 ++ ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
3066 + ftrace_trace_userstack(buffer, flags, pc);
3067 + }
3068 + EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
3069 +diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
3070 +index dda9e6742950..202df6cffcca 100644
3071 +--- a/kernel/trace/trace_stack.c
3072 ++++ b/kernel/trace/trace_stack.c
3073 +@@ -126,6 +126,13 @@ check_stack(unsigned long ip, unsigned long *stack)
3074 + }
3075 +
3076 + /*
3077 ++ * Some archs may not have the passed in ip in the dump.
3078 ++ * If that happens, we need to show everything.
3079 ++ */
3080 ++ if (i == stack_trace_max.nr_entries)
3081 ++ i = 0;
3082 ++
3083 ++ /*
3084 + * Now find where in the stack these are.
3085 + */
3086 + x = 0;
3087 +diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
3088 +index 6a08ce7d6adc..acf9da449f81 100644
3089 +--- a/lib/libcrc32c.c
3090 ++++ b/lib/libcrc32c.c
3091 +@@ -74,3 +74,4 @@ module_exit(libcrc32c_mod_fini);
3092 + MODULE_AUTHOR("Clay Haapala <chaapala@×××××.com>");
3093 + MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
3094 + MODULE_LICENSE("GPL");
3095 ++MODULE_SOFTDEP("pre: crc32c");
3096 +diff --git a/mm/backing-dev.c b/mm/backing-dev.c
3097 +index 7340353f8aea..cbe6f0b96f29 100644
3098 +--- a/mm/backing-dev.c
3099 ++++ b/mm/backing-dev.c
3100 +@@ -989,7 +989,7 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout)
3101 + * here rather than calling cond_resched().
3102 + */
3103 + if (current->flags & PF_WQ_WORKER)
3104 +- schedule_timeout(1);
3105 ++ schedule_timeout_uninterruptible(1);
3106 + else
3107 + cond_resched();
3108 +
3109 +diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
3110 +index 9f15bdd9163c..fc083996e40a 100644
3111 +--- a/mm/zsmalloc.c
3112 ++++ b/mm/zsmalloc.c
3113 +@@ -309,7 +309,12 @@ static void free_handle(struct zs_pool *pool, unsigned long handle)
3114 +
3115 + static void record_obj(unsigned long handle, unsigned long obj)
3116 + {
3117 +- *(unsigned long *)handle = obj;
3118 ++ /*
3119 ++ * lsb of @obj represents handle lock while other bits
3120 ++ * represent object value the handle is pointing so
3121 ++ * updating shouldn't do store tearing.
3122 ++ */
3123 ++ WRITE_ONCE(*(unsigned long *)handle, obj);
3124 + }
3125 +
3126 + /* zpool driver */
3127 +@@ -1635,6 +1640,13 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
3128 + free_obj = obj_malloc(d_page, class, handle);
3129 + zs_object_copy(free_obj, used_obj, class);
3130 + index++;
3131 ++ /*
3132 ++ * record_obj updates handle's value to free_obj and it will
3133 ++ * invalidate lock bit(ie, HANDLE_PIN_BIT) of handle, which
3134 ++ * breaks synchronization using pin_tag(e,g, zs_free) so
3135 ++ * let's keep the lock bit.
3136 ++ */
3137 ++ free_obj |= BIT(HANDLE_PIN_BIT);
3138 + record_obj(handle, free_obj);
3139 + unpin_tag(handle);
3140 + obj_free(pool, class, used_obj);
3141 +diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
3142 +index 1334e02ae8f4..3d145a3ffccf 100644
3143 +--- a/security/integrity/evm/evm_main.c
3144 ++++ b/security/integrity/evm/evm_main.c
3145 +@@ -23,6 +23,7 @@
3146 + #include <linux/integrity.h>
3147 + #include <linux/evm.h>
3148 + #include <crypto/hash.h>
3149 ++#include <crypto/algapi.h>
3150 + #include "evm.h"
3151 +
3152 + int evm_initialized;
3153 +@@ -148,7 +149,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
3154 + xattr_value_len, calc.digest);
3155 + if (rc)
3156 + break;
3157 +- rc = memcmp(xattr_data->digest, calc.digest,
3158 ++ rc = crypto_memneq(xattr_data->digest, calc.digest,
3159 + sizeof(calc.digest));
3160 + if (rc)
3161 + rc = -EINVAL;
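
The EVM change swaps memcmp() for crypto_memneq() because memcmp() returns at the first mismatching byte, so an attacker probing forged HMACs can measure how many leading digest bytes were right. A constant-time comparison accumulates all differences before deciding; a plain C sketch:

    #include <stddef.h>

    static int demo_memneq(const void *a, const void *b, size_t n)
    {
            const unsigned char *pa = a, *pb = b;
            unsigned char diff = 0;
            size_t i;

            for (i = 0; i < n; i++)
                    diff |= pa[i] ^ pb[i];  /* no early exit */
            return diff != 0;               /* non-zero when they differ */
    }
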
3162 +diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
3163 +index b123c42e7dc8..b554d7f9e3be 100644
3164 +--- a/sound/core/compress_offload.c
3165 ++++ b/sound/core/compress_offload.c
3166 +@@ -44,6 +44,13 @@
3167 + #include <sound/compress_offload.h>
3168 + #include <sound/compress_driver.h>
3169 +
3170 ++/* struct snd_compr_codec_caps overflows the ioctl bit size for some
3171 ++ * architectures, so we need to disable the relevant ioctls.
3172 ++ */
3173 ++#if _IOC_SIZEBITS < 14
3174 ++#define COMPR_CODEC_CAPS_OVERFLOW
3175 ++#endif
3176 ++
3177 + /* TODO:
3178 + * - add substream support for multiple devices in case of
3179 + * SND_DYNAMIC_MINORS is not used
3180 +@@ -438,6 +445,7 @@ out:
3181 + return retval;
3182 + }
3183 +
3184 ++#ifndef COMPR_CODEC_CAPS_OVERFLOW
3185 + static int
3186 + snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
3187 + {
3188 +@@ -461,6 +469,7 @@ out:
3189 + kfree(caps);
3190 + return retval;
3191 + }
3192 ++#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
3193 +
3194 + /* revisit this with snd_pcm_preallocate_xxx */
3195 + static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
3196 +@@ -799,9 +808,11 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
3197 + case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
3198 + retval = snd_compr_get_caps(stream, arg);
3199 + break;
3200 ++#ifndef COMPR_CODEC_CAPS_OVERFLOW
3201 + case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
3202 + retval = snd_compr_get_codec_caps(stream, arg);
3203 + break;
3204 ++#endif
3205 + case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
3206 + retval = snd_compr_set_params(stream, arg);
3207 + break;
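
The compress_offload guard exists because an ioctl command encodes its argument size in _IOC_SIZEBITS bits (14 on x86, fewer on some architectures), so a struct too large for that field simply cannot be expressed as an ioctl number and the command has to be compiled out there. A sketch of the same guard with made-up struct and command names:

    #include <linux/ioctl.h>

    struct demo_caps {
            char blob[9000];        /* needs the full 14 size bits */
    };

    #if _IOC_SIZEBITS < 14
    #define DEMO_CAPS_OVERFLOW      /* size cannot be encoded here */
    #else
    #define DEMO_GET_CAPS _IOR('D', 0x01, struct demo_caps)
    #endif
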
3208 +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
3209 +index 58550cc93f28..33e72c809e50 100644
3210 +--- a/sound/core/oss/pcm_oss.c
3211 ++++ b/sound/core/oss/pcm_oss.c
3212 +@@ -834,7 +834,8 @@ static int choose_rate(struct snd_pcm_substream *substream,
3213 + return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
3214 + }
3215 +
3216 +-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
3217 ++static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
3218 ++ bool trylock)
3219 + {
3220 + struct snd_pcm_runtime *runtime = substream->runtime;
3221 + struct snd_pcm_hw_params *params, *sparams;
3222 +@@ -848,7 +849,10 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
3223 + struct snd_mask sformat_mask;
3224 + struct snd_mask mask;
3225 +
3226 +- if (mutex_lock_interruptible(&runtime->oss.params_lock))
3227 ++ if (trylock) {
3228 ++ if (!(mutex_trylock(&runtime->oss.params_lock)))
3229 ++ return -EAGAIN;
3230 ++ } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
3231 + return -EINTR;
3232 + sw_params = kmalloc(sizeof(*sw_params), GFP_KERNEL);
3233 + params = kmalloc(sizeof(*params), GFP_KERNEL);
3234 +@@ -1092,7 +1096,7 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil
3235 + if (asubstream == NULL)
3236 + asubstream = substream;
3237 + if (substream->runtime->oss.params) {
3238 +- err = snd_pcm_oss_change_params(substream);
3239 ++ err = snd_pcm_oss_change_params(substream, false);
3240 + if (err < 0)
3241 + return err;
3242 + }
3243 +@@ -1132,7 +1136,7 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
3244 + return 0;
3245 + runtime = substream->runtime;
3246 + if (runtime->oss.params) {
3247 +- err = snd_pcm_oss_change_params(substream);
3248 ++ err = snd_pcm_oss_change_params(substream, false);
3249 + if (err < 0)
3250 + return err;
3251 + }
3252 +@@ -2163,7 +2167,7 @@ static int snd_pcm_oss_get_space(struct snd_pcm_oss_file *pcm_oss_file, int stre
3253 + runtime = substream->runtime;
3254 +
3255 + if (runtime->oss.params &&
3256 +- (err = snd_pcm_oss_change_params(substream)) < 0)
3257 ++ (err = snd_pcm_oss_change_params(substream, false)) < 0)
3258 + return err;
3259 +
3260 + info.fragsize = runtime->oss.period_bytes;
3261 +@@ -2800,7 +2804,12 @@ static int snd_pcm_oss_mmap(struct file *file, struct vm_area_struct *area)
3262 + return -EIO;
3263 +
3264 + if (runtime->oss.params) {
3265 +- if ((err = snd_pcm_oss_change_params(substream)) < 0)
3266 ++ /* use mutex_trylock() for params_lock for avoiding a deadlock
3267 ++ * between mmap_sem and params_lock taken by
3268 ++ * copy_from/to_user() in snd_pcm_oss_write/read()
3269 ++ */
3270 ++ err = snd_pcm_oss_change_params(substream, true);
3271 ++ if (err < 0)
3272 + return err;
3273 + }
3274 + #ifdef CONFIG_SND_PCM_OSS_PLUGINS
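
The pcm_oss change is classic ABBA avoidance: the mmap path already holds mmap_sem and then wants params_lock, while the read/write path holds params_lock and may fault user memory, taking mmap_sem. Rather than wait (and possibly deadlock), the mmap path only trylocks and reports -EAGAIN. The pattern in isolation, as a kernel-style sketch with an invented struct:

    #include <linux/mutex.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct demo_state {
            struct mutex params_lock;
    };

    static int demo_change_params(struct demo_state *st, bool trylock)
    {
            if (trylock) {
                    if (!mutex_trylock(&st->params_lock))
                            return -EAGAIN;   /* caller retries later */
            } else if (mutex_lock_interruptible(&st->params_lock)) {
                    return -EINTR;
            }

            /* ... reconfigure hardware parameters under the lock ... */

            mutex_unlock(&st->params_lock);
            return 0;
    }
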
3275 +diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
3276 +index a7759846fbaa..795437b10082 100644
3277 +--- a/sound/core/rawmidi.c
3278 ++++ b/sound/core/rawmidi.c
3279 +@@ -942,31 +942,36 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
3280 + unsigned long flags;
3281 + long result = 0, count1;
3282 + struct snd_rawmidi_runtime *runtime = substream->runtime;
3283 ++ unsigned long appl_ptr;
3284 +
3285 ++ spin_lock_irqsave(&runtime->lock, flags);
3286 + while (count > 0 && runtime->avail) {
3287 + count1 = runtime->buffer_size - runtime->appl_ptr;
3288 + if (count1 > count)
3289 + count1 = count;
3290 +- spin_lock_irqsave(&runtime->lock, flags);
3291 + if (count1 > (int)runtime->avail)
3292 + count1 = runtime->avail;
3293 ++
3294 ++ /* update runtime->appl_ptr before unlocking for userbuf */
3295 ++ appl_ptr = runtime->appl_ptr;
3296 ++ runtime->appl_ptr += count1;
3297 ++ runtime->appl_ptr %= runtime->buffer_size;
3298 ++ runtime->avail -= count1;
3299 ++
3300 + if (kernelbuf)
3301 +- memcpy(kernelbuf + result, runtime->buffer + runtime->appl_ptr, count1);
3302 ++ memcpy(kernelbuf + result, runtime->buffer + appl_ptr, count1);
3303 + if (userbuf) {
3304 + spin_unlock_irqrestore(&runtime->lock, flags);
3305 + if (copy_to_user(userbuf + result,
3306 +- runtime->buffer + runtime->appl_ptr, count1)) {
3307 ++ runtime->buffer + appl_ptr, count1)) {
3308 + return result > 0 ? result : -EFAULT;
3309 + }
3310 + spin_lock_irqsave(&runtime->lock, flags);
3311 + }
3312 +- runtime->appl_ptr += count1;
3313 +- runtime->appl_ptr %= runtime->buffer_size;
3314 +- runtime->avail -= count1;
3315 +- spin_unlock_irqrestore(&runtime->lock, flags);
3316 + result += count1;
3317 + count -= count1;
3318 + }
3319 ++ spin_unlock_irqrestore(&runtime->lock, flags);
3320 + return result;
3321 + }
3322 +
3323 +@@ -1055,23 +1060,16 @@ int snd_rawmidi_transmit_empty(struct snd_rawmidi_substream *substream)
3324 + EXPORT_SYMBOL(snd_rawmidi_transmit_empty);
3325 +
3326 + /**
3327 +- * snd_rawmidi_transmit_peek - copy data from the internal buffer
3328 ++ * __snd_rawmidi_transmit_peek - copy data from the internal buffer
3329 + * @substream: the rawmidi substream
3330 + * @buffer: the buffer pointer
3331 + * @count: data size to transfer
3332 + *
3333 +- * Copies data from the internal output buffer to the given buffer.
3334 +- *
3335 +- * Call this in the interrupt handler when the midi output is ready,
3336 +- * and call snd_rawmidi_transmit_ack() after the transmission is
3337 +- * finished.
3338 +- *
3339 +- * Return: The size of copied data, or a negative error code on failure.
3340 ++ * This is a variant of snd_rawmidi_transmit_peek() without spinlock.
3341 + */
3342 +-int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
3343 ++int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
3344 + unsigned char *buffer, int count)
3345 + {
3346 +- unsigned long flags;
3347 + int result, count1;
3348 + struct snd_rawmidi_runtime *runtime = substream->runtime;
3349 +
3350 +@@ -1081,7 +1079,6 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
3351 + return -EINVAL;
3352 + }
3353 + result = 0;
3354 +- spin_lock_irqsave(&runtime->lock, flags);
3355 + if (runtime->avail >= runtime->buffer_size) {
3356 + /* warning: lowlevel layer MUST trigger down the hardware */
3357 + goto __skip;
3358 +@@ -1106,25 +1103,47 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
3359 + }
3360 + }
3361 + __skip:
3362 ++ return result;
3363 ++}
3364 ++EXPORT_SYMBOL(__snd_rawmidi_transmit_peek);
3365 ++
3366 ++/**
3367 ++ * snd_rawmidi_transmit_peek - copy data from the internal buffer
3368 ++ * @substream: the rawmidi substream
3369 ++ * @buffer: the buffer pointer
3370 ++ * @count: data size to transfer
3371 ++ *
3372 ++ * Copies data from the internal output buffer to the given buffer.
3373 ++ *
3374 ++ * Call this in the interrupt handler when the midi output is ready,
3375 ++ * and call snd_rawmidi_transmit_ack() after the transmission is
3376 ++ * finished.
3377 ++ *
3378 ++ * Return: The size of copied data, or a negative error code on failure.
3379 ++ */
3380 ++int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
3381 ++ unsigned char *buffer, int count)
3382 ++{
3383 ++ struct snd_rawmidi_runtime *runtime = substream->runtime;
3384 ++ int result;
3385 ++ unsigned long flags;
3386 ++
3387 ++ spin_lock_irqsave(&runtime->lock, flags);
3388 ++ result = __snd_rawmidi_transmit_peek(substream, buffer, count);
3389 + spin_unlock_irqrestore(&runtime->lock, flags);
3390 + return result;
3391 + }
3392 + EXPORT_SYMBOL(snd_rawmidi_transmit_peek);
3393 +
3394 + /**
3395 +- * snd_rawmidi_transmit_ack - acknowledge the transmission
3396 ++ * __snd_rawmidi_transmit_ack - acknowledge the transmission
3397 + * @substream: the rawmidi substream
3398 + * @count: the transferred count
3399 + *
3400 +- * Advances the hardware pointer for the internal output buffer with
3401 +- * the given size and updates the condition.
3402 +- * Call after the transmission is finished.
3403 +- *
3404 +- * Return: The advanced size if successful, or a negative error code on failure.
3405 ++ * This is a variant of snd_rawmidi_transmit_ack() without spinlock.
3406 + */
3407 +-int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
3408 ++int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
3409 + {
3410 +- unsigned long flags;
3411 + struct snd_rawmidi_runtime *runtime = substream->runtime;
3412 +
3413 + if (runtime->buffer == NULL) {
3414 +@@ -1132,7 +1151,6 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
3415 + "snd_rawmidi_transmit_ack: output is not active!!!\n");
3416 + return -EINVAL;
3417 + }
3418 +- spin_lock_irqsave(&runtime->lock, flags);
3419 + snd_BUG_ON(runtime->avail + count > runtime->buffer_size);
3420 + runtime->hw_ptr += count;
3421 + runtime->hw_ptr %= runtime->buffer_size;
3422 +@@ -1142,9 +1160,32 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
3423 + if (runtime->drain || snd_rawmidi_ready(substream))
3424 + wake_up(&runtime->sleep);
3425 + }
3426 +- spin_unlock_irqrestore(&runtime->lock, flags);
3427 + return count;
3428 + }
3429 ++EXPORT_SYMBOL(__snd_rawmidi_transmit_ack);
3430 ++
3431 ++/**
3432 ++ * snd_rawmidi_transmit_ack - acknowledge the transmission
3433 ++ * @substream: the rawmidi substream
3434 ++ * @count: the transferred count
3435 ++ *
3436 ++ * Advances the hardware pointer for the internal output buffer with
3437 ++ * the given size and updates the condition.
3438 ++ * Call after the transmission is finished.
3439 ++ *
3440 ++ * Return: The advanced size if successful, or a negative error code on failure.
3441 ++ */
3442 ++int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
3443 ++{
3444 ++ struct snd_rawmidi_runtime *runtime = substream->runtime;
3445 ++ int result;
3446 ++ unsigned long flags;
3447 ++
3448 ++ spin_lock_irqsave(&runtime->lock, flags);
3449 ++ result = __snd_rawmidi_transmit_ack(substream, count);
3450 ++ spin_unlock_irqrestore(&runtime->lock, flags);
3451 ++ return result;
3452 ++}
3453 + EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
3454 +
3455 + /**
3456 +@@ -1160,12 +1201,22 @@ EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
3457 + int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
3458 + unsigned char *buffer, int count)
3459 + {
3460 ++ struct snd_rawmidi_runtime *runtime = substream->runtime;
3461 ++ int result;
3462 ++ unsigned long flags;
3463 ++
3464 ++ spin_lock_irqsave(&runtime->lock, flags);
3465 + if (!substream->opened)
3466 +- return -EBADFD;
3467 +- count = snd_rawmidi_transmit_peek(substream, buffer, count);
3468 +- if (count < 0)
3469 +- return count;
3470 +- return snd_rawmidi_transmit_ack(substream, count);
3471 ++ result = -EBADFD;
3472 ++ else {
3473 ++ count = __snd_rawmidi_transmit_peek(substream, buffer, count);
3474 ++ if (count <= 0)
3475 ++ result = count;
3476 ++ else
3477 ++ result = __snd_rawmidi_transmit_ack(substream, count);
3478 ++ }
3479 ++ spin_unlock_irqrestore(&runtime->lock, flags);
3480 ++ return result;
3481 + }
3482 + EXPORT_SYMBOL(snd_rawmidi_transmit);
3483 +
3484 +@@ -1177,8 +1228,9 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
3485 + unsigned long flags;
3486 + long count1, result;
3487 + struct snd_rawmidi_runtime *runtime = substream->runtime;
3488 ++ unsigned long appl_ptr;
3489 +
3490 +- if (snd_BUG_ON(!kernelbuf && !userbuf))
3491 ++ if (!kernelbuf && !userbuf)
3492 + return -EINVAL;
3493 + if (snd_BUG_ON(!runtime->buffer))
3494 + return -EINVAL;
3495 +@@ -1197,12 +1249,19 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
3496 + count1 = count;
3497 + if (count1 > (long)runtime->avail)
3498 + count1 = runtime->avail;
3499 ++
3500 ++ /* update runtime->appl_ptr before unlocking for userbuf */
3501 ++ appl_ptr = runtime->appl_ptr;
3502 ++ runtime->appl_ptr += count1;
3503 ++ runtime->appl_ptr %= runtime->buffer_size;
3504 ++ runtime->avail -= count1;
3505 ++
3506 + if (kernelbuf)
3507 +- memcpy(runtime->buffer + runtime->appl_ptr,
3508 ++ memcpy(runtime->buffer + appl_ptr,
3509 + kernelbuf + result, count1);
3510 + else if (userbuf) {
3511 + spin_unlock_irqrestore(&runtime->lock, flags);
3512 +- if (copy_from_user(runtime->buffer + runtime->appl_ptr,
3513 ++ if (copy_from_user(runtime->buffer + appl_ptr,
3514 + userbuf + result, count1)) {
3515 + spin_lock_irqsave(&runtime->lock, flags);
3516 + result = result > 0 ? result : -EFAULT;
3517 +@@ -1210,9 +1269,6 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
3518 + }
3519 + spin_lock_irqsave(&runtime->lock, flags);
3520 + }
3521 +- runtime->appl_ptr += count1;
3522 +- runtime->appl_ptr %= runtime->buffer_size;
3523 +- runtime->avail -= count1;
3524 + result += count1;
3525 + count -= count1;
3526 + }
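
The rawmidi rework has two parts. First, the buffer pointers (appl_ptr, avail) are advanced while the lock is still held and only then is the lock dropped for copy_to_user()/copy_from_user(), so a concurrent caller can never claim the same bytes. Second, the transmit helpers are split into lock-free __ variants plus locking wrappers, so callers that need peek-then-ack as one atomic step can hold runtime->lock across both. The wrapper split in miniature (invented names, kernel-style sketch):

    #include <linux/spinlock.h>

    struct demo_runtime {
            spinlock_t lock;
            unsigned int hw_ptr, avail, buffer_size;
    };

    /* caller must hold rt->lock */
    static int __demo_transmit_ack(struct demo_runtime *rt, int count)
    {
            rt->hw_ptr = (rt->hw_ptr + count) % rt->buffer_size;
            rt->avail += count;
            return count;
    }

    static int demo_transmit_ack(struct demo_runtime *rt, int count)
    {
            unsigned long flags;
            int ret;

            spin_lock_irqsave(&rt->lock, flags);
            ret = __demo_transmit_ack(rt, count);
            spin_unlock_irqrestore(&rt->lock, flags);
            return ret;
    }
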
3527 +diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
3528 +index b1221b29728e..6779e82b46dd 100644
3529 +--- a/sound/core/seq/oss/seq_oss_init.c
3530 ++++ b/sound/core/seq/oss/seq_oss_init.c
3531 +@@ -202,7 +202,7 @@ snd_seq_oss_open(struct file *file, int level)
3532 +
3533 + dp->index = i;
3534 + if (i >= SNDRV_SEQ_OSS_MAX_CLIENTS) {
3535 +- pr_err("ALSA: seq_oss: too many applications\n");
3536 ++ pr_debug("ALSA: seq_oss: too many applications\n");
3537 + rc = -ENOMEM;
3538 + goto _error;
3539 + }
3540 +diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
3541 +index 0f3b38184fe5..b16dbef04174 100644
3542 +--- a/sound/core/seq/oss/seq_oss_synth.c
3543 ++++ b/sound/core/seq/oss/seq_oss_synth.c
3544 +@@ -308,7 +308,7 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
3545 + struct seq_oss_synth *rec;
3546 + struct seq_oss_synthinfo *info;
3547 +
3548 +- if (snd_BUG_ON(dp->max_synthdev >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
3549 ++ if (snd_BUG_ON(dp->max_synthdev > SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
3550 + return;
3551 + for (i = 0; i < dp->max_synthdev; i++) {
3552 + info = &dp->synths[i];
3553 +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
3554 +index 13cfa815732d..58e79e02f217 100644
3555 +--- a/sound/core/seq/seq_clientmgr.c
3556 ++++ b/sound/core/seq/seq_clientmgr.c
3557 +@@ -678,6 +678,9 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
3558 + else
3559 + down_read(&grp->list_mutex);
3560 + list_for_each_entry(subs, &grp->list_head, src_list) {
3561 ++ /* both ports ready? */
3562 ++ if (atomic_read(&subs->ref_count) != 2)
3563 ++ continue;
3564 + event->dest = subs->info.dest;
3565 + if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
3566 + /* convert time according to flag with subscription */
3567 +diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
3568 +index 55170a20ae72..921fb2bd8fad 100644
3569 +--- a/sound/core/seq/seq_ports.c
3570 ++++ b/sound/core/seq/seq_ports.c
3571 +@@ -173,10 +173,6 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
3572 + }
3573 +
3574 + /* */
3575 +-enum group_type {
3576 +- SRC_LIST, DEST_LIST
3577 +-};
3578 +-
3579 + static int subscribe_port(struct snd_seq_client *client,
3580 + struct snd_seq_client_port *port,
3581 + struct snd_seq_port_subs_info *grp,
3582 +@@ -203,6 +199,20 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr,
3583 + return NULL;
3584 + }
3585 +
3586 ++static void delete_and_unsubscribe_port(struct snd_seq_client *client,
3587 ++ struct snd_seq_client_port *port,
3588 ++ struct snd_seq_subscribers *subs,
3589 ++ bool is_src, bool ack);
3590 ++
3591 ++static inline struct snd_seq_subscribers *
3592 ++get_subscriber(struct list_head *p, bool is_src)
3593 ++{
3594 ++ if (is_src)
3595 ++ return list_entry(p, struct snd_seq_subscribers, src_list);
3596 ++ else
3597 ++ return list_entry(p, struct snd_seq_subscribers, dest_list);
3598 ++}
3599 ++
3600 + /*
3601 + * remove all subscribers on the list
3602 + * this is called from port_delete, for each src and dest list.
3603 +@@ -210,7 +220,7 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr,
3604 + static void clear_subscriber_list(struct snd_seq_client *client,
3605 + struct snd_seq_client_port *port,
3606 + struct snd_seq_port_subs_info *grp,
3607 +- int grptype)
3608 ++ int is_src)
3609 + {
3610 + struct list_head *p, *n;
3611 +
3612 +@@ -219,15 +229,13 @@ static void clear_subscriber_list(struct snd_seq_client *client,
3613 + struct snd_seq_client *c;
3614 + struct snd_seq_client_port *aport;
3615 +
3616 +- if (grptype == SRC_LIST) {
3617 +- subs = list_entry(p, struct snd_seq_subscribers, src_list);
3618 ++ subs = get_subscriber(p, is_src);
3619 ++ if (is_src)
3620 + aport = get_client_port(&subs->info.dest, &c);
3621 +- } else {
3622 +- subs = list_entry(p, struct snd_seq_subscribers, dest_list);
3623 ++ else
3624 + aport = get_client_port(&subs->info.sender, &c);
3625 +- }
3626 +- list_del(p);
3627 +- unsubscribe_port(client, port, grp, &subs->info, 0);
3628 ++ delete_and_unsubscribe_port(client, port, subs, is_src, false);
3629 ++
3630 + if (!aport) {
3631 + /* looks like the connected port is being deleted.
3632 + * we decrease the counter, and when both ports are deleted
3633 +@@ -235,21 +243,14 @@ static void clear_subscriber_list(struct snd_seq_client *client,
3634 + */
3635 + if (atomic_dec_and_test(&subs->ref_count))
3636 + kfree(subs);
3637 +- } else {
3638 +- /* ok we got the connected port */
3639 +- struct snd_seq_port_subs_info *agrp;
3640 +- agrp = (grptype == SRC_LIST) ? &aport->c_dest : &aport->c_src;
3641 +- down_write(&agrp->list_mutex);
3642 +- if (grptype == SRC_LIST)
3643 +- list_del(&subs->dest_list);
3644 +- else
3645 +- list_del(&subs->src_list);
3646 +- up_write(&agrp->list_mutex);
3647 +- unsubscribe_port(c, aport, agrp, &subs->info, 1);
3648 +- kfree(subs);
3649 +- snd_seq_port_unlock(aport);
3650 +- snd_seq_client_unlock(c);
3651 ++ continue;
3652 + }
3653 ++
3654 ++ /* ok we got the connected port */
3655 ++ delete_and_unsubscribe_port(c, aport, subs, !is_src, true);
3656 ++ kfree(subs);
3657 ++ snd_seq_port_unlock(aport);
3658 ++ snd_seq_client_unlock(c);
3659 + }
3660 + }
3661 +
3662 +@@ -262,8 +263,8 @@ static int port_delete(struct snd_seq_client *client,
3663 + snd_use_lock_sync(&port->use_lock);
3664 +
3665 + /* clear subscribers info */
3666 +- clear_subscriber_list(client, port, &port->c_src, SRC_LIST);
3667 +- clear_subscriber_list(client, port, &port->c_dest, DEST_LIST);
3668 ++ clear_subscriber_list(client, port, &port->c_src, true);
3669 ++ clear_subscriber_list(client, port, &port->c_dest, false);
3670 +
3671 + if (port->private_free)
3672 + port->private_free(port->private_data);
3673 +@@ -479,85 +480,120 @@ static int match_subs_info(struct snd_seq_port_subscribe *r,
3674 + return 0;
3675 + }
3676 +
3677 +-
3678 +-/* connect two ports */
3679 +-int snd_seq_port_connect(struct snd_seq_client *connector,
3680 +- struct snd_seq_client *src_client,
3681 +- struct snd_seq_client_port *src_port,
3682 +- struct snd_seq_client *dest_client,
3683 +- struct snd_seq_client_port *dest_port,
3684 +- struct snd_seq_port_subscribe *info)
3685 ++static int check_and_subscribe_port(struct snd_seq_client *client,
3686 ++ struct snd_seq_client_port *port,
3687 ++ struct snd_seq_subscribers *subs,
3688 ++ bool is_src, bool exclusive, bool ack)
3689 + {
3690 +- struct snd_seq_port_subs_info *src = &src_port->c_src;
3691 +- struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
3692 +- struct snd_seq_subscribers *subs, *s;
3693 +- int err, src_called = 0;
3694 +- unsigned long flags;
3695 +- int exclusive;
3696 ++ struct snd_seq_port_subs_info *grp;
3697 ++ struct list_head *p;
3698 ++ struct snd_seq_subscribers *s;
3699 ++ int err;
3700 +
3701 +- subs = kzalloc(sizeof(*subs), GFP_KERNEL);
3702 +- if (! subs)
3703 +- return -ENOMEM;
3704 +-
3705 +- subs->info = *info;
3706 +- atomic_set(&subs->ref_count, 2);
3707 +-
3708 +- down_write(&src->list_mutex);
3709 +- down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
3710 +-
3711 +- exclusive = info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE ? 1 : 0;
3712 ++ grp = is_src ? &port->c_src : &port->c_dest;
3713 + err = -EBUSY;
3714 ++ down_write(&grp->list_mutex);
3715 + if (exclusive) {
3716 +- if (! list_empty(&src->list_head) || ! list_empty(&dest->list_head))
3717 ++ if (!list_empty(&grp->list_head))
3718 + goto __error;
3719 + } else {
3720 +- if (src->exclusive || dest->exclusive)
3721 ++ if (grp->exclusive)
3722 + goto __error;
3723 + /* check whether already exists */
3724 +- list_for_each_entry(s, &src->list_head, src_list) {
3725 +- if (match_subs_info(info, &s->info))
3726 +- goto __error;
3727 +- }
3728 +- list_for_each_entry(s, &dest->list_head, dest_list) {
3729 +- if (match_subs_info(info, &s->info))
3730 ++ list_for_each(p, &grp->list_head) {
3731 ++ s = get_subscriber(p, is_src);
3732 ++ if (match_subs_info(&subs->info, &s->info))
3733 + goto __error;
3734 + }
3735 + }
3736 +
3737 +- if ((err = subscribe_port(src_client, src_port, src, info,
3738 +- connector->number != src_client->number)) < 0)
3739 +- goto __error;
3740 +- src_called = 1;
3741 +-
3742 +- if ((err = subscribe_port(dest_client, dest_port, dest, info,
3743 +- connector->number != dest_client->number)) < 0)
3744 ++ err = subscribe_port(client, port, grp, &subs->info, ack);
3745 ++ if (err < 0) {
3746 ++ grp->exclusive = 0;
3747 + goto __error;
3748 ++ }
3749 +
3750 + /* add to list */
3751 +- write_lock_irqsave(&src->list_lock, flags);
3752 +- // write_lock(&dest->list_lock); // no other lock yet
3753 +- list_add_tail(&subs->src_list, &src->list_head);
3754 +- list_add_tail(&subs->dest_list, &dest->list_head);
3755 +- // write_unlock(&dest->list_lock); // no other lock yet
3756 +- write_unlock_irqrestore(&src->list_lock, flags);
3757 ++ write_lock_irq(&grp->list_lock);
3758 ++ if (is_src)
3759 ++ list_add_tail(&subs->src_list, &grp->list_head);
3760 ++ else
3761 ++ list_add_tail(&subs->dest_list, &grp->list_head);
3762 ++ grp->exclusive = exclusive;
3763 ++ atomic_inc(&subs->ref_count);
3764 ++ write_unlock_irq(&grp->list_lock);
3765 ++ err = 0;
3766 ++
3767 ++ __error:
3768 ++ up_write(&grp->list_mutex);
3769 ++ return err;
3770 ++}
3771 +
3772 +- src->exclusive = dest->exclusive = exclusive;
3773 ++static void delete_and_unsubscribe_port(struct snd_seq_client *client,
3774 ++ struct snd_seq_client_port *port,
3775 ++ struct snd_seq_subscribers *subs,
3776 ++ bool is_src, bool ack)
3777 ++{
3778 ++ struct snd_seq_port_subs_info *grp;
3779 ++
3780 ++ grp = is_src ? &port->c_src : &port->c_dest;
3781 ++ down_write(&grp->list_mutex);
3782 ++ write_lock_irq(&grp->list_lock);
3783 ++ if (is_src)
3784 ++ list_del(&subs->src_list);
3785 ++ else
3786 ++ list_del(&subs->dest_list);
3787 ++ grp->exclusive = 0;
3788 ++ write_unlock_irq(&grp->list_lock);
3789 ++ up_write(&grp->list_mutex);
3790 ++
3791 ++ unsubscribe_port(client, port, grp, &subs->info, ack);
3792 ++}
3793 ++
3794 ++/* connect two ports */
3795 ++int snd_seq_port_connect(struct snd_seq_client *connector,
3796 ++ struct snd_seq_client *src_client,
3797 ++ struct snd_seq_client_port *src_port,
3798 ++ struct snd_seq_client *dest_client,
3799 ++ struct snd_seq_client_port *dest_port,
3800 ++ struct snd_seq_port_subscribe *info)
3801 ++{
3802 ++ struct snd_seq_subscribers *subs;
3803 ++ bool exclusive;
3804 ++ int err;
3805 ++
3806 ++ subs = kzalloc(sizeof(*subs), GFP_KERNEL);
3807 ++ if (!subs)
3808 ++ return -ENOMEM;
3809 ++
3810 ++ subs->info = *info;
3811 ++ atomic_set(&subs->ref_count, 0);
3812 ++ INIT_LIST_HEAD(&subs->src_list);
3813 ++ INIT_LIST_HEAD(&subs->dest_list);
3814 ++
3815 ++ exclusive = !!(info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE);
3816 ++
3817 ++ err = check_and_subscribe_port(src_client, src_port, subs, true,
3818 ++ exclusive,
3819 ++ connector->number != src_client->number);
3820 ++ if (err < 0)
3821 ++ goto error;
3822 ++ err = check_and_subscribe_port(dest_client, dest_port, subs, false,
3823 ++ exclusive,
3824 ++ connector->number != dest_client->number);
3825 ++ if (err < 0)
3826 ++ goto error_dest;
3827 +
3828 +- up_write(&dest->list_mutex);
3829 +- up_write(&src->list_mutex);
3830 + return 0;
3831 +
3832 +- __error:
3833 +- if (src_called)
3834 +- unsubscribe_port(src_client, src_port, src, info,
3835 +- connector->number != src_client->number);
3836 ++ error_dest:
3837 ++ delete_and_unsubscribe_port(src_client, src_port, subs, true,
3838 ++ connector->number != src_client->number);
3839 ++ error:
3840 + kfree(subs);
3841 +- up_write(&dest->list_mutex);
3842 +- up_write(&src->list_mutex);
3843 + return err;
3844 + }
3845 +
3846 +-
3847 + /* remove the connection */
3848 + int snd_seq_port_disconnect(struct snd_seq_client *connector,
3849 + struct snd_seq_client *src_client,
3850 +@@ -567,37 +603,28 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,
3851 + struct snd_seq_port_subscribe *info)
3852 + {
3853 + struct snd_seq_port_subs_info *src = &src_port->c_src;
3854 +- struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
3855 + struct snd_seq_subscribers *subs;
3856 + int err = -ENOENT;
3857 +- unsigned long flags;
3858 +
3859 + down_write(&src->list_mutex);
3860 +- down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
3861 +-
3862 + /* look for the connection */
3863 + list_for_each_entry(subs, &src->list_head, src_list) {
3864 + if (match_subs_info(info, &subs->info)) {
3865 +- write_lock_irqsave(&src->list_lock, flags);
3866 +- // write_lock(&dest->list_lock); // no lock yet
3867 +- list_del(&subs->src_list);
3868 +- list_del(&subs->dest_list);
3869 +- // write_unlock(&dest->list_lock);
3870 +- write_unlock_irqrestore(&src->list_lock, flags);
3871 +- src->exclusive = dest->exclusive = 0;
3872 +- unsubscribe_port(src_client, src_port, src, info,
3873 +- connector->number != src_client->number);
3874 +- unsubscribe_port(dest_client, dest_port, dest, info,
3875 +- connector->number != dest_client->number);
3876 +- kfree(subs);
3877 ++ atomic_dec(&subs->ref_count); /* mark as not ready */
3878 + err = 0;
3879 + break;
3880 + }
3881 + }
3882 +-
3883 +- up_write(&dest->list_mutex);
3884 + up_write(&src->list_mutex);
3885 +- return err;
3886 ++ if (err < 0)
3887 ++ return err;
3888 ++
3889 ++ delete_and_unsubscribe_port(src_client, src_port, subs, true,
3890 ++ connector->number != src_client->number);
3891 ++ delete_and_unsubscribe_port(dest_client, dest_port, subs, false,
3892 ++ connector->number != dest_client->number);
3893 ++ kfree(subs);
3894 ++ return 0;
3895 + }
3896 +
3897 +
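
Taken together, the seq_clientmgr and seq_ports hunks turn a subscription into a two-sided handshake: each port is attached independently through check_and_subscribe_port(), ref_count is bumped once per completed side, and the delivery path only trusts entries whose count has reached 2; disconnect first drops the count so in-flight deliveries skip the entry before it is torn down. The counting idea on its own, as a small kernel-style sketch:

    #include <linux/atomic.h>
    #include <linux/types.h>

    struct demo_subs {
            atomic_t ref_count;     /* 0, 1 or 2 sides attached */
    };

    static void demo_side_attached(struct demo_subs *s)
    {
            atomic_inc(&s->ref_count);
    }

    static bool demo_subs_ready(struct demo_subs *s)
    {
            /* deliver only when both source and destination are in place */
            return atomic_read(&s->ref_count) == 2;
    }
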
3898 +diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
3899 +index 82b220c769c1..293104926098 100644
3900 +--- a/sound/core/seq/seq_timer.c
3901 ++++ b/sound/core/seq/seq_timer.c
3902 +@@ -90,6 +90,9 @@ void snd_seq_timer_delete(struct snd_seq_timer **tmr)
3903 +
3904 + void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
3905 + {
3906 ++ unsigned long flags;
3907 ++
3908 ++ spin_lock_irqsave(&tmr->lock, flags);
3909 + /* setup defaults */
3910 + tmr->ppq = 96; /* 96 PPQ */
3911 + tmr->tempo = 500000; /* 120 BPM */
3912 +@@ -105,21 +108,25 @@ void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
3913 + tmr->preferred_resolution = seq_default_timer_resolution;
3914 +
3915 + tmr->skew = tmr->skew_base = SKEW_BASE;
3916 ++ spin_unlock_irqrestore(&tmr->lock, flags);
3917 + }
3918 +
3919 +-void snd_seq_timer_reset(struct snd_seq_timer * tmr)
3920 ++static void seq_timer_reset(struct snd_seq_timer *tmr)
3921 + {
3922 +- unsigned long flags;
3923 +-
3924 +- spin_lock_irqsave(&tmr->lock, flags);
3925 +-
3926 + /* reset time & songposition */
3927 + tmr->cur_time.tv_sec = 0;
3928 + tmr->cur_time.tv_nsec = 0;
3929 +
3930 + tmr->tick.cur_tick = 0;
3931 + tmr->tick.fraction = 0;
3932 ++}
3933 ++
3934 ++void snd_seq_timer_reset(struct snd_seq_timer *tmr)
3935 ++{
3936 ++ unsigned long flags;
3937 +
3938 ++ spin_lock_irqsave(&tmr->lock, flags);
3939 ++ seq_timer_reset(tmr);
3940 + spin_unlock_irqrestore(&tmr->lock, flags);
3941 + }
3942 +
3943 +@@ -138,8 +145,11 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
3944 + tmr = q->timer;
3945 + if (tmr == NULL)
3946 + return;
3947 +- if (!tmr->running)
3948 ++ spin_lock_irqsave(&tmr->lock, flags);
3949 ++ if (!tmr->running) {
3950 ++ spin_unlock_irqrestore(&tmr->lock, flags);
3951 + return;
3952 ++ }
3953 +
3954 + resolution *= ticks;
3955 + if (tmr->skew != tmr->skew_base) {
3956 +@@ -148,8 +158,6 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
3957 + (((resolution & 0xffff) * tmr->skew) >> 16);
3958 + }
3959 +
3960 +- spin_lock_irqsave(&tmr->lock, flags);
3961 +-
3962 + /* update timer */
3963 + snd_seq_inc_time_nsec(&tmr->cur_time, resolution);
3964 +
3965 +@@ -296,26 +304,30 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
3966 + t->callback = snd_seq_timer_interrupt;
3967 + t->callback_data = q;
3968 + t->flags |= SNDRV_TIMER_IFLG_AUTO;
3969 ++ spin_lock_irq(&tmr->lock);
3970 + tmr->timeri = t;
3971 ++ spin_unlock_irq(&tmr->lock);
3972 + return 0;
3973 + }
3974 +
3975 + int snd_seq_timer_close(struct snd_seq_queue *q)
3976 + {
3977 + struct snd_seq_timer *tmr;
3978 ++ struct snd_timer_instance *t;
3979 +
3980 + tmr = q->timer;
3981 + if (snd_BUG_ON(!tmr))
3982 + return -EINVAL;
3983 +- if (tmr->timeri) {
3984 +- snd_timer_stop(tmr->timeri);
3985 +- snd_timer_close(tmr->timeri);
3986 +- tmr->timeri = NULL;
3987 +- }
3988 ++ spin_lock_irq(&tmr->lock);
3989 ++ t = tmr->timeri;
3990 ++ tmr->timeri = NULL;
3991 ++ spin_unlock_irq(&tmr->lock);
3992 ++ if (t)
3993 ++ snd_timer_close(t);
3994 + return 0;
3995 + }
3996 +
3997 +-int snd_seq_timer_stop(struct snd_seq_timer * tmr)
3998 ++static int seq_timer_stop(struct snd_seq_timer *tmr)
3999 + {
4000 + if (! tmr->timeri)
4001 + return -EINVAL;
4002 +@@ -326,6 +338,17 @@ int snd_seq_timer_stop(struct snd_seq_timer * tmr)
4003 + return 0;
4004 + }
4005 +
4006 ++int snd_seq_timer_stop(struct snd_seq_timer *tmr)
4007 ++{
4008 ++ unsigned long flags;
4009 ++ int err;
4010 ++
4011 ++ spin_lock_irqsave(&tmr->lock, flags);
4012 ++ err = seq_timer_stop(tmr);
4013 ++ spin_unlock_irqrestore(&tmr->lock, flags);
4014 ++ return err;
4015 ++}
4016 ++
4017 + static int initialize_timer(struct snd_seq_timer *tmr)
4018 + {
4019 + struct snd_timer *t;
4020 +@@ -358,13 +381,13 @@ static int initialize_timer(struct snd_seq_timer *tmr)
4021 + return 0;
4022 + }
4023 +
4024 +-int snd_seq_timer_start(struct snd_seq_timer * tmr)
4025 ++static int seq_timer_start(struct snd_seq_timer *tmr)
4026 + {
4027 + if (! tmr->timeri)
4028 + return -EINVAL;
4029 + if (tmr->running)
4030 +- snd_seq_timer_stop(tmr);
4031 +- snd_seq_timer_reset(tmr);
4032 ++ seq_timer_stop(tmr);
4033 ++ seq_timer_reset(tmr);
4034 + if (initialize_timer(tmr) < 0)
4035 + return -EINVAL;
4036 + snd_timer_start(tmr->timeri, tmr->ticks);
4037 +@@ -373,14 +396,25 @@ int snd_seq_timer_start(struct snd_seq_timer * tmr)
4038 + return 0;
4039 + }
4040 +
4041 +-int snd_seq_timer_continue(struct snd_seq_timer * tmr)
4042 ++int snd_seq_timer_start(struct snd_seq_timer *tmr)
4043 ++{
4044 ++ unsigned long flags;
4045 ++ int err;
4046 ++
4047 ++ spin_lock_irqsave(&tmr->lock, flags);
4048 ++ err = seq_timer_start(tmr);
4049 ++ spin_unlock_irqrestore(&tmr->lock, flags);
4050 ++ return err;
4051 ++}
4052 ++
4053 ++static int seq_timer_continue(struct snd_seq_timer *tmr)
4054 + {
4055 + if (! tmr->timeri)
4056 + return -EINVAL;
4057 + if (tmr->running)
4058 + return -EBUSY;
4059 + if (! tmr->initialized) {
4060 +- snd_seq_timer_reset(tmr);
4061 ++ seq_timer_reset(tmr);
4062 + if (initialize_timer(tmr) < 0)
4063 + return -EINVAL;
4064 + }
4065 +@@ -390,11 +424,24 @@ int snd_seq_timer_continue(struct snd_seq_timer * tmr)
4066 + return 0;
4067 + }
4068 +
4069 ++int snd_seq_timer_continue(struct snd_seq_timer *tmr)
4070 ++{
4071 ++ unsigned long flags;
4072 ++ int err;
4073 ++
4074 ++ spin_lock_irqsave(&tmr->lock, flags);
4075 ++ err = seq_timer_continue(tmr);
4076 ++ spin_unlock_irqrestore(&tmr->lock, flags);
4077 ++ return err;
4078 ++}
4079 ++
4080 + /* return current 'real' time. use timeofday() to get better granularity. */
4081 + snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
4082 + {
4083 + snd_seq_real_time_t cur_time;
4084 ++ unsigned long flags;
4085 +
4086 ++ spin_lock_irqsave(&tmr->lock, flags);
4087 + cur_time = tmr->cur_time;
4088 + if (tmr->running) {
4089 + struct timeval tm;
4090 +@@ -410,7 +457,7 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
4091 + }
4092 + snd_seq_sanity_real_time(&cur_time);
4093 + }
4094 +-
4095 ++ spin_unlock_irqrestore(&tmr->lock, flags);
4096 + return cur_time;
4097 + }
4098 +
4099 +diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
4100 +index 56e0f4cd3f82..81134e067184 100644
4101 +--- a/sound/core/seq/seq_virmidi.c
4102 ++++ b/sound/core/seq/seq_virmidi.c
4103 +@@ -155,21 +155,26 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
4104 + struct snd_virmidi *vmidi = substream->runtime->private_data;
4105 + int count, res;
4106 + unsigned char buf[32], *pbuf;
4107 ++ unsigned long flags;
4108 +
4109 + if (up) {
4110 + vmidi->trigger = 1;
4111 + if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH &&
4112 + !(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) {
4113 +- snd_rawmidi_transmit_ack(substream, substream->runtime->buffer_size - substream->runtime->avail);
4114 +- return; /* ignored */
4115 ++ while (snd_rawmidi_transmit(substream, buf,
4116 ++ sizeof(buf)) > 0) {
4117 ++ /* ignored */
4118 ++ }
4119 ++ return;
4120 + }
4121 + if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
4122 + if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
4123 + return;
4124 + vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
4125 + }
4126 ++ spin_lock_irqsave(&substream->runtime->lock, flags);
4127 + while (1) {
4128 +- count = snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
4129 ++ count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
4130 + if (count <= 0)
4131 + break;
4132 + pbuf = buf;
4133 +@@ -179,16 +184,18 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
4134 + snd_midi_event_reset_encode(vmidi->parser);
4135 + continue;
4136 + }
4137 +- snd_rawmidi_transmit_ack(substream, res);
4138 ++ __snd_rawmidi_transmit_ack(substream, res);
4139 + pbuf += res;
4140 + count -= res;
4141 + if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
4142 + if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
4143 +- return;
4144 ++ goto out;
4145 + vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
4146 + }
4147 + }
4148 + }
4149 ++ out:
4150 ++ spin_unlock_irqrestore(&substream->runtime->lock, flags);
4151 + } else {
4152 + vmidi->trigger = 0;
4153 + }
4154 +@@ -254,9 +261,13 @@ static int snd_virmidi_output_open(struct snd_rawmidi_substream *substream)
4155 + */
4156 + static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
4157 + {
4158 ++ struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
4159 + struct snd_virmidi *vmidi = substream->runtime->private_data;
4160 +- snd_midi_event_free(vmidi->parser);
4161 ++
4162 ++ write_lock_irq(&rdev->filelist_lock);
4163 + list_del(&vmidi->list);
4164 ++ write_unlock_irq(&rdev->filelist_lock);
4165 ++ snd_midi_event_free(vmidi->parser);
4166 + substream->runtime->private_data = NULL;
4167 + kfree(vmidi);
4168 + return 0;
4169 +diff --git a/sound/core/timer.c b/sound/core/timer.c
4170 +index 0a049c4578f1..f24c9fccf008 100644
4171 +--- a/sound/core/timer.c
4172 ++++ b/sound/core/timer.c
4173 +@@ -305,8 +305,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
4174 + return 0;
4175 + }
4176 +
4177 +-static int _snd_timer_stop(struct snd_timer_instance *timeri,
4178 +- int keep_flag, int event);
4179 ++static int _snd_timer_stop(struct snd_timer_instance *timeri, int event);
4180 +
4181 + /*
4182 + * close a timer instance
4183 +@@ -348,7 +347,7 @@ int snd_timer_close(struct snd_timer_instance *timeri)
4184 + spin_unlock_irq(&timer->lock);
4185 + mutex_lock(&register_mutex);
4186 + list_del(&timeri->open_list);
4187 +- if (timer && list_empty(&timer->open_list_head) &&
4188 ++ if (list_empty(&timer->open_list_head) &&
4189 + timer->hw.close)
4190 + timer->hw.close(timer);
4191 + /* remove slave links */
4192 +@@ -423,7 +422,7 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
4193 + spin_lock_irqsave(&timer->lock, flags);
4194 + list_for_each_entry(ts, &ti->slave_active_head, active_list)
4195 + if (ts->ccallback)
4196 +- ts->ccallback(ti, event + 100, &tstamp, resolution);
4197 ++ ts->ccallback(ts, event + 100, &tstamp, resolution);
4198 + spin_unlock_irqrestore(&timer->lock, flags);
4199 + }
4200 +
4201 +@@ -452,6 +451,10 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)
4202 + unsigned long flags;
4203 +
4204 + spin_lock_irqsave(&slave_active_lock, flags);
4205 ++ if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
4206 ++ spin_unlock_irqrestore(&slave_active_lock, flags);
4207 ++ return -EBUSY;
4208 ++ }
4209 + timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
4210 + if (timeri->master && timeri->timer) {
4211 + spin_lock(&timeri->timer->lock);
4212 +@@ -476,7 +479,8 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
4213 + return -EINVAL;
4214 + if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
4215 + result = snd_timer_start_slave(timeri);
4216 +- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
4217 ++ if (result >= 0)
4218 ++ snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
4219 + return result;
4220 + }
4221 + timer = timeri->timer;
4222 +@@ -485,16 +489,22 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
4223 + if (timer->card && timer->card->shutdown)
4224 + return -ENODEV;
4225 + spin_lock_irqsave(&timer->lock, flags);
4226 ++ if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
4227 ++ SNDRV_TIMER_IFLG_START)) {
4228 ++ result = -EBUSY;
4229 ++ goto unlock;
4230 ++ }
4231 + timeri->ticks = timeri->cticks = ticks;
4232 + timeri->pticks = 0;
4233 + result = snd_timer_start1(timer, timeri, ticks);
4234 ++ unlock:
4235 + spin_unlock_irqrestore(&timer->lock, flags);
4236 +- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
4237 ++ if (result >= 0)
4238 ++ snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
4239 + return result;
4240 + }
4241 +
4242 +-static int _snd_timer_stop(struct snd_timer_instance * timeri,
4243 +- int keep_flag, int event)
4244 ++static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
4245 + {
4246 + struct snd_timer *timer;
4247 + unsigned long flags;
4248 +@@ -503,19 +513,30 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
4249 + return -ENXIO;
4250 +
4251 + if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
4252 +- if (!keep_flag) {
4253 +- spin_lock_irqsave(&slave_active_lock, flags);
4254 +- timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
4255 +- list_del_init(&timeri->ack_list);
4256 +- list_del_init(&timeri->active_list);
4257 ++ spin_lock_irqsave(&slave_active_lock, flags);
4258 ++ if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
4259 + spin_unlock_irqrestore(&slave_active_lock, flags);
4260 ++ return -EBUSY;
4261 + }
4262 ++ if (timeri->timer)
4263 ++ spin_lock(&timeri->timer->lock);
4264 ++ timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
4265 ++ list_del_init(&timeri->ack_list);
4266 ++ list_del_init(&timeri->active_list);
4267 ++ if (timeri->timer)
4268 ++ spin_unlock(&timeri->timer->lock);
4269 ++ spin_unlock_irqrestore(&slave_active_lock, flags);
4270 + goto __end;
4271 + }
4272 + timer = timeri->timer;
4273 + if (!timer)
4274 + return -EINVAL;
4275 + spin_lock_irqsave(&timer->lock, flags);
4276 ++ if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
4277 ++ SNDRV_TIMER_IFLG_START))) {
4278 ++ spin_unlock_irqrestore(&timer->lock, flags);
4279 ++ return -EBUSY;
4280 ++ }
4281 + list_del_init(&timeri->ack_list);
4282 + list_del_init(&timeri->active_list);
4283 + if (timer->card && timer->card->shutdown) {
4284 +@@ -534,9 +555,7 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
4285 + }
4286 + }
4287 + }
4288 +- if (!keep_flag)
4289 +- timeri->flags &=
4290 +- ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
4291 ++ timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
4292 + spin_unlock_irqrestore(&timer->lock, flags);
4293 + __end:
4294 + if (event != SNDRV_TIMER_EVENT_RESOLUTION)
4295 +@@ -555,7 +574,7 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
4296 + unsigned long flags;
4297 + int err;
4298 +
4299 +- err = _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_STOP);
4300 ++ err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP);
4301 + if (err < 0)
4302 + return err;
4303 + timer = timeri->timer;
4304 +@@ -587,10 +606,15 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
4305 + if (timer->card && timer->card->shutdown)
4306 + return -ENODEV;
4307 + spin_lock_irqsave(&timer->lock, flags);
4308 ++ if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
4309 ++ result = -EBUSY;
4310 ++ goto unlock;
4311 ++ }
4312 + if (!timeri->cticks)
4313 + timeri->cticks = 1;
4314 + timeri->pticks = 0;
4315 + result = snd_timer_start1(timer, timeri, timer->sticks);
4316 ++ unlock:
4317 + spin_unlock_irqrestore(&timer->lock, flags);
4318 + snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
4319 + return result;
4320 +@@ -601,7 +625,7 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
4321 + */
4322 + int snd_timer_pause(struct snd_timer_instance * timeri)
4323 + {
4324 +- return _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_PAUSE);
4325 ++ return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE);
4326 + }
4327 +
4328 + /*
4329 +@@ -724,8 +748,8 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
4330 + ti->cticks = ti->ticks;
4331 + } else {
4332 + ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
4333 +- if (--timer->running)
4334 +- list_del_init(&ti->active_list);
4335 ++ --timer->running;
4336 ++ list_del_init(&ti->active_list);
4337 + }
4338 + if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
4339 + (ti->flags & SNDRV_TIMER_IFLG_FAST))
4340 +@@ -1900,6 +1924,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
4341 + {
4342 + struct snd_timer_user *tu;
4343 + long result = 0, unit;
4344 ++ int qhead;
4345 + int err = 0;
4346 +
4347 + tu = file->private_data;
4348 +@@ -1911,7 +1936,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
4349 +
4350 + if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
4351 + err = -EAGAIN;
4352 +- break;
4353 ++ goto _error;
4354 + }
4355 +
4356 + set_current_state(TASK_INTERRUPTIBLE);
4357 +@@ -1926,42 +1951,37 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
4358 +
4359 + if (tu->disconnected) {
4360 + err = -ENODEV;
4361 +- break;
4362 ++ goto _error;
4363 + }
4364 + if (signal_pending(current)) {
4365 + err = -ERESTARTSYS;
4366 +- break;
4367 ++ goto _error;
4368 + }
4369 + }
4370 +
4371 ++ qhead = tu->qhead++;
4372 ++ tu->qhead %= tu->queue_size;
4373 + spin_unlock_irq(&tu->qlock);
4374 +- if (err < 0)
4375 +- goto _error;
4376 +
4377 + if (tu->tread) {
4378 +- if (copy_to_user(buffer, &tu->tqueue[tu->qhead++],
4379 +- sizeof(struct snd_timer_tread))) {
4380 ++ if (copy_to_user(buffer, &tu->tqueue[qhead],
4381 ++ sizeof(struct snd_timer_tread)))
4382 + err = -EFAULT;
4383 +- goto _error;
4384 +- }
4385 + } else {
4386 +- if (copy_to_user(buffer, &tu->queue[tu->qhead++],
4387 +- sizeof(struct snd_timer_read))) {
4388 ++ if (copy_to_user(buffer, &tu->queue[qhead],
4389 ++ sizeof(struct snd_timer_read)))
4390 + err = -EFAULT;
4391 +- goto _error;
4392 +- }
4393 + }
4394 +
4395 +- tu->qhead %= tu->queue_size;
4396 +-
4397 +- result += unit;
4398 +- buffer += unit;
4399 +-
4400 + spin_lock_irq(&tu->qlock);
4401 + tu->qused--;
4402 ++ if (err < 0)
4403 ++ goto _error;
4404 ++ result += unit;
4405 ++ buffer += unit;
4406 + }
4407 +- spin_unlock_irq(&tu->qlock);
4408 + _error:
4409 ++ spin_unlock_irq(&tu->qlock);
4410 + return result > 0 ? result : err;
4411 + }
4412 +
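
The timer.c hunks all enforce one rule: start, stop, continue and the userspace read queue are only manipulated under the relevant lock, and a transition is refused with -EBUSY unless the instance is in the expected state, so racing callers cannot corrupt the ack/active lists or read the same queue slot twice. The check-and-set guard in isolation (invented names):

    #include <linux/spinlock.h>
    #include <linux/errno.h>

    struct demo_timer {
            spinlock_t lock;
            unsigned int flags;
    };
    #define DEMO_RUNNING 0x01

    static int demo_start(struct demo_timer *t)
    {
            unsigned long flags;
            int ret = 0;

            spin_lock_irqsave(&t->lock, flags);
            if (t->flags & DEMO_RUNNING)
                    ret = -EBUSY;           /* someone else already started it */
            else
                    t->flags |= DEMO_RUNNING;
            spin_unlock_irqrestore(&t->lock, flags);
            return ret;
    }
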
4413 +diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
4414 +index 016e451ed506..a9f7a75702d2 100644
4415 +--- a/sound/drivers/dummy.c
4416 ++++ b/sound/drivers/dummy.c
4417 +@@ -109,6 +109,9 @@ struct dummy_timer_ops {
4418 + snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *);
4419 + };
4420 +
4421 ++#define get_dummy_ops(substream) \
4422 ++ (*(const struct dummy_timer_ops **)(substream)->runtime->private_data)
4423 ++
4424 + struct dummy_model {
4425 + const char *name;
4426 + int (*playback_constraints)(struct snd_pcm_runtime *runtime);
4427 +@@ -137,7 +140,6 @@ struct snd_dummy {
4428 + int iobox;
4429 + struct snd_kcontrol *cd_volume_ctl;
4430 + struct snd_kcontrol *cd_switch_ctl;
4431 +- const struct dummy_timer_ops *timer_ops;
4432 + };
4433 +
4434 + /*
4435 +@@ -231,6 +233,8 @@ static struct dummy_model *dummy_models[] = {
4436 + */
4437 +
4438 + struct dummy_systimer_pcm {
4439 ++ /* ops must be the first item */
4440 ++ const struct dummy_timer_ops *timer_ops;
4441 + spinlock_t lock;
4442 + struct timer_list timer;
4443 + unsigned long base_time;
4444 +@@ -366,6 +370,8 @@ static struct dummy_timer_ops dummy_systimer_ops = {
4445 + */
4446 +
4447 + struct dummy_hrtimer_pcm {
4448 ++ /* ops must be the first item */
4449 ++ const struct dummy_timer_ops *timer_ops;
4450 + ktime_t base_time;
4451 + ktime_t period_time;
4452 + atomic_t running;
4453 +@@ -492,31 +498,25 @@ static struct dummy_timer_ops dummy_hrtimer_ops = {
4454 +
4455 + static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
4456 + {
4457 +- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
4458 +-
4459 + switch (cmd) {
4460 + case SNDRV_PCM_TRIGGER_START:
4461 + case SNDRV_PCM_TRIGGER_RESUME:
4462 +- return dummy->timer_ops->start(substream);
4463 ++ return get_dummy_ops(substream)->start(substream);
4464 + case SNDRV_PCM_TRIGGER_STOP:
4465 + case SNDRV_PCM_TRIGGER_SUSPEND:
4466 +- return dummy->timer_ops->stop(substream);
4467 ++ return get_dummy_ops(substream)->stop(substream);
4468 + }
4469 + return -EINVAL;
4470 + }
4471 +
4472 + static int dummy_pcm_prepare(struct snd_pcm_substream *substream)
4473 + {
4474 +- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
4475 +-
4476 +- return dummy->timer_ops->prepare(substream);
4477 ++ return get_dummy_ops(substream)->prepare(substream);
4478 + }
4479 +
4480 + static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream)
4481 + {
4482 +- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
4483 +-
4484 +- return dummy->timer_ops->pointer(substream);
4485 ++ return get_dummy_ops(substream)->pointer(substream);
4486 + }
4487 +
4488 + static struct snd_pcm_hardware dummy_pcm_hardware = {
4489 +@@ -562,17 +562,19 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
4490 + struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
4491 + struct dummy_model *model = dummy->model;
4492 + struct snd_pcm_runtime *runtime = substream->runtime;
4493 ++ const struct dummy_timer_ops *ops;
4494 + int err;
4495 +
4496 +- dummy->timer_ops = &dummy_systimer_ops;
4497 ++ ops = &dummy_systimer_ops;
4498 + #ifdef CONFIG_HIGH_RES_TIMERS
4499 + if (hrtimer)
4500 +- dummy->timer_ops = &dummy_hrtimer_ops;
4501 ++ ops = &dummy_hrtimer_ops;
4502 + #endif
4503 +
4504 +- err = dummy->timer_ops->create(substream);
4505 ++ err = ops->create(substream);
4506 + if (err < 0)
4507 + return err;
4508 ++ get_dummy_ops(substream) = ops;
4509 +
4510 + runtime->hw = dummy->pcm_hw;
4511 + if (substream->pcm->device & 1) {
4512 +@@ -594,7 +596,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
4513 + err = model->capture_constraints(substream->runtime);
4514 + }
4515 + if (err < 0) {
4516 +- dummy->timer_ops->free(substream);
4517 ++ get_dummy_ops(substream)->free(substream);
4518 + return err;
4519 + }
4520 + return 0;
4521 +@@ -602,8 +604,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
4522 +
4523 + static int dummy_pcm_close(struct snd_pcm_substream *substream)
4524 + {
4525 +- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
4526 +- dummy->timer_ops->free(substream);
4527 ++ get_dummy_ops(substream)->free(substream);
4528 + return 0;
4529 + }
4530 +
4531 +diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
4532 +index 926e5dcbb66a..5022c9b97ddf 100644
4533 +--- a/sound/firewire/bebob/bebob_stream.c
4534 ++++ b/sound/firewire/bebob/bebob_stream.c
4535 +@@ -47,14 +47,16 @@ static const unsigned int bridgeco_freq_table[] = {
4536 + [6] = 0x07,
4537 + };
4538 +
4539 +-static unsigned int
4540 +-get_formation_index(unsigned int rate)
4541 ++static int
4542 ++get_formation_index(unsigned int rate, unsigned int *index)
4543 + {
4544 + unsigned int i;
4545 +
4546 + for (i = 0; i < ARRAY_SIZE(snd_bebob_rate_table); i++) {
4547 +- if (snd_bebob_rate_table[i] == rate)
4548 +- return i;
4549 ++ if (snd_bebob_rate_table[i] == rate) {
4550 ++ *index = i;
4551 ++ return 0;
4552 ++ }
4553 + }
4554 + return -EINVAL;
4555 + }
4556 +@@ -425,7 +427,9 @@ make_both_connections(struct snd_bebob *bebob, unsigned int rate)
4557 + goto end;
4558 +
4559 + /* confirm params for both streams */
4560 +- index = get_formation_index(rate);
4561 ++ err = get_formation_index(rate, &index);
4562 ++ if (err < 0)
4563 ++ goto end;
4564 + pcm_channels = bebob->tx_stream_formations[index].pcm;
4565 + midi_channels = bebob->tx_stream_formations[index].midi;
4566 + err = amdtp_am824_set_parameters(&bebob->tx_stream, rate,
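
The bebob change fixes a signature bug: returning -EINVAL from a function declared to return unsigned int silently turns into a huge table index. The repaired shape returns a status code and hands the index back through a pointer, e.g. (plain C sketch):

    #include <stddef.h>

    static int demo_find_index(const unsigned int *table, size_t n,
                               unsigned int value, unsigned int *index)
    {
            size_t i;

            for (i = 0; i < n; i++) {
                    if (table[i] == value) {
                            *index = i;
                            return 0;
                    }
            }
            return -1;      /* caller must check before using *index */
    }
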
4567 +diff --git a/sound/isa/Kconfig b/sound/isa/Kconfig
4568 +index 0216475fc759..37adcc6cbe6b 100644
4569 +--- a/sound/isa/Kconfig
4570 ++++ b/sound/isa/Kconfig
4571 +@@ -3,6 +3,7 @@
4572 + config SND_WSS_LIB
4573 + tristate
4574 + select SND_PCM
4575 ++ select SND_TIMER
4576 +
4577 + config SND_SB_COMMON
4578 + tristate
4579 +@@ -42,6 +43,7 @@ config SND_AD1816A
4580 + select SND_OPL3_LIB
4581 + select SND_MPU401_UART
4582 + select SND_PCM
4583 ++ select SND_TIMER
4584 + help
4585 + Say Y here to include support for Analog Devices SoundPort
4586 + AD1816A or compatible sound chips.
4587 +@@ -209,6 +211,7 @@ config SND_GUSCLASSIC
4588 + tristate "Gravis UltraSound Classic"
4589 + select SND_RAWMIDI
4590 + select SND_PCM
4591 ++ select SND_TIMER
4592 + help
4593 + Say Y here to include support for Gravis UltraSound Classic
4594 + soundcards.
4595 +@@ -221,6 +224,7 @@ config SND_GUSEXTREME
4596 + select SND_OPL3_LIB
4597 + select SND_MPU401_UART
4598 + select SND_PCM
4599 ++ select SND_TIMER
4600 + help
4601 + Say Y here to include support for Gravis UltraSound Extreme
4602 + soundcards.
4603 +diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
4604 +index 656ce39bddbc..8f6594a7d37f 100644
4605 +--- a/sound/pci/Kconfig
4606 ++++ b/sound/pci/Kconfig
4607 +@@ -155,6 +155,7 @@ config SND_AZT3328
4608 + select SND_PCM
4609 + select SND_RAWMIDI
4610 + select SND_AC97_CODEC
4611 ++ select SND_TIMER
4612 + depends on ZONE_DMA
4613 + help
4614 + Say Y here to include support for Aztech AZF3328 (PCI168)
4615 +@@ -463,6 +464,7 @@ config SND_EMU10K1
4616 + select SND_HWDEP
4617 + select SND_RAWMIDI
4618 + select SND_AC97_CODEC
4619 ++ select SND_TIMER
4620 + depends on ZONE_DMA
4621 + help
4622 + Say Y to include support for Sound Blaster PCI 512, Live!,
4623 +@@ -889,6 +891,7 @@ config SND_YMFPCI
4624 + select SND_OPL3_LIB
4625 + select SND_MPU401_UART
4626 + select SND_AC97_CODEC
4627 ++ select SND_TIMER
4628 + help
4629 + Say Y here to include support for Yamaha PCI audio chips -
4630 + YMF724, YMF724F, YMF740, YMF740C, YMF744, YMF754.
4631 +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
4632 +index c6e8a651cea1..5c4fa8eba1d0 100644
4633 +--- a/sound/pci/hda/hda_generic.c
4634 ++++ b/sound/pci/hda/hda_generic.c
4635 +@@ -771,9 +771,6 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir,
4636 + unsigned int caps;
4637 + unsigned int mask, val;
4638 +
4639 +- if (!enable && is_active_nid(codec, nid, dir, idx_to_check))
4640 +- return;
4641 +-
4642 + caps = query_amp_caps(codec, nid, dir);
4643 + val = get_amp_val_to_activate(codec, nid, dir, caps, enable);
4644 + mask = get_amp_mask_to_modify(codec, nid, dir, idx_to_check, caps);
4645 +@@ -784,12 +781,22 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir,
4646 + update_amp(codec, nid, dir, idx, mask, val);
4647 + }
4648 +
4649 ++static void check_and_activate_amp(struct hda_codec *codec, hda_nid_t nid,
4650 ++ int dir, int idx, int idx_to_check,
4651 ++ bool enable)
4652 ++{
4653 ++ /* check whether the given amp is still used by others */
4654 ++ if (!enable && is_active_nid(codec, nid, dir, idx_to_check))
4655 ++ return;
4656 ++ activate_amp(codec, nid, dir, idx, idx_to_check, enable);
4657 ++}
4658 ++
4659 + static void activate_amp_out(struct hda_codec *codec, struct nid_path *path,
4660 + int i, bool enable)
4661 + {
4662 + hda_nid_t nid = path->path[i];
4663 + init_amp(codec, nid, HDA_OUTPUT, 0);
4664 +- activate_amp(codec, nid, HDA_OUTPUT, 0, 0, enable);
4665 ++ check_and_activate_amp(codec, nid, HDA_OUTPUT, 0, 0, enable);
4666 + }
4667 +
4668 + static void activate_amp_in(struct hda_codec *codec, struct nid_path *path,
4669 +@@ -817,9 +824,16 @@ static void activate_amp_in(struct hda_codec *codec, struct nid_path *path,
4670 + * when aa-mixer is available, we need to enable the path as well
4671 + */
4672 + for (n = 0; n < nums; n++) {
4673 +- if (n != idx && (!add_aamix || conn[n] != spec->mixer_merge_nid))
4674 +- continue;
4675 +- activate_amp(codec, nid, HDA_INPUT, n, idx, enable);
4676 ++ if (n != idx) {
4677 ++ if (conn[n] != spec->mixer_merge_nid)
4678 ++ continue;
4679 ++ /* when aamix is disabled, force to off */
4680 ++ if (!add_aamix) {
4681 ++ activate_amp(codec, nid, HDA_INPUT, n, n, false);
4682 ++ continue;
4683 ++ }
4684 ++ }
4685 ++ check_and_activate_amp(codec, nid, HDA_INPUT, n, idx, enable);
4686 + }
4687 + }
4688 +
4689 +@@ -1580,6 +1594,12 @@ static bool map_singles(struct hda_codec *codec, int outs,
4690 + return found;
4691 + }
4692 +
4693 ++static inline bool has_aamix_out_paths(struct hda_gen_spec *spec)
4694 ++{
4695 ++ return spec->aamix_out_paths[0] || spec->aamix_out_paths[1] ||
4696 ++ spec->aamix_out_paths[2];
4697 ++}
4698 ++
4699 + /* create a new path including aamix if available, and return its index */
4700 + static int check_aamix_out_path(struct hda_codec *codec, int path_idx)
4701 + {
4702 +@@ -2422,25 +2442,51 @@ static void update_aamix_paths(struct hda_codec *codec, bool do_mix,
4703 + }
4704 + }
4705 +
4706 ++/* re-initialize the output paths; only called from loopback_mixing_put() */
4707 ++static void update_output_paths(struct hda_codec *codec, int num_outs,
4708 ++ const int *paths)
4709 ++{
4710 ++ struct hda_gen_spec *spec = codec->spec;
4711 ++ struct nid_path *path;
4712 ++ int i;
4713 ++
4714 ++ for (i = 0; i < num_outs; i++) {
4715 ++ path = snd_hda_get_path_from_idx(codec, paths[i]);
4716 ++ if (path)
4717 ++ snd_hda_activate_path(codec, path, path->active,
4718 ++ spec->aamix_mode);
4719 ++ }
4720 ++}
4721 ++
4722 + static int loopback_mixing_put(struct snd_kcontrol *kcontrol,
4723 + struct snd_ctl_elem_value *ucontrol)
4724 + {
4725 + struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
4726 + struct hda_gen_spec *spec = codec->spec;
4727 ++ const struct auto_pin_cfg *cfg = &spec->autocfg;
4728 + unsigned int val = ucontrol->value.enumerated.item[0];
4729 +
4730 + if (val == spec->aamix_mode)
4731 + return 0;
4732 + spec->aamix_mode = val;
4733 +- update_aamix_paths(codec, val, spec->out_paths[0],
4734 +- spec->aamix_out_paths[0],
4735 +- spec->autocfg.line_out_type);
4736 +- update_aamix_paths(codec, val, spec->hp_paths[0],
4737 +- spec->aamix_out_paths[1],
4738 +- AUTO_PIN_HP_OUT);
4739 +- update_aamix_paths(codec, val, spec->speaker_paths[0],
4740 +- spec->aamix_out_paths[2],
4741 +- AUTO_PIN_SPEAKER_OUT);
4742 ++ if (has_aamix_out_paths(spec)) {
4743 ++ update_aamix_paths(codec, val, spec->out_paths[0],
4744 ++ spec->aamix_out_paths[0],
4745 ++ cfg->line_out_type);
4746 ++ update_aamix_paths(codec, val, spec->hp_paths[0],
4747 ++ spec->aamix_out_paths[1],
4748 ++ AUTO_PIN_HP_OUT);
4749 ++ update_aamix_paths(codec, val, spec->speaker_paths[0],
4750 ++ spec->aamix_out_paths[2],
4751 ++ AUTO_PIN_SPEAKER_OUT);
4752 ++ } else {
4753 ++ update_output_paths(codec, cfg->line_outs, spec->out_paths);
4754 ++ if (cfg->line_out_type != AUTO_PIN_HP_OUT)
4755 ++ update_output_paths(codec, cfg->hp_outs, spec->hp_paths);
4756 ++ if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
4757 ++ update_output_paths(codec, cfg->speaker_outs,
4758 ++ spec->speaker_paths);
4759 ++ }
4760 + return 1;
4761 + }
4762 +
4763 +@@ -2458,12 +2504,13 @@ static int create_loopback_mixing_ctl(struct hda_codec *codec)
4764 +
4765 + if (!spec->mixer_nid)
4766 + return 0;
4767 +- if (!(spec->aamix_out_paths[0] || spec->aamix_out_paths[1] ||
4768 +- spec->aamix_out_paths[2]))
4769 +- return 0;
4770 + if (!snd_hda_gen_add_kctl(spec, NULL, &loopback_mixing_enum))
4771 + return -ENOMEM;
4772 + spec->have_aamix_ctl = 1;
4773 ++ /* if no explicit aamix path is present (e.g. for Realtek codecs),
4774 ++ * enable aamix as default -- just for compatibility
4775 ++ */
4776 ++ spec->aamix_mode = !has_aamix_out_paths(spec);
4777 + return 0;
4778 + }
4779 +
4780 +@@ -3998,9 +4045,9 @@ static void pin_power_callback(struct hda_codec *codec,
4781 + struct hda_jack_callback *jack,
4782 + bool on)
4783 + {
4784 +- if (jack && jack->tbl->nid)
4785 ++ if (jack && jack->nid)
4786 + sync_power_state_change(codec,
4787 +- set_pin_power_jack(codec, jack->tbl->nid, on));
4788 ++ set_pin_power_jack(codec, jack->nid, on));
4789 + }
4790 +
4791 + /* callback only doing power up -- called at first */
4792 +@@ -5664,6 +5711,8 @@ static void init_aamix_paths(struct hda_codec *codec)
4793 +
4794 + if (!spec->have_aamix_ctl)
4795 + return;
4796 ++ if (!has_aamix_out_paths(spec))
4797 ++ return;
4798 + update_aamix_paths(codec, spec->aamix_mode, spec->out_paths[0],
4799 + spec->aamix_out_paths[0],
4800 + spec->autocfg.line_out_type);
4801 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4802 +index 614baff1f5d7..02a86ba5ba22 100644
4803 +--- a/sound/pci/hda/hda_intel.c
4804 ++++ b/sound/pci/hda/hda_intel.c
4805 +@@ -90,6 +90,8 @@ enum {
4806 + #define NVIDIA_HDA_ENABLE_COHBIT 0x01
4807 +
4808 + /* Defines for Intel SCH HDA snoop control */
4809 ++#define INTEL_HDA_CGCTL 0x48
4810 ++#define INTEL_HDA_CGCTL_MISCBDCGE (0x1 << 6)
4811 + #define INTEL_SCH_HDA_DEVC 0x78
4812 + #define INTEL_SCH_HDA_DEVC_NOSNOOP (0x1<<11)
4813 +
4814 +@@ -528,10 +530,21 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
4815 + {
4816 + struct hdac_bus *bus = azx_bus(chip);
4817 + struct pci_dev *pci = chip->pci;
4818 ++ u32 val;
4819 +
4820 + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
4821 + snd_hdac_set_codec_wakeup(bus, true);
4822 ++ if (IS_BROXTON(pci)) {
4823 ++ pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
4824 ++ val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
4825 ++ pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
4826 ++ }
4827 + azx_init_chip(chip, full_reset);
4828 ++ if (IS_BROXTON(pci)) {
4829 ++ pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
4830 ++ val = val | INTEL_HDA_CGCTL_MISCBDCGE;
4831 ++ pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
4832 ++ }
4833 + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
4834 + snd_hdac_set_codec_wakeup(bus, false);
4835 +
4836 +diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
4837 +index c945e257d368..a33234e04d4f 100644
4838 +--- a/sound/pci/hda/hda_jack.c
4839 ++++ b/sound/pci/hda/hda_jack.c
4840 +@@ -259,7 +259,7 @@ snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
4841 + if (!callback)
4842 + return ERR_PTR(-ENOMEM);
4843 + callback->func = func;
4844 +- callback->tbl = jack;
4845 ++ callback->nid = jack->nid;
4846 + callback->next = jack->callback;
4847 + jack->callback = callback;
4848 + }
4849 +diff --git a/sound/pci/hda/hda_jack.h b/sound/pci/hda/hda_jack.h
4850 +index 858708a044f5..e9814c0168ea 100644
4851 +--- a/sound/pci/hda/hda_jack.h
4852 ++++ b/sound/pci/hda/hda_jack.h
4853 +@@ -21,7 +21,7 @@ struct hda_jack_callback;
4854 + typedef void (*hda_jack_callback_fn) (struct hda_codec *, struct hda_jack_callback *);
4855 +
4856 + struct hda_jack_callback {
4857 +- struct hda_jack_tbl *tbl;
4858 ++ hda_nid_t nid;
4859 + hda_jack_callback_fn func;
4860 + unsigned int private_data; /* arbitrary data */
4861 + struct hda_jack_callback *next;
4862 +diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
4863 +index 4ef2259f88ca..9ceb2bc36e68 100644
4864 +--- a/sound/pci/hda/patch_ca0132.c
4865 ++++ b/sound/pci/hda/patch_ca0132.c
4866 +@@ -4427,13 +4427,16 @@ static void ca0132_process_dsp_response(struct hda_codec *codec,
4867 + static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
4868 + {
4869 + struct ca0132_spec *spec = codec->spec;
4870 ++ struct hda_jack_tbl *tbl;
4871 +
4872 + /* Delay enabling the HP amp, to let the mic-detection
4873 + * state machine run.
4874 + */
4875 + cancel_delayed_work_sync(&spec->unsol_hp_work);
4876 + schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
4877 +- cb->tbl->block_report = 1;
4878 ++ tbl = snd_hda_jack_tbl_get(codec, cb->nid);
4879 ++ if (tbl)
4880 ++ tbl->block_report = 1;
4881 + }
4882 +
4883 + static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
4884 +diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
4885 +index a12ae8ac0914..c1c855a6c0af 100644
4886 +--- a/sound/pci/hda/patch_cirrus.c
4887 ++++ b/sound/pci/hda/patch_cirrus.c
4888 +@@ -614,6 +614,7 @@ enum {
4889 + CS4208_MAC_AUTO,
4890 + CS4208_MBA6,
4891 + CS4208_MBP11,
4892 ++ CS4208_MACMINI,
4893 + CS4208_GPIO0,
4894 + };
4895 +
4896 +@@ -621,6 +622,7 @@ static const struct hda_model_fixup cs4208_models[] = {
4897 + { .id = CS4208_GPIO0, .name = "gpio0" },
4898 + { .id = CS4208_MBA6, .name = "mba6" },
4899 + { .id = CS4208_MBP11, .name = "mbp11" },
4900 ++ { .id = CS4208_MACMINI, .name = "macmini" },
4901 + {}
4902 + };
4903 +
4904 +@@ -632,6 +634,7 @@ static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
4905 + /* codec SSID matching */
4906 + static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
4907 + SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
4908 ++ SND_PCI_QUIRK(0x106b, 0x6c00, "MacMini 7,1", CS4208_MACMINI),
4909 + SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
4910 + SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
4911 + SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
4912 +@@ -666,6 +669,24 @@ static void cs4208_fixup_mac(struct hda_codec *codec,
4913 + snd_hda_apply_fixup(codec, action);
4914 + }
4915 +
4916 ++/* MacMini 7,1 has the inverted jack detection */
4917 ++static void cs4208_fixup_macmini(struct hda_codec *codec,
4918 ++ const struct hda_fixup *fix, int action)
4919 ++{
4920 ++ static const struct hda_pintbl pincfgs[] = {
4921 ++ { 0x18, 0x00ab9150 }, /* mic (audio-in) jack: disable detect */
4922 ++ { 0x21, 0x004be140 }, /* SPDIF: disable detect */
4923 ++ { }
4924 ++ };
4925 ++
4926 ++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
4927 ++ /* HP pin (0x10) has an inverted detection */
4928 ++ codec->inv_jack_detect = 1;
4929 ++ /* disable the bogus Mic and SPDIF jack detections */
4930 ++ snd_hda_apply_pincfgs(codec, pincfgs);
4931 ++ }
4932 ++}
4933 ++
4934 + static int cs4208_spdif_sw_put(struct snd_kcontrol *kcontrol,
4935 + struct snd_ctl_elem_value *ucontrol)
4936 + {
4937 +@@ -709,6 +730,12 @@ static const struct hda_fixup cs4208_fixups[] = {
4938 + .chained = true,
4939 + .chain_id = CS4208_GPIO0,
4940 + },
4941 ++ [CS4208_MACMINI] = {
4942 ++ .type = HDA_FIXUP_FUNC,
4943 ++ .v.func = cs4208_fixup_macmini,
4944 ++ .chained = true,
4945 ++ .chain_id = CS4208_GPIO0,
4946 ++ },
4947 + [CS4208_GPIO0] = {
4948 + .type = HDA_FIXUP_FUNC,
4949 + .v.func = cs4208_fixup_gpio0,
4950 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
4951 +index 4b6fb668c91c..70c945603379 100644
4952 +--- a/sound/pci/hda/patch_hdmi.c
4953 ++++ b/sound/pci/hda/patch_hdmi.c
4954 +@@ -438,7 +438,8 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
4955 + eld = &per_pin->sink_eld;
4956 +
4957 + mutex_lock(&per_pin->lock);
4958 +- if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) {
4959 ++ if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data) ||
4960 ++ eld->eld_size > ELD_MAX_SIZE) {
4961 + mutex_unlock(&per_pin->lock);
4962 + snd_BUG();
4963 + return -EINVAL;
4964 +@@ -1183,7 +1184,7 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid)
4965 + static void jack_callback(struct hda_codec *codec,
4966 + struct hda_jack_callback *jack)
4967 + {
4968 +- check_presence_and_report(codec, jack->tbl->nid);
4969 ++ check_presence_and_report(codec, jack->nid);
4970 + }
4971 +
4972 + static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
4973 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4974 +index 33753244f48f..efd4980cffb8 100644
4975 +--- a/sound/pci/hda/patch_realtek.c
4976 ++++ b/sound/pci/hda/patch_realtek.c
4977 +@@ -282,7 +282,7 @@ static void alc_update_knob_master(struct hda_codec *codec,
4978 + uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
4979 + if (!uctl)
4980 + return;
4981 +- val = snd_hda_codec_read(codec, jack->tbl->nid, 0,
4982 ++ val = snd_hda_codec_read(codec, jack->nid, 0,
4983 + AC_VERB_GET_VOLUME_KNOB_CONTROL, 0);
4984 + val &= HDA_AMP_VOLMASK;
4985 + uctl->value.integer.value[0] = val;
4986 +@@ -327,6 +327,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
4987 + case 0x10ec0292:
4988 + alc_update_coef_idx(codec, 0x4, 1<<15, 0);
4989 + break;
4990 ++ case 0x10ec0225:
4991 + case 0x10ec0233:
4992 + case 0x10ec0255:
4993 + case 0x10ec0256:
4994 +@@ -900,6 +901,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
4995 + { 0x10ec0899, 0x1028, 0, "ALC3861" },
4996 + { 0x10ec0298, 0x1028, 0, "ALC3266" },
4997 + { 0x10ec0256, 0x1028, 0, "ALC3246" },
4998 ++ { 0x10ec0225, 0x1028, 0, "ALC3253" },
4999 + { 0x10ec0670, 0x1025, 0, "ALC669X" },
5000 + { 0x10ec0676, 0x1025, 0, "ALC679X" },
5001 + { 0x10ec0282, 0x1043, 0, "ALC3229" },
5002 +@@ -1785,7 +1787,6 @@ enum {
5003 + ALC882_FIXUP_NO_PRIMARY_HP,
5004 + ALC887_FIXUP_ASUS_BASS,
5005 + ALC887_FIXUP_BASS_CHMAP,
5006 +- ALC882_FIXUP_DISABLE_AAMIX,
5007 + };
5008 +
5009 + static void alc889_fixup_coef(struct hda_codec *codec,
5010 +@@ -1947,8 +1948,6 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
5011 +
5012 + static void alc_fixup_bass_chmap(struct hda_codec *codec,
5013 + const struct hda_fixup *fix, int action);
5014 +-static void alc_fixup_disable_aamix(struct hda_codec *codec,
5015 +- const struct hda_fixup *fix, int action);
5016 +
5017 + static const struct hda_fixup alc882_fixups[] = {
5018 + [ALC882_FIXUP_ABIT_AW9D_MAX] = {
5019 +@@ -2186,10 +2185,6 @@ static const struct hda_fixup alc882_fixups[] = {
5020 + .type = HDA_FIXUP_FUNC,
5021 + .v.func = alc_fixup_bass_chmap,
5022 + },
5023 +- [ALC882_FIXUP_DISABLE_AAMIX] = {
5024 +- .type = HDA_FIXUP_FUNC,
5025 +- .v.func = alc_fixup_disable_aamix,
5026 +- },
5027 + };
5028 +
5029 + static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5030 +@@ -2228,6 +2223,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5031 + SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
5032 + SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
5033 + SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
5034 ++ SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
5035 +
5036 + /* All Apple entries are in codec SSIDs */
5037 + SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
5038 +@@ -2257,7 +2253,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5039 + SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
5040 + SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
5041 + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
5042 +- SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX),
5043 + SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
5044 + SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
5045 + SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
5046 +@@ -2651,6 +2646,7 @@ enum {
5047 + ALC269_TYPE_ALC298,
5048 + ALC269_TYPE_ALC255,
5049 + ALC269_TYPE_ALC256,
5050 ++ ALC269_TYPE_ALC225,
5051 + };
5052 +
5053 + /*
5054 +@@ -2680,6 +2676,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
5055 + case ALC269_TYPE_ALC298:
5056 + case ALC269_TYPE_ALC255:
5057 + case ALC269_TYPE_ALC256:
5058 ++ case ALC269_TYPE_ALC225:
5059 + ssids = alc269_ssids;
5060 + break;
5061 + default:
5062 +@@ -3658,6 +3655,16 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
5063 + WRITE_COEF(0xb7, 0x802b),
5064 + {}
5065 + };
5066 ++ static struct coef_fw coef0225[] = {
5067 ++ UPDATE_COEF(0x4a, 1<<8, 0),
5068 ++ UPDATE_COEFEX(0x57, 0x05, 1<<14, 0),
5069 ++ UPDATE_COEF(0x63, 3<<14, 3<<14),
5070 ++ UPDATE_COEF(0x4a, 3<<4, 2<<4),
5071 ++ UPDATE_COEF(0x4a, 3<<10, 3<<10),
5072 ++ UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
5073 ++ UPDATE_COEF(0x4a, 3<<10, 0),
5074 ++ {}
5075 ++ };
5076 +
5077 + switch (codec->core.vendor_id) {
5078 + case 0x10ec0255:
5079 +@@ -3682,6 +3689,9 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
5080 + case 0x10ec0668:
5081 + alc_process_coef_fw(codec, coef0668);
5082 + break;
5083 ++ case 0x10ec0225:
5084 ++ alc_process_coef_fw(codec, coef0225);
5085 ++ break;
5086 + }
5087 + codec_dbg(codec, "Headset jack set to unplugged mode.\n");
5088 + }
5089 +@@ -3727,6 +3737,13 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
5090 + UPDATE_COEF(0xc3, 0, 1<<12),
5091 + {}
5092 + };
5093 ++ static struct coef_fw coef0225[] = {
5094 ++ UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14),
5095 ++ UPDATE_COEF(0x4a, 3<<4, 2<<4),
5096 ++ UPDATE_COEF(0x63, 3<<14, 0),
5097 ++ {}
5098 ++ };
5099 ++
5100 +
5101 + switch (codec->core.vendor_id) {
5102 + case 0x10ec0255:
5103 +@@ -3772,6 +3789,12 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
5104 + alc_process_coef_fw(codec, coef0688);
5105 + snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
5106 + break;
5107 ++ case 0x10ec0225:
5108 ++ alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
5109 ++ snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
5110 ++ alc_process_coef_fw(codec, coef0225);
5111 ++ snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
5112 ++ break;
5113 + }
5114 + codec_dbg(codec, "Headset jack set to mic-in mode.\n");
5115 + }
5116 +@@ -3884,6 +3907,13 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
5117 + WRITE_COEF(0xc3, 0x0000),
5118 + {}
5119 + };
5120 ++ static struct coef_fw coef0225[] = {
5121 ++ UPDATE_COEF(0x45, 0x3f<<10, 0x35<<10),
5122 ++ UPDATE_COEF(0x49, 1<<8, 1<<8),
5123 ++ UPDATE_COEF(0x4a, 7<<6, 7<<6),
5124 ++ UPDATE_COEF(0x4a, 3<<4, 3<<4),
5125 ++ {}
5126 ++ };
5127 +
5128 + switch (codec->core.vendor_id) {
5129 + case 0x10ec0255:
5130 +@@ -3912,6 +3942,9 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
5131 + case 0x10ec0668:
5132 + alc_process_coef_fw(codec, coef0688);
5133 + break;
5134 ++ case 0x10ec0225:
5135 ++ alc_process_coef_fw(codec, coef0225);
5136 ++ break;
5137 + }
5138 + codec_dbg(codec, "Headset jack set to iPhone-style headset mode.\n");
5139 + }
5140 +@@ -3955,6 +3988,13 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
5141 + WRITE_COEF(0xc3, 0x0000),
5142 + {}
5143 + };
5144 ++ static struct coef_fw coef0225[] = {
5145 ++ UPDATE_COEF(0x45, 0x3f<<10, 0x39<<10),
5146 ++ UPDATE_COEF(0x49, 1<<8, 1<<8),
5147 ++ UPDATE_COEF(0x4a, 7<<6, 7<<6),
5148 ++ UPDATE_COEF(0x4a, 3<<4, 3<<4),
5149 ++ {}
5150 ++ };
5151 +
5152 + switch (codec->core.vendor_id) {
5153 + case 0x10ec0255:
5154 +@@ -3983,6 +4023,9 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
5155 + case 0x10ec0668:
5156 + alc_process_coef_fw(codec, coef0688);
5157 + break;
5158 ++ case 0x10ec0225:
5159 ++ alc_process_coef_fw(codec, coef0225);
5160 ++ break;
5161 + }
5162 + codec_dbg(codec, "Headset jack set to Nokia-style headset mode.\n");
5163 + }
5164 +@@ -4014,6 +4057,11 @@ static void alc_determine_headset_type(struct hda_codec *codec)
5165 + WRITE_COEF(0xc3, 0x0c00),
5166 + {}
5167 + };
5168 ++ static struct coef_fw coef0225[] = {
5169 ++ UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
5170 ++ UPDATE_COEF(0x49, 1<<8, 1<<8),
5171 ++ {}
5172 ++ };
5173 +
5174 + switch (codec->core.vendor_id) {
5175 + case 0x10ec0255:
5176 +@@ -4058,6 +4106,12 @@ static void alc_determine_headset_type(struct hda_codec *codec)
5177 + val = alc_read_coef_idx(codec, 0xbe);
5178 + is_ctia = (val & 0x1c02) == 0x1c02;
5179 + break;
5180 ++ case 0x10ec0225:
5181 ++ alc_process_coef_fw(codec, coef0225);
5182 ++ msleep(800);
5183 ++ val = alc_read_coef_idx(codec, 0x46);
5184 ++ is_ctia = (val & 0x00f0) == 0x00f0;
5185 ++ break;
5186 + }
5187 +
5188 + codec_dbg(codec, "Headset jack detected iPhone-style headset: %s\n",
5189 +@@ -5560,6 +5614,9 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5190 + {.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
5191 + {}
5192 + };
5193 ++#define ALC225_STANDARD_PINS \
5194 ++ {0x12, 0xb7a60130}, \
5195 ++ {0x21, 0x04211020}
5196 +
5197 + #define ALC256_STANDARD_PINS \
5198 + {0x12, 0x90a60140}, \
5199 +@@ -5581,6 +5638,12 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5200 + {0x21, 0x03211020}
5201 +
5202 + static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5203 ++ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
5204 ++ ALC225_STANDARD_PINS,
5205 ++ {0x14, 0x901701a0}),
5206 ++ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
5207 ++ ALC225_STANDARD_PINS,
5208 ++ {0x14, 0x901701b0}),
5209 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
5210 + {0x14, 0x90170110},
5211 + {0x21, 0x02211020}),
5212 +@@ -5906,6 +5969,9 @@ static int patch_alc269(struct hda_codec *codec)
5213 + spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
5214 + alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
5215 + break;
5216 ++ case 0x10ec0225:
5217 ++ spec->codec_variant = ALC269_TYPE_ALC225;
5218 ++ break;
5219 + }
5220 +
5221 + if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
5222 +@@ -6796,6 +6862,7 @@ static int patch_alc680(struct hda_codec *codec)
5223 + */
5224 + static const struct hda_device_id snd_hda_id_realtek[] = {
5225 + HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
5226 ++ HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
5227 + HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
5228 + HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
5229 + HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
5230 +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
5231 +index 2c7c5eb8b1e9..37b70f8e878f 100644
5232 +--- a/sound/pci/hda/patch_sigmatel.c
5233 ++++ b/sound/pci/hda/patch_sigmatel.c
5234 +@@ -493,9 +493,9 @@ static void jack_update_power(struct hda_codec *codec,
5235 + if (!spec->num_pwrs)
5236 + return;
5237 +
5238 +- if (jack && jack->tbl->nid) {
5239 +- stac_toggle_power_map(codec, jack->tbl->nid,
5240 +- snd_hda_jack_detect(codec, jack->tbl->nid),
5241 ++ if (jack && jack->nid) {
5242 ++ stac_toggle_power_map(codec, jack->nid,
5243 ++ snd_hda_jack_detect(codec, jack->nid),
5244 + true);
5245 + return;
5246 + }
5247 +diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
5248 +index 3e3c7f6be29d..b74840b5becf 100644
5249 +--- a/sound/soc/codecs/rt5645.c
5250 ++++ b/sound/soc/codecs/rt5645.c
5251 +@@ -621,7 +621,7 @@ static const struct snd_kcontrol_new rt5645_snd_controls[] = {
5252 +
5253 + /* IN1/IN2 Control */
5254 + SOC_SINGLE_TLV("IN1 Boost", RT5645_IN1_CTRL1,
5255 +- RT5645_BST_SFT1, 8, 0, bst_tlv),
5256 ++ RT5645_BST_SFT1, 12, 0, bst_tlv),
5257 + SOC_SINGLE_TLV("IN2 Boost", RT5645_IN2_CTRL,
5258 + RT5645_BST_SFT2, 8, 0, bst_tlv),
5259 +
5260 +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
5261 +index c86dc96e8986..65b936e251ea 100644
5262 +--- a/sound/soc/soc-pcm.c
5263 ++++ b/sound/soc/soc-pcm.c
5264 +@@ -1743,7 +1743,8 @@ int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
5265 + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
5266 + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
5267 + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
5268 +- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
5269 ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
5270 ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
5271 + continue;
5272 +
5273 + dev_dbg(be->dev, "ASoC: hw_free BE %s\n",
5274 +diff --git a/sound/sparc/Kconfig b/sound/sparc/Kconfig
5275 +index d75deba5617d..dfcd38647606 100644
5276 +--- a/sound/sparc/Kconfig
5277 ++++ b/sound/sparc/Kconfig
5278 +@@ -22,6 +22,7 @@ config SND_SUN_AMD7930
5279 + config SND_SUN_CS4231
5280 + tristate "Sun CS4231"
5281 + select SND_PCM
5282 ++ select SND_TIMER
5283 + help
5284 + Say Y here to include support for CS4231 sound device on Sun.
5285 +
5286 +diff --git a/sound/usb/midi.c b/sound/usb/midi.c
5287 +index 5b4c58c3e2c5..b21b76690b31 100644
5288 +--- a/sound/usb/midi.c
5289 ++++ b/sound/usb/midi.c
5290 +@@ -2454,7 +2454,6 @@ int snd_usbmidi_create(struct snd_card *card,
5291 + else
5292 + err = snd_usbmidi_create_endpoints(umidi, endpoints);
5293 + if (err < 0) {
5294 +- snd_usbmidi_free(umidi);
5295 + return err;
5296 + }
5297 +
5298 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
5299 +index 23ea6d800c4c..4f6ce1cac8e2 100644
5300 +--- a/sound/usb/quirks.c
5301 ++++ b/sound/usb/quirks.c
5302 +@@ -1121,6 +1121,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
5303 + switch (chip->usb_id) {
5304 + case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
5305 + case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
5306 ++ case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
5307 + case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
5308 + case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
5309 + case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
5310 +@@ -1205,8 +1206,12 @@ void snd_usb_set_interface_quirk(struct usb_device *dev)
5311 + * "Playback Design" products need a 50ms delay after setting the
5312 + * USB interface.
5313 + */
5314 +- if (le16_to_cpu(dev->descriptor.idVendor) == 0x23ba)
5315 ++ switch (le16_to_cpu(dev->descriptor.idVendor)) {
5316 ++ case 0x23ba: /* Playback Design */
5317 ++ case 0x0644: /* TEAC Corp. */
5318 + mdelay(50);
5319 ++ break;
5320 ++ }
5321 + }
5322 +
5323 + void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
5324 +@@ -1221,6 +1226,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
5325 + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
5326 + mdelay(20);
5327 +
5328 ++ /*
5329 ++ * "TEAC Corp." products need a 20ms delay after each
5330 ++ * class compliant request
5331 ++ */
5332 ++ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x0644) &&
5333 ++ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
5334 ++ mdelay(20);
5335 ++
5336 + /* Marantz/Denon devices with USB DAC functionality need a delay
5337 + * after each class compliant request
5338 + */
5339 +@@ -1269,7 +1282,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
5340 + case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */
5341 + case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
5342 + case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
5343 +- case USB_ID(0x22d8, 0x0416): /* OPPO HA-1*/
5344 ++ case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
5345 + if (fp->altsetting == 2)
5346 + return SNDRV_PCM_FMTBIT_DSD_U32_BE;
5347 + break;
5348 +@@ -1278,6 +1291,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
5349 + case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
5350 + case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
5351 + case USB_ID(0x20b1, 0x3023): /* Aune X1S 32BIT/384 DSD DAC */
5352 ++ case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
5353 + if (fp->altsetting == 3)
5354 + return SNDRV_PCM_FMTBIT_DSD_U32_BE;
5355 + break;