Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Fri, 25 Aug 2017 10:59:22
Message-Id: 1503658750.399567b8a50e0c5f641df8c4df0de14f2655e652.mpagano@gentoo
commit: 399567b8a50e0c5f641df8c4df0de14f2655e652
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 25 10:59:10 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug 25 10:59:10 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=399567b8

Linux patch 4.9.45

0000_README | 4 +
1044_linux-4.9.45.patch | 1028 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1032 insertions(+)

diff --git a/0000_README b/0000_README
index 8800832..e142b57 100644
--- a/0000_README
+++ b/0000_README
@@ -219,6 +219,10 @@ Patch: 1043_linux-4.9.44.patch
From: http://www.kernel.org
Desc: Linux 4.9.44

+Patch: 1044_linux-4.9.45.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.45
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1044_linux-4.9.45.patch b/1044_linux-4.9.45.patch
new file mode 100644
index 0000000..0641b8f
--- /dev/null
+++ b/1044_linux-4.9.45.patch
@@ -0,0 +1,1028 @@
+diff --git a/Makefile b/Makefile
+index 3e95dfdbe572..ccd6d91f616e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 44
++SUBLEVEL = 45
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
+index afa23b057def..1fb023076dfc 100644
+--- a/arch/arm64/include/asm/elf.h
++++ b/arch/arm64/include/asm/elf.h
+@@ -114,10 +114,10 @@
+
+ /*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
++ * 64-bit, this is above 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+-#define ELF_ET_DYN_BASE 0x100000000UL
++#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
+
+ #ifndef __ASSEMBLY__
+
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index b249c2fb99c8..1c141d50fbc6 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -359,7 +359,8 @@ void enable_kernel_vsx(void)
+
+ cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+
+- if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
++ if (current->thread.regs &&
++ (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
+ check_if_tm_restore_required(current);
+ /*
+ * If a thread has already been reclaimed then the
+@@ -383,7 +384,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
+ {
+ if (tsk->thread.regs) {
+ preempt_disable();
+- if (tsk->thread.regs->msr & MSR_VSX) {
++ if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
+ BUG_ON(tsk != current);
+ giveup_vsx(tsk);
+ }
+diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+index 1cd792db15ef..1eab79c9ac48 100644
+--- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S
++++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+@@ -117,11 +117,10 @@
+ .set T1, REG_T1
+ .endm
+
+-#define K_BASE %r8
+ #define HASH_PTR %r9
++#define BLOCKS_CTR %r8
+ #define BUFFER_PTR %r10
+ #define BUFFER_PTR2 %r13
+-#define BUFFER_END %r11
+
+ #define PRECALC_BUF %r14
+ #define WK_BUF %r15
+@@ -205,14 +204,14 @@
+ * blended AVX2 and ALU instruction scheduling
+ * 1 vector iteration per 8 rounds
+ */
+- vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
++ vmovdqu (i * 2)(BUFFER_PTR), W_TMP
+ .elseif ((i & 7) == 1)
+- vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
++ vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\
+ WY_TMP, WY_TMP
+ .elseif ((i & 7) == 2)
+ vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
+ .elseif ((i & 7) == 4)
+- vpaddd K_XMM(K_BASE), WY, WY_TMP
++ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
+ .elseif ((i & 7) == 7)
+ vmovdqu WY_TMP, PRECALC_WK(i&~7)
+
+@@ -255,7 +254,7 @@
+ vpxor WY, WY_TMP, WY_TMP
+ .elseif ((i & 7) == 7)
+ vpxor WY_TMP2, WY_TMP, WY
+- vpaddd K_XMM(K_BASE), WY, WY_TMP
++ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
+ vmovdqu WY_TMP, PRECALC_WK(i&~7)
+
+ PRECALC_ROTATE_WY
+@@ -291,7 +290,7 @@
+ vpsrld $30, WY, WY
+ vpor WY, WY_TMP, WY
+ .elseif ((i & 7) == 7)
+- vpaddd K_XMM(K_BASE), WY, WY_TMP
++ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
+ vmovdqu WY_TMP, PRECALC_WK(i&~7)
+
+ PRECALC_ROTATE_WY
+@@ -446,6 +445,16 @@
+
+ .endm
+
++/* Add constant only if (%2 > %3) condition met (uses RTA as temp)
++ * %1 + %2 >= %3 ? %4 : 0
++ */
++.macro ADD_IF_GE a, b, c, d
++ mov \a, RTA
++ add $\d, RTA
++ cmp $\c, \b
++ cmovge RTA, \a
++.endm
++
+ /*
+ * macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
+ */
+@@ -463,13 +472,16 @@
+ lea (2*4*80+32)(%rsp), WK_BUF
+
+ # Precalc WK for first 2 blocks
+- PRECALC_OFFSET = 0
++ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64
+ .set i, 0
+ .rept 160
+ PRECALC i
+ .set i, i + 1
+ .endr
+- PRECALC_OFFSET = 128
++
++ /* Go to next block if needed */
++ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128
++ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
+ xchg WK_BUF, PRECALC_BUF
+
+ .align 32
+@@ -479,8 +491,8 @@ _loop:
+ * we use K_BASE value as a signal of a last block,
+ * it is set below by: cmovae BUFFER_PTR, K_BASE
+ */
+- cmp K_BASE, BUFFER_PTR
+- jne _begin
++ test BLOCKS_CTR, BLOCKS_CTR
++ jnz _begin
+ .align 32
+ jmp _end
+ .align 32
+@@ -512,10 +524,10 @@ _loop0:
+ .set j, j+2
+ .endr
+
+- add $(2*64), BUFFER_PTR /* move to next odd-64-byte block */
+- cmp BUFFER_END, BUFFER_PTR /* is current block the last one? */
+- cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
+-
++ /* Update Counter */
++ sub $1, BLOCKS_CTR
++ /* Move to the next block only if needed*/
++ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128
+ /*
+ * rounds
+ * 60,62,64,66,68
+@@ -532,8 +544,8 @@ _loop0:
+ UPDATE_HASH 12(HASH_PTR), D
+ UPDATE_HASH 16(HASH_PTR), E
+
+- cmp K_BASE, BUFFER_PTR /* is current block the last one? */
+- je _loop
++ test BLOCKS_CTR, BLOCKS_CTR
++ jz _loop
+
+ mov TB, B
+
+@@ -575,10 +587,10 @@ _loop2:
+ .set j, j+2
+ .endr
+
+- add $(2*64), BUFFER_PTR2 /* move to next even-64-byte block */
+-
+- cmp BUFFER_END, BUFFER_PTR2 /* is current block the last one */
+- cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
++ /* update counter */
++ sub $1, BLOCKS_CTR
++ /* Move to the next block only if needed*/
++ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
+
+ jmp _loop3
+ _loop3:
+@@ -641,19 +653,12 @@ _loop3:
+
+ avx2_zeroupper
+
+- lea K_XMM_AR(%rip), K_BASE
+-
++ /* Setup initial values */
+ mov CTX, HASH_PTR
+ mov BUF, BUFFER_PTR
+- lea 64(BUF), BUFFER_PTR2
+-
+- shl $6, CNT /* mul by 64 */
+- add BUF, CNT
+- add $64, CNT
+- mov CNT, BUFFER_END
+
+- cmp BUFFER_END, BUFFER_PTR2
+- cmovae K_BASE, BUFFER_PTR2
++ mov BUF, BUFFER_PTR2
++ mov CNT, BLOCKS_CTR
+
+ xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP
+
+diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
+index f960a043cdeb..fc61739150e7 100644
+--- a/arch/x86/crypto/sha1_ssse3_glue.c
++++ b/arch/x86/crypto/sha1_ssse3_glue.c
+@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
+
+ static bool avx2_usable(void)
+ {
+- if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
++ if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+ && boot_cpu_has(X86_FEATURE_BMI1)
+ && boot_cpu_has(X86_FEATURE_BMI2))
+ return true;
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index ef766a358b37..e7b0e7ff4c58 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -1215,6 +1215,8 @@ ENTRY(nmi)
+ * other IST entries.
+ */
+
++ ASM_CLAC
++
+ /* Use %rdx as our temp variable throughout */
+ pushq %rdx
+
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index c152db2ab687..b31761ecce63 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -247,11 +247,11 @@ extern int force_personality32;
+
+ /*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
++ * 64-bit, this is above 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+ #define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
+- 0x100000000UL)
++ (TASK_SIZE / 3 * 2))
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
+index 966c2169762e..ee9d3d958fbe 100644
+--- a/block/blk-mq-pci.c
++++ b/block/blk-mq-pci.c
+@@ -36,12 +36,18 @@ int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev)
+ for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ mask = pci_irq_get_affinity(pdev, queue);
+ if (!mask)
+- return -EINVAL;
++ goto fallback;
+
+ for_each_cpu(cpu, mask)
+ set->mq_map[cpu] = queue;
+ }
+
+ return 0;
++
++fallback:
++ WARN_ON_ONCE(set->nr_hw_queues > 1);
++ for_each_possible_cpu(cpu)
++ set->mq_map[cpu] = 0;
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 9908597c5209..f11d62de2272 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -2112,9 +2112,9 @@ static int blkfront_resume(struct xenbus_device *dev)
+ /*
+ * Get the bios in the request so we can re-queue them.
+ */
+- if (req_op(shadow[i].request) == REQ_OP_FLUSH ||
+- req_op(shadow[i].request) == REQ_OP_DISCARD ||
+- req_op(shadow[i].request) == REQ_OP_SECURE_ERASE ||
++ if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
++ req_op(shadow[j].request) == REQ_OP_DISCARD ||
++ req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
+ shadow[j].request->cmd_flags & REQ_FUA) {
+ /*
+ * Flush operations don't contain bios, so
+diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
+index 7868765a70c5..b54af97a20bb 100644
+--- a/drivers/crypto/ixp4xx_crypto.c
++++ b/drivers/crypto/ixp4xx_crypto.c
+@@ -1074,7 +1074,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
+ req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
+ &crypt->icv_rev_aes);
+ if (unlikely(!req_ctx->hmac_virt))
+- goto free_buf_src;
++ goto free_buf_dst;
+ if (!encrypt) {
+ scatterwalk_map_and_copy(req_ctx->hmac_virt,
+ req->src, cryptlen, authsize, 0);
+@@ -1089,10 +1089,10 @@ static int aead_perform(struct aead_request *req, int encrypt,
+ BUG_ON(qmgr_stat_overflow(SEND_QID));
+ return -EINPROGRESS;
+
+-free_buf_src:
+- free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+ free_buf_dst:
+ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
++free_buf_src:
++ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+ crypt->ctl_flags = CTL_FLAG_UNUSED;
+ return -ENOMEM;
+ }
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index da5458dfb1e3..98d4e515587a 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1235,6 +1235,10 @@ static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN0100", 0 },
+ { "ELAN0600", 0 },
+ { "ELAN0605", 0 },
++ { "ELAN0608", 0 },
++ { "ELAN0605", 0 },
++ { "ELAN0609", 0 },
++ { "ELAN060B", 0 },
+ { "ELAN1000", 0 },
+ { }
+ };
+diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
+index 28b26c80f4cf..056507099725 100644
+--- a/drivers/irqchip/irq-atmel-aic-common.c
++++ b/drivers/irqchip/irq-atmel-aic-common.c
+@@ -142,9 +142,9 @@ void __init aic_common_rtc_irq_fixup(struct device_node *root)
+ struct device_node *np;
+ void __iomem *regs;
+
+- np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc");
++ np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc");
+ if (!np)
+- np = of_find_compatible_node(root, NULL,
++ np = of_find_compatible_node(NULL, NULL,
+ "atmel,at91sam9x5-rtc");
+
+ if (!np)
+@@ -196,7 +196,6 @@ static void __init aic_common_irq_fixup(const struct of_device_id *matches)
+ return;
+
+ match = of_match_node(matches, root);
+- of_node_put(root);
+
+ if (match) {
+ void (*fixup)(struct device_node *) = match->data;
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 2f260c63c383..49a27dc46e5e 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -876,6 +876,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
+ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
+ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
++ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
+ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
+ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
+ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
+diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
+index 5c63b920b471..ed92c1254cff 100644
+--- a/drivers/parisc/dino.c
++++ b/drivers/parisc/dino.c
+@@ -956,7 +956,7 @@ static int __init dino_probe(struct parisc_device *dev)
+
+ dino_dev->hba.dev = dev;
+ dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
+- dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */
++ dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
+ spin_lock_init(&dino_dev->dinosaur_pen);
+ dino_dev->hba.iommu = ccio_get_iommu(dev);
+
+diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
+index 2776cfe64c09..ef9cf4a21afe 100644
+--- a/drivers/usb/core/usb-acpi.c
++++ b/drivers/usb/core/usb-acpi.c
+@@ -127,6 +127,22 @@ static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
+ */
+ #define USB_ACPI_LOCATION_VALID (1 << 31)
+
++static struct acpi_device *usb_acpi_find_port(struct acpi_device *parent,
++ int raw)
++{
++ struct acpi_device *adev;
++
++ if (!parent)
++ return NULL;
++
++ list_for_each_entry(adev, &parent->children, node) {
++ if (acpi_device_adr(adev) == raw)
++ return adev;
++ }
++
++ return acpi_find_child_device(parent, raw, false);
++}
++
+ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+ {
+ struct usb_device *udev;
+@@ -174,8 +190,10 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+ int raw;
+
+ raw = usb_hcd_find_raw_port_number(hcd, port1);
+- adev = acpi_find_child_device(ACPI_COMPANION(&udev->dev),
+- raw, false);
++
++ adev = usb_acpi_find_port(ACPI_COMPANION(&udev->dev),
++ raw);
++
+ if (!adev)
+ return NULL;
+ } else {
+@@ -186,7 +204,9 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+ return NULL;
+
+ acpi_bus_get_device(parent_handle, &adev);
+- adev = acpi_find_child_device(adev, port1, false);
++
++ adev = usb_acpi_find_port(adev, port1);
++
+ if (!adev)
+ return NULL;
+ }
+diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
+index 4da69dbf7dca..1bdd02a6d6ac 100644
+--- a/drivers/xen/biomerge.c
++++ b/drivers/xen/biomerge.c
+@@ -10,8 +10,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+ unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
+ unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
+
+- return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
+- ((bfn1 == bfn2) || ((bfn1+1) == bfn2));
++ return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
+ #else
+ /*
+ * XXX: Add support for merging bio_vec when using different page
+diff --git a/include/linux/memblock.h b/include/linux/memblock.h
+index e8fba68e5d03..4024af00b137 100644
+--- a/include/linux/memblock.h
++++ b/include/linux/memblock.h
+@@ -64,6 +64,7 @@ extern bool movable_node_enabled;
+ #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+ #define __init_memblock __meminit
+ #define __initdata_memblock __meminitdata
++void memblock_discard(void);
+ #else
+ #define __init_memblock
+ #define __initdata_memblock
+@@ -77,8 +78,6 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
+ int nid, ulong flags);
+ phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
+ phys_addr_t size, phys_addr_t align);
+-phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
+-phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
+ void memblock_allow_resize(void);
+ int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+ int memblock_add(phys_addr_t base, phys_addr_t size);
+@@ -112,6 +111,9 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
+ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
+ phys_addr_t *out_end);
+
++void __memblock_free_early(phys_addr_t base, phys_addr_t size);
++void __memblock_free_late(phys_addr_t base, phys_addr_t size);
++
+ /**
+ * for_each_mem_range - iterate through memblock areas from type_a and not
+ * included in type_b. Or just type_a if type_b is NULL.
+diff --git a/include/linux/pid.h b/include/linux/pid.h
+index 23705a53abba..97b745ddece5 100644
+--- a/include/linux/pid.h
++++ b/include/linux/pid.h
+@@ -8,7 +8,9 @@ enum pid_type
+ PIDTYPE_PID,
+ PIDTYPE_PGID,
+ PIDTYPE_SID,
+- PIDTYPE_MAX
++ PIDTYPE_MAX,
++ /* only valid to __task_pid_nr_ns() */
++ __PIDTYPE_TGID
+ };
+
+ /*
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 14f58cf06054..a4d0afc009a7 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2132,31 +2132,8 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk)
+ return tsk->tgid;
+ }
+
+-pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
+-
+-static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+-{
+- return pid_vnr(task_tgid(tsk));
+-}
+-
+
+ static inline int pid_alive(const struct task_struct *p);
+-static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+-{
+- pid_t pid = 0;
+-
+- rcu_read_lock();
+- if (pid_alive(tsk))
+- pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+- rcu_read_unlock();
+-
+- return pid;
+-}
+-
+-static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+-{
+- return task_ppid_nr_ns(tsk, &init_pid_ns);
+-}
+
+ static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
+ struct pid_namespace *ns)
+@@ -2181,6 +2158,33 @@ static inline pid_t task_session_vnr(struct task_struct *tsk)
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
+ }
+
++static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
++{
++ return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
++}
++
++static inline pid_t task_tgid_vnr(struct task_struct *tsk)
++{
++ return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
++}
++
++static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
++{
++ pid_t pid = 0;
++
++ rcu_read_lock();
++ if (pid_alive(tsk))
++ pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
++ rcu_read_unlock();
++
++ return pid;
++}
++
++static inline pid_t task_ppid_nr(const struct task_struct *tsk)
++{
++ return task_ppid_nr_ns(tsk, &init_pid_ns);
++}
++
+ /* obsolete, do not use */
+ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+ {
+diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
+index 0d302a87f21b..690e1e3c59f7 100644
+--- a/kernel/audit_watch.c
++++ b/kernel/audit_watch.c
+@@ -457,13 +457,15 @@ void audit_remove_watch_rule(struct audit_krule *krule)
+ list_del(&krule->rlist);
+
+ if (list_empty(&watch->rules)) {
++ /*
++ * audit_remove_watch() drops our reference to 'parent' which
++ * can get freed. Grab our own reference to be safe.
++ */
++ audit_get_parent(parent);
+ audit_remove_watch(watch);
+-
+- if (list_empty(&parent->watches)) {
+- audit_get_parent(parent);
++ if (list_empty(&parent->watches))
+ fsnotify_destroy_mark(&parent->mark, audit_watch_group);
+- audit_put_parent(parent);
+- }
++ audit_put_parent(parent);
+ }
+ }
+
+diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
+index 077c87f40f4d..f30110e1b8c9 100644
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -895,13 +895,15 @@ EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
+
+ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
+ {
+- unsigned long flags;
++ unsigned long flags, trigger, tmp;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
+
+ if (!desc)
+ return;
+ irq_settings_clr_and_set(desc, clr, set);
+
++ trigger = irqd_get_trigger_type(&desc->irq_data);
++
+ irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
+ IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
+ if (irq_settings_has_no_balance_set(desc))
+@@ -913,7 +915,11 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
+ if (irq_settings_is_level(desc))
+ irqd_set(&desc->irq_data, IRQD_LEVEL);
+
+- irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
++ tmp = irq_settings_get_trigger_mask(desc);
++ if (tmp != IRQ_TYPE_NONE)
++ trigger = tmp;
++
++ irqd_set(&desc->irq_data, trigger);
+
+ irq_put_desc_unlock(desc, flags);
+ }
+diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
+index 1a9abc1c8ea0..259a22aa9934 100644
+--- a/kernel/irq/ipi.c
++++ b/kernel/irq/ipi.c
+@@ -165,7 +165,7 @@ irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
+ struct irq_data *data = irq_get_irq_data(irq);
+ struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
+
+- if (!data || !ipimask || cpu > nr_cpu_ids)
++ if (!data || !ipimask || cpu >= nr_cpu_ids)
+ return INVALID_HWIRQ;
+
+ if (!cpumask_test_cpu(cpu, ipimask))
+@@ -195,7 +195,7 @@ static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
+ if (!chip->ipi_send_single && !chip->ipi_send_mask)
+ return -EINVAL;
+
+- if (cpu > nr_cpu_ids)
++ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ if (dest) {
+diff --git a/kernel/pid.c b/kernel/pid.c
+index f66162f2359b..693a64385d59 100644
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -526,8 +526,11 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+ if (!ns)
+ ns = task_active_pid_ns(current);
+ if (likely(pid_alive(task))) {
+- if (type != PIDTYPE_PID)
++ if (type != PIDTYPE_PID) {
++ if (type == __PIDTYPE_TGID)
++ type = PIDTYPE_PID;
+ task = task->group_leader;
++ }
+ nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
+ }
+ rcu_read_unlock();
+@@ -536,12 +539,6 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+ }
+ EXPORT_SYMBOL(__task_pid_nr_ns);
+
+-pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+-{
+- return pid_nr_ns(task_tgid(tsk), ns);
+-}
+-EXPORT_SYMBOL(task_tgid_nr_ns);
+-
+ struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
+ {
+ return ns_of_pid(task_pid(tsk));
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 68849d0ead09..ccec42c12ba8 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -297,31 +297,27 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
+ }
+
+ #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+-
+-phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
+- phys_addr_t *addr)
+-{
+- if (memblock.reserved.regions == memblock_reserved_init_regions)
+- return 0;
+-
+- *addr = __pa(memblock.reserved.regions);
+-
+- return PAGE_ALIGN(sizeof(struct memblock_region) *
+- memblock.reserved.max);
+-}
+-
+-phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
+- phys_addr_t *addr)
++/**
++ * Discard memory and reserved arrays if they were allocated
++ */
++void __init memblock_discard(void)
+ {
+- if (memblock.memory.regions == memblock_memory_init_regions)
+- return 0;
++ phys_addr_t addr, size;
+
+- *addr = __pa(memblock.memory.regions);
++ if (memblock.reserved.regions != memblock_reserved_init_regions) {
++ addr = __pa(memblock.reserved.regions);
++ size = PAGE_ALIGN(sizeof(struct memblock_region) *
++ memblock.reserved.max);
++ __memblock_free_late(addr, size);
++ }
+
+- return PAGE_ALIGN(sizeof(struct memblock_region) *
+- memblock.memory.max);
++ if (memblock.memory.regions == memblock_memory_init_regions) {
++ addr = __pa(memblock.memory.regions);
++ size = PAGE_ALIGN(sizeof(struct memblock_region) *
++ memblock.memory.max);
++ __memblock_free_late(addr, size);
++ }
+ }
+-
+ #endif
+
+ /**
+diff --git a/mm/memory.c b/mm/memory.c
+index 9bf3da0d0e14..d064caff9d7d 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3635,8 +3635,18 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+ * further.
+ */
+ if (unlikely((current->flags & PF_KTHREAD) && !(ret & VM_FAULT_ERROR)
+- && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags)))
++ && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags))) {
++
++ /*
++ * We are going to enforce SIGBUS but the PF path might have
++ * dropped the mmap_sem already so take it again so that
++ * we do not break expectations of all arch specific PF paths
++ * and g-u-p
++ */
++ if (ret & VM_FAULT_RETRY)
++ down_read(&vma->vm_mm->mmap_sem);
+ ret = VM_FAULT_SIGBUS;
++ }
+
+ return ret;
+ }
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 23471526d424..a8ab5e73dc61 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -926,11 +926,6 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
+ *policy |= (pol->flags & MPOL_MODE_FLAGS);
+ }
+
+- if (vma) {
+- up_read(&current->mm->mmap_sem);
+- vma = NULL;
+- }
+-
+ err = 0;
+ if (nmask) {
+ if (mpol_store_user_nodemask(pol)) {
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 6850f62998cd..821623fc7091 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -40,6 +40,7 @@
+ #include <linux/mmu_notifier.h>
+ #include <linux/page_idle.h>
+ #include <linux/page_owner.h>
++#include <linux/ptrace.h>
+
+ #include <asm/tlbflush.h>
+
+@@ -1663,7 +1664,6 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+ const int __user *, nodes,
+ int __user *, status, int, flags)
+ {
+- const struct cred *cred = current_cred(), *tcred;
+ struct task_struct *task;
+ struct mm_struct *mm;
+ int err;
+@@ -1687,14 +1687,9 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+
+ /*
+ * Check if this process has the right to modify the specified
+- * process. The right exists if the process has administrative
+- * capabilities, superuser privileges or the same
+- * userid as the target process.
++ * process. Use the regular "ptrace_may_access()" checks.
+ */
+- tcred = __task_cred(task);
+- if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
+- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
+- !capable(CAP_SYS_NICE)) {
++ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
+ rcu_read_unlock();
+ err = -EPERM;
+ goto out;
+diff --git a/mm/nobootmem.c b/mm/nobootmem.c
+index 487dad610731..ab998125f04d 100644
+--- a/mm/nobootmem.c
++++ b/mm/nobootmem.c
+@@ -146,22 +146,6 @@ static unsigned long __init free_low_memory_core_early(void)
+ NULL)
+ count += __free_memory_core(start, end);
+
+-#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+- {
+- phys_addr_t size;
+-
+- /* Free memblock.reserved array if it was allocated */
+- size = get_allocated_memblock_reserved_regions_info(&start);
+- if (size)
+- count += __free_memory_core(start, start + size);
+-
+- /* Free memblock.memory array if it was allocated */
+- size = get_allocated_memblock_memory_regions_info(&start);
+- if (size)
+- count += __free_memory_core(start, start + size);
+- }
+-#endif
+-
+ return count;
+ }
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 9419aa4e5441..2abf8d5f0ad4 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1587,6 +1587,10 @@ void __init page_alloc_init_late(void)
+ /* Reinit limits that are based on free pages after the kernel is up */
+ files_maxfiles_init();
+ #endif
++#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
++ /* Discard memblock private memory */
++ memblock_discard();
++#endif
+
+ for_each_populated_zone(zone)
+ set_zone_contiguous(zone);
+diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
+index 02bcf00c2492..008299b7f78f 100644
+--- a/net/netfilter/nf_conntrack_extend.c
++++ b/net/netfilter/nf_conntrack_extend.c
+@@ -53,7 +53,11 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
+
+ rcu_read_lock();
+ t = rcu_dereference(nf_ct_ext_types[id]);
+- BUG_ON(t == NULL);
++ if (!t) {
++ rcu_read_unlock();
++ return NULL;
++ }
++
+ off = ALIGN(sizeof(struct nf_ct_ext), t->align);
+ len = off + t->len + var_alloc_len;
+ alloc_size = t->alloc_size + var_alloc_len;
+@@ -88,7 +92,10 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
+
+ rcu_read_lock();
+ t = rcu_dereference(nf_ct_ext_types[id]);
+- BUG_ON(t == NULL);
++ if (!t) {
++ rcu_read_unlock();
++ return NULL;
++ }
+
+ newoff = ALIGN(old->len, t->align);
+ newlen = newoff + t->len + var_alloc_len;
+@@ -175,6 +182,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
+ RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
+ update_alloc_size(type);
+ mutex_unlock(&nf_ct_ext_type_mutex);
+- rcu_barrier(); /* Wait for completion of call_rcu()'s */
++ synchronize_rcu();
+ }
+ EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index f3b1d7f50b81..67c4c68ce041 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1502,16 +1502,11 @@ static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
+ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
+ {
+ struct snd_seq_queue_info *info = arg;
+- int result;
+ struct snd_seq_queue *q;
+
+- result = snd_seq_queue_alloc(client->number, info->locked, info->flags);
+- if (result < 0)
+- return result;
+-
+- q = queueptr(result);
+- if (q == NULL)
+- return -EINVAL;
++ q = snd_seq_queue_alloc(client->number, info->locked, info->flags);
++ if (IS_ERR(q))
++ return PTR_ERR(q);
+
+ info->queue = q->queue;
+ info->locked = q->locked;
+@@ -1521,7 +1516,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
+ if (!info->name[0])
+ snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
+ strlcpy(q->name, info->name, sizeof(q->name));
+- queuefree(q);
++ snd_use_lock_free(&q->use_lock);
+
+ return 0;
+ }
+diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
+index 450c5187eecb..79e0c5604ef8 100644
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -184,22 +184,26 @@ void __exit snd_seq_queues_delete(void)
+ static void queue_use(struct snd_seq_queue *queue, int client, int use);
+
+ /* allocate a new queue -
+- * return queue index value or negative value for error
++ * return pointer to new queue or ERR_PTR(-errno) for error
++ * The new queue's use_lock is set to 1. It is the caller's responsibility to
++ * call snd_use_lock_free(&q->use_lock).
+ */
+-int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
++struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
+ {
+ struct snd_seq_queue *q;
+
+ q = queue_new(client, locked);
+ if (q == NULL)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+ q->info_flags = info_flags;
+ queue_use(q, client, 1);
++ snd_use_lock_use(&q->use_lock);
+ if (queue_list_add(q) < 0) {
++ snd_use_lock_free(&q->use_lock);
+ queue_delete(q);
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+ }
+- return q->queue;
++ return q;
+ }
+
+ /* delete a queue - queue must be owned by the client */
+diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h
+index 30c8111477f6..719093489a2c 100644
+--- a/sound/core/seq/seq_queue.h
++++ b/sound/core/seq/seq_queue.h
+@@ -71,7 +71,7 @@ void snd_seq_queues_delete(void);
+
+
+ /* create new queue (constructor) */
+-int snd_seq_queue_alloc(int client, int locked, unsigned int flags);
++struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int flags);
+
+ /* delete queue (destructor) */
+ int snd_seq_queue_delete(int client, int queueid);
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 4703caea56b2..d09c28c1deaf 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -542,6 +542,8 @@ int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
+
+ if (size < sizeof(scale))
+ return -ENOMEM;
++ if (cval->min_mute)
++ scale[0] = SNDRV_CTL_TLVT_DB_MINMAX_MUTE;
+ scale[2] = cval->dBmin;
+ scale[3] = cval->dBmax;
+ if (copy_to_user(_tlv, scale, sizeof(scale)))
+diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
+index 3417ef347e40..2b4b067646ab 100644
+--- a/sound/usb/mixer.h
++++ b/sound/usb/mixer.h
+@@ -64,6 +64,7 @@ struct usb_mixer_elem_info {
+ int cached;
+ int cache_val[MAX_CHANNELS];
+ u8 initialized;
++ u8 min_mute;
+ void *private_data;
+ };
+
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 04991b009132..5d2fc5f58bfe 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -1873,6 +1873,12 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
+ if (unitid == 7 && cval->control == UAC_FU_VOLUME)
+ snd_dragonfly_quirk_db_scale(mixer, cval, kctl);
+ break;
++ /* lowest playback value is muted on C-Media devices */
++ case USB_ID(0x0d8c, 0x000c):
++ case USB_ID(0x0d8c, 0x0014):
++ if (strstr(kctl->id.name, "Playback"))
++ cval->min_mute = 1;
++ break;
+ }
+ }
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index eb4b9f7a571e..95c2749ac8a3 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1142,6 +1142,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
+ case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
+ case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
++ case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
+ case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
+ case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
+ case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */