
From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Wed, 15 Feb 2017 16:22:35
Message-Id: 1487175740.bb3d53cb580146ec022c488681ed4863bf4587c4.alicef@gentoo
commit: bb3d53cb580146ec022c488681ed4863bf4587c4
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Feb 15 16:22:20 2017 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Feb 15 16:22:20 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bb3d53cb

Linux patch 4.4.49

0000_README | 4 +
1048_linux-4.4.49.patch | 531 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 535 insertions(+)

diff --git a/0000_README b/0000_README
index 44fe826..976dbf2 100644
--- a/0000_README
+++ b/0000_README
@@ -235,6 +235,10 @@ Patch: 1047_linux-4.4.48.patch
From: http://www.kernel.org
Desc: Linux 4.4.48

+Patch: 1048_linux-4.4.49.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.49
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1048_linux-4.4.49.patch b/1048_linux-4.4.49.patch
new file mode 100644
index 0000000..d1ccaa9
--- /dev/null
+++ b/1048_linux-4.4.49.patch
@@ -0,0 +1,531 @@
+diff --git a/Makefile b/Makefile
+index 0793cd412656..5fab6d4068b5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 48
++SUBLEVEL = 49
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
+index 91ebe382147f..5f69c3bd59bb 100644
+--- a/arch/arc/kernel/unaligned.c
++++ b/arch/arc/kernel/unaligned.c
+@@ -243,7 +243,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+
+ /* clear any remanants of delay slot */
+ if (delay_mode(regs)) {
+- regs->ret = regs->bta ~1U;
++ regs->ret = regs->bta & ~1U;
+ regs->status32 &= ~STATUS_DE_MASK;
+ } else {
+ regs->ret += state.instr_len;
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index 4d9375814b53..d54c53b7ab63 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,
+ const void *kbuf, const void __user *ubuf)
+ {
+ int ret;
+- struct pt_regs newregs;
++ struct pt_regs newregs = *task_pt_regs(target);
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &newregs,
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index daafcf121ce0..c095455d496e 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -610,9 +610,9 @@ static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
+
+ void __init early_abt_enable(void)
+ {
+- fsr_info[22].fn = early_abort_handler;
++ fsr_info[FSR_FS_AEA].fn = early_abort_handler;
+ local_abt_enable();
+- fsr_info[22].fn = do_bad;
++ fsr_info[FSR_FS_AEA].fn = do_bad;
+ }
+
+ #ifndef CONFIG_ARM_LPAE
+diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
+index 05ec5e0df32d..78830657cab3 100644
+--- a/arch/arm/mm/fault.h
++++ b/arch/arm/mm/fault.h
+@@ -11,11 +11,15 @@
+ #define FSR_FS5_0 (0x3f)
+
+ #ifdef CONFIG_ARM_LPAE
++#define FSR_FS_AEA 17
++
+ static inline int fsr_fs(unsigned int fsr)
+ {
+ return fsr & FSR_FS5_0;
+ }
+ #else
++#define FSR_FS_AEA 22
++
+ static inline int fsr_fs(unsigned int fsr)
+ {
+ return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 1e5d2f07416b..8ca533b8c606 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1875,7 +1875,6 @@ static struct irq_chip ioapic_chip __read_mostly = {
+ .irq_ack = irq_chip_ack_parent,
+ .irq_eoi = ioapic_ack_level,
+ .irq_set_affinity = ioapic_set_affinity,
+- .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+ };
+
+@@ -1887,7 +1886,6 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
+ .irq_ack = irq_chip_ack_parent,
+ .irq_eoi = ioapic_ir_ack_level,
+ .irq_set_affinity = ioapic_set_affinity,
+- .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+ };
+
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 909d1d71d130..4f5d07bb3511 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -3948,10 +3948,10 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
+ drm_crtc_vblank_put(&intel_crtc->base);
+
+ wake_up_all(&dev_priv->pending_flip_queue);
+- queue_work(dev_priv->wq, &work->work);
+-
+ trace_i915_flip_complete(intel_crtc->plane,
+ work->pending_flip_obj);
++
++ queue_work(dev_priv->wq, &work->work);
+ }
+
+ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+index bdbd80423b17..9ff2881f933d 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+@@ -900,9 +900,7 @@
+
+ static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
+ {
+- u8 __iomem *reg_addr = ACCESS_ONCE(base);
+-
+- writel(value, reg_addr + reg);
++ writel(value, base + reg);
+ }
+
+ #define dsaf_write_dev(a, reg, value) \
+@@ -910,9 +908,7 @@ static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
+
+ static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
+ {
+- u8 __iomem *reg_addr = ACCESS_ONCE(base);
+-
+- return readl(reg_addr + reg);
++ return readl(base + reg);
+ }
+
+ #define dsaf_read_dev(a, reg) \
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 7a601d8c615e..e8a09ff9e724 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -854,7 +854,6 @@ static int netvsc_set_channels(struct net_device *net,
+ }
+ goto recover;
+ }
+- netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE);
+
+ out:
+ netvsc_open(net);
+@@ -1142,6 +1141,7 @@ static int netvsc_probe(struct hv_device *dev,
+ nvdev = hv_get_drvdata(dev);
+ netif_set_real_num_tx_queues(net, nvdev->num_chn);
+ netif_set_real_num_rx_queues(net, nvdev->num_chn);
++ netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE);
+
+ ret = register_netdev(net);
+ if (ret != 0) {
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index d6abf191122a..1f445f357da1 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1391,6 +1391,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
+ for (i = 0; i < num_queues && info->queues; ++i) {
+ struct netfront_queue *queue = &info->queues[i];
+
++ del_timer_sync(&queue->rx_refill_timer);
++
+ if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
+ unbind_from_irqhandler(queue->tx_irq, queue);
+ if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
+@@ -1745,7 +1747,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
+
+ if (netif_running(info->netdev))
+ napi_disable(&queue->napi);
+- del_timer_sync(&queue->rx_refill_timer);
+ netif_napi_del(&queue->napi);
+ }
+
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index 75f820ca17b7..27ff38f839fc 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -1583,7 +1583,7 @@ out:
+ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
+ {
+ struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+- struct zfcp_fsf_req *req = NULL;
++ struct zfcp_fsf_req *req;
+ int retval = -EIO;
+
+ spin_lock_irq(&qdio->req_q_lock);
+@@ -1612,7 +1612,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
+ zfcp_fsf_req_free(req);
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+- if (req && !IS_ERR(req))
++ if (!retval)
+ zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
+ return retval;
+ }
+@@ -1638,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
+ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
+ {
+ struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+- struct zfcp_fsf_req *req = NULL;
++ struct zfcp_fsf_req *req;
+ int retval = -EIO;
+
+ spin_lock_irq(&qdio->req_q_lock);
+@@ -1667,7 +1667,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
+ zfcp_fsf_req_free(req);
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+- if (req && !IS_ERR(req))
++ if (!retval)
+ zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
+ return retval;
+ }
+diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
+index 0d351cd3191b..26d38b1a45ab 100644
+--- a/drivers/scsi/aacraid/comminit.c
++++ b/drivers/scsi/aacraid/comminit.c
+@@ -50,9 +50,13 @@ struct aac_common aac_config = {
+
+ static inline int aac_is_msix_mode(struct aac_dev *dev)
+ {
+- u32 status;
++ u32 status = 0;
+
+- status = src_readl(dev, MUnit.OMR);
++ if (dev->pdev->device == PMC_DEVICE_S6 ||
++ dev->pdev->device == PMC_DEVICE_S7 ||
++ dev->pdev->device == PMC_DEVICE_S8) {
++ status = src_readl(dev, MUnit.OMR);
++ }
+ return (status & AAC_INT_MODE_MSIX);
+ }
+
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 8cead04f26d6..f6a8e9958e75 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -51,6 +51,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/delay.h>
+ #include <linux/pci.h>
++#include <linux/pci-aspm.h>
+ #include <linux/interrupt.h>
+ #include <linux/aer.h>
+ #include <linux/raid_class.h>
+@@ -8483,6 +8484,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ switch (hba_mpi_version) {
+ case MPI2_VERSION:
++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
++ PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
+ /* Use mpt2sas driver host template for SAS 2.0 HBA's */
+ shost = scsi_host_alloc(&mpt2sas_driver_template,
+ sizeof(struct MPT3SAS_ADAPTER));
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index dcd5ed26eb18..356c80fbb304 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -362,7 +362,15 @@ int core_enable_device_list_for_node(
+ kfree(new);
+ return -EINVAL;
+ }
+- BUG_ON(orig->se_lun_acl != NULL);
++ if (orig->se_lun_acl != NULL) {
++ pr_warn_ratelimited("Detected existing explicit"
++ " se_lun_acl->se_lun_group reference for %s"
++ " mapped_lun: %llu, failing\n",
++ nacl->initiatorname, mapped_lun);
++ mutex_unlock(&nacl->lun_entry_mutex);
++ kfree(new);
++ return -EINVAL;
++ }
+
+ rcu_assign_pointer(new->se_lun, lun);
+ rcu_assign_pointer(new->se_lun_acl, lun_acl);
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index c220bb8dfa9d..2e27b1034ede 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -442,6 +442,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+ int *post_ret)
+ {
+ struct se_device *dev = cmd->se_dev;
++ sense_reason_t ret = TCM_NO_SENSE;
+
+ /*
+ * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
+@@ -449,9 +450,12 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+ * sent to the backend driver.
+ */
+ spin_lock_irq(&cmd->t_state_lock);
+- if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
++ if (cmd->transport_state & CMD_T_SENT) {
+ cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+ *post_ret = 1;
++
++ if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
++ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ spin_unlock_irq(&cmd->t_state_lock);
+
+@@ -461,7 +465,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+ */
+ up(&dev->caw_sem);
+
+- return TCM_NO_SENSE;
++ return ret;
+ }
+
+ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 2a67af4e2e13..aa517c4fadb9 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -3058,7 +3058,6 @@ static void target_tmr_work(struct work_struct *work)
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ goto check_stop;
+ }
+- cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ cmd->se_tfo->queue_tm_rsp(cmd);
+@@ -3071,11 +3070,25 @@ int transport_generic_handle_tmr(
+ struct se_cmd *cmd)
+ {
+ unsigned long flags;
++ bool aborted = false;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+- cmd->transport_state |= CMD_T_ACTIVE;
++ if (cmd->transport_state & CMD_T_ABORTED) {
++ aborted = true;
++ } else {
++ cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
++ cmd->transport_state |= CMD_T_ACTIVE;
++ }
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
++ if (aborted) {
++ pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
++ "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
++ cmd->se_tmr_req->ref_task_tag, cmd->tag);
++ transport_cmd_check_stop_to_fabric(cmd);
++ return 0;
++ }
++
+ INIT_WORK(&cmd->work, target_tmr_work);
+ queue_work(cmd->se_dev->tmr_wq, &cmd->work);
+ return 0;
+diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
+index 153a6f255b6d..6415e9b09a52 100644
+--- a/drivers/target/target_core_xcopy.c
++++ b/drivers/target/target_core_xcopy.c
+@@ -836,7 +836,7 @@ out:
+ " CHECK_CONDITION -> sending response\n", rc);
+ ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ }
+- target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
++ target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
+ }
+
+ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
+diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
+index 59915ea5373c..a91b3b75da0f 100644
+--- a/include/linux/cpumask.h
++++ b/include/linux/cpumask.h
+@@ -556,7 +556,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
+ static inline int cpumask_parse_user(const char __user *buf, int len,
+ struct cpumask *dstp)
+ {
+- return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
++ return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
+ }
+
+ /**
+@@ -571,7 +571,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
+ struct cpumask *dstp)
+ {
+ return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
+- nr_cpu_ids);
++ nr_cpumask_bits);
+ }
+
+ /**
+@@ -586,7 +586,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
+ char *nl = strchr(buf, '\n');
+ unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
+
+- return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
++ return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
+ }
+
+ /**
+@@ -598,7 +598,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
+ */
+ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
+ {
+- return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
++ return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
+ }
+
+ /**
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index f7bb6829b415..9063e8e736ad 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -355,7 +355,7 @@ int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata,
+ /* fast-forward to vendor IEs */
+ offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
+
+- if (offset) {
++ if (offset < ifmsh->ie_len) {
+ len = ifmsh->ie_len - offset;
+ data = ifmsh->ie + offset;
+ if (skb_tailroom(skb) < len)
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index d0cfaa9f19d0..4b56c3b6c25f 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -5640,7 +5640,7 @@ static int selinux_setprocattr(struct task_struct *p,
+ return error;
+
+ /* Obtain a SID for the context, if one was specified. */
+- if (size && str[1] && str[1] != '\n') {
++ if (size && str[0] && str[0] != '\n') {
+ if (str[size-1] == '\n') {
+ str[size-1] = 0;
+ size--;
+diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
+index c850345c43b5..dfa5156f3585 100644
+--- a/sound/core/seq/seq_memory.c
++++ b/sound/core/seq/seq_memory.c
+@@ -419,7 +419,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
+ {
+ unsigned long flags;
+ struct snd_seq_event_cell *ptr;
+- int max_count = 5 * HZ;
+
+ if (snd_BUG_ON(!pool))
+ return -EINVAL;
+@@ -432,14 +431,8 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
+ if (waitqueue_active(&pool->output_sleep))
+ wake_up(&pool->output_sleep);
+
+- while (atomic_read(&pool->counter) > 0) {
+- if (max_count == 0) {
+- pr_warn("ALSA: snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
+- break;
+- }
++ while (atomic_read(&pool->counter) > 0)
+ schedule_timeout_uninterruptible(1);
+- max_count--;
+- }
+
+ /* release all resources */
+ spin_lock_irqsave(&pool->lock, flags);
+diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
+index 0bec02e89d51..450c5187eecb 100644
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -181,6 +181,8 @@ void __exit snd_seq_queues_delete(void)
+ }
+ }
+
++static void queue_use(struct snd_seq_queue *queue, int client, int use);
++
+ /* allocate a new queue -
+ * return queue index value or negative value for error
+ */
+@@ -192,11 +194,11 @@ int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
+ if (q == NULL)
+ return -ENOMEM;
+ q->info_flags = info_flags;
++ queue_use(q, client, 1);
+ if (queue_list_add(q) < 0) {
+ queue_delete(q);
+ return -ENOMEM;
+ }
+- snd_seq_queue_use(q->queue, client, 1); /* use this queue */
+ return q->queue;
+ }
+
+@@ -502,19 +504,9 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client,
+ return result;
+ }
+
+-
+-/* use or unuse this queue -
+- * if it is the first client, starts the timer.
+- * if it is not longer used by any clients, stop the timer.
+- */
+-int snd_seq_queue_use(int queueid, int client, int use)
++/* use or unuse this queue */
++static void queue_use(struct snd_seq_queue *queue, int client, int use)
+ {
+- struct snd_seq_queue *queue;
+-
+- queue = queueptr(queueid);
+- if (queue == NULL)
+- return -EINVAL;
+- mutex_lock(&queue->timer_mutex);
+ if (use) {
+ if (!test_and_set_bit(client, queue->clients_bitmap))
+ queue->clients++;
+@@ -529,6 +521,21 @@ int snd_seq_queue_use(int queueid, int client, int use)
+ } else {
+ snd_seq_timer_close(queue);
+ }
++}
++
++/* use or unuse this queue -
++ * if it is the first client, starts the timer.
++ * if it is not longer used by any clients, stop the timer.
++ */
++int snd_seq_queue_use(int queueid, int client, int use)
++{
++ struct snd_seq_queue *queue;
++
++ queue = queueptr(queueid);
++ if (queue == NULL)
++ return -EINVAL;
++ mutex_lock(&queue->timer_mutex);
++ queue_use(queue, client, use);
+ mutex_unlock(&queue->timer_mutex);
+ queuefree(queue);
+ return 0;