Author: mpagano
Date: 2013-08-31 21:56:55 +0000 (Sat, 31 Aug 2013)
New Revision: 2504

Added:
genpatches-2.6/trunk/3.4/1059_linux-3.4.60.patch
Modified:
genpatches-2.6/trunk/3.4/0000_README
Log:
Linux patch 3.4.60

Modified: genpatches-2.6/trunk/3.4/0000_README
===================================================================
--- genpatches-2.6/trunk/3.4/0000_README 2013-08-31 19:42:10 UTC (rev 2503)
+++ genpatches-2.6/trunk/3.4/0000_README 2013-08-31 21:56:55 UTC (rev 2504)
@@ -275,6 +275,10 @@
From: http://www.kernel.org
Desc: Linux 3.4.59

+Patch: 1059_linux-3.4.60.patch
+From: http://www.kernel.org
+Desc: Linux 3.4.60
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

Added: genpatches-2.6/trunk/3.4/1059_linux-3.4.60.patch
===================================================================
--- genpatches-2.6/trunk/3.4/1059_linux-3.4.60.patch (rev 0)
+++ genpatches-2.6/trunk/3.4/1059_linux-3.4.60.patch 2013-08-31 21:56:55 UTC (rev 2504)
@@ -0,0 +1,579 @@
+diff --git a/Makefile b/Makefile
+index efa1453..0027fbe 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 4
+-SUBLEVEL = 59
++SUBLEVEL = 60
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index 017d48a..f8b0260 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -213,6 +213,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
+ e820_add_region(start, end - start, type);
+ }
+
++void xen_ignore_unusable(struct e820entry *list, size_t map_size)
++{
++ struct e820entry *entry;
++ unsigned int i;
++
++ for (i = 0, entry = list; i < map_size; i++, entry++) {
++ if (entry->type == E820_UNUSABLE)
++ entry->type = E820_RAM;
++ }
++}
++
+ /**
+ * machine_specific_memory_setup - Hook for machine specific memory setup.
+ **/
+@@ -251,6 +262,17 @@ char * __init xen_memory_setup(void)
+ }
+ BUG_ON(rc);
+
++ /*
++ * Xen won't allow a 1:1 mapping to be created to UNUSABLE
++ * regions, so if we're using the machine memory map leave the
++ * region as RAM as it is in the pseudo-physical map.
++ *
++ * UNUSABLE regions in domUs are not handled and will need
++ * a patch in the future.
++ */
++ if (xen_initial_domain())
++ xen_ignore_unusable(map, memmap.nr_entries);
++
+ /* Make sure the Xen-supplied memory map is well-ordered. */
+ sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
+
+diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
+index f63a588..f5c35be 100644
+--- a/drivers/ata/libata-pmp.c
++++ b/drivers/ata/libata-pmp.c
+@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
+
+ /* Disable sending Early R_OK.
+ * With "cached read" HDD testing and multiple ports busy on a SATA
+- * host controller, 3726 PMP will very rarely drop a deferred
++ * host controller, 3x26 PMP will very rarely drop a deferred
+ * R_OK that was intended for the host. Symptom will be all
+ * 5 drives under test will timeout, get reset, and recover.
+ */
+- if (vendor == 0x1095 && devid == 0x3726) {
++ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+ u32 reg;
+
+ err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
+ if (err_mask) {
+ rc = -EIO;
+- reason = "failed to read Sil3726 Private Register";
++ reason = "failed to read Sil3x26 Private Register";
+ goto fail;
+ }
+ reg &= ~0x1;
+ err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
+ if (err_mask) {
+ rc = -EIO;
+- reason = "failed to write Sil3726 Private Register";
++ reason = "failed to write Sil3x26 Private Register";
+ goto fail;
+ }
+ }
+@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
+ u16 devid = sata_pmp_gscr_devid(gscr);
+ struct ata_link *link;
+
+- if (vendor == 0x1095 && devid == 0x3726) {
+- /* sil3726 quirks */
++ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
++ /* sil3x26 quirks */
+ ata_for_each_link(link, ap, EDGE) {
+ /* link reports offline after LPM */
+ link->flags |= ATA_LFLAG_NO_LPM;
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index dde62bf..d031932 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -502,6 +502,8 @@
+ will not assert AGPBUSY# and will only
+ be delivered when out of C3. */
+ #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
++#define INSTPM_TLB_INVALIDATE (1<<9)
++#define INSTPM_SYNC_FLUSH (1<<5)
+ #define ACTHD 0x020c8
+ #define FW_BLC 0x020d8
+ #define FW_BLC2 0x020dc
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index c17325c..99a9df8 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -767,6 +767,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
+
+ I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ POSTING_READ(mmio);
++
++ /* Flush the TLB for this page */
++ if (INTEL_INFO(dev)->gen >= 6) {
++ u32 reg = RING_INSTPM(ring->mmio_base);
++ I915_WRITE(reg,
++ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
++ INSTPM_SYNC_FLUSH));
++ if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
++ 1000))
++ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
++ ring->name);
++ }
+ }
+
+ static int
+diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
+index 18054d9..dbec2ff 100644
+--- a/drivers/net/wireless/hostap/hostap_ioctl.c
++++ b/drivers/net/wireless/hostap/hostap_ioctl.c
+@@ -522,9 +522,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
+
+ data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
+
+- memcpy(extra, &addr, sizeof(struct sockaddr) * data->length);
++ memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
+ data->flags = 1; /* has quality information */
+- memcpy(extra + sizeof(struct sockaddr) * data->length, &qual,
++ memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
+ sizeof(struct iw_quality) * data->length);
+
+ kfree(addr);
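
The two memcpy fixes just above are an instance of a classic C pointer bug: addr and qual are heap-allocated arrays, so &addr is the address of the local pointer variable itself, and the old code copied the pointer's own bytes (plus whatever happened to follow it on the stack) instead of the array contents. A minimal standalone sketch of the bug class (illustrative userspace C, not the driver code):

    /* demo: memcpy from a heap pointer vs. the address of the pointer */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            int *addr = malloc(4 * sizeof(int));
            int dst[4];

            for (int i = 0; i < 4; i++)
                    addr[i] = i + 1;

            /* correct: copies the array contents, prints 1 2 3 4 */
            memcpy(dst, addr, 4 * sizeof(int));
            printf("%d %d %d %d\n", dst[0], dst[1], dst[2], dst[3]);

            /* the buggy form, memcpy(dst, &addr, 4 * sizeof(int)), would
             * copy the pointer value itself plus adjacent stack bytes */
            free(addr);
            return 0;
    }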
+diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
+index a66b93b..1662fcc 100644
+--- a/drivers/net/wireless/zd1201.c
++++ b/drivers/net/wireless/zd1201.c
+@@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
+ goto exit;
+
+ err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
+- USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
++ USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
+ if (err < 0)
+ goto exit;
+
++ memcpy(&ret, buf, sizeof(ret));
++
+ if (ret & 0x80) {
+ err = -EIO;
+ goto exit;
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index 91a375f..17fad3b 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -390,6 +390,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
+ mem = (unsigned long)
+ dt_alloc(size + 4, __alignof__(struct device_node));
+
++ memset((void *)mem, 0, size);
++
+ ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
+
+ pr_debug(" unflattening %lx...\n", mem);
+diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
+index e1b4f80..5c87270 100644
+--- a/drivers/s390/scsi/zfcp_erp.c
++++ b/drivers/s390/scsi/zfcp_erp.c
+@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
+
+ if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
+ zfcp_erp_action_dismiss(&port->erp_action);
+- else
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ else {
++ spin_lock(port->adapter->scsi_host->host_lock);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ zfcp_erp_action_dismiss_lun(sdev);
++ spin_unlock(port->adapter->scsi_host->host_lock);
++ }
+ }
+
+ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
+@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
+ {
+ struct scsi_device *sdev;
+
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ spin_lock(port->adapter->scsi_host->host_lock);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ _zfcp_erp_lun_reopen(sdev, clear, id, 0);
++ spin_unlock(port->adapter->scsi_host->host_lock);
+ }
+
+ static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
+@@ -1435,8 +1440,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
+ atomic_set_mask(common_mask, &port->status);
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+- shost_for_each_device(sdev, adapter->scsi_host)
++ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, adapter->scsi_host)
+ atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
++ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+@@ -1470,11 +1477,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+- shost_for_each_device(sdev, adapter->scsi_host) {
++ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, adapter->scsi_host) {
+ atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+ if (clear_counter)
+ atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
+ }
++ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+@@ -1488,16 +1497,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
+ {
+ struct scsi_device *sdev;
+ u32 common_mask = mask & ZFCP_COMMON_FLAGS;
++ unsigned long flags;
+
+ atomic_set_mask(mask, &port->status);
+
+ if (!common_mask)
+ return;
+
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ atomic_set_mask(common_mask,
+ &sdev_to_zfcp(sdev)->status);
++ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+@@ -1512,6 +1524,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
+ struct scsi_device *sdev;
+ u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+ u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
++ unsigned long flags;
+
+ atomic_clear_mask(mask, &port->status);
+
+@@ -1521,13 +1534,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
+ if (clear_counter)
+ atomic_set(&port->erp_counter, 0);
+
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port) {
+ atomic_clear_mask(common_mask,
+ &sdev_to_zfcp(sdev)->status);
+ if (clear_counter)
+ atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
+ }
++ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
+index e76d003..52c6b59 100644
+--- a/drivers/s390/scsi/zfcp_qdio.c
++++ b/drivers/s390/scsi/zfcp_qdio.c
+@@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+
+ static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
+ {
+- spin_lock_irq(&qdio->req_q_lock);
+ if (atomic_read(&qdio->req_q_free) ||
+ !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+ return 1;
+- spin_unlock_irq(&qdio->req_q_lock);
+ return 0;
+ }
+
+@@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
+ {
+ long ret;
+
+- spin_unlock_irq(&qdio->req_q_lock);
+- ret = wait_event_interruptible_timeout(qdio->req_q_wq,
+- zfcp_qdio_sbal_check(qdio), 5 * HZ);
++ ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
++ zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
+
+ if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+ return -EIO;
+@@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
+ zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
+ }
+
+- spin_lock_irq(&qdio->req_q_lock);
+ return -EIO;
+ }
+
+
+diff --git a/drivers/xen/events.c b/drivers/xen/events.c
+index 417c133..33dcad6 100644
+--- a/drivers/xen/events.c
++++ b/drivers/xen/events.c
+@@ -324,7 +324,7 @@ static void init_evtchn_cpu_bindings(void)
+
+ for_each_possible_cpu(i)
+ memset(per_cpu(cpu_evtchn_mask, i),
+- (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
++ (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
+ }
+
+ static inline void clear_evtchn(int port)
+diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
+index dc9a913..2d8be51 100644
+--- a/fs/nilfs2/segbuf.c
++++ b/fs/nilfs2/segbuf.c
+@@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
+
+ if (err == -EOPNOTSUPP) {
+ set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+- bio_put(bio);
+- /* to be detected by submit_seg_bio() */
++ /* to be detected by nilfs_segbuf_submit_bio() */
+ }
+
+ if (!uptodate)
+@@ -377,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
+ bio->bi_private = segbuf;
+ bio_get(bio);
+ submit_bio(mode, bio);
++ segbuf->sb_nbio++;
+ if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
+ bio_put(bio);
+ err = -EOPNOTSUPP;
+ goto failed;
+ }
+- segbuf->sb_nbio++;
+ bio_put(bio);
+
+ wi->bio = NULL;
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index 6c6c20e..b305b31 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -530,6 +530,63 @@ do { \
+ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
+
+
++#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
++ lock, ret) \
++do { \
++ DEFINE_WAIT(__wait); \
++ \
++ for (;;) { \
++ prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
++ if (condition) \
++ break; \
++ if (signal_pending(current)) { \
++ ret = -ERESTARTSYS; \
++ break; \
++ } \
++ spin_unlock_irq(&lock); \
++ ret = schedule_timeout(ret); \
++ spin_lock_irq(&lock); \
++ if (!ret) \
++ break; \
++ } \
++ finish_wait(&wq, &__wait); \
++} while (0)
++
++/**
++ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
++ * The condition is checked under the lock. This is expected
++ * to be called with the lock taken.
++ * @wq: the waitqueue to wait on
++ * @condition: a C expression for the event to wait for
++ * @lock: a locked spinlock_t, which will be released before schedule()
++ * and reacquired afterwards.
++ * @timeout: timeout, in jiffies
++ *
++ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
++ * @condition evaluates to true or signal is received. The @condition is
++ * checked each time the waitqueue @wq is woken up.
++ *
++ * wake_up() has to be called after changing any variable that could
++ * change the result of the wait condition.
++ *
++ * This is supposed to be called while holding the lock. The lock is
++ * dropped before going to sleep and is reacquired afterwards.
++ *
++ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
++ * was interrupted by a signal, and the remaining jiffies otherwise
++ * if the condition evaluated to true before the timeout elapsed.
++ */
++#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
++ timeout) \
++({ \
++ int __ret = timeout; \
++ \
++ if (!(condition)) \
++ __wait_event_interruptible_lock_irq_timeout( \
++ wq, condition, lock, __ret); \
++ __ret; \
++})
++
+
+ #define __wait_event_killable(wq, condition, ret) \
+ do { \
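
The contract documented above — condition checked under the spinlock, lock dropped across schedule_timeout() and retaken on wakeup, 0 on timeout, -ERESTARTSYS on a signal, remaining jiffies otherwise — is the classic condition-variable shape. As a rough userspace analogue (an illustration only, not kernel code), pthread_cond_timedwait() follows the same discipline:

    /* userspace sketch: wait for a flag under a lock, with a timeout */
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int ready; /* the wait condition, protected by lock */

    static int wait_ready_timeout(int seconds)
    {
            struct timespec deadline;
            int err = 0;

            clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_sec += seconds;

            pthread_mutex_lock(&lock);
            /* condition is re-checked under the lock after every wakeup;
             * the mutex is released for the duration of each sleep */
            while (!ready && err == 0)
                    err = pthread_cond_timedwait(&cond, &lock, &deadline);
            pthread_mutex_unlock(&lock);
            return err; /* 0 if ready was set, ETIMEDOUT on timeout */
    }

    int main(void)
    {
            printf("wait returned %d\n", wait_ready_timeout(1));
            return 0;
    }

The zfcp_qdio.c hunk earlier in this patch is the first user: zfcp_qdio_sbal_get() now lets the helper handle req_q_lock instead of dropping and retaking it by hand around wait_event_interruptible_timeout().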
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index a64b94e..575d092 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -128,6 +128,7 @@ struct worker {
+ };
+
+ struct work_struct *current_work; /* L: work being processed */
++ work_func_t current_func; /* L: current_work's fn */
+ struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
+ struct list_head scheduled; /* L: scheduled works */
+ struct task_struct *task; /* I: worker task */
+@@ -838,7 +839,8 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry(worker, tmp, bwh, hentry)
+- if (worker->current_work == work)
++ if (worker->current_work == work &&
++ worker->current_func == work->func)
+ return worker;
+ return NULL;
+ }
+@@ -848,9 +850,27 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
+ * @gcwq: gcwq of interest
+ * @work: work to find worker for
+ *
+- * Find a worker which is executing @work on @gcwq. This function is
+- * identical to __find_worker_executing_work() except that this
+- * function calculates @bwh itself.
++ * Find a worker which is executing @work on @gcwq by searching
++ * @gcwq->busy_hash which is keyed by the address of @work. For a worker
++ * to match, its current execution should match the address of @work and
++ * its work function. This is to avoid unwanted dependency between
++ * unrelated work executions through a work item being recycled while still
++ * being executed.
++ *
++ * This is a bit tricky. A work item may be freed once its execution
++ * starts and nothing prevents the freed area from being recycled for
++ * another work item. If the same work item address ends up being reused
++ * before the original execution finishes, workqueue will identify the
++ * recycled work item as currently executing and make it wait until the
++ * current execution finishes, introducing an unwanted dependency.
++ *
++ * This function checks the work item address, work function and workqueue
++ * to avoid false positives. Note that this isn't complete as one may
++ * construct a work function which can introduce dependency onto itself
++ * through a recycled work item. Well, if somebody wants to shoot oneself
++ * in the foot that badly, there's only so much we can do, and if such
++ * deadlock actually occurs, it should be easy to locate the culprit work
++ * function.
+ *
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock).
+@@ -1721,10 +1741,9 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
+ *nextp = n;
+ }
+
+-static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
++static void cwq_activate_delayed_work(struct work_struct *work)
+ {
+- struct work_struct *work = list_first_entry(&cwq->delayed_works,
+- struct work_struct, entry);
++ struct cpu_workqueue_struct *cwq = get_work_cwq(work);
+ struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
+
+ trace_workqueue_activate_work(work);
+@@ -1733,6 +1752,14 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+ cwq->nr_active++;
+ }
+
++static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
++{
++ struct work_struct *work = list_first_entry(&cwq->delayed_works,
++ struct work_struct, entry);
++
++ cwq_activate_delayed_work(work);
++}
++
541 |
+ /** |
542 |
+ * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight |
543 |
+ * @cwq: cwq of interest |
544 |
+@@ -1804,7 +1831,6 @@ __acquires(&gcwq->lock) |
545 |
+ struct global_cwq *gcwq = cwq->gcwq; |
546 |
+ struct hlist_head *bwh = busy_worker_head(gcwq, work); |
547 |
+ bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE; |
548 |
+- work_func_t f = work->func; |
549 |
+ int work_color; |
550 |
+ struct worker *collision; |
551 |
+ #ifdef CONFIG_LOCKDEP |
552 |
+@@ -1833,6 +1859,7 @@ __acquires(&gcwq->lock) |
553 |
+ debug_work_deactivate(work); |
554 |
+ hlist_add_head(&worker->hentry, bwh); |
555 |
+ worker->current_work = work; |
556 |
++ worker->current_func = work->func; |
557 |
+ worker->current_cwq = cwq; |
558 |
+ work_color = get_work_color(work); |
559 |
+ |
560 |
+@@ -1870,7 +1897,7 @@ __acquires(&gcwq->lock) |
561 |
+ lock_map_acquire_read(&cwq->wq->lockdep_map); |
562 |
+ lock_map_acquire(&lockdep_map); |
563 |
+ trace_workqueue_execute_start(work); |
564 |
+- f(work); |
565 |
++ worker->current_func(work); |
566 |
+ /* |
567 |
+ * While we must be careful to not use "work" after this, the trace |
568 |
+ * point will only record its address. |
569 |
+@@ -1880,11 +1907,10 @@ __acquires(&gcwq->lock) |
570 |
+ lock_map_release(&cwq->wq->lockdep_map); |
571 |
+ |
572 |
+ if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { |
573 |
+- printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " |
574 |
+- "%s/0x%08x/%d\n", |
575 |
+- current->comm, preempt_count(), task_pid_nr(current)); |
576 |
+- printk(KERN_ERR " last function: "); |
577 |
+- print_symbol("%s\n", (unsigned long)f); |
578 |
++ pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" |
579 |
++ " last function: %pf\n", |
580 |
++ current->comm, preempt_count(), task_pid_nr(current), |
581 |
++ worker->current_func); |
582 |
+ debug_show_held_locks(current); |
583 |
+ dump_stack(); |
584 |
+ } |
585 |
+@@ -1898,6 +1924,7 @@ __acquires(&gcwq->lock) |
586 |
+ /* we're done with it, release */ |
587 |
+ hlist_del_init(&worker->hentry); |
588 |
+ worker->current_work = NULL; |
589 |
++ worker->current_func = NULL; |
590 |
+ worker->current_cwq = NULL; |
591 |
+ cwq_dec_nr_in_flight(cwq, work_color, false); |
592 |
+ } |
593 |
+@@ -2625,6 +2652,18 @@ static int try_to_grab_pending(struct work_struct *work) |
594 |
+ smp_rmb(); |
595 |
+ if (gcwq == get_work_gcwq(work)) { |
596 |
+ debug_work_deactivate(work); |
597 |
++ |
598 |
++ /* |
599 |
++ * A delayed work item cannot be grabbed directly |
600 |
++ * because it might have linked NO_COLOR work items |
601 |
++ * which, if left on the delayed_list, will confuse |
602 |
++ * cwq->nr_active management later on and cause |
603 |
++ * stall. Make sure the work item is activated |
604 |
++ * before grabbing. |
605 |
++ */ |
606 |
++ if (*work_data_bits(work) & WORK_STRUCT_DELAYED) |
607 |
++ cwq_activate_delayed_work(work); |
608 |
++ |
609 |
+ list_del_init(&work->entry); |
610 |
+ cwq_dec_nr_in_flight(get_work_cwq(work), |
611 |
+ get_work_color(work), |
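
The busy_hash change above keys "is this work item already running" on both the item's address and its function pointer, because a work item freed mid-execution can be recycled at the same address for unrelated work. A small self-contained sketch of that matching rule (illustrative userspace C, not the workqueue implementation):

    /* demo: why address-only matching gives false positives on recycling */
    #include <stdio.h>

    typedef void (*work_func_t)(void);

    struct work { work_func_t func; };

    struct worker {
            struct work *current_work; /* address of item being executed */
            work_func_t current_func;  /* snapshot of its function */
    };

    static void old_fn(void) {}
    static void new_fn(void) {}

    /* the patch's rule: both the address and the function must agree */
    static int executing(struct worker *w, struct work *work)
    {
            return w->current_work == work && w->current_func == work->func;
    }

    int main(void)
    {
            static struct work slot; /* one reusable allocation */
            struct work *item = &slot;

            item->func = old_fn;
            struct worker w = { .current_work = item, .current_func = old_fn };

            /* the item is "freed" and recycled at the same address */
            item->func = new_fn;

            printf("address only: %d\n", w.current_work == item); /* 1: false positive */
            printf("address+func: %d\n", executing(&w, item));    /* 0: filtered */
            return 0;
    }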