Gentoo Archives: gentoo-commits

From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r2502 - genpatches-2.6/trunk/3.0
Date: Sat, 31 Aug 2013 19:40:11
Message-Id: 20130831194007.778AF2004B@flycatcher.gentoo.org
Author: mpagano
Date: 2013-08-31 19:40:07 +0000 (Sat, 31 Aug 2013)
New Revision: 2502

Added:
genpatches-2.6/trunk/3.0/1093_linux-3.0.94.patch
Modified:
genpatches-2.6/trunk/3.0/0000_README
Log:
Linux patch 3.0.94

Modified: genpatches-2.6/trunk/3.0/0000_README
===================================================================
--- genpatches-2.6/trunk/3.0/0000_README 2013-08-30 12:11:01 UTC (rev 2501)
+++ genpatches-2.6/trunk/3.0/0000_README 2013-08-31 19:40:07 UTC (rev 2502)
@@ -407,6 +407,10 @@
From: http://www.kernel.org
Desc: Linux 3.0.93

+Patch: 1093_linux-3.0.94.patch
+From: http://www.kernel.org
+Desc: Linux 3.0.94
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

Added: genpatches-2.6/trunk/3.0/1093_linux-3.0.94.patch
===================================================================
--- genpatches-2.6/trunk/3.0/1093_linux-3.0.94.patch (rev 0)
+++ genpatches-2.6/trunk/3.0/1093_linux-3.0.94.patch 2013-08-31 19:40:07 UTC (rev 2502)
@@ -0,0 +1,354 @@
+diff --git a/Makefile b/Makefile
+index 369ce14..a88b035 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 0
+-SUBLEVEL = 93
++SUBLEVEL = 94
+ EXTRAVERSION =
+ NAME = Sneaky Weasel
+
+diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
+index f06b7ea..cf9dc09 100644
+--- a/drivers/ata/libata-pmp.c
++++ b/drivers/ata/libata-pmp.c
+@@ -288,24 +288,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
+
+ /* Disable sending Early R_OK.
+ * With "cached read" HDD testing and multiple ports busy on a SATA
+- * host controller, 3726 PMP will very rarely drop a deferred
++ * host controller, 3x26 PMP will very rarely drop a deferred
+ * R_OK that was intended for the host. Symptom will be all
+ * 5 drives under test will timeout, get reset, and recover.
+ */
+- if (vendor == 0x1095 && devid == 0x3726) {
++ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+ u32 reg;
+
+ err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
+ if (err_mask) {
+ rc = -EIO;
+- reason = "failed to read Sil3726 Private Register";
++ reason = "failed to read Sil3x26 Private Register";
+ goto fail;
+ }
+ reg &= ~0x1;
+ err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
+ if (err_mask) {
+ rc = -EIO;
+- reason = "failed to write Sil3726 Private Register";
++ reason = "failed to write Sil3x26 Private Register";
+ goto fail;
+ }
+ }
+@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
+ u16 devid = sata_pmp_gscr_devid(gscr);
+ struct ata_link *link;
+
+- if (vendor == 0x1095 && devid == 0x3726) {
+- /* sil3726 quirks */
++ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
++ /* sil3x26 quirks */
+ ata_for_each_link(link, ap, EDGE) {
+ /* link reports offline after LPM */
+ link->flags |= ATA_LFLAG_NO_LPM;
+diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
+index 12de464..10ce1bc 100644
+--- a/drivers/net/wireless/hostap/hostap_ioctl.c
++++ b/drivers/net/wireless/hostap/hostap_ioctl.c
+@@ -521,9 +521,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
+
+ data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
+
+- memcpy(extra, &addr, sizeof(struct sockaddr) * data->length);
++ memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
+ data->flags = 1; /* has quality information */
+- memcpy(extra + sizeof(struct sockaddr) * data->length, &qual,
++ memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
+ sizeof(struct iw_quality) * data->length);
+
+ kfree(addr);
+diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
+index 415eec4..af792a4 100644
+--- a/drivers/net/wireless/zd1201.c
++++ b/drivers/net/wireless/zd1201.c
+@@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
+ goto exit;
+
+ err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
+- USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
++ USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
+ if (err < 0)
+ goto exit;
+
++ memcpy(&ret, buf, sizeof(ret));
++
+ if (ret & 0x80) {
+ err = -EIO;
+ goto exit;
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index 65200af..d3645f6 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -389,6 +389,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
+ mem = (unsigned long)
+ dt_alloc(size + 4, __alignof__(struct device_node));
+
++ memset((void *)mem, 0, size);
++
+ ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
+
+ pr_debug(" unflattening %lx...\n", mem);
+diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
+index e1b4f80..5c87270 100644
+--- a/drivers/s390/scsi/zfcp_erp.c
++++ b/drivers/s390/scsi/zfcp_erp.c
+@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
+
+ if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
+ zfcp_erp_action_dismiss(&port->erp_action);
+- else
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ else {
++ spin_lock(port->adapter->scsi_host->host_lock);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ zfcp_erp_action_dismiss_lun(sdev);
++ spin_unlock(port->adapter->scsi_host->host_lock);
++ }
+ }
+
+ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
+@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
+ {
+ struct scsi_device *sdev;
+
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ spin_lock(port->adapter->scsi_host->host_lock);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ _zfcp_erp_lun_reopen(sdev, clear, id, 0);
++ spin_unlock(port->adapter->scsi_host->host_lock);
+ }
+
+ static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
+@@ -1435,8 +1440,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
+ atomic_set_mask(common_mask, &port->status);
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+- shost_for_each_device(sdev, adapter->scsi_host)
++ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, adapter->scsi_host)
+ atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
++ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+@@ -1470,11 +1477,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+- shost_for_each_device(sdev, adapter->scsi_host) {
++ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, adapter->scsi_host) {
+ atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+ if (clear_counter)
+ atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
+ }
++ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+@@ -1488,16 +1497,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
+ {
+ struct scsi_device *sdev;
+ u32 common_mask = mask & ZFCP_COMMON_FLAGS;
++ unsigned long flags;
+
+ atomic_set_mask(mask, &port->status);
+
+ if (!common_mask)
+ return;
+
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ atomic_set_mask(common_mask,
+ &sdev_to_zfcp(sdev)->status);
++ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+@@ -1512,6 +1524,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
+ struct scsi_device *sdev;
+ u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+ u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
++ unsigned long flags;
+
+ atomic_clear_mask(mask, &port->status);
+
+@@ -1521,13 +1534,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
+ if (clear_counter)
+ atomic_set(&port->erp_counter, 0);
+
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port) {
+ atomic_clear_mask(common_mask,
+ &sdev_to_zfcp(sdev)->status);
+ if (clear_counter)
+ atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
+ }
++ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
+index d9c40ea..f3922a8 100644
+--- a/drivers/s390/scsi/zfcp_qdio.c
++++ b/drivers/s390/scsi/zfcp_qdio.c
+@@ -199,11 +199,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+
+ static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
+ {
+- spin_lock_irq(&qdio->req_q_lock);
+ if (atomic_read(&qdio->req_q_free) ||
+ !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+ return 1;
+- spin_unlock_irq(&qdio->req_q_lock);
+ return 0;
+ }
+
+@@ -221,9 +219,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
+ {
+ long ret;
+
+- spin_unlock_irq(&qdio->req_q_lock);
+- ret = wait_event_interruptible_timeout(qdio->req_q_wq,
+- zfcp_qdio_sbal_check(qdio), 5 * HZ);
++ ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
++ zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
+
+ if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+ return -EIO;
+@@ -237,7 +234,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
+ zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
+ }
+
+- spin_lock_irq(&qdio->req_q_lock);
+ return -EIO;
+ }
+
+diff --git a/drivers/xen/events.c b/drivers/xen/events.c
+index a2f1f71..7ba4d0e 100644
+--- a/drivers/xen/events.c
++++ b/drivers/xen/events.c
+@@ -317,7 +317,7 @@ static void init_evtchn_cpu_bindings(void)
+
+ for_each_possible_cpu(i)
+ memset(per_cpu(cpu_evtchn_mask, i),
+- (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
++ (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
+ }
+
+ static inline void clear_evtchn(int port)
+diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
+index 850a7c0..07a666a 100644
+--- a/fs/nilfs2/segbuf.c
++++ b/fs/nilfs2/segbuf.c
+@@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
+
+ if (err == -EOPNOTSUPP) {
+ set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+- bio_put(bio);
+- /* to be detected by submit_seg_bio() */
++ /* to be detected by nilfs_segbuf_submit_bio() */
+ }
+
+ if (!uptodate)
+@@ -377,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
+ bio->bi_private = segbuf;
+ bio_get(bio);
+ submit_bio(mode, bio);
++ segbuf->sb_nbio++;
+ if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
+ bio_put(bio);
+ err = -EOPNOTSUPP;
+ goto failed;
+ }
+- segbuf->sb_nbio++;
+ bio_put(bio);
+
+ wi->bio = NULL;
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index bea7ad5..e007f76 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -530,6 +530,63 @@ do { \
+ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
+
+
++#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
++ lock, ret) \
++do { \
++ DEFINE_WAIT(__wait); \
++ \
++ for (;;) { \
++ prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
++ if (condition) \
++ break; \
++ if (signal_pending(current)) { \
++ ret = -ERESTARTSYS; \
++ break; \
++ } \
++ spin_unlock_irq(&lock); \
++ ret = schedule_timeout(ret); \
++ spin_lock_irq(&lock); \
++ if (!ret) \
++ break; \
++ } \
++ finish_wait(&wq, &__wait); \
++} while (0)
++
++/**
++ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
++ * The condition is checked under the lock. This is expected
++ * to be called with the lock taken.
++ * @wq: the waitqueue to wait on
++ * @condition: a C expression for the event to wait for
++ * @lock: a locked spinlock_t, which will be released before schedule()
++ * and reacquired afterwards.
++ * @timeout: timeout, in jiffies
++ *
++ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
++ * @condition evaluates to true or signal is received. The @condition is
++ * checked each time the waitqueue @wq is woken up.
++ *
++ * wake_up() has to be called after changing any variable that could
++ * change the result of the wait condition.
++ *
++ * This is supposed to be called while holding the lock. The lock is
++ * dropped before going to sleep and is reacquired afterwards.
++ *
++ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
++ * was interrupted by a signal, and the remaining jiffies otherwise
++ * if the condition evaluated to true before the timeout elapsed.
++ */
++#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
++ timeout) \
++({ \
++ int __ret = timeout; \
++ \
++ if (!(condition)) \
++ __wait_event_interruptible_lock_irq_timeout( \
++ wq, condition, lock, __ret); \
++ __ret; \
++})
++

 #define __wait_event_killable(wq, condition, ret) \
 do { \
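
Editor's note on the wait.h hunk above: it introduces wait_event_interruptible_lock_irq_timeout(), which the zfcp_qdio.c hunk then uses so that the free-slot check runs with req_q_lock held across the sleep instead of being dropped and retaken around the wait. The sketch below is only an illustration of that calling convention, not code from this patch: example_dev, its fields, and the example_* functions are hypothetical, while the macro's semantics (call with the spinlock held, lock dropped around schedule_timeout() and reacquired before rechecking, return 0 on timeout, -ERESTARTSYS on a signal, remaining jiffies otherwise) are taken from the kernel-doc added above.

/*
 * Illustrative sketch only. example_dev, example_wait_for_slot() and
 * example_complete() are made up for this note; dev->lock and dev->wq
 * are assumed to have been set up with spin_lock_init() and
 * init_waitqueue_head().
 */
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct example_dev {
	spinlock_t lock;		/* protects free_slots */
	wait_queue_head_t wq;		/* woken when free_slots changes */
	unsigned int free_slots;
};

/*
 * Sleep interruptibly (up to 5 s) until a slot is available. The
 * condition is evaluated with dev->lock held; the macro drops the lock
 * only around schedule_timeout() and returns with it held again, so the
 * decrement below cannot race with the completion path.
 */
static int example_wait_for_slot(struct example_dev *dev)
{
	long ret;

	spin_lock_irq(&dev->lock);
	ret = wait_event_interruptible_lock_irq_timeout(dev->wq,
			dev->free_slots > 0, dev->lock, 5 * HZ);
	if (ret > 0)
		dev->free_slots--;
	spin_unlock_irq(&dev->lock);

	if (ret == 0)
		return -ETIMEDOUT;	/* timeout elapsed */
	if (ret < 0)
		return ret;		/* -ERESTARTSYS on signal */
	return 0;
}

/* Completion path: change the condition under the same lock, then wake. */
static void example_complete(struct example_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	dev->free_slots++;
	spin_unlock_irqrestore(&dev->lock, flags);
	wake_up(&dev->wq);
}

The point of the new helper, as used by zfcp, is that the condition and the state it depends on are both protected by the same spinlock, closing the window that existed when the lock was released before calling the plain wait_event_interruptible_timeout().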