From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1356 - genpatches-2.6/trunk/2.6.25
Date: Tue, 14 Oct 2008 13:59:48
Message-Id: E1KpkR6-0002de-97@stork.gentoo.org
1 Author: mpagano
2 Date: 2008-10-14 13:59:43 +0000 (Tue, 14 Oct 2008)
3 New Revision: 1356
4
5 Added:
6 genpatches-2.6/trunk/2.6.25/1017_linux-2.6.25.18.patch
7 Modified:
8 genpatches-2.6/trunk/2.6.25/0000_README
9 Log:
10 Adding linux 2.6.25.18
11
12 Modified: genpatches-2.6/trunk/2.6.25/0000_README
13 ===================================================================
14 --- genpatches-2.6/trunk/2.6.25/0000_README 2008-10-12 19:16:04 UTC (rev 1355)
15 +++ genpatches-2.6/trunk/2.6.25/0000_README 2008-10-14 13:59:43 UTC (rev 1356)
16 @@ -107,6 +107,10 @@
17 From: http://www.kernel.org
18 Desc: Linux 2.6.25.17
19
20 +Patch: 1017_linux-2.6.25.18.patch
21 +From: http://www.kernel.org
22 +Desc: Linux 2.6.25.18
23 +
24 Patch: 1500_thinkpad_acpi-null-ptr-deref-fix.patch
25 From: http://bugs.gentoo.org/222331
26 Desc: Fixes potential NULL pointer deref in Thinkpad ACPI
27
28 Added: genpatches-2.6/trunk/2.6.25/1017_linux-2.6.25.18.patch
29 ===================================================================
30 --- genpatches-2.6/trunk/2.6.25/1017_linux-2.6.25.18.patch (rev 0)
31 +++ genpatches-2.6/trunk/2.6.25/1017_linux-2.6.25.18.patch 2008-10-14 13:59:43 UTC (rev 1356)
32 @@ -0,0 +1,1023 @@
33 +diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
34 +index 36652ea..bec6496 100644
35 +--- a/arch/x86/kernel/hpet.c
36 ++++ b/arch/x86/kernel/hpet.c
37 +@@ -222,8 +222,8 @@ static void hpet_legacy_clockevent_register(void)
38 + /* Calculate the min / max delta */
39 + hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
40 + &hpet_clockevent);
41 +- hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30,
42 +- &hpet_clockevent);
43 ++ /* 5 usec minimum reprogramming delta. */
44 ++ hpet_clockevent.min_delta_ns = 5000;
45 +
46 + /*
47 + * Start hpet with the boot cpu mask and make it
48 +@@ -282,15 +282,22 @@ static void hpet_legacy_set_mode(enum clock_event_mode mode,
49 + }
50 +
51 + static int hpet_legacy_next_event(unsigned long delta,
52 +- struct clock_event_device *evt)
53 ++ struct clock_event_device *evt)
54 + {
55 +- unsigned long cnt;
56 ++ u32 cnt;
57 +
58 + cnt = hpet_readl(HPET_COUNTER);
59 +- cnt += delta;
60 ++ cnt += (u32) delta;
61 + hpet_writel(cnt, HPET_T0_CMP);
62 +
63 +- return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0) ? -ETIME : 0;
64 ++ /*
65 ++ * We need to read back the CMP register to make sure that
66 ++ * what we wrote hit the chip before we compare it to the
67 ++ * counter.
68 ++ */
69 ++ WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt);
70 ++
71 ++ return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
72 + }
73 +
74 + /*
75 +diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
76 +index 5921e5f..84ec3cd 100644
77 +--- a/arch/x86/kernel/io_delay.c
78 ++++ b/arch/x86/kernel/io_delay.c
79 +@@ -92,6 +92,14 @@ static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
80 + DMI_MATCH(DMI_BOARD_NAME, "30BF")
81 + }
82 + },
83 ++ {
84 ++ .callback = dmi_io_delay_0xed_port,
85 ++ .ident = "Presario F700",
86 ++ .matches = {
87 ++ DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
88 ++ DMI_MATCH(DMI_BOARD_NAME, "30D3")
89 ++ }
90 ++ },
91 + { }
92 + };
93 +
94 +diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
95 +index 12affe1..72f9826 100644
96 +--- a/arch/x86/kernel/vmi_32.c
97 ++++ b/arch/x86/kernel/vmi_32.c
98 +@@ -234,7 +234,7 @@ static void vmi_write_ldt_entry(struct desc_struct *dt, int entry,
99 + const void *desc)
100 + {
101 + u32 *ldt_entry = (u32 *)desc;
102 +- vmi_ops.write_idt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
103 ++ vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
104 + }
105 +
106 + static void vmi_load_sp0(struct tss_struct *tss,
107 +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
108 +index 7222a18..a9b8796 100644
109 +--- a/drivers/acpi/ec.c
110 ++++ b/drivers/acpi/ec.c
111 +@@ -228,6 +228,8 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
112 + if (acpi_ec_check_status(ec, event))
113 + goto end;
114 + }
115 ++ if (acpi_ec_check_status(ec,event))
116 ++ return 0;
117 + }
118 + pr_err(PREFIX "acpi_ec_wait timeout,"
119 + " status = %d, expect_event = %d\n",
120 +diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
121 +index 42fb635..87b9a17 100644
122 +--- a/drivers/acpi/processor_perflib.c
123 ++++ b/drivers/acpi/processor_perflib.c
124 +@@ -70,7 +70,7 @@ static DEFINE_MUTEX(performance_mutex);
125 + * 0 -> cpufreq low level drivers initialized -> consider _PPC values
126 + * 1 -> ignore _PPC totally -> forced by user through boot param
127 + */
128 +-static unsigned int ignore_ppc = -1;
129 ++static int ignore_ppc = -1;
130 + module_param(ignore_ppc, uint, 0644);
131 + MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
132 + "limited by BIOS, this should help");
133 +diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
134 +index 393e679..342255b 100644
135 +--- a/drivers/i2c/i2c-dev.c
136 ++++ b/drivers/i2c/i2c-dev.c
137 +@@ -574,8 +574,10 @@ static int __init i2c_dev_init(void)
138 + goto out;
139 +
140 + i2c_dev_class = class_create(THIS_MODULE, "i2c-dev");
141 +- if (IS_ERR(i2c_dev_class))
142 ++ if (IS_ERR(i2c_dev_class)) {
143 ++ res = PTR_ERR(i2c_dev_class);
144 + goto out_unreg_chrdev;
145 ++ }
146 +
147 + res = i2c_add_driver(&i2cdev_driver);
148 + if (res)
149 +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
150 +index 91ded3e..3318642 100644
151 +--- a/drivers/mmc/card/block.c
152 ++++ b/drivers/mmc/card/block.c
153 +@@ -103,8 +103,10 @@ static int mmc_blk_open(struct inode *inode, struct file *filp)
154 + check_disk_change(inode->i_bdev);
155 + ret = 0;
156 +
157 +- if ((filp->f_mode & FMODE_WRITE) && md->read_only)
158 ++ if ((filp->f_mode & FMODE_WRITE) && md->read_only) {
159 ++ mmc_blk_put(md);
160 + ret = -EROFS;
161 ++ }
162 + }
163 +
164 + return ret;
165 +diff --git a/drivers/net/niu.c b/drivers/net/niu.c
166 +index d11ba61..5fd6a65 100644
167 +--- a/drivers/net/niu.c
168 ++++ b/drivers/net/niu.c
169 +@@ -5230,6 +5230,56 @@ static void niu_netif_start(struct niu *np)
170 + niu_enable_interrupts(np, 1);
171 + }
172 +
173 ++static void niu_reset_buffers(struct niu *np)
174 ++{
175 ++ int i, j, k, err;
176 ++
177 ++ if (np->rx_rings) {
178 ++ for (i = 0; i < np->num_rx_rings; i++) {
179 ++ struct rx_ring_info *rp = &np->rx_rings[i];
180 ++
181 ++ for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
182 ++ struct page *page;
183 ++
184 ++ page = rp->rxhash[j];
185 ++ while (page) {
186 ++ struct page *next =
187 ++ (struct page *) page->mapping;
188 ++ u64 base = page->index;
189 ++ base = base >> RBR_DESCR_ADDR_SHIFT;
190 ++ rp->rbr[k++] = cpu_to_le32(base);
191 ++ page = next;
192 ++ }
193 ++ }
194 ++ for (; k < MAX_RBR_RING_SIZE; k++) {
195 ++ err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
196 ++ if (unlikely(err))
197 ++ break;
198 ++ }
199 ++
200 ++ rp->rbr_index = rp->rbr_table_size - 1;
201 ++ rp->rcr_index = 0;
202 ++ rp->rbr_pending = 0;
203 ++ rp->rbr_refill_pending = 0;
204 ++ }
205 ++ }
206 ++ if (np->tx_rings) {
207 ++ for (i = 0; i < np->num_tx_rings; i++) {
208 ++ struct tx_ring_info *rp = &np->tx_rings[i];
209 ++
210 ++ for (j = 0; j < MAX_TX_RING_SIZE; j++) {
211 ++ if (rp->tx_buffs[j].skb)
212 ++ (void) release_tx_packet(np, rp, j);
213 ++ }
214 ++
215 ++ rp->pending = MAX_TX_RING_SIZE;
216 ++ rp->prod = 0;
217 ++ rp->cons = 0;
218 ++ rp->wrap_bit = 0;
219 ++ }
220 ++ }
221 ++}
222 ++
223 + static void niu_reset_task(struct work_struct *work)
224 + {
225 + struct niu *np = container_of(work, struct niu, reset_task);
226 +@@ -5252,6 +5302,12 @@ static void niu_reset_task(struct work_struct *work)
227 +
228 + niu_stop_hw(np);
229 +
230 ++ spin_unlock_irqrestore(&np->lock, flags);
231 ++
232 ++ niu_reset_buffers(np);
233 ++
234 ++ spin_lock_irqsave(&np->lock, flags);
235 ++
236 + err = niu_init_hw(np);
237 + if (!err) {
238 + np->timer.expires = jiffies + HZ;
239 +diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
240 +index 147e26a..1dcc79f 100644
241 +--- a/drivers/spi/pxa2xx_spi.c
242 ++++ b/drivers/spi/pxa2xx_spi.c
243 +@@ -48,9 +48,10 @@ MODULE_ALIAS("platform:pxa2xx-spi");
244 +
245 + #define MAX_BUSES 3
246 +
247 +-#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
248 +-#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
249 +-#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
250 ++#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
251 ++#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
252 ++#define IS_DMA_ALIGNED(x) ((((u32)(x)) & 0x07) == 0)
253 ++#define MAX_DMA_LEN 8191
254 +
255 + /*
256 + * for testing SSCR1 changes that require SSP restart, basically
257 +@@ -142,7 +143,6 @@ struct driver_data {
258 + size_t tx_map_len;
259 + u8 n_bytes;
260 + u32 dma_width;
261 +- int cs_change;
262 + int (*write)(struct driver_data *drv_data);
263 + int (*read)(struct driver_data *drv_data);
264 + irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
265 +@@ -404,8 +404,45 @@ static void giveback(struct driver_data *drv_data)
266 + struct spi_transfer,
267 + transfer_list);
268 +
269 ++ /* Delay if requested before any change in chip select */
270 ++ if (last_transfer->delay_usecs)
271 ++ udelay(last_transfer->delay_usecs);
272 ++
273 ++ /* Drop chip select UNLESS cs_change is true or we are returning
274 ++ * a message with an error, or next message is for another chip
275 ++ */
276 + if (!last_transfer->cs_change)
277 + drv_data->cs_control(PXA2XX_CS_DEASSERT);
278 ++ else {
279 ++ struct spi_message *next_msg;
280 ++
281 ++ /* Holding of cs was hinted, but we need to make sure
282 ++ * the next message is for the same chip. Don't waste
283 ++ * time with the following tests unless this was hinted.
284 ++ *
285 ++ * We cannot postpone this until pump_messages, because
286 ++ * after calling msg->complete (below) the driver that
287 ++ * sent the current message could be unloaded, which
288 ++ * could invalidate the cs_control() callback...
289 ++ */
290 ++
291 ++ /* get a pointer to the next message, if any */
292 ++ spin_lock_irqsave(&drv_data->lock, flags);
293 ++ if (list_empty(&drv_data->queue))
294 ++ next_msg = NULL;
295 ++ else
296 ++ next_msg = list_entry(drv_data->queue.next,
297 ++ struct spi_message, queue);
298 ++ spin_unlock_irqrestore(&drv_data->lock, flags);
299 ++
300 ++ /* see if the next and current messages point
301 ++ * to the same chip
302 ++ */
303 ++ if (next_msg && next_msg->spi != msg->spi)
304 ++ next_msg = NULL;
305 ++ if (!next_msg || msg->state == ERROR_STATE)
306 ++ drv_data->cs_control(PXA2XX_CS_DEASSERT);
307 ++ }
308 +
309 + msg->state = NULL;
310 + if (msg->complete)
311 +@@ -488,10 +525,9 @@ static void dma_transfer_complete(struct driver_data *drv_data)
312 + msg->actual_length += drv_data->len -
313 + (drv_data->rx_end - drv_data->rx);
314 +
315 +- /* Release chip select if requested, transfer delays are
316 +- * handled in pump_transfers */
317 +- if (drv_data->cs_change)
318 +- drv_data->cs_control(PXA2XX_CS_DEASSERT);
319 ++ /* Transfer delays and chip select release are
320 ++ * handled in pump_transfers or giveback
321 ++ */
322 +
323 + /* Move to next transfer */
324 + msg->state = next_transfer(drv_data);
325 +@@ -600,10 +636,9 @@ static void int_transfer_complete(struct driver_data *drv_data)
326 + drv_data->cur_msg->actual_length += drv_data->len -
327 + (drv_data->rx_end - drv_data->rx);
328 +
329 +- /* Release chip select if requested, transfer delays are
330 +- * handled in pump_transfers */
331 +- if (drv_data->cs_change)
332 +- drv_data->cs_control(PXA2XX_CS_DEASSERT);
333 ++ /* Transfer delays and chip select release are
334 ++ * handled in pump_transfers or giveback
335 ++ */
336 +
337 + /* Move to next transfer */
338 + drv_data->cur_msg->state = next_transfer(drv_data);
339 +@@ -837,23 +872,40 @@ static void pump_transfers(unsigned long data)
340 + return;
341 + }
342 +
343 +- /* Delay if requested at end of transfer*/
344 ++ /* Delay if requested at end of transfer before CS change */
345 + if (message->state == RUNNING_STATE) {
346 + previous = list_entry(transfer->transfer_list.prev,
347 + struct spi_transfer,
348 + transfer_list);
349 + if (previous->delay_usecs)
350 + udelay(previous->delay_usecs);
351 ++
352 ++ /* Drop chip select only if cs_change is requested */
353 ++ if (previous->cs_change)
354 ++ drv_data->cs_control(PXA2XX_CS_DEASSERT);
355 + }
356 +
357 +- /* Check transfer length */
358 +- if (transfer->len > 8191)
359 +- {
360 +- dev_warn(&drv_data->pdev->dev, "pump_transfers: transfer "
361 +- "length greater than 8191\n");
362 +- message->status = -EINVAL;
363 +- giveback(drv_data);
364 +- return;
365 ++ /* Check for transfers that need multiple DMA segments */
366 ++ if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {
367 ++
368 ++ /* reject already-mapped transfers; PIO won't always work */
369 ++ if (message->is_dma_mapped
370 ++ || transfer->rx_dma || transfer->tx_dma) {
371 ++ dev_err(&drv_data->pdev->dev,
372 ++ "pump_transfers: mapped transfer length "
373 ++ "of %u is greater than %d\n",
374 ++ transfer->len, MAX_DMA_LEN);
375 ++ message->status = -EINVAL;
376 ++ giveback(drv_data);
377 ++ return;
378 ++ }
379 ++
380 ++ /* warn ... we force this to PIO mode */
381 ++ if (printk_ratelimit())
382 ++ dev_warn(&message->spi->dev, "pump_transfers: "
383 ++ "DMA disabled for transfer length %ld "
384 ++ "greater than %d\n",
385 ++ (long)drv_data->len, MAX_DMA_LEN);
386 + }
387 +
388 + /* Setup the transfer state based on the type of transfer */
389 +@@ -875,7 +927,6 @@ static void pump_transfers(unsigned long data)
390 + drv_data->len = transfer->len & DCMD_LENGTH;
391 + drv_data->write = drv_data->tx ? chip->write : null_writer;
392 + drv_data->read = drv_data->rx ? chip->read : null_reader;
393 +- drv_data->cs_change = transfer->cs_change;
394 +
395 + /* Change speed and bit per word on a per transfer */
396 + cr0 = chip->cr0;
397 +@@ -922,7 +973,7 @@ static void pump_transfers(unsigned long data)
398 + &dma_thresh))
399 + if (printk_ratelimit())
400 + dev_warn(&message->spi->dev,
401 +- "pump_transfer: "
402 ++ "pump_transfers: "
403 + "DMA burst size reduced to "
404 + "match bits_per_word\n");
405 + }
406 +@@ -936,8 +987,23 @@ static void pump_transfers(unsigned long data)
407 +
408 + message->state = RUNNING_STATE;
409 +
410 +- /* Try to map dma buffer and do a dma transfer if successful */
411 +- if ((drv_data->dma_mapped = map_dma_buffers(drv_data))) {
412 ++ /* Try to map dma buffer and do a dma transfer if successful, but
413 ++ * only if the length is non-zero and less than MAX_DMA_LEN.
414 ++ *
415 ++ * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
416 ++ * of PIO instead. Care is needed above because the transfer may
417 ++ * have have been passed with buffers that are already dma mapped.
418 ++ * A zero-length transfer in PIO mode will not try to write/read
419 ++ * to/from the buffers
420 ++ *
421 ++ * REVISIT large transfers are exactly where we most want to be
422 ++ * using DMA. If this happens much, split those transfers into
423 ++ * multiple DMA segments rather than forcing PIO.
424 ++ */
425 ++ drv_data->dma_mapped = 0;
426 ++ if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN)
427 ++ drv_data->dma_mapped = map_dma_buffers(drv_data);
428 ++ if (drv_data->dma_mapped) {
429 +
430 + /* Ensure we have the correct interrupt handler */
431 + drv_data->transfer_handler = dma_transfer;
432 +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
433 +index 27533b3..ed7b123 100644
434 +--- a/drivers/usb/core/hcd.c
435 ++++ b/drivers/usb/core/hcd.c
436 +@@ -1877,7 +1877,8 @@ int usb_add_hcd(struct usb_hcd *hcd,
437 + * with IRQF_SHARED. As usb_hcd_irq() will always disable
438 + * interrupts we can remove it here.
439 + */
440 +- irqflags &= ~IRQF_DISABLED;
441 ++ if (irqflags & IRQF_SHARED)
442 ++ irqflags &= ~IRQF_DISABLED;
443 +
444 + snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
445 + hcd->driver->description, hcd->self.busnum);
446 +diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
447 +index dd1bed8..85286fb 100644
448 +--- a/include/asm-generic/rtc.h
449 ++++ b/include/asm-generic/rtc.h
450 +@@ -17,6 +17,7 @@
451 + #include <linux/mc146818rtc.h>
452 + #include <linux/rtc.h>
453 + #include <linux/bcd.h>
454 ++#include <linux/delay.h>
455 +
456 + #define RTC_PIE 0x40 /* periodic interrupt enable */
457 + #define RTC_AIE 0x20 /* alarm interrupt enable */
458 +@@ -45,7 +46,6 @@ static inline unsigned char rtc_is_updating(void)
459 +
460 + static inline unsigned int get_rtc_time(struct rtc_time *time)
461 + {
462 +- unsigned long uip_watchdog = jiffies;
463 + unsigned char ctrl;
464 + unsigned long flags;
465 +
466 +@@ -55,19 +55,15 @@ static inline unsigned int get_rtc_time(struct rtc_time *time)
467 +
468 + /*
469 + * read RTC once any update in progress is done. The update
470 +- * can take just over 2ms. We wait 10 to 20ms. There is no need to
471 ++ * can take just over 2ms. We wait 20ms. There is no need to
472 + * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
473 + * If you need to know *exactly* when a second has started, enable
474 + * periodic update complete interrupts, (via ioctl) and then
475 + * immediately read /dev/rtc which will block until you get the IRQ.
476 + * Once the read clears, read the RTC time (again via ioctl). Easy.
477 + */
478 +-
479 +- if (rtc_is_updating() != 0)
480 +- while (jiffies - uip_watchdog < 2*HZ/100) {
481 +- barrier();
482 +- cpu_relax();
483 +- }
484 ++ if (rtc_is_updating())
485 ++ mdelay(20);
486 +
487 + /*
488 + * Only the values that we read from the RTC are set. We leave
489 +diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
490 +index c33b0dc..ed3a5d4 100644
491 +--- a/include/linux/clockchips.h
492 ++++ b/include/linux/clockchips.h
493 +@@ -127,6 +127,8 @@ extern int clockevents_register_notifier(struct notifier_block *nb);
494 + extern int clockevents_program_event(struct clock_event_device *dev,
495 + ktime_t expires, ktime_t now);
496 +
497 ++extern void clockevents_handle_noop(struct clock_event_device *dev);
498 ++
499 + #ifdef CONFIG_GENERIC_CLOCKEVENTS
500 + extern void clockevents_notify(unsigned long reason, void *arg);
501 + #else
502 +diff --git a/include/net/netlink.h b/include/net/netlink.h
503 +index 112dcdf..5383fdf 100644
504 +--- a/include/net/netlink.h
505 ++++ b/include/net/netlink.h
506 +@@ -704,7 +704,7 @@ static inline int nla_len(const struct nlattr *nla)
507 + */
508 + static inline int nla_ok(const struct nlattr *nla, int remaining)
509 + {
510 +- return remaining >= sizeof(*nla) &&
511 ++ return remaining >= (int) sizeof(*nla) &&
512 + nla->nla_len >= sizeof(*nla) &&
513 + nla->nla_len <= remaining;
514 + }
515 +diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
516 +index 3d1e3e1..1876b52 100644
517 +--- a/kernel/time/clockevents.c
518 ++++ b/kernel/time/clockevents.c
519 +@@ -177,7 +177,7 @@ void clockevents_register_device(struct clock_event_device *dev)
520 + /*
521 + * Noop handler when we shut down an event device
522 + */
523 +-static void clockevents_handle_noop(struct clock_event_device *dev)
524 ++void clockevents_handle_noop(struct clock_event_device *dev)
525 + {
526 + }
527 +
528 +@@ -199,7 +199,6 @@ void clockevents_exchange_device(struct clock_event_device *old,
529 + * released list and do a notify add later.
530 + */
531 + if (old) {
532 +- old->event_handler = clockevents_handle_noop;
533 + clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
534 + list_del(&old->list);
535 + list_add(&old->list, &clockevents_released);
536 +diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
537 +index 5fd9b94..a1dac40 100644
538 +--- a/kernel/time/ntp.c
539 ++++ b/kernel/time/ntp.c
540 +@@ -205,7 +205,7 @@ static void sync_cmos_clock(unsigned long dummy)
541 + if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
542 + fail = update_persistent_clock(now);
543 +
544 +- next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec;
545 ++ next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2);
546 + if (next.tv_nsec <= 0)
547 + next.tv_nsec += NSEC_PER_SEC;
548 +
549 +diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
550 +index e1bd50c..0edd345 100644
551 +--- a/kernel/time/tick-broadcast.c
552 ++++ b/kernel/time/tick-broadcast.c
553 +@@ -174,6 +174,8 @@ static void tick_do_periodic_broadcast(void)
554 + */
555 + static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
556 + {
557 ++ ktime_t next;
558 ++
559 + tick_do_periodic_broadcast();
560 +
561 + /*
562 +@@ -184,10 +186,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
563 +
564 + /*
565 + * Setup the next period for devices, which do not have
566 +- * periodic mode:
567 ++ * periodic mode. We read dev->next_event first and add to it
568 ++ * when the event alrady expired. clockevents_program_event()
569 ++ * sets dev->next_event only when the event is really
570 ++ * programmed to the device.
571 + */
572 +- for (;;) {
573 +- ktime_t next = ktime_add(dev->next_event, tick_period);
574 ++ for (next = dev->next_event; ;) {
575 ++ next = ktime_add(next, tick_period);
576 +
577 + if (!clockevents_program_event(dev, next, ktime_get()))
578 + return;
579 +@@ -204,7 +209,7 @@ static void tick_do_broadcast_on_off(void *why)
580 + struct clock_event_device *bc, *dev;
581 + struct tick_device *td;
582 + unsigned long flags, *reason = why;
583 +- int cpu;
584 ++ int cpu, bc_stopped;
585 +
586 + spin_lock_irqsave(&tick_broadcast_lock, flags);
587 +
588 +@@ -222,6 +227,8 @@ static void tick_do_broadcast_on_off(void *why)
589 + if (!tick_device_is_functional(dev))
590 + goto out;
591 +
592 ++ bc_stopped = cpus_empty(tick_broadcast_mask);
593 ++
594 + switch (*reason) {
595 + case CLOCK_EVT_NOTIFY_BROADCAST_ON:
596 + case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
597 +@@ -243,9 +250,10 @@ static void tick_do_broadcast_on_off(void *why)
598 + break;
599 + }
600 +
601 +- if (cpus_empty(tick_broadcast_mask))
602 +- clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
603 +- else {
604 ++ if (cpus_empty(tick_broadcast_mask)) {
605 ++ if (!bc_stopped)
606 ++ clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
607 ++ } else if (bc_stopped) {
608 + if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
609 + tick_broadcast_start_periodic(bc);
610 + else
611 +@@ -362,16 +370,8 @@ cpumask_t *tick_get_broadcast_oneshot_mask(void)
612 + static int tick_broadcast_set_event(ktime_t expires, int force)
613 + {
614 + struct clock_event_device *bc = tick_broadcast_device.evtdev;
615 +- ktime_t now = ktime_get();
616 +- int res;
617 +-
618 +- for(;;) {
619 +- res = clockevents_program_event(bc, expires, now);
620 +- if (!res || !force)
621 +- return res;
622 +- now = ktime_get();
623 +- expires = ktime_add(now, ktime_set(0, bc->min_delta_ns));
624 +- }
625 ++
626 ++ return tick_dev_program_event(bc, expires, force);
627 + }
628 +
629 + int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
630 +@@ -490,14 +490,52 @@ static void tick_broadcast_clear_oneshot(int cpu)
631 + cpu_clear(cpu, tick_broadcast_oneshot_mask);
632 + }
633 +
634 ++static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires)
635 ++{
636 ++ struct tick_device *td;
637 ++ int cpu;
638 ++
639 ++ for_each_cpu_mask(cpu, *mask) {
640 ++ td = &per_cpu(tick_cpu_device, cpu);
641 ++ if (td->evtdev)
642 ++ td->evtdev->next_event = expires;
643 ++ }
644 ++}
645 ++
646 + /**
647 + * tick_broadcast_setup_oneshot - setup the broadcast device
648 + */
649 + void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
650 + {
651 +- bc->event_handler = tick_handle_oneshot_broadcast;
652 +- clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
653 +- bc->next_event.tv64 = KTIME_MAX;
654 ++ /* Set it up only once ! */
655 ++ if (bc->event_handler != tick_handle_oneshot_broadcast) {
656 ++ int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
657 ++ int cpu = smp_processor_id();
658 ++ cpumask_t mask;
659 ++
660 ++ bc->event_handler = tick_handle_oneshot_broadcast;
661 ++ clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
662 ++
663 ++ /* Take the do_timer update */
664 ++ tick_do_timer_cpu = cpu;
665 ++
666 ++ /*
667 ++ * We must be careful here. There might be other CPUs
668 ++ * waiting for periodic broadcast. We need to set the
669 ++ * oneshot_mask bits for those and program the
670 ++ * broadcast device to fire.
671 ++ */
672 ++ mask = tick_broadcast_mask;
673 ++ cpu_clear(cpu, mask);
674 ++ cpus_or(tick_broadcast_oneshot_mask,
675 ++ tick_broadcast_oneshot_mask, mask);
676 ++
677 ++ if (was_periodic && !cpus_empty(mask)) {
678 ++ tick_broadcast_init_next_event(&mask, tick_next_period);
679 ++ tick_broadcast_set_event(tick_next_period, 1);
680 ++ } else
681 ++ bc->next_event.tv64 = KTIME_MAX;
682 ++ }
683 + }
684 +
685 + /*
686 +diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
687 +index 1bea399..d106d61 100644
688 +--- a/kernel/time/tick-common.c
689 ++++ b/kernel/time/tick-common.c
690 +@@ -159,6 +159,7 @@ static void tick_setup_device(struct tick_device *td,
691 + } else {
692 + handler = td->evtdev->event_handler;
693 + next_event = td->evtdev->next_event;
694 ++ td->evtdev->event_handler = clockevents_handle_noop;
695 + }
696 +
697 + td->evtdev = newdev;
698 +diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
699 +index f13f2b7..0ffc291 100644
700 +--- a/kernel/time/tick-internal.h
701 ++++ b/kernel/time/tick-internal.h
702 +@@ -17,6 +17,8 @@ extern void tick_handle_periodic(struct clock_event_device *dev);
703 + extern void tick_setup_oneshot(struct clock_event_device *newdev,
704 + void (*handler)(struct clock_event_device *),
705 + ktime_t nextevt);
706 ++extern int tick_dev_program_event(struct clock_event_device *dev,
707 ++ ktime_t expires, int force);
708 + extern int tick_program_event(ktime_t expires, int force);
709 + extern void tick_oneshot_notify(void);
710 + extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *));
711 +diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
712 +index 0258d31..0737da0 100644
713 +--- a/kernel/time/tick-oneshot.c
714 ++++ b/kernel/time/tick-oneshot.c
715 +@@ -23,24 +23,56 @@
716 + #include "tick-internal.h"
717 +
718 + /**
719 +- * tick_program_event
720 ++ * tick_program_event internal worker function
721 + */
722 +-int tick_program_event(ktime_t expires, int force)
723 ++int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
724 ++ int force)
725 + {
726 +- struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
727 + ktime_t now = ktime_get();
728 ++ int i;
729 +
730 +- while (1) {
731 ++ for (i = 0;;) {
732 + int ret = clockevents_program_event(dev, expires, now);
733 +
734 + if (!ret || !force)
735 + return ret;
736 ++
737 ++ /*
738 ++ * We tried 2 times to program the device with the given
739 ++ * min_delta_ns. If that's not working then we double it
740 ++ * and emit a warning.
741 ++ */
742 ++ if (++i > 2) {
743 ++ /* Increase the min. delta and try again */
744 ++ if (!dev->min_delta_ns)
745 ++ dev->min_delta_ns = 5000;
746 ++ else
747 ++ dev->min_delta_ns += dev->min_delta_ns >> 1;
748 ++
749 ++ printk(KERN_WARNING
750 ++ "CE: %s increasing min_delta_ns to %lu nsec\n",
751 ++ dev->name ? dev->name : "?",
752 ++ dev->min_delta_ns << 1);
753 ++
754 ++ i = 0;
755 ++ }
756 ++
757 + now = ktime_get();
758 +- expires = ktime_add(now, ktime_set(0, dev->min_delta_ns));
759 ++ expires = ktime_add_ns(now, dev->min_delta_ns);
760 + }
761 + }
762 +
763 + /**
764 ++ * tick_program_event
765 ++ */
766 ++int tick_program_event(ktime_t expires, int force)
767 ++{
768 ++ struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
769 ++
770 ++ return tick_dev_program_event(dev, expires, force);
771 ++}
772 ++
773 ++/**
774 + * tick_resume_onshot - resume oneshot mode
775 + */
776 + void tick_resume_oneshot(void)
777 +@@ -61,7 +93,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
778 + {
779 + newdev->event_handler = handler;
780 + clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT);
781 +- clockevents_program_event(newdev, next_event, ktime_get());
782 ++ tick_dev_program_event(newdev, next_event, 1);
783 + }
784 +
785 + /**
786 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
787 +index 9703c87..b924502 100644
788 +--- a/net/ipv4/udp.c
789 ++++ b/net/ipv4/udp.c
790 +@@ -956,6 +956,27 @@ int udp_disconnect(struct sock *sk, int flags)
791 + return 0;
792 + }
793 +
794 ++static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
795 ++{
796 ++ int is_udplite = IS_UDPLITE(sk);
797 ++ int rc;
798 ++
799 ++ if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
800 ++ /* Note that an ENOMEM error is charged twice */
801 ++ if (rc == -ENOMEM)
802 ++ UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS,
803 ++ is_udplite);
804 ++ goto drop;
805 ++ }
806 ++
807 ++ return 0;
808 ++
809 ++drop:
810 ++ UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
811 ++ kfree_skb(skb);
812 ++ return -1;
813 ++}
814 ++
815 + /* returns:
816 + * -1: error
817 + * 0: success
818 +@@ -1046,14 +1067,16 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
819 + goto drop;
820 + }
821 +
822 +- if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
823 +- /* Note that an ENOMEM error is charged twice */
824 +- if (rc == -ENOMEM)
825 +- UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite);
826 +- goto drop;
827 +- }
828 ++ rc = 0;
829 +
830 +- return 0;
831 ++ bh_lock_sock(sk);
832 ++ if (!sock_owned_by_user(sk))
833 ++ rc = __udp_queue_rcv_skb(sk, skb);
834 ++ else
835 ++ sk_add_backlog(sk, skb);
836 ++ bh_unlock_sock(sk);
837 ++
838 ++ return rc;
839 +
840 + drop:
841 + UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
842 +@@ -1091,15 +1114,7 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
843 + skb1 = skb_clone(skb, GFP_ATOMIC);
844 +
845 + if (skb1) {
846 +- int ret = 0;
847 +-
848 +- bh_lock_sock_nested(sk);
849 +- if (!sock_owned_by_user(sk))
850 +- ret = udp_queue_rcv_skb(sk, skb1);
851 +- else
852 +- sk_add_backlog(sk, skb1);
853 +- bh_unlock_sock(sk);
854 +-
855 ++ int ret = udp_queue_rcv_skb(sk, skb1);
856 + if (ret > 0)
857 + /* we should probably re-process instead
858 + * of dropping packets here. */
859 +@@ -1192,13 +1207,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
860 + uh->dest, inet_iif(skb), udptable);
861 +
862 + if (sk != NULL) {
863 +- int ret = 0;
864 +- bh_lock_sock_nested(sk);
865 +- if (!sock_owned_by_user(sk))
866 +- ret = udp_queue_rcv_skb(sk, skb);
867 +- else
868 +- sk_add_backlog(sk, skb);
869 +- bh_unlock_sock(sk);
870 ++ int ret = udp_queue_rcv_skb(sk, skb);
871 + sock_put(sk);
872 +
873 + /* a return value > 0 means to resubmit the input, but
874 +@@ -1493,7 +1502,7 @@ struct proto udp_prot = {
875 + .sendmsg = udp_sendmsg,
876 + .recvmsg = udp_recvmsg,
877 + .sendpage = udp_sendpage,
878 +- .backlog_rcv = udp_queue_rcv_skb,
879 ++ .backlog_rcv = __udp_queue_rcv_skb,
880 + .hash = udp_lib_hash,
881 + .unhash = udp_lib_unhash,
882 + .get_port = udp_v4_get_port,
883 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
884 +index fb1c192..d650e10 100644
885 +--- a/net/ipv6/ip6_output.c
886 ++++ b/net/ipv6/ip6_output.c
887 +@@ -930,39 +930,39 @@ static int ip6_dst_lookup_tail(struct sock *sk,
888 + }
889 +
890 + #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
891 +- /*
892 +- * Here if the dst entry we've looked up
893 +- * has a neighbour entry that is in the INCOMPLETE
894 +- * state and the src address from the flow is
895 +- * marked as OPTIMISTIC, we release the found
896 +- * dst entry and replace it instead with the
897 +- * dst entry of the nexthop router
898 +- */
899 +- if (!((*dst)->neighbour->nud_state & NUD_VALID)) {
900 +- struct inet6_ifaddr *ifp;
901 +- struct flowi fl_gw;
902 +- int redirect;
903 +-
904 +- ifp = ipv6_get_ifaddr(&init_net, &fl->fl6_src,
905 +- (*dst)->dev, 1);
906 +-
907 +- redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
908 +- if (ifp)
909 +- in6_ifa_put(ifp);
910 +-
911 +- if (redirect) {
912 +- /*
913 +- * We need to get the dst entry for the
914 +- * default router instead
915 +- */
916 +- dst_release(*dst);
917 +- memcpy(&fl_gw, fl, sizeof(struct flowi));
918 +- memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
919 +- *dst = ip6_route_output(sk, &fl_gw);
920 +- if ((err = (*dst)->error))
921 +- goto out_err_release;
922 +- }
923 ++ /*
924 ++ * Here if the dst entry we've looked up
925 ++ * has a neighbour entry that is in the INCOMPLETE
926 ++ * state and the src address from the flow is
927 ++ * marked as OPTIMISTIC, we release the found
928 ++ * dst entry and replace it instead with the
929 ++ * dst entry of the nexthop router
930 ++ */
931 ++ if ((*dst)->neighbour && !((*dst)->neighbour->nud_state & NUD_VALID)) {
932 ++ struct inet6_ifaddr *ifp;
933 ++ struct flowi fl_gw;
934 ++ int redirect;
935 ++
936 ++ ifp = ipv6_get_ifaddr(&init_net, &fl->fl6_src,
937 ++ (*dst)->dev, 1);
938 ++
939 ++ redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
940 ++ if (ifp)
941 ++ in6_ifa_put(ifp);
942 ++
943 ++ if (redirect) {
944 ++ /*
945 ++ * We need to get the dst entry for the
946 ++ * default router instead
947 ++ */
948 ++ dst_release(*dst);
949 ++ memcpy(&fl_gw, fl, sizeof(struct flowi));
950 ++ memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
951 ++ *dst = ip6_route_output(sk, &fl_gw);
952 ++ if ((err = (*dst)->error))
953 ++ goto out_err_release;
954 + }
955 ++ }
956 + #endif
957 +
958 + return 0;
959 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
960 +index 53739de..4e36c57 100644
961 +--- a/net/ipv6/udp.c
962 ++++ b/net/ipv6/udp.c
963 +@@ -373,7 +373,7 @@ static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr,
964 + uh->source, saddr, dif))) {
965 + struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
966 + if (buff) {
967 +- bh_lock_sock_nested(sk2);
968 ++ bh_lock_sock(sk2);
969 + if (!sock_owned_by_user(sk2))
970 + udpv6_queue_rcv_skb(sk2, buff);
971 + else
972 +@@ -381,7 +381,7 @@ static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr,
973 + bh_unlock_sock(sk2);
974 + }
975 + }
976 +- bh_lock_sock_nested(sk);
977 ++ bh_lock_sock(sk);
978 + if (!sock_owned_by_user(sk))
979 + udpv6_queue_rcv_skb(sk, skb);
980 + else
981 +@@ -499,7 +499,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
982 +
983 + /* deliver */
984 +
985 +- bh_lock_sock_nested(sk);
986 ++ bh_lock_sock(sk);
987 + if (!sock_owned_by_user(sk))
988 + udpv6_queue_rcv_skb(sk, skb);
989 + else
990 +diff --git a/net/sctp/associola.c b/net/sctp/associola.c
991 +index d29f792..1eefac2 100644
992 +--- a/net/sctp/associola.c
993 ++++ b/net/sctp/associola.c
994 +@@ -588,11 +588,12 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
995 + /* Check to see if this is a duplicate. */
996 + peer = sctp_assoc_lookup_paddr(asoc, addr);
997 + if (peer) {
998 ++ /* An UNKNOWN state is only set on transports added by
999 ++ * user in sctp_connectx() call. Such transports should be
1000 ++ * considered CONFIRMED per RFC 4960, Section 5.4.
1001 ++ */
1002 + if (peer->state == SCTP_UNKNOWN) {
1003 +- if (peer_state == SCTP_ACTIVE)
1004 +- peer->state = SCTP_ACTIVE;
1005 +- if (peer_state == SCTP_UNCONFIRMED)
1006 +- peer->state = SCTP_UNCONFIRMED;
1007 ++ peer->state = SCTP_ACTIVE;
1008 + }
1009 + return peer;
1010 + }
1011 +diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
1012 +index 36ebb39..d024fd0 100644
1013 +--- a/net/sctp/sm_make_chunk.c
1014 ++++ b/net/sctp/sm_make_chunk.c
1015 +@@ -1886,11 +1886,13 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
1016 + /* if the peer reports AUTH, assume that he
1017 + * supports AUTH.
1018 + */
1019 +- asoc->peer.auth_capable = 1;
1020 ++ if (sctp_auth_enable)
1021 ++ asoc->peer.auth_capable = 1;
1022 + break;
1023 + case SCTP_CID_ASCONF:
1024 + case SCTP_CID_ASCONF_ACK:
1025 +- asoc->peer.asconf_capable = 1;
1026 ++ if (sctp_addip_enable)
1027 ++ asoc->peer.asconf_capable = 1;
1028 + break;
1029 + default:
1030 + break;
1031 +@@ -2319,12 +2321,10 @@ clean_up:
1032 + /* Release the transport structures. */
1033 + list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1034 + transport = list_entry(pos, struct sctp_transport, transports);
1035 +- list_del_init(pos);
1036 +- sctp_transport_free(transport);
1037 ++ if (transport->state != SCTP_ACTIVE)
1038 ++ sctp_assoc_rm_peer(asoc, transport);
1039 + }
1040 +
1041 +- asoc->peer.transport_count = 0;
1042 +-
1043 + nomem:
1044 + return 0;
1045 + }
1046 +@@ -2454,6 +2454,9 @@ static int sctp_process_param(struct sctp_association *asoc,
1047 + break;
1048 +
1049 + case SCTP_PARAM_SET_PRIMARY:
1050 ++ if (!sctp_addip_enable)
1051 ++ goto fall_through;
1052 ++
1053 + addr_param = param.v + sizeof(sctp_addip_param_t);
1054 +
1055 + af = sctp_get_af_specific(param_type2af(param.p->type));