From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1343 - genpatches-2.6/trunk/2.6.26
Date: Mon, 08 Sep 2008 12:17:17
Message-Id: E1Kcfg7-0006wV-RF@stork.gentoo.org
Author: mpagano
Date: 2008-09-08 12:17:10 +0000 (Mon, 08 Sep 2008)
New Revision: 1343

Added:
genpatches-2.6/trunk/2.6.26/1003_linux-2.6.26.4.patch
Modified:
genpatches-2.6/trunk/2.6.26/0000_README
Log:
Adding linux 2.6.26.4 patch

Modified: genpatches-2.6/trunk/2.6.26/0000_README
===================================================================
--- genpatches-2.6/trunk/2.6.26/0000_README 2008-08-21 16:24:10 UTC (rev 1342)
+++ genpatches-2.6/trunk/2.6.26/0000_README 2008-09-08 12:17:10 UTC (rev 1343)
@@ -51,6 +51,10 @@
From: http://www.kernel.org
Desc: Linux 2.6.26.3

+Patch: 1003_linux-2.6.26.4.patch
+From: http://www.kernel.org
+Desc: Linux 2.6.26.4
+
Patch: 1900_UTC-timestamp-option.patch
From: http://bugs.gentoo.org/233307
Desc: Fix to add UTC timestamp option

Added: genpatches-2.6/trunk/2.6.26/1003_linux-2.6.26.4.patch
===================================================================
--- genpatches-2.6/trunk/2.6.26/1003_linux-2.6.26.4.patch (rev 0)
+++ genpatches-2.6/trunk/2.6.26/1003_linux-2.6.26.4.patch 2008-09-08 12:17:10 UTC (rev 1343)
@@ -0,0 +1,1952 @@
+diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
+index e289a98..387d3f6 100644
+--- a/arch/sparc64/mm/init.c
++++ b/arch/sparc64/mm/init.c
+@@ -842,6 +842,9 @@ static unsigned long nid_range(unsigned long start, unsigned long end,
+ start += PAGE_SIZE;
+ }
+
++ if (start > end)
++ start = end;
++
+ return start;
+ }
+ #else
+@@ -1769,8 +1772,7 @@ void __init paging_init(void)
+
+ find_ramdisk(phys_base);
+
+- if (cmdline_memory_size)
+- lmb_enforce_memory_limit(phys_base + cmdline_memory_size);
++ lmb_enforce_memory_limit(cmdline_memory_size);
+
+ lmb_analyze();
+ lmb_dump_all();
+@@ -2007,6 +2009,15 @@ void __init mem_init(void)
+ void free_initmem(void)
+ {
+ unsigned long addr, initend;
++ int do_free = 1;
++
++ /* If the physical memory maps were trimmed by kernel command
++ * line options, don't even try freeing this initmem stuff up.
++ * The kernel image could have been in the trimmed out region
++ * and if so the freeing below will free invalid page structs.
++ */
++ if (cmdline_memory_size)
++ do_free = 0;
+
+ /*
+ * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
+@@ -2021,13 +2032,16 @@ void free_initmem(void)
+ ((unsigned long) __va(kern_base)) -
+ ((unsigned long) KERNBASE));
+ memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
+- p = virt_to_page(page);
+
+- ClearPageReserved(p);
+- init_page_count(p);
+- __free_page(p);
+- num_physpages++;
+- totalram_pages++;
++ if (do_free) {
++ p = virt_to_page(page);
++
++ ClearPageReserved(p);
++ init_page_count(p);
++ __free_page(p);
++ num_physpages++;
++ totalram_pages++;
++ }
+ }
+ }
+
+diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
+index 3fd7a67..e710a21 100644
+--- a/arch/x86/kernel/cpu/cyrix.c
++++ b/arch/x86/kernel/cpu/cyrix.c
+@@ -134,23 +134,6 @@ static void __cpuinit set_cx86_memwb(void)
+ setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
+ }
+
+-static void __cpuinit set_cx86_inc(void)
+-{
+- unsigned char ccr3;
+-
+- printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");
+-
+- ccr3 = getCx86(CX86_CCR3);
+- setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
+- /* PCR1 -- Performance Control */
+- /* Incrementor on, whatever that is */
+- setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
+- /* PCR0 -- Performance Control */
+- /* Incrementor Margin 10 */
+- setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04);
+- setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
+-}
+-
+ /*
+ * Configure later MediaGX and/or Geode processor.
+ */
+@@ -174,7 +157,6 @@ static void __cpuinit geode_configure(void)
+
+ set_cx86_memwb();
+ set_cx86_reorder();
+- set_cx86_inc();
+
+ local_irq_restore(flags);
+ }
+diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
+index 75b14b1..745b974 100644
+--- a/arch/x86/kernel/cpu/mtrr/generic.c
++++ b/arch/x86/kernel/cpu/mtrr/generic.c
+@@ -365,6 +365,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
+ unsigned long *size, mtrr_type *type)
+ {
+ unsigned int mask_lo, mask_hi, base_lo, base_hi;
++ unsigned int tmp, hi;
+
+ rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
+ if ((mask_lo & 0x800) == 0) {
+@@ -378,8 +379,23 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
+ rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
+
+ /* Work out the shifted address mask. */
+- mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
+- | mask_lo >> PAGE_SHIFT;
++ tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
++ mask_lo = size_or_mask | tmp;
++ /* Expand tmp with high bits to all 1s*/
++ hi = fls(tmp);
++ if (hi > 0) {
++ tmp |= ~((1<<(hi - 1)) - 1);
++
++ if (tmp != mask_lo) {
++ static int once = 1;
++
++ if (once) {
++ printk(KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
++ once = 0;
++ }
++ mask_lo = tmp;
++ }
++ }
+
+ /* This works correctly if size is a power of two, i.e. a
+ contiguous range. */
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index 934c7b6..d333a74 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -343,7 +343,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+ shadow_addr = __pa(shadow_page->spt);
+ shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
+ | PT_WRITABLE_MASK | PT_USER_MASK;
+- *shadow_ent = shadow_pte;
++ set_shadow_pte(shadow_ent, shadow_pte);
+ }
+
+ mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
+diff --git a/crypto/authenc.c b/crypto/authenc.c
+index 4b22676..fd9f06c 100644
+--- a/crypto/authenc.c
++++ b/crypto/authenc.c
+@@ -174,8 +174,9 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
+ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
+ int err)
+ {
++ struct aead_request *areq = req->data;
++
+ if (!err) {
+- struct aead_request *areq = req->data;
+ struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct ablkcipher_request *abreq = aead_request_ctx(areq);
+@@ -185,7 +186,7 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
+ err = crypto_authenc_genicv(areq, iv, 0);
+ }
+
+- aead_request_complete(req->data, err);
++ aead_request_complete(areq, err);
+ }
+
+ static int crypto_authenc_encrypt(struct aead_request *req)
+@@ -216,14 +217,15 @@ static int crypto_authenc_encrypt(struct aead_request *req)
+ static void crypto_authenc_givencrypt_done(struct crypto_async_request *req,
+ int err)
+ {
++ struct aead_request *areq = req->data;
++
+ if (!err) {
+- struct aead_request *areq = req->data;
+ struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
+
+ err = crypto_authenc_genicv(areq, greq->giv, 0);
+ }
+
+- aead_request_complete(req->data, err);
++ aead_request_complete(areq, err);
+ }
+
+ static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index ad169ff..80c655f 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -1134,30 +1134,16 @@ static int mv_qc_defer(struct ata_queued_cmd *qc)
+ if (ap->nr_active_links == 0)
+ return 0;
+
+- if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
+- /*
+- * The port is operating in host queuing mode (EDMA).
+- * It can accomodate a new qc if the qc protocol
+- * is compatible with the current host queue mode.
+- */
+- if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
+- /*
+- * The host queue (EDMA) is in NCQ mode.
+- * If the new qc is also an NCQ command,
+- * then allow the new qc.
+- */
+- if (qc->tf.protocol == ATA_PROT_NCQ)
+- return 0;
+- } else {
+- /*
+- * The host queue (EDMA) is in non-NCQ, DMA mode.
+- * If the new qc is also a non-NCQ, DMA command,
+- * then allow the new qc.
+- */
+- if (qc->tf.protocol == ATA_PROT_DMA)
+- return 0;
+- }
+- }
++ /*
++ * The port is operating in host queuing mode (EDMA) with NCQ
++ * enabled, allow multiple NCQ commands. EDMA also allows
++ * queueing multiple DMA commands but libata core currently
++ * doesn't allow it.
++ */
++ if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
++ (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
++ return 0;
++
+ return ATA_DEFER_PORT;
+ }
+
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 0cf98bd..71320d2 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -406,7 +406,7 @@ struct entropy_store {
+ /* read-write data: */
+ spinlock_t lock;
+ unsigned add_ptr;
+- int entropy_count;
++ int entropy_count; /* Must at no time exceed ->POOLBITS! */
+ int input_rotate;
+ };
+
+@@ -519,6 +519,7 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
+ static void credit_entropy_bits(struct entropy_store *r, int nbits)
+ {
+ unsigned long flags;
++ int entropy_count;
+
+ if (!nbits)
+ return;
+@@ -526,20 +527,20 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
+ spin_lock_irqsave(&r->lock, flags);
+
+ DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
+- r->entropy_count += nbits;
+- if (r->entropy_count < 0) {
++ entropy_count = r->entropy_count;
++ entropy_count += nbits;
++ if (entropy_count < 0) {
+ DEBUG_ENT("negative entropy/overflow\n");
+- r->entropy_count = 0;
+- } else if (r->entropy_count > r->poolinfo->POOLBITS)
+- r->entropy_count = r->poolinfo->POOLBITS;
++ entropy_count = 0;
++ } else if (entropy_count > r->poolinfo->POOLBITS)
++ entropy_count = r->poolinfo->POOLBITS;
++ r->entropy_count = entropy_count;
+
+ /* should we wake readers? */
+- if (r == &input_pool &&
+- r->entropy_count >= random_read_wakeup_thresh) {
++ if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
+ wake_up_interruptible(&random_read_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ }
+-
+ spin_unlock_irqrestore(&r->lock, flags);
+ }
+
+diff --git a/drivers/misc/eeepc-laptop.c b/drivers/misc/eeepc-laptop.c
+index 6d72760..3f3abf9 100644
+--- a/drivers/misc/eeepc-laptop.c
++++ b/drivers/misc/eeepc-laptop.c
+@@ -553,9 +553,9 @@ static void eeepc_hwmon_exit(void)
+ hwmon = eeepc_hwmon_device;
+ if (!hwmon)
+ return ;
+- hwmon_device_unregister(hwmon);
+ sysfs_remove_group(&hwmon->kobj,
+ &hwmon_attribute_group);
++ hwmon_device_unregister(hwmon);
+ eeepc_hwmon_device = NULL;
+ }
+
+diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
+index 3c798ae..8fe0a49 100644
+--- a/drivers/net/atlx/atl1.c
++++ b/drivers/net/atlx/atl1.c
+@@ -3019,7 +3019,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
+ netdev->features = NETIF_F_HW_CSUM;
+ netdev->features |= NETIF_F_SG;
+ netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
+- netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_LLTX;
+
+ /*
+diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
+index 20d4fe9..1652f10 100644
+--- a/drivers/net/forcedeth.c
++++ b/drivers/net/forcedeth.c
+@@ -5420,7 +5420,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
+ if (id->driver_data & DEV_HAS_CHECKSUM) {
+ np->rx_csum = 1;
+ np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
+- dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
++ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+ dev->features |= NETIF_F_TSO;
+ }
+
+@@ -5728,7 +5728,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
+
+ dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
+ dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
+- dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ?
++ dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
+ "csum " : "",
+ dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
+ "vlan " : "",
+diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
+index 42d7c0a..0e4eb15 100644
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -2822,7 +2822,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
+ pkt_size, PCI_DMA_FROMDEVICE);
+ rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
+ } else {
+- pci_unmap_single(pdev, addr, pkt_size,
++ pci_unmap_single(pdev, addr, tp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ tp->Rx_skbuff[entry] = NULL;
+ }
+diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
+index cc4bde8..1710e49 100644
+--- a/drivers/net/tg3.c
++++ b/drivers/net/tg3.c
+@@ -1672,15 +1672,43 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
+ }
+
+ /* tp->lock is held. */
++static inline void tg3_generate_fw_event(struct tg3 *tp)
++{
++ u32 val;
++
++ val = tr32(GRC_RX_CPU_EVENT);
++ val |= GRC_RX_CPU_DRIVER_EVENT;
++ tw32_f(GRC_RX_CPU_EVENT, val);
++
++ tp->last_event_jiffies = jiffies;
++}
++
++#define TG3_FW_EVENT_TIMEOUT_USEC 2500
++
++/* tp->lock is held. */
+ static void tg3_wait_for_event_ack(struct tg3 *tp)
+ {
+ int i;
++ unsigned int delay_cnt;
++ long time_remain;
++
++ /* If enough time has passed, no wait is necessary. */
++ time_remain = (long)(tp->last_event_jiffies + 1 +
++ usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
++ (long)jiffies;
++ if (time_remain < 0)
++ return;
+
+- /* Wait for up to 2.5 milliseconds */
+- for (i = 0; i < 250000; i++) {
++ /* Check if we can shorten the wait time. */
++ delay_cnt = jiffies_to_usecs(time_remain);
++ if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
++ delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
++ delay_cnt = (delay_cnt >> 3) + 1;
++
++ for (i = 0; i < delay_cnt; i++) {
+ if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
+ break;
+- udelay(10);
++ udelay(8);
+ }
+ }
+
+@@ -1729,9 +1757,7 @@ static void tg3_ump_link_report(struct tg3 *tp)
+ val = 0;
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
+
+- val = tr32(GRC_RX_CPU_EVENT);
+- val |= GRC_RX_CPU_DRIVER_EVENT;
+- tw32_f(GRC_RX_CPU_EVENT, val);
++ tg3_generate_fw_event(tp);
+ }
+
+ static void tg3_link_report(struct tg3 *tp)
+@@ -5565,6 +5591,7 @@ static int tg3_chip_reset(struct tg3 *tp)
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
+ if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
+ tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
++ tp->last_event_jiffies = jiffies;
+ if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
+ tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
+ }
+@@ -5578,15 +5605,12 @@ static void tg3_stop_fw(struct tg3 *tp)
+ {
+ if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+ !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
+- u32 val;
+-
+ /* Wait for RX cpu to ACK the previous event. */
+ tg3_wait_for_event_ack(tp);
+
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
+- val = tr32(GRC_RX_CPU_EVENT);
+- val |= GRC_RX_CPU_DRIVER_EVENT;
+- tw32(GRC_RX_CPU_EVENT, val);
++
++ tg3_generate_fw_event(tp);
+
+ /* Wait for RX cpu to ACK this event. */
+ tg3_wait_for_event_ack(tp);
+@@ -7477,8 +7501,6 @@ static void tg3_timer(unsigned long __opaque)
+ */
+ if (!--tp->asf_counter) {
+ if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
+- u32 val;
+-
+ tg3_wait_for_event_ack(tp);
+
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
+@@ -7486,9 +7508,8 @@ static void tg3_timer(unsigned long __opaque)
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
+ /* 5 seconds timeout */
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
+- val = tr32(GRC_RX_CPU_EVENT);
+- val |= GRC_RX_CPU_DRIVER_EVENT;
+- tw32_f(GRC_RX_CPU_EVENT, val);
++
++ tg3_generate_fw_event(tp);
+ }
+ tp->asf_counter = tp->asf_multiplier;
+ }
+diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
+index 0404f93..d68b579 100644
+--- a/drivers/net/tg3.h
++++ b/drivers/net/tg3.h
+@@ -2404,7 +2404,10 @@ struct tg3 {
+ struct tg3_ethtool_stats estats;
+ struct tg3_ethtool_stats estats_prev;
+
++ union {
+ unsigned long phy_crc_errors;
++ unsigned long last_event_jiffies;
++ };
+
+ u32 rx_offset;
+ u32 tg3_flags;
+diff --git a/drivers/pci/search.c b/drivers/pci/search.c
+index 217814f..3b3b5f1 100644
+--- a/drivers/pci/search.c
++++ b/drivers/pci/search.c
+@@ -280,6 +280,8 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
+ match_pci_dev_by_id);
+ if (dev)
+ pdev = to_pci_dev(dev);
++ if (from)
++ pci_dev_put(from);
+ return pdev;
+ }
+
+diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
+index 9f996ec..dd70bf7 100644
+--- a/drivers/rtc/rtc-lib.c
++++ b/drivers/rtc/rtc-lib.c
+@@ -51,10 +51,11 @@ EXPORT_SYMBOL(rtc_year_days);
+ */
+ void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
+ {
+- unsigned int days, month, year;
++ unsigned int month, year;
++ int days;
+
+ days = time / 86400;
+- time -= days * 86400;
++ time -= (unsigned int) days * 86400;
+
+ /* day of the week, 1970-01-01 was a Thursday */
+ tm->tm_wday = (days + 4) % 7;
+diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
+index fc2509c..a466820 100644
+--- a/drivers/s390/block/dasd_eckd.h
++++ b/drivers/s390/block/dasd_eckd.h
+@@ -379,7 +379,7 @@ struct dasd_psf_prssd_data {
+ unsigned char flags;
+ unsigned char reserved[4];
+ unsigned char suborder;
+- unsigned char varies[9];
++ unsigned char varies[5];
+ } __attribute__ ((packed));
+
+ /*
+diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
+index be95e55..4050845 100644
+--- a/drivers/serial/8250.c
++++ b/drivers/serial/8250.c
+@@ -1895,15 +1895,23 @@ static int serial8250_startup(struct uart_port *port)
+ * kick the UART on a regular basis.
+ */
+ if (!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) {
++ up->bugs |= UART_BUG_THRE;
+ pr_debug("ttyS%d - using backup timer\n", port->line);
+- up->timer.function = serial8250_backup_timeout;
+- up->timer.data = (unsigned long)up;
+- mod_timer(&up->timer, jiffies +
+- poll_timeout(up->port.timeout) + HZ / 5);
+ }
+ }
+
+ /*
++ * The above check will only give an accurate result the first time
++ * the port is opened so this value needs to be preserved.
++ */
++ if (up->bugs & UART_BUG_THRE) {
++ up->timer.function = serial8250_backup_timeout;
++ up->timer.data = (unsigned long)up;
++ mod_timer(&up->timer, jiffies +
++ poll_timeout(up->port.timeout) + HZ / 5);
++ }
++
++ /*
+ * If the "interrupt" for this port doesn't correspond with any
+ * hardware interrupt, we use a timer-based system. The original
+ * driver used to do this with IRQ0.
+diff --git a/drivers/serial/8250.h b/drivers/serial/8250.h
+index 91bd28f..245288d 100644
+--- a/drivers/serial/8250.h
++++ b/drivers/serial/8250.h
+@@ -49,6 +49,7 @@ struct serial8250_config {
+ #define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */
+ #define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */
+ #define UART_BUG_NOMSR (1 << 2) /* UART has buggy MSR status bits (Au1x00) */
++#define UART_BUG_THRE (1 << 3) /* UART has buggy THRE reassertion */
+
+ #define PROBE_RSA (1 << 0)
+ #define PROBE_ANY (~0)
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index c3201af..560337a 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -525,8 +525,8 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
+ tasklet_schedule(&acm->urb_task);
+
+ done:
+-err_out:
+ mutex_unlock(&acm->mutex);
++err_out:
+ mutex_unlock(&open_mutex);
+ return rv;
+
+diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
+index 59df132..4835bdc 100644
+--- a/drivers/video/fb_defio.c
++++ b/drivers/video/fb_defio.c
+@@ -114,6 +114,17 @@ static struct vm_operations_struct fb_deferred_io_vm_ops = {
+ .page_mkwrite = fb_deferred_io_mkwrite,
+ };
+
++static int fb_deferred_io_set_page_dirty(struct page *page)
++{
++ if (!PageDirty(page))
++ SetPageDirty(page);
++ return 0;
++}
++
++static const struct address_space_operations fb_deferred_io_aops = {
++ .set_page_dirty = fb_deferred_io_set_page_dirty,
++};
++
+ static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
+ {
+ vma->vm_ops = &fb_deferred_io_vm_ops;
+@@ -163,6 +174,14 @@ void fb_deferred_io_init(struct fb_info *info)
+ }
+ EXPORT_SYMBOL_GPL(fb_deferred_io_init);
+
++void fb_deferred_io_open(struct fb_info *info,
++ struct inode *inode,
++ struct file *file)
++{
++ file->f_mapping->a_ops = &fb_deferred_io_aops;
++}
++EXPORT_SYMBOL_GPL(fb_deferred_io_open);
++
+ void fb_deferred_io_cleanup(struct fb_info *info)
+ {
+ void *screen_base = (void __force *) info->screen_base;
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index 776f7fc..ce6b5da 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1340,6 +1340,10 @@ fb_open(struct inode *inode, struct file *file)
+ if (res)
+ module_put(info->fbops->owner);
+ }
++#ifdef CONFIG_FB_DEFERRED_IO
++ if (info->fbdefio)
++ fb_deferred_io_open(info, inode, file);
++#endif
+ return res;
+ }
+
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index 7191306..a0a7157 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -119,8 +119,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ if (bprm->misc_bang)
+ goto _ret;
+
+- bprm->misc_bang = 1;
+-
+ /* to keep locking time low, we copy the interpreter string */
+ read_lock(&entries_lock);
+ fmt = check_file(bprm);
+@@ -198,6 +196,8 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ if (retval < 0)
+ goto _error;
+
++ bprm->misc_bang = 1;
++
+ retval = search_binary_handler (bprm, regs);
+ if (retval < 0)
+ goto _error;
+diff --git a/fs/bio.c b/fs/bio.c
+index 7856257..7db618c 100644
+--- a/fs/bio.c
++++ b/fs/bio.c
+@@ -464,20 +464,21 @@ static void bio_free_map_data(struct bio_map_data *bmd)
+ kfree(bmd);
+ }
+
+-static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count)
++static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
++ gfp_t gfp_mask)
+ {
+- struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);
++ struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);
+
+ if (!bmd)
+ return NULL;
+
+- bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
++ bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
+ if (!bmd->iovecs) {
+ kfree(bmd);
+ return NULL;
+ }
+
+- bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, GFP_KERNEL);
++ bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
+ if (bmd->sgvecs)
+ return bmd;
+
+@@ -486,8 +487,8 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count)
+ return NULL;
+ }
+
+-static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
+- int uncopy)
++static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
++ struct sg_iovec *iov, int iov_count, int uncopy)
+ {
+ int ret = 0, i;
+ struct bio_vec *bvec;
+@@ -497,7 +498,7 @@ static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
+
+ __bio_for_each_segment(bvec, bio, i, 0) {
+ char *bv_addr = page_address(bvec->bv_page);
+- unsigned int bv_len = bvec->bv_len;
++ unsigned int bv_len = iovecs[i].bv_len;
+
+ while (bv_len && iov_idx < iov_count) {
+ unsigned int bytes;
+@@ -549,7 +550,7 @@ int bio_uncopy_user(struct bio *bio)
+ struct bio_map_data *bmd = bio->bi_private;
+ int ret;
+
+- ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs, 1);
++ ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, bmd->nr_sgvecs, 1);
+
+ bio_free_map_data(bmd);
+ bio_put(bio);
+@@ -591,7 +592,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
+ len += iov[i].iov_len;
+ }
+
+- bmd = bio_alloc_map_data(nr_pages, iov_count);
++ bmd = bio_alloc_map_data(nr_pages, iov_count, GFP_KERNEL);
+ if (!bmd)
+ return ERR_PTR(-ENOMEM);
+
+@@ -628,7 +629,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
+ * success
+ */
+ if (!write_to_vm) {
+- ret = __bio_copy_iov(bio, iov, iov_count, 0);
++ ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0);
+ if (ret)
+ goto cleanup;
+ }
+@@ -941,19 +942,22 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
+ {
+ struct bio_vec *bvec;
+ const int read = bio_data_dir(bio) == READ;
+- char *p = bio->bi_private;
++ struct bio_map_data *bmd = bio->bi_private;
+ int i;
++ char *p = bmd->sgvecs[0].iov_base;
+
+ __bio_for_each_segment(bvec, bio, i, 0) {
+ char *addr = page_address(bvec->bv_page);
++ int len = bmd->iovecs[i].bv_len;
+
+ if (read && !err)
+- memcpy(p, addr, bvec->bv_len);
++ memcpy(p, addr, len);
+
+ __free_page(bvec->bv_page);
+- p += bvec->bv_len;
++ p += len;
+ }
+
++ bio_free_map_data(bmd);
+ bio_put(bio);
+ }
+
+@@ -977,11 +981,21 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
+ const int nr_pages = end - start;
+ struct bio *bio;
+ struct bio_vec *bvec;
++ struct bio_map_data *bmd;
+ int i, ret;
++ struct sg_iovec iov;
++
++ iov.iov_base = data;
++ iov.iov_len = len;
++
++ bmd = bio_alloc_map_data(nr_pages, 1, gfp_mask);
++ if (!bmd)
++ return ERR_PTR(-ENOMEM);
+
++ ret = -ENOMEM;
+ bio = bio_alloc(gfp_mask, nr_pages);
+ if (!bio)
+- return ERR_PTR(-ENOMEM);
++ goto out_bmd;
+
+ while (len) {
+ struct page *page;
+@@ -1015,14 +1029,18 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
+ }
+ }
+
+- bio->bi_private = data;
++ bio->bi_private = bmd;
+ bio->bi_end_io = bio_copy_kern_endio;
++
++ bio_set_map_data(bmd, bio, &iov, 1);
+ return bio;
+ cleanup:
+ bio_for_each_segment(bvec, bio, i)
+ __free_page(bvec->bv_page);
+
+ bio_put(bio);
++out_bmd:
++ bio_free_map_data(bmd);
+
+ return ERR_PTR(ret);
+ }
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 0aac824..8da903b 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -832,6 +832,10 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
+ return -EBADF;
+ open_file = (struct cifsFileInfo *) file->private_data;
+
++ rc = generic_write_checks(file, poffset, &write_size, 0);
++ if (rc)
++ return rc;
++
+ xid = GetXid();
+
+ if (*poffset > file->f_path.dentry->d_inode->i_size)
+diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
+index 0c3b618..f40423e 100644
+--- a/fs/cramfs/inode.c
++++ b/fs/cramfs/inode.c
+@@ -43,58 +43,13 @@ static DEFINE_MUTEX(read_mutex);
+ static int cramfs_iget5_test(struct inode *inode, void *opaque)
+ {
+ struct cramfs_inode *cramfs_inode = opaque;
+-
+- if (inode->i_ino != CRAMINO(cramfs_inode))
+- return 0; /* does not match */
+-
+- if (inode->i_ino != 1)
+- return 1;
+-
+- /* all empty directories, char, block, pipe, and sock, share inode #1 */
+-
+- if ((inode->i_mode != cramfs_inode->mode) ||
+- (inode->i_gid != cramfs_inode->gid) ||
+- (inode->i_uid != cramfs_inode->uid))
+- return 0; /* does not match */
+-
+- if ((S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) &&
+- (inode->i_rdev != old_decode_dev(cramfs_inode->size)))
+- return 0; /* does not match */
+-
+- return 1; /* matches */
++ return inode->i_ino == CRAMINO(cramfs_inode) && inode->i_ino != 1;
+ }
+
+ static int cramfs_iget5_set(struct inode *inode, void *opaque)
+ {
+- static struct timespec zerotime;
+ struct cramfs_inode *cramfs_inode = opaque;
+- inode->i_mode = cramfs_inode->mode;
+- inode->i_uid = cramfs_inode->uid;
+- inode->i_size = cramfs_inode->size;
+- inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
+- inode->i_gid = cramfs_inode->gid;
+- /* Struct copy intentional */
+- inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
+ inode->i_ino = CRAMINO(cramfs_inode);
+- /* inode->i_nlink is left 1 - arguably wrong for directories,
+- but it's the best we can do without reading the directory
+- contents. 1 yields the right result in GNU find, even
+- without -noleaf option. */
+- if (S_ISREG(inode->i_mode)) {
+- inode->i_fop = &generic_ro_fops;
+- inode->i_data.a_ops = &cramfs_aops;
+- } else if (S_ISDIR(inode->i_mode)) {
+- inode->i_op = &cramfs_dir_inode_operations;
+- inode->i_fop = &cramfs_directory_operations;
+- } else if (S_ISLNK(inode->i_mode)) {
+- inode->i_op = &page_symlink_inode_operations;
+- inode->i_data.a_ops = &cramfs_aops;
+- } else {
+- inode->i_size = 0;
+- inode->i_blocks = 0;
+- init_special_inode(inode, inode->i_mode,
+- old_decode_dev(cramfs_inode->size));
+- }
+ return 0;
+ }
+
+@@ -104,12 +59,48 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
+ struct inode *inode = iget5_locked(sb, CRAMINO(cramfs_inode),
+ cramfs_iget5_test, cramfs_iget5_set,
+ cramfs_inode);
++ static struct timespec zerotime;
++
+ if (inode && (inode->i_state & I_NEW)) {
++ inode->i_mode = cramfs_inode->mode;
++ inode->i_uid = cramfs_inode->uid;
++ inode->i_size = cramfs_inode->size;
++ inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
++ inode->i_gid = cramfs_inode->gid;
++ /* Struct copy intentional */
++ inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
++ /* inode->i_nlink is left 1 - arguably wrong for directories,
++ but it's the best we can do without reading the directory
++ contents. 1 yields the right result in GNU find, even
++ without -noleaf option. */
++ if (S_ISREG(inode->i_mode)) {
++ inode->i_fop = &generic_ro_fops;
++ inode->i_data.a_ops = &cramfs_aops;
++ } else if (S_ISDIR(inode->i_mode)) {
++ inode->i_op = &cramfs_dir_inode_operations;
++ inode->i_fop = &cramfs_directory_operations;
++ } else if (S_ISLNK(inode->i_mode)) {
++ inode->i_op = &page_symlink_inode_operations;
++ inode->i_data.a_ops = &cramfs_aops;
++ } else {
++ inode->i_size = 0;
++ inode->i_blocks = 0;
++ init_special_inode(inode, inode->i_mode,
++ old_decode_dev(cramfs_inode->size));
++ }
+ unlock_new_inode(inode);
+ }
+ return inode;
+ }
+
++static void cramfs_drop_inode(struct inode *inode)
++{
++ if (inode->i_ino == 1)
++ generic_delete_inode(inode);
++ else
++ generic_drop_inode(inode);
++}
++
+ /*
+ * We have our own block cache: don't fill up the buffer cache
+ * with the rom-image, because the way the filesystem is set
+@@ -534,6 +525,7 @@ static const struct super_operations cramfs_ops = {
+ .put_super = cramfs_put_super,
+ .remount_fs = cramfs_remount,
+ .statfs = cramfs_statfs,
++ .drop_inode = cramfs_drop_inode,
+ };
+
+ static int cramfs_get_sb(struct file_system_type *fs_type,
+diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
+index b6ed383..54b8b41 100644
+--- a/fs/nfsd/nfs4acl.c
++++ b/fs/nfsd/nfs4acl.c
+@@ -443,7 +443,7 @@ init_state(struct posix_acl_state *state, int cnt)
+ * enough space for either:
+ */
+ alloc = sizeof(struct posix_ace_state_array)
+- + cnt*sizeof(struct posix_ace_state);
++ + cnt*sizeof(struct posix_user_ace_state);
+ state->users = kzalloc(alloc, GFP_KERNEL);
+ if (!state->users)
+ return -ENOMEM;
+diff --git a/include/linux/Kbuild b/include/linux/Kbuild
+index 71d70d1..27af0b8 100644
+--- a/include/linux/Kbuild
++++ b/include/linux/Kbuild
+@@ -293,7 +293,6 @@ unifdef-y += parport.h
+ unifdef-y += patchkey.h
+ unifdef-y += pci.h
+ unifdef-y += personality.h
+-unifdef-y += pim.h
+ unifdef-y += pktcdvd.h
+ unifdef-y += pmu.h
+ unifdef-y += poll.h
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index 72295b0..dd82c76 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -973,6 +973,9 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
+
+ /* drivers/video/fb_defio.c */
+ extern void fb_deferred_io_init(struct fb_info *info);
++extern void fb_deferred_io_open(struct fb_info *info,
++ struct inode *inode,
++ struct file *file);
+ extern void fb_deferred_io_cleanup(struct fb_info *info);
+ extern int fb_deferred_io_fsync(struct file *file, struct dentry *dentry,
+ int datasync);
+diff --git a/include/linux/mroute.h b/include/linux/mroute.h
+index de4decf..35a8277 100644
+--- a/include/linux/mroute.h
++++ b/include/linux/mroute.h
+@@ -2,11 +2,7 @@
+ #define __LINUX_MROUTE_H
+
+ #include <linux/sockios.h>
+-#include <linux/types.h>
+-#ifdef __KERNEL__
+ #include <linux/in.h>
+-#endif
+-#include <linux/pim.h>
+
+ /*
+ * Based on the MROUTING 3.5 defines primarily to keep
+@@ -214,6 +210,27 @@ struct mfc_cache
+ #define IGMPMSG_WHOLEPKT 3 /* For PIM Register processing */
+
+ #ifdef __KERNEL__
++
++#define PIM_V1_VERSION __constant_htonl(0x10000000)
++#define PIM_V1_REGISTER 1
++
++#define PIM_VERSION 2
++#define PIM_REGISTER 1
++
++#define PIM_NULL_REGISTER __constant_htonl(0x40000000)
++
++/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */
++
++struct pimreghdr
++{
++ __u8 type;
++ __u8 reserved;
++ __be16 csum;
++ __be32 flags;
++};
++
++extern int pim_rcv_v1(struct sk_buff *);
++
+ struct rtmsg;
+ extern int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait);
+ #endif
+diff --git a/include/linux/pim.h b/include/linux/pim.h
+deleted file mode 100644
+index 236ffd3..0000000
+--- a/include/linux/pim.h
++++ /dev/null
+@@ -1,45 +0,0 @@
+-#ifndef __LINUX_PIM_H
+-#define __LINUX_PIM_H
+-
+-#include <asm/byteorder.h>
+-
+-#ifndef __KERNEL__
+-struct pim {
+-#if defined(__LITTLE_ENDIAN_BITFIELD)
+- __u8 pim_type:4, /* PIM message type */
+- pim_ver:4; /* PIM version */
+-#elif defined(__BIG_ENDIAN_BITFIELD)
+- __u8 pim_ver:4; /* PIM version */
+- pim_type:4; /* PIM message type */
+-#endif
+- __u8 pim_rsv; /* Reserved */
+- __be16 pim_cksum; /* Checksum */
+-};
+-
+-#define PIM_MINLEN 8
+-#endif
+-
+-/* Message types - V1 */
+-#define PIM_V1_VERSION __constant_htonl(0x10000000)
+-#define PIM_V1_REGISTER 1
+-
+-/* Message types - V2 */
+-#define PIM_VERSION 2
+-#define PIM_REGISTER 1
+-
+-#if defined(__KERNEL__)
+-#define PIM_NULL_REGISTER __constant_htonl(0x40000000)
+-
+-/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */
+-struct pimreghdr
+-{
+- __u8 type;
+- __u8 reserved;
+- __be16 csum;
+- __be32 flags;
+-};
+-
+-struct sk_buff;
+-extern int pim_rcv_v1(struct sk_buff *);
+-#endif
+-#endif
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index bbd3d58..99ca7cd 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -80,7 +80,8 @@ extern struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
+ struct net_device *dev,
+ int strict);
+
+-extern int ipv6_dev_get_saddr(struct net_device *dev,
++extern int ipv6_dev_get_saddr(struct net *net,
++ struct net_device *dev,
+ const struct in6_addr *daddr,
+ unsigned int srcprefs,
+ struct in6_addr *saddr);
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
+index 9313491..03462e5 100644
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -112,6 +112,7 @@ struct rt6_rtnl_dump_arg
+ {
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
++ struct net *net;
+ };
+
+ extern int rt6_dump_route(struct rt6_info *rt, void *p_arg);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index f32fae3..0d520dc 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -693,6 +693,9 @@ int move_freepages(struct zone *zone,
+ #endif
+
+ for (page = start_page; page <= end_page;) {
++ /* Make sure we are not inadvertently changing nodes */
++ VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
++
+ if (!pfn_valid_within(page_to_pfn(page))) {
+ page++;
+ continue;
+@@ -2475,6 +2478,10 @@ static void setup_zone_migrate_reserve(struct zone *zone)
+ continue;
+ page = pfn_to_page(pfn);
+
++ /* Watch out for overlapping nodes */
++ if (page_to_nid(page) != zone_to_nid(zone))
++ continue;
++
+ /* Blocks with reserved pages will never free, skip them. */
+ if (PageReserved(page))
+ continue;
+diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
+index f597987..f288fc4 100644
+--- a/net/ax25/sysctl_net_ax25.c
++++ b/net/ax25/sysctl_net_ax25.c
+@@ -36,6 +36,7 @@ static struct ctl_path ax25_path[] = {
+ { .procname = "ax25", .ctl_name = NET_AX25, },
+ { }
+ };
++
+ static const ctl_table ax25_param_table[] = {
+ {
+ .ctl_name = NET_AX25_IP_DEFAULT_MODE,
+@@ -167,6 +168,7 @@ static const ctl_table ax25_param_table[] = {
+ .extra1 = &min_proto,
+ .extra2 = &max_proto
+ },
++#ifdef CONFIG_AX25_DAMA_SLAVE
+ {
+ .ctl_name = NET_AX25_DAMA_SLAVE_TIMEOUT,
+ .procname = "dama_slave_timeout",
+@@ -177,6 +179,8 @@ static const ctl_table ax25_param_table[] = {
+ .extra1 = &min_ds_timeout,
+ .extra2 = &max_ds_timeout
+ },
++#endif
++
+ { .ctl_name = 0 } /* that's all, folks! */
+ };
+
+@@ -210,16 +214,6 @@ void ax25_register_sysctl(void)
+ ax25_table[n].procname = ax25_dev->dev->name;
+ ax25_table[n].mode = 0555;
+
+-#ifndef CONFIG_AX25_DAMA_SLAVE
+- /*
+- * We do not wish to have a representation of this parameter
+- * in /proc/sys/ when configured *not* to include the
+- * AX.25 DAMA slave code, do we?
+- */
+-
+- child[AX25_VALUES_DS_TIMEOUT].procname = NULL;
+-#endif
+-
+ child[AX25_MAX_VALUES].ctl_name = 0; /* just in case... */
+
+ for (k = 0; k < AX25_MAX_VALUES; k++)
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 9f3f7ba..b6e7ec0 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -988,7 +988,9 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
+ up->encap_rcv != NULL) {
+ int ret;
+
++ bh_unlock_sock(sk);
+ ret = (*up->encap_rcv)(sk, skb);
++ bh_lock_sock(sk);
+ if (ret <= 0) {
+ UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS,
+ is_udplite);
+@@ -1087,7 +1089,7 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
+ if (skb1) {
+ int ret = 0;
+
+- bh_lock_sock_nested(sk);
++ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk))
+ ret = udp_queue_rcv_skb(sk, skb1);
+ else
+@@ -1187,7 +1189,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
+
+ if (sk != NULL) {
+ int ret = 0;
+- bh_lock_sock_nested(sk);
++ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk))
+ ret = udp_queue_rcv_skb(sk, skb);
+ else
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index ff61a5c..1a1d494 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1076,13 +1076,12 @@ out:
+ return ret;
+ }
+
+-int ipv6_dev_get_saddr(struct net_device *dst_dev,
++int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
+ const struct in6_addr *daddr, unsigned int prefs,
+ struct in6_addr *saddr)
+ {
+ struct ipv6_saddr_score scores[2],
+ *score = &scores[0], *hiscore = &scores[1];
+- struct net *net = dev_net(dst_dev);
+ struct ipv6_saddr_dst dst;
+ struct net_device *dev;
+ int dst_type;
+diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
+index 8d05527..f5de3f9 100644
+--- a/net/ipv6/fib6_rules.c
++++ b/net/ipv6/fib6_rules.c
+@@ -93,7 +93,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
+ if (flags & RT6_LOOKUP_F_SRCPREF_COA)
+ srcprefs |= IPV6_PREFER_SRC_COA;
+
+- if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev,
++ if (ipv6_dev_get_saddr(net,
++ ip6_dst_idev(&rt->u.dst)->dev,
+ &flp->fl6_dst, srcprefs,
+ &saddr))
+ goto again;
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 918fde4..fe80171 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -380,6 +380,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
+
+ arg.skb = skb;
+ arg.cb = cb;
++ arg.net = net;
+ w->args = &arg;
+
+ for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 4019770..d99f094 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -925,7 +925,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
+ goto out_err_release;
+
+ if (ipv6_addr_any(&fl->fl6_src)) {
+- err = ipv6_dev_get_saddr(ip6_dst_idev(*dst)->dev,
++ err = ipv6_dev_get_saddr(net, ip6_dst_idev(*dst)->dev,
+ &fl->fl6_dst,
+ sk ? inet6_sk(sk)->srcprefs : 0,
+ &fl->fl6_src);
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 282fdb3..efa84ae 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -549,7 +549,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
+ override = 0;
+ in6_ifa_put(ifp);
+ } else {
+- if (ipv6_dev_get_saddr(dev, daddr,
++ if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
+ inet6_sk(dev_net(dev)->ipv6.ndisc_sk)->srcprefs,
+ &tmpaddr))
+ return;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 7ff6870..9deee59 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2098,7 +2098,8 @@ static inline size_t rt6_nlmsg_size(void)
+ + nla_total_size(sizeof(struct rta_cacheinfo));
+ }
+
+-static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
++static int rt6_fill_node(struct net *net,
++ struct sk_buff *skb, struct rt6_info *rt,
+ struct in6_addr *dst, struct in6_addr *src,
+ int iif, int type, u32 pid, u32 seq,
+ int prefix, int nowait, unsigned int flags)
+@@ -2179,8 +2180,9 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
+ #endif
+ NLA_PUT_U32(skb, RTA_IIF, iif);
+ } else if (dst) {
++ struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst);
+ struct in6_addr saddr_buf;
+- if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev,
++ if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
+ dst, 0, &saddr_buf) == 0)
+ NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
+ }
+@@ -2225,7 +2227,8 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg)
+ } else
+ prefix = 0;
+
+- return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
++ return rt6_fill_node(arg->net,
++ arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
+ NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
+ prefix, 0, NLM_F_MULTI);
+ }
+@@ -2291,7 +2294,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
+ rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl);
+ skb->dst = &rt->u.dst;
+
+- err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
++ err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
+ RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
+ nlh->nlmsg_seq, 0, 0, 0);
+ if (err < 0) {
+@@ -2318,7 +2321,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
+ if (skb == NULL)
+ goto errout;
+
+- err = rt6_fill_node(skb, rt, NULL, NULL, 0,
++ err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
+ event, info->pid, seq, 0, 0, 0);
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index dd30962..e14aa66 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -376,7 +376,7 @@ static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr,
+ uh->source, saddr, dif))) {
+ struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
+ if (buff) {
+- bh_lock_sock_nested(sk2);
++ bh_lock_sock(sk2);
+ if (!sock_owned_by_user(sk2))
+ udpv6_queue_rcv_skb(sk2, buff);
+ else
+@@ -384,7 +384,7 @@ static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr,
+ bh_unlock_sock(sk2);
+ }
+ }
+- bh_lock_sock_nested(sk);
++ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk))
+ udpv6_queue_rcv_skb(sk, skb);
+ else
+@@ -502,7 +502,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
+
+ /* deliver */
+
+- bh_lock_sock_nested(sk);
++ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk))
+ udpv6_queue_rcv_skb(sk, skb);
+ else
+diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
+index 8f1e054..08e4cbb 100644
+--- a/net/ipv6/xfrm6_policy.c
++++ b/net/ipv6/xfrm6_policy.c
+@@ -52,12 +52,14 @@ static struct dst_entry *xfrm6_dst_lookup(int tos, xfrm_address_t *saddr,
+ static int xfrm6_get_saddr(xfrm_address_t *saddr, xfrm_address_t *daddr)
+ {
+ struct dst_entry *dst;
++ struct net_device *dev;
+
+ dst = xfrm6_dst_lookup(0, NULL, daddr);
+ if (IS_ERR(dst))
+ return -EHOSTUNREACH;
+
+- ipv6_dev_get_saddr(ip6_dst_idev(dst)->dev,
++ dev = ip6_dst_idev(dst)->dev;
++ ipv6_dev_get_saddr(dev_net(dev), dev,
+ (struct in6_addr *)&daddr->a6, 0,
+ (struct in6_addr *)&saddr->a6);
+ dst_release(dst);
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index 74e662c..b5e116c 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -205,10 +205,9 @@ struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
+ {
+ struct tcf_common *p = NULL;
+ if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
+- if (bind) {
++ if (bind)
+ p->tcfc_bindcnt++;
+- p->tcfc_refcnt++;
+- }
++ p->tcfc_refcnt++;
+ a->priv = p;
+ }
+ return p;
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 3fb58f4..51c3f68 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -595,11 +595,13 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ kfree_skb(skb);
+ return ret;
+ #endif
+- } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
++ } else if ((ret = cl->un.leaf.q->enqueue(skb, cl->un.leaf.q)) !=
+ NET_XMIT_SUCCESS) {
+- sch->qstats.drops++;
+- cl->qstats.drops++;
+- return NET_XMIT_DROP;
++ if (ret == NET_XMIT_DROP) {
++ sch->qstats.drops++;
++ cl->qstats.drops++;
++ }
++ return ret;
+ } else {
+ cl->bstats.packets +=
+ skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
+@@ -639,11 +641,13 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
+ kfree_skb(skb);
+ return ret;
+ #endif
+- } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
++ } else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
+ NET_XMIT_SUCCESS) {
+- sch->qstats.drops++;
+- cl->qstats.drops++;
+- return NET_XMIT_DROP;
++ if (ret == NET_XMIT_DROP) {
++ sch->qstats.drops++;
++ cl->qstats.drops++;
++ }
++ return ret;
+ } else
+ htb_activate(q, cl);
+
+diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
+index 5532f10..ec0c921 100644
+--- a/net/sched/sch_prio.c
++++ b/net/sched/sch_prio.c
+@@ -228,14 +228,20 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
+ {
+ struct prio_sched_data *q = qdisc_priv(sch);
+ struct tc_prio_qopt *qopt;
+- struct nlattr *tb[TCA_PRIO_MAX + 1];
++ struct nlattr *tb[TCA_PRIO_MAX + 1] = {0};
+ int err;
+ int i;
+
+- err = nla_parse_nested_compat(tb, TCA_PRIO_MAX, opt, NULL, qopt,
+- sizeof(*qopt));
+- if (err < 0)
+- return err;
++ qopt = nla_data(opt);
++ if (nla_len(opt) < sizeof(*qopt))
++ return -1;
++
++ if (nla_len(opt) >= sizeof(*qopt) + sizeof(struct nlattr)) {
++ err = nla_parse_nested(tb, TCA_PRIO_MAX,
++ (struct nlattr *) (qopt + 1), NULL);
++ if (err < 0)
++ return err;
++ }
+
+ q->bands = qopt->bands;
+ /* If we're multiqueue, make sure the number of incoming bands
+diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
+index 0b7d78f..fc6f8f3 100644
+--- a/net/sched/sch_tbf.c
++++ b/net/sched/sch_tbf.c
+@@ -123,15 +123,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+ struct tbf_sched_data *q = qdisc_priv(sch);
+ int ret;
+
+- if (skb->len > q->max_size) {
+- sch->qstats.drops++;
+-#ifdef CONFIG_NET_CLS_ACT
+- if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
+-#endif
+- kfree_skb(skb);
+-
+- return NET_XMIT_DROP;
+- }
++ if (skb->len > q->max_size)
++ return qdisc_reshape_fail(skb, sch);
+
+ if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) {
+ sch->qstats.drops++;
+diff --git a/net/sctp/auth.c b/net/sctp/auth.c
+index 675a5c3..52db5f6 100644
+--- a/net/sctp/auth.c
++++ b/net/sctp/auth.c
+@@ -80,6 +80,10 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
+ {
+ struct sctp_auth_bytes *key;
+
++ /* Verify that we are not going to overflow INT_MAX */
++ if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
++ return NULL;
++
+ /* Allocate the shared key */
+ key = kmalloc(sizeof(struct sctp_auth_bytes) + key_len, gfp);
+ if (!key)
+@@ -782,6 +786,9 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
+ for (i = 0; i < hmacs->shmac_num_idents; i++) {
+ id = hmacs->shmac_idents[i];
+
++ if (id > SCTP_AUTH_HMAC_ID_MAX)
++ return -EOPNOTSUPP;
++
+ if (SCTP_AUTH_HMAC_ID_SHA1 == id)
+ has_sha1 = 1;
+
+diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
+index e39a0cd..4c8d9f4 100644
+--- a/net/sctp/endpointola.c
++++ b/net/sctp/endpointola.c
+@@ -103,6 +103,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
+
+ /* Initialize the CHUNKS parameter */
+ auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS;
++ auth_chunks->param_hdr.length = htons(sizeof(sctp_paramhdr_t));
+
+ /* If the Add-IP functionality is enabled, we must
+ * authenticate, ASCONF and ASCONF-ACK chunks
+@@ -110,8 +111,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
+ if (sctp_addip_enable) {
+ auth_chunks->chunks[0] = SCTP_CID_ASCONF;
+ auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
+- auth_chunks->param_hdr.length =
+- htons(sizeof(sctp_paramhdr_t) + 2);
++ auth_chunks->param_hdr.length += htons(2);
+ }
+ }
+
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index a2f4d4d..38a5d80 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -317,7 +317,8 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk,
+ __func__, asoc, dst, NIP6(daddr->v6.sin6_addr));
+
+ if (!asoc) {
+- ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL,
++ ipv6_dev_get_saddr(sock_net(sctp_opt2sk(sk)),
++ dst ? ip6_dst_idev(dst)->dev : NULL,
+ &daddr->v6.sin6_addr,
+ inet6_sk(&sk->inet.sk)->srcprefs,
+ &saddr->v6.sin6_addr);
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 0dbcde6..700d27d 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -2965,6 +2965,9 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
+ {
+ struct sctp_authchunk val;
+
++ if (!sctp_auth_enable)
++ return -EACCES;
++
+ if (optlen != sizeof(struct sctp_authchunk))
+ return -EINVAL;
+ if (copy_from_user(&val, optval, optlen))
+@@ -2993,8 +2996,12 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
+ int optlen)
+ {
+ struct sctp_hmacalgo *hmacs;
++ u32 idents;
+ int err;
+
++ if (!sctp_auth_enable)
++ return -EACCES;
++
+ if (optlen < sizeof(struct sctp_hmacalgo))
+ return -EINVAL;
+
+@@ -3007,8 +3014,9 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
+ goto out;
+ }
+
+- if (hmacs->shmac_num_idents == 0 ||
+- hmacs->shmac_num_idents > SCTP_AUTH_NUM_HMACS) {
++ idents = hmacs->shmac_num_idents;
++ if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
++ (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
+ err = -EINVAL;
+ goto out;
+ }
+@@ -3033,6 +3041,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
+ struct sctp_association *asoc;
+ int ret;
+
++ if (!sctp_auth_enable)
++ return -EACCES;
++
+ if (optlen <= sizeof(struct sctp_authkey))
+ return -EINVAL;
+
+@@ -3045,6 +3056,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
+ goto out;
+ }
+
++ if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
++ ret = -EINVAL;
++ goto out;
++ }
++
+ asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
+ if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
+ ret = -EINVAL;
+@@ -3070,6 +3086,9 @@ static int sctp_setsockopt_active_key(struct sock *sk,
+ struct sctp_authkeyid val;
+ struct sctp_association *asoc;
+
++ if (!sctp_auth_enable)
++ return -EACCES;
++
+ if (optlen != sizeof(struct sctp_authkeyid))
+ return -EINVAL;
+ if (copy_from_user(&val, optval, optlen))
+@@ -3095,6 +3114,9 @@ static int sctp_setsockopt_del_key(struct sock *sk,
+ struct sctp_authkeyid val;
+ struct sctp_association *asoc;
+
++ if (!sctp_auth_enable)
++ return -EACCES;
++
+ if (optlen != sizeof(struct sctp_authkeyid))
+ return -EINVAL;
+ if (copy_from_user(&val, optval, optlen))
+@@ -5053,19 +5075,29 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
+ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+ {
++ struct sctp_hmacalgo __user *p = (void __user *)optval;
+ struct sctp_hmac_algo_param *hmacs;
+- __u16 param_len;
++ __u16 data_len = 0;
++ u32 num_idents;
++
++ if (!sctp_auth_enable)
++ return -EACCES;
+
+ hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
+- param_len = ntohs(hmacs->param_hdr.length);
++ data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
+
+- if (len < param_len)
++ if (len < sizeof(struct sctp_hmacalgo) + data_len)
+ return -EINVAL;
++
++ len = sizeof(struct sctp_hmacalgo) + data_len;
++ num_idents = data_len / sizeof(u16);
++
+ if (put_user(len, optlen))
+ return -EFAULT;
+- if (copy_to_user(optval, hmacs->hmac_ids, len))
++ if (put_user(num_idents, &p->shmac_num_idents))
++ return -EFAULT;
++ if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
+ return -EFAULT;
+-
+ return 0;
+ }
+
+@@ -5075,6 +5107,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
+ struct sctp_authkeyid val;
+ struct sctp_association *asoc;
+
++ if (!sctp_auth_enable)
++ return -EACCES;
++
+ if (len < sizeof(struct sctp_authkeyid))
+ return -EINVAL;
+ if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
+@@ -5089,6 +5124,12 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
+ else
+ val.scact_keynumber = sctp_sk(sk)->ep->active_key_id;
+
++ len = sizeof(struct sctp_authkeyid);
++ if (put_user(len, optlen))
++ return -EFAULT;
++ if (copy_to_user(optval, &val, len))
++ return -EFAULT;
++
+ return 0;
+ }
+
+@@ -5099,13 +5140,16 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
+ struct sctp_authchunks val;
+ struct sctp_association *asoc;
+ struct sctp_chunks_param *ch;
+- u32 num_chunks;
++ u32 num_chunks = 0;
+ char __user *to;
1718 +
1719 +- if (len <= sizeof(struct sctp_authchunks))
1720 ++ if (!sctp_auth_enable)
1721 ++ return -EACCES;
1722 ++
1723 ++ if (len < sizeof(struct sctp_authchunks))
1724 + return -EINVAL;
1725 +
1726 +- if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
1727 ++ if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
1728 + return -EFAULT;
1729 +
1730 + to = p->gauth_chunks;
1731 +@@ -5114,20 +5158,21 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
1732 + return -EINVAL;
1733 +
1734 + ch = asoc->peer.peer_chunks;
1735 ++ if (!ch)
1736 ++ goto num;
1737 +
1738 + /* See if the user provided enough room for all the data */
1739 + num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
1740 + if (len < num_chunks)
1741 + return -EINVAL;
1742 +
1743 +- len = num_chunks;
1744 +- if (put_user(len, optlen))
1745 ++ if (copy_to_user(to, ch->chunks, num_chunks))
1746 + return -EFAULT;
1747 ++num:
1748 ++ len = sizeof(struct sctp_authchunks) + num_chunks;
1749 ++ if (put_user(len, optlen)) return -EFAULT;
1750 + if (put_user(num_chunks, &p->gauth_number_of_chunks))
1751 + return -EFAULT;
1752 +- if (copy_to_user(to, ch->chunks, len))
1753 +- return -EFAULT;
1754 +-
1755 + return 0;
1756 + }
1757 +
1758 +@@ -5138,13 +5183,16 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
1759 + struct sctp_authchunks val;
1760 + struct sctp_association *asoc;
1761 + struct sctp_chunks_param *ch;
1762 +- u32 num_chunks;
1763 ++ u32 num_chunks = 0;
1764 + char __user *to;
1765 +
1766 +- if (len <= sizeof(struct sctp_authchunks))
1767 ++ if (!sctp_auth_enable)
1768 ++ return -EACCES;
1769 ++
1770 ++ if (len < sizeof(struct sctp_authchunks))
1771 + return -EINVAL;
1772 +
1773 +- if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
1774 ++ if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
1775 + return -EFAULT;
1776 +
1777 + to = p->gauth_chunks;
1778 +@@ -5157,17 +5205,21 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
1779 + else
1780 + ch = sctp_sk(sk)->ep->auth_chunk_list;
1781 +
1782 ++ if (!ch)
1783 ++ goto num;
1784 ++
1785 + num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
1786 +- if (len < num_chunks)
1787 ++ if (len < sizeof(struct sctp_authchunks) + num_chunks)
1788 + return -EINVAL;
1789 +
1790 +- len = num_chunks;
1791 ++ if (copy_to_user(to, ch->chunks, num_chunks))
1792 ++ return -EFAULT;
1793 ++num:
1794 ++ len = sizeof(struct sctp_authchunks) + num_chunks;
1795 + if (put_user(len, optlen))
1796 + return -EFAULT;
1797 + if (put_user(num_chunks, &p->gauth_number_of_chunks))
1798 + return -EFAULT;
1799 +- if (copy_to_user(to, ch->chunks, len))
1800 +- return -EFAULT;
1801 +
1802 + return 0;
1803 + }
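
The net/sctp/socket.c hunks all follow one pattern: each SCTP-AUTH get/setsockopt handler first bails out with -EACCES when sctp_auth_enable is off, then bounds every user-supplied count or key length by the bytes the caller actually provided in optlen, and finally reports the true length back through optlen. A sketch of the bounding step for a variable-length option (check_authkey() and struct opt_authkey are hypothetical stand-ins for the sctp_authkey handling above):

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>

    struct opt_authkey {
            uint16_t keylength;       /* claimed payload size */
            uint8_t  key[];           /* variable-length payload */
    };

    static int check_authkey(const struct opt_authkey *opt, size_t optlen)
    {
            /* Must carry at least one payload byte beyond the header. */
            if (optlen <= sizeof(struct opt_authkey))
                    return -EINVAL;
            /* keylength is caller-controlled: clamp it to the bytes
             * that were really passed in, as the fix above does. */
            if (opt->keylength > optlen - sizeof(struct opt_authkey))
                    return -EINVAL;
            return 0;
    }
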
1804 +diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
1805 +index 0f8c439..5231f7a 100644
1806 +--- a/net/sunrpc/sysctl.c
1807 ++++ b/net/sunrpc/sysctl.c
1808 +@@ -60,24 +60,14 @@ static int proc_do_xprt(ctl_table *table, int write, struct file *file,
1809 + void __user *buffer, size_t *lenp, loff_t *ppos)
1810 + {
1811 + char tmpbuf[256];
1812 +- int len;
1813 ++ size_t len;
1814 ++
1815 + if ((*ppos && !write) || !*lenp) {
1816 + *lenp = 0;
1817 + return 0;
1818 + }
1819 +- if (write)
1820 +- return -EINVAL;
1821 +- else {
1822 +- len = svc_print_xprts(tmpbuf, sizeof(tmpbuf));
1823 +- if (!access_ok(VERIFY_WRITE, buffer, len))
1824 +- return -EFAULT;
1825 +-
1826 +- if (__copy_to_user(buffer, tmpbuf, len))
1827 +- return -EFAULT;
1828 +- }
1829 +- *lenp -= len;
1830 +- *ppos += len;
1831 +- return 0;
1832 ++ len = svc_print_xprts(tmpbuf, sizeof(tmpbuf));
1833 ++ return simple_read_from_buffer(buffer, *lenp, ppos, tmpbuf, len);
1834 + }
1835 +
1836 + static int
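
The sunrpc change drops the hand-rolled access_ok()/__copy_to_user() bookkeeping in favor of simple_read_from_buffer(), which clamps the copy to what remains in the buffer and advances *ppos itself. Its contract, restated as a userspace analogue (a sketch of the semantics, not the kernel implementation):

    #include <string.h>
    #include <sys/types.h>

    static ssize_t read_from_buffer(void *to, size_t count, off_t *ppos,
                                    const void *from, size_t available)
    {
            off_t pos = *ppos;

            if (pos < 0)
                    return -1;        /* -EINVAL in the kernel */
            if ((size_t)pos >= available || !count)
                    return 0;         /* EOF or empty read */
            if (count > available - (size_t)pos)
                    count = available - (size_t)pos;
            memcpy(to, (const char *)from + pos, count);
            *ppos = pos + count;
            return (ssize_t)count;
    }
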
1837 +diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
1838 +index 72fddaf..391f456 100644
1839 +--- a/net/xfrm/xfrm_state.c
1840 ++++ b/net/xfrm/xfrm_state.c
1841 +@@ -780,11 +780,13 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
1842 + {
1843 + unsigned int h;
1844 + struct hlist_node *entry;
1845 +- struct xfrm_state *x, *x0;
1846 ++ struct xfrm_state *x, *x0, *to_put;
1847 + int acquire_in_progress = 0;
1848 + int error = 0;
1849 + struct xfrm_state *best = NULL;
1850 +
1851 ++ to_put = NULL;
1852 ++
1853 + spin_lock_bh(&xfrm_state_lock);
1854 + h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
1855 + hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
1856 +@@ -833,7 +835,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
1857 + if (tmpl->id.spi &&
1858 + (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
1859 + tmpl->id.proto, family)) != NULL) {
1860 +- xfrm_state_put(x0);
1861 ++ to_put = x0;
1862 + error = -EEXIST;
1863 + goto out;
1864 + }
1865 +@@ -849,7 +851,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
1866 + error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
1867 + if (error) {
1868 + x->km.state = XFRM_STATE_DEAD;
1869 +- xfrm_state_put(x);
1870 ++ to_put = x;
1871 + x = NULL;
1872 + goto out;
1873 + }
1874 +@@ -870,7 +872,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
1875 + xfrm_hash_grow_check(x->bydst.next != NULL);
1876 + } else {
1877 + x->km.state = XFRM_STATE_DEAD;
1878 +- xfrm_state_put(x);
1879 ++ to_put = x;
1880 + x = NULL;
1881 + error = -ESRCH;
1882 + }
1883 +@@ -881,6 +883,8 @@ out:
1884 + else
1885 + *err = acquire_in_progress ? -EAGAIN : error;
1886 + spin_unlock_bh(&xfrm_state_lock);
1887 ++ if (to_put)
1888 ++ xfrm_state_put(to_put);
1889 + return x;
1890 + }
1891 +
1892 +@@ -1067,18 +1071,20 @@ static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
1893 +
1894 + int xfrm_state_add(struct xfrm_state *x)
1895 + {
1896 +- struct xfrm_state *x1;
1897 ++ struct xfrm_state *x1, *to_put;
1898 + int family;
1899 + int err;
1900 + int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1901 +
1902 + family = x->props.family;
1903 +
1904 ++ to_put = NULL;
1905 ++
1906 + spin_lock_bh(&xfrm_state_lock);
1907 +
1908 + x1 = __xfrm_state_locate(x, use_spi, family);
1909 + if (x1) {
1910 +- xfrm_state_put(x1);
1911 ++ to_put = x1;
1912 + x1 = NULL;
1913 + err = -EEXIST;
1914 + goto out;
1915 +@@ -1088,7 +1094,7 @@ int xfrm_state_add(struct xfrm_state *x)
1916 + x1 = __xfrm_find_acq_byseq(x->km.seq);
1917 + if (x1 && ((x1->id.proto != x->id.proto) ||
1918 + xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
1919 +- xfrm_state_put(x1);
1920 ++ to_put = x1;
1921 + x1 = NULL;
1922 + }
1923 + }
1924 +@@ -1110,6 +1116,9 @@ out:
1925 + xfrm_state_put(x1);
1926 + }
1927 +
1928 ++ if (to_put)
1929 ++ xfrm_state_put(to_put);
1930 ++
1931 + return err;
1932 + }
1933 + EXPORT_SYMBOL(xfrm_state_add);
1934 +@@ -1269,10 +1278,12 @@ EXPORT_SYMBOL(xfrm_state_migrate);
1935 +
1936 + int xfrm_state_update(struct xfrm_state *x)
1937 + {
1938 +- struct xfrm_state *x1;
1939 ++ struct xfrm_state *x1, *to_put;
1940 + int err;
1941 + int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1942 +
1943 ++ to_put = NULL;
1944 ++
1945 + spin_lock_bh(&xfrm_state_lock);
1946 + x1 = __xfrm_state_locate(x, use_spi, x->props.family);
1947 +
1948 +@@ -1281,7 +1292,7 @@ int xfrm_state_update(struct xfrm_state *x)
1949 + goto out;
1950 +
1951 + if (xfrm_state_kern(x1)) {
1952 +- xfrm_state_put(x1);
1953 ++ to_put = x1;
1954 + err = -EEXIST;
1955 + goto out;
1956 + }
1957 +@@ -1295,6 +1306,9 @@ int xfrm_state_update(struct xfrm_state *x)
1958 + out:
1959 + spin_unlock_bh(&xfrm_state_lock);
1960 +
1961 ++ if (to_put)
1962 ++ xfrm_state_put(to_put);
1963 ++
1964 + if (err)
1965 + return err;
1966 +
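
Every xfrm_state.c hunk makes the same transformation: instead of calling xfrm_state_put() while xfrm_state_lock is held, the state whose reference must be dropped is parked in a to_put pointer and released only after spin_unlock_bh(), because dropping the last reference can trigger teardown work that must not run under the lock. The pattern in miniature (illustrative names and a toy non-atomic refcount; the kernel uses atomic_t):

    #include <pthread.h>
    #include <stdlib.h>

    struct obj {
            int refcnt;               /* toy refcount, not thread-safe */
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct obj *slot;          /* protected by table_lock */

    static void obj_put(struct obj *o)
    {
            if (--o->refcnt == 0)
                    free(o);          /* heavy teardown in the real code */
    }

    static void slot_replace(struct obj *replacement)
    {
            struct obj *to_put;

            pthread_mutex_lock(&table_lock);
            to_put = slot;            /* defer the put past the unlock */
            slot = replacement;
            pthread_mutex_unlock(&table_lock);

            if (to_put)
                    obj_put(to_put);  /* safe: table_lock is no longer held */
    }
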
1967 +diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
1968 +index 6facac5..05eb899 100644
1969 +--- a/sound/pci/oxygen/oxygen_mixer.c
1970 ++++ b/sound/pci/oxygen/oxygen_mixer.c
1971 +@@ -512,9 +512,12 @@ static int ac97_switch_get(struct snd_kcontrol *ctl,
1972 +
1973 + static void mute_ac97_ctl(struct oxygen *chip, unsigned int control)
1974 + {
1975 +- unsigned int priv_idx = chip->controls[control]->private_value & 0xff;
1976 ++ unsigned int priv_idx;
1977 + u16 value;
1978 +
1979 ++ if (!chip->controls[control])
1980 ++ return;
1981 ++ priv_idx = chip->controls[control]->private_value & 0xff;
1982 + value = oxygen_read_ac97(chip, 0, priv_idx);
1983 + if (!(value & 0x8000)) {
1984 + oxygen_write_ac97(chip, 0, priv_idx, value | 0x8000);