
From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1692 - genpatches-2.6/trunk/2.6.32
Date: Fri, 02 Apr 2010 12:00:29
Message-Id: E1NxfXq-0005Q7-RQ@stork.gentoo.org
1 Author: mpagano
2 Date: 2010-04-02 12:00:14 +0000 (Fri, 02 Apr 2010)
3 New Revision: 1692
4
5 Added:
6 genpatches-2.6/trunk/2.6.32/1010_linux-2.6.32.11.patch
7 Removed:
8 genpatches-2.6/trunk/2.6.32/2400_5906-transmit-hang-fix.patch
9 Modified:
10 genpatches-2.6/trunk/2.6.32/0000_README
11 Log:
12 Adding patch 2.6.32.11 and removing the Broadcom 5906 transmit-hang patch, now redundant (the fix is included in 2.6.32.11).
13
14 Modified: genpatches-2.6/trunk/2.6.32/0000_README
15 ===================================================================
16 --- genpatches-2.6/trunk/2.6.32/0000_README 2010-04-01 19:40:34 UTC (rev 1691)
17 +++ genpatches-2.6/trunk/2.6.32/0000_README 2010-04-02 12:00:14 UTC (rev 1692)
18 @@ -79,9 +79,9 @@
19 From: http://www.kernel.org
20 Desc: Linux 2.6.32.10
21
22 -Patch: 2400_5906-transmit-hang-fix.patch
23 -From: http://bugs.gentoo.org/show_bug.cgi?id=304625
24 -Desc: tg3: Fix 5906 transmit hangs
25 +Patch: 1010_linux-2.6.32.11.patch
26 +From: http://www.kernel.org
27 +Desc: Linux 2.6.32.11
28
29 Patch: 2500_libata-fix-truncated-LBA48-ret-vals.patch
30 From: http://bugs.gentoo.org/show_bug.cgi?id=303313
31
32 Added: genpatches-2.6/trunk/2.6.32/1010_linux-2.6.32.11.patch
33 ===================================================================
34 --- genpatches-2.6/trunk/2.6.32/1010_linux-2.6.32.11.patch (rev 0)
35 +++ genpatches-2.6/trunk/2.6.32/1010_linux-2.6.32.11.patch 2010-04-02 12:00:14 UTC (rev 1692)
36 @@ -0,0 +1,4709 @@
37 +diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
38 +index 3015da0..fe09a2c 100644
39 +--- a/Documentation/filesystems/tmpfs.txt
40 ++++ b/Documentation/filesystems/tmpfs.txt
41 +@@ -82,11 +82,13 @@ tmpfs has a mount option to set the NUMA memory allocation policy for
42 + all files in that instance (if CONFIG_NUMA is enabled) - which can be
43 + adjusted on the fly via 'mount -o remount ...'
44 +
45 +-mpol=default prefers to allocate memory from the local node
46 ++mpol=default use the process allocation policy
47 ++ (see set_mempolicy(2))
48 + mpol=prefer:Node prefers to allocate memory from the given Node
49 + mpol=bind:NodeList allocates memory only from nodes in NodeList
50 + mpol=interleave prefers to allocate from each node in turn
51 + mpol=interleave:NodeList allocates from each node of NodeList in turn
52 ++mpol=local prefers to allocate memory from the local node
53 +
54 + NodeList format is a comma-separated list of decimal numbers and ranges,
55 + a range being two hyphen-separated decimal numbers, the smallest and
56 +@@ -134,3 +136,5 @@ Author:
57 + Christoph Rohland <cr@×××.com>, 1.12.01
58 + Updated:
59 + Hugh Dickins, 4 June 2007
60 ++Updated:
61 ++ KOSAKI Motohiro, 16 Mar 2010
62 +diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
63 +index fa6fbf4..3cb8fa3 100644
64 +--- a/arch/arm/boot/compressed/head.S
65 ++++ b/arch/arm/boot/compressed/head.S
66 +@@ -162,8 +162,8 @@ not_angel:
67 +
68 + .text
69 + adr r0, LC0
70 +- ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, ip, sp} )
71 +- THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, ip} )
72 ++ ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp})
73 ++ THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip} )
74 + THUMB( ldr sp, [r0, #28] )
75 + subs r0, r0, r1 @ calculate the delta offset
76 +
77 +@@ -174,12 +174,13 @@ not_angel:
78 + /*
79 + * We're running at a different address. We need to fix
80 + * up various pointers:
81 +- * r5 - zImage base address
82 +- * r6 - GOT start
83 ++ * r5 - zImage base address (_start)
84 ++ * r6 - size of decompressed image
85 ++ * r11 - GOT start
86 + * ip - GOT end
87 + */
88 + add r5, r5, r0
89 +- add r6, r6, r0
90 ++ add r11, r11, r0
91 + add ip, ip, r0
92 +
93 + #ifndef CONFIG_ZBOOT_ROM
94 +@@ -197,10 +198,10 @@ not_angel:
95 + /*
96 + * Relocate all entries in the GOT table.
97 + */
98 +-1: ldr r1, [r6, #0] @ relocate entries in the GOT
99 ++1: ldr r1, [r11, #0] @ relocate entries in the GOT
100 + add r1, r1, r0 @ table. This fixes up the
101 +- str r1, [r6], #4 @ C references.
102 +- cmp r6, ip
103 ++ str r1, [r11], #4 @ C references.
104 ++ cmp r11, ip
105 + blo 1b
106 + #else
107 +
108 +@@ -208,12 +209,12 @@ not_angel:
109 + * Relocate entries in the GOT table. We only relocate
110 + * the entries that are outside the (relocated) BSS region.
111 + */
112 +-1: ldr r1, [r6, #0] @ relocate entries in the GOT
113 ++1: ldr r1, [r11, #0] @ relocate entries in the GOT
114 + cmp r1, r2 @ entry < bss_start ||
115 + cmphs r3, r1 @ _end < entry
116 + addlo r1, r1, r0 @ table. This fixes up the
117 +- str r1, [r6], #4 @ C references.
118 +- cmp r6, ip
119 ++ str r1, [r11], #4 @ C references.
120 ++ cmp r11, ip
121 + blo 1b
122 + #endif
123 +
124 +@@ -239,6 +240,7 @@ not_relocated: mov r0, #0
125 + * Check to see if we will overwrite ourselves.
126 + * r4 = final kernel address
127 + * r5 = start of this image
128 ++ * r6 = size of decompressed image
129 + * r2 = end of malloc space (and therefore this image)
130 + * We basically want:
131 + * r4 >= r2 -> OK
132 +@@ -246,8 +248,7 @@ not_relocated: mov r0, #0
133 + */
134 + cmp r4, r2
135 + bhs wont_overwrite
136 +- sub r3, sp, r5 @ > compressed kernel size
137 +- add r0, r4, r3, lsl #2 @ allow for 4x expansion
138 ++ add r0, r4, r6
139 + cmp r0, r5
140 + bls wont_overwrite
141 +
142 +@@ -263,7 +264,6 @@ not_relocated: mov r0, #0
143 + * r1-r3 = unused
144 + * r4 = kernel execution address
145 + * r5 = decompressed kernel start
146 +- * r6 = processor ID
147 + * r7 = architecture ID
148 + * r8 = atags pointer
149 + * r9-r12,r14 = corrupted
150 +@@ -304,7 +304,8 @@ LC0: .word LC0 @ r1
151 + .word _end @ r3
152 + .word zreladdr @ r4
153 + .word _start @ r5
154 +- .word _got_start @ r6
155 ++ .word _image_size @ r6
156 ++ .word _got_start @ r11
157 + .word _got_end @ ip
158 + .word user_stack+4096 @ sp
159 + LC1: .word reloc_end - reloc_start
160 +@@ -328,7 +329,6 @@ params: ldr r0, =params_phys
161 + *
162 + * On entry,
163 + * r4 = kernel execution address
164 +- * r6 = processor ID
165 + * r7 = architecture number
166 + * r8 = atags pointer
167 + * r9 = run-time address of "start" (???)
168 +@@ -534,7 +534,6 @@ __common_mmu_cache_on:
169 + * r1-r3 = unused
170 + * r4 = kernel execution address
171 + * r5 = decompressed kernel start
172 +- * r6 = processor ID
173 + * r7 = architecture ID
174 + * r8 = atags pointer
175 + * r9-r12,r14 = corrupted
176 +@@ -573,19 +572,19 @@ call_kernel: bl cache_clean_flush
177 + * r1 = corrupted
178 + * r2 = corrupted
179 + * r3 = block offset
180 +- * r6 = corrupted
181 ++ * r9 = corrupted
182 + * r12 = corrupted
183 + */
184 +
185 + call_cache_fn: adr r12, proc_types
186 + #ifdef CONFIG_CPU_CP15
187 +- mrc p15, 0, r6, c0, c0 @ get processor ID
188 ++ mrc p15, 0, r9, c0, c0 @ get processor ID
189 + #else
190 +- ldr r6, =CONFIG_PROCESSOR_ID
191 ++ ldr r9, =CONFIG_PROCESSOR_ID
192 + #endif
193 + 1: ldr r1, [r12, #0] @ get value
194 + ldr r2, [r12, #4] @ get mask
195 +- eor r1, r1, r6 @ (real ^ match)
196 ++ eor r1, r1, r9 @ (real ^ match)
197 + tst r1, r2 @ & mask
198 + ARM( addeq pc, r12, r3 ) @ call cache function
199 + THUMB( addeq r12, r3 )
200 +@@ -764,8 +763,7 @@ proc_types:
201 + * Turn off the Cache and MMU. ARMv3 does not support
202 + * reading the control register, but ARMv4 does.
203 + *
204 +- * On entry, r6 = processor ID
205 +- * On exit, r0, r1, r2, r3, r12 corrupted
206 ++ * On exit, r0, r1, r2, r3, r9, r12 corrupted
207 + * This routine must preserve: r4, r6, r7
208 + */
209 + .align 5
210 +@@ -838,10 +836,8 @@ __armv3_mmu_cache_off:
211 + /*
212 + * Clean and flush the cache to maintain consistency.
213 + *
214 +- * On entry,
215 +- * r6 = processor ID
216 + * On exit,
217 +- * r1, r2, r3, r11, r12 corrupted
218 ++ * r1, r2, r3, r9, r11, r12 corrupted
219 + * This routine must preserve:
220 + * r0, r4, r5, r6, r7
221 + */
222 +@@ -953,7 +949,7 @@ __armv4_mmu_cache_flush:
223 + mov r2, #64*1024 @ default: 32K dcache size (*2)
224 + mov r11, #32 @ default: 32 byte line size
225 + mrc p15, 0, r3, c0, c0, 1 @ read cache type
226 +- teq r3, r6 @ cache ID register present?
227 ++ teq r3, r9 @ cache ID register present?
228 + beq no_cache_id
229 + mov r1, r3, lsr #18
230 + and r1, r1, #7
231 +diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
232 +index a5924b9..cbed030 100644
233 +--- a/arch/arm/boot/compressed/vmlinux.lds.in
234 ++++ b/arch/arm/boot/compressed/vmlinux.lds.in
235 +@@ -36,6 +36,9 @@ SECTIONS
236 +
237 + _etext = .;
238 +
239 ++ /* Assume size of decompressed image is 4x the compressed image */
240 ++ _image_size = (_etext - _text) * 4;
241 ++
242 + _got_start = .;
243 + .got : { *(.got) }
244 + _got_end = .;
245 +diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
246 +index bb1719a..5adba4f 100644
247 +--- a/arch/mips/mm/tlbex.c
248 ++++ b/arch/mips/mm/tlbex.c
249 +@@ -73,9 +73,6 @@ static int __cpuinit m4kc_tlbp_war(void)
250 + enum label_id {
251 + label_second_part = 1,
252 + label_leave,
253 +-#ifdef MODULE_START
254 +- label_module_alloc,
255 +-#endif
256 + label_vmalloc,
257 + label_vmalloc_done,
258 + label_tlbw_hazard,
259 +@@ -92,9 +89,6 @@ enum label_id {
260 +
261 + UASM_L_LA(_second_part)
262 + UASM_L_LA(_leave)
263 +-#ifdef MODULE_START
264 +-UASM_L_LA(_module_alloc)
265 +-#endif
266 + UASM_L_LA(_vmalloc)
267 + UASM_L_LA(_vmalloc_done)
268 + UASM_L_LA(_tlbw_hazard)
269 +@@ -802,8 +796,6 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
270 + } else {
271 + #if defined(CONFIG_HUGETLB_PAGE)
272 + const enum label_id ls = label_tlb_huge_update;
273 +-#elif defined(MODULE_START)
274 +- const enum label_id ls = label_module_alloc;
275 + #else
276 + const enum label_id ls = label_vmalloc;
277 + #endif
278 +diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
279 +index fd56a71..974ba71 100644
280 +--- a/arch/sh/boot/compressed/misc.c
281 ++++ b/arch/sh/boot/compressed/misc.c
282 +@@ -132,7 +132,7 @@ void decompress_kernel(void)
283 + output_addr = (CONFIG_MEMORY_START + 0x2000);
284 + #else
285 + output_addr = PHYSADDR((unsigned long)&_text+PAGE_SIZE);
286 +-#ifdef CONFIG_29BIT
287 ++#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED)
288 + output_addr |= P2SEG;
289 + #endif
290 + #endif
291 +diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
292 +index 4b7c937..815cab6 100644
293 +--- a/arch/sparc/prom/p1275.c
294 ++++ b/arch/sparc/prom/p1275.c
295 +@@ -32,8 +32,7 @@ extern void prom_cif_interface(void);
296 + extern void prom_cif_callback(void);
297 +
298 + /*
299 +- * This provides SMP safety on the p1275buf. prom_callback() drops this lock
300 +- * to allow recursuve acquisition.
301 ++ * This provides SMP safety on the p1275buf.
302 + */
303 + DEFINE_SPINLOCK(prom_entry_lock);
304 +
305 +@@ -47,7 +46,9 @@ long p1275_cmd(const char *service, long fmt, ...)
306 +
307 + p = p1275buf.prom_buffer;
308 +
309 +- spin_lock_irqsave(&prom_entry_lock, flags);
310 ++ raw_local_save_flags(flags);
311 ++ raw_local_irq_restore(PIL_NMI);
312 ++ spin_lock(&prom_entry_lock);
313 +
314 + p1275buf.prom_args[0] = (unsigned long)p; /* service */
315 + strcpy (p, service);
316 +@@ -139,7 +140,8 @@ long p1275_cmd(const char *service, long fmt, ...)
317 + va_end(list);
318 + x = p1275buf.prom_args [nargs + 3];
319 +
320 +- spin_unlock_irqrestore(&prom_entry_lock, flags);
321 ++ spin_unlock(&prom_entry_lock);
322 ++ raw_local_irq_restore(flags);
323 +
324 + return x;
325 + }
326 +diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
327 +index 14f9890..c22a164 100644
328 +--- a/arch/x86/include/asm/fixmap.h
329 ++++ b/arch/x86/include/asm/fixmap.h
330 +@@ -82,6 +82,9 @@ enum fixed_addresses {
331 + #endif
332 + FIX_DBGP_BASE,
333 + FIX_EARLYCON_MEM_BASE,
334 ++#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
335 ++ FIX_OHCI1394_BASE,
336 ++#endif
337 + #ifdef CONFIG_X86_LOCAL_APIC
338 + FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
339 + #endif
340 +@@ -126,9 +129,6 @@ enum fixed_addresses {
341 + FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
342 + (__end_of_permanent_fixed_addresses & 255),
343 + FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
344 +-#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
345 +- FIX_OHCI1394_BASE,
346 +-#endif
347 + #ifdef CONFIG_X86_32
348 + FIX_WP_TEST,
349 + #endif
350 +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
351 +index 4ffe09b..8cb8489 100644
352 +--- a/arch/x86/include/asm/msr-index.h
353 ++++ b/arch/x86/include/asm/msr-index.h
354 +@@ -104,6 +104,8 @@
355 + #define MSR_AMD64_PATCH_LEVEL 0x0000008b
356 + #define MSR_AMD64_NB_CFG 0xc001001f
357 + #define MSR_AMD64_PATCH_LOADER 0xc0010020
358 ++#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
359 ++#define MSR_AMD64_OSVW_STATUS 0xc0010141
360 + #define MSR_AMD64_IBSFETCHCTL 0xc0011030
361 + #define MSR_AMD64_IBSFETCHLINAD 0xc0011031
362 + #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
363 +diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
364 +index 195e4b7..23c2da8 100644
365 +--- a/arch/x86/kernel/acpi/boot.c
366 ++++ b/arch/x86/kernel/acpi/boot.c
367 +@@ -1191,9 +1191,6 @@ static void __init acpi_process_madt(void)
368 + if (!error) {
369 + acpi_lapic = 1;
370 +
371 +-#ifdef CONFIG_X86_BIGSMP
372 +- generic_bigsmp_probe();
373 +-#endif
374 + /*
375 + * Parse MADT IO-APIC entries
376 + */
377 +@@ -1203,8 +1200,6 @@ static void __init acpi_process_madt(void)
378 + acpi_ioapic = 1;
379 +
380 + smp_found_config = 1;
381 +- if (apic->setup_apic_routing)
382 +- apic->setup_apic_routing();
383 + }
384 + }
385 + if (error == -EINVAL) {
386 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
387 +index c86dbcf..0e69e17 100644
388 +--- a/arch/x86/kernel/apic/apic.c
389 ++++ b/arch/x86/kernel/apic/apic.c
390 +@@ -1665,9 +1665,7 @@ int __init APIC_init_uniprocessor(void)
391 + #endif
392 +
393 + enable_IR_x2apic();
394 +-#ifdef CONFIG_X86_64
395 + default_setup_apic_routing();
396 +-#endif
397 +
398 + verify_local_APIC();
399 + connect_bsp_APIC();
400 +@@ -1915,18 +1913,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
401 + if (apicid > max_physical_apicid)
402 + max_physical_apicid = apicid;
403 +
404 +-#ifdef CONFIG_X86_32
405 +- switch (boot_cpu_data.x86_vendor) {
406 +- case X86_VENDOR_INTEL:
407 +- if (num_processors > 8)
408 +- def_to_bigsmp = 1;
409 +- break;
410 +- case X86_VENDOR_AMD:
411 +- if (max_physical_apicid >= 8)
412 +- def_to_bigsmp = 1;
413 +- }
414 +-#endif
415 +-
416 + #if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
417 + early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
418 + early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
419 +diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
420 +index 0c0182c..88b9d22 100644
421 +--- a/arch/x86/kernel/apic/probe_32.c
422 ++++ b/arch/x86/kernel/apic/probe_32.c
423 +@@ -54,6 +54,31 @@ late_initcall(print_ipi_mode);
424 +
425 + void default_setup_apic_routing(void)
426 + {
427 ++ int version = apic_version[boot_cpu_physical_apicid];
428 ++
429 ++ if (num_possible_cpus() > 8) {
430 ++ switch (boot_cpu_data.x86_vendor) {
431 ++ case X86_VENDOR_INTEL:
432 ++ if (!APIC_XAPIC(version)) {
433 ++ def_to_bigsmp = 0;
434 ++ break;
435 ++ }
436 ++ /* If P4 and above fall through */
437 ++ case X86_VENDOR_AMD:
438 ++ def_to_bigsmp = 1;
439 ++ }
440 ++ }
441 ++
442 ++#ifdef CONFIG_X86_BIGSMP
443 ++ generic_bigsmp_probe();
444 ++#endif
445 ++
446 ++ if (apic->setup_apic_routing)
447 ++ apic->setup_apic_routing();
448 ++}
449 ++
450 ++void setup_apic_flat_routing(void)
451 ++{
452 + #ifdef CONFIG_X86_IO_APIC
453 + printk(KERN_INFO
454 + "Enabling APIC mode: Flat. Using %d I/O APICs\n",
455 +@@ -103,7 +128,7 @@ struct apic apic_default = {
456 + .init_apic_ldr = default_init_apic_ldr,
457 +
458 + .ioapic_phys_id_map = default_ioapic_phys_id_map,
459 +- .setup_apic_routing = default_setup_apic_routing,
460 ++ .setup_apic_routing = setup_apic_flat_routing,
461 + .multi_timer_check = NULL,
462 + .apicid_to_node = default_apicid_to_node,
463 + .cpu_to_logical_apicid = default_cpu_to_logical_apicid,
464 +diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
465 +index c4cbd30..4c56f54 100644
466 +--- a/arch/x86/kernel/apic/probe_64.c
467 ++++ b/arch/x86/kernel/apic/probe_64.c
468 +@@ -67,17 +67,8 @@ void __init default_setup_apic_routing(void)
469 + }
470 + #endif
471 +
472 +- if (apic == &apic_flat) {
473 +- switch (boot_cpu_data.x86_vendor) {
474 +- case X86_VENDOR_INTEL:
475 +- if (num_processors > 8)
476 +- apic = &apic_physflat;
477 +- break;
478 +- case X86_VENDOR_AMD:
479 +- if (max_physical_apicid >= 8)
480 +- apic = &apic_physflat;
481 +- }
482 +- }
483 ++ if (apic == &apic_flat && num_possible_cpus() > 8)
484 ++ apic = &apic_physflat;
485 +
486 + printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
487 +
488 +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
489 +index a2a03cf..2a94890 100644
490 +--- a/arch/x86/kernel/cpu/intel.c
491 ++++ b/arch/x86/kernel/cpu/intel.c
492 +@@ -70,7 +70,8 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
493 + if (c->x86_power & (1 << 8)) {
494 + set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
495 + set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
496 +- sched_clock_stable = 1;
497 ++ if (!check_tsc_unstable())
498 ++ sched_clock_stable = 1;
499 + }
500 +
501 + /*
502 +diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
503 +index 5be95ef..e07bc4e 100644
504 +--- a/arch/x86/kernel/mpparse.c
505 ++++ b/arch/x86/kernel/mpparse.c
506 +@@ -359,13 +359,6 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
507 + x86_init.mpparse.mpc_record(1);
508 + }
509 +
510 +-#ifdef CONFIG_X86_BIGSMP
511 +- generic_bigsmp_probe();
512 +-#endif
513 +-
514 +- if (apic->setup_apic_routing)
515 +- apic->setup_apic_routing();
516 +-
517 + if (!num_processors)
518 + printk(KERN_ERR "MPTABLE: no processors registered!\n");
519 + return num_processors;
520 +diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
521 +index f010ab4..d0ba107 100644
522 +--- a/arch/x86/kernel/process.c
523 ++++ b/arch/x86/kernel/process.c
524 +@@ -439,21 +439,37 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
525 + }
526 +
527 + /*
528 +- * Check for AMD CPUs, which have potentially C1E support
529 ++ * Check for AMD CPUs, where APIC timer interrupt does not wake up CPU from C1e.
530 ++ * For more information see
531 ++ * - Erratum #400 for NPT family 0xf and family 0x10 CPUs
532 ++ * - Erratum #365 for family 0x11 (not affected because C1e not in use)
533 + */
534 + static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
535 + {
536 ++ u64 val;
537 + if (c->x86_vendor != X86_VENDOR_AMD)
538 +- return 0;
539 +-
540 +- if (c->x86 < 0x0F)
541 +- return 0;
542 ++ goto no_c1e_idle;
543 +
544 + /* Family 0x0f models < rev F do not have C1E */
545 +- if (c->x86 == 0x0f && c->x86_model < 0x40)
546 +- return 0;
547 ++ if (c->x86 == 0x0F && c->x86_model >= 0x40)
548 ++ return 1;
549 +
550 +- return 1;
551 ++ if (c->x86 == 0x10) {
552 ++ /*
553 ++ * check OSVW bit for CPUs that are not affected
554 ++ * by erratum #400
555 ++ */
556 ++ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
557 ++ if (val >= 2) {
558 ++ rdmsrl(MSR_AMD64_OSVW_STATUS, val);
559 ++ if (!(val & BIT(1)))
560 ++ goto no_c1e_idle;
561 ++ }
562 ++ return 1;
563 ++ }
564 ++
565 ++no_c1e_idle:
566 ++ return 0;
567 + }
568 +
569 + static cpumask_var_t c1e_mask;
570 +diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
571 +index f9ce04f..6eabe90 100644
572 +--- a/arch/x86/kernel/process_64.c
573 ++++ b/arch/x86/kernel/process_64.c
574 +@@ -546,6 +546,7 @@ void set_personality_ia32(void)
575 +
576 + /* Make sure to be in 32bit mode */
577 + set_thread_flag(TIF_IA32);
578 ++ current->personality |= force_personality32;
579 +
580 + /* Prepare the first "return" to user space */
581 + current_thread_info()->status |= TS_COMPAT;
582 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
583 +index 565ebc6..28e963d 100644
584 +--- a/arch/x86/kernel/smpboot.c
585 ++++ b/arch/x86/kernel/smpboot.c
586 +@@ -1066,9 +1066,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
587 + set_cpu_sibling_map(0);
588 +
589 + enable_IR_x2apic();
590 +-#ifdef CONFIG_X86_64
591 + default_setup_apic_routing();
592 +-#endif
593 +
594 + if (smp_sanity_check(max_cpus) < 0) {
595 + printk(KERN_INFO "SMP disabled\n");
596 +diff --git a/block/blk-settings.c b/block/blk-settings.c
597 +index d5aa886..9651c0a 100644
598 +--- a/block/blk-settings.c
599 ++++ b/block/blk-settings.c
600 +@@ -8,6 +8,7 @@
601 + #include <linux/blkdev.h>
602 + #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
603 + #include <linux/gcd.h>
604 ++#include <linux/lcm.h>
605 +
606 + #include "blk.h"
607 +
608 +@@ -490,18 +491,31 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
609 +
610 + /**
611 + * blk_stack_limits - adjust queue_limits for stacked devices
612 +- * @t: the stacking driver limits (top)
613 +- * @b: the underlying queue limits (bottom)
614 ++ * @t: the stacking driver limits (top device)
615 ++ * @b: the underlying queue limits (bottom, component device)
616 + * @offset: offset to beginning of data within component device
617 + *
618 + * Description:
619 +- * Merges two queue_limit structs. Returns 0 if alignment didn't
620 +- * change. Returns -1 if adding the bottom device caused
621 +- * misalignment.
622 ++ * This function is used by stacking drivers like MD and DM to ensure
623 ++ * that all component devices have compatible block sizes and
624 ++ * alignments. The stacking driver must provide a queue_limits
625 ++ * struct (top) and then iteratively call the stacking function for
626 ++ * all component (bottom) devices. The stacking function will
627 ++ * attempt to combine the values and ensure proper alignment.
628 ++ *
629 ++ * Returns 0 if the top and bottom queue_limits are compatible. The
630 ++ * top device's block sizes and alignment offsets may be adjusted to
631 ++ * ensure alignment with the bottom device. If no compatible sizes
632 ++ * and alignments exist, -1 is returned and the resulting top
633 ++ * queue_limits will have the misaligned flag set to indicate that
634 ++ * the alignment_offset is undefined.
635 + */
636 + int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
637 + sector_t offset)
638 + {
639 ++ sector_t alignment;
640 ++ unsigned int top, bottom, ret = 0;
641 ++
642 + t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
643 + t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
644 + t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
645 +@@ -518,6 +532,26 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
646 + t->max_segment_size = min_not_zero(t->max_segment_size,
647 + b->max_segment_size);
648 +
649 ++ t->misaligned |= b->misaligned;
650 ++
651 ++ alignment = queue_limit_alignment_offset(b, offset);
652 ++
653 ++ /* Bottom device has different alignment. Check that it is
654 ++ * compatible with the current top alignment.
655 ++ */
656 ++ if (t->alignment_offset != alignment) {
657 ++
658 ++ top = max(t->physical_block_size, t->io_min)
659 ++ + t->alignment_offset;
660 ++ bottom = max(b->physical_block_size, b->io_min) + alignment;
661 ++
662 ++ /* Verify that top and bottom intervals line up */
663 ++ if (max(top, bottom) & (min(top, bottom) - 1)) {
664 ++ t->misaligned = 1;
665 ++ ret = -1;
666 ++ }
667 ++ }
668 ++
669 + t->logical_block_size = max(t->logical_block_size,
670 + b->logical_block_size);
671 +
672 +@@ -525,37 +559,46 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
673 + b->physical_block_size);
674 +
675 + t->io_min = max(t->io_min, b->io_min);
676 ++ t->io_opt = lcm(t->io_opt, b->io_opt);
677 ++
678 + t->no_cluster |= b->no_cluster;
679 +
680 +- /* Bottom device offset aligned? */
681 +- if (offset &&
682 +- (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
683 ++ /* Physical block size a multiple of the logical block size? */
684 ++ if (t->physical_block_size & (t->logical_block_size - 1)) {
685 ++ t->physical_block_size = t->logical_block_size;
686 + t->misaligned = 1;
687 +- return -1;
688 ++ ret = -1;
689 + }
690 +
691 +- /* If top has no alignment offset, inherit from bottom */
692 +- if (!t->alignment_offset)
693 +- t->alignment_offset =
694 +- b->alignment_offset & (b->physical_block_size - 1);
695 ++ /* Minimum I/O a multiple of the physical block size? */
696 ++ if (t->io_min & (t->physical_block_size - 1)) {
697 ++ t->io_min = t->physical_block_size;
698 ++ t->misaligned = 1;
699 ++ ret = -1;
700 ++ }
701 +
702 +- /* Top device aligned on logical block boundary? */
703 +- if (t->alignment_offset & (t->logical_block_size - 1)) {
704 ++ /* Optimal I/O a multiple of the physical block size? */
705 ++ if (t->io_opt & (t->physical_block_size - 1)) {
706 ++ t->io_opt = 0;
707 + t->misaligned = 1;
708 +- return -1;
709 ++ ret = -1;
710 + }
711 +
712 +- /* Find lcm() of optimal I/O size */
713 +- if (t->io_opt && b->io_opt)
714 +- t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt);
715 +- else if (b->io_opt)
716 +- t->io_opt = b->io_opt;
717 ++ /* Find lowest common alignment_offset */
718 ++ t->alignment_offset = lcm(t->alignment_offset, alignment)
719 ++ & (max(t->physical_block_size, t->io_min) - 1);
720 +
721 +- /* Verify that optimal I/O size is a multiple of io_min */
722 +- if (t->io_min && t->io_opt % t->io_min)
723 +- return -1;
724 ++ /* Verify that new alignment_offset is on a logical block boundary */
725 ++ if (t->alignment_offset & (t->logical_block_size - 1)) {
726 ++ t->misaligned = 1;
727 ++ ret = -1;
728 ++ }
729 +
730 +- return 0;
731 ++ /* Discard */
732 ++ t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
733 ++ b->max_discard_sectors);
734 ++
735 ++ return ret;
736 + }
737 + EXPORT_SYMBOL(blk_stack_limits);
738 +
739 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
740 +index a587046..2c53024 100644
741 +--- a/drivers/ata/ahci.c
742 ++++ b/drivers/ata/ahci.c
743 +@@ -2831,6 +2831,14 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
744 + * On HP dv[4-6] and HDX18 with earlier BIOSen, link
745 + * to the harddisk doesn't become online after
746 + * resuming from STR. Warn and fail suspend.
747 ++ *
748 ++ * http://bugzilla.kernel.org/show_bug.cgi?id=12276
749 ++ *
750 ++ * Use dates instead of versions to match as HP is
751 ++ * apparently recycling both product and version
752 ++ * strings.
753 ++ *
754 ++ * http://bugzilla.kernel.org/show_bug.cgi?id=15462
755 + */
756 + {
757 + .ident = "dv4",
758 +@@ -2839,7 +2847,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
759 + DMI_MATCH(DMI_PRODUCT_NAME,
760 + "HP Pavilion dv4 Notebook PC"),
761 + },
762 +- .driver_data = "F.30", /* cutoff BIOS version */
763 ++ .driver_data = "20090105", /* F.30 */
764 + },
765 + {
766 + .ident = "dv5",
767 +@@ -2848,7 +2856,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
768 + DMI_MATCH(DMI_PRODUCT_NAME,
769 + "HP Pavilion dv5 Notebook PC"),
770 + },
771 +- .driver_data = "F.16", /* cutoff BIOS version */
772 ++ .driver_data = "20090506", /* F.16 */
773 + },
774 + {
775 + .ident = "dv6",
776 +@@ -2857,7 +2865,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
777 + DMI_MATCH(DMI_PRODUCT_NAME,
778 + "HP Pavilion dv6 Notebook PC"),
779 + },
780 +- .driver_data = "F.21", /* cutoff BIOS version */
781 ++ .driver_data = "20090423", /* F.21 */
782 + },
783 + {
784 + .ident = "HDX18",
785 +@@ -2866,7 +2874,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
786 + DMI_MATCH(DMI_PRODUCT_NAME,
787 + "HP HDX18 Notebook PC"),
788 + },
789 +- .driver_data = "F.23", /* cutoff BIOS version */
790 ++ .driver_data = "20090430", /* F.23 */
791 + },
792 + /*
793 + * Acer eMachines G725 has the same problem. BIOS
794 +@@ -2874,6 +2882,8 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
795 + * work. Inbetween, there are V1.06, V2.06 and V3.03
796 + * that we don't have much idea about. For now,
797 + * blacklist anything older than V3.04.
798 ++ *
799 ++ * http://bugzilla.kernel.org/show_bug.cgi?id=15104
800 + */
801 + {
802 + .ident = "G725",
803 +@@ -2881,19 +2891,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
804 + DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
805 + DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
806 + },
807 +- .driver_data = "V3.04", /* cutoff BIOS version */
808 ++ .driver_data = "20091216", /* V3.04 */
809 + },
810 + { } /* terminate list */
811 + };
812 + const struct dmi_system_id *dmi = dmi_first_match(sysids);
813 +- const char *ver;
814 ++ int year, month, date;
815 ++ char buf[9];
816 +
817 + if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
818 + return false;
819 +
820 +- ver = dmi_get_system_info(DMI_BIOS_VERSION);
821 ++ dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
822 ++ snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
823 +
824 +- return !ver || strcmp(ver, dmi->driver_data) < 0;
825 ++ return strcmp(buf, dmi->driver_data) < 0;
826 + }
827 +
828 + static bool ahci_broken_online(struct pci_dev *pdev)
829 +diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
830 +index 66fa4e1..f27c4d6 100644
831 +--- a/drivers/char/tty_buffer.c
832 ++++ b/drivers/char/tty_buffer.c
833 +@@ -247,7 +247,8 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
834 + {
835 + int copied = 0;
836 + do {
837 +- int space = tty_buffer_request_room(tty, size - copied);
838 ++ int goal = min(size - copied, TTY_BUFFER_PAGE);
839 ++ int space = tty_buffer_request_room(tty, goal);
840 + struct tty_buffer *tb = tty->buf.tail;
841 + /* If there is no space then tb may be NULL */
842 + if (unlikely(space == 0))
843 +@@ -283,7 +284,8 @@ int tty_insert_flip_string_flags(struct tty_struct *tty,
844 + {
845 + int copied = 0;
846 + do {
847 +- int space = tty_buffer_request_room(tty, size - copied);
848 ++ int goal = min(size - copied, TTY_BUFFER_PAGE);
849 ++ int space = tty_buffer_request_room(tty, goal);
850 + struct tty_buffer *tb = tty->buf.tail;
851 + /* If there is no space then tb may be NULL */
852 + if (unlikely(space == 0))
853 +diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
854 +index 713ed7d..ac2aea8 100644
855 +--- a/drivers/edac/edac_mce_amd.c
856 ++++ b/drivers/edac/edac_mce_amd.c
857 +@@ -311,9 +311,13 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
858 + if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
859 + pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
860 + } else {
861 +- pr_cont(", core: %d\n", ilog2((regs->nbsh & 0xf)));
862 +- }
863 ++ u8 assoc_cpus = regs->nbsh & 0xf;
864 ++
865 ++ if (assoc_cpus > 0)
866 ++ pr_cont(", core: %d", fls(assoc_cpus) - 1);
867 +
868 ++ pr_cont("\n");
869 ++ }
870 +
871 + pr_emerg("%s.\n", EXT_ERR_MSG(xec));
872 +
873 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
874 +index b54ba63..d5671c3 100644
875 +--- a/drivers/gpu/drm/drm_edid.c
876 ++++ b/drivers/gpu/drm/drm_edid.c
877 +@@ -834,8 +834,57 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
878 + return modes;
879 + }
880 +
881 ++static int add_detailed_modes(struct drm_connector *connector,
882 ++ struct detailed_timing *timing,
883 ++ struct edid *edid, u32 quirks, int preferred)
884 ++{
885 ++ int i, modes = 0;
886 ++ struct detailed_non_pixel *data = &timing->data.other_data;
887 ++ int timing_level = standard_timing_level(edid);
888 ++ struct drm_display_mode *newmode;
889 ++ struct drm_device *dev = connector->dev;
890 ++
891 ++ if (timing->pixel_clock) {
892 ++ newmode = drm_mode_detailed(dev, edid, timing, quirks);
893 ++ if (!newmode)
894 ++ return 0;
895 ++
896 ++ if (preferred)
897 ++ newmode->type |= DRM_MODE_TYPE_PREFERRED;
898 ++
899 ++ drm_mode_probed_add(connector, newmode);
900 ++ return 1;
901 ++ }
902 ++
903 ++ /* other timing types */
904 ++ switch (data->type) {
905 ++ case EDID_DETAIL_MONITOR_RANGE:
906 ++ /* Get monitor range data */
907 ++ break;
908 ++ case EDID_DETAIL_STD_MODES:
909 ++ /* Six modes per detailed section */
910 ++ for (i = 0; i < 6; i++) {
911 ++ struct std_timing *std;
912 ++ struct drm_display_mode *newmode;
913 ++
914 ++ std = &data->data.timings[i];
915 ++ newmode = drm_mode_std(dev, std, edid->revision,
916 ++ timing_level);
917 ++ if (newmode) {
918 ++ drm_mode_probed_add(connector, newmode);
919 ++ modes++;
920 ++ }
921 ++ }
922 ++ break;
923 ++ default:
924 ++ break;
925 ++ }
926 ++
927 ++ return modes;
928 ++}
929 ++
930 + /**
931 +- * add_detailed_modes - get detailed mode info from EDID data
932 ++ * add_detailed_info - get detailed mode info from EDID data
933 + * @connector: attached connector
934 + * @edid: EDID block to scan
935 + * @quirks: quirks to apply
936 +@@ -846,67 +895,24 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
937 + static int add_detailed_info(struct drm_connector *connector,
938 + struct edid *edid, u32 quirks)
939 + {
940 +- struct drm_device *dev = connector->dev;
941 +- int i, j, modes = 0;
942 +- int timing_level;
943 +-
944 +- timing_level = standard_timing_level(edid);
945 ++ int i, modes = 0;
946 +
947 + for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
948 + struct detailed_timing *timing = &edid->detailed_timings[i];
949 +- struct detailed_non_pixel *data = &timing->data.other_data;
950 +- struct drm_display_mode *newmode;
951 +-
952 +- /* X server check is version 1.1 or higher */
953 +- if (edid->version == 1 && edid->revision >= 1 &&
954 +- !timing->pixel_clock) {
955 +- /* Other timing or info */
956 +- switch (data->type) {
957 +- case EDID_DETAIL_MONITOR_SERIAL:
958 +- break;
959 +- case EDID_DETAIL_MONITOR_STRING:
960 +- break;
961 +- case EDID_DETAIL_MONITOR_RANGE:
962 +- /* Get monitor range data */
963 +- break;
964 +- case EDID_DETAIL_MONITOR_NAME:
965 +- break;
966 +- case EDID_DETAIL_MONITOR_CPDATA:
967 +- break;
968 +- case EDID_DETAIL_STD_MODES:
969 +- for (j = 0; j < 6; i++) {
970 +- struct std_timing *std;
971 +- struct drm_display_mode *newmode;
972 +-
973 +- std = &data->data.timings[j];
974 +- newmode = drm_mode_std(dev, std,
975 +- edid->revision,
976 +- timing_level);
977 +- if (newmode) {
978 +- drm_mode_probed_add(connector, newmode);
979 +- modes++;
980 +- }
981 +- }
982 +- break;
983 +- default:
984 +- break;
985 +- }
986 +- } else {
987 +- newmode = drm_mode_detailed(dev, edid, timing, quirks);
988 +- if (!newmode)
989 +- continue;
990 ++ int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
991 +
992 +- /* First detailed mode is preferred */
993 +- if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
994 +- newmode->type |= DRM_MODE_TYPE_PREFERRED;
995 +- drm_mode_probed_add(connector, newmode);
996 ++ /* In 1.0, only timings are allowed */
997 ++ if (!timing->pixel_clock && edid->version == 1 &&
998 ++ edid->revision == 0)
999 ++ continue;
1000 +
1001 +- modes++;
1002 +- }
1003 ++ modes += add_detailed_modes(connector, timing, edid, quirks,
1004 ++ preferred);
1005 + }
1006 +
1007 + return modes;
1008 + }
1009 ++
1010 + /**
1011 + * add_detailed_mode_eedid - get detailed mode info from addtional timing
1012 + * EDID block
1013 +@@ -920,12 +926,9 @@ static int add_detailed_info(struct drm_connector *connector,
1014 + static int add_detailed_info_eedid(struct drm_connector *connector,
1015 + struct edid *edid, u32 quirks)
1016 + {
1017 +- struct drm_device *dev = connector->dev;
1018 +- int i, j, modes = 0;
1019 ++ int i, modes = 0;
1020 + char *edid_ext = NULL;
1021 + struct detailed_timing *timing;
1022 +- struct detailed_non_pixel *data;
1023 +- struct drm_display_mode *newmode;
1024 + int edid_ext_num;
1025 + int start_offset, end_offset;
1026 + int timing_level;
1027 +@@ -976,51 +979,7 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
1028 + for (i = start_offset; i < end_offset;
1029 + i += sizeof(struct detailed_timing)) {
1030 + timing = (struct detailed_timing *)(edid_ext + i);
1031 +- data = &timing->data.other_data;
1032 +- /* Detailed mode timing */
1033 +- if (timing->pixel_clock) {
1034 +- newmode = drm_mode_detailed(dev, edid, timing, quirks);
1035 +- if (!newmode)
1036 +- continue;
1037 +-
1038 +- drm_mode_probed_add(connector, newmode);
1039 +-
1040 +- modes++;
1041 +- continue;
1042 +- }
1043 +-
1044 +- /* Other timing or info */
1045 +- switch (data->type) {
1046 +- case EDID_DETAIL_MONITOR_SERIAL:
1047 +- break;
1048 +- case EDID_DETAIL_MONITOR_STRING:
1049 +- break;
1050 +- case EDID_DETAIL_MONITOR_RANGE:
1051 +- /* Get monitor range data */
1052 +- break;
1053 +- case EDID_DETAIL_MONITOR_NAME:
1054 +- break;
1055 +- case EDID_DETAIL_MONITOR_CPDATA:
1056 +- break;
1057 +- case EDID_DETAIL_STD_MODES:
1058 +- /* Five modes per detailed section */
1059 +- for (j = 0; j < 5; i++) {
1060 +- struct std_timing *std;
1061 +- struct drm_display_mode *newmode;
1062 +-
1063 +- std = &data->data.timings[j];
1064 +- newmode = drm_mode_std(dev, std,
1065 +- edid->revision,
1066 +- timing_level);
1067 +- if (newmode) {
1068 +- drm_mode_probed_add(connector, newmode);
1069 +- modes++;
1070 +- }
1071 +- }
1072 +- break;
1073 +- default:
1074 +- break;
1075 +- }
1076 ++ modes += add_detailed_modes(connector, timing, edid, quirks, 0);
1077 + }
1078 +
1079 + return modes;
1080 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1081 +index f5d49a7..aafbef7 100644
1082 +--- a/drivers/gpu/drm/i915/i915_drv.h
1083 ++++ b/drivers/gpu/drm/i915/i915_drv.h
1084 +@@ -258,7 +258,7 @@ typedef struct drm_i915_private {
1085 +
1086 + struct notifier_block lid_notifier;
1087 +
1088 +- int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
1089 ++ int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
1090 + struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
1091 + int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
1092 + int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1093 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1094 +index 04da731..1e9c66a 100644
1095 +--- a/drivers/gpu/drm/i915/i915_gem.c
1096 ++++ b/drivers/gpu/drm/i915/i915_gem.c
1097 +@@ -1470,9 +1470,6 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
1098 + obj_priv->dirty = 0;
1099 +
1100 + for (i = 0; i < page_count; i++) {
1101 +- if (obj_priv->pages[i] == NULL)
1102 +- break;
1103 +-
1104 + if (obj_priv->dirty)
1105 + set_page_dirty(obj_priv->pages[i]);
1106 +
1107 +@@ -2246,7 +2243,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
1108 + struct address_space *mapping;
1109 + struct inode *inode;
1110 + struct page *page;
1111 +- int ret;
1112 +
1113 + if (obj_priv->pages_refcount++ != 0)
1114 + return 0;
1115 +@@ -2269,11 +2265,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
1116 + mapping_gfp_mask (mapping) |
1117 + __GFP_COLD |
1118 + gfpmask);
1119 +- if (IS_ERR(page)) {
1120 +- ret = PTR_ERR(page);
1121 +- i915_gem_object_put_pages(obj);
1122 +- return ret;
1123 +- }
1124 ++ if (IS_ERR(page))
1125 ++ goto err_pages;
1126 ++
1127 + obj_priv->pages[i] = page;
1128 + }
1129 +
1130 +@@ -2281,6 +2275,15 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
1131 + i915_gem_object_do_bit_17_swizzle(obj);
1132 +
1133 + return 0;
1134 ++
1135 ++err_pages:
1136 ++ while (i--)
1137 ++ page_cache_release(obj_priv->pages[i]);
1138 ++
1139 ++ drm_free_large(obj_priv->pages);
1140 ++ obj_priv->pages = NULL;
1141 ++ obj_priv->pages_refcount--;
1142 ++ return PTR_ERR(page);
1143 + }
1144 +
1145 + static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
1146 +diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
1147 +index 96cd256..97169ea 100644
1148 +--- a/drivers/gpu/drm/i915/intel_bios.c
1149 ++++ b/drivers/gpu/drm/i915/intel_bios.c
1150 +@@ -241,10 +241,6 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
1151 + GPIOF,
1152 + };
1153 +
1154 +- /* Set sensible defaults in case we can't find the general block
1155 +- or it is the wrong chipset */
1156 +- dev_priv->crt_ddc_bus = -1;
1157 +-
1158 + general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
1159 + if (general) {
1160 + u16 block_size = get_blocksize(general);
1161 +diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
1162 +index 5e730e6..166a24e 100644
1163 +--- a/drivers/gpu/drm/i915/intel_crt.c
1164 ++++ b/drivers/gpu/drm/i915/intel_crt.c
1165 +@@ -557,7 +557,7 @@ void intel_crt_init(struct drm_device *dev)
1166 + else {
1167 + i2c_reg = GPIOA;
1168 + /* Use VBT information for CRT DDC if available */
1169 +- if (dev_priv->crt_ddc_bus != -1)
1170 ++ if (dev_priv->crt_ddc_bus != 0)
1171 + i2c_reg = dev_priv->crt_ddc_bus;
1172 + }
1173 + intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
1174 +diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
1175 +index 2d7bcee..cb4290a 100644
1176 +--- a/drivers/hwmon/coretemp.c
1177 ++++ b/drivers/hwmon/coretemp.c
1178 +@@ -228,7 +228,7 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
1179 + if (err) {
1180 + dev_warn(dev,
1181 + "Unable to access MSR 0xEE, for Tjmax, left"
1182 +- " at default");
1183 ++ " at default\n");
1184 + } else if (eax & 0x40000000) {
1185 + tjmax = tjmax_ee;
1186 + }
1187 +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
1188 +index 55edcfe..4d73fcf 100644
1189 +--- a/drivers/i2c/busses/i2c-i801.c
1190 ++++ b/drivers/i2c/busses/i2c-i801.c
1191 +@@ -415,9 +415,11 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
1192 + data->block[0] = 32; /* max for SMBus block reads */
1193 + }
1194 +
1195 ++ /* Experience has shown that the block buffer can only be used for
1196 ++ SMBus (not I2C) block transactions, even though the datasheet
1197 ++ doesn't mention this limitation. */
1198 + if ((i801_features & FEATURE_BLOCK_BUFFER)
1199 +- && !(command == I2C_SMBUS_I2C_BLOCK_DATA
1200 +- && read_write == I2C_SMBUS_READ)
1201 ++ && command != I2C_SMBUS_I2C_BLOCK_DATA
1202 + && i801_set_block_buffer_mode() == 0)
1203 + result = i801_block_transaction_by_block(data, read_write,
1204 + hwpec);
1205 +diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
1206 +index fc8823b..0c99db0 100644
1207 +--- a/drivers/input/mouse/alps.c
1208 ++++ b/drivers/input/mouse/alps.c
1209 +@@ -62,6 +62,8 @@ static const struct alps_model_info alps_model_data[] = {
1210 + { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
1211 + ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
1212 + { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FW_BK_1 }, /* Dell Vostro 1400 */
1213 ++ { { 0x52, 0x01, 0x14 }, 0xff, 0xff,
1214 ++ ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
1215 + };
1216 +
1217 + /*
1218 +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
1219 +index 2a5982e..525b9b9 100644
1220 +--- a/drivers/input/serio/i8042-x86ia64io.h
1221 ++++ b/drivers/input/serio/i8042-x86ia64io.h
1222 +@@ -442,6 +442,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
1223 + },
1224 + },
1225 + {
1226 ++ /* Medion Akoya E1222 */
1227 ++ .matches = {
1228 ++ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
1229 ++ DMI_MATCH(DMI_PRODUCT_NAME, "E122X"),
1230 ++ },
1231 ++ },
1232 ++ {
1233 + /* Mivvy M310 */
1234 + .matches = {
1235 + DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
1236 +diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
1237 +index 9114ae1..e6307ba 100644
1238 +--- a/drivers/input/tablet/wacom.h
1239 ++++ b/drivers/input/tablet/wacom.h
1240 +@@ -1,7 +1,7 @@
1241 + /*
1242 + * drivers/input/tablet/wacom.h
1243 + *
1244 +- * USB Wacom Graphire and Wacom Intuos tablet support
1245 ++ * USB Wacom tablet support
1246 + *
1247 + * Copyright (c) 2000-2004 Vojtech Pavlik <vojtech@×××.cz>
1248 + * Copyright (c) 2000 Andreas Bach Aaen <abach@××××××××.dk>
1249 +@@ -69,6 +69,7 @@
1250 + * v1.49 (pc) - Added support for USB Tablet PC (0x90, 0x93, and 0x9A)
1251 + * v1.50 (pc) - Fixed a TabletPC touch bug in 2.6.28
1252 + * v1.51 (pc) - Added support for Intuos4
1253 ++ * v1.52 (pc) - Query Wacom data upon system resume
1254 + */
1255 +
1256 + /*
1257 +@@ -89,9 +90,9 @@
1258 + /*
1259 + * Version Information
1260 + */
1261 +-#define DRIVER_VERSION "v1.51"
1262 ++#define DRIVER_VERSION "v1.52"
1263 + #define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@×××.cz>"
1264 +-#define DRIVER_DESC "USB Wacom Graphire and Wacom Intuos tablet driver"
1265 ++#define DRIVER_DESC "USB Wacom tablet driver"
1266 + #define DRIVER_LICENSE "GPL"
1267 +
1268 + MODULE_AUTHOR(DRIVER_AUTHOR);
1269 +diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
1270 +index ea30c98..b5b69cc 100644
1271 +--- a/drivers/input/tablet/wacom_sys.c
1272 ++++ b/drivers/input/tablet/wacom_sys.c
1273 +@@ -1,7 +1,7 @@
1274 + /*
1275 + * drivers/input/tablet/wacom_sys.c
1276 + *
1277 +- * USB Wacom Graphire and Wacom Intuos tablet support - system specific code
1278 ++ * USB Wacom tablet support - system specific code
1279 + */
1280 +
1281 + /*
1282 +@@ -562,9 +562,10 @@ static int wacom_resume(struct usb_interface *intf)
1283 + int rv;
1284 +
1285 + mutex_lock(&wacom->lock);
1286 +- if (wacom->open)
1287 ++ if (wacom->open) {
1288 + rv = usb_submit_urb(wacom->irq, GFP_NOIO);
1289 +- else
1290 ++ wacom_query_tablet_data(intf);
1291 ++ } else
1292 + rv = 0;
1293 + mutex_unlock(&wacom->lock);
1294 +
1295 +diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
1296 +index cc768ca..a0f7b99 100644
1297 +--- a/drivers/isdn/gigaset/ev-layer.c
1298 ++++ b/drivers/isdn/gigaset/ev-layer.c
1299 +@@ -1243,14 +1243,10 @@ static void do_action(int action, struct cardstate *cs,
1300 + * note that bcs may be NULL if no B channel is free
1301 + */
1302 + at_state2->ConState = 700;
1303 +- kfree(at_state2->str_var[STR_NMBR]);
1304 +- at_state2->str_var[STR_NMBR] = NULL;
1305 +- kfree(at_state2->str_var[STR_ZCPN]);
1306 +- at_state2->str_var[STR_ZCPN] = NULL;
1307 +- kfree(at_state2->str_var[STR_ZBC]);
1308 +- at_state2->str_var[STR_ZBC] = NULL;
1309 +- kfree(at_state2->str_var[STR_ZHLC]);
1310 +- at_state2->str_var[STR_ZHLC] = NULL;
1311 ++ for (i = 0; i < STR_NUM; ++i) {
1312 ++ kfree(at_state2->str_var[i]);
1313 ++ at_state2->str_var[i] = NULL;
1314 ++ }
1315 + at_state2->int_var[VAR_ZCTP] = -1;
1316 +
1317 + spin_lock_irqsave(&cs->lock, flags);
1318 +diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
1319 +index 6a8e138..b3065b8 100644
1320 +--- a/drivers/isdn/gigaset/interface.c
1321 ++++ b/drivers/isdn/gigaset/interface.c
1322 +@@ -635,7 +635,6 @@ void gigaset_if_receive(struct cardstate *cs,
1323 + if ((tty = cs->tty) == NULL)
1324 + gig_dbg(DEBUG_ANY, "receive on closed device");
1325 + else {
1326 +- tty_buffer_request_room(tty, len);
1327 + tty_insert_flip_string(tty, buffer, len);
1328 + tty_flip_buffer_push(tty);
1329 + }
1330 +diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
1331 +index e5225d2..0823e26 100644
1332 +--- a/drivers/leds/leds-gpio.c
1333 ++++ b/drivers/leds/leds-gpio.c
1334 +@@ -211,7 +211,6 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
1335 + const struct of_device_id *match)
1336 + {
1337 + struct device_node *np = ofdev->node, *child;
1338 +- struct gpio_led led;
1339 + struct gpio_led_of_platform_data *pdata;
1340 + int count = 0, ret;
1341 +
1342 +@@ -226,8 +225,8 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
1343 + if (!pdata)
1344 + return -ENOMEM;
1345 +
1346 +- memset(&led, 0, sizeof(led));
1347 + for_each_child_of_node(np, child) {
1348 ++ struct gpio_led led = {};
1349 + enum of_gpio_flags flags;
1350 + const char *state;
1351 +
1352 +diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
1353 +index db74946..efddf15 100644
1354 +--- a/drivers/media/video/em28xx/em28xx-dvb.c
1355 ++++ b/drivers/media/video/em28xx/em28xx-dvb.c
1356 +@@ -610,6 +610,7 @@ static int dvb_fini(struct em28xx *dev)
1357 +
1358 + if (dev->dvb) {
1359 + unregister_dvb(dev->dvb);
1360 ++ kfree(dev->dvb);
1361 + dev->dvb = NULL;
1362 + }
1363 +
1364 +diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
1365 +index f31f05a..fba147c 100644
1366 +--- a/drivers/mmc/host/s3cmci.c
1367 ++++ b/drivers/mmc/host/s3cmci.c
1368 +@@ -1361,6 +1361,8 @@ static struct mmc_host_ops s3cmci_ops = {
1369 + static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
1370 + /* This is currently here to avoid a number of if (host->pdata)
1371 + * checks. Any zero fields to ensure reaonable defaults are picked. */
1372 ++ .no_wprotect = 1,
1373 ++ .no_detect = 1,
1374 + };
1375 +
1376 + #ifdef CONFIG_CPU_FREQ
1377 +diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
1378 +index aaea41e..e8e87a7 100644
1379 +--- a/drivers/net/e1000e/hw.h
1380 ++++ b/drivers/net/e1000e/hw.h
1381 +@@ -356,6 +356,7 @@ enum e1e_registers {
1382 + #define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
1383 + #define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
1384 +
1385 ++#define E1000_DEV_ID_ICH8_82567V_3 0x1501
1386 + #define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
1387 + #define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
1388 + #define E1000_DEV_ID_ICH8_IGP_C 0x104B
1389 +diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
1390 +index eff3f47..c688b55 100644
1391 +--- a/drivers/net/e1000e/ich8lan.c
1392 ++++ b/drivers/net/e1000e/ich8lan.c
1393 +@@ -3209,6 +3209,7 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
1394 + u32 phy_ctrl;
1395 +
1396 + switch (hw->mac.type) {
1397 ++ case e1000_ich8lan:
1398 + case e1000_ich9lan:
1399 + case e1000_ich10lan:
1400 + case e1000_pchlan:
1401 +diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
1402 +index 2154530..f590bea 100644
1403 +--- a/drivers/net/e1000e/netdev.c
1404 ++++ b/drivers/net/e1000e/netdev.c
1405 +@@ -5360,6 +5360,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
1406 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
1407 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
1408 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
1409 ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
1410 +
1411 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
1412 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
1413 +diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
1414 +index 34b0492..9c42149 100644
1415 +--- a/drivers/net/ixgbe/ixgbe_82599.c
1416 ++++ b/drivers/net/ixgbe/ixgbe_82599.c
1417 +@@ -332,6 +332,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
1418 + case IXGBE_DEV_ID_82599_KX4:
1419 + case IXGBE_DEV_ID_82599_KX4_MEZZ:
1420 + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
1421 ++ case IXGBE_DEV_ID_82599_KR:
1422 + case IXGBE_DEV_ID_82599_XAUI_LOM:
1423 + /* Default device ID is mezzanine card KX/KX4 */
1424 + media_type = ixgbe_media_type_backplane;
1425 +diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
1426 +index a456578..6339d65 100644
1427 +--- a/drivers/net/ixgbe/ixgbe_main.c
1428 ++++ b/drivers/net/ixgbe/ixgbe_main.c
1429 +@@ -96,6 +96,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
1430 + board_82599 },
1431 + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
1432 + board_82599 },
1433 ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
1434 ++ board_82599 },
1435 + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
1436 + board_82599 },
1437 + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
1438 +diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
1439 +index ef4bdd5..7d66f5b 100644
1440 +--- a/drivers/net/ixgbe/ixgbe_type.h
1441 ++++ b/drivers/net/ixgbe/ixgbe_type.h
1442 +@@ -50,6 +50,7 @@
1443 + #define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
1444 + #define IXGBE_DEV_ID_82599_KX4 0x10F7
1445 + #define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
1446 ++#define IXGBE_DEV_ID_82599_KR 0x1517
1447 + #define IXGBE_DEV_ID_82599_CX4 0x10F9
1448 + #define IXGBE_DEV_ID_82599_SFP 0x10FB
1449 + #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
1450 +diff --git a/drivers/net/jme.c b/drivers/net/jme.c
1451 +index 1d2a325..3bb3a6d 100644
1452 +--- a/drivers/net/jme.c
1453 ++++ b/drivers/net/jme.c
1454 +@@ -946,6 +946,8 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
1455 + jme->jme_vlan_rx(skb, jme->vlgrp,
1456 + le16_to_cpu(rxdesc->descwb.vlan));
1457 + NET_STAT(jme).rx_bytes += 4;
1458 ++ } else {
1459 ++ dev_kfree_skb(skb);
1460 + }
1461 + } else {
1462 + jme->jme_rx(skb);
1463 +@@ -2085,12 +2087,45 @@ jme_tx_timeout(struct net_device *netdev)
1464 + jme_reset_link(jme);
1465 + }
1466 +
1467 ++static inline void jme_pause_rx(struct jme_adapter *jme)
1468 ++{
1469 ++ atomic_dec(&jme->link_changing);
1470 ++
1471 ++ jme_set_rx_pcc(jme, PCC_OFF);
1472 ++ if (test_bit(JME_FLAG_POLL, &jme->flags)) {
1473 ++ JME_NAPI_DISABLE(jme);
1474 ++ } else {
1475 ++ tasklet_disable(&jme->rxclean_task);
1476 ++ tasklet_disable(&jme->rxempty_task);
1477 ++ }
1478 ++}
1479 ++
1480 ++static inline void jme_resume_rx(struct jme_adapter *jme)
1481 ++{
1482 ++ struct dynpcc_info *dpi = &(jme->dpi);
1483 ++
1484 ++ if (test_bit(JME_FLAG_POLL, &jme->flags)) {
1485 ++ JME_NAPI_ENABLE(jme);
1486 ++ } else {
1487 ++ tasklet_hi_enable(&jme->rxclean_task);
1488 ++ tasklet_hi_enable(&jme->rxempty_task);
1489 ++ }
1490 ++ dpi->cur = PCC_P1;
1491 ++ dpi->attempt = PCC_P1;
1492 ++ dpi->cnt = 0;
1493 ++ jme_set_rx_pcc(jme, PCC_P1);
1494 ++
1495 ++ atomic_inc(&jme->link_changing);
1496 ++}
1497 ++
1498 + static void
1499 + jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1500 + {
1501 + struct jme_adapter *jme = netdev_priv(netdev);
1502 +
1503 ++ jme_pause_rx(jme);
1504 + jme->vlgrp = grp;
1505 ++ jme_resume_rx(jme);
1506 + }
1507 +
1508 + static void
1509 +diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
1510 +index ba5d3fe..dcc1c23 100644
1511 +--- a/drivers/net/tg3.c
1512 ++++ b/drivers/net/tg3.c
1513 +@@ -4995,7 +4995,7 @@ static void tg3_poll_controller(struct net_device *dev)
1514 + struct tg3 *tp = netdev_priv(dev);
1515 +
1516 + for (i = 0; i < tp->irq_cnt; i++)
1517 +- tg3_interrupt(tp->napi[i].irq_vec, dev);
1518 ++ tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
1519 + }
1520 + #endif
1521 +
1522 +@@ -5392,7 +5392,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
1523 + mss = 0;
1524 + if ((mss = skb_shinfo(skb)->gso_size) != 0) {
1525 + struct iphdr *iph;
1526 +- int tcp_opt_len, ip_tcp_len, hdr_len;
1527 ++ u32 tcp_opt_len, ip_tcp_len, hdr_len;
1528 +
1529 + if (skb_header_cloned(skb) &&
1530 + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
1531 +@@ -5423,8 +5423,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
1532 + IPPROTO_TCP,
1533 + 0);
1534 +
1535 +- if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
1536 +- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
1537 ++ if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
1538 ++ mss |= hdr_len << 9;
1539 ++ else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
1540 ++ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1541 + if (tcp_opt_len || iph->ihl > 5) {
1542 + int tsflags;
1543 +
1544 +@@ -5459,6 +5461,9 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
1545 +
1546 + would_hit_hwbug = 0;
1547 +
1548 ++ if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
1549 ++ would_hit_hwbug = 1;
1550 ++
1551 + if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
1552 + would_hit_hwbug = 1;
1553 + else if (tg3_4g_overflow_test(mapping, len))
1554 +@@ -5482,6 +5487,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
1555 +
1556 + tnapi->tx_buffers[entry].skb = NULL;
1557 +
1558 ++ if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
1559 ++ len <= 8)
1560 ++ would_hit_hwbug = 1;
1561 ++
1562 + if (tg3_4g_overflow_test(mapping, len))
1563 + would_hit_hwbug = 1;
1564 +
1565 +@@ -12608,6 +12617,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
1566 + }
1567 + }
1568 +
1569 ++ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
1570 ++ tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
1571 ++
1572 + tp->irq_max = 1;
1573 +
1574 + #ifdef TG3_NAPI
1575 +@@ -13975,8 +13987,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
1576 + goto err_out_iounmap;
1577 + }
1578 +
1579 +- if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
1580 +- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
1581 ++ if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
1582 + dev->netdev_ops = &tg3_netdev_ops;
1583 + else
1584 + dev->netdev_ops = &tg3_netdev_ops_dma_bug;
1585 +diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
1586 +index bab7940..529f55a 100644
1587 +--- a/drivers/net/tg3.h
1588 ++++ b/drivers/net/tg3.h
1589 +@@ -2759,6 +2759,9 @@ struct tg3 {
1590 + #define TG3_FLG3_TOGGLE_10_100_L1PLLPD 0x00008000
1591 + #define TG3_FLG3_PHY_IS_FET 0x00010000
1592 + #define TG3_FLG3_ENABLE_RSS 0x00020000
1593 ++#define TG3_FLG3_4G_DMA_BNDRY_BUG 0x00080000
1594 ++#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000
1595 ++#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
1596 +
1597 + struct timer_list timer;
1598 + u16 timer_counter;
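The tg3 change above replaces the old per-chip special case in tg3_init_one() with a feature flag: on BCM5906 parts, transmit fragments of 8 bytes or less trip a DMA erratum, so TG3_FLG3_SHORT_DMA_BUG now routes such packets through the existing would_hit_hwbug workaround path. A minimal sketch of the guard as a standalone predicate; demo_chip and DEMO_SHORT_DMA_BUG are invented names for illustration, not tg3 symbols:

    #include <linux/types.h>

    struct demo_chip {
            u32 flags;                      /* collected hardware-bug flags */
    };
    #define DEMO_SHORT_DMA_BUG      0x1     /* set once per device, at probe */

    /* True when this fragment would trip the 5906 short-DMA erratum
     * and the packet must take the slower workaround path. */
    static bool short_dma_bug_hit(const struct demo_chip *tp,
                                  unsigned int len)
    {
            return (tp->flags & DEMO_SHORT_DMA_BUG) && len <= 8;
    }

Checking a probe-time flag per fragment keeps the hot path cheap for the many chips without the erratum, which is why the patch sets the flag once in tg3_get_invariants() rather than testing the chip revision in the transmit loop.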
1599 +diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
1600 +index aafdc1e..2c79c78 100644
1601 +--- a/drivers/net/wireless/ath/ath5k/ath5k.h
1602 ++++ b/drivers/net/wireless/ath/ath5k/ath5k.h
1603 +@@ -540,7 +540,7 @@ struct ath5k_txq_info {
1604 + u32 tqi_cbr_period; /* Constant bit rate period */
1605 + u32 tqi_cbr_overflow_limit;
1606 + u32 tqi_burst_time;
1607 +- u32 tqi_ready_time; /* Not used */
1608 ++ u32 tqi_ready_time; /* Time queue waits after an event */
1609 + };
1610 +
1611 + /*
1612 +diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
1613 +index 46f913b..6313788 100644
1614 +--- a/drivers/net/wireless/ath/ath5k/base.c
1615 ++++ b/drivers/net/wireless/ath/ath5k/base.c
1616 +@@ -1511,7 +1511,8 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1617 +
1618 + ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
1619 + if (ret)
1620 +- return ret;
1621 ++ goto err;
1622 ++
1623 + if (sc->opmode == NL80211_IFTYPE_AP ||
1624 + sc->opmode == NL80211_IFTYPE_MESH_POINT) {
1625 + /*
1626 +@@ -1538,10 +1539,25 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1627 + if (ret) {
1628 + ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
1629 + "hardware queue!\n", __func__);
1630 +- return ret;
1631 ++ goto err;
1632 + }
1633 ++ ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
1634 ++ if (ret)
1635 ++ goto err;
1636 +
1637 +- return ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */;
1638 ++ /* reconfigure cabq with ready time to 80% of beacon_interval */
1639 ++ ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
1640 ++ if (ret)
1641 ++ goto err;
1642 ++
1643 ++ qi.tqi_ready_time = (sc->bintval * 80) / 100;
1644 ++ ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
1645 ++ if (ret)
1646 ++ goto err;
1647 ++
1648 ++ ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
1649 ++err:
1650 ++ return ret;
1651 + }
1652 +
1653 + static void
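The beaconq rework above also reconfigures the CAB (content-after-beacon) queue: its ready time is set to 80% of the beacon interval, and the qcu.c hunk below then derives the hardware register value from that per-queue tqi_ready_time instead of the compile-time AR5K_TUNE_BEACON_INTERVAL. A sketch of the arithmetic, assuming the interval is in TU as in the driver; demo_cab_ready_time is an invented name:

    #include <linux/types.h>

    /* 80% of the beacon interval, e.g. demo_cab_ready_time(100) == 80 */
    static u32 demo_cab_ready_time(u32 bintval)
    {
            return (bintval * 80) / 100;
    }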
1654 +diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
1655 +index eeebb9a..b7c5725 100644
1656 +--- a/drivers/net/wireless/ath/ath5k/qcu.c
1657 ++++ b/drivers/net/wireless/ath/ath5k/qcu.c
1658 +@@ -408,12 +408,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
1659 + break;
1660 +
1661 + case AR5K_TX_QUEUE_CAB:
1662 ++ /* XXX: use BCN_SENT_GT, if we can figure out how */
1663 + AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
1664 +- AR5K_QCU_MISC_FRSHED_BCN_SENT_GT |
1665 ++ AR5K_QCU_MISC_FRSHED_DBA_GT |
1666 + AR5K_QCU_MISC_CBREXP_DIS |
1667 + AR5K_QCU_MISC_CBREXP_BCN_DIS);
1668 +
1669 +- ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
1670 ++ ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
1671 + (AR5K_TUNE_SW_BEACON_RESP -
1672 + AR5K_TUNE_DMA_BEACON_RESP) -
1673 + AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
1674 +diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
1675 +index 34e13c7..257ea18 100644
1676 +--- a/drivers/net/wireless/ath/ath5k/reset.c
1677 ++++ b/drivers/net/wireless/ath/ath5k/reset.c
1678 +@@ -1382,8 +1382,9 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1679 + * Set clocks to 32KHz operation and use an
1680 + * external 32KHz crystal when sleeping if one
1681 + * exists */
1682 +- if (ah->ah_version == AR5K_AR5212)
1683 +- ath5k_hw_set_sleep_clock(ah, true);
1684 ++ if (ah->ah_version == AR5K_AR5212 &&
1685 ++ ah->ah_op_mode != NL80211_IFTYPE_AP)
1686 ++ ath5k_hw_set_sleep_clock(ah, true);
1687 +
1688 + /*
1689 + * Disable beacons and reset the register
1690 +diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
1691 +index cdb90c5..ad11969 100644
1692 +--- a/drivers/net/wireless/ath/ath9k/ath9k.h
1693 ++++ b/drivers/net/wireless/ath/ath9k/ath9k.h
1694 +@@ -368,6 +368,7 @@ void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1695 + u16 tid, u16 *ssn);
1696 + void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
1697 + void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
1698 ++void ath9k_enable_ps(struct ath_softc *sc);
1699 +
1700 + /********/
1701 + /* VIFs */
1702 +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
1703 +index 5864eaa..15eb245 100644
1704 +--- a/drivers/net/wireless/ath/ath9k/main.c
1705 ++++ b/drivers/net/wireless/ath/ath9k/main.c
1706 +@@ -1544,6 +1544,7 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1707 + IEEE80211_HW_AMPDU_AGGREGATION |
1708 + IEEE80211_HW_SUPPORTS_PS |
1709 + IEEE80211_HW_PS_NULLFUNC_STACK |
1710 ++ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
1711 + IEEE80211_HW_SPECTRUM_MGMT;
1712 +
1713 + if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
1714 +@@ -2305,6 +2306,19 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1715 + mutex_unlock(&sc->mutex);
1716 + }
1717 +
1718 ++void ath9k_enable_ps(struct ath_softc *sc)
1719 ++{
1720 ++ sc->ps_enabled = true;
1721 ++ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
1722 ++ if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
1723 ++ sc->imask |= ATH9K_INT_TIM_TIMER;
1724 ++ ath9k_hw_set_interrupts(sc->sc_ah,
1725 ++ sc->imask);
1726 ++ }
1727 ++ }
1728 ++ ath9k_hw_setrxabort(sc->sc_ah, 1);
1729 ++}
1730 ++
1731 + static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1732 + {
1733 + struct ath_wiphy *aphy = hw->priv;
1734 +@@ -2336,19 +2350,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1735 + if (changed & IEEE80211_CONF_CHANGE_PS) {
1736 + if (conf->flags & IEEE80211_CONF_PS) {
1737 + sc->sc_flags |= SC_OP_PS_ENABLED;
1738 +- if (!(ah->caps.hw_caps &
1739 +- ATH9K_HW_CAP_AUTOSLEEP)) {
1740 +- if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
1741 +- sc->imask |= ATH9K_INT_TIM_TIMER;
1742 +- ath9k_hw_set_interrupts(sc->sc_ah,
1743 +- sc->imask);
1744 +- }
1745 +- }
1746 +- sc->ps_enabled = true;
1747 + if ((sc->sc_flags & SC_OP_NULLFUNC_COMPLETED)) {
1748 + sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
1749 +- sc->ps_enabled = true;
1750 +- ath9k_hw_setrxabort(sc->sc_ah, 1);
1751 ++ ath9k_enable_ps(sc);
1752 + }
1753 + } else {
1754 + sc->ps_enabled = false;
1755 +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
1756 +index 9009bac..a232361 100644
1757 +--- a/drivers/net/wireless/ath/ath9k/xmit.c
1758 ++++ b/drivers/net/wireless/ath/ath9k/xmit.c
1759 +@@ -1320,25 +1320,6 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1760 + return htype;
1761 + }
1762 +
1763 +-static bool is_pae(struct sk_buff *skb)
1764 +-{
1765 +- struct ieee80211_hdr *hdr;
1766 +- __le16 fc;
1767 +-
1768 +- hdr = (struct ieee80211_hdr *)skb->data;
1769 +- fc = hdr->frame_control;
1770 +-
1771 +- if (ieee80211_is_data(fc)) {
1772 +- if (ieee80211_is_nullfunc(fc) ||
1773 +- /* Port Access Entity (IEEE 802.1X) */
1774 +- (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
1775 +- return true;
1776 +- }
1777 +- }
1778 +-
1779 +- return false;
1780 +-}
1781 +-
1782 + static int get_hw_crypto_keytype(struct sk_buff *skb)
1783 + {
1784 + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1785 +@@ -1648,7 +1629,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1786 + goto tx_done;
1787 + }
1788 +
1789 +- if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) {
1790 ++ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
1791 + /*
1792 + * Try aggregation if it's a unicast data frame
1793 + * and the destination is HT capable.
1794 +@@ -1998,10 +1979,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1795 +
1796 + if (bf->bf_isnullfunc &&
1797 + (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
1798 +- if ((sc->sc_flags & SC_OP_PS_ENABLED)) {
1799 +- sc->ps_enabled = true;
1800 +- ath9k_hw_setrxabort(sc->sc_ah, 1);
1801 +- } else
1802 ++ if ((sc->sc_flags & SC_OP_PS_ENABLED))
1803 ++ ath9k_enable_ps(sc);
1804 ++ else
1805 + sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED;
1806 + }
1807 +
1808 +@@ -2210,7 +2190,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
1809 + if (ATH_TXQ_SETUP(sc, i)) {
1810 + txq = &sc->tx.txq[i];
1811 +
1812 +- spin_lock(&txq->axq_lock);
1813 ++ spin_lock_bh(&txq->axq_lock);
1814 +
1815 + list_for_each_entry_safe(ac,
1816 + ac_tmp, &txq->axq_acq, list) {
1817 +@@ -2231,7 +2211,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
1818 + }
1819 + }
1820 +
1821 +- spin_unlock(&txq->axq_lock);
1822 ++ spin_unlock_bh(&txq->axq_lock);
1823 + }
1824 + }
1825 + }
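The last xmit.c hunk switches axq_lock to the _bh lock variants. The same lock is taken from the driver's TX tasklet, i.e. in softirq context; if process context held it with plain spin_lock() and that tasklet then ran on the same CPU, the tasklet would spin on the lock forever. A minimal sketch of the safe pattern, assuming kernel context; demo_txq is an invented type:

    #include <linux/spinlock.h>

    struct demo_txq {
            spinlock_t axq_lock;            /* also taken from a tasklet */
    };

    static void demo_node_cleanup(struct demo_txq *txq)
    {
            /* _bh disables softirqs locally, so the TX tasklet cannot
             * preempt us on this CPU while we hold the lock. */
            spin_lock_bh(&txq->axq_lock);
            /* ... tear down per-node queue state ... */
            spin_unlock_bh(&txq->axq_lock);
    }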
1826 +diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
1827 +index 2bde1a9..a8a00d2 100644
1828 +--- a/drivers/net/wireless/b43/main.c
1829 ++++ b/drivers/net/wireless/b43/main.c
1830 +@@ -852,19 +852,16 @@ static void b43_op_update_tkip_key(struct ieee80211_hw *hw,
1831 + if (B43_WARN_ON(!modparam_hwtkip))
1832 + return;
1833 +
1834 +- mutex_lock(&wl->mutex);
1835 +-
1836 ++ /* This is only called from the RX path through mac80211, where
1837 ++ * our mutex is already locked. */
1838 ++ B43_WARN_ON(!mutex_is_locked(&wl->mutex));
1839 + dev = wl->current_dev;
1840 +- if (!dev || b43_status(dev) < B43_STAT_INITIALIZED)
1841 +- goto out_unlock;
1842 ++ B43_WARN_ON(!dev || b43_status(dev) < B43_STAT_INITIALIZED);
1843 +
1844 + keymac_write(dev, index, NULL); /* First zero out mac to avoid race */
1845 +
1846 + rx_tkip_phase1_write(dev, index, iv32, phase1key);
1847 + keymac_write(dev, index, addr);
1848 +-
1849 +-out_unlock:
1850 +- mutex_unlock(&wl->mutex);
1851 + }
1852 +
1853 + static void do_key_write(struct b43_wldev *dev,
1854 +diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
1855 +index 9d60f6c..56bfcc3 100644
1856 +--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
1857 ++++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
1858 +@@ -2545,11 +2545,9 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
1859 + memset((void *)&priv->hw_params, 0,
1860 + sizeof(struct iwl_hw_params));
1861 +
1862 +- priv->shared_virt =
1863 +- pci_alloc_consistent(priv->pci_dev,
1864 +- sizeof(struct iwl3945_shared),
1865 +- &priv->shared_phys);
1866 +-
1867 ++ priv->shared_virt = dma_alloc_coherent(&priv->pci_dev->dev,
1868 ++ sizeof(struct iwl3945_shared),
1869 ++ &priv->shared_phys, GFP_KERNEL);
1870 + if (!priv->shared_virt) {
1871 + IWL_ERR(priv, "failed to allocate pci memory\n");
1872 + mutex_unlock(&priv->mutex);
1873 +diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
1874 +index 6d6235f..4a4f7e4 100644
1875 +--- a/drivers/net/wireless/iwlwifi/iwl-core.c
1876 ++++ b/drivers/net/wireless/iwlwifi/iwl-core.c
1877 +@@ -1598,9 +1598,9 @@ EXPORT_SYMBOL(iwl_uninit_drv);
1878 + void iwl_free_isr_ict(struct iwl_priv *priv)
1879 + {
1880 + if (priv->ict_tbl_vir) {
1881 +- pci_free_consistent(priv->pci_dev, (sizeof(u32) * ICT_COUNT) +
1882 +- PAGE_SIZE, priv->ict_tbl_vir,
1883 +- priv->ict_tbl_dma);
1884 ++ dma_free_coherent(&priv->pci_dev->dev,
1885 ++ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
1886 ++ priv->ict_tbl_vir, priv->ict_tbl_dma);
1887 + priv->ict_tbl_vir = NULL;
1888 + }
1889 + }
1890 +@@ -1616,9 +1616,9 @@ int iwl_alloc_isr_ict(struct iwl_priv *priv)
1891 + if (priv->cfg->use_isr_legacy)
1892 + return 0;
1893 + /* allocate shared data table */
1894 +- priv->ict_tbl_vir = pci_alloc_consistent(priv->pci_dev, (sizeof(u32) *
1895 +- ICT_COUNT) + PAGE_SIZE,
1896 +- &priv->ict_tbl_dma);
1897 ++ priv->ict_tbl_vir = dma_alloc_coherent(&priv->pci_dev->dev,
1898 ++ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
1899 ++ &priv->ict_tbl_dma, GFP_KERNEL);
1900 + if (!priv->ict_tbl_vir)
1901 + return -ENOMEM;
1902 +
1903 +diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
1904 +index bd0b12e..f8481e8 100644
1905 +--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
1906 ++++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
1907 +@@ -80,8 +80,8 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
1908 + struct fw_desc *desc)
1909 + {
1910 + if (desc->v_addr)
1911 +- pci_free_consistent(pci_dev, desc->len,
1912 +- desc->v_addr, desc->p_addr);
1913 ++ dma_free_coherent(&pci_dev->dev, desc->len,
1914 ++ desc->v_addr, desc->p_addr);
1915 + desc->v_addr = NULL;
1916 + desc->len = 0;
1917 + }
1918 +@@ -89,7 +89,8 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
1919 + static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
1920 + struct fw_desc *desc)
1921 + {
1922 +- desc->v_addr = pci_alloc_consistent(pci_dev, desc->len, &desc->p_addr);
1923 ++ desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
1924 ++ &desc->p_addr, GFP_KERNEL);
1925 + return (desc->v_addr != NULL) ? 0 : -ENOMEM;
1926 + }
1927 +
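All of the iwlwifi hunks in this patch follow the same template: pci_alloc_consistent()/pci_free_consistent() are thin compatibility wrappers that call dma_alloc_coherent()/dma_free_coherent() on &pdev->dev with GFP_ATOMIC hard-coded, and calling the dma_* API directly lets these sleepable setup paths request GFP_KERNEL. A before/after sketch for a generic PCI driver; the demo_* names are invented:

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static void *demo_alloc_shared(struct pci_dev *pdev, size_t len,
                                   dma_addr_t *dma)
    {
            /* old: pci_alloc_consistent(pdev, len, dma), which expands
             * to dma_alloc_coherent(&pdev->dev, len, dma, GFP_ATOMIC) */
            return dma_alloc_coherent(&pdev->dev, len, dma, GFP_KERNEL);
    }

    static void demo_free_shared(struct pci_dev *pdev, size_t len,
                                 void *cpu_addr, dma_addr_t dma)
    {
            dma_free_coherent(&pdev->dev, len, cpu_addr, dma);
    }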
1928 +diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
1929 +index 493626b..3198a8a 100644
1930 +--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
1931 ++++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
1932 +@@ -345,10 +345,10 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1933 + }
1934 + }
1935 +
1936 +- pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1937 +- rxq->dma_addr);
1938 +- pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
1939 +- rxq->rb_stts, rxq->rb_stts_dma);
1940 ++ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1941 ++ rxq->dma_addr);
1942 ++ dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
1943 ++ rxq->rb_stts, rxq->rb_stts_dma);
1944 + rxq->bd = NULL;
1945 + rxq->rb_stts = NULL;
1946 + }
1947 +@@ -357,7 +357,7 @@ EXPORT_SYMBOL(iwl_rx_queue_free);
1948 + int iwl_rx_queue_alloc(struct iwl_priv *priv)
1949 + {
1950 + struct iwl_rx_queue *rxq = &priv->rxq;
1951 +- struct pci_dev *dev = priv->pci_dev;
1952 ++ struct device *dev = &priv->pci_dev->dev;
1953 + int i;
1954 +
1955 + spin_lock_init(&rxq->lock);
1956 +@@ -365,12 +365,13 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
1957 + INIT_LIST_HEAD(&rxq->rx_used);
1958 +
1959 + /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
1960 +- rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
1961 ++ rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr,
1962 ++ GFP_KERNEL);
1963 + if (!rxq->bd)
1964 + goto err_bd;
1965 +
1966 +- rxq->rb_stts = pci_alloc_consistent(dev, sizeof(struct iwl_rb_status),
1967 +- &rxq->rb_stts_dma);
1968 ++ rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
1969 ++ &rxq->rb_stts_dma, GFP_KERNEL);
1970 + if (!rxq->rb_stts)
1971 + goto err_rb;
1972 +
1973 +@@ -387,8 +388,8 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
1974 + return 0;
1975 +
1976 + err_rb:
1977 +- pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1978 +- rxq->dma_addr);
1979 ++ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1980 ++ rxq->dma_addr);
1981 + err_bd:
1982 + return -ENOMEM;
1983 + }
1984 +diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
1985 +index f449f06..e143adc 100644
1986 +--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
1987 ++++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
1988 +@@ -60,7 +60,8 @@ static const u16 default_tid_to_tx_fifo[] = {
1989 + static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
1990 + struct iwl_dma_ptr *ptr, size_t size)
1991 + {
1992 +- ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
1993 ++ ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
1994 ++ GFP_KERNEL);
1995 + if (!ptr->addr)
1996 + return -ENOMEM;
1997 + ptr->size = size;
1998 +@@ -73,7 +74,7 @@ static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
1999 + if (unlikely(!ptr->addr))
2000 + return;
2001 +
2002 +- pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
2003 ++ dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
2004 + memset(ptr, 0, sizeof(*ptr));
2005 + }
2006 +
2007 +@@ -125,7 +126,7 @@ void iwl_free_tfds_in_queue(struct iwl_priv *priv,
2008 + if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
2009 + priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
2010 + else {
2011 +- IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n",
2012 ++ IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
2013 + priv->stations[sta_id].tid[tid].tfds_in_queue,
2014 + freed);
2015 + priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
2016 +@@ -145,7 +146,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
2017 + {
2018 + struct iwl_tx_queue *txq = &priv->txq[txq_id];
2019 + struct iwl_queue *q = &txq->q;
2020 +- struct pci_dev *dev = priv->pci_dev;
2021 ++ struct device *dev = &priv->pci_dev->dev;
2022 + int i, len;
2023 +
2024 + if (q->n_bd == 0)
2025 +@@ -164,8 +165,8 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
2026 +
2027 + /* De-alloc circular buffer of TFDs */
2028 + if (txq->q.n_bd)
2029 +- pci_free_consistent(dev, priv->hw_params.tfd_size *
2030 +- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
2031 ++ dma_free_coherent(dev, priv->hw_params.tfd_size *
2032 ++ txq->q.n_bd, txq->tfds, txq->q.dma_addr);
2033 +
2034 + /* De-alloc array of per-TFD driver data */
2035 + kfree(txq->txb);
2036 +@@ -194,7 +195,7 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
2037 + {
2038 + struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
2039 + struct iwl_queue *q = &txq->q;
2040 +- struct pci_dev *dev = priv->pci_dev;
2041 ++ struct device *dev = &priv->pci_dev->dev;
2042 + int i, len;
2043 +
2044 + if (q->n_bd == 0)
2045 +@@ -209,8 +210,8 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
2046 +
2047 + /* De-alloc circular buffer of TFDs */
2048 + if (txq->q.n_bd)
2049 +- pci_free_consistent(dev, priv->hw_params.tfd_size *
2050 +- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
2051 ++ dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
2052 ++ txq->tfds, txq->q.dma_addr);
2053 +
2054 + /* deallocate arrays */
2055 + kfree(txq->cmd);
2056 +@@ -301,7 +302,7 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
2057 + static int iwl_tx_queue_alloc(struct iwl_priv *priv,
2058 + struct iwl_tx_queue *txq, u32 id)
2059 + {
2060 +- struct pci_dev *dev = priv->pci_dev;
2061 ++ struct device *dev = &priv->pci_dev->dev;
2062 + size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
2063 +
2064 + /* Driver private data, only for Tx (not command) queues,
2065 +@@ -320,8 +321,8 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
2066 +
2067 + /* Circular buffer of transmit frame descriptors (TFDs),
2068 + * shared with device */
2069 +- txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
2070 +-
2071 ++ txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
2072 ++ GFP_KERNEL);
2073 + if (!txq->tfds) {
2074 + IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
2075 + goto error;
2076 +diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
2077 +index 5f26c93..064d3cd 100644
2078 +--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
2079 ++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
2080 +@@ -356,10 +356,10 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
2081 + static void iwl3945_unset_hw_params(struct iwl_priv *priv)
2082 + {
2083 + if (priv->shared_virt)
2084 +- pci_free_consistent(priv->pci_dev,
2085 +- sizeof(struct iwl3945_shared),
2086 +- priv->shared_virt,
2087 +- priv->shared_phys);
2088 ++ dma_free_coherent(&priv->pci_dev->dev,
2089 ++ sizeof(struct iwl3945_shared),
2090 ++ priv->shared_virt,
2091 ++ priv->shared_phys);
2092 + }
2093 +
2094 + static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
2095 +@@ -1272,10 +1272,10 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
2096 + }
2097 + }
2098 +
2099 +- pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2100 +- rxq->dma_addr);
2101 +- pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
2102 +- rxq->rb_stts, rxq->rb_stts_dma);
2103 ++ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2104 ++ rxq->dma_addr);
2105 ++ dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
2106 ++ rxq->rb_stts, rxq->rb_stts_dma);
2107 + rxq->bd = NULL;
2108 + rxq->rb_stts = NULL;
2109 + }
2110 +diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
2111 +index a007230..1685c09 100644
2112 +--- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c
2113 ++++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
2114 +@@ -443,7 +443,8 @@ out:
2115 +
2116 + void wl1251_debugfs_reset(struct wl1251 *wl)
2117 + {
2118 +- memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
2119 ++ if (wl->stats.fw_stats != NULL)
2120 ++ memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
2121 + wl->stats.retry_count = 0;
2122 + wl->stats.excessive_retries = 0;
2123 + }
2124 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2125 +index 6477722..4493060 100644
2126 +--- a/drivers/pci/pci.c
2127 ++++ b/drivers/pci/pci.c
2128 +@@ -2350,18 +2350,17 @@ EXPORT_SYMBOL_GPL(pci_reset_function);
2129 + */
2130 + int pcix_get_max_mmrbc(struct pci_dev *dev)
2131 + {
2132 +- int err, cap;
2133 ++ int cap;
2134 + u32 stat;
2135 +
2136 + cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2137 + if (!cap)
2138 + return -EINVAL;
2139 +
2140 +- err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
2141 +- if (err)
2142 ++ if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
2143 + return -EINVAL;
2144 +
2145 +- return (stat & PCI_X_STATUS_MAX_READ) >> 12;
2146 ++ return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
2147 + }
2148 + EXPORT_SYMBOL(pcix_get_max_mmrbc);
2149 +
2150 +@@ -2374,18 +2373,17 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc);
2151 + */
2152 + int pcix_get_mmrbc(struct pci_dev *dev)
2153 + {
2154 +- int ret, cap;
2155 +- u32 cmd;
2156 ++ int cap;
2157 ++ u16 cmd;
2158 +
2159 + cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2160 + if (!cap)
2161 + return -EINVAL;
2162 +
2163 +- ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
2164 +- if (!ret)
2165 +- ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
2166 ++ if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
2167 ++ return -EINVAL;
2168 +
2169 +- return ret;
2170 ++ return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
2171 + }
2172 + EXPORT_SYMBOL(pcix_get_mmrbc);
2173 +
2174 +@@ -2400,28 +2398,27 @@ EXPORT_SYMBOL(pcix_get_mmrbc);
2175 + */
2176 + int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
2177 + {
2178 +- int cap, err = -EINVAL;
2179 +- u32 stat, cmd, v, o;
2180 ++ int cap;
2181 ++ u32 stat, v, o;
2182 ++ u16 cmd;
2183 +
2184 + if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
2185 +- goto out;
2186 ++ return -EINVAL;
2187 +
2188 + v = ffs(mmrbc) - 10;
2189 +
2190 + cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2191 + if (!cap)
2192 +- goto out;
2193 ++ return -EINVAL;
2194 +
2195 +- err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
2196 +- if (err)
2197 +- goto out;
2198 ++ if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
2199 ++ return -EINVAL;
2200 +
2201 + if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
2202 + return -E2BIG;
2203 +
2204 +- err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
2205 +- if (err)
2206 +- goto out;
2207 ++ if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
2208 ++ return -EINVAL;
2209 +
2210 + o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
2211 + if (o != v) {
2212 +@@ -2431,10 +2428,10 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
2213 +
2214 + cmd &= ~PCI_X_CMD_MAX_READ;
2215 + cmd |= v << 2;
2216 +- err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd);
2217 ++ if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
2218 ++ return -EIO;
2219 + }
2220 +-out:
2221 +- return err;
2222 ++ return 0;
2223 + }
2224 + EXPORT_SYMBOL(pcix_set_mmrbc);
2225 +
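Three distinct bugs are fixed in the mmrbc helpers above: the PCI-X command register is 16 bits wide and must be accessed with the word accessors; the config accessors return a positive error code, which the old code could hand back to callers as if it were a byte count; and the two-bit max-read field encodes log2(bytes) - 9, so decoding is 512 << field (the old '>> 12' produced garbage). A standalone check of the decoding, assuming PCI_X_STATUS_MAX_READ is the 0x00600000 mask at bits 22:21:

    #include <stdio.h>

    #define DEMO_PCI_X_STATUS_MAX_READ  0x00600000  /* bits 22:21 */

    int main(void)
    {
            unsigned int stat = 0x00400000; /* sample status dword, field = 2 */
            unsigned int field = (stat & DEMO_PCI_X_STATUS_MAX_READ) >> 21;

            printf("field %u -> %u bytes\n", field, 512u << field); /* 2048 */
            return 0;
    }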
2226 +diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
2227 +index 9f5ccbe..72fa87c 100644
2228 +--- a/drivers/pci/pcie/aer/aerdrv_core.c
2229 ++++ b/drivers/pci/pcie/aer/aerdrv_core.c
2230 +@@ -78,19 +78,15 @@ EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
2231 + int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
2232 + {
2233 + int pos;
2234 +- u32 status, mask;
2235 ++ u32 status;
2236 +
2237 + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
2238 + if (!pos)
2239 + return -EIO;
2240 +
2241 + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
2242 +- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
2243 +- if (dev->error_state == pci_channel_io_normal)
2244 +- status &= ~mask; /* Clear corresponding nonfatal bits */
2245 +- else
2246 +- status &= mask; /* Clear corresponding fatal bits */
2247 +- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
2248 ++ if (status)
2249 ++ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
2250 +
2251 + return 0;
2252 + }
2253 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2254 +index 245d2cd..e2641cd 100644
2255 +--- a/drivers/pci/quirks.c
2256 ++++ b/drivers/pci/quirks.c
2257 +@@ -2513,6 +2513,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
2258 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
2259 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
2260 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
2261 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov);
2262 +
2263 + #endif /* CONFIG_PCI_IOV */
2264 +
2265 +diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
2266 +index c790d45..cae6b2c 100644
2267 +--- a/drivers/scsi/mvsas/mv_init.c
2268 ++++ b/drivers/scsi/mvsas/mv_init.c
2269 +@@ -657,6 +657,7 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
2270 + { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
2271 + { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
2272 + { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
2273 ++ { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
2274 +
2275 + { } /* terminate list */
2276 + };
2277 +diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
2278 +index bf52dec..64084aa 100644
2279 +--- a/drivers/scsi/scsi_transport_fc.c
2280 ++++ b/drivers/scsi/scsi_transport_fc.c
2281 +@@ -1215,6 +1215,15 @@ store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
2282 + {
2283 + struct fc_vport *vport = transport_class_to_vport(dev);
2284 + struct Scsi_Host *shost = vport_to_shost(vport);
2285 ++ unsigned long flags;
2286 ++
2287 ++ spin_lock_irqsave(shost->host_lock, flags);
2288 ++ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
2289 ++ spin_unlock_irqrestore(shost->host_lock, flags);
2290 ++ return -EBUSY;
2291 ++ }
2292 ++ vport->flags |= FC_VPORT_DELETING;
2293 ++ spin_unlock_irqrestore(shost->host_lock, flags);
2294 +
2295 + fc_queue_work(shost, &vport->vport_delete_work);
2296 + return count;
2297 +@@ -1804,6 +1813,9 @@ store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr,
2298 + list_for_each_entry(vport, &fc_host->vports, peers) {
2299 + if ((vport->channel == 0) &&
2300 + (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
2301 ++ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
2302 ++ break;
2303 ++ vport->flags |= FC_VPORT_DELETING;
2304 + match = 1;
2305 + break;
2306 + }
2307 +@@ -3328,18 +3340,6 @@ fc_vport_terminate(struct fc_vport *vport)
2308 + unsigned long flags;
2309 + int stat;
2310 +
2311 +- spin_lock_irqsave(shost->host_lock, flags);
2312 +- if (vport->flags & FC_VPORT_CREATING) {
2313 +- spin_unlock_irqrestore(shost->host_lock, flags);
2314 +- return -EBUSY;
2315 +- }
2316 +- if (vport->flags & (FC_VPORT_DEL)) {
2317 +- spin_unlock_irqrestore(shost->host_lock, flags);
2318 +- return -EALREADY;
2319 +- }
2320 +- vport->flags |= FC_VPORT_DELETING;
2321 +- spin_unlock_irqrestore(shost->host_lock, flags);
2322 +-
2323 + if (i->f->vport_delete)
2324 + stat = i->f->vport_delete(vport);
2325 + else
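The scsi_transport_fc change moves the claim on a vport out of fc_vport_terminate() and into both sysfs delete entry points: FC_VPORT_DELETING is now tested and set in one step under the host lock, so a concurrent second delete fails with -EBUSY instead of racing the first teardown. A sketch of that claim-before-work pattern, with invented demo_* names:

    #include <linux/spinlock.h>
    #include <linux/errno.h>

    #define DEMO_VPORT_CREATING     0x01
    #define DEMO_VPORT_DEL          0x02
    #define DEMO_VPORT_DELETING     0x04

    struct demo_vport {
            unsigned int flags;
    };

    static int demo_claim_delete(struct demo_vport *v, spinlock_t *host_lock)
    {
            unsigned long flags;

            spin_lock_irqsave(host_lock, flags);
            if (v->flags & (DEMO_VPORT_DEL | DEMO_VPORT_CREATING)) {
                    spin_unlock_irqrestore(host_lock, flags);
                    return -EBUSY;  /* another path owns the teardown */
            }
            v->flags |= DEMO_VPORT_DELETING;        /* claimed atomically */
            spin_unlock_irqrestore(host_lock, flags);
            return 0;
    }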
2326 +diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
2327 +index 55b034b..3c8a024 100644
2328 +--- a/drivers/scsi/ses.c
2329 ++++ b/drivers/scsi/ses.c
2330 +@@ -591,8 +591,6 @@ static int ses_intf_add(struct device *cdev,
2331 + ses_dev->page10_len = len;
2332 + buf = NULL;
2333 + }
2334 +- kfree(hdr_buf);
2335 +-
2336 + scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
2337 + if (!scomp)
2338 + goto err_free;
2339 +@@ -604,6 +602,8 @@ static int ses_intf_add(struct device *cdev,
2340 + goto err_free;
2341 + }
2342 +
2343 ++ kfree(hdr_buf);
2344 ++
2345 + edev->scratch = ses_dev;
2346 + for (i = 0; i < components; i++)
2347 + edev->component[i].scratch = scomp + i;
2348 +diff --git a/drivers/staging/rt2860/common/2860_rtmp_init.c b/drivers/staging/rt2860/common/2860_rtmp_init.c
2349 +index 0bc0fb9..98b0f8e 100644
2350 +--- a/drivers/staging/rt2860/common/2860_rtmp_init.c
2351 ++++ b/drivers/staging/rt2860/common/2860_rtmp_init.c
2352 +@@ -716,7 +716,7 @@ VOID RTMPFreeTxRxRingMemory(
2353 + {
2354 + if ((pAd->RxRing.Cell[index].DmaBuf.AllocVa) && (pAd->RxRing.Cell[index].pNdisPacket))
2355 + {
2356 +- PCI_UNMAP_SINGLE(pObj->pci_dev, pAd->RxRing.Cell[index].DmaBuf.AllocPa, pAd->RxRing.Cell[index].DmaBuf.AllocSize, PCI_DMA_FROMDEVICE);
2357 ++ PCI_UNMAP_SINGLE(pAd, pAd->RxRing.Cell[index].DmaBuf.AllocPa, pAd->RxRing.Cell[index].DmaBuf.AllocSize, PCI_DMA_FROMDEVICE);
2358 + RELEASE_NDIS_PACKET(pAd, pAd->RxRing.Cell[index].pNdisPacket, NDIS_STATUS_SUCCESS);
2359 + }
2360 + }
2361 +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
2362 +index 24120db..2f12e2d 100644
2363 +--- a/drivers/usb/core/devio.c
2364 ++++ b/drivers/usb/core/devio.c
2365 +@@ -1176,6 +1176,13 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
2366 + free_async(as);
2367 + return -ENOMEM;
2368 + }
2369 ++ /* Isochronous input data may end up being discontiguous
2370 ++ * if some of the packets are short. Clear the buffer so
2371 ++ * that the gaps don't leak kernel data to userspace.
2372 ++ */
2373 ++ if (is_in && uurb->type == USBDEVFS_URB_TYPE_ISO)
2374 ++ memset(as->urb->transfer_buffer, 0,
2375 ++ uurb->buffer_length);
2376 + }
2377 + as->urb->dev = ps->dev;
2378 + as->urb->pipe = (uurb->type << 30) |
2379 +@@ -1312,10 +1319,14 @@ static int processcompl(struct async *as, void __user * __user *arg)
2380 + void __user *addr = as->userurb;
2381 + unsigned int i;
2382 +
2383 +- if (as->userbuffer && urb->actual_length)
2384 +- if (copy_to_user(as->userbuffer, urb->transfer_buffer,
2385 +- urb->actual_length))
2386 ++ if (as->userbuffer && urb->actual_length) {
2387 ++ if (urb->number_of_packets > 0) /* Isochronous */
2388 ++ i = urb->transfer_buffer_length;
2389 ++ else /* Non-Isoc */
2390 ++ i = urb->actual_length;
2391 ++ if (copy_to_user(as->userbuffer, urb->transfer_buffer, i))
2392 + goto err_out;
2393 ++ }
2394 + if (put_user(as->status, &userurb->status))
2395 + goto err_out;
2396 + if (put_user(urb->actual_length, &userurb->actual_length))
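Both devio.c hunks deal with the same property of isochronous IN transfers: short packets leave gaps in the transfer buffer, so urb->actual_length does not describe one contiguous region. The submit path therefore zeroes the buffer up front (otherwise the gaps would leak stale kernel memory to userspace), and the completion path copies the full transfer_buffer_length for iso URBs. A sketch of the completion-side rule, assuming kernel context:

    #include <linux/usb.h>

    /* Bytes that must be copied to userspace for a completed IN urb. */
    static unsigned int demo_copy_len(const struct urb *urb)
    {
            if (urb->number_of_packets > 0)                 /* isochronous */
                    return urb->transfer_buffer_length;     /* gaps possible */
            return urb->actual_length;      /* bulk/control/interrupt */
    }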
2397 +diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
2398 +index e18c677..35bf518 100644
2399 +--- a/drivers/usb/host/ehci-hcd.c
2400 ++++ b/drivers/usb/host/ehci-hcd.c
2401 +@@ -993,7 +993,7 @@ rescan:
2402 + /* endpoints can be iso streams. for now, we don't
2403 + * accelerate iso completions ... so spin a while.
2404 + */
2405 +- if (qh->hw->hw_info1 == 0) {
2406 ++ if (qh->hw == NULL) {
2407 + ehci_vdbg (ehci, "iso delay\n");
2408 + goto idle_timeout;
2409 + }
2410 +diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
2411 +index a5535b5..5cc3f48 100644
2412 +--- a/drivers/usb/host/ehci-sched.c
2413 ++++ b/drivers/usb/host/ehci-sched.c
2414 +@@ -1121,8 +1121,8 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
2415 + urb->interval);
2416 + }
2417 +
2418 +- /* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */
2419 +- } else if (unlikely (stream->hw_info1 != 0)) {
2420 ++ /* if dev->ep [epnum] is a QH, hw is set */
2421 ++ } else if (unlikely (stream->hw != NULL)) {
2422 + ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
2423 + urb->dev->devpath, epnum,
2424 + usb_pipein(urb->pipe) ? "in" : "out");
2425 +@@ -1553,13 +1553,27 @@ itd_patch(
2426 + static inline void
2427 + itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
2428 + {
2429 +- /* always prepend ITD/SITD ... only QH tree is order-sensitive */
2430 +- itd->itd_next = ehci->pshadow [frame];
2431 +- itd->hw_next = ehci->periodic [frame];
2432 +- ehci->pshadow [frame].itd = itd;
2433 ++ union ehci_shadow *prev = &ehci->pshadow[frame];
2434 ++ __hc32 *hw_p = &ehci->periodic[frame];
2435 ++ union ehci_shadow here = *prev;
2436 ++ __hc32 type = 0;
2437 ++
2438 ++ /* skip any iso nodes which might belong to previous microframes */
2439 ++ while (here.ptr) {
2440 ++ type = Q_NEXT_TYPE(ehci, *hw_p);
2441 ++ if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
2442 ++ break;
2443 ++ prev = periodic_next_shadow(ehci, prev, type);
2444 ++ hw_p = shadow_next_periodic(ehci, &here, type);
2445 ++ here = *prev;
2446 ++ }
2447 ++
2448 ++ itd->itd_next = here;
2449 ++ itd->hw_next = *hw_p;
2450 ++ prev->itd = itd;
2451 + itd->frame = frame;
2452 + wmb ();
2453 +- ehci->periodic[frame] = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
2454 ++ *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
2455 + }
2456 +
2457 + /* fit urb's itds into the selected schedule slot; activate as needed */
2458 +diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
2459 +index 2d85e21..b1dce96 100644
2460 +--- a/drivers/usb/host/ehci.h
2461 ++++ b/drivers/usb/host/ehci.h
2462 +@@ -394,9 +394,8 @@ struct ehci_iso_sched {
2463 + * acts like a qh would, if EHCI had them for ISO.
2464 + */
2465 + struct ehci_iso_stream {
2466 +- /* first two fields match QH, but info1 == 0 */
2467 +- __hc32 hw_next;
2468 +- __hc32 hw_info1;
2469 ++ /* first field matches ehci_qh, but is NULL */
2470 ++ struct ehci_qh_hw *hw;
2471 +
2472 + u32 refcount;
2473 + u8 bEndpointAddress;
2474 +diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
2475 +index 9260c74..e3548ee 100644
2476 +--- a/drivers/usb/host/r8a66597-hcd.c
2477 ++++ b/drivers/usb/host/r8a66597-hcd.c
2478 +@@ -418,7 +418,7 @@ static u8 alloc_usb_address(struct r8a66597 *r8a66597, struct urb *urb)
2479 +
2480 + /* this function must be called with interrupt disabled */
2481 + static void free_usb_address(struct r8a66597 *r8a66597,
2482 +- struct r8a66597_device *dev)
2483 ++ struct r8a66597_device *dev, int reset)
2484 + {
2485 + int port;
2486 +
2487 +@@ -430,7 +430,13 @@ static void free_usb_address(struct r8a66597 *r8a66597,
2488 + dev->state = USB_STATE_DEFAULT;
2489 + r8a66597->address_map &= ~(1 << dev->address);
2490 + dev->address = 0;
2491 +- dev_set_drvdata(&dev->udev->dev, NULL);
2492 ++ /*
2493 ++ * Only when resetting USB is it necessary to erase drvdata. When a
2494 ++ * usb device attached through a usb hub is disconnected, "dev->udev"
2495 ++ * is already freed by usb_disconnect(), so we cannot access the data.
2496 ++ */
2497 ++ if (reset)
2498 ++ dev_set_drvdata(&dev->udev->dev, NULL);
2499 + list_del(&dev->device_list);
2500 + kfree(dev);
2501 +
2502 +@@ -1067,7 +1073,7 @@ static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port)
2503 + struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
2504 +
2505 + disable_r8a66597_pipe_all(r8a66597, dev);
2506 +- free_usb_address(r8a66597, dev);
2507 ++ free_usb_address(r8a66597, dev, 0);
2508 +
2509 + start_root_hub_sampling(r8a66597, port, 0);
2510 + }
2511 +@@ -2085,7 +2091,7 @@ static void update_usb_address_map(struct r8a66597 *r8a66597,
2512 + spin_lock_irqsave(&r8a66597->lock, flags);
2513 + dev = get_r8a66597_device(r8a66597, addr);
2514 + disable_r8a66597_pipe_all(r8a66597, dev);
2515 +- free_usb_address(r8a66597, dev);
2516 ++ free_usb_address(r8a66597, dev, 0);
2517 + put_child_connect_map(r8a66597, addr);
2518 + spin_unlock_irqrestore(&r8a66597->lock, flags);
2519 + }
2520 +@@ -2228,7 +2234,7 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
2521 + rh->port |= (1 << USB_PORT_FEAT_RESET);
2522 +
2523 + disable_r8a66597_pipe_all(r8a66597, dev);
2524 +- free_usb_address(r8a66597, dev);
2525 ++ free_usb_address(r8a66597, dev, 1);
2526 +
2527 + r8a66597_mdfy(r8a66597, USBRST, USBRST | UACT,
2528 + get_dvstctr_reg(port));
2529 +diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
2530 +index 932f999..9974f32 100644
2531 +--- a/drivers/usb/host/xhci-hcd.c
2532 ++++ b/drivers/usb/host/xhci-hcd.c
2533 +@@ -1157,6 +1157,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2534 + cmd_completion = &virt_dev->cmd_completion;
2535 + cmd_status = &virt_dev->cmd_status;
2536 + }
2537 ++ init_completion(cmd_completion);
2538 +
2539 + if (!ctx_change)
2540 + ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
2541 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2542 +index ce937e7..9cf4652 100644
2543 +--- a/drivers/usb/serial/ftdi_sio.c
2544 ++++ b/drivers/usb/serial/ftdi_sio.c
2545 +@@ -658,6 +658,7 @@ static struct usb_device_id id_table_combined [] = {
2546 + { USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) },
2547 + { USB_DEVICE(EVOLUTION_VID, EVO_HYBRID_PID) },
2548 + { USB_DEVICE(EVOLUTION_VID, EVO_RCM4_PID) },
2549 ++ { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
2550 + { USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) },
2551 + { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) },
2552 + { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16C_PID) },
2553 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2554 +index d10b5a8..8f9e805 100644
2555 +--- a/drivers/usb/serial/ftdi_sio_ids.h
2556 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
2557 +@@ -501,6 +501,13 @@
2558 + #define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
2559 +
2560 + /*
2561 ++ * Contec products (http://www.contec.com)
2562 ++ * Submitted by Daniel Sangorrin
2563 ++ */
2564 ++#define CONTEC_VID 0x06CE /* Vendor ID */
2565 ++#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
2566 ++
2567 ++/*
2568 + * Definitions for B&B Electronics products.
2569 + */
2570 + #define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
2571 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2572 +index be3dff1..fcf56f9 100644
2573 +--- a/drivers/usb/serial/option.c
2574 ++++ b/drivers/usb/serial/option.c
2575 +@@ -288,7 +288,9 @@ static int option_resume(struct usb_serial *serial);
2576 +
2577 + #define QUALCOMM_VENDOR_ID 0x05C6
2578 +
2579 +-#define MAXON_VENDOR_ID 0x16d8
2580 ++#define CMOTECH_VENDOR_ID 0x16d8
2581 ++#define CMOTECH_PRODUCT_6008 0x6008
2582 ++#define CMOTECH_PRODUCT_6280 0x6280
2583 +
2584 + #define TELIT_VENDOR_ID 0x1bc7
2585 + #define TELIT_PRODUCT_UC864E 0x1003
2586 +@@ -520,7 +522,8 @@ static struct usb_device_id option_ids[] = {
2587 + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
2588 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
2589 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
2590 +- { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
2591 ++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
2592 ++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
2593 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
2594 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
2595 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
2596 +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
2597 +index 7528b8d..8ab4ab2 100644
2598 +--- a/drivers/usb/serial/qcserial.c
2599 ++++ b/drivers/usb/serial/qcserial.c
2600 +@@ -47,6 +47,35 @@ static struct usb_device_id id_table[] = {
2601 + {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
2602 + {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
2603 + {USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
2604 ++ {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */
2605 ++ {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
2606 ++ {USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */
2607 ++ {USB_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
2608 ++ {USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */
2609 ++ {USB_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
2610 ++ {USB_DEVICE(0x03f0, 0x241d)}, /* HP Gobi 2000 QDL device (VP412) */
2611 ++ {USB_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
2612 ++ {USB_DEVICE(0x05c6, 0x9214)}, /* Acer Gobi 2000 QDL device (VP413) */
2613 ++ {USB_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */
2614 ++ {USB_DEVICE(0x05c6, 0x9264)}, /* Asus Gobi 2000 QDL device (VR305) */
2615 ++ {USB_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
2616 ++ {USB_DEVICE(0x05c6, 0x9234)}, /* Top Global Gobi 2000 QDL device (VR306) */
2617 ++ {USB_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
2618 ++ {USB_DEVICE(0x05c6, 0x9274)}, /* iRex Technologies Gobi 2000 QDL device (VR307) */
2619 ++ {USB_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
2620 ++ {USB_DEVICE(0x1199, 0x9000)}, /* Sierra Wireless Gobi 2000 QDL device (VT773) */
2621 ++ {USB_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2622 ++ {USB_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2623 ++ {USB_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2624 ++ {USB_DEVICE(0x1199, 0x9004)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2625 ++ {USB_DEVICE(0x1199, 0x9005)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2626 ++ {USB_DEVICE(0x1199, 0x9006)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2627 ++ {USB_DEVICE(0x1199, 0x9007)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2628 ++ {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2629 ++ {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2630 ++ {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2631 ++ {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
2632 ++ {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
2633 + { } /* Terminating entry */
2634 + };
2635 + MODULE_DEVICE_TABLE(usb, id_table);
2636 +diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
2637 +index eb12182..d25df51 100644
2638 +--- a/drivers/video/efifb.c
2639 ++++ b/drivers/video/efifb.c
2640 +@@ -161,8 +161,17 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
2641 + return 0;
2642 + }
2643 +
2644 ++static void efifb_destroy(struct fb_info *info)
2645 ++{
2646 ++ if (info->screen_base)
2647 ++ iounmap(info->screen_base);
2648 ++ release_mem_region(info->aperture_base, info->aperture_size);
2649 ++ framebuffer_release(info);
2650 ++}
2651 ++
2652 + static struct fb_ops efifb_ops = {
2653 + .owner = THIS_MODULE,
2654 ++ .fb_destroy = efifb_destroy,
2655 + .fb_setcolreg = efifb_setcolreg,
2656 + .fb_fillrect = cfb_fillrect,
2657 + .fb_copyarea = cfb_copyarea,
2658 +@@ -281,7 +290,7 @@ static int __init efifb_probe(struct platform_device *dev)
2659 + info->par = NULL;
2660 +
2661 + info->aperture_base = efifb_fix.smem_start;
2662 +- info->aperture_size = size_total;
2663 ++ info->aperture_size = size_remap;
2664 +
2665 + info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
2666 + if (!info->screen_base) {
2667 +diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
2668 +index 28d9cf7..7127bfe 100644
2669 +--- a/drivers/virtio/virtio_pci.c
2670 ++++ b/drivers/virtio/virtio_pci.c
2671 +@@ -473,7 +473,8 @@ static void vp_del_vqs(struct virtio_device *vdev)
2672 +
2673 + list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
2674 + info = vq->priv;
2675 +- if (vp_dev->per_vq_vectors)
2676 ++ if (vp_dev->per_vq_vectors &&
2677 ++ info->msix_vector != VIRTIO_MSI_NO_VECTOR)
2678 + free_irq(vp_dev->msix_entries[info->msix_vector].vector,
2679 + vq);
2680 + vp_del_vq(vq);
2681 +diff --git a/fs/exec.c b/fs/exec.c
2682 +index 9b88366..a2a3944 100644
2683 +--- a/fs/exec.c
2684 ++++ b/fs/exec.c
2685 +@@ -1913,8 +1913,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
2686 + /*
2687 + * Don't allow local users to get cute and trick others to coredump
2688 + * into their pre-created files:
2689 ++ * Note, this is not relevant for pipes
2690 + */
2691 +- if (inode->i_uid != current_fsuid())
2692 ++ if (!ispipe && (inode->i_uid != current_fsuid()))
2693 + goto close_fail;
2694 + if (!file->f_op)
2695 + goto close_fail;
2696 +diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
2697 +index 4eb308a..a32bcd7 100644
2698 +--- a/fs/gfs2/file.c
2699 ++++ b/fs/gfs2/file.c
2700 +@@ -606,7 +606,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
2701 +
2702 + if (!(fl->fl_flags & FL_POSIX))
2703 + return -ENOLCK;
2704 +- if (__mandatory_lock(&ip->i_inode))
2705 ++ if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
2706 + return -ENOLCK;
2707 +
2708 + if (cmd == F_CANCELLK) {
2709 +diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
2710 +index 09f3837..7f237d2 100644
2711 +--- a/fs/nfs/delegation.h
2712 ++++ b/fs/nfs/delegation.h
2713 +@@ -68,4 +68,10 @@ static inline int nfs_inode_return_delegation(struct inode *inode)
2714 + }
2715 + #endif
2716 +
2717 ++static inline int nfs_have_delegated_attributes(struct inode *inode)
2718 ++{
2719 ++ return nfs_have_delegation(inode, FMODE_READ) &&
2720 ++ !(NFS_I(inode)->cache_validity & NFS_INO_REVAL_FORCED);
2721 ++}
2722 ++
2723 + #endif
2724 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
2725 +index 7cb2985..f360e9c 100644
2726 +--- a/fs/nfs/dir.c
2727 ++++ b/fs/nfs/dir.c
2728 +@@ -1797,7 +1797,7 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str
2729 + cache = nfs_access_search_rbtree(inode, cred);
2730 + if (cache == NULL)
2731 + goto out;
2732 +- if (!nfs_have_delegation(inode, FMODE_READ) &&
2733 ++ if (!nfs_have_delegated_attributes(inode) &&
2734 + !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo))
2735 + goto out_stale;
2736 + res->jiffies = cache->jiffies;
2737 +diff --git a/fs/nfs/file.c b/fs/nfs/file.c
2738 +index 393d40f..61b3bf5 100644
2739 +--- a/fs/nfs/file.c
2740 ++++ b/fs/nfs/file.c
2741 +@@ -486,7 +486,8 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
2742 + {
2743 + dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
2744 +
2745 +- if (gfp & __GFP_WAIT)
2746 ++ /* Only do I/O if gfp is a superset of GFP_KERNEL */
2747 ++ if ((gfp & GFP_KERNEL) == GFP_KERNEL)
2748 + nfs_wb_page(page->mapping->host, page);
2749 + /* If PagePrivate() is set, then the page is not freeable */
2750 + if (PagePrivate(page))
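The nfs_release_page() test above is a superset check, not a simple flag test: GFP_KERNEL is __GFP_WAIT | __GFP_IO | __GFP_FS, so '(gfp & GFP_KERNEL) == GFP_KERNEL' starts writeback only when the caller permits sleeping, I/O, and filesystem re-entry all at once, whereas the old __GFP_WAIT test could start I/O from contexts that forbade it. As a standalone predicate:

    #include <linux/types.h>
    #include <linux/gfp.h>

    /* True only when every bit of GFP_KERNEL is present in gfp. */
    static bool demo_may_do_io(gfp_t gfp)
    {
            return (gfp & GFP_KERNEL) == GFP_KERNEL;
    }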
2751 +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
2752 +index faa0918..3c80474 100644
2753 +--- a/fs/nfs/inode.c
2754 ++++ b/fs/nfs/inode.c
2755 +@@ -759,7 +759,7 @@ int nfs_attribute_timeout(struct inode *inode)
2756 + {
2757 + struct nfs_inode *nfsi = NFS_I(inode);
2758 +
2759 +- if (nfs_have_delegation(inode, FMODE_READ))
2760 ++ if (nfs_have_delegated_attributes(inode))
2761 + return 0;
2762 + return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
2763 + }
2764 +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
2765 +index a12c45b..29d9d36 100644
2766 +--- a/fs/nfs/pagelist.c
2767 ++++ b/fs/nfs/pagelist.c
2768 +@@ -112,12 +112,10 @@ void nfs_unlock_request(struct nfs_page *req)
2769 + */
2770 + int nfs_set_page_tag_locked(struct nfs_page *req)
2771 + {
2772 +- struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);
2773 +-
2774 + if (!nfs_lock_request_dontget(req))
2775 + return 0;
2776 + if (req->wb_page != NULL)
2777 +- radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
2778 ++ radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
2779 + return 1;
2780 + }
2781 +
2782 +@@ -126,10 +124,10 @@ int nfs_set_page_tag_locked(struct nfs_page *req)
2783 + */
2784 + void nfs_clear_page_tag_locked(struct nfs_page *req)
2785 + {
2786 +- struct inode *inode = req->wb_context->path.dentry->d_inode;
2787 +- struct nfs_inode *nfsi = NFS_I(inode);
2788 +-
2789 + if (req->wb_page != NULL) {
2790 ++ struct inode *inode = req->wb_context->path.dentry->d_inode;
2791 ++ struct nfs_inode *nfsi = NFS_I(inode);
2792 ++
2793 + spin_lock(&inode->i_lock);
2794 + radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
2795 + nfs_unlock_request(req);
2796 +@@ -142,16 +140,22 @@ void nfs_clear_page_tag_locked(struct nfs_page *req)
2797 + * nfs_clear_request - Free up all resources allocated to the request
2798 + * @req:
2799 + *
2800 +- * Release page resources associated with a write request after it
2801 +- * has completed.
2802 ++ * Release page and open context resources associated with a read/write
2803 ++ * request after it has completed.
2804 + */
2805 + void nfs_clear_request(struct nfs_page *req)
2806 + {
2807 + struct page *page = req->wb_page;
2808 ++ struct nfs_open_context *ctx = req->wb_context;
2809 ++
2810 + if (page != NULL) {
2811 + page_cache_release(page);
2812 + req->wb_page = NULL;
2813 + }
2814 ++ if (ctx != NULL) {
2815 ++ put_nfs_open_context(ctx);
2816 ++ req->wb_context = NULL;
2817 ++ }
2818 + }
2819 +
2820 +
2821 +@@ -165,9 +169,8 @@ static void nfs_free_request(struct kref *kref)
2822 + {
2823 + struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
2824 +
2825 +- /* Release struct file or cached credential */
2826 ++ /* Release struct file and open context */
2827 + nfs_clear_request(req);
2828 +- put_nfs_open_context(req->wb_context);
2829 + nfs_page_free(req);
2830 + }
2831 +
2832 +diff --git a/fs/partitions/msdos.c b/fs/partitions/msdos.c
2833 +index 0028d2e..90be97f 100644
2834 +--- a/fs/partitions/msdos.c
2835 ++++ b/fs/partitions/msdos.c
2836 +@@ -31,14 +31,17 @@
2837 + */
2838 + #include <asm/unaligned.h>
2839 +
2840 +-#define SYS_IND(p) (get_unaligned(&p->sys_ind))
2841 +-#define NR_SECTS(p) ({ __le32 __a = get_unaligned(&p->nr_sects); \
2842 +- le32_to_cpu(__a); \
2843 +- })
2844 ++#define SYS_IND(p) get_unaligned(&p->sys_ind)
2845 +
2846 +-#define START_SECT(p) ({ __le32 __a = get_unaligned(&p->start_sect); \
2847 +- le32_to_cpu(__a); \
2848 +- })
2849 ++static inline sector_t nr_sects(struct partition *p)
2850 ++{
2851 ++ return (sector_t)get_unaligned_le32(&p->nr_sects);
2852 ++}
2853 ++
2854 ++static inline sector_t start_sect(struct partition *p)
2855 ++{
2856 ++ return (sector_t)get_unaligned_le32(&p->start_sect);
2857 ++}
2858 +
2859 + static inline int is_extended_partition(struct partition *p)
2860 + {
2861 +@@ -104,13 +107,13 @@ static int aix_magic_present(unsigned char *p, struct block_device *bdev)
2862 +
2863 + static void
2864 + parse_extended(struct parsed_partitions *state, struct block_device *bdev,
2865 +- u32 first_sector, u32 first_size)
2866 ++ sector_t first_sector, sector_t first_size)
2867 + {
2868 + struct partition *p;
2869 + Sector sect;
2870 + unsigned char *data;
2871 +- u32 this_sector, this_size;
2872 +- int sector_size = bdev_logical_block_size(bdev) / 512;
2873 ++ sector_t this_sector, this_size;
2874 ++ sector_t sector_size = bdev_logical_block_size(bdev) / 512;
2875 + int loopct = 0; /* number of links followed
2876 + without finding a data partition */
2877 + int i;
2878 +@@ -145,14 +148,14 @@ parse_extended(struct parsed_partitions *state, struct block_device *bdev,
2879 + * First process the data partition(s)
2880 + */
2881 + for (i=0; i<4; i++, p++) {
2882 +- u32 offs, size, next;
2883 +- if (!NR_SECTS(p) || is_extended_partition(p))
2884 ++ sector_t offs, size, next;
2885 ++ if (!nr_sects(p) || is_extended_partition(p))
2886 + continue;
2887 +
2888 + /* Check the 3rd and 4th entries -
2889 + these sometimes contain random garbage */
2890 +- offs = START_SECT(p)*sector_size;
2891 +- size = NR_SECTS(p)*sector_size;
2892 ++ offs = start_sect(p)*sector_size;
2893 ++ size = nr_sects(p)*sector_size;
2894 + next = this_sector + offs;
2895 + if (i >= 2) {
2896 + if (offs + size > this_size)
2897 +@@ -179,13 +182,13 @@ parse_extended(struct parsed_partitions *state, struct block_device *bdev,
2898 + */
2899 + p -= 4;
2900 + for (i=0; i<4; i++, p++)
2901 +- if (NR_SECTS(p) && is_extended_partition(p))
2902 ++ if (nr_sects(p) && is_extended_partition(p))
2903 + break;
2904 + if (i == 4)
2905 + goto done; /* nothing left to do */
2906 +
2907 +- this_sector = first_sector + START_SECT(p) * sector_size;
2908 +- this_size = NR_SECTS(p) * sector_size;
2909 ++ this_sector = first_sector + start_sect(p) * sector_size;
2910 ++ this_size = nr_sects(p) * sector_size;
2911 + put_dev_sector(sect);
2912 + }
2913 + done:
2914 +@@ -197,7 +200,7 @@ done:
2915 +
2916 + static void
2917 + parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev,
2918 +- u32 offset, u32 size, int origin)
2919 ++ sector_t offset, sector_t size, int origin)
2920 + {
2921 + #ifdef CONFIG_SOLARIS_X86_PARTITION
2922 + Sector sect;
2923 +@@ -244,7 +247,7 @@ parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev,
2924 + */
2925 + static void
2926 + parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
2927 +- u32 offset, u32 size, int origin, char *flavour,
2928 ++ sector_t offset, sector_t size, int origin, char *flavour,
2929 + int max_partitions)
2930 + {
2931 + Sector sect;
2932 +@@ -263,7 +266,7 @@ parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
2933 + if (le16_to_cpu(l->d_npartitions) < max_partitions)
2934 + max_partitions = le16_to_cpu(l->d_npartitions);
2935 + for (p = l->d_partitions; p - l->d_partitions < max_partitions; p++) {
2936 +- u32 bsd_start, bsd_size;
2937 ++ sector_t bsd_start, bsd_size;
2938 +
2939 + if (state->next == state->limit)
2940 + break;
2941 +@@ -290,7 +293,7 @@ parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
2942 +
2943 + static void
2944 + parse_freebsd(struct parsed_partitions *state, struct block_device *bdev,
2945 +- u32 offset, u32 size, int origin)
2946 ++ sector_t offset, sector_t size, int origin)
2947 + {
2948 + #ifdef CONFIG_BSD_DISKLABEL
2949 + parse_bsd(state, bdev, offset, size, origin,
2950 +@@ -300,7 +303,7 @@ parse_freebsd(struct parsed_partitions *state, struct block_device *bdev,
2951 +
2952 + static void
2953 + parse_netbsd(struct parsed_partitions *state, struct block_device *bdev,
2954 +- u32 offset, u32 size, int origin)
2955 ++ sector_t offset, sector_t size, int origin)
2956 + {
2957 + #ifdef CONFIG_BSD_DISKLABEL
2958 + parse_bsd(state, bdev, offset, size, origin,
2959 +@@ -310,7 +313,7 @@ parse_netbsd(struct parsed_partitions *state, struct block_device *bdev,
2960 +
2961 + static void
2962 + parse_openbsd(struct parsed_partitions *state, struct block_device *bdev,
2963 +- u32 offset, u32 size, int origin)
2964 ++ sector_t offset, sector_t size, int origin)
2965 + {
2966 + #ifdef CONFIG_BSD_DISKLABEL
2967 + parse_bsd(state, bdev, offset, size, origin,
2968 +@@ -324,7 +327,7 @@ parse_openbsd(struct parsed_partitions *state, struct block_device *bdev,
2969 + */
2970 + static void
2971 + parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
2972 +- u32 offset, u32 size, int origin)
2973 ++ sector_t offset, sector_t size, int origin)
2974 + {
2975 + #ifdef CONFIG_UNIXWARE_DISKLABEL
2976 + Sector sect;
2977 +@@ -348,7 +351,8 @@ parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
2978 +
2979 + if (p->s_label != UNIXWARE_FS_UNUSED)
2980 + put_partition(state, state->next++,
2981 +- START_SECT(p), NR_SECTS(p));
2982 ++ le32_to_cpu(p->start_sect),
2983 ++ le32_to_cpu(p->nr_sects));
2984 + p++;
2985 + }
2986 + put_dev_sector(sect);
2987 +@@ -363,7 +367,7 @@ parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
2988 + */
2989 + static void
2990 + parse_minix(struct parsed_partitions *state, struct block_device *bdev,
2991 +- u32 offset, u32 size, int origin)
2992 ++ sector_t offset, sector_t size, int origin)
2993 + {
2994 + #ifdef CONFIG_MINIX_SUBPARTITION
2995 + Sector sect;
2996 +@@ -390,7 +394,7 @@ parse_minix(struct parsed_partitions *state, struct block_device *bdev,
2997 + /* add each partition in use */
2998 + if (SYS_IND(p) == MINIX_PARTITION)
2999 + put_partition(state, state->next++,
3000 +- START_SECT(p), NR_SECTS(p));
3001 ++ start_sect(p), nr_sects(p));
3002 + }
3003 + printk(" >\n");
3004 + }
3005 +@@ -401,7 +405,7 @@ parse_minix(struct parsed_partitions *state, struct block_device *bdev,
3006 + static struct {
3007 + unsigned char id;
3008 + void (*parse)(struct parsed_partitions *, struct block_device *,
3009 +- u32, u32, int);
3010 ++ sector_t, sector_t, int);
3011 + } subtypes[] = {
3012 + {FREEBSD_PARTITION, parse_freebsd},
3013 + {NETBSD_PARTITION, parse_netbsd},
3014 +@@ -415,7 +419,7 @@ static struct {
3015 +
3016 + int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
3017 + {
3018 +- int sector_size = bdev_logical_block_size(bdev) / 512;
3019 ++ sector_t sector_size = bdev_logical_block_size(bdev) / 512;
3020 + Sector sect;
3021 + unsigned char *data;
3022 + struct partition *p;
3023 +@@ -483,14 +487,21 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
3024 +
3025 + state->next = 5;
3026 + for (slot = 1 ; slot <= 4 ; slot++, p++) {
3027 +- u32 start = START_SECT(p)*sector_size;
3028 +- u32 size = NR_SECTS(p)*sector_size;
3029 ++ sector_t start = start_sect(p)*sector_size;
3030 ++ sector_t size = nr_sects(p)*sector_size;
3031 + if (!size)
3032 + continue;
3033 + if (is_extended_partition(p)) {
3034 +- /* prevent someone doing mkfs or mkswap on an
3035 +- extended partition, but leave room for LILO */
3036 +- put_partition(state, slot, start, size == 1 ? 1 : 2);
3037 ++ /*
3038 ++ * prevent someone doing mkfs or mkswap on an
3039 ++ * extended partition, but leave room for LILO.
3040 ++ * FIXME: this uses one logical sector for sector
3041 ++ * sizes > 512b, although that may not be enough/proper.
3042 ++ */
3043 ++ sector_t n = 2;
3044 ++ n = min(size, max(sector_size, n));
3045 ++ put_partition(state, slot, start, n);
3046 ++
3047 + printk(" <");
3048 + parse_extended(state, bdev, start, size);
3049 + printk(" >");
3050 +@@ -513,7 +524,7 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
3051 + unsigned char id = SYS_IND(p);
3052 + int n;
3053 +
3054 +- if (!NR_SECTS(p))
3055 ++ if (!nr_sects(p))
3056 + continue;
3057 +
3058 + for (n = 0; subtypes[n].parse && id != subtypes[n].id; n++)
3059 +@@ -521,8 +532,8 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
3060 +
3061 + if (!subtypes[n].parse)
3062 + continue;
3063 +- subtypes[n].parse(state, bdev, START_SECT(p)*sector_size,
3064 +- NR_SECTS(p)*sector_size, slot);
3065 ++ subtypes[n].parse(state, bdev, start_sect(p)*sector_size,
3066 ++ nr_sects(p)*sector_size, slot);
3067 + }
3068 + put_dev_sector(sect);
3069 + return 1;
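
The msdos changes above do two things: partition offsets and sizes move from u32 to sector_t so disks beyond 2TB are parsed correctly, and the hard-coded one-or-two-sector stub recorded for an extended partition is now scaled by the logical sector size. Below is a minimal userspace sketch of that clamp with the kernel's min()/max() written out; the typedef and function name are this sketch's own. Sector counts are in 512-byte units, so a 4KiB-sector drive has sector_size == 8.

#include <stdio.h>

typedef unsigned long long sector_t;    /* stand-in for the kernel type */

static sector_t extended_stub_len(sector_t size, sector_t sector_size)
{
        sector_t n = 2;                  /* legacy minimum: room for LILO */

        if (sector_size > n)
                n = sector_size;         /* max(sector_size, 2) */
        if (size < n)
                n = size;                /* min(size, ...) */
        return n;
}

int main(void)
{
        printf("%llu\n", extended_stub_len(1000, 1));   /* 512B sectors -> 2 */
        printf("%llu\n", extended_stub_len(1000, 8));   /* 4KiB sectors -> 8 */
        return 0;
}
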
3070 +diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
3071 +index 2534987..2ed79a9 100644
3072 +--- a/fs/quota/dquot.c
3073 ++++ b/fs/quota/dquot.c
3074 +@@ -229,6 +229,8 @@ static struct hlist_head *dquot_hash;
3075 + struct dqstats dqstats;
3076 + EXPORT_SYMBOL(dqstats);
3077 +
3078 ++static qsize_t inode_get_rsv_space(struct inode *inode);
3079 ++
3080 + static inline unsigned int
3081 + hashfn(const struct super_block *sb, unsigned int id, int type)
3082 + {
3083 +@@ -820,11 +822,14 @@ static int dqinit_needed(struct inode *inode, int type)
3084 + static void add_dquot_ref(struct super_block *sb, int type)
3085 + {
3086 + struct inode *inode, *old_inode = NULL;
3087 ++ int reserved = 0;
3088 +
3089 + spin_lock(&inode_lock);
3090 + list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
3091 + if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
3092 + continue;
3093 ++ if (unlikely(inode_get_rsv_space(inode) > 0))
3094 ++ reserved = 1;
3095 + if (!atomic_read(&inode->i_writecount))
3096 + continue;
3097 + if (!dqinit_needed(inode, type))
3098 +@@ -845,6 +850,12 @@ static void add_dquot_ref(struct super_block *sb, int type)
3099 + }
3100 + spin_unlock(&inode_lock);
3101 + iput(old_inode);
3102 ++
3103 ++ if (reserved) {
3104 ++ printk(KERN_WARNING "VFS (%s): Writes happened before quota"
3105 ++ " was turned on thus quota information is probably "
3106 ++ "inconsistent. Please run quotacheck(8).\n", sb->s_id);
3107 ++ }
3108 + }
3109 +
3110 + /*
3111 +@@ -958,10 +969,12 @@ static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
3112 + /*
3113 + * Claim reserved quota space
3114 + */
3115 +-static void dquot_claim_reserved_space(struct dquot *dquot,
3116 +- qsize_t number)
3117 ++static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
3118 + {
3119 +- WARN_ON(dquot->dq_dqb.dqb_rsvspace < number);
3120 ++ if (dquot->dq_dqb.dqb_rsvspace < number) {
3121 ++ WARN_ON_ONCE(1);
3122 ++ number = dquot->dq_dqb.dqb_rsvspace;
3123 ++ }
3124 + dquot->dq_dqb.dqb_curspace += number;
3125 + dquot->dq_dqb.dqb_rsvspace -= number;
3126 + }
3127 +@@ -969,7 +982,12 @@ static void dquot_claim_reserved_space(struct dquot *dquot,
3128 + static inline
3129 + void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
3130 + {
3131 +- dquot->dq_dqb.dqb_rsvspace -= number;
3132 ++ if (dquot->dq_dqb.dqb_rsvspace >= number)
3133 ++ dquot->dq_dqb.dqb_rsvspace -= number;
3134 ++ else {
3135 ++ WARN_ON_ONCE(1);
3136 ++ dquot->dq_dqb.dqb_rsvspace = 0;
3137 ++ }
3138 + }
3139 +
3140 + static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
3141 +@@ -1287,6 +1305,7 @@ static int info_bdq_free(struct dquot *dquot, qsize_t space)
3142 + return QUOTA_NL_BHARDBELOW;
3143 + return QUOTA_NL_NOWARN;
3144 + }
3145 ++
3146 + /*
3147 + * Initialize quota pointers in inode
3148 + * We do things in a bit complicated way but by that we avoid calling
3149 +@@ -1298,6 +1317,7 @@ int dquot_initialize(struct inode *inode, int type)
3150 + int cnt, ret = 0;
3151 + struct dquot *got[MAXQUOTAS] = { NULL, NULL };
3152 + struct super_block *sb = inode->i_sb;
3153 ++ qsize_t rsv;
3154 +
3155 + /* First test before acquiring mutex - solves deadlocks when we
3156 + * re-enter the quota code and are already holding the mutex */
3157 +@@ -1332,6 +1352,13 @@ int dquot_initialize(struct inode *inode, int type)
3158 + if (!inode->i_dquot[cnt]) {
3159 + inode->i_dquot[cnt] = got[cnt];
3160 + got[cnt] = NULL;
3161 ++ /*
3162 ++ * Make quota reservation system happy if someone
3163 ++ * did a write before quota was turned on
3164 ++ */
3165 ++ rsv = inode_get_rsv_space(inode);
3166 ++ if (unlikely(rsv))
3167 ++ dquot_resv_space(inode->i_dquot[cnt], rsv);
3168 + }
3169 + }
3170 + out_err:
3171 +@@ -1399,28 +1426,30 @@ static qsize_t *inode_reserved_space(struct inode * inode)
3172 + return inode->i_sb->dq_op->get_reserved_space(inode);
3173 + }
3174 +
3175 +-static void inode_add_rsv_space(struct inode *inode, qsize_t number)
3176 ++void inode_add_rsv_space(struct inode *inode, qsize_t number)
3177 + {
3178 + spin_lock(&inode->i_lock);
3179 + *inode_reserved_space(inode) += number;
3180 + spin_unlock(&inode->i_lock);
3181 + }
3182 ++EXPORT_SYMBOL(inode_add_rsv_space);
3183 +
3184 +-
3185 +-static void inode_claim_rsv_space(struct inode *inode, qsize_t number)
3186 ++void inode_claim_rsv_space(struct inode *inode, qsize_t number)
3187 + {
3188 + spin_lock(&inode->i_lock);
3189 + *inode_reserved_space(inode) -= number;
3190 + __inode_add_bytes(inode, number);
3191 + spin_unlock(&inode->i_lock);
3192 + }
3193 ++EXPORT_SYMBOL(inode_claim_rsv_space);
3194 +
3195 +-static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
3196 ++void inode_sub_rsv_space(struct inode *inode, qsize_t number)
3197 + {
3198 + spin_lock(&inode->i_lock);
3199 + *inode_reserved_space(inode) -= number;
3200 + spin_unlock(&inode->i_lock);
3201 + }
3202 ++EXPORT_SYMBOL(inode_sub_rsv_space);
3203 +
3204 + static qsize_t inode_get_rsv_space(struct inode *inode)
3205 + {
3206 +diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
3207 +index 12ff8c3..3e9bd6a 100644
3208 +--- a/include/linux/decompress/mm.h
3209 ++++ b/include/linux/decompress/mm.h
3210 +@@ -14,11 +14,21 @@
3211 +
3212 + /* Code active when included from pre-boot environment: */
3213 +
3214 ++/*
3215 ++ * Some architectures want to ensure there is no local data in their
3216 ++ * pre-boot environment, so that data can be arbitrarily relocated (via
3217 ++ * GOT references). This is achieved by defining STATIC_RW_DATA to
3218 ++ * be null.
3219 ++ */
3220 ++#ifndef STATIC_RW_DATA
3221 ++#define STATIC_RW_DATA static
3222 ++#endif
3223 ++
3224 + /* A trivial malloc implementation, adapted from
3225 + * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
3226 + */
3227 +-static unsigned long malloc_ptr;
3228 +-static int malloc_count;
3229 ++STATIC_RW_DATA unsigned long malloc_ptr;
3230 ++STATIC_RW_DATA int malloc_count;
3231 +
3232 + static void *malloc(int size)
3233 + {
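
For context, malloc_ptr and malloc_count are the entire state of the decompressor's trivial bump allocator: allocation just advances a pointer, freed blocks are never reused individually, and the arena resets only when the live count drops to zero. A rough userspace sketch of that behaviour, assuming a fixed heap array and 4-byte alignment (the in-kernel version differs in its details):

#include <stdio.h>

static unsigned char heap[1 << 16];     /* stand-in for the pre-boot heap */
static unsigned long malloc_ptr;        /* next free offset into the heap */
static int malloc_count;                /* number of live allocations */

static void *pb_malloc(int size)
{
        void *p;

        malloc_ptr = (malloc_ptr + 3) & ~3UL;   /* keep 4-byte alignment */
        if (size < 0 || malloc_ptr + (unsigned long)size > sizeof(heap))
                return NULL;
        p = heap + malloc_ptr;
        malloc_ptr += size;
        malloc_count++;
        return p;
}

static void pb_free(void *p)
{
        (void)p;                        /* individual blocks are never reused */
        if (--malloc_count == 0)
                malloc_ptr = 0;         /* arena empty: start over */
}

int main(void)
{
        void *a = pb_malloc(100);
        void *b = pb_malloc(20);

        pb_free(a);
        pb_free(b);
        printf("offset after reset: %lu\n", malloc_ptr);        /* 0 */
        return 0;
}
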
3234 +diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
3235 +index 9bace4b..040b679 100644
3236 +--- a/include/linux/hrtimer.h
3237 ++++ b/include/linux/hrtimer.h
3238 +@@ -162,10 +162,11 @@ struct hrtimer_clock_base {
3239 + * @expires_next: absolute time of the next event which was scheduled
3240 + * via clock_set_next_event()
3241 + * @hres_active: State of high resolution mode
3242 +- * @check_clocks: Indictator, when set evaluate time source and clock
3243 +- * event devices whether high resolution mode can be
3244 +- * activated.
3245 +- * @nr_events: Total number of timer interrupt events
3246 ++ * @hang_detected: The last hrtimer interrupt detected a hang
3247 ++ * @nr_events: Total number of hrtimer interrupt events
3248 ++ * @nr_retries: Total number of hrtimer interrupt retries
3249 ++ * @nr_hangs: Total number of hrtimer interrupt hangs
3250 ++ * @max_hang_time: Maximum time spent in hrtimer_interrupt
3251 + */
3252 + struct hrtimer_cpu_base {
3253 + spinlock_t lock;
3254 +@@ -173,7 +174,11 @@ struct hrtimer_cpu_base {
3255 + #ifdef CONFIG_HIGH_RES_TIMERS
3256 + ktime_t expires_next;
3257 + int hres_active;
3258 ++ int hang_detected;
3259 + unsigned long nr_events;
3260 ++ unsigned long nr_retries;
3261 ++ unsigned long nr_hangs;
3262 ++ ktime_t max_hang_time;
3263 + #endif
3264 + };
3265 +
3266 +diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
3267 +index 5a9aae4..137130b 100644
3268 +--- a/include/linux/if_tunnel.h
3269 ++++ b/include/linux/if_tunnel.h
3270 +@@ -2,6 +2,7 @@
3271 + #define _IF_TUNNEL_H_
3272 +
3273 + #include <linux/types.h>
3274 ++#include <asm/byteorder.h>
3275 +
3276 + #ifdef __KERNEL__
3277 + #include <linux/ip.h>
3278 +diff --git a/include/linux/lcm.h b/include/linux/lcm.h
3279 +new file mode 100644
3280 +index 0000000..7bf01d7
3281 +--- /dev/null
3282 ++++ b/include/linux/lcm.h
3283 +@@ -0,0 +1,8 @@
3284 ++#ifndef _LCM_H
3285 ++#define _LCM_H
3286 ++
3287 ++#include <linux/compiler.h>
3288 ++
3289 ++unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__;
3290 ++
3291 ++#endif /* _LCM_H */
3292 +diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
3293 +index 3ebb231..a529d86 100644
3294 +--- a/include/linux/quotaops.h
3295 ++++ b/include/linux/quotaops.h
3296 +@@ -26,6 +26,10 @@ static inline void writeout_quota_sb(struct super_block *sb, int type)
3297 + sb->s_qcop->quota_sync(sb, type);
3298 + }
3299 +
3300 ++void inode_add_rsv_space(struct inode *inode, qsize_t number);
3301 ++void inode_claim_rsv_space(struct inode *inode, qsize_t number);
3302 ++void inode_sub_rsv_space(struct inode *inode, qsize_t number);
3303 ++
3304 + int dquot_initialize(struct inode *inode, int type);
3305 + int dquot_drop(struct inode *inode);
3306 + struct dquot *dqget(struct super_block *sb, unsigned int id, int type);
3307 +@@ -42,7 +46,6 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number);
3308 + int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc);
3309 + int dquot_claim_space(struct inode *inode, qsize_t number);
3310 + void dquot_release_reserved_space(struct inode *inode, qsize_t number);
3311 +-qsize_t dquot_get_reserved_space(struct inode *inode);
3312 +
3313 + int dquot_free_space(struct inode *inode, qsize_t number);
3314 + int dquot_free_inode(const struct inode *inode, qsize_t number);
3315 +@@ -199,6 +202,8 @@ static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
3316 + if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
3317 + return 1;
3318 + }
3319 ++ else
3320 ++ inode_add_rsv_space(inode, nr);
3321 + return 0;
3322 + }
3323 +
3324 +@@ -221,7 +226,7 @@ static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
3325 + if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
3326 + return 1;
3327 + } else
3328 +- inode_add_bytes(inode, nr);
3329 ++ inode_claim_rsv_space(inode, nr);
3330 +
3331 + mark_inode_dirty(inode);
3332 + return 0;
3333 +@@ -235,6 +240,8 @@ void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
3334 + {
3335 + if (sb_any_quota_active(inode->i_sb))
3336 + inode->i_sb->dq_op->release_rsv(inode, nr);
3337 ++ else
3338 ++ inode_sub_rsv_space(inode, nr);
3339 + }
3340 +
3341 + static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
3342 +diff --git a/include/linux/tty.h b/include/linux/tty.h
3343 +index f0f43d0..e9c57e9 100644
3344 +--- a/include/linux/tty.h
3345 ++++ b/include/linux/tty.h
3346 +@@ -68,6 +68,17 @@ struct tty_buffer {
3347 + unsigned long data[0];
3348 + };
3349 +
3350 ++/*
3351 ++ * We default to dicing tty buffer allocations to this many characters
3352 ++ * in order to avoid multiple page allocations. We know the size of
3353 ++ * tty_buffer itself, but it must also be taken into account that
3354 ++ * the buffer is 256 byte aligned. See tty_buffer_find for the
3355 ++ * allocation logic this must match.
3356 ++ */
3357 ++
3358 ++#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
3359 ++
3360 ++
3361 + struct tty_bufhead {
3362 + struct delayed_work work;
3363 + spinlock_t lock;
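
To make the TTY_BUFFER_PAGE arithmetic concrete: each queued character costs two bytes (the data byte plus its flag byte), hence the division by two, and the ~0xFF mask rounds down to a multiple of 256 so the aligned allocation still fits in a single page. A quick check assuming 4096-byte pages and a hypothetical 92-byte struct tty_buffer (the real size is configuration-dependent):

#include <stdio.h>

#define PAGE_SIZE 4096u
#define TTY_BUFFER_HDR 92u      /* hypothetical sizeof(struct tty_buffer) */

int main(void)
{
        unsigned int chars = ((PAGE_SIZE - TTY_BUFFER_HDR) / 2) & ~0xFFu;

        /* (4096 - 92) / 2 = 2002, rounded down to 256 -> 1792 characters */
        printf("TTY_BUFFER_PAGE = %u\n", chars);
        return 0;
}
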
3364 +diff --git a/include/net/mac80211.h b/include/net/mac80211.h
3365 +index 998c30f..c39ed07 100644
3366 +--- a/include/net/mac80211.h
3367 ++++ b/include/net/mac80211.h
3368 +@@ -908,6 +908,9 @@ enum ieee80211_tkip_key_type {
3369 + * @IEEE80211_HW_BEACON_FILTER:
3370 + * Hardware supports dropping of irrelevant beacon frames to
3371 + * avoid waking up cpu.
3372 ++ * @IEEE80211_HW_REPORTS_TX_ACK_STATUS:
3373 ++ * Hardware can provide ack status reports of Tx frames to
3374 ++ * the stack.
3375 + */
3376 + enum ieee80211_hw_flags {
3377 + IEEE80211_HW_RX_INCLUDES_FCS = 1<<1,
3378 +@@ -924,6 +927,7 @@ enum ieee80211_hw_flags {
3379 + IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
3380 + IEEE80211_HW_MFP_CAPABLE = 1<<13,
3381 + IEEE80211_HW_BEACON_FILTER = 1<<14,
3382 ++ IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<15,
3383 + };
3384 +
3385 + /**
3386 +diff --git a/init/main.c b/init/main.c
3387 +index 4051d75..bc109c7 100644
3388 +--- a/init/main.c
3389 ++++ b/init/main.c
3390 +@@ -369,12 +369,6 @@ static void __init smp_init(void)
3391 + {
3392 + unsigned int cpu;
3393 +
3394 +- /*
3395 +- * Set up the current CPU as possible to migrate to.
3396 +- * The other ones will be done by cpu_up/cpu_down()
3397 +- */
3398 +- set_cpu_active(smp_processor_id(), true);
3399 +-
3400 + /* FIXME: This should be done in userspace --RR */
3401 + for_each_present_cpu(cpu) {
3402 + if (num_online_cpus() >= setup_max_cpus)
3403 +@@ -486,6 +480,7 @@ static void __init boot_cpu_init(void)
3404 + int cpu = smp_processor_id();
3405 + /* Mark the boot cpu "present", "online" etc for SMP and UP case */
3406 + set_cpu_online(cpu, true);
3407 ++ set_cpu_active(cpu, true);
3408 + set_cpu_present(cpu, true);
3409 + set_cpu_possible(cpu, true);
3410 + }
3411 +@@ -851,7 +846,7 @@ static int __init kernel_init(void * unused)
3412 + /*
3413 + * init can allocate pages on any node
3414 + */
3415 +- set_mems_allowed(node_possible_map);
3416 ++ set_mems_allowed(node_states[N_HIGH_MEMORY]);
3417 + /*
3418 + * init can run on any cpu.
3419 + */
3420 +diff --git a/ipc/mqueue.c b/ipc/mqueue.c
3421 +index ee9d697..d01bc14 100644
3422 +--- a/ipc/mqueue.c
3423 ++++ b/ipc/mqueue.c
3424 +@@ -706,7 +706,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
3425 + dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
3426 + if (IS_ERR(dentry)) {
3427 + error = PTR_ERR(dentry);
3428 +- goto out_err;
3429 ++ goto out_putfd;
3430 + }
3431 + mntget(ipc_ns->mq_mnt);
3432 +
3433 +@@ -744,7 +744,6 @@ out:
3434 + mntput(ipc_ns->mq_mnt);
3435 + out_putfd:
3436 + put_unused_fd(fd);
3437 +-out_err:
3438 + fd = error;
3439 + out_upsem:
3440 + mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
3441 +diff --git a/kernel/cpuset.c b/kernel/cpuset.c
3442 +index 39e5121..a81a910 100644
3443 +--- a/kernel/cpuset.c
3444 ++++ b/kernel/cpuset.c
3445 +@@ -921,9 +921,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
3446 + * call to guarantee_online_mems(), as we know no one is changing
3447 + * our task's cpuset.
3448 + *
3449 +- * Hold callback_mutex around the two modifications of our tasks
3450 +- * mems_allowed to synchronize with cpuset_mems_allowed().
3451 +- *
3452 + * While the mm_struct we are migrating is typically from some
3453 + * other task, the task_struct mems_allowed that we are hacking
3454 + * is for our current task, which must allocate new pages for that
3455 +@@ -1392,11 +1389,10 @@ static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
3456 +
3457 + if (cs == &top_cpuset) {
3458 + cpumask_copy(cpus_attach, cpu_possible_mask);
3459 +- to = node_possible_map;
3460 + } else {
3461 + guarantee_online_cpus(cs, cpus_attach);
3462 +- guarantee_online_mems(cs, &to);
3463 + }
3464 ++ guarantee_online_mems(cs, &to);
3465 +
3466 + /* do per-task migration stuff possibly for each in the threadgroup */
3467 + cpuset_attach_task(tsk, &to, cs);
3468 +@@ -2091,15 +2087,23 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
3469 + static int cpuset_track_online_nodes(struct notifier_block *self,
3470 + unsigned long action, void *arg)
3471 + {
3472 ++ nodemask_t oldmems;
3473 ++
3474 + cgroup_lock();
3475 + switch (action) {
3476 + case MEM_ONLINE:
3477 +- case MEM_OFFLINE:
3478 ++ oldmems = top_cpuset.mems_allowed;
3479 + mutex_lock(&callback_mutex);
3480 + top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
3481 + mutex_unlock(&callback_mutex);
3482 +- if (action == MEM_OFFLINE)
3483 +- scan_for_empty_cpusets(&top_cpuset);
3484 ++ update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
3485 ++ break;
3486 ++ case MEM_OFFLINE:
3487 ++ /*
3488 ++ * needn't update top_cpuset.mems_allowed explicitly because
3489 ++ * scan_for_empty_cpusets() will update it.
3490 ++ */
3491 ++ scan_for_empty_cpusets(&top_cpuset);
3492 + break;
3493 + default:
3494 + break;
3495 +diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
3496 +index 3e1c36e..931a4d9 100644
3497 +--- a/kernel/hrtimer.c
3498 ++++ b/kernel/hrtimer.c
3499 +@@ -557,7 +557,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
3500 + static int hrtimer_reprogram(struct hrtimer *timer,
3501 + struct hrtimer_clock_base *base)
3502 + {
3503 +- ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
3504 ++ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
3505 + ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
3506 + int res;
3507 +
3508 +@@ -582,7 +582,16 @@ static int hrtimer_reprogram(struct hrtimer *timer,
3509 + if (expires.tv64 < 0)
3510 + return -ETIME;
3511 +
3512 +- if (expires.tv64 >= expires_next->tv64)
3513 ++ if (expires.tv64 >= cpu_base->expires_next.tv64)
3514 ++ return 0;
3515 ++
3516 ++ /*
3517 ++ * If a hang was detected in the last timer interrupt then we
3518 ++ * do not schedule a timer which is earlier than the expiry
3519 ++ * which we enforced in the hang detection. We want the system
3520 ++ * to make progress.
3521 ++ */
3522 ++ if (cpu_base->hang_detected)
3523 + return 0;
3524 +
3525 + /*
3526 +@@ -590,7 +599,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
3527 + */
3528 + res = tick_program_event(expires, 0);
3529 + if (!IS_ERR_VALUE(res))
3530 +- *expires_next = expires;
3531 ++ cpu_base->expires_next = expires;
3532 + return res;
3533 + }
3534 +
3535 +@@ -1217,29 +1226,6 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
3536 +
3537 + #ifdef CONFIG_HIGH_RES_TIMERS
3538 +
3539 +-static int force_clock_reprogram;
3540 +-
3541 +-/*
3542 +- * After 5 iteration's attempts, we consider that hrtimer_interrupt()
3543 +- * is hanging, which could happen with something that slows the interrupt
3544 +- * such as the tracing. Then we force the clock reprogramming for each future
3545 +- * hrtimer interrupts to avoid infinite loops and use the min_delta_ns
3546 +- * threshold that we will overwrite.
3547 +- * The next tick event will be scheduled to 3 times we currently spend on
3548 +- * hrtimer_interrupt(). This gives a good compromise, the cpus will spend
3549 +- * 1/4 of their time to process the hrtimer interrupts. This is enough to
3550 +- * let it running without serious starvation.
3551 +- */
3552 +-
3553 +-static inline void
3554 +-hrtimer_interrupt_hanging(struct clock_event_device *dev,
3555 +- ktime_t try_time)
3556 +-{
3557 +- force_clock_reprogram = 1;
3558 +- dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
3559 +- printk(KERN_WARNING "hrtimer: interrupt too slow, "
3560 +- "forcing clock min delta to %lu ns\n", dev->min_delta_ns);
3561 +-}
3562 + /*
3563 + * High resolution timer interrupt
3564 + * Called with interrupts disabled
3565 +@@ -1248,21 +1234,15 @@ void hrtimer_interrupt(struct clock_event_device *dev)
3566 + {
3567 + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
3568 + struct hrtimer_clock_base *base;
3569 +- ktime_t expires_next, now;
3570 +- int nr_retries = 0;
3571 +- int i;
3572 ++ ktime_t expires_next, now, entry_time, delta;
3573 ++ int i, retries = 0;
3574 +
3575 + BUG_ON(!cpu_base->hres_active);
3576 + cpu_base->nr_events++;
3577 + dev->next_event.tv64 = KTIME_MAX;
3578 +
3579 +- retry:
3580 +- /* 5 retries is enough to notice a hang */
3581 +- if (!(++nr_retries % 5))
3582 +- hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
3583 +-
3584 +- now = ktime_get();
3585 +-
3586 ++ entry_time = now = ktime_get();
3587 ++retry:
3588 + expires_next.tv64 = KTIME_MAX;
3589 +
3590 + spin_lock(&cpu_base->lock);
3591 +@@ -1324,10 +1304,48 @@ void hrtimer_interrupt(struct clock_event_device *dev)
3592 + spin_unlock(&cpu_base->lock);
3593 +
3594 + /* Reprogramming necessary ? */
3595 +- if (expires_next.tv64 != KTIME_MAX) {
3596 +- if (tick_program_event(expires_next, force_clock_reprogram))
3597 +- goto retry;
3598 ++ if (expires_next.tv64 == KTIME_MAX ||
3599 ++ !tick_program_event(expires_next, 0)) {
3600 ++ cpu_base->hang_detected = 0;
3601 ++ return;
3602 + }
3603 ++
3604 ++ /*
3605 ++ * The next timer has already expired due to:
3606 ++ * - tracing
3607 ++ * - long lasting callbacks
3608 ++ * - being scheduled away when running in a VM
3609 ++ *
3610 ++ * We need to prevent the hrtimer interrupt routine from
3611 ++ * looping forever. We give it 3 attempts to avoid
3612 ++ * overreacting to some spurious event.
3613 ++ */
3614 ++ now = ktime_get();
3615 ++ cpu_base->nr_retries++;
3616 ++ if (++retries < 3)
3617 ++ goto retry;
3618 ++ /*
3619 ++ * Give the system a chance to do something other than looping
3620 ++ * here. We stored the entry time, so we know exactly how long
3621 ++ * we spent here. We schedule the next event this amount of
3622 ++ * time away.
3623 ++ */
3624 ++ cpu_base->nr_hangs++;
3625 ++ cpu_base->hang_detected = 1;
3626 ++ delta = ktime_sub(now, entry_time);
3627 ++ if (delta.tv64 > cpu_base->max_hang_time.tv64)
3628 ++ cpu_base->max_hang_time = delta;
3629 ++ /*
3630 ++ * Limit it to a sensible value as we enforce a longer
3631 ++ * delay. Give the CPU at least 100ms to catch up.
3632 ++ */
3633 ++ if (delta.tv64 > 100 * NSEC_PER_MSEC)
3634 ++ expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
3635 ++ else
3636 ++ expires_next = ktime_add(now, delta);
3637 ++ tick_program_event(expires_next, 1);
3638 ++ printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
3639 ++ ktime_to_ns(delta));
3640 + }
3641 +
3642 + /*
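
The reworked interrupt path above boils down to: run the expired timers, try to program the next event, and loop again only if that event already lies in the past, at most three times; beyond that the interrupt is declared hanging and the next event is pushed out by the time just spent, capped at 100ms. A rough userspace sketch of that bounded-retry-then-back-off control flow; the fake clock and the always-expired stub exist purely to drive the demo.

#include <stdio.h>

#define NSEC_PER_MSEC 1000000LL

static long long clock_ns;              /* fake monotonic clock */

/* Stub for expiry processing: it burns 40ms of fake time and, for the
 * sake of the demo, always reports the next event as already expired. */
static int run_expired_timers(void)
{
        clock_ns += 40 * NSEC_PER_MSEC;
        return 1;
}

int main(void)
{
        long long entry_time = clock_ns, delta, backoff;
        int retries = 0;

        while (run_expired_timers()) {  /* bounded, not infinite */
                if (++retries >= 3)
                        break;
        }

        delta = clock_ns - entry_time;  /* how long we spent looping */
        backoff = delta > 100 * NSEC_PER_MSEC ? 100 * NSEC_PER_MSEC : delta;
        printf("gave up after %d retries; next event pushed out %lld ms\n",
               retries, backoff / NSEC_PER_MSEC);
        return 0;
}
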
3643 +diff --git a/kernel/kthread.c b/kernel/kthread.c
3644 +index ab7ae57..84027cf 100644
3645 +--- a/kernel/kthread.c
3646 ++++ b/kernel/kthread.c
3647 +@@ -196,7 +196,7 @@ int kthreadd(void *unused)
3648 + set_task_comm(tsk, "kthreadd");
3649 + ignore_signals(tsk);
3650 + set_cpus_allowed_ptr(tsk, cpu_all_mask);
3651 +- set_mems_allowed(node_possible_map);
3652 ++ set_mems_allowed(node_states[N_HIGH_MEMORY]);
3653 +
3654 + current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
3655 +
3656 +diff --git a/kernel/perf_event.c b/kernel/perf_event.c
3657 +index 413d101..447e8db 100644
3658 +--- a/kernel/perf_event.c
3659 ++++ b/kernel/perf_event.c
3660 +@@ -4981,12 +4981,22 @@ int perf_event_init_task(struct task_struct *child)
3661 + return ret;
3662 + }
3663 +
3664 ++static void __init perf_event_init_all_cpus(void)
3665 ++{
3666 ++ int cpu;
3667 ++ struct perf_cpu_context *cpuctx;
3668 ++
3669 ++ for_each_possible_cpu(cpu) {
3670 ++ cpuctx = &per_cpu(perf_cpu_context, cpu);
3671 ++ __perf_event_init_context(&cpuctx->ctx, NULL);
3672 ++ }
3673 ++}
3674 ++
3675 + static void __cpuinit perf_event_init_cpu(int cpu)
3676 + {
3677 + struct perf_cpu_context *cpuctx;
3678 +
3679 + cpuctx = &per_cpu(perf_cpu_context, cpu);
3680 +- __perf_event_init_context(&cpuctx->ctx, NULL);
3681 +
3682 + spin_lock(&perf_resource_lock);
3683 + cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
3684 +@@ -5057,6 +5067,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
3685 +
3686 + void __init perf_event_init(void)
3687 + {
3688 ++ perf_event_init_all_cpus();
3689 + perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
3690 + (void *)(long)smp_processor_id());
3691 + perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
3692 +diff --git a/kernel/sched.c b/kernel/sched.c
3693 +index 380e1fa..ed61192 100644
3694 +--- a/kernel/sched.c
3695 ++++ b/kernel/sched.c
3696 +@@ -3402,6 +3402,7 @@ struct sd_lb_stats {
3697 + unsigned long max_load;
3698 + unsigned long busiest_load_per_task;
3699 + unsigned long busiest_nr_running;
3700 ++ unsigned long busiest_group_capacity;
3701 +
3702 + int group_imb; /* Is there imbalance in this sd */
3703 + #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3704 +@@ -3721,8 +3722,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
3705 + unsigned long load, max_cpu_load, min_cpu_load;
3706 + int i;
3707 + unsigned int balance_cpu = -1, first_idle_cpu = 0;
3708 +- unsigned long sum_avg_load_per_task;
3709 +- unsigned long avg_load_per_task;
3710 ++ unsigned long avg_load_per_task = 0;
3711 +
3712 + if (local_group) {
3713 + balance_cpu = group_first_cpu(group);
3714 +@@ -3731,7 +3731,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
3715 + }
3716 +
3717 + /* Tally up the load of all CPUs in the group */
3718 +- sum_avg_load_per_task = avg_load_per_task = 0;
3719 + max_cpu_load = 0;
3720 + min_cpu_load = ~0UL;
3721 +
3722 +@@ -3761,7 +3760,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
3723 + sgs->sum_nr_running += rq->nr_running;
3724 + sgs->sum_weighted_load += weighted_cpuload(i);
3725 +
3726 +- sum_avg_load_per_task += cpu_avg_load_per_task(i);
3727 + }
3728 +
3729 + /*
3730 +@@ -3779,7 +3777,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
3731 + /* Adjust by relative CPU power of the group */
3732 + sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
3733 +
3734 +-
3735 + /*
3736 + * Consider the group unbalanced when the imbalance is larger
3737 + * than the average weight of two tasks.
3738 +@@ -3789,8 +3786,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
3739 + * normalized nr_running number somewhere that negates
3740 + * the hierarchy?
3741 + */
3742 +- avg_load_per_task = (sum_avg_load_per_task * SCHED_LOAD_SCALE) /
3743 +- group->cpu_power;
3744 ++ if (sgs->sum_nr_running)
3745 ++ avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
3746 +
3747 + if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
3748 + sgs->group_imb = 1;
3749 +@@ -3859,6 +3856,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
3750 + sds->max_load = sgs.avg_load;
3751 + sds->busiest = group;
3752 + sds->busiest_nr_running = sgs.sum_nr_running;
3753 ++ sds->busiest_group_capacity = sgs.group_capacity;
3754 + sds->busiest_load_per_task = sgs.sum_weighted_load;
3755 + sds->group_imb = sgs.group_imb;
3756 + }
3757 +@@ -3881,6 +3879,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
3758 + {
3759 + unsigned long tmp, pwr_now = 0, pwr_move = 0;
3760 + unsigned int imbn = 2;
3761 ++ unsigned long scaled_busy_load_per_task;
3762 +
3763 + if (sds->this_nr_running) {
3764 + sds->this_load_per_task /= sds->this_nr_running;
3765 +@@ -3891,8 +3890,12 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
3766 + sds->this_load_per_task =
3767 + cpu_avg_load_per_task(this_cpu);
3768 +
3769 +- if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
3770 +- sds->busiest_load_per_task * imbn) {
3771 ++ scaled_busy_load_per_task = sds->busiest_load_per_task
3772 ++ * SCHED_LOAD_SCALE;
3773 ++ scaled_busy_load_per_task /= sds->busiest->cpu_power;
3774 ++
3775 ++ if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
3776 ++ (scaled_busy_load_per_task * imbn)) {
3777 + *imbalance = sds->busiest_load_per_task;
3778 + return;
3779 + }
3780 +@@ -3943,7 +3946,14 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
3781 + static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
3782 + unsigned long *imbalance)
3783 + {
3784 +- unsigned long max_pull;
3785 ++ unsigned long max_pull, load_above_capacity = ~0UL;
3786 ++
3787 ++ sds->busiest_load_per_task /= sds->busiest_nr_running;
3788 ++ if (sds->group_imb) {
3789 ++ sds->busiest_load_per_task =
3790 ++ min(sds->busiest_load_per_task, sds->avg_load);
3791 ++ }
3792 ++
3793 + /*
3794 + * In the presence of smp nice balancing, certain scenarios can have
3795 + * max load less than avg load(as we skip the groups at or below
3796 +@@ -3954,9 +3964,29 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
3797 + return fix_small_imbalance(sds, this_cpu, imbalance);
3798 + }
3799 +
3800 +- /* Don't want to pull so many tasks that a group would go idle */
3801 +- max_pull = min(sds->max_load - sds->avg_load,
3802 +- sds->max_load - sds->busiest_load_per_task);
3803 ++ if (!sds->group_imb) {
3804 ++ /*
3805 ++ * Don't want to pull so many tasks that a group would go idle.
3806 ++ */
3807 ++ load_above_capacity = (sds->busiest_nr_running -
3808 ++ sds->busiest_group_capacity);
3809 ++
3810 ++ load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_LOAD_SCALE);
3811 ++
3812 ++ load_above_capacity /= sds->busiest->cpu_power;
3813 ++ }
3814 ++
3815 ++ /*
3816 ++ * We're trying to get all the cpus to the average_load, so we don't
3817 ++ * want to push ourselves above the average load, nor do we wish to
3818 ++ * reduce the max loaded cpu below the average load. At the same time,
3819 ++ * we also don't want to reduce the group load below the group capacity
3820 ++ * (so that we can implement power-savings policies etc). Thus we look
3821 ++ * for the minimum possible imbalance.
3822 ++ * Be careful of negative numbers as they'll appear as very large values
3823 ++ * with unsigned longs.
3824 ++ */
3825 ++ max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
3826 +
3827 + /* How much load to actually move to equalise the imbalance */
3828 + *imbalance = min(max_pull * sds->busiest->cpu_power,
3829 +@@ -4024,7 +4054,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
3830 + * 4) This group is more busy than the avg busieness at this
3831 + * sched_domain.
3832 + * 5) The imbalance is within the specified limit.
3833 +- * 6) Any rebalance would lead to ping-pong
3834 + */
3835 + if (balance && !(*balance))
3836 + goto ret;
3837 +@@ -4043,25 +4072,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
3838 + if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
3839 + goto out_balanced;
3840 +
3841 +- sds.busiest_load_per_task /= sds.busiest_nr_running;
3842 +- if (sds.group_imb)
3843 +- sds.busiest_load_per_task =
3844 +- min(sds.busiest_load_per_task, sds.avg_load);
3845 +-
3846 +- /*
3847 +- * We're trying to get all the cpus to the average_load, so we don't
3848 +- * want to push ourselves above the average load, nor do we wish to
3849 +- * reduce the max loaded cpu below the average load, as either of these
3850 +- * actions would just result in more rebalancing later, and ping-pong
3851 +- * tasks around. Thus we look for the minimum possible imbalance.
3852 +- * Negative imbalances (*we* are more loaded than anyone else) will
3853 +- * be counted as no imbalance for these purposes -- we can't fix that
3854 +- * by pulling tasks to us. Be careful of negative numbers as they'll
3855 +- * appear as very large values with unsigned longs.
3856 +- */
3857 +- if (sds.max_load <= sds.busiest_load_per_task)
3858 +- goto out_balanced;
3859 +-
3860 + /* Looks like there is an imbalance. Compute it */
3861 + calculate_imbalance(&sds, this_cpu, imbalance);
3862 + return sds.busiest;
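
The warning above about negative numbers deserves a concrete illustration: every load figure here is an unsigned long, so a subtraction that would go negative instead wraps to an enormous value, which is why calculate_imbalance() only forms max_load - avg_load after the max_load < avg_load case has been diverted to fix_small_imbalance(). A tiny userspace demonstration with made-up loads:

#include <stdio.h>

int main(void)
{
        unsigned long max_load = 900, avg_load = 1000;  /* made-up loads */

        /* With unsigned arithmetic, 900 - 1000 does not give -100: it
         * wraps around to a huge positive value. */
        unsigned long diff = max_load - avg_load;

        printf("wrapped diff: %lu\n", diff);
        /* Hence the ordering of the checks: bail out to the small-imbalance
         * path while max_load < avg_load, and only form this difference
         * once it is known to be non-negative. */
        return 0;
}
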
3863 +diff --git a/kernel/softlockup.c b/kernel/softlockup.c
3864 +index 81324d1..d2080ad 100644
3865 +--- a/kernel/softlockup.c
3866 ++++ b/kernel/softlockup.c
3867 +@@ -140,11 +140,11 @@ void softlockup_tick(void)
3868 + * Wake up the high-prio watchdog task twice per
3869 + * threshold timespan.
3870 + */
3871 +- if (now > touch_timestamp + softlockup_thresh/2)
3872 ++ if (time_after(now - softlockup_thresh/2, touch_timestamp))
3873 + wake_up_process(per_cpu(watchdog_task, this_cpu));
3874 +
3875 + /* Warn about unreasonable delays: */
3876 +- if (now <= (touch_timestamp + softlockup_thresh))
3877 ++ if (time_before_eq(now - softlockup_thresh, touch_timestamp))
3878 + return;
3879 +
3880 + per_cpu(print_timestamp, this_cpu) = touch_timestamp;
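
The softlockup comparisons switch to time_after()/time_before_eq() because raw ordering tests on a free-running timestamp give the wrong answer once the counter wraps, while comparing through a signed difference does not. A small userspace demonstration using the same trick as the kernel macros (redefined here so the example is self-contained):

#include <stdio.h>
#include <limits.h>

/* Same idea as the kernel's jiffies helpers: compare via a signed
 * difference so the test survives counter wraparound. */
#define time_after(a, b)      ((long)((b) - (a)) < 0)
#define time_before_eq(a, b)  ((long)((a) - (b)) <= 0)

int main(void)
{
        unsigned long touch = ULONG_MAX - 5; /* stamp taken just before wrap */
        unsigned long now   = 10;            /* counter has since wrapped */

        /* The naive "now > touch" claims no time has passed. */
        printf("naive:     %d\n", now > touch);            /* 0: wrong */
        printf("wrap-safe: %d\n", time_after(now, touch)); /* 1: correct */
        return 0;
}
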
3881 +diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
3882 +index ecc7adb..f8b0f96 100644
3883 +--- a/kernel/time/clocksource.c
3884 ++++ b/kernel/time/clocksource.c
3885 +@@ -515,6 +515,10 @@ static inline void clocksource_select(void) { }
3886 + */
3887 + static int __init clocksource_done_booting(void)
3888 + {
3889 ++ mutex_lock(&clocksource_mutex);
3890 ++ curr_clocksource = clocksource_default_clock();
3891 ++ mutex_unlock(&clocksource_mutex);
3892 ++
3893 + finished_booting = 1;
3894 +
3895 + /*
3896 +diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
3897 +index 1b5b7aa..54c0dda 100644
3898 +--- a/kernel/time/timer_list.c
3899 ++++ b/kernel/time/timer_list.c
3900 +@@ -150,6 +150,9 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
3901 + P_ns(expires_next);
3902 + P(hres_active);
3903 + P(nr_events);
3904 ++ P(nr_retries);
3905 ++ P(nr_hangs);
3906 ++ P_ns(max_hang_time);
3907 + #endif
3908 + #undef P
3909 + #undef P_ns
3910 +@@ -252,7 +255,7 @@ static int timer_list_show(struct seq_file *m, void *v)
3911 + u64 now = ktime_to_ns(ktime_get());
3912 + int cpu;
3913 +
3914 +- SEQ_printf(m, "Timer List Version: v0.4\n");
3915 ++ SEQ_printf(m, "Timer List Version: v0.5\n");
3916 + SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
3917 + SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
3918 +
3919 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
3920 +index 6dc4e5e..0cccb6c 100644
3921 +--- a/kernel/trace/ftrace.c
3922 ++++ b/kernel/trace/ftrace.c
3923 +@@ -3258,6 +3258,7 @@ void ftrace_graph_init_task(struct task_struct *t)
3924 + {
3925 + /* Make sure we do not use the parent ret_stack */
3926 + t->ret_stack = NULL;
3927 ++ t->curr_ret_stack = -1;
3928 +
3929 + if (ftrace_graph_active) {
3930 + struct ftrace_ret_stack *ret_stack;
3931 +@@ -3267,7 +3268,6 @@ void ftrace_graph_init_task(struct task_struct *t)
3932 + GFP_KERNEL);
3933 + if (!ret_stack)
3934 + return;
3935 +- t->curr_ret_stack = -1;
3936 + atomic_set(&t->tracing_graph_pause, 0);
3937 + atomic_set(&t->trace_overrun, 0);
3938 + t->ftrace_timestamp = 0;
3939 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
3940 +index 5dd017f..c88b21c 100644
3941 +--- a/kernel/trace/ring_buffer.c
3942 ++++ b/kernel/trace/ring_buffer.c
3943 +@@ -2237,12 +2237,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
3944 + if (ring_buffer_flags != RB_BUFFERS_ON)
3945 + return NULL;
3946 +
3947 +- if (atomic_read(&buffer->record_disabled))
3948 +- return NULL;
3949 +-
3950 + /* If we are tracing schedule, we don't want to recurse */
3951 + resched = ftrace_preempt_disable();
3952 +
3953 ++ if (atomic_read(&buffer->record_disabled))
3954 ++ goto out_nocheck;
3955 ++
3956 + if (trace_recursive_lock())
3957 + goto out_nocheck;
3958 +
3959 +@@ -2474,11 +2474,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
3960 + if (ring_buffer_flags != RB_BUFFERS_ON)
3961 + return -EBUSY;
3962 +
3963 +- if (atomic_read(&buffer->record_disabled))
3964 +- return -EBUSY;
3965 +-
3966 + resched = ftrace_preempt_disable();
3967 +
3968 ++ if (atomic_read(&buffer->record_disabled))
3969 ++ goto out;
3970 ++
3971 + cpu = raw_smp_processor_id();
3972 +
3973 + if (!cpumask_test_cpu(cpu, buffer->cpumask))
3974 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
3975 +index b20d3ec..3cfb60b 100644
3976 +--- a/kernel/trace/trace.c
3977 ++++ b/kernel/trace/trace.c
3978 +@@ -748,10 +748,10 @@ out:
3979 + mutex_unlock(&trace_types_lock);
3980 + }
3981 +
3982 +-static void __tracing_reset(struct trace_array *tr, int cpu)
3983 ++static void __tracing_reset(struct ring_buffer *buffer, int cpu)
3984 + {
3985 + ftrace_disable_cpu();
3986 +- ring_buffer_reset_cpu(tr->buffer, cpu);
3987 ++ ring_buffer_reset_cpu(buffer, cpu);
3988 + ftrace_enable_cpu();
3989 + }
3990 +
3991 +@@ -763,7 +763,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
3992 +
3993 + /* Make sure all commits have finished */
3994 + synchronize_sched();
3995 +- __tracing_reset(tr, cpu);
3996 ++ __tracing_reset(buffer, cpu);
3997 +
3998 + ring_buffer_record_enable(buffer);
3999 + }
4000 +@@ -781,7 +781,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
4001 + tr->time_start = ftrace_now(tr->cpu);
4002 +
4003 + for_each_online_cpu(cpu)
4004 +- __tracing_reset(tr, cpu);
4005 ++ __tracing_reset(buffer, cpu);
4006 +
4007 + ring_buffer_record_enable(buffer);
4008 + }
4009 +@@ -858,6 +858,8 @@ void tracing_start(void)
4010 + goto out;
4011 + }
4012 +
4013 ++ /* Prevent the buffers from switching */
4014 ++ __raw_spin_lock(&ftrace_max_lock);
4015 +
4016 + buffer = global_trace.buffer;
4017 + if (buffer)
4018 +@@ -867,6 +869,8 @@ void tracing_start(void)
4019 + if (buffer)
4020 + ring_buffer_record_enable(buffer);
4021 +
4022 ++ __raw_spin_unlock(&ftrace_max_lock);
4023 ++
4024 + ftrace_start();
4025 + out:
4026 + spin_unlock_irqrestore(&tracing_start_lock, flags);
4027 +@@ -888,6 +892,9 @@ void tracing_stop(void)
4028 + if (trace_stop_count++)
4029 + goto out;
4030 +
4031 ++ /* Prevent the buffers from switching */
4032 ++ __raw_spin_lock(&ftrace_max_lock);
4033 ++
4034 + buffer = global_trace.buffer;
4035 + if (buffer)
4036 + ring_buffer_record_disable(buffer);
4037 +@@ -896,6 +903,8 @@ void tracing_stop(void)
4038 + if (buffer)
4039 + ring_buffer_record_disable(buffer);
4040 +
4041 ++ __raw_spin_unlock(&ftrace_max_lock);
4042 ++
4043 + out:
4044 + spin_unlock_irqrestore(&tracing_start_lock, flags);
4045 + }
4046 +@@ -1162,6 +1171,13 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
4047 + if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
4048 + return;
4049 +
4050 ++ /*
4051 ++ * NMIs can not handle page faults, even with fix ups.
4052 ++ * The save user stack can (and often does) fault.
4053 ++ */
4054 ++ if (unlikely(in_nmi()))
4055 ++ return;
4056 ++
4057 + event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
4058 + sizeof(*entry), flags, pc);
4059 + if (!event)
4060 +diff --git a/lib/Makefile b/lib/Makefile
4061 +index 2e78277..452f188 100644
4062 +--- a/lib/Makefile
4063 ++++ b/lib/Makefile
4064 +@@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
4065 +
4066 + obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
4067 + bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
4068 +- string_helpers.o gcd.o
4069 ++ string_helpers.o gcd.o lcm.o
4070 +
4071 + ifeq ($(CONFIG_DEBUG_KOBJECT),y)
4072 + CFLAGS_kobject.o += -DDEBUG
4073 +diff --git a/lib/lcm.c b/lib/lcm.c
4074 +new file mode 100644
4075 +index 0000000..157cd88
4076 +--- /dev/null
4077 ++++ b/lib/lcm.c
4078 +@@ -0,0 +1,15 @@
4079 ++#include <linux/kernel.h>
4080 ++#include <linux/gcd.h>
4081 ++#include <linux/module.h>
4082 ++
4083 ++/* Lowest common multiple */
4084 ++unsigned long lcm(unsigned long a, unsigned long b)
4085 ++{
4086 ++ if (a && b)
4087 ++ return (a * b) / gcd(a, b);
4088 ++ else if (b)
4089 ++ return b;
4090 ++
4091 ++ return a;
4092 ++}
4093 ++EXPORT_SYMBOL_GPL(lcm);
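
The helper keeps the conventional edge cases lcm(a, 0) == a and lcm(0, b) == b. Note that a * b can overflow unsigned long for large inputs, which is presumably acceptable for the small block-size values its callers pass. A standalone copy with a quick sanity check:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
        while (b) {
                unsigned long t = b;

                b = a % b;
                a = t;
        }
        return a;
}

/* Same logic as the new lib/lcm.c. */
static unsigned long lcm(unsigned long a, unsigned long b)
{
        if (a && b)
                return (a * b) / gcd(a, b);
        else if (b)
                return b;
        return a;
}

int main(void)
{
        printf("%lu %lu %lu\n", lcm(4, 6), lcm(0, 5), lcm(7, 0)); /* 12 5 7 */
        return 0;
}
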
4094 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
4095 +index 4545d59..f29d8d7 100644
4096 +--- a/mm/mempolicy.c
4097 ++++ b/mm/mempolicy.c
4098 +@@ -2122,8 +2122,8 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4099 + char *rest = nodelist;
4100 + while (isdigit(*rest))
4101 + rest++;
4102 +- if (!*rest)
4103 +- err = 0;
4104 ++ if (*rest)
4105 ++ goto out;
4106 + }
4107 + break;
4108 + case MPOL_INTERLEAVE:
4109 +@@ -2132,7 +2132,6 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4110 + */
4111 + if (!nodelist)
4112 + nodes = node_states[N_HIGH_MEMORY];
4113 +- err = 0;
4114 + break;
4115 + case MPOL_LOCAL:
4116 + /*
4117 +@@ -2142,11 +2141,19 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4118 + goto out;
4119 + mode = MPOL_PREFERRED;
4120 + break;
4121 +-
4122 +- /*
4123 +- * case MPOL_BIND: mpol_new() enforces non-empty nodemask.
4124 +- * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags.
4125 +- */
4126 ++ case MPOL_DEFAULT:
4127 ++ /*
4128 ++ * Insist on an empty nodelist
4129 ++ */
4130 ++ if (!nodelist)
4131 ++ err = 0;
4132 ++ goto out;
4133 ++ case MPOL_BIND:
4134 ++ /*
4135 ++ * Insist on a nodelist
4136 ++ */
4137 ++ if (!nodelist)
4138 ++ goto out;
4139 + }
4140 +
4141 + mode_flags = 0;
4142 +@@ -2160,13 +2167,14 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4143 + else if (!strcmp(flags, "relative"))
4144 + mode_flags |= MPOL_F_RELATIVE_NODES;
4145 + else
4146 +- err = 1;
4147 ++ goto out;
4148 + }
4149 +
4150 + new = mpol_new(mode, mode_flags, &nodes);
4151 + if (IS_ERR(new))
4152 +- err = 1;
4153 +- else {
4154 ++ goto out;
4155 ++
4156 ++ {
4157 + int ret;
4158 + NODEMASK_SCRATCH(scratch);
4159 + if (scratch) {
4160 +@@ -2177,13 +2185,15 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4161 + ret = -ENOMEM;
4162 + NODEMASK_SCRATCH_FREE(scratch);
4163 + if (ret) {
4164 +- err = 1;
4165 + mpol_put(new);
4166 +- } else if (no_context) {
4167 +- /* save for contextualization */
4168 +- new->w.user_nodemask = nodes;
4169 ++ goto out;
4170 + }
4171 + }
4172 ++ err = 0;
4173 ++ if (no_context) {
4174 ++ /* save for contextualization */
4175 ++ new->w.user_nodemask = nodes;
4176 ++ }
4177 +
4178 + out:
4179 + /* Restore string for error message */
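
The restructuring above flips mpol_parse_str() from "assume success, flag errors as they appear" to "assume failure, goto out on any bad input, and clear err on the single success path", which is harder to get wrong as cases are added. A minimal userspace sketch of that parsing shape; the keywords are invented for the demo.

#include <stdio.h>
#include <string.h>

/* Default-to-failure parsing: every early exit leaves err set, and only
 * the one success path clears it. */
static int parse_mode(const char *str, int *mode)
{
        int err = 1;            /* assume failure */

        if (!strcmp(str, "default"))
                *mode = 0;
        else if (!strcmp(str, "bind"))
                *mode = 1;
        else
                goto out;       /* unknown keyword */

        err = 0;                /* single success path */
out:
        return err;
}

int main(void)
{
        int mode;

        printf("%d\n", parse_mode("bind", &mode));      /* 0: ok */
        printf("%d\n", parse_mode("bogus", &mode));     /* 1: rejected */
        return 0;
}
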
4180 +diff --git a/mm/readahead.c b/mm/readahead.c
4181 +index 8f40b47..337b20e 100644
4182 +--- a/mm/readahead.c
4183 ++++ b/mm/readahead.c
4184 +@@ -553,5 +553,17 @@ page_cache_async_readahead(struct address_space *mapping,
4185 +
4186 + /* do read-ahead */
4187 + ondemand_readahead(mapping, ra, filp, true, offset, req_size);
4188 ++
4189 ++#ifdef CONFIG_BLOCK
4190 ++ /*
4191 ++ * Normally the current page is !uptodate and lock_page() will be
4192 ++ * immediately called to implicitly unplug the device. However this
4193 ++ * is not always true for RAID configurations, where data arrives
4194 ++ * not strictly in submission order. In this case we need to
4195 ++ * explicitly kick off the IO.
4196 ++ */
4197 ++ if (PageUptodate(page))
4198 ++ blk_run_backing_dev(mapping->backing_dev_info, NULL);
4199 ++#endif
4200 + }
4201 + EXPORT_SYMBOL_GPL(page_cache_async_readahead);
4202 +diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
4203 +index 947f8bb..8d1c4a9 100644
4204 +--- a/net/bluetooth/l2cap.c
4205 ++++ b/net/bluetooth/l2cap.c
4206 +@@ -2813,6 +2813,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
4207 + int len = cmd->len - sizeof(*rsp);
4208 + char req[64];
4209 +
4210 ++ if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4211 ++ l2cap_send_disconn_req(conn, sk);
4212 ++ goto done;
4213 ++ }
4214 ++
4215 + /* throw out any old stored conf requests */
4216 + result = L2CAP_CONF_SUCCESS;
4217 + len = l2cap_parse_conf_rsp(sk, rsp->data,
4218 +@@ -3885,16 +3890,24 @@ static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
4219 + struct sock *sk;
4220 + struct hlist_node *node;
4221 + char *str = buf;
4222 ++ int size = PAGE_SIZE;
4223 +
4224 + read_lock_bh(&l2cap_sk_list.lock);
4225 +
4226 + sk_for_each(sk, node, &l2cap_sk_list.head) {
4227 + struct l2cap_pinfo *pi = l2cap_pi(sk);
4228 ++ int len;
4229 +
4230 +- str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4231 ++ len = snprintf(str, size, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4232 + batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
4233 + sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
4234 + pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
4235 ++
4236 ++ size -= len;
4237 ++ if (size <= 0)
4238 ++ break;
4239 ++
4240 ++ str += len;
4241 + }
4242 +
4243 + read_unlock_bh(&l2cap_sk_list.lock);
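
This sysfs show method, and the rfcomm and sco ones below, all get the same treatment: sprintf() becomes snprintf() with a running count of the space left in the single page sysfs supplies, and the loop bails out once that page is full instead of writing past it. A generic userspace sketch of the pattern; the connection strings are made up.

#include <stdio.h>

#define PAGE_SIZE 4096

/* Bounded append loop, as in the reworked show methods: track the space
 * left and stop as soon as the page is full. */
static int show(char *buf, const char *items[], int n)
{
        char *str = buf;
        int size = PAGE_SIZE, i;

        for (i = 0; i < n; i++) {
                int len = snprintf(str, size, "%s\n", items[i]);

                size -= len;
                if (size <= 0)
                        break;  /* page full: drop the rest */
                str += len;
        }
        return str - buf;       /* bytes actually committed */
}

int main(void)
{
        char page[PAGE_SIZE];
        const char *conns[] = { "00:11:22:33:44:55 1", "66:77:88:99:AA:BB 2" };

        printf("%.*s", show(page, conns, 2), page);
        return 0;
}
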
4244 +diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
4245 +index 25692bc..ef3abf2 100644
4246 +--- a/net/bluetooth/rfcomm/core.c
4247 ++++ b/net/bluetooth/rfcomm/core.c
4248 +@@ -251,7 +251,6 @@ static void rfcomm_session_timeout(unsigned long arg)
4249 + BT_DBG("session %p state %ld", s, s->state);
4250 +
4251 + set_bit(RFCOMM_TIMED_OUT, &s->flags);
4252 +- rfcomm_session_put(s);
4253 + rfcomm_schedule(RFCOMM_SCHED_TIMEO);
4254 + }
4255 +
4256 +@@ -1917,6 +1916,7 @@ static inline void rfcomm_process_sessions(void)
4257 + if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
4258 + s->state = BT_DISCONN;
4259 + rfcomm_send_disc(s, 0);
4260 ++ rfcomm_session_put(s);
4261 + continue;
4262 + }
4263 +
4264 +@@ -2096,6 +2096,7 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf)
4265 + struct rfcomm_session *s;
4266 + struct list_head *pp, *p;
4267 + char *str = buf;
4268 ++ int size = PAGE_SIZE;
4269 +
4270 + rfcomm_lock();
4271 +
4272 +@@ -2104,11 +2105,21 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf)
4273 + list_for_each(pp, &s->dlcs) {
4274 + struct sock *sk = s->sock->sk;
4275 + struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
4276 ++ int len;
4277 +
4278 +- str += sprintf(str, "%s %s %ld %d %d %d %d\n",
4279 ++ len = snprintf(str, size, "%s %s %ld %d %d %d %d\n",
4280 + batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
4281 + d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits);
4282 ++
4283 ++ size -= len;
4284 ++ if (size <= 0)
4285 ++ break;
4286 ++
4287 ++ str += len;
4288 + }
4289 ++
4290 ++ if (size <= 0)
4291 ++ break;
4292 + }
4293 +
4294 + rfcomm_unlock();
4295 +diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
4296 +index 8a20aaf..30a3649 100644
4297 +--- a/net/bluetooth/rfcomm/sock.c
4298 ++++ b/net/bluetooth/rfcomm/sock.c
4299 +@@ -1065,13 +1065,22 @@ static ssize_t rfcomm_sock_sysfs_show(struct class *dev, char *buf)
4300 + struct sock *sk;
4301 + struct hlist_node *node;
4302 + char *str = buf;
4303 ++ int size = PAGE_SIZE;
4304 +
4305 + read_lock_bh(&rfcomm_sk_list.lock);
4306 +
4307 + sk_for_each(sk, node, &rfcomm_sk_list.head) {
4308 +- str += sprintf(str, "%s %s %d %d\n",
4309 ++ int len;
4310 ++
4311 ++ len = snprintf(str, size, "%s %s %d %d\n",
4312 + batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
4313 + sk->sk_state, rfcomm_pi(sk)->channel);
4314 ++
4315 ++ size -= len;
4316 ++ if (size <= 0)
4317 ++ break;
4318 ++
4319 ++ str += len;
4320 + }
4321 +
4322 + read_unlock_bh(&rfcomm_sk_list.lock);
4323 +diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
4324 +index 77f4153..5c0685e 100644
4325 +--- a/net/bluetooth/sco.c
4326 ++++ b/net/bluetooth/sco.c
4327 +@@ -957,13 +957,22 @@ static ssize_t sco_sysfs_show(struct class *dev, char *buf)
4328 + struct sock *sk;
4329 + struct hlist_node *node;
4330 + char *str = buf;
4331 ++ int size = PAGE_SIZE;
4332 +
4333 + read_lock_bh(&sco_sk_list.lock);
4334 +
4335 + sk_for_each(sk, node, &sco_sk_list.head) {
4336 +- str += sprintf(str, "%s %s %d\n",
4337 ++ int len;
4338 ++
4339 ++ len = snprintf(str, size, "%s %s %d\n",
4340 + batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
4341 + sk->sk_state);
4342 ++
4343 ++ size -= len;
4344 ++ if (size <= 0)
4345 ++ break;
4346 ++
4347 ++ str += len;
4348 + }
4349 +
4350 + read_unlock_bh(&sco_sk_list.lock);
4351 +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
4352 +index 5a46164..ca62bfe 100644
4353 +--- a/net/mac80211/ieee80211_i.h
4354 ++++ b/net/mac80211/ieee80211_i.h
4355 +@@ -264,6 +264,7 @@ enum ieee80211_sta_flags {
4356 + IEEE80211_STA_DISABLE_11N = BIT(4),
4357 + IEEE80211_STA_CSA_RECEIVED = BIT(5),
4358 + IEEE80211_STA_MFP_ENABLED = BIT(6),
4359 ++ IEEE80211_STA_NULLFUNC_ACKED = BIT(7),
4360 + };
4361 +
4362 + /* flags for MLME request */
4363 +diff --git a/net/mac80211/main.c b/net/mac80211/main.c
4364 +index 797f539..19fbd25 100644
4365 +--- a/net/mac80211/main.c
4366 ++++ b/net/mac80211/main.c
4367 +@@ -441,6 +441,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
4368 + rcu_read_lock();
4369 +
4370 + sband = local->hw.wiphy->bands[info->band];
4371 ++ fc = hdr->frame_control;
4372 +
4373 + sta = sta_info_get(local, hdr->addr1);
4374 +
4375 +@@ -522,6 +523,20 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
4376 + local->dot11FailedCount++;
4377 + }
4378 +
4379 ++ if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
4380 ++ (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
4381 ++ !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
4382 ++ local->ps_sdata && !(local->scanning)) {
4383 ++ if (info->flags & IEEE80211_TX_STAT_ACK) {
4384 ++ local->ps_sdata->u.mgd.flags |=
4385 ++ IEEE80211_STA_NULLFUNC_ACKED;
4386 ++ ieee80211_queue_work(&local->hw,
4387 ++ &local->dynamic_ps_enable_work);
4388 ++ } else
4389 ++ mod_timer(&local->dynamic_ps_timer, jiffies +
4390 ++ msecs_to_jiffies(10));
4391 ++ }
4392 ++
4393 + /* this was a transmitted frame, but now we want to reuse it */
4394 + skb_orphan(skb);
4395 +
4396 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
4397 +index 6cae295..4a15df1 100644
4398 +--- a/net/mac80211/mlme.c
4399 ++++ b/net/mac80211/mlme.c
4400 +@@ -650,8 +650,11 @@ static void ieee80211_enable_ps(struct ieee80211_local *local,
4401 + } else {
4402 + if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
4403 + ieee80211_send_nullfunc(local, sdata, 1);
4404 +- conf->flags |= IEEE80211_CONF_PS;
4405 +- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
4406 ++
4407 ++ if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
4408 ++ conf->flags |= IEEE80211_CONF_PS;
4409 ++ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
4410 ++ }
4411 + }
4412 + }
4413 +
4414 +@@ -742,6 +745,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
4415 + container_of(work, struct ieee80211_local,
4416 + dynamic_ps_enable_work);
4417 + struct ieee80211_sub_if_data *sdata = local->ps_sdata;
4418 ++ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
4419 +
4420 + /* can only happen when PS was just disabled anyway */
4421 + if (!sdata)
4422 +@@ -750,11 +754,16 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
4423 + if (local->hw.conf.flags & IEEE80211_CONF_PS)
4424 + return;
4425 +
4426 +- if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
4427 ++ if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
4428 ++ (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)))
4429 + ieee80211_send_nullfunc(local, sdata, 1);
4430 +
4431 +- local->hw.conf.flags |= IEEE80211_CONF_PS;
4432 +- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
4433 ++ if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) ||
4434 ++ (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
4435 ++ ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
4436 ++ local->hw.conf.flags |= IEEE80211_CONF_PS;
4437 ++ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
4438 ++ }
4439 + }
4440 +
4441 + void ieee80211_dynamic_ps_timer(unsigned long data)
4442 +@@ -2458,6 +2467,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
4443 + list_add(&wk->list, &ifmgd->work_list);
4444 +
4445 + ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
4446 ++ ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
4447 +
4448 + for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
4449 + if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
4450 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
4451 +index 16c6cdc..538a7d7 100644
4452 +--- a/net/mac80211/rx.c
4453 ++++ b/net/mac80211/rx.c
4454 +@@ -1590,6 +1590,7 @@ static ieee80211_rx_result debug_noinline
4455 + ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
4456 + {
4457 + struct net_device *dev = rx->dev;
4458 ++ struct ieee80211_local *local = rx->local;
4459 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
4460 + __le16 fc = hdr->frame_control;
4461 + int err;
4462 +@@ -1612,6 +1613,13 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
4463 + dev->stats.rx_packets++;
4464 + dev->stats.rx_bytes += rx->skb->len;
4465 +
4466 ++ if (ieee80211_is_data(hdr->frame_control) &&
4467 ++ !is_multicast_ether_addr(hdr->addr1) &&
4468 ++ local->hw.conf.dynamic_ps_timeout > 0 && local->ps_sdata) {
4469 ++ mod_timer(&local->dynamic_ps_timer, jiffies +
4470 ++ msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
4471 ++ }
4472 ++
4473 + ieee80211_deliver_skb(rx);
4474 +
4475 + return RX_QUEUED;
4476 +diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
4477 +index 1a3b650..2f181aa 100644
4478 +--- a/net/netfilter/xt_recent.c
4479 ++++ b/net/netfilter/xt_recent.c
4480 +@@ -260,7 +260,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
4481 + for (i = 0; i < e->nstamps; i++) {
4482 + if (info->seconds && time_after(time, e->stamps[i]))
4483 + continue;
4484 +- if (info->hit_count && ++hits >= info->hit_count) {
4485 ++ if (!info->hit_count || ++hits >= info->hit_count) {
4486 + ret = !ret;
4487 + break;
4488 + }
4489 +diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
4490 +index 9c5a19d..2370ab4 100644
4491 +--- a/net/sunrpc/auth_gss/auth_gss.c
4492 ++++ b/net/sunrpc/auth_gss/auth_gss.c
4493 +@@ -1273,9 +1273,8 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
4494 + rqstp->rq_release_snd_buf = priv_release_snd_buf;
4495 + return 0;
4496 + out_free:
4497 +- for (i--; i >= 0; i--) {
4498 +- __free_page(rqstp->rq_enc_pages[i]);
4499 +- }
4500 ++ rqstp->rq_enc_pages_num = i;
4501 ++ priv_release_snd_buf(rqstp);
4502 + out:
4503 + return -EAGAIN;
4504 + }
4505 +diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
4506 +index 49278f8..27a2378 100644
4507 +--- a/net/sunrpc/rpc_pipe.c
4508 ++++ b/net/sunrpc/rpc_pipe.c
4509 +@@ -587,6 +587,8 @@ static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
4510 + struct dentry *dentry;
4511 +
4512 + dentry = __rpc_lookup_create(parent, name);
4513 ++ if (IS_ERR(dentry))
4514 ++ return dentry;
4515 + if (dentry->d_inode == NULL)
4516 + return dentry;
4517 + dput(dentry);
4518 +diff --git a/security/min_addr.c b/security/min_addr.c
4519 +index c844eed..fc43c9d 100644
4520 +--- a/security/min_addr.c
4521 ++++ b/security/min_addr.c
4522 +@@ -33,6 +33,9 @@ int mmap_min_addr_handler(struct ctl_table *table, int write,
4523 + {
4524 + int ret;
4525 +
4526 ++ if (!capable(CAP_SYS_RAWIO))
4527 ++ return -EPERM;
4528 ++
4529 + ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
4530 +
4531 + update_mmap_min_addr();
4532 +diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
4533 +index 67ca440..e7efcef 100644
4534 +--- a/sound/pci/ac97/ac97_patch.c
4535 ++++ b/sound/pci/ac97/ac97_patch.c
4536 +@@ -1867,12 +1867,14 @@ static unsigned int ad1981_jacks_blacklist[] = {
4537 + 0x10140523, /* Thinkpad R40 */
4538 + 0x10140534, /* Thinkpad X31 */
4539 + 0x10140537, /* Thinkpad T41p */
4540 ++ 0x1014053e, /* Thinkpad R40e */
4541 + 0x10140554, /* Thinkpad T42p/R50p */
4542 + 0x10140567, /* Thinkpad T43p 2668-G7U */
4543 + 0x10140581, /* Thinkpad X41-2527 */
4544 + 0x10280160, /* Dell Dimension 2400 */
4545 + 0x104380b0, /* Asus A7V8X-MX */
4546 + 0x11790241, /* Toshiba Satellite A-15 S127 */
4547 ++ 0x1179ff10, /* Toshiba P500 */
4548 + 0x144dc01a, /* Samsung NP-X20C004/SEG */
4549 + 0 /* end */
4550 + };
4551 +diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
4552 +index ddcd4a9..78c8736 100644
4553 +--- a/sound/pci/cmipci.c
4554 ++++ b/sound/pci/cmipci.c
4555 +@@ -941,13 +941,21 @@ static snd_pcm_uframes_t snd_cmipci_pcm_pointer(struct cmipci *cm, struct cmipci
4556 + struct snd_pcm_substream *substream)
4557 + {
4558 + size_t ptr;
4559 +- unsigned int reg;
4560 ++ unsigned int reg, rem, tries;
4561 ++
4562 + if (!rec->running)
4563 + return 0;
4564 + #if 1 // this seems better..
4565 + reg = rec->ch ? CM_REG_CH1_FRAME2 : CM_REG_CH0_FRAME2;
4566 +- ptr = rec->dma_size - (snd_cmipci_read_w(cm, reg) + 1);
4567 +- ptr >>= rec->shift;
4568 ++ for (tries = 0; tries < 3; tries++) {
4569 ++ rem = snd_cmipci_read_w(cm, reg);
4570 ++ if (rem < rec->dma_size)
4571 ++ goto ok;
4572 ++ }
4573 ++ printk(KERN_ERR "cmipci: invalid PCM pointer: %#x\n", rem);
4574 ++ return SNDRV_PCM_POS_XRUN;
4575 ++ok:
4576 ++ ptr = (rec->dma_size - (rem + 1)) >> rec->shift;
4577 + #else
4578 + reg = rec->ch ? CM_REG_CH1_FRAME1 : CM_REG_CH0_FRAME1;
4579 + ptr = snd_cmipci_read(cm, reg) - rec->offset;
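
The cmipci hunk above hardens the PCM pointer read: the frame register is re-read up to three times, and a value that is still out of range afterwards is reported as an XRUN instead of being folded into a bogus pointer. A minimal standalone model, with the register read simulated and SNDRV_PCM_POS_XRUN replaced by a local sentinel:

#include <stdio.h>

#define POS_XRUN ((unsigned long)-1) /* stand-in for SNDRV_PCM_POS_XRUN */

/* Simulates snd_cmipci_read_w() returning garbage twice before a sane
 * value, as can happen while the hardware updates the register. */
static unsigned int read_frame_reg(void)
{
	static int calls;
	return ++calls < 3 ? 0xffff : 0x100;
}

static unsigned long pcm_pointer(unsigned long dma_size, unsigned int shift)
{
	unsigned int rem, tries;

	for (tries = 0; tries < 3; tries++) {
		rem = read_frame_reg();
		if (rem < dma_size) /* plausible value: convert to frames */
			return (dma_size - (rem + 1)) >> shift;
	}
	return POS_XRUN; /* still implausible: report an overrun */
}

int main(void)
{
	printf("%lu\n", pcm_pointer(0x1000, 1)); /* 1919: recovered on 3rd read */
	return 0;
}
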
4580 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4581 +index 673cec3..dd3a8e7 100644
4582 +--- a/sound/pci/hda/hda_intel.c
4583 ++++ b/sound/pci/hda/hda_intel.c
4584 +@@ -2228,8 +2228,10 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
4585 + SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
4586 + SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
4587 + SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
4588 ++ SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
4589 + SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
4590 + SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
4591 ++ SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
4592 + {}
4593 + };
4594 +
4595 +@@ -2317,6 +2319,7 @@ static void __devinit check_probe_mask(struct azx *chip, int dev)
4596 + static struct snd_pci_quirk msi_white_list[] __devinitdata = {
4597 + SND_PCI_QUIRK(0x103c, 0x30f7, "HP Pavilion dv4t-1300", 1),
4598 + SND_PCI_QUIRK(0x103c, 0x3607, "HP Compa CQ40", 1),
4599 ++ SND_PCI_QUIRK(0x107b, 0x0380, "Gateway M-6866", 1),
4600 + {}
4601 + };
4602 +
4603 +@@ -2333,6 +2336,13 @@ static void __devinit check_msi(struct azx *chip)
4604 + "hda_intel: msi for device %04x:%04x set to %d\n",
4605 + q->subvendor, q->subdevice, q->value);
4606 + chip->msi = q->value;
4607 ++ return;
4608 ++ }
4609 ++
4610 ++ /* NVidia chipsets seem to cause troubles with MSI */
4611 ++ if (chip->driver_type == AZX_DRIVER_NVIDIA) {
4612 ++ printk(KERN_INFO "hda_intel: Disable MSI for Nvidia chipset\n");
4613 ++ chip->msi = 0;
4614 + }
4615 + }
4616 +
4617 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
4618 +index 905859d..79afb46 100644
4619 +--- a/sound/pci/hda/patch_conexant.c
4620 ++++ b/sound/pci/hda/patch_conexant.c
4621 +@@ -1581,6 +1581,21 @@ static int patch_cxt5047(struct hda_codec *codec)
4622 + #endif
4623 + }
4624 + spec->vmaster_nid = 0x13;
4625 ++
4626 ++ switch (codec->subsystem_id >> 16) {
4627 ++ case 0x103c:
4628 ++ /* HP laptops have really bad sound over 0 dB on NID 0x10.
4629 ++ * Fix max PCM level to 0 dB (originally it has 0x1e steps
4630 ++ * with 0 dB offset 0x17)
4631 ++ */
4632 ++ snd_hda_override_amp_caps(codec, 0x10, HDA_INPUT,
4633 ++ (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
4634 ++ (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
4635 ++ (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
4636 ++ (1 << AC_AMPCAP_MUTE_SHIFT));
4637 ++ break;
4638 ++ }
4639 ++
4640 + return 0;
4641 + }
4642 +
4643 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4644 +index 911dd1f..26c70d6 100644
4645 +--- a/sound/pci/hda/patch_realtek.c
4646 ++++ b/sound/pci/hda/patch_realtek.c
4647 +@@ -400,6 +400,8 @@ static int alc_mux_enum_info(struct snd_kcontrol *kcontrol,
4648 + unsigned int mux_idx = snd_ctl_get_ioffidx(kcontrol, &uinfo->id);
4649 + if (mux_idx >= spec->num_mux_defs)
4650 + mux_idx = 0;
4651 ++ if (!spec->input_mux[mux_idx].num_items && mux_idx > 0)
4652 ++ mux_idx = 0;
4653 + return snd_hda_input_mux_info(&spec->input_mux[mux_idx], uinfo);
4654 + }
4655 +
4656 +@@ -428,6 +430,8 @@ static int alc_mux_enum_put(struct snd_kcontrol *kcontrol,
4657 +
4658 + mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx;
4659 + imux = &spec->input_mux[mux_idx];
4660 ++ if (!imux->num_items && mux_idx > 0)
4661 ++ imux = &spec->input_mux[0];
4662 +
4663 + type = get_wcaps_type(get_wcaps(codec, nid));
4664 + if (type == AC_WID_AUD_MIX) {
4665 +@@ -6248,6 +6252,7 @@ static const char *alc260_models[ALC260_MODEL_LAST] = {
4666 +
4667 + static struct snd_pci_quirk alc260_cfg_tbl[] = {
4668 + SND_PCI_QUIRK(0x1025, 0x007b, "Acer C20x", ALC260_ACER),
4669 ++ SND_PCI_QUIRK(0x1025, 0x007f, "Acer", ALC260_WILL),
4670 + SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_ACER),
4671 + SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FAVORIT100),
4672 + SND_PCI_QUIRK(0x103c, 0x2808, "HP d5700", ALC260_HP_3013),
4673 +@@ -6277,7 +6282,7 @@ static struct alc_config_preset alc260_presets[] = {
4674 + .num_dacs = ARRAY_SIZE(alc260_dac_nids),
4675 + .dac_nids = alc260_dac_nids,
4676 + .num_adc_nids = ARRAY_SIZE(alc260_dual_adc_nids),
4677 +- .adc_nids = alc260_adc_nids,
4678 ++ .adc_nids = alc260_dual_adc_nids,
4679 + .num_channel_mode = ARRAY_SIZE(alc260_modes),
4680 + .channel_mode = alc260_modes,
4681 + .input_mux = &alc260_capture_source,
4682 +@@ -8917,7 +8922,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
4683 + SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC889_INTEL),
4684 + SND_PCI_QUIRK(0x8086, 0x0021, "Intel IbexPeak", ALC889A_INTEL),
4685 + SND_PCI_QUIRK(0x8086, 0x3b56, "Intel IbexPeak", ALC889A_INTEL),
4686 +- SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch),
4687 ++ SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC882_6ST_DIG),
4688 +
4689 + {}
4690 + };
4691 +@@ -9743,6 +9748,8 @@ static void alc882_auto_init_input_src(struct hda_codec *codec)
4692 + continue;
4693 + mux_idx = c >= spec->num_mux_defs ? 0 : c;
4694 + imux = &spec->input_mux[mux_idx];
4695 ++ if (!imux->num_items && mux_idx > 0)
4696 ++ imux = &spec->input_mux[0];
4697 + for (idx = 0; idx < conns; idx++) {
4698 + /* if the current connection is the selected one,
4699 + * unmute it as default - otherwise mute it
4700 +diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
4701 +index bdd3b7e..bd498d4 100644
4702 +--- a/tools/perf/Documentation/Makefile
4703 ++++ b/tools/perf/Documentation/Makefile
4704 +@@ -24,7 +24,10 @@ DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT))
4705 + DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT))
4706 + DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT))
4707 +
4708 ++# Make the path relative to DESTDIR, not prefix
4709 ++ifndef DESTDIR
4710 + prefix?=$(HOME)
4711 ++endif
4712 + bindir?=$(prefix)/bin
4713 + htmldir?=$(prefix)/share/doc/perf-doc
4714 + pdfdir?=$(prefix)/share/doc/perf-doc
4715 +@@ -32,7 +35,6 @@ mandir?=$(prefix)/share/man
4716 + man1dir=$(mandir)/man1
4717 + man5dir=$(mandir)/man5
4718 + man7dir=$(mandir)/man7
4719 +-# DESTDIR=
4720 +
4721 + ASCIIDOC=asciidoc
4722 + ASCIIDOC_EXTRA = --unsafe
4723 +diff --git a/tools/perf/Makefile b/tools/perf/Makefile
4724 +index 7e190d5..719d028 100644
4725 +--- a/tools/perf/Makefile
4726 ++++ b/tools/perf/Makefile
4727 +@@ -218,7 +218,10 @@ STRIP ?= strip
4728 + # runtime figures out where they are based on the path to the executable.
4729 + # This can help installing the suite in a relocatable way.
4730 +
4731 ++# Make the path relative to DESTDIR, not to prefix
4732 ++ifndef DESTDIR
4733 + prefix = $(HOME)
4734 ++endif
4735 + bindir_relative = bin
4736 + bindir = $(prefix)/$(bindir_relative)
4737 + mandir = share/man
4738 +@@ -235,7 +238,6 @@ sysconfdir = $(prefix)/etc
4739 + ETC_PERFCONFIG = etc/perfconfig
4740 + endif
4741 + lib = lib
4742 +-# DESTDIR=
4743 +
4744 + export prefix bindir sharedir sysconfdir
4745 +
4746
4747 Deleted: genpatches-2.6/trunk/2.6.32/2400_5906-transmit-hang-fix.patch
4748 ===================================================================
4749 --- genpatches-2.6/trunk/2.6.32/2400_5906-transmit-hang-fix.patch 2010-04-01 19:40:34 UTC (rev 1691)
4750 +++ genpatches-2.6/trunk/2.6.32/2400_5906-transmit-hang-fix.patch 2010-04-02 12:00:14 UTC (rev 1692)
4751 @@ -1,84 +0,0 @@
4752 -This is a resubmitted backport of commit 92c6b8d16a36df3f28b2537bed2a56491fb08f11 to kernel version 2.6.32. The Gentoo bug report can be found at https://bugs.gentoo.org/show_bug.cgi?id=301091. Thanks to Matt Carlson for his assistance and for working with me to fix a regression caused by the initial patch. The original description is as follows:
4753 -
4754 -The 5906 has trouble with fragments that are less than 8 bytes in size.
4755 -This patch works around the problem by pivoting the 5906's transmit routine to tg3_start_xmit_dma_bug() and introducing a new SHORT_DMA_BUG flag that enables the code to detect and react to the problematic condition (a condensed sketch of this check follows the deleted patch below).
4756 -
4757 -Signed-off-by: Mike Pagano <mpagano@g.o>
4758 ----
4759 ---- a/drivers/net/tg3.c 2010-02-02 11:09:54.000000000 -0500
4760 -+++ b/drivers/net/tg3.c 2010-02-17 12:54:08.000000000 -0500
4761 -@@ -5392,7 +5392,7 @@ static netdev_tx_t tg3_start_xmit_dma_bu
4762 - mss = 0;
4763 - if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4764 - struct iphdr *iph;
4765 -- int tcp_opt_len, ip_tcp_len, hdr_len;
4766 -+ u32 tcp_opt_len, ip_tcp_len, hdr_len;
4767 -
4768 - if (skb_header_cloned(skb) &&
4769 - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4770 -@@ -5423,8 +5423,10 @@ static netdev_tx_t tg3_start_xmit_dma_bu
4771 - IPPROTO_TCP,
4772 - 0);
4773 -
4774 -- if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4775 -- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4776 -+ if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
4777 -+ mss |= hdr_len << 9;
4778 -+ else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
4779 -+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4780 - if (tcp_opt_len || iph->ihl > 5) {
4781 - int tsflags;
4782 -
4783 -@@ -5459,6 +5461,9 @@ static netdev_tx_t tg3_start_xmit_dma_bu
4784 -
4785 - would_hit_hwbug = 0;
4786 -
4787 -+ if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
4788 -+ would_hit_hwbug = 1;
4789 -+
4790 - if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4791 - would_hit_hwbug = 1;
4792 - else if (tg3_4g_overflow_test(mapping, len))
4793 -@@ -5482,6 +5487,10 @@ static netdev_tx_t tg3_start_xmit_dma_bu
4794 -
4795 - tnapi->tx_buffers[entry].skb = NULL;
4796 -
4797 -+ if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
4798 -+ len <= 8)
4799 -+ would_hit_hwbug = 1;
4800 -+
4801 - if (tg3_4g_overflow_test(mapping, len))
4802 - would_hit_hwbug = 1;
4803 -
4804 -@@ -12608,6 +12617,9 @@ static int __devinit tg3_get_invariants(
4805 - }
4806 - }
4807 -
4808 -+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
4809 -+ tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
4810 -+
4811 - tp->irq_max = 1;
4812 -
4813 - #ifdef TG3_NAPI
4814 -@@ -13975,8 +13987,7 @@ static int __devinit tg3_init_one(struct
4815 - goto err_out_iounmap;
4816 - }
4817 -
4818 -- if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
4819 -- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
4820 -+ if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
4821 - dev->netdev_ops = &tg3_netdev_ops;
4822 - else
4823 - dev->netdev_ops = &tg3_netdev_ops_dma_bug;
4824 ---- a/drivers/net/tg3.h 2010-02-17 12:44:57.000000000 -0500
4825 -+++ b/drivers/net/tg3.h 2010-02-17 12:46:02.000000000 -0500
4826 -@@ -2759,6 +2759,9 @@ struct tg3 {
4827 - #define TG3_FLG3_TOGGLE_10_100_L1PLLPD 0x00008000
4828 - #define TG3_FLG3_PHY_IS_FET 0x00010000
4829 - #define TG3_FLG3_ENABLE_RSS 0x00020000
4830 -+#define TG3_FLG3_4G_DMA_BNDRY_BUG 0x00080000
4831 -+#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000
4832 -+#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
4833 -
4834 - struct timer_list timer;
4835 - u16 timer_counter;
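
The check that the deleted patch added to the tg3 transmit path reduces to the predicate below; the flag value follows the tg3.h hunk above, with struct tg3 simplified to the single flags word the check reads:

#include <stdio.h>

#define TG3_FLG3_SHORT_DMA_BUG 0x00200000u /* value from the tg3.h hunk */

struct tg3_flags {
	unsigned int tg3_flags3; /* simplified stand-in for struct tg3 */
};

/* Fragments of 8 bytes or less trip the 5906 hardware bug, so the
 * transmit path must mark them as would_hit_hwbug and take the
 * workaround route. */
static int short_frag_hits_hwbug(const struct tg3_flags *tp, unsigned int len)
{
	return (tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8;
}

int main(void)
{
	struct tg3_flags tp = { .tg3_flags3 = TG3_FLG3_SHORT_DMA_BUG };

	printf("%d\n", short_frag_hits_hwbug(&tp, 4));  /* 1: too short */
	printf("%d\n", short_frag_hits_hwbug(&tp, 64)); /* 0: safe length */
	return 0;
}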