From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1874 - genpatches-2.6/trunk/2.6.37
Date: Sat, 26 Feb 2011 00:28:30
Message-Id: 20110226002752.2FE4B20054@flycatcher.gentoo.org
1 Author: mpagano
2 Date: 2011-02-26 00:27:50 +0000 (Sat, 26 Feb 2011)
3 New Revision: 1874
4
5 Added:
6 genpatches-2.6/trunk/2.6.37/1000_linux-2.6.37.1.patch
7 genpatches-2.6/trunk/2.6.37/1001_linux-2.6.37.2.patch
8 Modified:
9 genpatches-2.6/trunk/2.6.37/0000_README
10 Log:
11 Linux patches 2.6.37.1 and 2.6.37.2
12
13 Modified: genpatches-2.6/trunk/2.6.37/0000_README
14 ===================================================================
15 --- genpatches-2.6/trunk/2.6.37/0000_README 2011-02-18 15:49:52 UTC (rev 1873)
16 +++ genpatches-2.6/trunk/2.6.37/0000_README 2011-02-26 00:27:50 UTC (rev 1874)
17 @@ -39,6 +39,14 @@
18 Individual Patch Descriptions:
19 --------------------------------------------------------------------------
20
21 +Patch: 1000_linux-2.6.37.1.patch
22 +From: http://www.kernel.org
23 +Desc: Linux 2.6.37.1
24 +
25 +Patch: 1001_linux-2.6.37.2.patch
26 +From: http://www.kernel.org
27 +Desc: Linux 2.6.37.2
28 +
29 Patch: 2400_iwlwifi-fw-reload-rf-fix.patch
30 From: http://www.spinics.net/lists/linux-wireless/msg61225.html
31 Desc: Do not reload fw if WiMAX own the RF
32
33 Added: genpatches-2.6/trunk/2.6.37/1000_linux-2.6.37.1.patch
34 ===================================================================
35 --- genpatches-2.6/trunk/2.6.37/1000_linux-2.6.37.1.patch (rev 0)
36 +++ genpatches-2.6/trunk/2.6.37/1000_linux-2.6.37.1.patch 2011-02-26 00:27:50 UTC (rev 1874)
37 @@ -0,0 +1,10709 @@
38 +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
39 +index 01ece1b..596bb3c 100644
40 +--- a/Documentation/kernel-parameters.txt
41 ++++ b/Documentation/kernel-parameters.txt
42 +@@ -884,6 +884,7 @@ and is between 256 and 4096 characters. It is defined in the file
43 + controller
44 + i8042.nopnp [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
45 + controllers
46 ++	i8042.notimeout	[HW] Ignore timeout condition signalled by controller
47 + i8042.reset [HW] Reset the controller during init and cleanup
48 + i8042.unlock [HW] Unlock (ignore) the keylock
49 +
50 +diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
51 +index 6bd82d2..7306b8e 100644
52 +--- a/arch/arm/kernel/head.S
53 ++++ b/arch/arm/kernel/head.S
54 +@@ -91,6 +91,11 @@ ENTRY(stext)
55 + movs r8, r5 @ invalid machine (r5=0)?
56 + THUMB( it eq ) @ force fixup-able long branch encoding
57 + beq __error_a @ yes, error 'a'
58 ++
59 ++ /*
60 ++ * r1 = machine no, r2 = atags,
61 ++ * r8 = machinfo, r9 = cpuid, r10 = procinfo
62 ++ */
63 + bl __vet_atags
64 + #ifdef CONFIG_SMP_ON_UP
65 + bl __fixup_smp
66 +@@ -387,34 +392,32 @@ ENDPROC(__turn_mmu_on)
67 +
68 + #ifdef CONFIG_SMP_ON_UP
69 + __fixup_smp:
70 +- mov r7, #0x00070000
71 +- orr r6, r7, #0xff000000 @ mask 0xff070000
72 +- orr r7, r7, #0x41000000 @ val 0x41070000
73 +- and r0, r9, r6
74 +- teq r0, r7 @ ARM CPU and ARMv6/v7?
75 ++ and r3, r9, #0x000f0000 @ architecture version
76 ++ teq r3, #0x000f0000 @ CPU ID supported?
77 + bne __fixup_smp_on_up @ no, assume UP
78 +
79 +- orr r6, r6, #0x0000ff00
80 +- orr r6, r6, #0x000000f0 @ mask 0xff07fff0
81 +- orr r7, r7, #0x0000b000
82 +- orr r7, r7, #0x00000020 @ val 0x4107b020
83 +- and r0, r9, r6
84 +- teq r0, r7 @ ARM 11MPCore?
85 ++ bic r3, r9, #0x00ff0000
86 ++ bic r3, r3, #0x0000000f @ mask 0xff00fff0
87 ++ mov r4, #0x41000000
88 ++ orr r4, r4, #0x0000b000
89 ++ orr r4, r4, #0x00000020 @ val 0x4100b020
90 ++ teq r3, r4 @ ARM 11MPCore?
91 + moveq pc, lr @ yes, assume SMP
92 +
93 + mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
94 +- tst r0, #1 << 31
95 +- movne pc, lr @ bit 31 => SMP
96 ++ and r0, r0, #0xc0000000 @ multiprocessing extensions and
97 ++ teq r0, #0x80000000 @ not part of a uniprocessor system?
98 ++ moveq pc, lr @ yes, assume SMP
99 +
100 + __fixup_smp_on_up:
101 + adr r0, 1f
102 +- ldmia r0, {r3, r6, r7}
103 ++ ldmia r0, {r3 - r5}
104 + sub r3, r0, r3
105 +- add r6, r6, r3
106 +- add r7, r7, r3
107 +-2: cmp r6, r7
108 +- ldmia r6!, {r0, r4}
109 +- strlo r4, [r0, r3]
110 ++ add r4, r4, r3
111 ++ add r5, r5, r3
112 ++2: cmp r4, r5
113 ++ ldmia r4!, {r0, r6}
114 ++ strlo r6, [r0, r3]
115 + blo 2b
116 + mov pc, lr
117 + ENDPROC(__fixup_smp)
118 +diff --git a/arch/arm/mach-s5pv310/cpu.c b/arch/arm/mach-s5pv310/cpu.c
119 +index 82ce4aa..b2a37d0 100644
120 +--- a/arch/arm/mach-s5pv310/cpu.c
121 ++++ b/arch/arm/mach-s5pv310/cpu.c
122 +@@ -168,7 +168,7 @@ static int __init s5pv310_l2x0_cache_init(void)
123 + __raw_writel(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN,
124 + S5P_VA_L2CC + L2X0_POWER_CTRL);
125 +
126 +- l2x0_init(S5P_VA_L2CC, 0x7C070001, 0xC200ffff);
127 ++ l2x0_init(S5P_VA_L2CC, 0x7C470001, 0xC200ffff);
128 +
129 + return 0;
130 + }
131 +diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
132 +index 5164069..cddd684 100644
133 +--- a/arch/arm/mm/init.c
134 ++++ b/arch/arm/mm/init.c
135 +@@ -297,6 +297,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
136 + memblock_reserve(__pa(_stext), _end - _stext);
137 + #endif
138 + #ifdef CONFIG_BLK_DEV_INITRD
139 ++ if (phys_initrd_size &&
140 ++ memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
141 ++ pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
142 ++ phys_initrd_start, phys_initrd_size);
143 ++ phys_initrd_start = phys_initrd_size = 0;
144 ++ }
145 + if (phys_initrd_size) {
146 + memblock_reserve(phys_initrd_start, phys_initrd_size);
147 +
148 +diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
149 +index 8aa9744..2b66391 100644
150 +--- a/arch/arm/oprofile/common.c
151 ++++ b/arch/arm/oprofile/common.c
152 +@@ -10,8 +10,6 @@
153 + */
154 +
155 + #include <linux/cpumask.h>
156 +-#include <linux/err.h>
157 +-#include <linux/errno.h>
158 + #include <linux/init.h>
159 + #include <linux/mutex.h>
160 + #include <linux/oprofile.h>
161 +@@ -46,6 +44,7 @@ char *op_name_from_perf_id(void)
162 + return NULL;
163 + }
164 + }
165 ++#endif
166 +
167 + static int report_trace(struct stackframe *frame, void *d)
168 + {
169 +@@ -111,6 +110,7 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
170 +
171 + int __init oprofile_arch_init(struct oprofile_operations *ops)
172 + {
173 ++ /* provide backtrace support also in timer mode: */
174 + ops->backtrace = arm_backtrace;
175 +
176 + return oprofile_perf_init(ops);
177 +@@ -120,11 +120,3 @@ void __exit oprofile_arch_exit(void)
178 + {
179 + oprofile_perf_exit();
180 + }
181 +-#else
182 +-int __init oprofile_arch_init(struct oprofile_operations *ops)
183 +-{
184 +- pr_info("oprofile: hardware counters not available\n");
185 +- return -ENODEV;
186 +-}
187 +-void __exit oprofile_arch_exit(void) {}
188 +-#endif /* CONFIG_HW_PERF_EVENTS */
189 +diff --git a/arch/avr32/include/asm/syscalls.h b/arch/avr32/include/asm/syscalls.h
190 +index ab608b7..730a461 100644
191 +--- a/arch/avr32/include/asm/syscalls.h
192 ++++ b/arch/avr32/include/asm/syscalls.h
193 +@@ -16,18 +16,9 @@
194 + #include <linux/signal.h>
195 +
196 + /* kernel/process.c */
197 +-asmlinkage int sys_fork(struct pt_regs *);
198 + asmlinkage int sys_clone(unsigned long, unsigned long,
199 + unsigned long, unsigned long,
200 + struct pt_regs *);
201 +-asmlinkage int sys_vfork(struct pt_regs *);
202 +-asmlinkage int sys_execve(const char __user *, char __user *__user *,
203 +- char __user *__user *, struct pt_regs *);
204 +-
205 +-/* kernel/signal.c */
206 +-asmlinkage int sys_sigaltstack(const stack_t __user *, stack_t __user *,
207 +- struct pt_regs *);
208 +-asmlinkage int sys_rt_sigreturn(struct pt_regs *);
209 +
210 + /* mm/cache.c */
211 + asmlinkage int sys_cacheflush(int, void __user *, size_t);
212 +diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
213 +index df971fa..4896ed0 100644
214 +--- a/arch/parisc/kernel/firmware.c
215 ++++ b/arch/parisc/kernel/firmware.c
216 +@@ -1126,15 +1126,13 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
217 + unsigned int i;
218 + unsigned long flags;
219 +
220 +- for (i = 0; i < count && i < 79;) {
221 ++ for (i = 0; i < count;) {
222 + switch(str[i]) {
223 + case '\n':
224 + iodc_dbuf[i+0] = '\r';
225 + iodc_dbuf[i+1] = '\n';
226 + i += 2;
227 + goto print;
228 +- case '\b': /* BS */
229 +- i--; /* overwrite last */
230 + default:
231 + iodc_dbuf[i] = str[i];
232 + i++;
233 +@@ -1142,15 +1140,6 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
234 + }
235 + }
236 +
237 +- /* if we're at the end of line, and not already inserting a newline,
238 +- * insert one anyway. iodc console doesn't claim to support >79 char
239 +- * lines. don't account for this in the return value.
240 +- */
241 +- if (i == 79 && iodc_dbuf[i-1] != '\n') {
242 +- iodc_dbuf[i+0] = '\r';
243 +- iodc_dbuf[i+1] = '\n';
244 +- }
245 +-
246 + print:
247 + spin_lock_irqsave(&pdc_lock, flags);
248 + real32_call(PAGE0->mem_cons.iodc_io,
249 +diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
250 +index 2bbecbb..69422eb 100644
251 +--- a/arch/powerpc/boot/dts/p1022ds.dts
252 ++++ b/arch/powerpc/boot/dts/p1022ds.dts
253 +@@ -291,13 +291,13 @@
254 + ranges = <0x0 0xc100 0x200>;
255 + cell-index = <1>;
256 + dma00: dma-channel@0 {
257 +- compatible = "fsl,eloplus-dma-channel";
258 ++ compatible = "fsl,ssi-dma-channel";
259 + reg = <0x0 0x80>;
260 + cell-index = <0>;
261 + interrupts = <76 2>;
262 + };
263 + dma01: dma-channel@80 {
264 +- compatible = "fsl,eloplus-dma-channel";
265 ++ compatible = "fsl,ssi-dma-channel";
266 + reg = <0x80 0x80>;
267 + cell-index = <1>;
268 + interrupts = <77 2>;
269 +diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
270 +index 55cba4a..f8cd9fb 100644
271 +--- a/arch/powerpc/kernel/cpu_setup_6xx.S
272 ++++ b/arch/powerpc/kernel/cpu_setup_6xx.S
273 +@@ -18,7 +18,7 @@
274 + #include <asm/mmu.h>
275 +
276 + _GLOBAL(__setup_cpu_603)
277 +- mflr r4
278 ++ mflr r5
279 + BEGIN_MMU_FTR_SECTION
280 + li r10,0
281 + mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */
282 +@@ -27,60 +27,60 @@ BEGIN_FTR_SECTION
283 + bl __init_fpu_registers
284 + END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
285 + bl setup_common_caches
286 +- mtlr r4
287 ++ mtlr r5
288 + blr
289 + _GLOBAL(__setup_cpu_604)
290 +- mflr r4
291 ++ mflr r5
292 + bl setup_common_caches
293 + bl setup_604_hid0
294 +- mtlr r4
295 ++ mtlr r5
296 + blr
297 + _GLOBAL(__setup_cpu_750)
298 +- mflr r4
299 ++ mflr r5
300 + bl __init_fpu_registers
301 + bl setup_common_caches
302 + bl setup_750_7400_hid0
303 +- mtlr r4
304 ++ mtlr r5
305 + blr
306 + _GLOBAL(__setup_cpu_750cx)
307 +- mflr r4
308 ++ mflr r5
309 + bl __init_fpu_registers
310 + bl setup_common_caches
311 + bl setup_750_7400_hid0
312 + bl setup_750cx
313 +- mtlr r4
314 ++ mtlr r5
315 + blr
316 + _GLOBAL(__setup_cpu_750fx)
317 +- mflr r4
318 ++ mflr r5
319 + bl __init_fpu_registers
320 + bl setup_common_caches
321 + bl setup_750_7400_hid0
322 + bl setup_750fx
323 +- mtlr r4
324 ++ mtlr r5
325 + blr
326 + _GLOBAL(__setup_cpu_7400)
327 +- mflr r4
328 ++ mflr r5
329 + bl __init_fpu_registers
330 + bl setup_7400_workarounds
331 + bl setup_common_caches
332 + bl setup_750_7400_hid0
333 +- mtlr r4
334 ++ mtlr r5
335 + blr
336 + _GLOBAL(__setup_cpu_7410)
337 +- mflr r4
338 ++ mflr r5
339 + bl __init_fpu_registers
340 + bl setup_7410_workarounds
341 + bl setup_common_caches
342 + bl setup_750_7400_hid0
343 + li r3,0
344 + mtspr SPRN_L2CR2,r3
345 +- mtlr r4
346 ++ mtlr r5
347 + blr
348 + _GLOBAL(__setup_cpu_745x)
349 +- mflr r4
350 ++ mflr r5
351 + bl setup_common_caches
352 + bl setup_745x_specifics
353 +- mtlr r4
354 ++ mtlr r5
355 + blr
356 +
357 + /* Enable caches for 603's, 604, 750 & 7400 */
358 +@@ -194,10 +194,10 @@ setup_750cx:
359 + cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
360 + cror 4*cr0+eq,4*cr0+eq,4*cr2+eq
361 + bnelr
362 +- lwz r6,CPU_SPEC_FEATURES(r5)
363 ++ lwz r6,CPU_SPEC_FEATURES(r4)
364 + li r7,CPU_FTR_CAN_NAP
365 + andc r6,r6,r7
366 +- stw r6,CPU_SPEC_FEATURES(r5)
367 ++ stw r6,CPU_SPEC_FEATURES(r4)
368 + blr
369 +
370 + /* 750fx specific
371 +@@ -225,12 +225,12 @@ BEGIN_FTR_SECTION
372 + andis. r11,r11,L3CR_L3E@h
373 + beq 1f
374 + END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
375 +- lwz r6,CPU_SPEC_FEATURES(r5)
376 ++ lwz r6,CPU_SPEC_FEATURES(r4)
377 + andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
378 + beq 1f
379 + li r7,CPU_FTR_CAN_NAP
380 + andc r6,r6,r7
381 +- stw r6,CPU_SPEC_FEATURES(r5)
382 ++ stw r6,CPU_SPEC_FEATURES(r4)
383 + 1:
384 + mfspr r11,SPRN_HID0
385 +
386 +diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
387 +index 74505b2..c33210a 100644
388 +--- a/arch/powerpc/mm/numa.c
389 ++++ b/arch/powerpc/mm/numa.c
390 +@@ -181,7 +181,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
391 + dbg("removing cpu %lu from node %d\n", cpu, node);
392 +
393 + if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
394 +- cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
395 ++ cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
396 + } else {
397 + printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
398 + cpu, node);
399 +diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
400 +index f129040..8323f14 100644
401 +--- a/arch/powerpc/platforms/pseries/lpar.c
402 ++++ b/arch/powerpc/platforms/pseries/lpar.c
403 +@@ -701,6 +701,13 @@ EXPORT_SYMBOL(arch_free_page);
404 + /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
405 + extern long hcall_tracepoint_refcount;
406 +
407 ++/*
408 ++ * Since the tracing code might execute hcalls we need to guard against
409 ++ * recursion. One example of this are spinlocks calling H_YIELD on
410 ++ * shared processor partitions.
411 ++ */
412 ++static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
413 ++
414 + void hcall_tracepoint_regfunc(void)
415 + {
416 + hcall_tracepoint_refcount++;
417 +@@ -713,12 +720,42 @@ void hcall_tracepoint_unregfunc(void)
418 +
419 + void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
420 + {
421 ++ unsigned long flags;
422 ++ unsigned int *depth;
423 ++
424 ++ local_irq_save(flags);
425 ++
426 ++ depth = &__get_cpu_var(hcall_trace_depth);
427 ++
428 ++ if (*depth)
429 ++ goto out;
430 ++
431 ++ (*depth)++;
432 + trace_hcall_entry(opcode, args);
433 ++ (*depth)--;
434 ++
435 ++out:
436 ++ local_irq_restore(flags);
437 + }
438 +
439 + void __trace_hcall_exit(long opcode, unsigned long retval,
440 + unsigned long *retbuf)
441 + {
442 ++ unsigned long flags;
443 ++ unsigned int *depth;
444 ++
445 ++ local_irq_save(flags);
446 ++
447 ++ depth = &__get_cpu_var(hcall_trace_depth);
448 ++
449 ++ if (*depth)
450 ++ goto out;
451 ++
452 ++ (*depth)++;
453 + trace_hcall_exit(opcode, retval, retbuf);
454 ++ (*depth)--;
455 ++
456 ++out:
457 ++ local_irq_restore(flags);
458 + }
459 + #endif
460 +diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
461 +index 9725369..9f99bef 100644
462 +--- a/arch/powerpc/sysdev/fsl_rio.c
463 ++++ b/arch/powerpc/sysdev/fsl_rio.c
464 +@@ -973,7 +973,6 @@ fsl_rio_dbell_handler(int irq, void *dev_instance)
465 + if (dsr & DOORBELL_DSR_QFI) {
466 + pr_info("RIO: doorbell queue full\n");
467 + out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
468 +- goto out;
469 + }
470 +
471 + /* XXX Need to check/dispatch until queue empty */
472 +diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
473 +index b237d52..34ba197 100644
474 +--- a/arch/sh/include/asm/io.h
475 ++++ b/arch/sh/include/asm/io.h
476 +@@ -322,7 +322,15 @@ __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
477 + * mapping must be done by the PMB or by using page tables.
478 + */
479 + if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
480 +- if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
481 ++ u64 flags = pgprot_val(prot);
482 ++
483 ++ /*
484 ++ * Anything using the legacy PTEA space attributes needs
485 ++ * to be kicked down to page table mappings.
486 ++ */
487 ++ if (unlikely(flags & _PAGE_PCC_MASK))
488 ++ return NULL;
489 ++ if (unlikely(flags & _PAGE_CACHABLE))
490 + return (void __iomem *)P1SEGADDR(offset);
491 +
492 + return (void __iomem *)P2SEGADDR(offset);
493 +diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
494 +index 43528ec..68f3eff 100644
495 +--- a/arch/sh/include/asm/pgtable_32.h
496 ++++ b/arch/sh/include/asm/pgtable_32.h
497 +@@ -76,6 +76,10 @@
498 + /* Wrapper for extended mode pgprot twiddling */
499 + #define _PAGE_EXT(x) ((unsigned long long)(x) << 32)
500 +
501 ++#ifdef CONFIG_X2TLB
502 ++#define _PAGE_PCC_MASK 0x00000000 /* No legacy PTEA support */
503 ++#else
504 ++
505 + /* software: moves to PTEA.TC (Timing Control) */
506 + #define _PAGE_PCC_AREA5 0x00000000 /* use BSC registers for area5 */
507 + #define _PAGE_PCC_AREA6 0x80000000 /* use BSC registers for area6 */
508 +@@ -89,7 +93,8 @@
509 + #define _PAGE_PCC_ATR8 0x60000000 /* Attribute Memory space, 8 bit bus */
510 + #define _PAGE_PCC_ATR16 0x60000001 /* Attribute Memory space, 6 bit bus */
511 +
512 +-#ifndef CONFIG_X2TLB
513 ++#define _PAGE_PCC_MASK 0xe0000001
514 ++
515 + /* copy the ptea attributes */
516 + static inline unsigned long copy_ptea_attributes(unsigned long x)
517 + {
518 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
519 +index e330da2..57e823a 100644
520 +--- a/arch/x86/Kconfig
521 ++++ b/arch/x86/Kconfig
522 +@@ -2035,6 +2035,7 @@ config OLPC
523 + bool "One Laptop Per Child support"
524 + select GPIOLIB
525 + select OLPC_OPENFIRMWARE
526 ++ depends on !X86_64 && !X86_PAE
527 + ---help---
528 + Add support for detecting the unique features of the OLPC
529 + XO hardware.
530 +diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
531 +index 4a2d4e0..8b5393e 100644
532 +--- a/arch/x86/include/asm/mmu_context.h
533 ++++ b/arch/x86/include/asm/mmu_context.h
534 +@@ -36,8 +36,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
535 + unsigned cpu = smp_processor_id();
536 +
537 + if (likely(prev != next)) {
538 +- /* stop flush ipis for the previous mm */
539 +- cpumask_clear_cpu(cpu, mm_cpumask(prev));
540 + #ifdef CONFIG_SMP
541 + percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
542 + percpu_write(cpu_tlbstate.active_mm, next);
543 +@@ -47,6 +45,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
544 + /* Re-load page tables */
545 + load_cr3(next->pgd);
546 +
547 ++ /* stop flush ipis for the previous mm */
548 ++ cpumask_clear_cpu(cpu, mm_cpumask(prev));
549 ++
550 + /*
551 + * load the LDT, if the LDT is different:
552 + */
553 +diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
554 +index 42d412f..ce1d54c 100644
555 +--- a/arch/x86/include/asm/uv/uv_bau.h
556 ++++ b/arch/x86/include/asm/uv/uv_bau.h
557 +@@ -26,20 +26,22 @@
558 + * BAU_SB_DESCRIPTOR_BASE register, set 1 is located at BASE + 512,
559 + * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on.
560 + *
561 +- * We will use 31 sets, one for sending BAU messages from each of the 32
562 ++ * We will use one set for sending BAU messages from each of the
563 + * cpu's on the uvhub.
564 + *
565 + * TLB shootdown will use the first of the 8 descriptors of each set.
566 + * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set).
567 + */
568 +
569 ++#define MAX_CPUS_PER_UVHUB 64
570 ++#define MAX_CPUS_PER_SOCKET 32
571 ++#define UV_ADP_SIZE 64 /* hardware-provided max. */
572 ++#define UV_CPUS_PER_ACT_STATUS 32 /* hardware-provided max. */
573 + #define UV_ITEMS_PER_DESCRIPTOR 8
574 + /* the 'throttle' to prevent the hardware stay-busy bug */
575 + #define MAX_BAU_CONCURRENT 3
576 +-#define UV_CPUS_PER_ACT_STATUS 32
577 + #define UV_ACT_STATUS_MASK 0x3
578 + #define UV_ACT_STATUS_SIZE 2
579 +-#define UV_ADP_SIZE 32
580 + #define UV_DISTRIBUTION_SIZE 256
581 + #define UV_SW_ACK_NPENDING 8
582 + #define UV_NET_ENDPOINT_INTD 0x38
583 +@@ -100,7 +102,6 @@
584 + * number of destination side software ack resources
585 + */
586 + #define DEST_NUM_RESOURCES 8
587 +-#define MAX_CPUS_PER_NODE 32
588 + /*
589 + * completion statuses for sending a TLB flush message
590 + */
591 +diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
592 +index 01c0f3e..bebabec 100644
593 +--- a/arch/x86/kernel/cpu/mtrr/main.c
594 ++++ b/arch/x86/kernel/cpu/mtrr/main.c
595 +@@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
596 + }
597 +
598 + /*
599 +- * MTRR initialization for all AP's
600 ++ * Delayed MTRR initialization for all AP's
601 + */
602 + void mtrr_aps_init(void)
603 + {
604 + if (!use_intel())
605 + return;
606 +
607 ++ /*
608 ++ * Check if someone has requested the delay of AP MTRR initialization,
609 ++ * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
610 ++ * then we are done.
611 ++ */
612 ++ if (!mtrr_aps_delayed_init)
613 ++ return;
614 ++
615 + set_mtrr(~0U, 0, 0, 0);
616 + mtrr_aps_delayed_init = false;
617 + }
618 +diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
619 +index 83ec017..e430114 100644
620 +--- a/arch/x86/kernel/irq.c
621 ++++ b/arch/x86/kernel/irq.c
622 +@@ -357,7 +357,8 @@ void fixup_irqs(void)
623 + if (irr & (1 << (vector % 32))) {
624 + irq = __get_cpu_var(vector_irq)[vector];
625 +
626 +- data = irq_get_irq_data(irq);
627 ++ desc = irq_to_desc(irq);
628 ++ data = &desc->irq_data;
629 + raw_spin_lock(&desc->lock);
630 + if (data->chip->irq_retrigger)
631 + data->chip->irq_retrigger(data);
632 +diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
633 +index 57d1868..2502aaf 100644
634 +--- a/arch/x86/kernel/process.c
635 ++++ b/arch/x86/kernel/process.c
636 +@@ -97,21 +97,31 @@ void show_regs(struct pt_regs *regs)
637 +
638 + void show_regs_common(void)
639 + {
640 +- const char *board, *product;
641 ++ const char *vendor, *product, *board;
642 +
643 +- board = dmi_get_system_info(DMI_BOARD_NAME);
644 +- if (!board)
645 +- board = "";
646 ++ vendor = dmi_get_system_info(DMI_SYS_VENDOR);
647 ++ if (!vendor)
648 ++ vendor = "";
649 + product = dmi_get_system_info(DMI_PRODUCT_NAME);
650 + if (!product)
651 + product = "";
652 +
653 ++ /* Board Name is optional */
654 ++ board = dmi_get_system_info(DMI_BOARD_NAME);
655 ++
656 + printk(KERN_CONT "\n");
657 +- printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
658 ++ printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
659 + current->pid, current->comm, print_tainted(),
660 + init_utsname()->release,
661 + (int)strcspn(init_utsname()->version, " "),
662 +- init_utsname()->version, board, product);
663 ++ init_utsname()->version);
664 ++ printk(KERN_CONT " ");
665 ++ printk(KERN_CONT "%s %s", vendor, product);
666 ++ if (board) {
667 ++ printk(KERN_CONT "/");
668 ++ printk(KERN_CONT "%s", board);
669 ++ }
670 ++ printk(KERN_CONT "\n");
671 + }
672 +
673 + void flush_thread(void)
674 +diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
675 +index ba9caa8..df58e9c 100644
676 +--- a/arch/x86/platform/uv/tlb_uv.c
677 ++++ b/arch/x86/platform/uv/tlb_uv.c
678 +@@ -1341,7 +1341,7 @@ uv_activation_descriptor_init(int node, int pnode)
679 +
680 + /*
681 + * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
682 +- * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
683 ++ * per cpu; and one per cpu on the uvhub (UV_ADP_SIZE)
684 + */
685 + bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
686 + * UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
687 +@@ -1490,7 +1490,7 @@ calculate_destination_timeout(void)
688 + /*
689 + * initialize the bau_control structure for each cpu
690 + */
691 +-static void __init uv_init_per_cpu(int nuvhubs)
692 ++static int __init uv_init_per_cpu(int nuvhubs)
693 + {
694 + int i;
695 + int cpu;
696 +@@ -1507,7 +1507,7 @@ static void __init uv_init_per_cpu(int nuvhubs)
697 + struct bau_control *smaster = NULL;
698 + struct socket_desc {
699 + short num_cpus;
700 +- short cpu_number[16];
701 ++ short cpu_number[MAX_CPUS_PER_SOCKET];
702 + };
703 + struct uvhub_desc {
704 + unsigned short socket_mask;
705 +@@ -1540,6 +1540,10 @@ static void __init uv_init_per_cpu(int nuvhubs)
706 + sdp = &bdp->socket[socket];
707 + sdp->cpu_number[sdp->num_cpus] = cpu;
708 + sdp->num_cpus++;
709 ++ if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
710 ++ printk(KERN_EMERG "%d cpus per socket invalid\n", sdp->num_cpus);
711 ++ return 1;
712 ++ }
713 + }
714 + for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
715 + if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
716 +@@ -1570,6 +1574,12 @@ static void __init uv_init_per_cpu(int nuvhubs)
717 + bcp->uvhub_master = hmaster;
718 + bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->
719 + blade_processor_id;
720 ++ if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
721 ++ printk(KERN_EMERG
722 ++ "%d cpus per uvhub invalid\n",
723 ++ bcp->uvhub_cpu);
724 ++ return 1;
725 ++ }
726 + }
727 + nextsocket:
728 + socket++;
729 +@@ -1595,6 +1605,7 @@ nextsocket:
730 + bcp->congested_reps = congested_reps;
731 + bcp->congested_period = congested_period;
732 + }
733 ++ return 0;
734 + }
735 +
736 + /*
737 +@@ -1625,7 +1636,10 @@ static int __init uv_bau_init(void)
738 + spin_lock_init(&disable_lock);
739 + congested_cycles = microsec_2_cycles(congested_response_us);
740 +
741 +- uv_init_per_cpu(nuvhubs);
742 ++ if (uv_init_per_cpu(nuvhubs)) {
743 ++ nobau = 1;
744 ++ return 0;
745 ++ }
746 +
747 + uv_partition_base_pnode = 0x7fffffff;
748 + for (uvhub = 0; uvhub < nuvhubs; uvhub++)
749 +diff --git a/block/blk-core.c b/block/blk-core.c
750 +index 4ce953f..8767520 100644
751 +--- a/block/blk-core.c
752 ++++ b/block/blk-core.c
753 +@@ -64,13 +64,27 @@ static void drive_stat_acct(struct request *rq, int new_io)
754 + return;
755 +
756 + cpu = part_stat_lock();
757 +- part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
758 +
759 +- if (!new_io)
760 ++ if (!new_io) {
761 ++ part = rq->part;
762 + part_stat_inc(cpu, part, merges[rw]);
763 +- else {
764 ++ } else {
765 ++ part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
766 ++ if (!kref_test_and_get(&part->ref)) {
767 ++ /*
768 ++ * The partition is already being removed,
769 ++ * the request will be accounted on the disk only
770 ++ *
771 ++ * We take a reference on disk->part0 although that
772 ++ * partition will never be deleted, so we can treat
773 ++ * it as any other partition.
774 ++ */
775 ++ part = &rq->rq_disk->part0;
776 ++ kref_get(&part->ref);
777 ++ }
778 + part_round_stats(cpu, part);
779 + part_inc_in_flight(part, rw);
780 ++ rq->part = part;
781 + }
782 +
783 + part_stat_unlock();
784 +@@ -128,6 +142,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
785 + rq->ref_count = 1;
786 + rq->start_time = jiffies;
787 + set_start_time_ns(rq);
788 ++ rq->part = NULL;
789 + }
790 + EXPORT_SYMBOL(blk_rq_init);
791 +
792 +@@ -1776,7 +1791,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
793 + int cpu;
794 +
795 + cpu = part_stat_lock();
796 +- part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
797 ++ part = req->part;
798 + part_stat_add(cpu, part, sectors[rw], bytes >> 9);
799 + part_stat_unlock();
800 + }
801 +@@ -1796,13 +1811,14 @@ static void blk_account_io_done(struct request *req)
802 + int cpu;
803 +
804 + cpu = part_stat_lock();
805 +- part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
806 ++ part = req->part;
807 +
808 + part_stat_inc(cpu, part, ios[rw]);
809 + part_stat_add(cpu, part, ticks[rw], duration);
810 + part_round_stats(cpu, part);
811 + part_dec_in_flight(part, rw);
812 +
813 ++ kref_put(&part->ref, __delete_partition);
814 + part_stat_unlock();
815 + }
816 + }
817 +diff --git a/block/blk-merge.c b/block/blk-merge.c
818 +index 74bc4a7..23ea74b 100644
819 +--- a/block/blk-merge.c
820 ++++ b/block/blk-merge.c
821 +@@ -351,11 +351,12 @@ static void blk_account_io_merge(struct request *req)
822 + int cpu;
823 +
824 + cpu = part_stat_lock();
825 +- part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
826 ++ part = req->part;
827 +
828 + part_round_stats(cpu, part);
829 + part_dec_in_flight(part, rq_data_dir(req));
830 +
831 ++ kref_put(&part->ref, __delete_partition);
832 + part_stat_unlock();
833 + }
834 + }
835 +diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
836 +index 4cd59b0..6f2a966 100644
837 +--- a/block/cfq-iosched.c
838 ++++ b/block/cfq-iosched.c
839 +@@ -3412,6 +3412,10 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
840 + {
841 + struct cfq_io_context *cic = cfqd->active_cic;
842 +
843 ++ /* If the queue already has requests, don't wait */
844 ++ if (!RB_EMPTY_ROOT(&cfqq->sort_list))
845 ++ return false;
846 ++
847 + /* If there are other queues in the group, don't wait */
848 + if (cfqq->cfqg->nr_cfqq > 1)
849 + return false;
850 +diff --git a/block/genhd.c b/block/genhd.c
851 +index 5fa2b44..0c55eae 100644
852 +--- a/block/genhd.c
853 ++++ b/block/genhd.c
854 +@@ -1192,6 +1192,7 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
855 + return NULL;
856 + }
857 + disk->part_tbl->part[0] = &disk->part0;
858 ++ kref_init(&disk->part0.ref);
859 +
860 + disk->minors = minors;
861 + rand_initialize_disk(disk);
862 +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
863 +index 66aa4be..f03ea29 100644
864 +--- a/drivers/ata/libata-scsi.c
865 ++++ b/drivers/ata/libata-scsi.c
866 +@@ -1102,9 +1102,9 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
867 + struct request_queue *q = sdev->request_queue;
868 + void *buf;
869 +
870 +- /* set the min alignment and padding */
871 +- blk_queue_update_dma_alignment(sdev->request_queue,
872 +- ATA_DMA_PAD_SZ - 1);
873 ++ sdev->sector_size = ATA_SECT_SIZE;
874 ++
875 ++ /* set DMA padding */
876 + blk_queue_update_dma_pad(sdev->request_queue,
877 + ATA_DMA_PAD_SZ - 1);
878 +
879 +@@ -1118,13 +1118,25 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
880 +
881 + blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
882 + } else {
883 +- /* ATA devices must be sector aligned */
884 + sdev->sector_size = ata_id_logical_sector_size(dev->id);
885 +- blk_queue_update_dma_alignment(sdev->request_queue,
886 +- sdev->sector_size - 1);
887 + sdev->manage_start_stop = 1;
888 + }
889 +
890 ++ /*
891 ++ * ata_pio_sectors() expects buffer for each sector to not cross
892 ++ * page boundary. Enforce it by requiring buffers to be sector
893 ++ * aligned, which works iff sector_size is not larger than
894 ++ * PAGE_SIZE. ATAPI devices also need the alignment as
895 ++ * IDENTIFY_PACKET is executed as ATA_PROT_PIO.
896 ++ */
897 ++ if (sdev->sector_size > PAGE_SIZE)
898 ++ ata_dev_printk(dev, KERN_WARNING,
899 ++ "sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
900 ++ sdev->sector_size);
901 ++
902 ++ blk_queue_update_dma_alignment(sdev->request_queue,
903 ++ sdev->sector_size - 1);
904 ++
905 + if (dev->flags & ATA_DFLAG_AN)
906 + set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
907 +
908 +diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
909 +index 8cc536e..d7d8026 100644
910 +--- a/drivers/ata/pata_mpc52xx.c
911 ++++ b/drivers/ata/pata_mpc52xx.c
912 +@@ -610,7 +610,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
913 + };
914 +
915 + static struct ata_port_operations mpc52xx_ata_port_ops = {
916 +- .inherits = &ata_sff_port_ops,
917 ++ .inherits = &ata_bmdma_port_ops,
918 + .sff_dev_select = mpc52xx_ata_dev_select,
919 + .set_piomode = mpc52xx_ata_set_piomode,
920 + .set_dmamode = mpc52xx_ata_set_dmamode,
921 +diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
922 +index 02c652b..6b2d409 100644
923 +--- a/drivers/base/power/runtime.c
924 ++++ b/drivers/base/power/runtime.c
925 +@@ -404,12 +404,15 @@ static int rpm_suspend(struct device *dev, int rpmflags)
926 + goto out;
927 + }
928 +
929 ++ /* Maybe the parent is now able to suspend. */
930 + if (parent && !parent->power.ignore_children) {
931 +- spin_unlock_irq(&dev->power.lock);
932 ++ spin_unlock(&dev->power.lock);
933 +
934 +- pm_request_idle(parent);
935 ++ spin_lock(&parent->power.lock);
936 ++ rpm_idle(parent, RPM_ASYNC);
937 ++ spin_unlock(&parent->power.lock);
938 +
939 +- spin_lock_irq(&dev->power.lock);
940 ++ spin_lock(&dev->power.lock);
941 + }
942 +
943 + out:
944 +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
945 +index a32fb41..e6fc716 100644
946 +--- a/drivers/block/nbd.c
947 ++++ b/drivers/block/nbd.c
948 +@@ -53,7 +53,6 @@
949 + #define DBG_BLKDEV 0x0100
950 + #define DBG_RX 0x0200
951 + #define DBG_TX 0x0400
952 +-static DEFINE_MUTEX(nbd_mutex);
953 + static unsigned int debugflags;
954 + #endif /* NDEBUG */
955 +
956 +@@ -718,11 +717,9 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
957 + dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
958 + lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
959 +
960 +- mutex_lock(&nbd_mutex);
961 + mutex_lock(&lo->tx_lock);
962 + error = __nbd_ioctl(bdev, lo, cmd, arg);
963 + mutex_unlock(&lo->tx_lock);
964 +- mutex_unlock(&nbd_mutex);
965 +
966 + return error;
967 + }
968 +diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
969 +index e72f49d..2c9dca9 100644
970 +--- a/drivers/char/agp/intel-agp.c
971 ++++ b/drivers/char/agp/intel-agp.c
972 +@@ -774,20 +774,14 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
973 + dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
974 +
975 + /*
976 +- * If the device has not been properly setup, the following will catch
977 +- * the problem and should stop the system from crashing.
978 +- * 20030610 - hamish@×××.org
979 +- */
980 +- if (pci_enable_device(pdev)) {
981 +- dev_err(&pdev->dev, "can't enable PCI device\n");
982 +- agp_put_bridge(bridge);
983 +- return -ENODEV;
984 +- }
985 +-
986 +- /*
987 + * The following fixes the case where the BIOS has "forgotten" to
988 + * provide an address range for the GART.
989 + * 20030610 - hamish@×××.org
990 ++ * This happens before pci_enable_device() intentionally;
991 ++ * calling pci_enable_device() before assigning the resource
992 ++ * will result in the GART being disabled on machines with such
993 ++ * BIOSs (the GART ends up with a BAR starting at 0, which
994 ++ * conflicts a lot of other devices).
995 + */
996 + r = &pdev->resource[0];
997 + if (!r->start && r->end) {
998 +@@ -798,6 +792,17 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
999 + }
1000 + }
1001 +
1002 ++ /*
1003 ++ * If the device has not been properly setup, the following will catch
1004 ++ * the problem and should stop the system from crashing.
1005 ++ * 20030610 - hamish@×××.org
1006 ++ */
1007 ++ if (pci_enable_device(pdev)) {
1008 ++ dev_err(&pdev->dev, "can't enable PCI device\n");
1009 ++ agp_put_bridge(bridge);
1010 ++ return -ENODEV;
1011 ++ }
1012 ++
1013 + /* Fill in the mode register */
1014 + if (cap_ptr) {
1015 + pci_read_config_dword(pdev,
1016 +diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
1017 +index 035da9e..2689ddb 100644
1018 +--- a/drivers/char/ipmi/ipmi_si_intf.c
1019 ++++ b/drivers/char/ipmi/ipmi_si_intf.c
1020 +@@ -320,6 +320,7 @@ static int unload_when_empty = 1;
1021 + static int add_smi(struct smi_info *smi);
1022 + static int try_smi_init(struct smi_info *smi);
1023 + static void cleanup_one_si(struct smi_info *to_clean);
1024 ++static void cleanup_ipmi_si(void);
1025 +
1026 + static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
1027 + static int register_xaction_notifier(struct notifier_block *nb)
1028 +@@ -3435,16 +3436,7 @@ static int __devinit init_ipmi_si(void)
1029 + mutex_lock(&smi_infos_lock);
1030 + if (unload_when_empty && list_empty(&smi_infos)) {
1031 + mutex_unlock(&smi_infos_lock);
1032 +-#ifdef CONFIG_PCI
1033 +- if (pci_registered)
1034 +- pci_unregister_driver(&ipmi_pci_driver);
1035 +-#endif
1036 +-
1037 +-#ifdef CONFIG_PPC_OF
1038 +- if (of_registered)
1039 +- of_unregister_platform_driver(&ipmi_of_platform_driver);
1040 +-#endif
1041 +- driver_unregister(&ipmi_driver.driver);
1042 ++ cleanup_ipmi_si();
1043 + printk(KERN_WARNING PFX
1044 + "Unable to find any System Interface(s)\n");
1045 + return -ENODEV;
1046 +diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
1047 +index 7c41335..55d0466 100644
1048 +--- a/drivers/char/tpm/tpm.c
1049 ++++ b/drivers/char/tpm/tpm.c
1050 +@@ -364,12 +364,14 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
1051 + tpm_protected_ordinal_duration[ordinal &
1052 + TPM_PROTECTED_ORDINAL_MASK];
1053 +
1054 +- if (duration_idx != TPM_UNDEFINED)
1055 ++ if (duration_idx != TPM_UNDEFINED) {
1056 + duration = chip->vendor.duration[duration_idx];
1057 +- if (duration <= 0)
1058 ++ /* if duration is 0, it's because chip->vendor.duration wasn't */
1059 ++ /* filled yet, so we set the lowest timeout just to give enough */
1060 ++ /* time for tpm_get_timeouts() to succeed */
1061 ++ return (duration <= 0 ? HZ : duration);
1062 ++ } else
1063 + return 2 * 60 * HZ;
1064 +- else
1065 +- return duration;
1066 + }
1067 + EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
1068 +
1069 +@@ -575,9 +577,11 @@ duration:
1070 + if (rc)
1071 + return;
1072 +
1073 +- if (be32_to_cpu(tpm_cmd.header.out.return_code)
1074 +- != 3 * sizeof(u32))
1075 ++ if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
1076 ++ be32_to_cpu(tpm_cmd.header.out.length)
1077 ++ != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
1078 + return;
1079 ++
1080 + duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
1081 + chip->vendor.duration[TPM_SHORT] =
1082 + usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
1083 +@@ -921,6 +925,18 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
1084 + }
1085 + EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
1086 +
1087 ++ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
1088 ++ char *buf)
1089 ++{
1090 ++ struct tpm_chip *chip = dev_get_drvdata(dev);
1091 ++
1092 ++ return sprintf(buf, "%d %d %d\n",
1093 ++ jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
1094 ++ jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
1095 ++ jiffies_to_usecs(chip->vendor.duration[TPM_LONG]));
1096 ++}
1097 ++EXPORT_SYMBOL_GPL(tpm_show_timeouts);
1098 ++
1099 + ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
1100 + const char *buf, size_t count)
1101 + {
1102 +diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
1103 +index 792868d..ba1779c 100644
1104 +--- a/drivers/char/tpm/tpm.h
1105 ++++ b/drivers/char/tpm/tpm.h
1106 +@@ -56,6 +56,8 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr,
1107 + char *);
1108 + extern ssize_t tpm_show_temp_deactivated(struct device *,
1109 + struct device_attribute *attr, char *);
1110 ++extern ssize_t tpm_show_timeouts(struct device *,
1111 ++ struct device_attribute *attr, char *);
1112 +
1113 + struct tpm_chip;
1114 +
1115 +diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
1116 +index c17a305..0d1d38e 100644
1117 +--- a/drivers/char/tpm/tpm_tis.c
1118 ++++ b/drivers/char/tpm/tpm_tis.c
1119 +@@ -376,6 +376,7 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
1120 + NULL);
1121 + static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
1122 + static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
1123 ++static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
1124 +
1125 + static struct attribute *tis_attrs[] = {
1126 + &dev_attr_pubek.attr,
1127 +@@ -385,7 +386,8 @@ static struct attribute *tis_attrs[] = {
1128 + &dev_attr_owned.attr,
1129 + &dev_attr_temp_deactivated.attr,
1130 + &dev_attr_caps.attr,
1131 +- &dev_attr_cancel.attr, NULL,
1132 ++ &dev_attr_cancel.attr,
1133 ++ &dev_attr_timeouts.attr, NULL,
1134 + };
1135 +
1136 + static struct attribute_group tis_attr_grp = {
1137 +@@ -493,9 +495,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
1138 + "1.2 TPM (device-id 0x%X, rev-id %d)\n",
1139 + vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
1140 +
1141 +- if (is_itpm(to_pnp_dev(dev)))
1142 +- itpm = 1;
1143 +-
1144 + if (itpm)
1145 + dev_info(dev, "Intel iTPM workaround enabled\n");
1146 +
1147 +@@ -637,6 +636,9 @@ static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
1148 + else
1149 + interrupts = 0;
1150 +
1151 ++ if (is_itpm(pnp_dev))
1152 ++ itpm = 1;
1153 ++
1154 + return tpm_tis_init(&pnp_dev->dev, start, len, irq);
1155 + }
1156 +
1157 +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
1158 +index 896a2ce..ad2520b 100644
1159 +--- a/drivers/char/virtio_console.c
1160 ++++ b/drivers/char/virtio_console.c
1161 +@@ -1462,6 +1462,17 @@ static void control_work_handler(struct work_struct *work)
1162 + spin_unlock(&portdev->cvq_lock);
1163 + }
1164 +
1165 ++static void out_intr(struct virtqueue *vq)
1166 ++{
1167 ++ struct port *port;
1168 ++
1169 ++ port = find_port_by_vq(vq->vdev->priv, vq);
1170 ++ if (!port)
1171 ++ return;
1172 ++
1173 ++ wake_up_interruptible(&port->waitqueue);
1174 ++}
1175 ++
1176 + static void in_intr(struct virtqueue *vq)
1177 + {
1178 + struct port *port;
1179 +@@ -1566,7 +1577,7 @@ static int init_vqs(struct ports_device *portdev)
1180 + */
1181 + j = 0;
1182 + io_callbacks[j] = in_intr;
1183 +- io_callbacks[j + 1] = NULL;
1184 ++ io_callbacks[j + 1] = out_intr;
1185 + io_names[j] = "input";
1186 + io_names[j + 1] = "output";
1187 + j += 2;
1188 +@@ -1580,7 +1591,7 @@ static int init_vqs(struct ports_device *portdev)
1189 + for (i = 1; i < nr_ports; i++) {
1190 + j += 2;
1191 + io_callbacks[j] = in_intr;
1192 +- io_callbacks[j + 1] = NULL;
1193 ++ io_callbacks[j + 1] = out_intr;
1194 + io_names[j] = "input";
1195 + io_names[j + 1] = "output";
1196 + }
1197 +diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
1198 +index a507108..97df791 100644
1199 +--- a/drivers/cpuidle/cpuidle.c
1200 ++++ b/drivers/cpuidle/cpuidle.c
1201 +@@ -154,6 +154,45 @@ void cpuidle_resume_and_unlock(void)
1202 +
1203 + EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
1204 +
1205 ++#ifdef CONFIG_ARCH_HAS_CPU_RELAX
1206 ++static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
1207 ++{
1208 ++ ktime_t t1, t2;
1209 ++ s64 diff;
1210 ++ int ret;
1211 ++
1212 ++ t1 = ktime_get();
1213 ++ local_irq_enable();
1214 ++ while (!need_resched())
1215 ++ cpu_relax();
1216 ++
1217 ++ t2 = ktime_get();
1218 ++ diff = ktime_to_us(ktime_sub(t2, t1));
1219 ++ if (diff > INT_MAX)
1220 ++ diff = INT_MAX;
1221 ++
1222 ++ ret = (int) diff;
1223 ++ return ret;
1224 ++}
1225 ++
1226 ++static void poll_idle_init(struct cpuidle_device *dev)
1227 ++{
1228 ++ struct cpuidle_state *state = &dev->states[0];
1229 ++
1230 ++ cpuidle_set_statedata(state, NULL);
1231 ++
1232 ++ snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
1233 ++ snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
1234 ++ state->exit_latency = 0;
1235 ++ state->target_residency = 0;
1236 ++ state->power_usage = -1;
1237 ++ state->flags = CPUIDLE_FLAG_POLL;
1238 ++ state->enter = poll_idle;
1239 ++}
1240 ++#else
1241 ++static void poll_idle_init(struct cpuidle_device *dev) {}
1242 ++#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
1243 ++
1244 + /**
1245 + * cpuidle_enable_device - enables idle PM for a CPU
1246 + * @dev: the CPU
1247 +@@ -178,6 +217,8 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
1248 + return ret;
1249 + }
1250 +
1251 ++ poll_idle_init(dev);
1252 ++
1253 + if ((ret = cpuidle_add_state_sysfs(dev)))
1254 + return ret;
1255 +
1256 +@@ -232,45 +273,6 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
1257 +
1258 + EXPORT_SYMBOL_GPL(cpuidle_disable_device);
1259 +
1260 +-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
1261 +-static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
1262 +-{
1263 +- ktime_t t1, t2;
1264 +- s64 diff;
1265 +- int ret;
1266 +-
1267 +- t1 = ktime_get();
1268 +- local_irq_enable();
1269 +- while (!need_resched())
1270 +- cpu_relax();
1271 +-
1272 +- t2 = ktime_get();
1273 +- diff = ktime_to_us(ktime_sub(t2, t1));
1274 +- if (diff > INT_MAX)
1275 +- diff = INT_MAX;
1276 +-
1277 +- ret = (int) diff;
1278 +- return ret;
1279 +-}
1280 +-
1281 +-static void poll_idle_init(struct cpuidle_device *dev)
1282 +-{
1283 +- struct cpuidle_state *state = &dev->states[0];
1284 +-
1285 +- cpuidle_set_statedata(state, NULL);
1286 +-
1287 +- snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
1288 +- snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
1289 +- state->exit_latency = 0;
1290 +- state->target_residency = 0;
1291 +- state->power_usage = -1;
1292 +- state->flags = CPUIDLE_FLAG_POLL;
1293 +- state->enter = poll_idle;
1294 +-}
1295 +-#else
1296 +-static void poll_idle_init(struct cpuidle_device *dev) {}
1297 +-#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
1298 +-
1299 + /**
1300 + * __cpuidle_register_device - internal register function called before register
1301 + * and enable routines
1302 +@@ -291,8 +293,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
1303 +
1304 + init_completion(&dev->kobj_unregister);
1305 +
1306 +- poll_idle_init(dev);
1307 +-
1308 + /*
1309 + * cpuidle driver should set the dev->power_specified bit
1310 + * before registering the device if the driver provides
1311 +diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
1312 +index be04923..24ff355 100644
1313 +--- a/drivers/firewire/core-card.c
1314 ++++ b/drivers/firewire/core-card.c
1315 +@@ -75,6 +75,8 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
1316 + #define BIB_IRMC ((1) << 31)
1317 + #define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
1318 +
1319 ++#define CANON_OUI 0x000085
1320 ++
1321 + static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
1322 + {
1323 + struct fw_descriptor *desc;
1324 +@@ -284,6 +286,7 @@ static void bm_work(struct work_struct *work)
1325 + bool root_device_is_running;
1326 + bool root_device_is_cmc;
1327 + bool irm_is_1394_1995_only;
1328 ++ bool keep_this_irm;
1329 +
1330 + spin_lock_irq(&card->lock);
1331 +
1332 +@@ -305,6 +308,10 @@ static void bm_work(struct work_struct *work)
1333 + irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
1334 + (irm_device->config_rom[2] & 0x000000f0) == 0;
1335 +
1336 ++ /* Canon MV5i works unreliably if it is not root node. */
1337 ++ keep_this_irm = irm_device && irm_device->config_rom &&
1338 ++ irm_device->config_rom[3] >> 8 == CANON_OUI;
1339 ++
1340 + root_id = root_node->node_id;
1341 + irm_id = card->irm_node->node_id;
1342 + local_id = card->local_node->node_id;
1343 +@@ -333,7 +340,7 @@ static void bm_work(struct work_struct *work)
1344 + goto pick_me;
1345 + }
1346 +
1347 +- if (irm_is_1394_1995_only) {
1348 ++ if (irm_is_1394_1995_only && !keep_this_irm) {
1349 + new_root_id = local_id;
1350 + fw_notify("%s, making local node (%02x) root.\n",
1351 + "IRM is not 1394a compliant", new_root_id);
1352 +@@ -382,7 +389,7 @@ static void bm_work(struct work_struct *work)
1353 +
1354 + spin_lock_irq(&card->lock);
1355 +
1356 +- if (rcode != RCODE_COMPLETE) {
1357 ++ if (rcode != RCODE_COMPLETE && !keep_this_irm) {
1358 + /*
1359 + * The lock request failed, maybe the IRM
1360 + * isn't really IRM capable after all. Let's
1361 +diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
1362 +index e28e41668..bcb1126 100644
1363 +--- a/drivers/firmware/dmi_scan.c
1364 ++++ b/drivers/firmware/dmi_scan.c
1365 +@@ -378,10 +378,17 @@ static void __init print_filtered(const char *info)
1366 +
1367 + static void __init dmi_dump_ids(void)
1368 + {
1369 ++ const char *board; /* Board Name is optional */
1370 ++
1371 + printk(KERN_DEBUG "DMI: ");
1372 +- print_filtered(dmi_get_system_info(DMI_BOARD_NAME));
1373 +- printk(KERN_CONT "/");
1374 ++ print_filtered(dmi_get_system_info(DMI_SYS_VENDOR));
1375 ++ printk(KERN_CONT " ");
1376 + print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME));
1377 ++ board = dmi_get_system_info(DMI_BOARD_NAME);
1378 ++ if (board) {
1379 ++ printk(KERN_CONT "/");
1380 ++ print_filtered(board);
1381 ++ }
1382 + printk(KERN_CONT ", BIOS ");
1383 + print_filtered(dmi_get_system_info(DMI_BIOS_VERSION));
1384 + printk(KERN_CONT " ");
1385 +diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
1386 +index 7af4436..7ea4bdd 100644
1387 +--- a/drivers/gpu/drm/Kconfig
1388 ++++ b/drivers/gpu/drm/Kconfig
1389 +@@ -100,7 +100,10 @@ config DRM_I830
1390 + config DRM_I915
1391 + tristate "i915 driver"
1392 + depends on AGP_INTEL
1393 ++ # we need shmfs for the swappable backing store, and in particular
1394 ++ # the shmem_readpage() which depends upon tmpfs
1395 + select SHMEM
1396 ++ select TMPFS
1397 + select DRM_KMS_HELPER
1398 + select FB_CFB_FILLRECT
1399 + select FB_CFB_COPYAREA
1400 +diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
1401 +index 2d4e17a..a0ce53d 100644
1402 +--- a/drivers/gpu/drm/drm_crtc_helper.c
1403 ++++ b/drivers/gpu/drm/drm_crtc_helper.c
1404 +@@ -650,9 +650,16 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
1405 + old_fb)) {
1406 + DRM_ERROR("failed to set mode on [CRTC:%d]\n",
1407 + set->crtc->base.id);
1408 ++ set->crtc->fb = old_fb;
1409 + ret = -EINVAL;
1410 + goto fail;
1411 + }
1412 ++ DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
1413 ++ for (i = 0; i < set->num_connectors; i++) {
1414 ++ DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
1415 ++ drm_get_connector_name(set->connectors[i]));
1416 ++ set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
1417 ++ }
1418 + }
1419 + drm_helper_disable_unused_functions(dev);
1420 + } else if (fb_changed) {
1421 +@@ -664,14 +671,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
1422 + set->crtc->fb = set->fb;
1423 + ret = crtc_funcs->mode_set_base(set->crtc,
1424 + set->x, set->y, old_fb);
1425 +- if (ret != 0)
1426 ++ if (ret != 0) {
1427 ++ set->crtc->fb = old_fb;
1428 + goto fail;
1429 +- }
1430 +- DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
1431 +- for (i = 0; i < set->num_connectors; i++) {
1432 +- DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
1433 +- drm_get_connector_name(set->connectors[i]));
1434 +- set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
1435 ++ }
1436 + }
1437 +
1438 + kfree(save_connectors);
1439 +diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
1440 +index cb900dc..4916c10 100644
1441 +--- a/drivers/gpu/drm/i915/i915_dma.c
1442 ++++ b/drivers/gpu/drm/i915/i915_dma.c
1443 +@@ -1243,9 +1243,15 @@ static int i915_load_modeset_init(struct drm_device *dev,
1444 + if (ret)
1445 + DRM_INFO("failed to find VBIOS tables\n");
1446 +
1447 +- /* if we have > 1 VGA cards, then disable the radeon VGA resources */
1448 ++ /* If we have > 1 VGA cards, then we need to arbitrate access
1449 ++ * to the common VGA resources.
1450 ++ *
1451 ++ * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
1452 ++ * then we do not take part in VGA arbitration and the
1453 ++ * vga_client_register() fails with -ENODEV.
1454 ++ */
1455 + ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
1456 +- if (ret)
1457 ++ if (ret && ret != -ENODEV)
1458 + goto cleanup_ringbuffer;
1459 +
1460 + intel_register_dsm_handler();
1461 +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
1462 +index f737960..da769bc 100644
1463 +--- a/drivers/gpu/drm/i915/i915_drv.c
1464 ++++ b/drivers/gpu/drm/i915/i915_drv.c
1465 +@@ -54,7 +54,7 @@ extern int intel_agp_enabled;
1466 +
1467 + #define INTEL_VGA_DEVICE(id, info) { \
1468 + .class = PCI_CLASS_DISPLAY_VGA << 8, \
1469 +- .class_mask = 0xffff00, \
1470 ++ .class_mask = 0xff0000, \
1471 + .vendor = 0x8086, \
1472 + .device = id, \
1473 + .subvendor = PCI_ANY_ID, \
1474 +@@ -501,6 +501,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
1475 + static int __devinit
1476 + i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1477 + {
1478 ++ /* Only bind to function 0 of the device. Early generations
1479 ++ * used function 1 as a placeholder for multi-head. This causes
1480 ++ * us confusion instead, especially on the systems where both
1481 ++ * functions have the same PCI-ID!
1482 ++ */
1483 ++ if (PCI_FUNC(pdev->devfn))
1484 ++ return -ENODEV;
1485 ++
1486 + return drm_get_pci_dev(pdev, ent, &driver);
1487 + }
1488 +
1489 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1490 +index 409826d..d2896eb 100644
1491 +--- a/drivers/gpu/drm/i915/i915_drv.h
1492 ++++ b/drivers/gpu/drm/i915/i915_drv.h
1493 +@@ -329,6 +329,7 @@ typedef struct drm_i915_private {
1494 +
1495 + /* LVDS info */
1496 + int backlight_level; /* restore backlight to this value */
1497 ++ bool backlight_enabled;
1498 + struct drm_display_mode *panel_fixed_mode;
1499 + struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
1500 + struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
1501 +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1502 +index cb8f434..0a1b276 100644
1503 +--- a/drivers/gpu/drm/i915/i915_reg.h
1504 ++++ b/drivers/gpu/drm/i915/i915_reg.h
1505 +@@ -3057,10 +3057,11 @@
1506 + #define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
1507 + #define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
1508 + /* SNB B-stepping */
1509 +-#define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
1510 +-#define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
1511 +-#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
1512 +-#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
1513 ++#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22)
1514 ++#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22)
1515 ++#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22)
1516 ++#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22)
1517 ++#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
1518 + #define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
1519 +
1520 + #endif /* _I915_REG_H_ */
1521 +diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
1522 +index 8df5743..17035b8 100644
1523 +--- a/drivers/gpu/drm/i915/intel_crt.c
1524 ++++ b/drivers/gpu/drm/i915/intel_crt.c
1525 +@@ -30,6 +30,7 @@
1526 + #include "drm.h"
1527 + #include "drm_crtc.h"
1528 + #include "drm_crtc_helper.h"
1529 ++#include "drm_edid.h"
1530 + #include "intel_drv.h"
1531 + #include "i915_drm.h"
1532 + #include "i915_drv.h"
1533 +@@ -287,8 +288,9 @@ static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
1534 + return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
1535 + }
1536 +
1537 +-static bool intel_crt_detect_ddc(struct intel_crt *crt)
1538 ++static bool intel_crt_detect_ddc(struct drm_connector *connector)
1539 + {
1540 ++ struct intel_crt *crt = intel_attached_crt(connector);
1541 + struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
1542 +
1543 + /* CRT should always be at 0, but check anyway */
1544 +@@ -301,8 +303,26 @@ static bool intel_crt_detect_ddc(struct intel_crt *crt)
1545 + }
1546 +
1547 + if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
1548 +- DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
1549 +- return true;
1550 ++ struct edid *edid;
1551 ++ bool is_digital = false;
1552 ++
1553 ++ edid = drm_get_edid(connector,
1554 ++ &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
1555 ++ /*
1556 ++ * This may be a DVI-I connector with a shared DDC
1557 ++ * link between analog and digital outputs, so we
1558 ++ * have to check the EDID input spec of the attached device.
1559 ++ */
1560 ++ if (edid != NULL) {
1561 ++ is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
1562 ++ connector->display_info.raw_edid = NULL;
1563 ++ kfree(edid);
1564 ++ }
1565 ++
1566 ++ if (!is_digital) {
1567 ++ DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
1568 ++ return true;
1569 ++ }
1570 + }
1571 +
1572 + return false;
1573 +@@ -458,7 +478,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
1574 + }
1575 + }
1576 +
1577 +- if (intel_crt_detect_ddc(crt))
1578 ++ if (intel_crt_detect_ddc(connector))
1579 + return connector_status_connected;
1580 +
1581 + if (!force)
1582 +@@ -472,7 +492,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
1583 + crtc = intel_get_load_detect_pipe(&crt->base, connector,
1584 + NULL, &dpms_mode);
1585 + if (crtc) {
1586 +- if (intel_crt_detect_ddc(crt))
1587 ++ if (intel_crt_detect_ddc(connector))
1588 + status = connector_status_connected;
1589 + else
1590 + status = intel_crt_load_detect(crtc, crt);
1591 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1592 +index fca5232..3abd904 100644
1593 +--- a/drivers/gpu/drm/i915/intel_display.c
1594 ++++ b/drivers/gpu/drm/i915/intel_display.c
1595 +@@ -3714,7 +3714,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1596 + int lane = 0, link_bw, bpp;
1597 + /* CPU eDP doesn't require FDI link, so just set DP M/N
1598 + according to current link config */
1599 +- if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) {
1600 ++ if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
1601 + target_clock = mode->clock;
1602 + intel_edp_link_config(has_edp_encoder,
1603 + &lane, &link_bw);
1604 +@@ -5498,6 +5498,8 @@ static void intel_setup_outputs(struct drm_device *dev)
1605 + encoder->base.possible_clones =
1606 + intel_encoder_clones(dev, encoder->clone_mask);
1607 + }
1608 ++
1609 ++ intel_panel_setup_backlight(dev);
1610 + }
1611 +
1612 + static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
1613 +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
1614 +index 864417c..c1f13bf 100644
1615 +--- a/drivers/gpu/drm/i915/intel_dp.c
1616 ++++ b/drivers/gpu/drm/i915/intel_dp.c
1617 +@@ -1153,18 +1153,27 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
1618 + static uint32_t
1619 + intel_gen6_edp_signal_levels(uint8_t train_set)
1620 + {
1621 +- switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
1622 ++ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1623 ++ DP_TRAIN_PRE_EMPHASIS_MASK);
1624 ++ switch (signal_levels) {
1625 + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1626 +- return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
1627 ++ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1628 ++ return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
1629 ++ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1630 ++ return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
1631 + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1632 +- return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
1633 ++ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
1634 ++ return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
1635 + case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1636 +- return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
1637 ++ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1638 ++ return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
1639 + case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1640 +- return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
1641 ++ case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
1642 ++ return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
1643 + default:
1644 +- DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
1645 +- return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
1646 ++ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1647 ++ "0x%x\n", signal_levels);
1648 ++ return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
1649 + }
1650 + }
1651 +
1652 +diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
1653 +index e52c612..1e68293 100644
1654 +--- a/drivers/gpu/drm/i915/intel_drv.h
1655 ++++ b/drivers/gpu/drm/i915/intel_drv.h
1656 +@@ -256,6 +256,9 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
1657 + extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
1658 + extern u32 intel_panel_get_backlight(struct drm_device *dev);
1659 + extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
1660 ++extern void intel_panel_setup_backlight(struct drm_device *dev);
1661 ++extern void intel_panel_enable_backlight(struct drm_device *dev);
1662 ++extern void intel_panel_disable_backlight(struct drm_device *dev);
1663 +
1664 + extern void intel_crtc_load_lut(struct drm_crtc *crtc);
1665 + extern void intel_encoder_prepare (struct drm_encoder *encoder);
1666 +diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
1667 +index 25bcedf..fe779b3 100644
1668 +--- a/drivers/gpu/drm/i915/intel_lvds.c
1669 ++++ b/drivers/gpu/drm/i915/intel_lvds.c
1670 +@@ -106,7 +106,7 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
1671 + I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
1672 + POSTING_READ(lvds_reg);
1673 +
1674 +- intel_panel_set_backlight(dev, dev_priv->backlight_level);
1675 ++ intel_panel_enable_backlight(dev);
1676 + }
1677 +
1678 + static void intel_lvds_disable(struct intel_lvds *intel_lvds)
1679 +@@ -123,8 +123,7 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
1680 + lvds_reg = LVDS;
1681 + }
1682 +
1683 +- dev_priv->backlight_level = intel_panel_get_backlight(dev);
1684 +- intel_panel_set_backlight(dev, 0);
1685 ++ intel_panel_disable_backlight(dev);
1686 +
1687 + I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
1688 +
1689 +@@ -395,8 +394,6 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
1690 + struct drm_i915_private *dev_priv = dev->dev_private;
1691 + struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
1692 +
1693 +- dev_priv->backlight_level = intel_panel_get_backlight(dev);
1694 +-
1695 + /* We try to do the minimum that is necessary in order to unlock
1696 + * the registers for mode setting.
1697 + *
1698 +@@ -427,9 +424,6 @@ static void intel_lvds_commit(struct drm_encoder *encoder)
1699 + struct drm_i915_private *dev_priv = dev->dev_private;
1700 + struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
1701 +
1702 +- if (dev_priv->backlight_level == 0)
1703 +- dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
1704 +-
1705 + /* Undo any unlocking done in prepare to prevent accidental
1706 + * adjustment of the registers.
1707 + */
1708 +@@ -703,6 +697,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
1709 + },
1710 + {
1711 + .callback = intel_no_lvds_dmi_callback,
1712 ++ .ident = "AOpen i915GMm-HFS",
1713 ++ .matches = {
1714 ++ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
1715 ++ DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
1716 ++ },
1717 ++ },
1718 ++ {
1719 ++ .callback = intel_no_lvds_dmi_callback,
1720 + .ident = "Aopen i945GTt-VFA",
1721 + .matches = {
1722 + DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
1723 +diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
1724 +index 92ff8f3..0df86b5 100644
1725 +--- a/drivers/gpu/drm/i915/intel_panel.c
1726 ++++ b/drivers/gpu/drm/i915/intel_panel.c
1727 +@@ -218,3 +218,34 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
1728 + tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
1729 + I915_WRITE(BLC_PWM_CTL, tmp | level);
1730 + }
1731 ++
1732 ++void intel_panel_disable_backlight(struct drm_device *dev)
1733 ++{
1734 ++ struct drm_i915_private *dev_priv = dev->dev_private;
1735 ++
1736 ++ if (dev_priv->backlight_enabled) {
1737 ++ dev_priv->backlight_level = intel_panel_get_backlight(dev);
1738 ++ dev_priv->backlight_enabled = false;
1739 ++ }
1740 ++
1741 ++ intel_panel_set_backlight(dev, 0);
1742 ++}
1743 ++
1744 ++void intel_panel_enable_backlight(struct drm_device *dev)
1745 ++{
1746 ++ struct drm_i915_private *dev_priv = dev->dev_private;
1747 ++
1748 ++ if (dev_priv->backlight_level == 0)
1749 ++ dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
1750 ++
1751 ++ intel_panel_set_backlight(dev, dev_priv->backlight_level);
1752 ++ dev_priv->backlight_enabled = true;
1753 ++}
1754 ++
1755 ++void intel_panel_setup_backlight(struct drm_device *dev)
1756 ++{
1757 ++ struct drm_i915_private *dev_priv = dev->dev_private;
1758 ++
1759 ++ dev_priv->backlight_level = intel_panel_get_backlight(dev);
1760 ++ dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
1761 ++}
1762 +diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
1763 +index 6bc42fa..920ca27 100644
1764 +--- a/drivers/gpu/drm/i915/intel_sdvo.c
1765 ++++ b/drivers/gpu/drm/i915/intel_sdvo.c
1766 +@@ -1024,9 +1024,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1767 + if (!intel_sdvo_set_target_input(intel_sdvo))
1768 + return;
1769 +
1770 +- if (intel_sdvo->has_hdmi_monitor &&
1771 +- !intel_sdvo_set_avi_infoframe(intel_sdvo))
1772 +- return;
1773 ++ if (intel_sdvo->has_hdmi_monitor) {
1774 ++ intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
1775 ++ intel_sdvo_set_colorimetry(intel_sdvo,
1776 ++ SDVO_COLORIMETRY_RGB256);
1777 ++ intel_sdvo_set_avi_infoframe(intel_sdvo);
1778 ++ } else
1779 ++ intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
1780 +
1781 + if (intel_sdvo->is_tv &&
1782 + !intel_sdvo_set_tv_format(intel_sdvo))
1783 +@@ -1395,6 +1399,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1784 +
1785 + intel_sdvo->attached_output = response;
1786 +
1787 ++ intel_sdvo->has_hdmi_monitor = false;
1788 ++ intel_sdvo->has_hdmi_audio = false;
1789 ++
1790 + if ((intel_sdvo_connector->output_flag & response) == 0)
1791 + ret = connector_status_disconnected;
1792 + else if (response & SDVO_TMDS_MASK)
1793 +@@ -1919,20 +1926,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
1794 + static bool
1795 + intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
1796 + {
1797 +- int is_hdmi;
1798 +-
1799 +- if (!intel_sdvo_check_supp_encode(intel_sdvo))
1800 +- return false;
1801 +-
1802 +- if (!intel_sdvo_set_target_output(intel_sdvo,
1803 +- device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
1804 +- return false;
1805 +-
1806 +- is_hdmi = 0;
1807 +- if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
1808 +- return false;
1809 +-
1810 +- return !!is_hdmi;
1811 ++ return intel_sdvo_check_supp_encode(intel_sdvo);
1812 + }
1813 +
1814 + static u8
1815 +@@ -2034,12 +2028,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
1816 + connector->connector_type = DRM_MODE_CONNECTOR_DVID;
1817 +
1818 + if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
1819 +- /* enable hdmi encoding mode if supported */
1820 +- intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
1821 +- intel_sdvo_set_colorimetry(intel_sdvo,
1822 +- SDVO_COLORIMETRY_RGB256);
1823 + connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
1824 +-
1825 + intel_sdvo->is_hdmi = true;
1826 + }
1827 + intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
1828 +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
1829 +index 9fbabaa..6b7fc4b 100644
1830 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c
1831 ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
1832 +@@ -531,6 +531,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
1833 + dp_clock = dig_connector->dp_clock;
1834 + }
1835 + }
1836 ++/* this might work properly with the new pll algo */
1837 + #if 0 /* doesn't work properly on some laptops */
1838 + /* use recommended ref_div for ss */
1839 + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
1840 +@@ -548,6 +549,11 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
1841 + adjusted_clock = mode->clock * 2;
1842 + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
1843 + pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
1844 ++ /* rv515 needs more testing with this option */
1845 ++ if (rdev->family != CHIP_RV515) {
1846 ++ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
1847 ++ pll->flags |= RADEON_PLL_IS_LCD;
1848 ++ }
1849 + } else {
1850 + if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
1851 + pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
1852 +@@ -915,8 +921,16 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
1853 + /* adjust pixel clock as needed */
1854 + adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
1855 +
1856 +- radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
1857 +- &ref_div, &post_div);
1858 ++ /* rv515 seems happier with the old algo */
1859 ++ if (rdev->family == CHIP_RV515)
1860 ++ radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
1861 ++ &ref_div, &post_div);
1862 ++ else if (ASIC_IS_AVIVO(rdev))
1863 ++ radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
1864 ++ &ref_div, &post_div);
1865 ++ else
1866 ++ radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
1867 ++ &ref_div, &post_div);
1868 +
1869 + atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
1870 +
1871 +diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
1872 +index 4e7778d..695de9a 100644
1873 +--- a/drivers/gpu/drm/radeon/atombios_dp.c
1874 ++++ b/drivers/gpu/drm/radeon/atombios_dp.c
1875 +@@ -187,9 +187,9 @@ static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
1876 + int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
1877 + {
1878 + int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
1879 +- int bw = dp_lanes_for_mode_clock(dpcd, mode_clock);
1880 ++ int dp_clock = dp_link_clock_for_mode_clock(dpcd, mode_clock);
1881 +
1882 +- if ((lanes == 0) || (bw == 0))
1883 ++ if ((lanes == 0) || (dp_clock == 0))
1884 + return MODE_CLOCK_HIGH;
1885 +
1886 + return MODE_OK;
1887 +diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
1888 +index 7b337c3..cff593d 100644
1889 +--- a/drivers/gpu/drm/radeon/evergreen.c
1890 ++++ b/drivers/gpu/drm/radeon/evergreen.c
1891 +@@ -1135,7 +1135,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
1892 + cp_me = 0xff;
1893 + WREG32(CP_ME_CNTL, cp_me);
1894 +
1895 +- r = radeon_ring_lock(rdev, evergreen_default_size + 15);
1896 ++ r = radeon_ring_lock(rdev, evergreen_default_size + 19);
1897 + if (r) {
1898 + DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1899 + return r;
1900 +@@ -1168,6 +1168,11 @@ static int evergreen_cp_start(struct radeon_device *rdev)
1901 + radeon_ring_write(rdev, 0xffffffff);
1902 + radeon_ring_write(rdev, 0xffffffff);
1903 +
1904 ++ radeon_ring_write(rdev, 0xc0026900);
1905 ++ radeon_ring_write(rdev, 0x00000316);
1906 ++ radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1907 ++ radeon_ring_write(rdev, 0x00000010); /* */
1908 ++
1909 + radeon_ring_unlock_commit(rdev);
1910 +
1911 + return 0;
1912 +diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
1913 +index e0e5901..a2cfead 100644
1914 +--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
1915 ++++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
1916 +@@ -230,7 +230,7 @@ draw_auto(struct radeon_device *rdev)
1917 +
1918 + }
1919 +
1920 +-/* emits 30 */
1921 ++/* emits 34 */
1922 + static void
1923 + set_default_state(struct radeon_device *rdev)
1924 + {
1925 +@@ -243,6 +243,8 @@ set_default_state(struct radeon_device *rdev)
1926 + int num_hs_threads, num_ls_threads;
1927 + int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
1928 + int num_hs_stack_entries, num_ls_stack_entries;
1929 ++ u64 gpu_addr;
1930 ++ int dwords;
1931 +
1932 + switch (rdev->family) {
1933 + case CHIP_CEDAR:
1934 +@@ -409,6 +411,14 @@ set_default_state(struct radeon_device *rdev)
1935 + radeon_ring_write(rdev, 0x00000000);
1936 + radeon_ring_write(rdev, 0x00000000);
1937 +
1938 ++ /* emit an IB pointing at default state */
1939 ++ dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
1940 ++ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
1941 ++ radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1942 ++ radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
1943 ++ radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
1944 ++ radeon_ring_write(rdev, dwords);
1945 ++
1946 + }
1947 +
1948 + static inline uint32_t i2f(uint32_t input)
1949 +@@ -439,8 +449,10 @@ static inline uint32_t i2f(uint32_t input)
1950 + int evergreen_blit_init(struct radeon_device *rdev)
1951 + {
1952 + u32 obj_size;
1953 +- int r;
1954 ++ int r, dwords;
1955 + void *ptr;
1956 ++ u32 packet2s[16];
1957 ++ int num_packet2s = 0;
1958 +
1959 + /* pin copy shader into vram if already initialized */
1960 + if (rdev->r600_blit.shader_obj)
1961 +@@ -448,8 +460,17 @@ int evergreen_blit_init(struct radeon_device *rdev)
1962 +
1963 + mutex_init(&rdev->r600_blit.mutex);
1964 + rdev->r600_blit.state_offset = 0;
1965 +- rdev->r600_blit.state_len = 0;
1966 +- obj_size = 0;
1967 ++
1968 ++ rdev->r600_blit.state_len = evergreen_default_size;
1969 ++
1970 ++ dwords = rdev->r600_blit.state_len;
1971 ++ while (dwords & 0xf) {
1972 ++ packet2s[num_packet2s++] = PACKET2(0);
1973 ++ dwords++;
1974 ++ }
1975 ++
1976 ++ obj_size = dwords * 4;
1977 ++ obj_size = ALIGN(obj_size, 256);
1978 +
1979 + rdev->r600_blit.vs_offset = obj_size;
1980 + obj_size += evergreen_vs_size * 4;
1981 +@@ -479,6 +500,12 @@ int evergreen_blit_init(struct radeon_device *rdev)
1982 + return r;
1983 + }
1984 +
1985 ++ memcpy_toio(ptr + rdev->r600_blit.state_offset,
1986 ++ evergreen_default_state, rdev->r600_blit.state_len * 4);
1987 ++
1988 ++ if (num_packet2s)
1989 ++ memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
1990 ++ packet2s, num_packet2s * 4);
1991 + memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4);
1992 + memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4);
1993 + radeon_bo_kunmap(rdev->r600_blit.shader_obj);
1994 +@@ -564,7 +591,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
1995 + /* calculate number of loops correctly */
1996 + ring_size = num_loops * dwords_per_loop;
1997 + /* set default + shaders */
1998 +- ring_size += 46; /* shaders + def state */
1999 ++ ring_size += 50; /* shaders + def state */
2000 + ring_size += 10; /* fence emit for VB IB */
2001 + ring_size += 5; /* done copy */
2002 + ring_size += 10; /* fence emit for done copy */
2003 +@@ -572,7 +599,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
2004 + if (r)
2005 + return r;
2006 +
2007 +- set_default_state(rdev); /* 30 */
2008 ++ set_default_state(rdev); /* 34 */
2009 + set_shaders(rdev); /* 16 */
2010 + return 0;
2011 + }
2012 +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
2013 +index bc5a2c3..57b9de3 100644
2014 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c
2015 ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
2016 +@@ -313,7 +313,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
2017 + uint16_t *line_mux,
2018 + struct radeon_hpd *hpd)
2019 + {
2020 +- struct radeon_device *rdev = dev->dev_private;
2021 +
2022 + /* Asus M2A-VM HDMI board lists the DVI port as HDMI */
2023 + if ((dev->pdev->device == 0x791e) &&
2024 +@@ -388,6 +387,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
2025 + *line_mux = 0x90;
2026 + }
2027 +
2028 ++ /* mac rv630, rv730, others */
2029 ++ if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
2030 ++ (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
2031 ++ *connector_type = DRM_MODE_CONNECTOR_9PinDIN;
2032 ++ *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
2033 ++ }
2034 ++
2035 + /* ASUS HD 3600 XT board lists the DVI port as HDMI */
2036 + if ((dev->pdev->device == 0x9598) &&
2037 + (dev->pdev->subsystem_vendor == 0x1043) &&
2038 +@@ -425,21 +431,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
2039 + }
2040 + }
2041 +
2042 +- /* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */
2043 ++ /* Acer laptop (Acer TravelMate 5730G) has an HDMI port
2044 ++ * on the laptop and a DVI port on the docking station and
2045 ++ * both share the same encoder, hpd pin, and ddc line.
2046 ++ * So while the bios table is technically correct,
2047 ++ * we drop the DVI port here since xrandr has no concept of
2048 ++ * encoders and will try and drive both connectors
2049 ++ * with different crtcs which isn't possible on the hardware
2050 ++ * side and leaves no crtcs for LVDS or VGA.
2051 ++ */
2052 + if ((dev->pdev->device == 0x95c4) &&
2053 + (dev->pdev->subsystem_vendor == 0x1025) &&
2054 + (dev->pdev->subsystem_device == 0x013c)) {
2055 +- struct radeon_gpio_rec gpio;
2056 +-
2057 + if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
2058 + (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
2059 +- gpio = radeon_lookup_gpio(rdev, 6);
2060 +- *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
2061 ++ /* actually it's a DVI-D port not DVI-I */
2062 + *connector_type = DRM_MODE_CONNECTOR_DVID;
2063 +- } else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
2064 +- (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
2065 +- gpio = radeon_lookup_gpio(rdev, 7);
2066 +- *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
2067 ++ return false;
2068 + }
2069 + }
2070 +
2071 +@@ -1128,16 +1136,6 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
2072 + p1pll->pll_out_min = 64800;
2073 + else
2074 + p1pll->pll_out_min = 20000;
2075 +- } else if (p1pll->pll_out_min > 64800) {
2076 +- /* Limiting the pll output range is a good thing generally as
2077 +- * it limits the number of possible pll combinations for a given
2078 +- * frequency presumably to the ones that work best on each card.
2079 +- * However, certain duallink DVI monitors seem to like
2080 +- * pll combinations that would be limited by this at least on
2081 +- * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per
2082 +- * family.
2083 +- */
2084 +- p1pll->pll_out_min = 64800;
2085 + }
2086 +
2087 + p1pll->pll_in_min =
2088 +@@ -2359,7 +2357,7 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
2089 + bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
2090 +
2091 + /* tell the bios not to handle mode switching */
2092 +- bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE);
2093 ++ bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
2094 +
2095 + if (rdev->family >= CHIP_R600) {
2096 + WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
2097 +@@ -2410,10 +2408,13 @@ void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
2098 + else
2099 + bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
2100 +
2101 +- if (lock)
2102 ++ if (lock) {
2103 + bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
2104 +- else
2105 ++ bios_6_scratch &= ~ATOM_S6_ACC_MODE;
2106 ++ } else {
2107 + bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
2108 ++ bios_6_scratch |= ATOM_S6_ACC_MODE;
2109 ++ }
2110 +
2111 + if (rdev->family >= CHIP_R600)
2112 + WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
2113 +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
2114 +index 1df4dc6..a26a70d 100644
2115 +--- a/drivers/gpu/drm/radeon/radeon_display.c
2116 ++++ b/drivers/gpu/drm/radeon/radeon_display.c
2117 +@@ -448,6 +448,115 @@ static int radeon_ddc_dump(struct drm_connector *connector)
2118 + return ret;
2119 + }
2120 +
2121 ++/* avivo */
2122 ++static void avivo_get_fb_div(struct radeon_pll *pll,
2123 ++ u32 target_clock,
2124 ++ u32 post_div,
2125 ++ u32 ref_div,
2126 ++ u32 *fb_div,
2127 ++ u32 *frac_fb_div)
2128 ++{
2129 ++ u32 tmp = post_div * ref_div;
2130 ++
2131 ++ tmp *= target_clock;
2132 ++ *fb_div = tmp / pll->reference_freq;
2133 ++ *frac_fb_div = tmp % pll->reference_freq;
2134 ++}
2135 ++
2136 ++static u32 avivo_get_post_div(struct radeon_pll *pll,
2137 ++ u32 target_clock)
2138 ++{
2139 ++ u32 vco, post_div, tmp;
2140 ++
2141 ++ if (pll->flags & RADEON_PLL_USE_POST_DIV)
2142 ++ return pll->post_div;
2143 ++
2144 ++ if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
2145 ++ if (pll->flags & RADEON_PLL_IS_LCD)
2146 ++ vco = pll->lcd_pll_out_min;
2147 ++ else
2148 ++ vco = pll->pll_out_min;
2149 ++ } else {
2150 ++ if (pll->flags & RADEON_PLL_IS_LCD)
2151 ++ vco = pll->lcd_pll_out_max;
2152 ++ else
2153 ++ vco = pll->pll_out_max;
2154 ++ }
2155 ++
2156 ++ post_div = vco / target_clock;
2157 ++ tmp = vco % target_clock;
2158 ++
2159 ++ if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
2160 ++ if (tmp)
2161 ++ post_div++;
2162 ++ } else {
2163 ++ if (!tmp)
2164 ++ post_div--;
2165 ++ }
2166 ++
2167 ++ return post_div;
2168 ++}
2169 ++
2170 ++#define MAX_TOLERANCE 10
2171 ++
2172 ++void radeon_compute_pll_avivo(struct radeon_pll *pll,
2173 ++ u32 freq,
2174 ++ u32 *dot_clock_p,
2175 ++ u32 *fb_div_p,
2176 ++ u32 *frac_fb_div_p,
2177 ++ u32 *ref_div_p,
2178 ++ u32 *post_div_p)
2179 ++{
2180 ++ u32 target_clock = freq / 10;
2181 ++ u32 post_div = avivo_get_post_div(pll, target_clock);
2182 ++ u32 ref_div = pll->min_ref_div;
2183 ++ u32 fb_div = 0, frac_fb_div = 0, tmp;
2184 ++
2185 ++ if (pll->flags & RADEON_PLL_USE_REF_DIV)
2186 ++ ref_div = pll->reference_div;
2187 ++
2188 ++ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
2189 ++ avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div);
2190 ++ frac_fb_div = (100 * frac_fb_div) / pll->reference_freq;
2191 ++ if (frac_fb_div >= 5) {
2192 ++ frac_fb_div -= 5;
2193 ++ frac_fb_div = frac_fb_div / 10;
2194 ++ frac_fb_div++;
2195 ++ }
2196 ++ if (frac_fb_div >= 10) {
2197 ++ fb_div++;
2198 ++ frac_fb_div = 0;
2199 ++ }
2200 ++ } else {
2201 ++ while (ref_div <= pll->max_ref_div) {
2202 ++ avivo_get_fb_div(pll, target_clock, post_div, ref_div,
2203 ++ &fb_div, &frac_fb_div);
2204 ++ if (frac_fb_div >= (pll->reference_freq / 2))
2205 ++ fb_div++;
2206 ++ frac_fb_div = 0;
2207 ++ tmp = (pll->reference_freq * fb_div) / (post_div * ref_div);
2208 ++ tmp = (tmp * 10000) / target_clock;
2209 ++
2210 ++ if (tmp > (10000 + MAX_TOLERANCE))
2211 ++ ref_div++;
2212 ++ else if (tmp >= (10000 - MAX_TOLERANCE))
2213 ++ break;
2214 ++ else
2215 ++ ref_div++;
2216 ++ }
2217 ++ }
2218 ++
2219 ++ *dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) /
2220 ++ (ref_div * post_div * 10);
2221 ++ *fb_div_p = fb_div;
2222 ++ *frac_fb_div_p = frac_fb_div;
2223 ++ *ref_div_p = ref_div;
2224 ++ *post_div_p = post_div;
2225 ++ DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n",
2226 ++ *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div);
2227 ++}
2228 ++
2229 ++/* pre-avivo */
2230 + static inline uint32_t radeon_div(uint64_t n, uint32_t d)
2231 + {
2232 + uint64_t mod;
2233 +@@ -458,13 +567,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
2234 + return n;
2235 + }
2236 +
2237 +-void radeon_compute_pll(struct radeon_pll *pll,
2238 +- uint64_t freq,
2239 +- uint32_t *dot_clock_p,
2240 +- uint32_t *fb_div_p,
2241 +- uint32_t *frac_fb_div_p,
2242 +- uint32_t *ref_div_p,
2243 +- uint32_t *post_div_p)
2244 ++void radeon_compute_pll_legacy(struct radeon_pll *pll,
2245 ++ uint64_t freq,
2246 ++ uint32_t *dot_clock_p,
2247 ++ uint32_t *fb_div_p,
2248 ++ uint32_t *frac_fb_div_p,
2249 ++ uint32_t *ref_div_p,
2250 ++ uint32_t *post_div_p)
2251 + {
2252 + uint32_t min_ref_div = pll->min_ref_div;
2253 + uint32_t max_ref_div = pll->max_ref_div;
2254 +@@ -494,6 +603,9 @@ void radeon_compute_pll(struct radeon_pll *pll,
2255 + pll_out_max = pll->pll_out_max;
2256 + }
2257 +
2258 ++ if (pll_out_min > 64800)
2259 ++ pll_out_min = 64800;
2260 ++
2261 + if (pll->flags & RADEON_PLL_USE_REF_DIV)
2262 + min_ref_div = max_ref_div = pll->reference_div;
2263 + else {
2264 +@@ -517,7 +629,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
2265 + max_fractional_feed_div = pll->max_frac_feedback_div;
2266 + }
2267 +
2268 +- for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
2269 ++ for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
2270 + uint32_t ref_div;
2271 +
2272 + if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
2273 +@@ -633,6 +745,10 @@ void radeon_compute_pll(struct radeon_pll *pll,
2274 + *frac_fb_div_p = best_frac_feedback_div;
2275 + *ref_div_p = best_ref_div;
2276 + *post_div_p = best_post_div;
2277 ++ DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
2278 ++ freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div,
2279 ++ best_ref_div, best_post_div);
2280 ++
2281 + }
2282 +
2283 + static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
2284 +diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
2285 +index ace2e63..cf0638c 100644
2286 +--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
2287 ++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
2288 +@@ -778,9 +778,9 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
2289 + DRM_DEBUG_KMS("\n");
2290 +
2291 + if (!use_bios_divs) {
2292 +- radeon_compute_pll(pll, mode->clock,
2293 +- &freq, &feedback_div, &frac_fb_div,
2294 +- &reference_div, &post_divider);
2295 ++ radeon_compute_pll_legacy(pll, mode->clock,
2296 ++ &freq, &feedback_div, &frac_fb_div,
2297 ++ &reference_div, &post_divider);
2298 +
2299 + for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
2300 + if (post_div->divider == post_divider)
2301 +diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
2302 +index e301c6f..aa22570 100644
2303 +--- a/drivers/gpu/drm/radeon/radeon_mode.h
2304 ++++ b/drivers/gpu/drm/radeon/radeon_mode.h
2305 +@@ -149,6 +149,7 @@ struct radeon_tmds_pll {
2306 + #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
2307 + #define RADEON_PLL_USE_POST_DIV (1 << 12)
2308 + #define RADEON_PLL_IS_LCD (1 << 13)
2309 ++#define RADEON_PLL_PREFER_MINM_OVER_MAXP (1 << 14)
2310 +
2311 + struct radeon_pll {
2312 + /* reference frequency */
2313 +@@ -510,13 +511,21 @@ extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
2314 + struct radeon_atom_ss *ss,
2315 + int id, u32 clock);
2316 +
2317 +-extern void radeon_compute_pll(struct radeon_pll *pll,
2318 +- uint64_t freq,
2319 +- uint32_t *dot_clock_p,
2320 +- uint32_t *fb_div_p,
2321 +- uint32_t *frac_fb_div_p,
2322 +- uint32_t *ref_div_p,
2323 +- uint32_t *post_div_p);
2324 ++extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
2325 ++ uint64_t freq,
2326 ++ uint32_t *dot_clock_p,
2327 ++ uint32_t *fb_div_p,
2328 ++ uint32_t *frac_fb_div_p,
2329 ++ uint32_t *ref_div_p,
2330 ++ uint32_t *post_div_p);
2331 ++
2332 ++extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
2333 ++ u32 freq,
2334 ++ u32 *dot_clock_p,
2335 ++ u32 *fb_div_p,
2336 ++ u32 *frac_fb_div_p,
2337 ++ u32 *ref_div_p,
2338 ++ u32 *post_div_p);
2339 +
2340 + extern void radeon_setup_encoder_clones(struct drm_device *dev);
2341 +
2342 +diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
2343 +index c380c65..ace2b16 100644
2344 +--- a/drivers/gpu/vga/vgaarb.c
2345 ++++ b/drivers/gpu/vga/vgaarb.c
2346 +@@ -636,7 +636,7 @@ int vga_client_register(struct pci_dev *pdev, void *cookie,
2347 + void (*irq_set_state)(void *cookie, bool state),
2348 + unsigned int (*set_vga_decode)(void *cookie, bool decode))
2349 + {
2350 +- int ret = -1;
2351 ++ int ret = -ENODEV;
2352 + struct vga_device *vgadev;
2353 + unsigned long flags;
2354 +
2355 +diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
2356 +index b6598aa..87a5fd51 100644
2357 +--- a/drivers/hwmon/applesmc.c
2358 ++++ b/drivers/hwmon/applesmc.c
2359 +@@ -162,6 +162,10 @@ static const char *temperature_sensors_sets[][41] = {
2360 + /* Set 22: MacBook Pro 7,1 */
2361 + { "TB0T", "TB1T", "TB2T", "TC0D", "TC0P", "TN0D", "TN0P", "TN0S",
2362 + "TN1D", "TN1F", "TN1G", "TN1S", "Th1H", "Ts0P", "Ts0S", NULL },
2363 ++/* Set 23: MacBook Air 3,1 */
2364 ++ { "TB0T", "TB1T", "TB2T", "TC0D", "TC0E", "TC0P", "TC1E", "TCZ3",
2365 ++ "TCZ4", "TCZ5", "TG0E", "TG1E", "TG2E", "TGZ3", "TGZ4", "TGZ5",
2366 ++ "TH0F", "TH0O", "TM0P" },
2367 + };
2368 +
2369 + /* List of keys used to read/write fan speeds */
2370 +@@ -444,38 +448,22 @@ static int applesmc_read_motion_sensor(int index, s16* value)
2371 + }
2372 +
2373 + /*
2374 +- * applesmc_device_init - initialize the accelerometer. Returns zero on success
2375 +- * and negative error code on failure. Can sleep.
2376 ++ * applesmc_device_init - initialize the accelerometer. Can sleep.
2377 + */
2378 +-static int applesmc_device_init(void)
2379 ++static void applesmc_device_init(void)
2380 + {
2381 +- int total, ret = -ENXIO;
2382 ++ int total;
2383 + u8 buffer[2];
2384 +
2385 + if (!applesmc_accelerometer)
2386 +- return 0;
2387 ++ return;
2388 +
2389 + mutex_lock(&applesmc_lock);
2390 +
2391 + for (total = INIT_TIMEOUT_MSECS; total > 0; total -= INIT_WAIT_MSECS) {
2392 +- if (debug)
2393 +- printk(KERN_DEBUG "applesmc try %d\n", total);
2394 + if (!applesmc_read_key(MOTION_SENSOR_KEY, buffer, 2) &&
2395 +- (buffer[0] != 0x00 || buffer[1] != 0x00)) {
2396 +- if (total == INIT_TIMEOUT_MSECS) {
2397 +- printk(KERN_DEBUG "applesmc: device has"
2398 +- " already been initialized"
2399 +- " (0x%02x, 0x%02x).\n",
2400 +- buffer[0], buffer[1]);
2401 +- } else {
2402 +- printk(KERN_DEBUG "applesmc: device"
2403 +- " successfully initialized"
2404 +- " (0x%02x, 0x%02x).\n",
2405 +- buffer[0], buffer[1]);
2406 +- }
2407 +- ret = 0;
2408 ++ (buffer[0] != 0x00 || buffer[1] != 0x00))
2409 + goto out;
2410 +- }
2411 + buffer[0] = 0xe0;
2412 + buffer[1] = 0x00;
2413 + applesmc_write_key(MOTION_SENSOR_KEY, buffer, 2);
2414 +@@ -486,7 +474,6 @@ static int applesmc_device_init(void)
2415 +
2416 + out:
2417 + mutex_unlock(&applesmc_lock);
2418 +- return ret;
2419 + }
2420 +
2421 + /*
2422 +@@ -512,13 +499,8 @@ static int applesmc_get_fan_count(void)
2423 + /* Device model stuff */
2424 + static int applesmc_probe(struct platform_device *dev)
2425 + {
2426 +- int ret;
2427 +-
2428 +- ret = applesmc_device_init();
2429 +- if (ret)
2430 +- return ret;
2431 ++ applesmc_device_init();
2432 +
2433 +- printk(KERN_INFO "applesmc: device successfully initialized.\n");
2434 + return 0;
2435 + }
2436 +
2437 +@@ -535,9 +517,7 @@ static int applesmc_pm_resume(struct device *dev)
2438 + /* Reinitialize device on resume from hibernation */
2439 + static int applesmc_pm_restore(struct device *dev)
2440 + {
2441 +- int ret = applesmc_device_init();
2442 +- if (ret)
2443 +- return ret;
2444 ++ applesmc_device_init();
2445 + return applesmc_pm_resume(dev);
2446 + }
2447 +
2448 +@@ -1524,11 +1504,17 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
2449 + { .accelerometer = 1, .light = 1, .temperature_set = 21 },
2450 + /* MacBook Pro 7,1: accelerometer, backlight and temperature set 22 */
2451 + { .accelerometer = 1, .light = 1, .temperature_set = 22 },
2452 ++/* MacBook Air 3,1: accelerometer, backlight and temperature set 23 */
2453 ++ { .accelerometer = 0, .light = 0, .temperature_set = 23 },
2454 + };
2455 +
2456 + /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
2457 + * So we need to put "Apple MacBook Pro" before "Apple MacBook". */
2458 + static __initdata struct dmi_system_id applesmc_whitelist[] = {
2459 ++ { applesmc_dmi_match, "Apple MacBook Air 3", {
2460 ++ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
2461 ++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3") },
2462 ++ &applesmc_dmi_data[23]},
2463 + { applesmc_dmi_match, "Apple MacBook Air 2", {
2464 + DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
2465 + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir2") },
2466 +diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
2467 +index 776aeb3..508cb29 100644
2468 +--- a/drivers/hwmon/lm63.c
2469 ++++ b/drivers/hwmon/lm63.c
2470 +@@ -98,6 +98,9 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
2471 + * value, it uses signed 8-bit values with LSB = 1 degree Celsius.
2472 + * For remote temperature, low and high limits, it uses signed 11-bit values
2473 + * with LSB = 0.125 degree Celsius, left-justified in 16-bit registers.
2474 ++ * For LM64 the actual remote diode temperature is 16 degree Celsius higher
2475 ++ * than the register reading. Remote temperature setpoints have to be
2476 ++ * adapted accordingly.
2477 + */
2478 +
2479 + #define FAN_FROM_REG(reg) ((reg) == 0xFFFC || (reg) == 0 ? 0 : \
2480 +@@ -165,6 +168,8 @@ struct lm63_data {
2481 + struct mutex update_lock;
2482 + char valid; /* zero until following fields are valid */
2483 + unsigned long last_updated; /* in jiffies */
2484 ++ int kind;
2485 ++ int temp2_offset;
2486 +
2487 + /* registers values */
2488 + u8 config, config_fan;
2489 +@@ -247,16 +252,34 @@ static ssize_t show_pwm1_enable(struct device *dev, struct device_attribute *dum
2490 + return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2);
2491 + }
2492 +
2493 +-static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
2494 +- char *buf)
2495 ++/*
2496 ++ * There are 8bit registers for both local(temp1) and remote(temp2) sensor.
2497 ++ * For remote sensor registers temp2_offset has to be considered,
2498 ++ * for local sensor it must not.
2499 ++ * So we need separate 8bit accessors for local and remote sensor.
2500 ++ */
2501 ++static ssize_t show_local_temp8(struct device *dev,
2502 ++ struct device_attribute *devattr,
2503 ++ char *buf)
2504 + {
2505 + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
2506 + struct lm63_data *data = lm63_update_device(dev);
2507 + return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index]));
2508 + }
2509 +
2510 +-static ssize_t set_temp8(struct device *dev, struct device_attribute *dummy,
2511 +- const char *buf, size_t count)
2512 ++static ssize_t show_remote_temp8(struct device *dev,
2513 ++ struct device_attribute *devattr,
2514 ++ char *buf)
2515 ++{
2516 ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
2517 ++ struct lm63_data *data = lm63_update_device(dev);
2518 ++ return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])
2519 ++ + data->temp2_offset);
2520 ++}
2521 ++
2522 ++static ssize_t set_local_temp8(struct device *dev,
2523 ++ struct device_attribute *dummy,
2524 ++ const char *buf, size_t count)
2525 + {
2526 + struct i2c_client *client = to_i2c_client(dev);
2527 + struct lm63_data *data = i2c_get_clientdata(client);
2528 +@@ -274,7 +297,8 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
2529 + {
2530 + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
2531 + struct lm63_data *data = lm63_update_device(dev);
2532 +- return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index]));
2533 ++ return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index])
2534 ++ + data->temp2_offset);
2535 + }
2536 +
2537 + static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
2538 +@@ -294,7 +318,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
2539 + int nr = attr->index;
2540 +
2541 + mutex_lock(&data->update_lock);
2542 +- data->temp11[nr] = TEMP11_TO_REG(val);
2543 ++ data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset);
2544 + i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2],
2545 + data->temp11[nr] >> 8);
2546 + i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1],
2547 +@@ -310,6 +334,7 @@ static ssize_t show_temp2_crit_hyst(struct device *dev, struct device_attribute
2548 + {
2549 + struct lm63_data *data = lm63_update_device(dev);
2550 + return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2])
2551 ++ + data->temp2_offset
2552 + - TEMP8_FROM_REG(data->temp2_crit_hyst));
2553 + }
2554 +
2555 +@@ -324,7 +349,7 @@ static ssize_t set_temp2_crit_hyst(struct device *dev, struct device_attribute *
2556 + long hyst;
2557 +
2558 + mutex_lock(&data->update_lock);
2559 +- hyst = TEMP8_FROM_REG(data->temp8[2]) - val;
2560 ++ hyst = TEMP8_FROM_REG(data->temp8[2]) + data->temp2_offset - val;
2561 + i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST,
2562 + HYST_TO_REG(hyst));
2563 + mutex_unlock(&data->update_lock);
2564 +@@ -355,16 +380,21 @@ static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan,
2565 + static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1);
2566 + static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL);
2567 +
2568 +-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp8, NULL, 0);
2569 +-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8,
2570 +- set_temp8, 1);
2571 ++static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_local_temp8, NULL, 0);
2572 ++static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_local_temp8,
2573 ++ set_local_temp8, 1);
2574 +
2575 + static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0);
2576 + static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
2577 + set_temp11, 1);
2578 + static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
2579 + set_temp11, 2);
2580 +-static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp8, NULL, 2);
2581 ++/*
2582 ++ * On LM63, temp2_crit can be set only once, which should be job
2583 ++ * of the bootloader.
2584 ++ */
2585 ++static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8,
2586 ++ NULL, 2);
2587 + static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst,
2588 + set_temp2_crit_hyst);
2589 +
2590 +@@ -479,7 +509,12 @@ static int lm63_probe(struct i2c_client *new_client,
2591 + data->valid = 0;
2592 + mutex_init(&data->update_lock);
2593 +
2594 +- /* Initialize the LM63 chip */
2595 ++ /* Set the device type */
2596 ++ data->kind = id->driver_data;
2597 ++ if (data->kind == lm64)
2598 ++ data->temp2_offset = 16000;
2599 ++
2600 ++ /* Initialize chip */
2601 + lm63_init_client(new_client);
2602 +
2603 + /* Register sysfs hooks */
2604 +diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
2605 +index f397ce7..b2074e3 100644
2606 +--- a/drivers/hwmon/via686a.c
2607 ++++ b/drivers/hwmon/via686a.c
2608 +@@ -687,6 +687,13 @@ static int __devexit via686a_remove(struct platform_device *pdev)
2609 + return 0;
2610 + }
2611 +
2612 ++static void via686a_update_fan_div(struct via686a_data *data)
2613 ++{
2614 ++ int reg = via686a_read_value(data, VIA686A_REG_FANDIV);
2615 ++ data->fan_div[0] = (reg >> 4) & 0x03;
2616 ++ data->fan_div[1] = reg >> 6;
2617 ++}
2618 ++
2619 + static void __devinit via686a_init_device(struct via686a_data *data)
2620 + {
2621 + u8 reg;
2622 +@@ -700,6 +707,9 @@ static void __devinit via686a_init_device(struct via686a_data *data)
2623 + via686a_write_value(data, VIA686A_REG_TEMP_MODE,
2624 + (reg & ~VIA686A_TEMP_MODE_MASK)
2625 + | VIA686A_TEMP_MODE_CONTINUOUS);
2626 ++
2627 ++ /* Pre-read fan clock divisor values */
2628 ++ via686a_update_fan_div(data);
2629 + }
2630 +
2631 + static struct via686a_data *via686a_update_device(struct device *dev)
2632 +@@ -751,9 +761,7 @@ static struct via686a_data *via686a_update_device(struct device *dev)
2633 + (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
2634 + 0xc0) >> 6;
2635 +
2636 +- i = via686a_read_value(data, VIA686A_REG_FANDIV);
2637 +- data->fan_div[0] = (i >> 4) & 0x03;
2638 +- data->fan_div[1] = i >> 6;
2639 ++ via686a_update_fan_div(data);
2640 + data->alarms =
2641 + via686a_read_value(data,
2642 + VIA686A_REG_ALARM1) |
2643 +diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
2644 +index 6b4cc56..44bb9c2 100644
2645 +--- a/drivers/i2c/i2c-core.c
2646 ++++ b/drivers/i2c/i2c-core.c
2647 +@@ -1021,6 +1021,14 @@ static int i2c_do_del_adapter(struct i2c_driver *driver,
2648 + static int __unregister_client(struct device *dev, void *dummy)
2649 + {
2650 + struct i2c_client *client = i2c_verify_client(dev);
2651 ++ if (client && strcmp(client->name, "dummy"))
2652 ++ i2c_unregister_device(client);
2653 ++ return 0;
2654 ++}
2655 ++
2656 ++static int __unregister_dummy(struct device *dev, void *dummy)
2657 ++{
2658 ++ struct i2c_client *client = i2c_verify_client(dev);
2659 + if (client)
2660 + i2c_unregister_device(client);
2661 + return 0;
2662 +@@ -1075,8 +1083,12 @@ int i2c_del_adapter(struct i2c_adapter *adap)
2663 + mutex_unlock(&adap->userspace_clients_lock);
2664 +
2665 + /* Detach any active clients. This can't fail, thus we do not
2666 +- checking the returned value. */
2667 ++ * check the returned value. This is a two-pass process, because
2668 ++ * we can't remove the dummy devices during the first pass: they
2669 ++ * could have been instantiated by real devices wishing to clean
2670 ++ * them up properly, so we give them a chance to do that first. */
2671 + res = device_for_each_child(&adap->dev, NULL, __unregister_client);
2672 ++ res = device_for_each_child(&adap->dev, NULL, __unregister_dummy);
2673 +
2674 + #ifdef CONFIG_I2C_COMPAT
2675 + class_compat_remove_link(i2c_adapter_compat_class, &adap->dev,
2676 +diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
2677 +index c131d58..a1e141e 100644
2678 +--- a/drivers/idle/intel_idle.c
2679 ++++ b/drivers/idle/intel_idle.c
2680 +@@ -59,6 +59,8 @@
2681 + #include <linux/hrtimer.h> /* ktime_get_real() */
2682 + #include <trace/events/power.h>
2683 + #include <linux/sched.h>
2684 ++#include <linux/notifier.h>
2685 ++#include <linux/cpu.h>
2686 + #include <asm/mwait.h>
2687 +
2688 + #define INTEL_IDLE_VERSION "0.4"
2689 +@@ -73,6 +75,7 @@ static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;
2690 +
2691 + static unsigned int mwait_substates;
2692 +
2693 ++#define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF
2694 + /* Reliable LAPIC Timer States, bit 1 for C1 etc. */
2695 + static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */
2696 +
2697 +@@ -244,6 +247,35 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
2698 + return usec_delta;
2699 + }
2700 +
2701 ++static void __setup_broadcast_timer(void *arg)
2702 ++{
2703 ++ unsigned long reason = (unsigned long)arg;
2704 ++ int cpu = smp_processor_id();
2705 ++
2706 ++ reason = reason ?
2707 ++ CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
2708 ++
2709 ++ clockevents_notify(reason, &cpu);
2710 ++}
2711 ++
2712 ++static int setup_broadcast_cpuhp_notify(struct notifier_block *n,
2713 ++ unsigned long action, void *hcpu)
2714 ++{
2715 ++ int hotcpu = (unsigned long)hcpu;
2716 ++
2717 ++ switch (action & 0xf) {
2718 ++ case CPU_ONLINE:
2719 ++ smp_call_function_single(hotcpu, __setup_broadcast_timer,
2720 ++ (void *)true, 1);
2721 ++ break;
2722 ++ }
2723 ++ return NOTIFY_OK;
2724 ++}
2725 ++
2726 ++static struct notifier_block setup_broadcast_notifier = {
2727 ++ .notifier_call = setup_broadcast_cpuhp_notify,
2728 ++};
2729 ++
2730 + /*
2731 + * intel_idle_probe()
2732 + */
2733 +@@ -306,7 +338,11 @@ static int intel_idle_probe(void)
2734 + }
2735 +
2736 + if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
2737 +- lapic_timer_reliable_states = 0xFFFFFFFF;
2738 ++ lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
2739 ++ else {
2740 ++ smp_call_function(__setup_broadcast_timer, (void *)true, 1);
2741 ++ register_cpu_notifier(&setup_broadcast_notifier);
2742 ++ }
2743 +
2744 + pr_debug(PREFIX "v" INTEL_IDLE_VERSION
2745 + " model 0x%X\n", boot_cpu_data.x86_model);
2746 +@@ -429,6 +465,11 @@ static void __exit intel_idle_exit(void)
2747 + intel_idle_cpuidle_devices_uninit();
2748 + cpuidle_unregister_driver(&intel_idle_driver);
2749 +
2750 ++ if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
2751 ++ smp_call_function(__setup_broadcast_timer, (void *)false, 1);
2752 ++ unregister_cpu_notifier(&setup_broadcast_notifier);
2753 ++ }
2754 ++
2755 + return;
2756 + }
2757 +
2758 +diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
2759 +index 0dc62b1..8b00e6c 100644
2760 +--- a/drivers/infiniband/hw/cxgb4/cm.c
2761 ++++ b/drivers/infiniband/hw/cxgb4/cm.c
2762 +@@ -380,7 +380,7 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
2763 + 16)) | FW_WR_FLOWID(ep->hwtid));
2764 +
2765 + flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
2766 +- flowc->mnemval[0].val = cpu_to_be32(0);
2767 ++ flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
2768 + flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
2769 + flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
2770 + flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
2771 +diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
2772 +index 057cb25..8ae09e7 100644
2773 +--- a/drivers/infiniband/hw/cxgb4/qp.c
2774 ++++ b/drivers/infiniband/hw/cxgb4/qp.c
2775 +@@ -220,7 +220,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
2776 + V_FW_RI_RES_WR_DCAEN(0) |
2777 + V_FW_RI_RES_WR_DCACPU(0) |
2778 + V_FW_RI_RES_WR_FBMIN(2) |
2779 +- V_FW_RI_RES_WR_FBMAX(3) |
2780 ++ V_FW_RI_RES_WR_FBMAX(2) |
2781 + V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
2782 + V_FW_RI_RES_WR_CIDXFTHRESH(0) |
2783 + V_FW_RI_RES_WR_EQSIZE(eqsize));
2784 +@@ -243,7 +243,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
2785 + V_FW_RI_RES_WR_DCAEN(0) |
2786 + V_FW_RI_RES_WR_DCACPU(0) |
2787 + V_FW_RI_RES_WR_FBMIN(2) |
2788 +- V_FW_RI_RES_WR_FBMAX(3) |
2789 ++ V_FW_RI_RES_WR_FBMAX(2) |
2790 + V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
2791 + V_FW_RI_RES_WR_CIDXFTHRESH(0) |
2792 + V_FW_RI_RES_WR_EQSIZE(eqsize));
2793 +@@ -1029,7 +1029,6 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
2794 + wqe->cookie = (unsigned long) &ep->com.wr_wait;
2795 +
2796 + wqe->u.fini.type = FW_RI_TYPE_FINI;
2797 +- c4iw_init_wr_wait(&ep->com.wr_wait);
2798 + ret = c4iw_ofld_send(&rhp->rdev, skb);
2799 + if (ret)
2800 + goto out;
2801 +@@ -1125,7 +1124,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
2802 + if (qhp->attr.mpa_attr.initiator)
2803 + build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
2804 +
2805 +- c4iw_init_wr_wait(&qhp->ep->com.wr_wait);
2806 + ret = c4iw_ofld_send(&rhp->rdev, skb);
2807 + if (ret)
2808 + goto out;
2809 +diff --git a/drivers/input/input.c b/drivers/input/input.c
2810 +index db409d6..e8a8802 100644
2811 +--- a/drivers/input/input.c
2812 ++++ b/drivers/input/input.c
2813 +@@ -73,7 +73,6 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz)
2814 + * dev->event_lock held and interrupts disabled.
2815 + */
2816 + static void input_pass_event(struct input_dev *dev,
2817 +- struct input_handler *src_handler,
2818 + unsigned int type, unsigned int code, int value)
2819 + {
2820 + struct input_handler *handler;
2821 +@@ -92,15 +91,6 @@ static void input_pass_event(struct input_dev *dev,
2822 + continue;
2823 +
2824 + handler = handle->handler;
2825 +-
2826 +- /*
2827 +- * If this is the handler that injected this
2828 +- * particular event we want to skip it to avoid
2829 +- * filters firing again and again.
2830 +- */
2831 +- if (handler == src_handler)
2832 +- continue;
2833 +-
2834 + if (!handler->filter) {
2835 + if (filtered)
2836 + break;
2837 +@@ -130,7 +120,7 @@ static void input_repeat_key(unsigned long data)
2838 + if (test_bit(dev->repeat_key, dev->key) &&
2839 + is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
2840 +
2841 +- input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2);
2842 ++ input_pass_event(dev, EV_KEY, dev->repeat_key, 2);
2843 +
2844 + if (dev->sync) {
2845 + /*
2846 +@@ -139,7 +129,7 @@ static void input_repeat_key(unsigned long data)
2847 + * Otherwise assume that the driver will send
2848 + * SYN_REPORT once it's done.
2849 + */
2850 +- input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
2851 ++ input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
2852 + }
2853 +
2854 + if (dev->rep[REP_PERIOD])
2855 +@@ -172,7 +162,6 @@ static void input_stop_autorepeat(struct input_dev *dev)
2856 + #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
2857 +
2858 + static int input_handle_abs_event(struct input_dev *dev,
2859 +- struct input_handler *src_handler,
2860 + unsigned int code, int *pval)
2861 + {
2862 + bool is_mt_event;
2863 +@@ -216,15 +205,13 @@ static int input_handle_abs_event(struct input_dev *dev,
2864 + /* Flush pending "slot" event */
2865 + if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
2866 + input_abs_set_val(dev, ABS_MT_SLOT, dev->slot);
2867 +- input_pass_event(dev, src_handler,
2868 +- EV_ABS, ABS_MT_SLOT, dev->slot);
2869 ++ input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot);
2870 + }
2871 +
2872 + return INPUT_PASS_TO_HANDLERS;
2873 + }
2874 +
2875 + static void input_handle_event(struct input_dev *dev,
2876 +- struct input_handler *src_handler,
2877 + unsigned int type, unsigned int code, int value)
2878 + {
2879 + int disposition = INPUT_IGNORE_EVENT;
2880 +@@ -277,8 +264,7 @@ static void input_handle_event(struct input_dev *dev,
2881 +
2882 + case EV_ABS:
2883 + if (is_event_supported(code, dev->absbit, ABS_MAX))
2884 +- disposition = input_handle_abs_event(dev, src_handler,
2885 +- code, &value);
2886 ++ disposition = input_handle_abs_event(dev, code, &value);
2887 +
2888 + break;
2889 +
2890 +@@ -336,7 +322,7 @@ static void input_handle_event(struct input_dev *dev,
2891 + dev->event(dev, type, code, value);
2892 +
2893 + if (disposition & INPUT_PASS_TO_HANDLERS)
2894 +- input_pass_event(dev, src_handler, type, code, value);
2895 ++ input_pass_event(dev, type, code, value);
2896 + }
2897 +
2898 + /**
2899 +@@ -365,7 +351,7 @@ void input_event(struct input_dev *dev,
2900 +
2901 + spin_lock_irqsave(&dev->event_lock, flags);
2902 + add_input_randomness(type, code, value);
2903 +- input_handle_event(dev, NULL, type, code, value);
2904 ++ input_handle_event(dev, type, code, value);
2905 + spin_unlock_irqrestore(&dev->event_lock, flags);
2906 + }
2907 + }
2908 +@@ -395,8 +381,7 @@ void input_inject_event(struct input_handle *handle,
2909 + rcu_read_lock();
2910 + grab = rcu_dereference(dev->grab);
2911 + if (!grab || grab == handle)
2912 +- input_handle_event(dev, handle->handler,
2913 +- type, code, value);
2914 ++ input_handle_event(dev, type, code, value);
2915 + rcu_read_unlock();
2916 +
2917 + spin_unlock_irqrestore(&dev->event_lock, flags);
2918 +@@ -609,10 +594,10 @@ static void input_dev_release_keys(struct input_dev *dev)
2919 + for (code = 0; code <= KEY_MAX; code++) {
2920 + if (is_event_supported(code, dev->keybit, KEY_MAX) &&
2921 + __test_and_clear_bit(code, dev->key)) {
2922 +- input_pass_event(dev, NULL, EV_KEY, code, 0);
2923 ++ input_pass_event(dev, EV_KEY, code, 0);
2924 + }
2925 + }
2926 +- input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
2927 ++ input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
2928 + }
2929 + }
2930 +
2931 +@@ -887,9 +872,9 @@ int input_set_keycode(struct input_dev *dev,
2932 + !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
2933 + __test_and_clear_bit(old_keycode, dev->key)) {
2934 +
2935 +- input_pass_event(dev, NULL, EV_KEY, old_keycode, 0);
2936 ++ input_pass_event(dev, EV_KEY, old_keycode, 0);
2937 + if (dev->sync)
2938 +- input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
2939 ++ input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
2940 + }
2941 +
2942 + out:
2943 +diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
2944 +index b952317..ee82851 100644
2945 +--- a/drivers/input/mouse/bcm5974.c
2946 ++++ b/drivers/input/mouse/bcm5974.c
2947 +@@ -55,6 +55,14 @@
2948 + #define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236
2949 + #define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237
2950 + #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238
2951 ++/* MacbookAir3,2 (unibody), aka wellspring5 */
2952 ++#define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI 0x023f
2953 ++#define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO 0x0240
2954 ++#define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS 0x0241
2955 ++/* MacbookAir3,1 (unibody), aka wellspring4 */
2956 ++#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242
2957 ++#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243
2958 ++#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244
2959 +
2960 + #define BCM5974_DEVICE(prod) { \
2961 + .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
2962 +@@ -80,6 +88,14 @@ static const struct usb_device_id bcm5974_table[] = {
2963 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI),
2964 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ISO),
2965 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
2966 ++ /* MacbookAir3,2 */
2967 ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI),
2968 ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ISO),
2969 ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_JIS),
2970 ++ /* MacbookAir3,1 */
2971 ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI),
2972 ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO),
2973 ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS),
2974 + /* Terminating entry */
2975 + {}
2976 + };
2977 +@@ -234,6 +250,30 @@ static const struct bcm5974_config bcm5974_config_table[] = {
2978 + { DIM_X, DIM_X / SN_COORD, -4460, 5166 },
2979 + { DIM_Y, DIM_Y / SN_COORD, -75, 6700 }
2980 + },
2981 ++ {
2982 ++ USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI,
2983 ++ USB_DEVICE_ID_APPLE_WELLSPRING4_ISO,
2984 ++ USB_DEVICE_ID_APPLE_WELLSPRING4_JIS,
2985 ++ HAS_INTEGRATED_BUTTON,
2986 ++ 0x84, sizeof(struct bt_data),
2987 ++ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
2988 ++ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
2989 ++ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
2990 ++ { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
2991 ++ { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
2992 ++ },
2993 ++ {
2994 ++ USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI,
2995 ++ USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO,
2996 ++ USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS,
2997 ++ HAS_INTEGRATED_BUTTON,
2998 ++ 0x84, sizeof(struct bt_data),
2999 ++ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
3000 ++ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
3001 ++ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
3002 ++ { DIM_X, DIM_X / SN_COORD, -4616, 5112 },
3003 ++ { DIM_Y, DIM_Y / SN_COORD, -142, 5234 }
3004 ++ },
3005 + {}
3006 + };
3007 +
3008 +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
3009 +index a5475b5..b04dd27 100644
3010 +--- a/drivers/input/serio/i8042-x86ia64io.h
3011 ++++ b/drivers/input/serio/i8042-x86ia64io.h
3012 +@@ -424,6 +424,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
3013 + DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
3014 + },
3015 + },
3016 ++ {
3017 ++ /* Dell Vostro V13 */
3018 ++ .matches = {
3019 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
3020 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
3021 ++ },
3022 ++ },
3023 + { }
3024 + };
3025 +
3026 +@@ -545,6 +552,17 @@ static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
3027 + };
3028 + #endif
3029 +
3030 ++static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
3031 ++ {
3032 ++ /* Dell Vostro V13 */
3033 ++ .matches = {
3034 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
3035 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
3036 ++ },
3037 ++ },
3038 ++ { }
3039 ++};
3040 ++
3041 + /*
3042 + * Some Wistron based laptops need us to explicitly enable the 'Dritek
3043 + * keyboard extension' to make their extra keys start generating scancodes.
3044 +@@ -897,6 +915,9 @@ static int __init i8042_platform_init(void)
3045 + if (dmi_check_system(i8042_dmi_nomux_table))
3046 + i8042_nomux = true;
3047 +
3048 ++ if (dmi_check_system(i8042_dmi_notimeout_table))
3049 ++ i8042_notimeout = true;
3050 ++
3051 + if (dmi_check_system(i8042_dmi_dritek_table))
3052 + i8042_dritek = true;
3053 + #endif /* CONFIG_X86 */
3054 +diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
3055 +index 18db5a8..039037d 100644
3056 +--- a/drivers/input/serio/i8042.c
3057 ++++ b/drivers/input/serio/i8042.c
3058 +@@ -61,6 +61,10 @@ static bool i8042_noloop;
3059 + module_param_named(noloop, i8042_noloop, bool, 0);
3060 + MODULE_PARM_DESC(noloop, "Disable the AUX Loopback command while probing for the AUX port");
3061 +
3062 ++static bool i8042_notimeout;
3063 ++module_param_named(notimeout, i8042_notimeout, bool, 0);
3064 ++MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
3065 ++
3066 + #ifdef CONFIG_X86
3067 + static bool i8042_dritek;
3068 + module_param_named(dritek, i8042_dritek, bool, 0);
3069 +@@ -503,7 +507,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
3070 + } else {
3071 +
3072 + dfl = ((str & I8042_STR_PARITY) ? SERIO_PARITY : 0) |
3073 +- ((str & I8042_STR_TIMEOUT) ? SERIO_TIMEOUT : 0);
3074 ++ ((str & I8042_STR_TIMEOUT && !i8042_notimeout) ? SERIO_TIMEOUT : 0);
3075 +
3076 + port_no = (str & I8042_STR_AUXDATA) ?
3077 + I8042_AUX_PORT_NO : I8042_KBD_PORT_NO;
3078 +diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
3079 +index 487ecda..406091f 100644
3080 +--- a/drivers/md/dm-mpath.c
3081 ++++ b/drivers/md/dm-mpath.c
3082 +@@ -33,7 +33,6 @@ struct pgpath {
3083 + unsigned fail_count; /* Cumulative failure count */
3084 +
3085 + struct dm_path path;
3086 +- struct work_struct deactivate_path;
3087 + struct work_struct activate_path;
3088 + };
3089 +
3090 +@@ -116,7 +115,6 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
3091 + static void process_queued_ios(struct work_struct *work);
3092 + static void trigger_event(struct work_struct *work);
3093 + static void activate_path(struct work_struct *work);
3094 +-static void deactivate_path(struct work_struct *work);
3095 +
3096 +
3097 + /*-----------------------------------------------
3098 +@@ -129,7 +127,6 @@ static struct pgpath *alloc_pgpath(void)
3099 +
3100 + if (pgpath) {
3101 + pgpath->is_active = 1;
3102 +- INIT_WORK(&pgpath->deactivate_path, deactivate_path);
3103 + INIT_WORK(&pgpath->activate_path, activate_path);
3104 + }
3105 +
3106 +@@ -141,14 +138,6 @@ static void free_pgpath(struct pgpath *pgpath)
3107 + kfree(pgpath);
3108 + }
3109 +
3110 +-static void deactivate_path(struct work_struct *work)
3111 +-{
3112 +- struct pgpath *pgpath =
3113 +- container_of(work, struct pgpath, deactivate_path);
3114 +-
3115 +- blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
3116 +-}
3117 +-
3118 + static struct priority_group *alloc_priority_group(void)
3119 + {
3120 + struct priority_group *pg;
3121 +@@ -995,7 +984,6 @@ static int fail_path(struct pgpath *pgpath)
3122 + pgpath->path.dev->name, m->nr_valid_paths);
3123 +
3124 + schedule_work(&m->trigger_event);
3125 +- queue_work(kmultipathd, &pgpath->deactivate_path);
3126 +
3127 + out:
3128 + spin_unlock_irqrestore(&m->lock, flags);
3129 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
3130 +index 7cb1352..81cb2f5 100644
3131 +--- a/drivers/md/dm.c
3132 ++++ b/drivers/md/dm.c
3133 +@@ -1992,13 +1992,14 @@ static void event_callback(void *context)
3134 + wake_up(&md->eventq);
3135 + }
3136 +
3137 ++/*
3138 ++ * Protected by md->suspend_lock obtained by dm_swap_table().
3139 ++ */
3140 + static void __set_size(struct mapped_device *md, sector_t size)
3141 + {
3142 + set_capacity(md->disk, size);
3143 +
3144 +- mutex_lock(&md->bdev->bd_inode->i_mutex);
3145 + i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
3146 +- mutex_unlock(&md->bdev->bd_inode->i_mutex);
3147 + }
3148 +
3149 + /*
3150 +diff --git a/drivers/md/md.c b/drivers/md/md.c
3151 +index 175c424..0e5a483 100644
3152 +--- a/drivers/md/md.c
3153 ++++ b/drivers/md/md.c
3154 +@@ -287,11 +287,14 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
3155 + mddev_t *mddev = q->queuedata;
3156 + int rv;
3157 + int cpu;
3158 ++ unsigned int sectors;
3159 +
3160 +- if (mddev == NULL || mddev->pers == NULL) {
3161 ++ if (mddev == NULL || mddev->pers == NULL
3162 ++ || !mddev->ready) {
3163 + bio_io_error(bio);
3164 + return 0;
3165 + }
3166 ++ smp_rmb(); /* Ensure implications of 'active' are visible */
3167 + rcu_read_lock();
3168 + if (mddev->suspended) {
3169 + DEFINE_WAIT(__wait);
3170 +@@ -309,12 +312,16 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
3171 + atomic_inc(&mddev->active_io);
3172 + rcu_read_unlock();
3173 +
3174 ++ /*
3175 ++ * save the sectors now since our bio can
3176 ++ * go away inside make_request
3177 ++ */
3178 ++ sectors = bio_sectors(bio);
3179 + rv = mddev->pers->make_request(mddev, bio);
3180 +
3181 + cpu = part_stat_lock();
3182 + part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
3183 +- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
3184 +- bio_sectors(bio));
3185 ++ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
3186 + part_stat_unlock();
3187 +
3188 + if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
3189 +@@ -3115,7 +3122,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
3190 + char nm[20];
3191 + if (rdev->raid_disk < 0)
3192 + continue;
3193 +- if (rdev->new_raid_disk > mddev->raid_disks)
3194 ++ if (rdev->new_raid_disk >= mddev->raid_disks)
3195 + rdev->new_raid_disk = -1;
3196 + if (rdev->new_raid_disk == rdev->raid_disk)
3197 + continue;
3198 +@@ -4564,7 +4571,8 @@ int md_run(mddev_t *mddev)
3199 + mddev->safemode_timer.data = (unsigned long) mddev;
3200 + mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
3201 + mddev->in_sync = 1;
3202 +-
3203 ++ smp_wmb();
3204 ++ mddev->ready = 1;
3205 + list_for_each_entry(rdev, &mddev->disks, same_set)
3206 + if (rdev->raid_disk >= 0) {
3207 + char nm[20];
3208 +@@ -4725,6 +4733,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
3209 +
3210 + void md_stop(mddev_t *mddev)
3211 + {
3212 ++ mddev->ready = 0;
3213 + mddev->pers->stop(mddev);
3214 + if (mddev->pers->sync_request && mddev->to_remove == NULL)
3215 + mddev->to_remove = &md_redundancy_group;
3216 +@@ -5159,9 +5168,10 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
3217 + /* set saved_raid_disk if appropriate */
3218 + if (!mddev->persistent) {
3219 + if (info->state & (1<<MD_DISK_SYNC) &&
3220 +- info->raid_disk < mddev->raid_disks)
3221 ++ info->raid_disk < mddev->raid_disks) {
3222 + rdev->raid_disk = info->raid_disk;
3223 +- else
3224 ++ set_bit(In_sync, &rdev->flags);
3225 ++ } else
3226 + rdev->raid_disk = -1;
3227 + } else
3228 + super_types[mddev->major_version].
3229 +@@ -6041,7 +6051,8 @@ static int md_thread(void * arg)
3230 + || kthread_should_stop(),
3231 + thread->timeout);
3232 +
3233 +- if (test_and_clear_bit(THREAD_WAKEUP, &thread->flags))
3234 ++ clear_bit(THREAD_WAKEUP, &thread->flags);
3235 ++ if (!kthread_should_stop())
3236 + thread->run(thread->mddev);
3237 + }
3238 +
3239 +diff --git a/drivers/md/md.h b/drivers/md/md.h
3240 +index d05bab5..a161283 100644
3241 +--- a/drivers/md/md.h
3242 ++++ b/drivers/md/md.h
3243 +@@ -148,7 +148,8 @@ struct mddev_s
3244 + * are happening, so run/
3245 + * takeover/stop are not safe
3246 + */
3247 +-
3248 ++ int ready; /* See when safe to pass
3249 ++ * IO requests down */
3250 + struct gendisk *gendisk;
3251 +
3252 + struct kobject kobj;
3253 +diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c
3254 +index f60107c..c4645d7 100644
3255 +--- a/drivers/media/IR/ir-keytable.c
3256 ++++ b/drivers/media/IR/ir-keytable.c
3257 +@@ -374,21 +374,27 @@ static int ir_getkeycode(struct input_dev *dev,
3258 + index = ir_lookup_by_scancode(rc_tab, scancode);
3259 + }
3260 +
3261 +- if (index >= rc_tab->len) {
3262 +- if (!(ke->flags & INPUT_KEYMAP_BY_INDEX))
3263 +- IR_dprintk(1, "unknown key for scancode 0x%04x\n",
3264 +- scancode);
3265 ++ if (index < rc_tab->len) {
3266 ++ entry = &rc_tab->scan[index];
3267 ++
3268 ++ ke->index = index;
3269 ++ ke->keycode = entry->keycode;
3270 ++ ke->len = sizeof(entry->scancode);
3271 ++ memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode));
3272 ++
3273 ++ } else if (!(ke->flags & INPUT_KEYMAP_BY_INDEX)) {
3274 ++ /*
3275 ++ * We do not really know the valid range of scancodes
3276 ++ * so let's respond with KEY_RESERVED to anything we
3277 ++ * do not have mapping for [yet].
3278 ++ */
3279 ++ ke->index = index;
3280 ++ ke->keycode = KEY_RESERVED;
3281 ++ } else {
3282 + retval = -EINVAL;
3283 + goto out;
3284 + }
3285 +
3286 +- entry = &rc_tab->scan[index];
3287 +-
3288 +- ke->index = index;
3289 +- ke->keycode = entry->keycode;
3290 +- ke->len = sizeof(entry->scancode);
3291 +- memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode));
3292 +-
3293 + retval = 0;
3294 +
3295 + out:
3296 +diff --git a/drivers/media/dvb/frontends/ix2505v.c b/drivers/media/dvb/frontends/ix2505v.c
3297 +index 55f2eba..6360c68 100644
3298 +--- a/drivers/media/dvb/frontends/ix2505v.c
3299 ++++ b/drivers/media/dvb/frontends/ix2505v.c
3300 +@@ -72,7 +72,7 @@ static int ix2505v_read_status_reg(struct ix2505v_state *state)
3301 + ret = i2c_transfer(state->i2c, msg, 1);
3302 + deb_i2c("Read %s ", __func__);
3303 +
3304 +- return (ret = 1) ? (int) b2[0] : -1;
3305 ++ return (ret == 1) ? (int) b2[0] : -1;
3306 + }
3307 +
3308 + static int ix2505v_write(struct ix2505v_state *state, u8 buf[], u8 count)
3309 +diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
3310 +index 05e832f..6cc5d13 100644
3311 +--- a/drivers/media/radio/radio-aimslab.c
3312 ++++ b/drivers/media/radio/radio-aimslab.c
3313 +@@ -31,7 +31,6 @@
3314 + #include <linux/module.h> /* Modules */
3315 + #include <linux/init.h> /* Initdata */
3316 + #include <linux/ioport.h> /* request_region */
3317 +-#include <linux/delay.h> /* udelay */
3318 + #include <linux/videodev2.h> /* kernel radio structs */
3319 + #include <linux/version.h> /* for KERNEL_VERSION MACRO */
3320 + #include <linux/io.h> /* outb, outb_p */
3321 +@@ -71,27 +70,17 @@ static struct rtrack rtrack_card;
3322 +
3323 + /* local things */
3324 +
3325 +-static void sleep_delay(long n)
3326 +-{
3327 +- /* Sleep nicely for 'n' uS */
3328 +- int d = n / msecs_to_jiffies(1000);
3329 +- if (!d)
3330 +- udelay(n);
3331 +- else
3332 +- msleep(jiffies_to_msecs(d));
3333 +-}
3334 +-
3335 + static void rt_decvol(struct rtrack *rt)
3336 + {
3337 + outb(0x58, rt->io); /* volume down + sigstr + on */
3338 +- sleep_delay(100000);
3339 ++ msleep(100);
3340 + outb(0xd8, rt->io); /* volume steady + sigstr + on */
3341 + }
3342 +
3343 + static void rt_incvol(struct rtrack *rt)
3344 + {
3345 + outb(0x98, rt->io); /* volume up + sigstr + on */
3346 +- sleep_delay(100000);
3347 ++ msleep(100);
3348 + outb(0xd8, rt->io); /* volume steady + sigstr + on */
3349 + }
3350 +
3351 +@@ -120,7 +109,7 @@ static int rt_setvol(struct rtrack *rt, int vol)
3352 +
3353 + if (vol == 0) { /* volume = 0 means mute the card */
3354 + outb(0x48, rt->io); /* volume down but still "on" */
3355 +- sleep_delay(2000000); /* make sure it's totally down */
3356 ++ msleep(2000); /* make sure it's totally down */
3357 + outb(0xd0, rt->io); /* volume steady, off */
3358 + rt->curvol = 0; /* track the volume state! */
3359 + mutex_unlock(&rt->lock);
3360 +@@ -155,7 +144,7 @@ static void send_0_byte(struct rtrack *rt)
3361 + outb_p(128+64+16+8+ 1, rt->io); /* on + wr-enable + data low */
3362 + outb_p(128+64+16+8+2+1, rt->io); /* clock */
3363 + }
3364 +- sleep_delay(1000);
3365 ++ msleep(1);
3366 + }
3367 +
3368 + static void send_1_byte(struct rtrack *rt)
3369 +@@ -169,7 +158,7 @@ static void send_1_byte(struct rtrack *rt)
3370 + outb_p(128+64+16+8+4+2+1, rt->io); /* clock */
3371 + }
3372 +
3373 +- sleep_delay(1000);
3374 ++ msleep(1);
3375 + }
3376 +
3377 + static int rt_setfreq(struct rtrack *rt, unsigned long freq)
3378 +@@ -420,7 +409,7 @@ static int __init rtrack_init(void)
3379 +
3380 + /* this ensures that the volume is all the way down */
3381 + outb(0x48, rt->io); /* volume down but still "on" */
3382 +- sleep_delay(2000000); /* make sure it's totally down */
3383 ++ msleep(2000); /* make sure it's totally down */
3384 + outb(0xc0, rt->io); /* steady volume, mute card */
3385 +
3386 + if (video_register_device(&rt->vdev, VFL_TYPE_RADIO, radio_nr) < 0) {
3387 +diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
3388 +index f7e9168..2637f6f 100644
3389 +--- a/drivers/media/video/em28xx/em28xx-cards.c
3390 ++++ b/drivers/media/video/em28xx/em28xx-cards.c
3391 +@@ -1633,11 +1633,11 @@ struct em28xx_board em28xx_boards[] = {
3392 + .input = { {
3393 + .type = EM28XX_VMUX_COMPOSITE1,
3394 + .vmux = SAA7115_COMPOSITE0,
3395 +- .amux = EM28XX_AMUX_VIDEO2,
3396 ++ .amux = EM28XX_AMUX_LINE_IN,
3397 + }, {
3398 + .type = EM28XX_VMUX_SVIDEO,
3399 + .vmux = SAA7115_SVIDEO3,
3400 +- .amux = EM28XX_AMUX_VIDEO2,
3401 ++ .amux = EM28XX_AMUX_LINE_IN,
3402 + } },
3403 + },
3404 + [EM2860_BOARD_TERRATEC_AV350] = {
3405 +diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
3406 +index bac7d62..0371bf5 100644
3407 +--- a/drivers/mmc/host/bfin_sdh.c
3408 ++++ b/drivers/mmc/host/bfin_sdh.c
3409 +@@ -462,7 +462,7 @@ static int __devinit sdh_probe(struct platform_device *pdev)
3410 + goto out;
3411 + }
3412 +
3413 +- mmc = mmc_alloc_host(sizeof(*mmc), &pdev->dev);
3414 ++ mmc = mmc_alloc_host(sizeof(struct sdh_host), &pdev->dev);
3415 + if (!mmc) {
3416 + ret = -ENOMEM;
3417 + goto out;
3418 +diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
3419 +index 17f8518..ea2c288 100644
3420 +--- a/drivers/mtd/nand/pxa3xx_nand.c
3421 ++++ b/drivers/mtd/nand/pxa3xx_nand.c
3422 +@@ -885,6 +885,7 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
3423 + /* set info fields needed to __readid */
3424 + info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
3425 + info->reg_ndcr = ndcr;
3426 ++ info->cmdset = &default_cmdset;
3427 +
3428 + if (__readid(info, &id))
3429 + return -ENODEV;
3430 +@@ -915,7 +916,6 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
3431 +
3432 + info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
3433 + info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
3434 +- info->cmdset = &default_cmdset;
3435 +
3436 + return 0;
3437 + }
3438 +diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
3439 +index 4d62f7b..b3b251e 100644
3440 +--- a/drivers/net/e1000/e1000_main.c
3441 ++++ b/drivers/net/e1000/e1000_main.c
3442 +@@ -971,11 +971,13 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
3443 + */
3444 + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
3445 + pci_using_dac = 1;
3446 +- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
3447 +- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
3448 + } else {
3449 +- pr_err("No usable DMA config, aborting\n");
3450 +- goto err_dma;
3451 ++ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3452 ++ if (err) {
3453 ++ pr_err("No usable DMA config, aborting\n");
3454 ++ goto err_dma;
3455 ++ }
3456 ++ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
3457 + }
3458 +
3459 + netdev->netdev_ops = &e1000_netdev_ops;
3460 +diff --git a/drivers/net/fec.c b/drivers/net/fec.c
3461 +index cce32d4..52e9ca8 100644
3462 +--- a/drivers/net/fec.c
3463 ++++ b/drivers/net/fec.c
3464 +@@ -651,8 +651,8 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
3465 + fep->mii_timeout = 0;
3466 + init_completion(&fep->mdio_done);
3467 +
3468 +- /* start a read op */
3469 +- writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
3470 ++ /* start a write op */
3471 ++ writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
3472 + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
3473 + FEC_MMFR_TA | FEC_MMFR_DATA(value),
3474 + fep->hwp + FEC_MII_DATA);
3475 +diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
3476 +index eee0b29..57d747a 100644
3477 +--- a/drivers/net/ixgbe/ixgbe_main.c
3478 ++++ b/drivers/net/ixgbe/ixgbe_main.c
3479 +@@ -2912,9 +2912,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3480 + u32 mhadd, hlreg0;
3481 +
3482 + /* Decide whether to use packet split mode or not */
3483 ++ /* On by default */
3484 ++ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
3485 ++
3486 + /* Do not use packet split if we're in SR-IOV Mode */
3487 +- if (!adapter->num_vfs)
3488 +- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
3489 ++ if (adapter->num_vfs)
3490 ++ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3491 ++
3492 ++ /* Disable packet split due to 82599 erratum #45 */
3493 ++ if (hw->mac.type == ixgbe_mac_82599EB)
3494 ++ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3495 +
3496 + /* Set the RX buffer length according to the mode */
3497 + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
3498 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
3499 +index b6d4028..c1689ba 100644
3500 +--- a/drivers/net/virtio_net.c
3501 ++++ b/drivers/net/virtio_net.c
3502 +@@ -446,6 +446,20 @@ static void skb_recv_done(struct virtqueue *rvq)
3503 + }
3504 + }
3505 +
3506 ++static void virtnet_napi_enable(struct virtnet_info *vi)
3507 ++{
3508 ++ napi_enable(&vi->napi);
3509 ++
3510 ++ /* If all buffers were filled by other side before we napi_enabled, we
3511 ++ * won't get another interrupt, so process any outstanding packets
3512 ++ * now. virtnet_poll wants re-enable the queue, so we disable here.
3513 ++ * We synchronize against interrupts via NAPI_STATE_SCHED */
3514 ++ if (napi_schedule_prep(&vi->napi)) {
3515 ++ virtqueue_disable_cb(vi->rvq);
3516 ++ __napi_schedule(&vi->napi);
3517 ++ }
3518 ++}
3519 ++
3520 + static void refill_work(struct work_struct *work)
3521 + {
3522 + struct virtnet_info *vi;
3523 +@@ -454,7 +468,7 @@ static void refill_work(struct work_struct *work)
3524 + vi = container_of(work, struct virtnet_info, refill.work);
3525 + napi_disable(&vi->napi);
3526 + still_empty = !try_fill_recv(vi, GFP_KERNEL);
3527 +- napi_enable(&vi->napi);
3528 ++ virtnet_napi_enable(vi);
3529 +
3530 + /* In theory, this can happen: if we don't get any buffers in
3531 + * we will *never* try to fill again. */
3532 +@@ -638,16 +652,7 @@ static int virtnet_open(struct net_device *dev)
3533 + {
3534 + struct virtnet_info *vi = netdev_priv(dev);
3535 +
3536 +- napi_enable(&vi->napi);
3537 +-
3538 +- /* If all buffers were filled by other side before we napi_enabled, we
3539 +- * won't get another interrupt, so process any outstanding packets
3540 +- * now. virtnet_poll wants re-enable the queue, so we disable here.
3541 +- * We synchronize against interrupts via NAPI_STATE_SCHED */
3542 +- if (napi_schedule_prep(&vi->napi)) {
3543 +- virtqueue_disable_cb(vi->rvq);
3544 +- __napi_schedule(&vi->napi);
3545 +- }
3546 ++ virtnet_napi_enable(vi);
3547 + return 0;
3548 + }
3549 +
3550 +diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
3551 +index 15f62cd..9b6ba23 100644
3552 +--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
3553 ++++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
3554 +@@ -681,10 +681,6 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
3555 +
3556 + /* Do NF cal only at longer intervals */
3557 + if (longcal || nfcal_pending) {
3558 +- /* Do periodic PAOffset Cal */
3559 +- ar9002_hw_pa_cal(ah, false);
3560 +- ar9002_hw_olc_temp_compensation(ah);
3561 +-
3562 + /*
3563 + * Get the value from the previous NF cal and update
3564 + * history buffer.
3565 +@@ -699,8 +695,12 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
3566 + ath9k_hw_loadnf(ah, ah->curchan);
3567 + }
3568 +
3569 +- if (longcal)
3570 ++ if (longcal) {
3571 + ath9k_hw_start_nfcal(ah, false);
3572 ++ /* Do periodic PAOffset Cal */
3573 ++ ar9002_hw_pa_cal(ah, false);
3574 ++ ar9002_hw_olc_temp_compensation(ah);
3575 ++ }
3576 + }
3577 +
3578 + return iscaldone;
3579 +diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
3580 +index 48261b7..2528b29 100644
3581 +--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
3582 ++++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
3583 +@@ -444,9 +444,8 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
3584 + }
3585 +
3586 + /* WAR for ASPM system hang */
3587 +- if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) {
3588 ++ if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
3589 + val |= (AR_WA_BIT6 | AR_WA_BIT7);
3590 +- }
3591 +
3592 + if (AR_SREV_9285E_20(ah))
3593 + val |= AR_WA_BIT23;
3594 +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
3595 +index a7b82f0..aa355df 100644
3596 +--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
3597 ++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
3598 +@@ -22,12 +22,14 @@
3599 + #define COMP_CKSUM_LEN 2
3600 +
3601 + #define AR_CH0_TOP (0x00016288)
3602 +-#define AR_CH0_TOP_XPABIASLVL (0x3)
3603 ++#define AR_CH0_TOP_XPABIASLVL (0x300)
3604 + #define AR_CH0_TOP_XPABIASLVL_S (8)
3605 +
3606 + #define AR_CH0_THERM (0x00016290)
3607 +-#define AR_CH0_THERM_SPARE (0x3f)
3608 +-#define AR_CH0_THERM_SPARE_S (0)
3609 ++#define AR_CH0_THERM_XPABIASLVL_MSB 0x3
3610 ++#define AR_CH0_THERM_XPABIASLVL_MSB_S 0
3611 ++#define AR_CH0_THERM_XPASHORT2GND 0x4
3612 ++#define AR_CH0_THERM_XPASHORT2GND_S 2
3613 +
3614 + #define AR_SWITCH_TABLE_COM_ALL (0xffff)
3615 + #define AR_SWITCH_TABLE_COM_ALL_S (0)
3616 +@@ -995,9 +997,9 @@ static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz)
3617 + static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
3618 + {
3619 + int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);
3620 +- REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, (bias & 0x3));
3621 +- REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_SPARE,
3622 +- ((bias >> 2) & 0x3));
3623 ++ REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
3624 ++ REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_XPABIASLVL_MSB, bias >> 2);
3625 ++ REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_XPASHORT2GND, 1);
3626 + }
3627 +
3628 + static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
3629 +diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
3630 +index 0963071..c0b60ce 100644
3631 +--- a/drivers/net/wireless/ath/ath9k/ath9k.h
3632 ++++ b/drivers/net/wireless/ath/ath9k/ath9k.h
3633 +@@ -178,8 +178,8 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
3634 +
3635 + /* returns delimiter padding required given the packet length */
3636 + #define ATH_AGGR_GET_NDELIM(_len) \
3637 +- (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
3638 +- (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
3639 ++ (((_len) >= ATH_AGGR_MINPLEN) ? 0 : \
3640 ++ DIV_ROUND_UP(ATH_AGGR_MINPLEN - (_len), ATH_AGGR_DELIM_SZ))
3641 +
3642 + #define BAW_WITHIN(_start, _bawsz, _seqno) \
3643 + ((((_seqno) - (_start)) & 4095) < (_bawsz))
3644 +diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
3645 +index 0de3c3d..270671f 100644
3646 +--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
3647 ++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
3648 +@@ -144,16 +144,36 @@ static void hif_usb_tx_cb(struct urb *urb)
3649 + case -ENODEV:
3650 + case -ESHUTDOWN:
3651 + /*
3652 +- * The URB has been killed, free the SKBs
3653 +- * and return.
3654 ++ * The URB has been killed, free the SKBs.
3655 + */
3656 + ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
3657 +- return;
3658 ++
3659 ++ /*
3660 ++ * If the URBs are being flushed, no need to add this
3661 ++ * URB to the free list.
3662 ++ */
3663 ++ spin_lock(&hif_dev->tx.tx_lock);
3664 ++ if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
3665 ++ spin_unlock(&hif_dev->tx.tx_lock);
3666 ++ return;
3667 ++ }
3668 ++ spin_unlock(&hif_dev->tx.tx_lock);
3669 ++
3670 ++ /*
3671 ++ * In the stop() case, this URB has to be added to
3672 ++ * the free list.
3673 ++ */
3674 ++ goto add_free;
3675 + default:
3676 + break;
3677 + }
3678 +
3679 +- /* Check if TX has been stopped */
3680 ++ /*
3681 ++ * Check if TX has been stopped, this is needed because
3682 ++ * this CB could have been invoked just after the TX lock
3683 ++ * was released in hif_stop() and kill_urb() hasn't been
3684 ++ * called yet.
3685 ++ */
3686 + spin_lock(&hif_dev->tx.tx_lock);
3687 + if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
3688 + spin_unlock(&hif_dev->tx.tx_lock);
3689 +@@ -305,6 +325,7 @@ static void hif_usb_start(void *hif_handle, u8 pipe_id)
3690 + static void hif_usb_stop(void *hif_handle, u8 pipe_id)
3691 + {
3692 + struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
3693 ++ struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
3694 + unsigned long flags;
3695 +
3696 + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
3697 +@@ -312,6 +333,12 @@ static void hif_usb_stop(void *hif_handle, u8 pipe_id)
3698 + hif_dev->tx.tx_skb_cnt = 0;
3699 + hif_dev->tx.flags |= HIF_USB_TX_STOP;
3700 + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
3701 ++
3702 ++ /* The pending URBs have to be canceled. */
3703 ++ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
3704 ++ &hif_dev->tx.tx_pending, list) {
3705 ++ usb_kill_urb(tx_buf->urb);
3706 ++ }
3707 + }
3708 +
3709 + static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb,
3710 +@@ -578,6 +605,7 @@ free:
3711 + static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
3712 + {
3713 + struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
3714 ++ unsigned long flags;
3715 +
3716 + list_for_each_entry_safe(tx_buf, tx_buf_tmp,
3717 + &hif_dev->tx.tx_buf, list) {
3718 +@@ -588,6 +616,10 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
3719 + kfree(tx_buf);
3720 + }
3721 +
3722 ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
3723 ++ hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
3724 ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
3725 ++
3726 + list_for_each_entry_safe(tx_buf, tx_buf_tmp,
3727 + &hif_dev->tx.tx_pending, list) {
3728 + usb_kill_urb(tx_buf->urb);
3729 +diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
3730 +index 2daf97b..30d0938 100644
3731 +--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
3732 ++++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
3733 +@@ -62,6 +62,7 @@ struct tx_buf {
3734 + };
3735 +
3736 + #define HIF_USB_TX_STOP BIT(0)
3737 ++#define HIF_USB_TX_FLUSH BIT(1)
3738 +
3739 + struct hif_usb_tx {
3740 + u8 flags;
3741 +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
3742 +index c7fbe25..5dcf140 100644
3743 +--- a/drivers/net/wireless/ath/ath9k/hw.c
3744 ++++ b/drivers/net/wireless/ath/ath9k/hw.c
3745 +@@ -385,6 +385,9 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
3746 + else
3747 + ah->config.ht_enable = 0;
3748 +
3749 ++ /* PAPRD needs some more work to be enabled */
3750 ++ ah->config.paprd_disable = 1;
3751 ++
3752 + ah->config.rx_intr_mitigation = true;
3753 + ah->config.pcieSerDesWrite = true;
3754 +
3755 +@@ -1949,7 +1952,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
3756 + pCap->rx_status_len = sizeof(struct ar9003_rxs);
3757 + pCap->tx_desc_len = sizeof(struct ar9003_txc);
3758 + pCap->txs_len = sizeof(struct ar9003_txs);
3759 +- if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
3760 ++ if (!ah->config.paprd_disable &&
3761 ++ ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
3762 + pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
3763 + } else {
3764 + pCap->tx_desc_len = sizeof(struct ath_desc);
3765 +diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
3766 +index d47d1b4..1240376 100644
3767 +--- a/drivers/net/wireless/ath/ath9k/hw.h
3768 ++++ b/drivers/net/wireless/ath/ath9k/hw.h
3769 +@@ -215,6 +215,7 @@ struct ath9k_ops_config {
3770 + u32 pcie_waen;
3771 + u8 analog_shiftreg;
3772 + u8 ht_enable;
3773 ++ u8 paprd_disable;
3774 + u32 ofdm_trig_low;
3775 + u32 ofdm_trig_high;
3776 + u32 cck_trig_high;
3777 +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
3778 +index c0c3464..d1b0db4 100644
3779 +--- a/drivers/net/wireless/ath/ath9k/main.c
3780 ++++ b/drivers/net/wireless/ath/ath9k/main.c
3781 +@@ -295,7 +295,8 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
3782 + ath9k_hw_set_interrupts(ah, ah->imask);
3783 +
3784 + if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) {
3785 +- ath_beacon_config(sc, NULL);
3786 ++ if (sc->sc_flags & SC_OP_BEACONS)
3787 ++ ath_beacon_config(sc, NULL);
3788 + ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
3789 + ath_start_ani(common);
3790 + }
3791 +@@ -1418,8 +1419,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
3792 + ath9k_hw_configpcipowersave(ah, 1, 1);
3793 + ath9k_ps_restore(sc);
3794 +
3795 +- /* Finally, put the chip in FULL SLEEP mode */
3796 +- ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
3797 ++ sc->ps_idle = true;
3798 ++ ath9k_set_wiphy_idle(aphy, true);
3799 ++ ath_radio_disable(sc, hw);
3800 +
3801 + sc->sc_flags |= SC_OP_INVALID;
3802 +
3803 +diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
3804 +index b5b6514..894ebadb 100644
3805 +--- a/drivers/net/wireless/ath/ath9k/pci.c
3806 ++++ b/drivers/net/wireless/ath/ath9k/pci.c
3807 +@@ -290,6 +290,10 @@ static int ath_pci_resume(struct pci_dev *pdev)
3808 + AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
3809 + ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
3810 +
3811 ++ sc->ps_idle = true;
3812 ++ ath9k_set_wiphy_idle(aphy, true);
3813 ++ ath_radio_disable(sc, hw);
3814 ++
3815 + return 0;
3816 + }
3817 +
3818 +diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
3819 +index 939a0e9..84866a4 100644
3820 +--- a/drivers/net/wireless/ath/carl9170/rx.c
3821 ++++ b/drivers/net/wireless/ath/carl9170/rx.c
3822 +@@ -564,7 +564,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
3823 + cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid);
3824 +
3825 + /* 2. Maybe the AP wants to send multicast/broadcast data? */
3826 +- cam = !!(tim_ie->bitmap_ctrl & 0x01);
3827 ++ cam |= !!(tim_ie->bitmap_ctrl & 0x01);
3828 +
3829 + if (!cam) {
3830 + /* back to low-power land. */
3831 +diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
3832 +index bd21a4d..32fcc19 100644
3833 +--- a/drivers/net/wireless/ath/key.c
3834 ++++ b/drivers/net/wireless/ath/key.c
3835 +@@ -60,6 +60,8 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
3836 + REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
3837 + REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
3838 + REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
3839 ++ if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)
3840 ++ REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
3841 +
3842 + }
3843 +
3844 +diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
3845 +index bd8a413..2176ede 100644
3846 +--- a/drivers/net/wireless/hostap/hostap_cs.c
3847 ++++ b/drivers/net/wireless/hostap/hostap_cs.c
3848 +@@ -518,22 +518,21 @@ static int prism2_config(struct pcmcia_device *link)
3849 + hw_priv->link = link;
3850 +
3851 + /*
3852 +- * Make sure the IRQ handler cannot proceed until at least
3853 +- * dev->base_addr is initialized.
3854 ++ * We enable IRQ here, but IRQ handler will not proceed
3855 ++ * until dev->base_addr is set below. This protects us from
3856 ++ * receive interrupts when driver is not initialized.
3857 + */
3858 +- spin_lock_irqsave(&local->irq_init_lock, flags);
3859 +-
3860 + ret = pcmcia_request_irq(link, prism2_interrupt);
3861 + if (ret)
3862 +- goto failed_unlock;
3863 ++ goto failed;
3864 +
3865 + ret = pcmcia_enable_device(link);
3866 + if (ret)
3867 +- goto failed_unlock;
3868 ++ goto failed;
3869 +
3870 ++ spin_lock_irqsave(&local->irq_init_lock, flags);
3871 + dev->irq = link->irq;
3872 + dev->base_addr = link->resource[0]->start;
3873 +-
3874 + spin_unlock_irqrestore(&local->irq_init_lock, flags);
3875 +
3876 + local->shutdown = 0;
3877 +@@ -546,8 +545,6 @@ static int prism2_config(struct pcmcia_device *link)
3878 +
3879 + return ret;
3880 +
3881 +- failed_unlock:
3882 +- spin_unlock_irqrestore(&local->irq_init_lock, flags);
3883 + failed:
3884 + kfree(hw_priv);
3885 + prism2_release((u_long)link);
3886 +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
3887 +index c2636a7..f0468cd 100644
3888 +--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
3889 ++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
3890 +@@ -1319,6 +1319,9 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
3891 + /* only Re-enable if diabled by irq */
3892 + if (test_bit(STATUS_INT_ENABLED, &priv->status))
3893 + iwl_enable_interrupts(priv);
3894 ++ /* Re-enable RF_KILL if it occurred */
3895 ++ else if (handled & CSR_INT_BIT_RF_KILL)
3896 ++ iwl_enable_rfkill_int(priv);
3897 +
3898 + #ifdef CONFIG_IWLWIFI_DEBUG
3899 + if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
3900 +@@ -1533,6 +1536,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
3901 + /* only Re-enable if diabled by irq */
3902 + if (test_bit(STATUS_INT_ENABLED, &priv->status))
3903 + iwl_enable_interrupts(priv);
3904 ++ /* Re-enable RF_KILL if it occurred */
3905 ++ else if (handled & CSR_INT_BIT_RF_KILL)
3906 ++ iwl_enable_rfkill_int(priv);
3907 + }
3908 +
3909 + /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
3910 +@@ -3530,9 +3536,10 @@ static void iwl_mac_stop(struct ieee80211_hw *hw)
3911 +
3912 + flush_workqueue(priv->workqueue);
3913 +
3914 +- /* enable interrupts again in order to receive rfkill changes */
3915 ++ /* User space software may expect getting rfkill changes
3916 ++ * even if interface is down */
3917 + iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
3918 +- iwl_enable_interrupts(priv);
3919 ++ iwl_enable_rfkill_int(priv);
3920 +
3921 + IWL_DEBUG_MAC80211(priv, "leave\n");
3922 + }
3923 +@@ -4515,14 +4522,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3924 + * 8. Enable interrupts and read RFKILL state
3925 + *********************************************/
3926 +
3927 +- /* enable interrupts if needed: hw bug w/a */
3928 ++ /* enable rfkill interrupt: hw bug w/a */
3929 + pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
3930 + if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
3931 + pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
3932 + pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
3933 + }
3934 +
3935 +- iwl_enable_interrupts(priv);
3936 ++ iwl_enable_rfkill_int(priv);
3937 +
3938 + /* If platform's RF_KILL switch is NOT set to KILL */
3939 + if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
3940 +diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
3941 +index 1aaef70..19f5586 100644
3942 +--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
3943 ++++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
3944 +@@ -163,6 +163,12 @@ static inline void iwl_disable_interrupts(struct iwl_priv *priv)
3945 + IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
3946 + }
3947 +
3948 ++static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
3949 ++{
3950 ++ IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
3951 ++ iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
3952 ++}
3953 ++
3954 + static inline void iwl_enable_interrupts(struct iwl_priv *priv)
3955 + {
3956 + IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
3957 +diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
3958 +index 76b2318a..f618b96 100644
3959 +--- a/drivers/net/wireless/p54/txrx.c
3960 ++++ b/drivers/net/wireless/p54/txrx.c
3961 +@@ -618,7 +618,7 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
3962 + else
3963 + *burst_possible = false;
3964 +
3965 +- if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
3966 ++ if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
3967 + *flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;
3968 +
3969 + if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)
3970 +diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
3971 +index 9be8089..4c6fba7 100644
3972 +--- a/drivers/net/wireless/rt2x00/rt73usb.c
3973 ++++ b/drivers/net/wireless/rt2x00/rt73usb.c
3974 +@@ -2417,6 +2417,7 @@ static struct usb_device_id rt73usb_device_table[] = {
3975 + { USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) },
3976 + { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) },
3977 + { USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) },
3978 ++ { USB_DEVICE(0x0812, 0x3101), USB_DEVICE_DATA(&rt73usb_ops) },
3979 + /* Qcom */
3980 + { USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) },
3981 + { USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) },
3982 +diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
3983 +index f7b68ca..4ae494b 100644
3984 +--- a/drivers/pci/pci-stub.c
3985 ++++ b/drivers/pci/pci-stub.c
3986 +@@ -54,6 +54,9 @@ static int __init pci_stub_init(void)
3987 + subdevice = PCI_ANY_ID, class=0, class_mask=0;
3988 + int fields;
3989 +
3990 ++ if (!strlen(id))
3991 ++ continue;
3992 ++
3993 + fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
3994 + &vendor, &device, &subvendor, &subdevice,
3995 + &class, &class_mask);
3996 +diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
3997 +index b3c01c1..11e1ac5 100644
3998 +--- a/drivers/power/ds2760_battery.c
3999 ++++ b/drivers/power/ds2760_battery.c
4000 +@@ -212,7 +212,7 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
4001 + if (di->rem_capacity > 100)
4002 + di->rem_capacity = 100;
4003 +
4004 +- if (di->current_uA >= 100L)
4005 ++ if (di->current_uA < -100L)
4006 + di->life_sec = -((di->accum_current_uAh - di->empty_uAh) * 36L)
4007 + / (di->current_uA / 100L);
4008 + else
4009 +diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
4010 +index a8108a7..2bbe04a 100644
4011 +--- a/drivers/power/jz4740-battery.c
4012 ++++ b/drivers/power/jz4740-battery.c
4013 +@@ -47,6 +47,8 @@ struct jz_battery {
4014 +
4015 + struct power_supply battery;
4016 + struct delayed_work work;
4017 ++
4018 ++ struct mutex lock;
4019 + };
4020 +
4021 + static inline struct jz_battery *psy_to_jz_battery(struct power_supply *psy)
4022 +@@ -68,6 +70,8 @@ static long jz_battery_read_voltage(struct jz_battery *battery)
4023 + unsigned long val;
4024 + long voltage;
4025 +
4026 ++ mutex_lock(&battery->lock);
4027 ++
4028 + INIT_COMPLETION(battery->read_completion);
4029 +
4030 + enable_irq(battery->irq);
4031 +@@ -91,6 +95,8 @@ static long jz_battery_read_voltage(struct jz_battery *battery)
4032 + battery->cell->disable(battery->pdev);
4033 + disable_irq(battery->irq);
4034 +
4035 ++ mutex_unlock(&battery->lock);
4036 ++
4037 + return voltage;
4038 + }
4039 +
4040 +@@ -291,6 +297,7 @@ static int __devinit jz_battery_probe(struct platform_device *pdev)
4041 + jz_battery->pdev = pdev;
4042 +
4043 + init_completion(&jz_battery->read_completion);
4044 ++ mutex_init(&jz_battery->lock);
4045 +
4046 + INIT_DELAYED_WORK(&jz_battery->work, jz_battery_work);
4047 +
4048 +diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
4049 +index 5856167..dd8242d 100644
4050 +--- a/drivers/rtc/rtc-cmos.c
4051 ++++ b/drivers/rtc/rtc-cmos.c
4052 +@@ -36,6 +36,7 @@
4053 + #include <linux/platform_device.h>
4054 + #include <linux/mod_devicetable.h>
4055 + #include <linux/log2.h>
4056 ++#include <linux/pm.h>
4057 +
4058 + /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
4059 + #include <asm-generic/rtc.h>
4060 +@@ -850,7 +851,7 @@ static void __exit cmos_do_remove(struct device *dev)
4061 +
4062 + #ifdef CONFIG_PM
4063 +
4064 +-static int cmos_suspend(struct device *dev, pm_message_t mesg)
4065 ++static int cmos_suspend(struct device *dev)
4066 + {
4067 + struct cmos_rtc *cmos = dev_get_drvdata(dev);
4068 + unsigned char tmp;
4069 +@@ -898,7 +899,7 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg)
4070 + */
4071 + static inline int cmos_poweroff(struct device *dev)
4072 + {
4073 +- return cmos_suspend(dev, PMSG_HIBERNATE);
4074 ++ return cmos_suspend(dev);
4075 + }
4076 +
4077 + static int cmos_resume(struct device *dev)
4078 +@@ -945,9 +946,9 @@ static int cmos_resume(struct device *dev)
4079 + return 0;
4080 + }
4081 +
4082 ++static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
4083 ++
4084 + #else
4085 +-#define cmos_suspend NULL
4086 +-#define cmos_resume NULL
4087 +
4088 + static inline int cmos_poweroff(struct device *dev)
4089 + {
4090 +@@ -1077,7 +1078,7 @@ static void __exit cmos_pnp_remove(struct pnp_dev *pnp)
4091 +
4092 + static int cmos_pnp_suspend(struct pnp_dev *pnp, pm_message_t mesg)
4093 + {
4094 +- return cmos_suspend(&pnp->dev, mesg);
4095 ++ return cmos_suspend(&pnp->dev);
4096 + }
4097 +
4098 + static int cmos_pnp_resume(struct pnp_dev *pnp)
4099 +@@ -1157,8 +1158,9 @@ static struct platform_driver cmos_platform_driver = {
4100 + .shutdown = cmos_platform_shutdown,
4101 + .driver = {
4102 + .name = (char *) driver_name,
4103 +- .suspend = cmos_suspend,
4104 +- .resume = cmos_resume,
4105 ++#ifdef CONFIG_PM
4106 ++ .pm = &cmos_pm_ops,
4107 ++#endif
4108 + }
4109 + };
4110 +
4111 +diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
4112 +index 0f4ef87..3e6ad23 100644
4113 +--- a/drivers/s390/cio/qdio.h
4114 ++++ b/drivers/s390/cio/qdio.h
4115 +@@ -91,6 +91,12 @@ enum qdio_irq_states {
4116 + #define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */
4117 + #define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */
4118 +
4119 ++/* SIGA flags */
4120 ++#define QDIO_SIGA_WRITE 0x00
4121 ++#define QDIO_SIGA_READ 0x01
4122 ++#define QDIO_SIGA_SYNC 0x02
4123 ++#define QDIO_SIGA_QEBSM_FLAG 0x80
4124 ++
4125 + #ifdef CONFIG_64BIT
4126 + static inline int do_sqbs(u64 token, unsigned char state, int queue,
4127 + int *start, int *count)
4128 +diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
4129 +index 5fcfa7f..27de2ea 100644
4130 +--- a/drivers/s390/cio/qdio_main.c
4131 ++++ b/drivers/s390/cio/qdio_main.c
4132 +@@ -29,11 +29,12 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@××××××.com>,"\
4133 + MODULE_DESCRIPTION("QDIO base support");
4134 + MODULE_LICENSE("GPL");
4135 +
4136 +-static inline int do_siga_sync(struct subchannel_id schid,
4137 +- unsigned int out_mask, unsigned int in_mask)
4138 ++static inline int do_siga_sync(unsigned long schid,
4139 ++ unsigned int out_mask, unsigned int in_mask,
4140 ++ unsigned int fc)
4141 + {
4142 +- register unsigned long __fc asm ("0") = 2;
4143 +- register struct subchannel_id __schid asm ("1") = schid;
4144 ++ register unsigned long __fc asm ("0") = fc;
4145 ++ register unsigned long __schid asm ("1") = schid;
4146 + register unsigned long out asm ("2") = out_mask;
4147 + register unsigned long in asm ("3") = in_mask;
4148 + int cc;
4149 +@@ -47,10 +48,11 @@ static inline int do_siga_sync(struct subchannel_id schid,
4150 + return cc;
4151 + }
4152 +
4153 +-static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
4154 ++static inline int do_siga_input(unsigned long schid, unsigned int mask,
4155 ++ unsigned int fc)
4156 + {
4157 +- register unsigned long __fc asm ("0") = 1;
4158 +- register struct subchannel_id __schid asm ("1") = schid;
4159 ++ register unsigned long __fc asm ("0") = fc;
4160 ++ register unsigned long __schid asm ("1") = schid;
4161 + register unsigned long __mask asm ("2") = mask;
4162 + int cc;
4163 +
4164 +@@ -279,6 +281,8 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
4165 + static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
4166 + unsigned int input)
4167 + {
4168 ++ unsigned long schid = *((u32 *) &q->irq_ptr->schid);
4169 ++ unsigned int fc = QDIO_SIGA_SYNC;
4170 + int cc;
4171 +
4172 + if (!need_siga_sync(q))
4173 +@@ -287,7 +291,12 @@ static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
4174 + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
4175 + qperf_inc(q, siga_sync);
4176 +
4177 +- cc = do_siga_sync(q->irq_ptr->schid, output, input);
4178 ++ if (is_qebsm(q)) {
4179 ++ schid = q->irq_ptr->sch_token;
4180 ++ fc |= QDIO_SIGA_QEBSM_FLAG;
4181 ++ }
4182 ++
4183 ++ cc = do_siga_sync(schid, output, input, fc);
4184 + if (cc)
4185 + DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
4186 + return cc;
4187 +@@ -313,8 +322,8 @@ static inline int qdio_siga_sync_all(struct qdio_q *q)
4188 +
4189 + static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
4190 + {
4191 +- unsigned long schid;
4192 +- unsigned int fc = 0;
4193 ++ unsigned long schid = *((u32 *) &q->irq_ptr->schid);
4194 ++ unsigned int fc = QDIO_SIGA_WRITE;
4195 + u64 start_time = 0;
4196 + int cc;
4197 +
4198 +@@ -323,11 +332,8 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
4199 +
4200 + if (is_qebsm(q)) {
4201 + schid = q->irq_ptr->sch_token;
4202 +- fc |= 0x80;
4203 ++ fc |= QDIO_SIGA_QEBSM_FLAG;
4204 + }
4205 +- else
4206 +- schid = *((u32 *)&q->irq_ptr->schid);
4207 +-
4208 + again:
4209 + cc = do_siga_output(schid, q->mask, busy_bit, fc);
4210 +
4211 +@@ -347,12 +353,19 @@ again:
4212 +
4213 + static inline int qdio_siga_input(struct qdio_q *q)
4214 + {
4215 ++ unsigned long schid = *((u32 *) &q->irq_ptr->schid);
4216 ++ unsigned int fc = QDIO_SIGA_READ;
4217 + int cc;
4218 +
4219 + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
4220 + qperf_inc(q, siga_read);
4221 +
4222 +- cc = do_siga_input(q->irq_ptr->schid, q->mask);
4223 ++ if (is_qebsm(q)) {
4224 ++ schid = q->irq_ptr->sch_token;
4225 ++ fc |= QDIO_SIGA_QEBSM_FLAG;
4226 ++ }
4227 ++
4228 ++ cc = do_siga_input(schid, q->mask, fc);
4229 + if (cc)
4230 + DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
4231 + return cc;
4232 +diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
4233 +index 8fd8c62..a1ba52a 100644
4234 +--- a/drivers/s390/crypto/ap_bus.c
4235 ++++ b/drivers/s390/crypto/ap_bus.c
4236 +@@ -154,7 +154,7 @@ static inline int ap_instructions_available(void)
4237 + */
4238 + static int ap_interrupts_available(void)
4239 + {
4240 +- return test_facility(1) && test_facility(2);
4241 ++ return test_facility(2) && test_facility(65);
4242 + }
4243 +
4244 + /**
4245 +diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
4246 +index 29251fa..812d32c 100644
4247 +--- a/drivers/scsi/libsas/sas_scsi_host.c
4248 ++++ b/drivers/scsi/libsas/sas_scsi_host.c
4249 +@@ -647,6 +647,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
4250 +
4251 + spin_lock_irqsave(shost->host_lock, flags);
4252 + list_splice_init(&shost->eh_cmd_q, &eh_work_q);
4253 ++ shost->host_eh_scheduled = 0;
4254 + spin_unlock_irqrestore(shost->host_lock, flags);
4255 +
4256 + SAS_DPRINTK("Enter %s\n", __func__);
4257 +diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
4258 +index 12faf64..7227227 100644
4259 +--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
4260 ++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
4261 +@@ -2057,9 +2057,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
4262 + /* adjust hba_queue_depth, reply_free_queue_depth,
4263 + * and queue_size
4264 + */
4265 +- ioc->hba_queue_depth -= queue_diff;
4266 +- ioc->reply_free_queue_depth -= queue_diff;
4267 +- queue_size -= queue_diff;
4268 ++ ioc->hba_queue_depth -= (queue_diff / 2);
4269 ++ ioc->reply_free_queue_depth -= (queue_diff / 2);
4270 ++ queue_size = facts->MaxReplyDescriptorPostQueueDepth;
4271 + }
4272 + ioc->reply_post_queue_depth = queue_size;
4273 +
4274 +@@ -3662,6 +3662,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
4275 + ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
4276 + mutex_init(&ioc->scsih_cmds.mutex);
4277 +
4278 ++ /* scsih internal command bits */
4279 ++ ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4280 ++ ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
4281 ++ mutex_init(&ioc->scsih_cmds.mutex);
4282 ++
4283 + /* task management internal command bits */
4284 + ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4285 + ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
4286 +@@ -3786,6 +3791,8 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
4287 + static void
4288 + _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
4289 + {
4290 ++ mpt2sas_scsih_reset_handler(ioc, reset_phase);
4291 ++ mpt2sas_ctl_reset_handler(ioc, reset_phase);
4292 + switch (reset_phase) {
4293 + case MPT2_IOC_PRE_RESET:
4294 + dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
4295 +@@ -3816,8 +3823,6 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
4296 + "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
4297 + break;
4298 + }
4299 +- mpt2sas_scsih_reset_handler(ioc, reset_phase);
4300 +- mpt2sas_ctl_reset_handler(ioc, reset_phase);
4301 + }
4302 +
4303 + /**
4304 +@@ -3871,6 +3876,7 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
4305 + {
4306 + int r;
4307 + unsigned long flags;
4308 ++ u8 pe_complete = ioc->wait_for_port_enable_to_complete;
4309 +
4310 + dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
4311 + __func__));
4312 +@@ -3913,6 +3919,14 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
4313 + if (r)
4314 + goto out;
4315 + _base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
4316 ++
4317 ++ /* If this hard reset is called while port enable is active, then
4318 ++ * there is no reason to call make_ioc_operational
4319 ++ */
4320 ++ if (pe_complete) {
4321 ++ r = -EFAULT;
4322 ++ goto out;
4323 ++ }
4324 + r = _base_make_ioc_operational(ioc, sleep_flag);
4325 + if (!r)
4326 + _base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
4327 +diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
4328 +index 1a96a00..90bc21e 100644
4329 +--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
4330 ++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
4331 +@@ -819,7 +819,7 @@ _scsih_is_end_device(u32 device_info)
4332 + }
4333 +
4334 + /**
4335 +- * mptscsih_get_scsi_lookup - returns scmd entry
4336 ++ * _scsih_scsi_lookup_get - returns scmd entry
4337 + * @ioc: per adapter object
4338 + * @smid: system request message index
4339 + *
4340 +@@ -832,6 +832,28 @@ _scsih_scsi_lookup_get(struct MPT2SAS_ADAPTER *ioc, u16 smid)
4341 + }
4342 +
4343 + /**
4344 ++ * _scsih_scsi_lookup_get_clear - returns scmd entry
4345 ++ * @ioc: per adapter object
4346 ++ * @smid: system request message index
4347 ++ *
4348 ++ * Returns the smid stored scmd pointer.
4349 ++ * Then will dereference the stored scmd pointer.
4350 ++ */
4351 ++static inline struct scsi_cmnd *
4352 ++_scsih_scsi_lookup_get_clear(struct MPT2SAS_ADAPTER *ioc, u16 smid)
4353 ++{
4354 ++ unsigned long flags;
4355 ++ struct scsi_cmnd *scmd;
4356 ++
4357 ++ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4358 ++ scmd = ioc->scsi_lookup[smid - 1].scmd;
4359 ++ ioc->scsi_lookup[smid - 1].scmd = NULL;
4360 ++ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4361 ++
4362 ++ return scmd;
4363 ++}
4364 ++
4365 ++/**
4366 + * _scsih_scsi_lookup_find_by_scmd - scmd lookup
4367 + * @ioc: per adapter object
4368 + * @smid: system request message index
4369 +@@ -2957,9 +2979,6 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
4370 + u16 handle;
4371 +
4372 + for (i = 0 ; i < event_data->NumEntries; i++) {
4373 +- if (event_data->PHY[i].PhyStatus &
4374 +- MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
4375 +- continue;
4376 + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4377 + if (!handle)
4378 + continue;
4379 +@@ -3186,7 +3205,7 @@ _scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc)
4380 + u16 count = 0;
4381 +
4382 + for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4383 +- scmd = _scsih_scsi_lookup_get(ioc, smid);
4384 ++ scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
4385 + if (!scmd)
4386 + continue;
4387 + count++;
4388 +@@ -3780,7 +3799,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4389 + u32 response_code = 0;
4390 +
4391 + mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
4392 +- scmd = _scsih_scsi_lookup_get(ioc, smid);
4393 ++ scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
4394 + if (scmd == NULL)
4395 + return 1;
4396 +
4397 +@@ -4942,6 +4961,12 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
4398 + event_data);
4399 + #endif
4400 +
4401 ++ /* In MPI Revision K (0xC), the internal device reset complete was
4402 ++ * implemented, so avoid setting tm_busy flag for older firmware.
4403 ++ */
4404 ++ if ((ioc->facts.HeaderVersion >> 8) < 0xC)
4405 ++ return;
4406 ++
4407 + if (event_data->ReasonCode !=
4408 + MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
4409 + event_data->ReasonCode !=
4410 +@@ -5036,6 +5061,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
4411 + struct fw_event_work *fw_event)
4412 + {
4413 + struct scsi_cmnd *scmd;
4414 ++ struct scsi_device *sdev;
4415 + u16 smid, handle;
4416 + u32 lun;
4417 + struct MPT2SAS_DEVICE *sas_device_priv_data;
4418 +@@ -5046,12 +5072,17 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
4419 + Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
4420 + #endif
4421 + u16 ioc_status;
4422 ++ unsigned long flags;
4423 ++ int r;
4424 ++
4425 + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "broadcast primative: "
4426 + "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum,
4427 + event_data->PortWidth));
4428 + dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
4429 + __func__));
4430 +
4431 ++ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4432 ++ ioc->broadcast_aen_busy = 0;
4433 + termination_count = 0;
4434 + query_count = 0;
4435 + mpi_reply = ioc->tm_cmds.reply;
4436 +@@ -5059,7 +5090,8 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
4437 + scmd = _scsih_scsi_lookup_get(ioc, smid);
4438 + if (!scmd)
4439 + continue;
4440 +- sas_device_priv_data = scmd->device->hostdata;
4441 ++ sdev = scmd->device;
4442 ++ sas_device_priv_data = sdev->hostdata;
4443 + if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
4444 + continue;
4445 + /* skip hidden raid components */
4446 +@@ -5075,6 +5107,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
4447 + lun = sas_device_priv_data->lun;
4448 + query_count++;
4449 +
4450 ++ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4451 + mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
4452 + MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL);
4453 + ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
4454 +@@ -5084,14 +5117,20 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
4455 + (mpi_reply->ResponseCode ==
4456 + MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
4457 + mpi_reply->ResponseCode ==
4458 +- MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
4459 ++ MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) {
4460 ++ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4461 + continue;
4462 +-
4463 +- mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
4464 +- MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30, NULL);
4465 ++ }
4466 ++ r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
4467 ++ sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
4468 ++ scmd);
4469 ++ if (r == FAILED)
4470 ++ sdev_printk(KERN_WARNING, sdev, "task abort: FAILED "
4471 ++ "scmd(%p)\n", scmd);
4472 + termination_count += le32_to_cpu(mpi_reply->TerminationCount);
4473 ++ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4474 + }
4475 +- ioc->broadcast_aen_busy = 0;
4476 ++ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4477 +
4478 + dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
4479 + "%s - exit, query_count = %d termination_count = %d\n",
4480 +@@ -6687,6 +6726,7 @@ _scsih_remove(struct pci_dev *pdev)
4481 + destroy_workqueue(wq);
4482 +
4483 + /* release all the volumes */
4484 ++ _scsih_ir_shutdown(ioc);
4485 + list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
4486 + list) {
4487 + if (raid_device->starget) {
4488 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
4489 +index 9564961..9c9e6d3 100644
4490 +--- a/drivers/scsi/sd.c
4491 ++++ b/drivers/scsi/sd.c
4492 +@@ -1175,6 +1175,12 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
4493 + u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
4494 + u64 bad_lba;
4495 + int info_valid;
4496 ++ /*
4497 ++ * resid is optional but mostly filled in. When it's unused,
4498 ++ * its value is zero, so we assume the whole buffer transferred
4499 ++ */
4500 ++ unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
4501 ++ unsigned int good_bytes;
4502 +
4503 + if (scmd->request->cmd_type != REQ_TYPE_FS)
4504 + return 0;
4505 +@@ -1208,7 +1214,8 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
4506 + /* This computation should always be done in terms of
4507 + * the resolution of the device's medium.
4508 + */
4509 +- return (bad_lba - start_lba) * scmd->device->sector_size;
4510 ++ good_bytes = (bad_lba - start_lba) * scmd->device->sector_size;
4511 ++ return min(good_bytes, transferred);
4512 + }
4513 +
4514 + /**
4515 +diff --git a/drivers/serial/68360serial.c b/drivers/serial/68360serial.c
4516 +index 88b1335..bc21eea 100644
4517 +--- a/drivers/serial/68360serial.c
4518 ++++ b/drivers/serial/68360serial.c
4519 +@@ -2428,6 +2428,7 @@ static const struct tty_operations rs_360_ops = {
4520 + /* .read_proc = rs_360_read_proc, */
4521 + .tiocmget = rs_360_tiocmget,
4522 + .tiocmset = rs_360_tiocmset,
4523 ++ .get_icount = rs_360_get_icount,
4524 + };
4525 +
4526 + static int __init rs_360_init(void)
4527 +diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
4528 +index 09a5508..b2b40f91 100644
4529 +--- a/drivers/serial/8250.c
4530 ++++ b/drivers/serial/8250.c
4531 +@@ -236,7 +236,8 @@ static const struct serial8250_config uart_config[] = {
4532 + .fifo_size = 128,
4533 + .tx_loadsz = 128,
4534 + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
4535 +- .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
4536 ++ /* UART_CAP_EFR breaks billionon CF bluetooth card. */
4537 ++ .flags = UART_CAP_FIFO | UART_CAP_SLEEP,
4538 + },
4539 + [PORT_16654] = {
4540 + .name = "ST16654",
4541 +diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
4542 +index c7345db..f853379 100644
4543 +--- a/drivers/ssb/pcmcia.c
4544 ++++ b/drivers/ssb/pcmcia.c
4545 +@@ -733,7 +733,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
4546 +
4547 + /* Fetch the vendor specific tuples. */
4548 + res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS,
4549 +- ssb_pcmcia_do_get_invariants, sprom);
4550 ++ ssb_pcmcia_do_get_invariants, iv);
4551 + if ((res == 0) || (res == -ENOSPC))
4552 + return 0;
4553 +
4554 +diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
4555 +index 824f9a4..dc760ef 100644
4556 +--- a/drivers/staging/bcm/InterfaceInit.c
4557 ++++ b/drivers/staging/bcm/InterfaceInit.c
4558 +@@ -4,6 +4,7 @@ static struct usb_device_id InterfaceUsbtable[] = {
4559 + { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3) },
4560 + { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3B) },
4561 + { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3L) },
4562 ++ { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_SM250) },
4563 + { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_226) },
4564 + { USB_DEVICE(BCM_USB_VENDOR_ID_FOXCONN, BCM_USB_PRODUCT_ID_1901) },
4565 + {}
4566 +diff --git a/drivers/staging/bcm/InterfaceInit.h b/drivers/staging/bcm/InterfaceInit.h
4567 +index e7a96e5..6c21625 100644
4568 +--- a/drivers/staging/bcm/InterfaceInit.h
4569 ++++ b/drivers/staging/bcm/InterfaceInit.h
4570 +@@ -8,6 +8,7 @@
4571 + #define BCM_USB_PRODUCT_ID_T3 0x0300
4572 + #define BCM_USB_PRODUCT_ID_T3B 0x0210
4573 + #define BCM_USB_PRODUCT_ID_T3L 0x0220
4574 ++#define BCM_USB_PRODUCT_ID_SM250 0xbccd
4575 + #define BCM_USB_PRODUCT_ID_SYM 0x15E
4576 + #define BCM_USB_PRODUCT_ID_1901 0xe017
4577 + #define BCM_USB_PRODUCT_ID_226 0x0132
4578 +diff --git a/drivers/staging/brcm80211/sys/wl_mac80211.c b/drivers/staging/brcm80211/sys/wl_mac80211.c
4579 +index d060377..cb2041a 100644
4580 +--- a/drivers/staging/brcm80211/sys/wl_mac80211.c
4581 ++++ b/drivers/staging/brcm80211/sys/wl_mac80211.c
4582 +@@ -299,11 +299,8 @@ static void wl_ops_stop(struct ieee80211_hw *hw)
4583 + wl_info_t *wl = hw->priv;
4584 + ASSERT(wl);
4585 + WL_LOCK(wl);
4586 +- wl_down(wl);
4587 + ieee80211_stop_queues(hw);
4588 + WL_UNLOCK(wl);
4589 +-
4590 +- return;
4591 + }
4592 +
4593 + static int
4594 +@@ -336,7 +333,14 @@ wl_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
4595 + static void
4596 + wl_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
4597 + {
4598 +- return;
4599 ++ struct wl_info *wl;
4600 ++
4601 ++ wl = HW_TO_WL(hw);
4602 ++
4603 ++ /* put driver in down state */
4604 ++ WL_LOCK(wl);
4605 ++ wl_down(wl);
4606 ++ WL_UNLOCK(wl);
4607 + }
4608 +
4609 + static int
4610 +@@ -972,7 +976,7 @@ static wl_info_t *wl_attach(u16 vendor, u16 device, unsigned long regs,
4611 + wl_found++;
4612 + return wl;
4613 +
4614 +- fail:
4615 ++fail:
4616 + wl_free(wl);
4617 + fail1:
4618 + return NULL;
4619 +@@ -1356,7 +1360,6 @@ wl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4620 + return 0;
4621 + }
4622 +
4623 +-#ifdef LINUXSTA_PS
4624 + static int wl_suspend(struct pci_dev *pdev, pm_message_t state)
4625 + {
4626 + wl_info_t *wl;
4627 +@@ -1371,11 +1374,12 @@ static int wl_suspend(struct pci_dev *pdev, pm_message_t state)
4628 + return -ENODEV;
4629 + }
4630 +
4631 ++ /* only need to flag hw is down for proper resume */
4632 + WL_LOCK(wl);
4633 +- wl_down(wl);
4634 + wl->pub->hw_up = false;
4635 + WL_UNLOCK(wl);
4636 +- pci_save_state(pdev, wl->pci_psstate);
4637 ++
4638 ++ pci_save_state(pdev);
4639 + pci_disable_device(pdev);
4640 + return pci_set_power_state(pdev, PCI_D3hot);
4641 + }
4642 +@@ -1399,7 +1403,7 @@ static int wl_resume(struct pci_dev *pdev)
4643 + if (err)
4644 + return err;
4645 +
4646 +- pci_restore_state(pdev, wl->pci_psstate);
4647 ++ pci_restore_state(pdev);
4648 +
4649 + err = pci_enable_device(pdev);
4650 + if (err)
4651 +@@ -1411,13 +1415,12 @@ static int wl_resume(struct pci_dev *pdev)
4652 + if ((val & 0x0000ff00) != 0)
4653 + pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
4654 +
4655 +- WL_LOCK(wl);
4656 +- err = wl_up(wl);
4657 +- WL_UNLOCK(wl);
4658 +-
4659 ++ /*
4660 ++ * done. driver will be put in up state
4661 ++ * in wl_ops_add_interface() call.
4662 ++ */
4663 + return err;
4664 + }
4665 +-#endif /* LINUXSTA_PS */
4666 +
4667 + static void wl_remove(struct pci_dev *pdev)
4668 + {
4669 +@@ -1450,14 +1453,12 @@ static void wl_remove(struct pci_dev *pdev)
4670 + }
4671 +
4672 + static struct pci_driver wl_pci_driver = {
4673 +- .name = "brcm80211",
4674 +- .probe = wl_pci_probe,
4675 +-#ifdef LINUXSTA_PS
4676 +- .suspend = wl_suspend,
4677 +- .resume = wl_resume,
4678 +-#endif /* LINUXSTA_PS */
4679 +- .remove = __devexit_p(wl_remove),
4680 +- .id_table = wl_id_table,
4681 ++ .name = "brcm80211",
4682 ++ .probe = wl_pci_probe,
4683 ++ .suspend = wl_suspend,
4684 ++ .resume = wl_resume,
4685 ++ .remove = __devexit_p(wl_remove),
4686 ++ .id_table = wl_id_table,
4687 + };
4688 + #endif /* !BCMSDIO */
4689 +
4690 +diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
4691 +index 8b383ee..5c6c727 100644
4692 +--- a/drivers/staging/comedi/drivers/jr3_pci.c
4693 ++++ b/drivers/staging/comedi/drivers/jr3_pci.c
4694 +@@ -54,6 +54,7 @@ Devices: [JR3] PCI force sensor board (jr3_pci)
4695 +
4696 + #define PCI_VENDOR_ID_JR3 0x1762
4697 + #define PCI_DEVICE_ID_JR3_1_CHANNEL 0x3111
4698 ++#define PCI_DEVICE_ID_JR3_1_CHANNEL_NEW 0x1111
4699 + #define PCI_DEVICE_ID_JR3_2_CHANNEL 0x3112
4700 + #define PCI_DEVICE_ID_JR3_3_CHANNEL 0x3113
4701 + #define PCI_DEVICE_ID_JR3_4_CHANNEL 0x3114
4702 +@@ -73,6 +74,8 @@ static DEFINE_PCI_DEVICE_TABLE(jr3_pci_pci_table) = {
4703 + {
4704 + PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL,
4705 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
4706 ++ PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW,
4707 ++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
4708 + PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL,
4709 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
4710 + PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_3_CHANNEL,
4711 +@@ -807,6 +810,10 @@ static int jr3_pci_attach(struct comedi_device *dev,
4712 + devpriv->n_channels = 1;
4713 + }
4714 + break;
4715 ++ case PCI_DEVICE_ID_JR3_1_CHANNEL_NEW:{
4716 ++ devpriv->n_channels = 1;
4717 ++ }
4718 ++ break;
4719 + case PCI_DEVICE_ID_JR3_2_CHANNEL:{
4720 + devpriv->n_channels = 2;
4721 + }
4722 +diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
4723 +index 4d1868d..0728c3c 100644
4724 +--- a/drivers/staging/comedi/drivers/ni_labpc.c
4725 ++++ b/drivers/staging/comedi/drivers/ni_labpc.c
4726 +@@ -575,7 +575,8 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase,
4727 + /* grab our IRQ */
4728 + if (irq) {
4729 + isr_flags = 0;
4730 +- if (thisboard->bustype == pci_bustype)
4731 ++ if (thisboard->bustype == pci_bustype
4732 ++ || thisboard->bustype == pcmcia_bustype)
4733 + isr_flags |= IRQF_SHARED;
4734 + if (request_irq(irq, labpc_interrupt, isr_flags,
4735 + driver_labpc.driver_name, dev)) {
4736 +diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
4737 +index 3f81ca5..b255c8b 100644
4738 +--- a/drivers/staging/hv/blkvsc_drv.c
4739 ++++ b/drivers/staging/hv/blkvsc_drv.c
4740 +@@ -368,6 +368,7 @@ static int blkvsc_probe(struct device *device)
4741 + blkdev->gd->first_minor = 0;
4742 + blkdev->gd->fops = &block_ops;
4743 + blkdev->gd->private_data = blkdev;
4744 ++ blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
4745 + sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);
4746 +
4747 + blkvsc_do_inquiry(blkdev);
4748 +diff --git a/drivers/staging/hv/netvsc.c b/drivers/staging/hv/netvsc.c
4749 +index 4c2632c..d7dcc66 100644
4750 +--- a/drivers/staging/hv/netvsc.c
4751 ++++ b/drivers/staging/hv/netvsc.c
4752 +@@ -1236,7 +1236,7 @@ static void NetVscOnChannelCallback(void *Context)
4753 + /* ASSERT(device); */
4754 +
4755 + packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
4756 +- GFP_KERNEL);
4757 ++ GFP_ATOMIC);
4758 + if (!packet)
4759 + return;
4760 + buffer = packet;
4761 +diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
4762 +index 1415352..cb79dff 100644
4763 +--- a/drivers/staging/hv/netvsc_drv.c
4764 ++++ b/drivers/staging/hv/netvsc_drv.c
4765 +@@ -233,6 +233,7 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj,
4766 + if (status == 1) {
4767 + netif_carrier_on(net);
4768 + netif_wake_queue(net);
4769 ++ netif_notify_peers(net);
4770 + } else {
4771 + netif_carrier_off(net);
4772 + netif_stop_queue(net);
4773 +@@ -355,7 +356,6 @@ static int netvsc_probe(struct device *device)
4774 +
4775 + /* Set initial state */
4776 + netif_carrier_off(net);
4777 +- netif_stop_queue(net);
4778 +
4779 + net_device_ctx = netdev_priv(net);
4780 + net_device_ctx->device_ctx = device_ctx;
4781 +diff --git a/drivers/staging/iio/adc/ad7476_core.c b/drivers/staging/iio/adc/ad7476_core.c
4782 +index deb68c8..b8b54da 100644
4783 +--- a/drivers/staging/iio/adc/ad7476_core.c
4784 ++++ b/drivers/staging/iio/adc/ad7476_core.c
4785 +@@ -68,7 +68,7 @@ static ssize_t ad7476_show_scale(struct device *dev,
4786 + /* Corresponds to Vref / 2^(bits) */
4787 + unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
4788 +
4789 +- return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000);
4790 ++ return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
4791 + }
4792 + static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad7476_show_scale, NULL, 0);
4793 +
4794 +diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
4795 +index 6309d52..89ccf37 100644
4796 +--- a/drivers/staging/iio/adc/ad799x_core.c
4797 ++++ b/drivers/staging/iio/adc/ad799x_core.c
4798 +@@ -432,7 +432,7 @@ static ssize_t ad799x_show_scale(struct device *dev,
4799 + /* Corresponds to Vref / 2^(bits) */
4800 + unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
4801 +
4802 +- return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000);
4803 ++ return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
4804 + }
4805 +
4806 + static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad799x_show_scale, NULL, 0);
4807 +diff --git a/drivers/staging/rt2860/chips/rt3090.c b/drivers/staging/rt2860/chips/rt3090.c
4808 +index c2933c6..cbc59f8 100644
4809 +--- a/drivers/staging/rt2860/chips/rt3090.c
4810 ++++ b/drivers/staging/rt2860/chips/rt3090.c
4811 +@@ -51,7 +51,8 @@ void NICInitRT3090RFRegisters(struct rt_rtmp_adapter *pAd)
4812 + if (IS_RT3090(pAd)) {
4813 + /* Init RF calibration */
4814 + /* Driver should toggle RF R30 bit7 before init RF registers */
4815 +- u32 RfReg = 0, data;
4816 ++ u8 RfReg;
4817 ++ u32 data;
4818 +
4819 + RT30xxReadRFRegister(pAd, RF_R30, (u8 *)&RfReg);
4820 + RfReg |= 0x80;
4821 +diff --git a/drivers/staging/rt2860/chips/rt30xx.c b/drivers/staging/rt2860/chips/rt30xx.c
4822 +index 4367a19..88eba51 100644
4823 +--- a/drivers/staging/rt2860/chips/rt30xx.c
4824 ++++ b/drivers/staging/rt2860/chips/rt30xx.c
4825 +@@ -53,7 +53,7 @@ struct rt_reg_pair RT30xx_RFRegTable[] = {
4826 + ,
4827 + {RF_R06, 0x02}
4828 + ,
4829 +- {RF_R07, 0x70}
4830 ++ {RF_R07, 0x60}
4831 + ,
4832 + {RF_R09, 0x0F}
4833 + ,
4834 +@@ -441,7 +441,7 @@ void RT30xxReverseRFSleepModeSetup(struct rt_rtmp_adapter *pAd)
4835 +
4836 + /* VCO_IC, RF R7 register Bit 4 & Bit 5 to 1 */
4837 + RT30xxReadRFRegister(pAd, RF_R07, &RFValue);
4838 +- RFValue |= 0x30;
4839 ++ RFValue |= 0x20;
4840 + RT30xxWriteRFRegister(pAd, RF_R07, RFValue);
4841 +
4842 + /* Idoh, RF R9 register Bit 1, Bit 2 & Bit 3 to 1 */
4843 +diff --git a/drivers/staging/rt2860/rt_main_dev.c b/drivers/staging/rt2860/rt_main_dev.c
4844 +index ad60cea..caf8b76 100644
4845 +--- a/drivers/staging/rt2860/rt_main_dev.c
4846 ++++ b/drivers/staging/rt2860/rt_main_dev.c
4847 +@@ -483,8 +483,6 @@ struct net_device *RtmpPhyNetDevInit(struct rt_rtmp_adapter *pAd,
4848 + net_dev->ml_priv = (void *)pAd;
4849 + pAd->net_dev = net_dev;
4850 +
4851 +- netif_stop_queue(net_dev);
4852 +-
4853 + return net_dev;
4854 +
4855 + }
4856 +diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
4857 +index cd15daa..aca0c46 100644
4858 +--- a/drivers/staging/rt2860/usb_main_dev.c
4859 ++++ b/drivers/staging/rt2860/usb_main_dev.c
4860 +@@ -106,6 +106,7 @@ struct usb_device_id rtusb_usb_id[] = {
4861 + {USB_DEVICE(0x0411, 0x016f)}, /* MelCo.,Inc. WLI-UC-G301N */
4862 + {USB_DEVICE(0x1737, 0x0070)}, /* Linksys WUSB100 */
4863 + {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */
4864 ++ {USB_DEVICE(0x1737, 0x0078)}, /* Linksys WUSB100v2 */
4865 + {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */
4866 + {USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */
4867 + {USB_DEVICE(0x100D, 0x9031)}, /* Motorola 2770 */
4868 +diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
4869 +index f1f0c63..e2b2cd9 100644
4870 +--- a/drivers/staging/rtl8712/usb_intf.c
4871 ++++ b/drivers/staging/rtl8712/usb_intf.c
4872 +@@ -47,54 +47,123 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
4873 + static void r871xu_dev_remove(struct usb_interface *pusb_intf);
4874 +
4875 + static struct usb_device_id rtl871x_usb_id_tbl[] = {
4876 +- /*92SU
4877 +- * Realtek */
4878 +- {USB_DEVICE(0x0bda, 0x8171)},
4879 +- {USB_DEVICE(0x0bda, 0x8172)},
4880 ++
4881 ++/* RTL8188SU */
4882 ++ /* Realtek */
4883 ++ {USB_DEVICE(0x0BDA, 0x8171)},
4884 + {USB_DEVICE(0x0bda, 0x8173)},
4885 +- {USB_DEVICE(0x0bda, 0x8174)},
4886 + {USB_DEVICE(0x0bda, 0x8712)},
4887 + {USB_DEVICE(0x0bda, 0x8713)},
4888 + {USB_DEVICE(0x0bda, 0xC512)},
4889 +- /* Abocom */
4890 ++ /* Abocom */
4891 + {USB_DEVICE(0x07B8, 0x8188)},
4892 ++ /* ASUS */
4893 ++ {USB_DEVICE(0x0B05, 0x1786)},
4894 ++ {USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */
4895 ++ /* Belkin */
4896 ++ {USB_DEVICE(0x050D, 0x945A)},
4897 + /* Corega */
4898 +- {USB_DEVICE(0x07aa, 0x0047)},
4899 +- /* Dlink */
4900 +- {USB_DEVICE(0x07d1, 0x3303)},
4901 +- {USB_DEVICE(0x07d1, 0x3302)},
4902 +- {USB_DEVICE(0x07d1, 0x3300)},
4903 +- /* Dlink for Skyworth */
4904 +- {USB_DEVICE(0x14b2, 0x3300)},
4905 +- {USB_DEVICE(0x14b2, 0x3301)},
4906 +- {USB_DEVICE(0x14b2, 0x3302)},
4907 ++ {USB_DEVICE(0x07AA, 0x0047)},
4908 ++ /* D-Link */
4909 ++ {USB_DEVICE(0x2001, 0x3306)},
4910 ++ {USB_DEVICE(0x07D1, 0x3306)}, /* 11n mode disable */
4911 ++ /* Edimax */
4912 ++ {USB_DEVICE(0x7392, 0x7611)},
4913 + /* EnGenius */
4914 + {USB_DEVICE(0x1740, 0x9603)},
4915 +- {USB_DEVICE(0x1740, 0x9605)},
4916 ++ /* Hawking */
4917 ++ {USB_DEVICE(0x0E66, 0x0016)},
4918 ++ /* Hercules */
4919 ++ {USB_DEVICE(0x06F8, 0xE034)},
4920 ++ {USB_DEVICE(0x06F8, 0xE032)},
4921 ++ /* Logitec */
4922 ++ {USB_DEVICE(0x0789, 0x0167)},
4923 ++ /* PCI */
4924 ++ {USB_DEVICE(0x2019, 0xAB28)},
4925 ++ {USB_DEVICE(0x2019, 0xED16)},
4926 ++ /* Sitecom */
4927 ++ {USB_DEVICE(0x0DF6, 0x0057)},
4928 ++ {USB_DEVICE(0x0DF6, 0x0045)},
4929 ++ {USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
4930 ++ {USB_DEVICE(0x0DF6, 0x004B)},
4931 ++ {USB_DEVICE(0x0DF6, 0x0063)},
4932 ++ /* Sweex */
4933 ++ {USB_DEVICE(0x177F, 0x0154)},
4934 ++ /* Thinkware */
4935 ++ {USB_DEVICE(0x0BDA, 0x5077)},
4936 ++ /* Toshiba */
4937 ++ {USB_DEVICE(0x1690, 0x0752)},
4938 ++ /* - */
4939 ++ {USB_DEVICE(0x20F4, 0x646B)},
4940 ++ {USB_DEVICE(0x083A, 0xC512)},
4941 ++
4942 ++/* RTL8191SU */
4943 ++ /* Realtek */
4944 ++ {USB_DEVICE(0x0BDA, 0x8172)},
4945 ++ /* Amigo */
4946 ++ {USB_DEVICE(0x0EB0, 0x9061)},
4947 ++ /* ASUS/EKB */
4948 ++ {USB_DEVICE(0x0BDA, 0x8172)},
4949 ++ {USB_DEVICE(0x13D3, 0x3323)},
4950 ++ {USB_DEVICE(0x13D3, 0x3311)}, /* 11n mode disable */
4951 ++ {USB_DEVICE(0x13D3, 0x3342)},
4952 ++ /* ASUS/EKBLenovo */
4953 ++ {USB_DEVICE(0x13D3, 0x3333)},
4954 ++ {USB_DEVICE(0x13D3, 0x3334)},
4955 ++ {USB_DEVICE(0x13D3, 0x3335)}, /* 11n mode disable */
4956 ++ {USB_DEVICE(0x13D3, 0x3336)}, /* 11n mode disable */
4957 ++ /* ASUS/Media BOX */
4958 ++ {USB_DEVICE(0x13D3, 0x3309)},
4959 + /* Belkin */
4960 +- {USB_DEVICE(0x050d, 0x815F)},
4961 +- {USB_DEVICE(0x050d, 0x945A)},
4962 +- {USB_DEVICE(0x050d, 0x845A)},
4963 +- /* Guillemot */
4964 +- {USB_DEVICE(0x06f8, 0xe031)},
4965 ++ {USB_DEVICE(0x050D, 0x815F)},
4966 ++ /* D-Link */
4967 ++ {USB_DEVICE(0x07D1, 0x3302)},
4968 ++ {USB_DEVICE(0x07D1, 0x3300)},
4969 ++ {USB_DEVICE(0x07D1, 0x3303)},
4970 + /* Edimax */
4971 +- {USB_DEVICE(0x7392, 0x7611)},
4972 + {USB_DEVICE(0x7392, 0x7612)},
4973 +- {USB_DEVICE(0x7392, 0x7622)},
4974 +- /* Sitecom */
4975 +- {USB_DEVICE(0x0DF6, 0x0045)},
4976 ++ /* EnGenius */
4977 ++ {USB_DEVICE(0x1740, 0x9605)},
4978 ++ /* Guillemot */
4979 ++ {USB_DEVICE(0x06F8, 0xE031)},
4980 + /* Hawking */
4981 + {USB_DEVICE(0x0E66, 0x0015)},
4982 +- {USB_DEVICE(0x0E66, 0x0016)},
4983 +- {USB_DEVICE(0x0b05, 0x1786)},
4984 +- {USB_DEVICE(0x0b05, 0x1791)}, /* 11n mode disable */
4985 +-
4986 ++ /* Mediao */
4987 + {USB_DEVICE(0x13D3, 0x3306)},
4988 +- {USB_DEVICE(0x13D3, 0x3309)},
4989 ++ /* PCI */
4990 ++ {USB_DEVICE(0x2019, 0xED18)},
4991 ++ {USB_DEVICE(0x2019, 0x4901)},
4992 ++ /* Sitecom */
4993 ++ {USB_DEVICE(0x0DF6, 0x0058)},
4994 ++ {USB_DEVICE(0x0DF6, 0x0049)},
4995 ++ {USB_DEVICE(0x0DF6, 0x004C)},
4996 ++ {USB_DEVICE(0x0DF6, 0x0064)},
4997 ++ /* Skyworth */
4998 ++ {USB_DEVICE(0x14b2, 0x3300)},
4999 ++ {USB_DEVICE(0x14b2, 0x3301)},
5000 ++ {USB_DEVICE(0x14B2, 0x3302)},
5001 ++ /* - */
5002 ++ {USB_DEVICE(0x04F2, 0xAFF2)},
5003 ++ {USB_DEVICE(0x04F2, 0xAFF5)},
5004 ++ {USB_DEVICE(0x04F2, 0xAFF6)},
5005 ++ {USB_DEVICE(0x13D3, 0x3339)},
5006 ++ {USB_DEVICE(0x13D3, 0x3340)}, /* 11n mode disable */
5007 ++ {USB_DEVICE(0x13D3, 0x3341)}, /* 11n mode disable */
5008 + {USB_DEVICE(0x13D3, 0x3310)},
5009 +- {USB_DEVICE(0x13D3, 0x3311)}, /* 11n mode disable */
5010 + {USB_DEVICE(0x13D3, 0x3325)},
5011 +- {USB_DEVICE(0x083A, 0xC512)},
5012 ++
5013 ++/* RTL8192SU */
5014 ++ /* Realtek */
5015 ++ {USB_DEVICE(0x0BDA, 0x8174)},
5016 ++ {USB_DEVICE(0x0BDA, 0x8174)},
5017 ++ /* Belkin */
5018 ++ {USB_DEVICE(0x050D, 0x845A)},
5019 ++ /* Corega */
5020 ++ {USB_DEVICE(0x07AA, 0x0051)},
5021 ++ /* Edimax */
5022 ++ {USB_DEVICE(0x7392, 0x7622)},
5023 ++ /* NEC */
5024 ++ {USB_DEVICE(0x0409, 0x02B6)},
5025 + {}
5026 + };
5027 +
5028 +@@ -103,8 +172,20 @@ MODULE_DEVICE_TABLE(usb, rtl871x_usb_id_tbl);
5029 + static struct specific_device_id specific_device_id_tbl[] = {
5030 + {.idVendor = 0x0b05, .idProduct = 0x1791,
5031 + .flags = SPEC_DEV_ID_DISABLE_HT},
5032 ++ {.idVendor = 0x0df6, .idProduct = 0x0059,
5033 ++ .flags = SPEC_DEV_ID_DISABLE_HT},
5034 ++ {.idVendor = 0x13d3, .idProduct = 0x3306,
5035 ++ .flags = SPEC_DEV_ID_DISABLE_HT},
5036 + {.idVendor = 0x13D3, .idProduct = 0x3311,
5037 + .flags = SPEC_DEV_ID_DISABLE_HT},
5038 ++ {.idVendor = 0x13d3, .idProduct = 0x3335,
5039 ++ .flags = SPEC_DEV_ID_DISABLE_HT},
5040 ++ {.idVendor = 0x13d3, .idProduct = 0x3336,
5041 ++ .flags = SPEC_DEV_ID_DISABLE_HT},
5042 ++ {.idVendor = 0x13d3, .idProduct = 0x3340,
5043 ++ .flags = SPEC_DEV_ID_DISABLE_HT},
5044 ++ {.idVendor = 0x13d3, .idProduct = 0x3341,
5045 ++ .flags = SPEC_DEV_ID_DISABLE_HT},
5046 + {}
5047 + };
5048 +
5049 +diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
5050 +index cc79f9e..a082f8d 100644
5051 +--- a/drivers/staging/speakup/kobjects.c
5052 ++++ b/drivers/staging/speakup/kobjects.c
5053 +@@ -332,7 +332,7 @@ static ssize_t silent_store(struct kobject *kobj, struct kobj_attribute *attr,
5054 + unsigned long flags;
5055 +
5056 + len = strlen(buf);
5057 +- if (len > 0 || len < 3) {
5058 ++ if (len > 0 && len < 3) {
5059 + ch = buf[0];
5060 + if (ch == '\n')
5061 + ch = '0';
5062 +diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
5063 +index 832608d..08bd26a 100644
5064 +--- a/drivers/staging/usbip/vhci_hcd.c
5065 ++++ b/drivers/staging/usbip/vhci_hcd.c
5066 +@@ -799,20 +799,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
5067 + spin_unlock_irqrestore(&vdev->priv_lock, flags2);
5068 + }
5069 +
5070 +-
5071 +- if (!vdev->ud.tcp_socket) {
5072 +- /* tcp connection is closed */
5073 +- usbip_uinfo("vhci_hcd: vhci_urb_dequeue() gives back urb %p\n",
5074 +- urb);
5075 +-
5076 +- usb_hcd_unlink_urb_from_ep(hcd, urb);
5077 +-
5078 +- spin_unlock_irqrestore(&the_controller->lock, flags);
5079 +- usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
5080 +- urb->status);
5081 +- spin_lock_irqsave(&the_controller->lock, flags);
5082 +- }
5083 +-
5084 + spin_unlock_irqrestore(&the_controller->lock, flags);
5085 +
5086 + usbip_dbg_vhci_hc("leave\n");
5087 +diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
5088 +index d0e9e02..080e85f 100644
5089 +--- a/drivers/staging/zram/zram_drv.c
5090 ++++ b/drivers/staging/zram/zram_drv.c
5091 +@@ -227,6 +227,7 @@ static int zram_read(struct zram *zram, struct bio *bio)
5092 +
5093 + if (zram_test_flag(zram, index, ZRAM_ZERO)) {
5094 + handle_zero_page(page);
5095 ++ index++;
5096 + continue;
5097 + }
5098 +
5099 +@@ -235,12 +236,14 @@ static int zram_read(struct zram *zram, struct bio *bio)
5100 + pr_debug("Read before write: sector=%lu, size=%u",
5101 + (ulong)(bio->bi_sector), bio->bi_size);
5102 + /* Do nothing */
5103 ++ index++;
5104 + continue;
5105 + }
5106 +
5107 + /* Page is stored uncompressed since it's incompressible */
5108 + if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
5109 + handle_uncompressed_page(zram, page, index);
5110 ++ index++;
5111 + continue;
5112 + }
5113 +
5114 +@@ -320,6 +323,7 @@ static int zram_write(struct zram *zram, struct bio *bio)
5115 + mutex_unlock(&zram->lock);
5116 + zram_stat_inc(&zram->stats.pages_zero);
5117 + zram_set_flag(zram, index, ZRAM_ZERO);
5118 ++ index++;
5119 + continue;
5120 + }
5121 +
5122 +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
5123 +index c5f8e5b..dc74295 100644
5124 +--- a/drivers/tty/n_gsm.c
5125 ++++ b/drivers/tty/n_gsm.c
5126 +@@ -2375,6 +2375,7 @@ static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
5127 +
5128 + gsm->initiator = c->initiator;
5129 + gsm->mru = c->mru;
5130 ++ gsm->mtu = c->mtu;
5131 + gsm->encoding = c->encapsulation;
5132 + gsm->adaption = c->adaption;
5133 + gsm->n2 = c->n2;
5134 +diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
5135 +index c556ed9..81f1395 100644
5136 +--- a/drivers/tty/sysrq.c
5137 ++++ b/drivers/tty/sysrq.c
5138 +@@ -46,7 +46,7 @@
5139 + #include <asm/irq_regs.h>
5140 +
5141 + /* Whether we react on sysrq keys or just ignore them */
5142 +-static int __read_mostly sysrq_enabled = 1;
5143 ++static int __read_mostly sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
5144 + static bool __read_mostly sysrq_always_enabled;
5145 +
5146 + static bool sysrq_on(void)
5147 +@@ -571,6 +571,7 @@ struct sysrq_state {
5148 + unsigned int alt_use;
5149 + bool active;
5150 + bool need_reinject;
5151 ++ bool reinjecting;
5152 + };
5153 +
5154 + static void sysrq_reinject_alt_sysrq(struct work_struct *work)
5155 +@@ -581,6 +582,10 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work)
5156 + unsigned int alt_code = sysrq->alt_use;
5157 +
5158 + if (sysrq->need_reinject) {
5159 ++ /* we do not want the assignment to be reordered */
5160 ++ sysrq->reinjecting = true;
5161 ++ mb();
5162 ++
5163 + /* Simulate press and release of Alt + SysRq */
5164 + input_inject_event(handle, EV_KEY, alt_code, 1);
5165 + input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1);
5166 +@@ -589,6 +594,9 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work)
5167 + input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0);
5168 + input_inject_event(handle, EV_KEY, alt_code, 0);
5169 + input_inject_event(handle, EV_SYN, SYN_REPORT, 1);
5170 ++
5171 ++ mb();
5172 ++ sysrq->reinjecting = false;
5173 + }
5174 + }
5175 +
5176 +@@ -599,6 +607,13 @@ static bool sysrq_filter(struct input_handle *handle,
5177 + bool was_active = sysrq->active;
5178 + bool suppress;
5179 +
5180 ++ /*
5181 ++ * Do not filter anything if we are in the process of re-injecting
5182 ++ * Alt+SysRq combination.
5183 ++ */
5184 ++ if (sysrq->reinjecting)
5185 ++ return false;
5186 ++
5187 + switch (type) {
5188 +
5189 + case EV_SYN:
5190 +@@ -629,7 +644,7 @@ static bool sysrq_filter(struct input_handle *handle,
5191 + sysrq->alt_use = sysrq->alt;
5192 + /*
5193 + * If nothing else will be pressed we'll need
5194 +- * to * re-inject Alt-SysRq keysroke.
5195 ++ * to re-inject Alt-SysRq keysroke.
5196 + */
5197 + sysrq->need_reinject = true;
5198 + }
5199 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
5200 +index d6ede98..4ab49d4 100644
5201 +--- a/drivers/usb/class/cdc-acm.c
5202 ++++ b/drivers/usb/class/cdc-acm.c
5203 +@@ -1607,6 +1607,7 @@ static const struct usb_device_id acm_ids[] = {
5204 + { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
5205 + { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
5206 + { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
5207 ++ { NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
5208 + { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
5209 +
5210 + /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
5211 +diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
5212 +index 3799573..4de52dc 100644
5213 +--- a/drivers/usb/core/hcd-pci.c
5214 ++++ b/drivers/usb/core/hcd-pci.c
5215 +@@ -406,7 +406,12 @@ static int suspend_common(struct device *dev, bool do_wakeup)
5216 + return retval;
5217 + }
5218 +
5219 +- synchronize_irq(pci_dev->irq);
5220 ++ /* If MSI-X is enabled, the driver will have synchronized all vectors
5221 ++ * in pci_suspend(). If MSI or legacy PCI is enabled, that will be
5222 ++ * synchronized here.
5223 ++ */
5224 ++ if (!hcd->msix_enabled)
5225 ++ synchronize_irq(pci_dev->irq);
5226 +
5227 + /* Downstream ports from this root hub should already be quiesced, so
5228 + * there will be no DMA activity. Now we can shut down the upstream
5229 +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
5230 +index ced846a..8aa6b51 100644
5231 +--- a/drivers/usb/core/hcd.c
5232 ++++ b/drivers/usb/core/hcd.c
5233 +@@ -1956,7 +1956,6 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
5234 +
5235 + dev_dbg(&rhdev->dev, "usb %s%s\n",
5236 + (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume");
5237 +- clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
5238 + if (!hcd->driver->bus_resume)
5239 + return -ENOENT;
5240 + if (hcd->state == HC_STATE_RUNNING)
5241 +@@ -1964,6 +1963,7 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
5242 +
5243 + hcd->state = HC_STATE_RESUMING;
5244 + status = hcd->driver->bus_resume(hcd);
5245 ++ clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
5246 + if (status == 0) {
5247 + /* TRSMRCY = 10 msec */
5248 + msleep(10);
5249 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
5250 +index 27115b4..31edd87 100644
5251 +--- a/drivers/usb/core/hub.c
5252 ++++ b/drivers/usb/core/hub.c
5253 +@@ -677,6 +677,8 @@ static void hub_init_func3(struct work_struct *ws);
5254 + static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
5255 + {
5256 + struct usb_device *hdev = hub->hdev;
5257 ++ struct usb_hcd *hcd;
5258 ++ int ret;
5259 + int port1;
5260 + int status;
5261 + bool need_debounce_delay = false;
5262 +@@ -715,6 +717,25 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
5263 + usb_autopm_get_interface_no_resume(
5264 + to_usb_interface(hub->intfdev));
5265 + return; /* Continues at init2: below */
5266 ++ } else if (type == HUB_RESET_RESUME) {
5267 ++ /* The internal host controller state for the hub device
5268 ++ * may be gone after a host power loss on system resume.
5269 ++ * Update the device's info so the HW knows it's a hub.
5270 ++ */
5271 ++ hcd = bus_to_hcd(hdev->bus);
5272 ++ if (hcd->driver->update_hub_device) {
5273 ++ ret = hcd->driver->update_hub_device(hcd, hdev,
5274 ++ &hub->tt, GFP_NOIO);
5275 ++ if (ret < 0) {
5276 ++ dev_err(hub->intfdev, "Host not "
5277 ++ "accepting hub info "
5278 ++ "update.\n");
5279 ++ dev_err(hub->intfdev, "LS/FS devices "
5280 ++ "and hubs may not work "
5281 ++ "under this hub\n.");
5282 ++ }
5283 ++ }
5284 ++ hub_power_on(hub, true);
5285 + } else {
5286 + hub_power_on(hub, true);
5287 + }
5288 +@@ -2723,6 +2744,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
5289 + udev->ttport = hdev->ttport;
5290 + } else if (udev->speed != USB_SPEED_HIGH
5291 + && hdev->speed == USB_SPEED_HIGH) {
5292 ++ if (!hub->tt.hub) {
5293 ++ dev_err(&udev->dev, "parent hub has no TT\n");
5294 ++ retval = -EINVAL;
5295 ++ goto fail;
5296 ++ }
5297 + udev->tt = &hub->tt;
5298 + udev->ttport = port1;
5299 + }
5300 +diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
5301 +index 2fc8636..12ff6cf 100644
5302 +--- a/drivers/usb/gadget/printer.c
5303 ++++ b/drivers/usb/gadget/printer.c
5304 +@@ -131,31 +131,31 @@ static struct printer_dev usb_printer_gadget;
5305 + * parameters are in UTF-8 (superset of ASCII's 7 bit characters).
5306 + */
5307 +
5308 +-static ushort __initdata idVendor;
5309 ++static ushort idVendor;
5310 + module_param(idVendor, ushort, S_IRUGO);
5311 + MODULE_PARM_DESC(idVendor, "USB Vendor ID");
5312 +
5313 +-static ushort __initdata idProduct;
5314 ++static ushort idProduct;
5315 + module_param(idProduct, ushort, S_IRUGO);
5316 + MODULE_PARM_DESC(idProduct, "USB Product ID");
5317 +
5318 +-static ushort __initdata bcdDevice;
5319 ++static ushort bcdDevice;
5320 + module_param(bcdDevice, ushort, S_IRUGO);
5321 + MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
5322 +
5323 +-static char *__initdata iManufacturer;
5324 ++static char *iManufacturer;
5325 + module_param(iManufacturer, charp, S_IRUGO);
5326 + MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
5327 +
5328 +-static char *__initdata iProduct;
5329 ++static char *iProduct;
5330 + module_param(iProduct, charp, S_IRUGO);
5331 + MODULE_PARM_DESC(iProduct, "USB Product string");
5332 +
5333 +-static char *__initdata iSerialNum;
5334 ++static char *iSerialNum;
5335 + module_param(iSerialNum, charp, S_IRUGO);
5336 + MODULE_PARM_DESC(iSerialNum, "1");
5337 +
5338 +-static char *__initdata iPNPstring;
5339 ++static char *iPNPstring;
5340 + module_param(iPNPstring, charp, S_IRUGO);
5341 + MODULE_PARM_DESC(iPNPstring, "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;");
5342 +
5343 +@@ -1596,13 +1596,12 @@ cleanup(void)
5344 + int status;
5345 +
5346 + mutex_lock(&usb_printer_gadget.lock_printer_io);
5347 +- class_destroy(usb_gadget_class);
5348 +- unregister_chrdev_region(g_printer_devno, 2);
5349 +-
5350 + status = usb_gadget_unregister_driver(&printer_driver);
5351 + if (status)
5352 + ERROR(dev, "usb_gadget_unregister_driver %x\n", status);
5353 +
5354 ++ unregister_chrdev_region(g_printer_devno, 2);
5355 ++ class_destroy(usb_gadget_class);
5356 + mutex_unlock(&usb_printer_gadget.lock_printer_io);
5357 + }
5358 + module_exit(cleanup);
5359 +diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
5360 +index 2baf8a8..a869e3c 100644
5361 +--- a/drivers/usb/host/ehci-au1xxx.c
5362 ++++ b/drivers/usb/host/ehci-au1xxx.c
5363 +@@ -227,8 +227,8 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
5364 + * mark HW unaccessible. The PM and USB cores make sure that
5365 + * the root hub is either suspended or stopped.
5366 + */
5367 +- spin_lock_irqsave(&ehci->lock, flags);
5368 + ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev));
5369 ++ spin_lock_irqsave(&ehci->lock, flags);
5370 + ehci_writel(ehci, 0, &ehci->regs->intr_enable);
5371 + (void)ehci_readl(ehci, &ehci->regs->intr_enable);
5372 +
5373 +diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
5374 +index e906280..6716312 100644
5375 +--- a/drivers/usb/host/ehci-hcd.c
5376 ++++ b/drivers/usb/host/ehci-hcd.c
5377 +@@ -114,6 +114,9 @@ MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us\n");
5378 +
5379 + #define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
5380 +
5381 ++/* for ASPM quirk of ISOC on AMD SB800 */
5382 ++static struct pci_dev *amd_nb_dev;
5383 ++
5384 + /*-------------------------------------------------------------------------*/
5385 +
5386 + #include "ehci.h"
5387 +@@ -529,6 +532,11 @@ static void ehci_stop (struct usb_hcd *hcd)
5388 + spin_unlock_irq (&ehci->lock);
5389 + ehci_mem_cleanup (ehci);
5390 +
5391 ++ if (amd_nb_dev) {
5392 ++ pci_dev_put(amd_nb_dev);
5393 ++ amd_nb_dev = NULL;
5394 ++ }
5395 ++
5396 + #ifdef EHCI_STATS
5397 + ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
5398 + ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
5399 +@@ -564,6 +572,8 @@ static int ehci_init(struct usb_hcd *hcd)
5400 + ehci->iaa_watchdog.function = ehci_iaa_watchdog;
5401 + ehci->iaa_watchdog.data = (unsigned long) ehci;
5402 +
5403 ++ hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
5404 ++
5405 + /*
5406 + * hw default: 1K periodic list heads, one per frame.
5407 + * periodic_size can shrink by USBCMD update if hcc_params allows.
5408 +@@ -571,11 +581,20 @@ static int ehci_init(struct usb_hcd *hcd)
5409 + ehci->periodic_size = DEFAULT_I_TDPS;
5410 + INIT_LIST_HEAD(&ehci->cached_itd_list);
5411 + INIT_LIST_HEAD(&ehci->cached_sitd_list);
5412 ++
5413 ++ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
5414 ++ /* periodic schedule size can be smaller than default */
5415 ++ switch (EHCI_TUNE_FLS) {
5416 ++ case 0: ehci->periodic_size = 1024; break;
5417 ++ case 1: ehci->periodic_size = 512; break;
5418 ++ case 2: ehci->periodic_size = 256; break;
5419 ++ default: BUG();
5420 ++ }
5421 ++ }
5422 + if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
5423 + return retval;
5424 +
5425 + /* controllers may cache some of the periodic schedule ... */
5426 +- hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
5427 + if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
5428 + ehci->i_thresh = 2 + 8;
5429 + else // N microframes cached
5430 +@@ -629,12 +648,6 @@ static int ehci_init(struct usb_hcd *hcd)
5431 + /* periodic schedule size can be smaller than default */
5432 + temp &= ~(3 << 2);
5433 + temp |= (EHCI_TUNE_FLS << 2);
5434 +- switch (EHCI_TUNE_FLS) {
5435 +- case 0: ehci->periodic_size = 1024; break;
5436 +- case 1: ehci->periodic_size = 512; break;
5437 +- case 2: ehci->periodic_size = 256; break;
5438 +- default: BUG();
5439 +- }
5440 + }
5441 + if (HCC_LPM(hcc_params)) {
5442 + /* support link power management EHCI 1.1 addendum */
5443 +diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
5444 +index 796ea0c..8a515f0 100644
5445 +--- a/drivers/usb/host/ehci-hub.c
5446 ++++ b/drivers/usb/host/ehci-hub.c
5447 +@@ -111,6 +111,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
5448 + {
5449 + int port;
5450 + u32 temp;
5451 ++ unsigned long flags;
5452 +
5453 + /* If remote wakeup is enabled for the root hub but disabled
5454 + * for the controller, we must adjust all the port wakeup flags
5455 +@@ -120,6 +121,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
5456 + if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
5457 + return;
5458 +
5459 ++ spin_lock_irqsave(&ehci->lock, flags);
5460 ++
5461 + /* clear phy low-power mode before changing wakeup flags */
5462 + if (ehci->has_hostpc) {
5463 + port = HCS_N_PORTS(ehci->hcs_params);
5464 +@@ -131,7 +134,9 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
5465 + temp = ehci_readl(ehci, hostpc_reg);
5466 + ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
5467 + }
5468 ++ spin_unlock_irqrestore(&ehci->lock, flags);
5469 + msleep(5);
5470 ++ spin_lock_irqsave(&ehci->lock, flags);
5471 + }
5472 +
5473 + port = HCS_N_PORTS(ehci->hcs_params);
5474 +@@ -170,6 +175,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
5475 + /* Does the root hub have a port wakeup pending? */
5476 + if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD))
5477 + usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
5478 ++
5479 ++ spin_unlock_irqrestore(&ehci->lock, flags);
5480 + }
5481 +
5482 + static int ehci_bus_suspend (struct usb_hcd *hcd)
5483 +diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
5484 +index 655f3c9..331ab20 100644
5485 +--- a/drivers/usb/host/ehci-pci.c
5486 ++++ b/drivers/usb/host/ehci-pci.c
5487 +@@ -41,6 +41,42 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
5488 + return 0;
5489 + }
5490 +
5491 ++static int ehci_quirk_amd_hudson(struct ehci_hcd *ehci)
5492 ++{
5493 ++ struct pci_dev *amd_smbus_dev;
5494 ++ u8 rev = 0;
5495 ++
5496 ++ amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
5497 ++ if (amd_smbus_dev) {
5498 ++ pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
5499 ++ if (rev < 0x40) {
5500 ++ pci_dev_put(amd_smbus_dev);
5501 ++ amd_smbus_dev = NULL;
5502 ++ return 0;
5503 ++ }
5504 ++ } else {
5505 ++ amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x780b, NULL);
5506 ++ if (!amd_smbus_dev)
5507 ++ return 0;
5508 ++ pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
5509 ++ if (rev < 0x11 || rev > 0x18) {
5510 ++ pci_dev_put(amd_smbus_dev);
5511 ++ amd_smbus_dev = NULL;
5512 ++ return 0;
5513 ++ }
5514 ++ }
5515 ++
5516 ++ if (!amd_nb_dev)
5517 ++ amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
5518 ++
5519 ++ ehci_info(ehci, "QUIRK: Enable exception for AMD Hudson ASPM\n");
5520 ++
5521 ++ pci_dev_put(amd_smbus_dev);
5522 ++ amd_smbus_dev = NULL;
5523 ++
5524 ++ return 1;
5525 ++}
5526 ++
5527 + /* called during probe() after chip reset completes */
5528 + static int ehci_pci_setup(struct usb_hcd *hcd)
5529 + {
5530 +@@ -99,6 +135,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
5531 + /* cache this readonly data; minimize chip reads */
5532 + ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
5533 +
5534 ++ if (ehci_quirk_amd_hudson(ehci))
5535 ++ ehci->amd_l1_fix = 1;
5536 ++
5537 + retval = ehci_halt(ehci);
5538 + if (retval)
5539 + return retval;
5540 +@@ -321,8 +360,8 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
5541 + * mark HW unaccessible. The PM and USB cores make sure that
5542 + * the root hub is either suspended or stopped.
5543 + */
5544 +- spin_lock_irqsave (&ehci->lock, flags);
5545 + ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
5546 ++ spin_lock_irqsave (&ehci->lock, flags);
5547 + ehci_writel(ehci, 0, &ehci->regs->intr_enable);
5548 + (void)ehci_readl(ehci, &ehci->regs->intr_enable);
5549 +
5550 +diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
5551 +index d9f78eb..aa46f57 100644
5552 +--- a/drivers/usb/host/ehci-sched.c
5553 ++++ b/drivers/usb/host/ehci-sched.c
5554 +@@ -1590,6 +1590,63 @@ itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
5555 + *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
5556 + }
5557 +
5558 ++#define AB_REG_BAR_LOW 0xe0
5559 ++#define AB_REG_BAR_HIGH 0xe1
5560 ++#define AB_INDX(addr) ((addr) + 0x00)
5561 ++#define AB_DATA(addr) ((addr) + 0x04)
5562 ++#define NB_PCIE_INDX_ADDR 0xe0
5563 ++#define NB_PCIE_INDX_DATA 0xe4
5564 ++#define NB_PIF0_PWRDOWN_0 0x01100012
5565 ++#define NB_PIF0_PWRDOWN_1 0x01100013
5566 ++
5567 ++static void ehci_quirk_amd_L1(struct ehci_hcd *ehci, int disable)
5568 ++{
5569 ++ u32 addr, addr_low, addr_high, val;
5570 ++
5571 ++ outb_p(AB_REG_BAR_LOW, 0xcd6);
5572 ++ addr_low = inb_p(0xcd7);
5573 ++ outb_p(AB_REG_BAR_HIGH, 0xcd6);
5574 ++ addr_high = inb_p(0xcd7);
5575 ++ addr = addr_high << 8 | addr_low;
5576 ++ outl_p(0x30, AB_INDX(addr));
5577 ++ outl_p(0x40, AB_DATA(addr));
5578 ++ outl_p(0x34, AB_INDX(addr));
5579 ++ val = inl_p(AB_DATA(addr));
5580 ++
5581 ++ if (disable) {
5582 ++ val &= ~0x8;
5583 ++ val |= (1 << 4) | (1 << 9);
5584 ++ } else {
5585 ++ val |= 0x8;
5586 ++ val &= ~((1 << 4) | (1 << 9));
5587 ++ }
5588 ++ outl_p(val, AB_DATA(addr));
5589 ++
5590 ++ if (amd_nb_dev) {
5591 ++ addr = NB_PIF0_PWRDOWN_0;
5592 ++ pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
5593 ++ pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
5594 ++ if (disable)
5595 ++ val &= ~(0x3f << 7);
5596 ++ else
5597 ++ val |= 0x3f << 7;
5598 ++
5599 ++ pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
5600 ++
5601 ++ addr = NB_PIF0_PWRDOWN_1;
5602 ++ pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
5603 ++ pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
5604 ++ if (disable)
5605 ++ val &= ~(0x3f << 7);
5606 ++ else
5607 ++ val |= 0x3f << 7;
5608 ++
5609 ++ pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
5610 ++ }
5611 ++
5612 ++ return;
5613 ++}
5614 ++
5615 + /* fit urb's itds into the selected schedule slot; activate as needed */
5616 + static int
5617 + itd_link_urb (
5618 +@@ -1616,6 +1673,12 @@ itd_link_urb (
5619 + urb->interval,
5620 + next_uframe >> 3, next_uframe & 0x7);
5621 + }
5622 ++
5623 ++ if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
5624 ++ if (ehci->amd_l1_fix == 1)
5625 ++ ehci_quirk_amd_L1(ehci, 1);
5626 ++ }
5627 ++
5628 + ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
5629 +
5630 + /* fill iTDs uframe by uframe */
5631 +@@ -1740,6 +1803,11 @@ itd_complete (
5632 + (void) disable_periodic(ehci);
5633 + ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
5634 +
5635 ++ if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
5636 ++ if (ehci->amd_l1_fix == 1)
5637 ++ ehci_quirk_amd_L1(ehci, 0);
5638 ++ }
5639 ++
5640 + if (unlikely(list_is_singular(&stream->td_list))) {
5641 + ehci_to_hcd(ehci)->self.bandwidth_allocated
5642 + -= stream->bandwidth;
5643 +@@ -2025,6 +2093,12 @@ sitd_link_urb (
5644 + (next_uframe >> 3) & (ehci->periodic_size - 1),
5645 + stream->interval, hc32_to_cpu(ehci, stream->splits));
5646 + }
5647 ++
5648 ++ if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
5649 ++ if (ehci->amd_l1_fix == 1)
5650 ++ ehci_quirk_amd_L1(ehci, 1);
5651 ++ }
5652 ++
5653 + ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
5654 +
5655 + /* fill sITDs frame by frame */
5656 +@@ -2125,6 +2199,11 @@ sitd_complete (
5657 + (void) disable_periodic(ehci);
5658 + ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
5659 +
5660 ++ if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
5661 ++ if (ehci->amd_l1_fix == 1)
5662 ++ ehci_quirk_amd_L1(ehci, 0);
5663 ++ }
5664 ++
5665 + if (list_is_singular(&stream->td_list)) {
5666 + ehci_to_hcd(ehci)->self.bandwidth_allocated
5667 + -= stream->bandwidth;
5668 +diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
5669 +index ba8eab3..799ac16 100644
5670 +--- a/drivers/usb/host/ehci.h
5671 ++++ b/drivers/usb/host/ehci.h
5672 +@@ -131,6 +131,7 @@ struct ehci_hcd { /* one per controller */
5673 + unsigned has_amcc_usb23:1;
5674 + unsigned need_io_watchdog:1;
5675 + unsigned broken_periodic:1;
5676 ++ unsigned amd_l1_fix:1;
5677 + unsigned fs_i_thresh:1; /* Intel iso scheduling */
5678 + unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/
5679 +
5680 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
5681 +index df558f6..62c70c2 100644
5682 +--- a/drivers/usb/host/xhci-ring.c
5683 ++++ b/drivers/usb/host/xhci-ring.c
5684 +@@ -1188,7 +1188,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
5685 +
5686 + addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1);
5687 + temp = xhci_readl(xhci, addr);
5688 +- if ((temp & PORT_CONNECT) && (hcd->state == HC_STATE_SUSPENDED)) {
5689 ++ if (hcd->state == HC_STATE_SUSPENDED) {
5690 + xhci_dbg(xhci, "resume root hub\n");
5691 + usb_hcd_resume_root_hub(hcd);
5692 + }
5693 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
5694 +index 45e4a31..34cf4e1 100644
5695 +--- a/drivers/usb/host/xhci.c
5696 ++++ b/drivers/usb/host/xhci.c
5697 +@@ -226,7 +226,8 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
5698 + static int xhci_setup_msix(struct xhci_hcd *xhci)
5699 + {
5700 + int i, ret = 0;
5701 +- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
5702 ++ struct usb_hcd *hcd = xhci_to_hcd(xhci);
5703 ++ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
5704 +
5705 + /*
5706 + * calculate number of msi-x vectors supported.
5707 +@@ -265,6 +266,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
5708 + goto disable_msix;
5709 + }
5710 +
5711 ++ hcd->msix_enabled = 1;
5712 + return ret;
5713 +
5714 + disable_msix:
5715 +@@ -280,7 +282,8 @@ free_entries:
5716 + /* Free any IRQs and disable MSI-X */
5717 + static void xhci_cleanup_msix(struct xhci_hcd *xhci)
5718 + {
5719 +- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
5720 ++ struct usb_hcd *hcd = xhci_to_hcd(xhci);
5721 ++ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
5722 +
5723 + xhci_free_irq(xhci);
5724 +
5725 +@@ -292,6 +295,7 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
5726 + pci_disable_msi(pdev);
5727 + }
5728 +
5729 ++ hcd->msix_enabled = 0;
5730 + return;
5731 + }
5732 +
5733 +@@ -508,9 +512,10 @@ void xhci_stop(struct usb_hcd *hcd)
5734 + spin_lock_irq(&xhci->lock);
5735 + xhci_halt(xhci);
5736 + xhci_reset(xhci);
5737 +- xhci_cleanup_msix(xhci);
5738 + spin_unlock_irq(&xhci->lock);
5739 +
5740 ++ xhci_cleanup_msix(xhci);
5741 ++
5742 + #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
5743 + /* Tell the event ring poll function not to reschedule */
5744 + xhci->zombie = 1;
5745 +@@ -544,9 +549,10 @@ void xhci_shutdown(struct usb_hcd *hcd)
5746 +
5747 + spin_lock_irq(&xhci->lock);
5748 + xhci_halt(xhci);
5749 +- xhci_cleanup_msix(xhci);
5750 + spin_unlock_irq(&xhci->lock);
5751 +
5752 ++ xhci_cleanup_msix(xhci);
5753 ++
5754 + xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
5755 + xhci_readl(xhci, &xhci->op_regs->status));
5756 + }
5757 +@@ -647,6 +653,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
5758 + int rc = 0;
5759 + struct usb_hcd *hcd = xhci_to_hcd(xhci);
5760 + u32 command;
5761 ++ int i;
5762 +
5763 + spin_lock_irq(&xhci->lock);
5764 + clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
5765 +@@ -677,10 +684,15 @@ int xhci_suspend(struct xhci_hcd *xhci)
5766 + spin_unlock_irq(&xhci->lock);
5767 + return -ETIMEDOUT;
5768 + }
5769 +- /* step 5: remove core well power */
5770 +- xhci_cleanup_msix(xhci);
5771 + spin_unlock_irq(&xhci->lock);
5772 +
5773 ++ /* step 5: remove core well power */
5774 ++ /* synchronize irq when using MSI-X */
5775 ++ if (xhci->msix_entries) {
5776 ++ for (i = 0; i < xhci->msix_count; i++)
5777 ++ synchronize_irq(xhci->msix_entries[i].vector);
5778 ++ }
5779 ++
5780 + return rc;
5781 + }
5782 +
5783 +@@ -694,7 +706,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
5784 + {
5785 + u32 command, temp = 0;
5786 + struct usb_hcd *hcd = xhci_to_hcd(xhci);
5787 +- struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
5788 + int old_state, retval;
5789 +
5790 + old_state = hcd->state;
5791 +@@ -729,9 +740,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
5792 + xhci_dbg(xhci, "Stop HCD\n");
5793 + xhci_halt(xhci);
5794 + xhci_reset(xhci);
5795 +- if (hibernated)
5796 +- xhci_cleanup_msix(xhci);
5797 + spin_unlock_irq(&xhci->lock);
5798 ++ xhci_cleanup_msix(xhci);
5799 +
5800 + #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
5801 + /* Tell the event ring poll function not to reschedule */
5802 +@@ -765,30 +775,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
5803 + return retval;
5804 + }
5805 +
5806 +- spin_unlock_irq(&xhci->lock);
5807 +- /* Re-setup MSI-X */
5808 +- if (hcd->irq)
5809 +- free_irq(hcd->irq, hcd);
5810 +- hcd->irq = -1;
5811 +-
5812 +- retval = xhci_setup_msix(xhci);
5813 +- if (retval)
5814 +- /* fall back to msi*/
5815 +- retval = xhci_setup_msi(xhci);
5816 +-
5817 +- if (retval) {
5818 +- /* fall back to legacy interrupt*/
5819 +- retval = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
5820 +- hcd->irq_descr, hcd);
5821 +- if (retval) {
5822 +- xhci_err(xhci, "request interrupt %d failed\n",
5823 +- pdev->irq);
5824 +- return retval;
5825 +- }
5826 +- hcd->irq = pdev->irq;
5827 +- }
5828 +-
5829 +- spin_lock_irq(&xhci->lock);
5830 + /* step 4: set Run/Stop bit */
5831 + command = xhci_readl(xhci, &xhci->op_regs->command);
5832 + command |= CMD_RUN;
5833 +@@ -2445,8 +2431,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
5834 + xhci_err(xhci, "Error while assigning device slot ID\n");
5835 + return 0;
5836 + }
5837 +- /* xhci_alloc_virt_device() does not touch rings; no need to lock */
5838 +- if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
5839 ++ /* xhci_alloc_virt_device() does not touch rings; no need to lock.
5840 ++ * Use GFP_NOIO, since this function can be called from
5841 ++ * xhci_discover_or_reset_device(), which may be called as part of
5842 ++ * mass storage driver error handling.
5843 ++ */
5844 ++ if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
5845 + /* Disable slot, if we can do it without mem alloc */
5846 + xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
5847 + spin_lock_irqsave(&xhci->lock, flags);
5848 +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
5849 +index 63f7cc4..7b8815d 100644
5850 +--- a/drivers/usb/serial/ch341.c
5851 ++++ b/drivers/usb/serial/ch341.c
5852 +@@ -486,12 +486,22 @@ static void ch341_read_int_callback(struct urb *urb)
5853 + if (actual_length >= 4) {
5854 + struct ch341_private *priv = usb_get_serial_port_data(port);
5855 + unsigned long flags;
5856 ++ u8 prev_line_status = priv->line_status;
5857 +
5858 + spin_lock_irqsave(&priv->lock, flags);
5859 + priv->line_status = (~(data[2])) & CH341_BITS_MODEM_STAT;
5860 + if ((data[1] & CH341_MULT_STAT))
5861 + priv->multi_status_change = 1;
5862 + spin_unlock_irqrestore(&priv->lock, flags);
5863 ++
5864 ++ if ((priv->line_status ^ prev_line_status) & CH341_BIT_DCD) {
5865 ++ struct tty_struct *tty = tty_port_tty_get(&port->port);
5866 ++ if (tty)
5867 ++ usb_serial_handle_dcd_change(port, tty,
5868 ++ priv->line_status & CH341_BIT_DCD);
5869 ++ tty_kref_put(tty);
5870 ++ }
5871 ++
5872 + wake_up_interruptible(&priv->delta_msr_wait);
5873 + }
5874 +
5875 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
5876 +index 8d7731d..735ea03 100644
5877 +--- a/drivers/usb/serial/cp210x.c
5878 ++++ b/drivers/usb/serial/cp210x.c
5879 +@@ -49,7 +49,6 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *,
5880 + static void cp210x_break_ctl(struct tty_struct *, int);
5881 + static int cp210x_startup(struct usb_serial *);
5882 + static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
5883 +-static int cp210x_carrier_raised(struct usb_serial_port *p);
5884 +
5885 + static int debug;
5886 +
5887 +@@ -87,7 +86,6 @@ static const struct usb_device_id id_table[] = {
5888 + { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
5889 + { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
5890 + { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
5891 +- { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
5892 + { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
5893 + { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
5894 + { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
5895 +@@ -110,7 +108,9 @@ static const struct usb_device_id id_table[] = {
5896 + { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
5897 + { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
5898 + { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
5899 ++ { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
5900 + { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
5901 ++ { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
5902 + { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
5903 + { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
5904 + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
5905 +@@ -165,8 +165,7 @@ static struct usb_serial_driver cp210x_device = {
5906 + .tiocmget = cp210x_tiocmget,
5907 + .tiocmset = cp210x_tiocmset,
5908 + .attach = cp210x_startup,
5909 +- .dtr_rts = cp210x_dtr_rts,
5910 +- .carrier_raised = cp210x_carrier_raised
5911 ++ .dtr_rts = cp210x_dtr_rts
5912 + };
5913 +
5914 + /* Config request types */
5915 +@@ -765,15 +764,6 @@ static int cp210x_tiocmget (struct tty_struct *tty, struct file *file)
5916 + return result;
5917 + }
5918 +
5919 +-static int cp210x_carrier_raised(struct usb_serial_port *p)
5920 +-{
5921 +- unsigned int control;
5922 +- cp210x_get_config(p, CP210X_GET_MDMSTS, &control, 1);
5923 +- if (control & CONTROL_DCD)
5924 +- return 1;
5925 +- return 0;
5926 +-}
5927 +-
5928 + static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
5929 + {
5930 + struct usb_serial_port *port = tty->driver_data;
5931 +diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
5932 +index b92070c..666e5a6 100644
5933 +--- a/drivers/usb/serial/digi_acceleport.c
5934 ++++ b/drivers/usb/serial/digi_acceleport.c
5935 +@@ -455,7 +455,6 @@ static int digi_write_room(struct tty_struct *tty);
5936 + static int digi_chars_in_buffer(struct tty_struct *tty);
5937 + static int digi_open(struct tty_struct *tty, struct usb_serial_port *port);
5938 + static void digi_close(struct usb_serial_port *port);
5939 +-static int digi_carrier_raised(struct usb_serial_port *port);
5940 + static void digi_dtr_rts(struct usb_serial_port *port, int on);
5941 + static int digi_startup_device(struct usb_serial *serial);
5942 + static int digi_startup(struct usb_serial *serial);
5943 +@@ -511,7 +510,6 @@ static struct usb_serial_driver digi_acceleport_2_device = {
5944 + .open = digi_open,
5945 + .close = digi_close,
5946 + .dtr_rts = digi_dtr_rts,
5947 +- .carrier_raised = digi_carrier_raised,
5948 + .write = digi_write,
5949 + .write_room = digi_write_room,
5950 + .write_bulk_callback = digi_write_bulk_callback,
5951 +@@ -1339,14 +1337,6 @@ static void digi_dtr_rts(struct usb_serial_port *port, int on)
5952 + digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1);
5953 + }
5954 +
5955 +-static int digi_carrier_raised(struct usb_serial_port *port)
5956 +-{
5957 +- struct digi_port *priv = usb_get_serial_port_data(port);
5958 +- if (priv->dp_modem_signals & TIOCM_CD)
5959 +- return 1;
5960 +- return 0;
5961 +-}
5962 +-
5963 + static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
5964 + {
5965 + int ret;
5966 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
5967 +index 2dec500..48ce01e 100644
5968 +--- a/drivers/usb/serial/ftdi_sio.c
5969 ++++ b/drivers/usb/serial/ftdi_sio.c
5970 +@@ -99,6 +99,7 @@ struct ftdi_sio_quirk {
5971 + static int ftdi_jtag_probe(struct usb_serial *serial);
5972 + static int ftdi_mtxorb_hack_setup(struct usb_serial *serial);
5973 + static int ftdi_NDI_device_setup(struct usb_serial *serial);
5974 ++static int ftdi_stmclite_probe(struct usb_serial *serial);
5975 + static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
5976 + static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
5977 +
5978 +@@ -122,6 +123,10 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
5979 + .port_probe = ftdi_HE_TIRA1_setup,
5980 + };
5981 +
5982 ++static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
5983 ++ .probe = ftdi_stmclite_probe,
5984 ++};
5985 ++
5986 + /*
5987 + * The 8U232AM has the same API as the sio except for:
5988 + * - it can support MUCH higher baudrates; up to:
5989 +@@ -615,6 +620,7 @@ static struct usb_device_id id_table_combined [] = {
5990 + { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
5991 + { USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
5992 + { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
5993 ++ { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
5994 + { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
5995 + { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
5996 + { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
5997 +@@ -675,7 +681,17 @@ static struct usb_device_id id_table_combined [] = {
5998 + { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
5999 + { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
6000 + { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
6001 +- { USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) },
6002 ++ { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) },
6003 ++ { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) },
6004 ++ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) },
6005 ++ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C2_PID) },
6006 ++ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2D_PID) },
6007 ++ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VT_PID) },
6008 ++ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VR_PID) },
6009 ++ { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVT_PID) },
6010 ++ { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVR_PID) },
6011 ++ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVT_PID) },
6012 ++ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVR_PID) },
6013 + { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
6014 + { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
6015 + { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
6016 +@@ -799,6 +815,8 @@ static struct usb_device_id id_table_combined [] = {
6017 + { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
6018 + { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
6019 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
6020 ++ { USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
6021 ++ .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
6022 + { }, /* Optional parameter entry */
6023 + { } /* Terminating entry */
6024 + };
6025 +@@ -1681,6 +1699,25 @@ static int ftdi_jtag_probe(struct usb_serial *serial)
6026 + }
6027 +
6028 + /*
6029 ++ * First and second ports on STMCLite adaptors are reserved for the JTAG
6030 ++ * interface and the fourth port for PIO
6031 ++ */
6032 ++static int ftdi_stmclite_probe(struct usb_serial *serial)
6033 ++{
6034 ++ struct usb_device *udev = serial->dev;
6035 ++ struct usb_interface *interface = serial->interface;
6036 ++
6037 ++ dbg("%s", __func__);
6038 ++
6039 ++ if (interface == udev->actconfig->interface[2])
6040 ++ return 0;
6041 ++
6042 ++ dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
6043 ++
6044 ++ return -ENODEV;
6045 ++}
6046 ++
6047 ++/*
6048 + * The Matrix Orbital VK204-25-USB has an invalid IN endpoint.
6049 + * We have to correct it if we want to read from it.
6050 + */
6051 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
6052 +index bf08672..117e8e6 100644
6053 +--- a/drivers/usb/serial/ftdi_sio_ids.h
6054 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
6055 +@@ -518,6 +518,12 @@
6056 + #define RATOC_PRODUCT_ID_USB60F 0xb020
6057 +
6058 + /*
6059 ++ * Acton Research Corp.
6060 ++ */
6061 ++#define ACTON_VID 0x0647 /* Vendor ID */
6062 ++#define ACTON_SPECTRAPRO_PID 0x0100
6063 ++
6064 ++/*
6065 + * Contec products (http://www.contec.com)
6066 + * Submitted by Daniel Sangorrin
6067 + */
6068 +@@ -569,11 +575,23 @@
6069 + #define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */
6070 +
6071 + /*
6072 +- * Icom ID-1 digital transceiver
6073 ++ * Definitions for Icom Inc. devices
6074 + */
6075 +-
6076 +-#define ICOM_ID1_VID 0x0C26
6077 +-#define ICOM_ID1_PID 0x0004
6078 ++#define ICOM_VID 0x0C26 /* Icom vendor ID */
6079 ++/* Note: ID-1 is a communications transceiver for HAM-radio operators */
6080 ++#define ICOM_ID_1_PID 0x0004 /* ID-1 USB to RS-232 */
6081 ++/* Note: OPC is an optional cable to connect an Icom transceiver */
6082 ++#define ICOM_OPC_U_UC_PID 0x0018 /* OPC-478UC, OPC-1122U cloning cable */
6083 ++/* Note: ID-RP* devices are Icom Repeater Devices for HAM-radio */
6084 ++#define ICOM_ID_RP2C1_PID 0x0009 /* ID-RP2C Asset 1 to RS-232 */
6085 ++#define ICOM_ID_RP2C2_PID 0x000A /* ID-RP2C Asset 2 to RS-232 */
6086 ++#define ICOM_ID_RP2D_PID 0x000B /* ID-RP2D configuration port */
6087 ++#define ICOM_ID_RP2VT_PID 0x000C /* ID-RP2V Transmit config port */
6088 ++#define ICOM_ID_RP2VR_PID 0x000D /* ID-RP2V Receive config port */
6089 ++#define ICOM_ID_RP4KVT_PID 0x0010 /* ID-RP4000V Transmit config port */
6090 ++#define ICOM_ID_RP4KVR_PID 0x0011 /* ID-RP4000V Receive config port */
6091 ++#define ICOM_ID_RP2KVT_PID 0x0012 /* ID-RP2000V Transmit config port */
6092 ++#define ICOM_ID_RP2KVR_PID 0x0013 /* ID-RP2000V Receive config port */
6093 +
6094 + /*
6095 + * GN Otometrics (http://www.otometrics.com)
6096 +@@ -1022,6 +1040,12 @@
6097 + #define WHT_PID 0x0004 /* Wireless Handheld Terminal */
6098 +
6099 + /*
6100 ++ * STMicroelectronics
6101 ++ */
6102 ++#define ST_VID 0x0483
6103 ++#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */
6104 ++
6105 ++/*
6106 + * Papouch products (http://www.papouch.com/)
6107 + * Submitted by Folkert van Heusden
6108 + */
6109 +diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
6110 +index e6833e2..e4db5ad 100644
6111 +--- a/drivers/usb/serial/generic.c
6112 ++++ b/drivers/usb/serial/generic.c
6113 +@@ -479,6 +479,26 @@ int usb_serial_handle_break(struct usb_serial_port *port)
6114 + }
6115 + EXPORT_SYMBOL_GPL(usb_serial_handle_break);
6116 +
6117 ++/**
6118 ++ * usb_serial_handle_dcd_change - handle a change of carrier detect state
6119 ++ * @port: usb_serial_port structure for the open port
6120 ++ * @tty: tty_struct structure for the port
6121 ++ * @status: new carrier detect status, nonzero if active
6122 ++ */
6123 ++void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
6124 ++ struct tty_struct *tty, unsigned int status)
6125 ++{
6126 ++ struct tty_port *port = &usb_port->port;
6127 ++
6128 ++ dbg("%s - port %d, status %d", __func__, usb_port->number, status);
6129 ++
6130 ++ if (status)
6131 ++ wake_up_interruptible(&port->open_wait);
6132 ++ else if (tty && !C_CLOCAL(tty))
6133 ++ tty_hangup(tty);
6134 ++}
6135 ++EXPORT_SYMBOL_GPL(usb_serial_handle_dcd_change);
6136 ++
6137 + int usb_serial_generic_resume(struct usb_serial *serial)
6138 + {
6139 + struct usb_serial_port *port;
6140 +diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
6141 +index cd769ef..3b246d9 100644
6142 +--- a/drivers/usb/serial/io_edgeport.c
6143 ++++ b/drivers/usb/serial/io_edgeport.c
6144 +@@ -2889,8 +2889,8 @@ static void load_application_firmware(struct edgeport_serial *edge_serial)
6145 +
6146 + dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build);
6147 +
6148 +- edge_serial->product_info.FirmwareMajorVersion = fw->data[0];
6149 +- edge_serial->product_info.FirmwareMinorVersion = fw->data[1];
6150 ++ edge_serial->product_info.FirmwareMajorVersion = rec->data[0];
6151 ++ edge_serial->product_info.FirmwareMinorVersion = rec->data[1];
6152 + edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build);
6153 +
6154 + for (rec = ihex_next_binrec(rec); rec;
6155 +diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
6156 +index 6ab2a3f..178b22e 100644
6157 +--- a/drivers/usb/serial/io_tables.h
6158 ++++ b/drivers/usb/serial/io_tables.h
6159 +@@ -199,6 +199,7 @@ static struct usb_serial_driver epic_device = {
6160 + .name = "epic",
6161 + },
6162 + .description = "EPiC device",
6163 ++ .usb_driver = &io_driver,
6164 + .id_table = Epic_port_id_table,
6165 + .num_ports = 1,
6166 + .open = edge_open,
6167 +diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
6168 +index 12ed594..99b97c0 100644
6169 +--- a/drivers/usb/serial/iuu_phoenix.c
6170 ++++ b/drivers/usb/serial/iuu_phoenix.c
6171 +@@ -1275,6 +1275,7 @@ static struct usb_serial_driver iuu_device = {
6172 + .name = "iuu_phoenix",
6173 + },
6174 + .id_table = id_table,
6175 ++ .usb_driver = &iuu_driver,
6176 + .num_ports = 1,
6177 + .bulk_in_size = 512,
6178 + .bulk_out_size = 512,
6179 +diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
6180 +index 2d8baf6..ce134dc 100644
6181 +--- a/drivers/usb/serial/keyspan.h
6182 ++++ b/drivers/usb/serial/keyspan.h
6183 +@@ -546,6 +546,7 @@ static struct usb_serial_driver keyspan_pre_device = {
6184 + .name = "keyspan_no_firm",
6185 + },
6186 + .description = "Keyspan - (without firmware)",
6187 ++ .usb_driver = &keyspan_driver,
6188 + .id_table = keyspan_pre_ids,
6189 + .num_ports = 1,
6190 + .attach = keyspan_fake_startup,
6191 +@@ -557,6 +558,7 @@ static struct usb_serial_driver keyspan_1port_device = {
6192 + .name = "keyspan_1",
6193 + },
6194 + .description = "Keyspan 1 port adapter",
6195 ++ .usb_driver = &keyspan_driver,
6196 + .id_table = keyspan_1port_ids,
6197 + .num_ports = 1,
6198 + .open = keyspan_open,
6199 +@@ -579,6 +581,7 @@ static struct usb_serial_driver keyspan_2port_device = {
6200 + .name = "keyspan_2",
6201 + },
6202 + .description = "Keyspan 2 port adapter",
6203 ++ .usb_driver = &keyspan_driver,
6204 + .id_table = keyspan_2port_ids,
6205 + .num_ports = 2,
6206 + .open = keyspan_open,
6207 +@@ -601,6 +604,7 @@ static struct usb_serial_driver keyspan_4port_device = {
6208 + .name = "keyspan_4",
6209 + },
6210 + .description = "Keyspan 4 port adapter",
6211 ++ .usb_driver = &keyspan_driver,
6212 + .id_table = keyspan_4port_ids,
6213 + .num_ports = 4,
6214 + .open = keyspan_open,
6215 +diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
6216 +index a10dd56..554a869 100644
6217 +--- a/drivers/usb/serial/keyspan_pda.c
6218 ++++ b/drivers/usb/serial/keyspan_pda.c
6219 +@@ -679,22 +679,6 @@ static void keyspan_pda_dtr_rts(struct usb_serial_port *port, int on)
6220 + }
6221 + }
6222 +
6223 +-static int keyspan_pda_carrier_raised(struct usb_serial_port *port)
6224 +-{
6225 +- struct usb_serial *serial = port->serial;
6226 +- unsigned char modembits;
6227 +-
6228 +- /* If we can read the modem status and the DCD is low then
6229 +- carrier is not raised yet */
6230 +- if (keyspan_pda_get_modem_info(serial, &modembits) >= 0) {
6231 +- if (!(modembits & (1>>6)))
6232 +- return 0;
6233 +- }
6234 +- /* Carrier raised, or we failed (eg disconnected) so
6235 +- progress accordingly */
6236 +- return 1;
6237 +-}
6238 +-
6239 +
6240 + static int keyspan_pda_open(struct tty_struct *tty,
6241 + struct usb_serial_port *port)
6242 +@@ -881,7 +865,6 @@ static struct usb_serial_driver keyspan_pda_device = {
6243 + .id_table = id_table_std,
6244 + .num_ports = 1,
6245 + .dtr_rts = keyspan_pda_dtr_rts,
6246 +- .carrier_raised = keyspan_pda_carrier_raised,
6247 + .open = keyspan_pda_open,
6248 + .close = keyspan_pda_close,
6249 + .write = keyspan_pda_write,
6250 +diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
6251 +index cf17183..653465f 100644
6252 +--- a/drivers/usb/serial/moto_modem.c
6253 ++++ b/drivers/usb/serial/moto_modem.c
6254 +@@ -44,6 +44,7 @@ static struct usb_serial_driver moto_device = {
6255 + .name = "moto-modem",
6256 + },
6257 + .id_table = id_table,
6258 ++ .usb_driver = &moto_driver,
6259 + .num_ports = 1,
6260 + };
6261 +
6262 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
6263 +index ef2977d..356c870 100644
6264 +--- a/drivers/usb/serial/option.c
6265 ++++ b/drivers/usb/serial/option.c
6266 +@@ -382,7 +382,16 @@ static void option_instat_callback(struct urb *urb);
6267 + #define HAIER_VENDOR_ID 0x201e
6268 + #define HAIER_PRODUCT_CE100 0x2009
6269 +
6270 +-#define CINTERION_VENDOR_ID 0x0681
6271 ++/* Cinterion (formerly Siemens) products */
6272 ++#define SIEMENS_VENDOR_ID 0x0681
6273 ++#define CINTERION_VENDOR_ID 0x1e2d
6274 ++#define CINTERION_PRODUCT_HC25_MDM 0x0047
6275 ++#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
6276 ++#define CINTERION_PRODUCT_HC28_MDM 0x004C
6277 ++#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
6278 ++#define CINTERION_PRODUCT_EU3_E 0x0051
6279 ++#define CINTERION_PRODUCT_EU3_P 0x0052
6280 ++#define CINTERION_PRODUCT_PH8 0x0053
6281 +
6282 + /* Olivetti products */
6283 + #define OLIVETTI_VENDOR_ID 0x0b3c
6284 +@@ -945,7 +954,17 @@ static const struct usb_device_id option_ids[] = {
6285 + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
6286 + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
6287 + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
6288 +- { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
6289 ++ /* Cinterion */
6290 ++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
6291 ++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
6292 ++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
6293 ++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
6294 ++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
6295 ++ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
6296 ++ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
6297 ++ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
6298 ++ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
6299 ++
6300 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
6301 + { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
6302 + { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
6303 +diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
6304 +index e199b0f..1c46a86 100644
6305 +--- a/drivers/usb/serial/oti6858.c
6306 ++++ b/drivers/usb/serial/oti6858.c
6307 +@@ -157,6 +157,7 @@ static struct usb_serial_driver oti6858_device = {
6308 + .name = "oti6858",
6309 + },
6310 + .id_table = id_table,
6311 ++ .usb_driver = &oti6858_driver,
6312 + .num_ports = 1,
6313 + .open = oti6858_open,
6314 + .close = oti6858_close,
6315 +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
6316 +index 8ae4c6c..08c9181 100644
6317 +--- a/drivers/usb/serial/pl2303.c
6318 ++++ b/drivers/usb/serial/pl2303.c
6319 +@@ -50,6 +50,7 @@ static const struct usb_device_id id_table[] = {
6320 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
6321 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) },
6322 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
6323 ++ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
6324 + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
6325 + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
6326 + { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
6327 +@@ -677,9 +678,11 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
6328 + {
6329 +
6330 + struct pl2303_private *priv = usb_get_serial_port_data(port);
6331 ++ struct tty_struct *tty;
6332 + unsigned long flags;
6333 + u8 status_idx = UART_STATE;
6334 + u8 length = UART_STATE + 1;
6335 ++ u8 prev_line_status;
6336 + u16 idv, idp;
6337 +
6338 + idv = le16_to_cpu(port->serial->dev->descriptor.idVendor);
6339 +@@ -701,11 +704,20 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
6340 +
6341 + /* Save off the uart status for others to look at */
6342 + spin_lock_irqsave(&priv->lock, flags);
6343 ++ prev_line_status = priv->line_status;
6344 + priv->line_status = data[status_idx];
6345 + spin_unlock_irqrestore(&priv->lock, flags);
6346 + if (priv->line_status & UART_BREAK_ERROR)
6347 + usb_serial_handle_break(port);
6348 + wake_up_interruptible(&priv->delta_msr_wait);
6349 ++
6350 ++ tty = tty_port_tty_get(&port->port);
6351 ++ if (!tty)
6352 ++ return;
6353 ++ if ((priv->line_status ^ prev_line_status) & UART_DCD)
6354 ++ usb_serial_handle_dcd_change(port, tty,
6355 ++ priv->line_status & UART_DCD);
6356 ++ tty_kref_put(tty);
6357 + }
6358 +
6359 + static void pl2303_read_int_callback(struct urb *urb)
6360 +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
6361 +index 43eb9bd..1b025f7 100644
6362 +--- a/drivers/usb/serial/pl2303.h
6363 ++++ b/drivers/usb/serial/pl2303.h
6364 +@@ -21,6 +21,7 @@
6365 + #define PL2303_PRODUCT_ID_MMX 0x0612
6366 + #define PL2303_PRODUCT_ID_GPRS 0x0609
6367 + #define PL2303_PRODUCT_ID_HCR331 0x331a
6368 ++#define PL2303_PRODUCT_ID_MOTOROLA 0x0307
6369 +
6370 + #define ATEN_VENDOR_ID 0x0557
6371 + #define ATEN_VENDOR_ID2 0x0547
6372 +diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
6373 +index 214a3e5..30b73e6 100644
6374 +--- a/drivers/usb/serial/qcaux.c
6375 ++++ b/drivers/usb/serial/qcaux.c
6376 +@@ -36,6 +36,7 @@
6377 + #define UTSTARCOM_PRODUCT_UM175_V1 0x3712
6378 + #define UTSTARCOM_PRODUCT_UM175_V2 0x3714
6379 + #define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715
6380 ++#define PANTECH_PRODUCT_UML290_VZW 0x3718
6381 +
6382 + /* CMOTECH devices */
6383 + #define CMOTECH_VENDOR_ID 0x16d8
6384 +@@ -66,6 +67,7 @@ static struct usb_device_id id_table[] = {
6385 + { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
6386 + { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
6387 + { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
6388 ++ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) },
6389 + { },
6390 + };
6391 + MODULE_DEVICE_TABLE(usb, id_table);
6392 +@@ -84,6 +86,7 @@ static struct usb_serial_driver qcaux_device = {
6393 + .name = "qcaux",
6394 + },
6395 + .id_table = id_table,
6396 ++ .usb_driver = &qcaux_driver,
6397 + .num_ports = 1,
6398 + };
6399 +
6400 +diff --git a/drivers/usb/serial/siemens_mpi.c b/drivers/usb/serial/siemens_mpi.c
6401 +index cb8195c..74cd4cc 100644
6402 +--- a/drivers/usb/serial/siemens_mpi.c
6403 ++++ b/drivers/usb/serial/siemens_mpi.c
6404 +@@ -42,6 +42,7 @@ static struct usb_serial_driver siemens_usb_mpi_device = {
6405 + .name = "siemens_mpi",
6406 + },
6407 + .id_table = id_table,
6408 ++ .usb_driver = &siemens_usb_mpi_driver,
6409 + .num_ports = 1,
6410 + };
6411 +
6412 +diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
6413 +index 765aa98..cbfb70b 100644
6414 +--- a/drivers/usb/serial/spcp8x5.c
6415 ++++ b/drivers/usb/serial/spcp8x5.c
6416 +@@ -133,7 +133,7 @@ struct spcp8x5_usb_ctrl_arg {
6417 +
6418 + /* how come ??? */
6419 + #define UART_STATE 0x08
6420 +-#define UART_STATE_TRANSIENT_MASK 0x74
6421 ++#define UART_STATE_TRANSIENT_MASK 0x75
6422 + #define UART_DCD 0x01
6423 + #define UART_DSR 0x02
6424 + #define UART_BREAK_ERROR 0x04
6425 +@@ -525,6 +525,10 @@ static void spcp8x5_process_read_urb(struct urb *urb)
6426 + /* overrun is special, not associated with a char */
6427 + if (status & UART_OVERRUN_ERROR)
6428 + tty_insert_flip_char(tty, 0, TTY_OVERRUN);
6429 ++
6430 ++ if (status & UART_DCD)
6431 ++ usb_serial_handle_dcd_change(port, tty,
6432 ++ priv->line_status & MSR_STATUS_LINE_DCD);
6433 + }
6434 +
6435 + tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
6436 +@@ -645,6 +649,7 @@ static struct usb_serial_driver spcp8x5_device = {
6437 + .name = "SPCP8x5",
6438 + },
6439 + .id_table = id_table,
6440 ++ .usb_driver = &spcp8x5_driver,
6441 + .num_ports = 1,
6442 + .open = spcp8x5_open,
6443 + .dtr_rts = spcp8x5_dtr_rts,
6444 +diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
6445 +index b2902f3..a910004 100644
6446 +--- a/drivers/usb/serial/ti_usb_3410_5052.c
6447 ++++ b/drivers/usb/serial/ti_usb_3410_5052.c
6448 +@@ -369,9 +369,9 @@ failed_1port:
6449 +
6450 + static void __exit ti_exit(void)
6451 + {
6452 ++ usb_deregister(&ti_usb_driver);
6453 + usb_serial_deregister(&ti_1port_device);
6454 + usb_serial_deregister(&ti_2port_device);
6455 +- usb_deregister(&ti_usb_driver);
6456 + }
6457 +
6458 +
6459 +diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
6460 +index 6954de5..546a521 100644
6461 +--- a/drivers/usb/serial/usb-serial.c
6462 ++++ b/drivers/usb/serial/usb-serial.c
6463 +@@ -1344,11 +1344,15 @@ int usb_serial_register(struct usb_serial_driver *driver)
6464 + return -ENODEV;
6465 +
6466 + fixup_generic(driver);
6467 +- if (driver->usb_driver)
6468 +- driver->usb_driver->supports_autosuspend = 1;
6469 +
6470 + if (!driver->description)
6471 + driver->description = driver->driver.name;
6472 ++ if (!driver->usb_driver) {
6473 ++ WARN(1, "Serial driver %s has no usb_driver\n",
6474 ++ driver->description);
6475 ++ return -EINVAL;
6476 ++ }
6477 ++ driver->usb_driver->supports_autosuspend = 1;
6478 +
6479 + /* Add this device to our list of devices */
6480 + mutex_lock(&table_lock);
6481 +diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
6482 +index f2ed6a3..95a8214 100644
6483 +--- a/drivers/usb/serial/usb_debug.c
6484 ++++ b/drivers/usb/serial/usb_debug.c
6485 +@@ -75,6 +75,7 @@ static struct usb_serial_driver debug_device = {
6486 + .name = "debug",
6487 + },
6488 + .id_table = id_table,
6489 ++ .usb_driver = &debug_driver,
6490 + .num_ports = 1,
6491 + .bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE,
6492 + .break_ctl = usb_debug_break_ctl,
6493 +diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
6494 +index c854fde..2c85530 100644
6495 +--- a/drivers/usb/storage/unusual_cypress.h
6496 ++++ b/drivers/usb/storage/unusual_cypress.h
6497 +@@ -31,4 +31,9 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
6498 + "Cypress ISD-300LP",
6499 + USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
6500 +
6501 ++UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999,
6502 ++ "Super Top",
6503 ++ "USB 2.0 SATA BRIDGE",
6504 ++ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
6505 ++
6506 + #endif /* defined(CONFIG_USB_STORAGE_CYPRESS_ATACB) || ... */
6507 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
6508 +index fcc1e32..c1602b8 100644
6509 +--- a/drivers/usb/storage/unusual_devs.h
6510 ++++ b/drivers/usb/storage/unusual_devs.h
6511 +@@ -1044,6 +1044,15 @@ UNUSUAL_DEV( 0x084d, 0x0011, 0x0110, 0x0110,
6512 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
6513 + US_FL_BULK32),
6514 +
6515 ++/* Reported by <ttkspam@××××.fr>
6516 ++ * The device reports a vendor-specific device class, requiring an
6517 ++ * explicit vendor/product match.
6518 ++ */
6519 ++UNUSUAL_DEV( 0x0851, 0x1542, 0x0002, 0x0002,
6520 ++ "MagicPixel",
6521 ++ "FW_Omega2",
6522 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, 0),
6523 ++
6524 + /* Andrew Lunn <andrew@××××.ch>
6525 + * PanDigital Digital Picture Frame. Does not like ALLOW_MEDIUM_REMOVAL
6526 + * on LUN 4.
6527 +@@ -1388,6 +1397,13 @@ UNUSUAL_DEV( 0x0f19, 0x0105, 0x0100, 0x0100,
6528 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
6529 + US_FL_IGNORE_RESIDUE ),
6530 +
6531 ++/* Submitted by Nick Holloway */
6532 ++UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
6533 ++ "VTech",
6534 ++ "Kidizoom",
6535 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
6536 ++ US_FL_FIX_CAPACITY ),
6537 ++
6538 + /* Reported by Michael Stattmann <michael@×××××××××.com> */
6539 + UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
6540 + "Sony Ericsson",
6541 +@@ -1872,6 +1888,22 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
6542 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
6543 + US_FL_NO_READ_DISC_INFO ),
6544 +
6545 ++/* Patch by Richard Schütz <r.schtz@××××××××.de>
6546 ++ * This external hard drive enclosure uses a JMicron chip which
6547 ++ * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
6548 ++UNUSUAL_DEV( 0x1e68, 0x001b, 0x0000, 0x0000,
6549 ++ "TrekStor GmbH & Co. KG",
6550 ++ "DataStation maxi g.u",
6551 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
6552 ++ US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
6553 ++
6554 ++/* Reported by Jasper Mackenzie <scarletpimpernal@×××××××.com> */
6555 ++UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
6556 ++ "Coby Electronics",
6557 ++ "MP3 Player",
6558 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
6559 ++ US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
6560 ++
6561 + UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
6562 + "ST",
6563 + "2A",
6564 +diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
6565 +index 5bf9123..5a3ce3a 100644
6566 +--- a/drivers/video/aty/atyfb_base.c
6567 ++++ b/drivers/video/aty/atyfb_base.c
6568 +@@ -2969,10 +2969,8 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
6569 + {
6570 + struct atyfb_par *par = info->par;
6571 + struct device_node *dp;
6572 +- char prop[128];
6573 +- phandle node;
6574 +- int len, i, j, ret;
6575 + u32 mem, chip_id;
6576 ++ int i, j, ret;
6577 +
6578 + /*
6579 + * Map memory-mapped registers.
6580 +@@ -3088,23 +3086,8 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
6581 + aty_st_le32(MEM_CNTL, mem, par);
6582 + }
6583 +
6584 +- /*
6585 +- * If this is the console device, we will set default video
6586 +- * settings to what the PROM left us with.
6587 +- */
6588 +- node = prom_getchild(prom_root_node);
6589 +- node = prom_searchsiblings(node, "aliases");
6590 +- if (node) {
6591 +- len = prom_getproperty(node, "screen", prop, sizeof(prop));
6592 +- if (len > 0) {
6593 +- prop[len] = '\0';
6594 +- node = prom_finddevice(prop);
6595 +- } else
6596 +- node = 0;
6597 +- }
6598 +-
6599 + dp = pci_device_to_OF_node(pdev);
6600 +- if (node == dp->phandle) {
6601 ++ if (dp == of_console_device) {
6602 + struct fb_var_screeninfo *var = &default_var;
6603 + unsigned int N, P, Q, M, T, R;
6604 + u32 v_total, h_total;
6605 +@@ -3112,9 +3095,9 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
6606 + u8 pll_regs[16];
6607 + u8 clock_cntl;
6608 +
6609 +- crtc.vxres = prom_getintdefault(node, "width", 1024);
6610 +- crtc.vyres = prom_getintdefault(node, "height", 768);
6611 +- var->bits_per_pixel = prom_getintdefault(node, "depth", 8);
6612 ++ crtc.vxres = of_getintprop_default(dp, "width", 1024);
6613 ++ crtc.vyres = of_getintprop_default(dp, "height", 768);
6614 ++ var->bits_per_pixel = of_getintprop_default(dp, "depth", 8);
6615 + var->xoffset = var->yoffset = 0;
6616 + crtc.h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par);
6617 + crtc.h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par);
6618 +diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
6619 +index 38ffc3f..c06c667 100644
6620 +--- a/drivers/video/backlight/88pm860x_bl.c
6621 ++++ b/drivers/video/backlight/88pm860x_bl.c
6622 +@@ -21,7 +21,7 @@
6623 + #define MAX_BRIGHTNESS (0xFF)
6624 + #define MIN_BRIGHTNESS (0)
6625 +
6626 +-#define CURRENT_MASK (0x1F << 1)
6627 ++#define CURRENT_BITMASK (0x1F << 1)
6628 +
6629 + struct pm860x_backlight_data {
6630 + struct pm860x_chip *chip;
6631 +@@ -85,7 +85,7 @@ static int pm860x_backlight_set(struct backlight_device *bl, int brightness)
6632 + if ((data->current_brightness == 0) && brightness) {
6633 + if (data->iset) {
6634 + ret = pm860x_set_bits(data->i2c, wled_idc(data->port),
6635 +- CURRENT_MASK, data->iset);
6636 ++ CURRENT_BITMASK, data->iset);
6637 + if (ret < 0)
6638 + goto out;
6639 + }
6640 +diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
6641 +index ef8d9d5..4fb5b2b 100644
6642 +--- a/drivers/virtio/virtio_pci.c
6643 ++++ b/drivers/virtio/virtio_pci.c
6644 +@@ -96,11 +96,6 @@ static struct pci_device_id virtio_pci_id_table[] = {
6645 +
6646 + MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
6647 +
6648 +-/* A PCI device has it's own struct device and so does a virtio device so
6649 +- * we create a place for the virtio devices to show up in sysfs. I think it
6650 +- * would make more sense for virtio to not insist on having it's own device. */
6651 +-static struct device *virtio_pci_root;
6652 +-
6653 + /* Convert a generic virtio device to our structure */
6654 + static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
6655 + {
6656 +@@ -629,7 +624,7 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
6657 + if (vp_dev == NULL)
6658 + return -ENOMEM;
6659 +
6660 +- vp_dev->vdev.dev.parent = virtio_pci_root;
6661 ++ vp_dev->vdev.dev.parent = &pci_dev->dev;
6662 + vp_dev->vdev.dev.release = virtio_pci_release_dev;
6663 + vp_dev->vdev.config = &virtio_pci_config_ops;
6664 + vp_dev->pci_dev = pci_dev;
6665 +@@ -717,17 +712,7 @@ static struct pci_driver virtio_pci_driver = {
6666 +
6667 + static int __init virtio_pci_init(void)
6668 + {
6669 +- int err;
6670 +-
6671 +- virtio_pci_root = root_device_register("virtio-pci");
6672 +- if (IS_ERR(virtio_pci_root))
6673 +- return PTR_ERR(virtio_pci_root);
6674 +-
6675 +- err = pci_register_driver(&virtio_pci_driver);
6676 +- if (err)
6677 +- root_device_unregister(virtio_pci_root);
6678 +-
6679 +- return err;
6680 ++ return pci_register_driver(&virtio_pci_driver);
6681 + }
6682 +
6683 + module_init(virtio_pci_init);
6684 +@@ -735,7 +720,6 @@ module_init(virtio_pci_init);
6685 + static void __exit virtio_pci_exit(void)
6686 + {
6687 + pci_unregister_driver(&virtio_pci_driver);
6688 +- root_device_unregister(virtio_pci_root);
6689 + }
6690 +
6691 + module_exit(virtio_pci_exit);
6692 +diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
6693 +index c01b5dd..afbe041 100644
6694 +--- a/drivers/xen/platform-pci.c
6695 ++++ b/drivers/xen/platform-pci.c
6696 +@@ -105,7 +105,7 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
6697 + const struct pci_device_id *ent)
6698 + {
6699 + int i, ret;
6700 +- long ioaddr, iolen;
6701 ++ long ioaddr;
6702 + long mmio_addr, mmio_len;
6703 + unsigned int max_nr_gframes;
6704 +
6705 +@@ -114,7 +114,6 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
6706 + return i;
6707 +
6708 + ioaddr = pci_resource_start(pdev, 0);
6709 +- iolen = pci_resource_len(pdev, 0);
6710 +
6711 + mmio_addr = pci_resource_start(pdev, 1);
6712 + mmio_len = pci_resource_len(pdev, 1);
6713 +@@ -125,19 +124,13 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
6714 + goto pci_out;
6715 + }
6716 +
6717 +- if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) {
6718 +- dev_err(&pdev->dev, "MEM I/O resource 0x%lx @ 0x%lx busy\n",
6719 +- mmio_addr, mmio_len);
6720 +- ret = -EBUSY;
6721 ++ ret = pci_request_region(pdev, 1, DRV_NAME);
6722 ++ if (ret < 0)
6723 + goto pci_out;
6724 +- }
6725 +
6726 +- if (request_region(ioaddr, iolen, DRV_NAME) == NULL) {
6727 +- dev_err(&pdev->dev, "I/O resource 0x%lx @ 0x%lx busy\n",
6728 +- iolen, ioaddr);
6729 +- ret = -EBUSY;
6730 ++ ret = pci_request_region(pdev, 0, DRV_NAME);
6731 ++ if (ret < 0)
6732 + goto mem_out;
6733 +- }
6734 +
6735 + platform_mmio = mmio_addr;
6736 + platform_mmiolen = mmio_len;
6737 +@@ -169,9 +162,9 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
6738 + return 0;
6739 +
6740 + out:
6741 +- release_region(ioaddr, iolen);
6742 ++ pci_release_region(pdev, 0);
6743 + mem_out:
6744 +- release_mem_region(mmio_addr, mmio_len);
6745 ++ pci_release_region(pdev, 1);
6746 + pci_out:
6747 + pci_disable_device(pdev);
6748 + return ret;
6749 +diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
6750 +index e6d1481..95d5dbb 100644
6751 +--- a/fs/cifs/cifsproto.h
6752 ++++ b/fs/cifs/cifsproto.h
6753 +@@ -347,7 +347,7 @@ extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
6754 + const __u16 netfid, const __u64 len,
6755 + const __u64 offset, const __u32 numUnlock,
6756 + const __u32 numLock, const __u8 lockType,
6757 +- const bool waitFlag);
6758 ++ const bool waitFlag, const __u8 oplock_level);
6759 + extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
6760 + const __u16 smb_file_id, const int get_flag,
6761 + const __u64 len, struct file_lock *,
6762 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
6763 +index 67acfb3..0fa5c1f 100644
6764 +--- a/fs/cifs/cifssmb.c
6765 ++++ b/fs/cifs/cifssmb.c
6766 +@@ -1666,7 +1666,8 @@ int
6767 + CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
6768 + const __u16 smb_file_id, const __u64 len,
6769 + const __u64 offset, const __u32 numUnlock,
6770 +- const __u32 numLock, const __u8 lockType, const bool waitFlag)
6771 ++ const __u32 numLock, const __u8 lockType,
6772 ++ const bool waitFlag, const __u8 oplock_level)
6773 + {
6774 + int rc = 0;
6775 + LOCK_REQ *pSMB = NULL;
6776 +@@ -1694,6 +1695,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
6777 + pSMB->NumberOfLocks = cpu_to_le16(numLock);
6778 + pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock);
6779 + pSMB->LockType = lockType;
6780 ++ pSMB->OplockLevel = oplock_level;
6781 + pSMB->AndXCommand = 0xFF; /* none */
6782 + pSMB->Fid = smb_file_id; /* netfid stays le */
6783 +
6784 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
6785 +index cc1a860..ac75883 100644
6786 +--- a/fs/cifs/connect.c
6787 ++++ b/fs/cifs/connect.c
6788 +@@ -2999,7 +2999,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
6789 + bcc_ptr++; /* skip password */
6790 + /* already aligned so no need to do it below */
6791 + } else {
6792 +- pSMB->PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
6793 ++ pSMB->PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
6794 + /* BB FIXME add code to fail this if NTLMv2 or Kerberos
6795 + specified as required (when that support is added to
6796 + the vfs in the future) as only NTLM or the much
6797 +@@ -3017,7 +3017,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
6798 + #endif /* CIFS_WEAK_PW_HASH */
6799 + SMBNTencrypt(tcon->password, ses->server->cryptkey, bcc_ptr);
6800 +
6801 +- bcc_ptr += CIFS_SESS_KEY_SIZE;
6802 ++ bcc_ptr += CIFS_AUTH_RESP_SIZE;
6803 + if (ses->capabilities & CAP_UNICODE) {
6804 + /* must align unicode strings */
6805 + *bcc_ptr = 0; /* null byte password */
6806 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
6807 +index 5a28660..904edbe 100644
6808 +--- a/fs/cifs/file.c
6809 ++++ b/fs/cifs/file.c
6810 +@@ -779,12 +779,12 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
6811 +
6812 + /* BB we could chain these into one lock request BB */
6813 + rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
6814 +- 0, 1, lockType, 0 /* wait flag */ );
6815 ++ 0, 1, lockType, 0 /* wait flag */, 0);
6816 + if (rc == 0) {
6817 + rc = CIFSSMBLock(xid, tcon, netfid, length,
6818 + pfLock->fl_start, 1 /* numUnlock */ ,
6819 + 0 /* numLock */ , lockType,
6820 +- 0 /* wait flag */ );
6821 ++ 0 /* wait flag */, 0);
6822 + pfLock->fl_type = F_UNLCK;
6823 + if (rc != 0)
6824 + cERROR(1, "Error unlocking previously locked "
6825 +@@ -801,13 +801,13 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
6826 + rc = CIFSSMBLock(xid, tcon, netfid, length,
6827 + pfLock->fl_start, 0, 1,
6828 + lockType | LOCKING_ANDX_SHARED_LOCK,
6829 +- 0 /* wait flag */);
6830 ++ 0 /* wait flag */, 0);
6831 + if (rc == 0) {
6832 + rc = CIFSSMBLock(xid, tcon, netfid,
6833 + length, pfLock->fl_start, 1, 0,
6834 + lockType |
6835 + LOCKING_ANDX_SHARED_LOCK,
6836 +- 0 /* wait flag */);
6837 ++ 0 /* wait flag */, 0);
6838 + pfLock->fl_type = F_RDLCK;
6839 + if (rc != 0)
6840 + cERROR(1, "Error unlocking "
6841 +@@ -850,8 +850,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
6842 +
6843 + if (numLock) {
6844 + rc = CIFSSMBLock(xid, tcon, netfid, length,
6845 +- pfLock->fl_start,
6846 +- 0, numLock, lockType, wait_flag);
6847 ++ pfLock->fl_start, 0, numLock, lockType,
6848 ++ wait_flag, 0);
6849 +
6850 + if (rc == 0) {
6851 + /* For Windows locks we must store them. */
6852 +@@ -871,9 +871,9 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
6853 + (pfLock->fl_start + length) >=
6854 + (li->offset + li->length)) {
6855 + stored_rc = CIFSSMBLock(xid, tcon,
6856 +- netfid,
6857 +- li->length, li->offset,
6858 +- 1, 0, li->type, false);
6859 ++ netfid, li->length,
6860 ++ li->offset, 1, 0,
6861 ++ li->type, false, 0);
6862 + if (stored_rc)
6863 + rc = stored_rc;
6864 + else {
6865 +@@ -2245,7 +2245,8 @@ void cifs_oplock_break(struct work_struct *work)
6866 + */
6867 + if (!cfile->oplock_break_cancelled) {
6868 + rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
6869 +- 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false);
6870 ++ 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false,
6871 ++ cinode->clientCanCacheRead ? 1 : 0);
6872 + cFYI(1, "Oplock release rc = %d", rc);
6873 + }
6874 +
6875 +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
6876 +index 43f1028..09bfcf0 100644
6877 +--- a/fs/cifs/misc.c
6878 ++++ b/fs/cifs/misc.c
6879 +@@ -571,7 +571,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
6880 + pCifsInode = CIFS_I(netfile->dentry->d_inode);
6881 +
6882 + cifs_set_oplock_level(pCifsInode,
6883 +- pSMB->OplockLevel);
6884 ++ pSMB->OplockLevel ? OPLOCK_READ : 0);
6885 + /*
6886 + * cifs_oplock_break_put() can't be called
6887 + * from here. Get reference after queueing
6888 +diff --git a/fs/direct-io.c b/fs/direct-io.c
6889 +index 85882f6..b044705 100644
6890 +--- a/fs/direct-io.c
6891 ++++ b/fs/direct-io.c
6892 +@@ -325,12 +325,16 @@ void dio_end_io(struct bio *bio, int error)
6893 + }
6894 + EXPORT_SYMBOL_GPL(dio_end_io);
6895 +
6896 +-static int
6897 ++static void
6898 + dio_bio_alloc(struct dio *dio, struct block_device *bdev,
6899 + sector_t first_sector, int nr_vecs)
6900 + {
6901 + struct bio *bio;
6902 +
6903 ++ /*
6904 ++ * bio_alloc() is guaranteed to return a bio when called with
6905 ++ * __GFP_WAIT and we request a valid number of vectors.
6906 ++ */
6907 + bio = bio_alloc(GFP_KERNEL, nr_vecs);
6908 +
6909 + bio->bi_bdev = bdev;
6910 +@@ -342,7 +346,6 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev,
6911 +
6912 + dio->bio = bio;
6913 + dio->logical_offset_in_bio = dio->cur_page_fs_offset;
6914 +- return 0;
6915 + }
6916 +
6917 + /*
6918 +@@ -583,8 +586,9 @@ static int dio_new_bio(struct dio *dio, sector_t start_sector)
6919 + goto out;
6920 + sector = start_sector << (dio->blkbits - 9);
6921 + nr_pages = min(dio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev));
6922 ++ nr_pages = min(nr_pages, BIO_MAX_PAGES);
6923 + BUG_ON(nr_pages <= 0);
6924 +- ret = dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
6925 ++ dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
6926 + dio->boundary = 0;
6927 + out:
6928 + return ret;
6929 +diff --git a/fs/eventpoll.c b/fs/eventpoll.c
6930 +index 8cf0724..7daf1e6 100644
6931 +--- a/fs/eventpoll.c
6932 ++++ b/fs/eventpoll.c
6933 +@@ -1111,6 +1111,17 @@ static int ep_send_events(struct eventpoll *ep,
6934 + return ep_scan_ready_list(ep, ep_send_events_proc, &esed);
6935 + }
6936 +
6937 ++static inline struct timespec ep_set_mstimeout(long ms)
6938 ++{
6939 ++ struct timespec now, ts = {
6940 ++ .tv_sec = ms / MSEC_PER_SEC,
6941 ++ .tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
6942 ++ };
6943 ++
6944 ++ ktime_get_ts(&now);
6945 ++ return timespec_add_safe(now, ts);
6946 ++}
6947 ++
6948 + static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
6949 + int maxevents, long timeout)
6950 + {
6951 +@@ -1118,12 +1129,11 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
6952 + unsigned long flags;
6953 + long slack;
6954 + wait_queue_t wait;
6955 +- struct timespec end_time;
6956 + ktime_t expires, *to = NULL;
6957 +
6958 + if (timeout > 0) {
6959 +- ktime_get_ts(&end_time);
6960 +- timespec_add_ns(&end_time, (u64)timeout * NSEC_PER_MSEC);
6961 ++ struct timespec end_time = ep_set_mstimeout(timeout);
6962 ++
6963 + slack = select_estimate_accuracy(&end_time);
6964 + to = &expires;
6965 + *to = timespec_to_ktime(end_time);
6966 +diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
6967 +index 4268542..a755523 100644
6968 +--- a/fs/exofs/inode.c
6969 ++++ b/fs/exofs/inode.c
6970 +@@ -1030,7 +1030,6 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
6971 + memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
6972 + }
6973 +
6974 +- inode->i_mapping->backing_dev_info = sb->s_bdi;
6975 + if (S_ISREG(inode->i_mode)) {
6976 + inode->i_op = &exofs_file_inode_operations;
6977 + inode->i_fop = &exofs_file_operations;
6978 +@@ -1131,7 +1130,6 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
6979 +
6980 + sbi = sb->s_fs_info;
6981 +
6982 +- inode->i_mapping->backing_dev_info = sb->s_bdi;
6983 + sb->s_dirt = 1;
6984 + inode_init_owner(inode, dir, mode);
6985 + inode->i_ino = sbi->s_nextid++;
6986 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
6987 +index e659597..97a28e9 100644
6988 +--- a/fs/ext4/inode.c
6989 ++++ b/fs/ext4/inode.c
6990 +@@ -4349,6 +4349,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
6991 + (__le32 *) bh->b_data,
6992 + (__le32 *) bh->b_data + addr_per_block,
6993 + depth);
6994 ++ brelse(bh);
6995 +
6996 + /*
6997 + * Everything below this this pointer has been
6998 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
6999 +index 5b4d4e3..f1811d5 100644
7000 +--- a/fs/ext4/mballoc.c
7001 ++++ b/fs/ext4/mballoc.c
7002 +@@ -342,10 +342,15 @@ static struct kmem_cache *ext4_free_ext_cachep;
7003 + /* We create slab caches for groupinfo data structures based on the
7004 + * superblock block size. There will be one per mounted filesystem for
7005 + * each unique s_blocksize_bits */
7006 +-#define NR_GRPINFO_CACHES \
7007 +- (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE + 1)
7008 ++#define NR_GRPINFO_CACHES 8
7009 + static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
7010 +
7011 ++static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
7012 ++ "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
7013 ++ "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
7014 ++ "ext4_groupinfo_64k", "ext4_groupinfo_128k"
7015 ++};
7016 ++
7017 + static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
7018 + ext4_group_t group);
7019 + static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
7020 +@@ -2414,6 +2419,55 @@ err_freesgi:
7021 + return -ENOMEM;
7022 + }
7023 +
7024 ++static void ext4_groupinfo_destroy_slabs(void)
7025 ++{
7026 ++ int i;
7027 ++
7028 ++ for (i = 0; i < NR_GRPINFO_CACHES; i++) {
7029 ++ if (ext4_groupinfo_caches[i])
7030 ++ kmem_cache_destroy(ext4_groupinfo_caches[i]);
7031 ++ ext4_groupinfo_caches[i] = NULL;
7032 ++ }
7033 ++}
7034 ++
7035 ++static int ext4_groupinfo_create_slab(size_t size)
7036 ++{
7037 ++ static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
7038 ++ int slab_size;
7039 ++ int blocksize_bits = order_base_2(size);
7040 ++ int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
7041 ++ struct kmem_cache *cachep;
7042 ++
7043 ++ if (cache_index >= NR_GRPINFO_CACHES)
7044 ++ return -EINVAL;
7045 ++
7046 ++ if (unlikely(cache_index < 0))
7047 ++ cache_index = 0;
7048 ++
7049 ++ mutex_lock(&ext4_grpinfo_slab_create_mutex);
7050 ++ if (ext4_groupinfo_caches[cache_index]) {
7051 ++ mutex_unlock(&ext4_grpinfo_slab_create_mutex);
7052 ++ return 0; /* Already created */
7053 ++ }
7054 ++
7055 ++ slab_size = offsetof(struct ext4_group_info,
7056 ++ bb_counters[blocksize_bits + 2]);
7057 ++
7058 ++ cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
7059 ++ slab_size, 0, SLAB_RECLAIM_ACCOUNT,
7060 ++ NULL);
7061 ++
7062 ++ mutex_unlock(&ext4_grpinfo_slab_create_mutex);
7063 ++ if (!cachep) {
7064 ++ printk(KERN_EMERG "EXT4: no memory for groupinfo slab cache\n");
7065 ++ return -ENOMEM;
7066 ++ }
7067 ++
7068 ++ ext4_groupinfo_caches[cache_index] = cachep;
7069 ++
7070 ++ return 0;
7071 ++}
7072 ++
7073 + int ext4_mb_init(struct super_block *sb, int needs_recovery)
7074 + {
7075 + struct ext4_sb_info *sbi = EXT4_SB(sb);
7076 +@@ -2421,9 +2475,6 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
7077 + unsigned offset;
7078 + unsigned max;
7079 + int ret;
7080 +- int cache_index;
7081 +- struct kmem_cache *cachep;
7082 +- char *namep = NULL;
7083 +
7084 + i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
7085 +
7086 +@@ -2440,30 +2491,9 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
7087 + goto out;
7088 + }
7089 +
7090 +- cache_index = sb->s_blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
7091 +- cachep = ext4_groupinfo_caches[cache_index];
7092 +- if (!cachep) {
7093 +- char name[32];
7094 +- int len = offsetof(struct ext4_group_info,
7095 +- bb_counters[sb->s_blocksize_bits + 2]);
7096 +-
7097 +- sprintf(name, "ext4_groupinfo_%d", sb->s_blocksize_bits);
7098 +- namep = kstrdup(name, GFP_KERNEL);
7099 +- if (!namep) {
7100 +- ret = -ENOMEM;
7101 +- goto out;
7102 +- }
7103 +-
7104 +- /* Need to free the kmem_cache_name() when we
7105 +- * destroy the slab */
7106 +- cachep = kmem_cache_create(namep, len, 0,
7107 +- SLAB_RECLAIM_ACCOUNT, NULL);
7108 +- if (!cachep) {
7109 +- ret = -ENOMEM;
7110 +- goto out;
7111 +- }
7112 +- ext4_groupinfo_caches[cache_index] = cachep;
7113 +- }
7114 ++ ret = ext4_groupinfo_create_slab(sb->s_blocksize);
7115 ++ if (ret < 0)
7116 ++ goto out;
7117 +
7118 + /* order 0 is regular bitmap */
7119 + sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
7120 +@@ -2520,7 +2550,6 @@ out:
7121 + if (ret) {
7122 + kfree(sbi->s_mb_offsets);
7123 + kfree(sbi->s_mb_maxs);
7124 +- kfree(namep);
7125 + }
7126 + return ret;
7127 + }
7128 +@@ -2734,7 +2763,6 @@ int __init ext4_init_mballoc(void)
7129 +
7130 + void ext4_exit_mballoc(void)
7131 + {
7132 +- int i;
7133 + /*
7134 + * Wait for completion of call_rcu()'s on ext4_pspace_cachep
7135 + * before destroying the slab cache.
7136 +@@ -2743,15 +2771,7 @@ void ext4_exit_mballoc(void)
7137 + kmem_cache_destroy(ext4_pspace_cachep);
7138 + kmem_cache_destroy(ext4_ac_cachep);
7139 + kmem_cache_destroy(ext4_free_ext_cachep);
7140 +-
7141 +- for (i = 0; i < NR_GRPINFO_CACHES; i++) {
7142 +- struct kmem_cache *cachep = ext4_groupinfo_caches[i];
7143 +- if (cachep) {
7144 +- char *name = (char *)kmem_cache_name(cachep);
7145 +- kmem_cache_destroy(cachep);
7146 +- kfree(name);
7147 +- }
7148 +- }
7149 ++ ext4_groupinfo_destroy_slabs();
7150 + ext4_remove_debugfs_entry();
7151 + }
7152 +
7153 +@@ -4851,7 +4871,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
7154 + if (len >= EXT4_BLOCKS_PER_GROUP(sb))
7155 + len -= (EXT4_BLOCKS_PER_GROUP(sb) - first_block);
7156 + else
7157 +- last_block = len;
7158 ++ last_block = first_block + len;
7159 +
7160 + if (e4b.bd_info->bb_free >= minlen) {
7161 + cnt = ext4_trim_all_free(sb, &e4b, first_block,
7162 +diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
7163 +index beacce1..736f4ee 100644
7164 +--- a/fs/ext4/page-io.c
7165 ++++ b/fs/ext4/page-io.c
7166 +@@ -193,6 +193,7 @@ static void ext4_end_bio(struct bio *bio, int error)
7167 + struct inode *inode;
7168 + unsigned long flags;
7169 + int i;
7170 ++ sector_t bi_sector = bio->bi_sector;
7171 +
7172 + BUG_ON(!io_end);
7173 + bio->bi_private = NULL;
7174 +@@ -210,9 +211,7 @@ static void ext4_end_bio(struct bio *bio, int error)
7175 + if (error)
7176 + SetPageError(page);
7177 + BUG_ON(!head);
7178 +- if (head->b_size == PAGE_CACHE_SIZE)
7179 +- clear_buffer_dirty(head);
7180 +- else {
7181 ++ if (head->b_size != PAGE_CACHE_SIZE) {
7182 + loff_t offset;
7183 + loff_t io_end_offset = io_end->offset + io_end->size;
7184 +
7185 +@@ -224,7 +223,6 @@ static void ext4_end_bio(struct bio *bio, int error)
7186 + if (error)
7187 + buffer_io_error(bh);
7188 +
7189 +- clear_buffer_dirty(bh);
7190 + }
7191 + if (buffer_delay(bh))
7192 + partial_write = 1;
7193 +@@ -260,7 +258,7 @@ static void ext4_end_bio(struct bio *bio, int error)
7194 + (unsigned long long) io_end->offset,
7195 + (long) io_end->size,
7196 + (unsigned long long)
7197 +- bio->bi_sector >> (inode->i_blkbits - 9));
7198 ++ bi_sector >> (inode->i_blkbits - 9));
7199 + }
7200 +
7201 + /* Add the io_end to per-inode completed io list*/
7202 +@@ -383,6 +381,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
7203 +
7204 + blocksize = 1 << inode->i_blkbits;
7205 +
7206 ++ BUG_ON(!PageLocked(page));
7207 + BUG_ON(PageWriteback(page));
7208 + set_page_writeback(page);
7209 + ClearPageError(page);
7210 +@@ -400,12 +399,14 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
7211 + for (bh = head = page_buffers(page), block_start = 0;
7212 + bh != head || !block_start;
7213 + block_start = block_end, bh = bh->b_this_page) {
7214 ++
7215 + block_end = block_start + blocksize;
7216 + if (block_start >= len) {
7217 + clear_buffer_dirty(bh);
7218 + set_buffer_uptodate(bh);
7219 + continue;
7220 + }
7221 ++ clear_buffer_dirty(bh);
7222 + ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
7223 + if (ret) {
7224 + /*
7225 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
7226 +index fb15c9c..851eac3 100644
7227 +--- a/fs/ext4/super.c
7228 ++++ b/fs/ext4/super.c
7229 +@@ -77,6 +77,7 @@ static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
7230 + const char *dev_name, void *data);
7231 + static void ext4_destroy_lazyinit_thread(void);
7232 + static void ext4_unregister_li_request(struct super_block *sb);
7233 ++static void ext4_clear_request_list(void);
7234 +
7235 + #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
7236 + static struct file_system_type ext3_fs_type = {
7237 +@@ -2704,6 +2705,8 @@ static void ext4_unregister_li_request(struct super_block *sb)
7238 + mutex_unlock(&ext4_li_info->li_list_mtx);
7239 + }
7240 +
7241 ++static struct task_struct *ext4_lazyinit_task;
7242 ++
7243 + /*
7244 + * This is the function where ext4lazyinit thread lives. It walks
7245 + * through the request list searching for next scheduled filesystem.
7246 +@@ -2772,6 +2775,10 @@ cont_thread:
7247 + if (time_before(jiffies, next_wakeup))
7248 + schedule();
7249 + finish_wait(&eli->li_wait_daemon, &wait);
7250 ++ if (kthread_should_stop()) {
7251 ++ ext4_clear_request_list();
7252 ++ goto exit_thread;
7253 ++ }
7254 + }
7255 +
7256 + exit_thread:
7257 +@@ -2796,6 +2803,7 @@ exit_thread:
7258 + wake_up(&eli->li_wait_task);
7259 +
7260 + kfree(ext4_li_info);
7261 ++ ext4_lazyinit_task = NULL;
7262 + ext4_li_info = NULL;
7263 + mutex_unlock(&ext4_li_mtx);
7264 +
7265 +@@ -2818,11 +2826,10 @@ static void ext4_clear_request_list(void)
7266 +
7267 + static int ext4_run_lazyinit_thread(void)
7268 + {
7269 +- struct task_struct *t;
7270 +-
7271 +- t = kthread_run(ext4_lazyinit_thread, ext4_li_info, "ext4lazyinit");
7272 +- if (IS_ERR(t)) {
7273 +- int err = PTR_ERR(t);
7274 ++ ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
7275 ++ ext4_li_info, "ext4lazyinit");
7276 ++ if (IS_ERR(ext4_lazyinit_task)) {
7277 ++ int err = PTR_ERR(ext4_lazyinit_task);
7278 + ext4_clear_request_list();
7279 + del_timer_sync(&ext4_li_info->li_timer);
7280 + kfree(ext4_li_info);
7281 +@@ -2916,7 +2923,7 @@ static int ext4_register_li_request(struct super_block *sb,
7282 + struct ext4_sb_info *sbi = EXT4_SB(sb);
7283 + struct ext4_li_request *elr;
7284 + ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
7285 +- int ret;
7286 ++ int ret = 0;
7287 +
7288 + if (sbi->s_li_request != NULL)
7289 + return 0;
7290 +@@ -2973,16 +2980,10 @@ static void ext4_destroy_lazyinit_thread(void)
7291 + * If thread exited earlier
7292 + * there's nothing to be done.
7293 + */
7294 +- if (!ext4_li_info)
7295 ++ if (!ext4_li_info || !ext4_lazyinit_task)
7296 + return;
7297 +
7298 +- ext4_clear_request_list();
7299 +-
7300 +- while (ext4_li_info->li_task) {
7301 +- wake_up(&ext4_li_info->li_wait_daemon);
7302 +- wait_event(ext4_li_info->li_wait_task,
7303 +- ext4_li_info->li_task == NULL);
7304 +- }
7305 ++ kthread_stop(ext4_lazyinit_task);
7306 + }
7307 +
7308 + static int ext4_fill_super(struct super_block *sb, void *data, int silent)
7309 +@@ -4756,7 +4757,7 @@ static struct file_system_type ext4_fs_type = {
7310 + .fs_flags = FS_REQUIRES_DEV,
7311 + };
7312 +
7313 +-int __init ext4_init_feat_adverts(void)
7314 ++static int __init ext4_init_feat_adverts(void)
7315 + {
7316 + struct ext4_features *ef;
7317 + int ret = -ENOMEM;
7318 +@@ -4780,6 +4781,13 @@ out:
7319 + return ret;
7320 + }
7321 +
7322 ++static void ext4_exit_feat_adverts(void)
7323 ++{
7324 ++ kobject_put(&ext4_feat->f_kobj);
7325 ++ wait_for_completion(&ext4_feat->f_kobj_unregister);
7326 ++ kfree(ext4_feat);
7327 ++}
7328 ++
7329 + static int __init ext4_init_fs(void)
7330 + {
7331 + int err;
7332 +@@ -4826,7 +4834,7 @@ out1:
7333 + out2:
7334 + ext4_exit_mballoc();
7335 + out3:
7336 +- kfree(ext4_feat);
7337 ++ ext4_exit_feat_adverts();
7338 + remove_proc_entry("fs/ext4", NULL);
7339 + kset_unregister(ext4_kset);
7340 + out4:
7341 +@@ -4845,6 +4853,7 @@ static void __exit ext4_exit_fs(void)
7342 + destroy_inodecache();
7343 + ext4_exit_xattr();
7344 + ext4_exit_mballoc();
7345 ++ ext4_exit_feat_adverts();
7346 + remove_proc_entry("fs/ext4", NULL);
7347 + kset_unregister(ext4_kset);
7348 + ext4_exit_system_zone();
7349 +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
7350 +index 3d06ccc..cdbf7ac 100644
7351 +--- a/fs/fs-writeback.c
7352 ++++ b/fs/fs-writeback.c
7353 +@@ -84,13 +84,9 @@ static inline struct inode *wb_inode(struct list_head *head)
7354 + return list_entry(head, struct inode, i_wb_list);
7355 + }
7356 +
7357 +-static void bdi_queue_work(struct backing_dev_info *bdi,
7358 +- struct wb_writeback_work *work)
7359 ++/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
7360 ++static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
7361 + {
7362 +- trace_writeback_queue(bdi, work);
7363 +-
7364 +- spin_lock_bh(&bdi->wb_lock);
7365 +- list_add_tail(&work->list, &bdi->work_list);
7366 + if (bdi->wb.task) {
7367 + wake_up_process(bdi->wb.task);
7368 + } else {
7369 +@@ -98,15 +94,26 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
7370 + * The bdi thread isn't there, wake up the forker thread which
7371 + * will create and run it.
7372 + */
7373 +- trace_writeback_nothread(bdi, work);
7374 + wake_up_process(default_backing_dev_info.wb.task);
7375 + }
7376 ++}
7377 ++
7378 ++static void bdi_queue_work(struct backing_dev_info *bdi,
7379 ++ struct wb_writeback_work *work)
7380 ++{
7381 ++ trace_writeback_queue(bdi, work);
7382 ++
7383 ++ spin_lock_bh(&bdi->wb_lock);
7384 ++ list_add_tail(&work->list, &bdi->work_list);
7385 ++ if (!bdi->wb.task)
7386 ++ trace_writeback_nothread(bdi, work);
7387 ++ bdi_wakeup_flusher(bdi);
7388 + spin_unlock_bh(&bdi->wb_lock);
7389 + }
7390 +
7391 + static void
7392 + __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
7393 +- bool range_cyclic, bool for_background)
7394 ++ bool range_cyclic)
7395 + {
7396 + struct wb_writeback_work *work;
7397 +
7398 +@@ -126,7 +133,6 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
7399 + work->sync_mode = WB_SYNC_NONE;
7400 + work->nr_pages = nr_pages;
7401 + work->range_cyclic = range_cyclic;
7402 +- work->for_background = for_background;
7403 +
7404 + bdi_queue_work(bdi, work);
7405 + }
7406 +@@ -144,7 +150,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
7407 + */
7408 + void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
7409 + {
7410 +- __bdi_start_writeback(bdi, nr_pages, true, false);
7411 ++ __bdi_start_writeback(bdi, nr_pages, true);
7412 + }
7413 +
7414 + /**
7415 +@@ -152,13 +158,20 @@ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
7416 + * @bdi: the backing device to write from
7417 + *
7418 + * Description:
7419 +- * This does WB_SYNC_NONE background writeback. The IO is only
7420 +- * started when this function returns, we make no guarentees on
7421 +- * completion. Caller need not hold sb s_umount semaphore.
7422 ++ * This makes sure WB_SYNC_NONE background writeback happens. When
7423 ++ * this function returns, it is only guaranteed that for given BDI
7424 ++ * some IO is happening if we are over background dirty threshold.
7425 ++ * Caller need not hold sb s_umount semaphore.
7426 + */
7427 + void bdi_start_background_writeback(struct backing_dev_info *bdi)
7428 + {
7429 +- __bdi_start_writeback(bdi, LONG_MAX, true, true);
7430 ++ /*
7431 ++ * We just wake up the flusher thread. It will perform background
7432 ++ * writeback as soon as there is no other work to do.
7433 ++ */
7434 ++ spin_lock_bh(&bdi->wb_lock);
7435 ++ bdi_wakeup_flusher(bdi);
7436 ++ spin_unlock_bh(&bdi->wb_lock);
7437 + }
7438 +
7439 + /*
7440 +@@ -616,6 +629,7 @@ static long wb_writeback(struct bdi_writeback *wb,
7441 + };
7442 + unsigned long oldest_jif;
7443 + long wrote = 0;
7444 ++ long write_chunk;
7445 + struct inode *inode;
7446 +
7447 + if (wbc.for_kupdate) {
7448 +@@ -628,6 +642,24 @@ static long wb_writeback(struct bdi_writeback *wb,
7449 + wbc.range_end = LLONG_MAX;
7450 + }
7451 +
7452 ++ /*
7453 ++ * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
7454 ++ * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
7455 ++ * here avoids calling into writeback_inodes_wb() more than once.
7456 ++ *
7457 ++ * The intended call sequence for WB_SYNC_ALL writeback is:
7458 ++ *
7459 ++ * wb_writeback()
7460 ++ * __writeback_inodes_sb() <== called only once
7461 ++ * write_cache_pages() <== called once for each inode
7462 ++ * (quickly) tag currently dirty pages
7463 ++ * (maybe slowly) sync all tagged pages
7464 ++ */
7465 ++ if (wbc.sync_mode == WB_SYNC_NONE)
7466 ++ write_chunk = MAX_WRITEBACK_PAGES;
7467 ++ else
7468 ++ write_chunk = LONG_MAX;
7469 ++
7470 + wbc.wb_start = jiffies; /* livelock avoidance */
7471 + for (;;) {
7472 + /*
7473 +@@ -637,6 +669,16 @@ static long wb_writeback(struct bdi_writeback *wb,
7474 + break;
7475 +
7476 + /*
7477 ++ * Background writeout and kupdate-style writeback may
7478 ++ * run forever. Stop them if there is other work to do
7479 ++ * so that e.g. sync can proceed. They'll be restarted
7480 ++ * after the other works are all done.
7481 ++ */
7482 ++ if ((work->for_background || work->for_kupdate) &&
7483 ++ !list_empty(&wb->bdi->work_list))
7484 ++ break;
7485 ++
7486 ++ /*
7487 + * For background writeout, stop when we are below the
7488 + * background dirty threshold
7489 + */
7490 +@@ -644,7 +686,7 @@ static long wb_writeback(struct bdi_writeback *wb,
7491 + break;
7492 +
7493 + wbc.more_io = 0;
7494 +- wbc.nr_to_write = MAX_WRITEBACK_PAGES;
7495 ++ wbc.nr_to_write = write_chunk;
7496 + wbc.pages_skipped = 0;
7497 +
7498 + trace_wbc_writeback_start(&wbc, wb->bdi);
7499 +@@ -654,8 +696,8 @@ static long wb_writeback(struct bdi_writeback *wb,
7500 + writeback_inodes_wb(wb, &wbc);
7501 + trace_wbc_writeback_written(&wbc, wb->bdi);
7502 +
7503 +- work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
7504 +- wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
7505 ++ work->nr_pages -= write_chunk - wbc.nr_to_write;
7506 ++ wrote += write_chunk - wbc.nr_to_write;
7507 +
7508 + /*
7509 + * If we consumed everything, see if we have more
7510 +@@ -670,7 +712,7 @@ static long wb_writeback(struct bdi_writeback *wb,
7511 + /*
7512 + * Did we write something? Try for more
7513 + */
7514 +- if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
7515 ++ if (wbc.nr_to_write < write_chunk)
7516 + continue;
7517 + /*
7518 + * Nothing written. Wait for some inode to
7519 +@@ -718,6 +760,23 @@ static unsigned long get_nr_dirty_pages(void)
7520 + get_nr_dirty_inodes();
7521 + }
7522 +
7523 ++static long wb_check_background_flush(struct bdi_writeback *wb)
7524 ++{
7525 ++ if (over_bground_thresh()) {
7526 ++
7527 ++ struct wb_writeback_work work = {
7528 ++ .nr_pages = LONG_MAX,
7529 ++ .sync_mode = WB_SYNC_NONE,
7530 ++ .for_background = 1,
7531 ++ .range_cyclic = 1,
7532 ++ };
7533 ++
7534 ++ return wb_writeback(wb, &work);
7535 ++ }
7536 ++
7537 ++ return 0;
7538 ++}
7539 ++
7540 + static long wb_check_old_data_flush(struct bdi_writeback *wb)
7541 + {
7542 + unsigned long expired;
7543 +@@ -787,6 +846,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
7544 + * Check for periodic writeback, kupdated() style
7545 + */
7546 + wrote += wb_check_old_data_flush(wb);
7547 ++ wrote += wb_check_background_flush(wb);
7548 + clear_bit(BDI_writeback_running, &wb->bdi->state);
7549 +
7550 + return wrote;
7551 +@@ -873,7 +933,7 @@ void wakeup_flusher_threads(long nr_pages)
7552 + list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
7553 + if (!bdi_has_dirty_io(bdi))
7554 + continue;
7555 +- __bdi_start_writeback(bdi, nr_pages, false, false);
7556 ++ __bdi_start_writeback(bdi, nr_pages, false);
7557 + }
7558 + rcu_read_unlock();
7559 + }
7560 +diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
7561 +index 1fd62fc..78df330 100644
7562 +--- a/fs/nfs/delegation.c
7563 ++++ b/fs/nfs/delegation.c
7564 +@@ -23,8 +23,6 @@
7565 +
7566 + static void nfs_do_free_delegation(struct nfs_delegation *delegation)
7567 + {
7568 +- if (delegation->cred)
7569 +- put_rpccred(delegation->cred);
7570 + kfree(delegation);
7571 + }
7572 +
7573 +@@ -37,6 +35,10 @@ static void nfs_free_delegation_callback(struct rcu_head *head)
7574 +
7575 + static void nfs_free_delegation(struct nfs_delegation *delegation)
7576 + {
7577 ++ if (delegation->cred) {
7578 ++ put_rpccred(delegation->cred);
7579 ++ delegation->cred = NULL;
7580 ++ }
7581 + call_rcu(&delegation->rcu, nfs_free_delegation_callback);
7582 + }
7583 +
7584 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
7585 +index 996dd89..bd80b38 100644
7586 +--- a/fs/nfs/dir.c
7587 ++++ b/fs/nfs/dir.c
7588 +@@ -33,7 +33,6 @@
7589 + #include <linux/namei.h>
7590 + #include <linux/mount.h>
7591 + #include <linux/sched.h>
7592 +-#include <linux/vmalloc.h>
7593 + #include <linux/kmemleak.h>
7594 +
7595 + #include "delegation.h"
7596 +@@ -459,25 +458,26 @@ out:
7597 + /* Perform conversion from xdr to cache array */
7598 + static
7599 + int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry,
7600 +- void *xdr_page, struct page *page, unsigned int buflen)
7601 ++ struct page **xdr_pages, struct page *page, unsigned int buflen)
7602 + {
7603 + struct xdr_stream stream;
7604 +- struct xdr_buf buf;
7605 +- __be32 *ptr = xdr_page;
7606 ++ struct xdr_buf buf = {
7607 ++ .pages = xdr_pages,
7608 ++ .page_len = buflen,
7609 ++ .buflen = buflen,
7610 ++ .len = buflen,
7611 ++ };
7612 ++ struct page *scratch;
7613 + struct nfs_cache_array *array;
7614 + unsigned int count = 0;
7615 + int status;
7616 +
7617 +- buf.head->iov_base = xdr_page;
7618 +- buf.head->iov_len = buflen;
7619 +- buf.tail->iov_len = 0;
7620 +- buf.page_base = 0;
7621 +- buf.page_len = 0;
7622 +- buf.buflen = buf.head->iov_len;
7623 +- buf.len = buf.head->iov_len;
7624 +-
7625 +- xdr_init_decode(&stream, &buf, ptr);
7626 ++ scratch = alloc_page(GFP_KERNEL);
7627 ++ if (scratch == NULL)
7628 ++ return -ENOMEM;
7629 +
7630 ++ xdr_init_decode(&stream, &buf, NULL);
7631 ++ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
7632 +
7633 + do {
7634 + status = xdr_decode(desc, entry, &stream);
7635 +@@ -506,6 +506,8 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
7636 + } else
7637 + status = PTR_ERR(array);
7638 + }
7639 ++
7640 ++ put_page(scratch);
7641 + return status;
7642 + }
7643 +
7644 +@@ -521,7 +523,6 @@ static
7645 + void nfs_readdir_free_large_page(void *ptr, struct page **pages,
7646 + unsigned int npages)
7647 + {
7648 +- vm_unmap_ram(ptr, npages);
7649 + nfs_readdir_free_pagearray(pages, npages);
7650 + }
7651 +
7652 +@@ -530,9 +531,8 @@ void nfs_readdir_free_large_page(void *ptr, struct page **pages,
7653 + * to nfs_readdir_free_large_page
7654 + */
7655 + static
7656 +-void *nfs_readdir_large_page(struct page **pages, unsigned int npages)
7657 ++int nfs_readdir_large_page(struct page **pages, unsigned int npages)
7658 + {
7659 +- void *ptr;
7660 + unsigned int i;
7661 +
7662 + for (i = 0; i < npages; i++) {
7663 +@@ -541,13 +541,11 @@ void *nfs_readdir_large_page(struct page **pages, unsigned int npages)
7664 + goto out_freepages;
7665 + pages[i] = page;
7666 + }
7667 ++ return 0;
7668 +
7669 +- ptr = vm_map_ram(pages, npages, 0, PAGE_KERNEL);
7670 +- if (!IS_ERR_OR_NULL(ptr))
7671 +- return ptr;
7672 + out_freepages:
7673 + nfs_readdir_free_pagearray(pages, i);
7674 +- return NULL;
7675 ++ return -ENOMEM;
7676 + }
7677 +
7678 + static
7679 +@@ -577,8 +575,8 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
7680 + memset(array, 0, sizeof(struct nfs_cache_array));
7681 + array->eof_index = -1;
7682 +
7683 +- pages_ptr = nfs_readdir_large_page(pages, array_size);
7684 +- if (!pages_ptr)
7685 ++ status = nfs_readdir_large_page(pages, array_size);
7686 ++ if (status < 0)
7687 + goto out_release_array;
7688 + do {
7689 + unsigned int pglen;
7690 +@@ -587,7 +585,7 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
7691 + if (status < 0)
7692 + break;
7693 + pglen = status;
7694 +- status = nfs_readdir_page_filler(desc, &entry, pages_ptr, page, pglen);
7695 ++ status = nfs_readdir_page_filler(desc, &entry, pages, page, pglen);
7696 + if (status < 0) {
7697 + if (status == -ENOSPC)
7698 + status = 0;
7699 +@@ -1579,6 +1577,7 @@ static int nfs_create(struct inode *dir, struct dentry *dentry, int mode,
7700 + {
7701 + struct iattr attr;
7702 + int error;
7703 ++ int open_flags = 0;
7704 +
7705 + dfprintk(VFS, "NFS: create(%s/%ld), %s\n",
7706 + dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
7707 +@@ -1586,7 +1585,10 @@ static int nfs_create(struct inode *dir, struct dentry *dentry, int mode,
7708 + attr.ia_mode = mode;
7709 + attr.ia_valid = ATTR_MODE;
7710 +
7711 +- error = NFS_PROTO(dir)->create(dir, dentry, &attr, 0, NULL);
7712 ++ if ((nd->flags & LOOKUP_CREATE) != 0)
7713 ++ open_flags = nd->intent.open.flags;
7714 ++
7715 ++ error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, NULL);
7716 + if (error != 0)
7717 + goto out_err;
7718 + return 0;
7719 +diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
7720 +index e6ace0d..9943a75 100644
7721 +--- a/fs/nfs/direct.c
7722 ++++ b/fs/nfs/direct.c
7723 +@@ -407,15 +407,18 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
7724 + pos += vec->iov_len;
7725 + }
7726 +
7727 ++ /*
7728 ++ * If no bytes were started, return the error, and let the
7729 ++ * generic layer handle the completion.
7730 ++ */
7731 ++ if (requested_bytes == 0) {
7732 ++ nfs_direct_req_release(dreq);
7733 ++ return result < 0 ? result : -EIO;
7734 ++ }
7735 ++
7736 + if (put_dreq(dreq))
7737 + nfs_direct_complete(dreq);
7738 +-
7739 +- if (requested_bytes != 0)
7740 +- return 0;
7741 +-
7742 +- if (result < 0)
7743 +- return result;
7744 +- return -EIO;
7745 ++ return 0;
7746 + }
7747 +
7748 + static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
7749 +@@ -841,15 +844,18 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
7750 + pos += vec->iov_len;
7751 + }
7752 +
7753 ++ /*
7754 ++ * If no bytes were started, return the error, and let the
7755 ++ * generic layer handle the completion.
7756 ++ */
7757 ++ if (requested_bytes == 0) {
7758 ++ nfs_direct_req_release(dreq);
7759 ++ return result < 0 ? result : -EIO;
7760 ++ }
7761 ++
7762 + if (put_dreq(dreq))
7763 + nfs_direct_write_complete(dreq, dreq->inode);
7764 +-
7765 +- if (requested_bytes != 0)
7766 +- return 0;
7767 +-
7768 +- if (result < 0)
7769 +- return result;
7770 +- return -EIO;
7771 ++ return 0;
7772 + }
7773 +
7774 + static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
7775 +diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
7776 +index 5914a19..b382a1b 100644
7777 +--- a/fs/nfs/nfs2xdr.c
7778 ++++ b/fs/nfs/nfs2xdr.c
7779 +@@ -487,12 +487,6 @@ nfs_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_se
7780 +
7781 + entry->d_type = DT_UNKNOWN;
7782 +
7783 +- p = xdr_inline_peek(xdr, 8);
7784 +- if (p != NULL)
7785 +- entry->eof = !p[0] && p[1];
7786 +- else
7787 +- entry->eof = 0;
7788 +-
7789 + return p;
7790 +
7791 + out_overflow:
7792 +diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
7793 +index f6cc60f..ba91236 100644
7794 +--- a/fs/nfs/nfs3xdr.c
7795 ++++ b/fs/nfs/nfs3xdr.c
7796 +@@ -647,12 +647,6 @@ nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_s
7797 + memset((u8*)(entry->fh), 0, sizeof(*entry->fh));
7798 + }
7799 +
7800 +- p = xdr_inline_peek(xdr, 8);
7801 +- if (p != NULL)
7802 +- entry->eof = !p[0] && p[1];
7803 +- else
7804 +- entry->eof = 0;
7805 +-
7806 + return p;
7807 +
7808 + out_overflow:
7809 +diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
7810 +index 9f1826b..0662a98 100644
7811 +--- a/fs/nfs/nfs4xdr.c
7812 ++++ b/fs/nfs/nfs4xdr.c
7813 +@@ -6215,12 +6215,6 @@ __be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
7814 + if (verify_attr_len(xdr, p, len) < 0)
7815 + goto out_overflow;
7816 +
7817 +- p = xdr_inline_peek(xdr, 8);
7818 +- if (p != NULL)
7819 +- entry->eof = !p[0] && p[1];
7820 +- else
7821 +- entry->eof = 0;
7822 +-
7823 + return p;
7824 +
7825 + out_overflow:
7826 +diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
7827 +index f0695e8..844960f 100644
7828 +--- a/fs/nfsd/nfs4idmap.c
7829 ++++ b/fs/nfsd/nfs4idmap.c
7830 +@@ -524,13 +524,13 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen
7831 + int ret;
7832 +
7833 + if (namelen + 1 > sizeof(key.name))
7834 +- return -EINVAL;
7835 ++ return -ESRCH; /* nfserr_badowner */
7836 + memcpy(key.name, name, namelen);
7837 + key.name[namelen] = '\0';
7838 + strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
7839 + ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item);
7840 + if (ret == -ENOENT)
7841 +- ret = -ESRCH; /* nfserr_badname */
7842 ++ ret = -ESRCH; /* nfserr_badowner */
7843 + if (ret)
7844 + return ret;
7845 + *id = item->id;
7846 +diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
7847 +index 6b641cf..7ecfa24 100644
7848 +--- a/fs/nfsd/nfsd.h
7849 ++++ b/fs/nfsd/nfsd.h
7850 +@@ -158,6 +158,7 @@ void nfsd_lockd_shutdown(void);
7851 + #define nfserr_attrnotsupp cpu_to_be32(NFSERR_ATTRNOTSUPP)
7852 + #define nfserr_bad_xdr cpu_to_be32(NFSERR_BAD_XDR)
7853 + #define nfserr_openmode cpu_to_be32(NFSERR_OPENMODE)
7854 ++#define nfserr_badowner cpu_to_be32(NFSERR_BADOWNER)
7855 + #define nfserr_locks_held cpu_to_be32(NFSERR_LOCKS_HELD)
7856 + #define nfserr_op_illegal cpu_to_be32(NFSERR_OP_ILLEGAL)
7857 + #define nfserr_grace cpu_to_be32(NFSERR_GRACE)
7858 +diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
7859 +index 08e1726..f0c2f23 100644
7860 +--- a/fs/nfsd/nfsproc.c
7861 ++++ b/fs/nfsd/nfsproc.c
7862 +@@ -737,7 +737,7 @@ nfserrno (int errno)
7863 + { nfserr_jukebox, -ERESTARTSYS },
7864 + { nfserr_dropit, -EAGAIN },
7865 + { nfserr_dropit, -ENOMEM },
7866 +- { nfserr_badname, -ESRCH },
7867 ++ { nfserr_badowner, -ESRCH },
7868 + { nfserr_io, -ETXTBSY },
7869 + { nfserr_notsupp, -EOPNOTSUPP },
7870 + { nfserr_toosmall, -ETOOSMALL },
7871 +diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
7872 +index f804d41..2940a58 100644
7873 +--- a/fs/nilfs2/super.c
7874 ++++ b/fs/nilfs2/super.c
7875 +@@ -688,7 +688,8 @@ skip_mount_setup:
7876 + sbp[0]->s_state =
7877 + cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS);
7878 + /* synchronize sbp[1] with sbp[0] */
7879 +- memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
7880 ++ if (sbp[1])
7881 ++ memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
7882 + return nilfs_commit_super(sbi, NILFS_SB_COMMIT_ALL);
7883 + }
7884 +
7885 +diff --git a/fs/partitions/check.c b/fs/partitions/check.c
7886 +index 0a8b0ad..0123717 100644
7887 +--- a/fs/partitions/check.c
7888 ++++ b/fs/partitions/check.c
7889 +@@ -372,6 +372,13 @@ static void delete_partition_rcu_cb(struct rcu_head *head)
7890 + put_device(part_to_dev(part));
7891 + }
7892 +
7893 ++void __delete_partition(struct kref *ref)
7894 ++{
7895 ++ struct hd_struct *part = container_of(ref, struct hd_struct, ref);
7896 ++
7897 ++ call_rcu(&part->rcu_head, delete_partition_rcu_cb);
7898 ++}
7899 ++
7900 + void delete_partition(struct gendisk *disk, int partno)
7901 + {
7902 + struct disk_part_tbl *ptbl = disk->part_tbl;
7903 +@@ -390,7 +397,7 @@ void delete_partition(struct gendisk *disk, int partno)
7904 + kobject_put(part->holder_dir);
7905 + device_del(part_to_dev(part));
7906 +
7907 +- call_rcu(&part->rcu_head, delete_partition_rcu_cb);
7908 ++ kref_put(&part->ref, __delete_partition);
7909 + }
7910 +
7911 + static ssize_t whole_disk_show(struct device *dev,
7912 +@@ -489,6 +496,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
7913 + if (!dev_get_uevent_suppress(ddev))
7914 + kobject_uevent(&pdev->kobj, KOBJ_ADD);
7915 +
7916 ++ kref_init(&p->ref);
7917 + return p;
7918 +
7919 + out_free_info:
7920 +diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
7921 +index 6f37c39..d245cb2 100644
7922 +--- a/fs/proc/kcore.c
7923 ++++ b/fs/proc/kcore.c
7924 +@@ -558,7 +558,7 @@ static int open_kcore(struct inode *inode, struct file *filp)
7925 + static const struct file_operations proc_kcore_operations = {
7926 + .read = read_kcore,
7927 + .open = open_kcore,
7928 +- .llseek = generic_file_llseek,
7929 ++ .llseek = default_llseek,
7930 + };
7931 +
7932 + #ifdef CONFIG_MEMORY_HOTPLUG
7933 +diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
7934 +index 883c1d4..40b1f0e 100644
7935 +--- a/include/drm/drm_pciids.h
7936 ++++ b/include/drm/drm_pciids.h
7937 +@@ -28,7 +28,6 @@
7938 + {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
7939 + {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
7940 + {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
7941 +- {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
7942 + {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
7943 + {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
7944 + {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
7945 +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
7946 +index 36ab42c..7572b19 100644
7947 +--- a/include/linux/blkdev.h
7948 ++++ b/include/linux/blkdev.h
7949 +@@ -115,6 +115,7 @@ struct request {
7950 + void *elevator_private3;
7951 +
7952 + struct gendisk *rq_disk;
7953 ++ struct hd_struct *part;
7954 + unsigned long start_time;
7955 + #ifdef CONFIG_BLK_CGROUP
7956 + unsigned long long start_time_ns;
7957 +diff --git a/include/linux/completion.h b/include/linux/completion.h
7958 +index 36d57f7..51494e6 100644
7959 +--- a/include/linux/completion.h
7960 ++++ b/include/linux/completion.h
7961 +@@ -81,10 +81,10 @@ extern int wait_for_completion_interruptible(struct completion *x);
7962 + extern int wait_for_completion_killable(struct completion *x);
7963 + extern unsigned long wait_for_completion_timeout(struct completion *x,
7964 + unsigned long timeout);
7965 +-extern unsigned long wait_for_completion_interruptible_timeout(
7966 +- struct completion *x, unsigned long timeout);
7967 +-extern unsigned long wait_for_completion_killable_timeout(
7968 +- struct completion *x, unsigned long timeout);
7969 ++extern long wait_for_completion_interruptible_timeout(
7970 ++ struct completion *x, unsigned long timeout);
7971 ++extern long wait_for_completion_killable_timeout(
7972 ++ struct completion *x, unsigned long timeout);
7973 + extern bool try_wait_for_completion(struct completion *x);
7974 + extern bool completion_done(struct completion *x);
7975 +
7976 +diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
7977 +index a90b389..1c70028 100644
7978 +--- a/include/linux/dynamic_debug.h
7979 ++++ b/include/linux/dynamic_debug.h
7980 +@@ -44,34 +44,24 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
7981 + extern int ddebug_remove_module(const char *mod_name);
7982 +
7983 + #define dynamic_pr_debug(fmt, ...) do { \
7984 +- __label__ do_printk; \
7985 +- __label__ out; \
7986 + static struct _ddebug descriptor \
7987 + __used \
7988 + __attribute__((section("__verbose"), aligned(8))) = \
7989 + { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
7990 + _DPRINTK_FLAGS_DEFAULT }; \
7991 +- JUMP_LABEL(&descriptor.enabled, do_printk); \
7992 +- goto out; \
7993 +-do_printk: \
7994 +- printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
7995 +-out: ; \
7996 ++ if (unlikely(descriptor.enabled)) \
7997 ++ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
7998 + } while (0)
7999 +
8000 +
8001 + #define dynamic_dev_dbg(dev, fmt, ...) do { \
8002 +- __label__ do_printk; \
8003 +- __label__ out; \
8004 + static struct _ddebug descriptor \
8005 + __used \
8006 + __attribute__((section("__verbose"), aligned(8))) = \
8007 + { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
8008 + _DPRINTK_FLAGS_DEFAULT }; \
8009 +- JUMP_LABEL(&descriptor.enabled, do_printk); \
8010 +- goto out; \
8011 +-do_printk: \
8012 +- dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
8013 +-out: ; \
8014 ++ if (unlikely(descriptor.enabled)) \
8015 ++ dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
8016 + } while (0)
8017 +
8018 + #else
8019 +diff --git a/include/linux/genhd.h b/include/linux/genhd.h
8020 +index 7a7b9c1..2ba2792 100644
8021 +--- a/include/linux/genhd.h
8022 ++++ b/include/linux/genhd.h
8023 +@@ -116,6 +116,7 @@ struct hd_struct {
8024 + struct disk_stats dkstats;
8025 + #endif
8026 + struct rcu_head rcu_head;
8027 ++ struct kref ref;
8028 + };
8029 +
8030 + #define GENHD_FL_REMOVABLE 1
8031 +@@ -583,6 +584,7 @@ extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
8032 + sector_t len, int flags,
8033 + struct partition_meta_info
8034 + *info);
8035 ++extern void __delete_partition(struct kref *ref);
8036 + extern void delete_partition(struct gendisk *, int);
8037 + extern void printk_all_partitions(void);
8038 +
8039 +diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
8040 +index ed5a03c..1c2138d 100644
8041 +--- a/include/linux/ieee80211.h
8042 ++++ b/include/linux/ieee80211.h
8043 +@@ -959,7 +959,7 @@ struct ieee80211_ht_info {
8044 + /* block-ack parameters */
8045 + #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
8046 + #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
8047 +-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
8048 ++#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
8049 + #define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
8050 + #define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
8051 +
8052 +diff --git a/include/linux/klist.h b/include/linux/klist.h
8053 +index e91a4e5..a370ce5 100644
8054 +--- a/include/linux/klist.h
8055 ++++ b/include/linux/klist.h
8056 +@@ -22,7 +22,7 @@ struct klist {
8057 + struct list_head k_list;
8058 + void (*get)(struct klist_node *);
8059 + void (*put)(struct klist_node *);
8060 +-} __attribute__ ((aligned (4)));
8061 ++} __attribute__ ((aligned (sizeof(void *))));
8062 +
8063 + #define KLIST_INIT(_name, _get, _put) \
8064 + { .k_lock = __SPIN_LOCK_UNLOCKED(_name.k_lock), \
8065 +diff --git a/include/linux/kref.h b/include/linux/kref.h
8066 +index 6cc38fc..90b9e44 100644
8067 +--- a/include/linux/kref.h
8068 ++++ b/include/linux/kref.h
8069 +@@ -23,6 +23,7 @@ struct kref {
8070 +
8071 + void kref_init(struct kref *kref);
8072 + void kref_get(struct kref *kref);
8073 ++int kref_test_and_get(struct kref *kref);
8074 + int kref_put(struct kref *kref, void (*release) (struct kref *kref));
8075 +
8076 + #endif /* _KREF_H_ */
8077 +diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
8078 +index 71c09b2..9f19430 100644
8079 +--- a/include/linux/lockdep.h
8080 ++++ b/include/linux/lockdep.h
8081 +@@ -522,12 +522,15 @@ static inline void print_irqtrace_events(struct task_struct *curr)
8082 + #ifdef CONFIG_DEBUG_LOCK_ALLOC
8083 + # ifdef CONFIG_PROVE_LOCKING
8084 + # define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
8085 ++# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
8086 + # else
8087 + # define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
8088 ++# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
8089 + # endif
8090 + # define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
8091 + #else
8092 + # define lock_map_acquire(l) do { } while (0)
8093 ++# define lock_map_acquire_read(l) do { } while (0)
8094 + # define lock_map_release(l) do { } while (0)
8095 + #endif
8096 +
8097 +diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
8098 +index 31c237a..12b9eb5 100644
8099 +--- a/include/linux/memory_hotplug.h
8100 ++++ b/include/linux/memory_hotplug.h
8101 +@@ -161,6 +161,12 @@ extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
8102 + extern void put_page_bootmem(struct page *page);
8103 + #endif
8104 +
8105 ++/*
8106 ++ * Lock for memory hotplug guarantees 1) all callbacks for memory hotplug
8107 ++ * notifier will be called under this. 2) offline/online/add/remove memory
8108 ++ * will not run simultaneously.
8109 ++ */
8110 ++
8111 + void lock_memory_hotplug(void);
8112 + void unlock_memory_hotplug(void);
8113 +
8114 +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
8115 +index 39c24eb..4890662 100644
8116 +--- a/include/linux/mmzone.h
8117 ++++ b/include/linux/mmzone.h
8118 +@@ -458,12 +458,6 @@ static inline int zone_is_oom_locked(const struct zone *zone)
8119 + return test_bit(ZONE_OOM_LOCKED, &zone->flags);
8120 + }
8121 +
8122 +-#ifdef CONFIG_SMP
8123 +-unsigned long zone_nr_free_pages(struct zone *zone);
8124 +-#else
8125 +-#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
8126 +-#endif /* CONFIG_SMP */
8127 +-
8128 + /*
8129 + * The "priority" of VM scanning is how much of the queues we will scan in one
8130 + * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
8131 +@@ -661,7 +655,9 @@ typedef struct pglist_data {
8132 + extern struct mutex zonelists_mutex;
8133 + void build_all_zonelists(void *data);
8134 + void wakeup_kswapd(struct zone *zone, int order);
8135 +-int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
8136 ++bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
8137 ++ int classzone_idx, int alloc_flags);
8138 ++bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
8139 + int classzone_idx, int alloc_flags);
8140 + enum memmap_context {
8141 + MEMMAP_EARLY,
8142 +diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
8143 +index 0edb256..fb877b5 100644
8144 +--- a/include/linux/nl80211.h
8145 ++++ b/include/linux/nl80211.h
8146 +@@ -1307,7 +1307,11 @@ enum nl80211_bitrate_attr {
8147 + * wireless core it thinks its knows the regulatory domain we should be in.
8148 + * @NL80211_REGDOM_SET_BY_COUNTRY_IE: the wireless core has received an
8149 + * 802.11 country information element with regulatory information it
8150 +- * thinks we should consider.
8151 ++ * thinks we should consider. cfg80211 only processes the country
8152 ++ * code from the IE, and relies on the regulatory domain information
8153 ++ * structure pased by userspace (CRDA) from our wireless-regdb.
8154 ++ * If a channel is enabled but the country code indicates it should
8155 ++ * be disabled we disable the channel and re-enable it upon disassociation.
8156 + */
8157 + enum nl80211_reg_initiator {
8158 + NL80211_REGDOM_SET_BY_CORE,
8159 +diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
8160 +index 32fb812..1ca6411 100644
8161 +--- a/include/linux/oprofile.h
8162 ++++ b/include/linux/oprofile.h
8163 +@@ -16,6 +16,8 @@
8164 + #include <linux/types.h>
8165 + #include <linux/spinlock.h>
8166 + #include <linux/init.h>
8167 ++#include <linux/errno.h>
8168 ++#include <linux/printk.h>
8169 + #include <asm/atomic.h>
8170 +
8171 + /* Each escaped entry is prefixed by ESCAPE_CODE
8172 +@@ -186,10 +188,17 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val);
8173 + int oprofile_add_data64(struct op_entry *entry, u64 val);
8174 + int oprofile_write_commit(struct op_entry *entry);
8175 +
8176 +-#ifdef CONFIG_PERF_EVENTS
8177 ++#ifdef CONFIG_HW_PERF_EVENTS
8178 + int __init oprofile_perf_init(struct oprofile_operations *ops);
8179 + void oprofile_perf_exit(void);
8180 + char *op_name_from_perf_id(void);
8181 +-#endif /* CONFIG_PERF_EVENTS */
8182 ++#else
8183 ++static inline int __init oprofile_perf_init(struct oprofile_operations *ops)
8184 ++{
8185 ++ pr_info("oprofile: hardware counters not available\n");
8186 ++ return -ENODEV;
8187 ++}
8188 ++static inline void oprofile_perf_exit(void) { }
8189 ++#endif /* CONFIG_HW_PERF_EVENTS */
8190 +
8191 + #endif /* OPROFILE_H */
8192 +diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
8193 +index ab2baa5..23241c2 100644
8194 +--- a/include/linux/radix-tree.h
8195 ++++ b/include/linux/radix-tree.h
8196 +@@ -146,6 +146,22 @@ static inline void *radix_tree_deref_slot(void **pslot)
8197 + }
8198 +
8199 + /**
8200 ++ * radix_tree_deref_slot_protected - dereference a slot without RCU lock but with tree lock held
8201 ++ * @pslot: pointer to slot, returned by radix_tree_lookup_slot
8202 ++ * Returns: item that was stored in that slot with any direct pointer flag
8203 ++ * removed.
8204 ++ *
8205 ++ * Similar to radix_tree_deref_slot but only used during migration when a pages
8206 ++ * mapping is being moved. The caller does not hold the RCU read lock but it
8207 ++ * must hold the tree lock to prevent parallel updates.
8208 ++ */
8209 ++static inline void *radix_tree_deref_slot_protected(void **pslot,
8210 ++ spinlock_t *treelock)
8211 ++{
8212 ++ return rcu_dereference_protected(*pslot, lockdep_is_held(treelock));
8213 ++}
8214 ++
8215 ++/**
8216 + * radix_tree_deref_retry - check radix_tree_deref_slot
8217 + * @arg: pointer returned by radix_tree_deref_slot
8218 + * Returns: 0 if retry is not required, otherwise retry is required
8219 +diff --git a/include/linux/security.h b/include/linux/security.h
8220 +index fd4d55f..d47a4c2 100644
8221 +--- a/include/linux/security.h
8222 ++++ b/include/linux/security.h
8223 +@@ -796,8 +796,9 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
8224 + * @unix_stream_connect:
8225 + * Check permissions before establishing a Unix domain stream connection
8226 + * between @sock and @other.
8227 +- * @sock contains the socket structure.
8228 +- * @other contains the peer socket structure.
8229 ++ * @sock contains the sock structure.
8230 ++ * @other contains the peer sock structure.
8231 ++ * @newsk contains the new sock structure.
8232 + * Return 0 if permission is granted.
8233 + * @unix_may_send:
8234 + * Check permissions before connecting or sending datagrams from @sock to
8235 +@@ -1568,8 +1569,7 @@ struct security_operations {
8236 + int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
8237 +
8238 + #ifdef CONFIG_SECURITY_NETWORK
8239 +- int (*unix_stream_connect) (struct socket *sock,
8240 +- struct socket *other, struct sock *newsk);
8241 ++ int (*unix_stream_connect) (struct sock *sock, struct sock *other, struct sock *newsk);
8242 + int (*unix_may_send) (struct socket *sock, struct socket *other);
8243 +
8244 + int (*socket_create) (int family, int type, int protocol, int kern);
8245 +@@ -2525,8 +2525,7 @@ static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32
8246 +
8247 + #ifdef CONFIG_SECURITY_NETWORK
8248 +
8249 +-int security_unix_stream_connect(struct socket *sock, struct socket *other,
8250 +- struct sock *newsk);
8251 ++int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk);
8252 + int security_unix_may_send(struct socket *sock, struct socket *other);
8253 + int security_socket_create(int family, int type, int protocol, int kern);
8254 + int security_socket_post_create(struct socket *sock, int family,
8255 +@@ -2567,8 +2566,8 @@ void security_tun_dev_post_create(struct sock *sk);
8256 + int security_tun_dev_attach(struct sock *sk);
8257 +
8258 + #else /* CONFIG_SECURITY_NETWORK */
8259 +-static inline int security_unix_stream_connect(struct socket *sock,
8260 +- struct socket *other,
8261 ++static inline int security_unix_stream_connect(struct sock *sock,
8262 ++ struct sock *other,
8263 + struct sock *newsk)
8264 + {
8265 + return 0;
8266 +diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
8267 +index 498ab93..7783c68 100644
8268 +--- a/include/linux/sunrpc/xdr.h
8269 ++++ b/include/linux/sunrpc/xdr.h
8270 +@@ -201,6 +201,8 @@ struct xdr_stream {
8271 +
8272 + __be32 *end; /* end of available buffer space */
8273 + struct kvec *iov; /* pointer to the current kvec */
8274 ++ struct kvec scratch; /* Scratch buffer */
8275 ++ struct page **page_ptr; /* pointer to the current page */
8276 + };
8277 +
8278 + extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
8279 +@@ -208,7 +210,7 @@ extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
8280 + extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
8281 + unsigned int base, unsigned int len);
8282 + extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
8283 +-extern __be32 *xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes);
8284 ++extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen);
8285 + extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
8286 + extern void xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
8287 + extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
8288 +diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
8289 +index 387fa7d..7faf933 100644
8290 +--- a/include/linux/sysrq.h
8291 ++++ b/include/linux/sysrq.h
8292 +@@ -17,6 +17,9 @@
8293 + #include <linux/errno.h>
8294 + #include <linux/types.h>
8295 +
8296 ++/* Enable/disable SYSRQ support by default (0==no, 1==yes). */
8297 ++#define SYSRQ_DEFAULT_ENABLE 1
8298 ++
8299 + /* Possible values of bitmask for enabling sysrq functions */
8300 + /* 0x0001 is reserved for enable everything */
8301 + #define SYSRQ_ENABLE_LOG 0x0002
8302 +diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
8303 +index 0b6e751..6c37d78 100644
8304 +--- a/include/linux/usb/hcd.h
8305 ++++ b/include/linux/usb/hcd.h
8306 +@@ -112,6 +112,7 @@ struct usb_hcd {
8307 + /* Flags that get set only during HCD registration or removal. */
8308 + unsigned rh_registered:1;/* is root hub registered? */
8309 + unsigned rh_pollable:1; /* may we poll the root hub? */
8310 ++ unsigned msix_enabled:1; /* driver has MSI-X enabled? */
8311 +
8312 + /* The next flag is a stopgap, to be removed when all the HCDs
8313 + * support the new root-hub polling mechanism. */
8314 +diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
8315 +index 16d682f..c904913 100644
8316 +--- a/include/linux/usb/serial.h
8317 ++++ b/include/linux/usb/serial.h
8318 +@@ -347,6 +347,9 @@ extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
8319 + extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
8320 + unsigned int ch);
8321 + extern int usb_serial_handle_break(struct usb_serial_port *port);
8322 ++extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
8323 ++ struct tty_struct *tty,
8324 ++ unsigned int status);
8325 +
8326 +
8327 + extern int usb_serial_bus_register(struct usb_serial_driver *device);
8328 +diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
8329 +index eaaea37..e4cc21c 100644
8330 +--- a/include/linux/vmstat.h
8331 ++++ b/include/linux/vmstat.h
8332 +@@ -254,6 +254,8 @@ extern void dec_zone_state(struct zone *, enum zone_stat_item);
8333 + extern void __dec_zone_state(struct zone *, enum zone_stat_item);
8334 +
8335 + void refresh_cpu_vm_stats(int);
8336 ++void reduce_pgdat_percpu_threshold(pg_data_t *pgdat);
8337 ++void restore_pgdat_percpu_threshold(pg_data_t *pgdat);
8338 + #else /* CONFIG_SMP */
8339 +
8340 + /*
8341 +@@ -298,6 +300,9 @@ static inline void __dec_zone_page_state(struct page *page,
8342 + #define dec_zone_page_state __dec_zone_page_state
8343 + #define mod_zone_page_state __mod_zone_page_state
8344 +
8345 ++static inline void reduce_pgdat_percpu_threshold(pg_data_t *pgdat) { }
8346 ++static inline void restore_pgdat_percpu_threshold(pg_data_t *pgdat) { }
8347 ++
8348 + static inline void refresh_cpu_vm_stats(int cpu) { }
8349 + #endif
8350 +
8351 +diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
8352 +index 97b8b7c..772dea2 100644
8353 +--- a/include/net/cfg80211.h
8354 ++++ b/include/net/cfg80211.h
8355 +@@ -1321,13 +1321,14 @@ struct cfg80211_ops {
8356 + * initiator is %REGDOM_SET_BY_CORE).
8357 + * @WIPHY_FLAG_STRICT_REGULATORY: tells us the driver for this device will
8358 + * ignore regulatory domain settings until it gets its own regulatory
8359 +- * domain via its regulatory_hint(). After its gets its own regulatory
8360 +- * domain it will only allow further regulatory domain settings to
8361 +- * further enhance compliance. For example if channel 13 and 14 are
8362 +- * disabled by this regulatory domain no user regulatory domain can
8363 +- * enable these channels at a later time. This can be used for devices
8364 +- * which do not have calibration information gauranteed for frequencies
8365 +- * or settings outside of its regulatory domain.
8366 ++ * domain via its regulatory_hint() unless the regulatory hint is
8367 ++ * from a country IE. After its gets its own regulatory domain it will
8368 ++ * only allow further regulatory domain settings to further enhance
8369 ++ * compliance. For example if channel 13 and 14 are disabled by this
8370 ++ * regulatory domain no user regulatory domain can enable these channels
8371 ++ * at a later time. This can be used for devices which do not have
8372 ++ * calibration information guaranteed for frequencies or settings
8373 ++ * outside of its regulatory domain.
8374 + * @WIPHY_FLAG_DISABLE_BEACON_HINTS: enable this if your driver needs to ensure
8375 + * that passive scan flags and beaconing flags may not be lifted by
8376 + * cfg80211 due to regulatory beacon hints. For more information on beacon
8377 +diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
8378 +index 216af85..1933e3c 100644
8379 +--- a/include/scsi/scsi.h
8380 ++++ b/include/scsi/scsi.h
8381 +@@ -9,6 +9,7 @@
8382 + #define _SCSI_SCSI_H
8383 +
8384 + #include <linux/types.h>
8385 ++#include <linux/scatterlist.h>
8386 +
8387 + struct scsi_cmnd;
8388 +
8389 +diff --git a/init/calibrate.c b/init/calibrate.c
8390 +index 6eb48e5..24fe022 100644
8391 +--- a/init/calibrate.c
8392 ++++ b/init/calibrate.c
8393 +@@ -66,7 +66,7 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
8394 + pre_start = 0;
8395 + read_current_timer(&start);
8396 + start_jiffies = jiffies;
8397 +- while (jiffies <= (start_jiffies + 1)) {
8398 ++ while (time_before_eq(jiffies, start_jiffies + 1)) {
8399 + pre_start = start;
8400 + read_current_timer(&start);
8401 + }
8402 +@@ -74,8 +74,8 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
8403 +
8404 + pre_end = 0;
8405 + end = post_start;
8406 +- while (jiffies <=
8407 +- (start_jiffies + 1 + DELAY_CALIBRATION_TICKS)) {
8408 ++ while (time_before_eq(jiffies, start_jiffies + 1 +
8409 ++ DELAY_CALIBRATION_TICKS)) {
8410 + pre_end = end;
8411 + read_current_timer(&end);
8412 + }
8413 +diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
8414 +index 1d25419..441fd62 100644
8415 +--- a/kernel/irq/migration.c
8416 ++++ b/kernel/irq/migration.c
8417 +@@ -56,6 +56,7 @@ void move_masked_irq(int irq)
8418 + void move_native_irq(int irq)
8419 + {
8420 + struct irq_desc *desc = irq_to_desc(irq);
8421 ++ bool masked;
8422 +
8423 + if (likely(!(desc->status & IRQ_MOVE_PENDING)))
8424 + return;
8425 +@@ -63,8 +64,15 @@ void move_native_irq(int irq)
8426 + if (unlikely(desc->status & IRQ_DISABLED))
8427 + return;
8428 +
8429 +- desc->irq_data.chip->irq_mask(&desc->irq_data);
8430 ++ /*
8431 ++ * Be careful vs. already masked interrupts. If this is a
8432 ++ * threaded interrupt with ONESHOT set, we can end up with an
8433 ++ * interrupt storm.
8434 ++ */
8435 ++ masked = desc->status & IRQ_MASKED;
8436 ++ if (!masked)
8437 ++ desc->irq_data.chip->irq_mask(&desc->irq_data);
8438 + move_masked_irq(irq);
8439 +- desc->irq_data.chip->irq_unmask(&desc->irq_data);
8440 ++ if (!masked)
8441 ++ desc->irq_data.chip->irq_unmask(&desc->irq_data);
8442 + }
8443 +-
8444 +diff --git a/kernel/perf_event.c b/kernel/perf_event.c
8445 +index 2870fee..64668bd 100644
8446 +--- a/kernel/perf_event.c
8447 ++++ b/kernel/perf_event.c
8448 +@@ -1872,8 +1872,7 @@ static int alloc_callchain_buffers(void)
8449 + * accessed from NMI. Use a temporary manual per cpu allocation
8450 + * until that gets sorted out.
8451 + */
8452 +- size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
8453 +- num_possible_cpus();
8454 ++ size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
8455 +
8456 + entries = kzalloc(size, GFP_KERNEL);
8457 + if (!entries)
8458 +@@ -2101,14 +2100,11 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
8459 + unsigned long flags;
8460 + int ctxn, err;
8461 +
8462 +- if (!task && cpu != -1) {
8463 ++ if (!task) {
8464 + /* Must be root to operate on a CPU event: */
8465 + if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
8466 + return ERR_PTR(-EACCES);
8467 +
8468 +- if (cpu < 0 || cpu >= nr_cpumask_bits)
8469 +- return ERR_PTR(-EINVAL);
8470 +-
8471 + /*
8472 + * We could be clever and allow to attach a event to an
8473 + * offline CPU and activate it when the CPU comes up, but
8474 +@@ -5305,6 +5301,11 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
8475 + struct hw_perf_event *hwc;
8476 + long err;
8477 +
8478 ++ if ((unsigned)cpu >= nr_cpu_ids) {
8479 ++ if (!task || cpu != -1)
8480 ++ return ERR_PTR(-EINVAL);
8481 ++ }
8482 ++
8483 + event = kzalloc(sizeof(*event), GFP_KERNEL);
8484 + if (!event)
8485 + return ERR_PTR(-ENOMEM);
8486 +@@ -5353,7 +5354,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
8487 +
8488 + if (!overflow_handler && parent_event)
8489 + overflow_handler = parent_event->overflow_handler;
8490 +-
8491 ++
8492 + event->overflow_handler = overflow_handler;
8493 +
8494 + if (attr->disabled)
8495 +diff --git a/kernel/ptrace.c b/kernel/ptrace.c
8496 +index 99bbaa3..1708b1e 100644
8497 +--- a/kernel/ptrace.c
8498 ++++ b/kernel/ptrace.c
8499 +@@ -313,7 +313,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
8500 + child->exit_code = data;
8501 + dead = __ptrace_detach(current, child);
8502 + if (!child->exit_state)
8503 +- wake_up_process(child);
8504 ++ wake_up_state(child, TASK_TRACED | TASK_STOPPED);
8505 + }
8506 + write_unlock_irq(&tasklist_lock);
8507 +
8508 +diff --git a/kernel/sched.c b/kernel/sched.c
8509 +index 297d1a0..2f912b7 100644
8510 +--- a/kernel/sched.c
8511 ++++ b/kernel/sched.c
8512 +@@ -607,6 +607,9 @@ static inline struct task_group *task_group(struct task_struct *p)
8513 + {
8514 + struct cgroup_subsys_state *css;
8515 +
8516 ++ if (p->flags & PF_EXITING)
8517 ++ return &root_task_group;
8518 ++
8519 + css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
8520 + lockdep_is_held(&task_rq(p)->lock));
8521 + return container_of(css, struct task_group, css);
8522 +@@ -4526,7 +4529,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
8523 + * This waits for either a completion of a specific task to be signaled or for a
8524 + * specified timeout to expire. It is interruptible. The timeout is in jiffies.
8525 + */
8526 +-unsigned long __sched
8527 ++long __sched
8528 + wait_for_completion_interruptible_timeout(struct completion *x,
8529 + unsigned long timeout)
8530 + {
8531 +@@ -4559,7 +4562,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
8532 + * signaled or for a specified timeout to expire. It can be
8533 + * interrupted by a kill signal. The timeout is in jiffies.
8534 + */
8535 +-unsigned long __sched
8536 ++long __sched
8537 + wait_for_completion_killable_timeout(struct completion *x,
8538 + unsigned long timeout)
8539 + {
8540 +@@ -9178,6 +9181,20 @@ cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
8541 + }
8542 + }
8543 +
8544 ++static void
8545 ++cpu_cgroup_exit(struct cgroup_subsys *ss, struct task_struct *task)
8546 ++{
8547 ++ /*
8548 ++ * cgroup_exit() is called in the copy_process() failure path.
8549 ++ * Ignore this case since the task hasn't ran yet, this avoids
8550 ++ * trying to poke a half freed task state from generic code.
8551 ++ */
8552 ++ if (!(task->flags & PF_EXITING))
8553 ++ return;
8554 ++
8555 ++ sched_move_task(task);
8556 ++}
8557 ++
8558 + #ifdef CONFIG_FAIR_GROUP_SCHED
8559 + static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
8560 + u64 shareval)
8561 +@@ -9250,6 +9267,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
8562 + .destroy = cpu_cgroup_destroy,
8563 + .can_attach = cpu_cgroup_can_attach,
8564 + .attach = cpu_cgroup_attach,
8565 ++ .exit = cpu_cgroup_exit,
8566 + .populate = cpu_cgroup_populate,
8567 + .subsys_id = cpu_cgroup_subsys_id,
8568 + .early_init = 1,
8569 +diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
8570 +index bea7d79..c5f1d23 100644
8571 +--- a/kernel/sched_rt.c
8572 ++++ b/kernel/sched_rt.c
8573 +@@ -606,7 +606,7 @@ static void update_curr_rt(struct rq *rq)
8574 + struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
8575 + u64 delta_exec;
8576 +
8577 +- if (!task_has_rt_policy(curr))
8578 ++ if (curr->sched_class != &rt_sched_class)
8579 + return;
8580 +
8581 + delta_exec = rq->clock_task - curr->se.exec_start;
8582 +diff --git a/kernel/smp.c b/kernel/smp.c
8583 +index 12ed8b0..8448f8f 100644
8584 +--- a/kernel/smp.c
8585 ++++ b/kernel/smp.c
8586 +@@ -194,6 +194,24 @@ void generic_smp_call_function_interrupt(void)
8587 + list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
8588 + int refs;
8589 +
8590 ++ /*
8591 ++ * Since we walk the list without any locks, we might
8592 ++ * see an entry that was completed, removed from the
8593 ++ * list and is in the process of being reused.
8594 ++ *
8595 ++ * We must check that the cpu is in the cpumask before
8596 ++ * checking the refs, and both must be set before
8597 ++ * executing the callback on this cpu.
8598 ++ */
8599 ++
8600 ++ if (!cpumask_test_cpu(cpu, data->cpumask))
8601 ++ continue;
8602 ++
8603 ++ smp_rmb();
8604 ++
8605 ++ if (atomic_read(&data->refs) == 0)
8606 ++ continue;
8607 ++
8608 + if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
8609 + continue;
8610 +
8611 +@@ -202,6 +220,8 @@ void generic_smp_call_function_interrupt(void)
8612 + refs = atomic_dec_return(&data->refs);
8613 + WARN_ON(refs < 0);
8614 + if (!refs) {
8615 ++ WARN_ON(!cpumask_empty(data->cpumask));
8616 ++
8617 + raw_spin_lock(&call_function.lock);
8618 + list_del_rcu(&data->csd.list);
8619 + raw_spin_unlock(&call_function.lock);
8620 +@@ -453,11 +473,21 @@ void smp_call_function_many(const struct cpumask *mask,
8621 +
8622 + data = &__get_cpu_var(cfd_data);
8623 + csd_lock(&data->csd);
8624 ++ BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
8625 +
8626 + data->csd.func = func;
8627 + data->csd.info = info;
8628 + cpumask_and(data->cpumask, mask, cpu_online_mask);
8629 + cpumask_clear_cpu(this_cpu, data->cpumask);
8630 ++
8631 ++ /*
8632 ++ * To ensure the interrupt handler gets an complete view
8633 ++ * we order the cpumask and refs writes and order the read
8634 ++ * of them in the interrupt handler. In addition we may
8635 ++ * only clear our own cpu bit from the mask.
8636 ++ */
8637 ++ smp_wmb();
8638 ++
8639 + atomic_set(&data->refs, cpumask_weight(data->cpumask));
8640 +
8641 + raw_spin_lock_irqsave(&call_function.lock, flags);
8642 +diff --git a/kernel/sys.c b/kernel/sys.c
8643 +index 7f5a0cd..66136ca 100644
8644 +--- a/kernel/sys.c
8645 ++++ b/kernel/sys.c
8646 +@@ -1377,7 +1377,8 @@ static int check_prlimit_permission(struct task_struct *task)
8647 + const struct cred *cred = current_cred(), *tcred;
8648 +
8649 + tcred = __task_cred(task);
8650 +- if ((cred->uid != tcred->euid ||
8651 ++ if (current != task &&
8652 ++ (cred->uid != tcred->euid ||
8653 + cred->uid != tcred->suid ||
8654 + cred->uid != tcred->uid ||
8655 + cred->gid != tcred->egid ||
8656 +diff --git a/kernel/sysctl.c b/kernel/sysctl.c
8657 +index 5abfa15..4e17828 100644
8658 +--- a/kernel/sysctl.c
8659 ++++ b/kernel/sysctl.c
8660 +@@ -169,7 +169,8 @@ static int proc_taint(struct ctl_table *table, int write,
8661 + #endif
8662 +
8663 + #ifdef CONFIG_MAGIC_SYSRQ
8664 +-static int __sysrq_enabled; /* Note: sysrq code ises it's own private copy */
8665 ++/* Note: sysrq code uses it's own private copy */
8666 ++static int __sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
8667 +
8668 + static int sysrq_sysctl_handler(ctl_table *table, int write,
8669 + void __user *buffer, size_t *lenp,
8670 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
8671 +index f8cf959..dc53ecb 100644
8672 +--- a/kernel/trace/trace.c
8673 ++++ b/kernel/trace/trace.c
8674 +@@ -1313,12 +1313,10 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
8675 +
8676 + __this_cpu_inc(user_stack_count);
8677 +
8678 +-
8679 +-
8680 + event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
8681 + sizeof(*entry), flags, pc);
8682 + if (!event)
8683 +- return;
8684 ++ goto out_drop_count;
8685 + entry = ring_buffer_event_data(event);
8686 +
8687 + entry->tgid = current->tgid;
8688 +@@ -1333,8 +1331,8 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
8689 + if (!filter_check_discard(call, entry, buffer, event))
8690 + ring_buffer_unlock_commit(buffer, event);
8691 +
8692 ++ out_drop_count:
8693 + __this_cpu_dec(user_stack_count);
8694 +-
8695 + out:
8696 + preempt_enable();
8697 + }
8698 +diff --git a/kernel/watchdog.c b/kernel/watchdog.c
8699 +index 5b08215..32a9ce5 100644
8700 +--- a/kernel/watchdog.c
8701 ++++ b/kernel/watchdog.c
8702 +@@ -27,7 +27,7 @@
8703 + #include <asm/irq_regs.h>
8704 + #include <linux/perf_event.h>
8705 +
8706 +-int watchdog_enabled;
8707 ++int watchdog_enabled = 1;
8708 + int __read_mostly softlockup_thresh = 60;
8709 +
8710 + static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
8711 +@@ -43,9 +43,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
8712 + static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
8713 + #endif
8714 +
8715 +-static int no_watchdog;
8716 +-
8717 +-
8718 + /* boot commands */
8719 + /*
8720 + * Should we panic when a soft-lockup or hard-lockup occurs:
8721 +@@ -75,7 +72,7 @@ __setup("softlockup_panic=", softlockup_panic_setup);
8722 +
8723 + static int __init nowatchdog_setup(char *str)
8724 + {
8725 +- no_watchdog = 1;
8726 ++ watchdog_enabled = 0;
8727 + return 1;
8728 + }
8729 + __setup("nowatchdog", nowatchdog_setup);
8730 +@@ -83,7 +80,7 @@ __setup("nowatchdog", nowatchdog_setup);
8731 + /* deprecated */
8732 + static int __init nosoftlockup_setup(char *str)
8733 + {
8734 +- no_watchdog = 1;
8735 ++ watchdog_enabled = 0;
8736 + return 1;
8737 + }
8738 + __setup("nosoftlockup", nosoftlockup_setup);
8739 +@@ -430,9 +427,6 @@ static int watchdog_enable(int cpu)
8740 + wake_up_process(p);
8741 + }
8742 +
8743 +- /* if any cpu succeeds, watchdog is considered enabled for the system */
8744 +- watchdog_enabled = 1;
8745 +-
8746 + return 0;
8747 + }
8748 +
8749 +@@ -460,12 +454,16 @@ static void watchdog_disable(int cpu)
8750 + static void watchdog_enable_all_cpus(void)
8751 + {
8752 + int cpu;
8753 +- int result = 0;
8754 ++
8755 ++ watchdog_enabled = 0;
8756 +
8757 + for_each_online_cpu(cpu)
8758 +- result += watchdog_enable(cpu);
8759 ++ if (!watchdog_enable(cpu))
8760 ++ /* if any cpu succeeds, watchdog is considered
8761 ++ enabled for the system */
8762 ++ watchdog_enabled = 1;
8763 +
8764 +- if (result)
8765 ++ if (!watchdog_enabled)
8766 + printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
8767 +
8768 + }
8769 +@@ -474,9 +472,6 @@ static void watchdog_disable_all_cpus(void)
8770 + {
8771 + int cpu;
8772 +
8773 +- if (no_watchdog)
8774 +- return;
8775 +-
8776 + for_each_online_cpu(cpu)
8777 + watchdog_disable(cpu);
8778 +
8779 +@@ -496,10 +491,12 @@ int proc_dowatchdog_enabled(struct ctl_table *table, int write,
8780 + {
8781 + proc_dointvec(table, write, buffer, length, ppos);
8782 +
8783 +- if (watchdog_enabled)
8784 +- watchdog_enable_all_cpus();
8785 +- else
8786 +- watchdog_disable_all_cpus();
8787 ++ if (write) {
8788 ++ if (watchdog_enabled)
8789 ++ watchdog_enable_all_cpus();
8790 ++ else
8791 ++ watchdog_disable_all_cpus();
8792 ++ }
8793 + return 0;
8794 + }
8795 +
8796 +@@ -528,7 +525,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
8797 + break;
8798 + case CPU_ONLINE:
8799 + case CPU_ONLINE_FROZEN:
8800 +- err = watchdog_enable(hotcpu);
8801 ++ if (watchdog_enabled)
8802 ++ err = watchdog_enable(hotcpu);
8803 + break;
8804 + #ifdef CONFIG_HOTPLUG_CPU
8805 + case CPU_UP_CANCELED:
8806 +@@ -553,9 +551,6 @@ static int __init spawn_watchdog_task(void)
8807 + void *cpu = (void *)(long)smp_processor_id();
8808 + int err;
8809 +
8810 +- if (no_watchdog)
8811 +- return 0;
8812 +-
8813 + err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
8814 + WARN_ON(notifier_to_errno(err));
8815 +
8816 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
8817 +index e785b0f..4be7fa5 100644
8818 +--- a/kernel/workqueue.c
8819 ++++ b/kernel/workqueue.c
8820 +@@ -1806,7 +1806,7 @@ __acquires(&gcwq->lock)
8821 + spin_unlock_irq(&gcwq->lock);
8822 +
8823 + work_clear_pending(work);
8824 +- lock_map_acquire(&cwq->wq->lockdep_map);
8825 ++ lock_map_acquire_read(&cwq->wq->lockdep_map);
8826 + lock_map_acquire(&lockdep_map);
8827 + trace_workqueue_execute_start(work);
8828 + f(work);
8829 +@@ -2350,8 +2350,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
8830 + insert_wq_barrier(cwq, barr, work, worker);
8831 + spin_unlock_irq(&gcwq->lock);
8832 +
8833 +- lock_map_acquire(&cwq->wq->lockdep_map);
8834 ++ /*
8835 ++ * If @max_active is 1 or rescuer is in use, flushing another work
8836 ++ * item on the same workqueue may lead to deadlock. Make sure the
8837 ++ * flusher is not running on the same workqueue by verifying write
8838 ++ * access.
8839 ++ */
8840 ++ if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
8841 ++ lock_map_acquire(&cwq->wq->lockdep_map);
8842 ++ else
8843 ++ lock_map_acquire_read(&cwq->wq->lockdep_map);
8844 + lock_map_release(&cwq->wq->lockdep_map);
8845 ++
8846 + return true;
8847 + already_gone:
8848 + spin_unlock_irq(&gcwq->lock);
8849 +diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
8850 +index 3094318..b335acb 100644
8851 +--- a/lib/dynamic_debug.c
8852 ++++ b/lib/dynamic_debug.c
8853 +@@ -141,11 +141,10 @@ static void ddebug_change(const struct ddebug_query *query,
8854 + else if (!dp->flags)
8855 + dt->num_enabled++;
8856 + dp->flags = newflags;
8857 +- if (newflags) {
8858 +- jump_label_enable(&dp->enabled);
8859 +- } else {
8860 +- jump_label_disable(&dp->enabled);
8861 +- }
8862 ++ if (newflags)
8863 ++ dp->enabled = 1;
8864 ++ else
8865 ++ dp->enabled = 0;
8866 + if (verbose)
8867 + printk(KERN_INFO
8868 + "ddebug: changed %s:%d [%s]%s %s\n",
8869 +diff --git a/lib/kref.c b/lib/kref.c
8870 +index d3d227a..e7a6e10 100644
8871 +--- a/lib/kref.c
8872 ++++ b/lib/kref.c
8873 +@@ -37,6 +37,18 @@ void kref_get(struct kref *kref)
8874 + }
8875 +
8876 + /**
8877 ++ * kref_test_and_get - increment refcount for object only if refcount is not
8878 ++ * zero.
8879 ++ * @kref: object.
8880 ++ *
8881 ++ * Return non-zero if the refcount was incremented, 0 otherwise
8882 ++ */
8883 ++int kref_test_and_get(struct kref *kref)
8884 ++{
8885 ++ return atomic_inc_not_zero(&kref->refcount);
8886 ++}
8887 ++
8888 ++/**
8889 + * kref_put - decrement refcount for object.
8890 + * @kref: object.
8891 + * @release: pointer to the function that will clean up the object when the
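
The new kref_test_and_get() above is a thin wrapper around atomic_inc_not_zero(): the count is bumped only if it has not already dropped to zero, which is the usual pattern for taking a reference on an object reached through a lookup structure that can race with the final put. A minimal userspace sketch of the same idea using C11 atomics (struct obj, obj_test_and_get and obj_put are invented for the example; this is not the kernel implementation):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_int refcount;    /* plays the role of kref.refcount */
        int payload;
};

/* Increment only if the count is still non-zero; mirrors atomic_inc_not_zero(). */
static int obj_test_and_get(struct obj *o)
{
        int old = atomic_load(&o->refcount);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
                        return 1;       /* got a reference */
        }
        return 0;               /* object already on its way to being freed */
}

static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub(&o->refcount, 1) == 1) {
                printf("last reference dropped, freeing\n");
                free(o);
        }
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->refcount, 1);
        o->payload = 42;

        if (obj_test_and_get(o))        /* lookup path: take a reference only if still live */
                printf("payload = %d\n", o->payload);

        obj_put(o);     /* drop the lookup reference */
        obj_put(o);     /* drop the original reference; frees the object */
        return 0;
}
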
8892 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
8893 +index 00bb8a6..d7a1e3d 100644
8894 +--- a/mm/memcontrol.c
8895 ++++ b/mm/memcontrol.c
8896 +@@ -1842,6 +1842,7 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
8897 + if (likely(!ret))
8898 + return CHARGE_OK;
8899 +
8900 ++ res_counter_uncharge(&mem->res, csize);
8901 + mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
8902 + flags |= MEM_CGROUP_RECLAIM_NOSWAP;
8903 + } else
8904 +@@ -4929,9 +4930,9 @@ struct cgroup_subsys mem_cgroup_subsys = {
8905 + static int __init enable_swap_account(char *s)
8906 + {
8907 + /* consider enabled if no parameter or 1 is given */
8908 +- if (!s || !strcmp(s, "1"))
8909 ++ if (!(*s) || !strcmp(s, "=1"))
8910 + really_do_swap_account = 1;
8911 +- else if (!strcmp(s, "0"))
8912 ++ else if (!strcmp(s, "=0"))
8913 + really_do_swap_account = 0;
8914 + return 1;
8915 + }
8916 +@@ -4939,7 +4940,7 @@ __setup("swapaccount", enable_swap_account);
8917 +
8918 + static int __init disable_swap_account(char *s)
8919 + {
8920 +- enable_swap_account("0");
8921 ++ enable_swap_account("=0");
8922 + return 1;
8923 + }
8924 + __setup("noswapaccount", disable_swap_account);
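
The swapaccount fix above matches how the argument actually reaches the handler on this kernel: for "swapaccount=1" the callback sees the string starting at the "=", so the comparison has to be against "=1"/"=0", and a bare "swapaccount" arrives as an empty string. A small standalone sketch of the corrected parse (parse_swapaccount and the test harness are invented; this is not the kernel handler):

#include <stdio.h>
#include <string.h>

static int really_do_swap_account;

/* Mirrors the fixed enable_swap_account(): s is "" for a bare
 * "swapaccount", or "=0"/"=1" when a value was given. */
static void parse_swapaccount(const char *s)
{
        if (!*s || !strcmp(s, "=1"))
                really_do_swap_account = 1;     /* enabled by default or explicitly */
        else if (!strcmp(s, "=0"))
                really_do_swap_account = 0;     /* explicitly disabled */
}

int main(void)
{
        const char *cases[] = { "", "=1", "=0" };

        for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
                parse_swapaccount(cases[i]);
                printf("\"swapaccount%s\" -> %d\n", cases[i], really_do_swap_account);
        }
        return 0;
}
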
8925 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
8926 +index 2c6523a..83163c0 100644
8927 +--- a/mm/memory_hotplug.c
8928 ++++ b/mm/memory_hotplug.c
8929 +@@ -407,6 +407,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
8930 + int ret;
8931 + struct memory_notify arg;
8932 +
8933 ++ lock_memory_hotplug();
8934 + arg.start_pfn = pfn;
8935 + arg.nr_pages = nr_pages;
8936 + arg.status_change_nid = -1;
8937 +@@ -419,6 +420,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
8938 + ret = notifier_to_errno(ret);
8939 + if (ret) {
8940 + memory_notify(MEM_CANCEL_ONLINE, &arg);
8941 ++ unlock_memory_hotplug();
8942 + return ret;
8943 + }
8944 + /*
8945 +@@ -443,6 +445,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
8946 + printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
8947 + nr_pages, pfn);
8948 + memory_notify(MEM_CANCEL_ONLINE, &arg);
8949 ++ unlock_memory_hotplug();
8950 + return ret;
8951 + }
8952 +
8953 +@@ -467,6 +470,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
8954 +
8955 + if (onlined_pages)
8956 + memory_notify(MEM_ONLINE, &arg);
8957 ++ unlock_memory_hotplug();
8958 +
8959 + return 0;
8960 + }
8961 +diff --git a/mm/migrate.c b/mm/migrate.c
8962 +index 6ae8a66..87e7e3a 100644
8963 +--- a/mm/migrate.c
8964 ++++ b/mm/migrate.c
8965 +@@ -246,7 +246,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
8966 +
8967 + expected_count = 2 + page_has_private(page);
8968 + if (page_count(page) != expected_count ||
8969 +- (struct page *)radix_tree_deref_slot(pslot) != page) {
8970 ++ radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
8971 + spin_unlock_irq(&mapping->tree_lock);
8972 + return -EAGAIN;
8973 + }
8974 +@@ -318,7 +318,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
8975 +
8976 + expected_count = 2 + page_has_private(page);
8977 + if (page_count(page) != expected_count ||
8978 +- (struct page *)radix_tree_deref_slot(pslot) != page) {
8979 ++ radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
8980 + spin_unlock_irq(&mapping->tree_lock);
8981 + return -EAGAIN;
8982 + }
8983 +@@ -620,7 +620,6 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
8984 + int *result = NULL;
8985 + struct page *newpage = get_new_page(page, private, &result);
8986 + int remap_swapcache = 1;
8987 +- int rcu_locked = 0;
8988 + int charge = 0;
8989 + struct mem_cgroup *mem = NULL;
8990 + struct anon_vma *anon_vma = NULL;
8991 +@@ -672,20 +671,26 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
8992 + /*
8993 + * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
8994 + 	 * we cannot notice that anon_vma is freed while we migrate a page.
8995 +- * This rcu_read_lock() delays freeing anon_vma pointer until the end
8996 ++ * This get_anon_vma() delays freeing anon_vma pointer until the end
8997 + * of migration. File cache pages are no problem because of page_lock()
8998 + * File Caches may use write_page() or lock_page() in migration, then,
8999 + * just care Anon page here.
9000 + */
9001 + if (PageAnon(page)) {
9002 +- rcu_read_lock();
9003 +- rcu_locked = 1;
9004 +-
9005 +- /* Determine how to safely use anon_vma */
9006 +- if (!page_mapped(page)) {
9007 +- if (!PageSwapCache(page))
9008 +- goto rcu_unlock;
9009 +-
9010 ++ /*
9011 ++ * Only page_lock_anon_vma() understands the subtleties of
9012 ++ * getting a hold on an anon_vma from outside one of its mms.
9013 ++ */
9014 ++ anon_vma = page_lock_anon_vma(page);
9015 ++ if (anon_vma) {
9016 ++ /*
9017 ++ * Take a reference count on the anon_vma if the
9018 ++ * page is mapped so that it is guaranteed to
9019 ++ * exist when the page is remapped later
9020 ++ */
9021 ++ get_anon_vma(anon_vma);
9022 ++ page_unlock_anon_vma(anon_vma);
9023 ++ } else if (PageSwapCache(page)) {
9024 + /*
9025 + * We cannot be sure that the anon_vma of an unmapped
9026 + * swapcache page is safe to use because we don't
9027 +@@ -700,13 +705,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
9028 + */
9029 + remap_swapcache = 0;
9030 + } else {
9031 +- /*
9032 +- * Take a reference count on the anon_vma if the
9033 +- * page is mapped so that it is guaranteed to
9034 +- * exist when the page is remapped later
9035 +- */
9036 +- anon_vma = page_anon_vma(page);
9037 +- get_anon_vma(anon_vma);
9038 ++ goto uncharge;
9039 + }
9040 + }
9041 +
9042 +@@ -723,16 +722,10 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
9043 + * free the metadata, so the page can be freed.
9044 + */
9045 + if (!page->mapping) {
9046 +- if (!PageAnon(page) && page_has_private(page)) {
9047 +- /*
9048 +- * Go direct to try_to_free_buffers() here because
9049 +- * a) that's what try_to_release_page() would do anyway
9050 +- * b) we may be under rcu_read_lock() here, so we can't
9051 +- * use GFP_KERNEL which is what try_to_release_page()
9052 +- * needs to be effective.
9053 +- */
9054 ++ VM_BUG_ON(PageAnon(page));
9055 ++ if (page_has_private(page)) {
9056 + try_to_free_buffers(page);
9057 +- goto rcu_unlock;
9058 ++ goto uncharge;
9059 + }
9060 + goto skip_unmap;
9061 + }
9062 +@@ -746,14 +739,11 @@ skip_unmap:
9063 +
9064 + if (rc && remap_swapcache)
9065 + remove_migration_ptes(page, page);
9066 +-rcu_unlock:
9067 +
9068 + /* Drop an anon_vma reference if we took one */
9069 + if (anon_vma)
9070 + drop_anon_vma(anon_vma);
9071 +
9072 +- if (rcu_locked)
9073 +- rcu_read_unlock();
9074 + uncharge:
9075 + if (!charge)
9076 + mem_cgroup_end_migration(mem, page, newpage);
9077 +@@ -815,7 +805,6 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
9078 + int rc = 0;
9079 + int *result = NULL;
9080 + struct page *new_hpage = get_new_page(hpage, private, &result);
9081 +- int rcu_locked = 0;
9082 + struct anon_vma *anon_vma = NULL;
9083 +
9084 + if (!new_hpage)
9085 +@@ -830,12 +819,10 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
9086 + }
9087 +
9088 + if (PageAnon(hpage)) {
9089 +- rcu_read_lock();
9090 +- rcu_locked = 1;
9091 +-
9092 +- if (page_mapped(hpage)) {
9093 +- anon_vma = page_anon_vma(hpage);
9094 +- atomic_inc(&anon_vma->external_refcount);
9095 ++ anon_vma = page_lock_anon_vma(hpage);
9096 ++ if (anon_vma) {
9097 ++ get_anon_vma(anon_vma);
9098 ++ page_unlock_anon_vma(anon_vma);
9099 + }
9100 + }
9101 +
9102 +@@ -847,16 +834,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
9103 + if (rc)
9104 + remove_migration_ptes(hpage, hpage);
9105 +
9106 +- if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount,
9107 +- &anon_vma->lock)) {
9108 +- int empty = list_empty(&anon_vma->head);
9109 +- spin_unlock(&anon_vma->lock);
9110 +- if (empty)
9111 +- anon_vma_free(anon_vma);
9112 +- }
9113 +-
9114 +- if (rcu_locked)
9115 +- rcu_read_unlock();
9116 ++ if (anon_vma)
9117 ++ drop_anon_vma(anon_vma);
9118 + out:
9119 + unlock_page(hpage);
9120 +
9121 +diff --git a/mm/mmzone.c b/mm/mmzone.c
9122 +index e35bfb8..f5b7d17 100644
9123 +--- a/mm/mmzone.c
9124 ++++ b/mm/mmzone.c
9125 +@@ -87,24 +87,3 @@ int memmap_valid_within(unsigned long pfn,
9126 + return 1;
9127 + }
9128 + #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
9129 +-
9130 +-#ifdef CONFIG_SMP
9131 +-/* Called when a more accurate view of NR_FREE_PAGES is needed */
9132 +-unsigned long zone_nr_free_pages(struct zone *zone)
9133 +-{
9134 +- unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);
9135 +-
9136 +- /*
9137 +- * While kswapd is awake, it is considered the zone is under some
9138 +- * memory pressure. Under pressure, there is a risk that
9139 +- * per-cpu-counter-drift will allow the min watermark to be breached
9140 +- * potentially causing a live-lock. While kswapd is awake and
9141 +- * free pages are low, get a better estimate for free pages
9142 +- */
9143 +- if (nr_free_pages < zone->percpu_drift_mark &&
9144 +- !waitqueue_active(&zone->zone_pgdat->kswapd_wait))
9145 +- return zone_page_state_snapshot(zone, NR_FREE_PAGES);
9146 +-
9147 +- return nr_free_pages;
9148 +-}
9149 +-#endif /* CONFIG_SMP */
9150 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
9151 +index ff7e158..91b6d8c 100644
9152 +--- a/mm/page_alloc.c
9153 ++++ b/mm/page_alloc.c
9154 +@@ -1460,24 +1460,24 @@ static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
9155 + #endif /* CONFIG_FAIL_PAGE_ALLOC */
9156 +
9157 + /*
9158 +- * Return 1 if free pages are above 'mark'. This takes into account the order
9159 ++ * Return true if free pages are above 'mark'. This takes into account the order
9160 + * of the allocation.
9161 + */
9162 +-int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
9163 +- int classzone_idx, int alloc_flags)
9164 ++static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
9165 ++ int classzone_idx, int alloc_flags, long free_pages)
9166 + {
9167 + 	/* free_pages may go negative - that's OK */
9168 + long min = mark;
9169 +- long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
9170 + int o;
9171 +
9172 ++ free_pages -= (1 << order) + 1;
9173 + if (alloc_flags & ALLOC_HIGH)
9174 + min -= min / 2;
9175 + if (alloc_flags & ALLOC_HARDER)
9176 + min -= min / 4;
9177 +
9178 + if (free_pages <= min + z->lowmem_reserve[classzone_idx])
9179 +- return 0;
9180 ++ return false;
9181 + for (o = 0; o < order; o++) {
9182 + /* At the next order, this order's pages become unavailable */
9183 + free_pages -= z->free_area[o].nr_free << o;
9184 +@@ -1486,9 +1486,28 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
9185 + min >>= 1;
9186 +
9187 + if (free_pages <= min)
9188 +- return 0;
9189 ++ return false;
9190 + }
9191 +- return 1;
9192 ++ return true;
9193 ++}
9194 ++
9195 ++bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
9196 ++ int classzone_idx, int alloc_flags)
9197 ++{
9198 ++ return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
9199 ++ zone_page_state(z, NR_FREE_PAGES));
9200 ++}
9201 ++
9202 ++bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
9203 ++ int classzone_idx, int alloc_flags)
9204 ++{
9205 ++ long free_pages = zone_page_state(z, NR_FREE_PAGES);
9206 ++
9207 ++ if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
9208 ++ free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
9209 ++
9210 ++ return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
9211 ++ free_pages);
9212 + }
9213 +
9214 + #ifdef CONFIG_NUMA
9215 +@@ -2442,7 +2461,7 @@ void show_free_areas(void)
9216 + " all_unreclaimable? %s"
9217 + "\n",
9218 + zone->name,
9219 +- K(zone_nr_free_pages(zone)),
9220 ++ K(zone_page_state(zone, NR_FREE_PAGES)),
9221 + K(min_wmark_pages(zone)),
9222 + K(low_wmark_pages(zone)),
9223 + K(high_wmark_pages(zone)),
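
The page allocator change above splits the watermark check so callers can pass in whichever free-page estimate they trust: zone_watermark_ok() keeps the cheap per-cpu counter, while zone_watermark_ok_safe() falls back to the exact snapshot once the drift mark is crossed. The order-aware part of the check is unchanged: each lower order's pages are removed from the pool and the watermark is halved per order. Below is a simplified standalone sketch of that loop (it is not bit-exact with the kernel's off-by-one conventions, and the free_area counts are invented):

#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER 11

/* Simplified version of __zone_watermark_ok(): free_pages is whatever
 * estimate the caller trusts (per-cpu or exact snapshot). */
static bool watermark_ok(long free_pages, int order, long mark,
                         long lowmem_reserve, const long nr_free[MAX_ORDER])
{
        long min = mark;

        free_pages -= 1L << order;              /* assume the allocation succeeds */
        if (free_pages <= min + lowmem_reserve)
                return false;

        for (int o = 0; o < order; o++) {
                /* At the next order, this order's pages become unavailable */
                free_pages -= nr_free[o] << o;
                /* Require fewer higher order pages to be free */
                min >>= 1;
                if (free_pages <= min)
                        return false;
        }
        return true;
}

int main(void)
{
        /* Plenty of order-0 pages, almost nothing at higher orders. */
        long nr_free[MAX_ORDER] = { 2000, 8, 2 };
        long total = 0;

        for (int o = 0; o < MAX_ORDER; o++)
                total += nr_free[o] << o;

        printf("order-0 request ok: %d\n", watermark_ok(total, 0, 256, 0, nr_free));
        printf("order-3 request ok: %d\n", watermark_ok(total, 3, 256, 0, nr_free));
        return 0;
}
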
9224 +diff --git a/mm/slub.c b/mm/slub.c
9225 +index bec0e35..96e6907 100644
9226 +--- a/mm/slub.c
9227 ++++ b/mm/slub.c
9228 +@@ -3821,7 +3821,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
9229 + }
9230 + }
9231 +
9232 +- down_read(&slub_lock);
9233 ++ lock_memory_hotplug();
9234 + #ifdef CONFIG_SLUB_DEBUG
9235 + if (flags & SO_ALL) {
9236 + for_each_node_state(node, N_NORMAL_MEMORY) {
9237 +@@ -3862,7 +3862,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
9238 + x += sprintf(buf + x, " N%d=%lu",
9239 + node, nodes[node]);
9240 + #endif
9241 +- up_read(&slub_lock);
9242 ++ unlock_memory_hotplug();
9243 + kfree(nodes);
9244 + return x + sprintf(buf + x, "\n");
9245 + }
9246 +diff --git a/mm/vmscan.c b/mm/vmscan.c
9247 +index 9ca587c..5da4295 100644
9248 +--- a/mm/vmscan.c
9249 ++++ b/mm/vmscan.c
9250 +@@ -2143,7 +2143,7 @@ static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
9251 + if (zone->all_unreclaimable)
9252 + continue;
9253 +
9254 +- if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
9255 ++ if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
9256 + 0, 0))
9257 + return 1;
9258 + }
9259 +@@ -2230,7 +2230,7 @@ loop_again:
9260 + shrink_active_list(SWAP_CLUSTER_MAX, zone,
9261 + &sc, priority, 0);
9262 +
9263 +- if (!zone_watermark_ok(zone, order,
9264 ++ if (!zone_watermark_ok_safe(zone, order,
9265 + high_wmark_pages(zone), 0, 0)) {
9266 + end_zone = i;
9267 + break;
9268 +@@ -2276,7 +2276,7 @@ loop_again:
9269 + * We put equal pressure on every zone, unless one
9270 + * zone has way too many pages free already.
9271 + */
9272 +- if (!zone_watermark_ok(zone, order,
9273 ++ if (!zone_watermark_ok_safe(zone, order,
9274 + 8*high_wmark_pages(zone), end_zone, 0))
9275 + shrink_zone(priority, zone, &sc);
9276 + reclaim_state->reclaimed_slab = 0;
9277 +@@ -2297,7 +2297,7 @@ loop_again:
9278 + total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
9279 + sc.may_writepage = 1;
9280 +
9281 +- if (!zone_watermark_ok(zone, order,
9282 ++ if (!zone_watermark_ok_safe(zone, order,
9283 + high_wmark_pages(zone), end_zone, 0)) {
9284 + all_zones_ok = 0;
9285 + /*
9286 +@@ -2305,7 +2305,7 @@ loop_again:
9287 + * means that we have a GFP_ATOMIC allocation
9288 + * failure risk. Hurry up!
9289 + */
9290 +- if (!zone_watermark_ok(zone, order,
9291 ++ if (!zone_watermark_ok_safe(zone, order,
9292 + min_wmark_pages(zone), end_zone, 0))
9293 + has_under_min_watermark_zone = 1;
9294 + } else {
9295 +@@ -2448,7 +2448,9 @@ static int kswapd(void *p)
9296 + */
9297 + if (!sleeping_prematurely(pgdat, order, remaining)) {
9298 + trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
9299 ++ restore_pgdat_percpu_threshold(pgdat);
9300 + schedule();
9301 ++ reduce_pgdat_percpu_threshold(pgdat);
9302 + } else {
9303 + if (remaining)
9304 + count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
9305 +@@ -2487,16 +2489,17 @@ void wakeup_kswapd(struct zone *zone, int order)
9306 + if (!populated_zone(zone))
9307 + return;
9308 +
9309 +- pgdat = zone->zone_pgdat;
9310 +- if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
9311 ++ if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
9312 + return;
9313 ++ pgdat = zone->zone_pgdat;
9314 + if (pgdat->kswapd_max_order < order)
9315 + pgdat->kswapd_max_order = order;
9316 +- trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
9317 +- if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
9318 +- return;
9319 + if (!waitqueue_active(&pgdat->kswapd_wait))
9320 + return;
9321 ++ if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
9322 ++ return;
9323 ++
9324 ++ trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
9325 + wake_up_interruptible(&pgdat->kswapd_wait);
9326 + }
9327 +
9328 +diff --git a/mm/vmstat.c b/mm/vmstat.c
9329 +index 8f62f17..8aff417 100644
9330 +--- a/mm/vmstat.c
9331 ++++ b/mm/vmstat.c
9332 +@@ -83,6 +83,30 @@ EXPORT_SYMBOL(vm_stat);
9333 +
9334 + #ifdef CONFIG_SMP
9335 +
9336 ++static int calculate_pressure_threshold(struct zone *zone)
9337 ++{
9338 ++ int threshold;
9339 ++ int watermark_distance;
9340 ++
9341 ++ /*
9342 ++ * As vmstats are not up to date, there is drift between the estimated
9343 ++ * and real values. For high thresholds and a high number of CPUs, it
9344 ++ * is possible for the min watermark to be breached while the estimated
9345 ++ * value looks fine. The pressure threshold is a reduced value such
9346 ++ * that even the maximum amount of drift will not accidentally breach
9347 ++ * the min watermark
9348 ++ */
9349 ++ watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
9350 ++ threshold = max(1, (int)(watermark_distance / num_online_cpus()));
9351 ++
9352 ++ /*
9353 ++ * Maximum threshold is 125
9354 ++ */
9355 ++ threshold = min(125, threshold);
9356 ++
9357 ++ return threshold;
9358 ++}
9359 ++
9360 + static int calculate_threshold(struct zone *zone)
9361 + {
9362 + int threshold;
9363 +@@ -161,6 +185,48 @@ static void refresh_zone_stat_thresholds(void)
9364 + }
9365 + }
9366 +
9367 ++void reduce_pgdat_percpu_threshold(pg_data_t *pgdat)
9368 ++{
9369 ++ struct zone *zone;
9370 ++ int cpu;
9371 ++ int threshold;
9372 ++ int i;
9373 ++
9374 ++ get_online_cpus();
9375 ++ for (i = 0; i < pgdat->nr_zones; i++) {
9376 ++ zone = &pgdat->node_zones[i];
9377 ++ if (!zone->percpu_drift_mark)
9378 ++ continue;
9379 ++
9380 ++ threshold = calculate_pressure_threshold(zone);
9381 ++ for_each_online_cpu(cpu)
9382 ++ per_cpu_ptr(zone->pageset, cpu)->stat_threshold
9383 ++ = threshold;
9384 ++ }
9385 ++ put_online_cpus();
9386 ++}
9387 ++
9388 ++void restore_pgdat_percpu_threshold(pg_data_t *pgdat)
9389 ++{
9390 ++ struct zone *zone;
9391 ++ int cpu;
9392 ++ int threshold;
9393 ++ int i;
9394 ++
9395 ++ get_online_cpus();
9396 ++ for (i = 0; i < pgdat->nr_zones; i++) {
9397 ++ zone = &pgdat->node_zones[i];
9398 ++ if (!zone->percpu_drift_mark)
9399 ++ continue;
9400 ++
9401 ++ threshold = calculate_threshold(zone);
9402 ++ for_each_online_cpu(cpu)
9403 ++ per_cpu_ptr(zone->pageset, cpu)->stat_threshold
9404 ++ = threshold;
9405 ++ }
9406 ++ put_online_cpus();
9407 ++}
9408 ++
9409 + /*
9410 + * For use when we know that interrupts are disabled.
9411 + */
9412 +@@ -834,7 +900,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
9413 + "\n scanned %lu"
9414 + "\n spanned %lu"
9415 + "\n present %lu",
9416 +- zone_nr_free_pages(zone),
9417 ++ zone_page_state(zone, NR_FREE_PAGES),
9418 + min_wmark_pages(zone),
9419 + low_wmark_pages(zone),
9420 + high_wmark_pages(zone),
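
The calculate_pressure_threshold() added above shrinks the per-cpu counter batch so that, even with every online CPU holding a full batch of uncommitted updates, the aggregate drift cannot span the gap between the low and min watermarks; the result is clamped to at least 1 and at most 125. The arithmetic is easy to show on its own (pressure_threshold and the example numbers are invented for illustration):

#include <stdio.h>

/* Mirrors calculate_pressure_threshold(): keep the worst-case drift
 * (threshold * num_cpus) below the low->min watermark distance. */
static int pressure_threshold(long low_wmark, long min_wmark, int num_cpus)
{
        long distance = low_wmark - min_wmark;
        int threshold = (int)(distance / num_cpus);

        if (threshold < 1)
                threshold = 1;
        if (threshold > 125)
                threshold = 125;
        return threshold;
}

int main(void)
{
        printf("4 cpus,  distance 1000: %d\n", pressure_threshold(2000, 1000, 4));
        printf("64 cpus, distance 1000: %d\n", pressure_threshold(2000, 1000, 64));
        printf("64 cpus, distance 32:   %d\n", pressure_threshold(1032, 1000, 64));
        return 0;
}
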
9421 +diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
9422 +index bb86d29..6da5dae 100644
9423 +--- a/net/ax25/af_ax25.c
9424 ++++ b/net/ax25/af_ax25.c
9425 +@@ -1392,7 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
9426 + ax25_cb *ax25;
9427 + int err = 0;
9428 +
9429 +- memset(fsa, 0, sizeof(fsa));
9430 ++ memset(fsa, 0, sizeof(*fsa));
9431 + lock_sock(sk);
9432 + ax25 = ax25_sk(sk);
9433 +
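
The ax25_getname() fix above is the classic sizeof-of-a-pointer bug: with fsa being a pointer, sizeof(fsa) is only the pointer size (4 or 8 bytes), so most of the sockaddr could be left unzeroed and carry stale kernel stack data out to userspace; sizeof(*fsa) clears the whole structure. A tiny demonstration (the struct is a made-up stand-in):

#include <stdio.h>
#include <string.h>

struct full_sockaddr_demo {     /* stand-in for struct full_sockaddr_ax25 */
        char data[72];
};

int main(void)
{
        struct full_sockaddr_demo s;
        struct full_sockaddr_demo *fsa = &s;

        printf("sizeof(fsa)  = %zu (just the pointer)\n", sizeof(fsa));
        printf("sizeof(*fsa) = %zu (the whole struct)\n", sizeof(*fsa));

        memset(&s, 0xff, sizeof(s));
        memset(fsa, 0, sizeof(fsa));    /* the bug: only 4/8 bytes cleared */
        printf("byte 16 after buggy memset:   0x%02x\n",
               (unsigned char)s.data[16]);

        memset(&s, 0xff, sizeof(s));
        memset(fsa, 0, sizeof(*fsa));   /* the fix: clears all 72 bytes */
        printf("byte 16 after correct memset: 0x%02x\n",
               (unsigned char)s.data[16]);
        return 0;
}
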
9434 +diff --git a/net/core/ethtool.c b/net/core/ethtool.c
9435 +index 956a9f4..1c6c3ac 100644
9436 +--- a/net/core/ethtool.c
9437 ++++ b/net/core/ethtool.c
9438 +@@ -817,7 +817,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
9439 + if (regs.len > reglen)
9440 + regs.len = reglen;
9441 +
9442 +- regbuf = vmalloc(reglen);
9443 ++ regbuf = vzalloc(reglen);
9444 + if (!regbuf)
9445 + return -ENOMEM;
9446 +
9447 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
9448 +index 841c287..b078d90 100644
9449 +--- a/net/core/rtnetlink.c
9450 ++++ b/net/core/rtnetlink.c
9451 +@@ -1510,6 +1510,9 @@ replay:
9452 + snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
9453 +
9454 + dest_net = rtnl_link_get_net(net, tb);
9455 ++ if (IS_ERR(dest_net))
9456 ++ return PTR_ERR(dest_net);
9457 ++
9458 + dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
9459 +
9460 + if (IS_ERR(dev))
9461 +diff --git a/net/core/sock.c b/net/core/sock.c
9462 +index e5af8d5..7fd3541 100644
9463 +--- a/net/core/sock.c
9464 ++++ b/net/core/sock.c
9465 +@@ -1907,7 +1907,7 @@ static void sock_def_readable(struct sock *sk, int len)
9466 + rcu_read_lock();
9467 + wq = rcu_dereference(sk->sk_wq);
9468 + if (wq_has_sleeper(wq))
9469 +- wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
9470 ++ wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
9471 + POLLRDNORM | POLLRDBAND);
9472 + sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
9473 + rcu_read_unlock();
9474 +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
9475 +index 1684408..fb9b94a 100644
9476 +--- a/net/ipv4/ip_fragment.c
9477 ++++ b/net/ipv4/ip_fragment.c
9478 +@@ -45,6 +45,7 @@
9479 + #include <linux/udp.h>
9480 + #include <linux/inet.h>
9481 + #include <linux/netfilter_ipv4.h>
9482 ++#include <net/inet_ecn.h>
9483 +
9484 + /* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
9485 + * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
9486 +@@ -70,11 +71,28 @@ struct ipq {
9487 + __be32 daddr;
9488 + __be16 id;
9489 + u8 protocol;
9490 ++ u8 ecn; /* RFC3168 support */
9491 + int iif;
9492 + unsigned int rid;
9493 + struct inet_peer *peer;
9494 + };
9495 +
9496 ++#define IPFRAG_ECN_CLEAR 0x01 /* one frag had INET_ECN_NOT_ECT */
9497 ++#define IPFRAG_ECN_SET_CE 0x04 /* one frag had INET_ECN_CE */
9498 ++
9499 ++static inline u8 ip4_frag_ecn(u8 tos)
9500 ++{
9501 ++ tos = (tos & INET_ECN_MASK) + 1;
9502 ++ /*
9503 ++ * After the last operation we have (in binary):
9504 ++ * INET_ECN_NOT_ECT => 001
9505 ++ * INET_ECN_ECT_1 => 010
9506 ++ * INET_ECN_ECT_0 => 011
9507 ++ * INET_ECN_CE => 100
9508 ++ */
9509 ++ return (tos & 2) ? 0 : tos;
9510 ++}
9511 ++
9512 + static struct inet_frags ip4_frags;
9513 +
9514 + int ip_frag_nqueues(struct net *net)
9515 +@@ -137,6 +155,7 @@ static void ip4_frag_init(struct inet_frag_queue *q, void *a)
9516 +
9517 + qp->protocol = arg->iph->protocol;
9518 + qp->id = arg->iph->id;
9519 ++ qp->ecn = ip4_frag_ecn(arg->iph->tos);
9520 + qp->saddr = arg->iph->saddr;
9521 + qp->daddr = arg->iph->daddr;
9522 + qp->user = arg->user;
9523 +@@ -316,6 +335,7 @@ static int ip_frag_reinit(struct ipq *qp)
9524 + qp->q.fragments = NULL;
9525 + qp->q.fragments_tail = NULL;
9526 + qp->iif = 0;
9527 ++ qp->ecn = 0;
9528 +
9529 + return 0;
9530 + }
9531 +@@ -328,6 +348,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
9532 + int flags, offset;
9533 + int ihl, end;
9534 + int err = -ENOENT;
9535 ++ u8 ecn;
9536 +
9537 + if (qp->q.last_in & INET_FRAG_COMPLETE)
9538 + goto err;
9539 +@@ -339,6 +360,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
9540 + goto err;
9541 + }
9542 +
9543 ++ ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
9544 + offset = ntohs(ip_hdr(skb)->frag_off);
9545 + flags = offset & ~IP_OFFSET;
9546 + offset &= IP_OFFSET;
9547 +@@ -472,6 +494,7 @@ found:
9548 + }
9549 + qp->q.stamp = skb->tstamp;
9550 + qp->q.meat += skb->len;
9551 ++ qp->ecn |= ecn;
9552 + atomic_add(skb->truesize, &qp->q.net->mem);
9553 + if (offset == 0)
9554 + qp->q.last_in |= INET_FRAG_FIRST_IN;
9555 +@@ -583,6 +606,17 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
9556 + iph = ip_hdr(head);
9557 + iph->frag_off = 0;
9558 + iph->tot_len = htons(len);
9559 ++ /* RFC3168 5.3 Fragmentation support
9560 ++ * If one fragment had INET_ECN_NOT_ECT,
9561 ++ * reassembled frame also has INET_ECN_NOT_ECT
9562 ++ * Elif one fragment had INET_ECN_CE
9563 ++ * reassembled frame also has INET_ECN_CE
9564 ++ */
9565 ++ if (qp->ecn & IPFRAG_ECN_CLEAR)
9566 ++ iph->tos &= ~INET_ECN_MASK;
9567 ++ else if (qp->ecn & IPFRAG_ECN_SET_CE)
9568 ++ iph->tos |= INET_ECN_CE;
9569 ++
9570 + IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
9571 + qp->q.fragments = NULL;
9572 + qp->q.fragments_tail = NULL;
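
The ip4_frag_ecn() trick above maps the 2-bit ECN field to flag bits with one addition: NOT_ECT (00) becomes 0x01 (IPFRAG_ECN_CLEAR), CE (11) becomes 0x04 (IPFRAG_ECN_SET_CE), and the two ECT codepoints collapse to 0, so OR-ing the per-fragment values is enough to apply the RFC 3168 reassembly rules. A standalone check of the mapping (constants inlined for the example; frag_ecn is a local copy, not the kernel function):

#include <stdio.h>

#define INET_ECN_NOT_ECT 0
#define INET_ECN_ECT_1   1
#define INET_ECN_ECT_0   2
#define INET_ECN_CE      3
#define INET_ECN_MASK    3

#define IPFRAG_ECN_CLEAR  0x01  /* one frag had INET_ECN_NOT_ECT */
#define IPFRAG_ECN_SET_CE 0x04  /* one frag had INET_ECN_CE */

/* Same arithmetic as ip4_frag_ecn(): 00->001, 01->010, 10->011, 11->100,
 * then the two ECT values (bit 1 set) collapse to 0. */
static unsigned char frag_ecn(unsigned char tos)
{
        tos = (tos & INET_ECN_MASK) + 1;
        return (tos & 2) ? 0 : tos;
}

int main(void)
{
        for (unsigned char ecn = 0; ecn <= 3; ecn++)
                printf("ECN %u -> flags 0x%02x\n", ecn, frag_ecn(ecn));

        /* Reassembly rule: NOT_ECT on any fragment wins, otherwise CE wins. */
        unsigned char flags = frag_ecn(INET_ECN_ECT_0) | frag_ecn(INET_ECN_CE);
        if (flags & IPFRAG_ECN_CLEAR)
                printf("reassembled frame: clear ECN bits\n");
        else if (flags & IPFRAG_ECN_SET_CE)
                printf("reassembled frame: set CE\n");
        return 0;
}
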
9573 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
9574 +index 94b5bf1..5f8d242 100644
9575 +--- a/net/ipv6/ip6_output.c
9576 ++++ b/net/ipv6/ip6_output.c
9577 +@@ -401,6 +401,9 @@ int ip6_forward(struct sk_buff *skb)
9578 + goto drop;
9579 + }
9580 +
9581 ++ if (skb->pkt_type != PACKET_HOST)
9582 ++ goto drop;
9583 ++
9584 + skb_forward_csum(skb);
9585 +
9586 + /*
9587 +diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
9588 +index 720b7a8..c080a5d 100644
9589 +--- a/net/mac80211/agg-rx.c
9590 ++++ b/net/mac80211/agg-rx.c
9591 +@@ -187,8 +187,6 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
9592 + struct ieee80211_mgmt *mgmt,
9593 + size_t len)
9594 + {
9595 +- struct ieee80211_hw *hw = &local->hw;
9596 +- struct ieee80211_conf *conf = &hw->conf;
9597 + struct tid_ampdu_rx *tid_agg_rx;
9598 + u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status;
9599 + u8 dialog_token;
9600 +@@ -233,13 +231,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
9601 + goto end_no_lock;
9602 + }
9603 + /* determine default buffer size */
9604 +- if (buf_size == 0) {
9605 +- struct ieee80211_supported_band *sband;
9606 +-
9607 +- sband = local->hw.wiphy->bands[conf->channel->band];
9608 +- buf_size = IEEE80211_MIN_AMPDU_BUF;
9609 +- buf_size = buf_size << sband->ht_cap.ampdu_factor;
9610 +- }
9611 ++ if (buf_size == 0)
9612 ++ buf_size = IEEE80211_MAX_AMPDU_BUF;
9613 +
9614 +
9615 + /* examine state machine */
9616 +diff --git a/net/mac80211/key.c b/net/mac80211/key.c
9617 +index ccd676b..aa1b734 100644
9618 +--- a/net/mac80211/key.c
9619 ++++ b/net/mac80211/key.c
9620 +@@ -366,6 +366,12 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
9621 + if (!key)
9622 + return;
9623 +
9624 ++ /*
9625 ++ * Synchronize so the TX path can no longer be using
9626 ++ * this key before we free/remove it.
9627 ++ */
9628 ++ synchronize_rcu();
9629 ++
9630 + if (key->local)
9631 + ieee80211_key_disable_hw_accel(key);
9632 +
9633 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
9634 +index b01e467..e98668f 100644
9635 +--- a/net/mac80211/rx.c
9636 ++++ b/net/mac80211/rx.c
9637 +@@ -1788,11 +1788,11 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
9638 +
9639 + fwd_skb = skb_copy(skb, GFP_ATOMIC);
9640 +
9641 +- if (!fwd_skb && net_ratelimit()) {
9642 ++ if (!fwd_skb && net_ratelimit())
9643 + printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
9644 + sdata->name);
9645 ++ if (!fwd_skb)
9646 + goto out;
9647 +- }
9648 +
9649 + fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
9650 + memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
9651 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
9652 +index 7a637b8..2f09db5 100644
9653 +--- a/net/mac80211/tx.c
9654 ++++ b/net/mac80211/tx.c
9655 +@@ -1726,7 +1726,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
9656 + {
9657 + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
9658 + struct ieee80211_local *local = sdata->local;
9659 +- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
9660 ++ struct ieee80211_tx_info *info;
9661 + int ret = NETDEV_TX_BUSY, head_need;
9662 + u16 ethertype, hdrlen, meshhdrlen = 0;
9663 + __le16 fc;
9664 +@@ -2017,6 +2017,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
9665 + skb_set_network_header(skb, nh_pos);
9666 + skb_set_transport_header(skb, h_pos);
9667 +
9668 ++ info = IEEE80211_SKB_CB(skb);
9669 + memset(info, 0, sizeof(*info));
9670 +
9671 + dev->trans_start = jiffies;
9672 +@@ -2207,6 +2208,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
9673 +
9674 + sdata = vif_to_sdata(vif);
9675 +
9676 ++ if (!ieee80211_sdata_running(sdata))
9677 ++ goto out;
9678 ++
9679 + if (tim_offset)
9680 + *tim_offset = 0;
9681 + if (tim_length)
9682 +diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
9683 +index 4dfecb0..aa4d633 100644
9684 +--- a/net/sched/sch_fifo.c
9685 ++++ b/net/sched/sch_fifo.c
9686 +@@ -54,8 +54,6 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
9687 +
9688 + /* queue full, remove one skb to fulfill the limit */
9689 + skb_head = qdisc_dequeue_head(sch);
9690 +- sch->bstats.bytes -= qdisc_pkt_len(skb_head);
9691 +- sch->bstats.packets--;
9692 + sch->qstats.drops++;
9693 + kfree_skb(skb_head);
9694 +
9695 +diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
9696 +index cd9e841..679cd67 100644
9697 +--- a/net/sunrpc/xdr.c
9698 ++++ b/net/sunrpc/xdr.c
9699 +@@ -552,6 +552,74 @@ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int b
9700 + }
9701 + EXPORT_SYMBOL_GPL(xdr_write_pages);
9702 +
9703 ++static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
9704 ++ __be32 *p, unsigned int len)
9705 ++{
9706 ++ if (len > iov->iov_len)
9707 ++ len = iov->iov_len;
9708 ++ if (p == NULL)
9709 ++ p = (__be32*)iov->iov_base;
9710 ++ xdr->p = p;
9711 ++ xdr->end = (__be32*)(iov->iov_base + len);
9712 ++ xdr->iov = iov;
9713 ++ xdr->page_ptr = NULL;
9714 ++}
9715 ++
9716 ++static int xdr_set_page_base(struct xdr_stream *xdr,
9717 ++ unsigned int base, unsigned int len)
9718 ++{
9719 ++ unsigned int pgnr;
9720 ++ unsigned int maxlen;
9721 ++ unsigned int pgoff;
9722 ++ unsigned int pgend;
9723 ++ void *kaddr;
9724 ++
9725 ++ maxlen = xdr->buf->page_len;
9726 ++ if (base >= maxlen)
9727 ++ return -EINVAL;
9728 ++ maxlen -= base;
9729 ++ if (len > maxlen)
9730 ++ len = maxlen;
9731 ++
9732 ++ base += xdr->buf->page_base;
9733 ++
9734 ++ pgnr = base >> PAGE_SHIFT;
9735 ++ xdr->page_ptr = &xdr->buf->pages[pgnr];
9736 ++ kaddr = page_address(*xdr->page_ptr);
9737 ++
9738 ++ pgoff = base & ~PAGE_MASK;
9739 ++ xdr->p = (__be32*)(kaddr + pgoff);
9740 ++
9741 ++ pgend = pgoff + len;
9742 ++ if (pgend > PAGE_SIZE)
9743 ++ pgend = PAGE_SIZE;
9744 ++ xdr->end = (__be32*)(kaddr + pgend);
9745 ++ xdr->iov = NULL;
9746 ++ return 0;
9747 ++}
9748 ++
9749 ++static void xdr_set_next_page(struct xdr_stream *xdr)
9750 ++{
9751 ++ unsigned int newbase;
9752 ++
9753 ++ newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
9754 ++ newbase -= xdr->buf->page_base;
9755 ++
9756 ++ if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
9757 ++ xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
9758 ++}
9759 ++
9760 ++static bool xdr_set_next_buffer(struct xdr_stream *xdr)
9761 ++{
9762 ++ if (xdr->page_ptr != NULL)
9763 ++ xdr_set_next_page(xdr);
9764 ++ else if (xdr->iov == xdr->buf->head) {
9765 ++ if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
9766 ++ xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
9767 ++ }
9768 ++ return xdr->p != xdr->end;
9769 ++}
9770 ++
9771 + /**
9772 + * xdr_init_decode - Initialize an xdr_stream for decoding data.
9773 + * @xdr: pointer to xdr_stream struct
9774 +@@ -560,41 +628,67 @@ EXPORT_SYMBOL_GPL(xdr_write_pages);
9775 + */
9776 + void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
9777 + {
9778 +- struct kvec *iov = buf->head;
9779 +- unsigned int len = iov->iov_len;
9780 +-
9781 +- if (len > buf->len)
9782 +- len = buf->len;
9783 + xdr->buf = buf;
9784 +- xdr->iov = iov;
9785 +- xdr->p = p;
9786 +- xdr->end = (__be32 *)((char *)iov->iov_base + len);
9787 ++ xdr->scratch.iov_base = NULL;
9788 ++ xdr->scratch.iov_len = 0;
9789 ++ if (buf->head[0].iov_len != 0)
9790 ++ xdr_set_iov(xdr, buf->head, p, buf->len);
9791 ++ else if (buf->page_len != 0)
9792 ++ xdr_set_page_base(xdr, 0, buf->len);
9793 + }
9794 + EXPORT_SYMBOL_GPL(xdr_init_decode);
9795 +
9796 +-/**
9797 +- * xdr_inline_peek - Allow read-ahead in the XDR data stream
9798 +- * @xdr: pointer to xdr_stream struct
9799 +- * @nbytes: number of bytes of data to decode
9800 +- *
9801 +- * Check if the input buffer is long enough to enable us to decode
9802 +- * 'nbytes' more bytes of data starting at the current position.
9803 +- * If so return the current pointer without updating the current
9804 +- * pointer position.
9805 +- */
9806 +-__be32 * xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes)
9807 ++static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
9808 + {
9809 + __be32 *p = xdr->p;
9810 + __be32 *q = p + XDR_QUADLEN(nbytes);
9811 +
9812 + if (unlikely(q > xdr->end || q < p))
9813 + return NULL;
9814 ++ xdr->p = q;
9815 + return p;
9816 + }
9817 +-EXPORT_SYMBOL_GPL(xdr_inline_peek);
9818 +
9819 + /**
9820 +- * xdr_inline_decode - Retrieve non-page XDR data to decode
9821 ++ * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
9822 ++ * @xdr: pointer to xdr_stream struct
9823 ++ * @buf: pointer to an empty buffer
9824 ++ * @buflen: size of 'buf'
9825 ++ *
9826 ++ * The scratch buffer is used when decoding from an array of pages.
9827 ++ * If an xdr_inline_decode() call spans across page boundaries, then
9828 ++ * we copy the data into the scratch buffer in order to allow linear
9829 ++ * access.
9830 ++ */
9831 ++void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
9832 ++{
9833 ++ xdr->scratch.iov_base = buf;
9834 ++ xdr->scratch.iov_len = buflen;
9835 ++}
9836 ++EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
9837 ++
9838 ++static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
9839 ++{
9840 ++ __be32 *p;
9841 ++ void *cpdest = xdr->scratch.iov_base;
9842 ++ size_t cplen = (char *)xdr->end - (char *)xdr->p;
9843 ++
9844 ++ if (nbytes > xdr->scratch.iov_len)
9845 ++ return NULL;
9846 ++ memcpy(cpdest, xdr->p, cplen);
9847 ++ cpdest += cplen;
9848 ++ nbytes -= cplen;
9849 ++ if (!xdr_set_next_buffer(xdr))
9850 ++ return NULL;
9851 ++ p = __xdr_inline_decode(xdr, nbytes);
9852 ++ if (p == NULL)
9853 ++ return NULL;
9854 ++ memcpy(cpdest, p, nbytes);
9855 ++ return xdr->scratch.iov_base;
9856 ++}
9857 ++
9858 ++/**
9859 ++ * xdr_inline_decode - Retrieve XDR data to decode
9860 + * @xdr: pointer to xdr_stream struct
9861 + * @nbytes: number of bytes of data to decode
9862 + *
9863 +@@ -605,13 +699,16 @@ EXPORT_SYMBOL_GPL(xdr_inline_peek);
9864 + */
9865 + __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
9866 + {
9867 +- __be32 *p = xdr->p;
9868 +- __be32 *q = p + XDR_QUADLEN(nbytes);
9869 ++ __be32 *p;
9870 +
9871 +- if (unlikely(q > xdr->end || q < p))
9872 ++ if (nbytes == 0)
9873 ++ return xdr->p;
9874 ++ if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
9875 + return NULL;
9876 +- xdr->p = q;
9877 +- return p;
9878 ++ p = __xdr_inline_decode(xdr, nbytes);
9879 ++ if (p != NULL)
9880 ++ return p;
9881 ++ return xdr_copy_to_scratch(xdr, nbytes);
9882 + }
9883 + EXPORT_SYMBOL_GPL(xdr_inline_decode);
9884 +
9885 +@@ -671,16 +768,12 @@ EXPORT_SYMBOL_GPL(xdr_read_pages);
9886 + */
9887 + void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
9888 + {
9889 +- char * kaddr = page_address(xdr->buf->pages[0]);
9890 + xdr_read_pages(xdr, len);
9891 + /*
9892 + * Position current pointer at beginning of tail, and
9893 + * set remaining message length.
9894 + */
9895 +- if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
9896 +- len = PAGE_CACHE_SIZE - xdr->buf->page_base;
9897 +- xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
9898 +- xdr->end = (__be32 *)((char *)xdr->p + len);
9899 ++ xdr_set_page_base(xdr, 0, len);
9900 + }
9901 + EXPORT_SYMBOL_GPL(xdr_enter_page);
9902 +
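
The xdr_inline_decode() rework above lets a decode request straddle a buffer boundary: whatever remains in the current segment is copied into a caller-supplied scratch buffer, the stream advances to the next segment, and the rest of the object is copied in after it, so the caller still gets one contiguous pointer. A simplified userspace sketch of that copy-across-boundary idea (the two fixed segments and the decode helper are invented stand-ins for the head/page/tail pieces of an xdr_buf):

#include <stdio.h>
#include <string.h>

/* Two discontiguous segments standing in for the xdr_buf pieces. */
static const char seg1[] = { 'a', 'b', 'c' };
static const char seg2[] = { 'd', 'e', 'f', 'g' };

/* Return a contiguous pointer to nbytes at offset off of the logical
 * stream, copying into the caller's scratch buffer when the request
 * spans the segment boundary. */
static const char *decode(size_t off, size_t nbytes,
                          char *scratch, size_t scratch_len)
{
        if (off + nbytes > sizeof(seg1) + sizeof(seg2))
                return NULL;                            /* past end of stream */
        if (off + nbytes <= sizeof(seg1))
                return seg1 + off;                      /* fully inside segment 1 */
        if (off >= sizeof(seg1))
                return seg2 + (off - sizeof(seg1));     /* fully inside segment 2 */
        if (nbytes > scratch_len)
                return NULL;                            /* scratch too small */

        /* Spans the boundary: linearize into the scratch buffer. */
        size_t first = sizeof(seg1) - off;
        memcpy(scratch, seg1 + off, first);
        memcpy(scratch + first, seg2, nbytes - first);
        return scratch;
}

int main(void)
{
        char scratch[8];
        const char *p = decode(2, 3, scratch, sizeof(scratch)); /* "cde" spans the seam */

        if (p)
                printf("%.3s (from %s)\n", p, p == scratch ? "scratch" : "segment");
        return 0;
}
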
9903 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
9904 +index 2268e67..759bbcb 100644
9905 +--- a/net/unix/af_unix.c
9906 ++++ b/net/unix/af_unix.c
9907 +@@ -1156,7 +1156,7 @@ restart:
9908 + goto restart;
9909 + }
9910 +
9911 +- err = security_unix_stream_connect(sock, other->sk_socket, newsk);
9912 ++ err = security_unix_stream_connect(sk, other, newsk);
9913 + if (err) {
9914 + unix_state_unlock(sk);
9915 + goto out_unlock;
9916 +diff --git a/net/wireless/reg.c b/net/wireless/reg.c
9917 +index 4b9f891..8ab65f2 100644
9918 +--- a/net/wireless/reg.c
9919 ++++ b/net/wireless/reg.c
9920 +@@ -720,7 +720,9 @@ EXPORT_SYMBOL(freq_reg_info);
9921 + * on the wiphy with the target_bw specified. Then we can simply use
9922 + * that below for the desired_bw_khz below.
9923 + */
9924 +-static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
9925 ++static void handle_channel(struct wiphy *wiphy,
9926 ++ enum nl80211_reg_initiator initiator,
9927 ++ enum ieee80211_band band,
9928 + unsigned int chan_idx)
9929 + {
9930 + int r;
9931 +@@ -748,8 +750,26 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
9932 + desired_bw_khz,
9933 + &reg_rule);
9934 +
9935 +- if (r)
9936 ++ if (r) {
9937 ++ /*
9938 ++ * We will disable all channels that do not match our
9939 ++	 * received regulatory rule unless the hint is coming
9940 ++ * from a Country IE and the Country IE had no information
9941 ++ * about a band. The IEEE 802.11 spec allows for an AP
9942 ++ * to send only a subset of the regulatory rules allowed,
9943 ++ * so an AP in the US that only supports 2.4 GHz may only send
9944 ++ * a country IE with information for the 2.4 GHz band
9945 ++ * while 5 GHz is still supported.
9946 ++ */
9947 ++ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
9948 ++ r == -ERANGE)
9949 ++ return;
9950 ++
9951 ++ REG_DBG_PRINT("cfg80211: Disabling freq %d MHz\n",
9952 ++ chan->center_freq);
9953 ++ chan->flags = IEEE80211_CHAN_DISABLED;
9954 + return;
9955 ++ }
9956 +
9957 + power_rule = &reg_rule->power_rule;
9958 + freq_range = &reg_rule->freq_range;
9959 +@@ -784,7 +804,9 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
9960 + chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
9961 + }
9962 +
9963 +-static void handle_band(struct wiphy *wiphy, enum ieee80211_band band)
9964 ++static void handle_band(struct wiphy *wiphy,
9965 ++ enum ieee80211_band band,
9966 ++ enum nl80211_reg_initiator initiator)
9967 + {
9968 + unsigned int i;
9969 + struct ieee80211_supported_band *sband;
9970 +@@ -793,7 +815,7 @@ static void handle_band(struct wiphy *wiphy, enum ieee80211_band band)
9971 + sband = wiphy->bands[band];
9972 +
9973 + for (i = 0; i < sband->n_channels; i++)
9974 +- handle_channel(wiphy, band, i);
9975 ++ handle_channel(wiphy, initiator, band, i);
9976 + }
9977 +
9978 + static bool ignore_reg_update(struct wiphy *wiphy,
9979 +@@ -809,6 +831,7 @@ static bool ignore_reg_update(struct wiphy *wiphy,
9980 + * desired regulatory domain set
9981 + */
9982 + if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd &&
9983 ++ initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
9984 + !is_world_regdom(last_request->alpha2))
9985 + return true;
9986 + return false;
9987 +@@ -1030,7 +1053,7 @@ void wiphy_update_regulatory(struct wiphy *wiphy,
9988 + goto out;
9989 + for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
9990 + if (wiphy->bands[band])
9991 +- handle_band(wiphy, band);
9992 ++ handle_band(wiphy, band, initiator);
9993 + }
9994 + out:
9995 + reg_process_beacons(wiphy);
9996 +diff --git a/security/capability.c b/security/capability.c
9997 +index c773635..2a5df2b 100644
9998 +--- a/security/capability.c
9999 ++++ b/security/capability.c
10000 +@@ -548,7 +548,7 @@ static int cap_sem_semop(struct sem_array *sma, struct sembuf *sops,
10001 + }
10002 +
10003 + #ifdef CONFIG_SECURITY_NETWORK
10004 +-static int cap_unix_stream_connect(struct socket *sock, struct socket *other,
10005 ++static int cap_unix_stream_connect(struct sock *sock, struct sock *other,
10006 + struct sock *newsk)
10007 + {
10008 + return 0;
10009 +diff --git a/security/security.c b/security/security.c
10010 +index 1b798d3..e5fb07a 100644
10011 +--- a/security/security.c
10012 ++++ b/security/security.c
10013 +@@ -977,8 +977,7 @@ EXPORT_SYMBOL(security_inode_getsecctx);
10014 +
10015 + #ifdef CONFIG_SECURITY_NETWORK
10016 +
10017 +-int security_unix_stream_connect(struct socket *sock, struct socket *other,
10018 +- struct sock *newsk)
10019 ++int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk)
10020 + {
10021 + return security_ops->unix_stream_connect(sock, other, newsk);
10022 + }
10023 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
10024 +index 65fa8bf..11d5c47 100644
10025 +--- a/security/selinux/hooks.c
10026 ++++ b/security/selinux/hooks.c
10027 +@@ -2525,7 +2525,10 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
10028 + sid = tsec->sid;
10029 + newsid = tsec->create_sid;
10030 +
10031 +- if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
10032 ++ if ((sbsec->flags & SE_SBINITIALIZED) &&
10033 ++ (sbsec->behavior == SECURITY_FS_USE_MNTPOINT))
10034 ++ newsid = sbsec->mntpoint_sid;
10035 ++ else if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
10036 + rc = security_transition_sid(sid, dsec->sid,
10037 + inode_mode_to_security_class(inode->i_mode),
10038 + &newsid);
10039 +@@ -3921,18 +3924,18 @@ static int selinux_socket_shutdown(struct socket *sock, int how)
10040 + return sock_has_perm(current, sock->sk, SOCKET__SHUTDOWN);
10041 + }
10042 +
10043 +-static int selinux_socket_unix_stream_connect(struct socket *sock,
10044 +- struct socket *other,
10045 ++static int selinux_socket_unix_stream_connect(struct sock *sock,
10046 ++ struct sock *other,
10047 + struct sock *newsk)
10048 + {
10049 +- struct sk_security_struct *sksec_sock = sock->sk->sk_security;
10050 +- struct sk_security_struct *sksec_other = other->sk->sk_security;
10051 ++ struct sk_security_struct *sksec_sock = sock->sk_security;
10052 ++ struct sk_security_struct *sksec_other = other->sk_security;
10053 + struct sk_security_struct *sksec_new = newsk->sk_security;
10054 + struct common_audit_data ad;
10055 + int err;
10056 +
10057 + COMMON_AUDIT_DATA_INIT(&ad, NET);
10058 +- ad.u.net.sk = other->sk;
10059 ++ ad.u.net.sk = other;
10060 +
10061 + err = avc_has_perm(sksec_sock->sid, sksec_other->sid,
10062 + sksec_other->sclass,
10063 +diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
10064 +index 75ec0c6..8b02b21 100644
10065 +--- a/security/selinux/nlmsgtab.c
10066 ++++ b/security/selinux/nlmsgtab.c
10067 +@@ -65,6 +65,8 @@ static struct nlmsg_perm nlmsg_route_perms[] =
10068 + { RTM_NEWADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
10069 + { RTM_DELADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
10070 + { RTM_GETADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_READ },
10071 ++ { RTM_GETDCB, NETLINK_ROUTE_SOCKET__NLMSG_READ },
10072 ++ { RTM_SETDCB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
10073 + };
10074 +
10075 + static struct nlmsg_perm nlmsg_firewall_perms[] =
10076 +diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
10077 +index 489a85a..ccb71a0 100644
10078 +--- a/security/smack/smack_lsm.c
10079 ++++ b/security/smack/smack_lsm.c
10080 +@@ -2408,22 +2408,22 @@ static int smack_setprocattr(struct task_struct *p, char *name,
10081 +
10082 + /**
10083 + * smack_unix_stream_connect - Smack access on UDS
10084 +- * @sock: one socket
10085 +- * @other: the other socket
10086 ++ * @sock: one sock
10087 ++ * @other: the other sock
10088 + * @newsk: unused
10089 + *
10090 + * Return 0 if a subject with the smack of sock could access
10091 + * an object with the smack of other, otherwise an error code
10092 + */
10093 +-static int smack_unix_stream_connect(struct socket *sock,
10094 +- struct socket *other, struct sock *newsk)
10095 ++static int smack_unix_stream_connect(struct sock *sock,
10096 ++ struct sock *other, struct sock *newsk)
10097 + {
10098 +- struct inode *sp = SOCK_INODE(sock);
10099 +- struct inode *op = SOCK_INODE(other);
10100 ++ struct inode *sp = SOCK_INODE(sock->sk_socket);
10101 ++ struct inode *op = SOCK_INODE(other->sk_socket);
10102 + struct smk_audit_info ad;
10103 +
10104 + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
10105 +- smk_ad_setfield_u_net_sk(&ad, other->sk);
10106 ++ smk_ad_setfield_u_net_sk(&ad, other);
10107 + return smk_access(smk_of_inode(sp), smk_of_inode(op),
10108 + MAY_READWRITE, &ad);
10109 + }
10110 +diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
10111 +index 10c3a87..b310702 100644
10112 +--- a/sound/atmel/ac97c.c
10113 ++++ b/sound/atmel/ac97c.c
10114 +@@ -33,9 +33,12 @@
10115 + #include <linux/dw_dmac.h>
10116 +
10117 + #include <mach/cpu.h>
10118 +-#include <mach/hardware.h>
10119 + #include <mach/gpio.h>
10120 +
10121 ++#ifdef CONFIG_ARCH_AT91
10122 ++#include <mach/hardware.h>
10123 ++#endif
10124 ++
10125 + #include "ac97c.h"
10126 +
10127 + enum {
10128 +diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
10129 +index 7730575..07efa29 100644
10130 +--- a/sound/core/hrtimer.c
10131 ++++ b/sound/core/hrtimer.c
10132 +@@ -45,12 +45,13 @@ static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
10133 + {
10134 + struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt);
10135 + struct snd_timer *t = stime->timer;
10136 ++ unsigned long oruns;
10137 +
10138 + if (!atomic_read(&stime->running))
10139 + return HRTIMER_NORESTART;
10140 +
10141 +- hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
10142 +- snd_timer_interrupt(stime->timer, t->sticks);
10143 ++ oruns = hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
10144 ++ snd_timer_interrupt(stime->timer, t->sticks * oruns);
10145 +
10146 + if (!atomic_read(&stime->running))
10147 + return HRTIMER_NORESTART;
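
The hrtimer fix above uses the return value of hrtimer_forward_now(), which is the number of periods the timer had to be pushed forward; when the callback fires late, multiplying the per-period tick count by that overrun count keeps snd_timer_interrupt() from silently losing ticks. A small sketch of the same accounting against a simulated clock (forward_now and the numbers are illustrative, not the ALSA API):

#include <stdio.h>

/* Advance *expiry past now in whole periods and return how many periods
 * were consumed -- the same contract as hrtimer_forward_now(). */
static unsigned long forward_now(long long *expiry, long long now, long long period)
{
        unsigned long overruns = 0;

        while (*expiry <= now) {
                *expiry += period;
                overruns++;
        }
        return overruns;
}

int main(void)
{
        long long period = 10;          /* resolution * sticks, in arbitrary units */
        long long expiry = 10;
        unsigned long sticks = 4;       /* ticks credited per period */

        /* Callback runs late, at t=35: three periods have elapsed. */
        unsigned long oruns = forward_now(&expiry, 35, period);

        printf("overruns = %lu, ticks to credit = %lu, next expiry = %lld\n",
               oruns, sticks * oruns, expiry);
        return 0;
}
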
10148 +diff --git a/sound/pci/au88x0/au88x0_pcm.c b/sound/pci/au88x0/au88x0_pcm.c
10149 +index b9d2f20..5439d66 100644
10150 +--- a/sound/pci/au88x0/au88x0_pcm.c
10151 ++++ b/sound/pci/au88x0/au88x0_pcm.c
10152 +@@ -42,11 +42,7 @@ static struct snd_pcm_hardware snd_vortex_playback_hw_adb = {
10153 + .rate_min = 5000,
10154 + .rate_max = 48000,
10155 + .channels_min = 1,
10156 +-#ifdef CHIP_AU8830
10157 +- .channels_max = 4,
10158 +-#else
10159 + .channels_max = 2,
10160 +-#endif
10161 + .buffer_bytes_max = 0x10000,
10162 + .period_bytes_min = 0x1,
10163 + .period_bytes_max = 0x1000,
10164 +@@ -115,6 +111,17 @@ static struct snd_pcm_hardware snd_vortex_playback_hw_wt = {
10165 + .periods_max = 64,
10166 + };
10167 + #endif
10168 ++#ifdef CHIP_AU8830
10169 ++static unsigned int au8830_channels[3] = {
10170 ++ 1, 2, 4,
10171 ++};
10172 ++
10173 ++static struct snd_pcm_hw_constraint_list hw_constraints_au8830_channels = {
10174 ++ .count = ARRAY_SIZE(au8830_channels),
10175 ++ .list = au8830_channels,
10176 ++ .mask = 0,
10177 ++};
10178 ++#endif
10179 + /* open callback */
10180 + static int snd_vortex_pcm_open(struct snd_pcm_substream *substream)
10181 + {
10182 +@@ -156,6 +163,15 @@ static int snd_vortex_pcm_open(struct snd_pcm_substream *substream)
10183 + if (VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB
10184 + || VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_I2S)
10185 + runtime->hw = snd_vortex_playback_hw_adb;
10186 ++#ifdef CHIP_AU8830
10187 ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
10188 ++ VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB) {
10189 ++ runtime->hw.channels_max = 4;
10190 ++ snd_pcm_hw_constraint_list(runtime, 0,
10191 ++ SNDRV_PCM_HW_PARAM_CHANNELS,
10192 ++ &hw_constraints_au8830_channels);
10193 ++ }
10194 ++#endif
10195 + substream->runtime->private_data = NULL;
10196 + }
10197 + #ifndef CHIP_AU8810
10198 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
10199 +index 98b6d02..05e5ec8 100644
10200 +--- a/sound/pci/hda/hda_codec.c
10201 ++++ b/sound/pci/hda/hda_codec.c
10202 +@@ -4571,6 +4571,9 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec,
10203 + }
10204 + memset(cfg->hp_pins + cfg->hp_outs, 0,
10205 + sizeof(hda_nid_t) * (AUTO_CFG_MAX_OUTS - cfg->hp_outs));
10206 ++ if (!cfg->hp_outs)
10207 ++ cfg->line_out_type = AUTO_PIN_HP_OUT;
10208 ++
10209 + }
10210 +
10211 + /* sort by sequence */
10212 +diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
10213 +index 4a66347..74b0560 100644
10214 +--- a/sound/pci/hda/hda_eld.c
10215 ++++ b/sound/pci/hda/hda_eld.c
10216 +@@ -381,7 +381,7 @@ static void hdmi_show_short_audio_desc(struct cea_sad *a)
10217 + snd_print_pcm_rates(a->rates, buf, sizeof(buf));
10218 +
10219 + if (a->format == AUDIO_CODING_TYPE_LPCM)
10220 +- snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2 - 8));
10221 ++ snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2) - 8);
10222 + else if (a->max_bitrate)
10223 + snprintf(buf2, sizeof(buf2),
10224 + ", max bitrate = %d", a->max_bitrate);
10225 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
10226 +index 76bd58a..5667fb9 100644
10227 +--- a/sound/pci/hda/patch_conexant.c
10228 ++++ b/sound/pci/hda/patch_conexant.c
10229 +@@ -403,10 +403,16 @@ static int conexant_add_jack(struct hda_codec *codec,
10230 + struct conexant_spec *spec;
10231 + struct conexant_jack *jack;
10232 + const char *name;
10233 +- int err;
10234 ++ int i, err;
10235 +
10236 + spec = codec->spec;
10237 + snd_array_init(&spec->jacks, sizeof(*jack), 32);
10238 ++
10239 ++ jack = spec->jacks.list;
10240 ++ for (i = 0; i < spec->jacks.used; i++, jack++)
10241 ++ if (jack->nid == nid)
10242 ++ return 0 ; /* already present */
10243 ++
10244 + jack = snd_array_new(&spec->jacks);
10245 + name = (type == SND_JACK_HEADPHONE) ? "Headphone" : "Mic" ;
10246 +
10247 +@@ -3097,6 +3103,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
10248 + SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD),
10249 + SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
10250 + SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
10251 ++ SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
10252 + SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
10253 + SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
10254 + SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
10255 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
10256 +index 31df774..232833b 100644
10257 +--- a/sound/pci/hda/patch_hdmi.c
10258 ++++ b/sound/pci/hda/patch_hdmi.c
10259 +@@ -31,10 +31,15 @@
10260 + #include <linux/init.h>
10261 + #include <linux/delay.h>
10262 + #include <linux/slab.h>
10263 ++#include <linux/moduleparam.h>
10264 + #include <sound/core.h>
10265 + #include "hda_codec.h"
10266 + #include "hda_local.h"
10267 +
10268 ++static bool static_hdmi_pcm;
10269 ++module_param(static_hdmi_pcm, bool, 0644);
10270 ++MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
10271 ++
10272 + /*
10273 + * The HDMI/DisplayPort configuration can be highly dynamic. A graphics device
10274 + * could support two independent pipes, each of them can be connected to one or
10275 +@@ -637,6 +642,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, hda_nid_t nid,
10276 + hdmi_ai->ver = 0x01;
10277 + hdmi_ai->len = 0x0a;
10278 + hdmi_ai->CC02_CT47 = channels - 1;
10279 ++ hdmi_ai->CA = ca;
10280 + hdmi_checksum_audio_infoframe(hdmi_ai);
10281 + } else if (spec->sink_eld[i].conn_type == 1) { /* DisplayPort */
10282 + struct dp_audio_infoframe *dp_ai;
10283 +@@ -646,6 +652,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, hda_nid_t nid,
10284 + dp_ai->len = 0x1b;
10285 + dp_ai->ver = 0x11 << 2;
10286 + dp_ai->CC02_CT47 = channels - 1;
10287 ++ dp_ai->CA = ca;
10288 + } else {
10289 + snd_printd("HDMI: unknown connection type at pin %d\n",
10290 + pin_nid);
10291 +@@ -827,7 +834,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
10292 + *codec_pars = *hinfo;
10293 +
10294 + eld = &spec->sink_eld[idx];
10295 +- if (eld->sad_count > 0) {
10296 ++ if (!static_hdmi_pcm && eld->eld_valid && eld->sad_count > 0) {
10297 + hdmi_eld_update_pcm_info(eld, hinfo, codec_pars);
10298 + if (hinfo->channels_min > hinfo->channels_max ||
10299 + !hinfo->rates || !hinfo->formats)
10300 +@@ -1165,11 +1172,53 @@ static int nvhdmi_7x_init(struct hda_codec *codec)
10301 + return 0;
10302 + }
10303 +
10304 ++static unsigned int channels_2_6_8[] = {
10305 ++ 2, 6, 8
10306 ++};
10307 ++
10308 ++static unsigned int channels_2_8[] = {
10309 ++ 2, 8
10310 ++};
10311 ++
10312 ++static struct snd_pcm_hw_constraint_list hw_constraints_2_6_8_channels = {
10313 ++ .count = ARRAY_SIZE(channels_2_6_8),
10314 ++ .list = channels_2_6_8,
10315 ++ .mask = 0,
10316 ++};
10317 ++
10318 ++static struct snd_pcm_hw_constraint_list hw_constraints_2_8_channels = {
10319 ++ .count = ARRAY_SIZE(channels_2_8),
10320 ++ .list = channels_2_8,
10321 ++ .mask = 0,
10322 ++};
10323 ++
10324 + static int simple_playback_pcm_open(struct hda_pcm_stream *hinfo,
10325 + struct hda_codec *codec,
10326 + struct snd_pcm_substream *substream)
10327 + {
10328 + struct hdmi_spec *spec = codec->spec;
10329 ++ struct snd_pcm_hw_constraint_list *hw_constraints_channels = NULL;
10330 ++
10331 ++ switch (codec->preset->id) {
10332 ++ case 0x10de0002:
10333 ++ case 0x10de0003:
10334 ++ case 0x10de0005:
10335 ++ case 0x10de0006:
10336 ++ hw_constraints_channels = &hw_constraints_2_8_channels;
10337 ++ break;
10338 ++ case 0x10de0007:
10339 ++ hw_constraints_channels = &hw_constraints_2_6_8_channels;
10340 ++ break;
10341 ++ default:
10342 ++ break;
10343 ++ }
10344 ++
10345 ++ if (hw_constraints_channels != NULL) {
10346 ++ snd_pcm_hw_constraint_list(substream->runtime, 0,
10347 ++ SNDRV_PCM_HW_PARAM_CHANNELS,
10348 ++ hw_constraints_channels);
10349 ++ }
10350 ++
10351 + return snd_hda_multi_out_dig_open(codec, &spec->multiout);
10352 + }
10353 +
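
Both the au88x0 and NVIDIA HDMI changes above use snd_pcm_hw_constraint_list() to restrict the channel count to a discrete set (1/2/4 or 2/6/8) rather than a contiguous min..max range, so user space can only negotiate configurations the hardware actually supports. The effect can be roughly modelled as narrowing the requested interval to values present in the list; the sketch below is only that rough model (refine_channels is invented, not the ALSA constraint engine):

#include <stdio.h>

/* Discrete channel counts the hardware supports, as in the
 * hw_constraints_2_6_8_channels list added by the patch. */
static const unsigned int allowed[] = { 2, 6, 8 };
#define NALLOWED (sizeof(allowed) / sizeof(allowed[0]))

/* Rough model of a list constraint: narrow the requested [min,max]
 * interval to allowed values only.  Returns 0 on success. */
static int refine_channels(unsigned int *min, unsigned int *max)
{
        unsigned int lo = 0, hi = 0;
        int found = 0;

        for (unsigned int i = 0; i < NALLOWED; i++) {
                if (allowed[i] < *min || allowed[i] > *max)
                        continue;
                if (!found)
                        lo = allowed[i];
                hi = allowed[i];
                found = 1;
        }
        if (!found)
                return -1;      /* nothing in the list satisfies the request */
        *min = lo;
        *max = hi;
        return 0;
}

int main(void)
{
        unsigned int min = 1, max = 8;

        if (!refine_channels(&min, &max))
                printf("channels narrowed to [%u, %u]\n", min, max);    /* [2, 8] */

        min = 3; max = 5;
        if (refine_channels(&min, &max))
                printf("3..5 channels: no supported configuration\n");
        return 0;
}
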
10354 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
10355 +index 552a09e..0bc6e4e 100644
10356 +--- a/sound/pci/hda/patch_realtek.c
10357 ++++ b/sound/pci/hda/patch_realtek.c
10358 +@@ -14824,6 +14824,7 @@ static const struct alc_fixup alc269_fixups[] = {
10359 + {0x01, AC_VERB_SET_GPIO_MASK, 0x04},
10360 + {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04},
10361 + {0x01, AC_VERB_SET_GPIO_DATA, 0x00},
10362 ++ {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREFGRD},
10363 + { }
10364 + }
10365 + },
10366 +@@ -19379,6 +19380,7 @@ static const struct alc_fixup alc662_fixups[] = {
10367 + };
10368 +
10369 + static struct snd_pci_quirk alc662_fixup_tbl[] = {
10370 ++ SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
10371 + SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
10372 + SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
10373 + SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
10374 +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
10375 +index f03b2ff..fc16beb 100644
10376 +--- a/sound/pci/hda/patch_sigmatel.c
10377 ++++ b/sound/pci/hda/patch_sigmatel.c
10378 +@@ -3591,7 +3591,7 @@ static int stac_check_auto_mic(struct hda_codec *codec)
10379 + if (check_mic_pin(codec, spec->dmic_nids[i],
10380 + &fixed, &ext, &dock))
10381 + return 0;
10382 +- if (!fixed && !ext && !dock)
10383 ++ if (!fixed || (!ext && !dock))
10384 + return 0; /* no input to switch */
10385 + if (!(get_wcaps(codec, ext) & AC_WCAP_UNSOL_CAP))
10386 + return 0; /* no unsol support */
10387 +diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
10388 +index c5f856e..ffbac26 100644
10389 +--- a/sound/soc/blackfin/bf5xx-ac97.c
10390 ++++ b/sound/soc/blackfin/bf5xx-ac97.c
10391 +@@ -260,9 +260,9 @@ static int bf5xx_ac97_suspend(struct snd_soc_dai *dai)
10392 + pr_debug("%s : sport %d\n", __func__, dai->id);
10393 + if (!dai->active)
10394 + return 0;
10395 +- if (dai->capture.active)
10396 ++ if (dai->capture_active)
10397 + sport_rx_stop(sport);
10398 +- if (dai->playback.active)
10399 ++ if (dai->playback_active)
10400 + sport_tx_stop(sport);
10401 + return 0;
10402 + }
10403 +diff --git a/sound/soc/blackfin/bf5xx-tdm.c b/sound/soc/blackfin/bf5xx-tdm.c
10404 +index 1251239..b2cf239 100644
10405 +--- a/sound/soc/blackfin/bf5xx-tdm.c
10406 ++++ b/sound/soc/blackfin/bf5xx-tdm.c
10407 +@@ -210,7 +210,7 @@ static int bf5xx_tdm_set_channel_map(struct snd_soc_dai *dai,
10408 + #ifdef CONFIG_PM
10409 + static int bf5xx_tdm_suspend(struct snd_soc_dai *dai)
10410 + {
10411 +- struct sport_device *sport = dai->private_data;
10412 ++ struct sport_device *sport = snd_soc_dai_get_drvdata(dai);
10413 +
10414 + if (!dai->active)
10415 + return 0;
10416 +diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
10417 +index 264828e..f007d77 100644
10418 +--- a/sound/soc/codecs/wm8990.c
10419 ++++ b/sound/soc/codecs/wm8990.c
10420 +@@ -1183,7 +1183,7 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
10421 + WM8990_VMIDTOG);
10422 +
10423 + /* Delay to allow output caps to discharge */
10424 +- msleep(msecs_to_jiffies(300));
10425 ++ msleep(300);
10426 +
10427 + /* Disable VMIDTOG */
10428 + snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
10429 +@@ -1195,17 +1195,17 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
10430 + /* Enable outputs */
10431 + snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1b00);
10432 +
10433 +- msleep(msecs_to_jiffies(50));
10434 ++ msleep(50);
10435 +
10436 + /* Enable VMID at 2x50k */
10437 + snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f02);
10438 +
10439 +- msleep(msecs_to_jiffies(100));
10440 ++ msleep(100);
10441 +
10442 + /* Enable VREF */
10443 + snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f03);
10444 +
10445 +- msleep(msecs_to_jiffies(600));
10446 ++ msleep(600);
10447 +
10448 + /* Enable BUFIOEN */
10449 + snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
10450 +@@ -1250,7 +1250,7 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
10451 + /* Disable VMID */
10452 + snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f01);
10453 +
10454 +- msleep(msecs_to_jiffies(300));
10455 ++ msleep(300);
10456 +
10457 + /* Enable all output discharge bits */
10458 + snd_soc_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE |
10459 +diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
10460 +index 4d3e6f1..f451a2c 100644
10461 +--- a/sound/soc/codecs/wm8994.c
10462 ++++ b/sound/soc/codecs/wm8994.c
10463 +@@ -2536,18 +2536,18 @@ SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0),
10464 + SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
10465 + SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0),
10466 +
10467 +-SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", "AIF1 Capture",
10468 ++SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL,
10469 + 0, WM8994_POWER_MANAGEMENT_4, 9, 0),
10470 +-SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", "AIF1 Capture",
10471 ++SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL,
10472 + 0, WM8994_POWER_MANAGEMENT_4, 8, 0),
10473 + SND_SOC_DAPM_AIF_IN("AIF1DAC1L", NULL, 0,
10474 + WM8994_POWER_MANAGEMENT_5, 9, 0),
10475 + SND_SOC_DAPM_AIF_IN("AIF1DAC1R", NULL, 0,
10476 + WM8994_POWER_MANAGEMENT_5, 8, 0),
10477 +
10478 +-SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", "AIF1 Capture",
10479 ++SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", NULL,
10480 + 0, WM8994_POWER_MANAGEMENT_4, 11, 0),
10481 +-SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", "AIF1 Capture",
10482 ++SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", NULL,
10483 + 0, WM8994_POWER_MANAGEMENT_4, 10, 0),
10484 + SND_SOC_DAPM_AIF_IN("AIF1DAC2L", NULL, 0,
10485 + WM8994_POWER_MANAGEMENT_5, 11, 0),
10486 +@@ -2588,6 +2588,7 @@ SND_SOC_DAPM_AIF_IN("AIF2DACR", NULL, 0,
10487 +
10488 + SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
10489 + SND_SOC_DAPM_AIF_IN("AIF2DACDAT", "AIF2 Playback", 0, SND_SOC_NOPM, 0, 0),
10490 ++SND_SOC_DAPM_AIF_OUT("AIF1ADCDAT", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
10491 + SND_SOC_DAPM_AIF_OUT("AIF2ADCDAT", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0),
10492 +
10493 + SND_SOC_DAPM_MUX("AIF1DAC Mux", SND_SOC_NOPM, 0, 0, &aif1dac_mux),
10494 +@@ -2783,6 +2784,11 @@ static const struct snd_soc_dapm_route intercon[] = {
10495 + { "AIF2DAC2R Mixer", "Left Sidetone Switch", "Left Sidetone" },
10496 + { "AIF2DAC2R Mixer", "Right Sidetone Switch", "Right Sidetone" },
10497 +
10498 ++ { "AIF1ADCDAT", NULL, "AIF1ADC1L" },
10499 ++ { "AIF1ADCDAT", NULL, "AIF1ADC1R" },
10500 ++ { "AIF1ADCDAT", NULL, "AIF1ADC2L" },
10501 ++ { "AIF1ADCDAT", NULL, "AIF1ADC2R" },
10502 ++
10503 + { "AIF2ADCDAT", NULL, "AIF2ADC Mux" },
10504 +
10505 + /* AIF3 output */
10506 +@@ -2930,6 +2936,7 @@ static int _wm8994_set_fll(struct snd_soc_codec *codec, int id, int src,
10507 + /* Allow no source specification when stopping */
10508 + if (freq_out)
10509 + return -EINVAL;
10510 ++ src = wm8994->fll[id].src;
10511 + break;
10512 + case WM8994_FLL_SRC_MCLK1:
10513 + case WM8994_FLL_SRC_MCLK2:
10514 +@@ -3512,7 +3519,7 @@ static int wm8994_set_tristate(struct snd_soc_dai *codec_dai, int tristate)
10515 + else
10516 + val = 0;
10517 +
10518 +- return snd_soc_update_bits(codec, reg, mask, reg);
10519 ++ return snd_soc_update_bits(codec, reg, mask, val);
10520 + }
10521 +
10522 + #define WM8994_RATES SNDRV_PCM_RATE_8000_96000
10523 +diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
10524 +index 0e24092..feacf18 100644
10525 +--- a/sound/soc/codecs/wm_hubs.c
10526 ++++ b/sound/soc/codecs/wm_hubs.c
10527 +@@ -92,6 +92,7 @@ static void wait_for_dc_servo(struct snd_soc_codec *codec, unsigned int op)
10528 + static void calibrate_dc_servo(struct snd_soc_codec *codec)
10529 + {
10530 + struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
10531 ++ s8 offset;
10532 + u16 reg, reg_l, reg_r, dcs_cfg;
10533 +
10534 + /* Set for 32 series updates */
10535 +@@ -130,16 +131,14 @@ static void calibrate_dc_servo(struct snd_soc_codec *codec)
10536 + dev_dbg(codec->dev, "DCS input: %x %x\n", reg_l, reg_r);
10537 +
10538 + /* HPOUT1L */
10539 +- if (reg_l + hubs->dcs_codes > 0 &&
10540 +- reg_l + hubs->dcs_codes < 0xff)
10541 +- reg_l += hubs->dcs_codes;
10542 +- dcs_cfg = reg_l << WM8993_DCS_DAC_WR_VAL_1_SHIFT;
10543 ++ offset = reg_l;
10544 ++ offset += hubs->dcs_codes;
10545 ++ dcs_cfg = (u8)offset << WM8993_DCS_DAC_WR_VAL_1_SHIFT;
10546 +
10547 + /* HPOUT1R */
10548 +- if (reg_r + hubs->dcs_codes > 0 &&
10549 +- reg_r + hubs->dcs_codes < 0xff)
10550 +- reg_r += hubs->dcs_codes;
10551 +- dcs_cfg |= reg_r;
10552 ++ offset = reg_r;
10553 ++ offset += hubs->dcs_codes;
10554 ++ dcs_cfg |= (u8)offset;
10555 +
10556 + dev_dbg(codec->dev, "DCS result: %x\n", dcs_cfg);
10557 +
10558 +diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
10559 +index bc9e6b0..07db881 100644
10560 +--- a/sound/soc/davinci/davinci-evm.c
10561 ++++ b/sound/soc/davinci/davinci-evm.c
10562 +@@ -224,7 +224,7 @@ static struct snd_soc_dai_link da8xx_evm_dai = {
10563 + .stream_name = "AIC3X",
10564 + .cpu_dai_name= "davinci-mcasp.0",
10565 + .codec_dai_name = "tlv320aic3x-hifi",
10566 +- .codec_name = "tlv320aic3x-codec.0-001a",
10567 ++ .codec_name = "tlv320aic3x-codec.1-0018",
10568 + .platform_name = "davinci-pcm-audio",
10569 + .init = evm_aic3x_init,
10570 + .ops = &evm_ops,
10571 +diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
10572 +index f451acd..135d903 100644
10573 +--- a/sound/soc/pxa/corgi.c
10574 ++++ b/sound/soc/pxa/corgi.c
10575 +@@ -305,10 +305,10 @@ static int corgi_wm8731_init(struct snd_soc_pcm_runtime *rtd)
10576 + static struct snd_soc_dai_link corgi_dai = {
10577 + .name = "WM8731",
10578 + .stream_name = "WM8731",
10579 +- .cpu_dai_name = "pxa-is2-dai",
10580 ++ .cpu_dai_name = "pxa2xx-i2s",
10581 + .codec_dai_name = "wm8731-hifi",
10582 + .platform_name = "pxa-pcm-audio",
10583 +- .codec_name = "wm8731-codec-0.001a",
10584 ++ .codec_name = "wm8731-codec-0.001b",
10585 + .init = corgi_wm8731_init,
10586 + .ops = &corgi_ops,
10587 + };
10588 +diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
10589 +index 84edd03..0eebe11 100644
10590 +--- a/sound/soc/pxa/poodle.c
10591 ++++ b/sound/soc/pxa/poodle.c
10592 +@@ -274,7 +274,7 @@ static struct snd_soc_dai_link poodle_dai = {
10593 + .cpu_dai_name = "pxa2xx-i2s",
10594 + .codec_dai_name = "wm8731-hifi",
10595 + .platform_name = "pxa-pcm-audio",
10596 +- .codec_name = "wm8731-codec.0-001a",
10597 ++ .codec_name = "wm8731-codec.0-001b",
10598 + .init = poodle_wm8731_init,
10599 + .ops = &poodle_ops,
10600 + };
10601 +diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c
10602 +index 0b30d7d..7604471 100644
10603 +--- a/sound/soc/pxa/spitz.c
10604 ++++ b/sound/soc/pxa/spitz.c
10605 +@@ -313,10 +313,10 @@ static int spitz_wm8750_init(struct snd_soc_pcm_runtime *rtd)
10606 + static struct snd_soc_dai_link spitz_dai = {
10607 + .name = "wm8750",
10608 + .stream_name = "WM8750",
10609 +- .cpu_dai_name = "pxa-is2",
10610 ++ .cpu_dai_name = "pxa2xx-i2s",
10611 + .codec_dai_name = "wm8750-hifi",
10612 + .platform_name = "pxa-pcm-audio",
10613 +- .codec_name = "wm8750-codec.0-001a",
10614 ++ .codec_name = "wm8750-codec.0-001b",
10615 + .init = spitz_wm8750_init,
10616 + .ops = &spitz_ops,
10617 + };
10618 +diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
10619 +index 85b7d54..757ba59 100644
10620 +--- a/sound/soc/soc-core.c
10621 ++++ b/sound/soc/soc-core.c
10622 +@@ -1623,7 +1623,7 @@ static void snd_soc_instantiate_card(struct snd_soc_card *card)
10623 + if (ret < 0) {
10624 + printk(KERN_ERR "asoc: failed to register AC97 %s\n", card->name);
10625 + while (--i >= 0)
10626 +- soc_unregister_ac97_dai_link(&card->rtd[i]);
10627 ++ soc_unregister_ac97_dai_link(card->rtd[i].codec);
10628 + goto probe_dai_err;
10629 + }
10630 + }
10631 +diff --git a/sound/usb/midi.c b/sound/usb/midi.c
10632 +index 25bce7e..b1f9ec9 100644
10633 +--- a/sound/usb/midi.c
10634 ++++ b/sound/usb/midi.c
10635 +@@ -850,8 +850,8 @@ static void snd_usbmidi_us122l_output(struct snd_usb_midi_out_endpoint *ep,
10636 + return;
10637 + }
10638 +
10639 +- memset(urb->transfer_buffer + count, 0xFD, 9 - count);
10640 +- urb->transfer_buffer_length = count;
10641 ++ memset(urb->transfer_buffer + count, 0xFD, ep->max_transfer - count);
10642 ++ urb->transfer_buffer_length = ep->max_transfer;
10643 + }
10644 +
10645 + static struct usb_protocol_ops snd_usbmidi_122l_ops = {
10646 +@@ -1295,6 +1295,13 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi* umidi,
10647 + case USB_ID(0x1a86, 0x752d): /* QinHeng CH345 "USB2.0-MIDI" */
10648 + ep->max_transfer = 4;
10649 + break;
10650 ++ /*
10651 ++ * Some devices only work with 9 bytes packet size:
10652 ++ */
10653 ++ case USB_ID(0x0644, 0x800E): /* Tascam US-122L */
10654 ++ case USB_ID(0x0644, 0x800F): /* Tascam US-144 */
10655 ++ ep->max_transfer = 9;
10656 ++ break;
10657 + }
10658 + for (i = 0; i < OUTPUT_URBS; ++i) {
10659 + buffer = usb_alloc_coherent(umidi->dev,
10660 +diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
10661 +index 6ef68e4..084e6fc 100644
10662 +--- a/sound/usb/usx2y/us122l.c
10663 ++++ b/sound/usb/usx2y/us122l.c
10664 +@@ -273,29 +273,26 @@ static unsigned int usb_stream_hwdep_poll(struct snd_hwdep *hw,
10665 + struct file *file, poll_table *wait)
10666 + {
10667 + struct us122l *us122l = hw->private_data;
10668 +- struct usb_stream *s = us122l->sk.s;
10669 + unsigned *polled;
10670 + unsigned int mask;
10671 +
10672 + poll_wait(file, &us122l->sk.sleep, wait);
10673 +
10674 +- switch (s->state) {
10675 +- case usb_stream_ready:
10676 +- if (us122l->first == file)
10677 +- polled = &s->periods_polled;
10678 +- else
10679 +- polled = &us122l->second_periods_polled;
10680 +- if (*polled != s->periods_done) {
10681 +- *polled = s->periods_done;
10682 +- mask = POLLIN | POLLOUT | POLLWRNORM;
10683 +- break;
10684 ++ mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
10685 ++ if (mutex_trylock(&us122l->mutex)) {
10686 ++ struct usb_stream *s = us122l->sk.s;
10687 ++ if (s && s->state == usb_stream_ready) {
10688 ++ if (us122l->first == file)
10689 ++ polled = &s->periods_polled;
10690 ++ else
10691 ++ polled = &us122l->second_periods_polled;
10692 ++ if (*polled != s->periods_done) {
10693 ++ *polled = s->periods_done;
10694 ++ mask = POLLIN | POLLOUT | POLLWRNORM;
10695 ++ } else
10696 ++ mask = 0;
10697 + }
10698 +- /* Fall through */
10699 +- mask = 0;
10700 +- break;
10701 +- default:
10702 +- mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
10703 +- break;
10704 ++ mutex_unlock(&us122l->mutex);
10705 + }
10706 + return mask;
10707 + }
10708 +@@ -381,6 +378,7 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
10709 + {
10710 + struct usb_stream_config *cfg;
10711 + struct us122l *us122l = hw->private_data;
10712 ++ struct usb_stream *s;
10713 + unsigned min_period_frames;
10714 + int err = 0;
10715 + bool high_speed;
10716 +@@ -426,18 +424,18 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
10717 + snd_power_wait(hw->card, SNDRV_CTL_POWER_D0);
10718 +
10719 + mutex_lock(&us122l->mutex);
10720 ++ s = us122l->sk.s;
10721 + if (!us122l->master)
10722 + us122l->master = file;
10723 + else if (us122l->master != file) {
10724 +- if (memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg))) {
10725 ++ if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) {
10726 + err = -EIO;
10727 + goto unlock;
10728 + }
10729 + us122l->slave = file;
10730 + }
10731 +- if (!us122l->sk.s ||
10732 +- memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg)) ||
10733 +- us122l->sk.s->state == usb_stream_xrun) {
10734 ++ if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) ||
10735 ++ s->state == usb_stream_xrun) {
10736 + us122l_stop(us122l);
10737 + if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames))
10738 + err = -EIO;
10739 +@@ -448,6 +446,7 @@ unlock:
10740 + mutex_unlock(&us122l->mutex);
10741 + free:
10742 + kfree(cfg);
10743 ++ wake_up_all(&us122l->sk.sleep);
10744 + return err;
10745 + }
10746 +
10747
10748 Added: genpatches-2.6/trunk/2.6.37/1001_linux-2.6.37.2.patch
10749 ===================================================================
10750 --- genpatches-2.6/trunk/2.6.37/1001_linux-2.6.37.2.patch (rev 0)
10751 +++ genpatches-2.6/trunk/2.6.37/1001_linux-2.6.37.2.patch 2011-02-26 00:27:50 UTC (rev 1874)
10752 @@ -0,0 +1,2336 @@
10753 +diff --git a/Documentation/hwmon/jc42 b/Documentation/hwmon/jc42
10754 +index 0e76ef1..a22ecf4 100644
10755 +--- a/Documentation/hwmon/jc42
10756 ++++ b/Documentation/hwmon/jc42
10757 +@@ -51,7 +51,8 @@ Supported chips:
10758 + * JEDEC JC 42.4 compliant temperature sensor chips
10759 + Prefix: 'jc42'
10760 + Addresses scanned: I2C 0x18 - 0x1f
10761 +- Datasheet: -
10762 ++ Datasheet:
10763 ++ http://www.jedec.org/sites/default/files/docs/4_01_04R19.pdf
10764 +
10765 + Author:
10766 + Guenter Roeck <guenter.roeck@××××××××.com>
10767 +@@ -60,7 +61,11 @@ Author:
10768 + Description
10769 + -----------
10770 +
10771 +-This driver implements support for JEDEC JC 42.4 compliant temperature sensors.
10772 ++This driver implements support for JEDEC JC 42.4 compliant temperature sensors,
10773 ++which are used on many DDR3 memory modules for mobile devices and servers. Some
10774 ++systems use the sensor to prevent memory overheating by automatically throttling
10775 ++the memory controller.
10776 ++
10777 + The driver auto-detects the chips listed above, but can be manually instantiated
10778 + to support other JC 42.4 compliant chips.
10779 +
10780 +@@ -81,15 +86,19 @@ limits. The chip supports only a single register to configure the hysteresis,
10781 + which applies to all limits. This register can be written by writing into
10782 + temp1_crit_hyst. Other hysteresis attributes are read-only.
10783 +
10784 ++If the BIOS has configured the sensor for automatic temperature management, it
10785 ++is likely that it has locked the registers, i.e., that the temperature limits
10786 ++cannot be changed.
10787 ++
10788 + Sysfs entries
10789 + -------------
10790 +
10791 + temp1_input Temperature (RO)
10792 +-temp1_min Minimum temperature (RW)
10793 +-temp1_max Maximum temperature (RW)
10794 +-temp1_crit Critical high temperature (RW)
10795 ++temp1_min Minimum temperature (RO or RW)
10796 ++temp1_max Maximum temperature (RO or RW)
10797 ++temp1_crit Critical high temperature (RO or RW)
10798 +
10799 +-temp1_crit_hyst Critical hysteresis temperature (RW)
10800 ++temp1_crit_hyst Critical hysteresis temperature (RO or RW)
10801 + temp1_max_hyst Maximum hysteresis temperature (RO)
10802 +
10803 + temp1_min_alarm Temperature low alarm
10804 +diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp
10805 +index 6526eee..d2b56a4 100644
10806 +--- a/Documentation/hwmon/k10temp
10807 ++++ b/Documentation/hwmon/k10temp
10808 +@@ -9,6 +9,8 @@ Supported chips:
10809 + Socket S1G3: Athlon II, Sempron, Turion II
10810 + * AMD Family 11h processors:
10811 + Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
10812 ++* AMD Family 12h processors: "Llano"
10813 ++* AMD Family 14h processors: "Brazos" (C/E/G-Series)
10814 +
10815 + Prefix: 'k10temp'
10816 + Addresses scanned: PCI space
10817 +@@ -17,10 +19,14 @@ Supported chips:
10818 + http://support.amd.com/us/Processor_TechDocs/31116.pdf
10819 + BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors:
10820 + http://support.amd.com/us/Processor_TechDocs/41256.pdf
10821 ++ BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors:
10822 ++ http://support.amd.com/us/Processor_TechDocs/43170.pdf
10823 + Revision Guide for AMD Family 10h Processors:
10824 + http://support.amd.com/us/Processor_TechDocs/41322.pdf
10825 + Revision Guide for AMD Family 11h Processors:
10826 + http://support.amd.com/us/Processor_TechDocs/41788.pdf
10827 ++ Revision Guide for AMD Family 14h Models 00h-0Fh Processors:
10828 ++ http://support.amd.com/us/Processor_TechDocs/47534.pdf
10829 + AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks:
10830 + http://support.amd.com/us/Processor_TechDocs/43373.pdf
10831 + AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet:
10832 +@@ -34,7 +40,7 @@ Description
10833 + -----------
10834 +
10835 + This driver permits reading of the internal temperature sensor of AMD
10836 +-Family 10h and 11h processors.
10837 ++Family 10h/11h/12h/14h processors.
10838 +
10839 + All these processors have a sensor, but on those for Socket F or AM2+,
10840 + the sensor may return inconsistent values (erratum 319). The driver
10841 +diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
10842 +index 907d5a6..abaf844 100644
10843 +--- a/arch/arm/kernel/signal.c
10844 ++++ b/arch/arm/kernel/signal.c
10845 +@@ -474,7 +474,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
10846 + unsigned long handler = (unsigned long)ka->sa.sa_handler;
10847 + unsigned long retcode;
10848 + int thumb = 0;
10849 +- unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
10850 ++ unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
10851 ++
10852 ++ cpsr |= PSR_ENDSTATE;
10853 +
10854 + /*
10855 + * Maybe we need to deliver a 32-bit signal to a 26-bit task.
10856 +diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
10857 +index cead889..223ba96 100644
10858 +--- a/arch/arm/kernel/vmlinux.lds.S
10859 ++++ b/arch/arm/kernel/vmlinux.lds.S
10860 +@@ -21,6 +21,12 @@
10861 + #define ARM_CPU_KEEP(x)
10862 + #endif
10863 +
10864 ++#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
10865 ++#define ARM_EXIT_KEEP(x) x
10866 ++#else
10867 ++#define ARM_EXIT_KEEP(x)
10868 ++#endif
10869 ++
10870 + OUTPUT_ARCH(arm)
10871 + ENTRY(stext)
10872 +
10873 +@@ -43,6 +49,7 @@ SECTIONS
10874 + _sinittext = .;
10875 + HEAD_TEXT
10876 + INIT_TEXT
10877 ++ ARM_EXIT_KEEP(EXIT_TEXT)
10878 + _einittext = .;
10879 + ARM_CPU_DISCARD(PROC_INFO)
10880 + __arch_info_begin = .;
10881 +@@ -67,6 +74,7 @@ SECTIONS
10882 + #ifndef CONFIG_XIP_KERNEL
10883 + __init_begin = _stext;
10884 + INIT_DATA
10885 ++ ARM_EXIT_KEEP(EXIT_DATA)
10886 + #endif
10887 + }
10888 +
10889 +@@ -161,6 +169,7 @@ SECTIONS
10890 + . = ALIGN(PAGE_SIZE);
10891 + __init_begin = .;
10892 + INIT_DATA
10893 ++ ARM_EXIT_KEEP(EXIT_DATA)
10894 + . = ALIGN(PAGE_SIZE);
10895 + __init_end = .;
10896 + #endif
10897 +diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
10898 +index 27692d0..cfb7607 100644
10899 +--- a/arch/arm/mach-sa1100/simpad.c
10900 ++++ b/arch/arm/mach-sa1100/simpad.c
10901 +@@ -166,9 +166,6 @@ static void __init simpad_map_io(void)
10902 + PCFR = 0;
10903 + PSDR = 0;
10904 +
10905 +- sa11x0_register_mtd(&simpad_flash_data, simpad_flash_resources,
10906 +- ARRAY_SIZE(simpad_flash_resources));
10907 +- sa11x0_register_mcp(&simpad_mcp_data);
10908 + }
10909 +
10910 + static void simpad_power_off(void)
10911 +@@ -216,6 +213,10 @@ static int __init simpad_init(void)
10912 +
10913 + pm_power_off = simpad_power_off;
10914 +
10915 ++ sa11x0_register_mtd(&simpad_flash_data, simpad_flash_resources,
10916 ++ ARRAY_SIZE(simpad_flash_resources));
10917 ++ sa11x0_register_mcp(&simpad_mcp_data);
10918 ++
10919 + ret = platform_add_devices(devices, ARRAY_SIZE(devices));
10920 + if(ret)
10921 + printk(KERN_WARNING "simpad: Unable to register mq200 framebuffer device");
10922 +diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
10923 +index 8d6f871..b7cbfab 100644
10924 +--- a/arch/s390/include/asm/processor.h
10925 ++++ b/arch/s390/include/asm/processor.h
10926 +@@ -148,11 +148,6 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
10927 + */
10928 + extern unsigned long thread_saved_pc(struct task_struct *t);
10929 +
10930 +-/*
10931 +- * Print register of task into buffer. Used in fs/proc/array.c.
10932 +- */
10933 +-extern void task_show_regs(struct seq_file *m, struct task_struct *task);
10934 +-
10935 + extern void show_code(struct pt_regs *regs);
10936 +
10937 + unsigned long get_wchan(struct task_struct *p);
10938 +diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
10939 +index 7064082..5aa4183 100644
10940 +--- a/arch/s390/kernel/traps.c
10941 ++++ b/arch/s390/kernel/traps.c
10942 +@@ -237,43 +237,6 @@ void show_regs(struct pt_regs *regs)
10943 + show_last_breaking_event(regs);
10944 + }
10945 +
10946 +-/* This is called from fs/proc/array.c */
10947 +-void task_show_regs(struct seq_file *m, struct task_struct *task)
10948 +-{
10949 +- struct pt_regs *regs;
10950 +-
10951 +- regs = task_pt_regs(task);
10952 +- seq_printf(m, "task: %p, ksp: %p\n",
10953 +- task, (void *)task->thread.ksp);
10954 +- seq_printf(m, "User PSW : %p %p\n",
10955 +- (void *) regs->psw.mask, (void *)regs->psw.addr);
10956 +-
10957 +- seq_printf(m, "User GPRS: " FOURLONG,
10958 +- regs->gprs[0], regs->gprs[1],
10959 +- regs->gprs[2], regs->gprs[3]);
10960 +- seq_printf(m, " " FOURLONG,
10961 +- regs->gprs[4], regs->gprs[5],
10962 +- regs->gprs[6], regs->gprs[7]);
10963 +- seq_printf(m, " " FOURLONG,
10964 +- regs->gprs[8], regs->gprs[9],
10965 +- regs->gprs[10], regs->gprs[11]);
10966 +- seq_printf(m, " " FOURLONG,
10967 +- regs->gprs[12], regs->gprs[13],
10968 +- regs->gprs[14], regs->gprs[15]);
10969 +- seq_printf(m, "User ACRS: %08x %08x %08x %08x\n",
10970 +- task->thread.acrs[0], task->thread.acrs[1],
10971 +- task->thread.acrs[2], task->thread.acrs[3]);
10972 +- seq_printf(m, " %08x %08x %08x %08x\n",
10973 +- task->thread.acrs[4], task->thread.acrs[5],
10974 +- task->thread.acrs[6], task->thread.acrs[7]);
10975 +- seq_printf(m, " %08x %08x %08x %08x\n",
10976 +- task->thread.acrs[8], task->thread.acrs[9],
10977 +- task->thread.acrs[10], task->thread.acrs[11]);
10978 +- seq_printf(m, " %08x %08x %08x %08x\n",
10979 +- task->thread.acrs[12], task->thread.acrs[13],
10980 +- task->thread.acrs[14], task->thread.acrs[15]);
10981 +-}
10982 +-
10983 + static DEFINE_SPINLOCK(die_lock);
10984 +
10985 + void die(const char * str, struct pt_regs * regs, long err)
10986 +diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
10987 +index 4fab24d..4564c8e 100644
10988 +--- a/arch/x86/include/asm/cpu.h
10989 ++++ b/arch/x86/include/asm/cpu.h
10990 +@@ -32,5 +32,6 @@ extern void arch_unregister_cpu(int);
10991 +
10992 + DECLARE_PER_CPU(int, cpu_state);
10993 +
10994 ++int mwait_usable(const struct cpuinfo_x86 *);
10995 +
10996 + #endif /* _ASM_X86_CPU_H */
10997 +diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
10998 +index 92543c7..5181673 100644
10999 +--- a/arch/x86/kernel/apb_timer.c
11000 ++++ b/arch/x86/kernel/apb_timer.c
11001 +@@ -313,10 +313,12 @@ static void apbt_setup_irq(struct apbt_dev *adev)
11002 + if (adev->irq == 0)
11003 + return;
11004 +
11005 ++ irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
11006 ++ irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
11007 ++ /* APB timer irqs are set up as mp_irqs, timer is edge type */
11008 ++ __set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
11009 ++
11010 + if (system_state == SYSTEM_BOOTING) {
11011 +- irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
11012 +- /* APB timer irqs are set up as mp_irqs, timer is edge type */
11013 +- __set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
11014 + if (request_irq(adev->irq, apbt_interrupt_handler,
11015 + IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
11016 + adev->name, adev)) {
11017 +diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
11018 +index 96656f2..5206bb9 100644
11019 +--- a/arch/x86/kernel/irq_32.c
11020 ++++ b/arch/x86/kernel/irq_32.c
11021 +@@ -129,8 +129,7 @@ void __cpuinit irq_ctx_init(int cpu)
11022 + irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
11023 + THREAD_FLAGS,
11024 + THREAD_ORDER));
11025 +- irqctx->tinfo.task = NULL;
11026 +- irqctx->tinfo.exec_domain = NULL;
11027 ++ memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
11028 + irqctx->tinfo.cpu = cpu;
11029 + irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
11030 + irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
11031 +@@ -140,10 +139,8 @@ void __cpuinit irq_ctx_init(int cpu)
11032 + irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
11033 + THREAD_FLAGS,
11034 + THREAD_ORDER));
11035 +- irqctx->tinfo.task = NULL;
11036 +- irqctx->tinfo.exec_domain = NULL;
11037 ++ memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
11038 + irqctx->tinfo.cpu = cpu;
11039 +- irqctx->tinfo.preempt_count = 0;
11040 + irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
11041 +
11042 + per_cpu(softirq_ctx, cpu) = irqctx;
11043 +diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
11044 +index 2502aaf..12d4bf1 100644
11045 +--- a/arch/x86/kernel/process.c
11046 ++++ b/arch/x86/kernel/process.c
11047 +@@ -14,6 +14,7 @@
11048 + #include <linux/utsname.h>
11049 + #include <trace/events/power.h>
11050 + #include <linux/hw_breakpoint.h>
11051 ++#include <asm/cpu.h>
11052 + #include <asm/system.h>
11053 + #include <asm/apic.h>
11054 + #include <asm/syscalls.h>
11055 +@@ -509,13 +510,13 @@ static void poll_idle(void)
11056 + *
11057 + * idle=mwait overrides this decision and forces the usage of mwait.
11058 + */
11059 +-static int __cpuinitdata force_mwait;
11060 ++static int force_mwait;
11061 +
11062 + #define MWAIT_INFO 0x05
11063 + #define MWAIT_ECX_EXTENDED_INFO 0x01
11064 + #define MWAIT_EDX_C1 0xf0
11065 +
11066 +-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
11067 ++int mwait_usable(const struct cpuinfo_x86 *c)
11068 + {
11069 + u32 eax, ebx, ecx, edx;
11070 +
11071 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
11072 +index 083e99d..d8fd571 100644
11073 +--- a/arch/x86/kernel/smpboot.c
11074 ++++ b/arch/x86/kernel/smpboot.c
11075 +@@ -1396,8 +1396,9 @@ static inline void mwait_play_dead(void)
11076 + unsigned int highest_subcstate = 0;
11077 + int i;
11078 + void *mwait_ptr;
11079 ++ struct cpuinfo_x86 *c = &current_cpu_data;
11080 +
11081 +- if (!cpu_has(&current_cpu_data, X86_FEATURE_MWAIT))
11082 ++ if (!(cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)))
11083 + return;
11084 + if (!cpu_has(&current_cpu_data, X86_FEATURE_CLFLSH))
11085 + return;
11086 +diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
11087 +index 44924e5..198df8d 100644
11088 +--- a/arch/x86/xen/mmu.c
11089 ++++ b/arch/x86/xen/mmu.c
11090 +@@ -395,6 +395,18 @@ void __init xen_build_dynamic_phys_to_machine(void)
11091 + p2m_top[topidx] = mid;
11092 + }
11093 +
11094 ++ /*
11095 ++ * As long as the mfn_list has enough entries to completely
11096 ++ * fill a p2m page, pointing into the array is ok. But if
11097 ++ * not the entries beyond the last pfn will be undefined.
11098 ++ */
11099 ++ if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
11100 ++ unsigned long p2midx;
11101 ++
11102 ++ p2midx = max_pfn % P2M_PER_PAGE;
11103 ++ for ( ; p2midx < P2M_PER_PAGE; p2midx++)
11104 ++ mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
11105 ++ }
11106 + p2m_top[topidx][mididx] = &mfn_list[pfn];
11107 + }
11108 + }
11109 +diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
11110 +index f62a50c..3f3ab8f 100644
11111 +--- a/drivers/acpi/wakeup.c
11112 ++++ b/drivers/acpi/wakeup.c
11113 +@@ -84,8 +84,12 @@ int __init acpi_wakeup_device_init(void)
11114 + struct acpi_device *dev = container_of(node,
11115 + struct acpi_device,
11116 + wakeup_list);
11117 +- if (dev->wakeup.flags.always_enabled)
11118 ++ if (dev->wakeup.flags.always_enabled) {
11119 ++ /* Button GPEs are supposed to be always enabled. */
11120 ++ acpi_enable_gpe(dev->wakeup.gpe_device,
11121 ++ dev->wakeup.gpe_number);
11122 + dev->wakeup.state.enabled = 1;
11123 ++ }
11124 + }
11125 + mutex_unlock(&acpi_device_lock);
11126 + return 0;
11127 +diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
11128 +index 55d0466..d5284bc 100644
11129 +--- a/drivers/char/tpm/tpm.c
11130 ++++ b/drivers/char/tpm/tpm.c
11131 +@@ -577,11 +577,9 @@ duration:
11132 + if (rc)
11133 + return;
11134 +
11135 +- if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
11136 +- be32_to_cpu(tpm_cmd.header.out.length)
11137 +- != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
11138 ++ if (be32_to_cpu(tpm_cmd.header.out.return_code)
11139 ++ != 3 * sizeof(u32))
11140 + return;
11141 +-
11142 + duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
11143 + chip->vendor.duration[TPM_SHORT] =
11144 + usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
11145 +@@ -925,18 +923,6 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
11146 + }
11147 + EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
11148 +
11149 +-ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
11150 +- char *buf)
11151 +-{
11152 +- struct tpm_chip *chip = dev_get_drvdata(dev);
11153 +-
11154 +- return sprintf(buf, "%d %d %d\n",
11155 +- jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
11156 +- jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
11157 +- jiffies_to_usecs(chip->vendor.duration[TPM_LONG]));
11158 +-}
11159 +-EXPORT_SYMBOL_GPL(tpm_show_timeouts);
11160 +-
11161 + ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
11162 + const char *buf, size_t count)
11163 + {
11164 +diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
11165 +index ba1779c..792868d 100644
11166 +--- a/drivers/char/tpm/tpm.h
11167 ++++ b/drivers/char/tpm/tpm.h
11168 +@@ -56,8 +56,6 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr,
11169 + char *);
11170 + extern ssize_t tpm_show_temp_deactivated(struct device *,
11171 + struct device_attribute *attr, char *);
11172 +-extern ssize_t tpm_show_timeouts(struct device *,
11173 +- struct device_attribute *attr, char *);
11174 +
11175 + struct tpm_chip;
11176 +
11177 +diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
11178 +index 0d1d38e..dd21df5 100644
11179 +--- a/drivers/char/tpm/tpm_tis.c
11180 ++++ b/drivers/char/tpm/tpm_tis.c
11181 +@@ -376,7 +376,6 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
11182 + NULL);
11183 + static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
11184 + static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
11185 +-static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
11186 +
11187 + static struct attribute *tis_attrs[] = {
11188 + &dev_attr_pubek.attr,
11189 +@@ -386,8 +385,7 @@ static struct attribute *tis_attrs[] = {
11190 + &dev_attr_owned.attr,
11191 + &dev_attr_temp_deactivated.attr,
11192 + &dev_attr_caps.attr,
11193 +- &dev_attr_cancel.attr,
11194 +- &dev_attr_timeouts.attr, NULL,
11195 ++ &dev_attr_cancel.attr, NULL,
11196 + };
11197 +
11198 + static struct attribute_group tis_attr_grp = {
11199 +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
11200 +index c1f13bf..5f08e2c 100644
11201 +--- a/drivers/gpu/drm/i915/intel_dp.c
11202 ++++ b/drivers/gpu/drm/i915/intel_dp.c
11203 +@@ -1343,17 +1343,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
11204 + struct drm_device *dev = intel_dp->base.base.dev;
11205 + struct drm_i915_private *dev_priv = dev->dev_private;
11206 + bool channel_eq = false;
11207 +- int tries;
11208 ++ int tries, cr_tries;
11209 + u32 reg;
11210 + uint32_t DP = intel_dp->DP;
11211 +
11212 + /* channel equalization */
11213 + tries = 0;
11214 ++ cr_tries = 0;
11215 + channel_eq = false;
11216 + for (;;) {
11217 + /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
11218 + uint32_t signal_levels;
11219 +
11220 ++ if (cr_tries > 5) {
11221 ++ DRM_ERROR("failed to train DP, aborting\n");
11222 ++ intel_dp_link_down(intel_dp);
11223 ++ break;
11224 ++ }
11225 ++
11226 + if (IS_GEN6(dev) && is_edp(intel_dp)) {
11227 + signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
11228 + DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
11229 +@@ -1376,14 +1383,26 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
11230 + if (!intel_dp_get_link_status(intel_dp))
11231 + break;
11232 +
11233 ++ /* Make sure clock is still ok */
11234 ++ if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
11235 ++ intel_dp_start_link_train(intel_dp);
11236 ++ cr_tries++;
11237 ++ continue;
11238 ++ }
11239 ++
11240 + if (intel_channel_eq_ok(intel_dp)) {
11241 + channel_eq = true;
11242 + break;
11243 + }
11244 +
11245 +- /* Try 5 times */
11246 +- if (tries > 5)
11247 +- break;
11248 ++ /* Try 5 times, then try clock recovery if that fails */
11249 ++ if (tries > 5) {
11250 ++ intel_dp_link_down(intel_dp);
11251 ++ intel_dp_start_link_train(intel_dp);
11252 ++ tries = 0;
11253 ++ cr_tries++;
11254 ++ continue;
11255 ++ }
11256 +
11257 + /* Compute new intel_dp->train_set as requested by target */
11258 + intel_get_adjust_train(intel_dp);
11259 +diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
11260 +index 920ca27..c0de519 100644
11261 +--- a/drivers/gpu/drm/i915/intel_sdvo.c
11262 ++++ b/drivers/gpu/drm/i915/intel_sdvo.c
11263 +@@ -46,6 +46,7 @@
11264 + SDVO_TV_MASK)
11265 +
11266 + #define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
11267 ++#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
11268 + #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
11269 + #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
11270 +
11271 +@@ -1356,7 +1357,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
11272 + intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
11273 + intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
11274 + }
11275 +- }
11276 ++ } else
11277 ++ status = connector_status_disconnected;
11278 + connector->display_info.raw_edid = NULL;
11279 + kfree(edid);
11280 + }
11281 +@@ -1404,10 +1406,25 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
11282 +
11283 + if ((intel_sdvo_connector->output_flag & response) == 0)
11284 + ret = connector_status_disconnected;
11285 +- else if (response & SDVO_TMDS_MASK)
11286 ++ else if (IS_TMDS(intel_sdvo_connector))
11287 + ret = intel_sdvo_hdmi_sink_detect(connector);
11288 +- else
11289 +- ret = connector_status_connected;
11290 ++ else {
11291 ++ struct edid *edid;
11292 ++
11293 ++ /* if we have an edid check it matches the connection */
11294 ++ edid = intel_sdvo_get_edid(connector);
11295 ++ if (edid == NULL)
11296 ++ edid = intel_sdvo_get_analog_edid(connector);
11297 ++ if (edid != NULL) {
11298 ++ if (edid->input & DRM_EDID_INPUT_DIGITAL)
11299 ++ ret = connector_status_disconnected;
11300 ++ else
11301 ++ ret = connector_status_connected;
11302 ++ connector->display_info.raw_edid = NULL;
11303 ++ kfree(edid);
11304 ++ } else
11305 ++ ret = connector_status_connected;
11306 ++ }
11307 +
11308 + /* May update encoder flag for like clock for SDVO TV, etc.*/
11309 + if (ret == connector_status_connected) {
11310 +@@ -1443,10 +1460,15 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
11311 + edid = intel_sdvo_get_analog_edid(connector);
11312 +
11313 + if (edid != NULL) {
11314 +- if (edid->input & DRM_EDID_INPUT_DIGITAL) {
11315 ++ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
11316 ++ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
11317 ++ bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
11318 ++
11319 ++ if (connector_is_digital == monitor_is_digital) {
11320 + drm_mode_connector_update_edid_property(connector, edid);
11321 + drm_add_edid_modes(connector, edid);
11322 + }
11323 ++
11324 + connector->display_info.raw_edid = NULL;
11325 + kfree(edid);
11326 + }
11327 +diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
11328 +index 72730e9..21d6c29 100644
11329 +--- a/drivers/gpu/drm/nouveau/Kconfig
11330 ++++ b/drivers/gpu/drm/nouveau/Kconfig
11331 +@@ -10,7 +10,7 @@ config DRM_NOUVEAU
11332 + select FB
11333 + select FRAMEBUFFER_CONSOLE if !EMBEDDED
11334 + select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
11335 +- select ACPI_VIDEO if ACPI
11336 ++ select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
11337 + help
11338 + Choose this option for open-source nVidia support.
11339 +
11340 +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
11341 +index 6b7fc4b..ee14b8a 100644
11342 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c
11343 ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
11344 +@@ -514,7 +514,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
11345 + pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
11346 + else
11347 + pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
11348 +-
11349 + }
11350 +
11351 + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
11352 +@@ -531,29 +530,28 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
11353 + dp_clock = dig_connector->dp_clock;
11354 + }
11355 + }
11356 +-/* this might work properly with the new pll algo */
11357 +-#if 0 /* doesn't work properly on some laptops */
11358 ++
11359 + /* use recommended ref_div for ss */
11360 + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
11361 + if (ss_enabled) {
11362 + if (ss->refdiv) {
11363 ++ pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
11364 + pll->flags |= RADEON_PLL_USE_REF_DIV;
11365 + pll->reference_div = ss->refdiv;
11366 ++ if (ASIC_IS_AVIVO(rdev))
11367 ++ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
11368 + }
11369 + }
11370 + }
11371 +-#endif
11372 ++
11373 + if (ASIC_IS_AVIVO(rdev)) {
11374 + /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
11375 + if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
11376 + adjusted_clock = mode->clock * 2;
11377 + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
11378 + pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
11379 +- /* rv515 needs more testing with this option */
11380 +- if (rdev->family != CHIP_RV515) {
11381 +- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
11382 +- pll->flags |= RADEON_PLL_IS_LCD;
11383 +- }
11384 ++ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
11385 ++ pll->flags |= RADEON_PLL_IS_LCD;
11386 + } else {
11387 + if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
11388 + pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
11389 +@@ -652,10 +650,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
11390 + index, (uint32_t *)&args);
11391 + adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
11392 + if (args.v3.sOutput.ucRefDiv) {
11393 ++ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
11394 + pll->flags |= RADEON_PLL_USE_REF_DIV;
11395 + pll->reference_div = args.v3.sOutput.ucRefDiv;
11396 + }
11397 + if (args.v3.sOutput.ucPostDiv) {
11398 ++ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
11399 + pll->flags |= RADEON_PLL_USE_POST_DIV;
11400 + pll->post_div = args.v3.sOutput.ucPostDiv;
11401 + }
11402 +@@ -921,11 +921,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
11403 + /* adjust pixel clock as needed */
11404 + adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
11405 +
11406 +- /* rv515 seems happier with the old algo */
11407 +- if (rdev->family == CHIP_RV515)
11408 +- radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
11409 +- &ref_div, &post_div);
11410 +- else if (ASIC_IS_AVIVO(rdev))
11411 ++ if (ASIC_IS_AVIVO(rdev))
11412 + radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
11413 + &ref_div, &post_div);
11414 + else
11415 +@@ -959,9 +955,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
11416 + }
11417 + }
11418 +
11419 +-static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
11420 +- struct drm_framebuffer *fb,
11421 +- int x, int y, int atomic)
11422 ++static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
11423 ++ struct drm_framebuffer *fb,
11424 ++ int x, int y, int atomic)
11425 + {
11426 + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
11427 + struct drm_device *dev = crtc->dev;
11428 +@@ -1093,12 +1089,6 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
11429 + WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
11430 + (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
11431 +
11432 +- if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
11433 +- WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
11434 +- EVERGREEN_INTERLEAVE_EN);
11435 +- else
11436 +- WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
11437 +-
11438 + if (!atomic && fb && fb != crtc->fb) {
11439 + radeon_fb = to_radeon_framebuffer(fb);
11440 + rbo = radeon_fb->obj->driver_private;
11441 +@@ -1247,12 +1237,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
11442 + WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
11443 + (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
11444 +
11445 +- if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
11446 +- WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
11447 +- AVIVO_D1MODE_INTERLEAVE_EN);
11448 +- else
11449 +- WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
11450 +-
11451 + if (!atomic && fb && fb != crtc->fb) {
11452 + radeon_fb = to_radeon_framebuffer(fb);
11453 + rbo = radeon_fb->obj->driver_private;
11454 +@@ -1276,7 +1260,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
11455 + struct radeon_device *rdev = dev->dev_private;
11456 +
11457 + if (ASIC_IS_DCE4(rdev))
11458 +- return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0);
11459 ++ return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
11460 + else if (ASIC_IS_AVIVO(rdev))
11461 + return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
11462 + else
11463 +@@ -1291,7 +1275,7 @@ int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
11464 + struct radeon_device *rdev = dev->dev_private;
11465 +
11466 + if (ASIC_IS_DCE4(rdev))
11467 +- return evergreen_crtc_do_set_base(crtc, fb, x, y, 1);
11468 ++ return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
11469 + else if (ASIC_IS_AVIVO(rdev))
11470 + return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
11471 + else
11472 +diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
11473 +index 7831e08..153095f 100644
11474 +--- a/drivers/gpu/drm/radeon/r600_cs.c
11475 ++++ b/drivers/gpu/drm/radeon/r600_cs.c
11476 +@@ -295,17 +295,18 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
11477 + }
11478 +
11479 + if (!IS_ALIGNED(pitch, pitch_align)) {
11480 +- dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
11481 +- __func__, __LINE__, pitch);
11482 ++ dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
11483 ++ __func__, __LINE__, pitch, pitch_align, array_mode);
11484 + return -EINVAL;
11485 + }
11486 + if (!IS_ALIGNED(height, height_align)) {
11487 +- dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
11488 +- __func__, __LINE__, height);
11489 ++ dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
11490 ++ __func__, __LINE__, height, height_align, array_mode);
11491 + return -EINVAL;
11492 + }
11493 + if (!IS_ALIGNED(base_offset, base_align)) {
11494 +- dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
11495 ++ dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
11496 ++ base_offset, base_align, array_mode);
11497 + return -EINVAL;
11498 + }
11499 +
11500 +@@ -320,7 +321,10 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
11501 + * broken userspace.
11502 + */
11503 + } else {
11504 +- dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
11505 ++ dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i,
11506 ++ array_mode,
11507 ++ track->cb_color_bo_offset[i], tmp,
11508 ++ radeon_bo_size(track->cb_color_bo[i]));
11509 + return -EINVAL;
11510 + }
11511 + }
11512 +@@ -455,17 +459,18 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
11513 + }
11514 +
11515 + if (!IS_ALIGNED(pitch, pitch_align)) {
11516 +- dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
11517 +- __func__, __LINE__, pitch);
11518 ++ dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
11519 ++ __func__, __LINE__, pitch, pitch_align, array_mode);
11520 + return -EINVAL;
11521 + }
11522 + if (!IS_ALIGNED(height, height_align)) {
11523 +- dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
11524 +- __func__, __LINE__, height);
11525 ++ dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
11526 ++ __func__, __LINE__, height, height_align, array_mode);
11527 + return -EINVAL;
11528 + }
11529 + if (!IS_ALIGNED(base_offset, base_align)) {
11530 +- dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
11531 ++ dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
11532 ++ base_offset, base_align, array_mode);
11533 + return -EINVAL;
11534 + }
11535 +
11536 +@@ -473,9 +478,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
11537 + nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
11538 + tmp = ntiles * bpe * 64 * nviews;
11539 + if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
11540 +- dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n",
11541 +- track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
11542 +- radeon_bo_size(track->db_bo));
11543 ++ dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
11544 ++ array_mode,
11545 ++ track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
11546 ++ radeon_bo_size(track->db_bo));
11547 + return -EINVAL;
11548 + }
11549 + }
11550 +@@ -1227,18 +1233,18 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
11551 + /* XXX check height as well... */
11552 +
11553 + if (!IS_ALIGNED(pitch, pitch_align)) {
11554 +- dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
11555 +- __func__, __LINE__, pitch);
11556 ++ dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
11557 ++ __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
11558 + return -EINVAL;
11559 + }
11560 + if (!IS_ALIGNED(base_offset, base_align)) {
11561 +- dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n",
11562 +- __func__, __LINE__, base_offset);
11563 ++ dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
11564 ++ __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
11565 + return -EINVAL;
11566 + }
11567 + if (!IS_ALIGNED(mip_offset, base_align)) {
11568 +- dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n",
11569 +- __func__, __LINE__, mip_offset);
11570 ++ dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
11571 ++ __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
11572 + return -EINVAL;
11573 + }
11574 +
11575 +diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
11576 +index 137b807..57bee7e 100644
11577 +--- a/drivers/gpu/drm/radeon/radeon_combios.c
11578 ++++ b/drivers/gpu/drm/radeon/radeon_combios.c
11579 +@@ -1503,6 +1503,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
11580 + (rdev->pdev->subsystem_device == 0x4a48)) {
11581 + /* Mac X800 */
11582 + rdev->mode_info.connector_table = CT_MAC_X800;
11583 ++ } else if ((rdev->pdev->device == 0x4150) &&
11584 ++ (rdev->pdev->subsystem_vendor == 0x1002) &&
11585 ++ (rdev->pdev->subsystem_device == 0x4150)) {
11586 ++ /* Mac G5 9600 */
11587 ++ rdev->mode_info.connector_table = CT_MAC_G5_9600;
11588 + } else
11589 + #endif /* CONFIG_PPC_PMAC */
11590 + #ifdef CONFIG_PPC64
11591 +@@ -2021,6 +2026,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
11592 + CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
11593 + &hpd);
11594 + break;
11595 ++ case CT_MAC_G5_9600:
11596 ++ DRM_INFO("Connector Table: %d (mac g5 9600)\n",
11597 ++ rdev->mode_info.connector_table);
11598 ++ /* DVI - tv dac, dvo */
11599 ++ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
11600 ++ hpd.hpd = RADEON_HPD_1; /* ??? */
11601 ++ radeon_add_legacy_encoder(dev,
11602 ++ radeon_get_encoder_enum(dev,
11603 ++ ATOM_DEVICE_DFP2_SUPPORT,
11604 ++ 0),
11605 ++ ATOM_DEVICE_DFP2_SUPPORT);
11606 ++ radeon_add_legacy_encoder(dev,
11607 ++ radeon_get_encoder_enum(dev,
11608 ++ ATOM_DEVICE_CRT2_SUPPORT,
11609 ++ 2),
11610 ++ ATOM_DEVICE_CRT2_SUPPORT);
11611 ++ radeon_add_legacy_connector(dev, 0,
11612 ++ ATOM_DEVICE_DFP2_SUPPORT |
11613 ++ ATOM_DEVICE_CRT2_SUPPORT,
11614 ++ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
11615 ++ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
11616 ++ &hpd);
11617 ++ /* ADC - primary dac, internal tmds */
11618 ++ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
11619 ++ hpd.hpd = RADEON_HPD_2; /* ??? */
11620 ++ radeon_add_legacy_encoder(dev,
11621 ++ radeon_get_encoder_enum(dev,
11622 ++ ATOM_DEVICE_DFP1_SUPPORT,
11623 ++ 0),
11624 ++ ATOM_DEVICE_DFP1_SUPPORT);
11625 ++ radeon_add_legacy_encoder(dev,
11626 ++ radeon_get_encoder_enum(dev,
11627 ++ ATOM_DEVICE_CRT1_SUPPORT,
11628 ++ 1),
11629 ++ ATOM_DEVICE_CRT1_SUPPORT);
11630 ++ radeon_add_legacy_connector(dev, 1,
11631 ++ ATOM_DEVICE_DFP1_SUPPORT |
11632 ++ ATOM_DEVICE_CRT1_SUPPORT,
11633 ++ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
11634 ++ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
11635 ++ &hpd);
11636 ++ break;
11637 + default:
11638 + DRM_INFO("Connector table: %d (invalid)\n",
11639 + rdev->mode_info.connector_table);
11640 +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
11641 +index a26a70d..c7d1fca 100644
11642 +--- a/drivers/gpu/drm/radeon/radeon_display.c
11643 ++++ b/drivers/gpu/drm/radeon/radeon_display.c
11644 +@@ -461,6 +461,11 @@ static void avivo_get_fb_div(struct radeon_pll *pll,
11645 + tmp *= target_clock;
11646 + *fb_div = tmp / pll->reference_freq;
11647 + *frac_fb_div = tmp % pll->reference_freq;
11648 ++
11649 ++ if (*fb_div > pll->max_feedback_div)
11650 ++ *fb_div = pll->max_feedback_div;
11651 ++ else if (*fb_div < pll->min_feedback_div)
11652 ++ *fb_div = pll->min_feedback_div;
11653 + }
11654 +
11655 + static u32 avivo_get_post_div(struct radeon_pll *pll,
11656 +@@ -494,6 +499,11 @@ static u32 avivo_get_post_div(struct radeon_pll *pll,
11657 + post_div--;
11658 + }
11659 +
11660 ++ if (post_div > pll->max_post_div)
11661 ++ post_div = pll->max_post_div;
11662 ++ else if (post_div < pll->min_post_div)
11663 ++ post_div = pll->min_post_div;
11664 ++
11665 + return post_div;
11666 + }
11667 +
11668 +diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
11669 +index 041943d..6a3d063 100644
11670 +--- a/drivers/gpu/drm/radeon/radeon_encoders.c
11671 ++++ b/drivers/gpu/drm/radeon/radeon_encoders.c
11672 +@@ -1465,11 +1465,21 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
11673 + }
11674 +
11675 + /* set scaler clears this on some chips */
11676 +- /* XXX check DCE4 */
11677 +- if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
11678 +- if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
11679 +- WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
11680 +- AVIVO_D1MODE_INTERLEAVE_EN);
11681 ++ if (ASIC_IS_AVIVO(rdev) &&
11682 ++ (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
11683 ++ if (ASIC_IS_DCE4(rdev)) {
11684 ++ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
11685 ++ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
11686 ++ EVERGREEN_INTERLEAVE_EN);
11687 ++ else
11688 ++ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
11689 ++ } else {
11690 ++ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
11691 ++ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
11692 ++ AVIVO_D1MODE_INTERLEAVE_EN);
11693 ++ else
11694 ++ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
11695 ++ }
11696 + }
11697 + }
11698 +
11699 +diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
11700 +index aa22570..2615e51 100644
11701 +--- a/drivers/gpu/drm/radeon/radeon_mode.h
11702 ++++ b/drivers/gpu/drm/radeon/radeon_mode.h
11703 +@@ -209,6 +209,7 @@ enum radeon_connector_table {
11704 + CT_EMAC,
11705 + CT_RN50_POWER,
11706 + CT_MAC_X800,
11707 ++ CT_MAC_G5_9600,
11708 + };
11709 +
11710 + enum radeon_dvo_chip {
11711 +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
11712 +index 1272e4b..e5b2cf1 100644
11713 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c
11714 ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
11715 +@@ -787,9 +787,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
11716 + radeon_mem_types_list[i].show = &radeon_mm_dump_table;
11717 + radeon_mem_types_list[i].driver_features = 0;
11718 + if (i == 0)
11719 +- radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv;
11720 ++ radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
11721 + else
11722 +- radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv;
11723 ++ radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
11724 +
11725 + }
11726 + /* Add ttm page pool to debugfs */
11727 +diff --git a/drivers/hid/hid-cando.c b/drivers/hid/hid-cando.c
11728 +index 5925bdc..343c41b 100644
11729 +--- a/drivers/hid/hid-cando.c
11730 ++++ b/drivers/hid/hid-cando.c
11731 +@@ -236,6 +236,8 @@ static const struct hid_device_id cando_devices[] = {
11732 + { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
11733 + USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
11734 + { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
11735 ++ USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) },
11736 ++ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
11737 + USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
11738 + { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
11739 + USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
11740 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
11741 +index 88cb04e..52cf7ca 100644
11742 +--- a/drivers/hid/hid-core.c
11743 ++++ b/drivers/hid/hid-core.c
11744 +@@ -1287,6 +1287,7 @@ static const struct hid_device_id hid_blacklist[] = {
11745 + { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
11746 + { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
11747 + { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
11748 ++ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) },
11749 + { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
11750 + { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
11751 + { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
11752 +@@ -1390,6 +1391,7 @@ static const struct hid_device_id hid_blacklist[] = {
11753 + { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
11754 + { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
11755 + { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
11756 ++ { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
11757 + { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
11758 + { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
11759 + { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
11760 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
11761 +index 3341baa..f04789d 100644
11762 +--- a/drivers/hid/hid-ids.h
11763 ++++ b/drivers/hid/hid-ids.h
11764 +@@ -135,6 +135,7 @@
11765 +
11766 + #define USB_VENDOR_ID_CANDO 0x2087
11767 + #define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
11768 ++#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1 0x0a02
11769 + #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03
11770 + #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6 0x0f01
11771 +
11772 +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
11773 +index e6dc151..ed732b7 100644
11774 +--- a/drivers/hid/hid-magicmouse.c
11775 ++++ b/drivers/hid/hid-magicmouse.c
11776 +@@ -433,6 +433,11 @@ static int magicmouse_input_mapping(struct hid_device *hdev,
11777 + if (!msc->input)
11778 + msc->input = hi->input;
11779 +
11780 ++ /* Magic Trackpad does not give relative data after switching to MT */
11781 ++ if (hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD &&
11782 ++ field->flags & HID_MAIN_ITEM_RELATIVE)
11783 ++ return -1;
11784 ++
11785 + return 0;
11786 + }
11787 +
11788 +diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c
11789 +index ac5421d..251eaa4 100644
11790 +--- a/drivers/hid/hid-mosart.c
11791 ++++ b/drivers/hid/hid-mosart.c
11792 +@@ -240,6 +240,7 @@ static void mosart_remove(struct hid_device *hdev)
11793 + static const struct hid_device_id mosart_devices[] = {
11794 + { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
11795 + { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
11796 ++ { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
11797 + { }
11798 + };
11799 + MODULE_DEVICE_TABLE(hid, mosart_devices);
11800 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
11801 +index 2c18547..af34376 100644
11802 +--- a/drivers/hid/usbhid/hid-quirks.c
11803 ++++ b/drivers/hid/usbhid/hid-quirks.c
11804 +@@ -35,7 +35,6 @@ static const struct hid_blacklist {
11805 + { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
11806 + { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
11807 + { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
11808 +- { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART, HID_QUIRK_MULTI_INPUT },
11809 + { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
11810 + { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
11811 + { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
11812 +diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
11813 +index a56f6ad..004ff55 100644
11814 +--- a/drivers/hwmon/Kconfig
11815 ++++ b/drivers/hwmon/Kconfig
11816 +@@ -238,13 +238,13 @@ config SENSORS_K8TEMP
11817 + will be called k8temp.
11818 +
11819 + config SENSORS_K10TEMP
11820 +- tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor"
11821 ++ tristate "AMD Family 10h/11h/12h/14h temperature sensor"
11822 + depends on X86 && PCI
11823 + help
11824 + If you say yes here you get support for the temperature
11825 + sensor(s) inside your CPU. Supported are later revisions of
11826 +- the AMD Family 10h and all revisions of the AMD Family 11h
11827 +- microarchitectures.
11828 ++ the AMD Family 10h and all revisions of the AMD Family 11h,
11829 ++ 12h (Llano), and 14h (Brazos) microarchitectures.
11830 +
11831 + This driver can also be built as a module. If so, the module
11832 + will be called k10temp.
11833 +@@ -445,13 +445,14 @@ config SENSORS_JZ4740
11834 + called jz4740-hwmon.
11835 +
11836 + config SENSORS_JC42
11837 +- tristate "JEDEC JC42.4 compliant temperature sensors"
11838 ++ tristate "JEDEC JC42.4 compliant memory module temperature sensors"
11839 + depends on I2C
11840 + help
11841 +- If you say yes here you get support for Jedec JC42.4 compliant
11842 +- temperature sensors. Support will include, but not be limited to,
11843 +- ADT7408, CAT34TS02,, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
11844 +- MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3.
11845 ++ If you say yes here, you get support for JEDEC JC42.4 compliant
11846 ++ temperature sensors, which are used on many DDR3 memory modules for
11847 ++ mobile devices and servers. Support will include, but not be limited
11848 ++ to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
11849 ++ MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3.
11850 +
11851 + This driver can also be built as a module. If so, the module
11852 + will be called jc42.
11853 +@@ -564,7 +565,7 @@ config SENSORS_LM85
11854 + help
11855 + If you say yes here you get support for National Semiconductor LM85
11856 + sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100,
11857 +- EMC6D101 and EMC6D102.
11858 ++ EMC6D101, EMC6D102, and EMC6D103.
11859 +
11860 + This driver can also be built as a module. If so, the module
11861 + will be called lm85.
11862 +diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
11863 +index 340fc78..9349912 100644
11864 +--- a/drivers/hwmon/jc42.c
11865 ++++ b/drivers/hwmon/jc42.c
11866 +@@ -53,6 +53,8 @@ static const unsigned short normal_i2c[] = {
11867 +
11868 + /* Configuration register defines */
11869 + #define JC42_CFG_CRIT_ONLY (1 << 2)
11870 ++#define JC42_CFG_TCRIT_LOCK (1 << 6)
11871 ++#define JC42_CFG_EVENT_LOCK (1 << 7)
11872 + #define JC42_CFG_SHUTDOWN (1 << 8)
11873 + #define JC42_CFG_HYST_SHIFT 9
11874 + #define JC42_CFG_HYST_MASK 0x03
11875 +@@ -332,7 +334,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
11876 + {
11877 + struct i2c_client *client = to_i2c_client(dev);
11878 + struct jc42_data *data = i2c_get_clientdata(client);
11879 +- long val;
11880 ++ unsigned long val;
11881 + int diff, hyst;
11882 + int err;
11883 + int ret = count;
11884 +@@ -380,14 +382,14 @@ static ssize_t show_alarm(struct device *dev,
11885 +
11886 + static DEVICE_ATTR(temp1_input, S_IRUGO,
11887 + show_temp_input, NULL);
11888 +-static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO,
11889 ++static DEVICE_ATTR(temp1_crit, S_IRUGO,
11890 + show_temp_crit, set_temp_crit);
11891 +-static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
11892 ++static DEVICE_ATTR(temp1_min, S_IRUGO,
11893 + show_temp_min, set_temp_min);
11894 +-static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
11895 ++static DEVICE_ATTR(temp1_max, S_IRUGO,
11896 + show_temp_max, set_temp_max);
11897 +
11898 +-static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO,
11899 ++static DEVICE_ATTR(temp1_crit_hyst, S_IRUGO,
11900 + show_temp_crit_hyst, set_temp_crit_hyst);
11901 + static DEVICE_ATTR(temp1_max_hyst, S_IRUGO,
11902 + show_temp_max_hyst, NULL);
11903 +@@ -412,8 +414,31 @@ static struct attribute *jc42_attributes[] = {
11904 + NULL
11905 + };
11906 +
11907 ++static mode_t jc42_attribute_mode(struct kobject *kobj,
11908 ++ struct attribute *attr, int index)
11909 ++{
11910 ++ struct device *dev = container_of(kobj, struct device, kobj);
11911 ++ struct i2c_client *client = to_i2c_client(dev);
11912 ++ struct jc42_data *data = i2c_get_clientdata(client);
11913 ++ unsigned int config = data->config;
11914 ++ bool readonly;
11915 ++
11916 ++ if (attr == &dev_attr_temp1_crit.attr)
11917 ++ readonly = config & JC42_CFG_TCRIT_LOCK;
11918 ++ else if (attr == &dev_attr_temp1_min.attr ||
11919 ++ attr == &dev_attr_temp1_max.attr)
11920 ++ readonly = config & JC42_CFG_EVENT_LOCK;
11921 ++ else if (attr == &dev_attr_temp1_crit_hyst.attr)
11922 ++ readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK);
11923 ++ else
11924 ++ readonly = true;
11925 ++
11926 ++ return S_IRUGO | (readonly ? 0 : S_IWUSR);
11927 ++}
11928 ++
11929 + static const struct attribute_group jc42_group = {
11930 + .attrs = jc42_attributes,
11931 ++ .is_visible = jc42_attribute_mode,
11932 + };
11933 +
11934 + /* Return 0 if detection is successful, -ENODEV otherwise */
11935 +diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
11936 +index da5a240..82bf65a 100644
11937 +--- a/drivers/hwmon/k10temp.c
11938 ++++ b/drivers/hwmon/k10temp.c
11939 +@@ -1,5 +1,5 @@
11940 + /*
11941 +- * k10temp.c - AMD Family 10h/11h processor hardware monitoring
11942 ++ * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring
11943 + *
11944 + * Copyright (c) 2009 Clemens Ladisch <clemens@×××××××.de>
11945 + *
11946 +@@ -25,7 +25,7 @@
11947 + #include <linux/pci.h>
11948 + #include <asm/processor.h>
11949 +
11950 +-MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor");
11951 ++MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor");
11952 + MODULE_AUTHOR("Clemens Ladisch <clemens@×××××××.de>");
11953 + MODULE_LICENSE("GPL");
11954 +
11955 +@@ -208,6 +208,7 @@ static void __devexit k10temp_remove(struct pci_dev *pdev)
11956 + static const struct pci_device_id k10temp_id_table[] = {
11957 + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
11958 + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
11959 ++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
11960 + {}
11961 + };
11962 + MODULE_DEVICE_TABLE(pci, k10temp_id_table);
11963 +diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
11964 +index 1e22984..d2cc286 100644
11965 +--- a/drivers/hwmon/lm85.c
11966 ++++ b/drivers/hwmon/lm85.c
11967 +@@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
11968 + enum chips {
11969 + any_chip, lm85b, lm85c,
11970 + adm1027, adt7463, adt7468,
11971 +- emc6d100, emc6d102
11972 ++ emc6d100, emc6d102, emc6d103
11973 + };
11974 +
11975 + /* The LM85 registers */
11976 +@@ -90,6 +90,9 @@ enum chips {
11977 + #define LM85_VERSTEP_EMC6D100_A0 0x60
11978 + #define LM85_VERSTEP_EMC6D100_A1 0x61
11979 + #define LM85_VERSTEP_EMC6D102 0x65
11980 ++#define LM85_VERSTEP_EMC6D103_A0 0x68
11981 ++#define LM85_VERSTEP_EMC6D103_A1 0x69
11982 ++#define LM85_VERSTEP_EMC6D103S 0x6A /* Also known as EMC6D103:A2 */
11983 +
11984 + #define LM85_REG_CONFIG 0x40
11985 +
11986 +@@ -348,6 +351,7 @@ static const struct i2c_device_id lm85_id[] = {
11987 + { "emc6d100", emc6d100 },
11988 + { "emc6d101", emc6d100 },
11989 + { "emc6d102", emc6d102 },
11990 ++ { "emc6d103", emc6d103 },
11991 + { }
11992 + };
11993 + MODULE_DEVICE_TABLE(i2c, lm85_id);
11994 +@@ -1250,6 +1254,20 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
11995 + case LM85_VERSTEP_EMC6D102:
11996 + type_name = "emc6d102";
11997 + break;
11998 ++ case LM85_VERSTEP_EMC6D103_A0:
11999 ++ case LM85_VERSTEP_EMC6D103_A1:
12000 ++ type_name = "emc6d103";
12001 ++ break;
12002 ++ /*
12003 ++ * Registers apparently missing in EMC6D103S/EMC6D103:A2
12004 ++ * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102
12005 ++ * (according to the data sheets), but used unconditionally
12006 ++ * in the driver: 62[5:7], 6D[0:7], and 6E[0:7].
12007 ++ * So skip EMC6D103S for now.
12008 ++ case LM85_VERSTEP_EMC6D103S:
12009 ++ type_name = "emc6d103s";
12010 ++ break;
12011 ++ */
12012 + }
12013 + } else {
12014 + dev_dbg(&adapter->dev,
12015 +@@ -1283,6 +1301,7 @@ static int lm85_probe(struct i2c_client *client,
12016 + case adt7468:
12017 + case emc6d100:
12018 + case emc6d102:
12019 ++ case emc6d103:
12020 + data->freq_map = adm1027_freq_map;
12021 + break;
12022 + default:
12023 +@@ -1468,7 +1487,7 @@ static struct lm85_data *lm85_update_device(struct device *dev)
12024 + /* More alarm bits */
12025 + data->alarms |= lm85_read_value(client,
12026 + EMC6D100_REG_ALARM3) << 16;
12027 +- } else if (data->type == emc6d102) {
12028 ++ } else if (data->type == emc6d102 || data->type == emc6d103) {
12029 + /* Have to read LSB bits after the MSB ones because
12030 + the reading of the MSB bits has frozen the
12031 + LSBs (backward from the ADM1027).
12032 +diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c
12033 +index 122c728..9fc1dd0 100644
12034 +--- a/drivers/media/dvb/ttpci/av7110_ca.c
12035 ++++ b/drivers/media/dvb/ttpci/av7110_ca.c
12036 +@@ -277,7 +277,7 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
12037 + {
12038 + ca_slot_info_t *info=(ca_slot_info_t *)parg;
12039 +
12040 +- if (info->num > 1)
12041 ++ if (info->num < 0 || info->num > 1)
12042 + return -EINVAL;
12043 + av7110->ci_slot[info->num].num = info->num;
12044 + av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ?
12045 +diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
12046 +index 6cc5d13..4ce10db 100644
12047 +--- a/drivers/media/radio/radio-aimslab.c
12048 ++++ b/drivers/media/radio/radio-aimslab.c
12049 +@@ -31,6 +31,7 @@
12050 + #include <linux/module.h> /* Modules */
12051 + #include <linux/init.h> /* Initdata */
12052 + #include <linux/ioport.h> /* request_region */
12053 ++#include <linux/delay.h> /* msleep */
12054 + #include <linux/videodev2.h> /* kernel radio structs */
12055 + #include <linux/version.h> /* for KERNEL_VERSION MACRO */
12056 + #include <linux/io.h> /* outb, outb_p */
12057 +diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
12058 +index a3856ed..e8deb8e 100644
12059 +--- a/drivers/message/fusion/mptctl.c
12060 ++++ b/drivers/message/fusion/mptctl.c
12061 +@@ -597,6 +597,13 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
12062 + }
12063 +
12064 + static int
12065 ++mptctl_release(struct inode *inode, struct file *filep)
12066 ++{
12067 ++ fasync_helper(-1, filep, 0, &async_queue);
12068 ++ return 0;
12069 ++}
12070 ++
12071 ++static int
12072 + mptctl_fasync(int fd, struct file *filep, int mode)
12073 + {
12074 + MPT_ADAPTER *ioc;
12075 +@@ -2815,6 +2822,7 @@ static const struct file_operations mptctl_fops = {
12076 + .llseek = no_llseek,
12077 + .fasync = mptctl_fasync,
12078 + .unlocked_ioctl = mptctl_ioctl,
12079 ++ .release = mptctl_release,
12080 + #ifdef CONFIG_COMPAT
12081 + .compat_ioctl = compat_mpctl_ioctl,
12082 + #endif
12083 +diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
12084 +index 59b8f53..0d9b82a 100644
12085 +--- a/drivers/message/fusion/mptscsih.c
12086 ++++ b/drivers/message/fusion/mptscsih.c
12087 +@@ -1873,8 +1873,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
12088 + }
12089 +
12090 + out:
12091 +- printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
12092 +- ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
12093 ++ printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n",
12094 ++ ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
12095 ++ SCpnt, SCpnt->serial_number);
12096 +
12097 + return retval;
12098 + }
12099 +@@ -1911,7 +1912,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
12100 +
12101 + vdevice = SCpnt->device->hostdata;
12102 + if (!vdevice || !vdevice->vtarget) {
12103 +- retval = SUCCESS;
12104 ++ retval = 0;
12105 + goto out;
12106 + }
12107 +
12108 +diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
12109 +index 6e533dc..e733f2c 100644
12110 +--- a/drivers/net/can/janz-ican3.c
12111 ++++ b/drivers/net/can/janz-ican3.c
12112 +@@ -1627,7 +1627,7 @@ static ssize_t ican3_sysfs_set_term(struct device *dev,
12113 + return count;
12114 + }
12115 +
12116 +-static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term,
12117 ++static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
12118 + ican3_sysfs_set_term);
12119 +
12120 + static struct attribute *ican3_sysfs_attrs[] = {
12121 +diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
12122 +index 176e525..e7fe35c 100644
12123 +--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
12124 ++++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
12125 +@@ -2727,7 +2727,6 @@ static struct iwl_lib_ops iwl3945_lib = {
12126 + .config_ap = iwl3945_config_ap,
12127 + .manage_ibss_station = iwl3945_manage_ibss_station,
12128 + .recover_from_tx_stall = iwl_bg_monitor_recover,
12129 +- .check_plcp_health = iwl3945_good_plcp_health,
12130 +
12131 + .debugfs_ops = {
12132 + .rx_stats_read = iwl3945_ucode_rx_stats_read,
12133 +diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
12134 +index 63d5042..1539d2c 100644
12135 +--- a/drivers/pci/pci-sysfs.c
12136 ++++ b/drivers/pci/pci-sysfs.c
12137 +@@ -23,6 +23,7 @@
12138 + #include <linux/mm.h>
12139 + #include <linux/fs.h>
12140 + #include <linux/capability.h>
12141 ++#include <linux/security.h>
12142 + #include <linux/pci-aspm.h>
12143 + #include <linux/slab.h>
12144 + #include "pci.h"
12145 +@@ -368,7 +369,7 @@ pci_read_config(struct file *filp, struct kobject *kobj,
12146 + u8 *data = (u8*) buf;
12147 +
12148 + /* Several chips lock up trying to read undefined config space */
12149 +- if (cap_raised(filp->f_cred->cap_effective, CAP_SYS_ADMIN)) {
12150 ++ if (security_capable(filp->f_cred, CAP_SYS_ADMIN) == 0) {
12151 + size = dev->cfg_size;
12152 + } else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
12153 + size = 128;
12154 +diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
12155 +index 0bdda5b..42fbf1a 100644
12156 +--- a/drivers/pcmcia/pcmcia_resource.c
12157 ++++ b/drivers/pcmcia/pcmcia_resource.c
12158 +@@ -518,6 +518,8 @@ int pcmcia_enable_device(struct pcmcia_device *p_dev)
12159 + flags |= CONF_ENABLE_IOCARD;
12160 + if (flags & CONF_ENABLE_IOCARD)
12161 + s->socket.flags |= SS_IOCARD;
12162 ++ if (flags & CONF_ENABLE_ZVCARD)
12163 ++ s->socket.flags |= SS_ZVCARD | SS_IOCARD;
12164 + if (flags & CONF_ENABLE_SPKR) {
12165 + s->socket.flags |= SS_SPKR_ENA;
12166 + status = CCSR_AUDIO_ENA;
12167 +diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
12168 +index c8c6537..196a59e 100644
12169 +--- a/drivers/platform/x86/acer-wmi.c
12170 ++++ b/drivers/platform/x86/acer-wmi.c
12171 +@@ -80,7 +80,7 @@ MODULE_LICENSE("GPL");
12172 + */
12173 + #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB"
12174 + #define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C"
12175 +-#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"
12176 ++#define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"
12177 + #define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A"
12178 +
12179 + MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB");
12180 +@@ -1065,7 +1065,7 @@ static ssize_t set_bool_threeg(struct device *dev,
12181 + return -EINVAL;
12182 + return count;
12183 + }
12184 +-static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg,
12185 ++static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
12186 + set_bool_threeg);
12187 +
12188 + static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
12189 +diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
12190 +index ca05aef..cfab7b1 100644
12191 +--- a/drivers/platform/x86/asus_acpi.c
12192 ++++ b/drivers/platform/x86/asus_acpi.c
12193 +@@ -1081,14 +1081,8 @@ static int asus_hotk_add_fs(struct acpi_device *device)
12194 + struct proc_dir_entry *proc;
12195 + mode_t mode;
12196 +
12197 +- /*
12198 +- * If parameter uid or gid is not changed, keep the default setting for
12199 +- * our proc entries (-rw-rw-rw-) else, it means we care about security,
12200 +- * and then set to -rw-rw----
12201 +- */
12202 +-
12203 + if ((asus_uid == 0) && (asus_gid == 0)) {
12204 +- mode = S_IFREG | S_IRUGO | S_IWUGO;
12205 ++ mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
12206 + } else {
12207 + mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
12208 + printk(KERN_WARNING " asus_uid and asus_gid parameters are "
12209 +diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
12210 +index 1fe0f1f..865ef78 100644
12211 +--- a/drivers/platform/x86/tc1100-wmi.c
12212 ++++ b/drivers/platform/x86/tc1100-wmi.c
12213 +@@ -162,7 +162,7 @@ set_bool_##value(struct device *dev, struct device_attribute *attr, \
12214 + return -EINVAL; \
12215 + return count; \
12216 + } \
12217 +-static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \
12218 ++static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
12219 + show_bool_##value, set_bool_##value);
12220 +
12221 + show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
12222 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
12223 +index a8ec48e..9130699 100644
12224 +--- a/drivers/tty/vt/vt.c
12225 ++++ b/drivers/tty/vt/vt.c
12226 +@@ -3524,7 +3524,7 @@ int register_con_driver(const struct consw *csw, int first, int last)
12227 +
12228 + /* already registered */
12229 + if (con_driver->con == csw)
12230 +- retval = -EINVAL;
12231 ++ retval = -EBUSY;
12232 + }
12233 +
12234 + if (retval)
12235 +@@ -3635,7 +3635,12 @@ int take_over_console(const struct consw *csw, int first, int last, int deflt)
12236 + int err;
12237 +
12238 + err = register_con_driver(csw, first, last);
12239 +-
12240 ++ /* if we get a busy error we still want to bind the console driver
12241 ++ * and return success, as we may have unbound the console driver
12242 ++ * but not unregistered it.
12243 ++ */
12244 ++ if (err == -EBUSY)
12245 ++ err = 0;
12246 + if (!err)
12247 + bind_con_driver(csw, first, last, deflt);
12248 +
12249 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
12250 +index 31edd87..32d0ad2 100644
12251 +--- a/drivers/usb/core/hub.c
12252 ++++ b/drivers/usb/core/hub.c
12253 +@@ -2744,11 +2744,6 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
12254 + udev->ttport = hdev->ttport;
12255 + } else if (udev->speed != USB_SPEED_HIGH
12256 + && hdev->speed == USB_SPEED_HIGH) {
12257 +- if (!hub->tt.hub) {
12258 +- dev_err(&udev->dev, "parent hub has no TT\n");
12259 +- retval = -EINVAL;
12260 +- goto fail;
12261 +- }
12262 + udev->tt = &hub->tt;
12263 + udev->ttport = port1;
12264 + }
12265 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
12266 +index f87552a..866a1d5 100644
12267 +--- a/fs/btrfs/ioctl.c
12268 ++++ b/fs/btrfs/ioctl.c
12269 +@@ -2087,7 +2087,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
12270 + int num_types = 4;
12271 + int alloc_size;
12272 + int ret = 0;
12273 +- int slot_count = 0;
12274 ++ u64 slot_count = 0;
12275 + int i, c;
12276 +
12277 + if (copy_from_user(&space_args,
12278 +@@ -2126,7 +2126,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
12279 + goto out;
12280 + }
12281 +
12282 +- slot_count = min_t(int, space_args.space_slots, slot_count);
12283 ++ slot_count = min_t(u64, space_args.space_slots, slot_count);
12284 +
12285 + alloc_size = sizeof(*dest) * slot_count;
12286 +
12287 +@@ -2146,6 +2146,9 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
12288 + for (i = 0; i < num_types; i++) {
12289 + struct btrfs_space_info *tmp;
12290 +
12291 ++ if (!slot_count)
12292 ++ break;
12293 ++
12294 + info = NULL;
12295 + rcu_read_lock();
12296 + list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
12297 +@@ -2167,7 +2170,10 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
12298 + memcpy(dest, &space, sizeof(space));
12299 + dest++;
12300 + space_args.total_spaces++;
12301 ++ slot_count--;
12302 + }
12303 ++ if (!slot_count)
12304 ++ break;
12305 + }
12306 + up_read(&info->groups_sem);
12307 + }
12308 +diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
12309 +index 9aad47a..23b741d 100644
12310 +--- a/fs/cifs/netmisc.c
12311 ++++ b/fs/cifs/netmisc.c
12312 +@@ -170,7 +170,7 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len)
12313 + {
12314 + int rc, alen, slen;
12315 + const char *pct;
12316 +- char *endp, scope_id[13];
12317 ++ char scope_id[13];
12318 + struct sockaddr_in *s4 = (struct sockaddr_in *) dst;
12319 + struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst;
12320 +
12321 +@@ -197,9 +197,9 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len)
12322 + memcpy(scope_id, pct + 1, slen);
12323 + scope_id[slen] = '\0';
12324 +
12325 +- s6->sin6_scope_id = (u32) simple_strtoul(pct, &endp, 0);
12326 +- if (endp != scope_id + slen)
12327 +- return 0;
12328 ++ rc = strict_strtoul(scope_id, 0,
12329 ++ (unsigned long *)&s6->sin6_scope_id);
12330 ++ rc = (rc == 0) ? 1 : 0;
12331 + }
12332 +
12333 + return rc;
12334 +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
12335 +index 7b01d3f..a91f8d2 100644
12336 +--- a/fs/cifs/sess.c
12337 ++++ b/fs/cifs/sess.c
12338 +@@ -667,13 +667,13 @@ ssetup_ntlmssp_authenticate:
12339 +
12340 + if (type == LANMAN) {
12341 + #ifdef CONFIG_CIFS_WEAK_PW_HASH
12342 +- char lnm_session_key[CIFS_SESS_KEY_SIZE];
12343 ++ char lnm_session_key[CIFS_AUTH_RESP_SIZE];
12344 +
12345 + pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
12346 +
12347 + /* no capabilities flags in old lanman negotiation */
12348 +
12349 +- pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
12350 ++ pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
12351 +
12352 + /* Calculate hash with password and copy into bcc_ptr.
12353 + * Encryption Key (stored as in cryptkey) gets used if the
12354 +@@ -686,8 +686,8 @@ ssetup_ntlmssp_authenticate:
12355 + true : false, lnm_session_key);
12356 +
12357 + ses->flags |= CIFS_SES_LANMAN;
12358 +- memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE);
12359 +- bcc_ptr += CIFS_SESS_KEY_SIZE;
12360 ++ memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
12361 ++ bcc_ptr += CIFS_AUTH_RESP_SIZE;
12362 +
12363 + /* can not sign if LANMAN negotiated so no need
12364 + to calculate signing key? but what if server
12365 +diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
12366 +index 9d1a22d..89edfe3 100644
12367 +--- a/fs/ecryptfs/inode.c
12368 ++++ b/fs/ecryptfs/inode.c
12369 +@@ -1095,6 +1095,8 @@ int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
12370 + rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry),
12371 + ecryptfs_dentry_to_lower(dentry), &lower_stat);
12372 + if (!rc) {
12373 ++ fsstack_copy_attr_all(dentry->d_inode,
12374 ++ ecryptfs_inode_to_lower(dentry->d_inode));
12375 + generic_fillattr(dentry->d_inode, stat);
12376 + stat->blocks = lower_stat.blocks;
12377 + }
12378 +diff --git a/fs/file_table.c b/fs/file_table.c
12379 +index c3dee38..079940a 100644
12380 +--- a/fs/file_table.c
12381 ++++ b/fs/file_table.c
12382 +@@ -125,13 +125,13 @@ struct file *get_empty_filp(void)
12383 + goto fail;
12384 +
12385 + percpu_counter_inc(&nr_files);
12386 ++ f->f_cred = get_cred(cred);
12387 + if (security_file_alloc(f))
12388 + goto fail_sec;
12389 +
12390 + INIT_LIST_HEAD(&f->f_u.fu_list);
12391 + atomic_long_set(&f->f_count, 1);
12392 + rwlock_init(&f->f_owner.lock);
12393 +- f->f_cred = get_cred(cred);
12394 + spin_lock_init(&f->f_lock);
12395 + eventpoll_init_file(f);
12396 + /* f->f_version: 0 */
12397 +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
12398 +index f35a94a..67080e4 100644
12399 +--- a/fs/nfsd/nfs4xdr.c
12400 ++++ b/fs/nfsd/nfs4xdr.c
12401 +@@ -316,8 +316,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
12402 + READ_BUF(dummy32);
12403 + len += (XDR_QUADLEN(dummy32) << 2);
12404 + READMEM(buf, dummy32);
12405 +- if ((host_err = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
12406 +- goto out_nfserr;
12407 ++ if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
12408 ++ return status;
12409 + iattr->ia_valid |= ATTR_UID;
12410 + }
12411 + if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) {
12412 +@@ -327,8 +327,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
12413 + READ_BUF(dummy32);
12414 + len += (XDR_QUADLEN(dummy32) << 2);
12415 + READMEM(buf, dummy32);
12416 +- if ((host_err = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
12417 +- goto out_nfserr;
12418 ++ if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
12419 ++ return status;
12420 + iattr->ia_valid |= ATTR_GID;
12421 + }
12422 + if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) {
12423 +diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
12424 +index 184938f..f1b0951 100644
12425 +--- a/fs/nfsd/vfs.c
12426 ++++ b/fs/nfsd/vfs.c
12427 +@@ -809,7 +809,7 @@ nfsd_get_raparms(dev_t dev, ino_t ino)
12428 + if (ra->p_count == 0)
12429 + frap = rap;
12430 + }
12431 +- depth = nfsdstats.ra_size*11/10;
12432 ++ depth = nfsdstats.ra_size;
12433 + if (!frap) {
12434 + spin_unlock(&rab->pb_lock);
12435 + return NULL;
12436 +diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
12437 +index 68d6a21..11f688b 100644
12438 +--- a/fs/partitions/mac.c
12439 ++++ b/fs/partitions/mac.c
12440 +@@ -29,10 +29,9 @@ static inline void mac_fix_string(char *stg, int len)
12441 +
12442 + int mac_partition(struct parsed_partitions *state)
12443 + {
12444 +- int slot = 1;
12445 + Sector sect;
12446 + unsigned char *data;
12447 +- int blk, blocks_in_map;
12448 ++ int slot, blocks_in_map;
12449 + unsigned secsize;
12450 + #ifdef CONFIG_PPC_PMAC
12451 + int found_root = 0;
12452 +@@ -59,10 +58,14 @@ int mac_partition(struct parsed_partitions *state)
12453 + put_dev_sector(sect);
12454 + return 0; /* not a MacOS disk */
12455 + }
12456 +- strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
12457 + blocks_in_map = be32_to_cpu(part->map_count);
12458 +- for (blk = 1; blk <= blocks_in_map; ++blk) {
12459 +- int pos = blk * secsize;
12460 ++ if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
12461 ++ put_dev_sector(sect);
12462 ++ return 0;
12463 ++ }
12464 ++ strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
12465 ++ for (slot = 1; slot <= blocks_in_map; ++slot) {
12466 ++ int pos = slot * secsize;
12467 + put_dev_sector(sect);
12468 + data = read_part_sector(state, pos/512, &sect);
12469 + if (!data)
12470 +@@ -113,13 +116,11 @@ int mac_partition(struct parsed_partitions *state)
12471 + }
12472 +
12473 + if (goodness > found_root_goodness) {
12474 +- found_root = blk;
12475 ++ found_root = slot;
12476 + found_root_goodness = goodness;
12477 + }
12478 + }
12479 + #endif /* CONFIG_PPC_PMAC */
12480 +-
12481 +- ++slot;
12482 + }
12483 + #ifdef CONFIG_PPC_PMAC
12484 + if (found_root_goodness)
12485 +diff --git a/fs/proc/array.c b/fs/proc/array.c
12486 +index fff6572..3d88fe1 100644
12487 +--- a/fs/proc/array.c
12488 ++++ b/fs/proc/array.c
12489 +@@ -353,9 +353,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
12490 + task_cap(m, task);
12491 + task_cpus_allowed(m, task);
12492 + cpuset_task_status_allowed(m, task);
12493 +-#if defined(CONFIG_S390)
12494 +- task_show_regs(m, task);
12495 +-#endif
12496 + task_context_switch_counts(m, task);
12497 + return 0;
12498 + }
12499 +diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
12500 +index f8e854b..206a281 100644
12501 +--- a/fs/xfs/quota/xfs_qm.c
12502 ++++ b/fs/xfs/quota/xfs_qm.c
12503 +@@ -1863,12 +1863,14 @@ xfs_qm_dqreclaim_one(void)
12504 + xfs_dquot_t *dqpout;
12505 + xfs_dquot_t *dqp;
12506 + int restarts;
12507 ++ int startagain;
12508 +
12509 + restarts = 0;
12510 + dqpout = NULL;
12511 +
12512 + /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
12513 +-startagain:
12514 ++again:
12515 ++ startagain = 0;
12516 + mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
12517 +
12518 + list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
12519 +@@ -1885,13 +1887,10 @@ startagain:
12520 + ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
12521 +
12522 + trace_xfs_dqreclaim_want(dqp);
12523 +-
12524 +- xfs_dqunlock(dqp);
12525 +- mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
12526 +- if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
12527 +- return NULL;
12528 + XQM_STATS_INC(xqmstats.xs_qm_dqwants);
12529 +- goto startagain;
12530 ++ restarts++;
12531 ++ startagain = 1;
12532 ++ goto dqunlock;
12533 + }
12534 +
12535 + /*
12536 +@@ -1906,23 +1905,20 @@ startagain:
12537 + ASSERT(list_empty(&dqp->q_mplist));
12538 + list_del_init(&dqp->q_freelist);
12539 + xfs_Gqm->qm_dqfrlist_cnt--;
12540 +- xfs_dqunlock(dqp);
12541 + dqpout = dqp;
12542 + XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
12543 +- break;
12544 ++ goto dqunlock;
12545 + }
12546 +
12547 + ASSERT(dqp->q_hash);
12548 + ASSERT(!list_empty(&dqp->q_mplist));
12549 +
12550 + /*
12551 +- * Try to grab the flush lock. If this dquot is in the process of
12552 +- * getting flushed to disk, we don't want to reclaim it.
12553 ++ * Try to grab the flush lock. If this dquot is in the process
12554 ++ * of getting flushed to disk, we don't want to reclaim it.
12555 + */
12556 +- if (!xfs_dqflock_nowait(dqp)) {
12557 +- xfs_dqunlock(dqp);
12558 +- continue;
12559 +- }
12560 ++ if (!xfs_dqflock_nowait(dqp))
12561 ++ goto dqunlock;
12562 +
12563 + /*
12564 + * We have the flush lock so we know that this is not in the
12565 +@@ -1944,8 +1940,7 @@ startagain:
12566 + xfs_fs_cmn_err(CE_WARN, mp,
12567 + "xfs_qm_dqreclaim: dquot %p flush failed", dqp);
12568 + }
12569 +- xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
12570 +- continue;
12571 ++ goto dqunlock;
12572 + }
12573 +
12574 + /*
12575 +@@ -1967,13 +1962,8 @@ startagain:
12576 + */
12577 + if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) {
12578 + restarts++;
12579 +- mutex_unlock(&dqp->q_hash->qh_lock);
12580 +- xfs_dqfunlock(dqp);
12581 +- xfs_dqunlock(dqp);
12582 +- mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
12583 +- if (restarts++ >= XFS_QM_RECLAIM_MAX_RESTARTS)
12584 +- return NULL;
12585 +- goto startagain;
12586 ++ startagain = 1;
12587 ++ goto qhunlock;
12588 + }
12589 +
12590 + ASSERT(dqp->q_nrefs == 0);
12591 +@@ -1986,14 +1976,20 @@ startagain:
12592 + xfs_Gqm->qm_dqfrlist_cnt--;
12593 + dqpout = dqp;
12594 + mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
12595 ++qhunlock:
12596 + mutex_unlock(&dqp->q_hash->qh_lock);
12597 + dqfunlock:
12598 + xfs_dqfunlock(dqp);
12599 ++dqunlock:
12600 + xfs_dqunlock(dqp);
12601 + if (dqpout)
12602 + break;
12603 + if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
12604 +- return NULL;
12605 ++ break;
12606 ++ if (startagain) {
12607 ++ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
12608 ++ goto again;
12609 ++ }
12610 + }
12611 + mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
12612 + return dqpout;
12613 +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
12614 +index cb845c1..dd7d4e2 100644
12615 +--- a/include/linux/pci_ids.h
12616 ++++ b/include/linux/pci_ids.h
12617 +@@ -518,6 +518,7 @@
12618 + #define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
12619 + #define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
12620 + #define PCI_DEVICE_ID_AMD_15H_NB_MISC 0x1603
12621 ++#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
12622 + #define PCI_DEVICE_ID_AMD_LANCE 0x2000
12623 + #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
12624 + #define PCI_DEVICE_ID_AMD_SCSI 0x2020
12625 +diff --git a/include/linux/security.h b/include/linux/security.h
12626 +index d47a4c2..b3f2f47 100644
12627 +--- a/include/linux/security.h
12628 ++++ b/include/linux/security.h
12629 +@@ -1664,7 +1664,7 @@ int security_capset(struct cred *new, const struct cred *old,
12630 + const kernel_cap_t *effective,
12631 + const kernel_cap_t *inheritable,
12632 + const kernel_cap_t *permitted);
12633 +-int security_capable(int cap);
12634 ++int security_capable(const struct cred *cred, int cap);
12635 + int security_real_capable(struct task_struct *tsk, int cap);
12636 + int security_real_capable_noaudit(struct task_struct *tsk, int cap);
12637 + int security_sysctl(struct ctl_table *table, int op);
12638 +@@ -1857,9 +1857,9 @@ static inline int security_capset(struct cred *new,
12639 + return cap_capset(new, old, effective, inheritable, permitted);
12640 + }
12641 +
12642 +-static inline int security_capable(int cap)
12643 ++static inline int security_capable(const struct cred *cred, int cap)
12644 + {
12645 +- return cap_capable(current, current_cred(), cap, SECURITY_CAP_AUDIT);
12646 ++ return cap_capable(current, cred, cap, SECURITY_CAP_AUDIT);
12647 + }
12648 +
12649 + static inline int security_real_capable(struct task_struct *tsk, int cap)
12650 +diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h
12651 +index 8479b66..3fd5064 100644
12652 +--- a/include/pcmcia/ds.h
12653 ++++ b/include/pcmcia/ds.h
12654 +@@ -261,6 +261,7 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev);
12655 + #define CONF_ENABLE_ESR 0x0008
12656 + #define CONF_ENABLE_IOCARD 0x0010 /* auto-enabled if IO resources or IRQ
12657 + * (CONF_ENABLE_IRQ) in use */
12658 ++#define CONF_ENABLE_ZVCARD 0x0020
12659 +
12660 + /* flags used by pcmcia_loop_config() autoconfiguration */
12661 + #define CONF_AUTO_CHECK_VCC 0x0100 /* check for matching Vcc? */
12662 +diff --git a/kernel/capability.c b/kernel/capability.c
12663 +index 2f05303..9e9385f 100644
12664 +--- a/kernel/capability.c
12665 ++++ b/kernel/capability.c
12666 +@@ -306,7 +306,7 @@ int capable(int cap)
12667 + BUG();
12668 + }
12669 +
12670 +- if (security_capable(cap) == 0) {
12671 ++ if (security_capable(current_cred(), cap) == 0) {
12672 + current->flags |= PF_SUPERPRIV;
12673 + return 1;
12674 + }
12675 +diff --git a/kernel/cred.c b/kernel/cred.c
12676 +index 6a1aa00..3a9d6dd 100644
12677 +--- a/kernel/cred.c
12678 ++++ b/kernel/cred.c
12679 +@@ -252,13 +252,13 @@ struct cred *cred_alloc_blank(void)
12680 + #endif
12681 +
12682 + atomic_set(&new->usage, 1);
12683 ++#ifdef CONFIG_DEBUG_CREDENTIALS
12684 ++ new->magic = CRED_MAGIC;
12685 ++#endif
12686 +
12687 + if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
12688 + goto error;
12689 +
12690 +-#ifdef CONFIG_DEBUG_CREDENTIALS
12691 +- new->magic = CRED_MAGIC;
12692 +-#endif
12693 + return new;
12694 +
12695 + error:
12696 +@@ -657,6 +657,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
12697 + validate_creds(old);
12698 +
12699 + *new = *old;
12700 ++ atomic_set(&new->usage, 1);
12701 ++ set_cred_subscribers(new, 0);
12702 + get_uid(new->user);
12703 + get_group_info(new->group_info);
12704 +
12705 +@@ -674,8 +676,6 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
12706 + if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
12707 + goto error;
12708 +
12709 +- atomic_set(&new->usage, 1);
12710 +- set_cred_subscribers(new, 0);
12711 + put_cred(old);
12712 + validate_creds(new);
12713 + return new;
12714 +@@ -748,7 +748,11 @@ bool creds_are_invalid(const struct cred *cred)
12715 + if (cred->magic != CRED_MAGIC)
12716 + return true;
12717 + #ifdef CONFIG_SECURITY_SELINUX
12718 +- if (selinux_is_enabled()) {
12719 ++ /*
12720 ++ * cred->security == NULL if security_cred_alloc_blank() or
12721 ++ * security_prepare_creds() returned an error.
12722 ++ */
12723 ++ if (selinux_is_enabled() && cred->security) {
12724 + if ((unsigned long) cred->security < PAGE_SIZE)
12725 + return true;
12726 + if ((*(u32 *)cred->security & 0xffffff00) ==
12727 +diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
12728 +index 4571ae7..99c3bc8 100644
12729 +--- a/kernel/irq/internals.h
12730 ++++ b/kernel/irq/internals.h
12731 +@@ -3,6 +3,12 @@
12732 + */
12733 + #include <linux/irqdesc.h>
12734 +
12735 ++#ifdef CONFIG_SPARSE_IRQ
12736 ++# define IRQ_BITMAP_BITS (NR_IRQS + 8196)
12737 ++#else
12738 ++# define IRQ_BITMAP_BITS NR_IRQS
12739 ++#endif
12740 ++
12741 + extern int noirqdebug;
12742 +
12743 + #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data)
12744 +diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
12745 +index 9988d03..49b8394 100644
12746 +--- a/kernel/irq/irqdesc.c
12747 ++++ b/kernel/irq/irqdesc.c
12748 +@@ -91,7 +91,7 @@ int nr_irqs = NR_IRQS;
12749 + EXPORT_SYMBOL_GPL(nr_irqs);
12750 +
12751 + static DEFINE_MUTEX(sparse_irq_lock);
12752 +-static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
12753 ++static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
12754 +
12755 + #ifdef CONFIG_SPARSE_IRQ
12756 +
12757 +@@ -215,6 +215,15 @@ int __init early_irq_init(void)
12758 + initcnt = arch_probe_nr_irqs();
12759 + printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);
12760 +
12761 ++ if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
12762 ++ nr_irqs = IRQ_BITMAP_BITS;
12763 ++
12764 ++ if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
12765 ++ initcnt = IRQ_BITMAP_BITS;
12766 ++
12767 ++ if (initcnt > nr_irqs)
12768 ++ nr_irqs = initcnt;
12769 ++
12770 + for (i = 0; i < initcnt; i++) {
12771 + desc = alloc_desc(i, node);
12772 + set_bit(i, allocated_irqs);
12773 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
12774 +index 5f92acc5..6f7c114 100644
12775 +--- a/kernel/irq/manage.c
12776 ++++ b/kernel/irq/manage.c
12777 +@@ -1098,7 +1098,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
12778 + if (retval)
12779 + kfree(action);
12780 +
12781 +-#ifdef CONFIG_DEBUG_SHIRQ
12782 ++#ifdef CONFIG_DEBUG_SHIRQ_FIXME
12783 + if (!retval && (irqflags & IRQF_SHARED)) {
12784 + /*
12785 + * It's a shared IRQ -- the driver ought to be prepared for it
12786 +diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
12787 +index 891115a..dc49358 100644
12788 +--- a/kernel/irq/resend.c
12789 ++++ b/kernel/irq/resend.c
12790 +@@ -23,7 +23,7 @@
12791 + #ifdef CONFIG_HARDIRQS_SW_RESEND
12792 +
12793 + /* Bitmap to handle software resend of interrupts: */
12794 +-static DECLARE_BITMAP(irqs_resend, NR_IRQS);
12795 ++static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
12796 +
12797 + /*
12798 + * Run software resends of IRQ's
12799 +diff --git a/kernel/perf_event.c b/kernel/perf_event.c
12800 +index 64668bd..785c66a 100644
12801 +--- a/kernel/perf_event.c
12802 ++++ b/kernel/perf_event.c
12803 +@@ -652,6 +652,10 @@ retry:
12804 + raw_spin_unlock_irq(&ctx->lock);
12805 + }
12806 +
12807 ++#define MAX_INTERRUPTS (~0ULL)
12808 ++
12809 ++static void perf_log_throttle(struct perf_event *event, int enable);
12810 ++
12811 + static int
12812 + event_sched_in(struct perf_event *event,
12813 + struct perf_cpu_context *cpuctx,
12814 +@@ -662,6 +666,17 @@ event_sched_in(struct perf_event *event,
12815 +
12816 + event->state = PERF_EVENT_STATE_ACTIVE;
12817 + event->oncpu = smp_processor_id();
12818 ++
12819 ++ /*
12820 ++ * Unthrottle events, since we scheduled we might have missed several
12821 ++ * ticks already, also for a heavily scheduling task there is little
12822 ++ * guarantee it'll get a tick in a timely manner.
12823 ++ */
12824 ++ if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
12825 ++ perf_log_throttle(event, 1);
12826 ++ event->hw.interrupts = 0;
12827 ++ }
12828 ++
12829 + /*
12830 + * The new state must be visible before we turn it on in the hardware:
12831 + */
12832 +@@ -1469,10 +1484,6 @@ void __perf_event_task_sched_in(struct task_struct *task)
12833 + }
12834 + }
12835 +
12836 +-#define MAX_INTERRUPTS (~0ULL)
12837 +-
12838 +-static void perf_log_throttle(struct perf_event *event, int enable);
12839 +-
12840 + static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
12841 + {
12842 + u64 frequency = event->attr.sample_freq;
12843 +diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
12844 +index 0dac75e..64db648 100644
12845 +--- a/kernel/power/snapshot.c
12846 ++++ b/kernel/power/snapshot.c
12847 +@@ -1519,11 +1519,8 @@ static int
12848 + swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
12849 + unsigned int nr_pages, unsigned int nr_highmem)
12850 + {
12851 +- int error = 0;
12852 +-
12853 + if (nr_highmem > 0) {
12854 +- error = get_highmem_buffer(PG_ANY);
12855 +- if (error)
12856 ++ if (get_highmem_buffer(PG_ANY))
12857 + goto err_out;
12858 + if (nr_highmem > alloc_highmem) {
12859 + nr_highmem -= alloc_highmem;
12860 +@@ -1546,7 +1543,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
12861 +
12862 + err_out:
12863 + swsusp_free();
12864 +- return error;
12865 ++ return -ENOMEM;
12866 + }
12867 +
12868 + asmlinkage int swsusp_save(void)
12869 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
12870 +index 4be7fa5..c95f321 100644
12871 +--- a/kernel/workqueue.c
12872 ++++ b/kernel/workqueue.c
12873 +@@ -79,7 +79,9 @@ enum {
12874 + MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
12875 + IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
12876 +
12877 +- MAYDAY_INITIAL_TIMEOUT = HZ / 100, /* call for help after 10ms */
12878 ++ MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2,
12879 ++ /* call for help after 10ms
12880 ++ (min two ticks) */
12881 + MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
12882 + CREATE_COOLDOWN = HZ, /* time to breath after fail */
12883 + TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */
12884 +@@ -2009,6 +2011,15 @@ repeat:
12885 + move_linked_works(work, scheduled, &n);
12886 +
12887 + process_scheduled_works(rescuer);
12888 ++
12889 ++ /*
12890 ++ * Leave this gcwq. If keep_working() is %true, notify a
12891 ++ * regular worker; otherwise, we end up with 0 concurrency
12892 ++ * and stalling the execution.
12893 ++ */
12894 ++ if (keep_working(gcwq))
12895 ++ wake_up_worker(gcwq);
12896 ++
12897 + spin_unlock_irq(&gcwq->lock);
12898 + }
12899 +
12900 +diff --git a/net/core/dev.c b/net/core/dev.c
12901 +index 0dd54a6..c0d3b5f 100644
12902 +--- a/net/core/dev.c
12903 ++++ b/net/core/dev.c
12904 +@@ -4945,6 +4945,7 @@ static void rollback_registered(struct net_device *dev)
12905 +
12906 + list_add(&dev->unreg_list, &single);
12907 + rollback_registered_many(&single);
12908 ++ list_del(&single);
12909 + }
12910 +
12911 + unsigned long netdev_fix_features(unsigned long features, const char *name)
12912 +@@ -6114,6 +6115,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
12913 + }
12914 + }
12915 + unregister_netdevice_many(&dev_kill_list);
12916 ++ list_del(&dev_kill_list);
12917 + rtnl_unlock();
12918 + }
12919 +
12920 +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
12921 +index b729ace..742a6dc 100644
12922 +--- a/net/netfilter/nf_conntrack_netlink.c
12923 ++++ b/net/netfilter/nf_conntrack_netlink.c
12924 +@@ -642,30 +642,29 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
12925 + struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
12926 + u_int8_t l3proto = nfmsg->nfgen_family;
12927 +
12928 +- rcu_read_lock();
12929 ++ spin_lock_bh(&nf_conntrack_lock);
12930 + last = (struct nf_conn *)cb->args[1];
12931 + for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
12932 + restart:
12933 +- hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
12934 ++ hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
12935 + hnnode) {
12936 + if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
12937 + continue;
12938 + ct = nf_ct_tuplehash_to_ctrack(h);
12939 +- if (!atomic_inc_not_zero(&ct->ct_general.use))
12940 +- continue;
12941 + /* Dump entries of a given L3 protocol number.
12942 + * If it is not specified, ie. l3proto == 0,
12943 + * then dump everything. */
12944 + if (l3proto && nf_ct_l3num(ct) != l3proto)
12945 +- goto releasect;
12946 ++ continue;
12947 + if (cb->args[1]) {
12948 + if (ct != last)
12949 +- goto releasect;
12950 ++ continue;
12951 + cb->args[1] = 0;
12952 + }
12953 + if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
12954 + cb->nlh->nlmsg_seq,
12955 + IPCTNL_MSG_CT_NEW, ct) < 0) {
12956 ++ nf_conntrack_get(&ct->ct_general);
12957 + cb->args[1] = (unsigned long)ct;
12958 + goto out;
12959 + }
12960 +@@ -678,8 +677,6 @@ restart:
12961 + if (acct)
12962 + memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
12963 + }
12964 +-releasect:
12965 +- nf_ct_put(ct);
12966 + }
12967 + if (cb->args[1]) {
12968 + cb->args[1] = 0;
12969 +@@ -687,7 +684,7 @@ releasect:
12970 + }
12971 + }
12972 + out:
12973 +- rcu_read_unlock();
12974 ++ spin_unlock_bh(&nf_conntrack_lock);
12975 + if (last)
12976 + nf_ct_put(last);
12977 +
12978 +diff --git a/security/security.c b/security/security.c
12979 +index e5fb07a..8d57dbb 100644
12980 +--- a/security/security.c
12981 ++++ b/security/security.c
12982 +@@ -154,10 +154,9 @@ int security_capset(struct cred *new, const struct cred *old,
12983 + effective, inheritable, permitted);
12984 + }
12985 +
12986 +-int security_capable(int cap)
12987 ++int security_capable(const struct cred *cred, int cap)
12988 + {
12989 +- return security_ops->capable(current, current_cred(), cap,
12990 +- SECURITY_CAP_AUDIT);
12991 ++ return security_ops->capable(current, cred, cap, SECURITY_CAP_AUDIT);
12992 + }
12993 +
12994 + int security_real_capable(struct task_struct *tsk, int cap)
12995 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
12996 +index 11d5c47..a373ab6 100644
12997 +--- a/security/selinux/hooks.c
12998 ++++ b/security/selinux/hooks.c
12999 +@@ -3198,7 +3198,11 @@ static void selinux_cred_free(struct cred *cred)
13000 + {
13001 + struct task_security_struct *tsec = cred->security;
13002 +
13003 +- BUG_ON((unsigned long) cred->security < PAGE_SIZE);
13004 ++ /*
13005 ++ * cred->security == NULL if security_cred_alloc_blank() or
13006 ++ * security_prepare_creds() returned an error.
13007 ++ */
13008 ++ BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE);
13009 + cred->security = (void *) 0x7UL;
13010 + kfree(tsec);
13011 + }
13012 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
13013 +index a1c4008..5825d18 100644
13014 +--- a/sound/pci/hda/hda_intel.c
13015 ++++ b/sound/pci/hda/hda_intel.c
13016 +@@ -2305,6 +2305,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
13017 + SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
13018 + SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
13019 + SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
13020 ++ SND_PCI_QUIRK(0x1043, 0x8410, "ASUS", POS_FIX_LPIB),
13021 + SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
13022 + SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
13023 + SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB),
13024 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
13025 +index 5667fb9..fc5e027 100644
13026 +--- a/sound/pci/hda/patch_conexant.c
13027 ++++ b/sound/pci/hda/patch_conexant.c
13028 +@@ -3401,7 +3401,7 @@ static void cx_auto_parse_output(struct hda_codec *codec)
13029 + }
13030 + }
13031 + spec->multiout.dac_nids = spec->private_dac_nids;
13032 +- spec->multiout.max_channels = nums * 2;
13033 ++ spec->multiout.max_channels = spec->multiout.num_dacs * 2;
13034 +
13035 + if (cfg->hp_outs > 0)
13036 + spec->auto_mute = 1;
13037 +diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
13038 +index 622b602..8b87e04 100644
13039 +--- a/sound/soc/codecs/wm8903.c
13040 ++++ b/sound/soc/codecs/wm8903.c
13041 +@@ -1479,7 +1479,7 @@ int wm8903_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
13042 + WM8903_MICDET_EINT | WM8903_MICSHRT_EINT,
13043 + irq_mask);
13044 +
13045 +- if (det && shrt) {
13046 ++ if (det || shrt) {
13047 + /* Enable mic detection, this may not have been set through
13048 + * platform data (eg, if the defaults are OK). */
13049 + snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0,
13050 +diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
13051 +index 68b9747..66eabaf 100644
13052 +--- a/sound/usb/caiaq/audio.c
13053 ++++ b/sound/usb/caiaq/audio.c
13054 +@@ -785,7 +785,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev)
13055 + }
13056 +
13057 + dev->pcm->private_data = dev;
13058 +- strcpy(dev->pcm->name, dev->product_name);
13059 ++ strlcpy(dev->pcm->name, dev->product_name, sizeof(dev->pcm->name));
13060 +
13061 + memset(dev->sub_playback, 0, sizeof(dev->sub_playback));
13062 + memset(dev->sub_capture, 0, sizeof(dev->sub_capture));
13063 +diff --git a/sound/usb/caiaq/midi.c b/sound/usb/caiaq/midi.c
13064 +index 2f218c7..a1a4708 100644
13065 +--- a/sound/usb/caiaq/midi.c
13066 ++++ b/sound/usb/caiaq/midi.c
13067 +@@ -136,7 +136,7 @@ int snd_usb_caiaq_midi_init(struct snd_usb_caiaqdev *device)
13068 + if (ret < 0)
13069 + return ret;
13070 +
13071 +- strcpy(rmidi->name, device->product_name);
13072 ++ strlcpy(rmidi->name, device->product_name, sizeof(rmidi->name));
13073 +
13074 + rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX;
13075 + rmidi->private_data = device;
13076 +diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
13077 +index 9bcc38f..b3028eb 100644
13078 +--- a/tools/perf/builtin-timechart.c
13079 ++++ b/tools/perf/builtin-timechart.c
13080 +@@ -502,7 +502,7 @@ static int process_sample_event(event_t *event, struct perf_session *session)
13081 + c_state_start(pe->cpu_id, data.time, pe->value);
13082 +
13083 + if (strcmp(event_str, "power:power_end") == 0)
13084 +- c_state_end(pe->cpu_id, data.time);
13085 ++ c_state_end(data.cpu, data.time);
13086 +
13087 + if (strcmp(event_str, "power:power_frequency") == 0)
13088 + p_state_change(pe->cpu_id, data.time, pe->value);