Gentoo Archives: gentoo-commits

From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1719 - genpatches-2.6/trunk/2.6.34
Date: Mon, 05 Jul 2010 20:47:12
Message-Id: 20100705204645.9E52B2CF3A@corvid.gentoo.org
1 Author: mpagano
2 Date: 2010-07-05 20:46:44 +0000 (Mon, 05 Jul 2010)
3 New Revision: 1719
4
5 Added:
6 genpatches-2.6/trunk/2.6.34/1000_linux-2.6.34.1.patch
7 Removed:
8 genpatches-2.6/trunk/2.6.34/2710_vbfb-section-cleanup.patch
9 Modified:
10 genpatches-2.6/trunk/2.6.34/0000_README
11 Log:
12 Linux 2.6.34.1 and removal of redundant patch
13
14 Modified: genpatches-2.6/trunk/2.6.34/0000_README
15 ===================================================================
16 --- genpatches-2.6/trunk/2.6.34/0000_README 2010-06-25 20:53:10 UTC (rev 1718)
17 +++ genpatches-2.6/trunk/2.6.34/0000_README 2010-07-05 20:46:44 UTC (rev 1719)
18 @@ -39,14 +39,14 @@
19 Individual Patch Descriptions:
20 --------------------------------------------------------------------------
21
22 +Patch: 1000_linux-2.6.34.1.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 2.6.34.1
25 +
26 Patch: 2700_nouveau-acpi-lid-open-undefined-fix.patch
27 From: http://bugs.gentoo.org/show_bug.cgi?id=322001
28 Desc: nouveau: fix acpi_lid_open undefined
29
30 -Patch: 2710_vbfb-section-cleanup.patch
31 -From: http://bugs.gentoo.org/show_bug.cgi?id=323021
32 -Desc: Fix errors issued by modpost in vfb driver
33 -
34 Patch: 2720_vbfb-section-cleanup.patch
35 From: http://lkml.org/lkml/2010/6/25/271
36 Desc: add missing define to prevent re-define errors in vgaarb.h
37
38 Added: genpatches-2.6/trunk/2.6.34/1000_linux-2.6.34.1.patch
39 ===================================================================
40 --- genpatches-2.6/trunk/2.6.34/1000_linux-2.6.34.1.patch (rev 0)
41 +++ genpatches-2.6/trunk/2.6.34/1000_linux-2.6.34.1.patch 2010-07-05 20:46:44 UTC (rev 1719)
42 @@ -0,0 +1,8720 @@
43 +diff --git a/Documentation/hwmon/ltc4245 b/Documentation/hwmon/ltc4245
44 +index 02838a4..86b5880 100644
45 +--- a/Documentation/hwmon/ltc4245
46 ++++ b/Documentation/hwmon/ltc4245
47 +@@ -72,9 +72,7 @@ in6_min_alarm 5v output undervoltage alarm
48 + in7_min_alarm 3v output undervoltage alarm
49 + in8_min_alarm Vee (-12v) output undervoltage alarm
50 +
51 +-in9_input GPIO #1 voltage data
52 +-in10_input GPIO #2 voltage data
53 +-in11_input GPIO #3 voltage data
54 ++in9_input GPIO voltage data
55 +
56 + power1_input 12v power usage (mW)
57 + power2_input 5v power usage (mW)
58 +diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
59 +index a52a27c..6f80665 100644
60 +--- a/arch/arm/common/sa1111.c
61 ++++ b/arch/arm/common/sa1111.c
62 +@@ -951,8 +951,6 @@ static int sa1111_resume(struct platform_device *dev)
63 + if (!save)
64 + return 0;
65 +
66 +- spin_lock_irqsave(&sachip->lock, flags);
67 +-
68 + /*
69 + * Ensure that the SA1111 is still here.
70 + * FIXME: shouldn't do this here.
71 +@@ -969,6 +967,13 @@ static int sa1111_resume(struct platform_device *dev)
72 + * First of all, wake up the chip.
73 + */
74 + sa1111_wake(sachip);
75 ++
76 ++ /*
77 ++ * Only lock for write ops. Also, sa1111_wake must be called with
78 ++ * released spinlock!
79 ++ */
80 ++ spin_lock_irqsave(&sachip->lock, flags);
81 ++
82 + sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0);
83 + sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1);
84 +
85 +diff --git a/arch/arm/mach-mx2/devices.c b/arch/arm/mach-mx2/devices.c
86 +index b91e412..04f36d8 100644
87 +--- a/arch/arm/mach-mx2/devices.c
88 ++++ b/arch/arm/mach-mx2/devices.c
89 +@@ -483,8 +483,8 @@ int __init mxc_register_gpios(void)
90 + #ifdef CONFIG_MACH_MX21
91 + static struct resource mx21_usbhc_resources[] = {
92 + {
93 +- .start = MX21_BASE_ADDR,
94 +- .end = MX21_BASE_ADDR + 0x1FFF,
95 ++ .start = MX21_USBOTG_BASE_ADDR,
96 ++ .end = MX21_USBOTG_BASE_ADDR + SZ_8K - 1,
97 + .flags = IORESOURCE_MEM,
98 + },
99 + {
100 +diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
101 +index 06a90dc..37c8157 100644
102 +--- a/arch/arm/mm/cache-v7.S
103 ++++ b/arch/arm/mm/cache-v7.S
104 +@@ -91,7 +91,11 @@ ENTRY(v7_flush_kern_cache_all)
105 + THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
106 + bl v7_flush_dcache_all
107 + mov r0, #0
108 ++#ifdef CONFIG_SMP
109 ++ mcr p15, 0, r0, c7, c1, 0 @ invalidate I-cache inner shareable
110 ++#else
111 + mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
112 ++#endif
113 + ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
114 + THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
115 + mov pc, lr
116 +diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
117 +index 5eb4fd9..ac163de 100644
118 +--- a/arch/arm/mm/copypage-feroceon.c
119 ++++ b/arch/arm/mm/copypage-feroceon.c
120 +@@ -18,7 +18,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
121 + {
122 + asm("\
123 + stmfd sp!, {r4-r9, lr} \n\
124 +- mov ip, %0 \n\
125 ++ mov ip, %2 \n\
126 + 1: mov lr, r1 \n\
127 + ldmia r1!, {r2 - r9} \n\
128 + pld [lr, #32] \n\
129 +@@ -64,7 +64,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
130 + mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\
131 + ldmfd sp!, {r4-r9, pc}"
132 + :
133 +- : "I" (PAGE_SIZE));
134 ++ : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE));
135 + }
136 +
137 + void feroceon_copy_user_highpage(struct page *to, struct page *from,
138 +diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
139 +index 7c2eb55..cb589cb 100644
140 +--- a/arch/arm/mm/copypage-v4wb.c
141 ++++ b/arch/arm/mm/copypage-v4wb.c
142 +@@ -27,7 +27,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
143 + {
144 + asm("\
145 + stmfd sp!, {r4, lr} @ 2\n\
146 +- mov r2, %0 @ 1\n\
147 ++ mov r2, %2 @ 1\n\
148 + ldmia r1!, {r3, r4, ip, lr} @ 4\n\
149 + 1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\
150 + stmia r0!, {r3, r4, ip, lr} @ 4\n\
151 +@@ -44,7 +44,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
152 + mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\
153 + ldmfd sp!, {r4, pc} @ 3"
154 + :
155 +- : "I" (PAGE_SIZE / 64));
156 ++ : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
157 + }
158 +
159 + void v4wb_copy_user_highpage(struct page *to, struct page *from,
160 +diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
161 +index 172e6a5..30c7d04 100644
162 +--- a/arch/arm/mm/copypage-v4wt.c
163 ++++ b/arch/arm/mm/copypage-v4wt.c
164 +@@ -25,7 +25,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
165 + {
166 + asm("\
167 + stmfd sp!, {r4, lr} @ 2\n\
168 +- mov r2, %0 @ 1\n\
169 ++ mov r2, %2 @ 1\n\
170 + ldmia r1!, {r3, r4, ip, lr} @ 4\n\
171 + 1: stmia r0!, {r3, r4, ip, lr} @ 4\n\
172 + ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\
173 +@@ -40,7 +40,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
174 + mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\
175 + ldmfd sp!, {r4, pc} @ 3"
176 + :
177 +- : "I" (PAGE_SIZE / 64));
178 ++ : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
179 + }
180 +
181 + void v4wt_copy_user_highpage(struct page *to, struct page *from,
182 +diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
183 +index 747ad41..f9cde07 100644
184 +--- a/arch/arm/mm/copypage-xsc3.c
185 ++++ b/arch/arm/mm/copypage-xsc3.c
186 +@@ -34,7 +34,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
187 + {
188 + asm("\
189 + stmfd sp!, {r4, r5, lr} \n\
190 +- mov lr, %0 \n\
191 ++ mov lr, %2 \n\
192 + \n\
193 + pld [r1, #0] \n\
194 + pld [r1, #32] \n\
195 +@@ -67,7 +67,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
196 + \n\
197 + ldmfd sp!, {r4, r5, pc}"
198 + :
199 +- : "I" (PAGE_SIZE / 64 - 1));
200 ++ : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1));
201 + }
202 +
203 + void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
204 +diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
205 +index 9d40c34..8ad75e9 100644
206 +--- a/arch/arm/mm/fault.c
207 ++++ b/arch/arm/mm/fault.c
208 +@@ -393,6 +393,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
209 + if (addr < TASK_SIZE)
210 + return do_page_fault(addr, fsr, regs);
211 +
212 ++ if (user_mode(regs))
213 ++ goto bad_area;
214 ++
215 + index = pgd_index(addr);
216 +
217 + /*
218 +diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
219 +index 0ed29bf..55d07c8 100644
220 +--- a/arch/arm/mm/init.c
221 ++++ b/arch/arm/mm/init.c
222 +@@ -712,10 +712,10 @@ void __init mem_init(void)
223 + void free_initmem(void)
224 + {
225 + #ifdef CONFIG_HAVE_TCM
226 +- extern char *__tcm_start, *__tcm_end;
227 ++ extern char __tcm_start, __tcm_end;
228 +
229 +- totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)),
230 +- __phys_to_pfn(__pa(__tcm_end)),
231 ++ totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
232 ++ __phys_to_pfn(__pa(&__tcm_end)),
233 + "TCM link");
234 + #endif
235 +
236 +diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
237 +index 66dc2d0..d66cead 100644
238 +--- a/arch/arm/vfp/vfphw.S
239 ++++ b/arch/arm/vfp/vfphw.S
240 +@@ -277,7 +277,7 @@ ENTRY(vfp_put_double)
241 + #ifdef CONFIG_VFPv3
242 + @ d16 - d31 registers
243 + .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
244 +-1: mcrr p11, 3, r1, r2, c\dr @ fmdrr r1, r2, d\dr
245 ++1: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr
246 + mov pc, lr
247 + .org 1b + 8
248 + .endr
249 +diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
250 +index 8542bc3..93f6c63 100644
251 +--- a/arch/blackfin/include/asm/cache.h
252 ++++ b/arch/blackfin/include/asm/cache.h
253 +@@ -15,6 +15,8 @@
254 + #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
255 + #define SMP_CACHE_BYTES L1_CACHE_BYTES
256 +
257 ++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
258 ++
259 + #ifdef CONFIG_SMP
260 + #define __cacheline_aligned
261 + #else
262 +diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
263 +index 2797163..7dc0f0f 100644
264 +--- a/arch/frv/include/asm/cache.h
265 ++++ b/arch/frv/include/asm/cache.h
266 +@@ -17,6 +17,8 @@
267 + #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
268 + #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
269 +
270 ++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
271 ++
272 + #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
273 + #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
274 +
275 +diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
276 +index fed3fd3..ecafbe1 100644
277 +--- a/arch/m68k/include/asm/cache.h
278 ++++ b/arch/m68k/include/asm/cache.h
279 +@@ -8,4 +8,6 @@
280 + #define L1_CACHE_SHIFT 4
281 + #define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
282 +
283 ++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
284 ++
285 + #endif
286 +diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h
287 +index e03cfa2..6e2fe28 100644
288 +--- a/arch/mn10300/include/asm/cache.h
289 ++++ b/arch/mn10300/include/asm/cache.h
290 +@@ -21,6 +21,8 @@
291 + #define L1_CACHE_DISPARITY L1_CACHE_NENTRIES * L1_CACHE_BYTES
292 + #endif
293 +
294 ++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
295 ++
296 + /* data cache purge registers
297 + * - read from the register to unconditionally purge that cache line
298 + * - write address & 0xffffff00 to conditionally purge that cache line
299 +diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c
300 +index 3ca1c61..27a7492 100644
301 +--- a/arch/parisc/math-emu/decode_exc.c
302 ++++ b/arch/parisc/math-emu/decode_exc.c
303 +@@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
304 + return SIGNALCODE(SIGFPE, FPE_FLTINV);
305 + case DIVISIONBYZEROEXCEPTION:
306 + update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
307 ++ Clear_excp_register(exception_index);
308 + return SIGNALCODE(SIGFPE, FPE_FLTDIV);
309 + case INEXACTEXCEPTION:
310 + update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
311 +diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
312 +index c09138d..b894721 100644
313 +--- a/arch/powerpc/kernel/asm-offsets.c
314 ++++ b/arch/powerpc/kernel/asm-offsets.c
315 +@@ -447,6 +447,14 @@ int main(void)
316 + DEFINE(PGD_T_LOG2, PGD_T_LOG2);
317 + DEFINE(PTE_T_LOG2, PTE_T_LOG2);
318 + #endif
319 ++#ifdef CONFIG_FSL_BOOKE
320 ++ DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
321 ++ DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
322 ++ DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));
323 ++ DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2));
324 ++ DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3));
325 ++ DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
326 ++#endif
327 +
328 + #ifdef CONFIG_KVM_EXIT_TIMING
329 + DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
330 +diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
331 +index 7255265..edd4a57 100644
332 +--- a/arch/powerpc/kernel/head_fsl_booke.S
333 ++++ b/arch/powerpc/kernel/head_fsl_booke.S
334 +@@ -639,6 +639,13 @@ interrupt_base:
335 + rlwinm r12,r12,0,16,1
336 + mtspr SPRN_MAS1,r12
337 +
338 ++ /* Make up the required permissions for kernel code */
339 ++#ifdef CONFIG_PTE_64BIT
340 ++ li r13,_PAGE_PRESENT | _PAGE_BAP_SX
341 ++ oris r13,r13,_PAGE_ACCESSED@h
342 ++#else
343 ++ li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
344 ++#endif
345 + b 4f
346 +
347 + /* Get the PGD for the current thread */
348 +@@ -646,15 +653,15 @@ interrupt_base:
349 + mfspr r11,SPRN_SPRG_THREAD
350 + lwz r11,PGDIR(r11)
351 +
352 +-4:
353 +- /* Make up the required permissions */
354 ++ /* Make up the required permissions for user code */
355 + #ifdef CONFIG_PTE_64BIT
356 +- li r13,_PAGE_PRESENT | _PAGE_EXEC
357 ++ li r13,_PAGE_PRESENT | _PAGE_BAP_UX
358 + oris r13,r13,_PAGE_ACCESSED@h
359 + #else
360 + li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
361 + #endif
362 +
363 ++4:
364 + FIND_PTE
365 + andc. r13,r13,r11 /* Check permission */
366 +
367 +diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
368 +index 604af29..ecb532b 100644
369 +--- a/arch/powerpc/kvm/book3s.c
370 ++++ b/arch/powerpc/kvm/book3s.c
371 +@@ -922,6 +922,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
372 + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
373 + int i;
374 +
375 ++ vcpu_load(vcpu);
376 ++
377 + sregs->pvr = vcpu->arch.pvr;
378 +
379 + sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
380 +@@ -940,6 +942,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
381 + sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
382 + }
383 + }
384 ++
385 ++ vcpu_put(vcpu);
386 ++
387 + return 0;
388 + }
389 +
390 +@@ -949,6 +954,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
391 + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
392 + int i;
393 +
394 ++ vcpu_load(vcpu);
395 ++
396 + kvmppc_set_pvr(vcpu, sregs->pvr);
397 +
398 + vcpu3s->sdr1 = sregs->u.s.sdr1;
399 +@@ -975,6 +982,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
400 +
401 + /* Flush the MMU after messing with the segments */
402 + kvmppc_mmu_pte_flush(vcpu, 0, 0);
403 ++
404 ++ vcpu_put(vcpu);
405 ++
406 + return 0;
407 + }
408 +
409 +diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
410 +index 2a3a195..df0182a 100644
411 +--- a/arch/powerpc/kvm/booke.c
412 ++++ b/arch/powerpc/kvm/booke.c
413 +@@ -479,6 +479,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
414 + {
415 + int i;
416 +
417 ++ vcpu_load(vcpu);
418 ++
419 + regs->pc = vcpu->arch.pc;
420 + regs->cr = kvmppc_get_cr(vcpu);
421 + regs->ctr = vcpu->arch.ctr;
422 +@@ -499,6 +501,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
423 + for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
424 + regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
425 +
426 ++ vcpu_put(vcpu);
427 ++
428 + return 0;
429 + }
430 +
431 +@@ -506,6 +510,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
432 + {
433 + int i;
434 +
435 ++ vcpu_load(vcpu);
436 ++
437 + vcpu->arch.pc = regs->pc;
438 + kvmppc_set_cr(vcpu, regs->cr);
439 + vcpu->arch.ctr = regs->ctr;
440 +@@ -525,6 +531,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
441 + for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
442 + kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
443 +
444 ++ vcpu_put(vcpu);
445 ++
446 + return 0;
447 + }
448 +
449 +@@ -553,7 +561,12 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
450 + int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
451 + struct kvm_translation *tr)
452 + {
453 +- return kvmppc_core_vcpu_translate(vcpu, tr);
454 ++ int r;
455 ++
456 ++ vcpu_load(vcpu);
457 ++ r = kvmppc_core_vcpu_translate(vcpu, tr);
458 ++ vcpu_put(vcpu);
459 ++ return r;
460 + }
461 +
462 + int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
463 +diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
464 +index 297fcd2..bf36a9d 100644
465 +--- a/arch/powerpc/kvm/powerpc.c
466 ++++ b/arch/powerpc/kvm/powerpc.c
467 +@@ -193,7 +193,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
468 + {
469 + struct kvm_vcpu *vcpu;
470 + vcpu = kvmppc_core_vcpu_create(kvm, id);
471 +- kvmppc_create_vcpu_debugfs(vcpu, id);
472 ++ if (!IS_ERR(vcpu))
473 ++ kvmppc_create_vcpu_debugfs(vcpu, id);
474 + return vcpu;
475 + }
476 +
477 +diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
478 +index 64e2e49..3ac0cd3 100644
479 +--- a/arch/powerpc/lib/string.S
480 ++++ b/arch/powerpc/lib/string.S
481 +@@ -71,7 +71,7 @@ _GLOBAL(strcmp)
482 +
483 + _GLOBAL(strncmp)
484 + PPC_LCMPI r5,0
485 +- beqlr
486 ++ ble- 2f
487 + mtctr r5
488 + addi r5,r3,-1
489 + addi r4,r4,-1
490 +@@ -82,6 +82,8 @@ _GLOBAL(strncmp)
491 + beqlr 1
492 + bdnzt eq,1b
493 + blr
494 ++2: li r3,0
495 ++ blr
496 +
497 + _GLOBAL(strlen)
498 + addi r4,r3,-1
499 +diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
500 +index 1ed6b52..cdc7526 100644
501 +--- a/arch/powerpc/mm/fsl_booke_mmu.c
502 ++++ b/arch/powerpc/mm/fsl_booke_mmu.c
503 +@@ -2,7 +2,7 @@
504 + * Modifications by Kumar Gala (galak@×××××××××××××××.org) to support
505 + * E500 Book E processors.
506 + *
507 +- * Copyright 2004 Freescale Semiconductor, Inc
508 ++ * Copyright 2004,2010 Freescale Semiconductor, Inc.
509 + *
510 + * This file contains the routines for initializing the MMU
511 + * on the 4xx series of chips.
512 +@@ -56,19 +56,13 @@
513 +
514 + unsigned int tlbcam_index;
515 +
516 +-#define NUM_TLBCAMS (64)
517 +
518 + #if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
519 + #error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
520 + #endif
521 +
522 +-struct tlbcam {
523 +- u32 MAS0;
524 +- u32 MAS1;
525 +- unsigned long MAS2;
526 +- u32 MAS3;
527 +- u32 MAS7;
528 +-} TLBCAM[NUM_TLBCAMS];
529 ++#define NUM_TLBCAMS (64)
530 ++struct tlbcam TLBCAM[NUM_TLBCAMS];
531 +
532 + struct tlbcamrange {
533 + unsigned long start;
534 +@@ -109,19 +103,6 @@ unsigned long p_mapped_by_tlbcam(phys_addr_t pa)
535 + return 0;
536 + }
537 +
538 +-void loadcam_entry(int idx)
539 +-{
540 +- mtspr(SPRN_MAS0, TLBCAM[idx].MAS0);
541 +- mtspr(SPRN_MAS1, TLBCAM[idx].MAS1);
542 +- mtspr(SPRN_MAS2, TLBCAM[idx].MAS2);
543 +- mtspr(SPRN_MAS3, TLBCAM[idx].MAS3);
544 +-
545 +- if (mmu_has_feature(MMU_FTR_BIG_PHYS))
546 +- mtspr(SPRN_MAS7, TLBCAM[idx].MAS7);
547 +-
548 +- asm volatile("isync;tlbwe;isync" : : : "memory");
549 +-}
550 +-
551 + /*
552 + * Set up one of the I/D BAT (block address translation) register pairs.
553 + * The parameters are not checked; in particular size must be a power
554 +diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
555 +index d49a775..0591f25 100644
556 +--- a/arch/powerpc/mm/mmu_decl.h
557 ++++ b/arch/powerpc/mm/mmu_decl.h
558 +@@ -149,7 +149,15 @@ extern unsigned long mmu_mapin_ram(unsigned long top);
559 + extern void MMU_init_hw(void);
560 + extern unsigned long mmu_mapin_ram(unsigned long top);
561 + extern void adjust_total_lowmem(void);
562 +-
563 ++extern void loadcam_entry(unsigned int index);
564 ++
565 ++struct tlbcam {
566 ++ u32 MAS0;
567 ++ u32 MAS1;
568 ++ unsigned long MAS2;
569 ++ u32 MAS3;
570 ++ u32 MAS7;
571 ++};
572 + #elif defined(CONFIG_PPC32)
573 + /* anything 32-bit except 4xx or 8xx */
574 + extern void MMU_init_hw(void);
575 +diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
576 +index b9243e7..767b0cf 100644
577 +--- a/arch/powerpc/mm/pgtable_32.c
578 ++++ b/arch/powerpc/mm/pgtable_32.c
579 +@@ -146,6 +146,14 @@ ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
580 + /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
581 + flags &= ~(_PAGE_USER | _PAGE_EXEC);
582 +
583 ++#ifdef _PAGE_BAP_SR
584 ++ /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
585 ++ * which means that we just cleared supervisor access... oops ;-) This
586 ++ * restores it
587 ++ */
588 ++ flags |= _PAGE_BAP_SR;
589 ++#endif
590 ++
591 + return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
592 + }
593 + EXPORT_SYMBOL(ioremap_flags);
594 +diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
595 +index d95679a..d050fc8 100644
596 +--- a/arch/powerpc/mm/pgtable_64.c
597 ++++ b/arch/powerpc/mm/pgtable_64.c
598 +@@ -265,6 +265,14 @@ void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
599 + /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
600 + flags &= ~(_PAGE_USER | _PAGE_EXEC);
601 +
602 ++#ifdef _PAGE_BAP_SR
603 ++ /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
604 ++ * which means that we just cleared supervisor access... oops ;-) This
605 ++ * restores it
606 ++ */
607 ++ flags |= _PAGE_BAP_SR;
608 ++#endif
609 ++
610 + if (ppc_md.ioremap)
611 + return ppc_md.ioremap(addr, size, flags, caller);
612 + return __ioremap_caller(addr, size, flags, caller);
613 +diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
614 +index bbdc5b5..8656ecf 100644
615 +--- a/arch/powerpc/mm/tlb_nohash_low.S
616 ++++ b/arch/powerpc/mm/tlb_nohash_low.S
617 +@@ -271,3 +271,31 @@ _GLOBAL(set_context)
618 + #else
619 + #error Unsupported processor type !
620 + #endif
621 ++
622 ++#if defined(CONFIG_FSL_BOOKE)
623 ++/*
624 ++ * extern void loadcam_entry(unsigned int index)
625 ++ *
626 ++ * Load TLBCAM[index] entry in to the L2 CAM MMU
627 ++ */
628 ++_GLOBAL(loadcam_entry)
629 ++ LOAD_REG_ADDR(r4, TLBCAM)
630 ++ mulli r5,r3,TLBCAM_SIZE
631 ++ add r3,r5,r4
632 ++ lwz r4,TLBCAM_MAS0(r3)
633 ++ mtspr SPRN_MAS0,r4
634 ++ lwz r4,TLBCAM_MAS1(r3)
635 ++ mtspr SPRN_MAS1,r4
636 ++ PPC_LL r4,TLBCAM_MAS2(r3)
637 ++ mtspr SPRN_MAS2,r4
638 ++ lwz r4,TLBCAM_MAS3(r3)
639 ++ mtspr SPRN_MAS3,r4
640 ++BEGIN_MMU_FTR_SECTION
641 ++ lwz r4,TLBCAM_MAS7(r3)
642 ++ mtspr SPRN_MAS7,r4
643 ++END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
644 ++ isync
645 ++ tlbwe
646 ++ isync
647 ++ blr
648 ++#endif
649 +diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
650 +index 2c9e522..7fd90d0 100644
651 +--- a/arch/powerpc/oprofile/op_model_cell.c
652 ++++ b/arch/powerpc/oprofile/op_model_cell.c
653 +@@ -1077,7 +1077,7 @@ static int calculate_lfsr(int n)
654 + index = ENTRIES-1;
655 +
656 + /* make sure index is valid */
657 +- if ((index > ENTRIES) || (index < 0))
658 ++ if ((index >= ENTRIES) || (index < 0))
659 + index = ENTRIES-1;
660 +
661 + return initial_lfsr[index];
662 +diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
663 +index a8e1d5d..b0760d7 100644
664 +--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
665 ++++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
666 +@@ -154,30 +154,6 @@ static void pseries_mach_cpu_die(void)
667 + for(;;);
668 + }
669 +
670 +-static int qcss_tok; /* query-cpu-stopped-state token */
671 +-
672 +-/* Get state of physical CPU.
673 +- * Return codes:
674 +- * 0 - The processor is in the RTAS stopped state
675 +- * 1 - stop-self is in progress
676 +- * 2 - The processor is not in the RTAS stopped state
677 +- * -1 - Hardware Error
678 +- * -2 - Hardware Busy, Try again later.
679 +- */
680 +-static int query_cpu_stopped(unsigned int pcpu)
681 +-{
682 +- int cpu_status, status;
683 +-
684 +- status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
685 +- if (status != 0) {
686 +- printk(KERN_ERR
687 +- "RTAS query-cpu-stopped-state failed: %i\n", status);
688 +- return status;
689 +- }
690 +-
691 +- return cpu_status;
692 +-}
693 +-
694 + static int pseries_cpu_disable(void)
695 + {
696 + int cpu = smp_processor_id();
697 +@@ -224,8 +200,9 @@ static void pseries_cpu_die(unsigned int cpu)
698 + } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
699 +
700 + for (tries = 0; tries < 25; tries++) {
701 +- cpu_status = query_cpu_stopped(pcpu);
702 +- if (cpu_status == 0 || cpu_status == -1)
703 ++ cpu_status = smp_query_cpu_stopped(pcpu);
704 ++ if (cpu_status == QCSS_STOPPED ||
705 ++ cpu_status == QCSS_HARDWARE_ERROR)
706 + break;
707 + cpu_relax();
708 + }
709 +@@ -388,6 +365,7 @@ static int __init pseries_cpu_hotplug_init(void)
710 + struct device_node *np;
711 + const char *typep;
712 + int cpu;
713 ++ int qcss_tok;
714 +
715 + for_each_node_by_name(np, "interrupt-controller") {
716 + typep = of_get_property(np, "compatible", NULL);
717 +diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
718 +index a05f8d4..6c4fd2c 100644
719 +--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
720 ++++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
721 +@@ -4,6 +4,14 @@
722 + #include <asm/hvcall.h>
723 + #include <asm/page.h>
724 +
725 ++/* Get state of physical CPU from query_cpu_stopped */
726 ++int smp_query_cpu_stopped(unsigned int pcpu);
727 ++#define QCSS_STOPPED 0
728 ++#define QCSS_STOPPING 1
729 ++#define QCSS_NOT_STOPPED 2
730 ++#define QCSS_HARDWARE_ERROR -1
731 ++#define QCSS_HARDWARE_BUSY -2
732 ++
733 + static inline long poll_pending(void)
734 + {
735 + return plpar_hcall_norets(H_POLL_PENDING);
736 +diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
737 +index 4e7f89a..8979982 100644
738 +--- a/arch/powerpc/platforms/pseries/smp.c
739 ++++ b/arch/powerpc/platforms/pseries/smp.c
740 +@@ -57,6 +57,28 @@
741 + */
742 + static cpumask_t of_spin_map;
743 +
744 ++/* Query where a cpu is now. Return codes #defined in plpar_wrappers.h */
745 ++int smp_query_cpu_stopped(unsigned int pcpu)
746 ++{
747 ++ int cpu_status, status;
748 ++ int qcss_tok = rtas_token("query-cpu-stopped-state");
749 ++
750 ++ if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
751 ++ printk(KERN_INFO "Firmware doesn't support "
752 ++ "query-cpu-stopped-state\n");
753 ++ return QCSS_HARDWARE_ERROR;
754 ++ }
755 ++
756 ++ status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
757 ++ if (status != 0) {
758 ++ printk(KERN_ERR
759 ++ "RTAS query-cpu-stopped-state failed: %i\n", status);
760 ++ return status;
761 ++ }
762 ++
763 ++ return cpu_status;
764 ++}
765 ++
766 + /**
767 + * smp_startup_cpu() - start the given cpu
768 + *
769 +@@ -82,6 +104,12 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
770 +
771 + pcpu = get_hard_smp_processor_id(lcpu);
772 +
773 ++ /* Check to see if the CPU out of FW already for kexec */
774 ++ if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED){
775 ++ cpu_set(lcpu, of_spin_map);
776 ++ return 1;
777 ++ }
778 ++
779 + /* Fixup atomic count: it exited inside IRQ handler. */
780 + task_thread_info(paca[lcpu].__current)->preempt_count = 0;
781 +
782 +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
783 +index 4929286..ee7c713 100644
784 +--- a/arch/s390/kvm/kvm-s390.c
785 ++++ b/arch/s390/kvm/kvm-s390.c
786 +@@ -341,11 +341,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
787 +
788 + rc = kvm_vcpu_init(vcpu, kvm, id);
789 + if (rc)
790 +- goto out_free_cpu;
791 ++ goto out_free_sie_block;
792 + VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
793 + vcpu->arch.sie_block);
794 +
795 + return vcpu;
796 ++out_free_sie_block:
797 ++ free_page((unsigned long)(vcpu->arch.sie_block));
798 + out_free_cpu:
799 + kfree(vcpu);
800 + out_nomem:
801 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
802 +index 06d9e79..63400e6 100644
803 +--- a/arch/x86/include/asm/kvm_host.h
804 ++++ b/arch/x86/include/asm/kvm_host.h
805 +@@ -180,6 +180,7 @@ union kvm_mmu_page_role {
806 + unsigned invalid:1;
807 + unsigned cr4_pge:1;
808 + unsigned nxe:1;
809 ++ unsigned cr0_wp:1;
810 + };
811 + };
812 +
813 +@@ -541,6 +542,8 @@ struct kvm_x86_ops {
814 + int (*get_lpage_level)(void);
815 + bool (*rdtscp_supported)(void);
816 +
817 ++ void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
818 ++
819 + const struct trace_print_flags *exit_reasons_str;
820 + };
821 +
822 +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
823 +index 4604e6a..d86da72 100644
824 +--- a/arch/x86/include/asm/msr-index.h
825 ++++ b/arch/x86/include/asm/msr-index.h
826 +@@ -199,8 +199,9 @@
827 + #define MSR_IA32_EBL_CR_POWERON 0x0000002a
828 + #define MSR_IA32_FEATURE_CONTROL 0x0000003a
829 +
830 +-#define FEATURE_CONTROL_LOCKED (1<<0)
831 +-#define FEATURE_CONTROL_VMXON_ENABLED (1<<2)
832 ++#define FEATURE_CONTROL_LOCKED (1<<0)
833 ++#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
834 ++#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
835 +
836 + #define MSR_IA32_APICBASE 0x0000001b
837 + #define MSR_IA32_APICBASE_BSP (1<<8)
838 +diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
839 +index f854d89..29e5e6e 100644
840 +--- a/arch/x86/kernel/amd_iommu.c
841 ++++ b/arch/x86/kernel/amd_iommu.c
842 +@@ -1420,6 +1420,7 @@ static int __attach_device(struct device *dev,
843 + struct protection_domain *domain)
844 + {
845 + struct iommu_dev_data *dev_data, *alias_data;
846 ++ int ret;
847 +
848 + dev_data = get_dev_data(dev);
849 + alias_data = get_dev_data(dev_data->alias);
850 +@@ -1431,13 +1432,14 @@ static int __attach_device(struct device *dev,
851 + spin_lock(&domain->lock);
852 +
853 + /* Some sanity checks */
854 ++ ret = -EBUSY;
855 + if (alias_data->domain != NULL &&
856 + alias_data->domain != domain)
857 +- return -EBUSY;
858 ++ goto out_unlock;
859 +
860 + if (dev_data->domain != NULL &&
861 + dev_data->domain != domain)
862 +- return -EBUSY;
863 ++ goto out_unlock;
864 +
865 + /* Do real assignment */
866 + if (dev_data->alias != dev) {
867 +@@ -1453,10 +1455,14 @@ static int __attach_device(struct device *dev,
868 +
869 + atomic_inc(&dev_data->bind);
870 +
871 ++ ret = 0;
872 ++
873 ++out_unlock:
874 ++
875 + /* ready */
876 + spin_unlock(&domain->lock);
877 +
878 +- return 0;
879 ++ return ret;
880 + }
881 +
882 + /*
883 +@@ -2257,10 +2263,6 @@ int __init amd_iommu_init_dma_ops(void)
884 +
885 + iommu_detected = 1;
886 + swiotlb = 0;
887 +-#ifdef CONFIG_GART_IOMMU
888 +- gart_iommu_aperture_disabled = 1;
889 +- gart_iommu_aperture = 0;
890 +-#endif
891 +
892 + /* Make the driver finally visible to the drivers */
893 + dma_ops = &amd_iommu_dma_ops;
894 +diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
895 +index 6360abf..6f8ce75 100644
896 +--- a/arch/x86/kernel/amd_iommu_init.c
897 ++++ b/arch/x86/kernel/amd_iommu_init.c
898 +@@ -286,8 +286,12 @@ static u8 * __init iommu_map_mmio_space(u64 address)
899 + {
900 + u8 *ret;
901 +
902 +- if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
903 ++ if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
904 ++ pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
905 ++ address);
906 ++ pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
907 + return NULL;
908 ++ }
909 +
910 + ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
911 + if (ret != NULL)
912 +@@ -1313,7 +1317,7 @@ static int __init amd_iommu_init(void)
913 + ret = amd_iommu_init_dma_ops();
914 +
915 + if (ret)
916 +- goto free;
917 ++ goto free_disable;
918 +
919 + amd_iommu_init_api();
920 +
921 +@@ -1331,9 +1335,10 @@ static int __init amd_iommu_init(void)
922 + out:
923 + return ret;
924 +
925 +-free:
926 ++free_disable:
927 + disable_iommus();
928 +
929 ++free:
930 + amd_iommu_uninit_devices();
931 +
932 + free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
933 +@@ -1352,6 +1357,15 @@ free:
934 +
935 + free_unity_maps();
936 +
937 ++#ifdef CONFIG_GART_IOMMU
938 ++ /*
939 ++ * We failed to initialize the AMD IOMMU - try fallback to GART
940 ++ * if possible.
941 ++ */
942 ++ gart_iommu_init();
943 ++
944 ++#endif
945 ++
946 + goto out;
947 + }
948 +
949 +diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
950 +index db5bdc8..c5e8b53 100644
951 +--- a/arch/x86/kernel/cpu/perf_event.c
952 ++++ b/arch/x86/kernel/cpu/perf_event.c
953 +@@ -460,8 +460,11 @@ static int __hw_perf_event_init(struct perf_event *event)
954 + if (atomic_read(&active_events) == 0) {
955 + if (!reserve_pmc_hardware())
956 + err = -EBUSY;
957 +- else
958 ++ else {
959 + err = reserve_bts_hardware();
960 ++ if (err)
961 ++ release_pmc_hardware();
962 ++ }
963 + }
964 + if (!err)
965 + atomic_inc(&active_events);
966 +diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
967 +index 03801f2..dfdfe46 100644
968 +--- a/arch/x86/kernel/pvclock.c
969 ++++ b/arch/x86/kernel/pvclock.c
970 +@@ -109,11 +109,14 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
971 + return pv_tsc_khz;
972 + }
973 +
974 ++static atomic64_t last_value = ATOMIC64_INIT(0);
975 ++
976 + cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
977 + {
978 + struct pvclock_shadow_time shadow;
979 + unsigned version;
980 + cycle_t ret, offset;
981 ++ u64 last;
982 +
983 + do {
984 + version = pvclock_get_time_values(&shadow, src);
985 +@@ -123,6 +126,27 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
986 + barrier();
987 + } while (version != src->version);
988 +
989 ++ /*
990 ++ * Assumption here is that last_value, a global accumulator, always goes
991 ++ * forward. If we are less than that, we should not be much smaller.
992 ++ * We assume there is an error marging we're inside, and then the correction
993 ++ * does not sacrifice accuracy.
994 ++ *
995 ++ * For reads: global may have changed between test and return,
996 ++ * but this means someone else updated poked the clock at a later time.
997 ++ * We just need to make sure we are not seeing a backwards event.
998 ++ *
999 ++ * For updates: last_value = ret is not enough, since two vcpus could be
1000 ++ * updating at the same time, and one of them could be slightly behind,
1001 ++ * making the assumption that last_value always go forward fail to hold.
1002 ++ */
1003 ++ last = atomic64_read(&last_value);
1004 ++ do {
1005 ++ if (ret < last)
1006 ++ return last;
1007 ++ last = atomic64_cmpxchg(&last_value, last, ret);
1008 ++ } while (unlikely(last != ret));
1009 ++
1010 + return ret;
1011 + }
1012 +
1013 +diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
1014 +index c4851ef..47ae912 100644
1015 +--- a/arch/x86/kernel/setup.c
1016 ++++ b/arch/x86/kernel/setup.c
1017 +@@ -676,6 +676,17 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
1018 + DMI_MATCH(DMI_BOARD_NAME, "DG45FC"),
1019 + },
1020 + },
1021 ++ /*
1022 ++ * The Dell Inspiron Mini 1012 has DMI_BIOS_VENDOR = "Dell Inc.", so
1023 ++ * match on the product name.
1024 ++ */
1025 ++ {
1026 ++ .callback = dmi_low_memory_corruption,
1027 ++ .ident = "Phoenix BIOS",
1028 ++ .matches = {
1029 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
1030 ++ },
1031 ++ },
1032 + #endif
1033 + {}
1034 + };
1035 +diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
1036 +index 86c9f91..46b8277 100644
1037 +--- a/arch/x86/kernel/tboot.c
1038 ++++ b/arch/x86/kernel/tboot.c
1039 +@@ -46,6 +46,7 @@
1040 +
1041 + /* Global pointer to shared data; NULL means no measured launch. */
1042 + struct tboot *tboot __read_mostly;
1043 ++EXPORT_SYMBOL(tboot);
1044 +
1045 + /* timeout for APs (in secs) to enter wait-for-SIPI state during shutdown */
1046 + #define AP_WAIT_TIMEOUT 1
1047 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
1048 +index 19a8906..62fd8e6 100644
1049 +--- a/arch/x86/kvm/mmu.c
1050 ++++ b/arch/x86/kvm/mmu.c
1051 +@@ -223,7 +223,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
1052 + }
1053 + EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
1054 +
1055 +-static int is_write_protection(struct kvm_vcpu *vcpu)
1056 ++static bool is_write_protection(struct kvm_vcpu *vcpu)
1057 + {
1058 + return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
1059 + }
1060 +@@ -2085,11 +2085,13 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
1061 + direct = 1;
1062 + if (mmu_check_root(vcpu, root_gfn))
1063 + return 1;
1064 ++ spin_lock(&vcpu->kvm->mmu_lock);
1065 + sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
1066 + PT64_ROOT_LEVEL, direct,
1067 + ACC_ALL, NULL);
1068 + root = __pa(sp->spt);
1069 + ++sp->root_count;
1070 ++ spin_unlock(&vcpu->kvm->mmu_lock);
1071 + vcpu->arch.mmu.root_hpa = root;
1072 + return 0;
1073 + }
1074 +@@ -2111,11 +2113,14 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
1075 + root_gfn = 0;
1076 + if (mmu_check_root(vcpu, root_gfn))
1077 + return 1;
1078 ++ spin_lock(&vcpu->kvm->mmu_lock);
1079 + sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1080 + PT32_ROOT_LEVEL, direct,
1081 + ACC_ALL, NULL);
1082 + root = __pa(sp->spt);
1083 + ++sp->root_count;
1084 ++ spin_unlock(&vcpu->kvm->mmu_lock);
1085 ++
1086 + vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
1087 + }
1088 + vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
1089 +@@ -2439,6 +2444,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
1090 + r = paging32_init_context(vcpu);
1091 +
1092 + vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
1093 ++ vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
1094 +
1095 + return r;
1096 + }
1097 +@@ -2478,7 +2484,9 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
1098 + goto out;
1099 + spin_lock(&vcpu->kvm->mmu_lock);
1100 + kvm_mmu_free_some_pages(vcpu);
1101 ++ spin_unlock(&vcpu->kvm->mmu_lock);
1102 + r = mmu_alloc_roots(vcpu);
1103 ++ spin_lock(&vcpu->kvm->mmu_lock);
1104 + mmu_sync_roots(vcpu);
1105 + spin_unlock(&vcpu->kvm->mmu_lock);
1106 + if (r)
1107 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1108 +index 737361f..1185b55 100644
1109 +--- a/arch/x86/kvm/svm.c
1110 ++++ b/arch/x86/kvm/svm.c
1111 +@@ -129,6 +129,7 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu);
1112 + static void svm_complete_interrupts(struct vcpu_svm *svm);
1113 +
1114 + static int nested_svm_exit_handled(struct vcpu_svm *svm);
1115 ++static int nested_svm_intercept(struct vcpu_svm *svm);
1116 + static int nested_svm_vmexit(struct vcpu_svm *svm);
1117 + static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
1118 + bool has_error_code, u32 error_code);
1119 +@@ -1384,6 +1385,8 @@ static int nested_svm_check_permissions(struct vcpu_svm *svm)
1120 + static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
1121 + bool has_error_code, u32 error_code)
1122 + {
1123 ++ int vmexit;
1124 ++
1125 + if (!is_nested(svm))
1126 + return 0;
1127 +
1128 +@@ -1392,19 +1395,24 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
1129 + svm->vmcb->control.exit_info_1 = error_code;
1130 + svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
1131 +
1132 +- return nested_svm_exit_handled(svm);
1133 ++ vmexit = nested_svm_intercept(svm);
1134 ++ if (vmexit == NESTED_EXIT_DONE)
1135 ++ svm->nested.exit_required = true;
1136 ++
1137 ++ return vmexit;
1138 + }
1139 +
1140 +-static inline int nested_svm_intr(struct vcpu_svm *svm)
1141 ++/* This function returns true if it is save to enable the irq window */
1142 ++static inline bool nested_svm_intr(struct vcpu_svm *svm)
1143 + {
1144 + if (!is_nested(svm))
1145 +- return 0;
1146 ++ return true;
1147 +
1148 + if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
1149 +- return 0;
1150 ++ return true;
1151 +
1152 + if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
1153 +- return 0;
1154 ++ return false;
1155 +
1156 + svm->vmcb->control.exit_code = SVM_EXIT_INTR;
1157 +
1158 +@@ -1417,13 +1425,13 @@ static inline int nested_svm_intr(struct vcpu_svm *svm)
1159 + */
1160 + svm->nested.exit_required = true;
1161 + trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
1162 +- return 1;
1163 ++ return false;
1164 + }
1165 +
1166 +- return 0;
1167 ++ return true;
1168 + }
1169 +
1170 +-static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
1171 ++static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
1172 + {
1173 + struct page *page;
1174 +
1175 +@@ -1431,7 +1439,9 @@ static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
1176 + if (is_error_page(page))
1177 + goto error;
1178 +
1179 +- return kmap_atomic(page, idx);
1180 ++ *_page = page;
1181 ++
1182 ++ return kmap(page);
1183 +
1184 + error:
1185 + kvm_release_page_clean(page);
1186 +@@ -1440,16 +1450,9 @@ error:
1187 + return NULL;
1188 + }
1189 +
1190 +-static void nested_svm_unmap(void *addr, enum km_type idx)
1191 ++static void nested_svm_unmap(struct page *page)
1192 + {
1193 +- struct page *page;
1194 +-
1195 +- if (!addr)
1196 +- return;
1197 +-
1198 +- page = kmap_atomic_to_page(addr);
1199 +-
1200 +- kunmap_atomic(addr, idx);
1201 ++ kunmap(page);
1202 + kvm_release_page_dirty(page);
1203 + }
1204 +
1205 +@@ -1459,16 +1462,11 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
1206 + u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1207 + bool ret = false;
1208 + u32 t0, t1;
1209 +- u8 *msrpm;
1210 ++ u8 val;
1211 +
1212 + if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
1213 + return false;
1214 +
1215 +- msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
1216 +-
1217 +- if (!msrpm)
1218 +- goto out;
1219 +-
1220 + switch (msr) {
1221 + case 0 ... 0x1fff:
1222 + t0 = (msr * 2) % 8;
1223 +@@ -1489,11 +1487,10 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
1224 + goto out;
1225 + }
1226 +
1227 +- ret = msrpm[t1] & ((1 << param) << t0);
1228 ++ if (!kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + t1, &val, 1))
1229 ++ ret = val & ((1 << param) << t0);
1230 +
1231 + out:
1232 +- nested_svm_unmap(msrpm, KM_USER0);
1233 +-
1234 + return ret;
1235 + }
1236 +
1237 +@@ -1525,7 +1522,7 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
1238 + /*
1239 + * If this function returns true, this #vmexit was already handled
1240 + */
1241 +-static int nested_svm_exit_handled(struct vcpu_svm *svm)
1242 ++static int nested_svm_intercept(struct vcpu_svm *svm)
1243 + {
1244 + u32 exit_code = svm->vmcb->control.exit_code;
1245 + int vmexit = NESTED_EXIT_HOST;
1246 +@@ -1571,9 +1568,17 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm)
1247 + }
1248 + }
1249 +
1250 +- if (vmexit == NESTED_EXIT_DONE) {
1251 ++ return vmexit;
1252 ++}
1253 ++
1254 ++static int nested_svm_exit_handled(struct vcpu_svm *svm)
1255 ++{
1256 ++ int vmexit;
1257 ++
1258 ++ vmexit = nested_svm_intercept(svm);
1259 ++
1260 ++ if (vmexit == NESTED_EXIT_DONE)
1261 + nested_svm_vmexit(svm);
1262 +- }
1263 +
1264 + return vmexit;
1265 + }
1266 +@@ -1615,6 +1620,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1267 + struct vmcb *nested_vmcb;
1268 + struct vmcb *hsave = svm->nested.hsave;
1269 + struct vmcb *vmcb = svm->vmcb;
1270 ++ struct page *page;
1271 +
1272 + trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
1273 + vmcb->control.exit_info_1,
1274 +@@ -1622,7 +1628,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1275 + vmcb->control.exit_int_info,
1276 + vmcb->control.exit_int_info_err);
1277 +
1278 +- nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0);
1279 ++ nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
1280 + if (!nested_vmcb)
1281 + return 1;
1282 +
1283 +@@ -1635,9 +1641,13 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1284 + nested_vmcb->save.ds = vmcb->save.ds;
1285 + nested_vmcb->save.gdtr = vmcb->save.gdtr;
1286 + nested_vmcb->save.idtr = vmcb->save.idtr;
1287 ++ nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
1288 + if (npt_enabled)
1289 + nested_vmcb->save.cr3 = vmcb->save.cr3;
1290 ++ else
1291 ++ nested_vmcb->save.cr3 = svm->vcpu.arch.cr3;
1292 + nested_vmcb->save.cr2 = vmcb->save.cr2;
1293 ++ nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
1294 + nested_vmcb->save.rflags = vmcb->save.rflags;
1295 + nested_vmcb->save.rip = vmcb->save.rip;
1296 + nested_vmcb->save.rsp = vmcb->save.rsp;
1297 +@@ -1712,7 +1722,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1298 + /* Exit nested SVM mode */
1299 + svm->nested.vmcb = 0;
1300 +
1301 +- nested_svm_unmap(nested_vmcb, KM_USER0);
1302 ++ nested_svm_unmap(page);
1303 +
1304 + kvm_mmu_reset_context(&svm->vcpu);
1305 + kvm_mmu_load(&svm->vcpu);
1306 +@@ -1723,9 +1733,10 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1307 + static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
1308 + {
1309 + u32 *nested_msrpm;
1310 ++ struct page *page;
1311 + int i;
1312 +
1313 +- nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
1314 ++ nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page);
1315 + if (!nested_msrpm)
1316 + return false;
1317 +
1318 +@@ -1734,7 +1745,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
1319 +
1320 + svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
1321 +
1322 +- nested_svm_unmap(nested_msrpm, KM_USER0);
1323 ++ nested_svm_unmap(page);
1324 +
1325 + return true;
1326 + }
1327 +@@ -1744,8 +1755,9 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
1328 + struct vmcb *nested_vmcb;
1329 + struct vmcb *hsave = svm->nested.hsave;
1330 + struct vmcb *vmcb = svm->vmcb;
1331 ++ struct page *page;
1332 +
1333 +- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
1334 ++ nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
1335 + if (!nested_vmcb)
1336 + return false;
1337 +
1338 +@@ -1819,21 +1831,6 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
1339 + svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
1340 + svm->vmcb->save.cpl = nested_vmcb->save.cpl;
1341 +
1342 +- /* We don't want a nested guest to be more powerful than the guest,
1343 +- so all intercepts are ORed */
1344 +- svm->vmcb->control.intercept_cr_read |=
1345 +- nested_vmcb->control.intercept_cr_read;
1346 +- svm->vmcb->control.intercept_cr_write |=
1347 +- nested_vmcb->control.intercept_cr_write;
1348 +- svm->vmcb->control.intercept_dr_read |=
1349 +- nested_vmcb->control.intercept_dr_read;
1350 +- svm->vmcb->control.intercept_dr_write |=
1351 +- nested_vmcb->control.intercept_dr_write;
1352 +- svm->vmcb->control.intercept_exceptions |=
1353 +- nested_vmcb->control.intercept_exceptions;
1354 +-
1355 +- svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
1356 +-
1357 + svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
1358 +
1359 + /* cache intercepts */
1360 +@@ -1851,13 +1848,38 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
1361 + else
1362 + svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
1363 +
1364 ++ if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
1365 ++ /* We only want the cr8 intercept bits of the guest */
1366 ++ svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
1367 ++ svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
1368 ++ }
1369 ++
1370 ++ /* We don't want to see VMMCALLs from a nested guest */
1371 ++ svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMMCALL);
1372 ++
1373 ++ /* We don't want a nested guest to be more powerful than the guest,
1374 ++ so all intercepts are ORed */
1375 ++ svm->vmcb->control.intercept_cr_read |=
1376 ++ nested_vmcb->control.intercept_cr_read;
1377 ++ svm->vmcb->control.intercept_cr_write |=
1378 ++ nested_vmcb->control.intercept_cr_write;
1379 ++ svm->vmcb->control.intercept_dr_read |=
1380 ++ nested_vmcb->control.intercept_dr_read;
1381 ++ svm->vmcb->control.intercept_dr_write |=
1382 ++ nested_vmcb->control.intercept_dr_write;
1383 ++ svm->vmcb->control.intercept_exceptions |=
1384 ++ nested_vmcb->control.intercept_exceptions;
1385 ++
1386 ++ svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
1387 ++
1388 ++ svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
1389 + svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
1390 + svm->vmcb->control.int_state = nested_vmcb->control.int_state;
1391 + svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
1392 + svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
1393 + svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
1394 +
1395 +- nested_svm_unmap(nested_vmcb, KM_USER0);
1396 ++ nested_svm_unmap(page);
1397 +
1398 + enable_gif(svm);
1399 +
1400 +@@ -1883,6 +1905,7 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
1401 + static int vmload_interception(struct vcpu_svm *svm)
1402 + {
1403 + struct vmcb *nested_vmcb;
1404 ++ struct page *page;
1405 +
1406 + if (nested_svm_check_permissions(svm))
1407 + return 1;
1408 +@@ -1890,12 +1913,12 @@ static int vmload_interception(struct vcpu_svm *svm)
1409 + svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1410 + skip_emulated_instruction(&svm->vcpu);
1411 +
1412 +- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
1413 ++ nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
1414 + if (!nested_vmcb)
1415 + return 1;
1416 +
1417 + nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
1418 +- nested_svm_unmap(nested_vmcb, KM_USER0);
1419 ++ nested_svm_unmap(page);
1420 +
1421 + return 1;
1422 + }
1423 +@@ -1903,6 +1926,7 @@ static int vmload_interception(struct vcpu_svm *svm)
1424 + static int vmsave_interception(struct vcpu_svm *svm)
1425 + {
1426 + struct vmcb *nested_vmcb;
1427 ++ struct page *page;
1428 +
1429 + if (nested_svm_check_permissions(svm))
1430 + return 1;
1431 +@@ -1910,12 +1934,12 @@ static int vmsave_interception(struct vcpu_svm *svm)
1432 + svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1433 + skip_emulated_instruction(&svm->vcpu);
1434 +
1435 +- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
1436 ++ nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
1437 + if (!nested_vmcb)
1438 + return 1;
1439 +
1440 + nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
1441 +- nested_svm_unmap(nested_vmcb, KM_USER0);
1442 ++ nested_svm_unmap(page);
1443 +
1444 + return 1;
1445 + }
1446 +@@ -2511,6 +2535,9 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
1447 + {
1448 + struct vcpu_svm *svm = to_svm(vcpu);
1449 +
1450 ++ if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
1451 ++ return;
1452 ++
1453 + if (irr == -1)
1454 + return;
1455 +
1456 +@@ -2568,13 +2595,11 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
1457 + {
1458 + struct vcpu_svm *svm = to_svm(vcpu);
1459 +
1460 +- nested_svm_intr(svm);
1461 +-
1462 + /* In case GIF=0 we can't rely on the CPU to tell us when
1463 + * GIF becomes 1, because that's a separate STGI/VMRUN intercept.
1464 + * The next time we get that intercept, this function will be
1465 + * called again though and we'll get the vintr intercept. */
1466 +- if (gif_set(svm)) {
1467 ++ if (gif_set(svm) && nested_svm_intr(svm)) {
1468 + svm_set_vintr(svm);
1469 + svm_inject_irq(svm, 0x0);
1470 + }
1471 +@@ -2614,6 +2639,9 @@ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
1472 + {
1473 + struct vcpu_svm *svm = to_svm(vcpu);
1474 +
1475 ++ if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
1476 ++ return;
1477 ++
1478 + if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
1479 + int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
1480 + kvm_set_cr8(vcpu, cr8);
1481 +@@ -2625,6 +2653,9 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
1482 + struct vcpu_svm *svm = to_svm(vcpu);
1483 + u64 cr8;
1484 +
1485 ++ if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
1486 ++ return;
1487 ++
1488 + cr8 = kvm_get_cr8(vcpu);
1489 + svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
1490 + svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
1491 +@@ -2879,6 +2910,20 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
1492 + {
1493 + }
1494 +
1495 ++static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
1496 ++{
1497 ++ switch (func) {
1498 ++ case 0x8000000A:
1499 ++ entry->eax = 1; /* SVM revision 1 */
1500 ++ entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
1501 ++ ASID emulation to nested SVM */
1502 ++ entry->ecx = 0; /* Reserved */
1503 ++ entry->edx = 0; /* Do not support any additional features */
1504 ++
1505 ++ break;
1506 ++ }
1507 ++}
1508 ++
1509 + static const struct trace_print_flags svm_exit_reasons_str[] = {
1510 + { SVM_EXIT_READ_CR0, "read_cr0" },
1511 + { SVM_EXIT_READ_CR3, "read_cr3" },
1512 +@@ -3023,6 +3068,8 @@ static struct kvm_x86_ops svm_x86_ops = {
1513 + .cpuid_update = svm_cpuid_update,
1514 +
1515 + .rdtscp_supported = svm_rdtscp_supported,
1516 ++
1517 ++ .set_supported_cpuid = svm_set_supported_cpuid,
1518 + };
1519 +
1520 + static int __init svm_init(void)
1521 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1522 +index 2f8db0e..3c86c42 100644
1523 +--- a/arch/x86/kvm/vmx.c
1524 ++++ b/arch/x86/kvm/vmx.c
1525 +@@ -27,6 +27,7 @@
1526 + #include <linux/moduleparam.h>
1527 + #include <linux/ftrace_event.h>
1528 + #include <linux/slab.h>
1529 ++#include <linux/tboot.h>
1530 + #include "kvm_cache_regs.h"
1531 + #include "x86.h"
1532 +
1533 +@@ -1176,9 +1177,16 @@ static __init int vmx_disabled_by_bios(void)
1534 + u64 msr;
1535 +
1536 + rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
1537 +- return (msr & (FEATURE_CONTROL_LOCKED |
1538 +- FEATURE_CONTROL_VMXON_ENABLED))
1539 +- == FEATURE_CONTROL_LOCKED;
1540 ++ if (msr & FEATURE_CONTROL_LOCKED) {
1541 ++ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
1542 ++ && tboot_enabled())
1543 ++ return 1;
1544 ++ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
1545 ++ && !tboot_enabled())
1546 ++ return 1;
1547 ++ }
1548 ++
1549 ++ return 0;
1550 + /* locked but not enabled */
1551 + }
1552 +
1553 +@@ -1186,21 +1194,23 @@ static int hardware_enable(void *garbage)
1554 + {
1555 + int cpu = raw_smp_processor_id();
1556 + u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
1557 +- u64 old;
1558 ++ u64 old, test_bits;
1559 +
1560 + if (read_cr4() & X86_CR4_VMXE)
1561 + return -EBUSY;
1562 +
1563 + INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
1564 + rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
1565 +- if ((old & (FEATURE_CONTROL_LOCKED |
1566 +- FEATURE_CONTROL_VMXON_ENABLED))
1567 +- != (FEATURE_CONTROL_LOCKED |
1568 +- FEATURE_CONTROL_VMXON_ENABLED))
1569 ++
1570 ++ test_bits = FEATURE_CONTROL_LOCKED;
1571 ++ test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
1572 ++ if (tboot_enabled())
1573 ++ test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
1574 ++
1575 ++ if ((old & test_bits) != test_bits) {
1576 + /* enable and lock */
1577 +- wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
1578 +- FEATURE_CONTROL_LOCKED |
1579 +- FEATURE_CONTROL_VMXON_ENABLED);
1580 ++ wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
1581 ++ }
1582 + write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
1583 + asm volatile (ASM_VMX_VMXON_RAX
1584 + : : "a"(&phys_addr), "m"(phys_addr)
1585 +@@ -4115,6 +4125,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
1586 + }
1587 + }
1588 +
1589 ++static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
1590 ++{
1591 ++}
1592 ++
1593 + static struct kvm_x86_ops vmx_x86_ops = {
1594 + .cpu_has_kvm_support = cpu_has_kvm_support,
1595 + .disabled_by_bios = vmx_disabled_by_bios,
1596 +@@ -4186,6 +4200,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
1597 + .cpuid_update = vmx_cpuid_update,
1598 +
1599 + .rdtscp_supported = vmx_rdtscp_supported,
1600 ++
1601 ++ .set_supported_cpuid = vmx_set_supported_cpuid,
1602 + };
1603 +
1604 + static int __init vmx_init(void)
1605 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1606 +index c4f35b5..a6517a2 100644
1607 +--- a/arch/x86/kvm/x86.c
1608 ++++ b/arch/x86/kvm/x86.c
1609 +@@ -484,7 +484,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
1610 +
1611 + void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
1612 + {
1613 +- kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0ful) | (msw & 0x0f));
1614 ++ kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
1615 + }
1616 + EXPORT_SYMBOL_GPL(kvm_lmsw);
1617 +
1618 +@@ -624,48 +624,42 @@ static u32 emulated_msrs[] = {
1619 + MSR_IA32_MISC_ENABLE,
1620 + };
1621 +
1622 +-static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
1623 ++static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
1624 + {
1625 +- if (efer & efer_reserved_bits) {
1626 +- kvm_inject_gp(vcpu, 0);
1627 +- return;
1628 +- }
1629 ++ if (efer & efer_reserved_bits)
1630 ++ return 1;
1631 +
1632 + if (is_paging(vcpu)
1633 +- && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
1634 +- kvm_inject_gp(vcpu, 0);
1635 +- return;
1636 +- }
1637 ++ && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
1638 ++ return 1;
1639 +
1640 + if (efer & EFER_FFXSR) {
1641 + struct kvm_cpuid_entry2 *feat;
1642 +
1643 + feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
1644 +- if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
1645 +- kvm_inject_gp(vcpu, 0);
1646 +- return;
1647 +- }
1648 ++ if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
1649 ++ return 1;
1650 + }
1651 +
1652 + if (efer & EFER_SVME) {
1653 + struct kvm_cpuid_entry2 *feat;
1654 +
1655 + feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
1656 +- if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
1657 +- kvm_inject_gp(vcpu, 0);
1658 +- return;
1659 +- }
1660 ++ if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
1661 ++ return 1;
1662 + }
1663 +
1664 +- kvm_x86_ops->set_efer(vcpu, efer);
1665 +-
1666 + efer &= ~EFER_LMA;
1667 + efer |= vcpu->arch.efer & EFER_LMA;
1668 +
1669 ++ kvm_x86_ops->set_efer(vcpu, efer);
1670 ++
1671 + vcpu->arch.efer = efer;
1672 +
1673 + vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
1674 + kvm_mmu_reset_context(vcpu);
1675 ++
1676 ++ return 0;
1677 + }
1678 +
1679 + void kvm_enable_efer_bits(u64 mask)
1680 +@@ -695,14 +689,22 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1681 +
1682 + static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
1683 + {
1684 +- static int version;
1685 ++ int version;
1686 ++ int r;
1687 + struct pvclock_wall_clock wc;
1688 + struct timespec boot;
1689 +
1690 + if (!wall_clock)
1691 + return;
1692 +
1693 +- version++;
1694 ++ r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
1695 ++ if (r)
1696 ++ return;
1697 ++
1698 ++ if (version & 1)
1699 ++ ++version; /* first time write, random junk */
1700 ++
1701 ++ ++version;
1702 +
1703 + kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
1704 +
1705 +@@ -1086,8 +1088,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1706 + {
1707 + switch (msr) {
1708 + case MSR_EFER:
1709 +- set_efer(vcpu, data);
1710 +- break;
1711 ++ return set_efer(vcpu, data);
1712 + case MSR_K7_HWCR:
1713 + data &= ~(u64)0x40; /* ignore flush filter disable */
1714 + if (data != 0) {
1715 +@@ -1768,6 +1769,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
1716 + {
1717 + int r;
1718 +
1719 ++ vcpu_load(vcpu);
1720 + r = -E2BIG;
1721 + if (cpuid->nent < vcpu->arch.cpuid_nent)
1722 + goto out;
1723 +@@ -1779,6 +1781,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
1724 +
1725 + out:
1726 + cpuid->nent = vcpu->arch.cpuid_nent;
1727 ++ vcpu_put(vcpu);
1728 + return r;
1729 + }
1730 +
1731 +@@ -1917,6 +1920,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1732 + entry->ecx &= kvm_supported_word6_x86_features;
1733 + break;
1734 + }
1735 ++
1736 ++ kvm_x86_ops->set_supported_cpuid(function, entry);
1737 ++
1738 + put_cpu();
1739 + }
1740 +
1741 +@@ -2031,6 +2037,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
1742 + int r;
1743 + unsigned bank_num = mcg_cap & 0xff, bank;
1744 +
1745 ++ vcpu_load(vcpu);
1746 + r = -EINVAL;
1747 + if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
1748 + goto out;
1749 +@@ -2045,6 +2052,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
1750 + for (bank = 0; bank < bank_num; bank++)
1751 + vcpu->arch.mce_banks[bank*4] = ~(u64)0;
1752 + out:
1753 ++ vcpu_put(vcpu);
1754 + return r;
1755 + }
1756 +
1757 +@@ -2312,7 +2320,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
1758 + r = -EFAULT;
1759 + if (copy_from_user(&mce, argp, sizeof mce))
1760 + goto out;
1761 ++ vcpu_load(vcpu);
1762 + r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
1763 ++ vcpu_put(vcpu);
1764 + break;
1765 + }
1766 + case KVM_GET_VCPU_EVENTS: {
1767 +diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
1768 +index 2c505ee..f1fb411 100644
1769 +--- a/arch/x86/oprofile/nmi_int.c
1770 ++++ b/arch/x86/oprofile/nmi_int.c
1771 +@@ -95,7 +95,10 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
1772 + static void nmi_cpu_start(void *dummy)
1773 + {
1774 + struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
1775 +- model->start(msrs);
1776 ++ if (!msrs->controls)
1777 ++ WARN_ON_ONCE(1);
1778 ++ else
1779 ++ model->start(msrs);
1780 + }
1781 +
1782 + static int nmi_start(void)
1783 +@@ -107,7 +110,10 @@ static int nmi_start(void)
1784 + static void nmi_cpu_stop(void *dummy)
1785 + {
1786 + struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
1787 +- model->stop(msrs);
1788 ++ if (!msrs->controls)
1789 ++ WARN_ON_ONCE(1);
1790 ++ else
1791 ++ model->stop(msrs);
1792 + }
1793 +
1794 + static void nmi_stop(void)
1795 +diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
1796 +index 987267f..a9c6611 100644
1797 +--- a/arch/x86/xen/suspend.c
1798 ++++ b/arch/x86/xen/suspend.c
1799 +@@ -60,6 +60,6 @@ static void xen_vcpu_notify_restore(void *data)
1800 +
1801 + void xen_arch_resume(void)
1802 + {
1803 +- smp_call_function(xen_vcpu_notify_restore,
1804 +- (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
1805 ++ on_each_cpu(xen_vcpu_notify_restore,
1806 ++ (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
1807 + }
1808 +diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
1809 +index f04c989..ed8cd3c 100644
1810 +--- a/arch/xtensa/include/asm/cache.h
1811 ++++ b/arch/xtensa/include/asm/cache.h
1812 +@@ -29,5 +29,6 @@
1813 + # define CACHE_WAY_SIZE ICACHE_WAY_SIZE
1814 + #endif
1815 +
1816 ++#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
1817 +
1818 + #endif /* _XTENSA_CACHE_H */
1819 +diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
1820 +index 5f127cf..002c0ce 100644
1821 +--- a/block/cfq-iosched.c
1822 ++++ b/block/cfq-iosched.c
1823 +@@ -2503,15 +2503,10 @@ static void cfq_free_io_context(struct io_context *ioc)
1824 + __call_for_each_cic(ioc, cic_free_func);
1825 + }
1826 +
1827 +-static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1828 ++static void cfq_put_cooperator(struct cfq_queue *cfqq)
1829 + {
1830 + struct cfq_queue *__cfqq, *next;
1831 +
1832 +- if (unlikely(cfqq == cfqd->active_queue)) {
1833 +- __cfq_slice_expired(cfqd, cfqq, 0);
1834 +- cfq_schedule_dispatch(cfqd);
1835 +- }
1836 +-
1837 + /*
1838 + * If this queue was scheduled to merge with another queue, be
1839 + * sure to drop the reference taken on that queue (and others in
1840 +@@ -2527,6 +2522,16 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1841 + cfq_put_queue(__cfqq);
1842 + __cfqq = next;
1843 + }
1844 ++}
1845 ++
1846 ++static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1847 ++{
1848 ++ if (unlikely(cfqq == cfqd->active_queue)) {
1849 ++ __cfq_slice_expired(cfqd, cfqq, 0);
1850 ++ cfq_schedule_dispatch(cfqd);
1851 ++ }
1852 ++
1853 ++ cfq_put_cooperator(cfqq);
1854 +
1855 + cfq_put_queue(cfqq);
1856 + }
1857 +@@ -3470,6 +3475,9 @@ split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
1858 + }
1859 +
1860 + cic_set_cfqq(cic, NULL, 1);
1861 ++
1862 ++ cfq_put_cooperator(cfqq);
1863 ++
1864 + cfq_put_queue(cfqq);
1865 + return NULL;
1866 + }
1867 +diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
1868 +index fc2f26b..c5fef01 100644
1869 +--- a/drivers/acpi/video_detect.c
1870 ++++ b/drivers/acpi/video_detect.c
1871 +@@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str)
1872 + ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR;
1873 + if (!strcmp("video", str))
1874 + acpi_video_support |=
1875 +- ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
1876 ++ ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO;
1877 + }
1878 + return 1;
1879 + }
1880 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1881 +index 49cffb6..5abab5d 100644
1882 +--- a/drivers/ata/libata-core.c
1883 ++++ b/drivers/ata/libata-core.c
1884 +@@ -160,6 +160,10 @@ int libata_allow_tpm = 0;
1885 + module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
1886 + MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
1887 +
1888 ++static int atapi_an;
1889 ++module_param(atapi_an, int, 0444);
1890 ++MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
1891 ++
1892 + MODULE_AUTHOR("Jeff Garzik");
1893 + MODULE_DESCRIPTION("Library module for ATA devices");
1894 + MODULE_LICENSE("GPL");
1895 +@@ -2572,7 +2576,8 @@ int ata_dev_configure(struct ata_device *dev)
1896 + * to enable ATAPI AN to discern between PHY status
1897 + * changed notifications and ATAPI ANs.
1898 + */
1899 +- if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
1900 ++ if (atapi_an &&
1901 ++ (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
1902 + (!sata_pmp_attached(ap) ||
1903 + sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
1904 + unsigned int err_mask;
1905 +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
1906 +index e3877b6..4723648 100644
1907 +--- a/drivers/ata/libata-sff.c
1908 ++++ b/drivers/ata/libata-sff.c
1909 +@@ -894,7 +894,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
1910 + do_write);
1911 + }
1912 +
1913 +- if (!do_write)
1914 ++ if (!do_write && !PageSlab(page))
1915 + flush_dcache_page(page);
1916 +
1917 + qc->curbytes += qc->sect_size;
1918 +diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
1919 +index 2a98b09..7f3d179 100644
1920 +--- a/drivers/ata/sata_nv.c
1921 ++++ b/drivers/ata/sata_nv.c
1922 +@@ -1674,7 +1674,6 @@ static void nv_mcp55_freeze(struct ata_port *ap)
1923 + mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1924 + mask &= ~(NV_INT_ALL_MCP55 << shift);
1925 + writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1926 +- ata_sff_freeze(ap);
1927 + }
1928 +
1929 + static void nv_mcp55_thaw(struct ata_port *ap)
1930 +@@ -1688,7 +1687,6 @@ static void nv_mcp55_thaw(struct ata_port *ap)
1931 + mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1932 + mask |= (NV_INT_MASK_MCP55 << shift);
1933 + writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1934 +- ata_sff_thaw(ap);
1935 + }
1936 +
1937 + static void nv_adma_error_handler(struct ata_port *ap)
1938 +@@ -2479,8 +2477,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1939 + }
1940 +
1941 + pci_set_master(pdev);
1942 +- return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
1943 +- IRQF_SHARED, ipriv->sht);
1944 ++ return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
1945 + }
1946 +
1947 + #ifdef CONFIG_PM
1948 +diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
1949 +index 08f6549..0553455 100644
1950 +--- a/drivers/ata/sata_via.c
1951 ++++ b/drivers/ata/sata_via.c
1952 +@@ -575,6 +575,19 @@ static void svia_configure(struct pci_dev *pdev)
1953 + tmp8 |= NATIVE_MODE_ALL;
1954 + pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
1955 + }
1956 ++
1957 ++ /*
1958 ++ * vt6421 has problems talking to some drives. The following
1959 ++ * is the magic fix from Joseph Chan <JosephChan@×××××××.tw>.
1960 ++ * Please add proper documentation if possible.
1961 ++ *
1962 ++ * https://bugzilla.kernel.org/show_bug.cgi?id=15173
1963 ++ */
1964 ++ if (pdev->device == 0x3249) {
1965 ++ pci_read_config_byte(pdev, 0x52, &tmp8);
1966 ++ tmp8 |= 1 << 2;
1967 ++ pci_write_config_byte(pdev, 0x52, tmp8);
1968 ++ }
1969 + }
1970 +
1971 + static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1972 +diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
1973 +index f35719a..251acea 100644
1974 +--- a/drivers/base/cpu.c
1975 ++++ b/drivers/base/cpu.c
1976 +@@ -186,7 +186,7 @@ static ssize_t print_cpus_offline(struct sysdev_class *class,
1977 + /* display offline cpus < nr_cpu_ids */
1978 + if (!alloc_cpumask_var(&offline, GFP_KERNEL))
1979 + return -ENOMEM;
1980 +- cpumask_complement(offline, cpu_online_mask);
1981 ++ cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
1982 + n = cpulist_scnprintf(buf, len, offline);
1983 + free_cpumask_var(offline);
1984 +
1985 +diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
1986 +index 4462b11..9ead05d 100644
1987 +--- a/drivers/char/ipmi/ipmi_si_intf.c
1988 ++++ b/drivers/char/ipmi/ipmi_si_intf.c
1989 +@@ -314,9 +314,14 @@ static void deliver_recv_msg(struct smi_info *smi_info,
1990 + {
1991 + /* Deliver the message to the upper layer with the lock
1992 + released. */
1993 +- spin_unlock(&(smi_info->si_lock));
1994 +- ipmi_smi_msg_received(smi_info->intf, msg);
1995 +- spin_lock(&(smi_info->si_lock));
1996 ++
1997 ++ if (smi_info->run_to_completion) {
1998 ++ ipmi_smi_msg_received(smi_info->intf, msg);
1999 ++ } else {
2000 ++ spin_unlock(&(smi_info->si_lock));
2001 ++ ipmi_smi_msg_received(smi_info->intf, msg);
2002 ++ spin_lock(&(smi_info->si_lock));
2003 ++ }
2004 + }
2005 +
2006 + static void return_hosed_msg(struct smi_info *smi_info, int cCode)
2007 +diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
2008 +index 744f748..a860ec0 100644
2009 +--- a/drivers/clocksource/sh_cmt.c
2010 ++++ b/drivers/clocksource/sh_cmt.c
2011 +@@ -413,18 +413,10 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
2012 + static int sh_cmt_clocksource_enable(struct clocksource *cs)
2013 + {
2014 + struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
2015 +- int ret;
2016 +
2017 + p->total_cycles = 0;
2018 +
2019 +- ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
2020 +- if (ret)
2021 +- return ret;
2022 +-
2023 +- /* TODO: calculate good shift from rate and counter bit width */
2024 +- cs->shift = 0;
2025 +- cs->mult = clocksource_hz2mult(p->rate, cs->shift);
2026 +- return 0;
2027 ++ return sh_cmt_start(p, FLAG_CLOCKSOURCE);
2028 + }
2029 +
2030 + static void sh_cmt_clocksource_disable(struct clocksource *cs)
2031 +@@ -451,7 +443,18 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
2032 + cs->resume = sh_cmt_clocksource_resume;
2033 + cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
2034 + cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
2035 ++
2036 ++ /* clk_get_rate() needs an enabled clock */
2037 ++ clk_enable(p->clk);
2038 ++	p->rate = clk_get_rate(p->clk) / ((p->width == 16) ? 512 : 8);
2039 ++ clk_disable(p->clk);
2040 ++
2041 ++ /* TODO: calculate good shift from rate and counter bit width */
2042 ++ cs->shift = 10;
2043 ++ cs->mult = clocksource_hz2mult(p->rate, cs->shift);
2044 ++
2045 + pr_info("sh_cmt: %s used as clock source\n", cs->name);
2046 ++
2047 + clocksource_register(cs);
2048 + return 0;
2049 + }
2050 +diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
2051 +index fc9ff1e..7a24160 100644
2052 +--- a/drivers/clocksource/sh_tmu.c
2053 ++++ b/drivers/clocksource/sh_tmu.c
2054 +@@ -200,16 +200,8 @@ static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
2055 + static int sh_tmu_clocksource_enable(struct clocksource *cs)
2056 + {
2057 + struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
2058 +- int ret;
2059 +-
2060 +- ret = sh_tmu_enable(p);
2061 +- if (ret)
2062 +- return ret;
2063 +
2064 +- /* TODO: calculate good shift from rate and counter bit width */
2065 +- cs->shift = 10;
2066 +- cs->mult = clocksource_hz2mult(p->rate, cs->shift);
2067 +- return 0;
2068 ++ return sh_tmu_enable(p);
2069 + }
2070 +
2071 + static void sh_tmu_clocksource_disable(struct clocksource *cs)
2072 +@@ -229,6 +221,16 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
2073 + cs->disable = sh_tmu_clocksource_disable;
2074 + cs->mask = CLOCKSOURCE_MASK(32);
2075 + cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
2076 ++
2077 ++ /* clk_get_rate() needs an enabled clock */
2078 ++ clk_enable(p->clk);
2079 ++ /* channel will be configured at parent clock / 4 */
2080 ++ p->rate = clk_get_rate(p->clk) / 4;
2081 ++ clk_disable(p->clk);
2082 ++ /* TODO: calculate good shift from rate and counter bit width */
2083 ++ cs->shift = 10;
2084 ++ cs->mult = clocksource_hz2mult(p->rate, cs->shift);
2085 ++
2086 + pr_info("sh_tmu: %s used as clock source\n", cs->name);
2087 + clocksource_register(cs);
2088 + return 0;
2089 +diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
2090 +index 5045156..991447b 100644
2091 +--- a/drivers/firewire/core-card.c
2092 ++++ b/drivers/firewire/core-card.c
2093 +@@ -231,7 +231,7 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
2094 + static void fw_card_bm_work(struct work_struct *work)
2095 + {
2096 + struct fw_card *card = container_of(work, struct fw_card, work.work);
2097 +- struct fw_device *root_device;
2098 ++ struct fw_device *root_device, *irm_device;
2099 + struct fw_node *root_node;
2100 + unsigned long flags;
2101 + int root_id, new_root_id, irm_id, local_id;
2102 +@@ -239,6 +239,7 @@ static void fw_card_bm_work(struct work_struct *work)
2103 + bool do_reset = false;
2104 + bool root_device_is_running;
2105 + bool root_device_is_cmc;
2106 ++ bool irm_is_1394_1995_only;
2107 +
2108 + spin_lock_irqsave(&card->lock, flags);
2109 +
2110 +@@ -248,12 +249,18 @@ static void fw_card_bm_work(struct work_struct *work)
2111 + }
2112 +
2113 + generation = card->generation;
2114 ++
2115 + root_node = card->root_node;
2116 + fw_node_get(root_node);
2117 + root_device = root_node->data;
2118 + root_device_is_running = root_device &&
2119 + atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
2120 + root_device_is_cmc = root_device && root_device->cmc;
2121 ++
2122 ++ irm_device = card->irm_node->data;
2123 ++ irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
2124 ++ (irm_device->config_rom[2] & 0x000000f0) == 0;
2125 ++
2126 + root_id = root_node->node_id;
2127 + irm_id = card->irm_node->node_id;
2128 + local_id = card->local_node->node_id;
2129 +@@ -276,8 +283,15 @@ static void fw_card_bm_work(struct work_struct *work)
2130 +
2131 + if (!card->irm_node->link_on) {
2132 + new_root_id = local_id;
2133 +- fw_notify("IRM has link off, making local node (%02x) root.\n",
2134 +- new_root_id);
2135 ++ fw_notify("%s, making local node (%02x) root.\n",
2136 ++ "IRM has link off", new_root_id);
2137 ++ goto pick_me;
2138 ++ }
2139 ++
2140 ++ if (irm_is_1394_1995_only) {
2141 ++ new_root_id = local_id;
2142 ++ fw_notify("%s, making local node (%02x) root.\n",
2143 ++ "IRM is not 1394a compliant", new_root_id);
2144 + goto pick_me;
2145 + }
2146 +
2147 +@@ -316,8 +330,8 @@ static void fw_card_bm_work(struct work_struct *work)
2148 + * root, and thus, IRM.
2149 + */
2150 + new_root_id = local_id;
2151 +- fw_notify("BM lock failed, making local node (%02x) root.\n",
2152 +- new_root_id);
2153 ++ fw_notify("%s, making local node (%02x) root.\n",
2154 ++ "BM lock failed", new_root_id);
2155 + goto pick_me;
2156 + }
2157 + } else if (card->bm_generation != generation) {
2158 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
2159 +index 18f41d7..10348d3 100644
2160 +--- a/drivers/gpu/drm/drm_edid.c
2161 ++++ b/drivers/gpu/drm/drm_edid.c
2162 +@@ -335,7 +335,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
2163 + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2164 + /* 1024x768@85Hz */
2165 + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
2166 +- 1072, 1376, 0, 768, 769, 772, 808, 0,
2167 ++ 1168, 1376, 0, 768, 769, 772, 808, 0,
2168 + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2169 + /* 1152x864@75Hz */
2170 + { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2171 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
2172 +index ef3d91d..691701a 100644
2173 +--- a/drivers/gpu/drm/i915/i915_gem.c
2174 ++++ b/drivers/gpu/drm/i915/i915_gem.c
2175 +@@ -2688,6 +2688,14 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2176 + return -EINVAL;
2177 + }
2178 +
2179 ++ /* If the object is bigger than the entire aperture, reject it early
2180 ++ * before evicting everything in a vain attempt to find space.
2181 ++ */
2182 ++ if (obj->size > dev->gtt_total) {
2183 ++ DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2184 ++ return -E2BIG;
2185 ++ }
2186 ++
2187 + search_free:
2188 + free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2189 + obj->size, alignment, 0);
2190 +@@ -4231,6 +4239,17 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
2191 + int ret;
2192 +
2193 + i915_verify_inactive(dev, __FILE__, __LINE__);
2194 ++
2195 ++ if (obj_priv->gtt_space != NULL) {
2196 ++ if (alignment == 0)
2197 ++ alignment = i915_gem_get_gtt_alignment(obj);
2198 ++ if (obj_priv->gtt_offset & (alignment - 1)) {
2199 ++ ret = i915_gem_object_unbind(obj);
2200 ++ if (ret)
2201 ++ return ret;
2202 ++ }
2203 ++ }
2204 ++
2205 + if (obj_priv->gtt_space == NULL) {
2206 + ret = i915_gem_object_bind_to_gtt(obj, alignment);
2207 + if (ret)
2208 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
2209 +index c7502b6..70765cf 100644
2210 +--- a/drivers/gpu/drm/i915/intel_display.c
2211 ++++ b/drivers/gpu/drm/i915/intel_display.c
2212 +@@ -4155,12 +4155,6 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
2213 + spin_lock_irqsave(&dev->event_lock, flags);
2214 + work = intel_crtc->unpin_work;
2215 + if (work == NULL || !work->pending) {
2216 +- if (work && !work->pending) {
2217 +- obj_priv = to_intel_bo(work->pending_flip_obj);
2218 +- DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
2219 +- obj_priv,
2220 +- atomic_read(&obj_priv->pending_flip));
2221 +- }
2222 + spin_unlock_irqrestore(&dev->event_lock, flags);
2223 + return;
2224 + }
2225 +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
2226 +index 77e40cf..7c28ff1 100644
2227 +--- a/drivers/gpu/drm/i915/intel_dp.c
2228 ++++ b/drivers/gpu/drm/i915/intel_dp.c
2229 +@@ -1180,16 +1180,6 @@ intel_dp_detect(struct drm_connector *connector)
2230 + if (HAS_PCH_SPLIT(dev))
2231 + return ironlake_dp_detect(connector);
2232 +
2233 +- temp = I915_READ(PORT_HOTPLUG_EN);
2234 +-
2235 +- I915_WRITE(PORT_HOTPLUG_EN,
2236 +- temp |
2237 +- DPB_HOTPLUG_INT_EN |
2238 +- DPC_HOTPLUG_INT_EN |
2239 +- DPD_HOTPLUG_INT_EN);
2240 +-
2241 +- POSTING_READ(PORT_HOTPLUG_EN);
2242 +-
2243 + switch (dp_priv->output_reg) {
2244 + case DP_B:
2245 + bit = DPB_HOTPLUG_INT_STATUS;
2246 +diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
2247 +index 034218c..4c7204a 100644
2248 +--- a/drivers/gpu/drm/radeon/radeon.h
2249 ++++ b/drivers/gpu/drm/radeon/radeon.h
2250 +@@ -566,6 +566,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
2251 + */
2252 + int radeon_agp_init(struct radeon_device *rdev);
2253 + void radeon_agp_resume(struct radeon_device *rdev);
2254 ++void radeon_agp_suspend(struct radeon_device *rdev);
2255 + void radeon_agp_fini(struct radeon_device *rdev);
2256 +
2257 +
2258 +diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
2259 +index 28e473f..f40dfb7 100644
2260 +--- a/drivers/gpu/drm/radeon/radeon_agp.c
2261 ++++ b/drivers/gpu/drm/radeon/radeon_agp.c
2262 +@@ -270,3 +270,8 @@ void radeon_agp_fini(struct radeon_device *rdev)
2263 + }
2264 + #endif
2265 + }
2266 ++
2267 ++void radeon_agp_suspend(struct radeon_device *rdev)
2268 ++{
2269 ++ radeon_agp_fini(rdev);
2270 ++}
2271 +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
2272 +index 9916d82..1a4fa9b 100644
2273 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c
2274 ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
2275 +@@ -530,6 +530,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
2276 + }
2277 +
2278 + /* look up gpio for ddc, hpd */
2279 ++ ddc_bus.valid = false;
2280 ++ hpd.hpd = RADEON_HPD_NONE;
2281 + if ((le16_to_cpu(path->usDeviceTag) &
2282 + (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
2283 + for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
2284 +@@ -585,9 +587,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
2285 + break;
2286 + }
2287 + }
2288 +- } else {
2289 +- hpd.hpd = RADEON_HPD_NONE;
2290 +- ddc_bus.valid = false;
2291 + }
2292 +
2293 + /* needed for aux chan transactions */
2294 +@@ -1174,7 +1173,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
2295 + lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
2296 + le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
2297 + lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
2298 +- le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
2299 ++ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
2300 + lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
2301 + le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
2302 + lvds->panel_pwr_delay =
2303 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2304 +index 7b629e3..ed6a724 100644
2305 +--- a/drivers/gpu/drm/radeon/radeon_device.c
2306 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
2307 +@@ -748,6 +748,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
2308 + /* evict remaining vram memory */
2309 + radeon_bo_evict_vram(rdev);
2310 +
2311 ++ radeon_agp_suspend(rdev);
2312 ++
2313 + pci_save_state(dev->pdev);
2314 + if (state.event == PM_EVENT_SUSPEND) {
2315 + /* Shut down the device */
2316 +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
2317 +index bb1c122..c2848e0 100644
2318 +--- a/drivers/gpu/drm/radeon/radeon_display.c
2319 ++++ b/drivers/gpu/drm/radeon/radeon_display.c
2320 +@@ -978,8 +978,11 @@ void radeon_update_display_priority(struct radeon_device *rdev)
2321 + /* set display priority to high for r3xx, rv515 chips
2322 + * this avoids flickering due to underflow to the
2323 + * display controllers during heavy acceleration.
2324 ++ * Don't force high on rs4xx igp chips as it seems to
2325 ++ * affect the sound card. See kernel bug 15982.
2326 + */
2327 +- if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515))
2328 ++ if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
2329 ++ !(rdev->flags & RADEON_IS_IGP))
2330 + rdev->disp_priority = 2;
2331 + else
2332 + rdev->disp_priority = 0;
2333 +diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
2334 +index cc5316d..b3ba44c 100644
2335 +--- a/drivers/gpu/drm/radeon/radeon_state.c
2336 ++++ b/drivers/gpu/drm/radeon/radeon_state.c
2337 +@@ -900,9 +900,10 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
2338 + flags |= RADEON_FRONT;
2339 + }
2340 + if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
2341 +- if (!dev_priv->have_z_offset)
2342 ++ if (!dev_priv->have_z_offset) {
2343 + printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
2344 +- flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
2345 ++ flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
2346 ++ }
2347 + }
2348 +
2349 + if (flags & (RADEON_FRONT | RADEON_BACK)) {
2350 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
2351 +index 143e788..0010efa 100644
2352 +--- a/drivers/hid/hid-core.c
2353 ++++ b/drivers/hid/hid-core.c
2354 +@@ -1305,6 +1305,7 @@ static const struct hid_device_id hid_blacklist[] = {
2355 + { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
2356 + { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
2357 + { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
2358 ++ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
2359 + { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
2360 + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
2361 + { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
2362 +diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
2363 +index 62416e6..3975e03 100644
2364 +--- a/drivers/hid/hid-gyration.c
2365 ++++ b/drivers/hid/hid-gyration.c
2366 +@@ -73,6 +73,7 @@ static int gyration_event(struct hid_device *hdev, struct hid_field *field,
2367 + static const struct hid_device_id gyration_devices[] = {
2368 + { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
2369 + { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
2370 ++ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
2371 + { }
2372 + };
2373 + MODULE_DEVICE_TABLE(hid, gyration_devices);
2374 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
2375 +index 09d2764..b681cbf 100644
2376 +--- a/drivers/hid/hid-ids.h
2377 ++++ b/drivers/hid/hid-ids.h
2378 +@@ -267,6 +267,7 @@
2379 + #define USB_VENDOR_ID_GYRATION 0x0c16
2380 + #define USB_DEVICE_ID_GYRATION_REMOTE 0x0002
2381 + #define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003
2382 ++#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008
2383 +
2384 + #define USB_VENDOR_ID_HAPP 0x078b
2385 + #define USB_DEVICE_ID_UGCI_DRIVING 0x0010
2386 +diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
2387 +index 65c232a..21d201b 100644
2388 +--- a/drivers/hwmon/ltc4245.c
2389 ++++ b/drivers/hwmon/ltc4245.c
2390 +@@ -45,9 +45,7 @@ enum ltc4245_cmd {
2391 + LTC4245_VEEIN = 0x19,
2392 + LTC4245_VEESENSE = 0x1a,
2393 + LTC4245_VEEOUT = 0x1b,
2394 +- LTC4245_GPIOADC1 = 0x1c,
2395 +- LTC4245_GPIOADC2 = 0x1d,
2396 +- LTC4245_GPIOADC3 = 0x1e,
2397 ++ LTC4245_GPIOADC = 0x1c,
2398 + };
2399 +
2400 + struct ltc4245_data {
2401 +@@ -61,7 +59,7 @@ struct ltc4245_data {
2402 + u8 cregs[0x08];
2403 +
2404 + /* Voltage registers */
2405 +- u8 vregs[0x0f];
2406 ++ u8 vregs[0x0d];
2407 + };
2408 +
2409 + static struct ltc4245_data *ltc4245_update_device(struct device *dev)
2410 +@@ -86,7 +84,7 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev)
2411 + data->cregs[i] = val;
2412 + }
2413 +
2414 +- /* Read voltage registers -- 0x10 to 0x1f */
2415 ++ /* Read voltage registers -- 0x10 to 0x1c */
2416 + for (i = 0; i < ARRAY_SIZE(data->vregs); i++) {
2417 + val = i2c_smbus_read_byte_data(client, i+0x10);
2418 + if (unlikely(val < 0))
2419 +@@ -128,9 +126,7 @@ static int ltc4245_get_voltage(struct device *dev, u8 reg)
2420 + case LTC4245_VEEOUT:
2421 + voltage = regval * -55;
2422 + break;
2423 +- case LTC4245_GPIOADC1:
2424 +- case LTC4245_GPIOADC2:
2425 +- case LTC4245_GPIOADC3:
2426 ++ case LTC4245_GPIOADC:
2427 + voltage = regval * 10;
2428 + break;
2429 + default:
2430 +@@ -297,9 +293,7 @@ LTC4245_ALARM(in7_min_alarm, (1 << 2), LTC4245_FAULT2);
2431 + LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2);
2432 +
2433 + /* GPIO voltages */
2434 +-LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC1);
2435 +-LTC4245_VOLTAGE(in10_input, LTC4245_GPIOADC2);
2436 +-LTC4245_VOLTAGE(in11_input, LTC4245_GPIOADC3);
2437 ++LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC);
2438 +
2439 + /* Power Consumption (virtual) */
2440 + LTC4245_POWER(power1_input, LTC4245_12VSENSE);
2441 +@@ -342,8 +336,6 @@ static struct attribute *ltc4245_attributes[] = {
2442 + &sensor_dev_attr_in8_min_alarm.dev_attr.attr,
2443 +
2444 + &sensor_dev_attr_in9_input.dev_attr.attr,
2445 +- &sensor_dev_attr_in10_input.dev_attr.attr,
2446 +- &sensor_dev_attr_in11_input.dev_attr.attr,
2447 +
2448 + &sensor_dev_attr_power1_input.dev_attr.attr,
2449 + &sensor_dev_attr_power2_input.dev_attr.attr,
2450 +diff --git a/drivers/md/linear.c b/drivers/md/linear.c
2451 +index 09437e9..0a1042b 100644
2452 +--- a/drivers/md/linear.c
2453 ++++ b/drivers/md/linear.c
2454 +@@ -282,6 +282,7 @@ static int linear_stop (mddev_t *mddev)
2455 + rcu_barrier();
2456 + blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2457 + kfree(conf);
2458 ++ mddev->private = NULL;
2459 +
2460 + return 0;
2461 + }
2462 +diff --git a/drivers/md/md.c b/drivers/md/md.c
2463 +index cefd63d..336792e 100644
2464 +--- a/drivers/md/md.c
2465 ++++ b/drivers/md/md.c
2466 +@@ -508,9 +508,36 @@ static inline int mddev_trylock(mddev_t * mddev)
2467 + return mutex_trylock(&mddev->reconfig_mutex);
2468 + }
2469 +
2470 +-static inline void mddev_unlock(mddev_t * mddev)
2471 +-{
2472 +- mutex_unlock(&mddev->reconfig_mutex);
2473 ++static struct attribute_group md_redundancy_group;
2474 ++
2475 ++static void mddev_unlock(mddev_t * mddev)
2476 ++{
2477 ++ if (mddev->to_remove) {
2478 ++ /* These cannot be removed under reconfig_mutex as
2479 ++ * an access to the files will try to take reconfig_mutex
2480 ++ * while holding the file unremovable, which leads to
2481 ++ * a deadlock.
2482 ++ * So hold open_mutex instead - we are allowed to take
2483 ++ * it while holding reconfig_mutex, and md_run can
2484 ++ * use it to wait for the remove to complete.
2485 ++ */
2486 ++ struct attribute_group *to_remove = mddev->to_remove;
2487 ++ mddev->to_remove = NULL;
2488 ++ mutex_lock(&mddev->open_mutex);
2489 ++ mutex_unlock(&mddev->reconfig_mutex);
2490 ++
2491 ++ if (to_remove != &md_redundancy_group)
2492 ++ sysfs_remove_group(&mddev->kobj, to_remove);
2493 ++ if (mddev->pers == NULL ||
2494 ++ mddev->pers->sync_request == NULL) {
2495 ++ sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
2496 ++ if (mddev->sysfs_action)
2497 ++ sysfs_put(mddev->sysfs_action);
2498 ++ mddev->sysfs_action = NULL;
2499 ++ }
2500 ++ mutex_unlock(&mddev->open_mutex);
2501 ++ } else
2502 ++ mutex_unlock(&mddev->reconfig_mutex);
2503 +
2504 + md_wakeup_thread(mddev->thread);
2505 + }
2506 +@@ -2980,6 +3007,23 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
2507 + /* Looks like we have a winner */
2508 + mddev_suspend(mddev);
2509 + mddev->pers->stop(mddev);
2510 ++
2511 ++ if (mddev->pers->sync_request == NULL &&
2512 ++ pers->sync_request != NULL) {
2513 ++ /* need to add the md_redundancy_group */
2514 ++ if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
2515 ++ printk(KERN_WARNING
2516 ++ "md: cannot register extra attributes for %s\n",
2517 ++ mdname(mddev));
2518 ++ mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
2519 ++ }
2520 ++ if (mddev->pers->sync_request != NULL &&
2521 ++ pers->sync_request == NULL) {
2522 ++ /* need to remove the md_redundancy_group */
2523 ++ if (mddev->to_remove == NULL)
2524 ++ mddev->to_remove = &md_redundancy_group;
2525 ++ }
2526 ++
2527 + module_put(mddev->pers->owner);
2528 + /* Invalidate devices that are now superfluous */
2529 + list_for_each_entry(rdev, &mddev->disks, same_set)
2530 +@@ -4082,15 +4126,6 @@ static void mddev_delayed_delete(struct work_struct *ws)
2531 + {
2532 + mddev_t *mddev = container_of(ws, mddev_t, del_work);
2533 +
2534 +- if (mddev->private) {
2535 +- sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
2536 +- if (mddev->private != (void*)1)
2537 +- sysfs_remove_group(&mddev->kobj, mddev->private);
2538 +- if (mddev->sysfs_action)
2539 +- sysfs_put(mddev->sysfs_action);
2540 +- mddev->sysfs_action = NULL;
2541 +- mddev->private = NULL;
2542 +- }
2543 + sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
2544 + kobject_del(&mddev->kobj);
2545 + kobject_put(&mddev->kobj);
2546 +@@ -4248,6 +4283,13 @@ static int do_md_run(mddev_t * mddev)
2547 + if (mddev->pers)
2548 + return -EBUSY;
2549 +
2550 ++ /* These two calls synchronise us with the
2551 ++ * sysfs_remove_group calls in mddev_unlock,
2552 ++ * so they must have completed.
2553 ++ */
2554 ++ mutex_lock(&mddev->open_mutex);
2555 ++ mutex_unlock(&mddev->open_mutex);
2556 ++
2557 + /*
2558 + * Analyze all RAID superblock(s)
2559 + */
2560 +@@ -4536,8 +4578,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
2561 + mddev->queue->unplug_fn = NULL;
2562 + mddev->queue->backing_dev_info.congested_fn = NULL;
2563 + module_put(mddev->pers->owner);
2564 +- if (mddev->pers->sync_request && mddev->private == NULL)
2565 +- mddev->private = (void*)1;
2566 ++ if (mddev->pers->sync_request && mddev->to_remove == NULL)
2567 ++ mddev->to_remove = &md_redundancy_group;
2568 + mddev->pers = NULL;
2569 + /* tell userspace to handle 'inactive' */
2570 + sysfs_notify_dirent(mddev->sysfs_state);
2571 +@@ -5496,6 +5538,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
2572 + int err = 0;
2573 + void __user *argp = (void __user *)arg;
2574 + mddev_t *mddev = NULL;
2575 ++ int ro;
2576 +
2577 + if (!capable(CAP_SYS_ADMIN))
2578 + return -EACCES;
2579 +@@ -5631,6 +5674,34 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
2580 + err = do_md_stop(mddev, 1, 1);
2581 + goto done_unlock;
2582 +
2583 ++ case BLKROSET:
2584 ++ if (get_user(ro, (int __user *)(arg))) {
2585 ++ err = -EFAULT;
2586 ++ goto done_unlock;
2587 ++ }
2588 ++ err = -EINVAL;
2589 ++
2590 ++ /* if the bdev is going readonly the value of mddev->ro
2591 ++ * does not matter, no writes are coming
2592 ++ */
2593 ++ if (ro)
2594 ++ goto done_unlock;
2595 ++
2596 ++ /* are we are already prepared for writes? */
2597 ++ if (mddev->ro != 1)
2598 ++ goto done_unlock;
2599 ++
2600 ++ /* transitioning to readauto need only happen for
2601 ++ * arrays that call md_write_start
2602 ++ */
2603 ++ if (mddev->pers) {
2604 ++ err = restart_array(mddev);
2605 ++ if (err == 0) {
2606 ++ mddev->ro = 2;
2607 ++ set_disk_ro(mddev->gendisk, 0);
2608 ++ }
2609 ++ }
2610 ++ goto done_unlock;
2611 + }
2612 +
2613 + /*
2614 +diff --git a/drivers/md/md.h b/drivers/md/md.h
2615 +index 8e4c75c..722f5df 100644
2616 +--- a/drivers/md/md.h
2617 ++++ b/drivers/md/md.h
2618 +@@ -305,6 +305,7 @@ struct mddev_s
2619 + atomic_t max_corr_read_errors; /* max read retries */
2620 + struct list_head all_mddevs;
2621 +
2622 ++ struct attribute_group *to_remove;
2623 + /* Generic barrier handling.
2624 + * If there is a pending barrier request, all other
2625 + * writes are blocked while the devices are flushed.
2626 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
2627 +index e59b10e..84d3bf0 100644
2628 +--- a/drivers/md/raid1.c
2629 ++++ b/drivers/md/raid1.c
2630 +@@ -418,7 +418,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
2631 + */
2632 + static int read_balance(conf_t *conf, r1bio_t *r1_bio)
2633 + {
2634 +- const unsigned long this_sector = r1_bio->sector;
2635 ++ const sector_t this_sector = r1_bio->sector;
2636 + int new_disk = conf->last_used, disk = new_disk;
2637 + int wonly_disk = -1;
2638 + const int sectors = r1_bio->sectors;
2639 +@@ -434,7 +434,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
2640 + retry:
2641 + if (conf->mddev->recovery_cp < MaxSector &&
2642 + (this_sector + sectors >= conf->next_resync)) {
2643 +- /* Choose the first operation device, for consistancy */
2644 ++ /* Choose the first operational device, for consistancy */
2645 + new_disk = 0;
2646 +
2647 + for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
2648 +@@ -912,9 +912,10 @@ static int make_request(struct request_queue *q, struct bio * bio)
2649 + if (test_bit(Faulty, &rdev->flags)) {
2650 + rdev_dec_pending(rdev, mddev);
2651 + r1_bio->bios[i] = NULL;
2652 +- } else
2653 ++ } else {
2654 + r1_bio->bios[i] = bio;
2655 +- targets++;
2656 ++ targets++;
2657 ++ }
2658 + } else
2659 + r1_bio->bios[i] = NULL;
2660 + }
2661 +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
2662 +index e2766d8..ad945cc 100644
2663 +--- a/drivers/md/raid10.c
2664 ++++ b/drivers/md/raid10.c
2665 +@@ -494,7 +494,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
2666 + */
2667 + static int read_balance(conf_t *conf, r10bio_t *r10_bio)
2668 + {
2669 +- const unsigned long this_sector = r10_bio->sector;
2670 ++ const sector_t this_sector = r10_bio->sector;
2671 + int disk, slot, nslot;
2672 + const int sectors = r10_bio->sectors;
2673 + sector_t new_distance, current_distance;
2674 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2675 +index 15348c3..6af0a6d 100644
2676 +--- a/drivers/md/raid5.c
2677 ++++ b/drivers/md/raid5.c
2678 +@@ -5087,7 +5087,9 @@ static int run(mddev_t *mddev)
2679 + }
2680 +
2681 + /* Ok, everything is just fine now */
2682 +- if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
2683 ++ if (mddev->to_remove == &raid5_attrs_group)
2684 ++ mddev->to_remove = NULL;
2685 ++ else if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
2686 + printk(KERN_WARNING
2687 + "raid5: failed to create sysfs attributes for %s\n",
2688 + mdname(mddev));
2689 +@@ -5134,7 +5136,8 @@ static int stop(mddev_t *mddev)
2690 + mddev->queue->backing_dev_info.congested_fn = NULL;
2691 + blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2692 + free_conf(conf);
2693 +- mddev->private = &raid5_attrs_group;
2694 ++ mddev->private = NULL;
2695 ++ mddev->to_remove = &raid5_attrs_group;
2696 + return 0;
2697 + }
2698 +
2699 +diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
2700 +index 6d3850b..2194da5 100644
2701 +--- a/drivers/media/video/uvc/uvc_ctrl.c
2702 ++++ b/drivers/media/video/uvc/uvc_ctrl.c
2703 +@@ -1047,6 +1047,8 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
2704 + uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
2705 + step = mapping->get(mapping, UVC_GET_RES,
2706 + uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
2707 ++ if (step == 0)
2708 ++ step = 1;
2709 +
2710 + xctrl->value = min + (xctrl->value - min + step/2) / step * step;
2711 + xctrl->value = clamp(xctrl->value, min, max);
2712 +diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmware_balloon.c
2713 +index e7161c4..ad8fb09 100644
2714 +--- a/drivers/misc/vmware_balloon.c
2715 ++++ b/drivers/misc/vmware_balloon.c
2716 +@@ -45,7 +45,7 @@
2717 +
2718 + MODULE_AUTHOR("VMware, Inc.");
2719 + MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
2720 +-MODULE_VERSION("1.2.1.0-K");
2721 ++MODULE_VERSION("1.2.1.1-k");
2722 + MODULE_ALIAS("dmi:*:svnVMware*:*");
2723 + MODULE_ALIAS("vmware_vmmemctl");
2724 + MODULE_LICENSE("GPL");
2725 +@@ -101,6 +101,8 @@ MODULE_LICENSE("GPL");
2726 + /* Maximum number of page allocations without yielding processor */
2727 + #define VMW_BALLOON_YIELD_THRESHOLD 1024
2728 +
2729 ++/* Maximum number of refused pages we accumulate during inflation cycle */
2730 ++#define VMW_BALLOON_MAX_REFUSED 16
2731 +
2732 + /*
2733 + * Hypervisor communication port definitions.
2734 +@@ -183,6 +185,7 @@ struct vmballoon {
2735 +
2736 + /* transient list of non-balloonable pages */
2737 + struct list_head refused_pages;
2738 ++ unsigned int n_refused_pages;
2739 +
2740 + /* balloon size in pages */
2741 + unsigned int size;
2742 +@@ -428,14 +431,21 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
2743 + /* inform monitor */
2744 + locked = vmballoon_send_lock_page(b, page_to_pfn(page));
2745 + if (!locked) {
2746 ++ STATS_INC(b->stats.refused_alloc);
2747 ++
2748 + if (b->reset_required) {
2749 + __free_page(page);
2750 + return -EIO;
2751 + }
2752 +
2753 +- /* place on list of non-balloonable pages, retry allocation */
2754 ++ /*
2755 ++ * Place page on the list of non-balloonable pages
2756 ++ * and retry allocation, unless we already accumulated
2757 ++ * too many of them, in which case take a breather.
2758 ++ */
2759 + list_add(&page->lru, &b->refused_pages);
2760 +- STATS_INC(b->stats.refused_alloc);
2761 ++ if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
2762 ++ return -EIO;
2763 + }
2764 + } while (!locked);
2765 +
2766 +@@ -483,6 +493,8 @@ static void vmballoon_release_refused_pages(struct vmballoon *b)
2767 + __free_page(page);
2768 + STATS_INC(b->stats.refused_free);
2769 + }
2770 ++
2771 ++ b->n_refused_pages = 0;
2772 + }
2773 +
2774 + /*
2775 +diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
2776 +index 2c712af..48a1dbf 100644
2777 +--- a/drivers/net/arcnet/com20020-pci.c
2778 ++++ b/drivers/net/arcnet/com20020-pci.c
2779 +@@ -164,8 +164,8 @@ static DEFINE_PCI_DEVICE_TABLE(com20020pci_id_table) = {
2780 + { 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2781 + { 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2782 + { 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2783 +- { 0x10B5, 0x9030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2784 +- { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2785 ++ { 0x10B5, 0x9030, 0x10B5, 0x2978, 0, 0, ARC_CAN_10MBIT },
2786 ++ { 0x10B5, 0x9050, 0x10B5, 0x2273, 0, 0, ARC_CAN_10MBIT },
2787 + { 0x14BA, 0x6000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2788 + { 0x10B5, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2789 + {0,}
2790 +diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
2791 +index 145b1a7..dd70d0c 100644
2792 +--- a/drivers/net/can/sja1000/sja1000.c
2793 ++++ b/drivers/net/can/sja1000/sja1000.c
2794 +@@ -84,6 +84,20 @@ static struct can_bittiming_const sja1000_bittiming_const = {
2795 + .brp_inc = 1,
2796 + };
2797 +
2798 ++static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
2799 ++{
2800 ++ unsigned long flags;
2801 ++
2802 ++ /*
2803 ++ * The command register needs some locking and time to settle
2804 ++ * the write_reg() operation - especially on SMP systems.
2805 ++ */
2806 ++ spin_lock_irqsave(&priv->cmdreg_lock, flags);
2807 ++ priv->write_reg(priv, REG_CMR, val);
2808 ++ priv->read_reg(priv, REG_SR);
2809 ++ spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
2810 ++}
2811 ++
2812 + static int sja1000_probe_chip(struct net_device *dev)
2813 + {
2814 + struct sja1000_priv *priv = netdev_priv(dev);
2815 +@@ -297,7 +311,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
2816 +
2817 + can_put_echo_skb(skb, dev, 0);
2818 +
2819 +- priv->write_reg(priv, REG_CMR, CMD_TR);
2820 ++ sja1000_write_cmdreg(priv, CMD_TR);
2821 +
2822 + return NETDEV_TX_OK;
2823 + }
2824 +@@ -346,7 +360,7 @@ static void sja1000_rx(struct net_device *dev)
2825 + cf->can_id = id;
2826 +
2827 + /* release receive buffer */
2828 +- priv->write_reg(priv, REG_CMR, CMD_RRB);
2829 ++ sja1000_write_cmdreg(priv, CMD_RRB);
2830 +
2831 + netif_rx(skb);
2832 +
2833 +@@ -374,7 +388,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
2834 + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
2835 + stats->rx_over_errors++;
2836 + stats->rx_errors++;
2837 +- priv->write_reg(priv, REG_CMR, CMD_CDO); /* clear bit */
2838 ++ sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */
2839 + }
2840 +
2841 + if (isrc & IRQ_EI) {
2842 +diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
2843 +index 97a622b..de8e778 100644
2844 +--- a/drivers/net/can/sja1000/sja1000.h
2845 ++++ b/drivers/net/can/sja1000/sja1000.h
2846 +@@ -167,6 +167,7 @@ struct sja1000_priv {
2847 +
2848 + void __iomem *reg_base; /* ioremap'ed address to registers */
2849 + unsigned long irq_flags; /* for request_irq() */
2850 ++ spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */
2851 +
2852 + u16 flags; /* custom mode flags */
2853 + u8 ocr; /* output control register */
2854 +diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
2855 +index 57288ca..ef62f17 100644
2856 +--- a/drivers/net/mlx4/icm.c
2857 ++++ b/drivers/net/mlx4/icm.c
2858 +@@ -175,9 +175,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
2859 +
2860 + if (chunk->nsg <= 0)
2861 + goto fail;
2862 ++ }
2863 +
2864 ++ if (chunk->npages == MLX4_ICM_CHUNK_LEN)
2865 + chunk = NULL;
2866 +- }
2867 +
2868 + npages -= 1 << cur_order;
2869 + } else {
2870 +diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
2871 +index 0a1d4c2..06f1f3c 100644
2872 +--- a/drivers/net/wireless/ath/ar9170/hw.h
2873 ++++ b/drivers/net/wireless/ath/ar9170/hw.h
2874 +@@ -425,5 +425,6 @@ enum ar9170_txq {
2875 +
2876 + #define AR9170_TXQ_DEPTH 32
2877 + #define AR9170_TX_MAX_PENDING 128
2878 ++#define AR9170_RX_STREAM_MAX_SIZE 65535
2879 +
2880 + #endif /* __AR9170_HW_H */
2881 +diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
2882 +index c536929..144db02 100644
2883 +--- a/drivers/net/wireless/ath/ar9170/main.c
2884 ++++ b/drivers/net/wireless/ath/ar9170/main.c
2885 +@@ -2516,7 +2516,7 @@ void *ar9170_alloc(size_t priv_size)
2886 + * tends to split the streams into separate rx descriptors.
2887 + */
2888 +
2889 +- skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
2890 ++ skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
2891 + if (!skb)
2892 + goto err_nomem;
2893 +
2894 +diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
2895 +index e1c2fca..7bae7fd 100644
2896 +--- a/drivers/net/wireless/ath/ar9170/usb.c
2897 ++++ b/drivers/net/wireless/ath/ar9170/usb.c
2898 +@@ -67,18 +67,28 @@ static struct usb_device_id ar9170_usb_ids[] = {
2899 + { USB_DEVICE(0x0cf3, 0x1001) },
2900 + /* TP-Link TL-WN821N v2 */
2901 + { USB_DEVICE(0x0cf3, 0x1002) },
2902 ++ /* 3Com Dual Band 802.11n USB Adapter */
2903 ++ { USB_DEVICE(0x0cf3, 0x1010) },
2904 ++ /* H3C Dual Band 802.11n USB Adapter */
2905 ++ { USB_DEVICE(0x0cf3, 0x1011) },
2906 + /* Cace Airpcap NX */
2907 + { USB_DEVICE(0xcace, 0x0300) },
2908 + /* D-Link DWA 160 A1 */
2909 + { USB_DEVICE(0x07d1, 0x3c10) },
2910 + /* D-Link DWA 160 A2 */
2911 + { USB_DEVICE(0x07d1, 0x3a09) },
2912 ++ /* Netgear WNA1000 */
2913 ++ { USB_DEVICE(0x0846, 0x9040) },
2914 + /* Netgear WNDA3100 */
2915 + { USB_DEVICE(0x0846, 0x9010) },
2916 + /* Netgear WN111 v2 */
2917 + { USB_DEVICE(0x0846, 0x9001) },
2918 + /* Zydas ZD1221 */
2919 + { USB_DEVICE(0x0ace, 0x1221) },
2920 ++ /* Proxim ORiNOCO 802.11n USB */
2921 ++ { USB_DEVICE(0x1435, 0x0804) },
2922 ++ /* WNC Generic 11n USB Dongle */
2923 ++ { USB_DEVICE(0x1435, 0x0326) },
2924 + /* ZyXEL NWD271N */
2925 + { USB_DEVICE(0x0586, 0x3417) },
2926 + /* Z-Com UB81 BG */
2927 +diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
2928 +index 3abbe75..ea90997 100644
2929 +--- a/drivers/net/wireless/ath/ath5k/base.c
2930 ++++ b/drivers/net/wireless/ath/ath5k/base.c
2931 +@@ -1211,6 +1211,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
2932 + struct ath5k_hw *ah = sc->ah;
2933 + struct sk_buff *skb = bf->skb;
2934 + struct ath5k_desc *ds;
2935 ++ int ret;
2936 +
2937 + if (!skb) {
2938 + skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
2939 +@@ -1237,9 +1238,9 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
2940 + ds = bf->desc;
2941 + ds->ds_link = bf->daddr; /* link to self */
2942 + ds->ds_data = bf->skbaddr;
2943 +- ah->ah_setup_rx_desc(ah, ds,
2944 +- skb_tailroom(skb), /* buffer size */
2945 +- 0);
2946 ++ ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
2947 ++ if (ret)
2948 ++ return ret;
2949 +
2950 + if (sc->rxlink != NULL)
2951 + *sc->rxlink = bf->daddr;
2952 +@@ -2993,13 +2994,15 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
2953 +
2954 + if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
2955 + if (*new_flags & FIF_PROMISC_IN_BSS) {
2956 +- rfilt |= AR5K_RX_FILTER_PROM;
2957 + __set_bit(ATH_STAT_PROMISC, sc->status);
2958 + } else {
2959 + __clear_bit(ATH_STAT_PROMISC, sc->status);
2960 + }
2961 + }
2962 +
2963 ++ if (test_bit(ATH_STAT_PROMISC, sc->status))
2964 ++ rfilt |= AR5K_RX_FILTER_PROM;
2965 ++
2966 + /* Note, AR5K_RX_FILTER_MCAST is already enabled */
2967 + if (*new_flags & FIF_ALLMULTI) {
2968 + mfilt[0] = ~0;
2969 +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
2970 +index 78b5711..20d5414 100644
2971 +--- a/drivers/net/wireless/ath/ath9k/hw.c
2972 ++++ b/drivers/net/wireless/ath/ath9k/hw.c
2973 +@@ -1241,7 +1241,7 @@ void ath9k_hw_deinit(struct ath_hw *ah)
2974 + {
2975 + struct ath_common *common = ath9k_hw_common(ah);
2976 +
2977 +- if (common->state <= ATH_HW_INITIALIZED)
2978 ++ if (common->state < ATH_HW_INITIALIZED)
2979 + goto free_hw;
2980 +
2981 + if (!AR_SREV_9100(ah))
2982 +@@ -1252,8 +1252,6 @@ void ath9k_hw_deinit(struct ath_hw *ah)
2983 + free_hw:
2984 + if (!AR_SREV_9280_10_OR_LATER(ah))
2985 + ath9k_hw_rf_free_ext_banks(ah);
2986 +- kfree(ah);
2987 +- ah = NULL;
2988 + }
2989 + EXPORT_SYMBOL(ath9k_hw_deinit);
2990 +
2991 +diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
2992 +index 3d4d897..b78308c 100644
2993 +--- a/drivers/net/wireless/ath/ath9k/init.c
2994 ++++ b/drivers/net/wireless/ath/ath9k/init.c
2995 +@@ -760,6 +760,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
2996 +
2997 + tasklet_kill(&sc->intr_tq);
2998 + tasklet_kill(&sc->bcon_tasklet);
2999 ++
3000 ++ kfree(sc->sc_ah);
3001 ++ sc->sc_ah = NULL;
3002 + }
3003 +
3004 + void ath9k_deinit_device(struct ath_softc *sc)
3005 +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
3006 +index 1460116..d3ef2a9 100644
3007 +--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
3008 ++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
3009 +@@ -2077,10 +2077,12 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
3010 + }
3011 + /* Else we have enough samples; calculate estimate of
3012 + * actual average throughput */
3013 +-
3014 +- /* Sanity-check TPT calculations */
3015 +- BUG_ON(window->average_tpt != ((window->success_ratio *
3016 +- tbl->expected_tpt[index] + 64) / 128));
3017 ++ if (window->average_tpt != ((window->success_ratio *
3018 ++ tbl->expected_tpt[index] + 64) / 128)) {
3019 ++ IWL_ERR(priv, "expected_tpt should have been calculated by now\n");
3020 ++ window->average_tpt = ((window->success_ratio *
3021 ++ tbl->expected_tpt[index] + 64) / 128);
3022 ++ }
3023 +
3024 + /* If we are searching for better modulation mode, check success. */
3025 + if (lq_sta->search_better_tbl &&
3026 +diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
3027 +index 741e65e..661e36b 100644
3028 +--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
3029 ++++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
3030 +@@ -561,6 +561,11 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
3031 +
3032 + mutex_lock(&priv->mutex);
3033 +
3034 ++ if (priv->is_internal_short_scan == true) {
3035 ++ IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
3036 ++ goto unlock;
3037 ++ }
3038 ++
3039 + if (!iwl_is_ready_rf(priv)) {
3040 + IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
3041 + goto unlock;
3042 +@@ -958,17 +963,27 @@ void iwl_bg_scan_completed(struct work_struct *work)
3043 + {
3044 + struct iwl_priv *priv =
3045 + container_of(work, struct iwl_priv, scan_completed);
3046 ++ bool internal = false;
3047 +
3048 + IWL_DEBUG_SCAN(priv, "SCAN complete scan\n");
3049 +
3050 + cancel_delayed_work(&priv->scan_check);
3051 +
3052 +- if (!priv->is_internal_short_scan)
3053 +- ieee80211_scan_completed(priv->hw, false);
3054 +- else {
3055 ++ mutex_lock(&priv->mutex);
3056 ++ if (priv->is_internal_short_scan) {
3057 + priv->is_internal_short_scan = false;
3058 + IWL_DEBUG_SCAN(priv, "internal short scan completed\n");
3059 ++ internal = true;
3060 + }
3061 ++ mutex_unlock(&priv->mutex);
3062 ++
3063 ++ /*
3064 ++ * Do not hold mutex here since this will cause mac80211 to call
3065 ++ * into driver again into functions that will attempt to take
3066 ++ * mutex.
3067 ++ */
3068 ++ if (!internal)
3069 ++ ieee80211_scan_completed(priv->hw, false);
3070 +
3071 + if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3072 + return;
3073 +diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
3074 +index 8dd0c03..c243df7 100644
3075 +--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
3076 ++++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
3077 +@@ -1198,6 +1198,7 @@ static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
3078 + struct ieee80211_sta *sta;
3079 + struct iwl_station_priv *sta_priv;
3080 +
3081 ++ rcu_read_lock();
3082 + sta = ieee80211_find_sta(priv->vif, hdr->addr1);
3083 + if (sta) {
3084 + sta_priv = (void *)sta->drv_priv;
3085 +@@ -1206,6 +1207,7 @@ static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
3086 + atomic_dec_return(&sta_priv->pending_frames) == 0)
3087 + ieee80211_sta_block_awake(priv->hw, sta, false);
3088 + }
3089 ++ rcu_read_unlock();
3090 +
3091 + ieee80211_tx_status_irqsafe(priv->hw, skb);
3092 + }
3093 +diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
3094 +index 743a6c6..186dc71 100644
3095 +--- a/drivers/net/wireless/p54/p54usb.c
3096 ++++ b/drivers/net/wireless/p54/p54usb.c
3097 +@@ -80,6 +80,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
3098 + {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
3099 + {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
3100 + {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
3101 ++ {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
3102 + {USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */
3103 + {USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */
3104 + {}
3105 +diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
3106 +index 2131a44..de632ec 100644
3107 +--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
3108 ++++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
3109 +@@ -188,6 +188,7 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
3110 + info->flags |= IEEE80211_TX_STAT_ACK;
3111 +
3112 + info->status.rates[0].count = (flags & 0xFF) + 1;
3113 ++ info->status.rates[1].idx = -1;
3114 +
3115 + ieee80211_tx_status_irqsafe(dev, skb);
3116 + if (ring->entries - skb_queue_len(&ring->queue) == 2)
3117 +diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c
3118 +index 9423f22..d74b89b 100644
3119 +--- a/drivers/net/wireless/wl12xx/wl1251_sdio.c
3120 ++++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c
3121 +@@ -160,6 +160,7 @@ disable:
3122 + sdio_disable_func(func);
3123 + release:
3124 + sdio_release_host(func);
3125 ++ wl1251_free_hw(wl);
3126 + return ret;
3127 + }
3128 +
3129 +diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
3130 +index 166b67e..de82183 100644
3131 +--- a/drivers/oprofile/cpu_buffer.c
3132 ++++ b/drivers/oprofile/cpu_buffer.c
3133 +@@ -30,23 +30,7 @@
3134 +
3135 + #define OP_BUFFER_FLAGS 0
3136 +
3137 +-/*
3138 +- * Read and write access is using spin locking. Thus, writing to the
3139 +- * buffer by NMI handler (x86) could occur also during critical
3140 +- * sections when reading the buffer. To avoid this, there are 2
3141 +- * buffers for independent read and write access. Read access is in
3142 +- * process context only, write access only in the NMI handler. If the
3143 +- * read buffer runs empty, both buffers are swapped atomically. There
3144 +- * is potentially a small window during swapping where the buffers are
3145 +- * disabled and samples could be lost.
3146 +- *
3147 +- * Using 2 buffers is a little bit overhead, but the solution is clear
3148 +- * and does not require changes in the ring buffer implementation. It
3149 +- * can be changed to a single buffer solution when the ring buffer
3150 +- * access is implemented as non-locking atomic code.
3151 +- */
3152 +-static struct ring_buffer *op_ring_buffer_read;
3153 +-static struct ring_buffer *op_ring_buffer_write;
3154 ++static struct ring_buffer *op_ring_buffer;
3155 + DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
3156 +
3157 + static void wq_sync_buffer(struct work_struct *work);
3158 +@@ -68,12 +52,9 @@ void oprofile_cpu_buffer_inc_smpl_lost(void)
3159 +
3160 + void free_cpu_buffers(void)
3161 + {
3162 +- if (op_ring_buffer_read)
3163 +- ring_buffer_free(op_ring_buffer_read);
3164 +- op_ring_buffer_read = NULL;
3165 +- if (op_ring_buffer_write)
3166 +- ring_buffer_free(op_ring_buffer_write);
3167 +- op_ring_buffer_write = NULL;
3168 ++ if (op_ring_buffer)
3169 ++ ring_buffer_free(op_ring_buffer);
3170 ++ op_ring_buffer = NULL;
3171 + }
3172 +
3173 + #define RB_EVENT_HDR_SIZE 4
3174 +@@ -86,11 +67,8 @@ int alloc_cpu_buffers(void)
3175 + unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
3176 + RB_EVENT_HDR_SIZE);
3177 +
3178 +- op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
3179 +- if (!op_ring_buffer_read)
3180 +- goto fail;
3181 +- op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
3182 +- if (!op_ring_buffer_write)
3183 ++ op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
3184 ++ if (!op_ring_buffer)
3185 + goto fail;
3186 +
3187 + for_each_possible_cpu(i) {
3188 +@@ -162,16 +140,11 @@ struct op_sample
3189 + *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
3190 + {
3191 + entry->event = ring_buffer_lock_reserve
3192 +- (op_ring_buffer_write, sizeof(struct op_sample) +
3193 ++ (op_ring_buffer, sizeof(struct op_sample) +
3194 + size * sizeof(entry->sample->data[0]));
3195 +- if (entry->event)
3196 +- entry->sample = ring_buffer_event_data(entry->event);
3197 +- else
3198 +- entry->sample = NULL;
3199 +-
3200 +- if (!entry->sample)
3201 ++ if (!entry->event)
3202 + return NULL;
3203 +-
3204 ++ entry->sample = ring_buffer_event_data(entry->event);
3205 + entry->size = size;
3206 + entry->data = entry->sample->data;
3207 +
3208 +@@ -180,25 +153,16 @@ struct op_sample
3209 +
3210 + int op_cpu_buffer_write_commit(struct op_entry *entry)
3211 + {
3212 +- return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
3213 ++ return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
3214 + }
3215 +
3216 + struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
3217 + {
3218 + struct ring_buffer_event *e;
3219 +- e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
3220 +- if (e)
3221 +- goto event;
3222 +- if (ring_buffer_swap_cpu(op_ring_buffer_read,
3223 +- op_ring_buffer_write,
3224 +- cpu))
3225 ++ e = ring_buffer_consume(op_ring_buffer, cpu, NULL);
3226 ++ if (!e)
3227 + return NULL;
3228 +- e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
3229 +- if (e)
3230 +- goto event;
3231 +- return NULL;
3232 +
3233 +-event:
3234 + entry->event = e;
3235 + entry->sample = ring_buffer_event_data(e);
3236 + entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
3237 +@@ -209,8 +173,7 @@ event:
3238 +
3239 + unsigned long op_cpu_buffer_entries(int cpu)
3240 + {
3241 +- return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
3242 +- + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
3243 ++ return ring_buffer_entries_cpu(op_ring_buffer, cpu);
3244 + }
3245 +
3246 + static int
3247 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
3248 +index 27c0e6e..2f2d0ec 100644
3249 +--- a/drivers/pci/quirks.c
3250 ++++ b/drivers/pci/quirks.c
3251 +@@ -1457,7 +1457,8 @@ static void quirk_jmicron_ata(struct pci_dev *pdev)
3252 + conf5 &= ~(1 << 24); /* Clear bit 24 */
3253 +
3254 + switch (pdev->device) {
3255 +- case PCI_DEVICE_ID_JMICRON_JMB360:
3256 ++ case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */
3257 ++ case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */
3258 + /* The controller should be in single function ahci mode */
3259 + conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */
3260 + break;
3261 +@@ -1493,12 +1494,14 @@ static void quirk_jmicron_ata(struct pci_dev *pdev)
3262 + }
3263 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
3264 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
3265 ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
3266 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
3267 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
3268 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
3269 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
3270 + DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
3271 + DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
3272 ++DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
3273 + DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
3274 + DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
3275 + DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
3276 +@@ -2127,6 +2130,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9602, quirk_disable_msi);
3277 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASUSTEK, 0x9602, quirk_disable_msi);
3278 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AI, 0x9602, quirk_disable_msi);
3279 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
3280 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
3281 +
3282 + /* Go through the list of Hypertransport capabilities and
3283 + * return 1 if a HT MSI capability is found and enabled */
3284 +@@ -2218,15 +2222,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
3285 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
3286 + ht_enable_msi_mapping);
3287 +
3288 +-/* The P5N32-SLI Premium motherboard from Asus has a problem with msi
3289 ++/* The P5N32-SLI motherboards from Asus have a problem with msi
3290 + * for the MCP55 NIC. It is not yet determined whether the msi problem
3291 + * also affects other devices. As for now, turn off msi for this device.
3292 + */
3293 + static void __devinit nvenet_msi_disable(struct pci_dev *dev)
3294 + {
3295 +- if (dmi_name_in_vendors("P5N32-SLI PREMIUM")) {
3296 ++ if (dmi_name_in_vendors("P5N32-SLI PREMIUM") ||
3297 ++ dmi_name_in_vendors("P5N32-E SLI")) {
3298 + dev_info(&dev->dev,
3299 +- "Disabling msi for MCP55 NIC on P5N32-SLI Premium\n");
3300 ++ "Disabling msi for MCP55 NIC on P5N32-SLI\n");
3301 + dev->no_msi = 1;
3302 + }
3303 + }
3304 +diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
3305 +index 041eee4..6df5dff 100644
3306 +--- a/drivers/pcmcia/ds.c
3307 ++++ b/drivers/pcmcia/ds.c
3308 +@@ -682,6 +682,7 @@ static void pcmcia_requery(struct pcmcia_socket *s)
3309 + if (old_funcs != new_funcs) {
3310 + /* we need to re-start */
3311 + pcmcia_card_remove(s, NULL);
3312 ++ s->functions = 0;
3313 + pcmcia_card_add(s);
3314 + }
3315 + }
3316 +diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
3317 +index 83ace27..6bb6cb9 100644
3318 +--- a/drivers/pcmcia/yenta_socket.c
3319 ++++ b/drivers/pcmcia/yenta_socket.c
3320 +@@ -975,7 +975,7 @@ static irqreturn_t yenta_probe_handler(int irq, void *dev_id)
3321 + /* probes the PCI interrupt, use only on override functions */
3322 + static int yenta_probe_cb_irq(struct yenta_socket *socket)
3323 + {
3324 +- u8 reg;
3325 ++ u8 reg = 0;
3326 +
3327 + if (!socket->cb_irq)
3328 + return -1;
3329 +@@ -989,7 +989,8 @@ static int yenta_probe_cb_irq(struct yenta_socket *socket)
3330 + }
3331 +
3332 + /* generate interrupt, wait */
3333 +- reg = exca_readb(socket, I365_CSCINT);
3334 ++ if (!socket->dev->irq)
3335 ++ reg = exca_readb(socket, I365_CSCINT);
3336 + exca_writeb(socket, I365_CSCINT, reg | I365_CSC_STSCHG);
3337 + cb_writel(socket, CB_SOCKET_EVENT, -1);
3338 + cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK);
3339 +diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
3340 +index 6c3320d..50601d9 100644
3341 +--- a/drivers/platform/x86/Kconfig
3342 ++++ b/drivers/platform/x86/Kconfig
3343 +@@ -390,6 +390,7 @@ config EEEPC_WMI
3344 + depends on ACPI_WMI
3345 + depends on INPUT
3346 + depends on EXPERIMENTAL
3347 ++ depends on BACKLIGHT_CLASS_DEVICE
3348 + select INPUT_SPARSEKMAP
3349 + ---help---
3350 + Say Y here if you want to support WMI-based hotkeys on Eee PC laptops.
3351 +diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
3352 +index e9aa814..aa13875 100644
3353 +--- a/drivers/rtc/rtc-cmos.c
3354 ++++ b/drivers/rtc/rtc-cmos.c
3355 +@@ -719,6 +719,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
3356 + }
3357 + }
3358 +
3359 ++ cmos_rtc.dev = dev;
3360 ++ dev_set_drvdata(dev, &cmos_rtc);
3361 ++
3362 + cmos_rtc.rtc = rtc_device_register(driver_name, dev,
3363 + &cmos_rtc_ops, THIS_MODULE);
3364 + if (IS_ERR(cmos_rtc.rtc)) {
3365 +@@ -726,8 +729,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
3366 + goto cleanup0;
3367 + }
3368 +
3369 +- cmos_rtc.dev = dev;
3370 +- dev_set_drvdata(dev, &cmos_rtc);
3371 + rename_region(ports, dev_name(&cmos_rtc.rtc->dev));
3372 +
3373 + spin_lock_irq(&rtc_lock);
3374 +diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
3375 +index 4969b60..3793ea6 100644
3376 +--- a/drivers/rtc/rtc-s3c.c
3377 ++++ b/drivers/rtc/rtc-s3c.c
3378 +@@ -457,8 +457,6 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
3379 + pr_debug("s3c2410_rtc: RTCCON=%02x\n",
3380 + readb(s3c_rtc_base + S3C2410_RTCCON));
3381 +
3382 +- s3c_rtc_setfreq(&pdev->dev, 1);
3383 +-
3384 + device_init_wakeup(&pdev->dev, 1);
3385 +
3386 + /* register RTC and exit */
3387 +@@ -475,6 +473,9 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
3388 + rtc->max_user_freq = 128;
3389 +
3390 + platform_set_drvdata(pdev, rtc);
3391 ++
3392 ++ s3c_rtc_setfreq(&pdev->dev, 1);
3393 ++
3394 + return 0;
3395 +
3396 + err_nortc:
3397 +diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
3398 +index 88f7446..8c496b5 100644
3399 +--- a/drivers/scsi/libsas/sas_ata.c
3400 ++++ b/drivers/scsi/libsas/sas_ata.c
3401 +@@ -395,12 +395,13 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev,
3402 + void sas_ata_task_abort(struct sas_task *task)
3403 + {
3404 + struct ata_queued_cmd *qc = task->uldd_task;
3405 +- struct request_queue *q = qc->scsicmd->device->request_queue;
3406 + struct completion *waiting;
3407 +- unsigned long flags;
3408 +
3409 + /* Bounce SCSI-initiated commands to the SCSI EH */
3410 + if (qc->scsicmd) {
3411 ++ struct request_queue *q = qc->scsicmd->device->request_queue;
3412 ++ unsigned long flags;
3413 ++
3414 + spin_lock_irqsave(q->queue_lock, flags);
3415 + blk_abort_request(qc->scsicmd->request);
3416 + spin_unlock_irqrestore(q->queue_lock, flags);
3417 +diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
3418 +index 8228350..53849f2 100644
3419 +--- a/drivers/scsi/libsas/sas_scsi_host.c
3420 ++++ b/drivers/scsi/libsas/sas_scsi_host.c
3421 +@@ -1030,8 +1030,6 @@ int __sas_task_abort(struct sas_task *task)
3422 + void sas_task_abort(struct sas_task *task)
3423 + {
3424 + struct scsi_cmnd *sc = task->uldd_task;
3425 +- struct request_queue *q = sc->device->request_queue;
3426 +- unsigned long flags;
3427 +
3428 + /* Escape for libsas internal commands */
3429 + if (!sc) {
3430 +@@ -1043,13 +1041,15 @@ void sas_task_abort(struct sas_task *task)
3431 +
3432 + if (dev_is_sata(task->dev)) {
3433 + sas_ata_task_abort(task);
3434 +- return;
3435 +- }
3436 ++ } else {
3437 ++ struct request_queue *q = sc->device->request_queue;
3438 ++ unsigned long flags;
3439 +
3440 +- spin_lock_irqsave(q->queue_lock, flags);
3441 +- blk_abort_request(sc->request);
3442 +- spin_unlock_irqrestore(q->queue_lock, flags);
3443 +- scsi_schedule_eh(sc->device->host);
3444 ++ spin_lock_irqsave(q->queue_lock, flags);
3445 ++ blk_abort_request(sc->request);
3446 ++ spin_unlock_irqrestore(q->queue_lock, flags);
3447 ++ scsi_schedule_eh(sc->device->host);
3448 ++ }
3449 + }
3450 +
3451 + int sas_slave_alloc(struct scsi_device *scsi_dev)
3452 +diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
3453 +index 78ed24b..3046386 100644
3454 +--- a/drivers/serial/68328serial.c
3455 ++++ b/drivers/serial/68328serial.c
3456 +@@ -1437,7 +1437,7 @@ int m68328_console_setup(struct console *cp, char *arg)
3457 + for (i = 0; i < ARRAY_SIZE(baud_table); i++)
3458 + if (baud_table[i] == n)
3459 + break;
3460 +- if (i < BAUD_TABLE_SIZE) {
3461 ++ if (i < ARRAY_SIZE(baud_table)) {
3462 + m68328_console_baud = n;
3463 + m68328_console_cbaud = 0;
3464 + if (i > 15) {
3465 +diff --git a/drivers/staging/batman-adv/proc.c b/drivers/staging/batman-adv/proc.c
3466 +index 7de60e8..c9366bc 100644
3467 +--- a/drivers/staging/batman-adv/proc.c
3468 ++++ b/drivers/staging/batman-adv/proc.c
3469 +@@ -41,7 +41,7 @@ static int proc_interfaces_read(struct seq_file *seq, void *offset)
3470 +
3471 + rcu_read_lock();
3472 + list_for_each_entry_rcu(batman_if, &if_list, list) {
3473 +- seq_printf(seq, "[%8s] %s %s \n",
3474 ++ seq_printf(seq, "[%8s] %s %s\n",
3475 + (batman_if->if_active == IF_ACTIVE ?
3476 + "active" : "inactive"),
3477 + batman_if->dev,
3478 +@@ -188,18 +188,18 @@ static int proc_originators_read(struct seq_file *seq, void *offset)
3479 + rcu_read_lock();
3480 + if (list_empty(&if_list)) {
3481 + rcu_read_unlock();
3482 +- seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it \n");
3483 ++ seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it\n");
3484 + goto end;
3485 + }
3486 +
3487 + if (((struct batman_if *)if_list.next)->if_active != IF_ACTIVE) {
3488 + rcu_read_unlock();
3489 +- seq_printf(seq, "BATMAN disabled - primary interface not active \n");
3490 ++ seq_printf(seq, "BATMAN disabled - primary interface not active\n");
3491 + goto end;
3492 + }
3493 +
3494 + seq_printf(seq,
3495 +- " %-14s (%s/%i) %17s [%10s]: %20s ... [B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%s] \n",
3496 ++ " %-14s (%s/%i) %17s [%10s]: %20s ... [B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%s]\n",
3497 + "Originator", "#", TQ_MAX_VALUE, "Nexthop", "outgoingIF",
3498 + "Potential nexthops", SOURCE_VERSION, REVISION_VERSION_STR,
3499 + ((struct batman_if *)if_list.next)->dev,
3500 +@@ -240,7 +240,7 @@ static int proc_originators_read(struct seq_file *seq, void *offset)
3501 + spin_unlock_irqrestore(&orig_hash_lock, flags);
3502 +
3503 + if (batman_count == 0)
3504 +- seq_printf(seq, "No batman nodes in range ... \n");
3505 ++ seq_printf(seq, "No batman nodes in range ...\n");
3506 +
3507 + end:
3508 + return 0;
3509 +@@ -262,7 +262,7 @@ static int proc_transt_local_read(struct seq_file *seq, void *offset)
3510 + rcu_read_lock();
3511 + if (list_empty(&if_list)) {
3512 + rcu_read_unlock();
3513 +- seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it \n");
3514 ++ seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it\n");
3515 + goto end;
3516 + }
3517 +
3518 +@@ -294,7 +294,7 @@ static int proc_transt_global_read(struct seq_file *seq, void *offset)
3519 + rcu_read_lock();
3520 + if (list_empty(&if_list)) {
3521 + rcu_read_unlock();
3522 +- seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it \n");
3523 ++ seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it\n");
3524 + goto end;
3525 + }
3526 + rcu_read_unlock();
3527 +@@ -350,9 +350,9 @@ static int proc_vis_srv_read(struct seq_file *seq, void *offset)
3528 + {
3529 + int vis_server = atomic_read(&vis_mode);
3530 +
3531 +- seq_printf(seq, "[%c] client mode (server disabled) \n",
3532 ++ seq_printf(seq, "[%c] client mode (server disabled)\n",
3533 + (vis_server == VIS_TYPE_CLIENT_UPDATE) ? 'x' : ' ');
3534 +- seq_printf(seq, "[%c] server mode (server enabled) \n",
3535 ++ seq_printf(seq, "[%c] server mode (server enabled)\n",
3536 + (vis_server == VIS_TYPE_SERVER_SYNC) ? 'x' : ' ');
3537 +
3538 + return 0;
3539 +@@ -369,6 +369,8 @@ static int proc_vis_data_read(struct seq_file *seq, void *offset)
3540 + struct vis_info *info;
3541 + struct vis_info_entry *entries;
3542 + HLIST_HEAD(vis_if_list);
3543 ++ struct if_list_entry *entry;
3544 ++ struct hlist_node *pos, *n;
3545 + int i;
3546 + char tmp_addr_str[ETH_STR_LEN];
3547 + unsigned long flags;
3548 +@@ -387,17 +389,34 @@ static int proc_vis_data_read(struct seq_file *seq, void *offset)
3549 + info = hashit.bucket->data;
3550 + entries = (struct vis_info_entry *)
3551 + ((char *)info + sizeof(struct vis_info));
3552 +- addr_to_string(tmp_addr_str, info->packet.vis_orig);
3553 +- seq_printf(seq, "%s,", tmp_addr_str);
3554 +
3555 + for (i = 0; i < info->packet.entries; i++) {
3556 +- proc_vis_read_entry(seq, &entries[i], &vis_if_list,
3557 +- info->packet.vis_orig);
3558 ++ if (entries[i].quality == 0)
3559 ++ continue;
3560 ++ proc_vis_insert_interface(entries[i].src, &vis_if_list,
3561 ++ compare_orig(entries[i].src,
3562 ++ info->packet.vis_orig));
3563 + }
3564 +
3565 +- /* add primary/secondary records */
3566 +- proc_vis_read_prim_sec(seq, &vis_if_list);
3567 +- seq_printf(seq, "\n");
3568 ++ hlist_for_each_entry(entry, pos, &vis_if_list, list) {
3569 ++ addr_to_string(tmp_addr_str, entry->addr);
3570 ++ seq_printf(seq, "%s,", tmp_addr_str);
3571 ++
3572 ++ for (i = 0; i < info->packet.entries; i++)
3573 ++ proc_vis_read_entry(seq, &entries[i],
3574 ++ entry->addr, entry->primary);
3575 ++
3576 ++ /* add primary/secondary records */
3577 ++ if (compare_orig(entry->addr, info->packet.vis_orig))
3578 ++ proc_vis_read_prim_sec(seq, &vis_if_list);
3579 ++
3580 ++ seq_printf(seq, "\n");
3581 ++ }
3582 ++
3583 ++ hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
3584 ++ hlist_del(&entry->list);
3585 ++ kfree(entry);
3586 ++ }
3587 + }
3588 + spin_unlock_irqrestore(&vis_hash_lock, flags);
3589 +
3590 +diff --git a/drivers/staging/batman-adv/vis.c b/drivers/staging/batman-adv/vis.c
3591 +index fedec1b..28eac7e 100644
3592 +--- a/drivers/staging/batman-adv/vis.c
3593 ++++ b/drivers/staging/batman-adv/vis.c
3594 +@@ -27,24 +27,44 @@
3595 + #include "hard-interface.h"
3596 + #include "hash.h"
3597 +
3598 ++/* Returns the smallest signed integer in two's complement with the sizeof x */
3599 ++#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
3600 ++
3601 ++/* Checks if a sequence number x is a predecessor/successor of y.
3602 ++ they handle overflows/underflows and can correctly check for a
3603 ++ predecessor/successor unless the variable sequence number has grown by
3604 ++ more then 2**(bitwidth(x)-1)-1.
3605 ++ This means that for a uint8_t with the maximum value 255, it would think:
3606 ++ * when adding nothing - it is neither a predecessor nor a successor
3607 ++ * before adding more than 127 to the starting value - it is a predecessor,
3608 ++ * when adding 128 - it is neither a predecessor nor a successor,
3609 ++ * after adding more than 127 to the starting value - it is a successor */
3610 ++#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
3611 ++ _dummy > smallest_signed_int(_dummy); })
3612 ++#define seq_after(x, y) seq_before(y, x)
3613 ++
3614 + struct hashtable_t *vis_hash;
3615 + DEFINE_SPINLOCK(vis_hash_lock);
3616 ++static DEFINE_SPINLOCK(recv_list_lock);
3617 + static struct vis_info *my_vis_info;
3618 + static struct list_head send_list; /* always locked with vis_hash_lock */
3619 +
3620 + static void start_vis_timer(void);
3621 +
3622 + /* free the info */
3623 +-static void free_info(void *data)
3624 ++static void free_info(struct kref *ref)
3625 + {
3626 +- struct vis_info *info = data;
3627 ++ struct vis_info *info = container_of(ref, struct vis_info, refcount);
3628 + struct recvlist_node *entry, *tmp;
3629 ++ unsigned long flags;
3630 +
3631 + list_del_init(&info->send_list);
3632 ++ spin_lock_irqsave(&recv_list_lock, flags);
3633 + list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
3634 + list_del(&entry->list);
3635 + kfree(entry);
3636 + }
3637 ++ spin_unlock_irqrestore(&recv_list_lock, flags);
3638 + kfree(info);
3639 + }
3640 +
3641 +@@ -82,7 +102,7 @@ static int vis_info_choose(void *data, int size)
3642 +
3643 + /* insert interface to the list of interfaces of one originator, if it
3644 + * does not already exist in the list */
3645 +-static void proc_vis_insert_interface(const uint8_t *interface,
3646 ++void proc_vis_insert_interface(const uint8_t *interface,
3647 + struct hlist_head *if_list,
3648 + bool primary)
3649 + {
3650 +@@ -107,38 +127,51 @@ void proc_vis_read_prim_sec(struct seq_file *seq,
3651 + struct hlist_head *if_list)
3652 + {
3653 + struct if_list_entry *entry;
3654 +- struct hlist_node *pos, *n;
3655 ++ struct hlist_node *pos;
3656 + char tmp_addr_str[ETH_STR_LEN];
3657 +
3658 +- hlist_for_each_entry_safe(entry, pos, n, if_list, list) {
3659 +- if (entry->primary) {
3660 ++ hlist_for_each_entry(entry, pos, if_list, list) {
3661 ++ if (entry->primary)
3662 + seq_printf(seq, "PRIMARY, ");
3663 +- } else {
3664 ++ else {
3665 + addr_to_string(tmp_addr_str, entry->addr);
3666 + seq_printf(seq, "SEC %s, ", tmp_addr_str);
3667 + }
3668 +-
3669 +- hlist_del(&entry->list);
3670 +- kfree(entry);
3671 + }
3672 + }
3673 +
3674 + /* read an entry */
3675 + void proc_vis_read_entry(struct seq_file *seq,
3676 + struct vis_info_entry *entry,
3677 +- struct hlist_head *if_list,
3678 +- uint8_t *vis_orig)
3679 ++ uint8_t *src,
3680 ++ bool primary)
3681 + {
3682 + char to[40];
3683 +
3684 + addr_to_string(to, entry->dest);
3685 +- if (entry->quality == 0) {
3686 +- proc_vis_insert_interface(vis_orig, if_list, true);
3687 ++ if (primary && entry->quality == 0)
3688 + seq_printf(seq, "HNA %s, ", to);
3689 +- } else {
3690 +- proc_vis_insert_interface(entry->src, if_list,
3691 +- compare_orig(entry->src, vis_orig));
3692 ++ else if (compare_orig(entry->src, src))
3693 + seq_printf(seq, "TQ %s %d, ", to, entry->quality);
3694 ++}
3695 ++
3696 ++/* add the info packet to the send list, if it was not
3697 ++ * already linked in. */
3698 ++static void send_list_add(struct vis_info *info)
3699 ++{
3700 ++ if (list_empty(&info->send_list)) {
3701 ++ kref_get(&info->refcount);
3702 ++ list_add_tail(&info->send_list, &send_list);
3703 ++ }
3704 ++}
3705 ++
3706 ++/* delete the info packet from the send list, if it was
3707 ++ * linked in. */
3708 ++static void send_list_del(struct vis_info *info)
3709 ++{
3710 ++ if (!list_empty(&info->send_list)) {
3711 ++ list_del_init(&info->send_list);
3712 ++ kref_put(&info->refcount, free_info);
3713 + }
3714 + }
3715 +
3716 +@@ -146,32 +179,41 @@ void proc_vis_read_entry(struct seq_file *seq,
3717 + static void recv_list_add(struct list_head *recv_list, char *mac)
3718 + {
3719 + struct recvlist_node *entry;
3720 ++ unsigned long flags;
3721 ++
3722 + entry = kmalloc(sizeof(struct recvlist_node), GFP_ATOMIC);
3723 + if (!entry)
3724 + return;
3725 +
3726 + memcpy(entry->mac, mac, ETH_ALEN);
3727 ++ spin_lock_irqsave(&recv_list_lock, flags);
3728 + list_add_tail(&entry->list, recv_list);
3729 ++ spin_unlock_irqrestore(&recv_list_lock, flags);
3730 + }
3731 +
3732 + /* returns 1 if this mac is in the recv_list */
3733 + static int recv_list_is_in(struct list_head *recv_list, char *mac)
3734 + {
3735 + struct recvlist_node *entry;
3736 ++ unsigned long flags;
3737 +
3738 ++ spin_lock_irqsave(&recv_list_lock, flags);
3739 + list_for_each_entry(entry, recv_list, list) {
3740 +- if (memcmp(entry->mac, mac, ETH_ALEN) == 0)
3741 ++ if (memcmp(entry->mac, mac, ETH_ALEN) == 0) {
3742 ++ spin_unlock_irqrestore(&recv_list_lock, flags);
3743 + return 1;
3744 ++ }
3745 + }
3746 +-
3747 ++ spin_unlock_irqrestore(&recv_list_lock, flags);
3748 + return 0;
3749 + }
3750 +
3751 + /* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
3752 +- * broken.. ). vis hash must be locked outside. is_new is set when the packet
3753 ++ * broken.. ). vis hash must be locked outside. is_new is set when the packet
3754 + * is newer than old entries in the hash. */
3755 + static struct vis_info *add_packet(struct vis_packet *vis_packet,
3756 +- int vis_info_len, int *is_new)
3757 ++ int vis_info_len, int *is_new,
3758 ++ int make_broadcast)
3759 + {
3760 + struct vis_info *info, *old_info;
3761 + struct vis_info search_elem;
3762 +@@ -186,7 +228,7 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
3763 + old_info = hash_find(vis_hash, &search_elem);
3764 +
3765 + if (old_info != NULL) {
3766 +- if (vis_packet->seqno - old_info->packet.seqno <= 0) {
3767 ++ if (!seq_after(vis_packet->seqno, old_info->packet.seqno)) {
3768 + if (old_info->packet.seqno == vis_packet->seqno) {
3769 + recv_list_add(&old_info->recv_list,
3770 + vis_packet->sender_orig);
3771 +@@ -198,13 +240,15 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
3772 + }
3773 + /* remove old entry */
3774 + hash_remove(vis_hash, old_info);
3775 +- free_info(old_info);
3776 ++ send_list_del(old_info);
3777 ++ kref_put(&old_info->refcount, free_info);
3778 + }
3779 +
3780 + info = kmalloc(sizeof(struct vis_info) + vis_info_len, GFP_ATOMIC);
3781 + if (info == NULL)
3782 + return NULL;
3783 +
3784 ++ kref_init(&info->refcount);
3785 + INIT_LIST_HEAD(&info->send_list);
3786 + INIT_LIST_HEAD(&info->recv_list);
3787 + info->first_seen = jiffies;
3788 +@@ -214,16 +258,21 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
3789 + /* initialize and add new packet. */
3790 + *is_new = 1;
3791 +
3792 ++ /* Make it a broadcast packet, if required */
3793 ++ if (make_broadcast)
3794 ++ memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
3795 ++
3796 + /* repair if entries is longer than packet. */
3797 + if (info->packet.entries * sizeof(struct vis_info_entry) > vis_info_len)
3798 +- info->packet.entries = vis_info_len / sizeof(struct vis_info_entry);
3799 ++ info->packet.entries = vis_info_len /
3800 ++ sizeof(struct vis_info_entry);
3801 +
3802 + recv_list_add(&info->recv_list, info->packet.sender_orig);
3803 +
3804 + /* try to add it */
3805 + if (hash_add(vis_hash, info) < 0) {
3806 + /* did not work (for some reason) */
3807 +- free_info(info);
3808 ++ kref_put(&old_info->refcount, free_info);
3809 + info = NULL;
3810 + }
3811 +
3812 +@@ -234,22 +283,21 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
3813 + void receive_server_sync_packet(struct vis_packet *vis_packet, int vis_info_len)
3814 + {
3815 + struct vis_info *info;
3816 +- int is_new;
3817 ++ int is_new, make_broadcast;
3818 + unsigned long flags;
3819 + int vis_server = atomic_read(&vis_mode);
3820 +
3821 ++ make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);
3822 ++
3823 + spin_lock_irqsave(&vis_hash_lock, flags);
3824 +- info = add_packet(vis_packet, vis_info_len, &is_new);
3825 ++ info = add_packet(vis_packet, vis_info_len, &is_new, make_broadcast);
3826 + if (info == NULL)
3827 + goto end;
3828 +
3829 + /* only if we are server ourselves and packet is newer than the one in
3830 + * hash.*/
3831 +- if (vis_server == VIS_TYPE_SERVER_SYNC && is_new) {
3832 +- memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
3833 +- if (list_empty(&info->send_list))
3834 +- list_add_tail(&info->send_list, &send_list);
3835 +- }
3836 ++ if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
3837 ++ send_list_add(info);
3838 + end:
3839 + spin_unlock_irqrestore(&vis_hash_lock, flags);
3840 + }
3841 +@@ -262,31 +310,32 @@ void receive_client_update_packet(struct vis_packet *vis_packet,
3842 + int is_new;
3843 + unsigned long flags;
3844 + int vis_server = atomic_read(&vis_mode);
3845 ++ int are_target = 0;
3846 +
3847 + /* clients shall not broadcast. */
3848 + if (is_bcast(vis_packet->target_orig))
3849 + return;
3850 +
3851 ++ /* Are we the target for this VIS packet? */
3852 ++ if (vis_server == VIS_TYPE_SERVER_SYNC &&
3853 ++ is_my_mac(vis_packet->target_orig))
3854 ++ are_target = 1;
3855 ++
3856 + spin_lock_irqsave(&vis_hash_lock, flags);
3857 +- info = add_packet(vis_packet, vis_info_len, &is_new);
3858 ++ info = add_packet(vis_packet, vis_info_len, &is_new, are_target);
3859 + if (info == NULL)
3860 + goto end;
3861 + /* note that outdated packets will be dropped at this point. */
3862 +
3863 +
3864 + /* send only if we're the target server or ... */
3865 +- if (vis_server == VIS_TYPE_SERVER_SYNC &&
3866 +- is_my_mac(info->packet.target_orig) &&
3867 +- is_new) {
3868 ++ if (are_target && is_new) {
3869 + info->packet.vis_type = VIS_TYPE_SERVER_SYNC; /* upgrade! */
3870 +- memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
3871 +- if (list_empty(&info->send_list))
3872 +- list_add_tail(&info->send_list, &send_list);
3873 ++ send_list_add(info);
3874 +
3875 + /* ... we're not the recipient (and thus need to forward). */
3876 + } else if (!is_my_mac(info->packet.target_orig)) {
3877 +- if (list_empty(&info->send_list))
3878 +- list_add_tail(&info->send_list, &send_list);
3879 ++ send_list_add(info);
3880 + }
3881 + end:
3882 + spin_unlock_irqrestore(&vis_hash_lock, flags);
3883 +@@ -361,14 +410,17 @@ static int generate_vis_packet(void)
3884 + while (hash_iterate(orig_hash, &hashit_global)) {
3885 + orig_node = hashit_global.bucket->data;
3886 + if (orig_node->router != NULL
3887 +- && compare_orig(orig_node->router->addr, orig_node->orig)
3888 ++ && compare_orig(orig_node->router->addr,
3889 ++ orig_node->orig)
3890 + && orig_node->batman_if
3891 + && (orig_node->batman_if->if_active == IF_ACTIVE)
3892 + && orig_node->router->tq_avg > 0) {
3893 +
3894 + /* fill one entry into buffer. */
3895 + entry = &entry_array[info->packet.entries];
3896 +- memcpy(entry->src, orig_node->batman_if->net_dev->dev_addr, ETH_ALEN);
3897 ++ memcpy(entry->src,
3898 ++ orig_node->batman_if->net_dev->dev_addr,
3899 ++ ETH_ALEN);
3900 + memcpy(entry->dest, orig_node->orig, ETH_ALEN);
3901 + entry->quality = orig_node->router->tq_avg;
3902 + info->packet.entries++;
3903 +@@ -400,6 +452,8 @@ static int generate_vis_packet(void)
3904 + return 0;
3905 + }
3906 +
3907 ++/* free old vis packets. Must be called with this vis_hash_lock
3908 ++ * held */
3909 + static void purge_vis_packets(void)
3910 + {
3911 + HASHIT(hashit);
3912 +@@ -412,7 +466,8 @@ static void purge_vis_packets(void)
3913 + if (time_after(jiffies,
3914 + info->first_seen + (VIS_TIMEOUT*HZ)/1000)) {
3915 + hash_remove_bucket(vis_hash, &hashit);
3916 +- free_info(info);
3917 ++ send_list_del(info);
3918 ++ kref_put(&info->refcount, free_info);
3919 + }
3920 + }
3921 + }
3922 +@@ -422,6 +477,8 @@ static void broadcast_vis_packet(struct vis_info *info, int packet_length)
3923 + HASHIT(hashit);
3924 + struct orig_node *orig_node;
3925 + unsigned long flags;
3926 ++ struct batman_if *batman_if;
3927 ++ uint8_t dstaddr[ETH_ALEN];
3928 +
3929 + spin_lock_irqsave(&orig_hash_lock, flags);
3930 +
3931 +@@ -430,45 +487,56 @@ static void broadcast_vis_packet(struct vis_info *info, int packet_length)
3932 + orig_node = hashit.bucket->data;
3933 +
3934 + /* if it's a vis server and reachable, send it. */
3935 +- if (orig_node &&
3936 +- (orig_node->flags & VIS_SERVER) &&
3937 +- orig_node->batman_if &&
3938 +- orig_node->router) {
3939 ++ if ((!orig_node) || (!orig_node->batman_if) ||
3940 ++ (!orig_node->router))
3941 ++ continue;
3942 ++ if (!(orig_node->flags & VIS_SERVER))
3943 ++ continue;
3944 ++ /* don't send it if we already received the packet from
3945 ++ * this node. */
3946 ++ if (recv_list_is_in(&info->recv_list, orig_node->orig))
3947 ++ continue;
3948 +
3949 +- /* don't send it if we already received the packet from
3950 +- * this node. */
3951 +- if (recv_list_is_in(&info->recv_list, orig_node->orig))
3952 +- continue;
3953 ++ memcpy(info->packet.target_orig, orig_node->orig, ETH_ALEN);
3954 ++ batman_if = orig_node->batman_if;
3955 ++ memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
3956 ++ spin_unlock_irqrestore(&orig_hash_lock, flags);
3957 +
3958 +- memcpy(info->packet.target_orig,
3959 +- orig_node->orig, ETH_ALEN);
3960 ++ send_raw_packet((unsigned char *)&info->packet,
3961 ++ packet_length, batman_if, dstaddr);
3962 ++
3963 ++ spin_lock_irqsave(&orig_hash_lock, flags);
3964 +
3965 +- send_raw_packet((unsigned char *) &info->packet,
3966 +- packet_length,
3967 +- orig_node->batman_if,
3968 +- orig_node->router->addr);
3969 +- }
3970 + }
3971 +- memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
3972 + spin_unlock_irqrestore(&orig_hash_lock, flags);
3973 ++ memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
3974 + }
3975 +
3976 + static void unicast_vis_packet(struct vis_info *info, int packet_length)
3977 + {
3978 + struct orig_node *orig_node;
3979 + unsigned long flags;
3980 ++ struct batman_if *batman_if;
3981 ++ uint8_t dstaddr[ETH_ALEN];
3982 +
3983 + spin_lock_irqsave(&orig_hash_lock, flags);
3984 + orig_node = ((struct orig_node *)
3985 + hash_find(orig_hash, info->packet.target_orig));
3986 +
3987 +- if ((orig_node != NULL) &&
3988 +- (orig_node->batman_if != NULL) &&
3989 +- (orig_node->router != NULL)) {
3990 +- send_raw_packet((unsigned char *) &info->packet, packet_length,
3991 +- orig_node->batman_if,
3992 +- orig_node->router->addr);
3993 +- }
3994 ++ if ((!orig_node) || (!orig_node->batman_if) || (!orig_node->router))
3995 ++ goto out;
3996 ++
3997 ++ /* don't lock while sending the packets ... we therefore
3998 ++ * copy the required data before sending */
3999 ++ batman_if = orig_node->batman_if;
4000 ++ memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
4001 ++ spin_unlock_irqrestore(&orig_hash_lock, flags);
4002 ++
4003 ++ send_raw_packet((unsigned char *)&info->packet,
4004 ++ packet_length, batman_if, dstaddr);
4005 ++ return;
4006 ++
4007 ++out:
4008 + spin_unlock_irqrestore(&orig_hash_lock, flags);
4009 + }
4010 +
4011 +@@ -502,15 +570,24 @@ static void send_vis_packets(struct work_struct *work)
4012 + unsigned long flags;
4013 +
4014 + spin_lock_irqsave(&vis_hash_lock, flags);
4015 ++
4016 + purge_vis_packets();
4017 +
4018 +- if (generate_vis_packet() == 0)
4019 ++ if (generate_vis_packet() == 0) {
4020 + /* schedule if generation was successful */
4021 +- list_add_tail(&my_vis_info->send_list, &send_list);
4022 ++ send_list_add(my_vis_info);
4023 ++ }
4024 +
4025 + list_for_each_entry_safe(info, temp, &send_list, send_list) {
4026 +- list_del_init(&info->send_list);
4027 ++
4028 ++ kref_get(&info->refcount);
4029 ++ spin_unlock_irqrestore(&vis_hash_lock, flags);
4030 ++
4031 + send_vis_packet(info);
4032 ++
4033 ++ spin_lock_irqsave(&vis_hash_lock, flags);
4034 ++ send_list_del(info);
4035 ++ kref_put(&info->refcount, free_info);
4036 + }
4037 + spin_unlock_irqrestore(&vis_hash_lock, flags);
4038 + start_vis_timer();
4039 +@@ -543,6 +620,7 @@ int vis_init(void)
4040 + my_vis_info->first_seen = jiffies - atomic_read(&vis_interval);
4041 + INIT_LIST_HEAD(&my_vis_info->recv_list);
4042 + INIT_LIST_HEAD(&my_vis_info->send_list);
4043 ++ kref_init(&my_vis_info->refcount);
4044 + my_vis_info->packet.version = COMPAT_VERSION;
4045 + my_vis_info->packet.packet_type = BAT_VIS;
4046 + my_vis_info->packet.ttl = TTL;
4047 +@@ -556,9 +634,9 @@ int vis_init(void)
4048 +
4049 + if (hash_add(vis_hash, my_vis_info) < 0) {
4050 + printk(KERN_ERR
4051 +- "batman-adv:Can't add own vis packet into hash\n");
4052 +- free_info(my_vis_info); /* not in hash, need to remove it
4053 +- * manually. */
4054 ++ "batman-adv:Can't add own vis packet into hash\n");
4055 ++ /* not in hash, need to remove it manually. */
4056 ++ kref_put(&my_vis_info->refcount, free_info);
4057 + goto err;
4058 + }
4059 +
4060 +@@ -572,6 +650,15 @@ err:
4061 + return 0;
4062 + }
4063 +
4064 ++/* Decrease the reference count on a hash item info */
4065 ++static void free_info_ref(void *data)
4066 ++{
4067 ++ struct vis_info *info = data;
4068 ++
4069 ++ send_list_del(info);
4070 ++ kref_put(&info->refcount, free_info);
4071 ++}
4072 ++
4073 + /* shutdown vis-server */
4074 + void vis_quit(void)
4075 + {
4076 +@@ -583,7 +670,7 @@ void vis_quit(void)
4077 +
4078 + spin_lock_irqsave(&vis_hash_lock, flags);
4079 + /* properly remove, kill timers ... */
4080 +- hash_delete(vis_hash, free_info);
4081 ++ hash_delete(vis_hash, free_info_ref);
4082 + vis_hash = NULL;
4083 + my_vis_info = NULL;
4084 + spin_unlock_irqrestore(&vis_hash_lock, flags);
4085 +diff --git a/drivers/staging/batman-adv/vis.h b/drivers/staging/batman-adv/vis.h
4086 +index 0cdafde..a1f92a4 100644
4087 +--- a/drivers/staging/batman-adv/vis.h
4088 ++++ b/drivers/staging/batman-adv/vis.h
4089 +@@ -29,6 +29,7 @@ struct vis_info {
4090 + /* list of server-neighbors we received a vis-packet
4091 + * from. we should not reply to them. */
4092 + struct list_head send_list;
4093 ++ struct kref refcount;
4094 + /* this packet might be part of the vis send queue. */
4095 + struct vis_packet packet;
4096 + /* vis_info may follow here*/
4097 +@@ -48,10 +49,13 @@ struct recvlist_node {
4098 + extern struct hashtable_t *vis_hash;
4099 + extern spinlock_t vis_hash_lock;
4100 +
4101 ++void proc_vis_insert_interface(const uint8_t *interface,
4102 ++ struct hlist_head *if_list,
4103 ++ bool primary);
4104 + void proc_vis_read_entry(struct seq_file *seq,
4105 + struct vis_info_entry *entry,
4106 +- struct hlist_head *if_list,
4107 +- uint8_t *vis_orig);
4108 ++ uint8_t *src,
4109 ++ bool primary);
4110 + void proc_vis_read_prim_sec(struct seq_file *seq,
4111 + struct hlist_head *if_list);
4112 + void receive_server_sync_packet(struct vis_packet *vis_packet,
4113 +diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
4114 +index dc4849a..9855608 100644
4115 +--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
4116 ++++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
4117 +@@ -123,7 +123,7 @@ static const struct ni_board_struct ni_boards[] = {
4118 + .adbits = 12,
4119 + .ai_fifo_depth = 1024,
4120 + .alwaysdither = 0,
4121 +- .gainlkup = ai_gain_16,
4122 ++ .gainlkup = ai_gain_4,
4123 + .ai_speed = 5000,
4124 + .n_aochan = 2,
4125 + .aobits = 12,
4126 +diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
4127 +index 740db0c..2ffd0fe 100644
4128 +--- a/drivers/staging/rt2860/usb_main_dev.c
4129 ++++ b/drivers/staging/rt2860/usb_main_dev.c
4130 +@@ -98,6 +98,7 @@ struct usb_device_id rtusb_usb_id[] = {
4131 + {USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */
4132 + {USB_DEVICE(0x7392, 0x7718)},
4133 + {USB_DEVICE(0x7392, 0x7717)},
4134 ++ {USB_DEVICE(0x0411, 0x016f)}, /* MelCo.,Inc. WLI-UC-G301N */
4135 + {USB_DEVICE(0x1737, 0x0070)}, /* Linksys WUSB100 */
4136 + {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */
4137 + {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */
4138 +diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
4139 +index e40a2e9..fea0e99 100644
4140 +--- a/drivers/staging/vt6655/device_main.c
4141 ++++ b/drivers/staging/vt6655/device_main.c
4142 +@@ -1090,11 +1090,13 @@ device_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
4143 + }
4144 + //2008-07-21-01<Add>by MikeLiu
4145 + //register wpadev
4146 ++#if 0
4147 + if(wpa_set_wpadev(pDevice, 1)!=0) {
4148 + printk("Fail to Register WPADEV?\n");
4149 + unregister_netdev(pDevice->dev);
4150 + free_netdev(dev);
4151 + }
4152 ++#endif
4153 + device_print_info(pDevice);
4154 + pci_set_drvdata(pcid, pDevice);
4155 + return 0;
4156 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
4157 +index 5e1a253..3c73add 100644
4158 +--- a/drivers/usb/class/cdc-acm.c
4159 ++++ b/drivers/usb/class/cdc-acm.c
4160 +@@ -1201,7 +1201,7 @@ made_compressed_probe:
4161 + if (rcv->urb == NULL) {
4162 + dev_dbg(&intf->dev,
4163 + "out of memory (read urbs usb_alloc_urb)\n");
4164 +- goto alloc_fail7;
4165 ++ goto alloc_fail6;
4166 + }
4167 +
4168 + rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
4169 +@@ -1225,7 +1225,7 @@ made_compressed_probe:
4170 + if (snd->urb == NULL) {
4171 + dev_dbg(&intf->dev,
4172 + "out of memory (write urbs usb_alloc_urb)");
4173 +- goto alloc_fail7;
4174 ++ goto alloc_fail8;
4175 + }
4176 +
4177 + if (usb_endpoint_xfer_int(epwrite))
4178 +@@ -1264,6 +1264,7 @@ made_compressed_probe:
4179 + i = device_create_file(&intf->dev,
4180 + &dev_attr_iCountryCodeRelDate);
4181 + if (i < 0) {
4182 ++ device_remove_file(&intf->dev, &dev_attr_wCountryCodes);
4183 + kfree(acm->country_codes);
4184 + goto skip_countries;
4185 + }
4186 +@@ -1300,6 +1301,7 @@ alloc_fail8:
4187 + usb_free_urb(acm->wb[i].urb);
4188 + alloc_fail7:
4189 + acm_read_buffers_free(acm);
4190 ++alloc_fail6:
4191 + for (i = 0; i < num_rx_buf; i++)
4192 + usb_free_urb(acm->ru[i].urb);
4193 + usb_free_urb(acm->ctrlurb);
4194 +diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
4195 +index 2f3dc4c..9d02dc6 100644
4196 +--- a/drivers/usb/core/driver.c
4197 ++++ b/drivers/usb/core/driver.c
4198 +@@ -1322,6 +1322,7 @@ int usb_resume(struct device *dev, pm_message_t msg)
4199 +
4200 + /* For all other calls, take the device back to full power and
4201 + * tell the PM core in case it was autosuspended previously.
4202 ++ * Unbind the interfaces that will need rebinding later.
4203 + */
4204 + } else {
4205 + status = usb_resume_both(udev, msg);
4206 +@@ -1330,6 +1331,7 @@ int usb_resume(struct device *dev, pm_message_t msg)
4207 + pm_runtime_set_active(dev);
4208 + pm_runtime_enable(dev);
4209 + udev->last_busy = jiffies;
4210 ++ do_unbind_rebind(udev, DO_REBIND);
4211 + }
4212 + }
4213 +
4214 +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
4215 +index 2f8cedd..b60a14d 100644
4216 +--- a/drivers/usb/core/hcd.c
4217 ++++ b/drivers/usb/core/hcd.c
4218 +@@ -1261,6 +1261,51 @@ static void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle,
4219 + *dma_handle = 0;
4220 + }
4221 +
4222 ++static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
4223 ++{
4224 ++ enum dma_data_direction dir;
4225 ++
4226 ++ if (urb->transfer_flags & URB_SETUP_MAP_SINGLE)
4227 ++ dma_unmap_single(hcd->self.controller,
4228 ++ urb->setup_dma,
4229 ++ sizeof(struct usb_ctrlrequest),
4230 ++ DMA_TO_DEVICE);
4231 ++ else if (urb->transfer_flags & URB_SETUP_MAP_LOCAL)
4232 ++ hcd_free_coherent(urb->dev->bus,
4233 ++ &urb->setup_dma,
4234 ++ (void **) &urb->setup_packet,
4235 ++ sizeof(struct usb_ctrlrequest),
4236 ++ DMA_TO_DEVICE);
4237 ++
4238 ++ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
4239 ++ if (urb->transfer_flags & URB_DMA_MAP_SG)
4240 ++ dma_unmap_sg(hcd->self.controller,
4241 ++ urb->sg->sg,
4242 ++ urb->num_sgs,
4243 ++ dir);
4244 ++ else if (urb->transfer_flags & URB_DMA_MAP_PAGE)
4245 ++ dma_unmap_page(hcd->self.controller,
4246 ++ urb->transfer_dma,
4247 ++ urb->transfer_buffer_length,
4248 ++ dir);
4249 ++ else if (urb->transfer_flags & URB_DMA_MAP_SINGLE)
4250 ++ dma_unmap_single(hcd->self.controller,
4251 ++ urb->transfer_dma,
4252 ++ urb->transfer_buffer_length,
4253 ++ dir);
4254 ++ else if (urb->transfer_flags & URB_MAP_LOCAL)
4255 ++ hcd_free_coherent(urb->dev->bus,
4256 ++ &urb->transfer_dma,
4257 ++ &urb->transfer_buffer,
4258 ++ urb->transfer_buffer_length,
4259 ++ dir);
4260 ++
4261 ++ /* Make it safe to call this routine more than once */
4262 ++ urb->transfer_flags &= ~(URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
4263 ++ URB_DMA_MAP_SG | URB_DMA_MAP_PAGE |
4264 ++ URB_DMA_MAP_SINGLE | URB_MAP_LOCAL);
4265 ++}
4266 ++
4267 + static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
4268 + gfp_t mem_flags)
4269 + {
4270 +@@ -1272,8 +1317,6 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
4271 + * unless it uses pio or talks to another transport,
4272 + * or uses the provided scatter gather list for bulk.
4273 + */
4274 +- if (is_root_hub(urb->dev))
4275 +- return 0;
4276 +
4277 + if (usb_endpoint_xfer_control(&urb->ep->desc)
4278 + && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
4279 +@@ -1286,6 +1329,7 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
4280 + if (dma_mapping_error(hcd->self.controller,
4281 + urb->setup_dma))
4282 + return -EAGAIN;
4283 ++ urb->transfer_flags |= URB_SETUP_MAP_SINGLE;
4284 + } else if (hcd->driver->flags & HCD_LOCAL_MEM)
4285 + ret = hcd_alloc_coherent(
4286 + urb->dev->bus, mem_flags,
4287 +@@ -1293,20 +1337,57 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
4288 + (void **)&urb->setup_packet,
4289 + sizeof(struct usb_ctrlrequest),
4290 + DMA_TO_DEVICE);
4291 ++ if (ret)
4292 ++ return ret;
4293 ++ urb->transfer_flags |= URB_SETUP_MAP_LOCAL;
4294 + }
4295 +
4296 + dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
4297 +- if (ret == 0 && urb->transfer_buffer_length != 0
4298 ++ if (urb->transfer_buffer_length != 0
4299 + && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
4300 + if (hcd->self.uses_dma) {
4301 +- urb->transfer_dma = dma_map_single (
4302 +- hcd->self.controller,
4303 +- urb->transfer_buffer,
4304 +- urb->transfer_buffer_length,
4305 +- dir);
4306 +- if (dma_mapping_error(hcd->self.controller,
4307 ++ if (urb->num_sgs) {
4308 ++ int n = dma_map_sg(
4309 ++ hcd->self.controller,
4310 ++ urb->sg->sg,
4311 ++ urb->num_sgs,
4312 ++ dir);
4313 ++ if (n <= 0)
4314 ++ ret = -EAGAIN;
4315 ++ else
4316 ++ urb->transfer_flags |= URB_DMA_MAP_SG;
4317 ++ if (n != urb->num_sgs) {
4318 ++ urb->num_sgs = n;
4319 ++ urb->transfer_flags |=
4320 ++ URB_DMA_SG_COMBINED;
4321 ++ }
4322 ++ } else if (urb->sg) {
4323 ++ struct scatterlist *sg;
4324 ++
4325 ++ sg = (struct scatterlist *) urb->sg;
4326 ++ urb->transfer_dma = dma_map_page(
4327 ++ hcd->self.controller,
4328 ++ sg_page(sg),
4329 ++ sg->offset,
4330 ++ urb->transfer_buffer_length,
4331 ++ dir);
4332 ++ if (dma_mapping_error(hcd->self.controller,
4333 + urb->transfer_dma))
4334 +- return -EAGAIN;
4335 ++ ret = -EAGAIN;
4336 ++ else
4337 ++ urb->transfer_flags |= URB_DMA_MAP_PAGE;
4338 ++ } else {
4339 ++ urb->transfer_dma = dma_map_single(
4340 ++ hcd->self.controller,
4341 ++ urb->transfer_buffer,
4342 ++ urb->transfer_buffer_length,
4343 ++ dir);
4344 ++ if (dma_mapping_error(hcd->self.controller,
4345 ++ urb->transfer_dma))
4346 ++ ret = -EAGAIN;
4347 ++ else
4348 ++ urb->transfer_flags |= URB_DMA_MAP_SINGLE;
4349 ++ }
4350 + } else if (hcd->driver->flags & HCD_LOCAL_MEM) {
4351 + ret = hcd_alloc_coherent(
4352 + urb->dev->bus, mem_flags,
4353 +@@ -1314,55 +1395,16 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
4354 + &urb->transfer_buffer,
4355 + urb->transfer_buffer_length,
4356 + dir);
4357 +-
4358 +- if (ret && usb_endpoint_xfer_control(&urb->ep->desc)
4359 +- && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP))
4360 +- hcd_free_coherent(urb->dev->bus,
4361 +- &urb->setup_dma,
4362 +- (void **)&urb->setup_packet,
4363 +- sizeof(struct usb_ctrlrequest),
4364 +- DMA_TO_DEVICE);
4365 ++ if (ret == 0)
4366 ++ urb->transfer_flags |= URB_MAP_LOCAL;
4367 + }
4368 ++ if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE |
4369 ++ URB_SETUP_MAP_LOCAL)))
4370 ++ unmap_urb_for_dma(hcd, urb);
4371 + }
4372 + return ret;
4373 + }
4374 +
4375 +-static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
4376 +-{
4377 +- enum dma_data_direction dir;
4378 +-
4379 +- if (is_root_hub(urb->dev))
4380 +- return;
4381 +-
4382 +- if (usb_endpoint_xfer_control(&urb->ep->desc)
4383 +- && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
4384 +- if (hcd->self.uses_dma)
4385 +- dma_unmap_single(hcd->self.controller, urb->setup_dma,
4386 +- sizeof(struct usb_ctrlrequest),
4387 +- DMA_TO_DEVICE);
4388 +- else if (hcd->driver->flags & HCD_LOCAL_MEM)
4389 +- hcd_free_coherent(urb->dev->bus, &urb->setup_dma,
4390 +- (void **)&urb->setup_packet,
4391 +- sizeof(struct usb_ctrlrequest),
4392 +- DMA_TO_DEVICE);
4393 +- }
4394 +-
4395 +- dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
4396 +- if (urb->transfer_buffer_length != 0
4397 +- && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
4398 +- if (hcd->self.uses_dma)
4399 +- dma_unmap_single(hcd->self.controller,
4400 +- urb->transfer_dma,
4401 +- urb->transfer_buffer_length,
4402 +- dir);
4403 +- else if (hcd->driver->flags & HCD_LOCAL_MEM)
4404 +- hcd_free_coherent(urb->dev->bus, &urb->transfer_dma,
4405 +- &urb->transfer_buffer,
4406 +- urb->transfer_buffer_length,
4407 +- dir);
4408 +- }
4409 +-}
4410 +-
4411 + /*-------------------------------------------------------------------------*/
4412 +
4413 + /* may be called in any context with a valid urb->dev usecount
4414 +@@ -1391,21 +1433,20 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
4415 + * URBs must be submitted in process context with interrupts
4416 + * enabled.
4417 + */
4418 +- status = map_urb_for_dma(hcd, urb, mem_flags);
4419 +- if (unlikely(status)) {
4420 +- usbmon_urb_submit_error(&hcd->self, urb, status);
4421 +- goto error;
4422 +- }
4423 +
4424 +- if (is_root_hub(urb->dev))
4425 ++ if (is_root_hub(urb->dev)) {
4426 + status = rh_urb_enqueue(hcd, urb);
4427 +- else
4428 +- status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
4429 ++ } else {
4430 ++ status = map_urb_for_dma(hcd, urb, mem_flags);
4431 ++ if (likely(status == 0)) {
4432 ++ status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
4433 ++ if (unlikely(status))
4434 ++ unmap_urb_for_dma(hcd, urb);
4435 ++ }
4436 ++ }
4437 +
4438 + if (unlikely(status)) {
4439 + usbmon_urb_submit_error(&hcd->self, urb, status);
4440 +- unmap_urb_for_dma(hcd, urb);
4441 +- error:
4442 + urb->hcpriv = NULL;
4443 + INIT_LIST_HEAD(&urb->urb_list);
4444 + atomic_dec(&urb->use_count);
4445 +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
4446 +index cd22027..794dca2 100644
4447 +--- a/drivers/usb/core/message.c
4448 ++++ b/drivers/usb/core/message.c
4449 +@@ -259,9 +259,6 @@ static void sg_clean(struct usb_sg_request *io)
4450 + kfree(io->urbs);
4451 + io->urbs = NULL;
4452 + }
4453 +- if (io->dev->dev.dma_mask != NULL)
4454 +- usb_buffer_unmap_sg(io->dev, usb_pipein(io->pipe),
4455 +- io->sg, io->nents);
4456 + io->dev = NULL;
4457 + }
4458 +
4459 +@@ -364,7 +361,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
4460 + {
4461 + int i;
4462 + int urb_flags;
4463 +- int dma;
4464 + int use_sg;
4465 +
4466 + if (!io || !dev || !sg
4467 +@@ -378,21 +374,9 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
4468 + io->pipe = pipe;
4469 + io->sg = sg;
4470 + io->nents = nents;
4471 +-
4472 +- /* not all host controllers use DMA (like the mainstream pci ones);
4473 +- * they can use PIO (sl811) or be software over another transport.
4474 +- */
4475 +- dma = (dev->dev.dma_mask != NULL);
4476 +- if (dma)
4477 +- io->entries = usb_buffer_map_sg(dev, usb_pipein(pipe),
4478 +- sg, nents);
4479 +- else
4480 +- io->entries = nents;
4481 ++ io->entries = nents;
4482 +
4483 + /* initialize all the urbs we'll use */
4484 +- if (io->entries <= 0)
4485 +- return io->entries;
4486 +-
4487 + if (dev->bus->sg_tablesize > 0) {
4488 + io->urbs = kmalloc(sizeof *io->urbs, mem_flags);
4489 + use_sg = true;
4490 +@@ -404,8 +388,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
4491 + goto nomem;
4492 +
4493 + urb_flags = 0;
4494 +- if (dma)
4495 +- urb_flags |= URB_NO_TRANSFER_DMA_MAP;
4496 + if (usb_pipein(pipe))
4497 + urb_flags |= URB_SHORT_NOT_OK;
4498 +
4499 +@@ -423,12 +405,13 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
4500 +
4501 + io->urbs[0]->complete = sg_complete;
4502 + io->urbs[0]->context = io;
4503 ++
4504 + /* A length of zero means transfer the whole sg list */
4505 + io->urbs[0]->transfer_buffer_length = length;
4506 + if (length == 0) {
4507 + for_each_sg(sg, sg, io->entries, i) {
4508 + io->urbs[0]->transfer_buffer_length +=
4509 +- sg_dma_len(sg);
4510 ++ sg->length;
4511 + }
4512 + }
4513 + io->urbs[0]->sg = io;
4514 +@@ -454,26 +437,16 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
4515 + io->urbs[i]->context = io;
4516 +
4517 + /*
4518 +- * Some systems need to revert to PIO when DMA is temporarily
4519 +- * unavailable. For their sakes, both transfer_buffer and
4520 +- * transfer_dma are set when possible.
4521 +- *
4522 +- * Note that if IOMMU coalescing occurred, we cannot
4523 +- * trust sg_page anymore, so check if S/G list shrunk.
4524 ++ * Some systems can't use DMA; they use PIO instead.
4525 ++ * For their sakes, transfer_buffer is set whenever
4526 ++ * possible.
4527 + */
4528 +- if (io->nents == io->entries && !PageHighMem(sg_page(sg)))
4529 ++ if (!PageHighMem(sg_page(sg)))
4530 + io->urbs[i]->transfer_buffer = sg_virt(sg);
4531 + else
4532 + io->urbs[i]->transfer_buffer = NULL;
4533 +
4534 +- if (dma) {
4535 +- io->urbs[i]->transfer_dma = sg_dma_address(sg);
4536 +- len = sg_dma_len(sg);
4537 +- } else {
4538 +- /* hc may use _only_ transfer_buffer */
4539 +- len = sg->length;
4540 +- }
4541 +-
4542 ++ len = sg->length;
4543 + if (length) {
4544 + len = min_t(unsigned, len, length);
4545 + length -= len;
4546 +@@ -481,6 +454,8 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
4547 + io->entries = i + 1;
4548 + }
4549 + io->urbs[i]->transfer_buffer_length = len;
4550 ++
4551 ++ io->urbs[i]->sg = (struct usb_sg_request *) sg;
4552 + }
4553 + io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
4554 + }
4555 +diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
4556 +index 45a32da..fec46d0 100644
4557 +--- a/drivers/usb/core/urb.c
4558 ++++ b/drivers/usb/core/urb.c
4559 +@@ -333,9 +333,12 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
4560 + is_out = usb_endpoint_dir_out(&ep->desc);
4561 + }
4562 +
4563 +- /* Cache the direction for later use */
4564 +- urb->transfer_flags = (urb->transfer_flags & ~URB_DIR_MASK) |
4565 +- (is_out ? URB_DIR_OUT : URB_DIR_IN);
4566 ++ /* Clear the internal flags and cache the direction for later use */
4567 ++ urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
4568 ++ URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
4569 ++ URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
4570 ++ URB_DMA_SG_COMBINED);
4571 ++ urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);
4572 +
4573 + if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
4574 + dev->state < USB_STATE_CONFIGURED)
4575 +diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
4576 +index 0561430..956108e 100644
4577 +--- a/drivers/usb/core/usb.c
4578 ++++ b/drivers/usb/core/usb.c
4579 +@@ -893,6 +893,7 @@ void usb_buffer_unmap(struct urb *urb)
4580 + EXPORT_SYMBOL_GPL(usb_buffer_unmap);
4581 + #endif /* 0 */
4582 +
4583 ++#if 0
4584 + /**
4585 + * usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint
4586 + * @dev: device to which the scatterlist will be mapped
4587 +@@ -936,6 +937,7 @@ int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
4588 + is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE) ? : -ENOMEM;
4589 + }
4590 + EXPORT_SYMBOL_GPL(usb_buffer_map_sg);
4591 ++#endif
4592 +
4593 + /* XXX DISABLED, no users currently. If you wish to re-enable this
4594 + * XXX please determine whether the sync is to transfer ownership of
4595 +@@ -972,6 +974,7 @@ void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
4596 + EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg);
4597 + #endif
4598 +
4599 ++#if 0
4600 + /**
4601 + * usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist
4602 + * @dev: device to which the scatterlist will be mapped
4603 +@@ -997,6 +1000,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
4604 + is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
4605 + }
4606 + EXPORT_SYMBOL_GPL(usb_buffer_unmap_sg);
4607 ++#endif
4608 +
4609 + /* To disable USB, kernel command line is 'nousb' not 'usbcore.nousb' */
4610 + #ifdef MODULE
4611 +diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
4612 +index fa3d142..08a9a62 100644
4613 +--- a/drivers/usb/gadget/fsl_udc_core.c
4614 ++++ b/drivers/usb/gadget/fsl_udc_core.c
4615 +@@ -489,7 +489,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
4616 + case USB_ENDPOINT_XFER_ISOC:
4617 + /* Calculate transactions needed for high bandwidth iso */
4618 + mult = (unsigned char)(1 + ((max >> 11) & 0x03));
4619 +- max = max & 0x8ff; /* bit 0~10 */
4620 ++ max = max & 0x7ff; /* bit 0~10 */
4621 + /* 3 transactions at most */
4622 + if (mult > 3)
4623 + goto en_done;
4624 +diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
4625 +index e3a74e7..a422a1b 100644
4626 +--- a/drivers/usb/host/ehci-au1xxx.c
4627 ++++ b/drivers/usb/host/ehci-au1xxx.c
4628 +@@ -215,26 +215,17 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
4629 + msleep(10);
4630 +
4631 + /* Root hub was already suspended. Disable irq emission and
4632 +- * mark HW unaccessible, bail out if RH has been resumed. Use
4633 +- * the spinlock to properly synchronize with possible pending
4634 +- * RH suspend or resume activity.
4635 +- *
4636 +- * This is still racy as hcd->state is manipulated outside of
4637 +- * any locks =P But that will be a different fix.
4638 ++ * mark HW unaccessible. The PM and USB cores make sure that
4639 ++ * the root hub is either suspended or stopped.
4640 + */
4641 + spin_lock_irqsave(&ehci->lock, flags);
4642 +- if (hcd->state != HC_STATE_SUSPENDED) {
4643 +- rc = -EINVAL;
4644 +- goto bail;
4645 +- }
4646 ++ ehci_prepare_ports_for_controller_suspend(ehci);
4647 + ehci_writel(ehci, 0, &ehci->regs->intr_enable);
4648 + (void)ehci_readl(ehci, &ehci->regs->intr_enable);
4649 +
4650 + clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4651 +
4652 + au1xxx_stop_ehc();
4653 +-
4654 +-bail:
4655 + spin_unlock_irqrestore(&ehci->lock, flags);
4656 +
4657 + // could save FLADJ in case of Vaux power loss
4658 +@@ -264,6 +255,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev)
4659 + if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
4660 + int mask = INTR_MASK;
4661 +
4662 ++ ehci_prepare_ports_for_controller_resume(ehci);
4663 + if (!hcd->self.root_hub->do_remote_wakeup)
4664 + mask &= ~STS_PCD;
4665 + ehci_writel(ehci, mask, &ehci->regs->intr_enable);
4666 +diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
4667 +index 0e26aa1..5cd967d 100644
4668 +--- a/drivers/usb/host/ehci-fsl.c
4669 ++++ b/drivers/usb/host/ehci-fsl.c
4670 +@@ -313,6 +313,7 @@ static int ehci_fsl_drv_suspend(struct device *dev)
4671 + struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
4672 + void __iomem *non_ehci = hcd->regs;
4673 +
4674 ++ ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd));
4675 + if (!fsl_deep_sleep())
4676 + return 0;
4677 +
4678 +@@ -327,6 +328,7 @@ static int ehci_fsl_drv_resume(struct device *dev)
4679 + struct ehci_hcd *ehci = hcd_to_ehci(hcd);
4680 + void __iomem *non_ehci = hcd->regs;
4681 +
4682 ++ ehci_prepare_ports_for_controller_resume(ehci);
4683 + if (!fsl_deep_sleep())
4684 + return 0;
4685 +
4686 +diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
4687 +index c7178bc..1b2af4d 100644
4688 +--- a/drivers/usb/host/ehci-hub.c
4689 ++++ b/drivers/usb/host/ehci-hub.c
4690 +@@ -106,12 +106,75 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
4691 + ehci->owned_ports = 0;
4692 + }
4693 +
4694 ++static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
4695 ++ bool suspending)
4696 ++{
4697 ++ int port;
4698 ++ u32 temp;
4699 ++
4700 ++ /* If remote wakeup is enabled for the root hub but disabled
4701 ++ * for the controller, we must adjust all the port wakeup flags
4702 ++ * when the controller is suspended or resumed. In all other
4703 ++ * cases they don't need to be changed.
4704 ++ */
4705 ++ if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup ||
4706 ++ device_may_wakeup(ehci_to_hcd(ehci)->self.controller))
4707 ++ return;
4708 ++
4709 ++ /* clear phy low-power mode before changing wakeup flags */
4710 ++ if (ehci->has_hostpc) {
4711 ++ port = HCS_N_PORTS(ehci->hcs_params);
4712 ++ while (port--) {
4713 ++ u32 __iomem *hostpc_reg;
4714 ++
4715 ++ hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
4716 ++ + HOSTPC0 + 4 * port);
4717 ++ temp = ehci_readl(ehci, hostpc_reg);
4718 ++ ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
4719 ++ }
4720 ++ msleep(5);
4721 ++ }
4722 ++
4723 ++ port = HCS_N_PORTS(ehci->hcs_params);
4724 ++ while (port--) {
4725 ++ u32 __iomem *reg = &ehci->regs->port_status[port];
4726 ++ u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
4727 ++ u32 t2 = t1 & ~PORT_WAKE_BITS;
4728 ++
4729 ++ /* If we are suspending the controller, clear the flags.
4730 ++ * If we are resuming the controller, set the wakeup flags.
4731 ++ */
4732 ++ if (!suspending) {
4733 ++ if (t1 & PORT_CONNECT)
4734 ++ t2 |= PORT_WKOC_E | PORT_WKDISC_E;
4735 ++ else
4736 ++ t2 |= PORT_WKOC_E | PORT_WKCONN_E;
4737 ++ }
4738 ++ ehci_vdbg(ehci, "port %d, %08x -> %08x\n",
4739 ++ port + 1, t1, t2);
4740 ++ ehci_writel(ehci, t2, reg);
4741 ++ }
4742 ++
4743 ++ /* enter phy low-power mode again */
4744 ++ if (ehci->has_hostpc) {
4745 ++ port = HCS_N_PORTS(ehci->hcs_params);
4746 ++ while (port--) {
4747 ++ u32 __iomem *hostpc_reg;
4748 ++
4749 ++ hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
4750 ++ + HOSTPC0 + 4 * port);
4751 ++ temp = ehci_readl(ehci, hostpc_reg);
4752 ++ ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg);
4753 ++ }
4754 ++ }
4755 ++}
4756 ++
4757 + static int ehci_bus_suspend (struct usb_hcd *hcd)
4758 + {
4759 + struct ehci_hcd *ehci = hcd_to_ehci (hcd);
4760 + int port;
4761 + int mask;
4762 +- u32 __iomem *hostpc_reg = NULL;
4763 ++ int changed;
4764 +
4765 + ehci_dbg(ehci, "suspend root hub\n");
4766 +
4767 +@@ -155,15 +218,13 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
4768 + */
4769 + ehci->bus_suspended = 0;
4770 + ehci->owned_ports = 0;
4771 ++ changed = 0;
4772 + port = HCS_N_PORTS(ehci->hcs_params);
4773 + while (port--) {
4774 + u32 __iomem *reg = &ehci->regs->port_status [port];
4775 + u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
4776 +- u32 t2 = t1;
4777 ++ u32 t2 = t1 & ~PORT_WAKE_BITS;
4778 +
4779 +- if (ehci->has_hostpc)
4780 +- hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs
4781 +- + HOSTPC0 + 4 * (port & 0xff));
4782 + /* keep track of which ports we suspend */
4783 + if (t1 & PORT_OWNER)
4784 + set_bit(port, &ehci->owned_ports);
4785 +@@ -172,40 +233,45 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
4786 + set_bit(port, &ehci->bus_suspended);
4787 + }
4788 +
4789 +- /* enable remote wakeup on all ports */
4790 ++ /* enable remote wakeup on all ports, if told to do so */
4791 + if (hcd->self.root_hub->do_remote_wakeup) {
4792 + /* only enable appropriate wake bits, otherwise the
4793 + * hardware can not go phy low power mode. If a race
4794 + * condition happens here(connection change during bits
4795 + * set), the port change detection will finally fix it.
4796 + */
4797 +- if (t1 & PORT_CONNECT) {
4798 ++ if (t1 & PORT_CONNECT)
4799 + t2 |= PORT_WKOC_E | PORT_WKDISC_E;
4800 +- t2 &= ~PORT_WKCONN_E;
4801 +- } else {
4802 ++ else
4803 + t2 |= PORT_WKOC_E | PORT_WKCONN_E;
4804 +- t2 &= ~PORT_WKDISC_E;
4805 +- }
4806 +- } else
4807 +- t2 &= ~PORT_WAKE_BITS;
4808 ++ }
4809 +
4810 + if (t1 != t2) {
4811 + ehci_vdbg (ehci, "port %d, %08x -> %08x\n",
4812 + port + 1, t1, t2);
4813 + ehci_writel(ehci, t2, reg);
4814 +- if (hostpc_reg) {
4815 +- u32 t3;
4816 ++ changed = 1;
4817 ++ }
4818 ++ }
4819 +
4820 +- spin_unlock_irq(&ehci->lock);
4821 +- msleep(5);/* 5ms for HCD enter low pwr mode */
4822 +- spin_lock_irq(&ehci->lock);
4823 +- t3 = ehci_readl(ehci, hostpc_reg);
4824 +- ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
4825 +- t3 = ehci_readl(ehci, hostpc_reg);
4826 +- ehci_dbg(ehci, "Port%d phy low pwr mode %s\n",
4827 ++ if (changed && ehci->has_hostpc) {
4828 ++ spin_unlock_irq(&ehci->lock);
4829 ++ msleep(5); /* 5 ms for HCD to enter low-power mode */
4830 ++ spin_lock_irq(&ehci->lock);
4831 ++
4832 ++ port = HCS_N_PORTS(ehci->hcs_params);
4833 ++ while (port--) {
4834 ++ u32 __iomem *hostpc_reg;
4835 ++ u32 t3;
4836 ++
4837 ++ hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
4838 ++ + HOSTPC0 + 4 * port);
4839 ++ t3 = ehci_readl(ehci, hostpc_reg);
4840 ++ ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
4841 ++ t3 = ehci_readl(ehci, hostpc_reg);
4842 ++ ehci_dbg(ehci, "Port %d phy low-power mode %s\n",
4843 + port, (t3 & HOSTPC_PHCD) ?
4844 + "succeeded" : "failed");
4845 +- }
4846 + }
4847 + }
4848 +
4849 +@@ -291,6 +357,25 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
4850 + msleep(8);
4851 + spin_lock_irq(&ehci->lock);
4852 +
4853 ++ /* clear phy low-power mode before resume */
4854 ++ if (ehci->bus_suspended && ehci->has_hostpc) {
4855 ++ i = HCS_N_PORTS(ehci->hcs_params);
4856 ++ while (i--) {
4857 ++ if (test_bit(i, &ehci->bus_suspended)) {
4858 ++ u32 __iomem *hostpc_reg;
4859 ++
4860 ++ hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
4861 ++ + HOSTPC0 + 4 * i);
4862 ++ temp = ehci_readl(ehci, hostpc_reg);
4863 ++ ehci_writel(ehci, temp & ~HOSTPC_PHCD,
4864 ++ hostpc_reg);
4865 ++ }
4866 ++ }
4867 ++ spin_unlock_irq(&ehci->lock);
4868 ++ msleep(5);
4869 ++ spin_lock_irq(&ehci->lock);
4870 ++ }
4871 ++
4872 + /* manually resume the ports we suspended during bus_suspend() */
4873 + i = HCS_N_PORTS (ehci->hcs_params);
4874 + while (i--) {
4875 +@@ -675,16 +760,25 @@ static int ehci_hub_control (
4876 + goto error;
4877 + if (ehci->no_selective_suspend)
4878 + break;
4879 +- if (temp & PORT_SUSPEND) {
4880 +- if ((temp & PORT_PE) == 0)
4881 +- goto error;
4882 +- /* resume signaling for 20 msec */
4883 +- temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
4884 +- ehci_writel(ehci, temp | PORT_RESUME,
4885 +- status_reg);
4886 +- ehci->reset_done [wIndex] = jiffies
4887 +- + msecs_to_jiffies (20);
4888 ++ if (!(temp & PORT_SUSPEND))
4889 ++ break;
4890 ++ if ((temp & PORT_PE) == 0)
4891 ++ goto error;
4892 ++
4893 ++ /* clear phy low-power mode before resume */
4894 ++ if (hostpc_reg) {
4895 ++ temp1 = ehci_readl(ehci, hostpc_reg);
4896 ++ ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
4897 ++ hostpc_reg);
4898 ++ spin_unlock_irqrestore(&ehci->lock, flags);
4899 ++ msleep(5);/* wait to leave low-power mode */
4900 ++ spin_lock_irqsave(&ehci->lock, flags);
4901 + }
4902 ++ /* resume signaling for 20 msec */
4903 ++ temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
4904 ++ ehci_writel(ehci, temp | PORT_RESUME, status_reg);
4905 ++ ehci->reset_done[wIndex] = jiffies
4906 ++ + msecs_to_jiffies(20);
4907 + break;
4908 + case USB_PORT_FEAT_C_SUSPEND:
4909 + clear_bit(wIndex, &ehci->port_c_suspend);
4910 +diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
4911 +index ead5f4f..c5f662d 100644
4912 +--- a/drivers/usb/host/ehci-pci.c
4913 ++++ b/drivers/usb/host/ehci-pci.c
4914 +@@ -284,23 +284,15 @@ static int ehci_pci_suspend(struct usb_hcd *hcd)
4915 + msleep(10);
4916 +
4917 + /* Root hub was already suspended. Disable irq emission and
4918 +- * mark HW unaccessible, bail out if RH has been resumed. Use
4919 +- * the spinlock to properly synchronize with possible pending
4920 +- * RH suspend or resume activity.
4921 +- *
4922 +- * This is still racy as hcd->state is manipulated outside of
4923 +- * any locks =P But that will be a different fix.
4924 ++ * mark HW unaccessible. The PM and USB cores make sure that
4925 ++ * the root hub is either suspended or stopped.
4926 + */
4927 + spin_lock_irqsave (&ehci->lock, flags);
4928 +- if (hcd->state != HC_STATE_SUSPENDED) {
4929 +- rc = -EINVAL;
4930 +- goto bail;
4931 +- }
4932 ++ ehci_prepare_ports_for_controller_suspend(ehci);
4933 + ehci_writel(ehci, 0, &ehci->regs->intr_enable);
4934 + (void)ehci_readl(ehci, &ehci->regs->intr_enable);
4935 +
4936 + clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4937 +- bail:
4938 + spin_unlock_irqrestore (&ehci->lock, flags);
4939 +
4940 + // could save FLADJ in case of Vaux power loss
4941 +@@ -330,6 +322,7 @@ static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
4942 + !hibernated) {
4943 + int mask = INTR_MASK;
4944 +
4945 ++ ehci_prepare_ports_for_controller_resume(ehci);
4946 + if (!hcd->self.root_hub->do_remote_wakeup)
4947 + mask &= ~STS_PCD;
4948 + ehci_writel(ehci, mask, &ehci->regs->intr_enable);
4949 +diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
4950 +index 556c0b4..ddf61c3 100644
4951 +--- a/drivers/usb/host/ehci.h
4952 ++++ b/drivers/usb/host/ehci.h
4953 +@@ -536,6 +536,16 @@ struct ehci_fstn {
4954 +
4955 + /*-------------------------------------------------------------------------*/
4956 +
4957 ++/* Prepare the PORTSC wakeup flags during controller suspend/resume */
4958 ++
4959 ++#define ehci_prepare_ports_for_controller_suspend(ehci) \
4960 ++ ehci_adjust_port_wakeup_flags(ehci, true);
4961 ++
4962 ++#define ehci_prepare_ports_for_controller_resume(ehci) \
4963 ++ ehci_adjust_port_wakeup_flags(ehci, false);
4964 ++
4965 ++/*-------------------------------------------------------------------------*/
4966 ++
4967 + #ifdef CONFIG_USB_EHCI_ROOT_HUB_TT
4968 +
4969 + /*
4970 +diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
4971 +index 72dae1c..3b6e864 100644
4972 +--- a/drivers/usb/host/fhci.h
4973 ++++ b/drivers/usb/host/fhci.h
4974 +@@ -20,6 +20,7 @@
4975 +
4976 + #include <linux/kernel.h>
4977 + #include <linux/types.h>
4978 ++#include <linux/bug.h>
4979 + #include <linux/spinlock.h>
4980 + #include <linux/interrupt.h>
4981 + #include <linux/kfifo.h>
4982 +@@ -515,9 +516,13 @@ static inline int cq_put(struct kfifo *kfifo, void *p)
4983 +
4984 + static inline void *cq_get(struct kfifo *kfifo)
4985 + {
4986 +- void *p = NULL;
4987 ++ unsigned int sz;
4988 ++ void *p;
4989 ++
4990 ++ sz = kfifo_out(kfifo, (void *)&p, sizeof(p));
4991 ++ if (sz != sizeof(p))
4992 ++ return NULL;
4993 +
4994 +- kfifo_out(kfifo, (void *)&p, sizeof(p));
4995 + return p;
4996 + }
4997 +
4998 +diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
4999 +index 141d049..b388dd1 100644
5000 +--- a/drivers/usb/host/whci/qset.c
5001 ++++ b/drivers/usb/host/whci/qset.c
5002 +@@ -646,7 +646,7 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
5003 + wurb->urb = urb;
5004 + INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
5005 +
5006 +- if (urb->sg) {
5007 ++ if (urb->num_sgs) {
5008 + ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
5009 + if (ret == -EINVAL) {
5010 + qset_free_stds(qset, urb);
5011 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
5012 +index 417d37a..98a73cd 100644
5013 +--- a/drivers/usb/host/xhci-pci.c
5014 ++++ b/drivers/usb/host/xhci-pci.c
5015 +@@ -54,7 +54,7 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
5016 + struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
5017 + int retval;
5018 +
5019 +- hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 1;
5020 ++ hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
5021 +
5022 + xhci->cap_regs = hcd->regs;
5023 + xhci->op_regs = hcd->regs +
5024 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
5025 +index 85d7e8f..40cba25 100644
5026 +--- a/drivers/usb/host/xhci-ring.c
5027 ++++ b/drivers/usb/host/xhci-ring.c
5028 +@@ -242,10 +242,27 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
5029 + int i;
5030 + union xhci_trb *enq = ring->enqueue;
5031 + struct xhci_segment *enq_seg = ring->enq_seg;
5032 ++ struct xhci_segment *cur_seg;
5033 ++ unsigned int left_on_ring;
5034 +
5035 + /* Check if ring is empty */
5036 +- if (enq == ring->dequeue)
5037 ++ if (enq == ring->dequeue) {
5038 ++ /* Can't use link trbs */
5039 ++ left_on_ring = TRBS_PER_SEGMENT - 1;
5040 ++ for (cur_seg = enq_seg->next; cur_seg != enq_seg;
5041 ++ cur_seg = cur_seg->next)
5042 ++ left_on_ring += TRBS_PER_SEGMENT - 1;
5043 ++
5044 ++ /* Always need one TRB free in the ring. */
5045 ++ left_on_ring -= 1;
5046 ++ if (num_trbs > left_on_ring) {
5047 ++ xhci_warn(xhci, "Not enough room on ring; "
5048 ++ "need %u TRBs, %u TRBs left\n",
5049 ++ num_trbs, left_on_ring);
5050 ++ return 0;
5051 ++ }
5052 + return 1;
5053 ++ }
5054 + /* Make sure there's an extra empty TRB available */
5055 + for (i = 0; i <= num_trbs; ++i) {
5056 + if (enq == ring->dequeue)
5057 +@@ -334,7 +351,8 @@ static struct xhci_segment *find_trb_seg(
5058 + while (cur_seg->trbs > trb ||
5059 + &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
5060 + generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
5061 +- if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK &&
5062 ++ if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
5063 ++ TRB_TYPE(TRB_LINK) &&
5064 + (generic_trb->field[3] & LINK_TOGGLE))
5065 + *cycle_state = ~(*cycle_state) & 0x1;
5066 + cur_seg = cur_seg->next;
5067 +@@ -390,7 +408,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
5068 + BUG();
5069 +
5070 + trb = &state->new_deq_ptr->generic;
5071 +- if (TRB_TYPE(trb->field[3]) == TRB_LINK &&
5072 ++ if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
5073 + (trb->field[3] & LINK_TOGGLE))
5074 + state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
5075 + next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
5076 +@@ -578,6 +596,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
5077 + /* Otherwise just ring the doorbell to restart the ring */
5078 + ring_ep_doorbell(xhci, slot_id, ep_index);
5079 + }
5080 ++ ep->stopped_td = NULL;
5081 ++ ep->stopped_trb = NULL;
5082 +
5083 + /*
5084 + * Drop the lock and complete the URBs in the cancelled TD list.
5085 +@@ -1061,8 +1081,13 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
5086 + ep->ep_state |= EP_HALTED;
5087 + ep->stopped_td = td;
5088 + ep->stopped_trb = event_trb;
5089 ++
5090 + xhci_queue_reset_ep(xhci, slot_id, ep_index);
5091 + xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
5092 ++
5093 ++ ep->stopped_td = NULL;
5094 ++ ep->stopped_trb = NULL;
5095 ++
5096 + xhci_ring_cmd_db(xhci);
5097 + }
5098 +
5099 +@@ -1390,8 +1415,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
5100 + for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
5101 + cur_trb != event_trb;
5102 + next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
5103 +- if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP &&
5104 +- TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK)
5105 ++ if ((cur_trb->generic.field[3] &
5106 ++ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
5107 ++ (cur_trb->generic.field[3] &
5108 ++ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
5109 + td->urb->actual_length +=
5110 + TRB_LEN(cur_trb->generic.field[2]);
5111 + }
5112 +@@ -1938,7 +1965,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
5113 + int running_total, trb_buff_len, ret;
5114 + u64 addr;
5115 +
5116 +- if (urb->sg)
5117 ++ if (urb->num_sgs)
5118 + return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
5119 +
5120 + ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
5121 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
5122 +index 7e42772..5a752d6 100644
5123 +--- a/drivers/usb/host/xhci.c
5124 ++++ b/drivers/usb/host/xhci.c
5125 +@@ -105,6 +105,33 @@ int xhci_halt(struct xhci_hcd *xhci)
5126 + }
5127 +
5128 + /*
5129 ++ * Set the run bit and wait for the host to be running.
5130 ++ */
5131 ++int xhci_start(struct xhci_hcd *xhci)
5132 ++{
5133 ++ u32 temp;
5134 ++ int ret;
5135 ++
5136 ++ temp = xhci_readl(xhci, &xhci->op_regs->command);
5137 ++ temp |= (CMD_RUN);
5138 ++ xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
5139 ++ temp);
5140 ++ xhci_writel(xhci, temp, &xhci->op_regs->command);
5141 ++
5142 ++ /*
5143 ++ * Wait for the HCHalted Status bit to be 0 to indicate the host is
5144 ++ * running.
5145 ++ */
5146 ++ ret = handshake(xhci, &xhci->op_regs->status,
5147 ++ STS_HALT, 0, XHCI_MAX_HALT_USEC);
5148 ++ if (ret == -ETIMEDOUT)
5149 ++ xhci_err(xhci, "Host took too long to start, "
5150 ++ "waited %u microseconds.\n",
5151 ++ XHCI_MAX_HALT_USEC);
5152 ++ return ret;
5153 ++}
5154 ++
5155 ++/*
5156 + * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
5157 + *
5158 + * This resets pipelines, timers, counters, state machines, etc.
5159 +@@ -115,6 +142,7 @@ int xhci_reset(struct xhci_hcd *xhci)
5160 + {
5161 + u32 command;
5162 + u32 state;
5163 ++ int ret;
5164 +
5165 + state = xhci_readl(xhci, &xhci->op_regs->status);
5166 + if ((state & STS_HALT) == 0) {
5167 +@@ -129,7 +157,17 @@ int xhci_reset(struct xhci_hcd *xhci)
5168 + /* XXX: Why does EHCI set this here? Shouldn't other code do this? */
5169 + xhci_to_hcd(xhci)->state = HC_STATE_HALT;
5170 +
5171 +- return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
5172 ++ ret = handshake(xhci, &xhci->op_regs->command,
5173 ++ CMD_RESET, 0, 250 * 1000);
5174 ++ if (ret)
5175 ++ return ret;
5176 ++
5177 ++ xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
5178 ++ /*
5179 ++ * xHCI cannot write to any doorbells or operational registers other
5180 ++ * than status until the "Controller Not Ready" flag is cleared.
5181 ++ */
5182 ++ return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
5183 + }
5184 +
5185 +
5186 +@@ -452,13 +490,11 @@ int xhci_run(struct usb_hcd *hcd)
5187 + if (NUM_TEST_NOOPS > 0)
5188 + doorbell = xhci_setup_one_noop(xhci);
5189 +
5190 +- temp = xhci_readl(xhci, &xhci->op_regs->command);
5191 +- temp |= (CMD_RUN);
5192 +- xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
5193 +- temp);
5194 +- xhci_writel(xhci, temp, &xhci->op_regs->command);
5195 +- /* Flush PCI posted writes */
5196 +- temp = xhci_readl(xhci, &xhci->op_regs->command);
5197 ++ if (xhci_start(xhci)) {
5198 ++ xhci_halt(xhci);
5199 ++ return -ENODEV;
5200 ++ }
5201 ++
5202 + xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
5203 + if (doorbell)
5204 + (*doorbell)(xhci);
5205 +@@ -1438,6 +1474,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
5206 + kfree(virt_ep->stopped_td);
5207 + xhci_ring_cmd_db(xhci);
5208 + }
5209 ++ virt_ep->stopped_td = NULL;
5210 ++ virt_ep->stopped_trb = NULL;
5211 + spin_unlock_irqrestore(&xhci->lock, flags);
5212 +
5213 + if (ret)
5214 +diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
5215 +index ddf7f9a..8a7968d 100644
5216 +--- a/drivers/usb/mon/mon_bin.c
5217 ++++ b/drivers/usb/mon/mon_bin.c
5218 +@@ -416,7 +416,7 @@ static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
5219 +
5220 + } else {
5221 + /* If IOMMU coalescing occurred, we cannot trust sg_page */
5222 +- if (urb->sg->nents != urb->num_sgs) {
5223 ++ if (urb->transfer_flags & URB_DMA_SG_COMBINED) {
5224 + *flag = 'D';
5225 + return length;
5226 + }
5227 +diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
5228 +index 4d0be13..d562602 100644
5229 +--- a/drivers/usb/mon/mon_text.c
5230 ++++ b/drivers/usb/mon/mon_text.c
5231 +@@ -161,9 +161,7 @@ static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
5232 + } else {
5233 + struct scatterlist *sg = urb->sg->sg;
5234 +
5235 +- /* If IOMMU coalescing occurred, we cannot trust sg_page */
5236 +- if (urb->sg->nents != urb->num_sgs ||
5237 +- PageHighMem(sg_page(sg)))
5238 ++ if (PageHighMem(sg_page(sg)))
5239 + return 'D';
5240 +
5241 + /* For the text interface we copy only the first sg buffer */
5242 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
5243 +index ec9b044..009e26c 100644
5244 +--- a/drivers/usb/serial/cp210x.c
5245 ++++ b/drivers/usb/serial/cp210x.c
5246 +@@ -61,6 +61,8 @@ static const struct usb_device_id id_table[] = {
5247 + { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
5248 + { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
5249 + { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
5250 ++ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
5251 ++ { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
5252 + { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
5253 + { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
5254 + { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
5255 +@@ -72,9 +74,12 @@ static const struct usb_device_id id_table[] = {
5256 + { USB_DEVICE(0x10C4, 0x1601) }, /* Arkham Technology DS101 Adapter */
5257 + { USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */
5258 + { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */
5259 ++ { USB_DEVICE(0x10C4, 0x8044) }, /* Cygnal Debug Adapter */
5260 ++ { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
5261 + { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
5262 + { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
5263 + { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
5264 ++ { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
5265 + { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
5266 + { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
5267 + { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
5268 +@@ -82,12 +87,15 @@ static const struct usb_device_id id_table[] = {
5269 + { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
5270 + { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
5271 + { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
5272 ++ { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
5273 + { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
5274 + { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
5275 + { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
5276 ++ { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
5277 + { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
5278 + { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
5279 + { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
5280 ++ { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
5281 + { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
5282 + { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
5283 + { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
5284 +@@ -105,6 +113,7 @@ static const struct usb_device_id id_table[] = {
5285 + { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
5286 + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
5287 + { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
5288 ++ { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
5289 + { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
5290 + { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
5291 + { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
5292 +@@ -115,6 +124,8 @@ static const struct usb_device_id id_table[] = {
5293 + { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
5294 + { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
5295 + { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
5296 ++ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
5297 ++ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
5298 + { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
5299 + { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
5300 + { } /* Terminating Entry */
5301 +diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
5302 +index e23c779..582e832 100644
5303 +--- a/drivers/usb/serial/cypress_m8.c
5304 ++++ b/drivers/usb/serial/cypress_m8.c
5305 +@@ -1309,7 +1309,7 @@ static void cypress_read_int_callback(struct urb *urb)
5306 + /* process read if there is data other than line status */
5307 + if (tty && bytes > i) {
5308 + tty_insert_flip_string_fixed_flag(tty, data + i,
5309 +- bytes - i, tty_flag);
5310 ++ tty_flag, bytes - i);
5311 + tty_flip_buffer_push(tty);
5312 + }
5313 +
5314 +diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
5315 +index 68b0aa5..3edda3e 100644
5316 +--- a/drivers/usb/serial/digi_acceleport.c
5317 ++++ b/drivers/usb/serial/digi_acceleport.c
5318 +@@ -1703,8 +1703,8 @@ static int digi_read_inb_callback(struct urb *urb)
5319 + /* data length is len-1 (one byte of len is port_status) */
5320 + --len;
5321 + if (len > 0) {
5322 +- tty_insert_flip_string_fixed_flag(tty, data, len,
5323 +- flag);
5324 ++ tty_insert_flip_string_fixed_flag(tty, data, flag,
5325 ++ len);
5326 + tty_flip_buffer_push(tty);
5327 + }
5328 + }
5329 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
5330 +index 1d7c4fa..3f5676e 100644
5331 +--- a/drivers/usb/serial/ftdi_sio.c
5332 ++++ b/drivers/usb/serial/ftdi_sio.c
5333 +@@ -2289,6 +2289,8 @@ static void ftdi_set_termios(struct tty_struct *tty,
5334 + "urb failed to set to rts/cts flow control\n");
5335 + }
5336 +
5337 ++ /* raise DTR/RTS */
5338 ++ set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
5339 + } else {
5340 + /*
5341 + * Xon/Xoff code
5342 +@@ -2336,6 +2338,8 @@ static void ftdi_set_termios(struct tty_struct *tty,
5343 + }
5344 + }
5345 +
5346 ++ /* lower DTR/RTS */
5347 ++ clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
5348 + }
5349 + return;
5350 + }
5351 +diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
5352 +index 4a0f519..71bdbe0 100644
5353 +--- a/drivers/usb/serial/ir-usb.c
5354 ++++ b/drivers/usb/serial/ir-usb.c
5355 +@@ -312,6 +312,7 @@ static int ir_open(struct tty_struct *tty, struct usb_serial_port *port)
5356 + kfree(port->read_urb->transfer_buffer);
5357 + port->read_urb->transfer_buffer = buffer;
5358 + port->read_urb->transfer_buffer_length = buffer_size;
5359 ++ port->bulk_in_buffer = buffer;
5360 +
5361 + buffer = kmalloc(buffer_size, GFP_KERNEL);
5362 + if (!buffer) {
5363 +@@ -321,6 +322,7 @@ static int ir_open(struct tty_struct *tty, struct usb_serial_port *port)
5364 + kfree(port->write_urb->transfer_buffer);
5365 + port->write_urb->transfer_buffer = buffer;
5366 + port->write_urb->transfer_buffer_length = buffer_size;
5367 ++ port->bulk_out_buffer = buffer;
5368 + port->bulk_out_size = buffer_size;
5369 + }
5370 +
5371 +diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
5372 +index 8eef91b..cc0ba38 100644
5373 +--- a/drivers/usb/serial/kl5kusb105.c
5374 ++++ b/drivers/usb/serial/kl5kusb105.c
5375 +@@ -321,6 +321,7 @@ err_cleanup:
5376 + usb_free_urb(priv->write_urb_pool[j]);
5377 + }
5378 + }
5379 ++ kfree(priv);
5380 + usb_set_serial_port_data(serial->port[i], NULL);
5381 + }
5382 + return -ENOMEM;
5383 +diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
5384 +index c113a2a..bd5bd85 100644
5385 +--- a/drivers/usb/serial/kobil_sct.c
5386 ++++ b/drivers/usb/serial/kobil_sct.c
5387 +@@ -345,7 +345,8 @@ static void kobil_close(struct usb_serial_port *port)
5388 +
5389 + /* FIXME: Add rts/dtr methods */
5390 + if (port->write_urb) {
5391 +- usb_kill_urb(port->write_urb);
5392 ++ usb_poison_urb(port->write_urb);
5393 ++ kfree(port->write_urb->transfer_buffer);
5394 + usb_free_urb(port->write_urb);
5395 + port->write_urb = NULL;
5396 + }
5397 +diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
5398 +index 2fda1c0..68f6a1d 100644
5399 +--- a/drivers/usb/serial/mos7840.c
5400 ++++ b/drivers/usb/serial/mos7840.c
5401 +@@ -731,7 +731,6 @@ static void mos7840_bulk_in_callback(struct urb *urb)
5402 + mos7840_port = urb->context;
5403 + if (!mos7840_port) {
5404 + dbg("%s", "NULL mos7840_port pointer");
5405 +- mos7840_port->read_urb_busy = false;
5406 + return;
5407 + }
5408 +
5409 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
5410 +index 84d0eda..8b2e612 100644
5411 +--- a/drivers/usb/serial/option.c
5412 ++++ b/drivers/usb/serial/option.c
5413 +@@ -380,6 +380,10 @@ static int option_resume(struct usb_serial *serial);
5414 +
5415 + #define CINTERION_VENDOR_ID 0x0681
5416 +
5417 ++/* Olivetti products */
5418 ++#define OLIVETTI_VENDOR_ID 0x0b3c
5419 ++#define OLIVETTI_PRODUCT_OLICARD100 0xc000
5420 ++
5421 + /* some devices interfaces need special handling due to a number of reasons */
5422 + enum option_blacklist_reason {
5423 + OPTION_BLACKLIST_NONE = 0,
5424 +@@ -675,6 +679,180 @@ static const struct usb_device_id option_ids[] = {
5425 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
5426 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
5427 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
5428 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
5429 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
5430 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
5431 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
5432 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
5433 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
5434 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1060, 0xff, 0xff, 0xff) },
5435 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1061, 0xff, 0xff, 0xff) },
5436 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1062, 0xff, 0xff, 0xff) },
5437 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1063, 0xff, 0xff, 0xff) },
5438 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1064, 0xff, 0xff, 0xff) },
5439 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1065, 0xff, 0xff, 0xff) },
5440 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1066, 0xff, 0xff, 0xff) },
5441 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1067, 0xff, 0xff, 0xff) },
5442 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1068, 0xff, 0xff, 0xff) },
5443 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1069, 0xff, 0xff, 0xff) },
5444 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1070, 0xff, 0xff, 0xff) },
5445 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1071, 0xff, 0xff, 0xff) },
5446 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1072, 0xff, 0xff, 0xff) },
5447 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1073, 0xff, 0xff, 0xff) },
5448 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1074, 0xff, 0xff, 0xff) },
5449 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1075, 0xff, 0xff, 0xff) },
5450 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1076, 0xff, 0xff, 0xff) },
5451 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1077, 0xff, 0xff, 0xff) },
5452 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1078, 0xff, 0xff, 0xff) },
5453 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1079, 0xff, 0xff, 0xff) },
5454 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1080, 0xff, 0xff, 0xff) },
5455 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1081, 0xff, 0xff, 0xff) },
5456 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1082, 0xff, 0xff, 0xff) },
5457 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1083, 0xff, 0xff, 0xff) },
5458 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1084, 0xff, 0xff, 0xff) },
5459 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1085, 0xff, 0xff, 0xff) },
5460 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1086, 0xff, 0xff, 0xff) },
5461 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1087, 0xff, 0xff, 0xff) },
5462 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1088, 0xff, 0xff, 0xff) },
5463 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1089, 0xff, 0xff, 0xff) },
5464 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1090, 0xff, 0xff, 0xff) },
5465 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1091, 0xff, 0xff, 0xff) },
5466 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1092, 0xff, 0xff, 0xff) },
5467 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1093, 0xff, 0xff, 0xff) },
5468 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1094, 0xff, 0xff, 0xff) },
5469 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1095, 0xff, 0xff, 0xff) },
5470 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1096, 0xff, 0xff, 0xff) },
5471 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1097, 0xff, 0xff, 0xff) },
5472 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1098, 0xff, 0xff, 0xff) },
5473 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1099, 0xff, 0xff, 0xff) },
5474 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1100, 0xff, 0xff, 0xff) },
5475 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1101, 0xff, 0xff, 0xff) },
5476 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1102, 0xff, 0xff, 0xff) },
5477 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1103, 0xff, 0xff, 0xff) },
5478 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1104, 0xff, 0xff, 0xff) },
5479 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1105, 0xff, 0xff, 0xff) },
5480 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1106, 0xff, 0xff, 0xff) },
5481 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1107, 0xff, 0xff, 0xff) },
5482 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1108, 0xff, 0xff, 0xff) },
5483 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1109, 0xff, 0xff, 0xff) },
5484 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1110, 0xff, 0xff, 0xff) },
5485 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1111, 0xff, 0xff, 0xff) },
5486 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1112, 0xff, 0xff, 0xff) },
5487 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1113, 0xff, 0xff, 0xff) },
5488 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1114, 0xff, 0xff, 0xff) },
5489 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1115, 0xff, 0xff, 0xff) },
5490 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1116, 0xff, 0xff, 0xff) },
5491 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1117, 0xff, 0xff, 0xff) },
5492 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1118, 0xff, 0xff, 0xff) },
5493 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1119, 0xff, 0xff, 0xff) },
5494 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1120, 0xff, 0xff, 0xff) },
5495 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1121, 0xff, 0xff, 0xff) },
5496 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1122, 0xff, 0xff, 0xff) },
5497 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1123, 0xff, 0xff, 0xff) },
5498 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1124, 0xff, 0xff, 0xff) },
5499 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1125, 0xff, 0xff, 0xff) },
5500 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1126, 0xff, 0xff, 0xff) },
5501 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1127, 0xff, 0xff, 0xff) },
5502 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1128, 0xff, 0xff, 0xff) },
5503 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1129, 0xff, 0xff, 0xff) },
5504 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1130, 0xff, 0xff, 0xff) },
5505 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1131, 0xff, 0xff, 0xff) },
5506 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1132, 0xff, 0xff, 0xff) },
5507 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1133, 0xff, 0xff, 0xff) },
5508 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1134, 0xff, 0xff, 0xff) },
5509 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1135, 0xff, 0xff, 0xff) },
5510 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1136, 0xff, 0xff, 0xff) },
5511 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1137, 0xff, 0xff, 0xff) },
5512 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1138, 0xff, 0xff, 0xff) },
5513 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1139, 0xff, 0xff, 0xff) },
5514 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1140, 0xff, 0xff, 0xff) },
5515 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1141, 0xff, 0xff, 0xff) },
5516 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1142, 0xff, 0xff, 0xff) },
5517 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1143, 0xff, 0xff, 0xff) },
5518 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1144, 0xff, 0xff, 0xff) },
5519 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1145, 0xff, 0xff, 0xff) },
5520 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1146, 0xff, 0xff, 0xff) },
5521 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1147, 0xff, 0xff, 0xff) },
5522 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1148, 0xff, 0xff, 0xff) },
5523 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1149, 0xff, 0xff, 0xff) },
5524 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1150, 0xff, 0xff, 0xff) },
5525 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1151, 0xff, 0xff, 0xff) },
5526 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1152, 0xff, 0xff, 0xff) },
5527 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1153, 0xff, 0xff, 0xff) },
5528 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1154, 0xff, 0xff, 0xff) },
5529 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1155, 0xff, 0xff, 0xff) },
5530 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1156, 0xff, 0xff, 0xff) },
5531 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1157, 0xff, 0xff, 0xff) },
5532 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1158, 0xff, 0xff, 0xff) },
5533 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1159, 0xff, 0xff, 0xff) },
5534 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1160, 0xff, 0xff, 0xff) },
5535 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1161, 0xff, 0xff, 0xff) },
5536 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1162, 0xff, 0xff, 0xff) },
5537 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1163, 0xff, 0xff, 0xff) },
5538 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1164, 0xff, 0xff, 0xff) },
5539 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1165, 0xff, 0xff, 0xff) },
5540 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1166, 0xff, 0xff, 0xff) },
5541 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1167, 0xff, 0xff, 0xff) },
5542 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1168, 0xff, 0xff, 0xff) },
5543 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) },
5544 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
5545 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
5546 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) },
5547 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
5548 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) },
5549 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
5550 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
5551 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
5552 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
5553 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) },
5554 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
5555 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) },
5556 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) },
5557 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) },
5558 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
5559 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
5560 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
5561 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1260, 0xff, 0xff, 0xff) },
5562 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1261, 0xff, 0xff, 0xff) },
5563 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1262, 0xff, 0xff, 0xff) },
5564 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1263, 0xff, 0xff, 0xff) },
5565 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1264, 0xff, 0xff, 0xff) },
5566 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1265, 0xff, 0xff, 0xff) },
5567 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1266, 0xff, 0xff, 0xff) },
5568 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
5569 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
5570 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
5571 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) },
5572 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
5573 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
5574 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
5575 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) },
5576 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) },
5577 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) },
5578 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) },
5579 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) },
5580 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1279, 0xff, 0xff, 0xff) },
5581 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1280, 0xff, 0xff, 0xff) },
5582 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1281, 0xff, 0xff, 0xff) },
5583 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1282, 0xff, 0xff, 0xff) },
5584 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1283, 0xff, 0xff, 0xff) },
5585 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1284, 0xff, 0xff, 0xff) },
5586 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1285, 0xff, 0xff, 0xff) },
5587 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1286, 0xff, 0xff, 0xff) },
5588 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1287, 0xff, 0xff, 0xff) },
5589 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1288, 0xff, 0xff, 0xff) },
5590 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1289, 0xff, 0xff, 0xff) },
5591 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1290, 0xff, 0xff, 0xff) },
5592 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1291, 0xff, 0xff, 0xff) },
5593 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1292, 0xff, 0xff, 0xff) },
5594 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1293, 0xff, 0xff, 0xff) },
5595 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1294, 0xff, 0xff, 0xff) },
5596 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1295, 0xff, 0xff, 0xff) },
5597 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1296, 0xff, 0xff, 0xff) },
5598 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1297, 0xff, 0xff, 0xff) },
5599 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
5600 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
5601 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
5602 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
5603 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
5604 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
5605 +@@ -726,6 +904,8 @@ static const struct usb_device_id option_ids[] = {
5606 + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
5607 +
5608 + { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
5609 ++
5610 ++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
5611 + { } /* Terminating entry */
5612 + };
5613 + MODULE_DEVICE_TABLE(usb, option_ids);
5614 +diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
5615 +index 7e3bea2..214a3e5 100644
5616 +--- a/drivers/usb/serial/qcaux.c
5617 ++++ b/drivers/usb/serial/qcaux.c
5618 +@@ -50,6 +50,10 @@
5619 + #define SANYO_VENDOR_ID 0x0474
5620 + #define SANYO_PRODUCT_KATANA_LX 0x0754 /* SCP-3800 (Katana LX) */
5621 +
5622 ++/* Samsung devices */
5623 ++#define SAMSUNG_VENDOR_ID 0x04e8
5624 ++#define SAMSUNG_PRODUCT_U520 0x6640 /* SCH-U520 */
5625 ++
5626 + static struct usb_device_id id_table[] = {
5627 + { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) },
5628 + { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) },
5629 +@@ -61,6 +65,7 @@ static struct usb_device_id id_table[] = {
5630 + { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) },
5631 + { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
5632 + { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
5633 ++ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
5634 + { },
5635 + };
5636 + MODULE_DEVICE_TABLE(usb, id_table);
5637 +diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
5638 +index 5d39191..2ea32c5 100644
5639 +--- a/drivers/usb/serial/spcp8x5.c
5640 ++++ b/drivers/usb/serial/spcp8x5.c
5641 +@@ -726,8 +726,8 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
5642 + /* overrun is special, not associated with a char */
5643 + if (status & UART_OVERRUN_ERROR)
5644 + tty_insert_flip_char(tty, 0, TTY_OVERRUN);
5645 +- tty_insert_flip_string_fixed_flag(tty, data,
5646 +- urb->actual_length, tty_flag);
5647 ++ tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
5648 ++ urb->actual_length);
5649 + tty_flip_buffer_push(tty);
5650 + }
5651 + tty_kref_put(tty);
5652 +diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
5653 +index 0949427..fb7fc40 100644
5654 +--- a/drivers/usb/serial/visor.c
5655 ++++ b/drivers/usb/serial/visor.c
5656 +@@ -249,6 +249,7 @@ static struct usb_serial_driver clie_3_5_device = {
5657 + .throttle = visor_throttle,
5658 + .unthrottle = visor_unthrottle,
5659 + .attach = clie_3_5_startup,
5660 ++ .release = visor_release,
5661 + .write = visor_write,
5662 + .write_room = visor_write_room,
5663 + .write_bulk_callback = visor_write_bulk_callback,
5664 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
5665 +index ccf1dbb..55b3cd1 100644
5666 +--- a/drivers/usb/storage/unusual_devs.h
5667 ++++ b/drivers/usb/storage/unusual_devs.h
5668 +@@ -1853,6 +1853,21 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
5669 + US_SC_DEVICE, US_PR_DEVICE, NULL,
5670 + US_FL_IGNORE_RESIDUE ),
5671 +
5672 ++/* Reported by Hans de Goede <hdegoede@××××××.com>
5673 ++ * These Appotech controllers are found in Picture Frames, they provide a
5674 ++ * (buggy) emulation of a cdrom drive which contains the windows software
5675 ++ * Uploading of pictures happens over the corresponding /dev/sg device. */
5676 ++UNUSUAL_DEV( 0x1908, 0x1315, 0x0000, 0x0000,
5677 ++ "BUILDWIN",
5678 ++ "Photo Frame",
5679 ++ US_SC_DEVICE, US_PR_DEVICE, NULL,
5680 ++ US_FL_BAD_SENSE ),
5681 ++UNUSUAL_DEV( 0x1908, 0x1320, 0x0000, 0x0000,
5682 ++ "BUILDWIN",
5683 ++ "Photo Frame",
5684 ++ US_SC_DEVICE, US_PR_DEVICE, NULL,
5685 ++ US_FL_BAD_SENSE ),
5686 ++
5687 + UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
5688 + "ST",
5689 + "2A",
5690 +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
5691 +index 9777583..c9d0c79 100644
5692 +--- a/drivers/vhost/net.c
5693 ++++ b/drivers/vhost/net.c
5694 +@@ -637,7 +637,7 @@ const static struct file_operations vhost_net_fops = {
5695 + };
5696 +
5697 + static struct miscdevice vhost_net_misc = {
5698 +- VHOST_NET_MINOR,
5699 ++ MISC_DYNAMIC_MINOR,
5700 + "vhost-net",
5701 + &vhost_net_fops,
5702 + };
5703 +diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
5704 +index 8d406fb..f3d7440 100644
5705 +--- a/drivers/video/arcfb.c
5706 ++++ b/drivers/video/arcfb.c
5707 +@@ -80,7 +80,7 @@ struct arcfb_par {
5708 + spinlock_t lock;
5709 + };
5710 +
5711 +-static struct fb_fix_screeninfo arcfb_fix __initdata = {
5712 ++static struct fb_fix_screeninfo arcfb_fix __devinitdata = {
5713 + .id = "arcfb",
5714 + .type = FB_TYPE_PACKED_PIXELS,
5715 + .visual = FB_VISUAL_MONO01,
5716 +@@ -90,7 +90,7 @@ static struct fb_fix_screeninfo arcfb_fix __initdata = {
5717 + .accel = FB_ACCEL_NONE,
5718 + };
5719 +
5720 +-static struct fb_var_screeninfo arcfb_var __initdata = {
5721 ++static struct fb_var_screeninfo arcfb_var __devinitdata = {
5722 + .xres = 128,
5723 + .yres = 64,
5724 + .xres_virtual = 128,
5725 +@@ -588,7 +588,7 @@ err:
5726 + return retval;
5727 + }
5728 +
5729 +-static int arcfb_remove(struct platform_device *dev)
5730 ++static int __devexit arcfb_remove(struct platform_device *dev)
5731 + {
5732 + struct fb_info *info = platform_get_drvdata(dev);
5733 +
5734 +@@ -602,7 +602,7 @@ static int arcfb_remove(struct platform_device *dev)
5735 +
5736 + static struct platform_driver arcfb_driver = {
5737 + .probe = arcfb_probe,
5738 +- .remove = arcfb_remove,
5739 ++ .remove = __devexit_p(arcfb_remove),
5740 + .driver = {
5741 + .name = "arcfb",
5742 + },
5743 +diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c
5744 +index 8bbf251..af8f0f2 100644
5745 +--- a/drivers/video/hgafb.c
5746 ++++ b/drivers/video/hgafb.c
5747 +@@ -106,7 +106,7 @@ static DEFINE_SPINLOCK(hga_reg_lock);
5748 +
5749 + /* Framebuffer driver structures */
5750 +
5751 +-static struct fb_var_screeninfo __initdata hga_default_var = {
5752 ++static struct fb_var_screeninfo hga_default_var __devinitdata = {
5753 + .xres = 720,
5754 + .yres = 348,
5755 + .xres_virtual = 720,
5756 +@@ -120,7 +120,7 @@ static struct fb_var_screeninfo __initdata hga_default_var = {
5757 + .width = -1,
5758 + };
5759 +
5760 +-static struct fb_fix_screeninfo __initdata hga_fix = {
5761 ++static struct fb_fix_screeninfo hga_fix __devinitdata = {
5762 + .id = "HGA",
5763 + .type = FB_TYPE_PACKED_PIXELS, /* (not sure) */
5764 + .visual = FB_VISUAL_MONO10,
5765 +@@ -276,7 +276,7 @@ static void hga_blank(int blank_mode)
5766 + spin_unlock_irqrestore(&hga_reg_lock, flags);
5767 + }
5768 +
5769 +-static int __init hga_card_detect(void)
5770 ++static int __devinit hga_card_detect(void)
5771 + {
5772 + int count = 0;
5773 + void __iomem *p, *q;
5774 +@@ -596,7 +596,7 @@ static int __devinit hgafb_probe(struct platform_device *pdev)
5775 + return 0;
5776 + }
5777 +
5778 +-static int hgafb_remove(struct platform_device *pdev)
5779 ++static int __devexit hgafb_remove(struct platform_device *pdev)
5780 + {
5781 + struct fb_info *info = platform_get_drvdata(pdev);
5782 +
5783 +@@ -621,7 +621,7 @@ static int hgafb_remove(struct platform_device *pdev)
5784 +
5785 + static struct platform_driver hgafb_driver = {
5786 + .probe = hgafb_probe,
5787 +- .remove = hgafb_remove,
5788 ++ .remove = __devexit_p(hgafb_remove),
5789 + .driver = {
5790 + .name = "hgafb",
5791 + },
5792 +diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
5793 +index 9b5532b..bc67251 100644
5794 +--- a/drivers/video/vfb.c
5795 ++++ b/drivers/video/vfb.c
5796 +@@ -78,7 +78,7 @@ static void rvfree(void *mem, unsigned long size)
5797 + vfree(mem);
5798 + }
5799 +
5800 +-static struct fb_var_screeninfo vfb_default __initdata = {
5801 ++static struct fb_var_screeninfo vfb_default __devinitdata = {
5802 + .xres = 640,
5803 + .yres = 480,
5804 + .xres_virtual = 640,
5805 +@@ -100,7 +100,7 @@ static struct fb_var_screeninfo vfb_default __initdata = {
5806 + .vmode = FB_VMODE_NONINTERLACED,
5807 + };
5808 +
5809 +-static struct fb_fix_screeninfo vfb_fix __initdata = {
5810 ++static struct fb_fix_screeninfo vfb_fix __devinitdata = {
5811 + .id = "Virtual FB",
5812 + .type = FB_TYPE_PACKED_PIXELS,
5813 + .visual = FB_VISUAL_PSEUDOCOLOR,
5814 +diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
5815 +index bf638a4..2ab3cc7 100644
5816 +--- a/drivers/video/vga16fb.c
5817 ++++ b/drivers/video/vga16fb.c
5818 +@@ -65,7 +65,7 @@ struct vga16fb_par {
5819 +
5820 + /* --------------------------------------------------------------------- */
5821 +
5822 +-static struct fb_var_screeninfo vga16fb_defined __initdata = {
5823 ++static struct fb_var_screeninfo vga16fb_defined __devinitdata = {
5824 + .xres = 640,
5825 + .yres = 480,
5826 + .xres_virtual = 640,
5827 +@@ -85,7 +85,7 @@ static struct fb_var_screeninfo vga16fb_defined __initdata = {
5828 + };
5829 +
5830 + /* name should not depend on EGA/VGA */
5831 +-static struct fb_fix_screeninfo vga16fb_fix __initdata = {
5832 ++static struct fb_fix_screeninfo vga16fb_fix __devinitdata = {
5833 + .id = "VGA16 VGA",
5834 + .smem_start = VGA_FB_PHYS,
5835 + .smem_len = VGA_FB_PHYS_LEN,
5836 +@@ -1278,7 +1278,7 @@ static struct fb_ops vga16fb_ops = {
5837 + };
5838 +
5839 + #ifndef MODULE
5840 +-static int vga16fb_setup(char *options)
5841 ++static int __init vga16fb_setup(char *options)
5842 + {
5843 + char *this_opt;
5844 +
5845 +@@ -1376,7 +1376,7 @@ static int __devinit vga16fb_probe(struct platform_device *dev)
5846 + return ret;
5847 + }
5848 +
5849 +-static int vga16fb_remove(struct platform_device *dev)
5850 ++static int __devexit vga16fb_remove(struct platform_device *dev)
5851 + {
5852 + struct fb_info *info = platform_get_drvdata(dev);
5853 +
5854 +@@ -1393,7 +1393,7 @@ static int vga16fb_remove(struct platform_device *dev)
5855 +
5856 + static struct platform_driver vga16fb_driver = {
5857 + .probe = vga16fb_probe,
5858 +- .remove = vga16fb_remove,
5859 ++ .remove = __devexit_p(vga16fb_remove),
5860 + .driver = {
5861 + .name = "vga16fb",
5862 + },
5863 +diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
5864 +index 31b0e17..e66b8b1 100644
5865 +--- a/drivers/video/w100fb.c
5866 ++++ b/drivers/video/w100fb.c
5867 +@@ -53,7 +53,7 @@ static void w100_update_enable(void);
5868 + static void w100_update_disable(void);
5869 + static void calc_hsync(struct w100fb_par *par);
5870 + static void w100_init_graphic_engine(struct w100fb_par *par);
5871 +-struct w100_pll_info *w100_get_xtal_table(unsigned int freq);
5872 ++struct w100_pll_info *w100_get_xtal_table(unsigned int freq) __devinit;
5873 +
5874 + /* Pseudo palette size */
5875 + #define MAX_PALETTES 16
5876 +@@ -782,7 +782,7 @@ out:
5877 + }
5878 +
5879 +
5880 +-static int w100fb_remove(struct platform_device *pdev)
5881 ++static int __devexit w100fb_remove(struct platform_device *pdev)
5882 + {
5883 + struct fb_info *info = platform_get_drvdata(pdev);
5884 + struct w100fb_par *par=info->par;
5885 +@@ -1020,7 +1020,7 @@ static struct pll_entries {
5886 + { 0 },
5887 + };
5888 +
5889 +-struct w100_pll_info *w100_get_xtal_table(unsigned int freq)
5890 ++struct w100_pll_info __devinit *w100_get_xtal_table(unsigned int freq)
5891 + {
5892 + struct pll_entries *pll_entry = w100_pll_tables;
5893 +
5894 +@@ -1611,7 +1611,7 @@ static void w100_vsync(void)
5895 +
5896 + static struct platform_driver w100fb_driver = {
5897 + .probe = w100fb_probe,
5898 +- .remove = w100fb_remove,
5899 ++ .remove = __devexit_p(w100fb_remove),
5900 + .suspend = w100fb_suspend,
5901 + .resume = w100fb_resume,
5902 + .driver = {
5903 +@@ -1619,7 +1619,7 @@ static struct platform_driver w100fb_driver = {
5904 + },
5905 + };
5906 +
5907 +-int __devinit w100fb_init(void)
5908 ++int __init w100fb_init(void)
5909 + {
5910 + return platform_driver_register(&w100fb_driver);
5911 + }
5912 +diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
5913 +index eab33f1..7b547f5 100644
5914 +--- a/drivers/xen/xenbus/xenbus_xs.c
5915 ++++ b/drivers/xen/xenbus/xenbus_xs.c
5916 +@@ -499,7 +499,7 @@ int xenbus_printf(struct xenbus_transaction t,
5917 + #define PRINTF_BUFFER_SIZE 4096
5918 + char *printf_buffer;
5919 +
5920 +- printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
5921 ++ printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
5922 + if (printf_buffer == NULL)
5923 + return -ENOMEM;
5924 +
5925 +diff --git a/fs/aio.c b/fs/aio.c
5926 +index 1cf12b3..48fdeeb 100644
5927 +--- a/fs/aio.c
5928 ++++ b/fs/aio.c
5929 +@@ -36,6 +36,7 @@
5930 + #include <linux/blkdev.h>
5931 + #include <linux/mempool.h>
5932 + #include <linux/hash.h>
5933 ++#include <linux/compat.h>
5934 +
5935 + #include <asm/kmap_types.h>
5936 + #include <asm/uaccess.h>
5937 +@@ -1384,13 +1385,22 @@ static ssize_t aio_fsync(struct kiocb *iocb)
5938 + return ret;
5939 + }
5940 +
5941 +-static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
5942 ++static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
5943 + {
5944 + ssize_t ret;
5945 +
5946 +- ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
5947 +- kiocb->ki_nbytes, 1,
5948 +- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
5949 ++#ifdef CONFIG_COMPAT
5950 ++ if (compat)
5951 ++ ret = compat_rw_copy_check_uvector(type,
5952 ++ (struct compat_iovec __user *)kiocb->ki_buf,
5953 ++ kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
5954 ++ &kiocb->ki_iovec);
5955 ++ else
5956 ++#endif
5957 ++ ret = rw_copy_check_uvector(type,
5958 ++ (struct iovec __user *)kiocb->ki_buf,
5959 ++ kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
5960 ++ &kiocb->ki_iovec);
5961 + if (ret < 0)
5962 + goto out;
5963 +
5964 +@@ -1420,7 +1430,7 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
5965 + * Performs the initial checks and aio retry method
5966 + * setup for the kiocb at the time of io submission.
5967 + */
5968 +-static ssize_t aio_setup_iocb(struct kiocb *kiocb)
5969 ++static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
5970 + {
5971 + struct file *file = kiocb->ki_filp;
5972 + ssize_t ret = 0;
5973 +@@ -1469,7 +1479,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
5974 + ret = security_file_permission(file, MAY_READ);
5975 + if (unlikely(ret))
5976 + break;
5977 +- ret = aio_setup_vectored_rw(READ, kiocb);
5978 ++ ret = aio_setup_vectored_rw(READ, kiocb, compat);
5979 + if (ret)
5980 + break;
5981 + ret = -EINVAL;
5982 +@@ -1483,7 +1493,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
5983 + ret = security_file_permission(file, MAY_WRITE);
5984 + if (unlikely(ret))
5985 + break;
5986 +- ret = aio_setup_vectored_rw(WRITE, kiocb);
5987 ++ ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
5988 + if (ret)
5989 + break;
5990 + ret = -EINVAL;
5991 +@@ -1548,7 +1558,8 @@ static void aio_batch_free(struct hlist_head *batch_hash)
5992 + }
5993 +
5994 + static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
5995 +- struct iocb *iocb, struct hlist_head *batch_hash)
5996 ++ struct iocb *iocb, struct hlist_head *batch_hash,
5997 ++ bool compat)
5998 + {
5999 + struct kiocb *req;
6000 + struct file *file;
6001 +@@ -1609,7 +1620,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
6002 + req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
6003 + req->ki_opcode = iocb->aio_lio_opcode;
6004 +
6005 +- ret = aio_setup_iocb(req);
6006 ++ ret = aio_setup_iocb(req, compat);
6007 +
6008 + if (ret)
6009 + goto out_put_req;
6010 +@@ -1637,20 +1648,8 @@ out_put_req:
6011 + return ret;
6012 + }
6013 +
6014 +-/* sys_io_submit:
6015 +- * Queue the nr iocbs pointed to by iocbpp for processing. Returns
6016 +- * the number of iocbs queued. May return -EINVAL if the aio_context
6017 +- * specified by ctx_id is invalid, if nr is < 0, if the iocb at
6018 +- * *iocbpp[0] is not properly initialized, if the operation specified
6019 +- * is invalid for the file descriptor in the iocb. May fail with
6020 +- * -EFAULT if any of the data structures point to invalid data. May
6021 +- * fail with -EBADF if the file descriptor specified in the first
6022 +- * iocb is invalid. May fail with -EAGAIN if insufficient resources
6023 +- * are available to queue any iocbs. Will return 0 if nr is 0. Will
6024 +- * fail with -ENOSYS if not implemented.
6025 +- */
6026 +-SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
6027 +- struct iocb __user * __user *, iocbpp)
6028 ++long do_io_submit(aio_context_t ctx_id, long nr,
6029 ++ struct iocb __user *__user *iocbpp, bool compat)
6030 + {
6031 + struct kioctx *ctx;
6032 + long ret = 0;
6033 +@@ -1687,7 +1686,7 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
6034 + break;
6035 + }
6036 +
6037 +- ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash);
6038 ++ ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
6039 + if (ret)
6040 + break;
6041 + }
6042 +@@ -1697,6 +1696,24 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
6043 + return i ? i : ret;
6044 + }
6045 +
6046 ++/* sys_io_submit:
6047 ++ * Queue the nr iocbs pointed to by iocbpp for processing. Returns
6048 ++ * the number of iocbs queued. May return -EINVAL if the aio_context
6049 ++ * specified by ctx_id is invalid, if nr is < 0, if the iocb at
6050 ++ * *iocbpp[0] is not properly initialized, if the operation specified
6051 ++ * is invalid for the file descriptor in the iocb. May fail with
6052 ++ * -EFAULT if any of the data structures point to invalid data. May
6053 ++ * fail with -EBADF if the file descriptor specified in the first
6054 ++ * iocb is invalid. May fail with -EAGAIN if insufficient resources
6055 ++ * are available to queue any iocbs. Will return 0 if nr is 0. Will
6056 ++ * fail with -ENOSYS if not implemented.
6057 ++ */
6058 ++SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
6059 ++ struct iocb __user * __user *, iocbpp)
6060 ++{
6061 ++ return do_io_submit(ctx_id, nr, iocbpp, 0);
6062 ++}
6063 ++
6064 + /* lookup_kiocb
6065 + * Finds a given iocb for cancellation.
6066 + */
6067 +diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
6068 +index 6ef7b26..6b4d0cc 100644
6069 +--- a/fs/btrfs/acl.c
6070 ++++ b/fs/btrfs/acl.c
6071 +@@ -160,6 +160,9 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
6072 + int ret;
6073 + struct posix_acl *acl = NULL;
6074 +
6075 ++ if (!is_owner_or_cap(dentry->d_inode))
6076 ++ return -EPERM;
6077 ++
6078 + if (value) {
6079 + acl = posix_acl_from_xattr(value, size);
6080 + if (acl == NULL) {
6081 +diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
6082 +index 39e47f4..a6db615 100644
6083 +--- a/fs/cifs/cifsproto.h
6084 ++++ b/fs/cifs/cifsproto.h
6085 +@@ -95,8 +95,10 @@ extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode,
6086 + __u16 fileHandle, struct file *file,
6087 + struct vfsmount *mnt, unsigned int oflags);
6088 + extern int cifs_posix_open(char *full_path, struct inode **pinode,
6089 +- struct vfsmount *mnt, int mode, int oflags,
6090 +- __u32 *poplock, __u16 *pnetfid, int xid);
6091 ++ struct vfsmount *mnt,
6092 ++ struct super_block *sb,
6093 ++ int mode, int oflags,
6094 ++ __u32 *poplock, __u16 *pnetfid, int xid);
6095 + extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
6096 + FILE_UNIX_BASIC_INFO *info,
6097 + struct cifs_sb_info *cifs_sb);
6098 +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
6099 +index e9f7ecc..ff3d891 100644
6100 +--- a/fs/cifs/dir.c
6101 ++++ b/fs/cifs/dir.c
6102 +@@ -183,13 +183,14 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
6103 + }
6104 +
6105 + int cifs_posix_open(char *full_path, struct inode **pinode,
6106 +- struct vfsmount *mnt, int mode, int oflags,
6107 +- __u32 *poplock, __u16 *pnetfid, int xid)
6108 ++ struct vfsmount *mnt, struct super_block *sb,
6109 ++ int mode, int oflags,
6110 ++ __u32 *poplock, __u16 *pnetfid, int xid)
6111 + {
6112 + int rc;
6113 + FILE_UNIX_BASIC_INFO *presp_data;
6114 + __u32 posix_flags = 0;
6115 +- struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
6116 ++ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
6117 + struct cifs_fattr fattr;
6118 +
6119 + cFYI(1, ("posix open %s", full_path));
6120 +@@ -242,7 +243,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
6121 +
6122 + /* get new inode and set it up */
6123 + if (*pinode == NULL) {
6124 +- *pinode = cifs_iget(mnt->mnt_sb, &fattr);
6125 ++ *pinode = cifs_iget(sb, &fattr);
6126 + if (!*pinode) {
6127 + rc = -ENOMEM;
6128 + goto posix_open_ret;
6129 +@@ -251,7 +252,8 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
6130 + cifs_fattr_to_inode(*pinode, &fattr);
6131 + }
6132 +
6133 +- cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
6134 ++ if (mnt)
6135 ++ cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
6136 +
6137 + posix_open_ret:
6138 + kfree(presp_data);
6139 +@@ -315,13 +317,14 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
6140 + if (nd && (nd->flags & LOOKUP_OPEN))
6141 + oflags = nd->intent.open.flags;
6142 + else
6143 +- oflags = FMODE_READ;
6144 ++ oflags = FMODE_READ | SMB_O_CREAT;
6145 +
6146 + if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
6147 + (CIFS_UNIX_POSIX_PATH_OPS_CAP &
6148 + le64_to_cpu(tcon->fsUnixInfo.Capability))) {
6149 +- rc = cifs_posix_open(full_path, &newinode, nd->path.mnt,
6150 +- mode, oflags, &oplock, &fileHandle, xid);
6151 ++ rc = cifs_posix_open(full_path, &newinode,
6152 ++ nd ? nd->path.mnt : NULL,
6153 ++ inode->i_sb, mode, oflags, &oplock, &fileHandle, xid);
6154 + /* EIO could indicate that (posix open) operation is not
6155 + supported, despite what server claimed in capability
6156 + negotation. EREMOTE indicates DFS junction, which is not
6157 +@@ -678,6 +681,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
6158 + (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
6159 + (nd->intent.open.flags & O_CREAT)) {
6160 + rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
6161 ++ parent_dir_inode->i_sb,
6162 + nd->intent.open.create_mode,
6163 + nd->intent.open.flags, &oplock,
6164 + &fileHandle, xid);
6165 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
6166 +index 9b11a8f..4cbdb20 100644
6167 +--- a/fs/cifs/file.c
6168 ++++ b/fs/cifs/file.c
6169 +@@ -298,10 +298,12 @@ int cifs_open(struct inode *inode, struct file *file)
6170 + (CIFS_UNIX_POSIX_PATH_OPS_CAP &
6171 + le64_to_cpu(tcon->fsUnixInfo.Capability))) {
6172 + int oflags = (int) cifs_posix_convert_flags(file->f_flags);
6173 ++ oflags |= SMB_O_CREAT;
6174 + /* can not refresh inode info since size could be stale */
6175 + rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
6176 +- cifs_sb->mnt_file_mode /* ignored */,
6177 +- oflags, &oplock, &netfid, xid);
6178 ++ inode->i_sb,
6179 ++ cifs_sb->mnt_file_mode /* ignored */,
6180 ++ oflags, &oplock, &netfid, xid);
6181 + if (rc == 0) {
6182 + cFYI(1, ("posix open succeeded"));
6183 + /* no need for special case handling of setting mode
6184 +@@ -513,8 +515,9 @@ reopen_error_exit:
6185 + int oflags = (int) cifs_posix_convert_flags(file->f_flags);
6186 + /* can not refresh inode info since size could be stale */
6187 + rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
6188 +- cifs_sb->mnt_file_mode /* ignored */,
6189 +- oflags, &oplock, &netfid, xid);
6190 ++ inode->i_sb,
6191 ++ cifs_sb->mnt_file_mode /* ignored */,
6192 ++ oflags, &oplock, &netfid, xid);
6193 + if (rc == 0) {
6194 + cFYI(1, ("posix reopen succeeded"));
6195 + goto reopen_success;
6196 +diff --git a/fs/compat.c b/fs/compat.c
6197 +index 0544873..6490d21 100644
6198 +--- a/fs/compat.c
6199 ++++ b/fs/compat.c
6200 +@@ -568,6 +568,79 @@ out:
6201 + return ret;
6202 + }
6203 +
6204 ++/* A write operation does a read from user space and vice versa */
6205 ++#define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)
6206 ++
6207 ++ssize_t compat_rw_copy_check_uvector(int type,
6208 ++ const struct compat_iovec __user *uvector, unsigned long nr_segs,
6209 ++ unsigned long fast_segs, struct iovec *fast_pointer,
6210 ++ struct iovec **ret_pointer)
6211 ++{
6212 ++ compat_ssize_t tot_len;
6213 ++ struct iovec *iov = *ret_pointer = fast_pointer;
6214 ++ ssize_t ret = 0;
6215 ++ int seg;
6216 ++
6217 ++ /*
6218 ++ * SuS says "The readv() function *may* fail if the iovcnt argument
6219 ++ * was less than or equal to 0, or greater than {IOV_MAX}. Linux has
6220 ++ * traditionally returned zero for zero segments, so...
6221 ++ */
6222 ++ if (nr_segs == 0)
6223 ++ goto out;
6224 ++
6225 ++ ret = -EINVAL;
6226 ++ if (nr_segs > UIO_MAXIOV || nr_segs < 0)
6227 ++ goto out;
6228 ++ if (nr_segs > fast_segs) {
6229 ++ ret = -ENOMEM;
6230 ++ iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
6231 ++ if (iov == NULL) {
6232 ++ *ret_pointer = fast_pointer;
6233 ++ goto out;
6234 ++ }
6235 ++ }
6236 ++ *ret_pointer = iov;
6237 ++
6238 ++ /*
6239 ++ * Single unix specification:
6240 ++ * We should -EINVAL if an element length is not >= 0 and fitting an
6241 ++ * ssize_t. The total length is fitting an ssize_t
6242 ++ *
6243 ++ * Be careful here because iov_len is a size_t not an ssize_t
6244 ++ */
6245 ++ tot_len = 0;
6246 ++ ret = -EINVAL;
6247 ++ for (seg = 0; seg < nr_segs; seg++) {
6248 ++ compat_ssize_t tmp = tot_len;
6249 ++ compat_uptr_t buf;
6250 ++ compat_ssize_t len;
6251 ++
6252 ++ if (__get_user(len, &uvector->iov_len) ||
6253 ++ __get_user(buf, &uvector->iov_base)) {
6254 ++ ret = -EFAULT;
6255 ++ goto out;
6256 ++ }
6257 ++ if (len < 0) /* size_t not fitting in compat_ssize_t .. */
6258 ++ goto out;
6259 ++ tot_len += len;
6260 ++ if (tot_len < tmp) /* maths overflow on the compat_ssize_t */
6261 ++ goto out;
6262 ++ if (!access_ok(vrfy_dir(type), compat_ptr(buf), len)) {
6263 ++ ret = -EFAULT;
6264 ++ goto out;
6265 ++ }
6266 ++ iov->iov_base = compat_ptr(buf);
6267 ++ iov->iov_len = (compat_size_t) len;
6268 ++ uvector++;
6269 ++ iov++;
6270 ++ }
6271 ++ ret = tot_len;
6272 ++
6273 ++out:
6274 ++ return ret;
6275 ++}
6276 ++
6277 + static inline long
6278 + copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64)
6279 + {
6280 +@@ -600,7 +673,7 @@ compat_sys_io_submit(aio_context_t ctx_id, int nr, u32 __user *iocb)
6281 + iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64));
6282 + ret = copy_iocb(nr, iocb, iocb64);
6283 + if (!ret)
6284 +- ret = sys_io_submit(ctx_id, nr, iocb64);
6285 ++ ret = do_io_submit(ctx_id, nr, iocb64, 1);
6286 + return ret;
6287 + }
6288 +
6289 +@@ -1077,70 +1150,21 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
6290 + {
6291 + compat_ssize_t tot_len;
6292 + struct iovec iovstack[UIO_FASTIOV];
6293 +- struct iovec *iov=iovstack, *vector;
6294 ++ struct iovec *iov;
6295 + ssize_t ret;
6296 +- int seg;
6297 + io_fn_t fn;
6298 + iov_fn_t fnv;
6299 +
6300 +- /*
6301 +- * SuS says "The readv() function *may* fail if the iovcnt argument
6302 +- * was less than or equal to 0, or greater than {IOV_MAX}. Linux has
6303 +- * traditionally returned zero for zero segments, so...
6304 +- */
6305 +- ret = 0;
6306 +- if (nr_segs == 0)
6307 +- goto out;
6308 +-
6309 +- /*
6310 +- * First get the "struct iovec" from user memory and
6311 +- * verify all the pointers
6312 +- */
6313 + ret = -EINVAL;
6314 +- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
6315 +- goto out;
6316 + if (!file->f_op)
6317 + goto out;
6318 +- if (nr_segs > UIO_FASTIOV) {
6319 +- ret = -ENOMEM;
6320 +- iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
6321 +- if (!iov)
6322 +- goto out;
6323 +- }
6324 ++
6325 + ret = -EFAULT;
6326 + if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector)))
6327 + goto out;
6328 +
6329 +- /*
6330 +- * Single unix specification:
6331 +- * We should -EINVAL if an element length is not >= 0 and fitting an
6332 +- * ssize_t. The total length is fitting an ssize_t
6333 +- *
6334 +- * Be careful here because iov_len is a size_t not an ssize_t
6335 +- */
6336 +- tot_len = 0;
6337 +- vector = iov;
6338 +- ret = -EINVAL;
6339 +- for (seg = 0 ; seg < nr_segs; seg++) {
6340 +- compat_ssize_t tmp = tot_len;
6341 +- compat_ssize_t len;
6342 +- compat_uptr_t buf;
6343 +-
6344 +- if (__get_user(len, &uvector->iov_len) ||
6345 +- __get_user(buf, &uvector->iov_base)) {
6346 +- ret = -EFAULT;
6347 +- goto out;
6348 +- }
6349 +- if (len < 0) /* size_t not fitting an compat_ssize_t .. */
6350 +- goto out;
6351 +- tot_len += len;
6352 +- if (tot_len < tmp) /* maths overflow on the compat_ssize_t */
6353 +- goto out;
6354 +- vector->iov_base = compat_ptr(buf);
6355 +- vector->iov_len = (compat_size_t) len;
6356 +- uvector++;
6357 +- vector++;
6358 +- }
6359 ++ tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs,
6360 ++ UIO_FASTIOV, iovstack, &iov);
6361 + if (tot_len == 0) {
6362 + ret = 0;
6363 + goto out;
6364 +diff --git a/fs/dcache.c b/fs/dcache.c
6365 +index f1358e5..2b6f09a 100644
6366 +--- a/fs/dcache.c
6367 ++++ b/fs/dcache.c
6368 +@@ -1529,6 +1529,7 @@ void d_delete(struct dentry * dentry)
6369 + spin_lock(&dentry->d_lock);
6370 + isdir = S_ISDIR(dentry->d_inode->i_mode);
6371 + if (atomic_read(&dentry->d_count) == 1) {
6372 ++ dentry->d_flags &= ~DCACHE_CANT_MOUNT;
6373 + dentry_iput(dentry);
6374 + fsnotify_nameremove(dentry, isdir);
6375 + return;
6376 +diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
6377 +index 4cfab1c..d91e9d8 100644
6378 +--- a/fs/exofs/dir.c
6379 ++++ b/fs/exofs/dir.c
6380 +@@ -608,7 +608,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
6381 + de->inode_no = cpu_to_le64(parent->i_ino);
6382 + memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR));
6383 + exofs_set_de_type(de, inode);
6384 +- kunmap_atomic(page, KM_USER0);
6385 ++ kunmap_atomic(kaddr, KM_USER0);
6386 + err = exofs_commit_chunk(page, 0, chunk_size);
6387 + fail:
6388 + page_cache_release(page);
6389 +diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
6390 +index d1fc662..d3d6a64 100644
6391 +--- a/fs/ext4/move_extent.c
6392 ++++ b/fs/ext4/move_extent.c
6393 +@@ -959,6 +959,9 @@ mext_check_arguments(struct inode *orig_inode,
6394 + return -EINVAL;
6395 + }
6396 +
6397 ++ if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
6398 ++ return -EPERM;
6399 ++
6400 + /* Ext4 move extent does not support swapfile */
6401 + if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
6402 + ext4_debug("ext4 move extent: The argument files should "
6403 +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
6404 +index 5692c48..6df797e 100644
6405 +--- a/fs/ext4/resize.c
6406 ++++ b/fs/ext4/resize.c
6407 +@@ -911,7 +911,8 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
6408 + percpu_counter_add(&sbi->s_freeinodes_counter,
6409 + EXT4_INODES_PER_GROUP(sb));
6410 +
6411 +- if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
6412 ++ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
6413 ++ sbi->s_log_groups_per_flex) {
6414 + ext4_group_t flex_group;
6415 + flex_group = ext4_flex_group(sbi, input->group);
6416 + atomic_add(input->free_blocks_count,
6417 +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
6418 +index 4b37f7c..760dc8d 100644
6419 +--- a/fs/fs-writeback.c
6420 ++++ b/fs/fs-writeback.c
6421 +@@ -852,6 +852,12 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
6422 + unsigned long expired;
6423 + long nr_pages;
6424 +
6425 ++ /*
6426 ++ * When set to zero, disable periodic writeback
6427 ++ */
6428 ++ if (!dirty_writeback_interval)
6429 ++ return 0;
6430 ++
6431 + expired = wb->last_old_flush +
6432 + msecs_to_jiffies(dirty_writeback_interval * 10);
6433 + if (time_before(jiffies, expired))
6434 +@@ -947,8 +953,12 @@ int bdi_writeback_task(struct bdi_writeback *wb)
6435 + break;
6436 + }
6437 +
6438 +- wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
6439 +- schedule_timeout_interruptible(wait_jiffies);
6440 ++ if (dirty_writeback_interval) {
6441 ++ wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
6442 ++ schedule_timeout_interruptible(wait_jiffies);
6443 ++ } else
6444 ++ schedule();
6445 ++
6446 + try_to_freeze();
6447 + }
6448 +
6449 +diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
6450 +index e6dd2ae..b20bfcc 100644
6451 +--- a/fs/gfs2/file.c
6452 ++++ b/fs/gfs2/file.c
6453 +@@ -218,6 +218,11 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
6454 + if (error)
6455 + goto out_drop_write;
6456 +
6457 ++ error = -EACCES;
6458 ++ if (!is_owner_or_cap(inode))
6459 ++ goto out;
6460 ++
6461 ++ error = 0;
6462 + flags = ip->i_diskflags;
6463 + new_flags = (flags & ~mask) | (reqflags & mask);
6464 + if ((new_flags ^ flags) == 0)
6465 +@@ -275,8 +280,10 @@ static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
6466 + {
6467 + struct inode *inode = filp->f_path.dentry->d_inode;
6468 + u32 fsflags, gfsflags;
6469 ++
6470 + if (get_user(fsflags, ptr))
6471 + return -EFAULT;
6472 ++
6473 + gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
6474 + if (!S_ISDIR(inode->i_mode)) {
6475 + if (gfsflags & GFS2_DIF_INHERIT_JDATA)
6476 +diff --git a/fs/libfs.c b/fs/libfs.c
6477 +index ea9a6cc..b016af9 100644
6478 +--- a/fs/libfs.c
6479 ++++ b/fs/libfs.c
6480 +@@ -418,7 +418,8 @@ int simple_write_end(struct file *file, struct address_space *mapping,
6481 + * unique inode values later for this filesystem, then you must take care
6482 + * to pass it an appropriate max_reserved value to avoid collisions.
6483 + */
6484 +-int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files)
6485 ++int simple_fill_super(struct super_block *s, unsigned long magic,
6486 ++ struct tree_descr *files)
6487 + {
6488 + struct inode *inode;
6489 + struct dentry *root;
6490 +diff --git a/fs/namei.c b/fs/namei.c
6491 +index b86b96f..f6c7fcf 100644
6492 +--- a/fs/namei.c
6493 ++++ b/fs/namei.c
6494 +@@ -1620,6 +1620,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
6495 + case LAST_DOTDOT:
6496 + follow_dotdot(nd);
6497 + dir = nd->path.dentry;
6498 ++ case LAST_DOT:
6499 + if (nd->path.mnt->mnt_sb->s_type->fs_flags & FS_REVAL_DOT) {
6500 + if (!dir->d_op->d_revalidate(dir, nd)) {
6501 + error = -ESTALE;
6502 +@@ -1627,7 +1628,6 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
6503 + }
6504 + }
6505 + /* fallthrough */
6506 +- case LAST_DOT:
6507 + case LAST_ROOT:
6508 + if (open_flag & O_CREAT)
6509 + goto exit;
6510 +diff --git a/fs/nfs/write.c b/fs/nfs/write.c
6511 +index 3aea3ca..91679e2 100644
6512 +--- a/fs/nfs/write.c
6513 ++++ b/fs/nfs/write.c
6514 +@@ -1386,7 +1386,7 @@ static int nfs_commit_inode(struct inode *inode, int how)
6515 + int res = 0;
6516 +
6517 + if (!nfs_commit_set_lock(NFS_I(inode), may_wait))
6518 +- goto out;
6519 ++ goto out_mark_dirty;
6520 + spin_lock(&inode->i_lock);
6521 + res = nfs_scan_commit(inode, &head, 0, 0);
6522 + spin_unlock(&inode->i_lock);
6523 +@@ -1398,9 +1398,18 @@ static int nfs_commit_inode(struct inode *inode, int how)
6524 + wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT,
6525 + nfs_wait_bit_killable,
6526 + TASK_KILLABLE);
6527 ++ else
6528 ++ goto out_mark_dirty;
6529 + } else
6530 + nfs_commit_clear_lock(NFS_I(inode));
6531 +-out:
6532 ++ return res;
6533 ++ /* Note: If we exit without ensuring that the commit is complete,
6534 ++ * we must mark the inode as dirty. Otherwise, future calls to
6535 ++ * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
6536 ++ * that the data is on the disk.
6537 ++ */
6538 ++out_mark_dirty:
6539 ++ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
6540 + return res;
6541 + }
6542 +
6543 +@@ -1509,14 +1518,17 @@ int nfs_wb_page(struct inode *inode, struct page *page)
6544 + };
6545 + int ret;
6546 +
6547 +- while(PagePrivate(page)) {
6548 ++ for (;;) {
6549 + wait_on_page_writeback(page);
6550 + if (clear_page_dirty_for_io(page)) {
6551 + ret = nfs_writepage_locked(page, &wbc);
6552 + if (ret < 0)
6553 + goto out_error;
6554 ++ continue;
6555 + }
6556 +- ret = sync_inode(inode, &wbc);
6557 ++ if (!PagePrivate(page))
6558 ++ break;
6559 ++ ret = nfs_commit_inode(inode, FLUSH_SYNC);
6560 + if (ret < 0)
6561 + goto out_error;
6562 + }
6563 +diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
6564 +index 171699e..06b2a26 100644
6565 +--- a/fs/nfsd/nfssvc.c
6566 ++++ b/fs/nfsd/nfssvc.c
6567 +@@ -120,7 +120,7 @@ u32 nfsd_supported_minorversion;
6568 + int nfsd_vers(int vers, enum vers_op change)
6569 + {
6570 + if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
6571 +- return -1;
6572 ++ return 0;
6573 + switch(change) {
6574 + case NFSD_SET:
6575 + nfsd_versions[vers] = nfsd_version[vers];
6576 +diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
6577 +index 6dd5f19..4eb9baa 100644
6578 +--- a/fs/nfsd/vfs.c
6579 ++++ b/fs/nfsd/vfs.c
6580 +@@ -443,8 +443,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
6581 + if (size_change)
6582 + put_write_access(inode);
6583 + if (!err)
6584 +- if (EX_ISSYNC(fhp->fh_export))
6585 +- write_inode_now(inode, 1);
6586 ++ commit_metadata(fhp);
6587 + out:
6588 + return err;
6589 +
6590 +@@ -724,7 +723,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
6591 + struct inode *inode;
6592 + int flags = O_RDONLY|O_LARGEFILE;
6593 + __be32 err;
6594 +- int host_err;
6595 ++ int host_err = 0;
6596 +
6597 + validate_process_creds();
6598 +
6599 +@@ -761,7 +760,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
6600 + * Check to see if there are any leases on this file.
6601 + * This may block while leases are broken.
6602 + */
6603 +- host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
6604 ++ if (!(access & NFSD_MAY_NOT_BREAK_LEASE))
6605 ++ host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
6606 + if (host_err == -EWOULDBLOCK)
6607 + host_err = -ETIMEDOUT;
6608 + if (host_err) /* NOMEM or WOULDBLOCK */
6609 +@@ -1169,7 +1169,8 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
6610 + goto out;
6611 + }
6612 +
6613 +- err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_WRITE, &file);
6614 ++ err = nfsd_open(rqstp, fhp, S_IFREG,
6615 ++ NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, &file);
6616 + if (err)
6617 + goto out;
6618 + if (EX_ISSYNC(fhp->fh_export)) {
6619 +diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
6620 +index 4b1de0a..217a62c 100644
6621 +--- a/fs/nfsd/vfs.h
6622 ++++ b/fs/nfsd/vfs.h
6623 +@@ -20,6 +20,7 @@
6624 + #define NFSD_MAY_OWNER_OVERRIDE 64
6625 + #define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/
6626 + #define NFSD_MAY_BYPASS_GSS_ON_ROOT 256
6627 ++#define NFSD_MAY_NOT_BREAK_LEASE 512
6628 +
6629 + #define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
6630 + #define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
6631 +diff --git a/include/linux/aio.h b/include/linux/aio.h
6632 +index 811dbb3..7a8db41 100644
6633 +--- a/include/linux/aio.h
6634 ++++ b/include/linux/aio.h
6635 +@@ -212,6 +212,8 @@ extern void kick_iocb(struct kiocb *iocb);
6636 + extern int aio_complete(struct kiocb *iocb, long res, long res2);
6637 + struct mm_struct;
6638 + extern void exit_aio(struct mm_struct *mm);
6639 ++extern long do_io_submit(aio_context_t ctx_id, long nr,
6640 ++ struct iocb __user *__user *iocbpp, bool compat);
6641 + #else
6642 + static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
6643 + static inline int aio_put_req(struct kiocb *iocb) { return 0; }
6644 +@@ -219,6 +221,9 @@ static inline void kick_iocb(struct kiocb *iocb) { }
6645 + static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; }
6646 + struct mm_struct;
6647 + static inline void exit_aio(struct mm_struct *mm) { }
6648 ++static inline long do_io_submit(aio_context_t ctx_id, long nr,
6649 ++ struct iocb __user * __user *iocbpp,
6650 ++ bool compat) { return 0; }
6651 + #endif /* CONFIG_AIO */
6652 +
6653 + static inline struct kiocb *list_kiocb(struct list_head *h)
6654 +diff --git a/include/linux/compat.h b/include/linux/compat.h
6655 +index 717c691..168f7da 100644
6656 +--- a/include/linux/compat.h
6657 ++++ b/include/linux/compat.h
6658 +@@ -356,5 +356,9 @@ asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename,
6659 + asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
6660 + int flags, int mode);
6661 +
6662 ++extern ssize_t compat_rw_copy_check_uvector(int type,
6663 ++ const struct compat_iovec __user *uvector, unsigned long nr_segs,
6664 ++ unsigned long fast_segs, struct iovec *fast_pointer,
6665 ++ struct iovec **ret_pointer);
6666 + #endif /* CONFIG_COMPAT */
6667 + #endif /* _LINUX_COMPAT_H */
6668 +diff --git a/include/linux/fs.h b/include/linux/fs.h
6669 +index 44f35ae..801b398 100644
6670 +--- a/include/linux/fs.h
6671 ++++ b/include/linux/fs.h
6672 +@@ -2356,7 +2356,7 @@ extern const struct file_operations simple_dir_operations;
6673 + extern const struct inode_operations simple_dir_inode_operations;
6674 + struct tree_descr { char *name; const struct file_operations *ops; int mode; };
6675 + struct dentry *d_alloc_name(struct dentry *, const char *);
6676 +-extern int simple_fill_super(struct super_block *, int, struct tree_descr *);
6677 ++extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *);
6678 + extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
6679 + extern void simple_release_fs(struct vfsmount **mount, int *count);
6680 +
6681 +diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
6682 +index 8b5f7cc..ce9cd11 100644
6683 +--- a/include/linux/miscdevice.h
6684 ++++ b/include/linux/miscdevice.h
6685 +@@ -3,6 +3,12 @@
6686 + #include <linux/module.h>
6687 + #include <linux/major.h>
6688 +
6689 ++/*
6690 ++ * These allocations are managed by device@××××××.org. If you use an
6691 ++ * entry that is not in assigned your entry may well be moved and
6692 ++ * reassigned, or set dynamic if a fixed value is not justified.
6693 ++ */
6694 ++
6695 + #define PSMOUSE_MINOR 1
6696 + #define MS_BUSMOUSE_MINOR 2
6697 + #define ATIXL_BUSMOUSE_MINOR 3
6698 +@@ -30,7 +36,6 @@
6699 + #define HPET_MINOR 228
6700 + #define FUSE_MINOR 229
6701 + #define KVM_MINOR 232
6702 +-#define VHOST_NET_MINOR 233
6703 + #define MISC_DYNAMIC_MINOR 255
6704 +
6705 + struct device;
6706 +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
6707 +index 9f688d2..c516a33 100644
6708 +--- a/include/linux/pci_ids.h
6709 ++++ b/include/linux/pci_ids.h
6710 +@@ -2321,6 +2321,7 @@
6711 + #define PCI_VENDOR_ID_JMICRON 0x197B
6712 + #define PCI_DEVICE_ID_JMICRON_JMB360 0x2360
6713 + #define PCI_DEVICE_ID_JMICRON_JMB361 0x2361
6714 ++#define PCI_DEVICE_ID_JMICRON_JMB362 0x2362
6715 + #define PCI_DEVICE_ID_JMICRON_JMB363 0x2363
6716 + #define PCI_DEVICE_ID_JMICRON_JMB365 0x2365
6717 + #define PCI_DEVICE_ID_JMICRON_JMB366 0x2366
6718 +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
6719 +index c8e3754..eea9188 100644
6720 +--- a/include/linux/perf_event.h
6721 ++++ b/include/linux/perf_event.h
6722 +@@ -531,6 +531,7 @@ enum perf_event_active_state {
6723 + struct file;
6724 +
6725 + struct perf_mmap_data {
6726 ++ atomic_t refcount;
6727 + struct rcu_head rcu_head;
6728 + #ifdef CONFIG_PERF_USE_VMALLOC
6729 + struct work_struct work;
6730 +@@ -538,7 +539,6 @@ struct perf_mmap_data {
6731 + int data_order;
6732 + int nr_pages; /* nr of data pages */
6733 + int writable; /* are we writable */
6734 +- int nr_locked; /* nr pages mlocked */
6735 +
6736 + atomic_t poll; /* POLL_ for wakeups */
6737 + atomic_t events; /* event_id limit */
6738 +@@ -582,7 +582,6 @@ struct perf_event {
6739 + int nr_siblings;
6740 + int group_flags;
6741 + struct perf_event *group_leader;
6742 +- struct perf_event *output;
6743 + const struct pmu *pmu;
6744 +
6745 + enum perf_event_active_state state;
6746 +@@ -643,6 +642,8 @@ struct perf_event {
6747 + /* mmap bits */
6748 + struct mutex mmap_mutex;
6749 + atomic_t mmap_count;
6750 ++ int mmap_locked;
6751 ++ struct user_struct *mmap_user;
6752 + struct perf_mmap_data *data;
6753 +
6754 + /* poll related */
6755 +diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
6756 +index 0249d41..c3dc7e1 100644
6757 +--- a/include/linux/slub_def.h
6758 ++++ b/include/linux/slub_def.h
6759 +@@ -75,12 +75,6 @@ struct kmem_cache {
6760 + int offset; /* Free pointer offset. */
6761 + struct kmem_cache_order_objects oo;
6762 +
6763 +- /*
6764 +- * Avoid an extra cache line for UP, SMP and for the node local to
6765 +- * struct kmem_cache.
6766 +- */
6767 +- struct kmem_cache_node local_node;
6768 +-
6769 + /* Allocation and freeing of slabs */
6770 + struct kmem_cache_order_objects max;
6771 + struct kmem_cache_order_objects min;
6772 +@@ -102,6 +96,9 @@ struct kmem_cache {
6773 + */
6774 + int remote_node_defrag_ratio;
6775 + struct kmem_cache_node *node[MAX_NUMNODES];
6776 ++#else
6777 ++ /* Avoid an extra cache line for UP */
6778 ++ struct kmem_cache_node local_node;
6779 + #endif
6780 + };
6781 +
6782 +@@ -132,7 +129,7 @@ struct kmem_cache {
6783 + #ifdef CONFIG_ZONE_DMA
6784 + #define SLUB_DMA __GFP_DMA
6785 + /* Reserve extra caches for potential DMA use */
6786 +-#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
6787 ++#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT)
6788 + #else
6789 + /* Disable DMA functionality */
6790 + #define SLUB_DMA (__force gfp_t)0
6791 +diff --git a/include/linux/swap.h b/include/linux/swap.h
6792 +index 1f59d93..d70f424 100644
6793 +--- a/include/linux/swap.h
6794 ++++ b/include/linux/swap.h
6795 +@@ -223,21 +223,11 @@ static inline void lru_cache_add_anon(struct page *page)
6796 + __lru_cache_add(page, LRU_INACTIVE_ANON);
6797 + }
6798 +
6799 +-static inline void lru_cache_add_active_anon(struct page *page)
6800 +-{
6801 +- __lru_cache_add(page, LRU_ACTIVE_ANON);
6802 +-}
6803 +-
6804 + static inline void lru_cache_add_file(struct page *page)
6805 + {
6806 + __lru_cache_add(page, LRU_INACTIVE_FILE);
6807 + }
6808 +
6809 +-static inline void lru_cache_add_active_file(struct page *page)
6810 +-{
6811 +- __lru_cache_add(page, LRU_ACTIVE_FILE);
6812 +-}
6813 +-
6814 + /* linux/mm/vmscan.c */
6815 + extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
6816 + gfp_t gfp_mask, nodemask_t *mask);
6817 +diff --git a/include/linux/tboot.h b/include/linux/tboot.h
6818 +index bf2a0c7..1dba6ee 100644
6819 +--- a/include/linux/tboot.h
6820 ++++ b/include/linux/tboot.h
6821 +@@ -150,6 +150,7 @@ extern int tboot_force_iommu(void);
6822 +
6823 + #else
6824 +
6825 ++#define tboot_enabled() 0
6826 + #define tboot_probe() do { } while (0)
6827 + #define tboot_shutdown(shutdown_type) do { } while (0)
6828 + #define tboot_sleep(sleep_state, pm1a_control, pm1b_control) \
6829 +diff --git a/include/linux/usb.h b/include/linux/usb.h
6830 +index 739f1fd..9983302 100644
6831 +--- a/include/linux/usb.h
6832 ++++ b/include/linux/usb.h
6833 +@@ -965,10 +965,19 @@ extern int usb_disabled(void);
6834 + * needed */
6835 + #define URB_FREE_BUFFER 0x0100 /* Free transfer buffer with the URB */
6836 +
6837 ++/* The following flags are used internally by usbcore and HCDs */
6838 + #define URB_DIR_IN 0x0200 /* Transfer from device to host */
6839 + #define URB_DIR_OUT 0
6840 + #define URB_DIR_MASK URB_DIR_IN
6841 +
6842 ++#define URB_DMA_MAP_SINGLE 0x00010000 /* Non-scatter-gather mapping */
6843 ++#define URB_DMA_MAP_PAGE 0x00020000 /* HCD-unsupported S-G */
6844 ++#define URB_DMA_MAP_SG 0x00040000 /* HCD-supported S-G */
6845 ++#define URB_MAP_LOCAL 0x00080000 /* HCD-local-memory mapping */
6846 ++#define URB_SETUP_MAP_SINGLE 0x00100000 /* Setup packet DMA mapped */
6847 ++#define URB_SETUP_MAP_LOCAL 0x00200000 /* HCD-local setup packet */
6848 ++#define URB_DMA_SG_COMBINED 0x00400000 /* S-G entries were combined */
6849 ++
6850 + struct usb_iso_packet_descriptor {
6851 + unsigned int offset;
6852 + unsigned int length; /* expected length */
6853 +diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
6854 +index a510b75..32c0697 100644
6855 +--- a/include/trace/events/signal.h
6856 ++++ b/include/trace/events/signal.h
6857 +@@ -10,7 +10,8 @@
6858 +
6859 + #define TP_STORE_SIGINFO(__entry, info) \
6860 + do { \
6861 +- if (info == SEND_SIG_NOINFO) { \
6862 ++ if (info == SEND_SIG_NOINFO || \
6863 ++ info == SEND_SIG_FORCED) { \
6864 + __entry->errno = 0; \
6865 + __entry->code = SI_USER; \
6866 + } else if (info == SEND_SIG_PRIV) { \
6867 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
6868 +index 6d870f2..bf4f78f 100644
6869 +--- a/kernel/cgroup.c
6870 ++++ b/kernel/cgroup.c
6871 +@@ -4599,7 +4599,7 @@ static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
6872 + parent_css = parent->subsys[subsys_id];
6873 + child_css = child->subsys[subsys_id];
6874 + parent_id = parent_css->id;
6875 +- depth = parent_id->depth;
6876 ++ depth = parent_id->depth + 1;
6877 +
6878 + child_id = get_new_cssid(ss, depth);
6879 + if (IS_ERR(child_id))
6880 +diff --git a/kernel/compat.c b/kernel/compat.c
6881 +index 7f40e92..5adab05 100644
6882 +--- a/kernel/compat.c
6883 ++++ b/kernel/compat.c
6884 +@@ -495,29 +495,26 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
6885 + {
6886 + int ret;
6887 + cpumask_var_t mask;
6888 +- unsigned long *k;
6889 +- unsigned int min_length = cpumask_size();
6890 +-
6891 +- if (nr_cpu_ids <= BITS_PER_COMPAT_LONG)
6892 +- min_length = sizeof(compat_ulong_t);
6893 +
6894 +- if (len < min_length)
6895 ++ if ((len * BITS_PER_BYTE) < nr_cpu_ids)
6896 ++ return -EINVAL;
6897 ++ if (len & (sizeof(compat_ulong_t)-1))
6898 + return -EINVAL;
6899 +
6900 + if (!alloc_cpumask_var(&mask, GFP_KERNEL))
6901 + return -ENOMEM;
6902 +
6903 + ret = sched_getaffinity(pid, mask);
6904 +- if (ret < 0)
6905 +- goto out;
6906 ++ if (ret == 0) {
6907 ++ size_t retlen = min_t(size_t, len, cpumask_size());
6908 +
6909 +- k = cpumask_bits(mask);
6910 +- ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
6911 +- if (ret == 0)
6912 +- ret = min_length;
6913 +-
6914 +-out:
6915 ++ if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
6916 ++ ret = -EFAULT;
6917 ++ else
6918 ++ ret = retlen;
6919 ++ }
6920 + free_cpumask_var(mask);
6921 ++
6922 + return ret;
6923 + }
6924 +
6925 +diff --git a/kernel/mutex.c b/kernel/mutex.c
6926 +index 632f04c..4c0b7b3 100644
6927 +--- a/kernel/mutex.c
6928 ++++ b/kernel/mutex.c
6929 +@@ -172,6 +172,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
6930 + struct thread_info *owner;
6931 +
6932 + /*
6933 ++ * If we own the BKL, then don't spin. The owner of
6934 ++ * the mutex might be waiting on us to release the BKL.
6935 ++ */
6936 ++ if (unlikely(current->lock_depth >= 0))
6937 ++ break;
6938 ++
6939 ++ /*
6940 + * If there's an owner, wait for it to either
6941 + * release the lock or go to sleep.
6942 + */
6943 +diff --git a/kernel/perf_event.c b/kernel/perf_event.c
6944 +index 3d1552d..a244651 100644
6945 +--- a/kernel/perf_event.c
6946 ++++ b/kernel/perf_event.c
6947 +@@ -262,6 +262,18 @@ static void update_event_times(struct perf_event *event)
6948 + event->total_time_running = run_end - event->tstamp_running;
6949 + }
6950 +
6951 ++/*
6952 ++ * Update total_time_enabled and total_time_running for all events in a group.
6953 ++ */
6954 ++static void update_group_times(struct perf_event *leader)
6955 ++{
6956 ++ struct perf_event *event;
6957 ++
6958 ++ update_event_times(leader);
6959 ++ list_for_each_entry(event, &leader->sibling_list, group_entry)
6960 ++ update_event_times(event);
6961 ++}
6962 ++
6963 + static struct list_head *
6964 + ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
6965 + {
6966 +@@ -315,8 +327,6 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
6967 + static void
6968 + list_del_event(struct perf_event *event, struct perf_event_context *ctx)
6969 + {
6970 +- struct perf_event *sibling, *tmp;
6971 +-
6972 + if (list_empty(&event->group_entry))
6973 + return;
6974 + ctx->nr_events--;
6975 +@@ -329,7 +339,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
6976 + if (event->group_leader != event)
6977 + event->group_leader->nr_siblings--;
6978 +
6979 +- update_event_times(event);
6980 ++ update_group_times(event);
6981 +
6982 + /*
6983 + * If event was in error state, then keep it
6984 +@@ -340,6 +350,12 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
6985 + */
6986 + if (event->state > PERF_EVENT_STATE_OFF)
6987 + event->state = PERF_EVENT_STATE_OFF;
6988 ++}
6989 ++
6990 ++static void
6991 ++perf_destroy_group(struct perf_event *event, struct perf_event_context *ctx)
6992 ++{
6993 ++ struct perf_event *sibling, *tmp;
6994 +
6995 + /*
6996 + * If this was a group event with sibling events then
6997 +@@ -505,18 +521,6 @@ retry:
6998 + }
6999 +
7000 + /*
7001 +- * Update total_time_enabled and total_time_running for all events in a group.
7002 +- */
7003 +-static void update_group_times(struct perf_event *leader)
7004 +-{
7005 +- struct perf_event *event;
7006 +-
7007 +- update_event_times(leader);
7008 +- list_for_each_entry(event, &leader->sibling_list, group_entry)
7009 +- update_event_times(event);
7010 +-}
7011 +-
7012 +-/*
7013 + * Cross CPU call to disable a performance event
7014 + */
7015 + static void __perf_event_disable(void *info)
7016 +@@ -1452,6 +1456,9 @@ do { \
7017 + divisor = nsec * frequency;
7018 + }
7019 +
7020 ++ if (!divisor)
7021 ++ return dividend;
7022 ++
7023 + return div64_u64(dividend, divisor);
7024 + }
7025 +
7026 +@@ -1474,7 +1481,7 @@ static int perf_event_start(struct perf_event *event)
7027 + static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
7028 + {
7029 + struct hw_perf_event *hwc = &event->hw;
7030 +- u64 period, sample_period;
7031 ++ s64 period, sample_period;
7032 + s64 delta;
7033 +
7034 + period = perf_calculate_period(event, nsec, count);
7035 +@@ -1825,6 +1832,7 @@ static void free_event_rcu(struct rcu_head *head)
7036 + }
7037 +
7038 + static void perf_pending_sync(struct perf_event *event);
7039 ++static void perf_mmap_data_put(struct perf_mmap_data *data);
7040 +
7041 + static void free_event(struct perf_event *event)
7042 + {
7043 +@@ -1840,9 +1848,9 @@ static void free_event(struct perf_event *event)
7044 + atomic_dec(&nr_task_events);
7045 + }
7046 +
7047 +- if (event->output) {
7048 +- fput(event->output->filp);
7049 +- event->output = NULL;
7050 ++ if (event->data) {
7051 ++ perf_mmap_data_put(event->data);
7052 ++ event->data = NULL;
7053 + }
7054 +
7055 + if (event->destroy)
7056 +@@ -1856,9 +1864,18 @@ int perf_event_release_kernel(struct perf_event *event)
7057 + {
7058 + struct perf_event_context *ctx = event->ctx;
7059 +
7060 ++ /*
7061 ++ * Remove from the PMU, can't get re-enabled since we got
7062 ++ * here because the last ref went.
7063 ++ */
7064 ++ perf_event_disable(event);
7065 ++
7066 + WARN_ON_ONCE(ctx->parent_ctx);
7067 + mutex_lock(&ctx->mutex);
7068 +- perf_event_remove_from_context(event);
7069 ++ raw_spin_lock_irq(&ctx->lock);
7070 ++ list_del_event(event, ctx);
7071 ++ perf_destroy_group(event, ctx);
7072 ++ raw_spin_unlock_irq(&ctx->lock);
7073 + mutex_unlock(&ctx->mutex);
7074 +
7075 + mutex_lock(&event->owner->perf_event_mutex);
7076 +@@ -2138,7 +2155,27 @@ unlock:
7077 + return ret;
7078 + }
7079 +
7080 +-static int perf_event_set_output(struct perf_event *event, int output_fd);
7081 ++static const struct file_operations perf_fops;
7082 ++
7083 ++static struct perf_event *perf_fget_light(int fd, int *fput_needed)
7084 ++{
7085 ++ struct file *file;
7086 ++
7087 ++ file = fget_light(fd, fput_needed);
7088 ++ if (!file)
7089 ++ return ERR_PTR(-EBADF);
7090 ++
7091 ++ if (file->f_op != &perf_fops) {
7092 ++ fput_light(file, *fput_needed);
7093 ++ *fput_needed = 0;
7094 ++ return ERR_PTR(-EBADF);
7095 ++ }
7096 ++
7097 ++ return file->private_data;
7098 ++}
7099 ++
7100 ++static int perf_event_set_output(struct perf_event *event,
7101 ++ struct perf_event *output_event);
7102 + static int perf_event_set_filter(struct perf_event *event, void __user *arg);
7103 +
7104 + static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
7105 +@@ -2165,7 +2202,23 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
7106 + return perf_event_period(event, (u64 __user *)arg);
7107 +
7108 + case PERF_EVENT_IOC_SET_OUTPUT:
7109 +- return perf_event_set_output(event, arg);
7110 ++ {
7111 ++ struct perf_event *output_event = NULL;
7112 ++ int fput_needed = 0;
7113 ++ int ret;
7114 ++
7115 ++ if (arg != -1) {
7116 ++ output_event = perf_fget_light(arg, &fput_needed);
7117 ++ if (IS_ERR(output_event))
7118 ++ return PTR_ERR(output_event);
7119 ++ }
7120 ++
7121 ++ ret = perf_event_set_output(event, output_event);
7122 ++ if (output_event)
7123 ++ fput_light(output_event->filp, fput_needed);
7124 ++
7125 ++ return ret;
7126 ++ }
7127 +
7128 + case PERF_EVENT_IOC_SET_FILTER:
7129 + return perf_event_set_filter(event, (void __user *)arg);
7130 +@@ -2290,8 +2343,6 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
7131 + unsigned long size;
7132 + int i;
7133 +
7134 +- WARN_ON(atomic_read(&event->mmap_count));
7135 +-
7136 + size = sizeof(struct perf_mmap_data);
7137 + size += nr_pages * sizeof(void *);
7138 +
7139 +@@ -2398,8 +2449,6 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
7140 + unsigned long size;
7141 + void *all_buf;
7142 +
7143 +- WARN_ON(atomic_read(&event->mmap_count));
7144 +-
7145 + size = sizeof(struct perf_mmap_data);
7146 + size += sizeof(void *);
7147 +
7148 +@@ -2479,7 +2528,7 @@ perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
7149 + if (!data->watermark)
7150 + data->watermark = max_size / 2;
7151 +
7152 +-
7153 ++ atomic_set(&data->refcount, 1);
7154 + rcu_assign_pointer(event->data, data);
7155 + }
7156 +
7157 +@@ -2491,13 +2540,26 @@ static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
7158 + perf_mmap_data_free(data);
7159 + }
7160 +
7161 +-static void perf_mmap_data_release(struct perf_event *event)
7162 ++static struct perf_mmap_data *perf_mmap_data_get(struct perf_event *event)
7163 + {
7164 +- struct perf_mmap_data *data = event->data;
7165 ++ struct perf_mmap_data *data;
7166 ++
7167 ++ rcu_read_lock();
7168 ++ data = rcu_dereference(event->data);
7169 ++ if (data) {
7170 ++ if (!atomic_inc_not_zero(&data->refcount))
7171 ++ data = NULL;
7172 ++ }
7173 ++ rcu_read_unlock();
7174 +
7175 +- WARN_ON(atomic_read(&event->mmap_count));
7176 ++ return data;
7177 ++}
7178 ++
7179 ++static void perf_mmap_data_put(struct perf_mmap_data *data)
7180 ++{
7181 ++ if (!atomic_dec_and_test(&data->refcount))
7182 ++ return;
7183 +
7184 +- rcu_assign_pointer(event->data, NULL);
7185 + call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
7186 + }
7187 +
7188 +@@ -2512,15 +2574,18 @@ static void perf_mmap_close(struct vm_area_struct *vma)
7189 + {
7190 + struct perf_event *event = vma->vm_file->private_data;
7191 +
7192 +- WARN_ON_ONCE(event->ctx->parent_ctx);
7193 + if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
7194 + unsigned long size = perf_data_size(event->data);
7195 +- struct user_struct *user = current_user();
7196 ++ struct user_struct *user = event->mmap_user;
7197 ++ struct perf_mmap_data *data = event->data;
7198 +
7199 + atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
7200 +- vma->vm_mm->locked_vm -= event->data->nr_locked;
7201 +- perf_mmap_data_release(event);
7202 ++ vma->vm_mm->locked_vm -= event->mmap_locked;
7203 ++ rcu_assign_pointer(event->data, NULL);
7204 + mutex_unlock(&event->mmap_mutex);
7205 ++
7206 ++ perf_mmap_data_put(data);
7207 ++ free_uid(user);
7208 + }
7209 + }
7210 +
7211 +@@ -2564,13 +2629,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
7212 +
7213 + WARN_ON_ONCE(event->ctx->parent_ctx);
7214 + mutex_lock(&event->mmap_mutex);
7215 +- if (event->output) {
7216 +- ret = -EINVAL;
7217 +- goto unlock;
7218 +- }
7219 +-
7220 +- if (atomic_inc_not_zero(&event->mmap_count)) {
7221 +- if (nr_pages != event->data->nr_pages)
7222 ++ if (event->data) {
7223 ++ if (event->data->nr_pages == nr_pages)
7224 ++ atomic_inc(&event->data->refcount);
7225 ++ else
7226 + ret = -EINVAL;
7227 + goto unlock;
7228 + }
7229 +@@ -2602,21 +2664,23 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
7230 + WARN_ON(event->data);
7231 +
7232 + data = perf_mmap_data_alloc(event, nr_pages);
7233 +- ret = -ENOMEM;
7234 +- if (!data)
7235 ++ if (!data) {
7236 ++ ret = -ENOMEM;
7237 + goto unlock;
7238 ++ }
7239 +
7240 +- ret = 0;
7241 + perf_mmap_data_init(event, data);
7242 +-
7243 +- atomic_set(&event->mmap_count, 1);
7244 +- atomic_long_add(user_extra, &user->locked_vm);
7245 +- vma->vm_mm->locked_vm += extra;
7246 +- event->data->nr_locked = extra;
7247 + if (vma->vm_flags & VM_WRITE)
7248 + event->data->writable = 1;
7249 +
7250 ++ atomic_long_add(user_extra, &user->locked_vm);
7251 ++ event->mmap_locked = extra;
7252 ++ event->mmap_user = get_current_user();
7253 ++ vma->vm_mm->locked_vm += event->mmap_locked;
7254 ++
7255 + unlock:
7256 ++ if (!ret)
7257 ++ atomic_inc(&event->mmap_count);
7258 + mutex_unlock(&event->mmap_mutex);
7259 +
7260 + vma->vm_flags |= VM_RESERVED;
7261 +@@ -2946,7 +3010,6 @@ int perf_output_begin(struct perf_output_handle *handle,
7262 + struct perf_event *event, unsigned int size,
7263 + int nmi, int sample)
7264 + {
7265 +- struct perf_event *output_event;
7266 + struct perf_mmap_data *data;
7267 + unsigned long tail, offset, head;
7268 + int have_lost;
7269 +@@ -2963,10 +3026,6 @@ int perf_output_begin(struct perf_output_handle *handle,
7270 + if (event->parent)
7271 + event = event->parent;
7272 +
7273 +- output_event = rcu_dereference(event->output);
7274 +- if (output_event)
7275 +- event = output_event;
7276 +-
7277 + data = rcu_dereference(event->data);
7278 + if (!data)
7279 + goto out;
7280 +@@ -4730,54 +4789,41 @@ err_size:
7281 + goto out;
7282 + }
7283 +
7284 +-static int perf_event_set_output(struct perf_event *event, int output_fd)
7285 ++static int
7286 ++perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
7287 + {
7288 +- struct perf_event *output_event = NULL;
7289 +- struct file *output_file = NULL;
7290 +- struct perf_event *old_output;
7291 +- int fput_needed = 0;
7292 ++ struct perf_mmap_data *data = NULL, *old_data = NULL;
7293 + int ret = -EINVAL;
7294 +
7295 +- if (!output_fd)
7296 ++ if (!output_event)
7297 + goto set;
7298 +
7299 +- output_file = fget_light(output_fd, &fput_needed);
7300 +- if (!output_file)
7301 +- return -EBADF;
7302 +-
7303 +- if (output_file->f_op != &perf_fops)
7304 ++ /* don't allow circular references */
7305 ++ if (event == output_event)
7306 + goto out;
7307 +
7308 +- output_event = output_file->private_data;
7309 +-
7310 +- /* Don't chain output fds */
7311 +- if (output_event->output)
7312 +- goto out;
7313 +-
7314 +- /* Don't set an output fd when we already have an output channel */
7315 +- if (event->data)
7316 +- goto out;
7317 +-
7318 +- atomic_long_inc(&output_file->f_count);
7319 +-
7320 + set:
7321 + mutex_lock(&event->mmap_mutex);
7322 +- old_output = event->output;
7323 +- rcu_assign_pointer(event->output, output_event);
7324 +- mutex_unlock(&event->mmap_mutex);
7325 ++ /* Can't redirect output if we've got an active mmap() */
7326 ++ if (atomic_read(&event->mmap_count))
7327 ++ goto unlock;
7328 +
7329 +- if (old_output) {
7330 +- /*
7331 +- * we need to make sure no existing perf_output_*()
7332 +- * is still referencing this event.
7333 +- */
7334 +- synchronize_rcu();
7335 +- fput(old_output->filp);
7336 ++ if (output_event) {
7337 ++ /* get the buffer we want to redirect to */
7338 ++ data = perf_mmap_data_get(output_event);
7339 ++ if (!data)
7340 ++ goto unlock;
7341 + }
7342 +
7343 ++ old_data = event->data;
7344 ++ rcu_assign_pointer(event->data, data);
7345 + ret = 0;
7346 ++unlock:
7347 ++ mutex_unlock(&event->mmap_mutex);
7348 ++
7349 ++ if (old_data)
7350 ++ perf_mmap_data_put(old_data);
7351 + out:
7352 +- fput_light(output_file, fput_needed);
7353 + return ret;
7354 + }
7355 +
7356 +@@ -4793,13 +4839,13 @@ SYSCALL_DEFINE5(perf_event_open,
7357 + struct perf_event_attr __user *, attr_uptr,
7358 + pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
7359 + {
7360 +- struct perf_event *event, *group_leader;
7361 ++ struct perf_event *event, *group_leader = NULL, *output_event = NULL;
7362 + struct perf_event_attr attr;
7363 + struct perf_event_context *ctx;
7364 + struct file *event_file = NULL;
7365 + struct file *group_file = NULL;
7366 ++ int event_fd;
7367 + int fput_needed = 0;
7368 +- int fput_needed2 = 0;
7369 + int err;
7370 +
7371 + /* for future expandability... */
7372 +@@ -4820,26 +4866,38 @@ SYSCALL_DEFINE5(perf_event_open,
7373 + return -EINVAL;
7374 + }
7375 +
7376 ++ event_fd = get_unused_fd_flags(O_RDWR);
7377 ++ if (event_fd < 0)
7378 ++ return event_fd;
7379 ++
7380 ++ if (group_fd != -1) {
7381 ++ group_leader = perf_fget_light(group_fd, &fput_needed);
7382 ++ if (IS_ERR(group_leader)) {
7383 ++ err = PTR_ERR(group_leader);
7384 ++ goto err_put_context;
7385 ++ }
7386 ++ group_file = group_leader->filp;
7387 ++ if (flags & PERF_FLAG_FD_OUTPUT)
7388 ++ output_event = group_leader;
7389 ++ if (flags & PERF_FLAG_FD_NO_GROUP)
7390 ++ group_leader = NULL;
7391 ++ }
7392 ++
7393 + /*
7394 + * Get the target context (task or percpu):
7395 + */
7396 + ctx = find_get_context(pid, cpu);
7397 +- if (IS_ERR(ctx))
7398 +- return PTR_ERR(ctx);
7399 ++ if (IS_ERR(ctx)) {
7400 ++ err = PTR_ERR(ctx);
7401 ++ goto err_fd;
7402 ++ }
7403 +
7404 + /*
7405 + * Look up the group leader (we will attach this event to it):
7406 + */
7407 +- group_leader = NULL;
7408 +- if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
7409 ++ if (group_leader) {
7410 + err = -EINVAL;
7411 +- group_file = fget_light(group_fd, &fput_needed);
7412 +- if (!group_file)
7413 +- goto err_put_context;
7414 +- if (group_file->f_op != &perf_fops)
7415 +- goto err_put_context;
7416 +
7417 +- group_leader = group_file->private_data;
7418 + /*
7419 + * Do not allow a recursive hierarchy (this new sibling
7420 + * becoming part of another group-sibling):
7421 +@@ -4861,22 +4919,21 @@ SYSCALL_DEFINE5(perf_event_open,
7422 +
7423 + event = perf_event_alloc(&attr, cpu, ctx, group_leader,
7424 + NULL, NULL, GFP_KERNEL);
7425 +- err = PTR_ERR(event);
7426 +- if (IS_ERR(event))
7427 ++ if (IS_ERR(event)) {
7428 ++ err = PTR_ERR(event);
7429 + goto err_put_context;
7430 ++ }
7431 +
7432 +- err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR);
7433 +- if (err < 0)
7434 +- goto err_free_put_context;
7435 ++ if (output_event) {
7436 ++ err = perf_event_set_output(event, output_event);
7437 ++ if (err)
7438 ++ goto err_free_put_context;
7439 ++ }
7440 +
7441 +- event_file = fget_light(err, &fput_needed2);
7442 +- if (!event_file)
7443 ++ event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
7444 ++ if (IS_ERR(event_file)) {
7445 ++ err = PTR_ERR(event_file);
7446 + goto err_free_put_context;
7447 +-
7448 +- if (flags & PERF_FLAG_FD_OUTPUT) {
7449 +- err = perf_event_set_output(event, group_fd);
7450 +- if (err)
7451 +- goto err_fput_free_put_context;
7452 + }
7453 +
7454 + event->filp = event_file;
7455 +@@ -4892,19 +4949,17 @@ SYSCALL_DEFINE5(perf_event_open,
7456 + list_add_tail(&event->owner_entry, &current->perf_event_list);
7457 + mutex_unlock(&current->perf_event_mutex);
7458 +
7459 +-err_fput_free_put_context:
7460 +- fput_light(event_file, fput_needed2);
7461 ++ fput_light(group_file, fput_needed);
7462 ++ fd_install(event_fd, event_file);
7463 ++ return event_fd;
7464 +
7465 + err_free_put_context:
7466 +- if (err < 0)
7467 +- free_event(event);
7468 +-
7469 ++ free_event(event);
7470 + err_put_context:
7471 +- if (err < 0)
7472 +- put_ctx(ctx);
7473 +-
7474 + fput_light(group_file, fput_needed);
7475 +-
7476 ++ put_ctx(ctx);
7477 ++err_fd:
7478 ++ put_unused_fd(event_fd);
7479 + return err;
7480 + }
7481 +
7482 +diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
7483 +index 00d1fda..ad72342 100644
7484 +--- a/kernel/posix-timers.c
7485 ++++ b/kernel/posix-timers.c
7486 +@@ -559,14 +559,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
7487 + new_timer->it_id = (timer_t) new_timer_id;
7488 + new_timer->it_clock = which_clock;
7489 + new_timer->it_overrun = -1;
7490 +- error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
7491 +- if (error)
7492 +- goto out;
7493 +
7494 +- /*
7495 +- * return the timer_id now. The next step is hard to
7496 +- * back out if there is an error.
7497 +- */
7498 + if (copy_to_user(created_timer_id,
7499 + &new_timer_id, sizeof (new_timer_id))) {
7500 + error = -EFAULT;
7501 +@@ -597,6 +590,10 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
7502 + new_timer->sigq->info.si_tid = new_timer->it_id;
7503 + new_timer->sigq->info.si_code = SI_TIMER;
7504 +
7505 ++ error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
7506 ++ if (error)
7507 ++ goto out;
7508 ++
7509 + spin_lock_irq(&current->sighand->siglock);
7510 + new_timer->it_signal = current->signal;
7511 + list_add(&new_timer->list, &current->signal->posix_timers);
7512 +diff --git a/kernel/signal.c b/kernel/signal.c
7513 +index dbd7fe0..48f2130 100644
7514 +--- a/kernel/signal.c
7515 ++++ b/kernel/signal.c
7516 +@@ -642,7 +642,7 @@ static inline bool si_fromuser(const struct siginfo *info)
7517 + static int check_kill_permission(int sig, struct siginfo *info,
7518 + struct task_struct *t)
7519 + {
7520 +- const struct cred *cred = current_cred(), *tcred;
7521 ++ const struct cred *cred, *tcred;
7522 + struct pid *sid;
7523 + int error;
7524 +
7525 +@@ -656,8 +656,10 @@ static int check_kill_permission(int sig, struct siginfo *info,
7526 + if (error)
7527 + return error;
7528 +
7529 ++ cred = current_cred();
7530 + tcred = __task_cred(t);
7531 +- if ((cred->euid ^ tcred->suid) &&
7532 ++ if (!same_thread_group(current, t) &&
7533 ++ (cred->euid ^ tcred->suid) &&
7534 + (cred->euid ^ tcred->uid) &&
7535 + (cred->uid ^ tcred->suid) &&
7536 + (cred->uid ^ tcred->uid) &&
7537 +diff --git a/lib/idr.c b/lib/idr.c
7538 +index 2eb1dca..0d74f6b 100644
7539 +--- a/lib/idr.c
7540 ++++ b/lib/idr.c
7541 +@@ -445,6 +445,7 @@ EXPORT_SYMBOL(idr_remove);
7542 + void idr_remove_all(struct idr *idp)
7543 + {
7544 + int n, id, max;
7545 ++ int bt_mask;
7546 + struct idr_layer *p;
7547 + struct idr_layer *pa[MAX_LEVEL];
7548 + struct idr_layer **paa = &pa[0];
7549 +@@ -462,8 +463,10 @@ void idr_remove_all(struct idr *idp)
7550 + p = p->ary[(id >> n) & IDR_MASK];
7551 + }
7552 +
7553 ++ bt_mask = id;
7554 + id += 1 << n;
7555 +- while (n < fls(id)) {
7556 ++ /* Get the highest bit that the above add changed from 0->1. */
7557 ++ while (n < fls(id ^ bt_mask)) {
7558 + if (p)
7559 + free_layer(p);
7560 + n += IDR_BITS;
7561 +diff --git a/mm/filemap.c b/mm/filemap.c
7562 +index 140ebda..3760bdc 100644
7563 +--- a/mm/filemap.c
7564 ++++ b/mm/filemap.c
7565 +@@ -441,7 +441,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
7566 + /*
7567 + * Splice_read and readahead add shmem/tmpfs pages into the page cache
7568 + * before shmem_readpage has a chance to mark them as SwapBacked: they
7569 +- * need to go on the active_anon lru below, and mem_cgroup_cache_charge
7570 ++ * need to go on the anon lru below, and mem_cgroup_cache_charge
7571 + * (called in add_to_page_cache) needs to know where they're going too.
7572 + */
7573 + if (mapping_cap_swap_backed(mapping))
7574 +@@ -452,7 +452,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
7575 + if (page_is_file_cache(page))
7576 + lru_cache_add_file(page);
7577 + else
7578 +- lru_cache_add_active_anon(page);
7579 ++ lru_cache_add_anon(page);
7580 + }
7581 + return ret;
7582 + }
7583 +@@ -1099,6 +1099,12 @@ page_not_up_to_date_locked:
7584 + }
7585 +
7586 + readpage:
7587 ++ /*
7588 ++ * A previous I/O error may have been due to temporary
7589 ++ * failures, eg. multipath errors.
7590 ++ * PG_error will be set again if readpage fails.
7591 ++ */
7592 ++ ClearPageError(page);
7593 + /* Start the actual read. The read will unlock the page. */
7594 + error = mapping->a_ops->readpage(filp, page);
7595 +
7596 +diff --git a/mm/slub.c b/mm/slub.c
7597 +index d2a54fe..6cf6be7 100644
7598 +--- a/mm/slub.c
7599 ++++ b/mm/slub.c
7600 +@@ -2141,7 +2141,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
7601 +
7602 + for_each_node_state(node, N_NORMAL_MEMORY) {
7603 + struct kmem_cache_node *n = s->node[node];
7604 +- if (n && n != &s->local_node)
7605 ++ if (n)
7606 + kmem_cache_free(kmalloc_caches, n);
7607 + s->node[node] = NULL;
7608 + }
7609 +@@ -2150,33 +2150,22 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
7610 + static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
7611 + {
7612 + int node;
7613 +- int local_node;
7614 +-
7615 +- if (slab_state >= UP && (s < kmalloc_caches ||
7616 +- s >= kmalloc_caches + KMALLOC_CACHES))
7617 +- local_node = page_to_nid(virt_to_page(s));
7618 +- else
7619 +- local_node = 0;
7620 +
7621 + for_each_node_state(node, N_NORMAL_MEMORY) {
7622 + struct kmem_cache_node *n;
7623 +
7624 +- if (local_node == node)
7625 +- n = &s->local_node;
7626 +- else {
7627 +- if (slab_state == DOWN) {
7628 +- early_kmem_cache_node_alloc(gfpflags, node);
7629 +- continue;
7630 +- }
7631 +- n = kmem_cache_alloc_node(kmalloc_caches,
7632 +- gfpflags, node);
7633 +-
7634 +- if (!n) {
7635 +- free_kmem_cache_nodes(s);
7636 +- return 0;
7637 +- }
7638 ++ if (slab_state == DOWN) {
7639 ++ early_kmem_cache_node_alloc(gfpflags, node);
7640 ++ continue;
7641 ++ }
7642 ++ n = kmem_cache_alloc_node(kmalloc_caches,
7643 ++ gfpflags, node);
7644 +
7645 ++ if (!n) {
7646 ++ free_kmem_cache_nodes(s);
7647 ++ return 0;
7648 + }
7649 ++
7650 + s->node[node] = n;
7651 + init_kmem_cache_node(n, s);
7652 + }
7653 +diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
7654 +index a952b7f..334c359 100644
7655 +--- a/net/mac80211/Kconfig
7656 ++++ b/net/mac80211/Kconfig
7657 +@@ -15,8 +15,12 @@ comment "CFG80211 needs to be enabled for MAC80211"
7658 +
7659 + if MAC80211 != n
7660 +
7661 ++config MAC80211_HAS_RC
7662 ++ def_bool n
7663 ++
7664 + config MAC80211_RC_PID
7665 + bool "PID controller based rate control algorithm" if EMBEDDED
7666 ++ select MAC80211_HAS_RC
7667 + ---help---
7668 + This option enables a TX rate control algorithm for
7669 + mac80211 that uses a PID controller to select the TX
7670 +@@ -24,12 +28,14 @@ config MAC80211_RC_PID
7671 +
7672 + config MAC80211_RC_MINSTREL
7673 + bool "Minstrel" if EMBEDDED
7674 ++ select MAC80211_HAS_RC
7675 + default y
7676 + ---help---
7677 + This option enables the 'minstrel' TX rate control algorithm
7678 +
7679 + choice
7680 + prompt "Default rate control algorithm"
7681 ++ depends on MAC80211_HAS_RC
7682 + default MAC80211_RC_DEFAULT_MINSTREL
7683 + ---help---
7684 + This option selects the default rate control algorithm
7685 +@@ -62,6 +68,9 @@ config MAC80211_RC_DEFAULT
7686 +
7687 + endif
7688 +
7689 ++comment "Some wireless drivers require a rate control algorithm"
7690 ++ depends on MAC80211_HAS_RC=n
7691 ++
7692 + config MAC80211_MESH
7693 + bool "Enable mac80211 mesh networking (pre-802.11s) support"
7694 + depends on MAC80211 && EXPERIMENTAL
7695 +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
7696 +index edc872e..0d1811b 100644
7697 +--- a/net/mac80211/cfg.c
7698 ++++ b/net/mac80211/cfg.c
7699 +@@ -97,9 +97,6 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
7700 + params->mesh_id_len,
7701 + params->mesh_id);
7702 +
7703 +- if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags)
7704 +- return 0;
7705 +-
7706 + if (type == NL80211_IFTYPE_AP_VLAN &&
7707 + params && params->use_4addr == 0)
7708 + rcu_assign_pointer(sdata->u.vlan.sta, NULL);
7709 +@@ -107,7 +104,9 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
7710 + params && params->use_4addr >= 0)
7711 + sdata->u.mgd.use_4addr = params->use_4addr;
7712 +
7713 +- sdata->u.mntr_flags = *flags;
7714 ++ if (sdata->vif.type == NL80211_IFTYPE_MONITOR && flags)
7715 ++ sdata->u.mntr_flags = *flags;
7716 ++
7717 + return 0;
7718 + }
7719 +
7720 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
7721 +index 875c8de..1349a09 100644
7722 +--- a/net/mac80211/mlme.c
7723 ++++ b/net/mac80211/mlme.c
7724 +@@ -1530,9 +1530,45 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
7725 + mutex_unlock(&ifmgd->mtx);
7726 +
7727 + if (skb->len >= 24 + 2 /* mgmt + deauth reason */ &&
7728 +- (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH)
7729 +- cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
7730 ++ (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) {
7731 ++ struct ieee80211_local *local = sdata->local;
7732 ++ struct ieee80211_work *wk;
7733 ++
7734 ++ mutex_lock(&local->work_mtx);
7735 ++ list_for_each_entry(wk, &local->work_list, list) {
7736 ++ if (wk->sdata != sdata)
7737 ++ continue;
7738 ++
7739 ++ if (wk->type != IEEE80211_WORK_ASSOC)
7740 ++ continue;
7741 ++
7742 ++ if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN))
7743 ++ continue;
7744 ++ if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN))
7745 ++ continue;
7746 +
7747 ++ /*
7748 ++ * Printing the message only here means we can't
7749 ++ * spuriously print it, but it also means that it
7750 ++ * won't be printed when the frame comes in before
7751 ++ * we even tried to associate or in similar cases.
7752 ++ *
7753 ++ * Ultimately, I suspect cfg80211 should print the
7754 ++ * messages instead.
7755 ++ */
7756 ++ printk(KERN_DEBUG
7757 ++ "%s: deauthenticated from %pM (Reason: %u)\n",
7758 ++ sdata->name, mgmt->bssid,
7759 ++ le16_to_cpu(mgmt->u.deauth.reason_code));
7760 ++
7761 ++ list_del_rcu(&wk->list);
7762 ++ free_work(wk);
7763 ++ break;
7764 ++ }
7765 ++ mutex_unlock(&local->work_mtx);
7766 ++
7767 ++ cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
7768 ++ }
7769 + out:
7770 + kfree_skb(skb);
7771 + }
7772 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
7773 +index 04ea07f..1946f6b 100644
7774 +--- a/net/mac80211/rx.c
7775 ++++ b/net/mac80211/rx.c
7776 +@@ -1414,7 +1414,8 @@ ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
7777 + return res;
7778 +
7779 + if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
7780 +- if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
7781 ++ if (unlikely(!ieee80211_has_protected(fc) &&
7782 ++ ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
7783 + rx->key))
7784 + return -EACCES;
7785 + /* BIP does not use Protected field, so need to check MMIE */
7786 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
7787 +index cfc473e..d0716b9 100644
7788 +--- a/net/mac80211/tx.c
7789 ++++ b/net/mac80211/tx.c
7790 +@@ -584,7 +584,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
7791 + struct ieee80211_hdr *hdr = (void *)tx->skb->data;
7792 + struct ieee80211_supported_band *sband;
7793 + struct ieee80211_rate *rate;
7794 +- int i, len;
7795 ++ int i;
7796 ++ u32 len;
7797 + bool inval = false, rts = false, short_preamble = false;
7798 + struct ieee80211_tx_rate_control txrc;
7799 + u32 sta_flags;
7800 +@@ -593,7 +594,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
7801 +
7802 + sband = tx->local->hw.wiphy->bands[tx->channel->band];
7803 +
7804 +- len = min_t(int, tx->skb->len + FCS_LEN,
7805 ++ len = min_t(u32, tx->skb->len + FCS_LEN,
7806 + tx->local->hw.wiphy->frag_threshold);
7807 +
7808 + /* set up the tx rate control struct we give the RC algo */
7809 +diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
7810 +index 186c466..9842611 100644
7811 +--- a/scripts/kconfig/Makefile
7812 ++++ b/scripts/kconfig/Makefile
7813 +@@ -208,7 +208,7 @@ HOSTCFLAGS_zconf.tab.o := -I$(src)
7814 + HOSTLOADLIBES_qconf = $(KC_QT_LIBS) -ldl
7815 + HOSTCXXFLAGS_qconf.o = $(KC_QT_CFLAGS) -D LKC_DIRECT_LINK
7816 +
7817 +-HOSTLOADLIBES_gconf = `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0`
7818 ++HOSTLOADLIBES_gconf = `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0` -ldl
7819 + HOSTCFLAGS_gconf.o = `pkg-config --cflags gtk+-2.0 gmodule-2.0 libglade-2.0` \
7820 + -D LKC_DIRECT_LINK
7821 +
7822 +diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
7823 +index a2ff861..e9d98be 100644
7824 +--- a/sound/core/pcm_lib.c
7825 ++++ b/sound/core/pcm_lib.c
7826 +@@ -345,7 +345,9 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
7827 + new_hw_ptr = hw_base + pos;
7828 + }
7829 + __delta:
7830 +- delta = (new_hw_ptr - old_hw_ptr) % runtime->boundary;
7831 ++ delta = new_hw_ptr - old_hw_ptr;
7832 ++ if (delta < 0)
7833 ++ delta += runtime->boundary;
7834 + if (xrun_debug(substream, in_interrupt ?
7835 + XRUN_DEBUG_PERIODUPDATE : XRUN_DEBUG_HWPTRUPDATE)) {
7836 + char name[16];
7837 +@@ -439,8 +441,13 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
7838 + snd_pcm_playback_silence(substream, new_hw_ptr);
7839 +
7840 + if (in_interrupt) {
7841 +- runtime->hw_ptr_interrupt = new_hw_ptr -
7842 +- (new_hw_ptr % runtime->period_size);
7843 ++ delta = new_hw_ptr - runtime->hw_ptr_interrupt;
7844 ++ if (delta < 0)
7845 ++ delta += runtime->boundary;
7846 ++ delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
7847 ++ runtime->hw_ptr_interrupt += delta;
7848 ++ if (runtime->hw_ptr_interrupt >= runtime->boundary)
7849 ++ runtime->hw_ptr_interrupt -= runtime->boundary;
7850 + }
7851 + runtime->hw_ptr_base = hw_base;
7852 + runtime->status->hw_ptr = new_hw_ptr;
7853 +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
7854 +index 20b5982..d124e95 100644
7855 +--- a/sound/core/pcm_native.c
7856 ++++ b/sound/core/pcm_native.c
7857 +@@ -27,7 +27,6 @@
7858 + #include <linux/pm_qos_params.h>
7859 + #include <linux/uio.h>
7860 + #include <linux/dma-mapping.h>
7861 +-#include <linux/math64.h>
7862 + #include <sound/core.h>
7863 + #include <sound/control.h>
7864 + #include <sound/info.h>
7865 +@@ -370,38 +369,6 @@ static int period_to_usecs(struct snd_pcm_runtime *runtime)
7866 + return usecs;
7867 + }
7868 +
7869 +-static int calc_boundary(struct snd_pcm_runtime *runtime)
7870 +-{
7871 +- u_int64_t boundary;
7872 +-
7873 +- boundary = (u_int64_t)runtime->buffer_size *
7874 +- (u_int64_t)runtime->period_size;
7875 +-#if BITS_PER_LONG < 64
7876 +- /* try to find lowest common multiple for buffer and period */
7877 +- if (boundary > LONG_MAX - runtime->buffer_size) {
7878 +- u_int32_t remainder = -1;
7879 +- u_int32_t divident = runtime->buffer_size;
7880 +- u_int32_t divisor = runtime->period_size;
7881 +- while (remainder) {
7882 +- remainder = divident % divisor;
7883 +- if (remainder) {
7884 +- divident = divisor;
7885 +- divisor = remainder;
7886 +- }
7887 +- }
7888 +- boundary = div_u64(boundary, divisor);
7889 +- if (boundary > LONG_MAX - runtime->buffer_size)
7890 +- return -ERANGE;
7891 +- }
7892 +-#endif
7893 +- if (boundary == 0)
7894 +- return -ERANGE;
7895 +- runtime->boundary = boundary;
7896 +- while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
7897 +- runtime->boundary *= 2;
7898 +- return 0;
7899 +-}
7900 +-
7901 + static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
7902 + struct snd_pcm_hw_params *params)
7903 + {
7904 +@@ -477,9 +444,9 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
7905 + runtime->stop_threshold = runtime->buffer_size;
7906 + runtime->silence_threshold = 0;
7907 + runtime->silence_size = 0;
7908 +- err = calc_boundary(runtime);
7909 +- if (err < 0)
7910 +- goto _error;
7911 ++ runtime->boundary = runtime->buffer_size;
7912 ++ while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
7913 ++ runtime->boundary *= 2;
7914 +
7915 + snd_pcm_timer_resolution_change(substream);
7916 + runtime->status->state = SNDRV_PCM_STATE_SETUP;
7917 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
7918 +index cec6815..0bc24bd 100644
7919 +--- a/sound/pci/hda/hda_intel.c
7920 ++++ b/sound/pci/hda/hda_intel.c
7921 +@@ -2263,16 +2263,23 @@ static int azx_dev_free(struct snd_device *device)
7922 + * white/black-listing for position_fix
7923 + */
7924 + static struct snd_pci_quirk position_fix_list[] __devinitdata = {
7925 ++ SND_PCI_QUIRK(0x1025, 0x009f, "Acer Aspire 5110", POS_FIX_LPIB),
7926 + SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
7927 + SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
7928 + SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
7929 + SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
7930 +- SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
7931 + SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
7932 ++ SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
7933 ++ SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
7934 ++ SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
7935 ++ SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
7936 ++ SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB),
7937 ++ SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
7938 + SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
7939 + SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
7940 + SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
7941 + SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
7942 ++ SND_PCI_QUIRK(0x1849, 0x0888, "775Dual-VSTA", POS_FIX_LPIB),
7943 + SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB),
7944 + SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
7945 + {}
7946 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7947 +index 886d8e4..3273765 100644
7948 +--- a/sound/pci/hda/patch_realtek.c
7949 ++++ b/sound/pci/hda/patch_realtek.c
7950 +@@ -9392,6 +9392,7 @@ static struct snd_pci_quirk alc882_ssid_cfg_tbl[] = {
7951 + SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_IMAC24),
7952 + SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_IMAC24),
7953 + SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC885_MBP3),
7954 ++ SND_PCI_QUIRK(0x106b, 0x3000, "iMac", ALC889A_MB31),
7955 + SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889A_MB31),
7956 + SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC885_MBP3),
7957 + SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_IMAC24),
7958 +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
7959 +index a0e06d8..f1e7bab 100644
7960 +--- a/sound/pci/hda/patch_sigmatel.c
7961 ++++ b/sound/pci/hda/patch_sigmatel.c
7962 +@@ -2078,12 +2078,12 @@ static struct snd_pci_quirk stac927x_cfg_tbl[] = {
7963 + SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2000,
7964 + "Intel D965", STAC_D965_3ST),
7965 + /* Dell 3 stack systems */
7966 +- SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f7, "Dell XPS M1730", STAC_DELL_3ST),
7967 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01dd, "Dell Dimension E520", STAC_DELL_3ST),
7968 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ed, "Dell ", STAC_DELL_3ST),
7969 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f4, "Dell ", STAC_DELL_3ST),
7970 + /* Dell 3 stack systems with verb table in BIOS */
7971 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS),
7972 ++ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f7, "Dell XPS M1730", STAC_DELL_BIOS),
7973 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0227, "Dell Vostro 1400 ", STAC_DELL_BIOS),
7974 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022e, "Dell ", STAC_DELL_BIOS),
7975 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell Inspiron 1525", STAC_DELL_BIOS),
7976 +diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
7977 +index 2e0772f..1e22141 100644
7978 +--- a/sound/soc/codecs/wm8350.c
7979 ++++ b/sound/soc/codecs/wm8350.c
7980 +@@ -424,8 +424,8 @@ static const struct soc_enum wm8350_enum[] = {
7981 + SOC_ENUM_SINGLE(WM8350_INPUT_MIXER_VOLUME, 15, 2, wm8350_lr),
7982 + };
7983 +
7984 +-static DECLARE_TLV_DB_LINEAR(pre_amp_tlv, -1200, 3525);
7985 +-static DECLARE_TLV_DB_LINEAR(out_pga_tlv, -5700, 600);
7986 ++static DECLARE_TLV_DB_SCALE(pre_amp_tlv, -1200, 3525, 0);
7987 ++static DECLARE_TLV_DB_SCALE(out_pga_tlv, -5700, 600, 0);
7988 + static DECLARE_TLV_DB_SCALE(dac_pcm_tlv, -7163, 36, 1);
7989 + static DECLARE_TLV_DB_SCALE(adc_pcm_tlv, -12700, 50, 1);
7990 + static DECLARE_TLV_DB_SCALE(out_mix_tlv, -1500, 300, 1);
7991 +diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
7992 +index 6acc885..a9fa46c 100644
7993 +--- a/sound/soc/codecs/wm8400.c
7994 ++++ b/sound/soc/codecs/wm8400.c
7995 +@@ -107,21 +107,21 @@ static void wm8400_codec_reset(struct snd_soc_codec *codec)
7996 + wm8400_reset_codec_reg_cache(wm8400->wm8400);
7997 + }
7998 +
7999 +-static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
8000 ++static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0);
8001 +
8002 +-static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000);
8003 ++static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0);
8004 +
8005 +-static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, -2100, 0);
8006 ++static const DECLARE_TLV_DB_SCALE(out_mix_tlv, -2100, 0, 0);
8007 +
8008 +-static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600);
8009 ++static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0);
8010 +
8011 +-static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0);
8012 ++static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0);
8013 +
8014 +-static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0);
8015 ++static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0);
8016 +
8017 +-static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763);
8018 ++static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0);
8019 +
8020 +-static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0);
8021 ++static const DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0);
8022 +
8023 + static int wm8400_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
8024 + struct snd_ctl_elem_value *ucontrol)
8025 +@@ -440,7 +440,7 @@ static int outmixer_event (struct snd_soc_dapm_widget *w,
8026 + /* INMIX dB values */
8027 + static const unsigned int in_mix_tlv[] = {
8028 + TLV_DB_RANGE_HEAD(1),
8029 +- 0,7, TLV_DB_LINEAR_ITEM(-1200, 600),
8030 ++ 0,7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
8031 + };
8032 +
8033 + /* Left In PGA Connections */
8034 +diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
8035 +index 831f473..7c6b00e 100644
8036 +--- a/sound/soc/codecs/wm8990.c
8037 ++++ b/sound/soc/codecs/wm8990.c
8038 +@@ -111,21 +111,21 @@ static const u16 wm8990_reg[] = {
8039 +
8040 + #define wm8990_reset(c) snd_soc_write(c, WM8990_RESET, 0)
8041 +
8042 +-static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
8043 ++static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0);
8044 +
8045 +-static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000);
8046 ++static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0);
8047 +
8048 +-static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, 0, -2100);
8049 ++static const DECLARE_TLV_DB_SCALE(out_mix_tlv, 0, -2100, 0);
8050 +
8051 +-static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600);
8052 ++static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0);
8053 +
8054 +-static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0);
8055 ++static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0);
8056 +
8057 +-static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0);
8058 ++static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0);
8059 +
8060 +-static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763);
8061 ++static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0);
8062 +
8063 +-static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0);
8064 ++static const DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0);
8065 +
8066 + static int wm899x_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
8067 + struct snd_ctl_elem_value *ucontrol)
8068 +@@ -451,7 +451,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
8069 + /* INMIX dB values */
8070 + static const unsigned int in_mix_tlv[] = {
8071 + TLV_DB_RANGE_HEAD(1),
8072 +- 0, 7, TLV_DB_LINEAR_ITEM(-1200, 600),
8073 ++ 0, 7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
8074 + };
8075 +
8076 + /* Left In PGA Connections */
8077 +diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c
8078 +index 2b31ac6..803aeef 100644
8079 +--- a/sound/soc/imx/imx-pcm-dma-mx2.c
8080 ++++ b/sound/soc/imx/imx-pcm-dma-mx2.c
8081 +@@ -73,7 +73,8 @@ static void snd_imx_dma_err_callback(int channel, void *data, int err)
8082 + {
8083 + struct snd_pcm_substream *substream = data;
8084 + struct snd_soc_pcm_runtime *rtd = substream->private_data;
8085 +- struct imx_pcm_dma_params *dma_params = rtd->dai->cpu_dai->dma_data;
8086 ++ struct imx_pcm_dma_params *dma_params =
8087 ++ snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);
8088 + struct snd_pcm_runtime *runtime = substream->runtime;
8089 + struct imx_pcm_runtime_data *iprtd = runtime->private_data;
8090 + int ret;
8091 +@@ -102,7 +103,7 @@ static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream)
8092 + struct imx_pcm_runtime_data *iprtd = runtime->private_data;
8093 + int ret;
8094 +
8095 +- dma_params = snd_soc_get_dma_data(rtd->dai->cpu_dai, substream);
8096 ++ dma_params = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);
8097 +
8098 + iprtd->dma = imx_dma_request_by_prio(DRV_NAME, DMA_PRIO_HIGH);
8099 + if (iprtd->dma < 0) {
8100 +@@ -212,7 +213,7 @@ static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream)
8101 + struct imx_pcm_runtime_data *iprtd = runtime->private_data;
8102 + int err;
8103 +
8104 +- dma_params = snd_soc_get_dma_data(rtd->dai->cpu_dai, substream);
8105 ++ dma_params = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);
8106 +
8107 + iprtd->substream = substream;
8108 + iprtd->buf = (unsigned int *)substream->dma_buffer.area;
8109 +diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c
8110 +index 8977317..5fd55cd 100644
8111 +--- a/tools/perf/bench/mem-memcpy.c
8112 ++++ b/tools/perf/bench/mem-memcpy.c
8113 +@@ -24,7 +24,7 @@
8114 +
8115 + static const char *length_str = "1MB";
8116 + static const char *routine = "default";
8117 +-static int use_clock = 0;
8118 ++static bool use_clock = false;
8119 + static int clock_fd;
8120 +
8121 + static const struct option options[] = {
8122 +diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
8123 +index 81cee78..da1b2e9 100644
8124 +--- a/tools/perf/bench/sched-messaging.c
8125 ++++ b/tools/perf/bench/sched-messaging.c
8126 +@@ -31,9 +31,9 @@
8127 +
8128 + #define DATASIZE 100
8129 +
8130 +-static int use_pipes = 0;
8131 ++static bool use_pipes = false;
8132 + static unsigned int loops = 100;
8133 +-static unsigned int thread_mode = 0;
8134 ++static bool thread_mode = false;
8135 + static unsigned int num_groups = 10;
8136 +
8137 + struct sender_context {
8138 +diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
8139 +index 6ad7148..94a814c 100644
8140 +--- a/tools/perf/builtin-annotate.c
8141 ++++ b/tools/perf/builtin-annotate.c
8142 +@@ -29,11 +29,11 @@
8143 +
8144 + static char const *input_name = "perf.data";
8145 +
8146 +-static int force;
8147 ++static bool force;
8148 +
8149 +-static int full_paths;
8150 ++static bool full_paths;
8151 +
8152 +-static int print_line;
8153 ++static bool print_line;
8154 +
8155 + struct sym_hist {
8156 + u64 sum;
8157 +@@ -584,7 +584,7 @@ static const struct option options[] = {
8158 + OPT_STRING('s', "symbol", &sym_hist_filter, "symbol",
8159 + "symbol to annotate"),
8160 + OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
8161 +- OPT_BOOLEAN('v', "verbose", &verbose,
8162 ++ OPT_INCR('v', "verbose", &verbose,
8163 + "be more verbose (show symbol address, etc)"),
8164 + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
8165 + "dump raw trace in ASCII"),
8166 +diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
8167 +index 30a05f5..f8e3d18 100644
8168 +--- a/tools/perf/builtin-buildid-cache.c
8169 ++++ b/tools/perf/builtin-buildid-cache.c
8170 +@@ -27,7 +27,7 @@ static const struct option buildid_cache_options[] = {
8171 + "file list", "file(s) to add"),
8172 + OPT_STRING('r', "remove", &remove_name_list_str, "file list",
8173 + "file(s) to remove"),
8174 +- OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose"),
8175 ++ OPT_INCR('v', "verbose", &verbose, "be more verbose"),
8176 + OPT_END()
8177 + };
8178 +
8179 +diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
8180 +index d0675c0..af2ad8b 100644
8181 +--- a/tools/perf/builtin-buildid-list.c
8182 ++++ b/tools/perf/builtin-buildid-list.c
8183 +@@ -16,7 +16,7 @@
8184 + #include "util/symbol.h"
8185 +
8186 + static char const *input_name = "perf.data";
8187 +-static int force;
8188 ++static bool force;
8189 + static bool with_hits;
8190 +
8191 + static const char * const buildid_list_usage[] = {
8192 +@@ -29,7 +29,7 @@ static const struct option options[] = {
8193 + OPT_STRING('i', "input", &input_name, "file",
8194 + "input file name"),
8195 + OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
8196 +- OPT_BOOLEAN('v', "verbose", &verbose,
8197 ++ OPT_INCR('v', "verbose", &verbose,
8198 + "be more verbose"),
8199 + OPT_END()
8200 + };
8201 +diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
8202 +index 1ea15d8..3a1d94d 100644
8203 +--- a/tools/perf/builtin-diff.c
8204 ++++ b/tools/perf/builtin-diff.c
8205 +@@ -19,7 +19,7 @@
8206 + static char const *input_old = "perf.data.old",
8207 + *input_new = "perf.data";
8208 + static char diff__default_sort_order[] = "dso,symbol";
8209 +-static int force;
8210 ++static bool force;
8211 + static bool show_displacement;
8212 +
8213 + static int perf_session__add_hist_entry(struct perf_session *self,
8214 +@@ -188,7 +188,7 @@ static const char * const diff_usage[] = {
8215 + };
8216 +
8217 + static const struct option options[] = {
8218 +- OPT_BOOLEAN('v', "verbose", &verbose,
8219 ++ OPT_INCR('v', "verbose", &verbose,
8220 + "be more verbose (show symbol address, etc)"),
8221 + OPT_BOOLEAN('m', "displacement", &show_displacement,
8222 + "Show position displacement relative to baseline"),
8223 +diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
8224 +index 215b584..81e3ecc 100644
8225 +--- a/tools/perf/builtin-help.c
8226 ++++ b/tools/perf/builtin-help.c
8227 +@@ -29,7 +29,7 @@ enum help_format {
8228 + HELP_FORMAT_WEB,
8229 + };
8230 +
8231 +-static int show_all = 0;
8232 ++static bool show_all = false;
8233 + static enum help_format help_format = HELP_FORMAT_MAN;
8234 + static struct option builtin_help_options[] = {
8235 + OPT_BOOLEAN('a', "all", &show_all, "print all available commands"),
8236 +diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
8237 +index e12c844..6c38e4f 100644
8238 +--- a/tools/perf/builtin-lock.c
8239 ++++ b/tools/perf/builtin-lock.c
8240 +@@ -744,7 +744,7 @@ static const char * const lock_usage[] = {
8241 +
8242 + static const struct option lock_options[] = {
8243 + OPT_STRING('i', "input", &input_name, "file", "input file name"),
8244 +- OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
8245 ++ OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
8246 + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
8247 + OPT_END()
8248 + };
8249 +diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
8250 +index 152d6c9..0562c50 100644
8251 +--- a/tools/perf/builtin-probe.c
8252 ++++ b/tools/perf/builtin-probe.c
8253 +@@ -162,7 +162,7 @@ static const char * const probe_usage[] = {
8254 + };
8255 +
8256 + static const struct option options[] = {
8257 +- OPT_BOOLEAN('v', "verbose", &verbose,
8258 ++ OPT_INCR('v', "verbose", &verbose,
8259 + "be more verbose (show parsed arguments, etc)"),
8260 + #ifndef NO_DWARF_SUPPORT
8261 + OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
8262 +diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
8263 +index f1411e9..d2d1b02 100644
8264 +--- a/tools/perf/builtin-record.c
8265 ++++ b/tools/perf/builtin-record.c
8266 +@@ -39,19 +39,19 @@ static int output;
8267 + static const char *output_name = "perf.data";
8268 + static int group = 0;
8269 + static unsigned int realtime_prio = 0;
8270 +-static int raw_samples = 0;
8271 +-static int system_wide = 0;
8272 ++static bool raw_samples = false;
8273 ++static bool system_wide = false;
8274 + static int profile_cpu = -1;
8275 + static pid_t target_pid = -1;
8276 + static pid_t child_pid = -1;
8277 +-static int inherit = 1;
8278 +-static int force = 0;
8279 +-static int append_file = 0;
8280 +-static int call_graph = 0;
8281 +-static int inherit_stat = 0;
8282 +-static int no_samples = 0;
8283 +-static int sample_address = 0;
8284 +-static int multiplex = 0;
8285 ++static bool inherit = true;
8286 ++static bool force = false;
8287 ++static bool append_file = false;
8288 ++static bool call_graph = false;
8289 ++static bool inherit_stat = false;
8290 ++static bool no_samples = false;
8291 ++static bool sample_address = false;
8292 ++static bool multiplex = false;
8293 + static int multiplex_fd = -1;
8294 +
8295 + static long samples = 0;
8296 +@@ -451,7 +451,7 @@ static int __cmd_record(int argc, const char **argv)
8297 + rename(output_name, oldname);
8298 + }
8299 + } else {
8300 +- append_file = 0;
8301 ++ append_file = false;
8302 + }
8303 +
8304 + flags = O_CREAT|O_RDWR;
8305 +@@ -676,7 +676,7 @@ static const struct option options[] = {
8306 + "number of mmap data pages"),
8307 + OPT_BOOLEAN('g', "call-graph", &call_graph,
8308 + "do call-graph (stack chain/backtrace) recording"),
8309 +- OPT_BOOLEAN('v', "verbose", &verbose,
8310 ++ OPT_INCR('v', "verbose", &verbose,
8311 + "be more verbose (show counter open errors, etc)"),
8312 + OPT_BOOLEAN('s', "stat", &inherit_stat,
8313 + "per thread counts"),
8314 +diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
8315 +index f815de2..63fc64e 100644
8316 +--- a/tools/perf/builtin-report.c
8317 ++++ b/tools/perf/builtin-report.c
8318 +@@ -33,11 +33,11 @@
8319 +
8320 + static char const *input_name = "perf.data";
8321 +
8322 +-static int force;
8323 ++static bool force;
8324 + static bool hide_unresolved;
8325 + static bool dont_use_callchains;
8326 +
8327 +-static int show_threads;
8328 ++static bool show_threads;
8329 + static struct perf_read_values show_threads_values;
8330 +
8331 + static char default_pretty_printing_style[] = "normal";
8332 +@@ -400,7 +400,7 @@ static const char * const report_usage[] = {
8333 + static const struct option options[] = {
8334 + OPT_STRING('i', "input", &input_name, "file",
8335 + "input file name"),
8336 +- OPT_BOOLEAN('v', "verbose", &verbose,
8337 ++ OPT_INCR('v', "verbose", &verbose,
8338 + "be more verbose (show symbol address, etc)"),
8339 + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
8340 + "dump raw trace in ASCII"),
8341 +diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
8342 +index 4f5a03e..682783c 100644
8343 +--- a/tools/perf/builtin-sched.c
8344 ++++ b/tools/perf/builtin-sched.c
8345 +@@ -1790,7 +1790,7 @@ static const char * const sched_usage[] = {
8346 + static const struct option sched_options[] = {
8347 + OPT_STRING('i', "input", &input_name, "file",
8348 + "input file name"),
8349 +- OPT_BOOLEAN('v', "verbose", &verbose,
8350 ++ OPT_INCR('v', "verbose", &verbose,
8351 + "be more verbose (show symbol address, etc)"),
8352 + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
8353 + "dump raw trace in ASCII"),
8354 +@@ -1805,7 +1805,7 @@ static const char * const latency_usage[] = {
8355 + static const struct option latency_options[] = {
8356 + OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
8357 + "sort by key(s): runtime, switch, avg, max"),
8358 +- OPT_BOOLEAN('v', "verbose", &verbose,
8359 ++ OPT_INCR('v', "verbose", &verbose,
8360 + "be more verbose (show symbol address, etc)"),
8361 + OPT_INTEGER('C', "CPU", &profile_cpu,
8362 + "CPU to profile on"),
8363 +@@ -1822,7 +1822,7 @@ static const char * const replay_usage[] = {
8364 + static const struct option replay_options[] = {
8365 + OPT_INTEGER('r', "repeat", &replay_repeat,
8366 + "repeat the workload replay N times (-1: infinite)"),
8367 +- OPT_BOOLEAN('v', "verbose", &verbose,
8368 ++ OPT_INCR('v', "verbose", &verbose,
8369 + "be more verbose (show symbol address, etc)"),
8370 + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
8371 + "dump raw trace in ASCII"),
8372 +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
8373 +index 95db31c..f7f4a88 100644
8374 +--- a/tools/perf/builtin-stat.c
8375 ++++ b/tools/perf/builtin-stat.c
8376 +@@ -66,16 +66,16 @@ static struct perf_event_attr default_attrs[] = {
8377 +
8378 + };
8379 +
8380 +-static int system_wide = 0;
8381 ++static bool system_wide = false;
8382 + static unsigned int nr_cpus = 0;
8383 + static int run_idx = 0;
8384 +
8385 + static int run_count = 1;
8386 +-static int inherit = 1;
8387 +-static int scale = 1;
8388 ++static bool inherit = true;
8389 ++static bool scale = true;
8390 + static pid_t target_pid = -1;
8391 + static pid_t child_pid = -1;
8392 +-static int null_run = 0;
8393 ++static bool null_run = false;
8394 +
8395 + static int fd[MAX_NR_CPUS][MAX_COUNTERS];
8396 +
8397 +@@ -494,7 +494,7 @@ static const struct option options[] = {
8398 + "system-wide collection from all CPUs"),
8399 + OPT_BOOLEAN('c', "scale", &scale,
8400 + "scale/normalize counters"),
8401 +- OPT_BOOLEAN('v', "verbose", &verbose,
8402 ++ OPT_INCR('v', "verbose", &verbose,
8403 + "be more verbose (show counter open errors, etc)"),
8404 + OPT_INTEGER('r', "repeat", &run_count,
8405 + "repeat command and print average + stddev (max: 100)"),
8406 +diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
8407 +index 0d4d8ff..1d7416d 100644
8408 +--- a/tools/perf/builtin-timechart.c
8409 ++++ b/tools/perf/builtin-timechart.c
8410 +@@ -43,7 +43,7 @@ static u64 turbo_frequency;
8411 +
8412 + static u64 first_time, last_time;
8413 +
8414 +-static int power_only;
8415 ++static bool power_only;
8416 +
8417 +
8418 + struct per_pid;
8419 +diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
8420 +index 1f52932..4f94b10 100644
8421 +--- a/tools/perf/builtin-top.c
8422 ++++ b/tools/perf/builtin-top.c
8423 +@@ -57,7 +57,7 @@
8424 +
8425 + static int fd[MAX_NR_CPUS][MAX_COUNTERS];
8426 +
8427 +-static int system_wide = 0;
8428 ++static bool system_wide = false;
8429 +
8430 + static int default_interval = 0;
8431 +
8432 +@@ -65,18 +65,18 @@ static int count_filter = 5;
8433 + static int print_entries;
8434 +
8435 + static int target_pid = -1;
8436 +-static int inherit = 0;
8437 ++static bool inherit = false;
8438 + static int profile_cpu = -1;
8439 + static int nr_cpus = 0;
8440 + static unsigned int realtime_prio = 0;
8441 +-static int group = 0;
8442 ++static bool group = false;
8443 + static unsigned int page_size;
8444 + static unsigned int mmap_pages = 16;
8445 + static int freq = 1000; /* 1 KHz */
8446 +
8447 + static int delay_secs = 2;
8448 +-static int zero = 0;
8449 +-static int dump_symtab = 0;
8450 ++static bool zero = false;
8451 ++static bool dump_symtab = false;
8452 +
8453 + static bool hide_kernel_symbols = false;
8454 + static bool hide_user_symbols = false;
8455 +@@ -169,7 +169,7 @@ static void sig_winch_handler(int sig __used)
8456 + update_print_entries(&winsize);
8457 + }
8458 +
8459 +-static void parse_source(struct sym_entry *syme)
8460 ++static int parse_source(struct sym_entry *syme)
8461 + {
8462 + struct symbol *sym;
8463 + struct sym_entry_source *source;
8464 +@@ -180,12 +180,21 @@ static void parse_source(struct sym_entry *syme)
8465 + u64 len;
8466 +
8467 + if (!syme)
8468 +- return;
8469 ++ return -1;
8470 ++
8471 ++ sym = sym_entry__symbol(syme);
8472 ++ map = syme->map;
8473 ++
8474 ++ /*
8475 ++ * We can't annotate with just /proc/kallsyms
8476 ++ */
8477 ++ if (map->dso->origin == DSO__ORIG_KERNEL)
8478 ++ return -1;
8479 +
8480 + if (syme->src == NULL) {
8481 + syme->src = zalloc(sizeof(*source));
8482 + if (syme->src == NULL)
8483 +- return;
8484 ++ return -1;
8485 + pthread_mutex_init(&syme->src->lock, NULL);
8486 + }
8487 +
8488 +@@ -195,9 +204,6 @@ static void parse_source(struct sym_entry *syme)
8489 + pthread_mutex_lock(&source->lock);
8490 + goto out_assign;
8491 + }
8492 +-
8493 +- sym = sym_entry__symbol(syme);
8494 +- map = syme->map;
8495 + path = map->dso->long_name;
8496 +
8497 + len = sym->end - sym->start;
8498 +@@ -209,7 +215,7 @@ static void parse_source(struct sym_entry *syme)
8499 +
8500 + file = popen(command, "r");
8501 + if (!file)
8502 +- return;
8503 ++ return -1;
8504 +
8505 + pthread_mutex_lock(&source->lock);
8506 + source->lines_tail = &source->lines;
8507 +@@ -245,6 +251,7 @@ static void parse_source(struct sym_entry *syme)
8508 + out_assign:
8509 + sym_filter_entry = syme;
8510 + pthread_mutex_unlock(&source->lock);
8511 ++ return 0;
8512 + }
8513 +
8514 + static void __zero_source_counters(struct sym_entry *syme)
8515 +@@ -839,7 +846,7 @@ static void handle_keypress(int c)
8516 + display_weighted = ~display_weighted;
8517 + break;
8518 + case 'z':
8519 +- zero = ~zero;
8520 ++ zero = !zero;
8521 + break;
8522 + default:
8523 + break;
8524 +@@ -990,7 +997,17 @@ static void event__process_sample(const event_t *self,
8525 + if (sym_filter_entry_sched) {
8526 + sym_filter_entry = sym_filter_entry_sched;
8527 + sym_filter_entry_sched = NULL;
8528 +- parse_source(sym_filter_entry);
8529 ++ if (parse_source(sym_filter_entry) < 0) {
8530 ++ struct symbol *sym = sym_entry__symbol(sym_filter_entry);
8531 ++
8532 ++ pr_err("Can't annotate %s", sym->name);
8533 ++ if (sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
8534 ++ pr_err(": No vmlinux file was found in the path:\n");
8535 ++ vmlinux_path__fprintf(stderr);
8536 ++ } else
8537 ++ pr_err(".\n");
8538 ++ exit(1);
8539 ++ }
8540 + }
8541 +
8542 + syme = symbol__priv(al.sym);
8543 +@@ -1296,7 +1313,7 @@ static const struct option options[] = {
8544 + "display this many functions"),
8545 + OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols,
8546 + "hide user symbols"),
8547 +- OPT_BOOLEAN('v', "verbose", &verbose,
8548 ++ OPT_INCR('v', "verbose", &verbose,
8549 + "be more verbose (show counter open errors, etc)"),
8550 + OPT_END()
8551 + };
8552 +diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
8553 +index 407041d..8fc50d8 100644
8554 +--- a/tools/perf/builtin-trace.c
8555 ++++ b/tools/perf/builtin-trace.c
8556 +@@ -505,7 +505,7 @@ static const char * const trace_usage[] = {
8557 + static const struct option options[] = {
8558 + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
8559 + "dump raw trace in ASCII"),
8560 +- OPT_BOOLEAN('v', "verbose", &verbose,
8561 ++ OPT_INCR('v', "verbose", &verbose,
8562 + "be more verbose (show symbol address, etc)"),
8563 + OPT_BOOLEAN('L', "Latency", &latency_format,
8564 + "show latency attributes (irqs/preemption disabled, etc)"),
8565 +diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
8566 +index 0905600..666b1c2 100644
8567 +--- a/tools/perf/util/debug.c
8568 ++++ b/tools/perf/util/debug.c
8569 +@@ -12,7 +12,7 @@
8570 + #include "util.h"
8571 +
8572 + int verbose = 0;
8573 +-int dump_trace = 0;
8574 ++bool dump_trace = false;
8575 +
8576 + int eprintf(int level, const char *fmt, ...)
8577 + {
8578 +diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
8579 +index c6c24c5..0f7af87 100644
8580 +--- a/tools/perf/util/debug.h
8581 ++++ b/tools/perf/util/debug.h
8582 +@@ -2,10 +2,11 @@
8583 + #ifndef __PERF_DEBUG_H
8584 + #define __PERF_DEBUG_H
8585 +
8586 ++#include <stdbool.h>
8587 + #include "event.h"
8588 +
8589 + extern int verbose;
8590 +-extern int dump_trace;
8591 ++extern bool dump_trace;
8592 +
8593 + int eprintf(int level,
8594 + const char *fmt, ...) __attribute__((format(printf, 2, 3)));
8595 +diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
8596 +index efebd5b..bbe646b 100644
8597 +--- a/tools/perf/util/parse-options.c
8598 ++++ b/tools/perf/util/parse-options.c
8599 +@@ -49,6 +49,7 @@ static int get_value(struct parse_opt_ctx_t *p,
8600 + break;
8601 + /* FALLTHROUGH */
8602 + case OPTION_BOOLEAN:
8603 ++ case OPTION_INCR:
8604 + case OPTION_BIT:
8605 + case OPTION_SET_INT:
8606 + case OPTION_SET_PTR:
8607 +@@ -73,6 +74,10 @@ static int get_value(struct parse_opt_ctx_t *p,
8608 + return 0;
8609 +
8610 + case OPTION_BOOLEAN:
8611 ++ *(bool *)opt->value = unset ? false : true;
8612 ++ return 0;
8613 ++
8614 ++ case OPTION_INCR:
8615 + *(int *)opt->value = unset ? 0 : *(int *)opt->value + 1;
8616 + return 0;
8617 +
8618 +@@ -478,6 +483,7 @@ int usage_with_options_internal(const char * const *usagestr,
8619 + case OPTION_GROUP:
8620 + case OPTION_BIT:
8621 + case OPTION_BOOLEAN:
8622 ++ case OPTION_INCR:
8623 + case OPTION_SET_INT:
8624 + case OPTION_SET_PTR:
8625 + case OPTION_LONG:
8626 +diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h
8627 +index 948805a..b2da725 100644
8628 +--- a/tools/perf/util/parse-options.h
8629 ++++ b/tools/perf/util/parse-options.h
8630 +@@ -8,7 +8,8 @@ enum parse_opt_type {
8631 + OPTION_GROUP,
8632 + /* options with no arguments */
8633 + OPTION_BIT,
8634 +- OPTION_BOOLEAN, /* _INCR would have been a better name */
8635 ++ OPTION_BOOLEAN,
8636 ++ OPTION_INCR,
8637 + OPTION_SET_INT,
8638 + OPTION_SET_PTR,
8639 + /* options with arguments (usually) */
8640 +@@ -95,6 +96,7 @@ struct option {
8641 + #define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) }
8642 + #define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (b) }
8643 + #define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = (v), .help = (h) }
8644 ++#define OPT_INCR(s, l, v, h) { .type = OPTION_INCR, .short_name = (s), .long_name = (l), .value = (v), .help = (h) }
8645 + #define OPT_SET_INT(s, l, v, h, i) { .type = OPTION_SET_INT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (i) }
8646 + #define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) }
8647 + #define OPT_INTEGER(s, l, v, h) { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = (v), .help = (h) }
8648 +diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
8649 +index c458c4a..4acb5d7 100644
8650 +--- a/tools/perf/util/symbol.c
8651 ++++ b/tools/perf/util/symbol.c
8652 +@@ -18,18 +18,6 @@
8653 + #define NT_GNU_BUILD_ID 3
8654 + #endif
8655 +
8656 +-enum dso_origin {
8657 +- DSO__ORIG_KERNEL = 0,
8658 +- DSO__ORIG_JAVA_JIT,
8659 +- DSO__ORIG_BUILD_ID_CACHE,
8660 +- DSO__ORIG_FEDORA,
8661 +- DSO__ORIG_UBUNTU,
8662 +- DSO__ORIG_BUILDID,
8663 +- DSO__ORIG_DSO,
8664 +- DSO__ORIG_KMODULE,
8665 +- DSO__ORIG_NOT_FOUND,
8666 +-};
8667 +-
8668 + static void dsos__add(struct list_head *head, struct dso *dso);
8669 + static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
8670 + static int dso__load_kernel_sym(struct dso *self, struct map *map,
8671 +@@ -1025,7 +1013,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
8672 + }
8673 + curr_map->map_ip = identity__map_ip;
8674 + curr_map->unmap_ip = identity__map_ip;
8675 +- curr_dso->origin = DSO__ORIG_KERNEL;
8676 ++ curr_dso->origin = self->origin;
8677 + map_groups__insert(kmap->kmaps, curr_map);
8678 + dsos__add(&dsos__kernel, curr_dso);
8679 + dso__set_loaded(curr_dso, map->type);
8680 +@@ -1895,6 +1883,17 @@ out_fail:
8681 + return -1;
8682 + }
8683 +
8684 ++size_t vmlinux_path__fprintf(FILE *fp)
8685 ++{
8686 ++ int i;
8687 ++ size_t printed = 0;
8688 ++
8689 ++ for (i = 0; i < vmlinux_path__nr_entries; ++i)
8690 ++ printed += fprintf(fp, "[%d] %s\n", i, vmlinux_path[i]);
8691 ++
8692 ++ return printed;
8693 ++}
8694 ++
8695 + static int setup_list(struct strlist **list, const char *list_str,
8696 + const char *list_name)
8697 + {
8698 +diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
8699 +index f30a374..044a4bc 100644
8700 +--- a/tools/perf/util/symbol.h
8701 ++++ b/tools/perf/util/symbol.h
8702 +@@ -150,6 +150,19 @@ size_t dsos__fprintf_buildid(FILE *fp, bool with_hits);
8703 +
8704 + size_t dso__fprintf_buildid(struct dso *self, FILE *fp);
8705 + size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp);
8706 ++
8707 ++enum dso_origin {
8708 ++ DSO__ORIG_KERNEL = 0,
8709 ++ DSO__ORIG_JAVA_JIT,
8710 ++ DSO__ORIG_BUILD_ID_CACHE,
8711 ++ DSO__ORIG_FEDORA,
8712 ++ DSO__ORIG_UBUNTU,
8713 ++ DSO__ORIG_BUILDID,
8714 ++ DSO__ORIG_DSO,
8715 ++ DSO__ORIG_KMODULE,
8716 ++ DSO__ORIG_NOT_FOUND,
8717 ++};
8718 ++
8719 + char dso__symtab_origin(const struct dso *self);
8720 + void dso__set_long_name(struct dso *self, char *name);
8721 + void dso__set_build_id(struct dso *self, void *build_id);
8722 +@@ -169,4 +182,6 @@ int kallsyms__parse(const char *filename, void *arg,
8723 + int symbol__init(void);
8724 + bool symbol_type__is_a(char symbol_type, enum map_type map_type);
8725 +
8726 ++size_t vmlinux_path__fprintf(FILE *fp);
8727 ++
8728 + #endif /* __PERF_SYMBOL */
8729 +diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
8730 +index 613c9cc..dc78bae 100644
8731 +--- a/tools/perf/util/trace-event-parse.c
8732 ++++ b/tools/perf/util/trace-event-parse.c
8733 +@@ -40,7 +40,7 @@ int header_page_size_size;
8734 + int header_page_data_offset;
8735 + int header_page_data_size;
8736 +
8737 +-int latency_format;
8738 ++bool latency_format;
8739 +
8740 + static char *input_buf;
8741 + static unsigned long long input_buf_ptr;
8742 +diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
8743 +index c3269b9..81f2fd2 100644
8744 +--- a/tools/perf/util/trace-event.h
8745 ++++ b/tools/perf/util/trace-event.h
8746 +@@ -1,6 +1,7 @@
8747 + #ifndef __PERF_TRACE_EVENTS_H
8748 + #define __PERF_TRACE_EVENTS_H
8749 +
8750 ++#include <stdbool.h>
8751 + #include "parse-events.h"
8752 +
8753 + #define __unused __attribute__((unused))
8754 +@@ -241,7 +242,7 @@ extern int header_page_size_size;
8755 + extern int header_page_data_offset;
8756 + extern int header_page_data_size;
8757 +
8758 +-extern int latency_format;
8759 ++extern bool latency_format;
8760 +
8761 + int parse_header_page(char *buf, unsigned long size);
8762 + int trace_parse_common_type(void *data);
8763
8764 Deleted: genpatches-2.6/trunk/2.6.34/2710_vbfb-section-cleanup.patch
8765 ===================================================================
8766 --- genpatches-2.6/trunk/2.6.34/2710_vbfb-section-cleanup.patch 2010-06-25 20:53:10 UTC (rev 1718)
8767 +++ genpatches-2.6/trunk/2.6.34/2710_vbfb-section-cleanup.patch 2010-07-05 20:46:44 UTC (rev 1719)
8768 @@ -1,55 +0,0 @@
8769 -From: Henrik Kretzschmar <henne@×××××××××××××.de>
8770 -Date: Mon, 24 May 2010 21:33:57 +0000 (-0700)
8771 -Subject: fbdev: section cleanup in vfb
8772 -X-Git-Tag: v2.6.35-rc1~283
8773 -X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=3cc04971661e37e7de6fbf9808ede554b5e1cb4e
8774 -
8775 -fbdev: section cleanup in vfb
8776 -
8777 -Fix up the section in the vfb driver, by moving the variables vfb_default
8778 -and vfb_fix from .init.data to .devinit.data
8779 -
8780 -This fixes the following warnings issued by modpost:
8781 -
8782 -WARNING: drivers/video/vfb.o(.devinit.text+0xf8): Section mismatch in reference from the function vfb_probe() to the variable .init.data:vfb_default
8783 -The function __devinit vfb_probe() references
8784 -a variable __initdata vfb_default.
8785 -If vfb_default is only used by vfb_probe then
8786 -annotate vfb_default with a matching annotation.
8787 -
8788 -WARNING: drivers/video/vfb.o(.devinit.text+0x114): Section mismatch in reference from the function vfb_probe() to the variable .init.data:vfb_fix
8789 -The function __devinit vfb_probe() references
8790 -a variable __initdata vfb_fix.
8791 -If vfb_fix is only used by vfb_probe then
8792 -annotate vfb_fix with a matching annotation.
8793 -
8794 -Signed-off-by: Henrik Kretzschmar <henne@×××××××××××××.de>
8795 -Acked-by: Uwe Kleine-Kö <u.kleine-koenig@×××××××××××.de>
8796 -Cc: <stable@××××××.org> [if "platform-drivers: move probe to .devinit.text in drivers/video" was merged]
8797 -Signed-off-by: Andrew Morton <akpm@××××××××××××××××.org>
8798 -Signed-off-by: Linus Torvalds <torvalds@××××××××××××××××.org>
8799 ----
8800 -
8801 -diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
8802 -index 9b5532b..bc67251 100644
8803 ---- a/drivers/video/vfb.c
8804 -+++ b/drivers/video/vfb.c
8805 -@@ -78,7 +78,7 @@ static void rvfree(void *mem, unsigned long size)
8806 - vfree(mem);
8807 - }
8808 -
8809 --static struct fb_var_screeninfo vfb_default __initdata = {
8810 -+static struct fb_var_screeninfo vfb_default __devinitdata = {
8811 - .xres = 640,
8812 - .yres = 480,
8813 - .xres_virtual = 640,
8814 -@@ -100,7 +100,7 @@ static struct fb_var_screeninfo vfb_default __initdata = {
8815 - .vmode = FB_VMODE_NONINTERLACED,
8816 - };
8817 -
8818 --static struct fb_fix_screeninfo vfb_fix __initdata = {
8819 -+static struct fb_fix_screeninfo vfb_fix __devinitdata = {
8820 - .id = "Virtual FB",
8821 - .type = FB_TYPE_PACKED_PIXELS,
8822 - .visual = FB_VISUAL_PSEUDOCOLOR,
8823 -