
From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1696 - genpatches-2.6/trunk/2.6.33
Date: Tue, 27 Apr 2010 13:06:15
Message-Id: <20100427130603.313BF2C04C@corvid.gentoo.org>
Author: mpagano
Date: 2010-04-27 13:06:02 +0000 (Tue, 27 Apr 2010)
New Revision: 1696

Added:
   genpatches-2.6/trunk/2.6.33/1002_linux-2.6.33.3.patch
Modified:
   genpatches-2.6/trunk/2.6.33/0000_README
Log:
Linux patch 2.6.33.3

Modified: genpatches-2.6/trunk/2.6.33/0000_README
===================================================================
--- genpatches-2.6/trunk/2.6.33/0000_README	2010-04-25 00:23:19 UTC (rev 1695)
+++ genpatches-2.6/trunk/2.6.33/0000_README	2010-04-27 13:06:02 UTC (rev 1696)
@@ -47,6 +47,10 @@
 From: http://www.kernel.org
 Desc: Linux 2.6.33.2
 
+Patch: 1002_linux-2.6.33.3.patch
+From: http://www.kernel.org
+Desc: Linux 2.6.33.3
+
 Patch: 4100_dm-bbr.patch
 From: EVMS 2.5.2
 Desc: Bad block relocation support for LiveCD users

Added: genpatches-2.6/trunk/2.6.33/1002_linux-2.6.33.3.patch
===================================================================
--- genpatches-2.6/trunk/2.6.33/1002_linux-2.6.33.3.patch (rev 0)
+++ genpatches-2.6/trunk/2.6.33/1002_linux-2.6.33.3.patch 2010-04-27 13:06:02 UTC (rev 1696)
@@ -0,0 +1,5184 @@
+diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
+index 81c0c59..e1bb5b2 100644
+--- a/Documentation/i2c/busses/i2c-i801
++++ b/Documentation/i2c/busses/i2c-i801
+@@ -15,7 +15,8 @@ Supported adapters:
+ * Intel 82801I (ICH9)
+ * Intel EP80579 (Tolapai)
+ * Intel 82801JI (ICH10)
+- * Intel PCH
++ * Intel 3400/5 Series (PCH)
++ * Intel Cougar Point (PCH)
+ Datasheets: Publicly available at the Intel website
+
+ Authors:
+diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
+index 6b84a04..cbeb6e0 100644
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -172,7 +172,7 @@ not_angel:
+ adr r0, LC0
+ ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp})
+ THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip} )
+- THUMB( ldr sp, [r0, #28] )
++ THUMB( ldr sp, [r0, #32] )
+ subs r0, r0, r1 @ calculate the delta offset
+
+ @ if delta is zero, we are
+diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
+index 5fdeec5..d76279a 100644
+--- a/arch/ia64/kvm/kvm-ia64.c
++++ b/arch/ia64/kvm/kvm-ia64.c
+@@ -1794,7 +1794,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
+ {
+ struct kvm_memory_slot *memslot;
+ int r, i;
+- long n, base;
++ long base;
++ unsigned long n;
+ unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
+ offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
+
+@@ -1807,7 +1808,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
+ if (!memslot->dirty_bitmap)
+ goto out;
+
+- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
++ n = kvm_dirty_bitmap_bytes(memslot);
+ base = memslot->base_gfn / BITS_PER_LONG;
+
+ for (i = 0; i < n/sizeof(long); ++i) {
+@@ -1823,7 +1824,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
+ {
+ int r;
+- int n;
++ unsigned long n;
+ struct kvm_memory_slot *memslot;
+ int is_dirty = 0;
+
+@@ -1841,7 +1842,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ if (is_dirty) {
+ kvm_flush_remote_tlbs(kvm);
+ memslot = &kvm->memslots[log->slot];
+- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
++ n = kvm_dirty_bitmap_bytes(memslot);
+ memset(memslot->dirty_bitmap, 0, n);
+ }
+ r = 0;
+diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
+index 3e294bd..e6dc595 100644
+--- a/arch/powerpc/kvm/book3s.c
++++ b/arch/powerpc/kvm/book3s.c
+@@ -848,7 +848,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_vcpu *vcpu;
+ ulong ga, ga_end;
+ int is_dirty = 0;
+- int r, n;
++ int r;
++ unsigned long n;
+
+ down_write(&kvm->slots_lock);
+
+@@ -866,7 +867,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ kvm_for_each_vcpu(n, vcpu, kvm)
+ kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
+
+- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
++ n = kvm_dirty_bitmap_bytes(memslot);
+ memset(memslot->dirty_bitmap, 0, n);
+ }
+
+diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h
+index 22574e0..202d869 100644
+--- a/arch/powerpc/platforms/pseries/offline_states.h
++++ b/arch/powerpc/platforms/pseries/offline_states.h
+@@ -9,10 +9,30 @@ enum cpu_state_vals {
+ CPU_MAX_OFFLINE_STATES
+ };
+
++#ifdef CONFIG_HOTPLUG_CPU
+ extern enum cpu_state_vals get_cpu_current_state(int cpu);
+ extern void set_cpu_current_state(int cpu, enum cpu_state_vals state);
+-extern enum cpu_state_vals get_preferred_offline_state(int cpu);
+ extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state);
+ extern void set_default_offline_state(int cpu);
++#else
++static inline enum cpu_state_vals get_cpu_current_state(int cpu)
++{
++ return CPU_STATE_ONLINE;
++}
++
++static inline void set_cpu_current_state(int cpu, enum cpu_state_vals state)
++{
++}
++
++static inline void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
++{
++}
++
++static inline void set_default_offline_state(int cpu)
++{
++}
++#endif
++
++extern enum cpu_state_vals get_preferred_offline_state(int cpu);
+ extern int start_secondary(void);
+ #endif
+diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
+index 300ab01..5f91a38 100644
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -70,12 +70,8 @@ static pte_t __ref *vmem_pte_alloc(void)
+ pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
+ if (!pte)
+ return NULL;
+- if (MACHINE_HAS_HPAGE)
+- clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO,
+- PTRS_PER_PTE * sizeof(pte_t));
+- else
+- clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+- PTRS_PER_PTE * sizeof(pte_t));
++ clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
++ PTRS_PER_PTE * sizeof(pte_t));
+ return pte;
+ }
+
+@@ -116,8 +112,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
+ if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
+ (address + HPAGE_SIZE <= start + size) &&
+ (address >= HPAGE_SIZE)) {
+- pte_val(pte) |= _SEGMENT_ENTRY_LARGE |
+- _SEGMENT_ENTRY_CO;
++ pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
+ pmd_val(*pm_dir) = pte_val(pte);
+ address += HPAGE_SIZE - PAGE_SIZE;
+ continue;
+diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
+index ac04255..ce830fa 100644
+--- a/arch/sh/include/asm/elf.h
++++ b/arch/sh/include/asm/elf.h
+@@ -211,7 +211,9 @@ extern void __kernel_vsyscall;
+
+ #define VSYSCALL_AUX_ENT \
+ if (vdso_enabled) \
+- NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);
++ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
++ else \
++ NEW_AUX_ENT(AT_IGNORE, 0);
+ #else
+ #define VSYSCALL_AUX_ENT
+ #endif /* CONFIG_VSYSCALL */
+@@ -219,7 +221,7 @@ extern void __kernel_vsyscall;
+ #ifdef CONFIG_SH_FPU
+ #define FPU_AUX_ENT NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT)
+ #else
+-#define FPU_AUX_ENT
++#define FPU_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0)
+ #endif
+
+ extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
+diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
+index 983e079..1d19c19 100644
+--- a/arch/sh/kernel/smp.c
++++ b/arch/sh/kernel/smp.c
+@@ -69,6 +69,7 @@ asmlinkage void __cpuinit start_secondary(void)
+ unsigned int cpu;
+ struct mm_struct *mm = &init_mm;
+
++ enable_mmu();
+ atomic_inc(&mm->mm_count);
+ atomic_inc(&mm->mm_users);
+ current->active_mm = mm;
+diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
+index 7e3dfd9..e608f39 100644
+--- a/arch/sparc/kernel/ptrace_32.c
++++ b/arch/sparc/kernel/ptrace_32.c
+@@ -65,6 +65,7 @@ static int genregs32_get(struct task_struct *target,
+ *k++ = regs->u_regs[pos++];
+
+ reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
++ reg_window -= 16;
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(*k++, &reg_window[pos++]))
+ return -EFAULT;
+@@ -76,6 +77,7 @@ static int genregs32_get(struct task_struct *target,
+ }
+
+ reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
++ reg_window -= 16;
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(reg, &reg_window[pos++]) ||
+ put_user(reg, u++))
+@@ -141,6 +143,7 @@ static int genregs32_set(struct task_struct *target,
+ regs->u_regs[pos++] = *k++;
+
+ reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
++ reg_window -= 16;
+ for (; count > 0 && pos < 32; count--) {
+ if (put_user(*k++, &reg_window[pos++]))
+ return -EFAULT;
+@@ -153,6 +156,7 @@ static int genregs32_set(struct task_struct *target,
+ }
+
+ reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
++ reg_window -= 16;
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(reg, u++) ||
+ put_user(reg, &reg_window[pos++]))
+diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
+index 2f6524d..aa90da0 100644
+--- a/arch/sparc/kernel/ptrace_64.c
++++ b/arch/sparc/kernel/ptrace_64.c
+@@ -492,6 +492,7 @@ static int genregs32_get(struct task_struct *target,
+ *k++ = regs->u_regs[pos++];
+
+ reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
++ reg_window -= 16;
+ if (target == current) {
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(*k++, &reg_window[pos++]))
+@@ -516,6 +517,7 @@ static int genregs32_get(struct task_struct *target,
+ }
+
+ reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
++ reg_window -= 16;
+ if (target == current) {
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(reg, &reg_window[pos++]) ||
+@@ -599,6 +601,7 @@ static int genregs32_set(struct task_struct *target,
+ regs->u_regs[pos++] = *k++;
+
+ reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
++ reg_window -= 16;
+ if (target == current) {
+ for (; count > 0 && pos < 32; count--) {
+ if (put_user(*k++, &reg_window[pos++]))
+@@ -625,6 +628,7 @@ static int genregs32_set(struct task_struct *target,
+ }
+
+ reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
++ reg_window -= 16;
+ if (target == current) {
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(reg, u++) ||
+diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
+index 2201e9c..c1ea9eb 100644
+--- a/arch/um/sys-x86_64/Makefile
++++ b/arch/um/sys-x86_64/Makefile
+@@ -8,7 +8,8 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
+ setjmp.o signal.o stub.o stub_segv.o syscalls.o syscall_table.o \
+ sysrq.o ksyms.o tls.o
+
+-subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
++subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \
++ lib/rwsem_64.o
+ subarch-obj-$(CONFIG_MODULES) += kernel/module.o
+
+ ldt-y = ../sys-i386/ldt.o
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index f20ddf8..a198293 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -319,7 +319,7 @@ config X86_L1_CACHE_SHIFT
+
+ config X86_XADD
+ def_bool y
+- depends on X86_32 && !M386
++ depends on X86_64 || !M386
+
+ config X86_PPRO_FENCE
+ bool "PentiumPro memory ordering errata workaround"
+diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
+index ca7517d..606ede1 100644
+--- a/arch/x86/include/asm/rwsem.h
++++ b/arch/x86/include/asm/rwsem.h
+@@ -41,6 +41,7 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/lockdep.h>
++#include <asm/asm.h>
+
+ struct rwsem_waiter;
+
+@@ -55,17 +56,28 @@ extern asmregparm struct rw_semaphore *
+
+ /*
+ * the semaphore definition
++ *
++ * The bias values and the counter type limits the number of
++ * potential readers/writers to 32767 for 32 bits and 2147483647
++ * for 64 bits.
+ */
+
+-#define RWSEM_UNLOCKED_VALUE 0x00000000
+-#define RWSEM_ACTIVE_BIAS 0x00000001
+-#define RWSEM_ACTIVE_MASK 0x0000ffff
+-#define RWSEM_WAITING_BIAS (-0x00010000)
++#ifdef CONFIG_X86_64
++# define RWSEM_ACTIVE_MASK 0xffffffffL
++#else
++# define RWSEM_ACTIVE_MASK 0x0000ffffL
++#endif
++
++#define RWSEM_UNLOCKED_VALUE 0x00000000L
++#define RWSEM_ACTIVE_BIAS 0x00000001L
++#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
+ #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
+ #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
++typedef signed long rwsem_count_t;
++
+ struct rw_semaphore {
+- signed long count;
++ rwsem_count_t count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+@@ -105,7 +117,7 @@ do { \
+ static inline void __down_read(struct rw_semaphore *sem)
+ {
+ asm volatile("# beginning down_read\n\t"
+- LOCK_PREFIX " incl (%%eax)\n\t"
++ LOCK_PREFIX _ASM_INC "(%1)\n\t"
+ /* adds 0x00000001, returns the old value */
+ " jns 1f\n"
+ " call call_rwsem_down_read_failed\n"
+@@ -121,14 +133,14 @@ static inline void __down_read(struct rw_semaphore *sem)
+ */
+ static inline int __down_read_trylock(struct rw_semaphore *sem)
+ {
+- __s32 result, tmp;
++ rwsem_count_t result, tmp;
+ asm volatile("# beginning __down_read_trylock\n\t"
+- " movl %0,%1\n\t"
++ " mov %0,%1\n\t"
+ "1:\n\t"
+- " movl %1,%2\n\t"
+- " addl %3,%2\n\t"
++ " mov %1,%2\n\t"
++ " add %3,%2\n\t"
+ " jle 2f\n\t"
+- LOCK_PREFIX " cmpxchgl %2,%0\n\t"
++ LOCK_PREFIX " cmpxchg %2,%0\n\t"
+ " jnz 1b\n\t"
+ "2:\n\t"
+ "# ending __down_read_trylock\n\t"
+@@ -143,13 +155,13 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
+ */
+ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+ {
+- int tmp;
++ rwsem_count_t tmp;
+
+ tmp = RWSEM_ACTIVE_WRITE_BIAS;
+ asm volatile("# beginning down_write\n\t"
+- LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
++ LOCK_PREFIX " xadd %1,(%2)\n\t"
+ /* subtract 0x0000ffff, returns the old value */
+- " testl %%edx,%%edx\n\t"
++ " test %1,%1\n\t"
+ /* was the count 0 before? */
+ " jz 1f\n"
+ " call call_rwsem_down_write_failed\n"
+@@ -170,9 +182,9 @@ static inline void __down_write(struct rw_semaphore *sem)
+ */
+ static inline int __down_write_trylock(struct rw_semaphore *sem)
+ {
+- signed long ret = cmpxchg(&sem->count,
+- RWSEM_UNLOCKED_VALUE,
+- RWSEM_ACTIVE_WRITE_BIAS);
++ rwsem_count_t ret = cmpxchg(&sem->count,
++ RWSEM_UNLOCKED_VALUE,
++ RWSEM_ACTIVE_WRITE_BIAS);
+ if (ret == RWSEM_UNLOCKED_VALUE)
+ return 1;
+ return 0;
+@@ -183,9 +195,9 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
+ */
+ static inline void __up_read(struct rw_semaphore *sem)
+ {
+- __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
++ rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
+ asm volatile("# beginning __up_read\n\t"
+- LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
++ LOCK_PREFIX " xadd %1,(%2)\n\t"
+ /* subtracts 1, returns the old value */
+ " jns 1f\n\t"
+ " call call_rwsem_wake\n"
+@@ -201,18 +213,18 @@ static inline void __up_read(struct rw_semaphore *sem)
+ */
+ static inline void __up_write(struct rw_semaphore *sem)
+ {
++ rwsem_count_t tmp;
+ asm volatile("# beginning __up_write\n\t"
+- " movl %2,%%edx\n\t"
+- LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t"
++ LOCK_PREFIX " xadd %1,(%2)\n\t"
+ /* tries to transition
+ 0xffff0001 -> 0x00000000 */
+ " jz 1f\n"
+ " call call_rwsem_wake\n"
+ "1:\n\t"
+ "# ending __up_write\n"
+- : "+m" (sem->count)
+- : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
+- : "memory", "cc", "edx");
++ : "+m" (sem->count), "=d" (tmp)
++ : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
++ : "memory", "cc");
+ }
+
+ /*
+@@ -221,33 +233,38 @@ static inline void __up_write(struct rw_semaphore *sem)
+ static inline void __downgrade_write(struct rw_semaphore *sem)
+ {
+ asm volatile("# beginning __downgrade_write\n\t"
+- LOCK_PREFIX " addl %2,(%%eax)\n\t"
+- /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
++ LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
++ /*
++ * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
++ * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
++ */
+ " jns 1f\n\t"
+ " call call_rwsem_downgrade_wake\n"
+ "1:\n\t"
+ "# ending __downgrade_write\n"
+ : "+m" (sem->count)
+- : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
++ : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
+ : "memory", "cc");
+ }
+
+ /*
+ * implement atomic add functionality
+ */
+-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
++static inline void rwsem_atomic_add(rwsem_count_t delta,
++ struct rw_semaphore *sem)
+ {
+- asm volatile(LOCK_PREFIX "addl %1,%0"
++ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
+ : "+m" (sem->count)
+- : "ir" (delta));
++ : "er" (delta));
+ }
+
+ /*
+ * implement exchange and add functionality
+ */
+-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
++static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
++ struct rw_semaphore *sem)
+ {
+- int tmp = delta;
++ rwsem_count_t tmp = delta;
+
+ asm volatile(LOCK_PREFIX "xadd %0,%1"
+ : "+r" (tmp), "+m" (sem->count)
+diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
+index 1e79678..4cfc908 100644
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -135,6 +135,8 @@ int native_cpu_disable(void);
+ void native_cpu_die(unsigned int cpu);
+ void native_play_dead(void);
+ void play_dead_common(void);
++void wbinvd_on_cpu(int cpu);
++int wbinvd_on_all_cpus(void);
+
+ void native_send_call_func_ipi(const struct cpumask *mask);
+ void native_send_call_func_single_ipi(int cpu);
+@@ -147,6 +149,13 @@ static inline int num_booting_cpus(void)
+ {
+ return cpumask_weight(cpu_callout_mask);
+ }
++#else /* !CONFIG_SMP */
++#define wbinvd_on_cpu(cpu) wbinvd()
++static inline int wbinvd_on_all_cpus(void)
++{
++ wbinvd();
++ return 0;
++}
+ #endif /* CONFIG_SMP */
+
+ extern unsigned disabled_cpus __cpuinitdata;
+diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
+index adb0ba0..2e77516 100644
+--- a/arch/x86/kernel/amd_iommu.c
++++ b/arch/x86/kernel/amd_iommu.c
+@@ -2298,7 +2298,7 @@ static void cleanup_domain(struct protection_domain *domain)
+ list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
+ struct device *dev = dev_data->dev;
+
+- do_detach(dev);
++ __detach_device(dev);
+ atomic_set(&dev_data->bind, 0);
+ }
+
+@@ -2379,9 +2379,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
+
+ free_pagetable(domain);
+
+- domain_id_free(domain->id);
+-
+- kfree(domain);
++ protection_domain_free(domain);
+
+ dom->priv = NULL;
+ }
+diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
+index 9dc91b4..883d619 100644
+--- a/arch/x86/kernel/amd_iommu_init.c
++++ b/arch/x86/kernel/amd_iommu_init.c
+@@ -1288,6 +1288,8 @@ static int __init amd_iommu_init(void)
+ if (ret)
+ goto free;
+
++ enable_iommus();
++
+ if (iommu_pass_through)
+ ret = amd_iommu_init_passthrough();
+ else
+@@ -1300,8 +1302,6 @@ static int __init amd_iommu_init(void)
+
+ amd_iommu_init_notifier();
+
+- enable_iommus();
+-
+ if (iommu_pass_through)
+ goto out;
+
+@@ -1315,6 +1315,7 @@ out:
+ return ret;
+
+ free:
++ disable_iommus();
+
+ amd_iommu_uninit_devices();
+
+diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
+index f147a95..19f2c70 100644
+--- a/arch/x86/kernel/aperture_64.c
++++ b/arch/x86/kernel/aperture_64.c
+@@ -394,6 +394,7 @@ void __init gart_iommu_hole_init(void)
+ for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+ int bus;
+ int dev_base, dev_limit;
++ u32 ctl;
+
+ bus = bus_dev_ranges[i].bus;
+ dev_base = bus_dev_ranges[i].dev_base;
+@@ -407,7 +408,19 @@ void __init gart_iommu_hole_init(void)
+ gart_iommu_aperture = 1;
+ x86_init.iommu.iommu_init = gart_iommu_init;
+
+- aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
++ ctl = read_pci_config(bus, slot, 3,
++ AMD64_GARTAPERTURECTL);
++
++ /*
++ * Before we do anything else disable the GART. It may
++ * still be enabled if we boot into a crash-kernel here.
++ * Reconfiguring the GART while it is enabled could have
++ * unknown side-effects.
++ */
++ ctl &= ~GARTEN;
++ write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
++
++ aper_order = (ctl >> 1) & 7;
+ aper_size = (32 * 1024 * 1024) << aper_order;
+ aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
+ aper_base <<= 25;
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index dfca210..d4df517 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1640,8 +1640,10 @@ int __init APIC_init_uniprocessor(void)
+ }
+ #endif
+
++#ifndef CONFIG_SMP
+ enable_IR_x2apic();
+ default_setup_apic_routing();
++#endif
+
+ verify_local_APIC();
+ connect_bsp_APIC();
+diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
+index fc6c8ef..d440123 100644
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -18,6 +18,7 @@
+ #include <asm/processor.h>
+ #include <linux/smp.h>
+ #include <asm/k8.h>
++#include <asm/smp.h>
+
+ #define LVL_1_INST 1
+ #define LVL_1_DATA 2
+@@ -150,7 +151,8 @@ struct _cpuid4_info {
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
+ unsigned long size;
+- unsigned long can_disable;
++ bool can_disable;
++ unsigned int l3_indices;
+ DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+ };
+
+@@ -160,7 +162,8 @@ struct _cpuid4_info_regs {
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
+ unsigned long size;
+- unsigned long can_disable;
++ bool can_disable;
++ unsigned int l3_indices;
+ };
+
+ unsigned short num_cache_leaves;
+@@ -290,6 +293,36 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
+ (ebx->split.ways_of_associativity + 1) - 1;
+ }
+
++struct _cache_attr {
++ struct attribute attr;
++ ssize_t (*show)(struct _cpuid4_info *, char *);
++ ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
++};
++
++#ifdef CONFIG_CPU_SUP_AMD
++static unsigned int __cpuinit amd_calc_l3_indices(void)
++{
++ /*
++ * We're called over smp_call_function_single() and therefore
++ * are on the correct cpu.
++ */
++ int cpu = smp_processor_id();
++ int node = cpu_to_node(cpu);
++ struct pci_dev *dev = node_to_k8_nb_misc(node);
++ unsigned int sc0, sc1, sc2, sc3;
++ u32 val = 0;
++
++ pci_read_config_dword(dev, 0x1C4, &val);
++
++ /* calculate subcache sizes */
++ sc0 = !(val & BIT(0));
++ sc1 = !(val & BIT(4));
++ sc2 = !(val & BIT(8)) + !(val & BIT(9));
++ sc3 = !(val & BIT(12)) + !(val & BIT(13));
++
++ return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
++}
++
+ static void __cpuinit
+ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
+ {
+@@ -299,12 +332,103 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
+ if (boot_cpu_data.x86 == 0x11)
+ return;
+
+- /* see erratum #382 */
+- if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
++ /* see errata #382 and #388 */
++ if ((boot_cpu_data.x86 == 0x10) &&
++ ((boot_cpu_data.x86_model < 0x8) ||
++ (boot_cpu_data.x86_mask < 0x1)))
+ return;
+
+- this_leaf->can_disable = 1;
++ this_leaf->can_disable = true;
++ this_leaf->l3_indices = amd_calc_l3_indices();
++}
++
++static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
++ unsigned int index)
++{
++ int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
++ int node = amd_get_nb_id(cpu);
++ struct pci_dev *dev = node_to_k8_nb_misc(node);
++ unsigned int reg = 0;
++
++ if (!this_leaf->can_disable)
++ return -EINVAL;
++
++ if (!dev)
++ return -EINVAL;
++
++ pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
++ return sprintf(buf, "0x%08x\n", reg);
++}
++
++#define SHOW_CACHE_DISABLE(index) \
++static ssize_t \
++show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
++{ \
++ return show_cache_disable(this_leaf, buf, index); \
++}
++SHOW_CACHE_DISABLE(0)
++SHOW_CACHE_DISABLE(1)
++
++static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
++ const char *buf, size_t count, unsigned int index)
++{
++ int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
++ int node = amd_get_nb_id(cpu);
++ struct pci_dev *dev = node_to_k8_nb_misc(node);
++ unsigned long val = 0;
++
++#define SUBCACHE_MASK (3UL << 20)
++#define SUBCACHE_INDEX 0xfff
++
++ if (!this_leaf->can_disable)
++ return -EINVAL;
++
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ if (!dev)
++ return -EINVAL;
++
++ if (strict_strtoul(buf, 10, &val) < 0)
++ return -EINVAL;
++
++ /* do not allow writes outside of allowed bits */
++ if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
++ ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
++ return -EINVAL;
++
++ val |= BIT(30);
++ pci_write_config_dword(dev, 0x1BC + index * 4, val);
++ /*
++ * We need to WBINVD on a core on the node containing the L3 cache which
++ * indices we disable therefore a simple wbinvd() is not sufficient.
++ */
++ wbinvd_on_cpu(cpu);
++ pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
++ return count;
++}
++
++#define STORE_CACHE_DISABLE(index) \
++static ssize_t \
++store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
++ const char *buf, size_t count) \
++{ \
++ return store_cache_disable(this_leaf, buf, count, index); \
+ }
++STORE_CACHE_DISABLE(0)
++STORE_CACHE_DISABLE(1)
++
++static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
++ show_cache_disable_0, store_cache_disable_0);
++static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
++ show_cache_disable_1, store_cache_disable_1);
++
++#else /* CONFIG_CPU_SUP_AMD */
++static void __cpuinit
++amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
++{
++};
++#endif /* CONFIG_CPU_SUP_AMD */
+
+ static int
+ __cpuinit cpuid4_cache_lookup_regs(int index,
+@@ -711,82 +835,6 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
+ #define to_object(k) container_of(k, struct _index_kobject, kobj)
+ #define to_attr(a) container_of(a, struct _cache_attr, attr)
+
+-static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
+- unsigned int index)
+-{
+- int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+- int node = cpu_to_node(cpu);
+- struct pci_dev *dev = node_to_k8_nb_misc(node);
+- unsigned int reg = 0;
+-
+- if (!this_leaf->can_disable)
+- return -EINVAL;
+-
+- if (!dev)
+- return -EINVAL;
+-
+- pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
+- return sprintf(buf, "%x\n", reg);
+-}
+-
+-#define SHOW_CACHE_DISABLE(index) \
+-static ssize_t \
+-show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
+-{ \
+- return show_cache_disable(this_leaf, buf, index); \
+-}
+-SHOW_CACHE_DISABLE(0)
+-SHOW_CACHE_DISABLE(1)
+-
+-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
+- const char *buf, size_t count, unsigned int index)
+-{
+- int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+- int node = cpu_to_node(cpu);
+- struct pci_dev *dev = node_to_k8_nb_misc(node);
+- unsigned long val = 0;
+- unsigned int scrubber = 0;
+-
+- if (!this_leaf->can_disable)
+- return -EINVAL;
+-
+- if (!capable(CAP_SYS_ADMIN))
+- return -EPERM;
+-
+- if (!dev)
+- return -EINVAL;
+-
+- if (strict_strtoul(buf, 10, &val) < 0)
+- return -EINVAL;
+-
+- val |= 0xc0000000;
+-
+- pci_read_config_dword(dev, 0x58, &scrubber);
+- scrubber &= ~0x1f000000;
+- pci_write_config_dword(dev, 0x58, scrubber);
+-
+- pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
+- wbinvd();
+- pci_write_config_dword(dev, 0x1BC + index * 4, val);
+- return count;
+-}
+-
+-#define STORE_CACHE_DISABLE(index) \
+-static ssize_t \
+-store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
+- const char *buf, size_t count) \
+-{ \
+- return store_cache_disable(this_leaf, buf, count, index); \
+-}
+-STORE_CACHE_DISABLE(0)
+-STORE_CACHE_DISABLE(1)
+-
+-struct _cache_attr {
+- struct attribute attr;
+- ssize_t (*show)(struct _cpuid4_info *, char *);
+- ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
+-};
+-
+ #define define_one_ro(_name) \
+ static struct _cache_attr _name = \
+ __ATTR(_name, 0444, show_##_name, NULL)
+@@ -801,23 +849,28 @@ define_one_ro(size);
+ define_one_ro(shared_cpu_map);
+ define_one_ro(shared_cpu_list);
+
+-static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
+- show_cache_disable_0, store_cache_disable_0);
+-static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
+- show_cache_disable_1, store_cache_disable_1);
++#define DEFAULT_SYSFS_CACHE_ATTRS \
++ &type.attr, \
++ &level.attr, \
++ &coherency_line_size.attr, \
++ &physical_line_partition.attr, \
++ &ways_of_associativity.attr, \
++ &number_of_sets.attr, \
++ &size.attr, \
++ &shared_cpu_map.attr, \
++ &shared_cpu_list.attr
+
+ static struct attribute *default_attrs[] = {
+- &type.attr,
+- &level.attr,
+- &coherency_line_size.attr,
+- &physical_line_partition.attr,
+- &ways_of_associativity.attr,
+- &number_of_sets.attr,
+- &size.attr,
+- &shared_cpu_map.attr,
+- &shared_cpu_list.attr,
++ DEFAULT_SYSFS_CACHE_ATTRS,
++ NULL
++};
++
++static struct attribute *default_l3_attrs[] = {
++ DEFAULT_SYSFS_CACHE_ATTRS,
++#ifdef CONFIG_CPU_SUP_AMD
+ &cache_disable_0.attr,
+ &cache_disable_1.attr,
++#endif
+ NULL
+ };
+
+@@ -908,6 +961,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+ unsigned int cpu = sys_dev->id;
+ unsigned long i, j;
+ struct _index_kobject *this_object;
++ struct _cpuid4_info *this_leaf;
+ int retval;
+
+ retval = cpuid4_cache_sysfs_init(cpu);
+@@ -926,6 +980,14 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+ this_object = INDEX_KOBJECT_PTR(cpu, i);
+ this_object->cpu = cpu;
+ this_object->index = i;
++
++ this_leaf = CPUID4_INFO_IDX(cpu, i);
++
++ if (this_leaf->can_disable)
++ ktype_cache.default_attrs = default_l3_attrs;
++ else
++ ktype_cache.default_attrs = default_attrs;
++
+ retval = kobject_init_and_add(&(this_object->kobj),
+ &ktype_cache,
+ per_cpu(ici_cache_kobject, cpu),
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index 98819b3..c7ca8e2 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -245,6 +245,97 @@ static u64 __read_mostly hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+
++static const u64 westmere_hw_cache_event_ids
++ [PERF_COUNT_HW_CACHE_MAX]
++ [PERF_COUNT_HW_CACHE_OP_MAX]
++ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
++{
++ [ C(L1D) ] = {
++ [ C(OP_READ) ] = {
++ [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
++ [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
++ },
++ [ C(OP_WRITE) ] = {
++ [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */
++ [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
++ },
++ [ C(OP_PREFETCH) ] = {
++ [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
++ [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
++ },
++ },
++ [ C(L1I ) ] = {
++ [ C(OP_READ) ] = {
++ [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
++ [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
++ },
++ [ C(OP_WRITE) ] = {
++ [ C(RESULT_ACCESS) ] = -1,
++ [ C(RESULT_MISS) ] = -1,
++ },
++ [ C(OP_PREFETCH) ] = {
++ [ C(RESULT_ACCESS) ] = 0x0,
++ [ C(RESULT_MISS) ] = 0x0,
++ },
++ },
++ [ C(LL ) ] = {
++ [ C(OP_READ) ] = {
++ [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
++ [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
++ },
++ [ C(OP_WRITE) ] = {
++ [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
++ [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
++ },
++ [ C(OP_PREFETCH) ] = {
++ [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
++ [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
++ },
++ },
++ [ C(DTLB) ] = {
++ [ C(OP_READ) ] = {
++ [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
++ [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
++ },
++ [ C(OP_WRITE) ] = {
++ [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */
++ [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
++ },
++ [ C(OP_PREFETCH) ] = {
++ [ C(RESULT_ACCESS) ] = 0x0,
++ [ C(RESULT_MISS) ] = 0x0,
++ },
++ },
++ [ C(ITLB) ] = {
++ [ C(OP_READ) ] = {
++ [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
++ [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
++ },
++ [ C(OP_WRITE) ] = {
++ [ C(RESULT_ACCESS) ] = -1,
++ [ C(RESULT_MISS) ] = -1,
++ },
++ [ C(OP_PREFETCH) ] = {
++ [ C(RESULT_ACCESS) ] = -1,
++ [ C(RESULT_MISS) ] = -1,
++ },
++ },
++ [ C(BPU ) ] = {
++ [ C(OP_READ) ] = {
++ [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
++ [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
++ },
++ [ C(OP_WRITE) ] = {
++ [ C(RESULT_ACCESS) ] = -1,
++ [ C(RESULT_MISS) ] = -1,
++ },
++ [ C(OP_PREFETCH) ] = {
++ [ C(RESULT_ACCESS) ] = -1,
++ [ C(RESULT_MISS) ] = -1,
++ },
++ },
++};
++
+ static __initconst u64 nehalem_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+@@ -2118,6 +2209,7 @@ static __init int intel_pmu_init(void)
+ * Install the hw-cache-events table:
+ */
+ switch (boot_cpu_data.x86_model) {
++
+ case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
+ case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
+ case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
+@@ -2129,7 +2221,9 @@ static __init int intel_pmu_init(void)
+ event_constraints = intel_core_event_constraints;
+ break;
+ default:
+- case 26:
++ case 26: /* 45 nm nehalem, "Bloomfield" */
++ case 30: /* 45 nm nehalem, "Lynnfield" */
++ case 46: /* 45 nm nehalem-ex, "Beckton" */
+ memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+
+@@ -2142,6 +2236,14 @@ static __init int intel_pmu_init(void)
+
+ pr_cont("Atom events, ");
+ break;
++
++ case 37: /* 32 nm nehalem, "Clarkdale" */
++ case 44: /* 32 nm nehalem, "Gulftown" */
++ memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
++ sizeof(hw_cache_event_ids));
++
++ pr_cont("Westmere events, ");
++ break;
+ }
+ return 0;
+ }
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index a4849c1..ebd4c51 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -27,7 +27,6 @@
+ #include <asm/cpu.h>
+ #include <asm/reboot.h>
+ #include <asm/virtext.h>
+-#include <asm/x86_init.h>
+
+ #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
+
+@@ -103,10 +102,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
+ #ifdef CONFIG_HPET_TIMER
+ hpet_disable();
+ #endif
+-
+-#ifdef CONFIG_X86_64
+- x86_platform.iommu_shutdown();
+-#endif
+-
+ crash_save_cpu(regs, safe_smp_processor_id());
+ }
+diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
+index ad80a1c..773afc9 100644
+--- a/arch/x86/kernel/hpet.c
++++ b/arch/x86/kernel/hpet.c
+@@ -399,9 +399,15 @@ static int hpet_next_event(unsigned long delta,
+ * then we might have a real hardware problem. We can not do
+ * much about it here, but at least alert the user/admin with
+ * a prominent warning.
++ * An erratum on some chipsets (ICH9,..), results in comparator read
++ * immediately following a write returning old value. Workaround
++ * for this is to read this value second time, when first
++ * read returns old value.
+ */
+- WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
++ if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
++ WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
+ KERN_WARNING "hpet: compare register read back failed.\n");
++ }
+
+ return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
+ }
+diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
+index bfba601..b2258ca 100644
+--- a/arch/x86/kernel/kgdb.c
++++ b/arch/x86/kernel/kgdb.c
+@@ -618,8 +618,8 @@ int kgdb_arch_init(void)
+ * portion of kgdb because this operation requires mutexs to
+ * complete.
+ */
++ hw_breakpoint_init(&attr);
+ attr.bp_addr = (unsigned long)kgdb_arch_init;
+- attr.type = PERF_TYPE_BREAKPOINT;
+ attr.bp_len = HW_BREAKPOINT_LEN_1;
+ attr.bp_type = HW_BREAKPOINT_W;
+ attr.disabled = 1;
+diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
+index a2c1edd..e81030f 100644
+--- a/arch/x86/kernel/mpparse.c
++++ b/arch/x86/kernel/mpparse.c
+@@ -664,7 +664,7 @@ static void __init smp_reserve_memory(struct mpf_intel *mpf)
+ {
+ unsigned long size = get_mpc_size(mpf->physptr);
+
+- reserve_early(mpf->physptr, mpf->physptr+size, "MP-table mpc");
++ reserve_early_overlap_ok(mpf->physptr, mpf->physptr+size, "MP-table mpc");
+ }
+
+ static int __init smp_scan_config(unsigned long base, unsigned long length)
+@@ -693,7 +693,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
+ mpf, (u64)virt_to_phys(mpf));
+
+ mem = virt_to_phys(mpf);
+- reserve_early(mem, mem + sizeof(*mpf), "MP-table mpf");
++ reserve_early_overlap_ok(mem, mem + sizeof(*mpf), "MP-table mpf");
+ if (mpf->physptr)
+ smp_reserve_memory(mpf);
+
+diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
+index 34de53b..4f41b29 100644
+--- a/arch/x86/kernel/pci-gart_64.c
++++ b/arch/x86/kernel/pci-gart_64.c
+@@ -564,6 +564,9 @@ static void enable_gart_translations(void)
+
+ enable_gart_translation(dev, __pa(agp_gatt_table));
+ }
++
++ /* Flush the GART-TLB to remove stale entries */
++ k8_flush_garts();
+ }
+
+ /*
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 89a49fb..28c3d81 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -1502,8 +1502,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
+ for_each_sp(pages, sp, parents, i) {
+ kvm_mmu_zap_page(kvm, sp);
+ mmu_pages_clear_parents(&parents);
++ zapped++;
+ }
+- zapped += pages.nr;
+ kvm_mmu_pages_init(parent, &parents, &pages);
+ }
+
+@@ -1554,14 +1554,16 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
+ */
+
+ if (used_pages > kvm_nr_mmu_pages) {
+- while (used_pages > kvm_nr_mmu_pages) {
++ while (used_pages > kvm_nr_mmu_pages &&
++ !list_empty(&kvm->arch.active_mmu_pages)) {
+ struct kvm_mmu_page *page;
+
+ page = container_of(kvm->arch.active_mmu_pages.prev,
+ struct kvm_mmu_page, link);
+- kvm_mmu_zap_page(kvm, page);
++ used_pages -= kvm_mmu_zap_page(kvm, page);
+ used_pages--;
+ }
++ kvm_nr_mmu_pages = used_pages;
+ kvm->arch.n_free_mmu_pages = 0;
+ }
+ else
+@@ -1608,7 +1610,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
+ && !sp->role.invalid) {
+ pgprintk("%s: zap %lx %x\n",
+ __func__, gfn, sp->role.word);
+- kvm_mmu_zap_page(kvm, sp);
++ if (kvm_mmu_zap_page(kvm, sp))
++ nn = bucket->first;
+ }
+ }
+ }
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 1d9b338..d42e191 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -698,29 +698,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
+ if (err)
+ goto free_svm;
+
++ err = -ENOMEM;
+ page = alloc_page(GFP_KERNEL);
+- if (!page) {
+- err = -ENOMEM;
++ if (!page)
+ goto uninit;
+- }
+
+- err = -ENOMEM;
+ msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
+ if (!msrpm_pages)
+- goto uninit;
++ goto free_page1;
+
+ nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
+ if (!nested_msrpm_pages)
+- goto uninit;
+-
+- svm->msrpm = page_address(msrpm_pages);
+- svm_vcpu_init_msrpm(svm->msrpm);
++ goto free_page2;
+
+ hsave_page = alloc_page(GFP_KERNEL);
+ if (!hsave_page)
+- goto uninit;
++ goto free_page3;
++
+ svm->nested.hsave = page_address(hsave_page);
+
++ svm->msrpm = page_address(msrpm_pages);
++ svm_vcpu_init_msrpm(svm->msrpm);
++
+ svm->nested.msrpm = page_address(nested_msrpm_pages);
+
+ svm->vmcb = page_address(page);
+@@ -737,6 +736,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
+
+ return &svm->vcpu;
+
++free_page3:
++ __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
++free_page2:
++ __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
++free_page1:
++ __free_page(page);
+ uninit:
+ kvm_vcpu_uninit(&svm->vcpu);
+ free_svm:
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 8a8e139..3acbe19 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -61,6 +61,8 @@ module_param_named(unrestricted_guest,
+ static int __read_mostly emulate_invalid_guest_state = 0;
+ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
+
++#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
++
+ /*
+ * These 2 parameters are used to config the controls for Pause-Loop Exiting:
+ * ple_gap: upper bound on the amount of time between two successive
+@@ -115,7 +117,7 @@ struct vcpu_vmx {
+ } host_state;
+ struct {
+ int vm86_active;
+- u8 save_iopl;
++ ulong save_rflags;
+ struct kvm_save_segment {
+ u16 selector;
+ unsigned long base;
+@@ -787,18 +789,23 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
+
+ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
+ {
+- unsigned long rflags;
++ unsigned long rflags, save_rflags;
+
+ rflags = vmcs_readl(GUEST_RFLAGS);
+- if (to_vmx(vcpu)->rmode.vm86_active)
+- rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
++ if (to_vmx(vcpu)->rmode.vm86_active) {
++ rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
++ save_rflags = to_vmx(vcpu)->rmode.save_rflags;
++ rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
++ }
+ return rflags;
+ }
+
+ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+ {
+- if (to_vmx(vcpu)->rmode.vm86_active)
++ if (to_vmx(vcpu)->rmode.vm86_active) {
++ to_vmx(vcpu)->rmode.save_rflags = rflags;
+ rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
++ }
+ vmcs_writel(GUEST_RFLAGS, rflags);
+ }
+
+@@ -1431,8 +1438,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
+ vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
+
+ flags = vmcs_readl(GUEST_RFLAGS);
+- flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+- flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
++ flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
++ flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
+ vmcs_writel(GUEST_RFLAGS, flags);
+
+ vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
+@@ -1501,8 +1508,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
+ vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
+
+ flags = vmcs_readl(GUEST_RFLAGS);
+- vmx->rmode.save_iopl
+- = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
++ vmx->rmode.save_rflags = flags;
+
+ flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
+
+@@ -2719,6 +2725,12 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
+ kvm_queue_exception(vcpu, vec);
+ return 1;
+ case BP_VECTOR:
++ /*
++ * Update instruction length as we may reinject the exception
++ * from user space while in guest debugging mode.
++ */
++ to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
++ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+ return 0;
+ /* fall through */
+@@ -2841,6 +2853,13 @@ static int handle_exception(struct kvm_vcpu *vcpu)
+ kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
+ /* fall through */
+ case BP_VECTOR:
++ /*
++ * Update instruction length as we may reinject #BP from
++ * user space while in guest debugging mode. Reading it for
++ * #DB as well causes no harm, it is not used in that case.
++ */
++ vmx->vcpu.arch.event_exit_inst_len =
++ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ kvm_run->exit_reason = KVM_EXIT_DEBUG;
+ kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
+ kvm_run->debug.arch.exception = ex_no;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index e900908..dd78927 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -384,21 +384,16 @@ out:
+ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ {
+ if (cr0 & CR0_RESERVED_BITS) {
+- printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
+- cr0, vcpu->arch.cr0);
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+
+ if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
+- printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+
+ if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
+- printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
+- "and a clear PE flag\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+@@ -409,15 +404,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ int cs_db, cs_l;
+
+ if (!is_pae(vcpu)) {
+- printk(KERN_DEBUG "set_cr0: #GP, start paging "
+- "in long mode while PAE is disabled\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+ kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+ if (cs_l) {
+- printk(KERN_DEBUG "set_cr0: #GP, start paging "
+- "in long mode while CS.L == 1\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+
+@@ -425,8 +416,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ } else
+ #endif
+ if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
+- printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
+- "reserved bits\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+@@ -453,28 +442,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
+
+ if (cr4 & CR4_RESERVED_BITS) {
+- printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+
+ if (is_long_mode(vcpu)) {
+ if (!(cr4 & X86_CR4_PAE)) {
+- printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
+- "in long mode\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+ } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
+ && ((cr4 ^ old_cr4) & pdptr_bits)
+ && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
+- printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+
+ if (cr4 & X86_CR4_VMXE) {
+- printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+@@ -495,21 +479,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+
+ if (is_long_mode(vcpu)) {
+ if (cr3 & CR3_L_MODE_RESERVED_BITS) {
+- printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+ } else {
+ if (is_pae(vcpu)) {
+ if (cr3 & CR3_PAE_RESERVED_BITS) {
+- printk(KERN_DEBUG
+- "set_cr3: #GP, reserved bits\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+ if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
+- printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
+- "reserved bits\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+@@ -541,7 +520,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
+ void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
+ {
+ if (cr8 & CR8_RESERVED_BITS) {
+- printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+@@ -595,15 +573,12 @@ static u32 emulated_msrs[] = {
+ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
+ {
+ if (efer & efer_reserved_bits) {
+- printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
+- efer);
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+
+ if (is_paging(vcpu)
+ && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
+- printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+@@ -613,7 +588,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
+
+ feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
+ if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
+- printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+@@ -624,7 +598,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
+
+ feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
+ if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
+- printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
+ kvm_inject_gp(vcpu, 0);
+ return;
+ }
+@@ -913,9 +886,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+ if (msr >= MSR_IA32_MC0_CTL &&
+ msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
+ u32 offset = msr - MSR_IA32_MC0_CTL;
+- /* only 0 or all 1s can be written to IA32_MCi_CTL */
++ /* only 0 or all 1s can be written to IA32_MCi_CTL
++ * some Linux kernels though clear bit 10 in bank 4 to
++ * workaround a BIOS/GART TBL issue on AMD K8s, ignore
++ * this to avoid an uncatched #GP in the guest
++ */
+ if ((offset & 0x3) == 0 &&
+- data != 0 && data != ~(u64)0)
++ data != 0 && (data | (1 << 10)) != ~(u64)0)
+ return -1;
+ vcpu->arch.mce_banks[offset] = data;
+ break;
+@@ -2366,7 +2343,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
+ {
+ int r;
+- int n;
++ unsigned long n;
+ struct kvm_memory_slot *memslot;
+ int is_dirty = 0;
+
+@@ -2382,7 +2359,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ kvm_mmu_slot_remove_write_access(kvm, log->slot);
+ spin_unlock(&kvm->mmu_lock);
+ memslot = &kvm->memslots[log->slot];
+- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
++ n = kvm_dirty_bitmap_bytes(memslot);
+ memset(memslot->dirty_bitmap, 0, n);
+ }
+ r = 0;
+@@ -4599,6 +4576,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
+ int ret = 0;
+ u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
+ u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
++ u32 desc_limit;
+
+ old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
+
+@@ -4621,7 +4599,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
+ }
+ }
+
+- if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
++ desc_limit = get_desc_limit(&nseg_desc);
++ if (!nseg_desc.p ||
++ ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
++ desc_limit < 0x2b)) {
+ kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
+ return 1;
+ }
1588 +diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
1589 +index cffd754..ddef409 100644
1590 +--- a/arch/x86/lib/Makefile
1591 ++++ b/arch/x86/lib/Makefile
1592 +@@ -14,7 +14,7 @@ $(obj)/inat.o: $(obj)/inat-tables.c
1593 +
1594 + clean-files := inat-tables.c
1595 +
1596 +-obj-$(CONFIG_SMP) += msr-smp.o
1597 ++obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
1598 +
1599 + lib-y := delay.o
1600 + lib-y += thunk_$(BITS).o
1601 +@@ -39,4 +39,5 @@ else
1602 + lib-y += thunk_64.o clear_page_64.o copy_page_64.o
1603 + lib-y += memmove_64.o memset_64.o
1604 + lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
1605 ++ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o
1606 + endif
1607 +diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
1608 +new file mode 100644
1609 +index 0000000..a3c6688
1610 +--- /dev/null
1611 ++++ b/arch/x86/lib/cache-smp.c
1612 +@@ -0,0 +1,19 @@
1613 ++#include <linux/smp.h>
1614 ++#include <linux/module.h>
1615 ++
1616 ++static void __wbinvd(void *dummy)
1617 ++{
1618 ++ wbinvd();
1619 ++}
1620 ++
1621 ++void wbinvd_on_cpu(int cpu)
1622 ++{
1623 ++ smp_call_function_single(cpu, __wbinvd, NULL, 1);
1624 ++}
1625 ++EXPORT_SYMBOL(wbinvd_on_cpu);
1626 ++
1627 ++int wbinvd_on_all_cpus(void)
1628 ++{
1629 ++ return on_each_cpu(__wbinvd, NULL, 1);
1630 ++}
1631 ++EXPORT_SYMBOL(wbinvd_on_all_cpus);
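
cache-smp.c is a thin wrapper over the generic cross-CPU call API; a sketch of how module code would use that same API (illustrative only, assuming the 2.6.33 signatures):

    #include <linux/smp.h>
    #include <asm/atomic.h>

    static atomic_t visits = ATOMIC_INIT(0);

    static void count_visit(void *unused)
    {
            atomic_inc(&visits);            /* runs on each target CPU */
    }

    static void demo_cross_cpu_call(void)
    {
            on_each_cpu(count_visit, NULL, 1);                 /* all CPUs, wait */
            smp_call_function_single(0, count_visit, NULL, 1); /* CPU 0 only, wait */
    }
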
1632 +diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
1633 +new file mode 100644
1634 +index 0000000..15acecf
1635 +--- /dev/null
1636 ++++ b/arch/x86/lib/rwsem_64.S
1637 +@@ -0,0 +1,81 @@
1638 ++/*
1639 ++ * x86-64 rwsem wrappers
1640 ++ *
1641 ++ * This interfaces the inline asm code to the slow-path
1642 ++ * C routines. We need to save the call-clobbered regs
1643 ++ * that the asm does not mark as clobbered, and move the
1644 ++ * argument from %rax to %rdi.
1645 ++ *
1646 ++ * NOTE! We don't need to save %rax, because the functions
1647 ++ * will always return the semaphore pointer in %rax (which
1648 ++ * is also the input argument to these helpers)
1649 ++ *
1650 ++ * The following can clobber %rdx because the asm clobbers it:
1651 ++ * call_rwsem_down_write_failed
1652 ++ * call_rwsem_wake
1653 ++ * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
1654 ++ */
1655 ++
1656 ++#include <linux/linkage.h>
1657 ++#include <asm/rwlock.h>
1658 ++#include <asm/alternative-asm.h>
1659 ++#include <asm/frame.h>
1660 ++#include <asm/dwarf2.h>
1661 ++
1662 ++#define save_common_regs \
1663 ++ pushq %rdi; \
1664 ++ pushq %rsi; \
1665 ++ pushq %rcx; \
1666 ++ pushq %r8; \
1667 ++ pushq %r9; \
1668 ++ pushq %r10; \
1669 ++ pushq %r11
1670 ++
1671 ++#define restore_common_regs \
1672 ++ popq %r11; \
1673 ++ popq %r10; \
1674 ++ popq %r9; \
1675 ++ popq %r8; \
1676 ++ popq %rcx; \
1677 ++ popq %rsi; \
1678 ++ popq %rdi
1679 ++
1680 ++/* Fix up special calling conventions */
1681 ++ENTRY(call_rwsem_down_read_failed)
1682 ++ save_common_regs
1683 ++ pushq %rdx
1684 ++ movq %rax,%rdi
1685 ++ call rwsem_down_read_failed
1686 ++ popq %rdx
1687 ++ restore_common_regs
1688 ++ ret
1689 ++ ENDPROC(call_rwsem_down_read_failed)
1690 ++
1691 ++ENTRY(call_rwsem_down_write_failed)
1692 ++ save_common_regs
1693 ++ movq %rax,%rdi
1694 ++ call rwsem_down_write_failed
1695 ++ restore_common_regs
1696 ++ ret
1697 ++ ENDPROC(call_rwsem_down_write_failed)
1698 ++
1699 ++ENTRY(call_rwsem_wake)
1700 ++ decw %dx /* do nothing if still outstanding active readers */
1701 ++ jnz 1f
1702 ++ save_common_regs
1703 ++ movq %rax,%rdi
1704 ++ call rwsem_wake
1705 ++ restore_common_regs
1706 ++1: ret
1707 ++ ENDPROC(call_rwsem_wake)
1708 ++
1709 ++/* Fix up special calling conventions */
1710 ++ENTRY(call_rwsem_downgrade_wake)
1711 ++ save_common_regs
1712 ++ pushq %rdx
1713 ++ movq %rax,%rdi
1714 ++ call rwsem_downgrade_wake
1715 ++ popq %rdx
1716 ++ restore_common_regs
1717 ++ ret
1718 ++ ENDPROC(call_rwsem_downgrade_wake)
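
For reference, the C slow paths these stubs branch to follow the regular ABI, taking the semaphore in %rdi and returning it in %rax, which is why each wrapper moves %rax to %rdi and never saves %rax itself. Roughly:

    /* Prototypes of the wrapped slow paths (as in lib/rwsem.c). */
    struct rw_semaphore;
    extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
    extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
    extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
    extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
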
1719 +diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
1720 +index 0696d50..b02f6d8 100644
1721 +--- a/arch/x86/pci/irq.c
1722 ++++ b/arch/x86/pci/irq.c
1723 +@@ -590,6 +590,8 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
1724 + case PCI_DEVICE_ID_INTEL_ICH10_1:
1725 + case PCI_DEVICE_ID_INTEL_ICH10_2:
1726 + case PCI_DEVICE_ID_INTEL_ICH10_3:
1727 ++ case PCI_DEVICE_ID_INTEL_CPT_LPC1:
1728 ++ case PCI_DEVICE_ID_INTEL_CPT_LPC2:
1729 + r->name = "PIIX/ICH";
1730 + r->get = pirq_piix_get;
1731 + r->set = pirq_piix_set;
1732 +diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
1733 +index b641388..ad47dae 100644
1734 +--- a/arch/x86/power/hibernate_asm_32.S
1735 ++++ b/arch/x86/power/hibernate_asm_32.S
1736 +@@ -27,10 +27,17 @@ ENTRY(swsusp_arch_suspend)
1737 + ret
1738 +
1739 + ENTRY(restore_image)
1740 ++ movl mmu_cr4_features, %ecx
1741 + movl resume_pg_dir, %eax
1742 + subl $__PAGE_OFFSET, %eax
1743 + movl %eax, %cr3
1744 +
1745 ++ jecxz 1f # cr4 Pentium and higher, skip if zero
1746 ++ andl $~(X86_CR4_PGE), %ecx
1747 ++ movl %ecx, %cr4; # turn off PGE
1748 ++ movl %cr3, %eax; # flush TLB
1749 ++ movl %eax, %cr3
1750 ++1:
1751 + movl restore_pblist, %edx
1752 + .p2align 4,,7
1753 +
1754 +@@ -54,16 +61,8 @@ done:
1755 + movl $swapper_pg_dir, %eax
1756 + subl $__PAGE_OFFSET, %eax
1757 + movl %eax, %cr3
1758 +- /* Flush TLB, including "global" things (vmalloc) */
1759 + movl mmu_cr4_features, %ecx
1760 + jecxz 1f # cr4 Pentium and higher, skip if zero
1761 +- movl %ecx, %edx
1762 +- andl $~(X86_CR4_PGE), %edx
1763 +- movl %edx, %cr4; # turn off PGE
1764 +-1:
1765 +- movl %cr3, %eax; # flush TLB
1766 +- movl %eax, %cr3
1767 +- jecxz 1f # cr4 Pentium and higher, skip if zero
1768 + movl %ecx, %cr4; # turn PGE back on
1769 + 1:
1770 +
1771 +diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
1772 +index 52fec07..83b6252 100644
1773 +--- a/drivers/acpi/acpica/exprep.c
1774 ++++ b/drivers/acpi/acpica/exprep.c
1775 +@@ -468,6 +468,23 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
1776 +
1777 + acpi_ut_add_reference(obj_desc->field.region_obj);
1778 +
1779 ++ /* allow full data read from EC address space */
1780 ++ if (obj_desc->field.region_obj->region.space_id ==
1781 ++ ACPI_ADR_SPACE_EC) {
1782 ++ if (obj_desc->common_field.bit_length > 8) {
1783 ++ unsigned width =
1784 ++ ACPI_ROUND_BITS_UP_TO_BYTES(
1785 ++ obj_desc->common_field.bit_length);
1786 ++ /* access_bit_width is u8, don't overflow it */
1787 ++ if (width > 8)
1788 ++ width = 8;
1789 ++ obj_desc->common_field.access_byte_width =
1790 ++ width;
1791 ++ obj_desc->common_field.access_bit_width =
1792 ++ 8 * width;
1793 ++ }
1794 ++ }
1795 ++
1796 + ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
1797 + "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
1798 + obj_desc->field.start_field_bit_offset,
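
Worked through, the clamp above caps the computed access width at the largest value the u8 access_bit_width field can hold. A compilable model (the macro mirrors ACPI_ROUND_BITS_UP_TO_BYTES; the rest is illustrative):

    #include <stdio.h>

    #define ROUND_BITS_UP_TO_BYTES(b) (((b) + 7) / 8)

    int main(void)
    {
            unsigned lens[] = { 16, 100, 2048 }, i;

            for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
                    unsigned width = ROUND_BITS_UP_TO_BYTES(lens[i]);

                    if (width > 8)  /* access_bit_width is a u8: cap at 64 bits */
                            width = 8;
                    printf("%4u-bit field -> %u-byte (%u-bit) access\n",
                           lens[i], width, 8 * width);
            }
            return 0;
    }
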
1799 +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
1800 +index d6471bb..fc67d11 100644
1801 +--- a/drivers/acpi/ec.c
1802 ++++ b/drivers/acpi/ec.c
1803 +@@ -589,12 +589,12 @@ static u32 acpi_ec_gpe_handler(void *data)
1804 +
1805 + static acpi_status
1806 + acpi_ec_space_handler(u32 function, acpi_physical_address address,
1807 +- u32 bits, acpi_integer *value,
1808 ++ u32 bits, acpi_integer *value64,
1809 + void *handler_context, void *region_context)
1810 + {
1811 + struct acpi_ec *ec = handler_context;
1812 +- int result = 0, i;
1813 +- u8 temp = 0;
1814 ++ int result = 0, i, bytes = bits / 8;
1815 ++ u8 *value = (u8 *)value64;
1816 +
1817 + if ((address > 0xFF) || !value || !handler_context)
1818 + return AE_BAD_PARAMETER;
1819 +@@ -602,32 +602,15 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
1820 + if (function != ACPI_READ && function != ACPI_WRITE)
1821 + return AE_BAD_PARAMETER;
1822 +
1823 +- if (bits != 8 && acpi_strict)
1824 +- return AE_BAD_PARAMETER;
1825 +-
1826 +- if (EC_FLAGS_MSI)
1827 ++ if (EC_FLAGS_MSI || bits > 8)
1828 + acpi_ec_burst_enable(ec);
1829 +
1830 +- if (function == ACPI_READ) {
1831 +- result = acpi_ec_read(ec, address, &temp);
1832 +- *value = temp;
1833 +- } else {
1834 +- temp = 0xff & (*value);
1835 +- result = acpi_ec_write(ec, address, temp);
1836 +- }
1837 +-
1838 +- for (i = 8; unlikely(bits - i > 0); i += 8) {
1839 +- ++address;
1840 +- if (function == ACPI_READ) {
1841 +- result = acpi_ec_read(ec, address, &temp);
1842 +- (*value) |= ((acpi_integer)temp) << i;
1843 +- } else {
1844 +- temp = 0xff & ((*value) >> i);
1845 +- result = acpi_ec_write(ec, address, temp);
1846 +- }
1847 +- }
1848 ++ for (i = 0; i < bytes; ++i, ++address, ++value)
1849 ++ result = (function == ACPI_READ) ?
1850 ++ acpi_ec_read(ec, address, value) :
1851 ++ acpi_ec_write(ec, address, *value);
1852 +
1853 +- if (EC_FLAGS_MSI)
1854 ++ if (EC_FLAGS_MSI || bits > 8)
1855 + acpi_ec_burst_disable(ec);
1856 +
1857 + switch (result) {
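
The rewritten handler above treats the 64-bit ACPI value as a plain byte buffer and moves one byte per EC transaction. A userspace model of that loop (names illustrative; assumes little-endian layout, as on x86):

    #include <stdint.h>

    static void ec_space_model(uint8_t *ec, uint64_t *value64,
                               unsigned address, unsigned bits, int is_write)
    {
            uint8_t *value = (uint8_t *)value64;
            unsigned i, bytes = bits / 8;

            for (i = 0; i < bytes; ++i, ++address, ++value) {
                    if (is_write)
                            ec[address] = *value;     /* one EC write per byte */
                    else
                            *value = ec[address];     /* one EC read per byte */
            }
    }
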
1858 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
1859 +index 9e2feb6..462200d 100644
1860 +--- a/drivers/ata/ahci.c
1861 ++++ b/drivers/ata/ahci.c
1862 +@@ -570,6 +570,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
1863 + { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
1864 + { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
1865 + { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
1866 ++ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
1867 ++ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
1868 ++ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
1869 ++ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
1870 ++ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
1871 ++ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
1872 +
1873 + /* JMicron 360/1/3/5/6, match class to avoid IDE function */
1874 + { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
1875 +diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
1876 +index 6f3f225..b5f614b 100644
1877 +--- a/drivers/ata/ata_piix.c
1878 ++++ b/drivers/ata/ata_piix.c
1879 +@@ -291,6 +291,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
1880 + { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
1881 + /* SATA Controller IDE (PCH) */
1882 + { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
1883 ++ /* SATA Controller IDE (CPT) */
1884 ++ { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
1885 ++ /* SATA Controller IDE (CPT) */
1886 ++ { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
1887 ++ /* SATA Controller IDE (CPT) */
1888 ++ { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
1889 ++ /* SATA Controller IDE (CPT) */
1890 ++ { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
1891 + { } /* terminate list */
1892 + };
1893 +
1894 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1895 +index 6728328..2401c9c 100644
1896 +--- a/drivers/ata/libata-core.c
1897 ++++ b/drivers/ata/libata-core.c
1898 +@@ -4348,6 +4348,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
1899 + { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
1900 + { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
1901 +
1902 ++ /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
1903 ++ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
1904 ++
1905 + /* devices which puke on READ_NATIVE_MAX */
1906 + { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
1907 + { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
1908 +diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
1909 +index be7c395..ad64750 100644
1910 +--- a/drivers/ata/pata_via.c
1911 ++++ b/drivers/ata/pata_via.c
1912 +@@ -697,6 +697,7 @@ static const struct pci_device_id via[] = {
1913 + { PCI_VDEVICE(VIA, 0x3164), },
1914 + { PCI_VDEVICE(VIA, 0x5324), },
1915 + { PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE },
1916 ++ { PCI_VDEVICE(VIA, 0x9001), VIA_IDFLAG_SINGLE },
1917 +
1918 + { },
1919 + };
1920 +diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
1921 +index 3999a5f..8a713f1 100644
1922 +--- a/drivers/char/agp/intel-agp.c
1923 ++++ b/drivers/char/agp/intel-agp.c
1924 +@@ -8,6 +8,7 @@
1925 + #include <linux/kernel.h>
1926 + #include <linux/pagemap.h>
1927 + #include <linux/agp_backend.h>
1928 ++#include <asm/smp.h>
1929 + #include "agp.h"
1930 +
1931 + /*
1932 +@@ -815,12 +816,6 @@ static void intel_i830_setup_flush(void)
1933 + intel_i830_fini_flush();
1934 + }
1935 +
1936 +-static void
1937 +-do_wbinvd(void *null)
1938 +-{
1939 +- wbinvd();
1940 +-}
1941 +-
1942 + /* The chipset_flush interface needs to get data that has already been
1943 + * flushed out of the CPU all the way out to main memory, because the GPU
1944 + * doesn't snoop those buffers.
1945 +@@ -837,12 +832,10 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
1946 +
1947 + memset(pg, 0, 1024);
1948 +
1949 +- if (cpu_has_clflush) {
1950 ++ if (cpu_has_clflush)
1951 + clflush_cache_range(pg, 1024);
1952 +- } else {
1953 +- if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
1954 +- printk(KERN_ERR "Timed out waiting for cache flush.\n");
1955 +- }
1956 ++ else if (wbinvd_on_all_cpus() != 0)
1957 ++ printk(KERN_ERR "Timed out waiting for cache flush.\n");
1958 + }
1959 +
1960 + /* The intel i830 automatically initializes the agp aperture during POST.
1961 +diff --git a/drivers/char/raw.c b/drivers/char/raw.c
1962 +index 64acd05..9abc3a1 100644
1963 +--- a/drivers/char/raw.c
1964 ++++ b/drivers/char/raw.c
1965 +@@ -247,6 +247,7 @@ static const struct file_operations raw_fops = {
1966 + .aio_read = generic_file_aio_read,
1967 + .write = do_sync_write,
1968 + .aio_write = blkdev_aio_write,
1969 ++ .fsync = block_fsync,
1970 + .open = raw_open,
1971 + .release= raw_release,
1972 + .ioctl = raw_ioctl,
1973 +diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
1974 +index dcb9083..76253cf 100644
1975 +--- a/drivers/char/tty_io.c
1976 ++++ b/drivers/char/tty_io.c
1977 +@@ -1423,6 +1423,8 @@ static void release_one_tty(struct work_struct *work)
1978 + list_del_init(&tty->tty_files);
1979 + file_list_unlock();
1980 +
1981 ++ put_pid(tty->pgrp);
1982 ++ put_pid(tty->session);
1983 + free_tty_struct(tty);
1984 + }
1985 +
1986 +diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
1987 +index 7d0f00a..99907c3 100644
1988 +--- a/drivers/gpu/drm/drm_crtc_helper.c
1989 ++++ b/drivers/gpu/drm/drm_crtc_helper.c
1990 +@@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
1991 + if (connector->status == connector_status_disconnected) {
1992 + DRM_DEBUG_KMS("%s is disconnected\n",
1993 + drm_get_connector_name(connector));
1994 ++ drm_mode_connector_update_edid_property(connector, NULL);
1995 + goto prune;
1996 + }
1997 +
1998 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
1999 +index ab6c973..bfd0e4a 100644
2000 +--- a/drivers/gpu/drm/drm_edid.c
2001 ++++ b/drivers/gpu/drm/drm_edid.c
2002 +@@ -85,6 +85,8 @@ static struct edid_quirk {
2003 +
2004 + /* Envision Peripherals, Inc. EN-7100e */
2005 + { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
2006 ++ /* Envision EN2028 */
2007 ++ { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
2008 +
2009 + /* Funai Electronics PM36B */
2010 + { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
2011 +@@ -707,15 +709,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
2012 + mode->vsync_end = mode->vsync_start + vsync_pulse_width;
2013 + mode->vtotal = mode->vdisplay + vblank;
2014 +
2015 +- /* perform the basic check for the detailed timing */
2016 +- if (mode->hsync_end > mode->htotal ||
2017 +- mode->vsync_end > mode->vtotal) {
2018 +- drm_mode_destroy(dev, mode);
2019 +- DRM_DEBUG_KMS("Incorrect detailed timing. "
2020 +- "Sync is beyond the blank.\n");
2021 +- return NULL;
2022 +- }
2023 +-
2024 + /* Some EDIDs have bogus h/vtotal values */
2025 + if (mode->hsync_end > mode->htotal)
2026 + mode->htotal = mode->hsync_end + 1;
2027 +diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
2028 +index 08d14df..4804872 100644
2029 +--- a/drivers/gpu/drm/drm_fops.c
2030 ++++ b/drivers/gpu/drm/drm_fops.c
2031 +@@ -140,14 +140,16 @@ int drm_open(struct inode *inode, struct file *filp)
2032 + spin_unlock(&dev->count_lock);
2033 + }
2034 + out:
2035 +- mutex_lock(&dev->struct_mutex);
2036 +- if (minor->type == DRM_MINOR_LEGACY) {
2037 +- BUG_ON((dev->dev_mapping != NULL) &&
2038 +- (dev->dev_mapping != inode->i_mapping));
2039 +- if (dev->dev_mapping == NULL)
2040 +- dev->dev_mapping = inode->i_mapping;
2041 ++ if (!retcode) {
2042 ++ mutex_lock(&dev->struct_mutex);
2043 ++ if (minor->type == DRM_MINOR_LEGACY) {
2044 ++ if (dev->dev_mapping == NULL)
2045 ++ dev->dev_mapping = inode->i_mapping;
2046 ++ else if (dev->dev_mapping != inode->i_mapping)
2047 ++ retcode = -ENODEV;
2048 ++ }
2049 ++ mutex_unlock(&dev->struct_mutex);
2050 + }
2051 +- mutex_unlock(&dev->struct_mutex);
2052 +
2053 + return retcode;
2054 + }
2055 +diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
2056 +index 93031a7..1238bc9 100644
2057 +--- a/drivers/gpu/drm/i915/intel_lvds.c
2058 ++++ b/drivers/gpu/drm/i915/intel_lvds.c
2059 +@@ -899,6 +899,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
2060 + DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
2061 + },
2062 + },
2063 ++ {
2064 ++ .callback = intel_no_lvds_dmi_callback,
2065 ++ .ident = "Clientron U800",
2066 ++ .matches = {
2067 ++ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
2068 ++ DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
2069 ++ },
2070 ++ },
2071 +
2072 + { } /* terminating entry */
2073 + };
2074 +diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
2075 +index d75788f..b1f929d 100644
2076 +--- a/drivers/gpu/drm/radeon/atom.c
2077 ++++ b/drivers/gpu/drm/radeon/atom.c
2078 +@@ -881,11 +881,16 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
2079 + uint8_t attr = U8((*ptr)++), shift;
2080 + uint32_t saved, dst;
2081 + int dptr = *ptr;
2082 ++ uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
2083 + SDEBUG(" dst: ");
2084 + dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
2085 ++ /* op needs the full dst value */
2086 ++ dst = saved;
2087 + shift = atom_get_src(ctx, attr, ptr);
2088 + SDEBUG(" shift: %d\n", shift);
2089 + dst <<= shift;
2090 ++ dst &= atom_arg_mask[dst_align];
2091 ++ dst >>= atom_arg_shift[dst_align];
2092 + SDEBUG(" dst: ");
2093 + atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
2094 + }
2095 +@@ -895,11 +900,16 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
2096 + uint8_t attr = U8((*ptr)++), shift;
2097 + uint32_t saved, dst;
2098 + int dptr = *ptr;
2099 ++ uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
2100 + SDEBUG(" dst: ");
2101 + dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
2102 ++ /* op needs the full dst value */
2103 ++ dst = saved;
2104 + shift = atom_get_src(ctx, attr, ptr);
2105 + SDEBUG(" shift: %d\n", shift);
2106 + dst >>= shift;
2107 ++ dst &= atom_arg_mask[dst_align];
2108 ++ dst >>= atom_arg_shift[dst_align];
2109 + SDEBUG(" dst: ");
2110 + atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
2111 + }
2112 +diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
2113 +index 43b55a0..5bdfaf2 100644
2114 +--- a/drivers/gpu/drm/radeon/r300.c
2115 ++++ b/drivers/gpu/drm/radeon/r300.c
2116 +@@ -364,11 +364,12 @@ void r300_gpu_init(struct radeon_device *rdev)
2117 +
2118 + r100_hdp_reset(rdev);
2119 + /* FIXME: rv380 one pipes ? */
2120 +- if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
2121 ++ if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
2122 ++ (rdev->family == CHIP_R350)) {
2123 + /* r300,r350 */
2124 + rdev->num_gb_pipes = 2;
2125 + } else {
2126 +- /* rv350,rv370,rv380 */
2127 ++ /* rv350,rv370,rv380,r300 AD */
2128 + rdev->num_gb_pipes = 1;
2129 + }
2130 + rdev->num_z_pipes = 1;
2131 +diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
2132 +index e7b1944..81b832e 100644
2133 +--- a/drivers/gpu/drm/radeon/radeon_combios.c
2134 ++++ b/drivers/gpu/drm/radeon/radeon_combios.c
2135 +@@ -670,7 +670,9 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
2136 + dac = RBIOS8(dac_info + 0x3) & 0xf;
2137 + p_dac->ps2_pdac_adj = (bg << 8) | (dac);
2138 + }
2139 +- found = 1;
2140 ++ /* if the values are all zeros, use the table */
2141 ++ if (p_dac->ps2_pdac_adj)
2142 ++ found = 1;
2143 + }
2144 +
2145 + out:
2146 +@@ -812,7 +814,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
2147 + bg = RBIOS8(dac_info + 0x10) & 0xf;
2148 + dac = RBIOS8(dac_info + 0x11) & 0xf;
2149 + tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
2150 +- found = 1;
2151 ++ /* if the values are all zeros, use the table */
2152 ++ if (tv_dac->ps2_tvdac_adj)
2153 ++ found = 1;
2154 + } else if (rev > 1) {
2155 + bg = RBIOS8(dac_info + 0xc) & 0xf;
2156 + dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
2157 +@@ -825,7 +829,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
2158 + bg = RBIOS8(dac_info + 0xe) & 0xf;
2159 + dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
2160 + tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
2161 +- found = 1;
2162 ++ /* if the values are all zeros, use the table */
2163 ++ if (tv_dac->ps2_tvdac_adj)
2164 ++ found = 1;
2165 + }
2166 + tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
2167 + }
2168 +@@ -842,7 +848,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
2169 + (bg << 16) | (dac << 20);
2170 + tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
2171 + tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
2172 +- found = 1;
2173 ++ /* if the values are all zeros, use the table */
2174 ++ if (tv_dac->ps2_tvdac_adj)
2175 ++ found = 1;
2176 + } else {
2177 + bg = RBIOS8(dac_info + 0x4) & 0xf;
2178 + dac = RBIOS8(dac_info + 0x5) & 0xf;
2179 +@@ -850,7 +858,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
2180 + (bg << 16) | (dac << 20);
2181 + tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
2182 + tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
2183 +- found = 1;
2184 ++ /* if the values are all zeros, use the table */
2185 ++ if (tv_dac->ps2_tvdac_adj)
2186 ++ found = 1;
2187 + }
2188 + } else {
2189 + DRM_INFO("No TV DAC info found in BIOS\n");
2190 +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
2191 +index 65f8194..2bdfbcd 100644
2192 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c
2193 ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
2194 +@@ -162,12 +162,14 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
2195 + {
2196 + struct drm_device *dev = connector->dev;
2197 + struct drm_connector *conflict;
2198 ++ struct radeon_connector *radeon_conflict;
2199 + int i;
2200 +
2201 + list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
2202 + if (conflict == connector)
2203 + continue;
2204 +
2205 ++ radeon_conflict = to_radeon_connector(conflict);
2206 + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
2207 + if (conflict->encoder_ids[i] == 0)
2208 + break;
2209 +@@ -177,6 +179,9 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
2210 + if (conflict->status != connector_status_connected)
2211 + continue;
2212 +
2213 ++ if (radeon_conflict->use_digital)
2214 ++ continue;
2215 ++
2216 + if (priority == true) {
2217 + DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
2218 + DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
2219 +@@ -315,7 +320,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
2220 + radeon_encoder = to_radeon_encoder(encoder);
2221 + if (!radeon_encoder->enc_priv)
2222 + return 0;
2223 +- if (rdev->is_atom_bios) {
2224 ++ if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) {
2225 + struct radeon_encoder_atom_dac *dac_int;
2226 + dac_int = radeon_encoder->enc_priv;
2227 + dac_int->tv_std = val;
2228 +diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
2229 +index 06123ba..f129bbb 100644
2230 +--- a/drivers/gpu/drm/radeon/radeon_cp.c
2231 ++++ b/drivers/gpu/drm/radeon/radeon_cp.c
2232 +@@ -417,8 +417,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
2233 + return -EBUSY;
2234 + }
2235 +
2236 +-static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
2237 ++static void radeon_init_pipes(struct drm_device *dev)
2238 + {
2239 ++ drm_radeon_private_t *dev_priv = dev->dev_private;
2240 + uint32_t gb_tile_config, gb_pipe_sel = 0;
2241 +
2242 + if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
2243 +@@ -436,11 +437,12 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
2244 + dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
2245 + } else {
2246 + /* R3xx */
2247 +- if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
2248 ++ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
2249 ++ dev->pdev->device != 0x4144) ||
2250 + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
2251 + dev_priv->num_gb_pipes = 2;
2252 + } else {
2253 +- /* R3Vxx */
2254 ++ /* RV3xx/R300 AD */
2255 + dev_priv->num_gb_pipes = 1;
2256 + }
2257 + }
2258 +@@ -736,7 +738,7 @@ static int radeon_do_engine_reset(struct drm_device * dev)
2259 +
2260 + /* setup the raster pipes */
2261 + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
2262 +- radeon_init_pipes(dev_priv);
2263 ++ radeon_init_pipes(dev);
2264 +
2265 + /* Reset the CP ring */
2266 + radeon_do_cp_reset(dev_priv);
2267 +diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
2268 +index e9d0850..9933c2c 100644
2269 +--- a/drivers/gpu/drm/radeon/radeon_cs.c
2270 ++++ b/drivers/gpu/drm/radeon/radeon_cs.c
2271 +@@ -193,11 +193,13 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
2272 + radeon_bo_list_fence(&parser->validated, parser->ib->fence);
2273 + }
2274 + radeon_bo_list_unreserve(&parser->validated);
2275 +- for (i = 0; i < parser->nrelocs; i++) {
2276 +- if (parser->relocs[i].gobj) {
2277 +- mutex_lock(&parser->rdev->ddev->struct_mutex);
2278 +- drm_gem_object_unreference(parser->relocs[i].gobj);
2279 +- mutex_unlock(&parser->rdev->ddev->struct_mutex);
2280 ++ if (parser->relocs != NULL) {
2281 ++ for (i = 0; i < parser->nrelocs; i++) {
2282 ++ if (parser->relocs[i].gobj) {
2283 ++ mutex_lock(&parser->rdev->ddev->struct_mutex);
2284 ++ drm_gem_object_unreference(parser->relocs[i].gobj);
2285 ++ mutex_unlock(&parser->rdev->ddev->struct_mutex);
2286 ++ }
2287 + }
2288 + }
2289 + kfree(parser->track);
2290 +@@ -246,7 +248,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2291 + }
2292 + r = radeon_cs_parser_relocs(&parser);
2293 + if (r) {
2294 +- DRM_ERROR("Failed to parse relocation !\n");
2295 ++ if (r != -ERESTARTSYS)
2296 ++ DRM_ERROR("Failed to parse relocation %d!\n", r);
2297 + radeon_cs_parser_fini(&parser, r);
2298 + mutex_unlock(&rdev->cs_mutex);
2299 + return r;
2300 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2301 +index 768b150..509ba3f 100644
2302 +--- a/drivers/gpu/drm/radeon/radeon_device.c
2303 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
2304 +@@ -655,6 +655,14 @@ int radeon_device_init(struct radeon_device *rdev,
2305 + return r;
2306 + radeon_check_arguments(rdev);
2307 +
2308 ++ /* all of the newer IGP chips have an internal gart
2309 ++ * However some rs4xx report as AGP, so remove that here.
2310 ++ */
2311 ++ if ((rdev->family >= CHIP_RS400) &&
2312 ++ (rdev->flags & RADEON_IS_IGP)) {
2313 ++ rdev->flags &= ~RADEON_IS_AGP;
2314 ++ }
2315 ++
2316 + if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
2317 + radeon_agp_disable(rdev);
2318 + }
2319 +diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
2320 +index 3c91724..7626bd5 100644
2321 +--- a/drivers/gpu/drm/radeon/radeon_encoders.c
2322 ++++ b/drivers/gpu/drm/radeon/radeon_encoders.c
2323 +@@ -1276,8 +1276,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
2324 + case ENCODER_OBJECT_ID_INTERNAL_DAC2:
2325 + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
2326 + atombios_dac_setup(encoder, ATOM_ENABLE);
2327 +- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
2328 +- atombios_tv_setup(encoder, ATOM_ENABLE);
2329 ++ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
2330 ++ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
2331 ++ atombios_tv_setup(encoder, ATOM_ENABLE);
2332 ++ else
2333 ++ atombios_tv_setup(encoder, ATOM_DISABLE);
2334 ++ }
2335 + break;
2336 + }
2337 + atombios_apply_encoder_quirks(encoder, adjusted_mode);
2338 +diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
2339 +index 417684d..f2ed27c 100644
2340 +--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
2341 ++++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
2342 +@@ -57,6 +57,10 @@
2343 + #define NTSC_TV_PLL_N_14 693
2344 + #define NTSC_TV_PLL_P_14 7
2345 +
2346 ++#define PAL_TV_PLL_M_14 19
2347 ++#define PAL_TV_PLL_N_14 353
2348 ++#define PAL_TV_PLL_P_14 5
2349 ++
2350 + #define VERT_LEAD_IN_LINES 2
2351 + #define FRAC_BITS 0xe
2352 + #define FRAC_MASK 0x3fff
2353 +@@ -205,9 +209,24 @@ static const struct radeon_tv_mode_constants available_tv_modes[] = {
2354 + 630627, /* defRestart */
2355 + 347, /* crtcPLL_N */
2356 + 14, /* crtcPLL_M */
2357 +- 8, /* crtcPLL_postDiv */
2358 ++ 8, /* crtcPLL_postDiv */
2359 + 1022, /* pixToTV */
2360 + },
2361 ++ { /* PAL timing for 14 MHz ref clk */
2362 ++ 800, /* horResolution */
2363 ++ 600, /* verResolution */
2364 ++ TV_STD_PAL, /* standard */
2365 ++ 1131, /* horTotal */
2366 ++ 742, /* verTotal */
2367 ++ 813, /* horStart */
2368 ++ 840, /* horSyncStart */
2369 ++ 633, /* verSyncStart */
2370 ++ 708369, /* defRestart */
2371 ++ 211, /* crtcPLL_N */
2372 ++ 9, /* crtcPLL_M */
2373 ++ 8, /* crtcPLL_postDiv */
2374 ++ 759, /* pixToTV */
2375 ++ },
2376 + };
2377 +
2378 + #define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
2379 +@@ -242,7 +261,7 @@ static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(stru
2380 + if (pll->reference_freq == 2700)
2381 + const_ptr = &available_tv_modes[1];
2382 + else
2383 +- const_ptr = &available_tv_modes[1]; /* FIX ME */
2384 ++ const_ptr = &available_tv_modes[3];
2385 + }
2386 + return const_ptr;
2387 + }
2388 +@@ -685,9 +704,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
2389 + n = PAL_TV_PLL_N_27;
2390 + p = PAL_TV_PLL_P_27;
2391 + } else {
2392 +- m = PAL_TV_PLL_M_27;
2393 +- n = PAL_TV_PLL_N_27;
2394 +- p = PAL_TV_PLL_P_27;
2395 ++ m = PAL_TV_PLL_M_14;
2396 ++ n = PAL_TV_PLL_N_14;
2397 ++ p = PAL_TV_PLL_P_14;
2398 + }
2399 + }
2400 +
2401 +diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
2402 +index c381856..a27c09f 100644
2403 +--- a/drivers/gpu/drm/radeon/rs600.c
2404 ++++ b/drivers/gpu/drm/radeon/rs600.c
2405 +@@ -175,7 +175,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
2406 + WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
2407 +
2408 + tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
2409 +- tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
2410 ++ tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
2411 + WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
2412 +
2413 + tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
2414 +diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
2415 +index cab13e8..62416e6 100644
2416 +--- a/drivers/hid/hid-gyration.c
2417 ++++ b/drivers/hid/hid-gyration.c
2418 +@@ -53,10 +53,13 @@ static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi,
2419 + static int gyration_event(struct hid_device *hdev, struct hid_field *field,
2420 + struct hid_usage *usage, __s32 value)
2421 + {
2422 +- struct input_dev *input = field->hidinput->input;
2423 ++
2424 ++ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
2425 ++ return 0;
2426 +
2427 + if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
2428 + (usage->hid & 0xff) == 0x82) {
2429 ++ struct input_dev *input = field->hidinput->input;
2430 + input_event(input, usage->type, usage->code, 1);
2431 + input_sync(input);
2432 + input_event(input, usage->type, usage->code, 0);
2433 +diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
2434 +index 864a371..fbc997e 100644
2435 +--- a/drivers/hwmon/sht15.c
2436 ++++ b/drivers/hwmon/sht15.c
2437 +@@ -302,13 +302,13 @@ error_ret:
2438 + **/
2439 + static inline int sht15_calc_temp(struct sht15_data *data)
2440 + {
2441 +- int d1 = 0;
2442 ++ int d1 = temppoints[0].d1;
2443 + int i;
2444 +
2445 +- for (i = 1; i < ARRAY_SIZE(temppoints); i++)
2446 ++ for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
2447 + /* Find pointer to interpolate */
2448 + if (data->supply_uV > temppoints[i - 1].vdd) {
2449 +- d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
2450 ++ d1 = (data->supply_uV - temppoints[i - 1].vdd)
2451 + * (temppoints[i].d1 - temppoints[i - 1].d1)
2452 + / (temppoints[i].vdd - temppoints[i - 1].vdd)
2453 + + temppoints[i - 1].d1;
2454 +@@ -541,7 +541,12 @@ static int __devinit sht15_probe(struct platform_device *pdev)
2455 + /* If a regulator is available, query what the supply voltage actually is!*/
2456 + data->reg = regulator_get(data->dev, "vcc");
2457 + if (!IS_ERR(data->reg)) {
2458 +- data->supply_uV = regulator_get_voltage(data->reg);
2459 ++ int voltage;
2460 ++
2461 ++ voltage = regulator_get_voltage(data->reg);
2462 ++ if (voltage)
2463 ++ data->supply_uV = voltage;
2464 ++
2465 + regulator_enable(data->reg);
2466 + /* setup a notifier block to update this if another device
2467 + * causes the voltage to change */
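
The temperature fix above is ordinary linear interpolation between adjacent temppoints rows, now done in consistent microvolt units and seeded from the first table entry rather than 0. The core step, stated as a helper (ours, for illustration):

    /* y at x, interpolated on the segment (x0,y0)-(x1,y1). */
    static int lerp(int x, int x0, int y0, int x1, int y1)
    {
            return y0 + (x - x0) * (y1 - y0) / (x1 - x0);
    }
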
2468 +diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
2469 +index 5f318ce..cb9f95c 100644
2470 +--- a/drivers/i2c/busses/Kconfig
2471 ++++ b/drivers/i2c/busses/Kconfig
2472 +@@ -77,7 +77,7 @@ config I2C_AMD8111
2473 + will be called i2c-amd8111.
2474 +
2475 + config I2C_I801
2476 +- tristate "Intel 82801 (ICH)"
2477 ++ tristate "Intel 82801 (ICH/PCH)"
2478 + depends on PCI
2479 + help
2480 + If you say yes to this option, support will be included for the Intel
2481 +@@ -97,7 +97,8 @@ config I2C_I801
2482 + ICH9
2483 + Tolapai
2484 + ICH10
2485 +- PCH
2486 ++ 3400/5 Series (PCH)
2487 ++ Cougar Point (PCH)
2488 +
2489 + This driver can also be built as a module. If so, the module
2490 + will be called i2c-i801.
2491 +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
2492 +index 5574be2..e361da7 100644
2493 +--- a/drivers/i2c/busses/i2c-i801.c
2494 ++++ b/drivers/i2c/busses/i2c-i801.c
2495 +@@ -41,7 +41,8 @@
2496 + Tolapai 0x5032 32 hard yes yes yes
2497 + ICH10 0x3a30 32 hard yes yes yes
2498 + ICH10 0x3a60 32 hard yes yes yes
2499 +- PCH 0x3b30 32 hard yes yes yes
2500 ++ 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes
2501 ++ Cougar Point (PCH) 0x1c22 32 hard yes yes yes
2502 +
2503 + Features supported by this driver:
2504 + Software PEC no
2505 +@@ -580,6 +581,7 @@ static struct pci_device_id i801_ids[] = {
2506 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
2507 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
2508 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
2509 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
2510 + { 0, }
2511 + };
2512 +
2513 +@@ -709,6 +711,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
2514 + case PCI_DEVICE_ID_INTEL_ICH10_4:
2515 + case PCI_DEVICE_ID_INTEL_ICH10_5:
2516 + case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
2517 ++ case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
2518 + i801_features |= FEATURE_I2C_BLOCK_READ;
2519 + /* fall through */
2520 + case PCI_DEVICE_ID_INTEL_82801DB_3:
2521 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
2522 +index 30bdf42..f8302c2 100644
2523 +--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
2524 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
2525 +@@ -752,6 +752,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
2526 + if (++priv->tx_outstanding == ipoib_sendq_size) {
2527 + ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
2528 + tx->qp->qp_num);
2529 ++ if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
2530 ++ ipoib_warn(priv, "request notify on send CQ failed\n");
2531 + netif_stop_queue(dev);
2532 + }
2533 + }
2534 +diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
2535 +index fbd3987..e8d65b3 100644
2536 +--- a/drivers/input/sparse-keymap.c
2537 ++++ b/drivers/input/sparse-keymap.c
2538 +@@ -161,7 +161,7 @@ int sparse_keymap_setup(struct input_dev *dev,
2539 + return 0;
2540 +
2541 + err_out:
2542 +- kfree(keymap);
2543 ++ kfree(map);
2544 + return error;
2545 +
2546 + }
2547 +diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
2548 +index 072f33b..e53ddc5 100644
2549 +--- a/drivers/input/tablet/wacom_sys.c
2550 ++++ b/drivers/input/tablet/wacom_sys.c
2551 +@@ -644,13 +644,15 @@ static int wacom_resume(struct usb_interface *intf)
2552 + int rv;
2553 +
2554 + mutex_lock(&wacom->lock);
2555 +- if (wacom->open) {
2556 ++
2557 ++ /* switch to wacom mode first */
2558 ++ wacom_query_tablet_data(intf, features);
2559 ++
2560 ++ if (wacom->open)
2561 + rv = usb_submit_urb(wacom->irq, GFP_NOIO);
2562 +- /* switch to wacom mode if needed */
2563 +- if (!wacom_retrieve_hid_descriptor(intf, features))
2564 +- wacom_query_tablet_data(intf, features);
2565 +- } else
2566 ++ else
2567 + rv = 0;
2568 ++
2569 + mutex_unlock(&wacom->lock);
2570 +
2571 + return rv;
2572 +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
2573 +index e3cf568..d7500e1 100644
2574 +--- a/drivers/md/dm-ioctl.c
2575 ++++ b/drivers/md/dm-ioctl.c
2576 +@@ -285,7 +285,8 @@ retry:
2577 + up_write(&_hash_lock);
2578 + }
2579 +
2580 +-static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
2581 ++static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
2582 ++ const char *new)
2583 + {
2584 + char *new_name, *old_name;
2585 + struct hash_cell *hc;
2586 +@@ -344,7 +345,8 @@ static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
2587 + dm_table_put(table);
2588 + }
2589 +
2590 +- dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie);
2591 ++ if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie))
2592 ++ *flags |= DM_UEVENT_GENERATED_FLAG;
2593 +
2594 + dm_put(hc->md);
2595 + up_write(&_hash_lock);
2596 +@@ -736,10 +738,10 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
2597 + __hash_remove(hc);
2598 + up_write(&_hash_lock);
2599 +
2600 +- dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr);
2601 ++ if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
2602 ++ param->flags |= DM_UEVENT_GENERATED_FLAG;
2603 +
2604 + dm_put(md);
2605 +- param->data_size = 0;
2606 + return 0;
2607 + }
2608 +
2609 +@@ -773,7 +775,9 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
2610 + return r;
2611 +
2612 + param->data_size = 0;
2613 +- return dm_hash_rename(param->event_nr, param->name, new_name);
2614 ++
2615 ++ return dm_hash_rename(param->event_nr, &param->flags, param->name,
2616 ++ new_name);
2617 + }
2618 +
2619 + static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
2620 +@@ -899,8 +903,8 @@ static int do_resume(struct dm_ioctl *param)
2621 +
2622 + if (dm_suspended_md(md)) {
2623 + r = dm_resume(md);
2624 +- if (!r)
2625 +- dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr);
2626 ++ if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
2627 ++ param->flags |= DM_UEVENT_GENERATED_FLAG;
2628 + }
2629 +
2630 + if (old_map)
2631 +@@ -1477,6 +1481,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
2632 + {
2633 + /* Always clear this flag */
2634 + param->flags &= ~DM_BUFFER_FULL_FLAG;
2635 ++ param->flags &= ~DM_UEVENT_GENERATED_FLAG;
2636 +
2637 + /* Ignores parameters */
2638 + if (cmd == DM_REMOVE_ALL_CMD ||
2639 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
2640 +index fa786b9..fe8889e 100644
2641 +--- a/drivers/md/dm.c
2642 ++++ b/drivers/md/dm.c
2643 +@@ -2618,18 +2618,19 @@ out:
2644 + /*-----------------------------------------------------------------
2645 + * Event notification.
2646 + *---------------------------------------------------------------*/
2647 +-void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2648 ++int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2649 + unsigned cookie)
2650 + {
2651 + char udev_cookie[DM_COOKIE_LENGTH];
2652 + char *envp[] = { udev_cookie, NULL };
2653 +
2654 + if (!cookie)
2655 +- kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2656 ++ return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2657 + else {
2658 + snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2659 + DM_COOKIE_ENV_VAR_NAME, cookie);
2660 +- kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
2661 ++ return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2662 ++ action, envp);
2663 + }
2664 + }
2665 +
2666 +diff --git a/drivers/md/dm.h b/drivers/md/dm.h
2667 +index 8dadaa5..bad1724 100644
2668 +--- a/drivers/md/dm.h
2669 ++++ b/drivers/md/dm.h
2670 +@@ -125,8 +125,8 @@ void dm_stripe_exit(void);
2671 + int dm_open_count(struct mapped_device *md);
2672 + int dm_lock_for_deletion(struct mapped_device *md);
2673 +
2674 +-void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2675 +- unsigned cookie);
2676 ++int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2677 ++ unsigned cookie);
2678 +
2679 + int dm_io_init(void);
2680 + void dm_io_exit(void);
2681 +diff --git a/drivers/md/linear.c b/drivers/md/linear.c
2682 +index 00435bd..001317b 100644
2683 +--- a/drivers/md/linear.c
2684 ++++ b/drivers/md/linear.c
2685 +@@ -172,12 +172,14 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
2686 + disk_stack_limits(mddev->gendisk, rdev->bdev,
2687 + rdev->data_offset << 9);
2688 + /* as we don't honour merge_bvec_fn, we must never risk
2689 +- * violating it, so limit ->max_sector to one PAGE, as
2690 +- * a one page request is never in violation.
2691 ++ * violating it, so limit max_phys_segments to 1 lying within
2692 ++ * a single page.
2693 + */
2694 +- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
2695 +- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
2696 +- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
2697 ++ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
2698 ++ blk_queue_max_phys_segments(mddev->queue, 1);
2699 ++ blk_queue_segment_boundary(mddev->queue,
2700 ++ PAGE_CACHE_SIZE - 1);
2701 ++ }
2702 +
2703 + conf->array_sectors += rdev->sectors;
2704 + cnt++;
2705 +diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
2706 +index 32a662f..f9ee99f 100644
2707 +--- a/drivers/md/multipath.c
2708 ++++ b/drivers/md/multipath.c
2709 +@@ -301,14 +301,16 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
2710 + rdev->data_offset << 9);
2711 +
2712 + /* as we don't honour merge_bvec_fn, we must never risk
2713 +- * violating it, so limit ->max_sector to one PAGE, as
2714 +- * a one page request is never in violation.
2715 ++ * violating it, so limit ->max_phys_segments to one, lying
2716 ++ * within a single page.
2717 + * (Note: it is very unlikely that a device with
2718 + * merge_bvec_fn will be involved in multipath.)
2719 + */
2720 +- if (q->merge_bvec_fn &&
2721 +- queue_max_sectors(q) > (PAGE_SIZE>>9))
2722 +- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
2723 ++ if (q->merge_bvec_fn) {
2724 ++ blk_queue_max_phys_segments(mddev->queue, 1);
2725 ++ blk_queue_segment_boundary(mddev->queue,
2726 ++ PAGE_CACHE_SIZE - 1);
2727 ++ }
2728 +
2729 + conf->working_disks++;
2730 + mddev->degraded--;
2731 +@@ -476,9 +478,11 @@ static int multipath_run (mddev_t *mddev)
2732 + /* as we don't honour merge_bvec_fn, we must never risk
2733 + * violating it, not that we ever expect a device with
2734 + * a merge_bvec_fn to be involved in multipath */
2735 +- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
2736 +- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
2737 +- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
2738 ++ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
2739 ++ blk_queue_max_phys_segments(mddev->queue, 1);
2740 ++ blk_queue_segment_boundary(mddev->queue,
2741 ++ PAGE_CACHE_SIZE - 1);
2742 ++ }
2743 +
2744 + if (!test_bit(Faulty, &rdev->flags))
2745 + conf->working_disks++;
2746 +diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
2747 +index 77605cd..41ee9de 100644
2748 +--- a/drivers/md/raid0.c
2749 ++++ b/drivers/md/raid0.c
2750 +@@ -176,14 +176,15 @@ static int create_strip_zones(mddev_t *mddev)
2751 + disk_stack_limits(mddev->gendisk, rdev1->bdev,
2752 + rdev1->data_offset << 9);
2753 + /* as we don't honour merge_bvec_fn, we must never risk
2754 +- * violating it, so limit ->max_sector to one PAGE, as
2755 +- * a one page request is never in violation.
2756 ++ * violating it, so limit ->max_phys_segments to 1, lying within
2757 ++ * a single page.
2758 + */
2759 +
2760 +- if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
2761 +- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
2762 +- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
2763 +-
2764 ++ if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) {
2765 ++ blk_queue_max_phys_segments(mddev->queue, 1);
2766 ++ blk_queue_segment_boundary(mddev->queue,
2767 ++ PAGE_CACHE_SIZE - 1);
2768 ++ }
2769 + if (!smallest || (rdev1->sectors < smallest->sectors))
2770 + smallest = rdev1;
2771 + cnt++;
2772 +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
2773 +index d119b7b..047c468 100644
2774 +--- a/drivers/md/raid10.c
2775 ++++ b/drivers/md/raid10.c
2776 +@@ -1155,13 +1155,17 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
2777 +
2778 + disk_stack_limits(mddev->gendisk, rdev->bdev,
2779 + rdev->data_offset << 9);
2780 +- /* as we don't honour merge_bvec_fn, we must never risk
2781 +- * violating it, so limit ->max_sector to one PAGE, as
2782 +- * a one page request is never in violation.
2783 ++ /* as we don't honour merge_bvec_fn, we must
2784 ++ * never risk violating it, so limit
2785 ++ * ->max_phys_segments to one lying within a single
2786 ++ * page, as a one page request is never in
2787 ++ * violation.
2788 + */
2789 +- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
2790 +- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
2791 +- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
2792 ++ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
2793 ++ blk_queue_max_phys_segments(mddev->queue, 1);
2794 ++ blk_queue_segment_boundary(mddev->queue,
2795 ++ PAGE_CACHE_SIZE - 1);
2796 ++ }
2797 +
2798 + p->head_position = 0;
2799 + rdev->raid_disk = mirror;
2800 +@@ -2255,12 +2259,14 @@ static int run(mddev_t *mddev)
2801 + disk_stack_limits(mddev->gendisk, rdev->bdev,
2802 + rdev->data_offset << 9);
2803 + /* as we don't honour merge_bvec_fn, we must never risk
2804 +- * violating it, so limit ->max_sector to one PAGE, as
2805 +- * a one page request is never in violation.
2806 ++ * violating it, so limit max_phys_segments to 1 lying
2807 ++ * within a single page.
2808 + */
2809 +- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
2810 +- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
2811 +- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
2812 ++ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
2813 ++ blk_queue_max_phys_segments(mddev->queue, 1);
2814 ++ blk_queue_segment_boundary(mddev->queue,
2815 ++ PAGE_CACHE_SIZE - 1);
2816 ++ }
2817 +
2818 + disk->head_position = 0;
2819 + }
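
All four md hunks (linear, multipath, raid0, raid10) apply the same clamp: a bio limited to one physical segment that stays within a single page can never violate a member device's merge_bvec_fn. The shared shape, as a sketch against the 2.6.33 block API:

    #include <linux/blkdev.h>
    #include <linux/pagemap.h>

    static void clamp_for_merge_bvec(struct request_queue *q)
    {
            blk_queue_max_phys_segments(q, 1);                  /* one segment */
            blk_queue_segment_boundary(q, PAGE_CACHE_SIZE - 1); /* one page */
    }
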
2820 +diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
2821 +index 57f149b..4d353d2 100644
2822 +--- a/drivers/net/e1000e/netdev.c
2823 ++++ b/drivers/net/e1000e/netdev.c
2824 +@@ -660,6 +660,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
2825 + i = 0;
2826 + }
2827 +
2828 ++ if (i == tx_ring->next_to_use)
2829 ++ break;
2830 + eop = tx_ring->buffer_info[i].next_to_watch;
2831 + eop_desc = E1000_TX_DESC(*tx_ring, eop);
2832 + }
2833 +diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
2834 +index 67d414b..3db85da 100644
2835 +--- a/drivers/net/r8169.c
2836 ++++ b/drivers/net/r8169.c
2837 +@@ -3255,8 +3255,8 @@ static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
2838 + unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
2839 +
2840 + if (max_frame != 16383)
2841 +- printk(KERN_WARNING "WARNING! Changing of MTU on this NIC"
2842 +- "May lead to frame reception errors!\n");
2843 ++ printk(KERN_WARNING PFX "WARNING! Changing of MTU on this "
2844 ++ "NIC may lead to frame reception errors!\n");
2845 +
2846 + tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
2847 + }
2848 +diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
2849 +index e0799d9..0387658 100644
2850 +--- a/drivers/net/wireless/ath/ar9170/usb.c
2851 ++++ b/drivers/net/wireless/ath/ar9170/usb.c
2852 +@@ -414,7 +414,7 @@ static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
2853 + spin_unlock_irqrestore(&aru->common.cmdlock, flags);
2854 +
2855 + usb_fill_int_urb(urb, aru->udev,
2856 +- usb_sndbulkpipe(aru->udev, AR9170_EP_CMD),
2857 ++ usb_sndintpipe(aru->udev, AR9170_EP_CMD),
2858 + aru->common.cmdbuf, plen + 4,
2859 + ar9170_usb_tx_urb_complete, NULL, 1);
2860 +
2861 +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
2862 +index 33a1071..7b1eab4 100644
2863 +--- a/drivers/net/wireless/ath/ath9k/main.c
2864 ++++ b/drivers/net/wireless/ath/ath9k/main.c
2865 +@@ -2721,8 +2721,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2866 + all_wiphys_idle = ath9k_all_wiphys_idle(sc);
2867 + ath9k_set_wiphy_idle(aphy, idle);
2868 +
2869 +- if (!idle && all_wiphys_idle)
2870 +- enable_radio = true;
2871 ++ enable_radio = (!idle && all_wiphys_idle);
2872 +
2873 + /*
2874 + * After we unlock here its possible another wiphy
2875 +diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
2876 +index 64c12e1..0a00d42 100644
2877 +--- a/drivers/net/wireless/b43/Kconfig
2878 ++++ b/drivers/net/wireless/b43/Kconfig
2879 +@@ -78,11 +78,11 @@ config B43_SDIO
2880 +
2881 + If unsure, say N.
2882 +
2883 +-# Data transfers to the device via PIO
2884 +-# This is only needed on PCMCIA and SDIO devices. All others can do DMA properly.
2885 ++# Data transfers to the device via PIO. We want it as a fallback even
2886 ++# if we can do DMA.
2887 + config B43_PIO
2888 + bool
2889 +- depends on B43 && (B43_SDIO || B43_PCMCIA || B43_FORCE_PIO)
2890 ++ depends on B43
2891 + select SSB_BLOCKIO
2892 + default y
2893 +
2894 +diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
2895 +index 84772a2..5e83b6f 100644
2896 +--- a/drivers/net/wireless/b43/Makefile
2897 ++++ b/drivers/net/wireless/b43/Makefile
2898 +@@ -12,7 +12,7 @@ b43-y += xmit.o
2899 + b43-y += lo.o
2900 + b43-y += wa.o
2901 + b43-y += dma.o
2902 +-b43-$(CONFIG_B43_PIO) += pio.o
2903 ++b43-y += pio.o
2904 + b43-y += rfkill.o
2905 + b43-$(CONFIG_B43_LEDS) += leds.o
2906 + b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
2907 +diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
2908 +index c484cc2..7df822e 100644
2909 +--- a/drivers/net/wireless/b43/b43.h
2910 ++++ b/drivers/net/wireless/b43/b43.h
2911 +@@ -694,6 +694,7 @@ struct b43_wldev {
2912 + bool radio_hw_enable; /* saved state of radio hardware enabled state */
2913 + bool qos_enabled; /* TRUE, if QoS is used. */
2914 + bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */
2915 ++ bool use_pio; /* TRUE if next init should use PIO */
2916 +
2917 + /* PHY/Radio device. */
2918 + struct b43_phy phy;
2919 +@@ -822,11 +823,9 @@ struct b43_wl {
2920 + /* The device LEDs. */
2921 + struct b43_leds leds;
2922 +
2923 +-#ifdef CONFIG_B43_PIO
2924 + /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
2925 + u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
2926 + u8 pio_tailspace[4] __attribute__((__aligned__(8)));
2927 +-#endif /* CONFIG_B43_PIO */
2928 + };
2929 +
2930 + static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw)
2931 +@@ -877,20 +876,15 @@ static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value)
2932 +
2933 + static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
2934 + {
2935 +-#ifdef CONFIG_B43_PIO
2936 + return dev->__using_pio_transfers;
2937 +-#else
2938 +- return 0;
2939 +-#endif
2940 + }
2941 +
2942 + #ifdef CONFIG_B43_FORCE_PIO
2943 +-# define B43_FORCE_PIO 1
2944 ++# define B43_PIO_DEFAULT 1
2945 + #else
2946 +-# define B43_FORCE_PIO 0
2947 ++# define B43_PIO_DEFAULT 0
2948 + #endif
2949 +
2950 +-
2951 + /* Message printing */
2952 + void b43info(struct b43_wl *wl, const char *fmt, ...)
2953 + __attribute__ ((format(printf, 2, 3)));
2954 +diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
2955 +index 88d1fd0..615af22 100644
2956 +--- a/drivers/net/wireless/b43/dma.c
2957 ++++ b/drivers/net/wireless/b43/dma.c
2958 +@@ -1653,7 +1653,6 @@ void b43_dma_tx_resume(struct b43_wldev *dev)
2959 + b43_power_saving_ctl_bits(dev, 0);
2960 + }
2961 +
2962 +-#ifdef CONFIG_B43_PIO
2963 + static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
2964 + u16 mmio_base, bool enable)
2965 + {
2966 +@@ -1687,4 +1686,3 @@ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
2967 + mmio_base = b43_dmacontroller_base(type, engine_index);
2968 + direct_fifo_rx(dev, type, mmio_base, enable);
2969 + }
2970 +-#endif /* CONFIG_B43_PIO */
2971 +diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
2972 +index 629c166..9eb4f5e 100644
2973 +--- a/drivers/net/wireless/b43/main.c
2974 ++++ b/drivers/net/wireless/b43/main.c
2975 +@@ -102,6 +102,9 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
2976 + module_param_named(verbose, b43_modparam_verbose, int, 0644);
2977 + MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
2978 +
2979 ++int b43_modparam_pio = B43_PIO_DEFAULT;
2980 ++module_param_named(pio, b43_modparam_pio, int, 0644);
2981 ++MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
2982 +
2983 + static const struct ssb_device_id b43_ssb_tbl[] = {
2984 + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5),
2985 +@@ -1790,8 +1793,9 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
2986 + dma_reason[4], dma_reason[5]);
2987 + b43err(dev->wl, "This device does not support DMA "
2988 + "on your system. Please use PIO instead.\n");
2989 +- b43err(dev->wl, "CONFIG_B43_FORCE_PIO must be set in "
2990 +- "your kernel configuration.\n");
2991 ++ /* Fall back to PIO transfers if we get fatal DMA errors! */
2992 ++ dev->use_pio = 1;
2993 ++ b43_controller_restart(dev, "DMA error");
2994 + return;
2995 + }
2996 + if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
2997 +@@ -4358,7 +4362,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
2998 +
2999 + if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
3000 + (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) ||
3001 +- B43_FORCE_PIO) {
3002 ++ dev->use_pio) {
3003 + dev->__using_pio_transfers = 1;
3004 + err = b43_pio_init(dev);
3005 + } else {
3006 +@@ -4826,6 +4830,7 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
3007 + if (!wldev)
3008 + goto out;
3009 +
3010 ++ wldev->use_pio = b43_modparam_pio;
3011 + wldev->dev = dev;
3012 + wldev->wl = wl;
3013 + b43_set_status(wldev, B43_STAT_UNINIT);
3014 +diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
3015 +index 7dd649c..7b3c42f 100644
3016 +--- a/drivers/net/wireless/b43/pio.h
3017 ++++ b/drivers/net/wireless/b43/pio.h
3018 +@@ -55,8 +55,6 @@
3019 + #define B43_PIO_MAX_NR_TXPACKETS 32
3020 +
3021 +
3022 +-#ifdef CONFIG_B43_PIO
3023 +-
3024 + struct b43_pio_txpacket {
3025 + /* Pointer to the TX queue we belong to. */
3026 + struct b43_pio_txqueue *queue;
3027 +@@ -169,42 +167,4 @@ void b43_pio_rx(struct b43_pio_rxqueue *q);
3028 + void b43_pio_tx_suspend(struct b43_wldev *dev);
3029 + void b43_pio_tx_resume(struct b43_wldev *dev);
3030 +
3031 +-
3032 +-#else /* CONFIG_B43_PIO */
3033 +-
3034 +-
3035 +-static inline int b43_pio_init(struct b43_wldev *dev)
3036 +-{
3037 +- return 0;
3038 +-}
3039 +-static inline void b43_pio_free(struct b43_wldev *dev)
3040 +-{
3041 +-}
3042 +-static inline void b43_pio_stop(struct b43_wldev *dev)
3043 +-{
3044 +-}
3045 +-static inline int b43_pio_tx(struct b43_wldev *dev,
3046 +- struct sk_buff *skb)
3047 +-{
3048 +- return 0;
3049 +-}
3050 +-static inline void b43_pio_handle_txstatus(struct b43_wldev *dev,
3051 +- const struct b43_txstatus *status)
3052 +-{
3053 +-}
3054 +-static inline void b43_pio_get_tx_stats(struct b43_wldev *dev,
3055 +- struct ieee80211_tx_queue_stats *stats)
3056 +-{
3057 +-}
3058 +-static inline void b43_pio_rx(struct b43_pio_rxqueue *q)
3059 +-{
3060 +-}
3061 +-static inline void b43_pio_tx_suspend(struct b43_wldev *dev)
3062 +-{
3063 +-}
3064 +-static inline void b43_pio_tx_resume(struct b43_wldev *dev)
3065 +-{
3066 +-}
3067 +-
3068 +-#endif /* CONFIG_B43_PIO */
3069 + #endif /* B43_PIO_H_ */
3070 +diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
3071 +index 3146281..3b4c5a4 100644
3072 +--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
3073 ++++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
3074 +@@ -581,6 +581,8 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
3075 +
3076 + iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
3077 +
3078 ++ /* reset to 0 to enable all the queues first */
3079 ++ priv->txq_ctx_active_msk = 0;
3080 + /* Map each Tx/cmd queue to its corresponding fifo */
3081 + for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
3082 + int ac = default_queue_to_tx_fifo[i];
3083 +@@ -2008,7 +2010,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
3084 + IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
3085 + "%d index %d\n", scd_ssn , index);
3086 + freed = iwl_tx_queue_reclaim(priv, txq_id, index);
3087 +- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
3088 ++ if (qc)
3089 ++ iwl_free_tfds_in_queue(priv, sta_id,
3090 ++ tid, freed);
3091 +
3092 + if (priv->mac80211_registered &&
3093 + (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
3094 +@@ -2035,13 +2039,14 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
3095 +
3096 + freed = iwl_tx_queue_reclaim(priv, txq_id, index);
3097 + if (qc && likely(sta_id != IWL_INVALID_STATION))
3098 +- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
3099 ++ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
3100 ++ else if (sta_id == IWL_INVALID_STATION)
3101 ++ IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
3102 +
3103 + if (priv->mac80211_registered &&
3104 + (iwl_queue_space(&txq->q) > txq->q.low_mark))
3105 + iwl_wake_queue(priv, txq_id);
3106 + }
3107 +-
3108 + if (qc && likely(sta_id != IWL_INVALID_STATION))
3109 + iwl_txq_check_empty(priv, sta_id, tid, txq_id);
3110 +
3111 +diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
3112 +index cffaae7..c610e5f 100644
3113 +--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
3114 ++++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
3115 +@@ -657,6 +657,8 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
3116 +
3117 + iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
3118 +
3119 ++ /* reset to 0 to enable all the queues first */
3120 ++ priv->txq_ctx_active_msk = 0;
3121 + /* map qos queues to fifos one-to-one */
3122 + for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
3123 + int ac = iwl5000_default_queue_to_tx_fifo[i];
3124 +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
3125 +index 1c9866d..5622a55 100644
3126 +--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
3127 ++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
3128 +@@ -2461,7 +2461,7 @@ static int iwl_setup_mac(struct iwl_priv *priv)
3129 + BIT(NL80211_IFTYPE_STATION) |
3130 + BIT(NL80211_IFTYPE_ADHOC);
3131 +
3132 +- hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
3133 ++ hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3134 + WIPHY_FLAG_DISABLE_BEACON_HINTS;
3135 +
3136 + /*
3137 +diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
3138 +index fa1c89b..8f1b850 100644
3139 +--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
3140 ++++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
3141 +@@ -404,21 +404,6 @@ EXPORT_SYMBOL(iwl_init_scan_params);
3142 +
3143 + static int iwl_scan_initiate(struct iwl_priv *priv)
3144 + {
3145 +- if (!iwl_is_ready_rf(priv)) {
3146 +- IWL_DEBUG_SCAN(priv, "Aborting scan due to not ready.\n");
3147 +- return -EIO;
3148 +- }
3149 +-
3150 +- if (test_bit(STATUS_SCANNING, &priv->status)) {
3151 +- IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
3152 +- return -EAGAIN;
3153 +- }
3154 +-
3155 +- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
3156 +- IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
3157 +- return -EAGAIN;
3158 +- }
3159 +-
3160 + IWL_DEBUG_INFO(priv, "Starting scan...\n");
3161 + set_bit(STATUS_SCANNING, &priv->status);
3162 + priv->scan_start = jiffies;
3163 +@@ -449,6 +434,18 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
3164 + goto out_unlock;
3165 + }
3166 +
3167 ++ if (test_bit(STATUS_SCANNING, &priv->status)) {
3168 ++ IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
3169 ++ ret = -EAGAIN;
3170 ++ goto out_unlock;
3171 ++ }
3172 ++
3173 ++ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
3174 ++ IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
3175 ++ ret = -EAGAIN;
3176 ++ goto out_unlock;
3177 ++ }
3178 ++
3179 + /* We don't schedule scan within next_scan_jiffies period.
3180 + * Avoid scanning during possible EAPOL exchange, return
3181 + * success immediately.
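
The two iwl-scan.c hunks do not change the checks themselves; they move the STATUS_SCANNING and STATUS_SCAN_ABORTING tests out of iwl_scan_initiate() and into iwl_mac_hw_scan(), which already holds the driver mutex, so the busy test and the scan start can no longer be separated by a racing caller. The shape of the fix, as an illustrative sketch rather than driver code:

    mutex_lock(&priv->mutex);
    if (test_bit(STATUS_SCANNING, &priv->status)) {
            ret = -EAGAIN;                  /* tested under the lock ...   */
            goto out_unlock;
    }
    ret = iwl_scan_initiate(priv);          /* ... so nothing sneaks in    */
    out_unlock:
    mutex_unlock(&priv->mutex);
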
3182 +diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
3183 +index f297865..adbb3ea 100644
3184 +--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
3185 ++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
3186 +@@ -1926,7 +1926,7 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv,
3187 + {
3188 + int i;
3189 +
3190 +- for (i = 0; i < IWL_RATE_COUNT; i++) {
3191 ++ for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
3192 + rates[i].bitrate = iwl3945_rates[i].ieee * 5;
3193 + rates[i].hw_value = i; /* Rate scaling will work on indexes */
3194 + rates[i].hw_value_short = i;
3195 +@@ -3903,7 +3903,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3196 + BIT(NL80211_IFTYPE_STATION) |
3197 + BIT(NL80211_IFTYPE_ADHOC);
3198 +
3199 +- hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
3200 ++ hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3201 + WIPHY_FLAG_DISABLE_BEACON_HINTS;
3202 +
3203 + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
3204 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
3205 +index 3245d33..c4fead1 100644
3206 +--- a/drivers/pci/pci.c
3207 ++++ b/drivers/pci/pci.c
3208 +@@ -2612,6 +2612,23 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3209 + return 0;
3210 + }
3211 +
3212 ++/* Some architectures require additional programming to enable VGA */
3213 ++static arch_set_vga_state_t arch_set_vga_state;
3214 ++
3215 ++void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3216 ++{
3217 ++ arch_set_vga_state = func; /* NULL disables */
3218 ++}
3219 ++
3220 ++static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3221 ++ unsigned int command_bits, bool change_bridge)
3222 ++{
3223 ++ if (arch_set_vga_state)
3224 ++ return arch_set_vga_state(dev, decode, command_bits,
3225 ++ change_bridge);
3226 ++ return 0;
3227 ++}
3228 ++
3229 + /**
3230 + * pci_set_vga_state - set VGA decode state on device and parents if requested
3231 + * @dev: the PCI device
3232 +@@ -2625,9 +2642,15 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
3233 + struct pci_bus *bus;
3234 + struct pci_dev *bridge;
3235 + u16 cmd;
3236 ++ int rc;
3237 +
3238 + WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
3239 +
3240 ++ /* ARCH specific VGA enables */
3241 ++ rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge);
3242 ++ if (rc)
3243 ++ return rc;
3244 ++
3245 + pci_read_config_word(dev, PCI_COMMAND, &cmd);
3246 + if (decode == true)
3247 + cmd |= command_bits;
3248 +@@ -2874,4 +2897,3 @@ EXPORT_SYMBOL(pci_target_state);
3249 + EXPORT_SYMBOL(pci_prepare_to_sleep);
3250 + EXPORT_SYMBOL(pci_back_from_sleep);
3251 + EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
3252 +-
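
The pci.c hunk adds a registration point, pci_register_set_vga_state(), that lets an architecture intervene before the generic code touches PCI_COMMAND; a non-zero return from the hook aborts pci_set_vga_state(). A hedged sketch of a registration (the callback name and body are invented for illustration; the signature matches the arch_set_vga_state_t typedef added to include/linux/pci.h later in this patch):

    #include <linux/pci.h>
    #include <linux/init.h>

    static int example_set_vga_state(struct pci_dev *pdev, bool decode,
                                     unsigned int command_bits,
                                     bool change_bridge)
    {
            /* program chipset-specific VGA routing here; 0 = success */
            return 0;
    }

    static int __init example_pci_setup(void)
    {
            pci_register_set_vga_state(example_set_vga_state);
            return 0;
    }
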
3253 +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
3254 +index c28a712..e6b67f2 100644
3255 +--- a/drivers/scsi/libiscsi.c
3256 ++++ b/drivers/scsi/libiscsi.c
3257 +@@ -3027,14 +3027,15 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
3258 + session->state = ISCSI_STATE_TERMINATE;
3259 + else if (conn->stop_stage != STOP_CONN_RECOVER)
3260 + session->state = ISCSI_STATE_IN_RECOVERY;
3261 ++
3262 ++ old_stop_stage = conn->stop_stage;
3263 ++ conn->stop_stage = flag;
3264 + spin_unlock_bh(&session->lock);
3265 +
3266 + del_timer_sync(&conn->transport_timer);
3267 + iscsi_suspend_tx(conn);
3268 +
3269 + spin_lock_bh(&session->lock);
3270 +- old_stop_stage = conn->stop_stage;
3271 +- conn->stop_stage = flag;
3272 + conn->c_stage = ISCSI_CONN_STOPPED;
3273 + spin_unlock_bh(&session->lock);
3274 +
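
The libiscsi.c hunk moves the stop_stage bookkeeping under the first lock acquisition: previously conn->stop_stage was recorded and updated only after del_timer_sync() and iscsi_suspend_tx() had run, leaving a window in which other paths could observe a stale stop stage while the connection was already being torn down. Recording it up front, under session->lock, closes that window.
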
3275 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
3276 +index 34d4eb9..db6b071 100644
3277 +--- a/drivers/usb/class/cdc-acm.c
3278 ++++ b/drivers/usb/class/cdc-acm.c
3279 +@@ -170,6 +170,7 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb)
3280 + {
3281 + wb->use = 0;
3282 + acm->transmitting--;
3283 ++ usb_autopm_put_interface_async(acm->control);
3284 + }
3285 +
3286 + /*
3287 +@@ -211,9 +212,12 @@ static int acm_write_start(struct acm *acm, int wbn)
3288 + }
3289 +
3290 + dbg("%s susp_count: %d", __func__, acm->susp_count);
3291 ++ usb_autopm_get_interface_async(acm->control);
3292 + if (acm->susp_count) {
3293 +- acm->delayed_wb = wb;
3294 +- schedule_work(&acm->waker);
3295 ++ if (!acm->delayed_wb)
3296 ++ acm->delayed_wb = wb;
3297 ++ else
3298 ++ usb_autopm_put_interface_async(acm->control);
3299 + spin_unlock_irqrestore(&acm->write_lock, flags);
3300 + return 0; /* A white lie */
3301 + }
3302 +@@ -534,23 +538,6 @@ static void acm_softint(struct work_struct *work)
3303 + tty_kref_put(tty);
3304 + }
3305 +
3306 +-static void acm_waker(struct work_struct *waker)
3307 +-{
3308 +- struct acm *acm = container_of(waker, struct acm, waker);
3309 +- int rv;
3310 +-
3311 +- rv = usb_autopm_get_interface(acm->control);
3312 +- if (rv < 0) {
3313 +- dev_err(&acm->dev->dev, "Autopm failure in %s\n", __func__);
3314 +- return;
3315 +- }
3316 +- if (acm->delayed_wb) {
3317 +- acm_start_wb(acm, acm->delayed_wb);
3318 +- acm->delayed_wb = NULL;
3319 +- }
3320 +- usb_autopm_put_interface(acm->control);
3321 +-}
3322 +-
3323 + /*
3324 + * TTY handlers
3325 + */
3326 +@@ -1178,7 +1165,6 @@ made_compressed_probe:
3327 + acm->urb_task.func = acm_rx_tasklet;
3328 + acm->urb_task.data = (unsigned long) acm;
3329 + INIT_WORK(&acm->work, acm_softint);
3330 +- INIT_WORK(&acm->waker, acm_waker);
3331 + init_waitqueue_head(&acm->drain_wait);
3332 + spin_lock_init(&acm->throttle_lock);
3333 + spin_lock_init(&acm->write_lock);
3334 +@@ -1343,7 +1329,6 @@ static void stop_data_traffic(struct acm *acm)
3335 + tasklet_enable(&acm->urb_task);
3336 +
3337 + cancel_work_sync(&acm->work);
3338 +- cancel_work_sync(&acm->waker);
3339 + }
3340 +
3341 + static void acm_disconnect(struct usb_interface *intf)
3342 +@@ -1435,6 +1420,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
3343 + static int acm_resume(struct usb_interface *intf)
3344 + {
3345 + struct acm *acm = usb_get_intfdata(intf);
3346 ++ struct acm_wb *wb;
3347 + int rv = 0;
3348 + int cnt;
3349 +
3350 +@@ -1449,6 +1435,21 @@ static int acm_resume(struct usb_interface *intf)
3351 + mutex_lock(&acm->mutex);
3352 + if (acm->port.count) {
3353 + rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
3354 ++
3355 ++ spin_lock_irq(&acm->write_lock);
3356 ++ if (acm->delayed_wb) {
3357 ++ wb = acm->delayed_wb;
3358 ++ acm->delayed_wb = NULL;
3359 ++ spin_unlock_irq(&acm->write_lock);
3360 ++ acm_start_wb(acm, wb);
3361 ++ } else {
3362 ++ spin_unlock_irq(&acm->write_lock);
3363 ++ }
3364 ++
3365 ++ /*
3366 ++ * delayed error checking because we must
3367 ++ * do the write path at all costs
3368 ++ */
3369 + if (rv < 0)
3370 + goto err_out;
3371 +
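
The cdc-acm rework retires the waker work item in favour of strict reference pairing: every acm_write_start() takes an async autopm reference, acm_write_done() releases it when the URB completes, and a write issued while suspended is parked in acm->delayed_wb (at most one; a second write drops its surplus reference immediately) until acm_resume() restarts it under the write lock. The invariant, restated schematically:

    usb_autopm_get_interface_async(acm->control);   /* one ref per write */
    if (acm->susp_count) {
            if (!acm->delayed_wb)
                    acm->delayed_wb = wb;   /* resume starts it; completion
                                               then drops the reference   */
            else
                    usb_autopm_put_interface_async(acm->control);
            return 0;
    }
    /* normal path: submit the URB; acm_write_done() drops the ref */
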
3372 +diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
3373 +index c4a0ee8..519eb63 100644
3374 +--- a/drivers/usb/class/cdc-acm.h
3375 ++++ b/drivers/usb/class/cdc-acm.h
3376 +@@ -112,7 +112,6 @@ struct acm {
3377 + struct mutex mutex;
3378 + struct usb_cdc_line_coding line; /* bits, stop, parity */
3379 + struct work_struct work; /* work queue entry for line discipline waking up */
3380 +- struct work_struct waker;
3381 + wait_queue_head_t drain_wait; /* close processing */
3382 + struct tasklet_struct urb_task; /* rx processing */
3383 + spinlock_t throttle_lock; /* synchronize throtteling and read callback */
3384 +diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
3385 +index 2e78b07..9804ee9 100644
3386 +--- a/drivers/video/backlight/mbp_nvidia_bl.c
3387 ++++ b/drivers/video/backlight/mbp_nvidia_bl.c
3388 +@@ -139,6 +139,51 @@ static int mbp_dmi_match(const struct dmi_system_id *id)
3389 + static const struct dmi_system_id __initdata mbp_device_table[] = {
3390 + {
3391 + .callback = mbp_dmi_match,
3392 ++ .ident = "MacBook 1,1",
3393 ++ .matches = {
3394 ++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
3395 ++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
3396 ++ },
3397 ++ .driver_data = (void *)&intel_chipset_data,
3398 ++ },
3399 ++ {
3400 ++ .callback = mbp_dmi_match,
3401 ++ .ident = "MacBook 2,1",
3402 ++ .matches = {
3403 ++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
3404 ++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"),
3405 ++ },
3406 ++ .driver_data = (void *)&intel_chipset_data,
3407 ++ },
3408 ++ {
3409 ++ .callback = mbp_dmi_match,
3410 ++ .ident = "MacBook 3,1",
3411 ++ .matches = {
3412 ++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
3413 ++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook3,1"),
3414 ++ },
3415 ++ .driver_data = (void *)&intel_chipset_data,
3416 ++ },
3417 ++ {
3418 ++ .callback = mbp_dmi_match,
3419 ++ .ident = "MacBook 4,1",
3420 ++ .matches = {
3421 ++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
3422 ++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,1"),
3423 ++ },
3424 ++ .driver_data = (void *)&intel_chipset_data,
3425 ++ },
3426 ++ {
3427 ++ .callback = mbp_dmi_match,
3428 ++ .ident = "MacBook 4,2",
3429 ++ .matches = {
3430 ++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
3431 ++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,2"),
3432 ++ },
3433 ++ .driver_data = (void *)&intel_chipset_data,
3434 ++ },
3435 ++ {
3436 ++ .callback = mbp_dmi_match,
3437 + .ident = "MacBookPro 3,1",
3438 + .matches = {
3439 + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
3440 +diff --git a/drivers/video/sunxvr500.c b/drivers/video/sunxvr500.c
3441 +index 4cd5049..3803745 100644
3442 +--- a/drivers/video/sunxvr500.c
3443 ++++ b/drivers/video/sunxvr500.c
3444 +@@ -242,11 +242,27 @@ static int __devinit e3d_set_fbinfo(struct e3d_info *ep)
3445 + static int __devinit e3d_pci_register(struct pci_dev *pdev,
3446 + const struct pci_device_id *ent)
3447 + {
3448 ++ struct device_node *of_node;
3449 ++ const char *device_type;
3450 + struct fb_info *info;
3451 + struct e3d_info *ep;
3452 + unsigned int line_length;
3453 + int err;
3454 +
3455 ++ of_node = pci_device_to_OF_node(pdev);
3456 ++ if (!of_node) {
3457 ++ printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
3458 ++ pci_name(pdev));
3459 ++ return -ENODEV;
3460 ++ }
3461 ++
3462 ++ device_type = of_get_property(of_node, "device_type", NULL);
3463 ++ if (!device_type) {
3464 ++ printk(KERN_INFO "e3d: Ignoring secondary output device "
3465 ++ "at %s\n", pci_name(pdev));
3466 ++ return -ENODEV;
3467 ++ }
3468 ++
3469 + err = pci_enable_device(pdev);
3470 + if (err < 0) {
3471 + printk(KERN_ERR "e3d: Cannot enable PCI device %s\n",
3472 +@@ -265,13 +281,7 @@ static int __devinit e3d_pci_register(struct pci_dev *pdev,
3473 + ep->info = info;
3474 + ep->pdev = pdev;
3475 + spin_lock_init(&ep->lock);
3476 +- ep->of_node = pci_device_to_OF_node(pdev);
3477 +- if (!ep->of_node) {
3478 +- printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
3479 +- pci_name(pdev));
3480 +- err = -ENODEV;
3481 +- goto err_release_fb;
3482 +- }
3483 ++ ep->of_node = of_node;
3484 +
3485 + /* Read the PCI base register of the frame buffer, which we
3486 + * need in order to interpret the RAMDAC_VID_*FB* values in
3487 +diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
3488 +index a6c5674..0b91907 100644
3489 +--- a/drivers/watchdog/hpwdt.c
3490 ++++ b/drivers/watchdog/hpwdt.c
3491 +@@ -443,7 +443,7 @@ static void hpwdt_ping(void)
3492 + static int hpwdt_change_timer(int new_margin)
3493 + {
3494 + /* Arbitrary, can't find the card's limits */
3495 +- if (new_margin < 30 || new_margin > 600) {
3496 ++ if (new_margin < 5 || new_margin > 600) {
3497 + printk(KERN_WARNING
3498 + "hpwdt: New value passed in is invalid: %d seconds.\n",
3499 + new_margin);
3500 +diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
3501 +index 4bdb7f1..e2ebe08 100644
3502 +--- a/drivers/watchdog/iTCO_wdt.c
3503 ++++ b/drivers/watchdog/iTCO_wdt.c
3504 +@@ -115,8 +115,37 @@ enum iTCO_chipsets {
3505 + TCO_3420, /* 3420 */
3506 + TCO_3450, /* 3450 */
3507 + TCO_EP80579, /* EP80579 */
3508 +- TCO_CPTD, /* CPT Desktop */
3509 +- TCO_CPTM, /* CPT Mobile */
3510 ++ TCO_CPT1, /* Cougar Point */
3511 ++ TCO_CPT2, /* Cougar Point Desktop */
3512 ++ TCO_CPT3, /* Cougar Point Mobile */
3513 ++ TCO_CPT4, /* Cougar Point */
3514 ++ TCO_CPT5, /* Cougar Point */
3515 ++ TCO_CPT6, /* Cougar Point */
3516 ++ TCO_CPT7, /* Cougar Point */
3517 ++ TCO_CPT8, /* Cougar Point */
3518 ++ TCO_CPT9, /* Cougar Point */
3519 ++ TCO_CPT10, /* Cougar Point */
3520 ++ TCO_CPT11, /* Cougar Point */
3521 ++ TCO_CPT12, /* Cougar Point */
3522 ++ TCO_CPT13, /* Cougar Point */
3523 ++ TCO_CPT14, /* Cougar Point */
3524 ++ TCO_CPT15, /* Cougar Point */
3525 ++ TCO_CPT16, /* Cougar Point */
3526 ++ TCO_CPT17, /* Cougar Point */
3527 ++ TCO_CPT18, /* Cougar Point */
3528 ++ TCO_CPT19, /* Cougar Point */
3529 ++ TCO_CPT20, /* Cougar Point */
3530 ++ TCO_CPT21, /* Cougar Point */
3531 ++ TCO_CPT22, /* Cougar Point */
3532 ++ TCO_CPT23, /* Cougar Point */
3533 ++ TCO_CPT24, /* Cougar Point */
3534 ++ TCO_CPT25, /* Cougar Point */
3535 ++ TCO_CPT26, /* Cougar Point */
3536 ++ TCO_CPT27, /* Cougar Point */
3537 ++ TCO_CPT28, /* Cougar Point */
3538 ++ TCO_CPT29, /* Cougar Point */
3539 ++ TCO_CPT30, /* Cougar Point */
3540 ++ TCO_CPT31, /* Cougar Point */
3541 + };
3542 +
3543 + static struct {
3544 +@@ -173,8 +202,37 @@ static struct {
3545 + {"3420", 2},
3546 + {"3450", 2},
3547 + {"EP80579", 2},
3548 +- {"CPT Desktop", 2},
3549 +- {"CPT Mobile", 2},
3550 ++ {"Cougar Point", 2},
3551 ++ {"Cougar Point", 2},
3552 ++ {"Cougar Point", 2},
3553 ++ {"Cougar Point", 2},
3554 ++ {"Cougar Point", 2},
3555 ++ {"Cougar Point", 2},
3556 ++ {"Cougar Point", 2},
3557 ++ {"Cougar Point", 2},
3558 ++ {"Cougar Point", 2},
3559 ++ {"Cougar Point", 2},
3560 ++ {"Cougar Point", 2},
3561 ++ {"Cougar Point", 2},
3562 ++ {"Cougar Point", 2},
3563 ++ {"Cougar Point", 2},
3564 ++ {"Cougar Point", 2},
3565 ++ {"Cougar Point", 2},
3566 ++ {"Cougar Point", 2},
3567 ++ {"Cougar Point", 2},
3568 ++ {"Cougar Point", 2},
3569 ++ {"Cougar Point", 2},
3570 ++ {"Cougar Point", 2},
3571 ++ {"Cougar Point", 2},
3572 ++ {"Cougar Point", 2},
3573 ++ {"Cougar Point", 2},
3574 ++ {"Cougar Point", 2},
3575 ++ {"Cougar Point", 2},
3576 ++ {"Cougar Point", 2},
3577 ++ {"Cougar Point", 2},
3578 ++ {"Cougar Point", 2},
3579 ++ {"Cougar Point", 2},
3580 ++ {"Cougar Point", 2},
3581 + {NULL, 0}
3582 + };
3583 +
3584 +@@ -259,8 +317,37 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = {
3585 + { ITCO_PCI_DEVICE(0x3b14, TCO_3420)},
3586 + { ITCO_PCI_DEVICE(0x3b16, TCO_3450)},
3587 + { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)},
3588 +- { ITCO_PCI_DEVICE(0x1c42, TCO_CPTD)},
3589 +- { ITCO_PCI_DEVICE(0x1c43, TCO_CPTM)},
3590 ++ { ITCO_PCI_DEVICE(0x1c41, TCO_CPT1)},
3591 ++ { ITCO_PCI_DEVICE(0x1c42, TCO_CPT2)},
3592 ++ { ITCO_PCI_DEVICE(0x1c43, TCO_CPT3)},
3593 ++ { ITCO_PCI_DEVICE(0x1c44, TCO_CPT4)},
3594 ++ { ITCO_PCI_DEVICE(0x1c45, TCO_CPT5)},
3595 ++ { ITCO_PCI_DEVICE(0x1c46, TCO_CPT6)},
3596 ++ { ITCO_PCI_DEVICE(0x1c47, TCO_CPT7)},
3597 ++ { ITCO_PCI_DEVICE(0x1c48, TCO_CPT8)},
3598 ++ { ITCO_PCI_DEVICE(0x1c49, TCO_CPT9)},
3599 ++ { ITCO_PCI_DEVICE(0x1c4a, TCO_CPT10)},
3600 ++ { ITCO_PCI_DEVICE(0x1c4b, TCO_CPT11)},
3601 ++ { ITCO_PCI_DEVICE(0x1c4c, TCO_CPT12)},
3602 ++ { ITCO_PCI_DEVICE(0x1c4d, TCO_CPT13)},
3603 ++ { ITCO_PCI_DEVICE(0x1c4e, TCO_CPT14)},
3604 ++ { ITCO_PCI_DEVICE(0x1c4f, TCO_CPT15)},
3605 ++ { ITCO_PCI_DEVICE(0x1c50, TCO_CPT16)},
3606 ++ { ITCO_PCI_DEVICE(0x1c51, TCO_CPT17)},
3607 ++ { ITCO_PCI_DEVICE(0x1c52, TCO_CPT18)},
3608 ++ { ITCO_PCI_DEVICE(0x1c53, TCO_CPT19)},
3609 ++ { ITCO_PCI_DEVICE(0x1c54, TCO_CPT20)},
3610 ++ { ITCO_PCI_DEVICE(0x1c55, TCO_CPT21)},
3611 ++ { ITCO_PCI_DEVICE(0x1c56, TCO_CPT22)},
3612 ++ { ITCO_PCI_DEVICE(0x1c57, TCO_CPT23)},
3613 ++ { ITCO_PCI_DEVICE(0x1c58, TCO_CPT24)},
3614 ++ { ITCO_PCI_DEVICE(0x1c59, TCO_CPT25)},
3615 ++ { ITCO_PCI_DEVICE(0x1c5a, TCO_CPT26)},
3616 ++ { ITCO_PCI_DEVICE(0x1c5b, TCO_CPT27)},
3617 ++ { ITCO_PCI_DEVICE(0x1c5c, TCO_CPT28)},
3618 ++ { ITCO_PCI_DEVICE(0x1c5d, TCO_CPT29)},
3619 ++ { ITCO_PCI_DEVICE(0x1c5e, TCO_CPT30)},
3620 ++ { ITCO_PCI_DEVICE(0x1c5f, TCO_CPT31)},
3621 + { 0, }, /* End of list */
3622 + };
3623 + MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
3624 +diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
3625 +index 74a0461..92f9590 100644
3626 +--- a/fs/9p/vfs_file.c
3627 ++++ b/fs/9p/vfs_file.c
3628 +@@ -114,7 +114,7 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
3629 + P9_DPRINTK(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
3630 +
3631 + /* No mandatory locks */
3632 +- if (__mandatory_lock(inode))
3633 ++ if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
3634 + return -ENOLCK;
3635 +
3636 + if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
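
The v9fs_file_lock() change narrows the mandatory-lock refusal: F_UNLCK is now exempt, so a holder can still release its lock after mandatory locking has been enabled on the inode; rejecting the unlock as well would strand the lock.
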
3637 +diff --git a/fs/block_dev.c b/fs/block_dev.c
3638 +index d11d028..8db62b2 100644
3639 +--- a/fs/block_dev.c
3640 ++++ b/fs/block_dev.c
3641 +@@ -404,7 +404,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
3642 + * NULL first argument is nfsd_sync_dir() and that's not a directory.
3643 + */
3644 +
3645 +-static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
3646 ++int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
3647 + {
3648 + struct block_device *bdev = I_BDEV(filp->f_mapping->host);
3649 + int error;
3650 +@@ -418,6 +418,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
3651 + error = 0;
3652 + return error;
3653 + }
3654 ++EXPORT_SYMBOL(block_fsync);
3655 +
3656 + /*
3657 + * pseudo-fs
3658 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
3659 +index 941441d..4e6dbab 100644
3660 +--- a/fs/cifs/cifssmb.c
3661 ++++ b/fs/cifs/cifssmb.c
3662 +@@ -1430,6 +1430,8 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
3663 + __u32 bytes_sent;
3664 + __u16 byte_count;
3665 +
3666 ++ *nbytes = 0;
3667 ++
3668 + /* cFYI(1, ("write at %lld %d bytes", offset, count));*/
3669 + if (tcon->ses == NULL)
3670 + return -ECONNABORTED;
3671 +@@ -1512,11 +1514,18 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
3672 + cifs_stats_inc(&tcon->num_writes);
3673 + if (rc) {
3674 + cFYI(1, ("Send error in write = %d", rc));
3675 +- *nbytes = 0;
3676 + } else {
3677 + *nbytes = le16_to_cpu(pSMBr->CountHigh);
3678 + *nbytes = (*nbytes) << 16;
3679 + *nbytes += le16_to_cpu(pSMBr->Count);
3680 ++
3681 ++ /*
3682 ++ * Mask off the high 16 bits when the byte count returned by the
3683 ++ * server exceeds the byte count requested by the client. Some
3684 ++ * OS/2 servers are known to set incorrect CountHigh values.
3685 ++ */
3686 ++ if (*nbytes > count)
3687 ++ *nbytes &= 0xFFFF;
3688 + }
3689 +
3690 + cifs_buf_release(pSMB);
3691 +@@ -1605,6 +1614,14 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
3692 + *nbytes = le16_to_cpu(pSMBr->CountHigh);
3693 + *nbytes = (*nbytes) << 16;
3694 + *nbytes += le16_to_cpu(pSMBr->Count);
3695 ++
3696 ++ /*
3697 ++ * Mask off high 16 bits when bytes written as returned by the
3698 ++ * server is greater than bytes requested by the client. OS/2
3699 ++ * servers are known to set incorrect CountHigh values.
3700 ++ */
3701 ++ if (*nbytes > count)
3702 ++ *nbytes &= 0xFFFF;
3703 + }
3704 +
3705 + /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
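
Both CIFSSMBWrite() and CIFSSMBWrite2() now clamp the server-reported completion count, and *nbytes is zeroed up front so error paths cannot return a stale value. A small standalone illustration of the clamping arithmetic (hypothetical values; plain userspace C):

    #include <stdio.h>

    static unsigned int cifs_written(unsigned short count_high,
                                     unsigned short count_low,
                                     unsigned int requested)
    {
            unsigned int nbytes = ((unsigned int)count_high << 16) + count_low;

            /* bogus CountHigh: trust only the low 16 bits */
            if (nbytes > requested)
                    nbytes &= 0xFFFF;
            return nbytes;
    }

    int main(void)
    {
            /* a server echoing CountHigh=0x0001 for a 4096-byte request */
            printf("%u\n", cifs_written(0x0001, 4096, 4096)); /* prints 4096 */
            return 0;
    }
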
3706 +diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
3707 +index 4a430ab..23dc2af 100644
3708 +--- a/fs/ecryptfs/inode.c
3709 ++++ b/fs/ecryptfs/inode.c
3710 +@@ -647,38 +647,17 @@ out_lock:
3711 + return rc;
3712 + }
3713 +
3714 +-static int
3715 +-ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
3716 ++static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
3717 ++ size_t *bufsiz)
3718 + {
3719 ++ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
3720 + char *lower_buf;
3721 +- size_t lower_bufsiz;
3722 +- struct dentry *lower_dentry;
3723 +- struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
3724 +- char *plaintext_name;
3725 +- size_t plaintext_name_size;
3726 ++ size_t lower_bufsiz = PATH_MAX;
3727 + mm_segment_t old_fs;
3728 + int rc;
3729 +
3730 +- lower_dentry = ecryptfs_dentry_to_lower(dentry);
3731 +- if (!lower_dentry->d_inode->i_op->readlink) {
3732 +- rc = -EINVAL;
3733 +- goto out;
3734 +- }
3735 +- mount_crypt_stat = &ecryptfs_superblock_to_private(
3736 +- dentry->d_sb)->mount_crypt_stat;
3737 +- /*
3738 +- * If the lower filename is encrypted, it will result in a significantly
3739 +- * longer name. If needed, truncate the name after decode and decrypt.
3740 +- */
3741 +- if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
3742 +- lower_bufsiz = PATH_MAX;
3743 +- else
3744 +- lower_bufsiz = bufsiz;
3745 +- /* Released in this function */
3746 + lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL);
3747 +- if (lower_buf == NULL) {
3748 +- printk(KERN_ERR "%s: Out of memory whilst attempting to "
3749 +- "kmalloc [%zd] bytes\n", __func__, lower_bufsiz);
3750 ++ if (!lower_buf) {
3751 + rc = -ENOMEM;
3752 + goto out;
3753 + }
3754 +@@ -688,29 +667,31 @@ ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
3755 + (char __user *)lower_buf,
3756 + lower_bufsiz);
3757 + set_fs(old_fs);
3758 +- if (rc >= 0) {
3759 +- rc = ecryptfs_decode_and_decrypt_filename(&plaintext_name,
3760 +- &plaintext_name_size,
3761 +- dentry, lower_buf,
3762 +- rc);
3763 +- if (rc) {
3764 +- printk(KERN_ERR "%s: Error attempting to decode and "
3765 +- "decrypt filename; rc = [%d]\n", __func__,
3766 +- rc);
3767 +- goto out_free_lower_buf;
3768 +- }
3769 +- /* Check for bufsiz <= 0 done in sys_readlinkat() */
3770 +- rc = copy_to_user(buf, plaintext_name,
3771 +- min((size_t) bufsiz, plaintext_name_size));
3772 +- if (rc)
3773 +- rc = -EFAULT;
3774 +- else
3775 +- rc = plaintext_name_size;
3776 +- kfree(plaintext_name);
3777 +- fsstack_copy_attr_atime(dentry->d_inode, lower_dentry->d_inode);
3778 +- }
3779 +-out_free_lower_buf:
3780 ++ if (rc < 0)
3781 ++ goto out;
3782 ++ lower_bufsiz = rc;
3783 ++ rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry,
3784 ++ lower_buf, lower_bufsiz);
3785 ++out:
3786 + kfree(lower_buf);
3787 ++ return rc;
3788 ++}
3789 ++
3790 ++static int
3791 ++ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
3792 ++{
3793 ++ char *kbuf;
3794 ++ size_t kbufsiz, copied;
3795 ++ int rc;
3796 ++
3797 ++ rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz);
3798 ++ if (rc)
3799 ++ goto out;
3800 ++ copied = min_t(size_t, bufsiz, kbufsiz);
3801 ++ rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied;
3802 ++ kfree(kbuf);
3803 ++ fsstack_copy_attr_atime(dentry->d_inode,
3804 ++ ecryptfs_dentry_to_lower(dentry)->d_inode);
3805 + out:
3806 + return rc;
3807 + }
3808 +@@ -1015,6 +996,28 @@ out:
3809 + return rc;
3810 + }
3811 +
3812 ++int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry,
3813 ++ struct kstat *stat)
3814 ++{
3815 ++ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
3816 ++ int rc = 0;
3817 ++
3818 ++ mount_crypt_stat = &ecryptfs_superblock_to_private(
3819 ++ dentry->d_sb)->mount_crypt_stat;
3820 ++ generic_fillattr(dentry->d_inode, stat);
3821 ++ if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
3822 ++ char *target;
3823 ++ size_t targetsiz;
3824 ++
3825 ++ rc = ecryptfs_readlink_lower(dentry, &target, &targetsiz);
3826 ++ if (!rc) {
3827 ++ kfree(target);
3828 ++ stat->size = targetsiz;
3829 ++ }
3830 ++ }
3831 ++ return rc;
3832 ++}
3833 ++
3834 + int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
3835 + struct kstat *stat)
3836 + {
3837 +@@ -1039,7 +1042,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
3838 +
3839 + lower_dentry = ecryptfs_dentry_to_lower(dentry);
3840 + if (!lower_dentry->d_inode->i_op->setxattr) {
3841 +- rc = -ENOSYS;
3842 ++ rc = -EOPNOTSUPP;
3843 + goto out;
3844 + }
3845 + mutex_lock(&lower_dentry->d_inode->i_mutex);
3846 +@@ -1057,7 +1060,7 @@ ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name,
3847 + int rc = 0;
3848 +
3849 + if (!lower_dentry->d_inode->i_op->getxattr) {
3850 +- rc = -ENOSYS;
3851 ++ rc = -EOPNOTSUPP;
3852 + goto out;
3853 + }
3854 + mutex_lock(&lower_dentry->d_inode->i_mutex);
3855 +@@ -1084,7 +1087,7 @@ ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size)
3856 +
3857 + lower_dentry = ecryptfs_dentry_to_lower(dentry);
3858 + if (!lower_dentry->d_inode->i_op->listxattr) {
3859 +- rc = -ENOSYS;
3860 ++ rc = -EOPNOTSUPP;
3861 + goto out;
3862 + }
3863 + mutex_lock(&lower_dentry->d_inode->i_mutex);
3864 +@@ -1101,7 +1104,7 @@ static int ecryptfs_removexattr(struct dentry *dentry, const char *name)
3865 +
3866 + lower_dentry = ecryptfs_dentry_to_lower(dentry);
3867 + if (!lower_dentry->d_inode->i_op->removexattr) {
3868 +- rc = -ENOSYS;
3869 ++ rc = -EOPNOTSUPP;
3870 + goto out;
3871 + }
3872 + mutex_lock(&lower_dentry->d_inode->i_mutex);
3873 +@@ -1132,6 +1135,7 @@ const struct inode_operations ecryptfs_symlink_iops = {
3874 + .put_link = ecryptfs_put_link,
3875 + .permission = ecryptfs_permission,
3876 + .setattr = ecryptfs_setattr,
3877 ++ .getattr = ecryptfs_getattr_link,
3878 + .setxattr = ecryptfs_setxattr,
3879 + .getxattr = ecryptfs_getxattr,
3880 + .listxattr = ecryptfs_listxattr,
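
Two independent fixes sit in this file: the xattr paths now return -EOPNOTSUPP rather than -ENOSYS when the lower filesystem lacks the operation (-ENOSYS signals that the system call itself is unimplemented, which misleads callers), and symlinks gain a dedicated ->getattr, ecryptfs_getattr_link(), so stat() reports the decrypted target length rather than the longer encrypted one when filename encryption is enabled.
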
3881 +diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
3882 +index b15a43a..1a037f7 100644
3883 +--- a/fs/ecryptfs/super.c
3884 ++++ b/fs/ecryptfs/super.c
3885 +@@ -85,7 +85,6 @@ static void ecryptfs_destroy_inode(struct inode *inode)
3886 + if (lower_dentry->d_inode) {
3887 + fput(inode_info->lower_file);
3888 + inode_info->lower_file = NULL;
3889 +- d_drop(lower_dentry);
3890 + }
3891 + }
3892 + ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat);
3893 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3894 +index 874d169..602d5ad 100644
3895 +--- a/fs/ext4/ext4.h
3896 ++++ b/fs/ext4/ext4.h
3897 +@@ -139,8 +139,8 @@ typedef struct ext4_io_end {
3898 + struct inode *inode; /* file being written to */
3899 + unsigned int flag; /* unwritten or not */
3900 + int error; /* I/O error code */
3901 +- ext4_lblk_t offset; /* offset in the file */
3902 +- size_t size; /* size of the extent */
3903 ++ loff_t offset; /* offset in the file */
3904 ++ ssize_t size; /* size of the extent */
3905 + struct work_struct work; /* data work queue */
3906 + } ext4_io_end_t;
3907 +
3908 +@@ -1744,7 +1744,7 @@ extern void ext4_ext_release(struct super_block *);
3909 + extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
3910 + loff_t len);
3911 + extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
3912 +- loff_t len);
3913 ++ ssize_t len);
3914 + extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
3915 + sector_t block, unsigned int max_blocks,
3916 + struct buffer_head *bh, int flags);
3917 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3918 +index 765a482..c568779 100644
3919 +--- a/fs/ext4/extents.c
3920 ++++ b/fs/ext4/extents.c
3921 +@@ -3603,7 +3603,7 @@ retry:
3922 + * Returns 0 on success.
3923 + */
3924 + int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
3925 +- loff_t len)
3926 ++ ssize_t len)
3927 + {
3928 + handle_t *handle;
3929 + ext4_lblk_t block;
3930 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3931 +index e119524..2059c34 100644
3932 +--- a/fs/ext4/inode.c
3933 ++++ b/fs/ext4/inode.c
3934 +@@ -3551,7 +3551,7 @@ static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
3935 + {
3936 + struct inode *inode = io->inode;
3937 + loff_t offset = io->offset;
3938 +- size_t size = io->size;
3939 ++ ssize_t size = io->size;
3940 + int ret = 0;
3941 +
3942 + ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p,"
3943 +diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
3944 +index f565f24..72646e2 100644
3945 +--- a/fs/fat/namei_vfat.c
3946 ++++ b/fs/fat/namei_vfat.c
3947 +@@ -309,7 +309,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
3948 + {
3949 + struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options;
3950 + wchar_t *ip, *ext_start, *end, *name_start;
3951 +- unsigned char base[9], ext[4], buf[8], *p;
3952 ++ unsigned char base[9], ext[4], buf[5], *p;
3953 + unsigned char charbuf[NLS_MAX_CHARSET_SIZE];
3954 + int chl, chi;
3955 + int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
3956 +@@ -467,7 +467,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
3957 + return 0;
3958 + }
3959 +
3960 +- i = jiffies & 0xffff;
3961 ++ i = jiffies;
3962 + sz = (jiffies >> 16) & 0x7;
3963 + if (baselen > 2) {
3964 + baselen = numtail2_baselen;
3965 +@@ -476,7 +476,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
3966 + name_res[baselen + 4] = '~';
3967 + name_res[baselen + 5] = '1' + sz;
3968 + while (1) {
3969 +- sprintf(buf, "%04X", i);
3970 ++ snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
3971 + memcpy(&name_res[baselen], buf, 4);
3972 + if (vfat_find_form(dir, name_res) < 0)
3973 + break;
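
The namei_vfat.c hunks bound the numeric-tail formatting: buf shrinks to the 5 bytes that "%04X" actually needs, the 0xffff mask moves to the point of use, and snprintf() caps the write, so the tail stays exactly four hex digits even if i drifts outside 0..0xFFFF between loop iterations. A standalone demonstration (plain C, illustrative value):

    #include <stdio.h>

    int main(void)
    {
            char buf[5];            /* four hex digits + NUL */
            int i = -11;            /* a value outside 0..0xFFFF */

            snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
            printf("%s\n", buf);    /* prints FFF5: exactly four digits */
            return 0;
    }
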
3974 +diff --git a/fs/nfs/client.c b/fs/nfs/client.c
3975 +index ee77713..bd39abc 100644
3976 +--- a/fs/nfs/client.c
3977 ++++ b/fs/nfs/client.c
3978 +@@ -1293,7 +1293,8 @@ static int nfs4_init_server(struct nfs_server *server,
3979 +
3980 + /* Initialise the client representation from the mount data */
3981 + server->flags = data->flags;
3982 +- server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
3983 ++ server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR|
3984 ++ NFS_CAP_POSIX_LOCK;
3985 + server->options = data->options;
3986 +
3987 + /* Get a client record */
3988 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
3989 +index 8b5382e..af6948d 100644
3990 +--- a/fs/nfs/dir.c
3991 ++++ b/fs/nfs/dir.c
3992 +@@ -1025,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
3993 + res = NULL;
3994 + goto out;
3995 + /* This turned out not to be a regular file */
3996 ++ case -EISDIR:
3997 + case -ENOTDIR:
3998 + goto no_open;
3999 + case -ELOOP:
4000 + if (!(nd->intent.open.flags & O_NOFOLLOW))
4001 + goto no_open;
4002 +- /* case -EISDIR: */
4003 + /* case -EINVAL: */
4004 + default:
4005 + goto out;
4006 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
4007 +index 375f0fa..ecf6602 100644
4008 +--- a/fs/nfs/nfs4proc.c
4009 ++++ b/fs/nfs/nfs4proc.c
4010 +@@ -1520,6 +1520,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
4011 + nfs_post_op_update_inode(dir, o_res->dir_attr);
4012 + } else
4013 + nfs_refresh_inode(dir, o_res->dir_attr);
4014 ++ if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
4015 ++ server->caps &= ~NFS_CAP_POSIX_LOCK;
4016 + if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
4017 + status = _nfs4_proc_open_confirm(data);
4018 + if (status != 0)
4019 +@@ -1660,7 +1662,7 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
4020 + status = PTR_ERR(state);
4021 + if (IS_ERR(state))
4022 + goto err_opendata_put;
4023 +- if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0)
4024 ++ if (server->caps & NFS_CAP_POSIX_LOCK)
4025 + set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
4026 + nfs4_opendata_put(opendata);
4027 + nfs4_put_state_owner(sp);
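
Together with the fs/nfs/client.c hunk above, this turns POSIX lock support into a latched per-server capability: NFS_CAP_POSIX_LOCK is assumed at mount time, cleared the first time an OPEN reply lacks NFS4_OPEN_RESULT_LOCKTYPE_POSIX, and it is the capability, not the individual open result, that now decides whether NFS_STATE_POSIX_LOCKS is set on new open state.
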
4028 +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
4029 +index a8587e9..bbf72d8 100644
4030 +--- a/fs/nfsd/nfs4xdr.c
4031 ++++ b/fs/nfsd/nfs4xdr.c
4032 +@@ -2121,9 +2121,15 @@ out_acl:
4033 + * and this is the root of a cross-mounted filesystem.
4034 + */
4035 + if (ignore_crossmnt == 0 &&
4036 +- exp->ex_path.mnt->mnt_root->d_inode == dentry->d_inode) {
4037 +- err = vfs_getattr(exp->ex_path.mnt->mnt_parent,
4038 +- exp->ex_path.mnt->mnt_mountpoint, &stat);
4039 ++ dentry == exp->ex_path.mnt->mnt_root) {
4040 ++ struct path path = exp->ex_path;
4041 ++ path_get(&path);
4042 ++ while (follow_up(&path)) {
4043 ++ if (path.dentry != path.mnt->mnt_root)
4044 ++ break;
4045 ++ }
4046 ++ err = vfs_getattr(path.mnt, path.dentry, &stat);
4047 ++ path_put(&path);
4048 + if (err)
4049 + goto out_nfserr;
4050 + }
4051 +diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
4052 +index 0501974..8ccf0f8 100644
4053 +--- a/fs/ocfs2/acl.c
4054 ++++ b/fs/ocfs2/acl.c
4055 +@@ -30,6 +30,8 @@
4056 + #include "alloc.h"
4057 + #include "dlmglue.h"
4058 + #include "file.h"
4059 ++#include "inode.h"
4060 ++#include "journal.h"
4061 + #include "ocfs2_fs.h"
4062 +
4063 + #include "xattr.h"
4064 +@@ -166,6 +168,60 @@ static struct posix_acl *ocfs2_get_acl(struct inode *inode, int type)
4065 + }
4066 +
4067 + /*
4068 ++ * Helper function to set i_mode in memory and disk. Some call paths
4069 ++ * will not have di_bh or a journal handle to pass, in which case it
4070 ++ * will create its own.
4071 ++ */
4072 ++static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
4073 ++ handle_t *handle, umode_t new_mode)
4074 ++{
4075 ++ int ret, commit_handle = 0;
4076 ++ struct ocfs2_dinode *di;
4077 ++
4078 ++ if (di_bh == NULL) {
4079 ++ ret = ocfs2_read_inode_block(inode, &di_bh);
4080 ++ if (ret) {
4081 ++ mlog_errno(ret);
4082 ++ goto out;
4083 ++ }
4084 ++ } else
4085 ++ get_bh(di_bh);
4086 ++
4087 ++ if (handle == NULL) {
4088 ++ handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
4089 ++ OCFS2_INODE_UPDATE_CREDITS);
4090 ++ if (IS_ERR(handle)) {
4091 ++ ret = PTR_ERR(handle);
4092 ++ mlog_errno(ret);
4093 ++ goto out_brelse;
4094 ++ }
4095 ++
4096 ++ commit_handle = 1;
4097 ++ }
4098 ++
4099 ++ di = (struct ocfs2_dinode *)di_bh->b_data;
4100 ++ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
4101 ++ OCFS2_JOURNAL_ACCESS_WRITE);
4102 ++ if (ret) {
4103 ++ mlog_errno(ret);
4104 ++ goto out_commit;
4105 ++ }
4106 ++
4107 ++ inode->i_mode = new_mode;
4108 ++ di->i_mode = cpu_to_le16(inode->i_mode);
4109 ++
4110 ++ ocfs2_journal_dirty(handle, di_bh);
4111 ++
4112 ++out_commit:
4113 ++ if (commit_handle)
4114 ++ ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
4115 ++out_brelse:
4116 ++ brelse(di_bh);
4117 ++out:
4118 ++ return ret;
4119 ++}
4120 ++
4121 ++/*
4122 + * Set the access or default ACL of an inode.
4123 + */
4124 + static int ocfs2_set_acl(handle_t *handle,
4125 +@@ -193,9 +249,14 @@ static int ocfs2_set_acl(handle_t *handle,
4126 + if (ret < 0)
4127 + return ret;
4128 + else {
4129 +- inode->i_mode = mode;
4130 + if (ret == 0)
4131 + acl = NULL;
4132 ++
4133 ++ ret = ocfs2_acl_set_mode(inode, di_bh,
4134 ++ handle, mode);
4135 ++ if (ret)
4136 ++ return ret;
4137 ++
4138 + }
4139 + }
4140 + break;
4141 +@@ -283,6 +344,7 @@ int ocfs2_init_acl(handle_t *handle,
4142 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
4143 + struct posix_acl *acl = NULL;
4144 + int ret = 0;
4145 ++ mode_t mode;
4146 +
4147 + if (!S_ISLNK(inode->i_mode)) {
4148 + if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
4149 +@@ -291,12 +353,17 @@ int ocfs2_init_acl(handle_t *handle,
4150 + if (IS_ERR(acl))
4151 + return PTR_ERR(acl);
4152 + }
4153 +- if (!acl)
4154 +- inode->i_mode &= ~current_umask();
4155 ++ if (!acl) {
4156 ++ mode = inode->i_mode & ~current_umask();
4157 ++ ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
4158 ++ if (ret) {
4159 ++ mlog_errno(ret);
4160 ++ goto cleanup;
4161 ++ }
4162 ++ }
4163 + }
4164 + if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
4165 + struct posix_acl *clone;
4166 +- mode_t mode;
4167 +
4168 + if (S_ISDIR(inode->i_mode)) {
4169 + ret = ocfs2_set_acl(handle, inode, di_bh,
4170 +@@ -313,7 +380,7 @@ int ocfs2_init_acl(handle_t *handle,
4171 + mode = inode->i_mode;
4172 + ret = posix_acl_create_masq(clone, &mode);
4173 + if (ret >= 0) {
4174 +- inode->i_mode = mode;
4175 ++ ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
4176 + if (ret > 0) {
4177 + ret = ocfs2_set_acl(handle, inode,
4178 + di_bh, ACL_TYPE_ACCESS,
4179 +diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
4180 +index c30b644..79b5dac 100644
4181 +--- a/fs/ocfs2/suballoc.c
4182 ++++ b/fs/ocfs2/suballoc.c
4183 +@@ -152,7 +152,7 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
4184 +
4185 + #define do_error(fmt, ...) \
4186 + do{ \
4187 +- if (clean_error) \
4188 ++ if (resize) \
4189 + mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__); \
4190 + else \
4191 + ocfs2_error(sb, fmt, ##__VA_ARGS__); \
4192 +@@ -160,7 +160,7 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
4193 +
4194 + static int ocfs2_validate_gd_self(struct super_block *sb,
4195 + struct buffer_head *bh,
4196 +- int clean_error)
4197 ++ int resize)
4198 + {
4199 + struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
4200 +
4201 +@@ -211,7 +211,7 @@ static int ocfs2_validate_gd_self(struct super_block *sb,
4202 + static int ocfs2_validate_gd_parent(struct super_block *sb,
4203 + struct ocfs2_dinode *di,
4204 + struct buffer_head *bh,
4205 +- int clean_error)
4206 ++ int resize)
4207 + {
4208 + unsigned int max_bits;
4209 + struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
4210 +@@ -233,8 +233,11 @@ static int ocfs2_validate_gd_parent(struct super_block *sb,
4211 + return -EINVAL;
4212 + }
4213 +
4214 +- if (le16_to_cpu(gd->bg_chain) >=
4215 +- le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) {
4216 ++ /* In resize, we may encounter the case bg_chain == cl_next_free_rec. */
4217 ++ if ((le16_to_cpu(gd->bg_chain) >
4218 ++ le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) ||
4219 ++ ((le16_to_cpu(gd->bg_chain) ==
4220 ++ le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) && !resize)) {
4221 + do_error("Group descriptor #%llu has bad chain %u",
4222 + (unsigned long long)bh->b_blocknr,
4223 + le16_to_cpu(gd->bg_chain));
4224 +diff --git a/fs/proc/base.c b/fs/proc/base.c
4225 +index 58324c2..3cd449d 100644
4226 +--- a/fs/proc/base.c
4227 ++++ b/fs/proc/base.c
4228 +@@ -442,12 +442,13 @@ static const struct file_operations proc_lstats_operations = {
4229 + unsigned long badness(struct task_struct *p, unsigned long uptime);
4230 + static int proc_oom_score(struct task_struct *task, char *buffer)
4231 + {
4232 +- unsigned long points;
4233 ++ unsigned long points = 0;
4234 + struct timespec uptime;
4235 +
4236 + do_posix_clock_monotonic_gettime(&uptime);
4237 + read_lock(&tasklist_lock);
4238 +- points = badness(task->group_leader, uptime.tv_sec);
4239 ++ if (pid_alive(task))
4240 ++ points = badness(task, uptime.tv_sec);
4241 + read_unlock(&tasklist_lock);
4242 + return sprintf(buffer, "%lu\n", points);
4243 + }
4244 +diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
4245 +index 6e722c1..6c9da00 100644
4246 +--- a/fs/quota/dquot.c
4247 ++++ b/fs/quota/dquot.c
4248 +@@ -2321,34 +2321,34 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
4249 + if (di->dqb_valid & QIF_SPACE) {
4250 + dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
4251 + check_blim = 1;
4252 +- __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
4253 ++ set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
4254 + }
4255 + if (di->dqb_valid & QIF_BLIMITS) {
4256 + dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit);
4257 + dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit);
4258 + check_blim = 1;
4259 +- __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
4260 ++ set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
4261 + }
4262 + if (di->dqb_valid & QIF_INODES) {
4263 + dm->dqb_curinodes = di->dqb_curinodes;
4264 + check_ilim = 1;
4265 +- __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
4266 ++ set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
4267 + }
4268 + if (di->dqb_valid & QIF_ILIMITS) {
4269 + dm->dqb_isoftlimit = di->dqb_isoftlimit;
4270 + dm->dqb_ihardlimit = di->dqb_ihardlimit;
4271 + check_ilim = 1;
4272 +- __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
4273 ++ set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
4274 + }
4275 + if (di->dqb_valid & QIF_BTIME) {
4276 + dm->dqb_btime = di->dqb_btime;
4277 + check_blim = 1;
4278 +- __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
4279 ++ set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
4280 + }
4281 + if (di->dqb_valid & QIF_ITIME) {
4282 + dm->dqb_itime = di->dqb_itime;
4283 + check_ilim = 1;
4284 +- __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
4285 ++ set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
4286 + }
4287 +
4288 + if (check_blim) {
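
The switch from __set_bit() to set_bit() matters because dq_flags is shared state: the non-atomic variant compiles to a plain load/or/store, so two CPUs updating different bits of the same word can overwrite each other and silently drop a flag, while set_bit() performs a locked read-modify-write.
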
4289 +diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
4290 +index b4a7dd0..33bc410 100644
4291 +--- a/fs/reiserfs/super.c
4292 ++++ b/fs/reiserfs/super.c
4293 +@@ -1619,10 +1619,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
4294 + save_mount_options(s, data);
4295 +
4296 + sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
4297 +- if (!sbi) {
4298 +- errval = -ENOMEM;
4299 +- goto error_alloc;
4300 +- }
4301 ++ if (!sbi)
4302 ++ return -ENOMEM;
4303 + s->s_fs_info = sbi;
4304 + /* Set default values for options: non-aggressive tails, RO on errors */
4305 + REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
4306 +@@ -1879,12 +1877,12 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
4307 + return (0);
4308 +
4309 + error:
4310 +- reiserfs_write_unlock(s);
4311 +-error_alloc:
4312 + if (jinit_done) { /* kill the commit thread, free journal ram */
4313 + journal_release_error(NULL, s);
4314 + }
4315 +
4316 ++ reiserfs_write_unlock(s);
4317 ++
4318 + reiserfs_free_bitmap_cache(s);
4319 + if (SB_BUFFER_WITH_SB(s))
4320 + brelse(SB_BUFFER_WITH_SB(s));
4321 +diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
4322 +index 66abe36..1c65a2b 100644
4323 +--- a/fs/xfs/linux-2.6/xfs_aops.c
4324 ++++ b/fs/xfs/linux-2.6/xfs_aops.c
4325 +@@ -163,14 +163,17 @@ xfs_ioend_new_eof(
4326 + }
4327 +
4328 + /*
4329 +- * Update on-disk file size now that data has been written to disk.
4330 +- * The current in-memory file size is i_size. If a write is beyond
4331 +- * eof i_new_size will be the intended file size until i_size is
4332 +- * updated. If this write does not extend all the way to the valid
4333 +- * file size then restrict this update to the end of the write.
4334 ++ * Update on-disk file size now that data has been written to disk. The
4335 ++ * current in-memory file size is i_size. If a write is beyond eof i_new_size
4336 ++ * will be the intended file size until i_size is updated. If this write does
4337 ++ * not extend all the way to the valid file size then restrict this update to
4338 ++ * the end of the write.
4339 ++ *
4340 ++ * This function does not block as blocking on the inode lock in IO completion
4341 ++ * can lead to IO completion order dependency deadlocks. If it can't get the
4342 ++ * inode ilock it will return EAGAIN. Callers must handle this.
4343 + */
4344 +-
4345 +-STATIC void
4346 ++STATIC int
4347 + xfs_setfilesize(
4348 + xfs_ioend_t *ioend)
4349 + {
4350 +@@ -181,9 +184,11 @@ xfs_setfilesize(
4351 + ASSERT(ioend->io_type != IOMAP_READ);
4352 +
4353 + if (unlikely(ioend->io_error))
4354 +- return;
4355 ++ return 0;
4356 ++
4357 ++ if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
4358 ++ return EAGAIN;
4359 +
4360 +- xfs_ilock(ip, XFS_ILOCK_EXCL);
4361 + isize = xfs_ioend_new_eof(ioend);
4362 + if (isize) {
4363 + ip->i_d.di_size = isize;
4364 +@@ -191,6 +196,28 @@ xfs_setfilesize(
4365 + }
4366 +
4367 + xfs_iunlock(ip, XFS_ILOCK_EXCL);
4368 ++ return 0;
4369 ++}
4370 ++
4371 ++/*
4372 ++ * Schedule IO completion handling on a xfsdatad if this was
4373 ++ * the final hold on this ioend. If we are asked to wait,
4374 ++ * flush the workqueue.
4375 ++ */
4376 ++STATIC void
4377 ++xfs_finish_ioend(
4378 ++ xfs_ioend_t *ioend,
4379 ++ int wait)
4380 ++{
4381 ++ if (atomic_dec_and_test(&ioend->io_remaining)) {
4382 ++ struct workqueue_struct *wq;
4383 ++
4384 ++ wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
4385 ++ xfsconvertd_workqueue : xfsdatad_workqueue;
4386 ++ queue_work(wq, &ioend->io_work);
4387 ++ if (wait)
4388 ++ flush_workqueue(wq);
4389 ++ }
4390 + }
4391 +
4392 + /*
4393 +@@ -198,11 +225,11 @@ xfs_setfilesize(
4394 + */
4395 + STATIC void
4396 + xfs_end_io(
4397 +- struct work_struct *work)
4398 ++ struct work_struct *work)
4399 + {
4400 +- xfs_ioend_t *ioend =
4401 +- container_of(work, xfs_ioend_t, io_work);
4402 +- struct xfs_inode *ip = XFS_I(ioend->io_inode);
4403 ++ xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work);
4404 ++ struct xfs_inode *ip = XFS_I(ioend->io_inode);
4405 ++ int error;
4406 +
4407 + /*
4408 + * For unwritten extents we need to issue transactions to convert a
4409 +@@ -210,7 +237,6 @@ xfs_end_io(
4410 + */
4411 + if (ioend->io_type == IOMAP_UNWRITTEN &&
4412 + likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
4413 +- int error;
4414 +
4415 + error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
4416 + ioend->io_size);
4417 +@@ -222,30 +248,23 @@ xfs_end_io(
4418 + * We might have to update the on-disk file size after extending
4419 + * writes.
4420 + */
4421 +- if (ioend->io_type != IOMAP_READ)
4422 +- xfs_setfilesize(ioend);
4423 +- xfs_destroy_ioend(ioend);
4424 +-}
4425 +-
4426 +-/*
4427 +- * Schedule IO completion handling on a xfsdatad if this was
4428 +- * the final hold on this ioend. If we are asked to wait,
4429 +- * flush the workqueue.
4430 +- */
4431 +-STATIC void
4432 +-xfs_finish_ioend(
4433 +- xfs_ioend_t *ioend,
4434 +- int wait)
4435 +-{
4436 +- if (atomic_dec_and_test(&ioend->io_remaining)) {
4437 +- struct workqueue_struct *wq;
4438 +-
4439 +- wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
4440 +- xfsconvertd_workqueue : xfsdatad_workqueue;
4441 +- queue_work(wq, &ioend->io_work);
4442 +- if (wait)
4443 +- flush_workqueue(wq);
4444 ++ if (ioend->io_type != IOMAP_READ) {
4445 ++ error = xfs_setfilesize(ioend);
4446 ++ ASSERT(!error || error == EAGAIN);
4447 + }
4448 ++
4449 ++ /*
4450 ++ * If we didn't complete processing of the ioend, requeue it to the
4451 ++ * tail of the workqueue for another attempt later. Otherwise destroy
4452 ++ * it.
4453 ++ */
4454 ++ if (error == EAGAIN) {
4455 ++ atomic_inc(&ioend->io_remaining);
4456 ++ xfs_finish_ioend(ioend, 0);
4457 ++ /* ensure we don't spin on blocked ioends */
4458 ++ delay(1);
4459 ++ } else
4460 ++ xfs_destroy_ioend(ioend);
4461 + }
4462 +
4463 + /*
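
Summarizing the xfs_aops.c rework: xfs_setfilesize() becomes non-blocking (trylock, EAGAIN on contention) because sleeping on the inode lock inside I/O completion can deadlock on completion ordering, and xfs_finish_ioend() moves above xfs_end_io() so the latter can requeue a contended ioend instead of blocking. Condensed from the hunks (schematic restatement, not new code):

    error = xfs_setfilesize(ioend);           /* trylock; EAGAIN if busy   */
    if (error == EAGAIN) {
            atomic_inc(&ioend->io_remaining); /* keep the ioend alive      */
            xfs_finish_ioend(ioend, 0);       /* requeue for a later retry */
            delay(1);                         /* avoid spinning            */
    } else {
            xfs_destroy_ioend(ioend);
    }
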
4464 +diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
4465 +index 1f5e4bb..6b6b394 100644
4466 +--- a/fs/xfs/linux-2.6/xfs_sync.c
4467 ++++ b/fs/xfs/linux-2.6/xfs_sync.c
4468 +@@ -613,7 +613,8 @@ xfssyncd(
4469 + set_freezable();
4470 + timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
4471 + for (;;) {
4472 +- timeleft = schedule_timeout_interruptible(timeleft);
4473 ++ if (list_empty(&mp->m_sync_list))
4474 ++ timeleft = schedule_timeout_interruptible(timeleft);
4475 + /* swsusp */
4476 + try_to_freeze();
4477 + if (kthread_should_stop() && list_empty(&mp->m_sync_list))
4478 +@@ -633,8 +634,7 @@ xfssyncd(
4479 + list_add_tail(&mp->m_sync_work.w_list,
4480 + &mp->m_sync_list);
4481 + }
4482 +- list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
4483 +- list_move(&work->w_list, &tmp);
4484 ++ list_splice_init(&mp->m_sync_list, &tmp);
4485 + spin_unlock(&mp->m_sync_lock);
4486 +
4487 + list_for_each_entry_safe(work, n, &tmp, w_list) {
4488 +@@ -693,12 +693,12 @@ xfs_inode_set_reclaim_tag(
4489 + xfs_mount_t *mp = ip->i_mount;
4490 + xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
4491 +
4492 +- read_lock(&pag->pag_ici_lock);
4493 ++ write_lock(&pag->pag_ici_lock);
4494 + spin_lock(&ip->i_flags_lock);
4495 + __xfs_inode_set_reclaim_tag(pag, ip);
4496 + __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
4497 + spin_unlock(&ip->i_flags_lock);
4498 +- read_unlock(&pag->pag_ici_lock);
4499 ++ write_unlock(&pag->pag_ici_lock);
4500 + xfs_put_perag(mp, pag);
4501 + }
4502 +
4503 +diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
4504 +index 155e798..fd21160 100644
4505 +--- a/fs/xfs/xfs_iget.c
4506 ++++ b/fs/xfs/xfs_iget.c
4507 +@@ -190,13 +190,12 @@ xfs_iget_cache_hit(
4508 + trace_xfs_iget_reclaim(ip);
4509 +
4510 + /*
4511 +- * We need to set XFS_INEW atomically with clearing the
4512 +- * reclaimable tag so that we do have an indicator of the
4513 +- * inode still being initialized.
4514 ++ * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
4515 ++ * from stomping over us while we recycle the inode. We can't
4516 ++ * clear the radix tree reclaimable tag yet as it requires
4517 ++ * pag_ici_lock to be held exclusive.
4518 + */
4519 +- ip->i_flags |= XFS_INEW;
4520 +- ip->i_flags &= ~XFS_IRECLAIMABLE;
4521 +- __xfs_inode_clear_reclaim_tag(mp, pag, ip);
4522 ++ ip->i_flags |= XFS_IRECLAIM;
4523 +
4524 + spin_unlock(&ip->i_flags_lock);
4525 + read_unlock(&pag->pag_ici_lock);
4526 +@@ -216,7 +215,15 @@ xfs_iget_cache_hit(
4527 + trace_xfs_iget_reclaim(ip);
4528 + goto out_error;
4529 + }
4530 ++
4531 ++ write_lock(&pag->pag_ici_lock);
4532 ++ spin_lock(&ip->i_flags_lock);
4533 ++ ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
4534 ++ ip->i_flags |= XFS_INEW;
4535 ++ __xfs_inode_clear_reclaim_tag(mp, pag, ip);
4536 + inode->i_state = I_NEW;
4537 ++ spin_unlock(&ip->i_flags_lock);
4538 ++ write_unlock(&pag->pag_ici_lock);
4539 + } else {
4540 + /* If the VFS inode is being torn down, pause and try again. */
4541 + if (!igrab(inode)) {
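
The xfs_iget.c change splits inode recycling into two phases: XFS_IRECLAIM is set early, under i_flags_lock, purely to fence off xfs_reclaim_inode(), while clearing XFS_IRECLAIMABLE and the radix-tree reclaim tag is deferred until pag_ici_lock can be taken for writing. This matches the xfs_sync.c hunk above, which now also takes that lock exclusively when setting the tag.
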
4542 +diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
4543 +index e6f3b12..0cbdccc 100644
4544 +--- a/include/drm/drm_pciids.h
4545 ++++ b/include/drm/drm_pciids.h
4546 +@@ -6,6 +6,7 @@
4547 + {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
4548 + {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
4549 + {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
4550 ++ {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
4551 + {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
4552 + {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
4553 + {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
4554 +@@ -375,6 +376,7 @@
4555 + {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
4556 + {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
4557 + {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
4558 ++ {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
4559 + {0, 0, 0}
4560 +
4561 + #define r128_PCI_IDS \
4562 +diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
4563 +index aa95508..2c445e1 100644
4564 +--- a/include/linux/dm-ioctl.h
4565 ++++ b/include/linux/dm-ioctl.h
4566 +@@ -266,9 +266,9 @@ enum {
4567 + #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
4568 +
4569 + #define DM_VERSION_MAJOR 4
4570 +-#define DM_VERSION_MINOR 16
4571 ++#define DM_VERSION_MINOR 17
4572 + #define DM_VERSION_PATCHLEVEL 0
4573 +-#define DM_VERSION_EXTRA "-ioctl (2009-11-05)"
4574 ++#define DM_VERSION_EXTRA "-ioctl (2010-03-05)"
4575 +
4576 + /* Status bits */
4577 + #define DM_READONLY_FLAG (1 << 0) /* In/Out */
4578 +@@ -316,4 +316,9 @@ enum {
4579 + */
4580 + #define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */
4581 +
4582 ++/*
4583 ++ * If set, a uevent was generated for which the caller may need to wait.
4584 ++ */
4585 ++#define DM_UEVENT_GENERATED_FLAG (1 << 13) /* Out */
4586 ++
4587 + #endif /* _LINUX_DM_IOCTL_H */
4588 +diff --git a/include/linux/freezer.h b/include/linux/freezer.h
4589 +index 5a361f8..da7e52b 100644
4590 +--- a/include/linux/freezer.h
4591 ++++ b/include/linux/freezer.h
4592 +@@ -64,9 +64,12 @@ extern bool freeze_task(struct task_struct *p, bool sig_only);
4593 + extern void cancel_freezing(struct task_struct *p);
4594 +
4595 + #ifdef CONFIG_CGROUP_FREEZER
4596 +-extern int cgroup_frozen(struct task_struct *task);
4597 ++extern int cgroup_freezing_or_frozen(struct task_struct *task);
4598 + #else /* !CONFIG_CGROUP_FREEZER */
4599 +-static inline int cgroup_frozen(struct task_struct *task) { return 0; }
4600 ++static inline int cgroup_freezing_or_frozen(struct task_struct *task)
4601 ++{
4602 ++ return 0;
4603 ++}
4604 + #endif /* !CONFIG_CGROUP_FREEZER */
4605 +
4606 + /*
4607 +diff --git a/include/linux/fs.h b/include/linux/fs.h
4608 +index f2f68ce..66b0705 100644
4609 +--- a/include/linux/fs.h
4610 ++++ b/include/linux/fs.h
4611 +@@ -2214,6 +2214,7 @@ extern int generic_segment_checks(const struct iovec *iov,
4612 + /* fs/block_dev.c */
4613 + extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
4614 + unsigned long nr_segs, loff_t pos);
4615 ++extern int block_fsync(struct file *filp, struct dentry *dentry, int datasync);
4616 +
4617 + /* fs/splice.c */
4618 + extern ssize_t generic_file_splice_read(struct file *, loff_t *,
4619 +diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
4620 +index ece0b1c..e117b1a 100644
4621 +--- a/include/linux/kfifo.h
4622 ++++ b/include/linux/kfifo.h
4623 +@@ -86,7 +86,8 @@ union { \
4624 + */
4625 + #define INIT_KFIFO(name) \
4626 + name = __kfifo_initializer(sizeof(name##kfifo_buffer) - \
4627 +- sizeof(struct kfifo), name##kfifo_buffer)
4628 ++ sizeof(struct kfifo), \
4629 ++ name##kfifo_buffer + sizeof(struct kfifo))
4630 +
4631 + /**
4632 + * DEFINE_KFIFO - macro to define and initialize a kfifo
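
The INIT_KFIFO hunk above corrects the buffer pointer for fifos declared with DECLARE_KFIFO: the backing union overlays a struct kfifo header on the start of the storage, so the data region has to begin sizeof(struct kfifo) bytes in, not at the union base. A minimal userspace sketch of that layout arithmetic, with a hypothetical kfifo_hdr standing in for the kernel struct:

#include <stdio.h>
#include <stddef.h>

struct kfifo_hdr { void *buf; unsigned int size, in, out; };

int main(void)
{
	/* mimics the DECLARE_KFIFO union: header and data share one storage blob */
	unsigned char storage[64 + sizeof(struct kfifo_hdr)];

	unsigned char *wrong = storage;                            /* old macro: data overlaps the header */
	unsigned char *right = storage + sizeof(struct kfifo_hdr); /* fixed macro: data starts after it */

	printf("header bytes: %zu, wrong buf offset: %td, right buf offset: %td\n",
	       sizeof(struct kfifo_hdr), wrong - storage, right - storage);
	return 0;
}
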
4633 +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
4634 +index bd5a616..1fe293e 100644
4635 +--- a/include/linux/kvm_host.h
4636 ++++ b/include/linux/kvm_host.h
4637 +@@ -53,7 +53,7 @@ extern struct kmem_cache *kvm_vcpu_cache;
4638 + */
4639 + struct kvm_io_bus {
4640 + int dev_count;
4641 +-#define NR_IOBUS_DEVS 6
4642 ++#define NR_IOBUS_DEVS 200
4643 + struct kvm_io_device *devs[NR_IOBUS_DEVS];
4644 + };
4645 +
4646 +@@ -116,6 +116,11 @@ struct kvm_memory_slot {
4647 + int user_alloc;
4648 + };
4649 +
4650 ++static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
4651 ++{
4652 ++ return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
4653 ++}
4654 ++
4655 + struct kvm_kernel_irq_routing_entry {
4656 + u32 gsi;
4657 + u32 type;
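
The new kvm_dirty_bitmap_bytes() helper above centralizes the dirty-bitmap sizing that the ia64 and generic KVM callers previously open-coded into a plain int, which can truncate for very large memory slots; the helper returns unsigned long throughout. A standalone sketch of the computation, using demo stand-ins for the kernel types:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define ALIGN(x, a)   (((x) + (a) - 1) / (a) * (a))

struct kvm_memslot_demo { unsigned long npages; };

/* one dirty bit per page, rounded up to whole longs, expressed in bytes */
static unsigned long dirty_bitmap_bytes(const struct kvm_memslot_demo *slot)
{
	return ALIGN(slot->npages, BITS_PER_LONG) / 8;
}

int main(void)
{
	struct kvm_memslot_demo slot = { .npages = 1000000 }; /* ~4 GB guest at 4 KiB pages */

	printf("%lu pages -> %lu bitmap bytes\n",
	       slot.npages, dirty_bitmap_bytes(&slot));
	return 0;
}
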
4658 +diff --git a/include/linux/module.h b/include/linux/module.h
4659 +index 6cb1a3c..bd465d4 100644
4660 +--- a/include/linux/module.h
4661 ++++ b/include/linux/module.h
4662 +@@ -457,7 +457,7 @@ void symbol_put_addr(void *addr);
4663 + static inline local_t *__module_ref_addr(struct module *mod, int cpu)
4664 + {
4665 + #ifdef CONFIG_SMP
4666 +- return (local_t *) (mod->refptr + per_cpu_offset(cpu));
4667 ++ return (local_t *) per_cpu_ptr(mod->refptr, cpu);
4668 + #else
4669 + return &mod->ref;
4670 + #endif
4671 +diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
4672 +index 34fc6be..ebc4809 100644
4673 +--- a/include/linux/nfs_fs_sb.h
4674 ++++ b/include/linux/nfs_fs_sb.h
4675 +@@ -176,6 +176,7 @@ struct nfs_server {
4676 + #define NFS_CAP_ATIME (1U << 11)
4677 + #define NFS_CAP_CTIME (1U << 12)
4678 + #define NFS_CAP_MTIME (1U << 13)
4679 ++#define NFS_CAP_POSIX_LOCK (1U << 14)
4680 +
4681 +
4682 + /* maximum number of slots to use */
4683 +diff --git a/include/linux/pci.h b/include/linux/pci.h
4684 +index c1968f4..0afb527 100644
4685 +--- a/include/linux/pci.h
4686 ++++ b/include/linux/pci.h
4687 +@@ -959,6 +959,11 @@ static inline int pci_proc_domain(struct pci_bus *bus)
4688 + }
4689 + #endif /* CONFIG_PCI_DOMAINS */
4690 +
4691 ++/* some architectures require additional setup to direct VGA traffic */
4692 ++typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
4693 ++ unsigned int command_bits, bool change_bridge);
4694 ++extern void pci_register_set_vga_state(arch_set_vga_state_t func);
4695 ++
4696 + #else /* CONFIG_PCI is not enabled */
4697 +
4698 + /*
4699 +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
4700 +index cca8a04..0be8243 100644
4701 +--- a/include/linux/pci_ids.h
4702 ++++ b/include/linux/pci_ids.h
4703 +@@ -2417,6 +2417,9 @@
4704 + #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
4705 + #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
4706 + #define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
4707 ++#define PCI_DEVICE_ID_INTEL_CPT_SMBUS 0x1c22
4708 ++#define PCI_DEVICE_ID_INTEL_CPT_LPC1 0x1c42
4709 ++#define PCI_DEVICE_ID_INTEL_CPT_LPC2 0x1c43
4710 + #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
4711 + #define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411
4712 + #define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413
4713 +diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
4714 +index 59e9ef6..eb3f34d 100644
4715 +--- a/kernel/cgroup_freezer.c
4716 ++++ b/kernel/cgroup_freezer.c
4717 +@@ -47,17 +47,20 @@ static inline struct freezer *task_freezer(struct task_struct *task)
4718 + struct freezer, css);
4719 + }
4720 +
4721 +-int cgroup_frozen(struct task_struct *task)
4722 ++int cgroup_freezing_or_frozen(struct task_struct *task)
4723 + {
4724 + struct freezer *freezer;
4725 + enum freezer_state state;
4726 +
4727 + task_lock(task);
4728 + freezer = task_freezer(task);
4729 +- state = freezer->state;
4730 ++ if (!freezer->css.cgroup->parent)
4731 ++ state = CGROUP_THAWED; /* root cgroup can't be frozen */
4732 ++ else
4733 ++ state = freezer->state;
4734 + task_unlock(task);
4735 +
4736 +- return state == CGROUP_FROZEN;
4737 ++ return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
4738 + }
4739 +
4740 + /*
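
The cgroup_freezer change above widens the old cgroup_frozen() predicate: a task whose cgroup is still FREEZING must be skipped by the thaw path just like a fully FROZEN one, and the root cgroup, which can never be frozen, is reported as THAWED regardless of its recorded state. A lock-free userspace sketch of the widened check (the kernel version runs under task_lock), with hypothetical standalone types:

#include <stdbool.h>
#include <stdio.h>

enum freezer_state { CGROUP_THAWED, CGROUP_FREEZING, CGROUP_FROZEN };

struct demo_freezer { enum freezer_state state; bool is_root; };

static int freezing_or_frozen(const struct demo_freezer *f)
{
	/* root cgroup can't be frozen, so its state is forced to THAWED */
	enum freezer_state state = f->is_root ? CGROUP_THAWED : f->state;

	return state == CGROUP_FREEZING || state == CGROUP_FROZEN;
}

int main(void)
{
	struct demo_freezer mid_freeze = { CGROUP_FREEZING, false };
	struct demo_freezer root       = { CGROUP_FROZEN,   true  }; /* recorded state ignored */

	printf("freezing child: %d, root: %d\n",
	       freezing_or_frozen(&mid_freeze), freezing_or_frozen(&root));
	return 0;
}
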
4741 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
4742 +index 69a3d7b..0b23ff7 100644
4743 +--- a/kernel/irq/manage.c
4744 ++++ b/kernel/irq/manage.c
4745 +@@ -753,6 +753,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
4746 + if (new->flags & IRQF_ONESHOT)
4747 + desc->status |= IRQ_ONESHOT;
4748 +
4749 ++ /*
4750 ++ * Force MSI interrupts to run with interrupts
4751 ++ * disabled. The multi vector cards can cause stack
4752 ++ * overflows due to nested interrupts when enough of
4753 ++ * them are directed to a core and fire at the same
4754 ++ * time.
4755 ++ */
4756 ++ if (desc->msi_desc)
4757 ++ new->flags |= IRQF_DISABLED;
4758 ++
4759 + if (!(desc->status & IRQ_NOAUTOEN)) {
4760 + desc->depth = 0;
4761 + desc->status &= ~IRQ_DISABLED;
4762 +diff --git a/kernel/lockdep.c b/kernel/lockdep.c
4763 +index c62ec14..493a0ef 100644
4764 +--- a/kernel/lockdep.c
4765 ++++ b/kernel/lockdep.c
4766 +@@ -600,9 +600,9 @@ static int static_obj(void *obj)
4767 + * percpu var?
4768 + */
4769 + for_each_possible_cpu(i) {
4770 +- start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
4771 +- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
4772 +- + per_cpu_offset(i);
4773 ++ start = (unsigned long) per_cpu_ptr(&__per_cpu_start, i);
4774 ++ end = (unsigned long) per_cpu_ptr(&__per_cpu_start, i)
4775 ++ + PERCPU_ENOUGH_ROOM;
4776 +
4777 + if ((addr >= start) && (addr < end))
4778 + return 1;
4779 +diff --git a/kernel/module.c b/kernel/module.c
4780 +index f82386b..5b6ce39 100644
4781 +--- a/kernel/module.c
4782 ++++ b/kernel/module.c
4783 +@@ -405,7 +405,7 @@ static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
4784 + int cpu;
4785 +
4786 + for_each_possible_cpu(cpu)
4787 +- memcpy(pcpudest + per_cpu_offset(cpu), from, size);
4788 ++ memcpy(per_cpu_ptr(pcpudest, cpu), from, size);
4789 + }
4790 +
4791 + #else /* ... !CONFIG_SMP */
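
The module.h, lockdep.c and module.c hunks above all replace open-coded "base + per_cpu_offset(cpu)" arithmetic with per_cpu_ptr(), which owns the translation from a per-cpu template address to a given cpu's replica. A userspace analogue of that indirection; all names here are hypothetical, and the real kernel translation is architecture-specific:

#include <stdio.h>

#define NR_CPUS   4
#define PCPU_AREA 256

static unsigned char pcpu_area[NR_CPUS][PCPU_AREA]; /* one replica per cpu */

/* translate an offset within the per-cpu template into cpu's replica */
static void *demo_per_cpu_ptr(size_t offset, int cpu)
{
	return &pcpu_area[cpu][offset];
}

int main(void)
{
	size_t counter_off = 16; /* pretend a counter lives at this offset */

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		int *ctr = demo_per_cpu_ptr(counter_off, cpu);
		*ctr = cpu * 10; /* each cpu sees its own copy */
	}
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d counter = %d\n", cpu,
		       *(int *)demo_per_cpu_ptr(counter_off, cpu));
	return 0;
}
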
4792 +diff --git a/kernel/power/process.c b/kernel/power/process.c
4793 +index 5ade1bd..de53015 100644
4794 +--- a/kernel/power/process.c
4795 ++++ b/kernel/power/process.c
4796 +@@ -145,7 +145,7 @@ static void thaw_tasks(bool nosig_only)
4797 + if (nosig_only && should_send_signal(p))
4798 + continue;
4799 +
4800 +- if (cgroup_frozen(p))
4801 ++ if (cgroup_freezing_or_frozen(p))
4802 + continue;
4803 +
4804 + thaw_process(p);
4805 +diff --git a/kernel/sched.c b/kernel/sched.c
4806 +index 7ca9345..da19c1e 100644
4807 +--- a/kernel/sched.c
4808 ++++ b/kernel/sched.c
4809 +@@ -6717,7 +6717,9 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4810 + int ret;
4811 + cpumask_var_t mask;
4812 +
4813 +- if (len < cpumask_size())
4814 ++ if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4815 ++ return -EINVAL;
4816 ++ if (len & (sizeof(unsigned long)-1))
4817 + return -EINVAL;
4818 +
4819 + if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4820 +@@ -6725,10 +6727,12 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4821 +
4822 + ret = sched_getaffinity(pid, mask);
4823 + if (ret == 0) {
4824 +- if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
4825 ++ size_t retlen = min_t(size_t, len, cpumask_size());
4826 ++
4827 ++ if (copy_to_user(user_mask_ptr, mask, retlen))
4828 + ret = -EFAULT;
4829 + else
4830 +- ret = cpumask_size();
4831 ++ ret = retlen;
4832 + }
4833 + free_cpumask_var(mask);
4834 +
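
The sched_getaffinity() change above stops demanding a userspace buffer as large as the kernel's compile-time cpumask_size(); it now accepts any long-aligned buffer big enough to hold nr_cpu_ids bits and copies back at most cpumask_size() bytes. A standalone sketch of the relaxed validation, with demo constants in place of the kernel's runtime values:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_BYTE CHAR_BIT

static const unsigned int nr_cpu_ids = 8;     /* cpus this boot actually has */
static const size_t kernel_mask_bytes = 1024; /* cpumask_size() with a huge NR_CPUS */

static long check_and_size(size_t user_len, size_t *copy_len)
{
	if (user_len * BITS_PER_BYTE < nr_cpu_ids)
		return -22; /* -EINVAL: buffer cannot hold even the possible cpus */
	if (user_len & (sizeof(unsigned long) - 1))
		return -22; /* -EINVAL: length not a multiple of sizeof(long) */
	*copy_len = user_len < kernel_mask_bytes ? user_len : kernel_mask_bytes;
	return 0;
}

int main(void)
{
	size_t n = 0;
	long rc;

	/* the old check rejected anything under kernel_mask_bytes; 8 bytes now works */
	rc = check_and_size(8, &n);
	printf("len=8 -> rc=%ld copy=%zu\n", rc, n);
	rc = check_and_size(6, &n);
	printf("len=6 -> rc=%ld copy=%zu\n", rc, n);
	return 0;
}
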
4835 +diff --git a/mm/readahead.c b/mm/readahead.c
4836 +index 337b20e..fe1a069 100644
4837 +--- a/mm/readahead.c
4838 ++++ b/mm/readahead.c
4839 +@@ -502,7 +502,7 @@ void page_cache_sync_readahead(struct address_space *mapping,
4840 + return;
4841 +
4842 + /* be dumb */
4843 +- if (filp->f_mode & FMODE_RANDOM) {
4844 ++ if (filp && (filp->f_mode & FMODE_RANDOM)) {
4845 + force_page_cache_readahead(mapping, filp, offset, req_size);
4846 + return;
4847 + }
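
The readahead hunk above adds a NULL guard: the sync readahead path can apparently be reached without an open struct file, so testing filp->f_mode unconditionally would oops. A tiny sketch of the short-circuit, with a hypothetical flag value:

#include <stdio.h>

#define FMODE_RANDOM_DEMO 0x1000

struct demo_file { unsigned int f_mode; };

static int wants_forced_readahead(const struct demo_file *filp)
{
	/* short-circuit keeps a NULL filp from being dereferenced */
	return filp && (filp->f_mode & FMODE_RANDOM_DEMO);
}

int main(void)
{
	struct demo_file f = { .f_mode = FMODE_RANDOM_DEMO };

	printf("NULL: %d\n", wants_forced_readahead(NULL));
	printf("random file: %d\n", wants_forced_readahead(&f));
	return 0;
}
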
4848 +diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
4849 +index 6a43314..ba1fadb 100644
4850 +--- a/net/mac80211/mesh.c
4851 ++++ b/net/mac80211/mesh.c
4852 +@@ -749,9 +749,6 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
4853 +
4854 + switch (fc & IEEE80211_FCTL_STYPE) {
4855 + case IEEE80211_STYPE_ACTION:
4856 +- if (skb->len < IEEE80211_MIN_ACTION_SIZE)
4857 +- return RX_DROP_MONITOR;
4858 +- /* fall through */
4859 + case IEEE80211_STYPE_PROBE_RESP:
4860 + case IEEE80211_STYPE_BEACON:
4861 + skb_queue_tail(&ifmsh->skb_queue, skb);
4862 +diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
4863 +index d28acb6..4eed81b 100644
4864 +--- a/net/mac80211/mesh_hwmp.c
4865 ++++ b/net/mac80211/mesh_hwmp.c
4866 +@@ -391,7 +391,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
4867 + if (SN_GT(mpath->sn, orig_sn) ||
4868 + (mpath->sn == orig_sn &&
4869 + action == MPATH_PREQ &&
4870 +- new_metric > mpath->metric)) {
4871 ++ new_metric >= mpath->metric)) {
4872 + process = false;
4873 + fresh_info = false;
4874 + }
4875 +@@ -611,7 +611,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
4876 +
4877 + mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
4878 + cpu_to_le32(orig_sn), 0, target_addr,
4879 +- cpu_to_le32(target_sn), mpath->next_hop->sta.addr, hopcount,
4880 ++ cpu_to_le32(target_sn), next_hop, hopcount,
4881 + ttl, cpu_to_le32(lifetime), cpu_to_le32(metric),
4882 + 0, sdata);
4883 + rcu_read_unlock();
4884 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
4885 +index da92cde..edfa036 100644
4886 +--- a/net/mac80211/rx.c
4887 ++++ b/net/mac80211/rx.c
4888 +@@ -2355,6 +2355,11 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
4889 + /* should never get here */
4890 + WARN_ON(1);
4891 + break;
4892 ++ case MESH_PLINK_CATEGORY:
4893 ++ case MESH_PATH_SEL_CATEGORY:
4894 ++ if (ieee80211_vif_is_mesh(&sdata->vif))
4895 ++ return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
4896 ++ break;
4897 + }
4898 +
4899 + return 1;
4900 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
4901 +index 70c79c3..1fdc0a5 100644
4902 +--- a/net/mac80211/tx.c
4903 ++++ b/net/mac80211/tx.c
4904 +@@ -1945,6 +1945,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
4905 + void ieee80211_tx_pending(unsigned long data)
4906 + {
4907 + struct ieee80211_local *local = (struct ieee80211_local *)data;
4908 ++ struct ieee80211_sub_if_data *sdata;
4909 + unsigned long flags;
4910 + int i;
4911 + bool txok;
4912 +@@ -1983,6 +1984,11 @@ void ieee80211_tx_pending(unsigned long data)
4913 + if (!txok)
4914 + break;
4915 + }
4916 ++
4917 ++ if (skb_queue_empty(&local->pending[i]))
4918 ++ list_for_each_entry_rcu(sdata, &local->interfaces, list)
4919 ++ netif_tx_wake_queue(
4920 ++ netdev_get_tx_queue(sdata->dev, i));
4921 + }
4922 + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
4923 +
4924 +diff --git a/net/mac80211/util.c b/net/mac80211/util.c
4925 +index 3848140..27212e8 100644
4926 +--- a/net/mac80211/util.c
4927 ++++ b/net/mac80211/util.c
4928 +@@ -280,13 +280,13 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
4929 + /* someone still has this queue stopped */
4930 + return;
4931 +
4932 +- if (!skb_queue_empty(&local->pending[queue]))
4933 ++ if (skb_queue_empty(&local->pending[queue])) {
4934 ++ rcu_read_lock();
4935 ++ list_for_each_entry_rcu(sdata, &local->interfaces, list)
4936 ++ netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
4937 ++ rcu_read_unlock();
4938 ++ } else
4939 + tasklet_schedule(&local->tx_pending_tasklet);
4940 +-
4941 +- rcu_read_lock();
4942 +- list_for_each_entry_rcu(sdata, &local->interfaces, list)
4943 +- netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
4944 +- rcu_read_unlock();
4945 + }
4946 +
4947 + void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
4948 +@@ -1145,6 +1145,14 @@ int ieee80211_reconfig(struct ieee80211_local *local)
4949 + }
4950 + }
4951 +
4952 ++ rcu_read_lock();
4953 ++ if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
4954 ++ list_for_each_entry_rcu(sta, &local->sta_list, list) {
4955 ++ ieee80211_sta_tear_down_BA_sessions(sta);
4956 ++ }
4957 ++ }
4958 ++ rcu_read_unlock();
4959 ++
4960 + /* add back keys */
4961 + list_for_each_entry(sdata, &local->interfaces, list)
4962 + if (netif_running(sdata->dev))
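
The __ieee80211_wake_queue rewrite in the util.c hunk above inverts the old ordering: driver queues are woken only once the local pending queue is empty, otherwise the drain tasklet is rescheduled, so buffered frames cannot be overtaken by newly queued ones. A toy model of that decision, with hypothetical stand-ins for the mac80211 structures:

#include <stdio.h>
#include <stdbool.h>

#define QLEN 4

struct demo_queue { int frames[QLEN]; int head, tail; bool driver_awake; };

static bool queue_empty(const struct demo_queue *q) { return q->head == q->tail; }

/* mirrors the fixed logic: wake the driver only once nothing is pending,
 * otherwise reschedule the drain */
static void wake_queue(struct demo_queue *q)
{
	if (queue_empty(q))
		q->driver_awake = true; /* safe: no frames left to reorder */
	else
		printf("pending frames remain: schedule drain tasklet\n");
}

int main(void)
{
	struct demo_queue q = { .frames = {1, 2}, .head = 0, .tail = 2 };

	wake_queue(&q);  /* not yet: two frames pending */
	q.head = q.tail; /* drain completes */
	wake_queue(&q);
	printf("driver awake: %d\n", q.driver_awake);
	return 0;
}
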
4963 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4964 +index 9ace8eb..062a8b0 100644
4965 +--- a/sound/pci/hda/hda_intel.c
4966 ++++ b/sound/pci/hda/hda_intel.c
4967 +@@ -125,6 +125,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
4968 + "{Intel, ICH9},"
4969 + "{Intel, ICH10},"
4970 + "{Intel, PCH},"
4971 ++ "{Intel, CPT},"
4972 + "{Intel, SCH},"
4973 + "{ATI, SB450},"
4974 + "{ATI, SB600},"
4975 +@@ -449,6 +450,7 @@ struct azx {
4976 + /* driver types */
4977 + enum {
4978 + AZX_DRIVER_ICH,
4979 ++ AZX_DRIVER_PCH,
4980 + AZX_DRIVER_SCH,
4981 + AZX_DRIVER_ATI,
4982 + AZX_DRIVER_ATIHDMI,
4983 +@@ -463,6 +465,7 @@ enum {
4984 +
4985 + static char *driver_short_names[] __devinitdata = {
4986 + [AZX_DRIVER_ICH] = "HDA Intel",
4987 ++ [AZX_DRIVER_PCH] = "HDA Intel PCH",
4988 + [AZX_DRIVER_SCH] = "HDA Intel MID",
4989 + [AZX_DRIVER_ATI] = "HDA ATI SB",
4990 + [AZX_DRIVER_ATIHDMI] = "HDA ATI HDMI",
4991 +@@ -1065,6 +1068,7 @@ static void azx_init_pci(struct azx *chip)
4992 + 0x01, NVIDIA_HDA_ENABLE_COHBIT);
4993 + break;
4994 + case AZX_DRIVER_SCH:
4995 ++ case AZX_DRIVER_PCH:
4996 + pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop);
4997 + if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) {
4998 + pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC,
4999 +@@ -2268,6 +2272,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
5000 + SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
5001 + SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
5002 + SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
5003 ++ SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
5004 + SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
5005 + {}
5006 + };
5007 +@@ -2357,6 +2362,8 @@ static struct snd_pci_quirk msi_black_list[] __devinitdata = {
5008 + SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
5009 + SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
5010 + SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
5011 ++ SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */
5012 ++ SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */
5013 + {}
5014 + };
5015 +
5016 +@@ -2431,6 +2438,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
5017 + if (bdl_pos_adj[dev] < 0) {
5018 + switch (chip->driver_type) {
5019 + case AZX_DRIVER_ICH:
5020 ++ case AZX_DRIVER_PCH:
5021 + bdl_pos_adj[dev] = 1;
5022 + break;
5023 + default:
5024 +@@ -2709,6 +2717,8 @@ static struct pci_device_id azx_ids[] = {
5025 + { PCI_DEVICE(0x8086, 0x3a6e), .driver_data = AZX_DRIVER_ICH },
5026 + /* PCH */
5027 + { PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_ICH },
5028 ++ /* CPT */
5029 ++ { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
5030 + /* SCH */
5031 + { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH },
5032 + /* ATI SB 450/600 */
5033 +diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
5034 +index 7069441..263bf3b 100644
5035 +--- a/sound/pci/hda/patch_analog.c
5036 ++++ b/sound/pci/hda/patch_analog.c
5037 +@@ -1805,6 +1805,14 @@ static int patch_ad1981(struct hda_codec *codec)
5038 + case AD1981_THINKPAD:
5039 + spec->mixers[0] = ad1981_thinkpad_mixers;
5040 + spec->input_mux = &ad1981_thinkpad_capture_source;
5041 ++ /* set the upper-limit for mixer amp to 0dB for avoiding the
5042 ++ * possible damage by overloading
5043 ++ */
5044 ++ snd_hda_override_amp_caps(codec, 0x11, HDA_INPUT,
5045 ++ (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
5046 ++ (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
5047 ++ (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
5048 ++ (1 << AC_AMPCAP_MUTE_SHIFT));
5049 + break;
5050 + case AD1981_TOSHIBA:
5051 + spec->mixers[0] = ad1981_hp_mixers;
5052 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5053 +index a79f841..bd8a567 100644
5054 +--- a/sound/pci/hda/patch_realtek.c
5055 ++++ b/sound/pci/hda/patch_realtek.c
5056 +@@ -9074,6 +9074,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
5057 + SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG),
5058 +
5059 + SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG),
5060 ++ SND_PCI_QUIRK(0x1558, 0x0571, "Clevo laptop M570U", ALC883_3ST_6ch_DIG),
5061 + SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720),
5062 + SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720),
5063 + SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R),
5064 +diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
5065 +index a83d196..32f9853 100644
5066 +--- a/sound/pci/mixart/mixart.c
5067 ++++ b/sound/pci/mixart/mixart.c
5068 +@@ -1161,13 +1161,15 @@ static long snd_mixart_BA0_read(struct snd_info_entry *entry, void *file_private
5069 + unsigned long count, unsigned long pos)
5070 + {
5071 + struct mixart_mgr *mgr = entry->private_data;
5072 ++ unsigned long maxsize;
5073 +
5074 +- count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
5075 +- if(count <= 0)
5076 ++ if (pos >= MIXART_BA0_SIZE)
5077 + return 0;
5078 +- if(pos + count > MIXART_BA0_SIZE)
5079 +- count = (long)(MIXART_BA0_SIZE - pos);
5080 +- if(copy_to_user_fromio(buf, MIXART_MEM( mgr, pos ), count))
5081 ++ maxsize = MIXART_BA0_SIZE - pos;
5082 ++ if (count > maxsize)
5083 ++ count = maxsize;
5084 ++ count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
5085 ++ if (copy_to_user_fromio(buf, MIXART_MEM(mgr, pos), count))
5086 + return -EFAULT;
5087 + return count;
5088 + }
5089 +@@ -1180,13 +1182,15 @@ static long snd_mixart_BA1_read(struct snd_info_entry *entry, void *file_private
5090 + unsigned long count, unsigned long pos)
5091 + {
5092 + struct mixart_mgr *mgr = entry->private_data;
5093 ++ unsigned long maxsize;
5094 +
5095 +- count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
5096 +- if(count <= 0)
5097 ++ if (pos > MIXART_BA1_SIZE)
5098 + return 0;
5099 +- if(pos + count > MIXART_BA1_SIZE)
5100 +- count = (long)(MIXART_BA1_SIZE - pos);
5101 +- if(copy_to_user_fromio(buf, MIXART_REG( mgr, pos ), count))
5102 ++ maxsize = MIXART_BA1_SIZE - pos;
5103 ++ if (count > maxsize)
5104 ++ count = maxsize;
5105 ++ count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
5106 ++ if (copy_to_user_fromio(buf, MIXART_REG(mgr, pos), count))
5107 + return -EFAULT;
5108 + return count;
5109 + }
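
Both mixart proc-read fixes above follow the same hardened bounds pattern: validate pos against the region size first, clamp count to what remains, and only then round down to the 4-byte access granularity. The old "pos + count > SIZE" form can wrap around for huge count values and skip the clamp entirely. A standalone sketch of the pattern:

#include <stdio.h>

static unsigned long bounded_len(unsigned long pos, unsigned long count,
				 unsigned long region_size)
{
	unsigned long maxsize;

	if (pos >= region_size)
		return 0;        /* nothing readable at or past the end */
	maxsize = region_size - pos;
	if (count > maxsize)
		count = maxsize; /* clamp before aligning */
	return count & ~3UL;     /* multiple of 4 bytes, as the hardware needs */
}

int main(void)
{
	/* huge count: with "pos + count > size" this would wrap and go unclamped */
	printf("%lu\n", bounded_len(100, ~0UL, 4096));
	/* start past the end: nothing to read */
	printf("%lu\n", bounded_len(5000, 16, 4096));
	return 0;
}
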
5110 +diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c
5111 +index b2da478..c7cb207 100644
5112 +--- a/sound/usb/usbmidi.c
5113 ++++ b/sound/usb/usbmidi.c
5114 +@@ -984,6 +984,8 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
5115 + DEFINE_WAIT(wait);
5116 + long timeout = msecs_to_jiffies(50);
5117 +
5118 ++ if (ep->umidi->disconnected)
5119 ++ return;
5120 + /*
5121 + * The substream buffer is empty, but some data might still be in the
5122 + * currently active URBs, so we have to wait for those to complete.
5123 +@@ -1121,14 +1123,21 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi* umidi,
5124 + * Frees an output endpoint.
5125 + * May be called when ep hasn't been initialized completely.
5126 + */
5127 +-static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint* ep)
5128 ++static void snd_usbmidi_out_endpoint_clear(struct snd_usb_midi_out_endpoint *ep)
5129 + {
5130 + unsigned int i;
5131 +
5132 + for (i = 0; i < OUTPUT_URBS; ++i)
5133 +- if (ep->urbs[i].urb)
5134 ++ if (ep->urbs[i].urb) {
5135 + free_urb_and_buffer(ep->umidi, ep->urbs[i].urb,
5136 + ep->max_transfer);
5137 ++ ep->urbs[i].urb = NULL;
5138 ++ }
5139 ++}
5140 ++
5141 ++static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint *ep)
5142 ++{
5143 ++ snd_usbmidi_out_endpoint_clear(ep);
5144 + kfree(ep);
5145 + }
5146 +
5147 +@@ -1260,15 +1269,18 @@ void snd_usbmidi_disconnect(struct list_head* p)
5148 + usb_kill_urb(ep->out->urbs[j].urb);
5149 + if (umidi->usb_protocol_ops->finish_out_endpoint)
5150 + umidi->usb_protocol_ops->finish_out_endpoint(ep->out);
5151 ++ ep->out->active_urbs = 0;
5152 ++ if (ep->out->drain_urbs) {
5153 ++ ep->out->drain_urbs = 0;
5154 ++ wake_up(&ep->out->drain_wait);
5155 ++ }
5156 + }
5157 + if (ep->in)
5158 + for (j = 0; j < INPUT_URBS; ++j)
5159 + usb_kill_urb(ep->in->urbs[j]);
5160 + /* free endpoints here; later call can result in Oops */
5161 +- if (ep->out) {
5162 +- snd_usbmidi_out_endpoint_delete(ep->out);
5163 +- ep->out = NULL;
5164 +- }
5165 ++ if (ep->out)
5166 ++ snd_usbmidi_out_endpoint_clear(ep->out);
5167 + if (ep->in) {
5168 + snd_usbmidi_in_endpoint_delete(ep->in);
5169 + ep->in = NULL;
5170 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
5171 +index a944be3..9dd98cb 100644
5172 +--- a/virt/kvm/kvm_main.c
5173 ++++ b/virt/kvm/kvm_main.c
5174 +@@ -636,7 +636,7 @@ skip_lpage:
5175 +
5176 + /* Allocate page dirty bitmap if needed */
5177 + if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
5178 +- unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
5179 ++ unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
5180 +
5181 + new.dirty_bitmap = vmalloc(dirty_bytes);
5182 + if (!new.dirty_bitmap)
5183 +@@ -719,7 +719,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
5184 + {
5185 + struct kvm_memory_slot *memslot;
5186 + int r, i;
5187 +- int n;
5188 ++ unsigned long n;
5189 + unsigned long any = 0;
5190 +
5191 + r = -EINVAL;
5192 +@@ -731,7 +731,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
5193 + if (!memslot->dirty_bitmap)
5194 + goto out;
5195 +
5196 +- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
5197 ++ n = kvm_dirty_bitmap_bytes(memslot);
5198 +
5199 + for (i = 0; !any && i < n/sizeof(long); ++i)
5200 + any = memslot->dirty_bitmap[i];
5201 +@@ -1073,10 +1073,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
5202 + memslot = gfn_to_memslot_unaliased(kvm, gfn);
5203 + if (memslot && memslot->dirty_bitmap) {
5204 + unsigned long rel_gfn = gfn - memslot->base_gfn;
5205 ++ unsigned long *p = memslot->dirty_bitmap +
5206 ++ rel_gfn / BITS_PER_LONG;
5207 ++ int offset = rel_gfn % BITS_PER_LONG;
5208 +
5209 + /* avoid RMW */
5210 +- if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
5211 +- generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
5212 ++ if (!generic_test_le_bit(offset, p))
5213 ++ generic___set_le_bit(offset, p);
5214 + }
5215 + }
5216 +
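
The mark_page_dirty() change that closes this patch splits the relative gfn into a word pointer plus an in-word offset before calling the generic little-endian bit helpers, apparently so the bit index handed to those helpers (an int parameter) stays small even for very large memory slots. A standalone sketch of the split, assuming 64-bit longs in the example output:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static void set_bit_split(unsigned long *bitmap, unsigned long rel_gfn)
{
	unsigned long *p = bitmap + rel_gfn / BITS_PER_LONG; /* word holding the bit */
	unsigned int offset = rel_gfn % BITS_PER_LONG;       /* always < BITS_PER_LONG */

	if (!(*p & (1UL << offset))) /* avoid RMW when already set */
		*p |= 1UL << offset;
}

int main(void)
{
	unsigned long bitmap[4] = { 0 };

	set_bit_split(bitmap, 70); /* lands in word 1, bit 6, with 64-bit longs */
	printf("word1 = %#lx\n", bitmap[1]);
	return 0;
}
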