From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1696 - genpatches-2.6/trunk/2.6.33
Date: Tue, 27 Apr 2010 13:06:15
Message-Id: 20100427130603.313BF2C04C@corvid.gentoo.org
1 Author: mpagano
2 Date: 2010-04-27 13:06:02 +0000 (Tue, 27 Apr 2010)
3 New Revision: 1696
4
5 Added:
6 genpatches-2.6/trunk/2.6.33/1002_linux-2.6.33.3.patch
7 Modified:
8 genpatches-2.6/trunk/2.6.33/0000_README
9 Log:
10 Linux patch 2.6.33.3
11
12 Modified: genpatches-2.6/trunk/2.6.33/0000_README
13 ===================================================================
14 --- genpatches-2.6/trunk/2.6.33/0000_README 2010-04-25 00:23:19 UTC (rev 1695)
15 +++ genpatches-2.6/trunk/2.6.33/0000_README 2010-04-27 13:06:02 UTC (rev 1696)
16 @@ -47,6 +47,10 @@
17 From: http://www.kernel.org
18 Desc: Linux 2.6.33.2
19
20 +Patch: 1002_linux-2.6.33.3.patch
21 +From: http://www.kernel.org
22 +Desc: Linux 2.6.33.3
23 +
24 Patch: 4100_dm-bbr.patch
25 From: EVMS 2.5.2
26 Desc: Bad block relocation support for LiveCD users
27
28 Added: genpatches-2.6/trunk/2.6.33/1002_linux-2.6.33.3.patch
29 ===================================================================
30 --- genpatches-2.6/trunk/2.6.33/1002_linux-2.6.33.3.patch (rev 0)
31 +++ genpatches-2.6/trunk/2.6.33/1002_linux-2.6.33.3.patch 2010-04-27 13:06:02 UTC (rev 1696)
32 @@ -0,0 +1,5184 @@
33 +diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
34 +index 81c0c59..e1bb5b2 100644
35 +--- a/Documentation/i2c/busses/i2c-i801
36 ++++ b/Documentation/i2c/busses/i2c-i801
37 +@@ -15,7 +15,8 @@ Supported adapters:
38 + * Intel 82801I (ICH9)
39 + * Intel EP80579 (Tolapai)
40 + * Intel 82801JI (ICH10)
41 +- * Intel PCH
42 ++ * Intel 3400/5 Series (PCH)
43 ++ * Intel Cougar Point (PCH)
44 + Datasheets: Publicly available at the Intel website
45 +
46 + Authors:
47 +diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
48 +index 6b84a04..cbeb6e0 100644
49 +--- a/arch/arm/boot/compressed/head.S
50 ++++ b/arch/arm/boot/compressed/head.S
51 +@@ -172,7 +172,7 @@ not_angel:
52 + adr r0, LC0
53 + ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp})
54 + THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip} )
55 +- THUMB( ldr sp, [r0, #28] )
56 ++ THUMB( ldr sp, [r0, #32] )
57 + subs r0, r0, r1 @ calculate the delta offset
58 +
59 + @ if delta is zero, we are
60 +diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
61 +index 5fdeec5..d76279a 100644
62 +--- a/arch/ia64/kvm/kvm-ia64.c
63 ++++ b/arch/ia64/kvm/kvm-ia64.c
64 +@@ -1794,7 +1794,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
65 + {
66 + struct kvm_memory_slot *memslot;
67 + int r, i;
68 +- long n, base;
69 ++ long base;
70 ++ unsigned long n;
71 + unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
72 + offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
73 +
74 +@@ -1807,7 +1808,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
75 + if (!memslot->dirty_bitmap)
76 + goto out;
77 +
78 +- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
79 ++ n = kvm_dirty_bitmap_bytes(memslot);
80 + base = memslot->base_gfn / BITS_PER_LONG;
81 +
82 + for (i = 0; i < n/sizeof(long); ++i) {
83 +@@ -1823,7 +1824,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
84 + struct kvm_dirty_log *log)
85 + {
86 + int r;
87 +- int n;
88 ++ unsigned long n;
89 + struct kvm_memory_slot *memslot;
90 + int is_dirty = 0;
91 +
92 +@@ -1841,7 +1842,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
93 + if (is_dirty) {
94 + kvm_flush_remote_tlbs(kvm);
95 + memslot = &kvm->memslots[log->slot];
96 +- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
97 ++ n = kvm_dirty_bitmap_bytes(memslot);
98 + memset(memslot->dirty_bitmap, 0, n);
99 + }
100 + r = 0;
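
A note on this and the matching KVM hunks below: the recurring change swaps the open-coded ALIGN(memslot->npages, BITS_PER_LONG) / 8 for kvm_dirty_bitmap_bytes() and widens n from int to unsigned long. The helper's body is not shown in the patch; a minimal userspace sketch of what it presumably computes (one dirty bit per guest page, rounded up to whole longs), with illustrative types:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct kvm_memory_slot { unsigned long npages; };

/* sketch of the helper the hunks switch to; unsigned long result so a
 * very large memslot cannot overflow a signed int */
static unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
    return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

int main(void)
{
    struct kvm_memory_slot slot = { .npages = 1 << 20 }; /* 4 GiB of 4 KiB pages */
    printf("%lu bytes\n", kvm_dirty_bitmap_bytes(&slot)); /* 131072 */
    return 0;
}
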
101 +diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
102 +index 3e294bd..e6dc595 100644
103 +--- a/arch/powerpc/kvm/book3s.c
104 ++++ b/arch/powerpc/kvm/book3s.c
105 +@@ -848,7 +848,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
106 + struct kvm_vcpu *vcpu;
107 + ulong ga, ga_end;
108 + int is_dirty = 0;
109 +- int r, n;
110 ++ int r;
111 ++ unsigned long n;
112 +
113 + down_write(&kvm->slots_lock);
114 +
115 +@@ -866,7 +867,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
116 + kvm_for_each_vcpu(n, vcpu, kvm)
117 + kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
118 +
119 +- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
120 ++ n = kvm_dirty_bitmap_bytes(memslot);
121 + memset(memslot->dirty_bitmap, 0, n);
122 + }
123 +
124 +diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h
125 +index 22574e0..202d869 100644
126 +--- a/arch/powerpc/platforms/pseries/offline_states.h
127 ++++ b/arch/powerpc/platforms/pseries/offline_states.h
128 +@@ -9,10 +9,30 @@ enum cpu_state_vals {
129 + CPU_MAX_OFFLINE_STATES
130 + };
131 +
132 ++#ifdef CONFIG_HOTPLUG_CPU
133 + extern enum cpu_state_vals get_cpu_current_state(int cpu);
134 + extern void set_cpu_current_state(int cpu, enum cpu_state_vals state);
135 +-extern enum cpu_state_vals get_preferred_offline_state(int cpu);
136 + extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state);
137 + extern void set_default_offline_state(int cpu);
138 ++#else
139 ++static inline enum cpu_state_vals get_cpu_current_state(int cpu)
140 ++{
141 ++ return CPU_STATE_ONLINE;
142 ++}
143 ++
144 ++static inline void set_cpu_current_state(int cpu, enum cpu_state_vals state)
145 ++{
146 ++}
147 ++
148 ++static inline void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
149 ++{
150 ++}
151 ++
152 ++static inline void set_default_offline_state(int cpu)
153 ++{
154 ++}
155 ++#endif
156 ++
157 ++extern enum cpu_state_vals get_preferred_offline_state(int cpu);
158 + extern int start_secondary(void);
159 + #endif
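
The offline_states.h hunk follows the usual kernel idiom: when CONFIG_HOTPLUG_CPU is off, the extern declarations become empty static inlines so callers compile either way, with get_cpu_current_state() falling back to CPU_STATE_ONLINE. A compile-anywhere sketch of the idiom, using hypothetical names:

#include <stdio.h>

/* #define CONFIG_MY_FEATURE 1 */

#ifdef CONFIG_MY_FEATURE
extern int my_feature_state(int cpu);        /* real implementation elsewhere */
#else
/* stub: feature compiled out, return a safe default (cf. CPU_STATE_ONLINE) */
static inline int my_feature_state(int cpu)
{
    (void)cpu;
    return 0;
}
#endif

int main(void)
{
    printf("state: %d\n", my_feature_state(0));
    return 0;
}
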
160 +diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
161 +index 300ab01..5f91a38 100644
162 +--- a/arch/s390/mm/vmem.c
163 ++++ b/arch/s390/mm/vmem.c
164 +@@ -70,12 +70,8 @@ static pte_t __ref *vmem_pte_alloc(void)
165 + pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
166 + if (!pte)
167 + return NULL;
168 +- if (MACHINE_HAS_HPAGE)
169 +- clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO,
170 +- PTRS_PER_PTE * sizeof(pte_t));
171 +- else
172 +- clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
173 +- PTRS_PER_PTE * sizeof(pte_t));
174 ++ clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
175 ++ PTRS_PER_PTE * sizeof(pte_t));
176 + return pte;
177 + }
178 +
179 +@@ -116,8 +112,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
180 + if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
181 + (address + HPAGE_SIZE <= start + size) &&
182 + (address >= HPAGE_SIZE)) {
183 +- pte_val(pte) |= _SEGMENT_ENTRY_LARGE |
184 +- _SEGMENT_ENTRY_CO;
185 ++ pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
186 + pmd_val(*pm_dir) = pte_val(pte);
187 + address += HPAGE_SIZE - PAGE_SIZE;
188 + continue;
189 +diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
190 +index ac04255..ce830fa 100644
191 +--- a/arch/sh/include/asm/elf.h
192 ++++ b/arch/sh/include/asm/elf.h
193 +@@ -211,7 +211,9 @@ extern void __kernel_vsyscall;
194 +
195 + #define VSYSCALL_AUX_ENT \
196 + if (vdso_enabled) \
197 +- NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);
198 ++ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
199 ++ else \
200 ++ NEW_AUX_ENT(AT_IGNORE, 0);
201 + #else
202 + #define VSYSCALL_AUX_ENT
203 + #endif /* CONFIG_VSYSCALL */
204 +@@ -219,7 +221,7 @@ extern void __kernel_vsyscall;
205 + #ifdef CONFIG_SH_FPU
206 + #define FPU_AUX_ENT NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT)
207 + #else
208 +-#define FPU_AUX_ENT
209 ++#define FPU_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0)
210 + #endif
211 +
212 + extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
213 +diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
214 +index 983e079..1d19c19 100644
215 +--- a/arch/sh/kernel/smp.c
216 ++++ b/arch/sh/kernel/smp.c
217 +@@ -69,6 +69,7 @@ asmlinkage void __cpuinit start_secondary(void)
218 + unsigned int cpu;
219 + struct mm_struct *mm = &init_mm;
220 +
221 ++ enable_mmu();
222 + atomic_inc(&mm->mm_count);
223 + atomic_inc(&mm->mm_users);
224 + current->active_mm = mm;
225 +diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
226 +index 7e3dfd9..e608f39 100644
227 +--- a/arch/sparc/kernel/ptrace_32.c
228 ++++ b/arch/sparc/kernel/ptrace_32.c
229 +@@ -65,6 +65,7 @@ static int genregs32_get(struct task_struct *target,
230 + *k++ = regs->u_regs[pos++];
231 +
232 + reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
233 ++ reg_window -= 16;
234 + for (; count > 0 && pos < 32; count--) {
235 + if (get_user(*k++, &reg_window[pos++]))
236 + return -EFAULT;
237 +@@ -76,6 +77,7 @@ static int genregs32_get(struct task_struct *target,
238 + }
239 +
240 + reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
241 ++ reg_window -= 16;
242 + for (; count > 0 && pos < 32; count--) {
243 + if (get_user(reg, &reg_window[pos++]) ||
244 + put_user(reg, u++))
245 +@@ -141,6 +143,7 @@ static int genregs32_set(struct task_struct *target,
246 + regs->u_regs[pos++] = *k++;
247 +
248 + reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
249 ++ reg_window -= 16;
250 + for (; count > 0 && pos < 32; count--) {
251 + if (put_user(*k++, &reg_window[pos++]))
252 + return -EFAULT;
253 +@@ -153,6 +156,7 @@ static int genregs32_set(struct task_struct *target,
254 + }
255 +
256 + reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
257 ++ reg_window -= 16;
258 + for (; count > 0 && pos < 32; count--) {
259 + if (get_user(reg, u++) ||
260 + put_user(reg, &reg_window[pos++]))
261 +diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
262 +index 2f6524d..aa90da0 100644
263 +--- a/arch/sparc/kernel/ptrace_64.c
264 ++++ b/arch/sparc/kernel/ptrace_64.c
265 +@@ -492,6 +492,7 @@ static int genregs32_get(struct task_struct *target,
266 + *k++ = regs->u_regs[pos++];
267 +
268 + reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
269 ++ reg_window -= 16;
270 + if (target == current) {
271 + for (; count > 0 && pos < 32; count--) {
272 + if (get_user(*k++, &reg_window[pos++]))
273 +@@ -516,6 +517,7 @@ static int genregs32_get(struct task_struct *target,
274 + }
275 +
276 + reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
277 ++ reg_window -= 16;
278 + if (target == current) {
279 + for (; count > 0 && pos < 32; count--) {
280 + if (get_user(reg, &reg_window[pos++]) ||
281 +@@ -599,6 +601,7 @@ static int genregs32_set(struct task_struct *target,
282 + regs->u_regs[pos++] = *k++;
283 +
284 + reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
285 ++ reg_window -= 16;
286 + if (target == current) {
287 + for (; count > 0 && pos < 32; count--) {
288 + if (put_user(*k++, &reg_window[pos++]))
289 +@@ -625,6 +628,7 @@ static int genregs32_set(struct task_struct *target,
290 + }
291 +
292 + reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
293 ++ reg_window -= 16;
294 + if (target == current) {
295 + for (; count > 0 && pos < 32; count--) {
296 + if (get_user(reg, u++) ||
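
Both sparc ptrace fixes are the same off-by-sixteen: pos keeps running from 16 to 31 for the window registers, but the save area pointed to by u_regs[UREG_I6] has only 16 slots (l0-l7, i0-i7), so the base pointer is rebased once instead of adjusting the index in every loop. A standalone sketch of the rebasing, with illustrative values:

#include <stdio.h>

int main(void)
{
    unsigned long backing[32];
    unsigned long *window = backing + 16;  /* stands in for the save area at [%sp] */
    unsigned long *reg_window;
    int pos;

    for (pos = 0; pos < 16; pos++)
        window[pos] = 100 + pos;           /* pretend these are l0-l7, i0-i7 */

    reg_window = window - 16;              /* mirrors "reg_window -= 16" in the patch */
    for (pos = 16; pos < 32; pos++)
        printf("regset slot %d = %lu\n", pos, reg_window[pos]);
    return 0;
}
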
297 +diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
298 +index 2201e9c..c1ea9eb 100644
299 +--- a/arch/um/sys-x86_64/Makefile
300 ++++ b/arch/um/sys-x86_64/Makefile
301 +@@ -8,7 +8,8 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
302 + setjmp.o signal.o stub.o stub_segv.o syscalls.o syscall_table.o \
303 + sysrq.o ksyms.o tls.o
304 +
305 +-subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
306 ++subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \
307 ++ lib/rwsem_64.o
308 + subarch-obj-$(CONFIG_MODULES) += kernel/module.o
309 +
310 + ldt-y = ../sys-i386/ldt.o
311 +diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
312 +index f20ddf8..a198293 100644
313 +--- a/arch/x86/Kconfig.cpu
314 ++++ b/arch/x86/Kconfig.cpu
315 +@@ -319,7 +319,7 @@ config X86_L1_CACHE_SHIFT
316 +
317 + config X86_XADD
318 + def_bool y
319 +- depends on X86_32 && !M386
320 ++ depends on X86_64 || !M386
321 +
322 + config X86_PPRO_FENCE
323 + bool "PentiumPro memory ordering errata workaround"
324 +diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
325 +index ca7517d..606ede1 100644
326 +--- a/arch/x86/include/asm/rwsem.h
327 ++++ b/arch/x86/include/asm/rwsem.h
328 +@@ -41,6 +41,7 @@
329 + #include <linux/list.h>
330 + #include <linux/spinlock.h>
331 + #include <linux/lockdep.h>
332 ++#include <asm/asm.h>
333 +
334 + struct rwsem_waiter;
335 +
336 +@@ -55,17 +56,28 @@ extern asmregparm struct rw_semaphore *
337 +
338 + /*
339 + * the semaphore definition
340 ++ *
341 ++ * The bias values and the counter type limits the number of
342 ++ * potential readers/writers to 32767 for 32 bits and 2147483647
343 ++ * for 64 bits.
344 + */
345 +
346 +-#define RWSEM_UNLOCKED_VALUE 0x00000000
347 +-#define RWSEM_ACTIVE_BIAS 0x00000001
348 +-#define RWSEM_ACTIVE_MASK 0x0000ffff
349 +-#define RWSEM_WAITING_BIAS (-0x00010000)
350 ++#ifdef CONFIG_X86_64
351 ++# define RWSEM_ACTIVE_MASK 0xffffffffL
352 ++#else
353 ++# define RWSEM_ACTIVE_MASK 0x0000ffffL
354 ++#endif
355 ++
356 ++#define RWSEM_UNLOCKED_VALUE 0x00000000L
357 ++#define RWSEM_ACTIVE_BIAS 0x00000001L
358 ++#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
359 + #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
360 + #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
361 +
362 ++typedef signed long rwsem_count_t;
363 ++
364 + struct rw_semaphore {
365 +- signed long count;
366 ++ rwsem_count_t count;
367 + spinlock_t wait_lock;
368 + struct list_head wait_list;
369 + #ifdef CONFIG_DEBUG_LOCK_ALLOC
370 +@@ -105,7 +117,7 @@ do { \
371 + static inline void __down_read(struct rw_semaphore *sem)
372 + {
373 + asm volatile("# beginning down_read\n\t"
374 +- LOCK_PREFIX " incl (%%eax)\n\t"
375 ++ LOCK_PREFIX _ASM_INC "(%1)\n\t"
376 + /* adds 0x00000001, returns the old value */
377 + " jns 1f\n"
378 + " call call_rwsem_down_read_failed\n"
379 +@@ -121,14 +133,14 @@ static inline void __down_read(struct rw_semaphore *sem)
380 + */
381 + static inline int __down_read_trylock(struct rw_semaphore *sem)
382 + {
383 +- __s32 result, tmp;
384 ++ rwsem_count_t result, tmp;
385 + asm volatile("# beginning __down_read_trylock\n\t"
386 +- " movl %0,%1\n\t"
387 ++ " mov %0,%1\n\t"
388 + "1:\n\t"
389 +- " movl %1,%2\n\t"
390 +- " addl %3,%2\n\t"
391 ++ " mov %1,%2\n\t"
392 ++ " add %3,%2\n\t"
393 + " jle 2f\n\t"
394 +- LOCK_PREFIX " cmpxchgl %2,%0\n\t"
395 ++ LOCK_PREFIX " cmpxchg %2,%0\n\t"
396 + " jnz 1b\n\t"
397 + "2:\n\t"
398 + "# ending __down_read_trylock\n\t"
399 +@@ -143,13 +155,13 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
400 + */
401 + static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
402 + {
403 +- int tmp;
404 ++ rwsem_count_t tmp;
405 +
406 + tmp = RWSEM_ACTIVE_WRITE_BIAS;
407 + asm volatile("# beginning down_write\n\t"
408 +- LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
409 ++ LOCK_PREFIX " xadd %1,(%2)\n\t"
410 + /* subtract 0x0000ffff, returns the old value */
411 +- " testl %%edx,%%edx\n\t"
412 ++ " test %1,%1\n\t"
413 + /* was the count 0 before? */
414 + " jz 1f\n"
415 + " call call_rwsem_down_write_failed\n"
416 +@@ -170,9 +182,9 @@ static inline void __down_write(struct rw_semaphore *sem)
417 + */
418 + static inline int __down_write_trylock(struct rw_semaphore *sem)
419 + {
420 +- signed long ret = cmpxchg(&sem->count,
421 +- RWSEM_UNLOCKED_VALUE,
422 +- RWSEM_ACTIVE_WRITE_BIAS);
423 ++ rwsem_count_t ret = cmpxchg(&sem->count,
424 ++ RWSEM_UNLOCKED_VALUE,
425 ++ RWSEM_ACTIVE_WRITE_BIAS);
426 + if (ret == RWSEM_UNLOCKED_VALUE)
427 + return 1;
428 + return 0;
429 +@@ -183,9 +195,9 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
430 + */
431 + static inline void __up_read(struct rw_semaphore *sem)
432 + {
433 +- __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
434 ++ rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
435 + asm volatile("# beginning __up_read\n\t"
436 +- LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
437 ++ LOCK_PREFIX " xadd %1,(%2)\n\t"
438 + /* subtracts 1, returns the old value */
439 + " jns 1f\n\t"
440 + " call call_rwsem_wake\n"
441 +@@ -201,18 +213,18 @@ static inline void __up_read(struct rw_semaphore *sem)
442 + */
443 + static inline void __up_write(struct rw_semaphore *sem)
444 + {
445 ++ rwsem_count_t tmp;
446 + asm volatile("# beginning __up_write\n\t"
447 +- " movl %2,%%edx\n\t"
448 +- LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t"
449 ++ LOCK_PREFIX " xadd %1,(%2)\n\t"
450 + /* tries to transition
451 + 0xffff0001 -> 0x00000000 */
452 + " jz 1f\n"
453 + " call call_rwsem_wake\n"
454 + "1:\n\t"
455 + "# ending __up_write\n"
456 +- : "+m" (sem->count)
457 +- : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
458 +- : "memory", "cc", "edx");
459 ++ : "+m" (sem->count), "=d" (tmp)
460 ++ : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
461 ++ : "memory", "cc");
462 + }
463 +
464 + /*
465 +@@ -221,33 +233,38 @@ static inline void __up_write(struct rw_semaphore *sem)
466 + static inline void __downgrade_write(struct rw_semaphore *sem)
467 + {
468 + asm volatile("# beginning __downgrade_write\n\t"
469 +- LOCK_PREFIX " addl %2,(%%eax)\n\t"
470 +- /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
471 ++ LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
472 ++ /*
473 ++ * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
474 ++ * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
475 ++ */
476 + " jns 1f\n\t"
477 + " call call_rwsem_downgrade_wake\n"
478 + "1:\n\t"
479 + "# ending __downgrade_write\n"
480 + : "+m" (sem->count)
481 +- : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
482 ++ : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
483 + : "memory", "cc");
484 + }
485 +
486 + /*
487 + * implement atomic add functionality
488 + */
489 +-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
490 ++static inline void rwsem_atomic_add(rwsem_count_t delta,
491 ++ struct rw_semaphore *sem)
492 + {
493 +- asm volatile(LOCK_PREFIX "addl %1,%0"
494 ++ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
495 + : "+m" (sem->count)
496 +- : "ir" (delta));
497 ++ : "er" (delta));
498 + }
499 +
500 + /*
501 + * implement exchange and add functionality
502 + */
503 +-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
504 ++static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
505 ++ struct rw_semaphore *sem)
506 + {
507 +- int tmp = delta;
508 ++ rwsem_count_t tmp = delta;
509 +
510 + asm volatile(LOCK_PREFIX "xadd %0,%1"
511 + : "+r" (tmp), "+m" (sem->count)
512 +diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
513 +index 1e79678..4cfc908 100644
514 +--- a/arch/x86/include/asm/smp.h
515 ++++ b/arch/x86/include/asm/smp.h
516 +@@ -135,6 +135,8 @@ int native_cpu_disable(void);
517 + void native_cpu_die(unsigned int cpu);
518 + void native_play_dead(void);
519 + void play_dead_common(void);
520 ++void wbinvd_on_cpu(int cpu);
521 ++int wbinvd_on_all_cpus(void);
522 +
523 + void native_send_call_func_ipi(const struct cpumask *mask);
524 + void native_send_call_func_single_ipi(int cpu);
525 +@@ -147,6 +149,13 @@ static inline int num_booting_cpus(void)
526 + {
527 + return cpumask_weight(cpu_callout_mask);
528 + }
529 ++#else /* !CONFIG_SMP */
530 ++#define wbinvd_on_cpu(cpu) wbinvd()
531 ++static inline int wbinvd_on_all_cpus(void)
532 ++{
533 ++ wbinvd();
534 ++ return 0;
535 ++}
536 + #endif /* CONFIG_SMP */
537 +
538 + extern unsigned disabled_cpus __cpuinitdata;
539 +diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
540 +index adb0ba0..2e77516 100644
541 +--- a/arch/x86/kernel/amd_iommu.c
542 ++++ b/arch/x86/kernel/amd_iommu.c
543 +@@ -2298,7 +2298,7 @@ static void cleanup_domain(struct protection_domain *domain)
544 + list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
545 + struct device *dev = dev_data->dev;
546 +
547 +- do_detach(dev);
548 ++ __detach_device(dev);
549 + atomic_set(&dev_data->bind, 0);
550 + }
551 +
552 +@@ -2379,9 +2379,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
553 +
554 + free_pagetable(domain);
555 +
556 +- domain_id_free(domain->id);
557 +-
558 +- kfree(domain);
559 ++ protection_domain_free(domain);
560 +
561 + dom->priv = NULL;
562 + }
563 +diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
564 +index 9dc91b4..883d619 100644
565 +--- a/arch/x86/kernel/amd_iommu_init.c
566 ++++ b/arch/x86/kernel/amd_iommu_init.c
567 +@@ -1288,6 +1288,8 @@ static int __init amd_iommu_init(void)
568 + if (ret)
569 + goto free;
570 +
571 ++ enable_iommus();
572 ++
573 + if (iommu_pass_through)
574 + ret = amd_iommu_init_passthrough();
575 + else
576 +@@ -1300,8 +1302,6 @@ static int __init amd_iommu_init(void)
577 +
578 + amd_iommu_init_notifier();
579 +
580 +- enable_iommus();
581 +-
582 + if (iommu_pass_through)
583 + goto out;
584 +
585 +@@ -1315,6 +1315,7 @@ out:
586 + return ret;
587 +
588 + free:
589 ++ disable_iommus();
590 +
591 + amd_iommu_uninit_devices();
592 +
593 +diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
594 +index f147a95..19f2c70 100644
595 +--- a/arch/x86/kernel/aperture_64.c
596 ++++ b/arch/x86/kernel/aperture_64.c
597 +@@ -394,6 +394,7 @@ void __init gart_iommu_hole_init(void)
598 + for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
599 + int bus;
600 + int dev_base, dev_limit;
601 ++ u32 ctl;
602 +
603 + bus = bus_dev_ranges[i].bus;
604 + dev_base = bus_dev_ranges[i].dev_base;
605 +@@ -407,7 +408,19 @@ void __init gart_iommu_hole_init(void)
606 + gart_iommu_aperture = 1;
607 + x86_init.iommu.iommu_init = gart_iommu_init;
608 +
609 +- aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
610 ++ ctl = read_pci_config(bus, slot, 3,
611 ++ AMD64_GARTAPERTURECTL);
612 ++
613 ++ /*
614 ++ * Before we do anything else disable the GART. It may
615 ++ * still be enabled if we boot into a crash-kernel here.
616 ++ * Reconfiguring the GART while it is enabled could have
617 ++ * unknown side-effects.
618 ++ */
619 ++ ctl &= ~GARTEN;
620 ++ write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
621 ++
622 ++ aper_order = (ctl >> 1) & 7;
623 + aper_size = (32 * 1024 * 1024) << aper_order;
624 + aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
625 + aper_base <<= 25;
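
The aperture hunk clears the enable bit before reading the order out of the control register; the decode itself is two shifts. A sketch of that register math (GARTEN is taken to be bit 0 here, as asm/gart.h defines it — the hunk itself does not show the definition):

#include <stdio.h>

#define GARTEN (1u << 0)              /* assumed enable bit; named but not defined above */

int main(void)
{
    unsigned int ctl = 0x0b;          /* example: GART enabled, order 5 */
    unsigned int aper_order, aper_size_mb;

    ctl &= ~GARTEN;                   /* disable before reconfiguring, as the patch does */
    aper_order = (ctl >> 1) & 7;      /* bits 3:1 select the order */
    aper_size_mb = 32u << aper_order; /* 32 MiB << order */

    printf("order %u -> %u MiB aperture\n", aper_order, aper_size_mb);
    return 0;
}
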
626 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
627 +index dfca210..d4df517 100644
628 +--- a/arch/x86/kernel/apic/apic.c
629 ++++ b/arch/x86/kernel/apic/apic.c
630 +@@ -1640,8 +1640,10 @@ int __init APIC_init_uniprocessor(void)
631 + }
632 + #endif
633 +
634 ++#ifndef CONFIG_SMP
635 + enable_IR_x2apic();
636 + default_setup_apic_routing();
637 ++#endif
638 +
639 + verify_local_APIC();
640 + connect_bsp_APIC();
641 +diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
642 +index fc6c8ef..d440123 100644
643 +--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
644 ++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
645 +@@ -18,6 +18,7 @@
646 + #include <asm/processor.h>
647 + #include <linux/smp.h>
648 + #include <asm/k8.h>
649 ++#include <asm/smp.h>
650 +
651 + #define LVL_1_INST 1
652 + #define LVL_1_DATA 2
653 +@@ -150,7 +151,8 @@ struct _cpuid4_info {
654 + union _cpuid4_leaf_ebx ebx;
655 + union _cpuid4_leaf_ecx ecx;
656 + unsigned long size;
657 +- unsigned long can_disable;
658 ++ bool can_disable;
659 ++ unsigned int l3_indices;
660 + DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
661 + };
662 +
663 +@@ -160,7 +162,8 @@ struct _cpuid4_info_regs {
664 + union _cpuid4_leaf_ebx ebx;
665 + union _cpuid4_leaf_ecx ecx;
666 + unsigned long size;
667 +- unsigned long can_disable;
668 ++ bool can_disable;
669 ++ unsigned int l3_indices;
670 + };
671 +
672 + unsigned short num_cache_leaves;
673 +@@ -290,6 +293,36 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
674 + (ebx->split.ways_of_associativity + 1) - 1;
675 + }
676 +
677 ++struct _cache_attr {
678 ++ struct attribute attr;
679 ++ ssize_t (*show)(struct _cpuid4_info *, char *);
680 ++ ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
681 ++};
682 ++
683 ++#ifdef CONFIG_CPU_SUP_AMD
684 ++static unsigned int __cpuinit amd_calc_l3_indices(void)
685 ++{
686 ++ /*
687 ++ * We're called over smp_call_function_single() and therefore
688 ++ * are on the correct cpu.
689 ++ */
690 ++ int cpu = smp_processor_id();
691 ++ int node = cpu_to_node(cpu);
692 ++ struct pci_dev *dev = node_to_k8_nb_misc(node);
693 ++ unsigned int sc0, sc1, sc2, sc3;
694 ++ u32 val = 0;
695 ++
696 ++ pci_read_config_dword(dev, 0x1C4, &val);
697 ++
698 ++ /* calculate subcache sizes */
699 ++ sc0 = !(val & BIT(0));
700 ++ sc1 = !(val & BIT(4));
701 ++ sc2 = !(val & BIT(8)) + !(val & BIT(9));
702 ++ sc3 = !(val & BIT(12)) + !(val & BIT(13));
703 ++
704 ++ return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
705 ++}
706 ++
707 + static void __cpuinit
708 + amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
709 + {
710 +@@ -299,12 +332,103 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
711 + if (boot_cpu_data.x86 == 0x11)
712 + return;
713 +
714 +- /* see erratum #382 */
715 +- if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
716 ++ /* see errata #382 and #388 */
717 ++ if ((boot_cpu_data.x86 == 0x10) &&
718 ++ ((boot_cpu_data.x86_model < 0x8) ||
719 ++ (boot_cpu_data.x86_mask < 0x1)))
720 + return;
721 +
722 +- this_leaf->can_disable = 1;
723 ++ this_leaf->can_disable = true;
724 ++ this_leaf->l3_indices = amd_calc_l3_indices();
725 ++}
726 ++
727 ++static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
728 ++ unsigned int index)
729 ++{
730 ++ int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
731 ++ int node = amd_get_nb_id(cpu);
732 ++ struct pci_dev *dev = node_to_k8_nb_misc(node);
733 ++ unsigned int reg = 0;
734 ++
735 ++ if (!this_leaf->can_disable)
736 ++ return -EINVAL;
737 ++
738 ++ if (!dev)
739 ++ return -EINVAL;
740 ++
741 ++ pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
742 ++ return sprintf(buf, "0x%08x\n", reg);
743 ++}
744 ++
745 ++#define SHOW_CACHE_DISABLE(index) \
746 ++static ssize_t \
747 ++show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
748 ++{ \
749 ++ return show_cache_disable(this_leaf, buf, index); \
750 ++}
751 ++SHOW_CACHE_DISABLE(0)
752 ++SHOW_CACHE_DISABLE(1)
753 ++
754 ++static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
755 ++ const char *buf, size_t count, unsigned int index)
756 ++{
757 ++ int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
758 ++ int node = amd_get_nb_id(cpu);
759 ++ struct pci_dev *dev = node_to_k8_nb_misc(node);
760 ++ unsigned long val = 0;
761 ++
762 ++#define SUBCACHE_MASK (3UL << 20)
763 ++#define SUBCACHE_INDEX 0xfff
764 ++
765 ++ if (!this_leaf->can_disable)
766 ++ return -EINVAL;
767 ++
768 ++ if (!capable(CAP_SYS_ADMIN))
769 ++ return -EPERM;
770 ++
771 ++ if (!dev)
772 ++ return -EINVAL;
773 ++
774 ++ if (strict_strtoul(buf, 10, &val) < 0)
775 ++ return -EINVAL;
776 ++
777 ++ /* do not allow writes outside of allowed bits */
778 ++ if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
779 ++ ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
780 ++ return -EINVAL;
781 ++
782 ++ val |= BIT(30);
783 ++ pci_write_config_dword(dev, 0x1BC + index * 4, val);
784 ++ /*
785 ++ * We need to WBINVD on a core on the node containing the L3 cache which
786 ++ * indices we disable therefore a simple wbinvd() is not sufficient.
787 ++ */
788 ++ wbinvd_on_cpu(cpu);
789 ++ pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
790 ++ return count;
791 ++}
792 ++
793 ++#define STORE_CACHE_DISABLE(index) \
794 ++static ssize_t \
795 ++store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
796 ++ const char *buf, size_t count) \
797 ++{ \
798 ++ return store_cache_disable(this_leaf, buf, count, index); \
799 + }
800 ++STORE_CACHE_DISABLE(0)
801 ++STORE_CACHE_DISABLE(1)
802 ++
803 ++static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
804 ++ show_cache_disable_0, store_cache_disable_0);
805 ++static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
806 ++ show_cache_disable_1, store_cache_disable_1);
807 ++
808 ++#else /* CONFIG_CPU_SUP_AMD */
809 ++static void __cpuinit
810 ++amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
811 ++{
812 ++};
813 ++#endif /* CONFIG_CPU_SUP_AMD */
814 +
815 + static int
816 + __cpuinit cpuid4_cache_lookup_regs(int index,
817 +@@ -711,82 +835,6 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
818 + #define to_object(k) container_of(k, struct _index_kobject, kobj)
819 + #define to_attr(a) container_of(a, struct _cache_attr, attr)
820 +
821 +-static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
822 +- unsigned int index)
823 +-{
824 +- int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
825 +- int node = cpu_to_node(cpu);
826 +- struct pci_dev *dev = node_to_k8_nb_misc(node);
827 +- unsigned int reg = 0;
828 +-
829 +- if (!this_leaf->can_disable)
830 +- return -EINVAL;
831 +-
832 +- if (!dev)
833 +- return -EINVAL;
834 +-
835 +- pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
836 +- return sprintf(buf, "%x\n", reg);
837 +-}
838 +-
839 +-#define SHOW_CACHE_DISABLE(index) \
840 +-static ssize_t \
841 +-show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
842 +-{ \
843 +- return show_cache_disable(this_leaf, buf, index); \
844 +-}
845 +-SHOW_CACHE_DISABLE(0)
846 +-SHOW_CACHE_DISABLE(1)
847 +-
848 +-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
849 +- const char *buf, size_t count, unsigned int index)
850 +-{
851 +- int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
852 +- int node = cpu_to_node(cpu);
853 +- struct pci_dev *dev = node_to_k8_nb_misc(node);
854 +- unsigned long val = 0;
855 +- unsigned int scrubber = 0;
856 +-
857 +- if (!this_leaf->can_disable)
858 +- return -EINVAL;
859 +-
860 +- if (!capable(CAP_SYS_ADMIN))
861 +- return -EPERM;
862 +-
863 +- if (!dev)
864 +- return -EINVAL;
865 +-
866 +- if (strict_strtoul(buf, 10, &val) < 0)
867 +- return -EINVAL;
868 +-
869 +- val |= 0xc0000000;
870 +-
871 +- pci_read_config_dword(dev, 0x58, &scrubber);
872 +- scrubber &= ~0x1f000000;
873 +- pci_write_config_dword(dev, 0x58, scrubber);
874 +-
875 +- pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
876 +- wbinvd();
877 +- pci_write_config_dword(dev, 0x1BC + index * 4, val);
878 +- return count;
879 +-}
880 +-
881 +-#define STORE_CACHE_DISABLE(index) \
882 +-static ssize_t \
883 +-store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
884 +- const char *buf, size_t count) \
885 +-{ \
886 +- return store_cache_disable(this_leaf, buf, count, index); \
887 +-}
888 +-STORE_CACHE_DISABLE(0)
889 +-STORE_CACHE_DISABLE(1)
890 +-
891 +-struct _cache_attr {
892 +- struct attribute attr;
893 +- ssize_t (*show)(struct _cpuid4_info *, char *);
894 +- ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
895 +-};
896 +-
897 + #define define_one_ro(_name) \
898 + static struct _cache_attr _name = \
899 + __ATTR(_name, 0444, show_##_name, NULL)
900 +@@ -801,23 +849,28 @@ define_one_ro(size);
901 + define_one_ro(shared_cpu_map);
902 + define_one_ro(shared_cpu_list);
903 +
904 +-static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
905 +- show_cache_disable_0, store_cache_disable_0);
906 +-static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
907 +- show_cache_disable_1, store_cache_disable_1);
908 ++#define DEFAULT_SYSFS_CACHE_ATTRS \
909 ++ &type.attr, \
910 ++ &level.attr, \
911 ++ &coherency_line_size.attr, \
912 ++ &physical_line_partition.attr, \
913 ++ &ways_of_associativity.attr, \
914 ++ &number_of_sets.attr, \
915 ++ &size.attr, \
916 ++ &shared_cpu_map.attr, \
917 ++ &shared_cpu_list.attr
918 +
919 + static struct attribute *default_attrs[] = {
920 +- &type.attr,
921 +- &level.attr,
922 +- &coherency_line_size.attr,
923 +- &physical_line_partition.attr,
924 +- &ways_of_associativity.attr,
925 +- &number_of_sets.attr,
926 +- &size.attr,
927 +- &shared_cpu_map.attr,
928 +- &shared_cpu_list.attr,
929 ++ DEFAULT_SYSFS_CACHE_ATTRS,
930 ++ NULL
931 ++};
932 ++
933 ++static struct attribute *default_l3_attrs[] = {
934 ++ DEFAULT_SYSFS_CACHE_ATTRS,
935 ++#ifdef CONFIG_CPU_SUP_AMD
936 + &cache_disable_0.attr,
937 + &cache_disable_1.attr,
938 ++#endif
939 + NULL
940 + };
941 +
942 +@@ -908,6 +961,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
943 + unsigned int cpu = sys_dev->id;
944 + unsigned long i, j;
945 + struct _index_kobject *this_object;
946 ++ struct _cpuid4_info *this_leaf;
947 + int retval;
948 +
949 + retval = cpuid4_cache_sysfs_init(cpu);
950 +@@ -926,6 +980,14 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
951 + this_object = INDEX_KOBJECT_PTR(cpu, i);
952 + this_object->cpu = cpu;
953 + this_object->index = i;
954 ++
955 ++ this_leaf = CPUID4_INFO_IDX(cpu, i);
956 ++
957 ++ if (this_leaf->can_disable)
958 ++ ktype_cache.default_attrs = default_l3_attrs;
959 ++ else
960 ++ ktype_cache.default_attrs = default_attrs;
961 ++
962 + retval = kobject_init_and_add(&(this_object->kobj),
963 + &ktype_cache,
964 + per_cpu(ici_cache_kobject, cpu),
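
amd_calc_l3_indices() above turns the four subcache-disable fields of register 0x1C4 into the largest valid disable index. The same arithmetic, lifted into a standalone program for a sample register value:

#include <stdio.h>

#define BIT(n) (1u << (n))
#define max(a, b) ((a) > (b) ? (a) : (b))

static unsigned int calc_l3_indices(unsigned int val)
{
    unsigned int sc0, sc1, sc2, sc3;

    /* a clear bit means the subcache is present */
    sc0 = !(val & BIT(0));
    sc1 = !(val & BIT(4));
    sc2 = !(val & BIT(8))  + !(val & BIT(9));
    sc3 = !(val & BIT(12)) + !(val & BIT(13));

    /* largest present size, in units of 1024 indices, minus one */
    return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
}

int main(void)
{
    /* all subcaches present: val == 0 gives sc2 == sc3 == 2, so 2047 */
    printf("l3_indices = %u\n", calc_l3_indices(0));
    return 0;
}
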
965 +diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
966 +index 98819b3..c7ca8e2 100644
967 +--- a/arch/x86/kernel/cpu/perf_event.c
968 ++++ b/arch/x86/kernel/cpu/perf_event.c
969 +@@ -245,6 +245,97 @@ static u64 __read_mostly hw_cache_event_ids
970 + [PERF_COUNT_HW_CACHE_OP_MAX]
971 + [PERF_COUNT_HW_CACHE_RESULT_MAX];
972 +
973 ++static const u64 westmere_hw_cache_event_ids
974 ++ [PERF_COUNT_HW_CACHE_MAX]
975 ++ [PERF_COUNT_HW_CACHE_OP_MAX]
976 ++ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
977 ++{
978 ++ [ C(L1D) ] = {
979 ++ [ C(OP_READ) ] = {
980 ++ [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
981 ++ [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
982 ++ },
983 ++ [ C(OP_WRITE) ] = {
984 ++ [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */
985 ++ [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
986 ++ },
987 ++ [ C(OP_PREFETCH) ] = {
988 ++ [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
989 ++ [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
990 ++ },
991 ++ },
992 ++ [ C(L1I ) ] = {
993 ++ [ C(OP_READ) ] = {
994 ++ [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
995 ++ [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
996 ++ },
997 ++ [ C(OP_WRITE) ] = {
998 ++ [ C(RESULT_ACCESS) ] = -1,
999 ++ [ C(RESULT_MISS) ] = -1,
1000 ++ },
1001 ++ [ C(OP_PREFETCH) ] = {
1002 ++ [ C(RESULT_ACCESS) ] = 0x0,
1003 ++ [ C(RESULT_MISS) ] = 0x0,
1004 ++ },
1005 ++ },
1006 ++ [ C(LL ) ] = {
1007 ++ [ C(OP_READ) ] = {
1008 ++ [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
1009 ++ [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
1010 ++ },
1011 ++ [ C(OP_WRITE) ] = {
1012 ++ [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
1013 ++ [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
1014 ++ },
1015 ++ [ C(OP_PREFETCH) ] = {
1016 ++ [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
1017 ++ [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
1018 ++ },
1019 ++ },
1020 ++ [ C(DTLB) ] = {
1021 ++ [ C(OP_READ) ] = {
1022 ++ [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1023 ++ [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1024 ++ },
1025 ++ [ C(OP_WRITE) ] = {
1026 ++ [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */
1027 ++ [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1028 ++ },
1029 ++ [ C(OP_PREFETCH) ] = {
1030 ++ [ C(RESULT_ACCESS) ] = 0x0,
1031 ++ [ C(RESULT_MISS) ] = 0x0,
1032 ++ },
1033 ++ },
1034 ++ [ C(ITLB) ] = {
1035 ++ [ C(OP_READ) ] = {
1036 ++ [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1037 ++ [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
1038 ++ },
1039 ++ [ C(OP_WRITE) ] = {
1040 ++ [ C(RESULT_ACCESS) ] = -1,
1041 ++ [ C(RESULT_MISS) ] = -1,
1042 ++ },
1043 ++ [ C(OP_PREFETCH) ] = {
1044 ++ [ C(RESULT_ACCESS) ] = -1,
1045 ++ [ C(RESULT_MISS) ] = -1,
1046 ++ },
1047 ++ },
1048 ++ [ C(BPU ) ] = {
1049 ++ [ C(OP_READ) ] = {
1050 ++ [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1051 ++ [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1052 ++ },
1053 ++ [ C(OP_WRITE) ] = {
1054 ++ [ C(RESULT_ACCESS) ] = -1,
1055 ++ [ C(RESULT_MISS) ] = -1,
1056 ++ },
1057 ++ [ C(OP_PREFETCH) ] = {
1058 ++ [ C(RESULT_ACCESS) ] = -1,
1059 ++ [ C(RESULT_MISS) ] = -1,
1060 ++ },
1061 ++ },
1062 ++};
1063 ++
1064 + static __initconst u64 nehalem_hw_cache_event_ids
1065 + [PERF_COUNT_HW_CACHE_MAX]
1066 + [PERF_COUNT_HW_CACHE_OP_MAX]
1067 +@@ -2118,6 +2209,7 @@ static __init int intel_pmu_init(void)
1068 + * Install the hw-cache-events table:
1069 + */
1070 + switch (boot_cpu_data.x86_model) {
1071 ++
1072 + case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
1073 + case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
1074 + case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
1075 +@@ -2129,7 +2221,9 @@ static __init int intel_pmu_init(void)
1076 + event_constraints = intel_core_event_constraints;
1077 + break;
1078 + default:
1079 +- case 26:
1080 ++ case 26: /* 45 nm nehalem, "Bloomfield" */
1081 ++ case 30: /* 45 nm nehalem, "Lynnfield" */
1082 ++ case 46: /* 45 nm nehalem-ex, "Beckton" */
1083 + memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
1084 + sizeof(hw_cache_event_ids));
1085 +
1086 +@@ -2142,6 +2236,14 @@ static __init int intel_pmu_init(void)
1087 +
1088 + pr_cont("Atom events, ");
1089 + break;
1090 ++
1091 ++ case 37: /* 32 nm nehalem, "Clarkdale" */
1092 ++ case 44: /* 32 nm nehalem, "Gulftown" */
1093 ++ memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
1094 ++ sizeof(hw_cache_event_ids));
1095 ++
1096 ++ pr_cont("Westmere events, ");
1097 ++ break;
1098 + }
1099 + return 0;
1100 + }
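
The new Westmere table uses the usual Intel PERFEVTSEL packing, (umask << 8) | event_select, so an entry such as 0x0151 is umask 0x01 on event 0x51 (L1D.REPL). A two-line decode:

#include <stdio.h>

int main(void)
{
    unsigned int config = 0x0151;             /* L1D.REPL from the Westmere table */
    unsigned int event  = config & 0xff;
    unsigned int umask  = (config >> 8) & 0xff;

    printf("event 0x%02x, umask 0x%02x\n", event, umask);
    return 0;
}
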
1101 +diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
1102 +index a4849c1..ebd4c51 100644
1103 +--- a/arch/x86/kernel/crash.c
1104 ++++ b/arch/x86/kernel/crash.c
1105 +@@ -27,7 +27,6 @@
1106 + #include <asm/cpu.h>
1107 + #include <asm/reboot.h>
1108 + #include <asm/virtext.h>
1109 +-#include <asm/x86_init.h>
1110 +
1111 + #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
1112 +
1113 +@@ -103,10 +102,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
1114 + #ifdef CONFIG_HPET_TIMER
1115 + hpet_disable();
1116 + #endif
1117 +-
1118 +-#ifdef CONFIG_X86_64
1119 +- x86_platform.iommu_shutdown();
1120 +-#endif
1121 +-
1122 + crash_save_cpu(regs, safe_smp_processor_id());
1123 + }
1124 +diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
1125 +index ad80a1c..773afc9 100644
1126 +--- a/arch/x86/kernel/hpet.c
1127 ++++ b/arch/x86/kernel/hpet.c
1128 +@@ -399,9 +399,15 @@ static int hpet_next_event(unsigned long delta,
1129 + * then we might have a real hardware problem. We can not do
1130 + * much about it here, but at least alert the user/admin with
1131 + * a prominent warning.
1132 ++ * An erratum on some chipsets (ICH9,..), results in comparator read
1133 ++ * immediately following a write returning old value. Workaround
1134 ++ * for this is to read this value second time, when first
1135 ++ * read returns old value.
1136 + */
1137 +- WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
1138 ++ if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
1139 ++ WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
1140 + KERN_WARNING "hpet: compare register read back failed.\n");
1141 ++ }
1142 +
1143 + return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
1144 + }
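
The HPET change only warns after a second read still disagrees, since the erratum makes the first comparator read after a write return the stale value. A toy model of the read-twice workaround (the fake register below is illustrative, not real chipset behaviour):

#include <stdio.h>

static unsigned int cmp_reg, stale;           /* toy model of the erratum */

static void reg_write(unsigned int v) { stale = cmp_reg; cmp_reg = v; }

static unsigned int reg_read(void)
{
    static int first = 1;
    if (first) { first = 0; return stale; }   /* first read returns the old value */
    return cmp_reg;
}

int main(void)
{
    reg_write(42);
    if (reg_read() != 42) {                   /* mismatch: re-read before warning */
        if (reg_read() != 42)
            printf("hpet: compare register read back failed.\n");
        else
            printf("first read was stale, second read matched\n");
    }
    return 0;
}
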
1145 +diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
1146 +index bfba601..b2258ca 100644
1147 +--- a/arch/x86/kernel/kgdb.c
1148 ++++ b/arch/x86/kernel/kgdb.c
1149 +@@ -618,8 +618,8 @@ int kgdb_arch_init(void)
1150 + * portion of kgdb because this operation requires mutexs to
1151 + * complete.
1152 + */
1153 ++ hw_breakpoint_init(&attr);
1154 + attr.bp_addr = (unsigned long)kgdb_arch_init;
1155 +- attr.type = PERF_TYPE_BREAKPOINT;
1156 + attr.bp_len = HW_BREAKPOINT_LEN_1;
1157 + attr.bp_type = HW_BREAKPOINT_W;
1158 + attr.disabled = 1;
1159 +diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
1160 +index a2c1edd..e81030f 100644
1161 +--- a/arch/x86/kernel/mpparse.c
1162 ++++ b/arch/x86/kernel/mpparse.c
1163 +@@ -664,7 +664,7 @@ static void __init smp_reserve_memory(struct mpf_intel *mpf)
1164 + {
1165 + unsigned long size = get_mpc_size(mpf->physptr);
1166 +
1167 +- reserve_early(mpf->physptr, mpf->physptr+size, "MP-table mpc");
1168 ++ reserve_early_overlap_ok(mpf->physptr, mpf->physptr+size, "MP-table mpc");
1169 + }
1170 +
1171 + static int __init smp_scan_config(unsigned long base, unsigned long length)
1172 +@@ -693,7 +693,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
1173 + mpf, (u64)virt_to_phys(mpf));
1174 +
1175 + mem = virt_to_phys(mpf);
1176 +- reserve_early(mem, mem + sizeof(*mpf), "MP-table mpf");
1177 ++ reserve_early_overlap_ok(mem, mem + sizeof(*mpf), "MP-table mpf");
1178 + if (mpf->physptr)
1179 + smp_reserve_memory(mpf);
1180 +
1181 +diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
1182 +index 34de53b..4f41b29 100644
1183 +--- a/arch/x86/kernel/pci-gart_64.c
1184 ++++ b/arch/x86/kernel/pci-gart_64.c
1185 +@@ -564,6 +564,9 @@ static void enable_gart_translations(void)
1186 +
1187 + enable_gart_translation(dev, __pa(agp_gatt_table));
1188 + }
1189 ++
1190 ++ /* Flush the GART-TLB to remove stale entries */
1191 ++ k8_flush_garts();
1192 + }
1193 +
1194 + /*
1195 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
1196 +index 89a49fb..28c3d81 100644
1197 +--- a/arch/x86/kvm/mmu.c
1198 ++++ b/arch/x86/kvm/mmu.c
1199 +@@ -1502,8 +1502,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
1200 + for_each_sp(pages, sp, parents, i) {
1201 + kvm_mmu_zap_page(kvm, sp);
1202 + mmu_pages_clear_parents(&parents);
1203 ++ zapped++;
1204 + }
1205 +- zapped += pages.nr;
1206 + kvm_mmu_pages_init(parent, &parents, &pages);
1207 + }
1208 +
1209 +@@ -1554,14 +1554,16 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
1210 + */
1211 +
1212 + if (used_pages > kvm_nr_mmu_pages) {
1213 +- while (used_pages > kvm_nr_mmu_pages) {
1214 ++ while (used_pages > kvm_nr_mmu_pages &&
1215 ++ !list_empty(&kvm->arch.active_mmu_pages)) {
1216 + struct kvm_mmu_page *page;
1217 +
1218 + page = container_of(kvm->arch.active_mmu_pages.prev,
1219 + struct kvm_mmu_page, link);
1220 +- kvm_mmu_zap_page(kvm, page);
1221 ++ used_pages -= kvm_mmu_zap_page(kvm, page);
1222 + used_pages--;
1223 + }
1224 ++ kvm_nr_mmu_pages = used_pages;
1225 + kvm->arch.n_free_mmu_pages = 0;
1226 + }
1227 + else
1228 +@@ -1608,7 +1610,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1229 + && !sp->role.invalid) {
1230 + pgprintk("%s: zap %lx %x\n",
1231 + __func__, gfn, sp->role.word);
1232 +- kvm_mmu_zap_page(kvm, sp);
1233 ++ if (kvm_mmu_zap_page(kvm, sp))
1234 ++ nn = bucket->first;
1235 + }
1236 + }
1237 + }
1238 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1239 +index 1d9b338..d42e191 100644
1240 +--- a/arch/x86/kvm/svm.c
1241 ++++ b/arch/x86/kvm/svm.c
1242 +@@ -698,29 +698,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
1243 + if (err)
1244 + goto free_svm;
1245 +
1246 ++ err = -ENOMEM;
1247 + page = alloc_page(GFP_KERNEL);
1248 +- if (!page) {
1249 +- err = -ENOMEM;
1250 ++ if (!page)
1251 + goto uninit;
1252 +- }
1253 +
1254 +- err = -ENOMEM;
1255 + msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1256 + if (!msrpm_pages)
1257 +- goto uninit;
1258 ++ goto free_page1;
1259 +
1260 + nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1261 + if (!nested_msrpm_pages)
1262 +- goto uninit;
1263 +-
1264 +- svm->msrpm = page_address(msrpm_pages);
1265 +- svm_vcpu_init_msrpm(svm->msrpm);
1266 ++ goto free_page2;
1267 +
1268 + hsave_page = alloc_page(GFP_KERNEL);
1269 + if (!hsave_page)
1270 +- goto uninit;
1271 ++ goto free_page3;
1272 ++
1273 + svm->nested.hsave = page_address(hsave_page);
1274 +
1275 ++ svm->msrpm = page_address(msrpm_pages);
1276 ++ svm_vcpu_init_msrpm(svm->msrpm);
1277 ++
1278 + svm->nested.msrpm = page_address(nested_msrpm_pages);
1279 +
1280 + svm->vmcb = page_address(page);
1281 +@@ -737,6 +736,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
1282 +
1283 + return &svm->vcpu;
1284 +
1285 ++free_page3:
1286 ++ __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
1287 ++free_page2:
1288 ++ __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
1289 ++free_page1:
1290 ++ __free_page(page);
1291 + uninit:
1292 + kvm_vcpu_uninit(&svm->vcpu);
1293 + free_svm:
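
The svm_create_vcpu() rework above replaces the catch-all 'uninit' target with one label per allocation, so a failure frees exactly what was allocated before it. The shape of that unwind, reduced to plain malloc/free on a hypothetical struct:

#include <stdlib.h>

struct ctx { void *a, *b, *c; };

static struct ctx *ctx_create(void)
{
    struct ctx *ctx = malloc(sizeof(*ctx));
    if (!ctx)
        return NULL;
    ctx->a = malloc(16);
    if (!ctx->a)
        goto free_ctx;
    ctx->b = malloc(16);
    if (!ctx->b)
        goto free_a;
    ctx->c = malloc(16);
    if (!ctx->c)
        goto free_b;
    return ctx;

free_b:          /* each label releases everything allocated before its failure */
    free(ctx->b);
free_a:
    free(ctx->a);
free_ctx:
    free(ctx);
    return NULL;
}

int main(void)
{
    struct ctx *ctx = ctx_create();
    if (ctx) { free(ctx->c); free(ctx->b); free(ctx->a); free(ctx); }
    return 0;
}
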
1294 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1295 +index 8a8e139..3acbe19 100644
1296 +--- a/arch/x86/kvm/vmx.c
1297 ++++ b/arch/x86/kvm/vmx.c
1298 +@@ -61,6 +61,8 @@ module_param_named(unrestricted_guest,
1299 + static int __read_mostly emulate_invalid_guest_state = 0;
1300 + module_param(emulate_invalid_guest_state, bool, S_IRUGO);
1301 +
1302 ++#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
1303 ++
1304 + /*
1305 + * These 2 parameters are used to config the controls for Pause-Loop Exiting:
1306 + * ple_gap: upper bound on the amount of time between two successive
1307 +@@ -115,7 +117,7 @@ struct vcpu_vmx {
1308 + } host_state;
1309 + struct {
1310 + int vm86_active;
1311 +- u8 save_iopl;
1312 ++ ulong save_rflags;
1313 + struct kvm_save_segment {
1314 + u16 selector;
1315 + unsigned long base;
1316 +@@ -787,18 +789,23 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
1317 +
1318 + static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
1319 + {
1320 +- unsigned long rflags;
1321 ++ unsigned long rflags, save_rflags;
1322 +
1323 + rflags = vmcs_readl(GUEST_RFLAGS);
1324 +- if (to_vmx(vcpu)->rmode.vm86_active)
1325 +- rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
1326 ++ if (to_vmx(vcpu)->rmode.vm86_active) {
1327 ++ rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1328 ++ save_rflags = to_vmx(vcpu)->rmode.save_rflags;
1329 ++ rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1330 ++ }
1331 + return rflags;
1332 + }
1333 +
1334 + static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1335 + {
1336 +- if (to_vmx(vcpu)->rmode.vm86_active)
1337 ++ if (to_vmx(vcpu)->rmode.vm86_active) {
1338 ++ to_vmx(vcpu)->rmode.save_rflags = rflags;
1339 + rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1340 ++ }
1341 + vmcs_writel(GUEST_RFLAGS, rflags);
1342 + }
1343 +
1344 +@@ -1431,8 +1438,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
1345 + vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
1346 +
1347 + flags = vmcs_readl(GUEST_RFLAGS);
1348 +- flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
1349 +- flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
1350 ++ flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1351 ++ flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1352 + vmcs_writel(GUEST_RFLAGS, flags);
1353 +
1354 + vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
1355 +@@ -1501,8 +1508,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
1356 + vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
1357 +
1358 + flags = vmcs_readl(GUEST_RFLAGS);
1359 +- vmx->rmode.save_iopl
1360 +- = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1361 ++ vmx->rmode.save_rflags = flags;
1362 +
1363 + flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1364 +
1365 +@@ -2719,6 +2725,12 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
1366 + kvm_queue_exception(vcpu, vec);
1367 + return 1;
1368 + case BP_VECTOR:
1369 ++ /*
1370 ++ * Update instruction length as we may reinject the exception
1371 ++ * from user space while in guest debugging mode.
1372 ++ */
1373 ++ to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
1374 ++ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1375 + if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
1376 + return 0;
1377 + /* fall through */
1378 +@@ -2841,6 +2853,13 @@ static int handle_exception(struct kvm_vcpu *vcpu)
1379 + kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
1380 + /* fall through */
1381 + case BP_VECTOR:
1382 ++ /*
1383 ++ * Update instruction length as we may reinject #BP from
1384 ++ * user space while in guest debugging mode. Reading it for
1385 ++ * #DB as well causes no harm, it is not used in that case.
1386 ++ */
1387 ++ vmx->vcpu.arch.event_exit_inst_len =
1388 ++ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1389 + kvm_run->exit_reason = KVM_EXIT_DEBUG;
1390 + kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
1391 + kvm_run->debug.arch.exception = ex_no;
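
vmx_get_rflags()/vmx_set_rflags() above keep IOPL and VM in a full saved copy of rflags rather than a saved IOPL field, merging live and saved bits through RMODE_GUEST_OWNED_EFLAGS_BITS. The masked merge on sample values:

#include <stdio.h>

#define X86_EFLAGS_VM   (1ul << 17)
#define X86_EFLAGS_IOPL (3ul << 12)
#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

int main(void)
{
    unsigned long live  = 0x23202;  /* VM and IOPL=3 forced on for vm86 mode */
    unsigned long saved = 0x00202;  /* what the guest last set */
    unsigned long rflags;

    rflags  = live  & RMODE_GUEST_OWNED_EFLAGS_BITS;   /* guest-owned bits: live */
    rflags |= saved & ~RMODE_GUEST_OWNED_EFLAGS_BITS;  /* IOPL/VM: saved copy */

    printf("guest sees rflags = 0x%lx\n", rflags);     /* 0x202: IOPL=0, VM clear */
    return 0;
}
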
1392 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1393 +index e900908..dd78927 100644
1394 +--- a/arch/x86/kvm/x86.c
1395 ++++ b/arch/x86/kvm/x86.c
1396 +@@ -384,21 +384,16 @@ out:
1397 + void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1398 + {
1399 + if (cr0 & CR0_RESERVED_BITS) {
1400 +- printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
1401 +- cr0, vcpu->arch.cr0);
1402 + kvm_inject_gp(vcpu, 0);
1403 + return;
1404 + }
1405 +
1406 + if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
1407 +- printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
1408 + kvm_inject_gp(vcpu, 0);
1409 + return;
1410 + }
1411 +
1412 + if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
1413 +- printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
1414 +- "and a clear PE flag\n");
1415 + kvm_inject_gp(vcpu, 0);
1416 + return;
1417 + }
1418 +@@ -409,15 +404,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1419 + int cs_db, cs_l;
1420 +
1421 + if (!is_pae(vcpu)) {
1422 +- printk(KERN_DEBUG "set_cr0: #GP, start paging "
1423 +- "in long mode while PAE is disabled\n");
1424 + kvm_inject_gp(vcpu, 0);
1425 + return;
1426 + }
1427 + kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
1428 + if (cs_l) {
1429 +- printk(KERN_DEBUG "set_cr0: #GP, start paging "
1430 +- "in long mode while CS.L == 1\n");
1431 + kvm_inject_gp(vcpu, 0);
1432 + return;
1433 +
1434 +@@ -425,8 +416,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1435 + } else
1436 + #endif
1437 + if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
1438 +- printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
1439 +- "reserved bits\n");
1440 + kvm_inject_gp(vcpu, 0);
1441 + return;
1442 + }
1443 +@@ -453,28 +442,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1444 + unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
1445 +
1446 + if (cr4 & CR4_RESERVED_BITS) {
1447 +- printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
1448 + kvm_inject_gp(vcpu, 0);
1449 + return;
1450 + }
1451 +
1452 + if (is_long_mode(vcpu)) {
1453 + if (!(cr4 & X86_CR4_PAE)) {
1454 +- printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
1455 +- "in long mode\n");
1456 + kvm_inject_gp(vcpu, 0);
1457 + return;
1458 + }
1459 + } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
1460 + && ((cr4 ^ old_cr4) & pdptr_bits)
1461 + && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
1462 +- printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
1463 + kvm_inject_gp(vcpu, 0);
1464 + return;
1465 + }
1466 +
1467 + if (cr4 & X86_CR4_VMXE) {
1468 +- printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
1469 + kvm_inject_gp(vcpu, 0);
1470 + return;
1471 + }
1472 +@@ -495,21 +479,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1473 +
1474 + if (is_long_mode(vcpu)) {
1475 + if (cr3 & CR3_L_MODE_RESERVED_BITS) {
1476 +- printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
1477 + kvm_inject_gp(vcpu, 0);
1478 + return;
1479 + }
1480 + } else {
1481 + if (is_pae(vcpu)) {
1482 + if (cr3 & CR3_PAE_RESERVED_BITS) {
1483 +- printk(KERN_DEBUG
1484 +- "set_cr3: #GP, reserved bits\n");
1485 + kvm_inject_gp(vcpu, 0);
1486 + return;
1487 + }
1488 + if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
1489 +- printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
1490 +- "reserved bits\n");
1491 + kvm_inject_gp(vcpu, 0);
1492 + return;
1493 + }
1494 +@@ -541,7 +520,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
1495 + void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
1496 + {
1497 + if (cr8 & CR8_RESERVED_BITS) {
1498 +- printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
1499 + kvm_inject_gp(vcpu, 0);
1500 + return;
1501 + }
1502 +@@ -595,15 +573,12 @@ static u32 emulated_msrs[] = {
1503 + static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
1504 + {
1505 + if (efer & efer_reserved_bits) {
1506 +- printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
1507 +- efer);
1508 + kvm_inject_gp(vcpu, 0);
1509 + return;
1510 + }
1511 +
1512 + if (is_paging(vcpu)
1513 + && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
1514 +- printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
1515 + kvm_inject_gp(vcpu, 0);
1516 + return;
1517 + }
1518 +@@ -613,7 +588,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
1519 +
1520 + feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
1521 + if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
1522 +- printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
1523 + kvm_inject_gp(vcpu, 0);
1524 + return;
1525 + }
1526 +@@ -624,7 +598,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
1527 +
1528 + feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
1529 + if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
1530 +- printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
1531 + kvm_inject_gp(vcpu, 0);
1532 + return;
1533 + }
1534 +@@ -913,9 +886,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1535 + if (msr >= MSR_IA32_MC0_CTL &&
1536 + msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1537 + u32 offset = msr - MSR_IA32_MC0_CTL;
1538 +- /* only 0 or all 1s can be written to IA32_MCi_CTL */
1539 ++ /* only 0 or all 1s can be written to IA32_MCi_CTL
1540 ++ * some Linux kernels though clear bit 10 in bank 4 to
1541 ++ * workaround a BIOS/GART TBL issue on AMD K8s, ignore
1542 ++ * this to avoid an uncatched #GP in the guest
1543 ++ */
1544 + if ((offset & 0x3) == 0 &&
1545 +- data != 0 && data != ~(u64)0)
1546 ++ data != 0 && (data | (1 << 10)) != ~(u64)0)
1547 + return -1;
1548 + vcpu->arch.mce_banks[offset] = data;
1549 + break;
1550 +@@ -2366,7 +2343,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1551 + struct kvm_dirty_log *log)
1552 + {
1553 + int r;
1554 +- int n;
1555 ++ unsigned long n;
1556 + struct kvm_memory_slot *memslot;
1557 + int is_dirty = 0;
1558 +
1559 +@@ -2382,7 +2359,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1560 + kvm_mmu_slot_remove_write_access(kvm, log->slot);
1561 + spin_unlock(&kvm->mmu_lock);
1562 + memslot = &kvm->memslots[log->slot];
1563 +- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1564 ++ n = kvm_dirty_bitmap_bytes(memslot);
1565 + memset(memslot->dirty_bitmap, 0, n);
1566 + }
1567 + r = 0;
1568 +@@ -4599,6 +4576,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
1569 + int ret = 0;
1570 + u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
1571 + u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
1572 ++ u32 desc_limit;
1573 +
1574 + old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
1575 +
1576 +@@ -4621,7 +4599,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
1577 + }
1578 + }
1579 +
1580 +- if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
1581 ++ desc_limit = get_desc_limit(&nseg_desc);
1582 ++ if (!nseg_desc.p ||
1583 ++ ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
1584 ++ desc_limit < 0x2b)) {
1585 + kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
1586 + return 1;
1587 + }
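
The relaxed IA32_MCi_CTL check still demands all zeros or all ones, but tolerates a cleared bit 10, which Linux guests clear in bank 4 to work around the K8 GART TLB-walk errata. The predicate, pulled out for a quick test:

#include <stdio.h>
#include <stdint.h>

static int mci_ctl_write_ok(uint64_t data)
{
    /* accept 0, all ones, or all ones except bit 10 */
    return data == 0 || (data | (1ull << 10)) == ~(uint64_t)0;
}

int main(void)
{
    printf("%d\n", mci_ctl_write_ok(0));                          /* 1 */
    printf("%d\n", mci_ctl_write_ok(~(uint64_t)0));               /* 1 */
    printf("%d\n", mci_ctl_write_ok(~(uint64_t)0 ^ (1ull<<10)));  /* 1: bit 10 clear */
    printf("%d\n", mci_ctl_write_ok(0x1234));                     /* 0 */
    return 0;
}
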
1588 +diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
1589 +index cffd754..ddef409 100644
1590 +--- a/arch/x86/lib/Makefile
1591 ++++ b/arch/x86/lib/Makefile
1592 +@@ -14,7 +14,7 @@ $(obj)/inat.o: $(obj)/inat-tables.c
1593 +
1594 + clean-files := inat-tables.c
1595 +
1596 +-obj-$(CONFIG_SMP) += msr-smp.o
1597 ++obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
1598 +
1599 + lib-y := delay.o
1600 + lib-y += thunk_$(BITS).o
1601 +@@ -39,4 +39,5 @@ else
1602 + lib-y += thunk_64.o clear_page_64.o copy_page_64.o
1603 + lib-y += memmove_64.o memset_64.o
1604 + lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
1605 ++ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o
1606 + endif
1607 +diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
1608 +new file mode 100644
1609 +index 0000000..a3c6688
1610 +--- /dev/null
1611 ++++ b/arch/x86/lib/cache-smp.c
1612 +@@ -0,0 +1,19 @@
1613 ++#include <linux/smp.h>
1614 ++#include <linux/module.h>
1615 ++
1616 ++static void __wbinvd(void *dummy)
1617 ++{
1618 ++ wbinvd();
1619 ++}
1620 ++
1621 ++void wbinvd_on_cpu(int cpu)
1622 ++{
1623 ++ smp_call_function_single(cpu, __wbinvd, NULL, 1);
1624 ++}
1625 ++EXPORT_SYMBOL(wbinvd_on_cpu);
1626 ++
1627 ++int wbinvd_on_all_cpus(void)
1628 ++{
1629 ++ return on_each_cpu(__wbinvd, NULL, 1);
1630 ++}
1631 ++EXPORT_SYMBOL(wbinvd_on_all_cpus);
1632 +diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
1633 +new file mode 100644
1634 +index 0000000..15acecf
1635 +--- /dev/null
1636 ++++ b/arch/x86/lib/rwsem_64.S
1637 +@@ -0,0 +1,81 @@
1638 ++/*
1639 ++ * x86-64 rwsem wrappers
1640 ++ *
1641 ++ * This interfaces the inline asm code to the slow-path
1642 ++ * C routines. We need to save the call-clobbered regs
1643 ++ * that the asm does not mark as clobbered, and move the
1644 ++ * argument from %rax to %rdi.
1645 ++ *
1646 ++ * NOTE! We don't need to save %rax, because the functions
1647 ++ * will always return the semaphore pointer in %rax (which
1648 ++ * is also the input argument to these helpers)
1649 ++ *
1650 ++ * The following can clobber %rdx because the asm clobbers it:
1651 ++ * call_rwsem_down_write_failed
1652 ++ * call_rwsem_wake
1653 ++ * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
1654 ++ */
1655 ++
1656 ++#include <linux/linkage.h>
1657 ++#include <asm/rwlock.h>
1658 ++#include <asm/alternative-asm.h>
1659 ++#include <asm/frame.h>
1660 ++#include <asm/dwarf2.h>
1661 ++
1662 ++#define save_common_regs \
1663 ++ pushq %rdi; \
1664 ++ pushq %rsi; \
1665 ++ pushq %rcx; \
1666 ++ pushq %r8; \
1667 ++ pushq %r9; \
1668 ++ pushq %r10; \
1669 ++ pushq %r11
1670 ++
1671 ++#define restore_common_regs \
1672 ++ popq %r11; \
1673 ++ popq %r10; \
1674 ++ popq %r9; \
1675 ++ popq %r8; \
1676 ++ popq %rcx; \
1677 ++ popq %rsi; \
1678 ++ popq %rdi
1679 ++
1680 ++/* Fix up special calling conventions */
1681 ++ENTRY(call_rwsem_down_read_failed)
1682 ++ save_common_regs
1683 ++ pushq %rdx
1684 ++ movq %rax,%rdi
1685 ++ call rwsem_down_read_failed
1686 ++ popq %rdx
1687 ++ restore_common_regs
1688 ++ ret
1689 ++ ENDPROC(call_rwsem_down_read_failed)
1690 ++
1691 ++ENTRY(call_rwsem_down_write_failed)
1692 ++ save_common_regs
1693 ++ movq %rax,%rdi
1694 ++ call rwsem_down_write_failed
1695 ++ restore_common_regs
1696 ++ ret
1697 ++ ENDPROC(call_rwsem_down_write_failed)
1698 ++
1699 ++ENTRY(call_rwsem_wake)
1700 ++ decw %dx /* do nothing if still outstanding active readers */
1701 ++ jnz 1f
1702 ++ save_common_regs
1703 ++ movq %rax,%rdi
1704 ++ call rwsem_wake
1705 ++ restore_common_regs
1706 ++1: ret
1707 ++ ENDPROC(call_rwsem_wake)
1708 ++
1709 ++/* Fix up special calling conventions */
1710 ++ENTRY(call_rwsem_downgrade_wake)
1711 ++ save_common_regs
1712 ++ pushq %rdx
1713 ++ movq %rax,%rdi
1714 ++ call rwsem_downgrade_wake
1715 ++ popq %rdx
1716 ++ restore_common_regs
1717 ++ ret
1718 ++ ENDPROC(call_rwsem_downgrade_wake)
1719 +diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
1720 +index 0696d50..b02f6d8 100644
1721 +--- a/arch/x86/pci/irq.c
1722 ++++ b/arch/x86/pci/irq.c
1723 +@@ -590,6 +590,8 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
1724 + case PCI_DEVICE_ID_INTEL_ICH10_1:
1725 + case PCI_DEVICE_ID_INTEL_ICH10_2:
1726 + case PCI_DEVICE_ID_INTEL_ICH10_3:
1727 ++ case PCI_DEVICE_ID_INTEL_CPT_LPC1:
1728 ++ case PCI_DEVICE_ID_INTEL_CPT_LPC2:
1729 + r->name = "PIIX/ICH";
1730 + r->get = pirq_piix_get;
1731 + r->set = pirq_piix_set;
1732 +diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
1733 +index b641388..ad47dae 100644
1734 +--- a/arch/x86/power/hibernate_asm_32.S
1735 ++++ b/arch/x86/power/hibernate_asm_32.S
1736 +@@ -27,10 +27,17 @@ ENTRY(swsusp_arch_suspend)
1737 + ret
1738 +
1739 + ENTRY(restore_image)
1740 ++ movl mmu_cr4_features, %ecx
1741 + movl resume_pg_dir, %eax
1742 + subl $__PAGE_OFFSET, %eax
1743 + movl %eax, %cr3
1744 +
1745 ++ jecxz 1f # cr4 Pentium and higher, skip if zero
1746 ++ andl $~(X86_CR4_PGE), %ecx
1747 ++ movl %ecx, %cr4; # turn off PGE
1748 ++ movl %cr3, %eax; # flush TLB
1749 ++ movl %eax, %cr3
1750 ++1:
1751 + movl restore_pblist, %edx
1752 + .p2align 4,,7
1753 +
1754 +@@ -54,16 +61,8 @@ done:
1755 + movl $swapper_pg_dir, %eax
1756 + subl $__PAGE_OFFSET, %eax
1757 + movl %eax, %cr3
1758 +- /* Flush TLB, including "global" things (vmalloc) */
1759 + movl mmu_cr4_features, %ecx
1760 + jecxz 1f # cr4 Pentium and higher, skip if zero
1761 +- movl %ecx, %edx
1762 +- andl $~(X86_CR4_PGE), %edx
1763 +- movl %edx, %cr4; # turn off PGE
1764 +-1:
1765 +- movl %cr3, %eax; # flush TLB
1766 +- movl %eax, %cr3
1767 +- jecxz 1f # cr4 Pentium and higher, skip if zero
1768 + movl %ecx, %cr4; # turn PGE back on
1769 + 1:
1770 +
1771 +diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
1772 +index 52fec07..83b6252 100644
1773 +--- a/drivers/acpi/acpica/exprep.c
1774 ++++ b/drivers/acpi/acpica/exprep.c
1775 +@@ -468,6 +468,23 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
1776 +
1777 + acpi_ut_add_reference(obj_desc->field.region_obj);
1778 +
1779 ++ /* allow full data read from EC address space */
1780 ++ if (obj_desc->field.region_obj->region.space_id ==
1781 ++ ACPI_ADR_SPACE_EC) {
1782 ++ if (obj_desc->common_field.bit_length > 8) {
1783 ++ unsigned width =
1784 ++ ACPI_ROUND_BITS_UP_TO_BYTES(
1785 ++ obj_desc->common_field.bit_length);
1786 ++ // access_bit_width is u8, don't overflow it
1787 ++ if (width > 8)
1788 ++ width = 8;
1789 ++ obj_desc->common_field.access_byte_width =
1790 ++ width;
1791 ++ obj_desc->common_field.access_bit_width =
1792 ++ 8 * width;
1793 ++ }
1794 ++ }
1795 ++
1796 + ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
1797 + "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
1798 + obj_desc->field.start_field_bit_offset,
1799 +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
1800 +index d6471bb..fc67d11 100644
1801 +--- a/drivers/acpi/ec.c
1802 ++++ b/drivers/acpi/ec.c
1803 +@@ -589,12 +589,12 @@ static u32 acpi_ec_gpe_handler(void *data)
1804 +
1805 + static acpi_status
1806 + acpi_ec_space_handler(u32 function, acpi_physical_address address,
1807 +- u32 bits, acpi_integer *value,
1808 ++ u32 bits, acpi_integer *value64,
1809 + void *handler_context, void *region_context)
1810 + {
1811 + struct acpi_ec *ec = handler_context;
1812 +- int result = 0, i;
1813 +- u8 temp = 0;
1814 ++ int result = 0, i, bytes = bits / 8;
1815 ++ u8 *value = (u8 *)value64;
1816 +
1817 + if ((address > 0xFF) || !value || !handler_context)
1818 + return AE_BAD_PARAMETER;
1819 +@@ -602,32 +602,15 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
1820 + if (function != ACPI_READ && function != ACPI_WRITE)
1821 + return AE_BAD_PARAMETER;
1822 +
1823 +- if (bits != 8 && acpi_strict)
1824 +- return AE_BAD_PARAMETER;
1825 +-
1826 +- if (EC_FLAGS_MSI)
1827 ++ if (EC_FLAGS_MSI || bits > 8)
1828 + acpi_ec_burst_enable(ec);
1829 +
1830 +- if (function == ACPI_READ) {
1831 +- result = acpi_ec_read(ec, address, &temp);
1832 +- *value = temp;
1833 +- } else {
1834 +- temp = 0xff & (*value);
1835 +- result = acpi_ec_write(ec, address, temp);
1836 +- }
1837 +-
1838 +- for (i = 8; unlikely(bits - i > 0); i += 8) {
1839 +- ++address;
1840 +- if (function == ACPI_READ) {
1841 +- result = acpi_ec_read(ec, address, &temp);
1842 +- (*value) |= ((acpi_integer)temp) << i;
1843 +- } else {
1844 +- temp = 0xff & ((*value) >> i);
1845 +- result = acpi_ec_write(ec, address, temp);
1846 +- }
1847 +- }
1848 ++ for (i = 0; i < bytes; ++i, ++address, ++value)
1849 ++ result = (function == ACPI_READ) ?
1850 ++ acpi_ec_read(ec, address, value) :
1851 ++ acpi_ec_write(ec, address, *value);
1852 +
1853 +- if (EC_FLAGS_MSI)
1854 ++ if (EC_FLAGS_MSI || bits > 8)
1855 + acpi_ec_burst_disable(ec);
1856 +
1857 + switch (result) {
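
The rewritten handler reinterprets the 64-bit value64 as a byte buffer and transfers bits/8 bytes one EC register at a time; on the little-endian machines this driver targets, storing byte i at value[i] is equivalent to the old shift-based accumulation. A stand-alone sketch of that equivalence, with hypothetical data:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t bytes[4] = { 0x11, 0x22, 0x33, 0x44 };  /* hypothetical EC data */
    uint64_t shifted = 0, raw = 0;
    uint8_t *p = (uint8_t *)&raw;
    int i;

    for (i = 0; i < 4; i++) {
        shifted |= (uint64_t)bytes[i] << (8 * i);  /* old accumulation */
        p[i] = bytes[i];                           /* new byte-buffer view */
    }
    /* Both print 0x44332211 on a little-endian host such as x86. */
    printf("%#llx %#llx\n", (unsigned long long)shifted,
           (unsigned long long)raw);
    return 0;
}
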
1858 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
1859 +index 9e2feb6..462200d 100644
1860 +--- a/drivers/ata/ahci.c
1861 ++++ b/drivers/ata/ahci.c
1862 +@@ -570,6 +570,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
1863 + { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
1864 + { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
1865 + { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
1866 ++ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
1867 ++ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
1868 ++ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
1869 ++ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
1870 ++ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
1871 ++ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
1872 +
1873 + /* JMicron 360/1/3/5/6, match class to avoid IDE function */
1874 + { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
1875 +diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
1876 +index 6f3f225..b5f614b 100644
1877 +--- a/drivers/ata/ata_piix.c
1878 ++++ b/drivers/ata/ata_piix.c
1879 +@@ -291,6 +291,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
1880 + { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
1881 + /* SATA Controller IDE (PCH) */
1882 + { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
1883 ++ /* SATA Controller IDE (CPT) */
1884 ++ { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
1885 ++ /* SATA Controller IDE (CPT) */
1886 ++ { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
1887 ++ /* SATA Controller IDE (CPT) */
1888 ++ { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
1889 ++ /* SATA Controller IDE (CPT) */
1890 ++ { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
1891 + { } /* terminate list */
1892 + };
1893 +
1894 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1895 +index 6728328..2401c9c 100644
1896 +--- a/drivers/ata/libata-core.c
1897 ++++ b/drivers/ata/libata-core.c
1898 +@@ -4348,6 +4348,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
1899 + { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
1900 + { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
1901 +
1902 ++ /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
1903 ++ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
1904 ++
1905 + /* devices which puke on READ_NATIVE_MAX */
1906 + { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
1907 + { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
1908 +diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
1909 +index be7c395..ad64750 100644
1910 +--- a/drivers/ata/pata_via.c
1911 ++++ b/drivers/ata/pata_via.c
1912 +@@ -697,6 +697,7 @@ static const struct pci_device_id via[] = {
1913 + { PCI_VDEVICE(VIA, 0x3164), },
1914 + { PCI_VDEVICE(VIA, 0x5324), },
1915 + { PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE },
1916 ++ { PCI_VDEVICE(VIA, 0x9001), VIA_IDFLAG_SINGLE },
1917 +
1918 + { },
1919 + };
1920 +diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
1921 +index 3999a5f..8a713f1 100644
1922 +--- a/drivers/char/agp/intel-agp.c
1923 ++++ b/drivers/char/agp/intel-agp.c
1924 +@@ -8,6 +8,7 @@
1925 + #include <linux/kernel.h>
1926 + #include <linux/pagemap.h>
1927 + #include <linux/agp_backend.h>
1928 ++#include <asm/smp.h>
1929 + #include "agp.h"
1930 +
1931 + /*
1932 +@@ -815,12 +816,6 @@ static void intel_i830_setup_flush(void)
1933 + intel_i830_fini_flush();
1934 + }
1935 +
1936 +-static void
1937 +-do_wbinvd(void *null)
1938 +-{
1939 +- wbinvd();
1940 +-}
1941 +-
1942 + /* The chipset_flush interface needs to get data that has already been
1943 + * flushed out of the CPU all the way out to main memory, because the GPU
1944 + * doesn't snoop those buffers.
1945 +@@ -837,12 +832,10 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
1946 +
1947 + memset(pg, 0, 1024);
1948 +
1949 +- if (cpu_has_clflush) {
1950 ++ if (cpu_has_clflush)
1951 + clflush_cache_range(pg, 1024);
1952 +- } else {
1953 +- if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
1954 +- printk(KERN_ERR "Timed out waiting for cache flush.\n");
1955 +- }
1956 ++ else if (wbinvd_on_all_cpus() != 0)
1957 ++ printk(KERN_ERR "Timed out waiting for cache flush.\n");
1958 + }
1959 +
1960 + /* The intel i830 automatically initializes the agp aperture during POST.
1961 +diff --git a/drivers/char/raw.c b/drivers/char/raw.c
1962 +index 64acd05..9abc3a1 100644
1963 +--- a/drivers/char/raw.c
1964 ++++ b/drivers/char/raw.c
1965 +@@ -247,6 +247,7 @@ static const struct file_operations raw_fops = {
1966 + .aio_read = generic_file_aio_read,
1967 + .write = do_sync_write,
1968 + .aio_write = blkdev_aio_write,
1969 ++ .fsync = block_fsync,
1970 + .open = raw_open,
1971 + .release= raw_release,
1972 + .ioctl = raw_ioctl,
1973 +diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
1974 +index dcb9083..76253cf 100644
1975 +--- a/drivers/char/tty_io.c
1976 ++++ b/drivers/char/tty_io.c
1977 +@@ -1423,6 +1423,8 @@ static void release_one_tty(struct work_struct *work)
1978 + list_del_init(&tty->tty_files);
1979 + file_list_unlock();
1980 +
1981 ++ put_pid(tty->pgrp);
1982 ++ put_pid(tty->session);
1983 + free_tty_struct(tty);
1984 + }
1985 +
1986 +diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
1987 +index 7d0f00a..99907c3 100644
1988 +--- a/drivers/gpu/drm/drm_crtc_helper.c
1989 ++++ b/drivers/gpu/drm/drm_crtc_helper.c
1990 +@@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
1991 + if (connector->status == connector_status_disconnected) {
1992 + DRM_DEBUG_KMS("%s is disconnected\n",
1993 + drm_get_connector_name(connector));
1994 ++ drm_mode_connector_update_edid_property(connector, NULL);
1995 + goto prune;
1996 + }
1997 +
1998 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
1999 +index ab6c973..bfd0e4a 100644
2000 +--- a/drivers/gpu/drm/drm_edid.c
2001 ++++ b/drivers/gpu/drm/drm_edid.c
2002 +@@ -85,6 +85,8 @@ static struct edid_quirk {
2003 +
2004 + /* Envision Peripherals, Inc. EN-7100e */
2005 + { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
2006 ++ /* Envision EN2028 */
2007 ++ { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
2008 +
2009 + /* Funai Electronics PM36B */
2010 + { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
2011 +@@ -707,15 +709,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
2012 + mode->vsync_end = mode->vsync_start + vsync_pulse_width;
2013 + mode->vtotal = mode->vdisplay + vblank;
2014 +
2015 +- /* perform the basic check for the detailed timing */
2016 +- if (mode->hsync_end > mode->htotal ||
2017 +- mode->vsync_end > mode->vtotal) {
2018 +- drm_mode_destroy(dev, mode);
2019 +- DRM_DEBUG_KMS("Incorrect detailed timing. "
2020 +- "Sync is beyond the blank.\n");
2021 +- return NULL;
2022 +- }
2023 +-
2024 + /* Some EDIDs have bogus h/vtotal values */
2025 + if (mode->hsync_end > mode->htotal)
2026 + mode->htotal = mode->hsync_end + 1;
2027 +diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
2028 +index 08d14df..4804872 100644
2029 +--- a/drivers/gpu/drm/drm_fops.c
2030 ++++ b/drivers/gpu/drm/drm_fops.c
2031 +@@ -140,14 +140,16 @@ int drm_open(struct inode *inode, struct file *filp)
2032 + spin_unlock(&dev->count_lock);
2033 + }
2034 + out:
2035 +- mutex_lock(&dev->struct_mutex);
2036 +- if (minor->type == DRM_MINOR_LEGACY) {
2037 +- BUG_ON((dev->dev_mapping != NULL) &&
2038 +- (dev->dev_mapping != inode->i_mapping));
2039 +- if (dev->dev_mapping == NULL)
2040 +- dev->dev_mapping = inode->i_mapping;
2041 ++ if (!retcode) {
2042 ++ mutex_lock(&dev->struct_mutex);
2043 ++ if (minor->type == DRM_MINOR_LEGACY) {
2044 ++ if (dev->dev_mapping == NULL)
2045 ++ dev->dev_mapping = inode->i_mapping;
2046 ++ else if (dev->dev_mapping != inode->i_mapping)
2047 ++ retcode = -ENODEV;
2048 ++ }
2049 ++ mutex_unlock(&dev->struct_mutex);
2050 + }
2051 +- mutex_unlock(&dev->struct_mutex);
2052 +
2053 + return retcode;
2054 + }
2055 +diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
2056 +index 93031a7..1238bc9 100644
2057 +--- a/drivers/gpu/drm/i915/intel_lvds.c
2058 ++++ b/drivers/gpu/drm/i915/intel_lvds.c
2059 +@@ -899,6 +899,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
2060 + DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
2061 + },
2062 + },
2063 ++ {
2064 ++ .callback = intel_no_lvds_dmi_callback,
2065 ++ .ident = "Clientron U800",
2066 ++ .matches = {
2067 ++ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
2068 ++ DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
2069 ++ },
2070 ++ },
2071 +
2072 + { } /* terminating entry */
2073 + };
2074 +diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
2075 +index d75788f..b1f929d 100644
2076 +--- a/drivers/gpu/drm/radeon/atom.c
2077 ++++ b/drivers/gpu/drm/radeon/atom.c
2078 +@@ -881,11 +881,16 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
2079 + uint8_t attr = U8((*ptr)++), shift;
2080 + uint32_t saved, dst;
2081 + int dptr = *ptr;
2082 ++ uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
2083 + SDEBUG(" dst: ");
2084 + dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
2085 ++ /* op needs the full dst value */
2086 ++ dst = saved;
2087 + shift = atom_get_src(ctx, attr, ptr);
2088 + SDEBUG(" shift: %d\n", shift);
2089 + dst <<= shift;
2090 ++ dst &= atom_arg_mask[dst_align];
2091 ++ dst >>= atom_arg_shift[dst_align];
2092 + SDEBUG(" dst: ");
2093 + atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
2094 + }
2095 +@@ -895,11 +900,16 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
2096 + uint8_t attr = U8((*ptr)++), shift;
2097 + uint32_t saved, dst;
2098 + int dptr = *ptr;
2099 ++ uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
2100 + SDEBUG(" dst: ");
2101 + dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
2102 ++ /* op needs the full dst value */
2103 ++ dst = saved;
2104 + shift = atom_get_src(ctx, attr, ptr);
2105 + SDEBUG(" shift: %d\n", shift);
2106 + dst >>= shift;
2107 ++ dst &= atom_arg_mask[dst_align];
2108 ++ dst >>= atom_arg_shift[dst_align];
2109 + SDEBUG(" dst: ");
2110 + atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
2111 + }
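
Both shl and shr now shift the full saved register value and then re-extract the destination field through the alignment mask and shift, instead of shifting the already-extracted field. A stand-alone sketch of that sequence, with hypothetical tables standing in for atom_arg_mask[] and atom_arg_shift[]:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins: field value = (reg & mask) >> shift
 * for a given destination alignment. */
static const uint32_t arg_mask[]  = { 0xffffffff, 0x000000ff, 0x00ff0000 };
static const uint32_t arg_shift[] = { 0, 0, 16 };

int main(void)
{
    uint32_t saved = 0x00ab0000;  /* full register; field in bits 16..23 */
    int align = 2, shift = 4;

    uint32_t dst = saved;         /* operate on the full dst value ... */
    dst <<= shift;
    dst &= arg_mask[align];       /* ... then re-extract the field */
    dst >>= arg_shift[align];
    printf("%#x\n", dst);         /* 0xb0 */
    return 0;
}
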
2112 +diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
2113 +index 43b55a0..5bdfaf2 100644
2114 +--- a/drivers/gpu/drm/radeon/r300.c
2115 ++++ b/drivers/gpu/drm/radeon/r300.c
2116 +@@ -364,11 +364,12 @@ void r300_gpu_init(struct radeon_device *rdev)
2117 +
2118 + r100_hdp_reset(rdev);
2119 + /* FIXME: rv380 one pipes ? */
2120 +- if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
2121 ++ if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
2122 ++ (rdev->family == CHIP_R350)) {
2123 + /* r300,r350 */
2124 + rdev->num_gb_pipes = 2;
2125 + } else {
2126 +- /* rv350,rv370,rv380 */
2127 ++ /* rv350,rv370,rv380,r300 AD */
2128 + rdev->num_gb_pipes = 1;
2129 + }
2130 + rdev->num_z_pipes = 1;
2131 +diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
2132 +index e7b1944..81b832e 100644
2133 +--- a/drivers/gpu/drm/radeon/radeon_combios.c
2134 ++++ b/drivers/gpu/drm/radeon/radeon_combios.c
2135 +@@ -670,7 +670,9 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
2136 + dac = RBIOS8(dac_info + 0x3) & 0xf;
2137 + p_dac->ps2_pdac_adj = (bg << 8) | (dac);
2138 + }
2139 +- found = 1;
2140 ++ /* if the values are all zeros, use the table */
2141 ++ if (p_dac->ps2_pdac_adj)
2142 ++ found = 1;
2143 + }
2144 +
2145 + out:
2146 +@@ -812,7 +814,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
2147 + bg = RBIOS8(dac_info + 0x10) & 0xf;
2148 + dac = RBIOS8(dac_info + 0x11) & 0xf;
2149 + tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
2150 +- found = 1;
2151 ++ /* if the values are all zeros, use the table */
2152 ++ if (tv_dac->ps2_tvdac_adj)
2153 ++ found = 1;
2154 + } else if (rev > 1) {
2155 + bg = RBIOS8(dac_info + 0xc) & 0xf;
2156 + dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
2157 +@@ -825,7 +829,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
2158 + bg = RBIOS8(dac_info + 0xe) & 0xf;
2159 + dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
2160 + tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
2161 +- found = 1;
2162 ++ /* if the values are all zeros, use the table */
2163 ++ if (tv_dac->ps2_tvdac_adj)
2164 ++ found = 1;
2165 + }
2166 + tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
2167 + }
2168 +@@ -842,7 +848,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
2169 + (bg << 16) | (dac << 20);
2170 + tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
2171 + tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
2172 +- found = 1;
2173 ++ /* if the values are all zeros, use the table */
2174 ++ if (tv_dac->ps2_tvdac_adj)
2175 ++ found = 1;
2176 + } else {
2177 + bg = RBIOS8(dac_info + 0x4) & 0xf;
2178 + dac = RBIOS8(dac_info + 0x5) & 0xf;
2179 +@@ -850,7 +858,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
2180 + (bg << 16) | (dac << 20);
2181 + tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
2182 + tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
2183 +- found = 1;
2184 ++ /* if the values are all zeros, use the table */
2185 ++ if (tv_dac->ps2_tvdac_adj)
2186 ++ found = 1;
2187 + }
2188 + } else {
2189 + DRM_INFO("No TV DAC info found in BIOS\n");
2190 +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
2191 +index 65f8194..2bdfbcd 100644
2192 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c
2193 ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
2194 +@@ -162,12 +162,14 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
2195 + {
2196 + struct drm_device *dev = connector->dev;
2197 + struct drm_connector *conflict;
2198 ++ struct radeon_connector *radeon_conflict;
2199 + int i;
2200 +
2201 + list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
2202 + if (conflict == connector)
2203 + continue;
2204 +
2205 ++ radeon_conflict = to_radeon_connector(conflict);
2206 + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
2207 + if (conflict->encoder_ids[i] == 0)
2208 + break;
2209 +@@ -177,6 +179,9 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
2210 + if (conflict->status != connector_status_connected)
2211 + continue;
2212 +
2213 ++ if (radeon_conflict->use_digital)
2214 ++ continue;
2215 ++
2216 + if (priority == true) {
2217 + DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
2218 + DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
2219 +@@ -315,7 +320,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
2220 + radeon_encoder = to_radeon_encoder(encoder);
2221 + if (!radeon_encoder->enc_priv)
2222 + return 0;
2223 +- if (rdev->is_atom_bios) {
2224 ++ if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) {
2225 + struct radeon_encoder_atom_dac *dac_int;
2226 + dac_int = radeon_encoder->enc_priv;
2227 + dac_int->tv_std = val;
2228 +diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
2229 +index 06123ba..f129bbb 100644
2230 +--- a/drivers/gpu/drm/radeon/radeon_cp.c
2231 ++++ b/drivers/gpu/drm/radeon/radeon_cp.c
2232 +@@ -417,8 +417,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
2233 + return -EBUSY;
2234 + }
2235 +
2236 +-static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
2237 ++static void radeon_init_pipes(struct drm_device *dev)
2238 + {
2239 ++ drm_radeon_private_t *dev_priv = dev->dev_private;
2240 + uint32_t gb_tile_config, gb_pipe_sel = 0;
2241 +
2242 + if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
2243 +@@ -436,11 +437,12 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
2244 + dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
2245 + } else {
2246 + /* R3xx */
2247 +- if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
2248 ++ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
2249 ++ dev->pdev->device != 0x4144) ||
2250 + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
2251 + dev_priv->num_gb_pipes = 2;
2252 + } else {
2253 +- /* R3Vxx */
2254 ++ /* RV3xx/R300 AD */
2255 + dev_priv->num_gb_pipes = 1;
2256 + }
2257 + }
2258 +@@ -736,7 +738,7 @@ static int radeon_do_engine_reset(struct drm_device * dev)
2259 +
2260 + /* setup the raster pipes */
2261 + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
2262 +- radeon_init_pipes(dev_priv);
2263 ++ radeon_init_pipes(dev);
2264 +
2265 + /* Reset the CP ring */
2266 + radeon_do_cp_reset(dev_priv);
2267 +diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
2268 +index e9d0850..9933c2c 100644
2269 +--- a/drivers/gpu/drm/radeon/radeon_cs.c
2270 ++++ b/drivers/gpu/drm/radeon/radeon_cs.c
2271 +@@ -193,11 +193,13 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
2272 + radeon_bo_list_fence(&parser->validated, parser->ib->fence);
2273 + }
2274 + radeon_bo_list_unreserve(&parser->validated);
2275 +- for (i = 0; i < parser->nrelocs; i++) {
2276 +- if (parser->relocs[i].gobj) {
2277 +- mutex_lock(&parser->rdev->ddev->struct_mutex);
2278 +- drm_gem_object_unreference(parser->relocs[i].gobj);
2279 +- mutex_unlock(&parser->rdev->ddev->struct_mutex);
2280 ++ if (parser->relocs != NULL) {
2281 ++ for (i = 0; i < parser->nrelocs; i++) {
2282 ++ if (parser->relocs[i].gobj) {
2283 ++ mutex_lock(&parser->rdev->ddev->struct_mutex);
2284 ++ drm_gem_object_unreference(parser->relocs[i].gobj);
2285 ++ mutex_unlock(&parser->rdev->ddev->struct_mutex);
2286 ++ }
2287 + }
2288 + }
2289 + kfree(parser->track);
2290 +@@ -246,7 +248,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2291 + }
2292 + r = radeon_cs_parser_relocs(&parser);
2293 + if (r) {
2294 +- DRM_ERROR("Failed to parse relocation !\n");
2295 ++ if (r != -ERESTARTSYS)
2296 ++ DRM_ERROR("Failed to parse relocation %d!\n", r);
2297 + radeon_cs_parser_fini(&parser, r);
2298 + mutex_unlock(&rdev->cs_mutex);
2299 + return r;
2300 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2301 +index 768b150..509ba3f 100644
2302 +--- a/drivers/gpu/drm/radeon/radeon_device.c
2303 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
2304 +@@ -655,6 +655,14 @@ int radeon_device_init(struct radeon_device *rdev,
2305 + return r;
2306 + radeon_check_arguments(rdev);
2307 +
2308 ++ /* all of the newer IGP chips have an internal GART.
2309 ++ * However, some rs4xx report as AGP, so remove that here.
2310 ++ */
2311 ++ if ((rdev->family >= CHIP_RS400) &&
2312 ++ (rdev->flags & RADEON_IS_IGP)) {
2313 ++ rdev->flags &= ~RADEON_IS_AGP;
2314 ++ }
2315 ++
2316 + if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
2317 + radeon_agp_disable(rdev);
2318 + }
2319 +diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
2320 +index 3c91724..7626bd5 100644
2321 +--- a/drivers/gpu/drm/radeon/radeon_encoders.c
2322 ++++ b/drivers/gpu/drm/radeon/radeon_encoders.c
2323 +@@ -1276,8 +1276,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
2324 + case ENCODER_OBJECT_ID_INTERNAL_DAC2:
2325 + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
2326 + atombios_dac_setup(encoder, ATOM_ENABLE);
2327 +- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
2328 +- atombios_tv_setup(encoder, ATOM_ENABLE);
2329 ++ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
2330 ++ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
2331 ++ atombios_tv_setup(encoder, ATOM_ENABLE);
2332 ++ else
2333 ++ atombios_tv_setup(encoder, ATOM_DISABLE);
2334 ++ }
2335 + break;
2336 + }
2337 + atombios_apply_encoder_quirks(encoder, adjusted_mode);
2338 +diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
2339 +index 417684d..f2ed27c 100644
2340 +--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
2341 ++++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
2342 +@@ -57,6 +57,10 @@
2343 + #define NTSC_TV_PLL_N_14 693
2344 + #define NTSC_TV_PLL_P_14 7
2345 +
2346 ++#define PAL_TV_PLL_M_14 19
2347 ++#define PAL_TV_PLL_N_14 353
2348 ++#define PAL_TV_PLL_P_14 5
2349 ++
2350 + #define VERT_LEAD_IN_LINES 2
2351 + #define FRAC_BITS 0xe
2352 + #define FRAC_MASK 0x3fff
2353 +@@ -205,9 +209,24 @@ static const struct radeon_tv_mode_constants available_tv_modes[] = {
2354 + 630627, /* defRestart */
2355 + 347, /* crtcPLL_N */
2356 + 14, /* crtcPLL_M */
2357 +- 8, /* crtcPLL_postDiv */
2358 ++ 8, /* crtcPLL_postDiv */
2359 + 1022, /* pixToTV */
2360 + },
2361 ++ { /* PAL timing for 14 Mhz ref clk */
2362 ++ 800, /* horResolution */
2363 ++ 600, /* verResolution */
2364 ++ TV_STD_PAL, /* standard */
2365 ++ 1131, /* horTotal */
2366 ++ 742, /* verTotal */
2367 ++ 813, /* horStart */
2368 ++ 840, /* horSyncStart */
2369 ++ 633, /* verSyncStart */
2370 ++ 708369, /* defRestart */
2371 ++ 211, /* crtcPLL_N */
2372 ++ 9, /* crtcPLL_M */
2373 ++ 8, /* crtcPLL_postDiv */
2374 ++ 759, /* pixToTV */
2375 ++ },
2376 + };
2377 +
2378 + #define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
2379 +@@ -242,7 +261,7 @@ static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(stru
2380 + if (pll->reference_freq == 2700)
2381 + const_ptr = &available_tv_modes[1];
2382 + else
2383 +- const_ptr = &available_tv_modes[1]; /* FIX ME */
2384 ++ const_ptr = &available_tv_modes[3];
2385 + }
2386 + return const_ptr;
2387 + }
2388 +@@ -685,9 +704,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
2389 + n = PAL_TV_PLL_N_27;
2390 + p = PAL_TV_PLL_P_27;
2391 + } else {
2392 +- m = PAL_TV_PLL_M_27;
2393 +- n = PAL_TV_PLL_N_27;
2394 +- p = PAL_TV_PLL_P_27;
2395 ++ m = PAL_TV_PLL_M_14;
2396 ++ n = PAL_TV_PLL_N_14;
2397 ++ p = PAL_TV_PLL_P_14;
2398 + }
2399 + }
2400 +
2401 +diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
2402 +index c381856..a27c09f 100644
2403 +--- a/drivers/gpu/drm/radeon/rs600.c
2404 ++++ b/drivers/gpu/drm/radeon/rs600.c
2405 +@@ -175,7 +175,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
2406 + WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
2407 +
2408 + tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
2409 +- tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
2410 ++ tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
2411 + WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
2412 +
2413 + tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
2414 +diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
2415 +index cab13e8..62416e6 100644
2416 +--- a/drivers/hid/hid-gyration.c
2417 ++++ b/drivers/hid/hid-gyration.c
2418 +@@ -53,10 +53,13 @@ static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi,
2419 + static int gyration_event(struct hid_device *hdev, struct hid_field *field,
2420 + struct hid_usage *usage, __s32 value)
2421 + {
2422 +- struct input_dev *input = field->hidinput->input;
2423 ++
2424 ++ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
2425 ++ return 0;
2426 +
2427 + if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
2428 + (usage->hid & 0xff) == 0x82) {
2429 ++ struct input_dev *input = field->hidinput->input;
2430 + input_event(input, usage->type, usage->code, 1);
2431 + input_sync(input);
2432 + input_event(input, usage->type, usage->code, 0);
2433 +diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
2434 +index 864a371..fbc997e 100644
2435 +--- a/drivers/hwmon/sht15.c
2436 ++++ b/drivers/hwmon/sht15.c
2437 +@@ -302,13 +302,13 @@ error_ret:
2438 + **/
2439 + static inline int sht15_calc_temp(struct sht15_data *data)
2440 + {
2441 +- int d1 = 0;
2442 ++ int d1 = temppoints[0].d1;
2443 + int i;
2444 +
2445 +- for (i = 1; i < ARRAY_SIZE(temppoints); i++)
2446 ++ for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
2447 + /* Find pointer to interpolate */
2448 + if (data->supply_uV > temppoints[i - 1].vdd) {
2449 +- d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
2450 ++ d1 = (data->supply_uV - temppoints[i - 1].vdd)
2451 + * (temppoints[i].d1 - temppoints[i - 1].d1)
2452 + / (temppoints[i].vdd - temppoints[i - 1].vdd)
2453 + + temppoints[i - 1].d1;
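
sht15_calc_temp now walks the table from the high-voltage end and falls back to temppoints[0].d1 rather than 0 when the supply voltage is below every entry, otherwise interpolating d1 linearly between the two neighbouring table points. A stand-alone sketch with hypothetical table values and units (the real table lives in sht15.c):

#include <stdio.h>

struct point { int vdd; int d1; };  /* hypothetical units: mV, d1 * 100 */

static const struct point temppoints[] = {
    { 2500, -3940 }, { 3000, -3960 }, { 3500, -3970 }, { 4000, -3980 },
};
#define NPOINTS (int)(sizeof(temppoints) / sizeof(temppoints[0]))

static int calc_d1(int supply)
{
    int d1 = temppoints[0].d1;  /* default to the lowest entry, not 0 */
    int i;

    for (i = NPOINTS - 1; i > 0; i--)
        if (supply > temppoints[i - 1].vdd) {
            d1 = (supply - temppoints[i - 1].vdd)
                    * (temppoints[i].d1 - temppoints[i - 1].d1)
                    / (temppoints[i].vdd - temppoints[i - 1].vdd)
                    + temppoints[i - 1].d1;
            break;
        }
    return d1;
}

int main(void)
{
    /* 3250 mV interpolates to -3965; 2000 mV clamps to -3940. */
    printf("%d %d\n", calc_d1(3250), calc_d1(2000));
    return 0;
}
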
2454 +@@ -541,7 +541,12 @@ static int __devinit sht15_probe(struct platform_device *pdev)
2455 + /* If a regulator is available, query what the supply voltage actually is!*/
2456 + data->reg = regulator_get(data->dev, "vcc");
2457 + if (!IS_ERR(data->reg)) {
2458 +- data->supply_uV = regulator_get_voltage(data->reg);
2459 ++ int voltage;
2460 ++
2461 ++ voltage = regulator_get_voltage(data->reg);
2462 ++ if (voltage)
2463 ++ data->supply_uV = voltage;
2464 ++
2465 + regulator_enable(data->reg);
2466 + /* setup a notifier block to update this if another device
2467 + * causes the voltage to change */
2468 +diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
2469 +index 5f318ce..cb9f95c 100644
2470 +--- a/drivers/i2c/busses/Kconfig
2471 ++++ b/drivers/i2c/busses/Kconfig
2472 +@@ -77,7 +77,7 @@ config I2C_AMD8111
2473 + will be called i2c-amd8111.
2474 +
2475 + config I2C_I801
2476 +- tristate "Intel 82801 (ICH)"
2477 ++ tristate "Intel 82801 (ICH/PCH)"
2478 + depends on PCI
2479 + help
2480 + If you say yes to this option, support will be included for the Intel
2481 +@@ -97,7 +97,8 @@ config I2C_I801
2482 + ICH9
2483 + Tolapai
2484 + ICH10
2485 +- PCH
2486 ++ 3400/5 Series (PCH)
2487 ++ Cougar Point (PCH)
2488 +
2489 + This driver can also be built as a module. If so, the module
2490 + will be called i2c-i801.
2491 +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
2492 +index 5574be2..e361da7 100644
2493 +--- a/drivers/i2c/busses/i2c-i801.c
2494 ++++ b/drivers/i2c/busses/i2c-i801.c
2495 +@@ -41,7 +41,8 @@
2496 + Tolapai 0x5032 32 hard yes yes yes
2497 + ICH10 0x3a30 32 hard yes yes yes
2498 + ICH10 0x3a60 32 hard yes yes yes
2499 +- PCH 0x3b30 32 hard yes yes yes
2500 ++ 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes
2501 ++ Cougar Point (PCH) 0x1c22 32 hard yes yes yes
2502 +
2503 + Features supported by this driver:
2504 + Software PEC no
2505 +@@ -580,6 +581,7 @@ static struct pci_device_id i801_ids[] = {
2506 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
2507 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
2508 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
2509 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
2510 + { 0, }
2511 + };
2512 +
2513 +@@ -709,6 +711,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
2514 + case PCI_DEVICE_ID_INTEL_ICH10_4:
2515 + case PCI_DEVICE_ID_INTEL_ICH10_5:
2516 + case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
2517 ++ case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
2518 + i801_features |= FEATURE_I2C_BLOCK_READ;
2519 + /* fall through */
2520 + case PCI_DEVICE_ID_INTEL_82801DB_3:
2521 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
2522 +index 30bdf42..f8302c2 100644
2523 +--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
2524 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
2525 +@@ -752,6 +752,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
2526 + if (++priv->tx_outstanding == ipoib_sendq_size) {
2527 + ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
2528 + tx->qp->qp_num);
2529 ++ if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
2530 ++ ipoib_warn(priv, "request notify on send CQ failed\n");
2531 + netif_stop_queue(dev);
2532 + }
2533 + }
2534 +diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
2535 +index fbd3987..e8d65b3 100644
2536 +--- a/drivers/input/sparse-keymap.c
2537 ++++ b/drivers/input/sparse-keymap.c
2538 +@@ -161,7 +161,7 @@ int sparse_keymap_setup(struct input_dev *dev,
2539 + return 0;
2540 +
2541 + err_out:
2542 +- kfree(keymap);
2543 ++ kfree(map);
2544 + return error;
2545 +
2546 + }
2547 +diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
2548 +index 072f33b..e53ddc5 100644
2549 +--- a/drivers/input/tablet/wacom_sys.c
2550 ++++ b/drivers/input/tablet/wacom_sys.c
2551 +@@ -644,13 +644,15 @@ static int wacom_resume(struct usb_interface *intf)
2552 + int rv;
2553 +
2554 + mutex_lock(&wacom->lock);
2555 +- if (wacom->open) {
2556 ++
2557 ++ /* switch to wacom mode first */