Gentoo Archives: gentoo-commits

From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1663 - genpatches-2.6/trunk/2.6.32
Date: Tue, 09 Feb 2010 15:21:24
Message-Id: E1Nerte-0005ka-Iw@stork.gentoo.org
1 Author: mpagano
2 Date: 2010-02-09 15:21:02 +0000 (Tue, 09 Feb 2010)
3 New Revision: 1663
4
5 Added:
6 genpatches-2.6/trunk/2.6.32/1007_linux-2.6.32.8.patch
7 Modified:
8 genpatches-2.6/trunk/2.6.32/0000_README
9 Log:
10 Linux patch 2.6.32.8
11
12 Modified: genpatches-2.6/trunk/2.6.32/0000_README
13 ===================================================================
14 --- genpatches-2.6/trunk/2.6.32/0000_README 2010-02-03 13:38:20 UTC (rev 1662)
15 +++ genpatches-2.6/trunk/2.6.32/0000_README 2010-02-09 15:21:02 UTC (rev 1663)
16 @@ -67,6 +67,10 @@
17 From: http://www.kernel.org
18 Desc: Linux 2.6.32.7
19
20 +Patch: 1007_linux-2.6.32.8.patch
21 +From: http://www.kernel.org
22 +Desc: Linux 2.6.32.8
23 +
24 Patch: 1900_EFI-GPT-header-read-fix.patch
25 From: http://bugs.gentoo.org/show_bug.cgi?id=296915
26 Desc: Read whole sector with EFI GPT header
27
28 Added: genpatches-2.6/trunk/2.6.32/1007_linux-2.6.32.8.patch
29 ===================================================================
30 --- genpatches-2.6/trunk/2.6.32/1007_linux-2.6.32.8.patch (rev 0)
31 +++ genpatches-2.6/trunk/2.6.32/1007_linux-2.6.32.8.patch 2010-02-09 15:21:02 UTC (rev 1663)
32 @@ -0,0 +1,4704 @@
33 +diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
34 +index 5a4bc8c..db3a706 100644
35 +--- a/Documentation/kvm/api.txt
36 ++++ b/Documentation/kvm/api.txt
37 +@@ -593,6 +593,42 @@ struct kvm_irqchip {
38 + } chip;
39 + };
40 +
41 ++4.27 KVM_GET_CLOCK
42 ++
43 ++Capability: KVM_CAP_ADJUST_CLOCK
44 ++Architectures: x86
45 ++Type: vm ioctl
46 ++Parameters: struct kvm_clock_data (out)
47 ++Returns: 0 on success, -1 on error
48 ++
49 ++Gets the current timestamp of kvmclock as seen by the current guest. In
50 ++conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity in scenarios
51 ++such as migration.
52 ++
53 ++struct kvm_clock_data {
54 ++ __u64 clock; /* kvmclock current value */
55 ++ __u32 flags;
56 ++ __u32 pad[9];
57 ++};
58 ++
59 ++4.28 KVM_SET_CLOCK
60 ++
61 ++Capability: KVM_CAP_ADJUST_CLOCK
62 ++Architectures: x86
63 ++Type: vm ioctl
64 ++Parameters: struct kvm_clock_data (in)
65 ++Returns: 0 on success, -1 on error
66 ++
67 ++Sets the current timestamp of kvmclock to the value specified in its parameter.
68 ++In conjunction with KVM_GET_CLOCK, it is used to ensure monotonicity in scenarios
69 ++such as migration.
70 ++
71 ++struct kvm_clock_data {
72 ++ __u64 clock; /* kvmclock current value */
73 ++ __u32 flags;
74 ++ __u32 pad[9];
75 ++};
76 ++
77 + 5. The kvm_run structure
78 +
79 + Application code obtains a pointer to the kvm_run structure by
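
A minimal userspace sketch of how this ioctl pair is meant to be used across a
migration, where vmfd is assumed to be a VM file descriptor obtained from
KVM_CREATE_VM; the snippet is illustrative and not part of the patch:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Source host: snapshot the guest-visible kvmclock value. */
    static int save_kvmclock(int vmfd, struct kvm_clock_data *data)
    {
            return ioctl(vmfd, KVM_GET_CLOCK, data); /* 0 on success, -1 on error */
    }

    /* Destination host: restore it so guest time never jumps backwards. */
    static int restore_kvmclock(int vmfd, struct kvm_clock_data *data)
    {
            data->flags = 0;           /* KVM_SET_CLOCK rejects nonzero flags */
            return ioctl(vmfd, KVM_SET_CLOCK, data);
    }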
80 +diff --git a/arch/blackfin/include/asm/page.h b/arch/blackfin/include/asm/page.h
81 +index 944a07c..1d04e40 100644
82 +--- a/arch/blackfin/include/asm/page.h
83 ++++ b/arch/blackfin/include/asm/page.h
84 +@@ -10,4 +10,9 @@
85 + #include <asm-generic/page.h>
86 + #define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)
87 +
88 ++#define VM_DATA_DEFAULT_FLAGS \
89 ++ (VM_READ | VM_WRITE | \
90 ++ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
91 ++ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
92 ++
93 + #endif
94 +diff --git a/arch/frv/include/asm/page.h b/arch/frv/include/asm/page.h
95 +index 25c6a50..8c97068 100644
96 +--- a/arch/frv/include/asm/page.h
97 ++++ b/arch/frv/include/asm/page.h
98 +@@ -63,12 +63,10 @@ extern unsigned long max_pfn;
99 + #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
100 +
101 +
102 +-#ifdef CONFIG_MMU
103 + #define VM_DATA_DEFAULT_FLAGS \
104 + (VM_READ | VM_WRITE | \
105 + ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
106 + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
107 +-#endif
108 +
109 + #endif /* __ASSEMBLY__ */
110 +
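
Both page.h hunks give the nommu build the same VM_DATA_DEFAULT_FLAGS the MMU
build already had: data mappings are readable and writable, and gain VM_EXEC
only when the process personality requests READ_IMPLIES_EXEC. Expanded into
ordinary C for clarity (illustrative only):

    unsigned long flags = VM_READ | VM_WRITE |
                          VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

    if (current->personality & READ_IMPLIES_EXEC)
            flags |= VM_EXEC;   /* legacy binaries expect executable data */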
111 +diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
112 +index 014a624..5698502 100644
113 +--- a/arch/powerpc/include/asm/elf.h
114 ++++ b/arch/powerpc/include/asm/elf.h
115 +@@ -236,14 +236,10 @@ typedef elf_vrregset_t elf_fpxregset_t;
116 + #ifdef __powerpc64__
117 + # define SET_PERSONALITY(ex) \
118 + do { \
119 +- unsigned long new_flags = 0; \
120 + if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
121 +- new_flags = _TIF_32BIT; \
122 +- if ((current_thread_info()->flags & _TIF_32BIT) \
123 +- != new_flags) \
124 +- set_thread_flag(TIF_ABI_PENDING); \
125 ++ set_thread_flag(TIF_32BIT); \
126 + else \
127 +- clear_thread_flag(TIF_ABI_PENDING); \
128 ++ clear_thread_flag(TIF_32BIT); \
129 + if (personality(current->personality) != PER_LINUX32) \
130 + set_personality(PER_LINUX | \
131 + (current->personality & (~PER_MASK))); \
132 +diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
133 +index c8b3292..aa9d383 100644
134 +--- a/arch/powerpc/include/asm/thread_info.h
135 ++++ b/arch/powerpc/include/asm/thread_info.h
136 +@@ -111,7 +111,6 @@ static inline struct thread_info *current_thread_info(void)
137 + #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
138 + #define TIF_FREEZE 14 /* Freezing for suspend */
139 + #define TIF_RUNLATCH 15 /* Is the runlatch enabled? */
140 +-#define TIF_ABI_PENDING 16 /* 32/64 bit switch needed */
141 +
142 + /* as above, but as bit values */
143 + #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
144 +@@ -129,7 +128,6 @@ static inline struct thread_info *current_thread_info(void)
145 + #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
146 + #define _TIF_FREEZE (1<<TIF_FREEZE)
147 + #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
148 +-#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
149 + #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
150 +
151 + #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
152 +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
153 +index c930ac3..7b816da 100644
154 +--- a/arch/powerpc/kernel/process.c
155 ++++ b/arch/powerpc/kernel/process.c
156 +@@ -554,18 +554,6 @@ void exit_thread(void)
157 +
158 + void flush_thread(void)
159 + {
160 +-#ifdef CONFIG_PPC64
161 +- struct thread_info *t = current_thread_info();
162 +-
163 +- if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
164 +- clear_ti_thread_flag(t, TIF_ABI_PENDING);
165 +- if (test_ti_thread_flag(t, TIF_32BIT))
166 +- clear_ti_thread_flag(t, TIF_32BIT);
167 +- else
168 +- set_ti_thread_flag(t, TIF_32BIT);
169 +- }
170 +-#endif
171 +-
172 + discard_lazy_cpu_state();
173 +
174 + if (current->thread.dabr) {
175 +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
176 +index 48215d1..e8ef21c 100644
177 +--- a/arch/s390/kernel/entry.S
178 ++++ b/arch/s390/kernel/entry.S
179 +@@ -571,6 +571,7 @@ pgm_svcper:
180 + mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
181 + oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
182 + TRACE_IRQS_ON
183 ++ lm %r2,%r6,SP_R2(%r15) # load svc arguments
184 + stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
185 + b BASED(sysc_do_svc)
186 +
187 +diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
188 +index 9aff1d4..f33658f 100644
189 +--- a/arch/s390/kernel/entry64.S
190 ++++ b/arch/s390/kernel/entry64.S
191 +@@ -549,6 +549,7 @@ pgm_svcper:
192 + mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
193 + oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
194 + TRACE_IRQS_ON
195 ++ lmg %r2,%r6,SP_R2(%r15) # load svc arguments
196 + stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
197 + j sysc_do_svc
198 +
199 +diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
200 +index 1192398..44aa119 100644
201 +--- a/arch/sh/kernel/process_64.c
202 ++++ b/arch/sh/kernel/process_64.c
203 +@@ -367,7 +367,7 @@ void exit_thread(void)
204 + void flush_thread(void)
205 + {
206 +
207 +- /* Called by fs/exec.c (flush_old_exec) to remove traces of a
208 ++ /* Called by fs/exec.c (setup_new_exec) to remove traces of a
209 + * previously running executable. */
210 + #ifdef CONFIG_SH_FPU
211 + if (last_task_used_math == current) {
212 +diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
213 +index d42e393..9968085 100644
214 +--- a/arch/sparc/include/asm/elf_64.h
215 ++++ b/arch/sparc/include/asm/elf_64.h
216 +@@ -196,17 +196,10 @@ static inline unsigned int sparc64_elf_hwcap(void)
217 + #define ELF_PLATFORM (NULL)
218 +
219 + #define SET_PERSONALITY(ex) \
220 +-do { unsigned long new_flags = current_thread_info()->flags; \
221 +- new_flags &= _TIF_32BIT; \
222 +- if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
223 +- new_flags |= _TIF_32BIT; \
224 ++do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
225 ++ set_thread_flag(TIF_32BIT); \
226 + else \
227 +- new_flags &= ~_TIF_32BIT; \
228 +- if ((current_thread_info()->flags & _TIF_32BIT) \
229 +- != new_flags) \
230 +- set_thread_flag(TIF_ABI_PENDING); \
231 +- else \
232 +- clear_thread_flag(TIF_ABI_PENDING); \
233 ++ clear_thread_flag(TIF_32BIT); \
234 + /* flush_thread will update pgd cache */ \
235 + if (personality(current->personality) != PER_LINUX32) \
236 + set_personality(PER_LINUX | \
237 +diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
238 +index 1b45a7b..f78ad9a 100644
239 +--- a/arch/sparc/include/asm/thread_info_64.h
240 ++++ b/arch/sparc/include/asm/thread_info_64.h
241 +@@ -227,12 +227,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
242 + /* flag bit 8 is available */
243 + #define TIF_SECCOMP 9 /* secure computing */
244 + #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
245 +-/* flag bit 11 is available */
246 + /* NOTE: Thread flags >= 12 should be ones we have no interest
247 + * in using in assembly, else we can't use the mask as
248 + * an immediate value in instructions such as andcc.
249 + */
250 +-#define TIF_ABI_PENDING 12
251 ++/* flag bit 12 is available */
252 + #define TIF_MEMDIE 13
253 + #define TIF_POLLING_NRFLAG 14
254 + #define TIF_FREEZE 15 /* is freezing for suspend */
255 +@@ -246,7 +245,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
256 + #define _TIF_32BIT (1<<TIF_32BIT)
257 + #define _TIF_SECCOMP (1<<TIF_SECCOMP)
258 + #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
259 +-#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
260 + #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
261 + #define _TIF_FREEZE (1<<TIF_FREEZE)
262 +
263 +diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
264 +index 18d6785..c3f1cce 100644
265 +--- a/arch/sparc/kernel/process_64.c
266 ++++ b/arch/sparc/kernel/process_64.c
267 +@@ -365,14 +365,6 @@ void flush_thread(void)
268 + struct thread_info *t = current_thread_info();
269 + struct mm_struct *mm;
270 +
271 +- if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
272 +- clear_ti_thread_flag(t, TIF_ABI_PENDING);
273 +- if (test_ti_thread_flag(t, TIF_32BIT))
274 +- clear_ti_thread_flag(t, TIF_32BIT);
275 +- else
276 +- set_ti_thread_flag(t, TIF_32BIT);
277 +- }
278 +-
279 + mm = t->task->mm;
280 + if (mm)
281 + tsb_context_switch(mm);
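
The powerpc, sparc, and x86 hunks in this series all remove the same two-step
handshake: SET_PERSONALITY() used to record a pending ABI switch in
TIF_ABI_PENDING, and flush_thread() flipped the real 32-bit flag later. With
exec reordered around setup_new_exec(), the flag can be set directly. Reduced
to a sketch, where is_32bit_elf() is a hypothetical helper, not kernel code:

    /* Before: defer the switch and let flush_thread() resolve it. */
    if (is_32bit_elf(ex) != test_thread_flag(TIF_32BIT))
            set_thread_flag(TIF_ABI_PENDING);
    else
            clear_thread_flag(TIF_ABI_PENDING);

    /* After: set the ABI flag for the new image immediately. */
    if (is_32bit_elf(ex))
            set_thread_flag(TIF_32BIT);
    else
            clear_thread_flag(TIF_32BIT);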
282 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
283 +index 72ace95..4fdb669 100644
284 +--- a/arch/x86/Kconfig
285 ++++ b/arch/x86/Kconfig
286 +@@ -984,12 +984,6 @@ config X86_CPUID
287 + with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
288 + /dev/cpu/31/cpuid.
289 +
290 +-config X86_CPU_DEBUG
291 +- tristate "/sys/kernel/debug/x86/cpu/* - CPU Debug support"
292 +- ---help---
293 +- If you select this option, this will provide various x86 CPUs
294 +- information through debugfs.
295 +-
296 + choice
297 + prompt "High Memory Support"
298 + default HIGHMEM4G if !X86_NUMAQ
299 +diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
300 +index 2a4d073..f9f4724 100644
301 +--- a/arch/x86/ia32/ia32_aout.c
302 ++++ b/arch/x86/ia32/ia32_aout.c
303 +@@ -308,14 +308,15 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
304 + if (retval)
305 + return retval;
306 +
307 +- regs->cs = __USER32_CS;
308 +- regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 =
309 +- regs->r13 = regs->r14 = regs->r15 = 0;
310 +-
311 + /* OK, This is the point of no return */
312 + set_personality(PER_LINUX);
313 + set_thread_flag(TIF_IA32);
314 +- clear_thread_flag(TIF_ABI_PENDING);
315 ++
316 ++ setup_new_exec(bprm);
317 ++
318 ++ regs->cs = __USER32_CS;
319 ++ regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 =
320 ++ regs->r13 = regs->r14 = regs->r15 = 0;
321 +
322 + current->mm->end_code = ex.a_text +
323 + (current->mm->start_code = N_TXTADDR(ex));
324 +diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h
325 +deleted file mode 100644
326 +index d96c1ee..0000000
327 +--- a/arch/x86/include/asm/cpu_debug.h
328 ++++ /dev/null
329 +@@ -1,127 +0,0 @@
330 +-#ifndef _ASM_X86_CPU_DEBUG_H
331 +-#define _ASM_X86_CPU_DEBUG_H
332 +-
333 +-/*
334 +- * CPU x86 architecture debug
335 +- *
336 +- * Copyright(C) 2009 Jaswinder Singh Rajput
337 +- */
338 +-
339 +-/* Register flags */
340 +-enum cpu_debug_bit {
341 +-/* Model Specific Registers (MSRs) */
342 +- CPU_MC_BIT, /* Machine Check */
343 +- CPU_MONITOR_BIT, /* Monitor */
344 +- CPU_TIME_BIT, /* Time */
345 +- CPU_PMC_BIT, /* Performance Monitor */
346 +- CPU_PLATFORM_BIT, /* Platform */
347 +- CPU_APIC_BIT, /* APIC */
348 +- CPU_POWERON_BIT, /* Power-on */
349 +- CPU_CONTROL_BIT, /* Control */
350 +- CPU_FEATURES_BIT, /* Features control */
351 +- CPU_LBRANCH_BIT, /* Last Branch */
352 +- CPU_BIOS_BIT, /* BIOS */
353 +- CPU_FREQ_BIT, /* Frequency */
354 +- CPU_MTTR_BIT, /* MTRR */
355 +- CPU_PERF_BIT, /* Performance */
356 +- CPU_CACHE_BIT, /* Cache */
357 +- CPU_SYSENTER_BIT, /* Sysenter */
358 +- CPU_THERM_BIT, /* Thermal */
359 +- CPU_MISC_BIT, /* Miscellaneous */
360 +- CPU_DEBUG_BIT, /* Debug */
361 +- CPU_PAT_BIT, /* PAT */
362 +- CPU_VMX_BIT, /* VMX */
363 +- CPU_CALL_BIT, /* System Call */
364 +- CPU_BASE_BIT, /* BASE Address */
365 +- CPU_VER_BIT, /* Version ID */
366 +- CPU_CONF_BIT, /* Configuration */
367 +- CPU_SMM_BIT, /* System mgmt mode */
368 +- CPU_SVM_BIT, /*Secure Virtual Machine*/
369 +- CPU_OSVM_BIT, /* OS-Visible Workaround*/
370 +-/* Standard Registers */
371 +- CPU_TSS_BIT, /* Task Stack Segment */
372 +- CPU_CR_BIT, /* Control Registers */
373 +- CPU_DT_BIT, /* Descriptor Table */
374 +-/* End of Registers flags */
375 +- CPU_REG_ALL_BIT, /* Select all Registers */
376 +-};
377 +-
378 +-#define CPU_REG_ALL (~0) /* Select all Registers */
379 +-
380 +-#define CPU_MC (1 << CPU_MC_BIT)
381 +-#define CPU_MONITOR (1 << CPU_MONITOR_BIT)
382 +-#define CPU_TIME (1 << CPU_TIME_BIT)
383 +-#define CPU_PMC (1 << CPU_PMC_BIT)
384 +-#define CPU_PLATFORM (1 << CPU_PLATFORM_BIT)
385 +-#define CPU_APIC (1 << CPU_APIC_BIT)
386 +-#define CPU_POWERON (1 << CPU_POWERON_BIT)
387 +-#define CPU_CONTROL (1 << CPU_CONTROL_BIT)
388 +-#define CPU_FEATURES (1 << CPU_FEATURES_BIT)
389 +-#define CPU_LBRANCH (1 << CPU_LBRANCH_BIT)
390 +-#define CPU_BIOS (1 << CPU_BIOS_BIT)
391 +-#define CPU_FREQ (1 << CPU_FREQ_BIT)
392 +-#define CPU_MTRR (1 << CPU_MTTR_BIT)
393 +-#define CPU_PERF (1 << CPU_PERF_BIT)
394 +-#define CPU_CACHE (1 << CPU_CACHE_BIT)
395 +-#define CPU_SYSENTER (1 << CPU_SYSENTER_BIT)
396 +-#define CPU_THERM (1 << CPU_THERM_BIT)
397 +-#define CPU_MISC (1 << CPU_MISC_BIT)
398 +-#define CPU_DEBUG (1 << CPU_DEBUG_BIT)
399 +-#define CPU_PAT (1 << CPU_PAT_BIT)
400 +-#define CPU_VMX (1 << CPU_VMX_BIT)
401 +-#define CPU_CALL (1 << CPU_CALL_BIT)
402 +-#define CPU_BASE (1 << CPU_BASE_BIT)
403 +-#define CPU_VER (1 << CPU_VER_BIT)
404 +-#define CPU_CONF (1 << CPU_CONF_BIT)
405 +-#define CPU_SMM (1 << CPU_SMM_BIT)
406 +-#define CPU_SVM (1 << CPU_SVM_BIT)
407 +-#define CPU_OSVM (1 << CPU_OSVM_BIT)
408 +-#define CPU_TSS (1 << CPU_TSS_BIT)
409 +-#define CPU_CR (1 << CPU_CR_BIT)
410 +-#define CPU_DT (1 << CPU_DT_BIT)
411 +-
412 +-/* Register file flags */
413 +-enum cpu_file_bit {
414 +- CPU_INDEX_BIT, /* index */
415 +- CPU_VALUE_BIT, /* value */
416 +-};
417 +-
418 +-#define CPU_FILE_VALUE (1 << CPU_VALUE_BIT)
419 +-
420 +-#define MAX_CPU_FILES 512
421 +-
422 +-struct cpu_private {
423 +- unsigned cpu;
424 +- unsigned type;
425 +- unsigned reg;
426 +- unsigned file;
427 +-};
428 +-
429 +-struct cpu_debug_base {
430 +- char *name; /* Register name */
431 +- unsigned flag; /* Register flag */
432 +- unsigned write; /* Register write flag */
433 +-};
434 +-
435 +-/*
436 +- * Currently it looks similar to cpu_debug_base but once we add more files
437 +- * cpu_file_base will go in different direction
438 +- */
439 +-struct cpu_file_base {
440 +- char *name; /* Register file name */
441 +- unsigned flag; /* Register file flag */
442 +- unsigned write; /* Register write flag */
443 +-};
444 +-
445 +-struct cpu_cpuX_base {
446 +- struct dentry *dentry; /* Register dentry */
447 +- int init; /* Register index file */
448 +-};
449 +-
450 +-struct cpu_debug_range {
451 +- unsigned min; /* Register range min */
452 +- unsigned max; /* Register range max */
453 +- unsigned flag; /* Supported flags */
454 +-};
455 +-
456 +-#endif /* _ASM_X86_CPU_DEBUG_H */
457 +diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
458 +index 456a304..8ac9d9a 100644
459 +--- a/arch/x86/include/asm/elf.h
460 ++++ b/arch/x86/include/asm/elf.h
461 +@@ -197,14 +197,8 @@ do { \
462 + set_fs(USER_DS); \
463 + } while (0)
464 +
465 +-#define COMPAT_SET_PERSONALITY(ex) \
466 +-do { \
467 +- if (test_thread_flag(TIF_IA32)) \
468 +- clear_thread_flag(TIF_ABI_PENDING); \
469 +- else \
470 +- set_thread_flag(TIF_ABI_PENDING); \
471 +- current->personality |= force_personality32; \
472 +-} while (0)
473 ++void set_personality_ia32(void);
474 ++#define COMPAT_SET_PERSONALITY(ex) set_personality_ia32()
475 +
476 + #define COMPAT_ELF_PLATFORM ("i686")
477 +
478 +diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
479 +index 1c22cb0..3251e23 100644
480 +--- a/arch/x86/include/asm/hpet.h
481 ++++ b/arch/x86/include/asm/hpet.h
482 +@@ -66,6 +66,7 @@
483 + extern unsigned long hpet_address;
484 + extern unsigned long force_hpet_address;
485 + extern int hpet_force_user;
486 ++extern u8 hpet_msi_disable;
487 + extern int is_hpet_enabled(void);
488 + extern int hpet_enable(void);
489 + extern void hpet_disable(void);
490 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
491 +index d838922..d759a1f 100644
492 +--- a/arch/x86/include/asm/kvm_host.h
493 ++++ b/arch/x86/include/asm/kvm_host.h
494 +@@ -412,6 +412,7 @@ struct kvm_arch{
495 + unsigned long irq_sources_bitmap;
496 + unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
497 + u64 vm_init_tsc;
498 ++ s64 kvmclock_offset;
499 + };
500 +
501 + struct kvm_vm_stat {
502 +diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
503 +index d27d0a2..19c3ce4 100644
504 +--- a/arch/x86/include/asm/thread_info.h
505 ++++ b/arch/x86/include/asm/thread_info.h
506 +@@ -86,7 +86,6 @@ struct thread_info {
507 + #define TIF_NOTSC 16 /* TSC is not accessible in userland */
508 + #define TIF_IA32 17 /* 32bit process */
509 + #define TIF_FORK 18 /* ret_from_fork */
510 +-#define TIF_ABI_PENDING 19
511 + #define TIF_MEMDIE 20
512 + #define TIF_DEBUG 21 /* uses debug registers */
513 + #define TIF_IO_BITMAP 22 /* uses I/O bitmap */
514 +@@ -110,7 +109,6 @@ struct thread_info {
515 + #define _TIF_NOTSC (1 << TIF_NOTSC)
516 + #define _TIF_IA32 (1 << TIF_IA32)
517 + #define _TIF_FORK (1 << TIF_FORK)
518 +-#define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING)
519 + #define _TIF_DEBUG (1 << TIF_DEBUG)
520 + #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
521 + #define _TIF_FREEZE (1 << TIF_FREEZE)
522 +diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
523 +index 90b9b55..e3f85fe 100644
524 +--- a/arch/x86/kernel/amd_iommu.c
525 ++++ b/arch/x86/kernel/amd_iommu.c
526 +@@ -540,7 +540,7 @@ static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
527 + static void flush_devices_by_domain(struct protection_domain *domain)
528 + {
529 + struct amd_iommu *iommu;
530 +- int i;
531 ++ unsigned long i;
532 +
533 + for (i = 0; i <= amd_iommu_last_bdf; ++i) {
534 + if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
535 +diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
536 +index 68537e9..ff502cc 100644
537 +--- a/arch/x86/kernel/cpu/Makefile
538 ++++ b/arch/x86/kernel/cpu/Makefile
539 +@@ -18,8 +18,6 @@ obj-y += vmware.o hypervisor.o sched.o
540 + obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o
541 + obj-$(CONFIG_X86_64) += bugs_64.o
542 +
543 +-obj-$(CONFIG_X86_CPU_DEBUG) += cpu_debug.o
544 +-
545 + obj-$(CONFIG_CPU_SUP_INTEL) += intel.o
546 + obj-$(CONFIG_CPU_SUP_AMD) += amd.o
547 + obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
548 +diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
549 +deleted file mode 100644
550 +index dca325c..0000000
551 +--- a/arch/x86/kernel/cpu/cpu_debug.c
552 ++++ /dev/null
553 +@@ -1,688 +0,0 @@
554 +-/*
555 +- * CPU x86 architecture debug code
556 +- *
557 +- * Copyright(C) 2009 Jaswinder Singh Rajput
558 +- *
559 +- * For licencing details see kernel-base/COPYING
560 +- */
561 +-
562 +-#include <linux/interrupt.h>
563 +-#include <linux/compiler.h>
564 +-#include <linux/seq_file.h>
565 +-#include <linux/debugfs.h>
566 +-#include <linux/kprobes.h>
567 +-#include <linux/uaccess.h>
568 +-#include <linux/kernel.h>
569 +-#include <linux/module.h>
570 +-#include <linux/percpu.h>
571 +-#include <linux/signal.h>
572 +-#include <linux/errno.h>
573 +-#include <linux/sched.h>
574 +-#include <linux/types.h>
575 +-#include <linux/init.h>
576 +-#include <linux/slab.h>
577 +-#include <linux/smp.h>
578 +-
579 +-#include <asm/cpu_debug.h>
580 +-#include <asm/paravirt.h>
581 +-#include <asm/system.h>
582 +-#include <asm/traps.h>
583 +-#include <asm/apic.h>
584 +-#include <asm/desc.h>
585 +-
586 +-static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
587 +-static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
588 +-static DEFINE_PER_CPU(int, cpu_priv_count);
589 +-
590 +-static DEFINE_MUTEX(cpu_debug_lock);
591 +-
592 +-static struct dentry *cpu_debugfs_dir;
593 +-
594 +-static struct cpu_debug_base cpu_base[] = {
595 +- { "mc", CPU_MC, 0 },
596 +- { "monitor", CPU_MONITOR, 0 },
597 +- { "time", CPU_TIME, 0 },
598 +- { "pmc", CPU_PMC, 1 },
599 +- { "platform", CPU_PLATFORM, 0 },
600 +- { "apic", CPU_APIC, 0 },
601 +- { "poweron", CPU_POWERON, 0 },
602 +- { "control", CPU_CONTROL, 0 },
603 +- { "features", CPU_FEATURES, 0 },
604 +- { "lastbranch", CPU_LBRANCH, 0 },
605 +- { "bios", CPU_BIOS, 0 },
606 +- { "freq", CPU_FREQ, 0 },
607 +- { "mtrr", CPU_MTRR, 0 },
608 +- { "perf", CPU_PERF, 0 },
609 +- { "cache", CPU_CACHE, 0 },
610 +- { "sysenter", CPU_SYSENTER, 0 },
611 +- { "therm", CPU_THERM, 0 },
612 +- { "misc", CPU_MISC, 0 },
613 +- { "debug", CPU_DEBUG, 0 },
614 +- { "pat", CPU_PAT, 0 },
615 +- { "vmx", CPU_VMX, 0 },
616 +- { "call", CPU_CALL, 0 },
617 +- { "base", CPU_BASE, 0 },
618 +- { "ver", CPU_VER, 0 },
619 +- { "conf", CPU_CONF, 0 },
620 +- { "smm", CPU_SMM, 0 },
621 +- { "svm", CPU_SVM, 0 },
622 +- { "osvm", CPU_OSVM, 0 },
623 +- { "tss", CPU_TSS, 0 },
624 +- { "cr", CPU_CR, 0 },
625 +- { "dt", CPU_DT, 0 },
626 +- { "registers", CPU_REG_ALL, 0 },
627 +-};
628 +-
629 +-static struct cpu_file_base cpu_file[] = {
630 +- { "index", CPU_REG_ALL, 0 },
631 +- { "value", CPU_REG_ALL, 1 },
632 +-};
633 +-
634 +-/* CPU Registers Range */
635 +-static struct cpu_debug_range cpu_reg_range[] = {
636 +- { 0x00000000, 0x00000001, CPU_MC, },
637 +- { 0x00000006, 0x00000007, CPU_MONITOR, },
638 +- { 0x00000010, 0x00000010, CPU_TIME, },
639 +- { 0x00000011, 0x00000013, CPU_PMC, },
640 +- { 0x00000017, 0x00000017, CPU_PLATFORM, },
641 +- { 0x0000001B, 0x0000001B, CPU_APIC, },
642 +- { 0x0000002A, 0x0000002B, CPU_POWERON, },
643 +- { 0x0000002C, 0x0000002C, CPU_FREQ, },
644 +- { 0x0000003A, 0x0000003A, CPU_CONTROL, },
645 +- { 0x00000040, 0x00000047, CPU_LBRANCH, },
646 +- { 0x00000060, 0x00000067, CPU_LBRANCH, },
647 +- { 0x00000079, 0x00000079, CPU_BIOS, },
648 +- { 0x00000088, 0x0000008A, CPU_CACHE, },
649 +- { 0x0000008B, 0x0000008B, CPU_BIOS, },
650 +- { 0x0000009B, 0x0000009B, CPU_MONITOR, },
651 +- { 0x000000C1, 0x000000C4, CPU_PMC, },
652 +- { 0x000000CD, 0x000000CD, CPU_FREQ, },
653 +- { 0x000000E7, 0x000000E8, CPU_PERF, },
654 +- { 0x000000FE, 0x000000FE, CPU_MTRR, },
655 +-
656 +- { 0x00000116, 0x0000011E, CPU_CACHE, },
657 +- { 0x00000174, 0x00000176, CPU_SYSENTER, },
658 +- { 0x00000179, 0x0000017B, CPU_MC, },
659 +- { 0x00000186, 0x00000189, CPU_PMC, },
660 +- { 0x00000198, 0x00000199, CPU_PERF, },
661 +- { 0x0000019A, 0x0000019A, CPU_TIME, },
662 +- { 0x0000019B, 0x0000019D, CPU_THERM, },
663 +- { 0x000001A0, 0x000001A0, CPU_MISC, },
664 +- { 0x000001C9, 0x000001C9, CPU_LBRANCH, },
665 +- { 0x000001D7, 0x000001D8, CPU_LBRANCH, },
666 +- { 0x000001D9, 0x000001D9, CPU_DEBUG, },
667 +- { 0x000001DA, 0x000001E0, CPU_LBRANCH, },
668 +-
669 +- { 0x00000200, 0x0000020F, CPU_MTRR, },
670 +- { 0x00000250, 0x00000250, CPU_MTRR, },
671 +- { 0x00000258, 0x00000259, CPU_MTRR, },
672 +- { 0x00000268, 0x0000026F, CPU_MTRR, },
673 +- { 0x00000277, 0x00000277, CPU_PAT, },
674 +- { 0x000002FF, 0x000002FF, CPU_MTRR, },
675 +-
676 +- { 0x00000300, 0x00000311, CPU_PMC, },
677 +- { 0x00000345, 0x00000345, CPU_PMC, },
678 +- { 0x00000360, 0x00000371, CPU_PMC, },
679 +- { 0x0000038D, 0x00000390, CPU_PMC, },
680 +- { 0x000003A0, 0x000003BE, CPU_PMC, },
681 +- { 0x000003C0, 0x000003CD, CPU_PMC, },
682 +- { 0x000003E0, 0x000003E1, CPU_PMC, },
683 +- { 0x000003F0, 0x000003F2, CPU_PMC, },
684 +-
685 +- { 0x00000400, 0x00000417, CPU_MC, },
686 +- { 0x00000480, 0x0000048B, CPU_VMX, },
687 +-
688 +- { 0x00000600, 0x00000600, CPU_DEBUG, },
689 +- { 0x00000680, 0x0000068F, CPU_LBRANCH, },
690 +- { 0x000006C0, 0x000006CF, CPU_LBRANCH, },
691 +-
692 +- { 0x000107CC, 0x000107D3, CPU_PMC, },
693 +-
694 +- { 0xC0000080, 0xC0000080, CPU_FEATURES, },
695 +- { 0xC0000081, 0xC0000084, CPU_CALL, },
696 +- { 0xC0000100, 0xC0000102, CPU_BASE, },
697 +- { 0xC0000103, 0xC0000103, CPU_TIME, },
698 +-
699 +- { 0xC0010000, 0xC0010007, CPU_PMC, },
700 +- { 0xC0010010, 0xC0010010, CPU_CONF, },
701 +- { 0xC0010015, 0xC0010015, CPU_CONF, },
702 +- { 0xC0010016, 0xC001001A, CPU_MTRR, },
703 +- { 0xC001001D, 0xC001001D, CPU_MTRR, },
704 +- { 0xC001001F, 0xC001001F, CPU_CONF, },
705 +- { 0xC0010030, 0xC0010035, CPU_BIOS, },
706 +- { 0xC0010044, 0xC0010048, CPU_MC, },
707 +- { 0xC0010050, 0xC0010056, CPU_SMM, },
708 +- { 0xC0010058, 0xC0010058, CPU_CONF, },
709 +- { 0xC0010060, 0xC0010060, CPU_CACHE, },
710 +- { 0xC0010061, 0xC0010068, CPU_SMM, },
711 +- { 0xC0010069, 0xC001006B, CPU_SMM, },
712 +- { 0xC0010070, 0xC0010071, CPU_SMM, },
713 +- { 0xC0010111, 0xC0010113, CPU_SMM, },
714 +- { 0xC0010114, 0xC0010118, CPU_SVM, },
715 +- { 0xC0010140, 0xC0010141, CPU_OSVM, },
716 +- { 0xC0011022, 0xC0011023, CPU_CONF, },
717 +-};
718 +-
719 +-static int is_typeflag_valid(unsigned cpu, unsigned flag)
720 +-{
721 +- int i;
722 +-
723 +- /* Standard Registers should be always valid */
724 +- if (flag >= CPU_TSS)
725 +- return 1;
726 +-
727 +- for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
728 +- if (cpu_reg_range[i].flag == flag)
729 +- return 1;
730 +- }
731 +-
732 +- /* Invalid */
733 +- return 0;
734 +-}
735 +-
736 +-static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
737 +- int index, unsigned flag)
738 +-{
739 +- if (cpu_reg_range[index].flag == flag) {
740 +- *min = cpu_reg_range[index].min;
741 +- *max = cpu_reg_range[index].max;
742 +- } else
743 +- *max = 0;
744 +-
745 +- return *max;
746 +-}
747 +-
748 +-/* This function can also be called with seq = NULL for printk */
749 +-static void print_cpu_data(struct seq_file *seq, unsigned type,
750 +- u32 low, u32 high)
751 +-{
752 +- struct cpu_private *priv;
753 +- u64 val = high;
754 +-
755 +- if (seq) {
756 +- priv = seq->private;
757 +- if (priv->file) {
758 +- val = (val << 32) | low;
759 +- seq_printf(seq, "0x%llx\n", val);
760 +- } else
761 +- seq_printf(seq, " %08x: %08x_%08x\n",
762 +- type, high, low);
763 +- } else
764 +- printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
765 +-}
766 +-
767 +-/* This function can also be called with seq = NULL for printk */
768 +-static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
769 +-{
770 +- unsigned msr, msr_min, msr_max;
771 +- struct cpu_private *priv;
772 +- u32 low, high;
773 +- int i;
774 +-
775 +- if (seq) {
776 +- priv = seq->private;
777 +- if (priv->file) {
778 +- if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
779 +- &low, &high))
780 +- print_cpu_data(seq, priv->reg, low, high);
781 +- return;
782 +- }
783 +- }
784 +-
785 +- for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
786 +- if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
787 +- continue;
788 +-
789 +- for (msr = msr_min; msr <= msr_max; msr++) {
790 +- if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
791 +- continue;
792 +- print_cpu_data(seq, msr, low, high);
793 +- }
794 +- }
795 +-}
796 +-
797 +-static void print_tss(void *arg)
798 +-{
799 +- struct pt_regs *regs = task_pt_regs(current);
800 +- struct seq_file *seq = arg;
801 +- unsigned int seg;
802 +-
803 +- seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
804 +- seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
805 +- seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
806 +- seq_printf(seq, " RDX\t: %016lx\n", regs->dx);
807 +-
808 +- seq_printf(seq, " RSI\t: %016lx\n", regs->si);
809 +- seq_printf(seq, " RDI\t: %016lx\n", regs->di);
810 +- seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
811 +- seq_printf(seq, " ESP\t: %016lx\n", regs->sp);
812 +-
813 +-#ifdef CONFIG_X86_64
814 +- seq_printf(seq, " R08\t: %016lx\n", regs->r8);
815 +- seq_printf(seq, " R09\t: %016lx\n", regs->r9);
816 +- seq_printf(seq, " R10\t: %016lx\n", regs->r10);
817 +- seq_printf(seq, " R11\t: %016lx\n", regs->r11);
818 +- seq_printf(seq, " R12\t: %016lx\n", regs->r12);
819 +- seq_printf(seq, " R13\t: %016lx\n", regs->r13);
820 +- seq_printf(seq, " R14\t: %016lx\n", regs->r14);
821 +- seq_printf(seq, " R15\t: %016lx\n", regs->r15);
822 +-#endif
823 +-
824 +- asm("movl %%cs,%0" : "=r" (seg));
825 +- seq_printf(seq, " CS\t: %04x\n", seg);
826 +- asm("movl %%ds,%0" : "=r" (seg));
827 +- seq_printf(seq, " DS\t: %04x\n", seg);
828 +- seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff);
829 +- asm("movl %%es,%0" : "=r" (seg));
830 +- seq_printf(seq, " ES\t: %04x\n", seg);
831 +- asm("movl %%fs,%0" : "=r" (seg));
832 +- seq_printf(seq, " FS\t: %04x\n", seg);
833 +- asm("movl %%gs,%0" : "=r" (seg));
834 +- seq_printf(seq, " GS\t: %04x\n", seg);
835 +-
836 +- seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);
837 +-
838 +- seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
839 +-}
840 +-
841 +-static void print_cr(void *arg)
842 +-{
843 +- struct seq_file *seq = arg;
844 +-
845 +- seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
846 +- seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
847 +- seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
848 +- seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
849 +-#ifdef CONFIG_X86_64
850 +- seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
851 +-#endif
852 +-}
853 +-
854 +-static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
855 +-{
856 +- seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
857 +-}
858 +-
859 +-static void print_dt(void *seq)
860 +-{
861 +- struct desc_ptr dt;
862 +- unsigned long ldt;
863 +-
864 +- /* IDT */
865 +- store_idt((struct desc_ptr *)&dt);
866 +- print_desc_ptr("IDT", seq, dt);
867 +-
868 +- /* GDT */
869 +- store_gdt((struct desc_ptr *)&dt);
870 +- print_desc_ptr("GDT", seq, dt);
871 +-
872 +- /* LDT */
873 +- store_ldt(ldt);
874 +- seq_printf(seq, " LDT\t: %016lx\n", ldt);
875 +-
876 +- /* TR */
877 +- store_tr(ldt);
878 +- seq_printf(seq, " TR\t: %016lx\n", ldt);
879 +-}
880 +-
881 +-static void print_dr(void *arg)
882 +-{
883 +- struct seq_file *seq = arg;
884 +- unsigned long dr;
885 +- int i;
886 +-
887 +- for (i = 0; i < 8; i++) {
888 +- /* Ignore db4, db5 */
889 +- if ((i == 4) || (i == 5))
890 +- continue;
891 +- get_debugreg(dr, i);
892 +- seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
893 +- }
894 +-
895 +- seq_printf(seq, "\n MSR\t:\n");
896 +-}
897 +-
898 +-static void print_apic(void *arg)
899 +-{
900 +- struct seq_file *seq = arg;
901 +-
902 +-#ifdef CONFIG_X86_LOCAL_APIC
903 +- seq_printf(seq, " LAPIC\t:\n");
904 +- seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24);
905 +- seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR));
906 +- seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI));
907 +- seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI));
908 +- seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI));
909 +- seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR));
910 +- seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR));
911 +- seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV));
912 +- seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR));
913 +- seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR));
914 +- seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR));
915 +- seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2));
916 +- seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT));
917 +- seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR));
918 +- seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC));
919 +- seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0));
920 +- seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1));
921 +- seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR));
922 +- seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT));
923 +- seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT));
924 +- seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR));
925 +- if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
926 +- unsigned int i, v, maxeilvt;
927 +-
928 +- v = apic_read(APIC_EFEAT);
929 +- maxeilvt = (v >> 16) & 0xff;
930 +- seq_printf(seq, " EFEAT\t\t: %08x\n", v);
931 +- seq_printf(seq, " ECTRL\t\t: %08x\n", apic_read(APIC_ECTRL));
932 +-
933 +- for (i = 0; i < maxeilvt; i++) {
934 +- v = apic_read(APIC_EILVTn(i));
935 +- seq_printf(seq, " EILVT%d\t\t: %08x\n", i, v);
936 +- }
937 +- }
938 +-#endif /* CONFIG_X86_LOCAL_APIC */
939 +- seq_printf(seq, "\n MSR\t:\n");
940 +-}
941 +-
942 +-static int cpu_seq_show(struct seq_file *seq, void *v)
943 +-{
944 +- struct cpu_private *priv = seq->private;
945 +-
946 +- if (priv == NULL)
947 +- return -EINVAL;
948 +-
949 +- switch (cpu_base[priv->type].flag) {
950 +- case CPU_TSS:
951 +- smp_call_function_single(priv->cpu, print_tss, seq, 1);
952 +- break;
953 +- case CPU_CR:
954 +- smp_call_function_single(priv->cpu, print_cr, seq, 1);
955 +- break;
956 +- case CPU_DT:
957 +- smp_call_function_single(priv->cpu, print_dt, seq, 1);
958 +- break;
959 +- case CPU_DEBUG:
960 +- if (priv->file == CPU_INDEX_BIT)
961 +- smp_call_function_single(priv->cpu, print_dr, seq, 1);
962 +- print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
963 +- break;
964 +- case CPU_APIC:
965 +- if (priv->file == CPU_INDEX_BIT)
966 +- smp_call_function_single(priv->cpu, print_apic, seq, 1);
967 +- print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
968 +- break;
969 +-
970 +- default:
971 +- print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
972 +- break;
973 +- }
974 +- seq_printf(seq, "\n");
975 +-
976 +- return 0;
977 +-}
978 +-
979 +-static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
980 +-{
981 +- if (*pos == 0) /* One time is enough ;-) */
982 +- return seq;
983 +-
984 +- return NULL;
985 +-}
986 +-
987 +-static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
988 +-{
989 +- (*pos)++;
990 +-
991 +- return cpu_seq_start(seq, pos);
992 +-}
993 +-
994 +-static void cpu_seq_stop(struct seq_file *seq, void *v)
995 +-{
996 +-}
997 +-
998 +-static const struct seq_operations cpu_seq_ops = {
999 +- .start = cpu_seq_start,
1000 +- .next = cpu_seq_next,
1001 +- .stop = cpu_seq_stop,
1002 +- .show = cpu_seq_show,
1003 +-};
1004 +-
1005 +-static int cpu_seq_open(struct inode *inode, struct file *file)
1006 +-{
1007 +- struct cpu_private *priv = inode->i_private;
1008 +- struct seq_file *seq;
1009 +- int err;
1010 +-
1011 +- err = seq_open(file, &cpu_seq_ops);
1012 +- if (!err) {
1013 +- seq = file->private_data;
1014 +- seq->private = priv;
1015 +- }
1016 +-
1017 +- return err;
1018 +-}
1019 +-
1020 +-static int write_msr(struct cpu_private *priv, u64 val)
1021 +-{
1022 +- u32 low, high;
1023 +-
1024 +- high = (val >> 32) & 0xffffffff;
1025 +- low = val & 0xffffffff;
1026 +-
1027 +- if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high))
1028 +- return 0;
1029 +-
1030 +- return -EPERM;
1031 +-}
1032 +-
1033 +-static int write_cpu_register(struct cpu_private *priv, const char *buf)
1034 +-{
1035 +- int ret = -EPERM;
1036 +- u64 val;
1037 +-
1038 +- ret = strict_strtoull(buf, 0, &val);
1039 +- if (ret < 0)
1040 +- return ret;
1041 +-
1042 +- /* Supporting only MSRs */
1043 +- if (priv->type < CPU_TSS_BIT)
1044 +- return write_msr(priv, val);
1045 +-
1046 +- return ret;
1047 +-}
1048 +-
1049 +-static ssize_t cpu_write(struct file *file, const char __user *ubuf,
1050 +- size_t count, loff_t *off)
1051 +-{
1052 +- struct seq_file *seq = file->private_data;
1053 +- struct cpu_private *priv = seq->private;
1054 +- char buf[19];
1055 +-
1056 +- if ((priv == NULL) || (count >= sizeof(buf)))
1057 +- return -EINVAL;
1058 +-
1059 +- if (copy_from_user(&buf, ubuf, count))
1060 +- return -EFAULT;
1061 +-
1062 +- buf[count] = 0;
1063 +-
1064 +- if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write))
1065 +- if (!write_cpu_register(priv, buf))
1066 +- return count;
1067 +-
1068 +- return -EACCES;
1069 +-}
1070 +-
1071 +-static const struct file_operations cpu_fops = {
1072 +- .owner = THIS_MODULE,
1073 +- .open = cpu_seq_open,
1074 +- .read = seq_read,
1075 +- .write = cpu_write,
1076 +- .llseek = seq_lseek,
1077 +- .release = seq_release,
1078 +-};
1079 +-
1080 +-static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
1081 +- unsigned file, struct dentry *dentry)
1082 +-{
1083 +- struct cpu_private *priv = NULL;
1084 +-
1085 +- /* Already intialized */
1086 +- if (file == CPU_INDEX_BIT)
1087 +- if (per_cpu(cpu_arr[type].init, cpu))
1088 +- return 0;
1089 +-
1090 +- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1091 +- if (priv == NULL)
1092 +- return -ENOMEM;
1093 +-
1094 +- priv->cpu = cpu;
1095 +- priv->type = type;
1096 +- priv->reg = reg;
1097 +- priv->file = file;
1098 +- mutex_lock(&cpu_debug_lock);
1099 +- per_cpu(priv_arr[type], cpu) = priv;
1100 +- per_cpu(cpu_priv_count, cpu)++;
1101 +- mutex_unlock(&cpu_debug_lock);
1102 +-
1103 +- if (file)
1104 +- debugfs_create_file(cpu_file[file].name, S_IRUGO,
1105 +- dentry, (void *)priv, &cpu_fops);
1106 +- else {
1107 +- debugfs_create_file(cpu_base[type].name, S_IRUGO,
1108 +- per_cpu(cpu_arr[type].dentry, cpu),
1109 +- (void *)priv, &cpu_fops);
1110 +- mutex_lock(&cpu_debug_lock);
1111 +- per_cpu(cpu_arr[type].init, cpu) = 1;
1112 +- mutex_unlock(&cpu_debug_lock);
1113 +- }
1114 +-
1115 +- return 0;
1116 +-}
1117 +-
1118 +-static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
1119 +- struct dentry *dentry)
1120 +-{
1121 +- unsigned file;
1122 +- int err = 0;
1123 +-
1124 +- for (file = 0; file < ARRAY_SIZE(cpu_file); file++) {
1125 +- err = cpu_create_file(cpu, type, reg, file, dentry);
1126 +- if (err)
1127 +- return err;
1128 +- }
1129 +-
1130 +- return err;
1131 +-}
1132 +-
1133 +-static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
1134 +-{
1135 +- struct dentry *cpu_dentry = NULL;
1136 +- unsigned reg, reg_min, reg_max;
1137 +- int i, err = 0;
1138 +- char reg_dir[12];
1139 +- u32 low, high;
1140 +-
1141 +- for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
1142 +- if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
1143 +- cpu_base[type].flag))
1144 +- continue;
1145 +-
1146 +- for (reg = reg_min; reg <= reg_max; reg++) {
1147 +- if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
1148 +- continue;
1149 +-
1150 +- sprintf(reg_dir, "0x%x", reg);
1151 +- cpu_dentry = debugfs_create_dir(reg_dir, dentry);
1152 +- err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
1153 +- if (err)
1154 +- return err;
1155 +- }
1156 +- }
1157 +-
1158 +- return err;
1159 +-}
1160 +-
1161 +-static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
1162 +-{
1163 +- struct dentry *cpu_dentry = NULL;
1164 +- unsigned type;
1165 +- int err = 0;
1166 +-
1167 +- for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) {
1168 +- if (!is_typeflag_valid(cpu, cpu_base[type].flag))
1169 +- continue;
1170 +- cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
1171 +- per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
1172 +-
1173 +- if (type < CPU_TSS_BIT)
1174 +- err = cpu_init_msr(cpu, type, cpu_dentry);
1175 +- else
1176 +- err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
1177 +- cpu_dentry);
1178 +- if (err)
1179 +- return err;
1180 +- }
1181 +-
1182 +- return err;
1183 +-}
1184 +-
1185 +-static int cpu_init_cpu(void)
1186 +-{
1187 +- struct dentry *cpu_dentry = NULL;
1188 +- struct cpuinfo_x86 *cpui;
1189 +- char cpu_dir[12];
1190 +- unsigned cpu;
1191 +- int err = 0;
1192 +-
1193 +- for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
1194 +- cpui = &cpu_data(cpu);
1195 +- if (!cpu_has(cpui, X86_FEATURE_MSR))
1196 +- continue;
1197 +-
1198 +- sprintf(cpu_dir, "cpu%d", cpu);
1199 +- cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
1200 +- err = cpu_init_allreg(cpu, cpu_dentry);
1201 +-
1202 +- pr_info("cpu%d(%d) debug files %d\n",
1203 +- cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
1204 +- if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
1205 +- pr_err("Register files count %d exceeds limit %d\n",
1206 +- per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
1207 +- per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
1208 +- err = -ENFILE;
1209 +- }
1210 +- if (err)
1211 +- return err;
1212 +- }
1213 +-
1214 +- return err;
1215 +-}
1216 +-
1217 +-static int __init cpu_debug_init(void)
1218 +-{
1219 +- cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);
1220 +-
1221 +- return cpu_init_cpu();
1222 +-}
1223 +-
1224 +-static void __exit cpu_debug_exit(void)
1225 +-{
1226 +- int i, cpu;
1227 +-
1228 +- if (cpu_debugfs_dir)
1229 +- debugfs_remove_recursive(cpu_debugfs_dir);
1230 +-
1231 +- for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1232 +- for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
1233 +- kfree(per_cpu(priv_arr[i], cpu));
1234 +-}
1235 +-
1236 +-module_init(cpu_debug_init);
1237 +-module_exit(cpu_debug_exit);
1238 +-
1239 +-MODULE_AUTHOR("Jaswinder Singh Rajput");
1240 +-MODULE_DESCRIPTION("CPU Debug module");
1241 +-MODULE_LICENSE("GPL");
1242 +diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
1243 +index dedc2bd..5877873 100644
1244 +--- a/arch/x86/kernel/hpet.c
1245 ++++ b/arch/x86/kernel/hpet.c
1246 +@@ -33,6 +33,8 @@
1247 + * HPET address is set in acpi/boot.c, when an ACPI entry exists
1248 + */
1249 + unsigned long hpet_address;
1250 ++u8 hpet_msi_disable;
1251 ++
1252 + #ifdef CONFIG_PCI_MSI
1253 + static unsigned long hpet_num_timers;
1254 + #endif
1255 +@@ -584,6 +586,9 @@ static void hpet_msi_capability_lookup(unsigned int start_timer)
1256 + unsigned int num_timers_used = 0;
1257 + int i;
1258 +
1259 ++ if (hpet_msi_disable)
1260 ++ return;
1261 ++
1262 + id = hpet_readl(HPET_ID);
1263 +
1264 + num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
1265 +@@ -911,6 +916,9 @@ static __init int hpet_late_init(void)
1266 + hpet_reserve_platform_timers(hpet_readl(HPET_ID));
1267 + hpet_print_config();
1268 +
1269 ++ if (hpet_msi_disable)
1270 ++ return 0;
1271 ++
1272 + for_each_online_cpu(cpu) {
1273 + hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
1274 + }
1275 +diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
1276 +index 5284cd2..f010ab4 100644
1277 +--- a/arch/x86/kernel/process.c
1278 ++++ b/arch/x86/kernel/process.c
1279 +@@ -91,18 +91,6 @@ void flush_thread(void)
1280 + {
1281 + struct task_struct *tsk = current;
1282 +
1283 +-#ifdef CONFIG_X86_64
1284 +- if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
1285 +- clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
1286 +- if (test_tsk_thread_flag(tsk, TIF_IA32)) {
1287 +- clear_tsk_thread_flag(tsk, TIF_IA32);
1288 +- } else {
1289 +- set_tsk_thread_flag(tsk, TIF_IA32);
1290 +- current_thread_info()->status |= TS_COMPAT;
1291 +- }
1292 +- }
1293 +-#endif
1294 +-
1295 + clear_tsk_thread_flag(tsk, TIF_DEBUG);
1296 +
1297 + tsk->thread.debugreg0 = 0;
1298 +diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
1299 +index eb62cbc..f9ce04f 100644
1300 +--- a/arch/x86/kernel/process_64.c
1301 ++++ b/arch/x86/kernel/process_64.c
1302 +@@ -540,6 +540,17 @@ sys_clone(unsigned long clone_flags, unsigned long newsp,
1303 + return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
1304 + }
1305 +
1306 ++void set_personality_ia32(void)
1307 ++{
1308 ++ /* inherit personality from parent */
1309 ++
1310 ++ /* Make sure to be in 32bit mode */
1311 ++ set_thread_flag(TIF_IA32);
1312 ++
1313 ++ /* Prepare the first "return" to user space */
1314 ++ current_thread_info()->status |= TS_COMPAT;
1315 ++}
1316 ++
1317 + unsigned long get_wchan(struct task_struct *p)
1318 + {
1319 + unsigned long stack;
1320 +diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
1321 +index 6c3b2c6..0040164 100644
1322 +--- a/arch/x86/kernel/quirks.c
1323 ++++ b/arch/x86/kernel/quirks.c
1324 +@@ -491,6 +491,19 @@ void force_hpet_resume(void)
1325 + break;
1326 + }
1327 + }
1328 ++
1329 ++/*
1330 ++ * HPET MSI on some boards (ATI SB700/SB800) has a side effect on
1331 ++ * floppy DMA. Disable HPET MSI on such platforms.
1332 ++ */
1333 ++static void force_disable_hpet_msi(struct pci_dev *unused)
1334 ++{
1335 ++ hpet_msi_disable = 1;
1336 ++}
1337 ++
1338 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
1339 ++ force_disable_hpet_msi);
1340 ++
1341 + #endif
1342 +
1343 + #if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
1344 +diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
1345 +index 6caf260..bff34d6 100644
1346 +--- a/arch/x86/kernel/reboot.c
1347 ++++ b/arch/x86/kernel/reboot.c
1348 +@@ -203,6 +203,15 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
1349 + DMI_MATCH(DMI_BOARD_NAME, "0T656F"),
1350 + },
1351 + },
1352 ++ { /* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G */
1353 ++ .callback = set_bios_reboot,
1354 ++ .ident = "Dell OptiPlex 760",
1355 ++ .matches = {
1356 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1357 ++ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"),
1358 ++ DMI_MATCH(DMI_BOARD_NAME, "0G919G"),
1359 ++ },
1360 ++ },
1361 + { /* Handle problems with rebooting on Dell 2400's */
1362 + .callback = set_bios_reboot,
1363 + .ident = "Dell PowerEdge 2400",
1364 +diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
1365 +index 51aa5b2..8425f7e 100644
1366 +--- a/arch/x86/kernel/setup.c
1367 ++++ b/arch/x86/kernel/setup.c
1368 +@@ -667,19 +667,27 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
1369 + DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix/MSC"),
1370 + },
1371 + },
1372 +- {
1373 + /*
1374 +- * AMI BIOS with low memory corruption was found on Intel DG45ID board.
1375 +- * It hase different DMI_BIOS_VENDOR = "Intel Corp.", for now we will
1376 ++ * AMI BIOS with low memory corruption was found on Intel DG45ID and
1377 ++ * DG45FC boards.
1378 ++ * It has a different DMI_BIOS_VENDOR = "Intel Corp.", for now we will
1379 + * match only DMI_BOARD_NAME and see if there is more bad products
1380 + * with this vendor.
1381 + */
1382 ++ {
1383 + .callback = dmi_low_memory_corruption,
1384 + .ident = "AMI BIOS",
1385 + .matches = {
1386 + DMI_MATCH(DMI_BOARD_NAME, "DG45ID"),
1387 + },
1388 + },
1389 ++ {
1390 ++ .callback = dmi_low_memory_corruption,
1391 ++ .ident = "AMI BIOS",
1392 ++ .matches = {
1393 ++ DMI_MATCH(DMI_BOARD_NAME, "DG45FC"),
1394 ++ },
1395 ++ },
1396 + #endif
1397 + {}
1398 + };
1399 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1400 +index 6378e07..145741c 100644
1401 +--- a/arch/x86/kvm/x86.c
1402 ++++ b/arch/x86/kvm/x86.c
1403 +@@ -680,7 +680,8 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
1404 + /* With all the info we got, fill in the values */
1405 +
1406 + vcpu->hv_clock.system_time = ts.tv_nsec +
1407 +- (NSEC_PER_SEC * (u64)ts.tv_sec);
1408 ++ (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;
1409 ++
1410 + /*
1411 + * The interface expects us to write an even number signaling that the
1412 + * update is finished. Since the guest won't see the intermediate
1413 +@@ -1227,6 +1228,7 @@ int kvm_dev_ioctl_check_extension(long ext)
1414 + case KVM_CAP_PIT2:
1415 + case KVM_CAP_PIT_STATE2:
1416 + case KVM_CAP_SET_IDENTITY_MAP_ADDR:
1417 ++ case KVM_CAP_ADJUST_CLOCK:
1418 + r = 1;
1419 + break;
1420 + case KVM_CAP_COALESCED_MMIO:
1421 +@@ -2424,6 +2426,44 @@ long kvm_arch_vm_ioctl(struct file *filp,
1422 + r = 0;
1423 + break;
1424 + }
1425 ++ case KVM_SET_CLOCK: {
1426 ++ struct timespec now;
1427 ++ struct kvm_clock_data user_ns;
1428 ++ u64 now_ns;
1429 ++ s64 delta;
1430 ++
1431 ++ r = -EFAULT;
1432 ++ if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
1433 ++ goto out;
1434 ++
1435 ++ r = -EINVAL;
1436 ++ if (user_ns.flags)
1437 ++ goto out;
1438 ++
1439 ++ r = 0;
1440 ++ ktime_get_ts(&now);
1441 ++ now_ns = timespec_to_ns(&now);
1442 ++ delta = user_ns.clock - now_ns;
1443 ++ kvm->arch.kvmclock_offset = delta;
1444 ++ break;
1445 ++ }
1446 ++ case KVM_GET_CLOCK: {
1447 ++ struct timespec now;
1448 ++ struct kvm_clock_data user_ns;
1449 ++ u64 now_ns;
1450 ++
1451 ++ ktime_get_ts(&now);
1452 ++ now_ns = timespec_to_ns(&now);
1453 ++ user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
1454 ++ user_ns.flags = 0;
1455 ++
1456 ++ r = -EFAULT;
1457 ++ if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
1458 ++ goto out;
1459 ++ r = 0;
1460 ++ break;
1461 ++ }
1462 ++
1463 + default:
1464 + ;
1465 + }
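
The arithmetic behind these two handlers is what keeps guest time monotonic:
KVM_SET_CLOCK stores the difference between the requested clock and the host's
monotonic clock, and kvm_write_guest_time() (first hunk above) adds that offset
back. A worked example with made-up numbers:

    /* Source host:      host_now = 500s, kvmclock_offset = 0
     *   KVM_GET_CLOCK -> 500s + 0 = 500s (guest-visible time)
     * Destination host: host_now = 20s
     *   KVM_SET_CLOCK(500s) stores offset = 500s - 20s = 480s
     *   the guest then reads 20s + 480s = 500s, with no backward jump. */
    s64 offset = user_clock_ns - host_now_ns;  /* KVM_SET_CLOCK */
    u64 guest  = host_now_ns + offset;         /* KVM_GET_CLOCK / guest update */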
1466 +diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
1467 +index dbb5381..3871c60 100644
1468 +--- a/arch/x86/mm/srat_64.c
1469 ++++ b/arch/x86/mm/srat_64.c
1470 +@@ -229,9 +229,11 @@ update_nodes_add(int node, unsigned long start, unsigned long end)
1471 + printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
1472 + }
1473 +
1474 +- if (changed)
1475 ++ if (changed) {
1476 ++ node_set(node, cpu_nodes_parsed);
1477 + printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n",
1478 + nd->start, nd->end);
1479 ++ }
1480 + }
1481 +
1482 + /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
1483 +diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
1484 +index cb88b1a..3347f69 100644
1485 +--- a/arch/x86/oprofile/nmi_int.c
1486 ++++ b/arch/x86/oprofile/nmi_int.c
1487 +@@ -222,7 +222,7 @@ static void nmi_cpu_switch(void *dummy)
1488 +
1489 + /* move to next set */
1490 + si += model->num_counters;
1491 +- if ((si > model->num_virt_counters) || (counter_config[si].count == 0))
1492 ++ if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
1493 + per_cpu(switch_index, cpu) = 0;
1494 + else
1495 + per_cpu(switch_index, cpu) = si;
1496 +@@ -598,6 +598,7 @@ static int __init ppro_init(char **cpu_type)
1497 + case 15: case 23:
1498 + *cpu_type = "i386/core_2";
1499 + break;
1500 ++ case 0x2e:
1501 + case 26:
1502 + spec = &op_arch_perfmon_spec;
1503 + *cpu_type = "i386/core_i7";
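
The first hunk is a plain off-by-one: counter_config[] is indexed from 0, so
si == model->num_virt_counters is already one past the end, and the wrap test
must use >=. Concretely (illustrative values):

    /* With num_virt_counters == 4, valid indices are 0..3.
     * The old test (si > 4) let si == 4 through and read counter_config[4],
     * one element past the end; (si >= 4) wraps the index back to 0. */
    if (si >= model->num_virt_counters || counter_config[si].count == 0)
            per_cpu(switch_index, cpu) = 0;
    else
            per_cpu(switch_index, cpu) = si;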
1504 +diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
1505 +index 7411915..49f6ede 100644
1506 +--- a/drivers/acpi/bus.c
1507 ++++ b/drivers/acpi/bus.c
1508 +@@ -344,6 +344,167 @@ bool acpi_bus_can_wakeup(acpi_handle handle)
1509 +
1510 + EXPORT_SYMBOL(acpi_bus_can_wakeup);
1511 +
1512 ++static void acpi_print_osc_error(acpi_handle handle,
1513 ++ struct acpi_osc_context *context, char *error)
1514 ++{
1515 ++ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER};
1516 ++ int i;
1517 ++
1518 ++ if (ACPI_FAILURE(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer)))
1519 ++ printk(KERN_DEBUG "%s\n", error);
1520 ++ else {
1521 ++ printk(KERN_DEBUG "%s:%s\n", (char *)buffer.pointer, error);
1522 ++ kfree(buffer.pointer);
1523 ++ }
1524 ++ printk(KERN_DEBUG"_OSC request data:");
1525 ++ for (i = 0; i < context->cap.length; i += sizeof(u32))
1526 ++ printk("%x ", *((u32 *)(context->cap.pointer + i)));
1527 ++ printk("\n");
1528 ++}
1529 ++
1530 ++static u8 hex_val(unsigned char c)
1531 ++{
1532 ++ return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
1533 ++}
1534 ++
1535 ++static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
1536 ++{
1537 ++ int i;
1538 ++ static int opc_map_to_uuid[16] = {6, 4, 2, 0, 11, 9, 16, 14, 19, 21,
1539 ++ 24, 26, 28, 30, 32, 34};
1540 ++
1541 ++ if (strlen(str) != 36)
1542 ++ return AE_BAD_PARAMETER;
1543 ++ for (i = 0; i < 36; i++) {
1544 ++ if (i == 8 || i == 13 || i == 18 || i == 23) {
1545 ++ if (str[i] != '-')
1546 ++ return AE_BAD_PARAMETER;
1547 ++ } else if (!isxdigit(str[i]))
1548 ++ return AE_BAD_PARAMETER;
1549 ++ }
1550 ++ for (i = 0; i < 16; i++) {
1551 ++ uuid[i] = hex_val(str[opc_map_to_uuid[i]]) << 4;
1552 ++ uuid[i] |= hex_val(str[opc_map_to_uuid[i] + 1]);
1553 ++ }
1554 ++ return AE_OK;
1555 ++}
1556 ++
1557 ++acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
1558 ++{
1559 ++ acpi_status status;
1560 ++ struct acpi_object_list input;
1561 ++ union acpi_object in_params[4];
1562 ++ union acpi_object *out_obj;
1563 ++ u8 uuid[16];
1564 ++ u32 errors;
1565 ++ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
1566 ++
1567 ++ if (!context)
1568 ++ return AE_ERROR;
1569 ++ if (ACPI_FAILURE(acpi_str_to_uuid(context->uuid_str, uuid)))
1570 ++ return AE_ERROR;
1571 ++ context->ret.length = ACPI_ALLOCATE_BUFFER;
1572 ++ context->ret.pointer = NULL;
1573 ++
1574 ++ /* Setting up input parameters */
1575 ++ input.count = 4;
1576 ++ input.pointer = in_params;
1577 ++ in_params[0].type = ACPI_TYPE_BUFFER;
1578 ++ in_params[0].buffer.length = 16;
1579 ++ in_params[0].buffer.pointer = uuid;
1580 ++ in_params[1].type = ACPI_TYPE_INTEGER;
1581 ++ in_params[1].integer.value = context->rev;
1582 ++ in_params[2].type = ACPI_TYPE_INTEGER;
1583 ++ in_params[2].integer.value = context->cap.length/sizeof(u32);
1584 ++ in_params[3].type = ACPI_TYPE_BUFFER;
1585 ++ in_params[3].buffer.length = context->cap.length;
1586 ++ in_params[3].buffer.pointer = context->cap.pointer;
1587 ++
1588 ++ status = acpi_evaluate_object(handle, "_OSC", &input, &output);
1589 ++ if (ACPI_FAILURE(status))
1590 ++ return status;
1591 ++
1592 ++ if (!output.length)
1593 ++ return AE_NULL_OBJECT;
1594 ++
1595 ++ out_obj = output.pointer;
1596 ++ if (out_obj->type != ACPI_TYPE_BUFFER
1597 ++ || out_obj->buffer.length != context->cap.length) {
1598 ++ acpi_print_osc_error(handle, context,
1599 ++ "_OSC evaluation returned wrong type");
1600 ++ status = AE_TYPE;
1601 ++ goto out_kfree;
1602 ++ }
1603 ++ /* Need to ignore the bit0 in result code */
1604 ++ errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
1605 ++ if (errors) {
1606 ++ if (errors & OSC_REQUEST_ERROR)
1607 ++ acpi_print_osc_error(handle, context,
1608 ++ "_OSC request failed");
1609 ++ if (errors & OSC_INVALID_UUID_ERROR)
1610 ++ acpi_print_osc_error(handle, context,
1611 ++ "_OSC invalid UUID");
1612 ++ if (errors & OSC_INVALID_REVISION_ERROR)
1613 ++ acpi_print_osc_error(handle, context,
1614 ++ "_OSC invalid revision");
1615 ++ if (errors & OSC_CAPABILITIES_MASK_ERROR) {
1616 ++ if (((u32 *)context->cap.pointer)[OSC_QUERY_TYPE]
1617 ++ & OSC_QUERY_ENABLE)
1618 ++ goto out_success;
1619 ++ status = AE_SUPPORT;
1620 ++ goto out_kfree;
1621 ++ }
1622 ++ status = AE_ERROR;
1623 ++ goto out_kfree;
1624 ++ }
1625 ++out_success:
1626 ++ context->ret.length = out_obj->buffer.length;
1627 ++ context->ret.pointer = kmalloc(context->ret.length, GFP_KERNEL);
1628 ++ if (!context->ret.pointer) {
1629 ++ status = AE_NO_MEMORY;
1630 ++ goto out_kfree;
1631 ++ }
1632 ++ memcpy(context->ret.pointer, out_obj->buffer.pointer,
1633 ++ context->ret.length);
1634 ++ status = AE_OK;
1635 ++
1636 ++out_kfree:
1637 ++ kfree(output.pointer);
1638 ++ if (status != AE_OK)
1639 ++ context->ret.pointer = NULL;
1640 ++ return status;
1641 ++}
1642 ++EXPORT_SYMBOL(acpi_run_osc);
1643 ++
1644 ++static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
1645 ++static void acpi_bus_osc_support(void)
1646 ++{
1647 ++ u32 capbuf[2];
1648 ++ struct acpi_osc_context context = {
1649 ++ .uuid_str = sb_uuid_str,
1650 ++ .rev = 1,
1651 ++ .cap.length = 8,
1652 ++ .cap.pointer = capbuf,
1653 ++ };
1654 ++ acpi_handle handle;
1655 ++
1656 ++ capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
1657 ++ capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
1658 ++#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
1659 ++ defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
1660 ++ capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
1661 ++#endif
1662 ++
1663 ++#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
1664 ++ capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
1665 ++#endif
1666 ++ if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
1667 ++ return;
1668 ++ if (ACPI_SUCCESS(acpi_run_osc(handle, &context)))
1669 ++ kfree(context.ret.pointer);
1670 ++ /* do we need to check the returned cap? Seems not */
1671 ++}
1672 ++
1673 + /* --------------------------------------------------------------------------
1674 + Event Management
1675 + -------------------------------------------------------------------------- */
1676 +@@ -734,6 +895,8 @@ static int __init acpi_bus_init(void)
1677 + status = acpi_ec_ecdt_probe();
1678 + /* Ignore result. Not having an ECDT is not fatal. */
1679 +
1680 ++ acpi_bus_osc_support();
1681 ++
1682 + status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
1683 + if (ACPI_FAILURE(status)) {
1684 + printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
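
For reference, the byte order acpi_str_to_uuid() produces above is ACPI's mixed-endian UUID layout: the first three fields of the string are stored little-endian, the remaining bytes in string order. A minimal standalone sketch of that mapping, assuming the offset table matches the patch's opc_map_to_uuid (reconstructed here from the ACPI convention, not quoted from the hunk):

  #include <stdio.h>
  #include <ctype.h>

  /* Offsets into "aabbccdd-eeff-gghh-iijj-kkllmmnnoopp" for each output
   * byte; the first three UUID fields are little-endian per the ACPI
   * spec. This table is an assumption mirroring opc_map_to_uuid. */
  static const int map[16] = {  6,  4,  2,  0, 11,  9, 16, 14,
                               19, 21, 24, 26, 28, 30, 32, 34 };

  static int hex_val(char c)
  {
      return isdigit((unsigned char)c) ? c - '0'
                                       : tolower((unsigned char)c) - 'a' + 10;
  }

  int main(void)
  {
      /* The \_SB _OSC UUID used later in the hunk. */
      const char *str = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
      unsigned char uuid[16];
      int i;

      for (i = 0; i < 16; i++)
          uuid[i] = (hex_val(str[map[i]]) << 4) | hex_val(str[map[i] + 1]);
      for (i = 0; i < 16; i++)
          printf("%02x%s", uuid[i], i == 15 ? "\n" : " ");
      return 0;
  }

Running it prints the buffer exactly as the firmware expects it in in_params[0], with 6e b0 11 08 leading rather than 08 11 b0 6e.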
1685 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1686 +index dc72690..91fed3c 100644
1687 +--- a/drivers/ata/libata-core.c
1688 ++++ b/drivers/ata/libata-core.c
1689 +@@ -3790,21 +3790,45 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params,
1690 + int sata_link_resume(struct ata_link *link, const unsigned long *params,
1691 + unsigned long deadline)
1692 + {
1693 ++ int tries = ATA_LINK_RESUME_TRIES;
1694 + u32 scontrol, serror;
1695 + int rc;
1696 +
1697 + if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
1698 + return rc;
1699 +
1700 +- scontrol = (scontrol & 0x0f0) | 0x300;
1701 ++ /*
1702 ++ * Writes to SControl sometimes get ignored under certain
1703 ++ * controllers (ata_piix SIDPR). Make sure DET actually is
1704 ++ * cleared.
1705 ++ */
1706 ++ do {
1707 ++ scontrol = (scontrol & 0x0f0) | 0x300;
1708 ++ if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
1709 ++ return rc;
1710 ++ /*
1711 ++ * Some PHYs react badly if SStatus is pounded
1712 ++ * immediately after resuming. Delay 200ms before
1713 ++ * debouncing.
1714 ++ */
1715 ++ msleep(200);
1716 +
1717 +- if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
1718 +- return rc;
1719 ++ /* is SControl restored correctly? */
1720 ++ if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
1721 ++ return rc;
1722 ++ } while ((scontrol & 0xf0f) != 0x300 && --tries);
1723 +
1724 +- /* Some PHYs react badly if SStatus is pounded immediately
1725 +- * after resuming. Delay 200ms before debouncing.
1726 +- */
1727 +- msleep(200);
1728 ++ if ((scontrol & 0xf0f) != 0x300) {
1729 ++ ata_link_printk(link, KERN_ERR,
1730 ++ "failed to resume link (SControl %X)\n",
1731 ++ scontrol);
1732 ++ return 0;
1733 ++ }
1734 ++
1735 ++ if (tries < ATA_LINK_RESUME_TRIES)
1736 ++ ata_link_printk(link, KERN_WARNING,
1737 ++ "link resume succeeded after %d retries\n",
1738 ++ ATA_LINK_RESUME_TRIES - tries);
1739 +
1740 + if ((rc = sata_link_debounce(link, params, deadline)))
1741 + return rc;
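
The sata_link_resume() change above turns a single fire-and-forget SControl write into a bounded write-delay-verify loop. A standalone sketch of that pattern, with a deliberately flaky register standing in for the ata_piix SIDPR behaviour the new comment describes (scr_read/scr_write here are stubs, not the kernel helpers):

  #include <stdio.h>

  #define LINK_RESUME_TRIES 5

  /* A register that silently ignores the first two writes. */
  static unsigned int reg = 0x024;        /* SPD = 2, DET = 4 */
  static int writes_ignored = 2;

  static int scr_read(unsigned int *val)  { *val = reg; return 0; }
  static int scr_write(unsigned int val)
  {
      if (writes_ignored > 0) writes_ignored--; else reg = val;
      return 0;
  }

  int main(void)
  {
      int tries = LINK_RESUME_TRIES;
      unsigned int scontrol;

      if (scr_read(&scontrol)) return 1;
      do {
          /* keep SPD, request IPM = 3, DET = 0 -- as in the hunk */
          scontrol = (scontrol & 0x0f0) | 0x300;
          if (scr_write(scontrol)) return 1;
          /* the kernel msleep(200)s here before re-reading */
          if (scr_read(&scontrol)) return 1;
      } while ((scontrol & 0xf0f) != 0x300 && --tries);

      printf("SControl %03x after %d retries\n", scontrol,
             LINK_RESUME_TRIES - tries);
      return (scontrol & 0xf0f) != 0x300;
  }

With two ignored writes, the loop converges on the third pass and reports two retries, matching the "link resume succeeded after %d retries" warning path.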
1742 +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
1743 +index bba2ae5..7d8d3c3 100644
1744 +--- a/drivers/ata/libata-eh.c
1745 ++++ b/drivers/ata/libata-eh.c
1746 +@@ -2019,8 +2019,9 @@ static void ata_eh_link_autopsy(struct ata_link *link)
1747 + qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
1748 +
1749 + /* determine whether the command is worth retrying */
1750 +- if (!(qc->err_mask & AC_ERR_INVALID) &&
1751 +- ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV))
1752 ++ if (qc->flags & ATA_QCFLAG_IO ||
1753 ++ (!(qc->err_mask & AC_ERR_INVALID) &&
1754 ++ qc->err_mask != AC_ERR_DEV))
1755 + qc->flags |= ATA_QCFLAG_RETRY;
1756 +
1757 + /* accumulate error info */
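
The reordered retry test above means a command flagged ATA_QCFLAG_IO is now always considered worth retrying, where previously an AC_ERR_INVALID mask disqualified it. A side-by-side sketch of the old and new predicates (the bit values are illustrative stand-ins, not libata's real definitions):

  #include <stdio.h>

  #define AC_ERR_DEV      0x1u    /* stand-in values */
  #define AC_ERR_INVALID  0x2u
  #define QCFLAG_IO       0x4u

  static int retry_old(unsigned flags, unsigned err)
  {
      return !(err & AC_ERR_INVALID) &&
             ((flags & QCFLAG_IO) || err != AC_ERR_DEV);
  }

  static int retry_new(unsigned flags, unsigned err)
  {
      return (flags & QCFLAG_IO) ||
             (!(err & AC_ERR_INVALID) && err != AC_ERR_DEV);
  }

  int main(void)
  {
      /* The case the fix targets: an IO command whose error was marked
       * invalid used to be abandoned, now it is retried. */
      printf("old: %d, new: %d\n",
             retry_old(QCFLAG_IO, AC_ERR_INVALID),
             retry_new(QCFLAG_IO, AC_ERR_INVALID));
      return 0;
  }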
1758 +diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
1759 +index 2ddf03a..68b5957 100644
1760 +--- a/drivers/block/pktcdvd.c
1761 ++++ b/drivers/block/pktcdvd.c
1762 +@@ -322,7 +322,7 @@ static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
1763 + pkt_kobj_remove(pd->kobj_stat);
1764 + pkt_kobj_remove(pd->kobj_wqueue);
1765 + if (class_pktcdvd)
1766 +- device_destroy(class_pktcdvd, pd->pkt_dev);
1767 ++ device_unregister(pd->dev);
1768 + }
1769 +
1770 +
1771 +diff --git a/drivers/char/random.c b/drivers/char/random.c
1772 +index 04b505e..908ac1f 100644
1773 +--- a/drivers/char/random.c
1774 ++++ b/drivers/char/random.c
1775 +@@ -1051,12 +1051,6 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1776 + /* like a named pipe */
1777 + }
1778 +
1779 +- /*
1780 +- * If we gave the user some bytes, update the access time.
1781 +- */
1782 +- if (count)
1783 +- file_accessed(file);
1784 +-
1785 + return (count ? count : retval);
1786 + }
1787 +
1788 +@@ -1107,7 +1101,6 @@ static ssize_t random_write(struct file *file, const char __user *buffer,
1789 + size_t count, loff_t *ppos)
1790 + {
1791 + size_t ret;
1792 +- struct inode *inode = file->f_path.dentry->d_inode;
1793 +
1794 + ret = write_pool(&blocking_pool, buffer, count);
1795 + if (ret)
1796 +@@ -1116,8 +1109,6 @@ static ssize_t random_write(struct file *file, const char __user *buffer,
1797 + if (ret)
1798 + return ret;
1799 +
1800 +- inode->i_mtime = current_fs_time(inode->i_sb);
1801 +- mark_inode_dirty(inode);
1802 + return (ssize_t)count;
1803 + }
1804 +
1805 +diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
1806 +index f060246..537c29a 100644
1807 +--- a/drivers/connector/connector.c
1808 ++++ b/drivers/connector/connector.c
1809 +@@ -36,17 +36,6 @@ MODULE_LICENSE("GPL");
1810 + MODULE_AUTHOR("Evgeniy Polyakov <zbr@×××××××.net>");
1811 + MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
1812 +
1813 +-static u32 cn_idx = CN_IDX_CONNECTOR;
1814 +-static u32 cn_val = CN_VAL_CONNECTOR;
1815 +-
1816 +-module_param(cn_idx, uint, 0);
1817 +-module_param(cn_val, uint, 0);
1818 +-MODULE_PARM_DESC(cn_idx, "Connector's main device idx.");
1819 +-MODULE_PARM_DESC(cn_val, "Connector's main device val.");
1820 +-
1821 +-static DEFINE_MUTEX(notify_lock);
1822 +-static LIST_HEAD(notify_list);
1823 +-
1824 + static struct cn_dev cdev;
1825 +
1826 + static int cn_already_initialized;
1827 +@@ -210,54 +199,6 @@ static void cn_rx_skb(struct sk_buff *__skb)
1828 + }
1829 +
1830 + /*
1831 +- * Notification routing.
1832 +- *
1833 +- * Gets id and checks if there are notification request for it's idx
1834 +- * and val. If there are such requests notify the listeners with the
1835 +- * given notify event.
1836 +- *
1837 +- */
1838 +-static void cn_notify(struct cb_id *id, u32 notify_event)
1839 +-{
1840 +- struct cn_ctl_entry *ent;
1841 +-
1842 +- mutex_lock(&notify_lock);
1843 +- list_for_each_entry(ent, &notify_list, notify_entry) {
1844 +- int i;
1845 +- struct cn_notify_req *req;
1846 +- struct cn_ctl_msg *ctl = ent->msg;
1847 +- int idx_found, val_found;
1848 +-
1849 +- idx_found = val_found = 0;
1850 +-
1851 +- req = (struct cn_notify_req *)ctl->data;
1852 +- for (i = 0; i < ctl->idx_notify_num; ++i, ++req) {
1853 +- if (id->idx >= req->first &&
1854 +- id->idx < req->first + req->range) {
1855 +- idx_found = 1;
1856 +- break;
1857 +- }
1858 +- }
1859 +-
1860 +- for (i = 0; i < ctl->val_notify_num; ++i, ++req) {
1861 +- if (id->val >= req->first &&
1862 +- id->val < req->first + req->range) {
1863 +- val_found = 1;
1864 +- break;
1865 +- }
1866 +- }
1867 +-
1868 +- if (idx_found && val_found) {
1869 +- struct cn_msg m = { .ack = notify_event, };
1870 +-
1871 +- memcpy(&m.id, id, sizeof(m.id));
1872 +- cn_netlink_send(&m, ctl->group, GFP_KERNEL);
1873 +- }
1874 +- }
1875 +- mutex_unlock(&notify_lock);
1876 +-}
1877 +-
1878 +-/*
1879 + * Callback add routing - adds callback with given ID and name.
1880 + * If there is registered callback with the same ID it will not be added.
1881 + *
1882 +@@ -276,8 +217,6 @@ int cn_add_callback(struct cb_id *id, char *name,
1883 + if (err)
1884 + return err;
1885 +
1886 +- cn_notify(id, 0);
1887 +-
1888 + return 0;
1889 + }
1890 + EXPORT_SYMBOL_GPL(cn_add_callback);
1891 +@@ -295,111 +234,9 @@ void cn_del_callback(struct cb_id *id)
1892 + struct cn_dev *dev = &cdev;
1893 +
1894 + cn_queue_del_callback(dev->cbdev, id);
1895 +- cn_notify(id, 1);
1896 + }
1897 + EXPORT_SYMBOL_GPL(cn_del_callback);
1898 +
1899 +-/*
1900 +- * Checks two connector's control messages to be the same.
1901 +- * Returns 1 if they are the same or if the first one is corrupted.
1902 +- */
1903 +-static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2)
1904 +-{
1905 +- int i;
1906 +- struct cn_notify_req *req1, *req2;
1907 +-
1908 +- if (m1->idx_notify_num != m2->idx_notify_num)
1909 +- return 0;
1910 +-
1911 +- if (m1->val_notify_num != m2->val_notify_num)
1912 +- return 0;
1913 +-
1914 +- if (m1->len != m2->len)
1915 +- return 0;
1916 +-
1917 +- if ((m1->idx_notify_num + m1->val_notify_num) * sizeof(*req1) !=
1918 +- m1->len)
1919 +- return 1;
1920 +-
1921 +- req1 = (struct cn_notify_req *)m1->data;
1922 +- req2 = (struct cn_notify_req *)m2->data;
1923 +-
1924 +- for (i = 0; i < m1->idx_notify_num; ++i) {
1925 +- if (req1->first != req2->first || req1->range != req2->range)
1926 +- return 0;
1927 +- req1++;
1928 +- req2++;
1929 +- }
1930 +-
1931 +- for (i = 0; i < m1->val_notify_num; ++i) {
1932 +- if (req1->first != req2->first || req1->range != req2->range)
1933 +- return 0;
1934 +- req1++;
1935 +- req2++;
1936 +- }
1937 +-
1938 +- return 1;
1939 +-}
1940 +-
1941 +-/*
1942 +- * Main connector device's callback.
1943 +- *
1944 +- * Used for notification of a request's processing.
1945 +- */
1946 +-static void cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
1947 +-{
1948 +- struct cn_ctl_msg *ctl;
1949 +- struct cn_ctl_entry *ent;
1950 +- u32 size;
1951 +-
1952 +- if (msg->len < sizeof(*ctl))
1953 +- return;
1954 +-
1955 +- ctl = (struct cn_ctl_msg *)msg->data;
1956 +-
1957 +- size = (sizeof(*ctl) + ((ctl->idx_notify_num +
1958 +- ctl->val_notify_num) *
1959 +- sizeof(struct cn_notify_req)));
1960 +-
1961 +- if (msg->len != size)
1962 +- return;
1963 +-
1964 +- if (ctl->len + sizeof(*ctl) != msg->len)
1965 +- return;
1966 +-
1967 +- /*
1968 +- * Remove notification.
1969 +- */
1970 +- if (ctl->group == 0) {
1971 +- struct cn_ctl_entry *n;
1972 +-
1973 +- mutex_lock(&notify_lock);
1974 +- list_for_each_entry_safe(ent, n, &notify_list, notify_entry) {
1975 +- if (cn_ctl_msg_equals(ent->msg, ctl)) {
1976 +- list_del(&ent->notify_entry);
1977 +- kfree(ent);
1978 +- }
1979 +- }
1980 +- mutex_unlock(&notify_lock);
1981 +-
1982 +- return;
1983 +- }
1984 +-
1985 +- size += sizeof(*ent);
1986 +-
1987 +- ent = kzalloc(size, GFP_KERNEL);
1988 +- if (!ent)
1989 +- return;
1990 +-
1991 +- ent->msg = (struct cn_ctl_msg *)(ent + 1);
1992 +-
1993 +- memcpy(ent->msg, ctl, size - sizeof(*ent));
1994 +-
1995 +- mutex_lock(&notify_lock);
1996 +- list_add(&ent->notify_entry, &notify_list);
1997 +- mutex_unlock(&notify_lock);
1998 +-}
1999 +-
2000 + static int cn_proc_show(struct seq_file *m, void *v)
2001 + {
2002 + struct cn_queue_dev *dev = cdev.cbdev;
2003 +@@ -437,11 +274,8 @@ static const struct file_operations cn_file_ops = {
2004 + static int __devinit cn_init(void)
2005 + {
2006 + struct cn_dev *dev = &cdev;
2007 +- int err;
2008 +
2009 + dev->input = cn_rx_skb;
2010 +- dev->id.idx = cn_idx;
2011 +- dev->id.val = cn_val;
2012 +
2013 + dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
2014 + CN_NETLINK_USERS + 0xf,
2015 +@@ -457,14 +291,6 @@ static int __devinit cn_init(void)
2016 +
2017 + cn_already_initialized = 1;
2018 +
2019 +- err = cn_add_callback(&dev->id, "connector", &cn_callback);
2020 +- if (err) {
2021 +- cn_already_initialized = 0;
2022 +- cn_queue_free_dev(dev->cbdev);
2023 +- netlink_kernel_release(dev->nls);
2024 +- return -EINVAL;
2025 +- }
2026 +-
2027 + proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops);
2028 +
2029 + return 0;
2030 +@@ -478,7 +304,6 @@ static void __devexit cn_fini(void)
2031 +
2032 + proc_net_remove(&init_net, "connector");
2033 +
2034 +- cn_del_callback(&dev->id);
2035 + cn_queue_free_dev(dev->cbdev);
2036 + netlink_kernel_release(dev->nls);
2037 + }
2038 +diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
2039 +index e4864e8..ed635ae 100644
2040 +--- a/drivers/firewire/core-card.c
2041 ++++ b/drivers/firewire/core-card.c
2042 +@@ -57,6 +57,9 @@ static LIST_HEAD(card_list);
2043 + static LIST_HEAD(descriptor_list);
2044 + static int descriptor_count;
2045 +
2046 ++/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
2047 ++static size_t config_rom_length = 1 + 4 + 1 + 1;
2048 ++
2049 + #define BIB_CRC(v) ((v) << 0)
2050 + #define BIB_CRC_LENGTH(v) ((v) << 16)
2051 + #define BIB_INFO_LENGTH(v) ((v) << 24)
2052 +@@ -72,7 +75,7 @@ static int descriptor_count;
2053 + #define BIB_CMC ((1) << 30)
2054 + #define BIB_IMC ((1) << 31)
2055 +
2056 +-static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
2057 ++static u32 *generate_config_rom(struct fw_card *card)
2058 + {
2059 + struct fw_descriptor *desc;
2060 + static u32 config_rom[256];
2061 +@@ -131,7 +134,7 @@ static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
2062 + for (i = 0; i < j; i += length + 1)
2063 + length = fw_compute_block_crc(config_rom + i);
2064 +
2065 +- *config_rom_length = j;
2066 ++ WARN_ON(j != config_rom_length);
2067 +
2068 + return config_rom;
2069 + }
2070 +@@ -140,17 +143,24 @@ static void update_config_roms(void)
2071 + {
2072 + struct fw_card *card;
2073 + u32 *config_rom;
2074 +- size_t length;
2075 +
2076 + list_for_each_entry (card, &card_list, link) {
2077 +- config_rom = generate_config_rom(card, &length);
2078 +- card->driver->set_config_rom(card, config_rom, length);
2079 ++ config_rom = generate_config_rom(card);
2080 ++ card->driver->set_config_rom(card, config_rom,
2081 ++ config_rom_length);
2082 + }
2083 + }
2084 +
2085 ++static size_t required_space(struct fw_descriptor *desc)
2086 ++{
2087 ++ /* descriptor + entry into root dir + optional immediate entry */
2088 ++ return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
2089 ++}
2090 ++
2091 + int fw_core_add_descriptor(struct fw_descriptor *desc)
2092 + {
2093 + size_t i;
2094 ++ int ret;
2095 +
2096 + /*
2097 + * Check descriptor is valid; the length of all blocks in the
2098 +@@ -166,15 +176,21 @@ int fw_core_add_descriptor(struct fw_descriptor *desc)
2099 +
2100 + mutex_lock(&card_mutex);
2101 +
2102 +- list_add_tail(&desc->link, &descriptor_list);
2103 +- descriptor_count++;
2104 +- if (desc->immediate > 0)
2105 ++ if (config_rom_length + required_space(desc) > 256) {
2106 ++ ret = -EBUSY;
2107 ++ } else {
2108 ++ list_add_tail(&desc->link, &descriptor_list);
2109 ++ config_rom_length += required_space(desc);
2110 + descriptor_count++;
2111 +- update_config_roms();
2112 ++ if (desc->immediate > 0)
2113 ++ descriptor_count++;
2114 ++ update_config_roms();
2115 ++ ret = 0;
2116 ++ }
2117 +
2118 + mutex_unlock(&card_mutex);
2119 +
2120 +- return 0;
2121 ++ return ret;
2122 + }
2123 + EXPORT_SYMBOL(fw_core_add_descriptor);
2124 +
2125 +@@ -183,6 +199,7 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
2126 + mutex_lock(&card_mutex);
2127 +
2128 + list_del(&desc->link);
2129 ++ config_rom_length -= required_space(desc);
2130 + descriptor_count--;
2131 + if (desc->immediate > 0)
2132 + descriptor_count--;
2133 +@@ -436,7 +453,6 @@ int fw_card_add(struct fw_card *card,
2134 + u32 max_receive, u32 link_speed, u64 guid)
2135 + {
2136 + u32 *config_rom;
2137 +- size_t length;
2138 + int ret;
2139 +
2140 + card->max_receive = max_receive;
2141 +@@ -445,8 +461,8 @@ int fw_card_add(struct fw_card *card,
2142 +
2143 + mutex_lock(&card_mutex);
2144 +
2145 +- config_rom = generate_config_rom(card, &length);
2146 +- ret = card->driver->enable(card, config_rom, length);
2147 ++ config_rom = generate_config_rom(card);
2148 ++ ret = card->driver->enable(card, config_rom, config_rom_length);
2149 + if (ret == 0)
2150 + list_add_tail(&card->link, &card_list);
2151 +
2152 +diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
2153 +index 1e504de..720b39b 100644
2154 +--- a/drivers/firewire/ohci.c
2155 ++++ b/drivers/firewire/ohci.c
2156 +@@ -2412,6 +2412,7 @@ static void ohci_pmac_off(struct pci_dev *dev)
2157 +
2158 + #define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT
2159 + #define PCI_DEVICE_ID_AGERE_FW643 0x5901
2160 ++#define PCI_DEVICE_ID_TI_TSB43AB23 0x8024
2161 +
2162 + static int __devinit pci_probe(struct pci_dev *dev,
2163 + const struct pci_device_id *ent)
2164 +@@ -2477,7 +2478,8 @@ static int __devinit pci_probe(struct pci_dev *dev,
2165 + #if !defined(CONFIG_X86_32)
2166 + /* dual-buffer mode is broken with descriptor addresses above 2G */
2167 + if (dev->vendor == PCI_VENDOR_ID_TI &&
2168 +- dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
2169 ++ (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 ||
2170 ++ dev->device == PCI_DEVICE_ID_TI_TSB43AB23))
2171 + ohci->use_dualbuffer = false;
2172 + #endif
2173 +
2174 +diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
2175 +index e9dbb48..8bf3770 100644
2176 +--- a/drivers/gpu/drm/drm_gem.c
2177 ++++ b/drivers/gpu/drm/drm_gem.c
2178 +@@ -142,19 +142,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
2179 + if (IS_ERR(obj->filp))
2180 + goto free;
2181 +
2182 +- /* Basically we want to disable the OOM killer and handle ENOMEM
2183 +- * ourselves by sacrificing pages from cached buffers.
2184 +- * XXX shmem_file_[gs]et_gfp_mask()
2185 +- */
2186 +- mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
2187 +- GFP_HIGHUSER |
2188 +- __GFP_COLD |
2189 +- __GFP_FS |
2190 +- __GFP_RECLAIMABLE |
2191 +- __GFP_NORETRY |
2192 +- __GFP_NOWARN |
2193 +- __GFP_NOMEMALLOC);
2194 +-
2195 + kref_init(&obj->refcount);
2196 + kref_init(&obj->handlecount);
2197 + obj->size = size;
2198 +diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
2199 +index 26bf055..af655e8 100644
2200 +--- a/drivers/gpu/drm/i915/i915_debugfs.c
2201 ++++ b/drivers/gpu/drm/i915/i915_debugfs.c
2202 +@@ -288,7 +288,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
2203 + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
2204 + obj = obj_priv->obj;
2205 + if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
2206 +- ret = i915_gem_object_get_pages(obj);
2207 ++ ret = i915_gem_object_get_pages(obj, 0);
2208 + if (ret) {
2209 + DRM_ERROR("Failed to get pages: %d\n", ret);
2210 + spin_unlock(&dev_priv->mm.active_list_lock);
2211 +diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
2212 +index bc2db7d..eaa1893 100644
2213 +--- a/drivers/gpu/drm/i915/i915_dma.c
2214 ++++ b/drivers/gpu/drm/i915/i915_dma.c
2215 +@@ -1252,6 +1252,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
2216 + if (ret)
2217 + goto destroy_ringbuffer;
2218 +
2219 ++ intel_modeset_init(dev);
2220 ++
2221 + ret = drm_irq_install(dev);
2222 + if (ret)
2223 + goto destroy_ringbuffer;
2224 +@@ -1266,8 +1268,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
2225 +
2226 + I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
2227 +
2228 +- intel_modeset_init(dev);
2229 +-
2230 + drm_helper_initial_config(dev);
2231 +
2232 + return 0;
2233 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
2234 +index 791fded..7277246 100644
2235 +--- a/drivers/gpu/drm/i915/i915_drv.h
2236 ++++ b/drivers/gpu/drm/i915/i915_drv.h
2237 +@@ -822,7 +822,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
2238 + void i915_gem_detach_phys_object(struct drm_device *dev,
2239 + struct drm_gem_object *obj);
2240 + void i915_gem_free_all_phys_object(struct drm_device *dev);
2241 +-int i915_gem_object_get_pages(struct drm_gem_object *obj);
2242 ++int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
2243 + void i915_gem_object_put_pages(struct drm_gem_object *obj);
2244 + void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
2245 +
2246 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
2247 +index df2c625..8ad244a 100644
2248 +--- a/drivers/gpu/drm/i915/i915_gem.c
2249 ++++ b/drivers/gpu/drm/i915/i915_gem.c
2250 +@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
2251 +
2252 + mutex_lock(&dev->struct_mutex);
2253 +
2254 +- ret = i915_gem_object_get_pages(obj);
2255 ++ ret = i915_gem_object_get_pages(obj, 0);
2256 + if (ret != 0)
2257 + goto fail_unlock;
2258 +
2259 +@@ -321,40 +321,24 @@ fail_unlock:
2260 + return ret;
2261 + }
2262 +
2263 +-static inline gfp_t
2264 +-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
2265 +-{
2266 +- return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
2267 +-}
2268 +-
2269 +-static inline void
2270 +-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
2271 +-{
2272 +- mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
2273 +-}
2274 +-
2275 + static int
2276 + i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
2277 + {
2278 + int ret;
2279 +
2280 +- ret = i915_gem_object_get_pages(obj);
2281 ++ ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
2282 +
2283 + /* If we've insufficient memory to map in the pages, attempt
2284 + * to make some space by throwing out some old buffers.
2285 + */
2286 + if (ret == -ENOMEM) {
2287 + struct drm_device *dev = obj->dev;
2288 +- gfp_t gfp;
2289 +
2290 + ret = i915_gem_evict_something(dev, obj->size);
2291 + if (ret)
2292 + return ret;
2293 +
2294 +- gfp = i915_gem_object_get_page_gfp_mask(obj);
2295 +- i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
2296 +- ret = i915_gem_object_get_pages(obj);
2297 +- i915_gem_object_set_page_gfp_mask (obj, gfp);
2298 ++ ret = i915_gem_object_get_pages(obj, 0);
2299 + }
2300 +
2301 + return ret;
2302 +@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
2303 +
2304 + mutex_lock(&dev->struct_mutex);
2305 +
2306 +- ret = i915_gem_object_get_pages(obj);
2307 ++ ret = i915_gem_object_get_pages(obj, 0);
2308 + if (ret != 0)
2309 + goto fail_unlock;
2310 +
2311 +@@ -2219,7 +2203,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
2312 + }
2313 +
2314 + int
2315 +-i915_gem_object_get_pages(struct drm_gem_object *obj)
2316 ++i915_gem_object_get_pages(struct drm_gem_object *obj,
2317 ++ gfp_t gfpmask)
2318 + {
2319 + struct drm_i915_gem_object *obj_priv = obj->driver_private;
2320 + int page_count, i;
2321 +@@ -2245,7 +2230,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
2322 + inode = obj->filp->f_path.dentry->d_inode;
2323 + mapping = inode->i_mapping;
2324 + for (i = 0; i < page_count; i++) {
2325 +- page = read_mapping_page(mapping, i, NULL);
2326 ++ page = read_cache_page_gfp(mapping, i,
2327 ++ mapping_gfp_mask (mapping) |
2328 ++ __GFP_COLD |
2329 ++ gfpmask);
2330 + if (IS_ERR(page)) {
2331 + ret = PTR_ERR(page);
2332 + i915_gem_object_put_pages(obj);
2333 +@@ -2568,7 +2556,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2334 + drm_i915_private_t *dev_priv = dev->dev_private;
2335 + struct drm_i915_gem_object *obj_priv = obj->driver_private;
2336 + struct drm_mm_node *free_space;
2337 +- bool retry_alloc = false;
2338 ++ gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2339 + int ret;
2340 +
2341 + if (obj_priv->madv != I915_MADV_WILLNEED) {
2342 +@@ -2612,15 +2600,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2343 + DRM_INFO("Binding object of size %zd at 0x%08x\n",
2344 + obj->size, obj_priv->gtt_offset);
2345 + #endif
2346 +- if (retry_alloc) {
2347 +- i915_gem_object_set_page_gfp_mask (obj,
2348 +- i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
2349 +- }
2350 +- ret = i915_gem_object_get_pages(obj);
2351 +- if (retry_alloc) {
2352 +- i915_gem_object_set_page_gfp_mask (obj,
2353 +- i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
2354 +- }
2355 ++ ret = i915_gem_object_get_pages(obj, gfpmask);
2356 + if (ret) {
2357 + drm_mm_put_block(obj_priv->gtt_space);
2358 + obj_priv->gtt_space = NULL;
2359 +@@ -2630,9 +2610,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2360 + ret = i915_gem_evict_something(dev, obj->size);
2361 + if (ret) {
2362 + /* now try to shrink everyone else */
2363 +- if (! retry_alloc) {
2364 +- retry_alloc = true;
2365 +- goto search_free;
2366 ++ if (gfpmask) {
2367 ++ gfpmask = 0;
2368 ++ goto search_free;
2369 + }
2370 +
2371 + return ret;
2372 +@@ -4695,7 +4675,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
2373 + if (!obj_priv->phys_obj)
2374 + return;
2375 +
2376 +- ret = i915_gem_object_get_pages(obj);
2377 ++ ret = i915_gem_object_get_pages(obj, 0);
2378 + if (ret)
2379 + goto out;
2380 +
2381 +@@ -4753,7 +4733,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
2382 + obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
2383 + obj_priv->phys_obj->cur_obj = obj;
2384 +
2385 +- ret = i915_gem_object_get_pages(obj);
2386 ++ ret = i915_gem_object_get_pages(obj, 0);
2387 + if (ret) {
2388 + DRM_ERROR("failed to get page list\n");
2389 + goto out;
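
The i915 refactor above threads a gfp mask through i915_gem_object_get_pages() instead of flipping the mapping's mask around each call: the first attempt passes __GFP_NORETRY | __GFP_NOWARN so allocation fails fast under pressure, and only after evicting something does the retry use the default blocking mask. A toy model of that try/evict/retry shape (the allocator and eviction below are stand-ins, not the driver's):

  #include <stdio.h>

  /* Fails under "memory pressure" unless the caller allows the
   * allocator to try hard (gfpmask == 0 in the patch). */
  static int pressure = 1;

  static int get_pages(unsigned int gfpmask)
  {
      if (pressure && gfpmask)    /* opportunistic attempt gives up early */
          return -12;             /* stand-in for -ENOMEM */
      return 0;
  }

  static int evict_something(void)
  {
      pressure = 0;               /* pretend eviction freed memory */
      return 0;
  }

  int main(void)
  {
      /* Nonzero stands in for __GFP_NORETRY | __GFP_NOWARN. */
      unsigned int gfpmask = 1;
      int ret = get_pages(gfpmask);

      if (ret == -12) {
          if (evict_something())
              return 1;
          ret = get_pages(0);     /* retry with the default blocking mask */
      }
      printf("get_pages -> %d\n", ret);
      return ret ? 1 : 0;
  }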
2390 +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
2391 +index 7d1357e..63f28ad 100644
2392 +--- a/drivers/gpu/drm/i915/i915_irq.c
2393 ++++ b/drivers/gpu/drm/i915/i915_irq.c
2394 +@@ -282,6 +282,8 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
2395 + dev_priv->mm.irq_gem_seqno = seqno;
2396 + trace_i915_gem_request_complete(dev, seqno);
2397 + DRM_WAKEUP(&dev_priv->irq_queue);
2398 ++ dev_priv->hangcheck_count = 0;
2399 ++ mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
2400 + }
2401 +
2402 + I915_WRITE(GTIIR, gt_iir);
2403 +@@ -1042,6 +1044,10 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
2404 + (void) I915_READ(IER);
2405 + }
2406 +
2407 ++/*
2408 ++ * Must be called after intel_modeset_init or hotplug interrupts won't be
2409 ++ * enabled correctly.
2410 ++ */
2411 + int i915_driver_irq_postinstall(struct drm_device *dev)
2412 + {
2413 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2414 +@@ -1064,19 +1070,23 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
2415 + if (I915_HAS_HOTPLUG(dev)) {
2416 + u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2417 +
2418 +- /* Leave other bits alone */
2419 +- hotplug_en |= HOTPLUG_EN_MASK;
2420 ++ /* Note HDMI and DP share bits */
2421 ++ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2422 ++ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2423 ++ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2424 ++ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2425 ++ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2426 ++ hotplug_en |= HDMID_HOTPLUG_INT_EN;
2427 ++ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
2428 ++ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2429 ++ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
2430 ++ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2431 ++ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
2432 ++ hotplug_en |= CRT_HOTPLUG_INT_EN;
2433 ++ /* Ignore TV since it's buggy */
2434 ++
2435 + I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2436 +
2437 +- dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
2438 +- TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
2439 +- SDVOB_HOTPLUG_INT_STATUS;
2440 +- if (IS_G4X(dev)) {
2441 +- dev_priv->hotplug_supported_mask |=
2442 +- HDMIB_HOTPLUG_INT_STATUS |
2443 +- HDMIC_HOTPLUG_INT_STATUS |
2444 +- HDMID_HOTPLUG_INT_STATUS;
2445 +- }
2446 + /* Enable in IER... */
2447 + enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2448 + /* and unmask in IMR */
2449 +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
2450 +index 54e5907..fd537f4 100644
2451 +--- a/drivers/gpu/drm/i915/i915_reg.h
2452 ++++ b/drivers/gpu/drm/i915/i915_reg.h
2453 +@@ -863,14 +863,6 @@
2454 + #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
2455 + #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
2456 + #define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
2457 +-#define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \
2458 +- HDMIC_HOTPLUG_INT_EN | \
2459 +- HDMID_HOTPLUG_INT_EN | \
2460 +- SDVOB_HOTPLUG_INT_EN | \
2461 +- SDVOC_HOTPLUG_INT_EN | \
2462 +- TV_HOTPLUG_INT_EN | \
2463 +- CRT_HOTPLUG_INT_EN)
2464 +-
2465 +
2466 + #define PORT_HOTPLUG_STAT 0x61114
2467 + #define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
2468 +diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
2469 +index e505144..6d3730f 100644
2470 +--- a/drivers/gpu/drm/i915/intel_crt.c
2471 ++++ b/drivers/gpu/drm/i915/intel_crt.c
2472 +@@ -576,4 +576,6 @@ void intel_crt_init(struct drm_device *dev)
2473 + drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
2474 +
2475 + drm_sysfs_connector_add(connector);
2476 ++
2477 ++ dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
2478 + }
2479 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
2480 +index 121b92e..601415d 100644
2481 +--- a/drivers/gpu/drm/i915/intel_display.c
2482 ++++ b/drivers/gpu/drm/i915/intel_display.c
2483 +@@ -4068,29 +4068,43 @@ static void intel_setup_outputs(struct drm_device *dev)
2484 + bool found = false;
2485 +
2486 + if (I915_READ(SDVOB) & SDVO_DETECTED) {
2487 ++ DRM_DEBUG_KMS("probing SDVOB\n");
2488 + found = intel_sdvo_init(dev, SDVOB);
2489 +- if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
2490 ++ if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
2491 ++ DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
2492 + intel_hdmi_init(dev, SDVOB);
2493 ++ }
2494 +
2495 +- if (!found && SUPPORTS_INTEGRATED_DP(dev))
2496 ++ if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
2497 ++ DRM_DEBUG_KMS("probing DP_B\n");
2498 + intel_dp_init(dev, DP_B);
2499 ++ }
2500 + }
2501 +
2502 + /* Before G4X SDVOC doesn't have its own detect register */
2503 +
2504 +- if (I915_READ(SDVOB) & SDVO_DETECTED)
2505 ++ if (I915_READ(SDVOB) & SDVO_DETECTED) {
2506 ++ DRM_DEBUG_KMS("probing SDVOC\n");
2507 + found = intel_sdvo_init(dev, SDVOC);
2508 ++ }
2509 +
2510 + if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
2511 +
2512 +- if (SUPPORTS_INTEGRATED_HDMI(dev))
2513 ++ if (SUPPORTS_INTEGRATED_HDMI(dev)) {
2514 ++ DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
2515 + intel_hdmi_init(dev, SDVOC);
2516 +- if (SUPPORTS_INTEGRATED_DP(dev))
2517 ++ }
2518 ++ if (SUPPORTS_INTEGRATED_DP(dev)) {
2519 ++ DRM_DEBUG_KMS("probing DP_C\n");
2520 + intel_dp_init(dev, DP_C);
2521 ++ }
2522 + }
2523 +
2524 +- if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
2525 ++ if (SUPPORTS_INTEGRATED_DP(dev) &&
2526 ++ (I915_READ(DP_D) & DP_DETECTED)) {
2527 ++ DRM_DEBUG_KMS("probing DP_D\n");
2528 + intel_dp_init(dev, DP_D);
2529 ++ }
2530 + } else if (IS_I8XX(dev))
2531 + intel_dvo_init(dev);
2532 +
2533 +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
2534 +index 92a3d7b..d487771 100644
2535 +--- a/drivers/gpu/drm/i915/intel_dp.c
2536 ++++ b/drivers/gpu/drm/i915/intel_dp.c
2537 +@@ -1290,14 +1290,20 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2538 + break;
2539 + case DP_B:
2540 + case PCH_DP_B:
2541 ++ dev_priv->hotplug_supported_mask |=
2542 ++ HDMIB_HOTPLUG_INT_STATUS;
2543 + name = "DPDDC-B";
2544 + break;
2545 + case DP_C:
2546 + case PCH_DP_C:
2547 ++ dev_priv->hotplug_supported_mask |=
2548 ++ HDMIC_HOTPLUG_INT_STATUS;
2549 + name = "DPDDC-C";
2550 + break;
2551 + case DP_D:
2552 + case PCH_DP_D:
2553 ++ dev_priv->hotplug_supported_mask |=
2554 ++ HDMID_HOTPLUG_INT_STATUS;
2555 + name = "DPDDC-D";
2556 + break;
2557 + }
2558 +diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
2559 +index c33451a..85760bf 100644
2560 +--- a/drivers/gpu/drm/i915/intel_hdmi.c
2561 ++++ b/drivers/gpu/drm/i915/intel_hdmi.c
2562 +@@ -254,21 +254,26 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
2563 + if (sdvox_reg == SDVOB) {
2564 + intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
2565 + intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
2566 ++ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
2567 + } else if (sdvox_reg == SDVOC) {
2568 + intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
2569 + intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
2570 ++ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
2571 + } else if (sdvox_reg == HDMIB) {
2572 + intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
2573 + intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
2574 + "HDMIB");
2575 ++ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
2576 + } else if (sdvox_reg == HDMIC) {
2577 + intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
2578 + intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
2579 + "HDMIC");
2580 ++ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
2581 + } else if (sdvox_reg == HDMID) {
2582 + intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
2583 + intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
2584 + "HDMID");
2585 ++ dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
2586 + }
2587 + if (!intel_output->ddc_bus)
2588 + goto err_connector;
2589 +diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
2590 +index 29e21d3..3f5aaf1 100644
2591 +--- a/drivers/gpu/drm/i915/intel_sdvo.c
2592 ++++ b/drivers/gpu/drm/i915/intel_sdvo.c
2593 +@@ -2743,6 +2743,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2594 +
2595 + bool intel_sdvo_init(struct drm_device *dev, int output_device)
2596 + {
2597 ++ struct drm_i915_private *dev_priv = dev->dev_private;
2598 + struct drm_connector *connector;
2599 + struct intel_output *intel_output;
2600 + struct intel_sdvo_priv *sdvo_priv;
2601 +@@ -2789,10 +2790,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2602 + intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
2603 + sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2604 + "SDVOB/VGA DDC BUS");
2605 ++ dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
2606 + } else {
2607 + intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
2608 + sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2609 + "SDVOC/VGA DDC BUS");
2610 ++ dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
2611 + }
2612 +
2613 + if (intel_output->ddc_bus == NULL)
2614 +diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
2615 +index 5b28b4e..ce026f0 100644
2616 +--- a/drivers/gpu/drm/i915/intel_tv.c
2617 ++++ b/drivers/gpu/drm/i915/intel_tv.c
2618 +@@ -1801,6 +1801,8 @@ intel_tv_init(struct drm_device *dev)
2619 + drm_connector_attach_property(connector,
2620 + dev->mode_config.tv_bottom_margin_property,
2621 + tv_priv->margin[TV_MARGIN_BOTTOM]);
2622 ++
2623 ++ dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS;
2624 + out:
2625 + drm_sysfs_connector_add(connector);
2626 + }
2627 +diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
2628 +index b368406..100da85 100644
2629 +--- a/drivers/infiniband/hw/ipath/ipath_fs.c
2630 ++++ b/drivers/infiniband/hw/ipath/ipath_fs.c
2631 +@@ -346,10 +346,8 @@ static int ipathfs_fill_super(struct super_block *sb, void *data,
2632 + list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
2633 + spin_unlock_irqrestore(&ipath_devs_lock, flags);
2634 + ret = create_device_files(sb, dd);
2635 +- if (ret) {
2636 +- deactivate_locked_super(sb);
2637 ++ if (ret)
2638 + goto bail;
2639 +- }
2640 + spin_lock_irqsave(&ipath_devs_lock, flags);
2641 + }
2642 +
2643 +diff --git a/drivers/input/misc/winbond-cir.c b/drivers/input/misc/winbond-cir.c
2644 +index 33309fe..c8f5a9a 100644
2645 +--- a/drivers/input/misc/winbond-cir.c
2646 ++++ b/drivers/input/misc/winbond-cir.c
2647 +@@ -768,7 +768,7 @@ wbcir_parse_rc6(struct device *dev, struct wbcir_data *data)
2648 + return;
2649 + }
2650 +
2651 +- dev_info(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X "
2652 ++ dev_dbg(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X "
2653 + "toggle %u mode %u scan 0x%08X\n",
2654 + address,
2655 + command,
2656 +diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
2657 +index 610e914..b6992b7 100644
2658 +--- a/drivers/message/fusion/mptbase.c
2659 ++++ b/drivers/message/fusion/mptbase.c
2660 +@@ -4330,6 +4330,8 @@ initChainBuffers(MPT_ADAPTER *ioc)
2661 +
2662 + if (ioc->bus_type == SPI)
2663 + num_chain *= MPT_SCSI_CAN_QUEUE;
2664 ++ else if (ioc->bus_type == SAS)
2665 ++ num_chain *= MPT_SAS_CAN_QUEUE;
2666 + else
2667 + num_chain *= MPT_FC_CAN_QUEUE;
2668 +
2669 +diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
2670 +index f237ddb..111ea41 100644
2671 +--- a/drivers/mtd/ubi/cdev.c
2672 ++++ b/drivers/mtd/ubi/cdev.c
2673 +@@ -853,7 +853,6 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
2674 + break;
2675 + }
2676 +
2677 +- req.name[req.name_len] = '\0';
2678 + err = verify_mkvol_req(ubi, &req);
2679 + if (err)
2680 + break;
2681 +diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
2682 +index a84f1c5..511b922 100644
2683 +--- a/drivers/net/benet/be.h
2684 ++++ b/drivers/net/benet/be.h
2685 +@@ -272,8 +272,13 @@ struct be_adapter {
2686 + u32 cap;
2687 + u32 rx_fc; /* Rx flow control */
2688 + u32 tx_fc; /* Tx flow control */
2689 ++ u8 generation; /* BladeEngine ASIC generation */
2690 + };
2691 +
2692 ++/* BladeEngine Generation numbers */
2693 ++#define BE_GEN2 2
2694 ++#define BE_GEN3 3
2695 ++
2696 + extern const struct ethtool_ops be_ethtool_ops;
2697 +
2698 + #define drvr_stats(adapter) (&adapter->stats.drvr_stats)
2699 +diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
2700 +index e5f9676..ad33d55 100644
2701 +--- a/drivers/net/benet/be_cmds.h
2702 ++++ b/drivers/net/benet/be_cmds.h
2703 +@@ -154,7 +154,8 @@ struct be_cmd_req_hdr {
2704 + u8 domain; /* dword 0 */
2705 + u32 timeout; /* dword 1 */
2706 + u32 request_length; /* dword 2 */
2707 +- u32 rsvd; /* dword 3 */
2708 ++ u8 version; /* dword 3 */
2709 ++ u8 rsvd[3]; /* dword 3 */
2710 + };
2711 +
2712 + #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
2713 +diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
2714 +index 3749bb1..ec983cb 100644
2715 +--- a/drivers/net/benet/be_main.c
2716 ++++ b/drivers/net/benet/be_main.c
2717 +@@ -1944,6 +1944,7 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
2718 + static int be_map_pci_bars(struct be_adapter *adapter)
2719 + {
2720 + u8 __iomem *addr;
2721 ++ int pcicfg_reg;
2722 +
2723 + addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2724 + pci_resource_len(adapter->pdev, 2));
2725 +@@ -1957,8 +1958,13 @@ static int be_map_pci_bars(struct be_adapter *adapter)
2726 + goto pci_map_err;
2727 + adapter->db = addr;
2728 +
2729 +- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
2730 +- pci_resource_len(adapter->pdev, 1));
2731 ++ if (adapter->generation == BE_GEN2)
2732 ++ pcicfg_reg = 1;
2733 ++ else
2734 ++ pcicfg_reg = 0;
2735 ++
2736 ++ addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
2737 ++ pci_resource_len(adapter->pdev, pcicfg_reg));
2738 + if (addr == NULL)
2739 + goto pci_map_err;
2740 + adapter->pcicfg = addr;
2741 +@@ -2028,6 +2034,7 @@ static int be_stats_init(struct be_adapter *adapter)
2742 + cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2743 + if (cmd->va == NULL)
2744 + return -1;
2745 ++ memset(cmd->va, 0, cmd->size);
2746 + return 0;
2747 + }
2748 +
2749 +@@ -2101,6 +2108,20 @@ static int __devinit be_probe(struct pci_dev *pdev,
2750 + goto rel_reg;
2751 + }
2752 + adapter = netdev_priv(netdev);
2753 ++
2754 ++ switch (pdev->device) {
2755 ++ case BE_DEVICE_ID1:
2756 ++ case OC_DEVICE_ID1:
2757 ++ adapter->generation = BE_GEN2;
2758 ++ break;
2759 ++ case BE_DEVICE_ID2:
2760 ++ case OC_DEVICE_ID2:
2761 ++ adapter->generation = BE_GEN3;
2762 ++ break;
2763 ++ default:
2764 ++ adapter->generation = 0;
2765 ++ }
2766 ++
2767 + adapter->pdev = pdev;
2768 + pci_set_drvdata(pdev, adapter);
2769 + adapter->netdev = netdev;
2770 +diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
2771 +index 42e2b7e..4a2ee85 100644
2772 +--- a/drivers/net/e1000/e1000.h
2773 ++++ b/drivers/net/e1000/e1000.h
2774 +@@ -326,6 +326,8 @@ struct e1000_adapter {
2775 + /* for ioport free */
2776 + int bars;
2777 + int need_ioport;
2778 ++
2779 ++ bool discarding;
2780 + };
2781 +
2782 + enum e1000_state_t {
2783 +diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
2784 +index bcd192c..1a23f16 100644
2785 +--- a/drivers/net/e1000/e1000_main.c
2786 ++++ b/drivers/net/e1000/e1000_main.c
2787 +@@ -1698,18 +1698,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2788 + rctl &= ~E1000_RCTL_SZ_4096;
2789 + rctl |= E1000_RCTL_BSEX;
2790 + switch (adapter->rx_buffer_len) {
2791 +- case E1000_RXBUFFER_256:
2792 +- rctl |= E1000_RCTL_SZ_256;
2793 +- rctl &= ~E1000_RCTL_BSEX;
2794 +- break;
2795 +- case E1000_RXBUFFER_512:
2796 +- rctl |= E1000_RCTL_SZ_512;
2797 +- rctl &= ~E1000_RCTL_BSEX;
2798 +- break;
2799 +- case E1000_RXBUFFER_1024:
2800 +- rctl |= E1000_RCTL_SZ_1024;
2801 +- rctl &= ~E1000_RCTL_BSEX;
2802 +- break;
2803 + case E1000_RXBUFFER_2048:
2804 + default:
2805 + rctl |= E1000_RCTL_SZ_2048;
2806 +@@ -3154,13 +3142,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
2807 + * however with the new *_jumbo_rx* routines, jumbo receives will use
2808 + * fragmented skbs */
2809 +
2810 +- if (max_frame <= E1000_RXBUFFER_256)
2811 +- adapter->rx_buffer_len = E1000_RXBUFFER_256;
2812 +- else if (max_frame <= E1000_RXBUFFER_512)
2813 +- adapter->rx_buffer_len = E1000_RXBUFFER_512;
2814 +- else if (max_frame <= E1000_RXBUFFER_1024)
2815 +- adapter->rx_buffer_len = E1000_RXBUFFER_1024;
2816 +- else if (max_frame <= E1000_RXBUFFER_2048)
2817 ++ if (max_frame <= E1000_RXBUFFER_2048)
2818 + adapter->rx_buffer_len = E1000_RXBUFFER_2048;
2819 + else
2820 + #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
2821 +@@ -3827,13 +3809,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
2822 +
2823 + length = le16_to_cpu(rx_desc->length);
2824 + /* !EOP means multiple descriptors were used to store a single
2825 +- * packet, also make sure the frame isn't just CRC only */
2826 +- if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
2827 ++ * packet, if that's the case we need to toss it. In fact, we
2828 ++ * need to toss every packet with the EOP bit clear and the next
2829 ++ * frame that _does_ have the EOP bit set, as it is by
2830 ++ * definition only a frame fragment
2831 ++ */
2832 ++ if (unlikely(!(status & E1000_RXD_STAT_EOP)))
2833 ++ adapter->discarding = true;
2834 ++
2835 ++ if (adapter->discarding) {
2836 + /* All receives must fit into a single buffer */
2837 + E1000_DBG("%s: Receive packet consumed multiple"
2838 + " buffers\n", netdev->name);
2839 + /* recycle */
2840 + buffer_info->skb = skb;
2841 ++ if (status & E1000_RXD_STAT_EOP)
2842 ++ adapter->discarding = false;
2843 + goto next_desc;
2844 + }
2845 +
2846 +diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
2847 +index 3e187b0..47db9bd 100644
2848 +--- a/drivers/net/e1000e/e1000.h
2849 ++++ b/drivers/net/e1000e/e1000.h
2850 +@@ -417,6 +417,7 @@ struct e1000_info {
2851 + /* CRC Stripping defines */
2852 + #define FLAG2_CRC_STRIPPING (1 << 0)
2853 + #define FLAG2_HAS_PHY_WAKEUP (1 << 1)
2854 ++#define FLAG2_IS_DISCARDING (1 << 2)
2855 +
2856 + #define E1000_RX_DESC_PS(R, i) \
2857 + (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
2858 +diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
2859 +index fad8f9e..2154530 100644
2860 +--- a/drivers/net/e1000e/netdev.c
2861 ++++ b/drivers/net/e1000e/netdev.c
2862 +@@ -482,14 +482,24 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
2863 +
2864 + length = le16_to_cpu(rx_desc->length);
2865 +
2866 +- /* !EOP means multiple descriptors were used to store a single
2867 +- * packet, also make sure the frame isn't just CRC only */
2868 +- if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
2869 ++ /*
2870 ++ * !EOP means multiple descriptors were used to store a single
2871 ++ * packet, if that's the case we need to toss it. In fact, we
2872 ++ * need to toss every packet with the EOP bit clear and the
2873 ++ * next frame that _does_ have the EOP bit set, as it is by
2874 ++ * definition only a frame fragment
2875 ++ */
2876 ++ if (unlikely(!(status & E1000_RXD_STAT_EOP)))
2877 ++ adapter->flags2 |= FLAG2_IS_DISCARDING;
2878 ++
2879 ++ if (adapter->flags2 & FLAG2_IS_DISCARDING) {
2880 + /* All receives must fit into a single buffer */
2881 + e_dbg("%s: Receive packet consumed multiple buffers\n",
2882 + netdev->name);
2883 + /* recycle */
2884 + buffer_info->skb = skb;
2885 ++ if (status & E1000_RXD_STAT_EOP)
2886 ++ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
2887 + goto next_desc;
2888 + }
2889 +
2890 +@@ -747,10 +757,16 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
2891 + PCI_DMA_FROMDEVICE);
2892 + buffer_info->dma = 0;
2893 +
2894 +- if (!(staterr & E1000_RXD_STAT_EOP)) {
2895 ++ /* see !EOP comment in other rx routine */
2896 ++ if (!(staterr & E1000_RXD_STAT_EOP))
2897 ++ adapter->flags2 |= FLAG2_IS_DISCARDING;
2898 ++
2899 ++ if (adapter->flags2 & FLAG2_IS_DISCARDING) {
2900 + e_dbg("%s: Packet Split buffers didn't pick up the "
2901 + "full packet\n", netdev->name);
2902 + dev_kfree_skb_irq(skb);
2903 ++ if (staterr & E1000_RXD_STAT_EOP)
2904 ++ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
2905 + goto next_desc;
2906 + }
2907 +
2908 +@@ -1120,6 +1136,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
2909 +
2910 + rx_ring->next_to_clean = 0;
2911 + rx_ring->next_to_use = 0;
2912 ++ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
2913 +
2914 + writel(0, adapter->hw.hw_addr + rx_ring->head);
2915 + writel(0, adapter->hw.hw_addr + rx_ring->tail);
2916 +@@ -2330,18 +2347,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2917 + rctl &= ~E1000_RCTL_SZ_4096;
2918 + rctl |= E1000_RCTL_BSEX;
2919 + switch (adapter->rx_buffer_len) {
2920 +- case 256:
2921 +- rctl |= E1000_RCTL_SZ_256;
2922 +- rctl &= ~E1000_RCTL_BSEX;
2923 +- break;
2924 +- case 512:
2925 +- rctl |= E1000_RCTL_SZ_512;
2926 +- rctl &= ~E1000_RCTL_BSEX;
2927 +- break;
2928 +- case 1024:
2929 +- rctl |= E1000_RCTL_SZ_1024;
2930 +- rctl &= ~E1000_RCTL_BSEX;
2931 +- break;
2932 + case 2048:
2933 + default:
2934 + rctl |= E1000_RCTL_SZ_2048;
2935 +@@ -4321,13 +4326,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
2936 + * fragmented skbs
2937 + */
2938 +
2939 +- if (max_frame <= 256)
2940 +- adapter->rx_buffer_len = 256;
2941 +- else if (max_frame <= 512)
2942 +- adapter->rx_buffer_len = 512;
2943 +- else if (max_frame <= 1024)
2944 +- adapter->rx_buffer_len = 1024;
2945 +- else if (max_frame <= 2048)
2946 ++ if (max_frame <= 2048)
2947 + adapter->rx_buffer_len = 2048;
2948 + else
2949 + adapter->rx_buffer_len = 4096;
2950 +diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
2951 +index 6a10d7b..f3600b3 100644
2952 +--- a/drivers/net/sky2.c
2953 ++++ b/drivers/net/sky2.c
2954 +@@ -1806,7 +1806,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
2955 + sky2->tx_cons = idx;
2956 + smp_mb();
2957 +
2958 +- if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
2959 ++ /* Wake unless it's detached, and called e.g. from sky2_down() */
2960 ++ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4 && netif_device_present(dev))
2961 + netif_wake_queue(dev);
2962 + }
2963 +
2964 +diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
2965 +index a36e2b5..e65ee4d 100644
2966 +--- a/drivers/net/starfire.c
2967 ++++ b/drivers/net/starfire.c
2968 +@@ -1063,7 +1063,7 @@ static int netdev_open(struct net_device *dev)
2969 + if (retval) {
2970 + printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
2971 + FIRMWARE_RX);
2972 +- return retval;
2973 ++ goto out_init;
2974 + }
2975 + if (fw_rx->size % 4) {
2976 + printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
2977 +@@ -1108,6 +1108,9 @@ out_tx:
2978 + release_firmware(fw_tx);
2979 + out_rx:
2980 + release_firmware(fw_rx);
2981 ++out_init:
2982 ++ if (retval)
2983 ++ netdev_close(dev);
2984 + return retval;
2985 + }
2986 +
2987 +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
2988 +index c7aa05a..0905b38 100644
2989 +--- a/drivers/net/wireless/ath/ath9k/hw.c
2990 ++++ b/drivers/net/wireless/ath/ath9k/hw.c
2991 +@@ -880,12 +880,11 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
2992 + }
2993 + }
2994 +
2995 +-static void ath9k_hw_init_11a_eeprom_fix(struct ath_hw *ah)
2996 ++static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah)
2997 + {
2998 + u32 i, j;
2999 +
3000 +- if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
3001 +- test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes)) {
3002 ++ if (ah->hw_version.devid == AR9280_DEVID_PCI) {
3003 +
3004 + /* EEPROM Fixup */
3005 + for (i = 0; i < ah->iniModes.ia_rows; i++) {
3006 +@@ -980,7 +979,7 @@ int ath9k_hw_init(struct ath_hw *ah)
3007 +
3008 + ath9k_hw_init_mode_gain_regs(ah);
3009 + ath9k_hw_fill_cap_info(ah);
3010 +- ath9k_hw_init_11a_eeprom_fix(ah);
3011 ++ ath9k_hw_init_eeprom_fix(ah);
3012 +
3013 + r = ath9k_hw_init_macaddr(ah);
3014 + if (r) {
3015 +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
3016 +index 80df8f3..5864eaa 100644
3017 +--- a/drivers/net/wireless/ath/ath9k/main.c
3018 ++++ b/drivers/net/wireless/ath/ath9k/main.c
3019 +@@ -2285,10 +2285,10 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
3020 + (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
3021 + ath9k_ps_wakeup(sc);
3022 + ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
3023 +- ath_beacon_return(sc, avp);
3024 + ath9k_ps_restore(sc);
3025 + }
3026 +
3027 ++ ath_beacon_return(sc, avp);
3028 + sc->sc_flags &= ~SC_OP_BEACONS;
3029 +
3030 + for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
3031 +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
3032 +index 81726ee..0eb2591 100644
3033 +--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
3034 ++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
3035 +@@ -2808,7 +2808,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
3036 + repeat_rate--;
3037 + }
3038 +
3039 +- lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_MAX;
3040 ++ lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
3041 + lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
3042 + lq_cmd->agg_params.agg_time_limit =
3043 + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
3044 +diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
3045 +index 768bd0e..43ed81e 100644
3046 +--- a/drivers/regulator/wm8350-regulator.c
3047 ++++ b/drivers/regulator/wm8350-regulator.c
3048 +@@ -1504,7 +1504,8 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
3049 + led->isink_init.consumer_supplies = &led->isink_consumer;
3050 + led->isink_init.constraints.min_uA = 0;
3051 + led->isink_init.constraints.max_uA = pdata->max_uA;
3052 +- led->isink_init.constraints.valid_ops_mask = REGULATOR_CHANGE_CURRENT;
3053 ++ led->isink_init.constraints.valid_ops_mask
3054 ++ = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS;
3055 + led->isink_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
3056 + ret = wm8350_register_regulator(wm8350, isink, &led->isink_init);
3057 + if (ret != 0) {
3058 +@@ -1517,6 +1518,7 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
3059 + led->dcdc_init.num_consumer_supplies = 1;
3060 + led->dcdc_init.consumer_supplies = &led->dcdc_consumer;
3061 + led->dcdc_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
3062 ++ led->dcdc_init.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
3063 + ret = wm8350_register_regulator(wm8350, dcdc, &led->dcdc_init);
3064 + if (ret != 0) {
3065 + platform_device_put(pdev);
3066 +diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
3067 +index aaccc8e..513dec9 100644
3068 +--- a/drivers/s390/block/dasd.c
3069 ++++ b/drivers/s390/block/dasd.c
3070 +@@ -1005,8 +1005,8 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
3071 + if (device == NULL ||
3072 + device != dasd_device_from_cdev_locked(cdev) ||
3073 + strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
3074 +- DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
3075 +- "bus_id %s", dev_name(&cdev->dev));
3076 ++ DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
3077 ++ "invalid device in request");
3078 + return;
3079 + }
3080 +
3081 +@@ -1078,8 +1078,8 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
3082 + device = (struct dasd_device *) cqr->startdev;
3083 + if (!device ||
3084 + strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
3085 +- DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
3086 +- "bus_id %s", dev_name(&cdev->dev));
3087 ++ DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
3088 ++ "invalid device in request");
3089 + return;
3090 + }
3091 +
3092 +diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
3093 +index 417b97c..80c205b 100644
3094 +--- a/drivers/s390/block/dasd_eckd.c
3095 ++++ b/drivers/s390/block/dasd_eckd.c
3096 +@@ -2980,7 +2980,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3097 + len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3098 + " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
3099 + req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
3100 +- scsw_cc(&irb->scsw), req->intrc);
3101 ++ scsw_cc(&irb->scsw), req ? req->intrc : 0);
3102 + len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3103 + " device %s: Failing CCW: %p\n",
3104 + dev_name(&device->cdev->dev),
3105 +diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
3106 +index f756a1b..a5354b8 100644
3107 +--- a/drivers/s390/block/dasd_ioctl.c
3108 ++++ b/drivers/s390/block/dasd_ioctl.c
3109 +@@ -260,7 +260,7 @@ static int dasd_ioctl_information(struct dasd_block *block,
3110 + struct ccw_dev_id dev_id;
3111 +
3112 + base = block->base;
3113 +- if (!base->discipline->fill_info)
3114 ++ if (!base->discipline || !base->discipline->fill_info)
3115 + return -EINVAL;
3116 +
3117 + dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
3118 +@@ -303,10 +303,7 @@ static int dasd_ioctl_information(struct dasd_block *block,
3119 + dasd_info->features |=
3120 + ((base->features & DASD_FEATURE_READONLY) != 0);
3121 +
3122 +- if (base->discipline)
3123 +- memcpy(dasd_info->type, base->discipline->name, 4);
3124 +- else
3125 +- memcpy(dasd_info->type, "none", 4);
3126 ++ memcpy(dasd_info->type, base->discipline->name, 4);
3127 +
3128 + if (block->request_queue->request_fn) {
3129 + struct list_head *l;
3130 +diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
3131 +index 654daa3..f9d7d38 100644
3132 +--- a/drivers/s390/block/dasd_proc.c
3133 ++++ b/drivers/s390/block/dasd_proc.c
3134 +@@ -71,7 +71,7 @@ dasd_devices_show(struct seq_file *m, void *v)
3135 + /* Print device number. */
3136 + seq_printf(m, "%s", dev_name(&device->cdev->dev));
3137 + /* Print discipline string. */
3138 +- if (device != NULL && device->discipline != NULL)
3139 ++ if (device->discipline != NULL)
3140 + seq_printf(m, "(%s)", device->discipline->name);
3141 + else
3142 + seq_printf(m, "(none)");
3143 +@@ -91,10 +91,7 @@ dasd_devices_show(struct seq_file *m, void *v)
3144 + substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " ";
3145 + seq_printf(m, "%4s: ", substr);
3146 + /* Print device status information. */
3147 +- switch ((device != NULL) ? device->state : -1) {
3148 +- case -1:
3149 +- seq_printf(m, "unknown");
3150 +- break;
3151 ++ switch (device->state) {
3152 + case DASD_STATE_NEW:
3153 + seq_printf(m, "new");
3154 + break;
3155 +diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
3156 +index f4b0c47..7f1e3ba 100644
3157 +--- a/drivers/s390/crypto/zcrypt_pcicc.c
3158 ++++ b/drivers/s390/crypto/zcrypt_pcicc.c
3159 +@@ -373,6 +373,8 @@ static int convert_type86(struct zcrypt_device *zdev,
3160 + zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
3161 + return -EAGAIN;
3162 + }
3163 ++ if (service_rc == 8 && service_rs == 72)
3164 ++ return -EINVAL;
3165 + zdev->online = 0;
3166 + return -EAGAIN; /* repeat the request on a different device. */
3167 + }
3168 +diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
3169 +index 5677b40..1f9e923 100644
3170 +--- a/drivers/s390/crypto/zcrypt_pcixcc.c
3171 ++++ b/drivers/s390/crypto/zcrypt_pcixcc.c
3172 +@@ -462,6 +462,8 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
3173 + }
3174 + if (service_rc == 12 && service_rs == 769)
3175 + return -EINVAL;
3176 ++ if (service_rc == 8 && service_rs == 72)
3177 ++ return -EINVAL;
3178 + zdev->online = 0;
3179 + return -EAGAIN; /* repeat the request on a different device. */
3180 + }
3181 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
3182 +index 5987da8..bc9a881 100644
3183 +--- a/drivers/scsi/scsi_lib.c
3184 ++++ b/drivers/scsi/scsi_lib.c
3185 +@@ -749,9 +749,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
3186 + */
3187 + req->next_rq->resid_len = scsi_in(cmd)->resid;
3188 +
3189 ++ scsi_release_buffers(cmd);
3190 + blk_end_request_all(req, 0);
3191 +
3192 +- scsi_release_buffers(cmd);
3193 + scsi_next_command(cmd);
3194 + return;
3195 + }
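
The scsi_lib.c hunk reorders completion of bidirectional commands so that scsi_release_buffers() runs before blk_end_request_all() retires the request; once the request is ended it may be recycled, so the command's buffers have to go first. A minimal userspace sketch of that release-before-complete rule (struct and function names here are illustrative, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct request {
    void *buffer;   /* resource owned until the request is released */
    int   done;     /* once set, the request may be reused or freed */
};

static void release_buffers(struct request *rq)
{
    free(rq->buffer);
    rq->buffer = NULL;
}

static void end_request(struct request *rq)
{
    rq->done = 1;   /* after this point rq can be recycled by its owner */
}

int main(void)
{
    struct request rq = { .buffer = malloc(64), .done = 0 };

    /* Mirror the fixed ordering: release first, complete second. */
    release_buffers(&rq);
    end_request(&rq);

    printf("done=%d buffer=%p\n", rq.done, rq.buffer);
    return 0;
}
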
3196 +diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
3197 +index 377f271..ab2ab3c 100644
3198 +--- a/drivers/serial/uartlite.c
3199 ++++ b/drivers/serial/uartlite.c
3200 +@@ -394,7 +394,7 @@ static void ulite_console_write(struct console *co, const char *s,
3201 + spin_unlock_irqrestore(&port->lock, flags);
3202 + }
3203 +
3204 +-static int __init ulite_console_setup(struct console *co, char *options)
3205 ++static int __devinit ulite_console_setup(struct console *co, char *options)
3206 + {
3207 + struct uart_port *port;
3208 + int baud = 9600;
3209 +diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
3210 +index e33d362..5b56f53 100644
3211 +--- a/drivers/usb/host/r8a66597-hcd.c
3212 ++++ b/drivers/usb/host/r8a66597-hcd.c
3213 +@@ -216,8 +216,17 @@ static void disable_controller(struct r8a66597 *r8a66597)
3214 + {
3215 + int port;
3216 +
3217 ++ /* disable interrupts */
3218 + r8a66597_write(r8a66597, 0, INTENB0);
3219 +- r8a66597_write(r8a66597, 0, INTSTS0);
3220 ++ r8a66597_write(r8a66597, 0, INTENB1);
3221 ++ r8a66597_write(r8a66597, 0, BRDYENB);
3222 ++ r8a66597_write(r8a66597, 0, BEMPENB);
3223 ++ r8a66597_write(r8a66597, 0, NRDYENB);
3224 ++
3225 ++ /* clear status */
3226 ++ r8a66597_write(r8a66597, 0, BRDYSTS);
3227 ++ r8a66597_write(r8a66597, 0, NRDYSTS);
3228 ++ r8a66597_write(r8a66597, 0, BEMPSTS);
3229 +
3230 + for (port = 0; port < r8a66597->max_root_hub; port++)
3231 + r8a66597_disable_port(r8a66597, port);
3232 +@@ -2470,6 +2479,12 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
3233 + r8a66597->rh_timer.data = (unsigned long)r8a66597;
3234 + r8a66597->reg = (unsigned long)reg;
3235 +
3236 ++ /* make sure no interrupts are pending */
3237 ++ ret = r8a66597_clock_enable(r8a66597);
3238 ++ if (ret < 0)
3239 ++ goto clean_up3;
3240 ++ disable_controller(r8a66597);
3241 ++
3242 + for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
3243 + INIT_LIST_HEAD(&r8a66597->pipe_queue[i]);
3244 + init_timer(&r8a66597->td_timer[i]);
3245 +diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
3246 +index 66358fa..b4b6dec 100644
3247 +--- a/drivers/video/imxfb.c
3248 ++++ b/drivers/video/imxfb.c
3249 +@@ -593,7 +593,8 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
3250 + */
3251 + static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
3252 + {
3253 +- struct imxfb_info *fbi = platform_get_drvdata(dev);
3254 ++ struct fb_info *info = platform_get_drvdata(dev);
3255 ++ struct imxfb_info *fbi = info->par;
3256 +
3257 + pr_debug("%s\n", __func__);
3258 +
3259 +@@ -603,7 +604,8 @@ static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
3260 +
3261 + static int imxfb_resume(struct platform_device *dev)
3262 + {
3263 +- struct imxfb_info *fbi = platform_get_drvdata(dev);
3264 ++ struct fb_info *info = platform_get_drvdata(dev);
3265 ++ struct imxfb_info *fbi = info->par;
3266 +
3267 + pr_debug("%s\n", __func__);
3268 +
3269 +diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
3270 +index 054ef29..772ba3f 100644
3271 +--- a/drivers/video/mx3fb.c
3272 ++++ b/drivers/video/mx3fb.c
3273 +@@ -324,8 +324,11 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
3274 + unsigned long flags;
3275 + dma_cookie_t cookie;
3276 +
3277 +- dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi,
3278 +- to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg);
3279 ++ if (mx3_fbi->txd)
3280 ++ dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi,
3281 ++ to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg);
3282 ++ else
3283 ++ dev_dbg(mx3fb->dev, "mx3fbi %p, txd = NULL\n", mx3_fbi);
3284 +
3285 + /* This enables the channel */
3286 + if (mx3_fbi->cookie < 0) {
3287 +@@ -646,6 +649,7 @@ static int sdc_set_global_alpha(struct mx3fb_data *mx3fb, bool enable, uint8_t a
3288 +
3289 + static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value)
3290 + {
3291 ++ dev_dbg(mx3fb->dev, "%s: value = %d\n", __func__, value);
3292 + /* This might be board-specific */
3293 + mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL);
3294 + return;
3295 +@@ -1486,12 +1490,12 @@ static int mx3fb_probe(struct platform_device *pdev)
3296 + goto ersdc0;
3297 + }
3298 +
3299 ++ mx3fb->backlight_level = 255;
3300 ++
3301 + ret = init_fb_chan(mx3fb, to_idmac_chan(chan));
3302 + if (ret < 0)
3303 + goto eisdc0;
3304 +
3305 +- mx3fb->backlight_level = 255;
3306 +-
3307 + return 0;
3308 +
3309 + eisdc0:
3310 +diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
3311 +index 14a8644..69357c0 100644
3312 +--- a/fs/9p/vfs_super.c
3313 ++++ b/fs/9p/vfs_super.c
3314 +@@ -188,7 +188,8 @@ static void v9fs_kill_super(struct super_block *s)
3315 +
3316 + P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
3317 +
3318 +- v9fs_dentry_release(s->s_root); /* clunk root */
3319 ++ if (s->s_root)
3320 ++ v9fs_dentry_release(s->s_root); /* clunk root */
3321 +
3322 + kill_anon_super(s);
3323 +
3324 +diff --git a/fs/affs/affs.h b/fs/affs/affs.h
3325 +index e511dc6..0e40caa 100644
3326 +--- a/fs/affs/affs.h
3327 ++++ b/fs/affs/affs.h
3328 +@@ -106,8 +106,8 @@ struct affs_sb_info {
3329 + u32 s_last_bmap;
3330 + struct buffer_head *s_bmap_bh;
3331 + char *s_prefix; /* Prefix for volumes and assigns. */
3332 +- int s_prefix_len; /* Length of prefix. */
3333 + char s_volume[32]; /* Volume prefix for absolute symlinks. */
3334 ++ spinlock_t symlink_lock; /* protects the previous two */
3335 + };
3336 +
3337 + #define SF_INTL 0x0001 /* International filesystem. */
3338 +diff --git a/fs/affs/namei.c b/fs/affs/namei.c
3339 +index 960d336..d70bbba 100644
3340 +--- a/fs/affs/namei.c
3341 ++++ b/fs/affs/namei.c
3342 +@@ -341,10 +341,13 @@ affs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
3343 + p = (char *)AFFS_HEAD(bh)->table;
3344 + lc = '/';
3345 + if (*symname == '/') {
3346 ++ struct affs_sb_info *sbi = AFFS_SB(sb);
3347 + while (*symname == '/')
3348 + symname++;
3349 +- while (AFFS_SB(sb)->s_volume[i]) /* Cannot overflow */
3350 +- *p++ = AFFS_SB(sb)->s_volume[i++];
3351 ++ spin_lock(&sbi->symlink_lock);
3352 ++ while (sbi->s_volume[i]) /* Cannot overflow */
3353 ++ *p++ = sbi->s_volume[i++];
3354 ++ spin_unlock(&sbi->symlink_lock);
3355 + }
3356 + while (i < maxlen && (c = *symname++)) {
3357 + if (c == '.' && lc == '/' && *symname == '.' && symname[1] == '/') {
3358 +diff --git a/fs/affs/super.c b/fs/affs/super.c
3359 +index 104fdcb..d41e967 100644
3360 +--- a/fs/affs/super.c
3361 ++++ b/fs/affs/super.c
3362 +@@ -203,7 +203,7 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
3363 + switch (token) {
3364 + case Opt_bs:
3365 + if (match_int(&args[0], &n))
3366 +- return -EINVAL;
3367 ++ return 0;
3368 + if (n != 512 && n != 1024 && n != 2048
3369 + && n != 4096) {
3370 + printk ("AFFS: Invalid blocksize (512, 1024, 2048, 4096 allowed)\n");
3371 +@@ -213,7 +213,7 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
3372 + break;
3373 + case Opt_mode:
3374 + if (match_octal(&args[0], &option))
3375 +- return 1;
3376 ++ return 0;
3377 + *mode = option & 0777;
3378 + *mount_opts |= SF_SETMODE;
3379 + break;
3380 +@@ -221,8 +221,6 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
3381 + *mount_opts |= SF_MUFS;
3382 + break;
3383 + case Opt_prefix:
3384 +- /* Free any previous prefix */
3385 +- kfree(*prefix);
3386 + *prefix = match_strdup(&args[0]);
3387 + if (!*prefix)
3388 + return 0;
3389 +@@ -233,21 +231,21 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
3390 + break;
3391 + case Opt_reserved:
3392 + if (match_int(&args[0], reserved))
3393 +- return 1;
3394 ++ return 0;
3395 + break;
3396 + case Opt_root:
3397 + if (match_int(&args[0], root))
3398 +- return 1;
3399 ++ return 0;
3400 + break;
3401 + case Opt_setgid:
3402 + if (match_int(&args[0], &option))
3403 +- return 1;
3404 ++ return 0;
3405 + *gid = option;
3406 + *mount_opts |= SF_SETGID;
3407 + break;
3408 + case Opt_setuid:
3409 + if (match_int(&args[0], &option))
3410 +- return -EINVAL;
3411 ++ return 0;
3412 + *uid = option;
3413 + *mount_opts |= SF_SETUID;
3414 + break;
3415 +@@ -311,11 +309,14 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
3416 + return -ENOMEM;
3417 + sb->s_fs_info = sbi;
3418 + mutex_init(&sbi->s_bmlock);
3419 ++ spin_lock_init(&sbi->symlink_lock);
3420 +
3421 + if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block,
3422 + &blocksize,&sbi->s_prefix,
3423 + sbi->s_volume, &mount_flags)) {
3424 + printk(KERN_ERR "AFFS: Error parsing options\n");
3425 ++ kfree(sbi->s_prefix);
3426 ++ kfree(sbi);
3427 + return -EINVAL;
3428 + }
3429 + /* N.B. after this point s_prefix must be released */
3430 +@@ -516,14 +517,18 @@ affs_remount(struct super_block *sb, int *flags, char *data)
3431 + unsigned long mount_flags;
3432 + int res = 0;
3433 + char *new_opts = kstrdup(data, GFP_KERNEL);
3434 ++ char volume[32];
3435 ++ char *prefix = NULL;
3436 +
3437 + pr_debug("AFFS: remount(flags=0x%x,opts=\"%s\")\n",*flags,data);
3438 +
3439 + *flags |= MS_NODIRATIME;
3440 +
3441 ++ memcpy(volume, sbi->s_volume, 32);
3442 + if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block,
3443 +- &blocksize, &sbi->s_prefix, sbi->s_volume,
3444 ++ &blocksize, &prefix, volume,
3445 + &mount_flags)) {
3446 ++ kfree(prefix);
3447 + kfree(new_opts);
3448 + return -EINVAL;
3449 + }
3450 +@@ -534,6 +539,14 @@ affs_remount(struct super_block *sb, int *flags, char *data)
3451 + sbi->s_mode = mode;
3452 + sbi->s_uid = uid;
3453 + sbi->s_gid = gid;
3454 ++ /* protect against readers */
3455 ++ spin_lock(&sbi->symlink_lock);
3456 ++ if (prefix) {
3457 ++ kfree(sbi->s_prefix);
3458 ++ sbi->s_prefix = prefix;
3459 ++ }
3460 ++ memcpy(sbi->s_volume, volume, 32);
3461 ++ spin_unlock(&sbi->symlink_lock);
3462 +
3463 + if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
3464 + unlock_kernel();
3465 +diff --git a/fs/affs/symlink.c b/fs/affs/symlink.c
3466 +index 4178253..ee00f08 100644
3467 +--- a/fs/affs/symlink.c
3468 ++++ b/fs/affs/symlink.c
3469 +@@ -20,7 +20,6 @@ static int affs_symlink_readpage(struct file *file, struct page *page)
3470 + int i, j;
3471 + char c;
3472 + char lc;
3473 +- char *pf;
3474 +
3475 + pr_debug("AFFS: follow_link(ino=%lu)\n",inode->i_ino);
3476 +
3477 +@@ -32,11 +31,15 @@ static int affs_symlink_readpage(struct file *file, struct page *page)
3478 + j = 0;
3479 + lf = (struct slink_front *)bh->b_data;
3480 + lc = 0;
3481 +- pf = AFFS_SB(inode->i_sb)->s_prefix ? AFFS_SB(inode->i_sb)->s_prefix : "/";
3482 +
3483 + if (strchr(lf->symname,':')) { /* Handle assign or volume name */
3484 ++ struct affs_sb_info *sbi = AFFS_SB(inode->i_sb);
3485 ++ char *pf;
3486 ++ spin_lock(&sbi->symlink_lock);
3487 ++ pf = sbi->s_prefix ? sbi->s_prefix : "/";
3488 + while (i < 1023 && (c = pf[i]))
3489 + link[i++] = c;
3490 ++ spin_unlock(&sbi->symlink_lock);
3491 + while (i < 1023 && lf->symname[j] != ':')
3492 + link[i++] = lf->symname[j++];
3493 + if (i < 1023)
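
The AFFS hunks above introduce sbi->symlink_lock so that s_prefix and s_volume are only read or replaced under a lock, and remount now builds the new prefix on the side and swaps it in rather than letting parse_options() scribble on the live superblock info. A userspace analog of the pattern (illustrative names; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_spinlock_t symlink_lock;
static char *s_prefix;                  /* protected by symlink_lock */

static void read_prefix(char *out, size_t len)
{
    pthread_spin_lock(&symlink_lock);
    snprintf(out, len, "%s", s_prefix ? s_prefix : "/");
    pthread_spin_unlock(&symlink_lock);
}

static void remount_prefix(char *new_prefix)
{
    char *old;

    pthread_spin_lock(&symlink_lock);
    old = s_prefix;
    s_prefix = new_prefix;              /* swap under the lock */
    pthread_spin_unlock(&symlink_lock);
    free(old);                          /* drop the old copy outside it */
}

int main(void)
{
    char buf[64];

    pthread_spin_init(&symlink_lock, PTHREAD_PROCESS_PRIVATE);
    remount_prefix(strdup("dh0:"));
    read_prefix(buf, sizeof(buf));
    printf("prefix=%s\n", buf);
    free(s_prefix);
    pthread_spin_destroy(&symlink_lock);
    return 0;
}
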
3494 +diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
3495 +index 6f60336..8f3d9fd 100644
3496 +--- a/fs/bfs/inode.c
3497 ++++ b/fs/bfs/inode.c
3498 +@@ -353,35 +353,35 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
3499 + struct inode *inode;
3500 + unsigned i, imap_len;
3501 + struct bfs_sb_info *info;
3502 +- long ret = -EINVAL;
3503 ++ int ret = -EINVAL;
3504 + unsigned long i_sblock, i_eblock, i_eoff, s_size;
3505 +
3506 + info = kzalloc(sizeof(*info), GFP_KERNEL);
3507 + if (!info)
3508 + return -ENOMEM;
3509 ++ mutex_init(&info->bfs_lock);
3510 + s->s_fs_info = info;
3511 +
3512 + sb_set_blocksize(s, BFS_BSIZE);
3513 +
3514 +- bh = sb_bread(s, 0);
3515 +- if(!bh)
3516 ++ info->si_sbh = sb_bread(s, 0);
3517 ++ if (!info->si_sbh)
3518 + goto out;
3519 +- bfs_sb = (struct bfs_super_block *)bh->b_data;
3520 ++ bfs_sb = (struct bfs_super_block *)info->si_sbh->b_data;
3521 + if (le32_to_cpu(bfs_sb->s_magic) != BFS_MAGIC) {
3522 + if (!silent)
3523 + printf("No BFS filesystem on %s (magic=%08x)\n",
3524 + s->s_id, le32_to_cpu(bfs_sb->s_magic));
3525 +- goto out;
3526 ++ goto out1;
3527 + }
3528 + if (BFS_UNCLEAN(bfs_sb, s) && !silent)
3529 + printf("%s is unclean, continuing\n", s->s_id);
3530 +
3531 + s->s_magic = BFS_MAGIC;
3532 +- info->si_sbh = bh;
3533 +
3534 + if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) {
3535 + printf("Superblock is corrupted\n");
3536 +- goto out;
3537 ++ goto out1;
3538 + }
3539 +
3540 + info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) /
3541 +@@ -390,7 +390,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
3542 + imap_len = (info->si_lasti / 8) + 1;
3543 + info->si_imap = kzalloc(imap_len, GFP_KERNEL);
3544 + if (!info->si_imap)
3545 +- goto out;
3546 ++ goto out1;
3547 + for (i = 0; i < BFS_ROOT_INO; i++)
3548 + set_bit(i, info->si_imap);
3549 +
3550 +@@ -398,15 +398,13 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
3551 + inode = bfs_iget(s, BFS_ROOT_INO);
3552 + if (IS_ERR(inode)) {
3553 + ret = PTR_ERR(inode);
3554 +- kfree(info->si_imap);
3555 +- goto out;
3556 ++ goto out2;
3557 + }
3558 + s->s_root = d_alloc_root(inode);
3559 + if (!s->s_root) {
3560 + iput(inode);
3561 + ret = -ENOMEM;
3562 +- kfree(info->si_imap);
3563 +- goto out;
3564 ++ goto out2;
3565 + }
3566 +
3567 + info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1) >> BFS_BSIZE_BITS;
3568 +@@ -419,10 +417,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
3569 + bh = sb_bread(s, info->si_blocks - 1);
3570 + if (!bh) {
3571 + printf("Last block not available: %lu\n", info->si_blocks - 1);
3572 +- iput(inode);
3573 + ret = -EIO;
3574 +- kfree(info->si_imap);
3575 +- goto out;
3576 ++ goto out3;
3577 + }
3578 + brelse(bh);
3579 +
3580 +@@ -459,11 +455,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
3581 + printf("Inode 0x%08x corrupted\n", i);
3582 +
3583 + brelse(bh);
3584 +- s->s_root = NULL;
3585 +- kfree(info->si_imap);
3586 +- kfree(info);
3587 +- s->s_fs_info = NULL;
3588 +- return -EIO;
3589 ++ ret = -EIO;
3590 ++ goto out3;
3591 + }
3592 +
3593 + if (!di->i_ino) {
3594 +@@ -483,11 +476,17 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
3595 + s->s_dirt = 1;
3596 + }
3597 + dump_imap("read_super", s);
3598 +- mutex_init(&info->bfs_lock);
3599 + return 0;
3600 +
3601 ++out3:
3602 ++ dput(s->s_root);
3603 ++ s->s_root = NULL;
3604 ++out2:
3605 ++ kfree(info->si_imap);
3606 ++out1:
3607 ++ brelse(info->si_sbh);
3608 + out:
3609 +- brelse(bh);
3610 ++ mutex_destroy(&info->bfs_lock);
3611 + kfree(info);
3612 + s->s_fs_info = NULL;
3613 + return ret;
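
The bfs_fill_super() rework converges every failure on a layered goto ladder (out3/out2/out1/out), so each exit point unwinds exactly what was set up so far, including the superblock buffer head now stashed in info->si_sbh from the start. The idiom itself, as a runnable sketch:

#include <stdlib.h>

static int setup(void)
{
    int ret = -1;
    char *a, *b, *c;

    a = malloc(16);
    if (!a)
        goto out;
    b = malloc(16);
    if (!b)
        goto out1;
    c = malloc(16);
    if (!c)
        goto out2;

    /* success; a real filesystem would hand these off to the caller */
    free(c);
    free(b);
    free(a);
    return 0;

out2:
    free(b);
out1:
    free(a);
out:
    return ret;
}

int main(void)
{
    return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}
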
3614 +diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
3615 +index b639dcf..0133b5a 100644
3616 +--- a/fs/binfmt_aout.c
3617 ++++ b/fs/binfmt_aout.c
3618 +@@ -263,6 +263,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
3619 + #else
3620 + set_personality(PER_LINUX);
3621 + #endif
3622 ++ setup_new_exec(bprm);
3623 +
3624 + current->mm->end_code = ex.a_text +
3625 + (current->mm->start_code = N_TXTADDR(ex));
3626 +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
3627 +index b9b3bb5..1ed37ba 100644
3628 +--- a/fs/binfmt_elf.c
3629 ++++ b/fs/binfmt_elf.c
3630 +@@ -662,27 +662,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
3631 + if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
3632 + goto out_free_interp;
3633 +
3634 +- /*
3635 +- * The early SET_PERSONALITY here is so that the lookup
3636 +- * for the interpreter happens in the namespace of the
3637 +- * to-be-execed image. SET_PERSONALITY can select an
3638 +- * alternate root.
3639 +- *
3640 +- * However, SET_PERSONALITY is NOT allowed to switch
3641 +- * this task into the new images's memory mapping
3642 +- * policy - that is, TASK_SIZE must still evaluate to
3643 +- * that which is appropriate to the execing application.
3644 +- * This is because exit_mmap() needs to have TASK_SIZE
3645 +- * evaluate to the size of the old image.
3646 +- *
3647 +- * So if (say) a 64-bit application is execing a 32-bit
3648 +- * application it is the architecture's responsibility
3649 +- * to defer changing the value of TASK_SIZE until the
3650 +- * switch really is going to happen - do this in
3651 +- * flush_thread(). - akpm
3652 +- */
3653 +- SET_PERSONALITY(loc->elf_ex);
3654 +-
3655 + interpreter = open_exec(elf_interpreter);
3656 + retval = PTR_ERR(interpreter);
3657 + if (IS_ERR(interpreter))
3658 +@@ -730,9 +709,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
3659 + /* Verify the interpreter has a valid arch */
3660 + if (!elf_check_arch(&loc->interp_elf_ex))
3661 + goto out_free_dentry;
3662 +- } else {
3663 +- /* Executables without an interpreter also need a personality */
3664 +- SET_PERSONALITY(loc->elf_ex);
3665 + }
3666 +
3667 + /* Flush all traces of the currently running executable */
3668 +@@ -752,7 +728,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
3669 +
3670 + if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3671 + current->flags |= PF_RANDOMIZE;
3672 +- arch_pick_mmap_layout(current->mm);
3673 ++
3674 ++ setup_new_exec(bprm);
3675 +
3676 + /* Do this so that we can load the interpreter, if need be. We will
3677 + change some of these later */
3678 +diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
3679 +index 38502c6..e7a0bb4 100644
3680 +--- a/fs/binfmt_elf_fdpic.c
3681 ++++ b/fs/binfmt_elf_fdpic.c
3682 +@@ -171,6 +171,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
3683 + #ifdef ELF_FDPIC_PLAT_INIT
3684 + unsigned long dynaddr;
3685 + #endif
3686 ++#ifndef CONFIG_MMU
3687 ++ unsigned long stack_prot;
3688 ++#endif
3689 + struct file *interpreter = NULL; /* to shut gcc up */
3690 + char *interpreter_name = NULL;
3691 + int executable_stack;
3692 +@@ -316,6 +319,11 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
3693 + * defunct, deceased, etc. after this point we have to exit via
3694 + * error_kill */
3695 + set_personality(PER_LINUX_FDPIC);
3696 ++ if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
3697 ++ current->personality |= READ_IMPLIES_EXEC;
3698 ++
3699 ++ setup_new_exec(bprm);
3700 ++
3701 + set_binfmt(&elf_fdpic_format);
3702 +
3703 + current->mm->start_code = 0;
3704 +@@ -377,9 +385,13 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
3705 + if (stack_size < PAGE_SIZE * 2)
3706 + stack_size = PAGE_SIZE * 2;
3707 +
3708 ++ stack_prot = PROT_READ | PROT_WRITE;
3709 ++ if (executable_stack == EXSTACK_ENABLE_X ||
3710 ++ (executable_stack == EXSTACK_DEFAULT && VM_STACK_FLAGS & VM_EXEC))
3711 ++ stack_prot |= PROT_EXEC;
3712 ++
3713 + down_write(&current->mm->mmap_sem);
3714 +- current->mm->start_brk = do_mmap(NULL, 0, stack_size,
3715 +- PROT_READ | PROT_WRITE | PROT_EXEC,
3716 ++ current->mm->start_brk = do_mmap(NULL, 0, stack_size, stack_prot,
3717 + MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN,
3718 + 0);
3719 +
3720 +diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
3721 +index a279665..ca88c46 100644
3722 +--- a/fs/binfmt_flat.c
3723 ++++ b/fs/binfmt_flat.c
3724 +@@ -519,6 +519,7 @@ static int load_flat_file(struct linux_binprm * bprm,
3725 +
3726 + /* OK, This is the point of no return */
3727 + set_personality(PER_LINUX_32BIT);
3728 ++ setup_new_exec(bprm);
3729 + }
3730 +
3731 + /*
3732 +diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c
3733 +index eff74b9..35cf002 100644
3734 +--- a/fs/binfmt_som.c
3735 ++++ b/fs/binfmt_som.c
3736 +@@ -227,6 +227,7 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs)
3737 + /* OK, This is the point of no return */
3738 + current->flags &= ~PF_FORKNOEXEC;
3739 + current->personality = PER_HPUX;
3740 ++ setup_new_exec(bprm);
3741 +
3742 + /* Set the task size for HP-UX processes such that
3743 + * the gateway page is outside the address space.
3744 +diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
3745 +index 49a34e7..a16f29e 100644
3746 +--- a/fs/bio-integrity.c
3747 ++++ b/fs/bio-integrity.c
3748 +@@ -61,7 +61,7 @@ static inline unsigned int vecs_to_idx(unsigned int nr)
3749 +
3750 + static inline int use_bip_pool(unsigned int idx)
3751 + {
3752 +- if (idx == BIOVEC_NR_POOLS)
3753 ++ if (idx == BIOVEC_MAX_IDX)
3754 + return 1;
3755 +
3756 + return 0;
3757 +@@ -95,6 +95,7 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
3758 +
3759 + /* Use mempool if lower order alloc failed or max vecs were requested */
3760 + if (bip == NULL) {
3761 ++ idx = BIOVEC_MAX_IDX; /* so we free the payload properly later */
3762 + bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
3763 +
3764 + if (unlikely(bip == NULL)) {
3765 +diff --git a/fs/bio.c b/fs/bio.c
3766 +index 12da5db..e0c9e71 100644
3767 +--- a/fs/bio.c
3768 ++++ b/fs/bio.c
3769 +@@ -542,13 +542,18 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
3770 +
3771 + if (page == prev->bv_page &&
3772 + offset == prev->bv_offset + prev->bv_len) {
3773 ++ unsigned int prev_bv_len = prev->bv_len;
3774 + prev->bv_len += len;
3775 +
3776 + if (q->merge_bvec_fn) {
3777 + struct bvec_merge_data bvm = {
3778 ++ /* prev_bvec is already charged in
3779 ++ bi_size, discharge it in order to
3780 ++ simulate merging updated prev_bvec
3781 ++ as new bvec. */
3782 + .bi_bdev = bio->bi_bdev,
3783 + .bi_sector = bio->bi_sector,
3784 +- .bi_size = bio->bi_size,
3785 ++ .bi_size = bio->bi_size - prev_bv_len,
3786 + .bi_rw = bio->bi_rw,
3787 + };
3788 +
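
The fs/bio.c fix removes a double count: when a new page merely extends the previous bio_vec, the grown vector is offered to ->merge_bvec_fn while its old length is still included in bi_size, so the query must discharge prev_bv_len first. The arithmetic with made-up numbers:

#include <stdio.h>

int main(void)
{
    unsigned int bi_size     = 8192;   /* bytes already in the bio    */
    unsigned int prev_bv_len = 4096;   /* length of the last bio_vec  */
    unsigned int len         = 4096;   /* bytes being appended        */

    /* Old query: prev bvec counted in bi_size AND in the grown vec. */
    unsigned int old_query = bi_size + (prev_bv_len + len);

    /* Fixed query: discharge prev_bv_len from bi_size first.        */
    unsigned int new_query = (bi_size - prev_bv_len) + (prev_bv_len + len);

    printf("old=%u new=%u (off by %u)\n",
           old_query, new_query, old_query - new_query);
    return 0;
}
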
3789 +diff --git a/fs/exec.c b/fs/exec.c
3790 +index ba112bd..7fa4efd 100644
3791 +--- a/fs/exec.c
3792 ++++ b/fs/exec.c
3793 +@@ -931,9 +931,7 @@ void set_task_comm(struct task_struct *tsk, char *buf)
3794 +
3795 + int flush_old_exec(struct linux_binprm * bprm)
3796 + {
3797 +- char * name;
3798 +- int i, ch, retval;
3799 +- char tcomm[sizeof(current->comm)];
3800 ++ int retval;
3801 +
3802 + /*
3803 + * Make sure we have a private signal table and that
3804 +@@ -954,6 +952,25 @@ int flush_old_exec(struct linux_binprm * bprm)
3805 +
3806 + bprm->mm = NULL; /* We're using it now */
3807 +
3808 ++ current->flags &= ~PF_RANDOMIZE;
3809 ++ flush_thread();
3810 ++ current->personality &= ~bprm->per_clear;
3811 ++
3812 ++ return 0;
3813 ++
3814 ++out:
3815 ++ return retval;
3816 ++}
3817 ++EXPORT_SYMBOL(flush_old_exec);
3818 ++
3819 ++void setup_new_exec(struct linux_binprm * bprm)
3820 ++{
3821 ++ int i, ch;
3822 ++ char * name;
3823 ++ char tcomm[sizeof(current->comm)];
3824 ++
3825 ++ arch_pick_mmap_layout(current->mm);
3826 ++
3827 + /* This is the point of no return */
3828 + current->sas_ss_sp = current->sas_ss_size = 0;
3829 +
3830 +@@ -975,9 +992,6 @@ int flush_old_exec(struct linux_binprm * bprm)
3831 + tcomm[i] = '\0';
3832 + set_task_comm(current, tcomm);
3833 +
3834 +- current->flags &= ~PF_RANDOMIZE;
3835 +- flush_thread();
3836 +-
3837 + /* Set the new mm task size. We have to do that late because it may
3838 + * depend on TIF_32BIT which is only updated in flush_thread() on
3839 + * some architectures like powerpc
3840 +@@ -993,8 +1007,6 @@ int flush_old_exec(struct linux_binprm * bprm)
3841 + set_dumpable(current->mm, suid_dumpable);
3842 + }
3843 +
3844 +- current->personality &= ~bprm->per_clear;
3845 +-
3846 + /*
3847 + * Flush performance counters when crossing a
3848 + * security domain:
3849 +@@ -1009,14 +1021,8 @@ int flush_old_exec(struct linux_binprm * bprm)
3850 +
3851 + flush_signal_handlers(current, 0);
3852 + flush_old_files(current->files);
3853 +-
3854 +- return 0;
3855 +-
3856 +-out:
3857 +- return retval;
3858 + }
3859 +-
3860 +-EXPORT_SYMBOL(flush_old_exec);
3861 ++EXPORT_SYMBOL(setup_new_exec);
3862 +
3863 + /*
3864 + * Prepare credentials and lock ->cred_guard_mutex.
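
The fs/exec.c change is the hub of this patch's binfmt series: the old single-pass flush_old_exec() becomes a fallible flush phase plus a committed setup_new_exec() phase, and each loader above gains a setup_new_exec(bprm) call after it has picked a personality. A sketch of the resulting call shape (names and signatures are illustrative, not the kernel's):

#include <stdio.h>

struct binprm { const char *name; };

static int flush_phase(struct binprm *bprm)
{
    /* release the old image; this step may still fail and abort */
    printf("flushing old image for %s\n", bprm->name);
    return 0;
}

static void setup_phase(struct binprm *bprm)
{
    /* past the point of no return: comm, mmap layout, task size */
    printf("committing new image for %s\n", bprm->name);
}

static int load_binary(struct binprm *bprm)
{
    int ret = flush_phase(bprm);
    if (ret)
        return ret;
    /* binfmt-specific personality selection sits between the phases */
    setup_phase(bprm);
    return 0;
}

int main(void)
{
    struct binprm bprm = { .name = "a.out" };
    return load_binary(&bprm);
}
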
3865 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
3866 +index c18913a..a9f5e13 100644
3867 +--- a/fs/fuse/file.c
3868 ++++ b/fs/fuse/file.c
3869 +@@ -828,6 +828,9 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
3870 + if (!page)
3871 + break;
3872 +
3873 ++ if (mapping_writably_mapped(mapping))
3874 ++ flush_dcache_page(page);
3875 ++
3876 + pagefault_disable();
3877 + tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
3878 + pagefault_enable();
3879 +diff --git a/fs/romfs/super.c b/fs/romfs/super.c
3880 +index c117fa8..42d2135 100644
3881 +--- a/fs/romfs/super.c
3882 ++++ b/fs/romfs/super.c
3883 +@@ -544,6 +544,7 @@ error:
3884 + error_rsb_inval:
3885 + ret = -EINVAL;
3886 + error_rsb:
3887 ++ kfree(rsb);
3888 + return ret;
3889 + }
3890 +
3891 +diff --git a/include/linux/acpi.h b/include/linux/acpi.h
3892 +index dfcd920..c010b94 100644
3893 +--- a/include/linux/acpi.h
3894 ++++ b/include/linux/acpi.h
3895 +@@ -253,6 +253,13 @@ void __init acpi_old_suspend_ordering(void);
3896 + void __init acpi_s4_no_nvs(void);
3897 + #endif /* CONFIG_PM_SLEEP */
3898 +
3899 ++struct acpi_osc_context {
3900 ++ char *uuid_str; /* uuid string */
3901 ++ int rev;
3902 ++ struct acpi_buffer cap; /* arg2/arg3 */
3903 ++ struct acpi_buffer ret; /* free by caller if success */
3904 ++};
3905 ++
3906 + #define OSC_QUERY_TYPE 0
3907 + #define OSC_SUPPORT_TYPE 1
3908 + #define OSC_CONTROL_TYPE 2
3909 +@@ -265,6 +272,15 @@ void __init acpi_s4_no_nvs(void);
3910 + #define OSC_INVALID_REVISION_ERROR 8
3911 + #define OSC_CAPABILITIES_MASK_ERROR 16
3912 +
3913 ++acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
3914 ++
3915 ++/* platform-wide _OSC bits */
3916 ++#define OSC_SB_PAD_SUPPORT 1
3917 ++#define OSC_SB_PPC_OST_SUPPORT 2
3918 ++#define OSC_SB_PR3_SUPPORT 4
3919 ++#define OSC_SB_CPUHP_OST_SUPPORT 8
3920 ++#define OSC_SB_APEI_SUPPORT 16
3921 ++
3922 + /* _OSC DW1 Definition (OS Support Fields) */
3923 + #define OSC_EXT_PCI_CONFIG_SUPPORT 1
3924 + #define OSC_ACTIVE_STATE_PWR_SUPPORT 2
3925 +diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
3926 +index aece486..340f441 100644
3927 +--- a/include/linux/binfmts.h
3928 ++++ b/include/linux/binfmts.h
3929 +@@ -101,6 +101,7 @@ extern int prepare_binprm(struct linux_binprm *);
3930 + extern int __must_check remove_arg_zero(struct linux_binprm *);
3931 + extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
3932 + extern int flush_old_exec(struct linux_binprm * bprm);
3933 ++extern void setup_new_exec(struct linux_binprm * bprm);
3934 +
3935 + extern int suid_dumpable;
3936 + #define SUID_DUMP_DISABLE 0 /* No setuid dumping */
3937 +diff --git a/include/linux/connector.h b/include/linux/connector.h
3938 +index 3a14615..ecb61c4 100644
3939 +--- a/include/linux/connector.h
3940 ++++ b/include/linux/connector.h
3941 +@@ -24,9 +24,6 @@
3942 +
3943 + #include <linux/types.h>
3944 +
3945 +-#define CN_IDX_CONNECTOR 0xffffffff
3946 +-#define CN_VAL_CONNECTOR 0xffffffff
3947 +-
3948 + /*
3949 + * Process Events connector unique ids -- used for message routing
3950 + */
3951 +@@ -73,30 +70,6 @@ struct cn_msg {
3952 + __u8 data[0];
3953 + };
3954 +
3955 +-/*
3956 +- * Notify structure - requests notification about
3957 +- * registering/unregistering idx/val in range [first, first+range].
3958 +- */
3959 +-struct cn_notify_req {
3960 +- __u32 first;
3961 +- __u32 range;
3962 +-};
3963 +-
3964 +-/*
3965 +- * Main notification control message
3966 +- * *_notify_num - number of appropriate cn_notify_req structures after
3967 +- * this struct.
3968 +- * group - notification receiver's idx.
3969 +- * len - total length of the attached data.
3970 +- */
3971 +-struct cn_ctl_msg {
3972 +- __u32 idx_notify_num;
3973 +- __u32 val_notify_num;
3974 +- __u32 group;
3975 +- __u32 len;
3976 +- __u8 data[0];
3977 +-};
3978 +-
3979 + #ifdef __KERNEL__
3980 +
3981 + #include <asm/atomic.h>
3982 +@@ -149,11 +122,6 @@ struct cn_callback_entry {
3983 + u32 seq, group;
3984 + };
3985 +
3986 +-struct cn_ctl_entry {
3987 +- struct list_head notify_entry;
3988 +- struct cn_ctl_msg *msg;
3989 +-};
3990 +-
3991 + struct cn_dev {
3992 + struct cb_id id;
3993 +
3994 +diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
3995 +index ad27c7d..9cd0bcf 100644
3996 +--- a/include/linux/inetdevice.h
3997 ++++ b/include/linux/inetdevice.h
3998 +@@ -83,6 +83,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
3999 + #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
4000 + #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
4001 + #define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
4002 ++#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK)
4003 + #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
4004 + ACCEPT_SOURCE_ROUTE)
4005 + #define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY)
4006 +diff --git a/include/linux/kvm.h b/include/linux/kvm.h
4007 +index 8908dd6..0eadd71 100644
4008 +--- a/include/linux/kvm.h
4009 ++++ b/include/linux/kvm.h
4010 +@@ -439,6 +439,7 @@ struct kvm_ioeventfd {
4011 + #endif
4012 + #define KVM_CAP_IOEVENTFD 36
4013 + #define KVM_CAP_SET_IDENTITY_MAP_ADDR 37
4014 ++#define KVM_CAP_ADJUST_CLOCK 39
4015 +
4016 + #ifdef KVM_CAP_IRQ_ROUTING
4017 +
4018 +@@ -501,6 +502,12 @@ struct kvm_irqfd {
4019 + __u8 pad[20];
4020 + };
4021 +
4022 ++struct kvm_clock_data {
4023 ++ __u64 clock;
4024 ++ __u32 flags;
4025 ++ __u32 pad[9];
4026 ++};
4027 ++
4028 + /*
4029 + * ioctls for VM fds
4030 + */
4031 +@@ -550,6 +557,8 @@ struct kvm_irqfd {
4032 + #define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
4033 + #define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
4034 + #define KVM_IOEVENTFD _IOW(KVMIO, 0x79, struct kvm_ioeventfd)
4035 ++#define KVM_SET_CLOCK _IOW(KVMIO, 0x7b, struct kvm_clock_data)
4036 ++#define KVM_GET_CLOCK _IOR(KVMIO, 0x7c, struct kvm_clock_data)
4037 +
4038 + /*
4039 + * ioctls for vcpu fds
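
The two ioctl numbers pair with the kvm_clock_data structure added just above and are what a migration tool would use to save and restore kvmclock. A hedged usage sketch, assuming a kernel that carries this change and read/write access to /dev/kvm:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
    int kvm = open("/dev/kvm", O_RDWR);
    if (kvm < 0) { perror("open /dev/kvm"); return 1; }

    int vm = ioctl(kvm, KVM_CREATE_VM, 0);
    if (vm < 0) { perror("KVM_CREATE_VM"); return 1; }

    if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ADJUST_CLOCK) <= 0) {
        fprintf(stderr, "KVM_CAP_ADJUST_CLOCK not supported\n");
        return 1;
    }

    struct kvm_clock_data data = { 0 };
    if (ioctl(vm, KVM_GET_CLOCK, &data) < 0) { perror("KVM_GET_CLOCK"); return 1; }
    printf("kvmclock: %llu\n", (unsigned long long)data.clock);

    /* restore path: flags must be zero, clock is the saved value */
    if (ioctl(vm, KVM_SET_CLOCK, &data) < 0) { perror("KVM_SET_CLOCK"); return 1; }
    return 0;
}
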
4040 +diff --git a/include/linux/libata.h b/include/linux/libata.h
4041 +index 8769864..b0f6d97 100644
4042 +--- a/include/linux/libata.h
4043 ++++ b/include/linux/libata.h
4044 +@@ -354,6 +354,9 @@ enum {
4045 + /* max tries if error condition is still set after ->error_handler */
4046 + ATA_EH_MAX_TRIES = 5,
4047 +
4048 ++ /* sometimes resuming a link requires several retries */
4049 ++ ATA_LINK_RESUME_TRIES = 5,
4050 ++
4051 + /* how hard are we gonna try to probe/recover devices */
4052 + ATA_PROBE_MAX_TRIES = 3,
4053 + ATA_EH_DEV_TRIES = 3,
4054 +diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
4055 +index ed5d750..3c62ed4 100644
4056 +--- a/include/linux/pagemap.h
4057 ++++ b/include/linux/pagemap.h
4058 +@@ -253,6 +253,8 @@ extern struct page * read_cache_page_async(struct address_space *mapping,
4059 + extern struct page * read_cache_page(struct address_space *mapping,
4060 + pgoff_t index, filler_t *filler,
4061 + void *data);
4062 ++extern struct page * read_cache_page_gfp(struct address_space *mapping,
4063 ++ pgoff_t index, gfp_t gfp_mask);
4064 + extern int read_cache_pages(struct address_space *mapping,
4065 + struct list_head *pages, filler_t *filler, void *data);
4066 +
4067 +diff --git a/include/linux/sched.h b/include/linux/sched.h
4068 +index 0f67914..d3dce7d 100644
4069 +--- a/include/linux/sched.h
4070 ++++ b/include/linux/sched.h
4071 +@@ -1354,7 +1354,7 @@ struct task_struct {
4072 + char comm[TASK_COMM_LEN]; /* executable name excluding path
4073 + - access with [gs]et_task_comm (which lock
4074 + it with task_lock())
4075 +- - initialized normally by flush_old_exec */
4076 ++ - initialized normally by setup_new_exec */
4077 + /* file system info */
4078 + int link_count, total_link_count;
4079 + #ifdef CONFIG_SYSVIPC
4080 +diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
4081 +index 1e4743e..0eb6942 100644
4082 +--- a/include/linux/sysctl.h
4083 ++++ b/include/linux/sysctl.h
4084 +@@ -490,6 +490,7 @@ enum
4085 + NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
4086 + NET_IPV4_CONF_ARP_ACCEPT=21,
4087 + NET_IPV4_CONF_ARP_NOTIFY=22,
4088 ++ NET_IPV4_CONF_SRC_VMARK=24,
4089 + __NET_IPV4_CONF_MAX
4090 + };
4091 +
4092 +diff --git a/include/net/netrom.h b/include/net/netrom.h
4093 +index 15696b1..ab170a6 100644
4094 +--- a/include/net/netrom.h
4095 ++++ b/include/net/netrom.h
4096 +@@ -132,6 +132,8 @@ static __inline__ void nr_node_put(struct nr_node *nr_node)
4097 + static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh)
4098 + {
4099 + if (atomic_dec_and_test(&nr_neigh->refcount)) {
4100 ++ if (nr_neigh->ax25)
4101 ++ ax25_cb_put(nr_neigh->ax25);
4102 + kfree(nr_neigh->digipeat);
4103 + kfree(nr_neigh);
4104 + }
4105 +diff --git a/kernel/cred.c b/kernel/cred.c
4106 +index dd76cfe..1ed8ca1 100644
4107 +--- a/kernel/cred.c
4108 ++++ b/kernel/cred.c
4109 +@@ -224,7 +224,7 @@ struct cred *cred_alloc_blank(void)
4110 + #ifdef CONFIG_KEYS
4111 + new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL);
4112 + if (!new->tgcred) {
4113 +- kfree(new);
4114 ++ kmem_cache_free(cred_jar, new);
4115 + return NULL;
4116 + }
4117 + atomic_set(&new->tgcred->usage, 1);
4118 +diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
4119 +index b6e7aae..469193c 100644
4120 +--- a/kernel/sysctl_check.c
4121 ++++ b/kernel/sysctl_check.c
4122 +@@ -220,6 +220,7 @@ static const struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = {
4123 + { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" },
4124 + { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" },
4125 + { NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" },
4126 ++ { NET_IPV4_CONF_SRC_VMARK, "src_valid_mark" },
4127 + {}
4128 + };
4129 +
4130 +diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
4131 +index 5155dc3..ecc7adb 100644
4132 +--- a/kernel/time/clocksource.c
4133 ++++ b/kernel/time/clocksource.c
4134 +@@ -413,8 +413,6 @@ void clocksource_touch_watchdog(void)
4135 + clocksource_resume_watchdog();
4136 + }
4137 +
4138 +-#ifdef CONFIG_GENERIC_TIME
4139 +-
4140 + /**
4141 + * clocksource_max_deferment - Returns max time the clocksource can be deferred
4142 + * @cs: Pointer to clocksource
4143 +@@ -456,6 +454,8 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
4144 + return max_nsecs - (max_nsecs >> 5);
4145 + }
4146 +
4147 ++#ifdef CONFIG_GENERIC_TIME
4148 ++
4149 + /**
4150 + * clocksource_select - Select the best clocksource available
4151 + *
4152 +diff --git a/mm/filemap.c b/mm/filemap.c
4153 +index ef169f3..8e96c90 100644
4154 +--- a/mm/filemap.c
4155 ++++ b/mm/filemap.c
4156 +@@ -1655,14 +1655,15 @@ EXPORT_SYMBOL(generic_file_readonly_mmap);
4157 + static struct page *__read_cache_page(struct address_space *mapping,
4158 + pgoff_t index,
4159 + int (*filler)(void *,struct page*),
4160 +- void *data)
4161 ++ void *data,
4162 ++ gfp_t gfp)
4163 + {
4164 + struct page *page;
4165 + int err;
4166 + repeat:
4167 + page = find_get_page(mapping, index);
4168 + if (!page) {
4169 +- page = page_cache_alloc_cold(mapping);
4170 ++ page = __page_cache_alloc(gfp | __GFP_COLD);
4171 + if (!page)
4172 + return ERR_PTR(-ENOMEM);
4173 + err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
4174 +@@ -1682,31 +1683,18 @@ repeat:
4175 + return page;
4176 + }
4177 +
4178 +-/**
4179 +- * read_cache_page_async - read into page cache, fill it if needed
4180 +- * @mapping: the page's address_space
4181 +- * @index: the page index
4182 +- * @filler: function to perform the read
4183 +- * @data: destination for read data
4184 +- *
4185 +- * Same as read_cache_page, but don't wait for page to become unlocked
4186 +- * after submitting it to the filler.
4187 +- *
4188 +- * Read into the page cache. If a page already exists, and PageUptodate() is
4189 +- * not set, try to fill the page but don't wait for it to become unlocked.
4190 +- *
4191 +- * If the page does not get brought uptodate, return -EIO.
4192 +- */
4193 +-struct page *read_cache_page_async(struct address_space *mapping,
4194 ++static struct page *do_read_cache_page(struct address_space *mapping,
4195 + pgoff_t index,
4196 + int (*filler)(void *,struct page*),
4197 +- void *data)
4198 ++ void *data,
4199 ++ gfp_t gfp)
4200 ++
4201 + {
4202 + struct page *page;
4203 + int err;
4204 +
4205 + retry:
4206 +- page = __read_cache_page(mapping, index, filler, data);
4207 ++ page = __read_cache_page(mapping, index, filler, data, gfp);
4208 + if (IS_ERR(page))
4209 + return page;
4210 + if (PageUptodate(page))
4211 +@@ -1731,8 +1719,67 @@ out:
4212 + mark_page_accessed(page);
4213 + return page;
4214 + }
4215 ++
4216 ++/**
4217 ++ * read_cache_page_async - read into page cache, fill it if needed
4218 ++ * @mapping: the page's address_space
4219 ++ * @index: the page index
4220 ++ * @filler: function to perform the read
4221 ++ * @data: destination for read data
4222 ++ *
4223 ++ * Same as read_cache_page, but don't wait for page to become unlocked
4224 ++ * after submitting it to the filler.
4225 ++ *
4226 ++ * Read into the page cache. If a page already exists, and PageUptodate() is
4227 ++ * not set, try to fill the page but don't wait for it to become unlocked.
4228 ++ *
4229 ++ * If the page does not get brought uptodate, return -EIO.
4230 ++ */
4231 ++struct page *read_cache_page_async(struct address_space *mapping,
4232 ++ pgoff_t index,
4233 ++ int (*filler)(void *,struct page*),
4234 ++ void *data)
4235 ++{
4236 ++ return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
4237 ++}
4238 + EXPORT_SYMBOL(read_cache_page_async);
4239 +
4240 ++static struct page *wait_on_page_read(struct page *page)
4241 ++{
4242 ++ if (!IS_ERR(page)) {
4243 ++ wait_on_page_locked(page);
4244 ++ if (!PageUptodate(page)) {
4245 ++ page_cache_release(page);
4246 ++ page = ERR_PTR(-EIO);
4247 ++ }
4248 ++ }
4249 ++ return page;
4250 ++}
4251 ++
4252 ++/**
4253 ++ * read_cache_page_gfp - read into page cache, using specified page allocation flags.
4254 ++ * @mapping: the page's address_space
4255 ++ * @index: the page index
4256 ++ * @gfp: the page allocator flags to use if allocating
4257 ++ *
4258 ++ * This is the same as "read_mapping_page(mapping, index, NULL)", but with
4259 ++ * any new page allocations done using the specified allocation flags. Note
4260 ++ * that the Radix tree operations will still use GFP_KERNEL, so you can't
4261 ++ * expect to do this atomically or anything like that - but you can pass in
4262 ++ * other page requirements.
4263 ++ *
4264 ++ * If the page does not get brought uptodate, return -EIO.
4265 ++ */
4266 ++struct page *read_cache_page_gfp(struct address_space *mapping,
4267 ++ pgoff_t index,
4268 ++ gfp_t gfp)
4269 ++{
4270 ++ filler_t *filler = (filler_t *)mapping->a_ops->readpage;
4271 ++
4272 ++ return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
4273 ++}
4274 ++EXPORT_SYMBOL(read_cache_page_gfp);
4275 ++
4276 + /**
4277 + * read_cache_page - read into page cache, fill it if needed
4278 + * @mapping: the page's address_space
4279 +@@ -1750,18 +1797,7 @@ struct page *read_cache_page(struct address_space *mapping,
4280 + int (*filler)(void *,struct page*),
4281 + void *data)
4282 + {
4283 +- struct page *page;
4284 +-
4285 +- page = read_cache_page_async(mapping, index, filler, data);
4286 +- if (IS_ERR(page))
4287 +- goto out;
4288 +- wait_on_page_locked(page);
4289 +- if (!PageUptodate(page)) {
4290 +- page_cache_release(page);
4291 +- page = ERR_PTR(-EIO);
4292 +- }
4293 +- out:
4294 +- return page;
4295 ++ return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
4296 + }
4297 + EXPORT_SYMBOL(read_cache_page);
4298 +
4299 +@@ -2217,6 +2253,9 @@ again:
4300 + if (unlikely(status))
4301 + break;
4302 +
4303 ++ if (mapping_writably_mapped(mapping))
4304 ++ flush_dcache_page(page);
4305 ++
4306 + pagefault_disable();
4307 + copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
4308 + pagefault_enable();
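
The filemap.c rework threads a gfp argument through a new do_read_cache_page() core so that read_cache_page_gfp() can allocate with caller-chosen flags, while read_cache_page() and read_cache_page_async() stay thin wrappers and the unlock-wait logic is factored into wait_on_page_read(). The shape of that refactor as a userspace sketch (types and page behavior stubbed out):

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int gfp_t;             /* stand-in for the kernel type */

static void *do_read_cache_page(unsigned long index, gfp_t gfp)
{
    (void)index;
    (void)gfp;                          /* the core would allocate here */
    return malloc(4096);
}

static void *wait_on_page_read(void *page)
{
    /* the kernel version waits for the page lock and checks uptodate;
     * here we just pass the result (or failure) through */
    return page;
}

static void *read_cache_page_async(unsigned long index)
{
    return do_read_cache_page(index, 0 /* mapping default */);
}

static void *read_cache_page_gfp(unsigned long index, gfp_t gfp)
{
    return wait_on_page_read(do_read_cache_page(index, gfp));
}

int main(void)
{
    void *p = read_cache_page_gfp(0, 1);
    void *q = read_cache_page_async(1);

    printf("%s %s\n", p ? "ok" : "error", q ? "ok" : "error");
    free(p);
    free(q);
    return 0;
}
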
4309 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
4310 +index 3a78e2e..36992b6 100644
4311 +--- a/mm/page_alloc.c
4312 ++++ b/mm/page_alloc.c
4313 +@@ -559,8 +559,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
4314 + page = list_entry(list->prev, struct page, lru);
4315 + /* must delete as __free_one_page list manipulates */
4316 + list_del(&page->lru);
4317 +- __free_one_page(page, zone, 0, migratetype);
4318 +- trace_mm_page_pcpu_drain(page, 0, migratetype);
4319 ++ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
4320 ++ __free_one_page(page, zone, 0, page_private(page));
4321 ++ trace_mm_page_pcpu_drain(page, 0, page_private(page));
4322 + } while (--count && --batch_free && !list_empty(list));
4323 + }
4324 + spin_unlock(&zone->lock);
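
The page_alloc.c fix makes free_pcppages_bulk() free each page according to the migratetype cached in page_private() instead of the nominal type of the pcp list it sat on, because the MIGRATE_MOVABLE list may also hold MIGRATE_RESERVE pages. Sketched:

#include <stdio.h>

enum { MIGRATE_MOVABLE, MIGRATE_RESERVE };

struct page { int private; };   /* stand-in for page_private(page) */

static void free_one(const struct page *pg, int migratetype)
{
    printf("page private=%d freed to buddy list %d\n",
           pg->private, migratetype);
}

int main(void)
{
    /* a MIGRATE_RESERVE page that ended up on the MOVABLE pcp list */
    struct page pg = { .private = MIGRATE_RESERVE };

    free_one(&pg, pg.private);  /* fixed: use the per-page type */
    return 0;
}
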
4325 +diff --git a/mm/vmalloc.c b/mm/vmalloc.c
4326 +index a3a99d3..c228731 100644
4327 +--- a/mm/vmalloc.c
4328 ++++ b/mm/vmalloc.c
4329 +@@ -509,6 +509,9 @@ static unsigned long lazy_max_pages(void)
4330 +
4331 + static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
4332 +
4333 ++/* for per-CPU blocks */
4334 ++static void purge_fragmented_blocks_allcpus(void);
4335 ++
4336 + /*
4337 + * Purges all lazily-freed vmap areas.
4338 + *
4339 +@@ -539,6 +542,9 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
4340 + } else
4341 + spin_lock(&purge_lock);
4342 +
4343 ++ if (sync)
4344 ++ purge_fragmented_blocks_allcpus();
4345 ++
4346 + rcu_read_lock();
4347 + list_for_each_entry_rcu(va, &vmap_area_list, list) {
4348 + if (va->flags & VM_LAZY_FREE) {
4349 +@@ -667,8 +673,6 @@ static bool vmap_initialized __read_mostly = false;
4350 + struct vmap_block_queue {
4351 + spinlock_t lock;
4352 + struct list_head free;
4353 +- struct list_head dirty;
4354 +- unsigned int nr_dirty;
4355 + };
4356 +
4357 + struct vmap_block {
4358 +@@ -678,10 +682,9 @@ struct vmap_block {
4359 + unsigned long free, dirty;
4360 + DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
4361 + DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
4362 +- union {
4363 +- struct list_head free_list;
4364 +- struct rcu_head rcu_head;
4365 +- };
4366 ++ struct list_head free_list;
4367 ++ struct rcu_head rcu_head;
4368 ++ struct list_head purge;
4369 + };
4370 +
4371 + /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
4372 +@@ -757,7 +760,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
4373 + vbq = &get_cpu_var(vmap_block_queue);
4374 + vb->vbq = vbq;
4375 + spin_lock(&vbq->lock);
4376 +- list_add(&vb->free_list, &vbq->free);
4377 ++ list_add_rcu(&vb->free_list, &vbq->free);
4378 + spin_unlock(&vbq->lock);
4379 + put_cpu_var(vmap_cpu_blocks);
4380 +
4381 +@@ -776,8 +779,6 @@ static void free_vmap_block(struct vmap_block *vb)
4382 + struct vmap_block *tmp;
4383 + unsigned long vb_idx;
4384 +
4385 +- BUG_ON(!list_empty(&vb->free_list));
4386 +-
4387 + vb_idx = addr_to_vb_idx(vb->va->va_start);
4388 + spin_lock(&vmap_block_tree_lock);
4389 + tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
4390 +@@ -788,12 +789,61 @@ static void free_vmap_block(struct vmap_block *vb)
4391 + call_rcu(&vb->rcu_head, rcu_free_vb);
4392 + }
4393 +
4394 ++static void purge_fragmented_blocks(int cpu)
4395 ++{
4396 ++ LIST_HEAD(purge);
4397 ++ struct vmap_block *vb;
4398 ++ struct vmap_block *n_vb;
4399 ++ struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
4400 ++
4401 ++ rcu_read_lock();
4402 ++ list_for_each_entry_rcu(vb, &vbq->free, free_list) {
4403 ++
4404 ++ if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
4405 ++ continue;
4406 ++
4407 ++ spin_lock(&vb->lock);
4408 ++ if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
4409 ++ vb->free = 0; /* prevent further allocs after releasing lock */
4410 ++ vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
4411 ++ bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
4412 ++ bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
4413 ++ spin_lock(&vbq->lock);
4414 ++ list_del_rcu(&vb->free_list);
4415 ++ spin_unlock(&vbq->lock);
4416 ++ spin_unlock(&vb->lock);
4417 ++ list_add_tail(&vb->purge, &purge);
4418 ++ } else
4419 ++ spin_unlock(&vb->lock);
4420 ++ }
4421 ++ rcu_read_unlock();
4422 ++
4423 ++ list_for_each_entry_safe(vb, n_vb, &purge, purge) {
4424 ++ list_del(&vb->purge);
4425 ++ free_vmap_block(vb);
4426 ++ }
4427 ++}
4428 ++
4429 ++static void purge_fragmented_blocks_thiscpu(void)
4430 ++{
4431 ++ purge_fragmented_blocks(smp_processor_id());
4432 ++}
4433 ++
4434 ++static void purge_fragmented_blocks_allcpus(void)
4435 ++{
4436 ++ int cpu;
4437 ++
4438 ++ for_each_possible_cpu(cpu)
4439 ++ purge_fragmented_blocks(cpu);
4440 ++}
4441 ++
4442 + static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
4443 + {
4444 + struct vmap_block_queue *vbq;
4445 + struct vmap_block *vb;
4446 + unsigned long addr = 0;
4447 + unsigned int order;
4448 ++ int purge = 0;
4449 +
4450 + BUG_ON(size & ~PAGE_MASK);
4451 + BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
4452 +@@ -806,24 +856,37 @@ again:
4453 + int i;
4454 +
4455 + spin_lock(&vb->lock);
4456 ++ if (vb->free < 1UL << order)
4457 ++ goto next;
4458 + i = bitmap_find_free_region(vb->alloc_map,
4459 + VMAP_BBMAP_BITS, order);
4460 +
4461 +- if (i >= 0) {
4462 +- addr = vb->va->va_start + (i << PAGE_SHIFT);
4463 +- BUG_ON(addr_to_vb_idx(addr) !=
4464 +- addr_to_vb_idx(vb->va->va_start));
4465 +- vb->free -= 1UL << order;
4466 +- if (vb->free == 0) {
4467 +- spin_lock(&vbq->lock);
4468 +- list_del_init(&vb->free_list);
4469 +- spin_unlock(&vbq->lock);
4470 ++ if (i < 0) {
4471 ++ if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
4472 ++ /* fragmented and no outstanding allocations */
4473 ++ BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
4474 ++ purge = 1;
4475 + }
4476 +- spin_unlock(&vb->lock);
4477 +- break;
4478 ++ goto next;
4479 + }
4480 ++ addr = vb->va->va_start + (i << PAGE_SHIFT);
4481 ++ BUG_ON(addr_to_vb_idx(addr) !=
4482 ++ addr_to_vb_idx(vb->va->va_start));
4483 ++ vb->free -= 1UL << order;
4484 ++ if (vb->free == 0) {
4485 ++ spin_lock(&vbq->lock);
4486 ++ list_del_rcu(&vb->free_list);
4487 ++ spin_unlock(&vbq->lock);
4488 ++ }
4489 ++ spin_unlock(&vb->lock);
4490 ++ break;
4491 ++next:
4492 + spin_unlock(&vb->lock);
4493 + }
4494 ++
4495 ++ if (purge)
4496 ++ purge_fragmented_blocks_thiscpu();
4497 ++
4498 + put_cpu_var(vmap_cpu_blocks);
4499 + rcu_read_unlock();
4500 +
4501 +@@ -860,11 +923,11 @@ static void vb_free(const void *addr, unsigned long size)
4502 + BUG_ON(!vb);
4503 +
4504 + spin_lock(&vb->lock);
4505 +- bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
4506 ++ BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
4507 +
4508 + vb->dirty += 1UL << order;
4509 + if (vb->dirty == VMAP_BBMAP_BITS) {
4510 +- BUG_ON(vb->free || !list_empty(&vb->free_list));
4511 ++ BUG_ON(vb->free);
4512 + spin_unlock(&vb->lock);
4513 + free_vmap_block(vb);
4514 + } else
4515 +@@ -1033,8 +1096,6 @@ void __init vmalloc_init(void)
4516 + vbq = &per_cpu(vmap_block_queue, i);
4517 + spin_lock_init(&vbq->lock);
4518 + INIT_LIST_HEAD(&vbq->free);
4519 +- INIT_LIST_HEAD(&vbq->dirty);
4520 +- vbq->nr_dirty = 0;
4521 + }
4522 +
4523 + /* Import existing vmlist entries. */
4524 +diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
4525 +index bf706f8..1491260 100644
4526 +--- a/net/ax25/ax25_out.c
4527 ++++ b/net/ax25/ax25_out.c
4528 +@@ -92,6 +92,12 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax2
4529 + #endif
4530 + }
4531 +
4532 ++ /*
4533 ++ * There is one ref for the state machine; a caller needs
4534 ++ * one more to put it back, just like with the existing one.
4535 ++ */
4536 ++ ax25_cb_hold(ax25);
4537 ++
4538 + ax25_cb_add(ax25);
4539 +
4540 + ax25->state = AX25_STATE_1;
4541 +diff --git a/net/core/sock.c b/net/core/sock.c
4542 +index 7626b6a..6605e75 100644
4543 +--- a/net/core/sock.c
4544 ++++ b/net/core/sock.c
4545 +@@ -1181,6 +1181,10 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
4546 +
4547 + if (newsk->sk_prot->sockets_allocated)
4548 + percpu_counter_inc(newsk->sk_prot->sockets_allocated);
4549 ++
4550 ++ if (sock_flag(newsk, SOCK_TIMESTAMP) ||
4551 ++ sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
4552 ++ net_enable_timestamp();
4553 + }
4554 + out:
4555 + return newsk;
4556 +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
4557 +index 5df2f6a..0030e73 100644
4558 +--- a/net/ipv4/devinet.c
4559 ++++ b/net/ipv4/devinet.c
4560 +@@ -1450,6 +1450,7 @@ static struct devinet_sysctl_table {
4561 + DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
4562 + DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
4563 + "accept_source_route"),
4564 ++ DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
4565 + DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
4566 + DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
4567 + DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
4568 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
4569 +index aa00398..29391ee 100644
4570 +--- a/net/ipv4/fib_frontend.c
4571 ++++ b/net/ipv4/fib_frontend.c
4572 +@@ -251,6 +251,8 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
4573 + if (in_dev) {
4574 + no_addr = in_dev->ifa_list == NULL;
4575 + rpf = IN_DEV_RPFILTER(in_dev);
4576 ++ if (mark && !IN_DEV_SRC_VMARK(in_dev))
4577 ++ fl.mark = 0;
4578 + }
4579 + rcu_read_unlock();
4580 +
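
Together these hunks add a per-device src_valid_mark toggle: fib_validate_source() now zeroes the skb mark during reverse-path lookup unless the new flag is set. On a patched kernel the knob should appear as /proc/sys/net/ipv4/conf/<dev>/src_valid_mark; a minimal toggle from C (the "all" path below is an assumption about the usual devconf layout):

#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/sys/net/ipv4/conf/all/src_valid_mark", "w");

    if (!f) {
        perror("fopen");
        return 1;
    }
    fputs("1\n", f);            /* keep the mark for rp_filter lookups */
    return fclose(f) ? 1 : 0;
}
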
4581 +diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
4582 +index 37b9051..d87645e 100644
4583 +--- a/net/mac80211/driver-trace.h
4584 ++++ b/net/mac80211/driver-trace.h
4585 +@@ -655,7 +655,7 @@ TRACE_EVENT(drv_ampdu_action,
4586 + __entry->ret = ret;
4587 + __entry->action = action;
4588 + __entry->tid = tid;
4589 +- __entry->ssn = *ssn;
4590 ++ __entry->ssn = ssn ? *ssn : 0;
4591 + ),
4592 +
4593 + TP_printk(
4594 +diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
4595 +index 4eb1ac9..850ffc0 100644
4596 +--- a/net/netrom/nr_route.c
4597 ++++ b/net/netrom/nr_route.c
4598 +@@ -842,12 +842,13 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
4599 + dptr = skb_push(skb, 1);
4600 + *dptr = AX25_P_NETROM;
4601 +
4602 +- ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev);
4603 +- if (nr_neigh->ax25 && ax25s) {
4604 +- /* We were already holding this ax25_cb */
4605 ++ ax25s = nr_neigh->ax25;
4606 ++ nr_neigh->ax25 = ax25_send_frame(skb, 256,
4607 ++ (ax25_address *)dev->dev_addr,
4608 ++ &nr_neigh->callsign,
4609 ++ nr_neigh->digipeat, nr_neigh->dev);
4610 ++ if (ax25s)
4611 + ax25_cb_put(ax25s);
4612 +- }
4613 +- nr_neigh->ax25 = ax25s;
4614 +
4615 + dev_put(dev);
4616 + ret = (nr_neigh->ax25 != NULL);
4617 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
4618 +index f2d116a..41866eb 100644
4619 +--- a/net/packet/af_packet.c
4620 ++++ b/net/packet/af_packet.c
4621 +@@ -1028,8 +1028,20 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
4622 +
4623 + status = TP_STATUS_SEND_REQUEST;
4624 + err = dev_queue_xmit(skb);
4625 +- if (unlikely(err > 0 && (err = net_xmit_errno(err)) != 0))
4626 +- goto out_xmit;
4627 ++ if (unlikely(err > 0)) {
4628 ++ err = net_xmit_errno(err);
4629 ++ if (err && __packet_get_status(po, ph) ==
4630 ++ TP_STATUS_AVAILABLE) {
4631 ++ /* skb was destructed already */
4632 ++ skb = NULL;
4633 ++ goto out_status;
4634 ++ }
4635 ++ /*
4636 ++ * skb was dropped but not destructed yet;
4637 ++ * let's treat it like congestion or err < 0
4638 ++ */
4639 ++ err = 0;
4640 ++ }
4641 + packet_increment_head(&po->tx_ring);
4642 + len_sum += tp_len;
4643 + } while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT))
4644 +@@ -1039,9 +1051,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
4645 + err = len_sum;
4646 + goto out_put;
4647 +
4648 +-out_xmit:
4649 +- skb->destructor = sock_wfree;
4650 +- atomic_dec(&po->tx_ring.pending);
4651 + out_status:
4652 + __packet_set_status(po, ph, status);
4653 + kfree_skb(skb);
4654 +diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
4655 +index bd86a63..5ef5f69 100644
4656 +--- a/net/rose/rose_link.c
4657 ++++ b/net/rose/rose_link.c
4658 +@@ -101,13 +101,17 @@ static void rose_t0timer_expiry(unsigned long param)
4659 + static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
4660 + {
4661 + ax25_address *rose_call;
4662 ++ ax25_cb *ax25s;
4663 +
4664 + if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
4665 + rose_call = (ax25_address *)neigh->dev->dev_addr;
4666 + else
4667 + rose_call = &rose_callsign;
4668 +
4669 ++ ax25s = neigh->ax25;
4670 + neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
4671 ++ if (ax25s)
4672 ++ ax25_cb_put(ax25s);
4673 +
4674 + return (neigh->ax25 != NULL);
4675 + }
4676 +@@ -120,13 +124,17 @@ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
4677 + static int rose_link_up(struct rose_neigh *neigh)
4678 + {
4679 + ax25_address *rose_call;
4680 ++ ax25_cb *ax25s;
4681 +
4682 + if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
4683 + rose_call = (ax25_address *)neigh->dev->dev_addr;
4684 + else
4685 + rose_call = &rose_callsign;
4686 +
4687 ++ ax25s = neigh->ax25;
4688 + neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
4689 ++ if (ax25s)
4690 ++ ax25_cb_put(ax25s);
4691 +
4692 + return (neigh->ax25 != NULL);
4693 + }
4694 +diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
4695 +index f3e2198..08230fa 100644
4696 +--- a/net/rose/rose_route.c
4697 ++++ b/net/rose/rose_route.c
4698 +@@ -234,6 +234,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
4699 +
4700 + if ((s = rose_neigh_list) == rose_neigh) {
4701 + rose_neigh_list = rose_neigh->next;
4702 ++ if (rose_neigh->ax25)
4703 ++ ax25_cb_put(rose_neigh->ax25);
4704 + kfree(rose_neigh->digipeat);
4705 + kfree(rose_neigh);
4706 + return;
4707 +@@ -242,6 +244,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
4708 + while (s != NULL && s->next != NULL) {
4709 + if (s->next == rose_neigh) {
4710 + s->next = rose_neigh->next;
4711 ++ if (rose_neigh->ax25)
4712 ++ ax25_cb_put(rose_neigh->ax25);
4713 + kfree(rose_neigh->digipeat);
4714 + kfree(rose_neigh);
4715 + return;
4716 +@@ -810,6 +814,7 @@ void rose_link_failed(ax25_cb *ax25, int reason)
4717 +
4718 + if (rose_neigh != NULL) {
4719 + rose_neigh->ax25 = NULL;
4720 ++ ax25_cb_put(ax25);
4721 +
4722 + rose_del_route_by_neigh(rose_neigh);
4723 + rose_kill_by_neigh(rose_neigh);
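
The NET/ROM and ROSE hunks above all plug the same reference leak: a cached ax25_cb pointer was overwritten or torn down without dropping its refcount. The fix is the save/replace/put pattern, sketched here with an illustrative refcount type rather than the kernel's:

#include <stdlib.h>

struct cb {
    int refcount;
};

static struct cb *cb_get_new(void)      /* stands in for ax25_send_frame() */
{
    struct cb *p = calloc(1, sizeof(*p));
    if (p)
        p->refcount = 1;
    return p;
}

static void cb_put(struct cb *p)
{
    if (p && --p->refcount == 0)
        free(p);
}

int main(void)
{
    struct cb *neigh_ax25 = cb_get_new();   /* cached, e.g. nr_neigh->ax25 */
    struct cb *old;

    /* replace the cached block without leaking the old reference */
    old = neigh_ax25;
    neigh_ax25 = cb_get_new();
    if (old)
        cb_put(old);

    cb_put(neigh_ax25);
    return 0;
}
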
4724 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
4725 +index bb230d5..36d9e25 100644
4726 +--- a/security/selinux/hooks.c
4727 ++++ b/security/selinux/hooks.c
4728 +@@ -2366,7 +2366,7 @@ static void selinux_bprm_committing_creds(struct linux_binprm *bprm)
4729 + initrlim = init_task.signal->rlim + i;
4730 + rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur);
4731 + }
4732 +- update_rlimit_cpu(rlim->rlim_cur);
4733 ++ update_rlimit_cpu(current->signal->rlim[RLIMIT_CPU].rlim_cur);
4734 + }
4735 + }
4736 +