Gentoo Archives: gentoo-commits

From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1710 - genpatches-2.6/trunk/2.6.33
Date: Thu, 27 May 2010 08:51:06
Message-Id: 20100527085055.702972CF39@corvid.gentoo.org
1 Author: mpagano
2 Date: 2010-05-27 08:50:54 +0000 (Thu, 27 May 2010)
3 New Revision: 1710
4
5 Added:
6 genpatches-2.6/trunk/2.6.33/1003_linux-2.6.33.4.patch
7 genpatches-2.6/trunk/2.6.33/1004_linux-2.6.33.5.patch
8 Modified:
9 genpatches-2.6/trunk/2.6.33/0000_README
10 Log:
11 Linux patches 2.6.33.4 and 2.6.33.5
12
13 Modified: genpatches-2.6/trunk/2.6.33/0000_README
14 ===================================================================
15 --- genpatches-2.6/trunk/2.6.33/0000_README 2010-05-27 08:41:59 UTC (rev 1709)
16 +++ genpatches-2.6/trunk/2.6.33/0000_README 2010-05-27 08:50:54 UTC (rev 1710)
17 @@ -47,6 +47,14 @@
18 From: http://www.kernel.org
19 Desc: Linux 2.6.33.2
20
21 Patch: 1002_linux-2.6.33.3.patch
22 From: http://www.kernel.org
23 Desc: Linux 2.6.33.3
24 
25 +Patch: 1003_linux-2.6.33.4.patch
26 +From: http://www.kernel.org
27 +Desc: Linux 2.6.33.4
28 +
29 +Patch: 1004_linux-2.6.33.5.patch
30 +From: http://www.kernel.org
31 +Desc: Linux 2.6.33.5
32 +
33 Added: genpatches-2.6/trunk/2.6.33/1003_linux-2.6.33.4.patch
34 ===================================================================
35 --- genpatches-2.6/trunk/2.6.33/1003_linux-2.6.33.4.patch	                        (rev 0)
36 +++ genpatches-2.6/trunk/2.6.33/1003_linux-2.6.33.4.patch	2010-05-27 08:50:54 UTC (rev 1710)
37 @@ -0,0 +1,4010 @@
38 +diff --git a/arch/arm/mach-pxa/include/mach/colibri.h b/arch/arm/mach-pxa/include/mach/colibri.h
39 +index 811743c..5f2ba8d 100644
40 +--- a/arch/arm/mach-pxa/include/mach/colibri.h
41 ++++ b/arch/arm/mach-pxa/include/mach/colibri.h
42 +@@ -2,6 +2,7 @@
43 + #define _COLIBRI_H_
44 +
45 + #include <net/ax88796.h>
46 ++#include <mach/mfp.h>
47 +
48 + /*
49 + * common settings for all modules
50 +diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h
51 +index 7950ef4..743385d 100644
52 +--- a/arch/mips/include/asm/mach-sibyte/war.h
53 ++++ b/arch/mips/include/asm/mach-sibyte/war.h
54 +@@ -16,7 +16,11 @@
55 + #if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \
56 + defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
57 +
58 +-#define BCM1250_M3_WAR 1
59 ++#ifndef __ASSEMBLY__
60 ++extern int sb1250_m3_workaround_needed(void);
61 ++#endif
62 ++
63 ++#define BCM1250_M3_WAR sb1250_m3_workaround_needed()
64 + #define SIBYTE_1956_WAR 1
65 +
66 + #else
67 +diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
68 +index 0444da1..92da315 100644
69 +--- a/arch/mips/sibyte/sb1250/setup.c
70 ++++ b/arch/mips/sibyte/sb1250/setup.c
71 +@@ -87,6 +87,21 @@ static int __init setup_bcm1250(void)
72 + return ret;
73 + }
74 +
75 ++int sb1250_m3_workaround_needed(void)
76 ++{
77 ++ switch (soc_type) {
78 ++ case K_SYS_SOC_TYPE_BCM1250:
79 ++ case K_SYS_SOC_TYPE_BCM1250_ALT:
80 ++ case K_SYS_SOC_TYPE_BCM1250_ALT2:
81 ++ case K_SYS_SOC_TYPE_BCM1125:
82 ++ case K_SYS_SOC_TYPE_BCM1125H:
83 ++ return soc_pass < K_SYS_REVISION_BCM1250_C0;
84 ++
85 ++ default:
86 ++ return 0;
87 ++ }
88 ++}
89 ++
90 + static int __init setup_bcm112x(void)
91 + {
92 + int ret = 0;
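Note: the sibyte change above converts BCM1250_M3_WAR from a compile-time constant into a runtime query, so one kernel image covers both pre-C0 parts that need the M3 workaround and later revisions that do not. A minimal caller-side sketch (the helper name below is illustrative, not from the patch):

    /* BCM1250_M3_WAR now expands to a function call, so this branch
     * is decided on the running SoC instead of at build time.
     */
    if (BCM1250_M3_WAR)
            apply_m3_tlb_war();     /* illustrative helper name */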
93 +diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
94 +index 9258074..567cd57 100644
95 +--- a/arch/powerpc/kernel/head_64.S
96 ++++ b/arch/powerpc/kernel/head_64.S
97 +@@ -615,6 +615,17 @@ _GLOBAL(start_secondary_prolog)
98 + std r3,0(r1) /* Zero the stack frame pointer */
99 + bl .start_secondary
100 + b .
101 ++/*
102 ++ * Reset stack pointer and call start_secondary
103 ++ * to continue with online operation when woken up
104 ++ * from cede in cpu offline.
105 ++ */
106 ++_GLOBAL(start_secondary_resume)
107 ++ ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */
108 ++ li r3,0
109 ++ std r3,0(r1) /* Zero the stack frame pointer */
110 ++ bl .start_secondary
111 ++ b .
112 + #endif
113 +
114 + /*
115 +diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
116 +index c539472..1ce9dd5 100644
117 +--- a/arch/powerpc/mm/fsl_booke_mmu.c
118 ++++ b/arch/powerpc/mm/fsl_booke_mmu.c
119 +@@ -155,15 +155,10 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
120 + if (cur_cpu_spec->cpu_features & MMU_FTR_BIG_PHYS)
121 + TLBCAM[index].MAS7 = (u64)phys >> 32;
122 +
123 +-#ifndef CONFIG_KGDB /* want user access for breakpoints */
124 + if (flags & _PAGE_USER) {
125 + TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
126 + TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
127 + }
128 +-#else
129 +- TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
130 +- TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
131 +-#endif
132 +
133 + tlbcam_addrs[index].start = virt;
134 + tlbcam_addrs[index].limit = virt + size - 1;
135 +diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
136 +index 6ea4698..b842378 100644
137 +--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
138 ++++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
139 +@@ -122,44 +122,32 @@ static void pseries_mach_cpu_die(void)
140 + if (!get_lppaca()->shared_proc)
141 + get_lppaca()->donate_dedicated_cpu = 1;
142 +
143 +- printk(KERN_INFO
144 +- "cpu %u (hwid %u) ceding for offline with hint %d\n",
145 +- cpu, hwcpu, cede_latency_hint);
146 + while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
147 + extended_cede_processor(cede_latency_hint);
148 +- printk(KERN_INFO "cpu %u (hwid %u) returned from cede.\n",
149 +- cpu, hwcpu);
150 +- printk(KERN_INFO
151 +- "Decrementer value = %x Timebase value = %llx\n",
152 +- get_dec(), get_tb());
153 + }
154 +
155 +- printk(KERN_INFO "cpu %u (hwid %u) got prodded to go online\n",
156 +- cpu, hwcpu);
157 +-
158 + if (!get_lppaca()->shared_proc)
159 + get_lppaca()->donate_dedicated_cpu = 0;
160 + get_lppaca()->idle = 0;
161 +- }
162 +
163 +- if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
164 +- unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
165 ++ if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
166 ++ unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
167 +
168 +- /*
169 +- * NOTE: Calling start_secondary() here for now to
170 +- * start new context.
171 +- * However, need to do it cleanly by resetting the
172 +- * stack pointer.
173 +- */
174 +- start_secondary();
175 ++ /*
176 ++ * Call to start_secondary_resume() will not return.
177 ++ * Kernel stack will be reset and start_secondary()
178 ++ * will be called to continue the online operation.
179 ++ */
180 ++ start_secondary_resume();
181 ++ }
182 ++ }
183 +
184 +- } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
185 ++ /* Requested state is CPU_STATE_OFFLINE at this point */
186 ++ WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);
187 +
188 +- set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
189 +- unregister_slb_shadow(hard_smp_processor_id(),
190 +- __pa(get_slb_shadow()));
191 +- rtas_stop_self();
192 +- }
193 ++ set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
194 ++ unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
195 ++ rtas_stop_self();
196 +
197 + /* Should never get here... */
198 + BUG();
199 +diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h
200 +index 202d869..75a6f48 100644
201 +--- a/arch/powerpc/platforms/pseries/offline_states.h
202 ++++ b/arch/powerpc/platforms/pseries/offline_states.h
203 +@@ -35,4 +35,5 @@ static inline void set_default_offline_state(int cpu)
204 +
205 + extern enum cpu_state_vals get_preferred_offline_state(int cpu);
206 + extern int start_secondary(void);
207 ++extern void start_secondary_resume(void);
208 + #endif
209 +diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h
210 +index 8b49bf9..bfa1ea4 100644
211 +--- a/arch/sparc/include/asm/irqflags_64.h
212 ++++ b/arch/sparc/include/asm/irqflags_64.h
213 +@@ -76,9 +76,26 @@ static inline int raw_irqs_disabled(void)
214 + */
215 + static inline unsigned long __raw_local_irq_save(void)
216 + {
217 +- unsigned long flags = __raw_local_save_flags();
218 +-
219 +- raw_local_irq_disable();
220 ++ unsigned long flags, tmp;
221 ++
222 ++ /* Disable interrupts to PIL_NORMAL_MAX unless we already
223 ++ * are using PIL_NMI, in which case PIL_NMI is retained.
224 ++ *
225 ++ * The only values we ever program into the %pil are 0,
226 ++ * PIL_NORMAL_MAX and PIL_NMI.
227 ++ *
228 ++ * Since PIL_NMI is the largest %pil value and all bits are
229 ++ * set in it (0xf), it doesn't matter what PIL_NORMAL_MAX
230 ++ * actually is.
231 ++ */
232 ++ __asm__ __volatile__(
233 ++ "rdpr %%pil, %0\n\t"
234 ++ "or %0, %2, %1\n\t"
235 ++ "wrpr %1, 0x0, %%pil"
236 ++ : "=r" (flags), "=r" (tmp)
237 ++ : "i" (PIL_NORMAL_MAX)
238 ++ : "memory"
239 ++ );
240 +
241 + return flags;
242 + }
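Note: the value returned above is intended to round-trip through the usual save/restore pairing; doing the read and write of %pil in one inline asm block means an NMI context can never accidentally lower its own interrupt priority. A condensed usage sketch, assuming the standard raw_local_irq_restore() counterpart:

    unsigned long flags;

    flags = __raw_local_irq_save();  /* raises %pil, preserves PIL_NMI */
    /* ... critical section with interrupts masked ... */
    raw_local_irq_restore(flags);    /* writes back the exact saved %pil */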
243 +diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
244 +index 39be9f2..3df02de 100644
245 +--- a/arch/sparc/include/asm/thread_info_64.h
246 ++++ b/arch/sparc/include/asm/thread_info_64.h
247 +@@ -121,7 +121,7 @@ struct thread_info {
248 + #define THREAD_SHIFT PAGE_SHIFT
249 + #endif /* PAGE_SHIFT == 13 */
250 +
251 +-#define PREEMPT_ACTIVE 0x4000000
252 ++#define PREEMPT_ACTIVE 0x10000000
253 +
254 + /*
255 + * macros/functions for gaining access to the thread information structure
256 +diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
257 +index b775658..8a00058 100644
258 +--- a/arch/sparc/kernel/pci_common.c
259 ++++ b/arch/sparc/kernel/pci_common.c
260 +@@ -371,14 +371,19 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm)
261 + struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);
262 +
263 + if (!rp) {
264 +- prom_printf("Cannot allocate IOMMU resource.\n");
265 +- prom_halt();
266 ++ pr_info("%s: Cannot allocate IOMMU resource.\n",
267 ++ pbm->name);
268 ++ return;
269 + }
270 + rp->name = "IOMMU";
271 + rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
272 + rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
273 + rp->flags = IORESOURCE_BUSY;
274 +- request_resource(&pbm->mem_space, rp);
275 ++ if (request_resource(&pbm->mem_space, rp)) {
276 ++ pr_info("%s: Unable to request IOMMU resource.\n",
277 ++ pbm->name);
278 ++ kfree(rp);
279 ++ }
280 + }
281 + }
282 +
283 +diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
284 +index fd3cee4..c720f0c 100644
285 +--- a/arch/sparc/kernel/rtrap_64.S
286 ++++ b/arch/sparc/kernel/rtrap_64.S
287 +@@ -172,7 +172,17 @@ rtrap_xcall:
288 + nop
289 + call trace_hardirqs_on
290 + nop
291 +- wrpr %l4, %pil
292 ++ /* Do not actually set the %pil here. We will do that
293 ++ * below after we clear PSTATE_IE in the %pstate register.
294 ++ * If we re-enable interrupts here, we can recurse down
295 ++ * the hardirq stack potentially endlessly, causing a
296 ++ * stack overflow.
297 ++ *
298 ++ * It is tempting to put this test and trace_hardirqs_on
299 ++ * call at the 'rt_continue' label, but that will not work
300 ++ * as that path hits unconditionally and we do not want to
301 ++ * execute this in NMI return paths, for example.
302 ++ */
303 + #endif
304 + rtrap_no_irq_enable:
305 + andcc %l1, TSTATE_PRIV, %l3
306 +diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
307 +index 10f7bb9..22cd475 100644
308 +--- a/arch/sparc/kernel/traps_64.c
309 ++++ b/arch/sparc/kernel/traps_64.c
310 +@@ -2202,27 +2202,6 @@ void dump_stack(void)
311 +
312 + EXPORT_SYMBOL(dump_stack);
313 +
314 +-static inline int is_kernel_stack(struct task_struct *task,
315 +- struct reg_window *rw)
316 +-{
317 +- unsigned long rw_addr = (unsigned long) rw;
318 +- unsigned long thread_base, thread_end;
319 +-
320 +- if (rw_addr < PAGE_OFFSET) {
321 +- if (task != &init_task)
322 +- return 0;
323 +- }
324 +-
325 +- thread_base = (unsigned long) task_stack_page(task);
326 +- thread_end = thread_base + sizeof(union thread_union);
327 +- if (rw_addr >= thread_base &&
328 +- rw_addr < thread_end &&
329 +- !(rw_addr & 0x7UL))
330 +- return 1;
331 +-
332 +- return 0;
333 +-}
334 +-
335 + static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
336 + {
337 + unsigned long fp = rw->ins[6];
338 +@@ -2251,6 +2230,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
339 + show_regs(regs);
340 + add_taint(TAINT_DIE);
341 + if (regs->tstate & TSTATE_PRIV) {
342 ++ struct thread_info *tp = current_thread_info();
343 + struct reg_window *rw = (struct reg_window *)
344 + (regs->u_regs[UREG_FP] + STACK_BIAS);
345 +
346 +@@ -2258,8 +2238,8 @@ void die_if_kernel(char *str, struct pt_regs *regs)
347 + * find some badly aligned kernel stack.
348 + */
349 + while (rw &&
350 +- count++ < 30&&
351 +- is_kernel_stack(current, rw)) {
352 ++ count++ < 30 &&
353 ++ kstack_valid(tp, (unsigned long) rw)) {
354 + printk("Caller[%016lx]: %pS\n", rw->ins[7],
355 + (void *) rw->ins[7]);
356 +
357 +diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
358 +index 378ca82..95a8e9a 100644
359 +--- a/arch/sparc/kernel/unaligned_64.c
360 ++++ b/arch/sparc/kernel/unaligned_64.c
361 +@@ -49,7 +49,7 @@ static inline enum direction decode_direction(unsigned int insn)
362 + }
363 +
364 + /* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
365 +-static inline int decode_access_size(unsigned int insn)
366 ++static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
367 + {
368 + unsigned int tmp;
369 +
370 +@@ -65,7 +65,7 @@ static inline int decode_access_size(unsigned int insn)
371 + return 2;
372 + else {
373 + printk("Impossible unaligned trap. insn=%08x\n", insn);
374 +- die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);
375 ++ die_if_kernel("Byte sized unaligned access?!?!", regs);
376 +
377 + /* GCC should never warn that control reaches the end
378 + * of this function without returning a value because
379 +@@ -289,7 +289,7 @@ static void log_unaligned(struct pt_regs *regs)
380 + asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
381 + {
382 + enum direction dir = decode_direction(insn);
383 +- int size = decode_access_size(insn);
384 ++ int size = decode_access_size(regs, insn);
385 + int orig_asi, asi;
386 +
387 + current_thread_info()->kern_una_regs = regs;
388 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
389 +index eb40925..ddb52b8 100644
390 +--- a/arch/x86/Kconfig
391 ++++ b/arch/x86/Kconfig
392 +@@ -627,7 +627,7 @@ config GART_IOMMU
393 + bool "GART IOMMU support" if EMBEDDED
394 + default y
395 + select SWIOTLB
396 +- depends on X86_64 && PCI
397 ++ depends on X86_64 && PCI && K8_NB
398 + ---help---
399 + Support for full DMA access of devices with 32bit memory access only
400 + on systems with more than 3GB. This is usually needed for USB,
401 +@@ -2026,7 +2026,7 @@ endif # X86_32
402 +
403 + config K8_NB
404 + def_bool y
405 +- depends on AGP_AMD64 || (X86_64 && (GART_IOMMU || (PCI && NUMA)))
406 ++ depends on CPU_SUP_AMD && PCI
407 +
408 + source "drivers/pcmcia/Kconfig"
409 +
410 +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
411 +index be37059..b35c160 100644
412 +--- a/arch/x86/kernel/apic/io_apic.c
413 ++++ b/arch/x86/kernel/apic/io_apic.c
414 +@@ -2539,6 +2539,9 @@ void irq_force_complete_move(int irq)
415 + struct irq_desc *desc = irq_to_desc(irq);
416 + struct irq_cfg *cfg = desc->chip_data;
417 +
418 ++ if (!cfg)
419 ++ return;
420 ++
421 + __irq_complete_move(&desc, cfg->vector);
422 + }
423 + #else
424 +diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
425 +index 6e44519..3b5ea38 100644
426 +--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
427 ++++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
428 +@@ -929,7 +929,8 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
429 + powernow_table[i].index = index;
430 +
431 + /* Frequency may be rounded for these */
432 +- if (boot_cpu_data.x86 == 0x10 || boot_cpu_data.x86 == 0x11) {
433 ++ if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
434 ++ || boot_cpu_data.x86 == 0x11) {
435 + powernow_table[i].frequency =
436 + freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
437 + } else
438 +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
439 +index 7e1cca1..1366c7c 100644
440 +--- a/arch/x86/kernel/cpu/intel.c
441 ++++ b/arch/x86/kernel/cpu/intel.c
442 +@@ -47,6 +47,27 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
443 + (c->x86 == 0x6 && c->x86_model >= 0x0e))
444 + set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
445 +
446 ++ /*
447 ++ * Atom erratum AAE44/AAF40/AAG38/AAH41:
448 ++ *
449 ++ * A race condition between speculative fetches and invalidating
450 ++ * a large page. This is worked around in microcode, but we
451 ++ * need the microcode to have already been loaded... so if it is
452 ++ * not, recommend a BIOS update and disable large pages.
453 ++ */
454 ++ if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2) {
455 ++ u32 ucode, junk;
456 ++
457 ++ wrmsr(MSR_IA32_UCODE_REV, 0, 0);
458 ++ sync_core();
459 ++ rdmsr(MSR_IA32_UCODE_REV, junk, ucode);
460 ++
461 ++ if (ucode < 0x20e) {
462 ++ printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
463 ++ clear_cpu_cap(c, X86_FEATURE_PSE);
464 ++ }
465 ++ }
466 ++
467 + #ifdef CONFIG_X86_64
468 + set_cpu_cap(c, X86_FEATURE_SYSENTER32);
469 + #else
470 +diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c
471 +index cbc4332..9b89546 100644
472 +--- a/arch/x86/kernel/k8.c
473 ++++ b/arch/x86/kernel/k8.c
474 +@@ -121,3 +121,17 @@ void k8_flush_garts(void)
475 + }
476 + EXPORT_SYMBOL_GPL(k8_flush_garts);
477 +
478 ++static __init int init_k8_nbs(void)
479 ++{
480 ++ int err = 0;
481 ++
482 ++ err = cache_k8_northbridges();
483 ++
484 ++ if (err < 0)
485 ++ printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n");
486 ++
487 ++ return err;
488 ++}
489 ++
490 ++/* This has to go after the PCI subsystem */
491 ++fs_initcall(init_k8_nbs);
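Note: fs_initcall runs after the subsys_initcall level at which the PCI core registers itself, which is why the northbridge enumeration can move out of gart_iommu_init() into its own initcall. A generic sketch of the ordering idiom (function name illustrative):

    /* Initcall levels run in order (core, postcore, arch, subsys, fs, ...),
     * so an fs_initcall may safely walk PCI devices set up at subsys time.
     */
    static __init int after_pci_setup(void)
    {
            return cache_k8_northbridges();
    }
    fs_initcall(after_pci_setup);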
492 +diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
493 +index 4f41b29..0ae24d9 100644
494 +--- a/arch/x86/kernel/pci-gart_64.c
495 ++++ b/arch/x86/kernel/pci-gart_64.c
496 +@@ -738,7 +738,7 @@ int __init gart_iommu_init(void)
497 + unsigned long scratch;
498 + long i;
499 +
500 +- if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
501 ++ if (num_k8_northbridges == 0)
502 + return 0;
503 +
504 + #ifndef CONFIG_AGP_AMD64
505 +diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
506 +index 126f0b4..11d0702 100644
507 +--- a/arch/x86/kernel/process_64.c
508 ++++ b/arch/x86/kernel/process_64.c
509 +@@ -282,12 +282,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
510 +
511 + set_tsk_thread_flag(p, TIF_FORK);
512 +
513 +- p->thread.fs = me->thread.fs;
514 +- p->thread.gs = me->thread.gs;
515 + p->thread.io_bitmap_ptr = NULL;
516 +
517 + savesegment(gs, p->thread.gsindex);
518 ++ p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
519 + savesegment(fs, p->thread.fsindex);
520 ++ p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
521 + savesegment(es, p->thread.es);
522 + savesegment(ds, p->thread.ds);
523 +
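Note: the copy_thread reordering encodes an x86-64 invariant: a nonzero fs/gs selector means the segment base is reloaded from the GDT/LDT on context switch, so the parent's cached 64-bit base must not be inherited. Roughly:

    /* selector == 0 -> base was set via MSR (arch_prctl), inherit it
     * selector != 0 -> base comes from the descriptor table, cache 0
     */
    savesegment(fs, p->thread.fsindex);
    p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;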
524 +diff --git a/block/blk-timeout.c b/block/blk-timeout.c
525 +index 1ba7e0a..4f0c06c 100644
526 +--- a/block/blk-timeout.c
527 ++++ b/block/blk-timeout.c
528 +@@ -109,6 +109,7 @@ void blk_rq_timed_out_timer(unsigned long data)
529 + struct request_queue *q = (struct request_queue *) data;
530 + unsigned long flags, next = 0;
531 + struct request *rq, *tmp;
532 ++ int next_set = 0;
533 +
534 + spin_lock_irqsave(q->queue_lock, flags);
535 +
536 +@@ -122,16 +123,13 @@ void blk_rq_timed_out_timer(unsigned long data)
537 + if (blk_mark_rq_complete(rq))
538 + continue;
539 + blk_rq_timed_out(rq);
540 +- } else if (!next || time_after(next, rq->deadline))
541 ++ } else if (!next_set || time_after(next, rq->deadline)) {
542 + next = rq->deadline;
543 ++ next_set = 1;
544 ++ }
545 + }
546 +
547 +- /*
548 +- * next can never be 0 here with the list non-empty, since we always
549 +- * bump ->deadline to 1 so we can detect if the timer was ever added
550 +- * or not. See comment in blk_add_timer()
551 +- */
552 +- if (next)
553 ++ if (next_set)
554 + mod_timer(&q->timeout, round_jiffies_up(next));
555 +
556 + spin_unlock_irqrestore(q->queue_lock, flags);
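Note: the blk-timeout fix drops the assumption that a deadline of 0 cannot occur (it can, once jiffies wraps) and tracks whether any deadline has been seen in a separate flag. The underlying pattern, as a standalone sketch:

    /* Finding a minimum when every value, including 0, is legal:
     * keep an explicit flag instead of reserving 0 as "unset".
     */
    unsigned long next = 0;
    int next_set = 0;

    /* for each pending request rq: */
    if (!next_set || time_after(next, rq->deadline)) {
            next = rq->deadline;            /* may legitimately be 0 */
            next_set = 1;
    }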
557 +diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
558 +index 943f2ab..ce038d8 100644
559 +--- a/crypto/async_tx/async_raid6_recov.c
560 ++++ b/crypto/async_tx/async_raid6_recov.c
561 +@@ -324,6 +324,7 @@ struct dma_async_tx_descriptor *
562 + async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
563 + struct page **blocks, struct async_submit_ctl *submit)
564 + {
565 ++ void *scribble = submit->scribble;
566 + int non_zero_srcs, i;
567 +
568 + BUG_ON(faila == failb);
569 +@@ -332,11 +333,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
570 +
571 + pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
572 +
573 +- /* we need to preserve the contents of 'blocks' for the async
574 +- * case, so punt to synchronous if a scribble buffer is not available
575 ++ /* if a dma resource is not available or a scribble buffer is not
576 ++ * available punt to the synchronous path. In the 'dma not
577 ++ * available' case be sure to use the scribble buffer to
578 ++ * preserve the content of 'blocks' as the caller intended.
579 + */
580 +- if (!submit->scribble) {
581 +- void **ptrs = (void **) blocks;
582 ++ if (!async_dma_find_channel(DMA_PQ) || !scribble) {
583 ++ void **ptrs = scribble ? scribble : (void **) blocks;
584 +
585 + async_tx_quiesce(&submit->depend_tx);
586 + for (i = 0; i < disks; i++)
587 +@@ -406,11 +409,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
588 +
589 + pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
590 +
591 +- /* we need to preserve the contents of 'blocks' for the async
592 +- * case, so punt to synchronous if a scribble buffer is not available
593 ++ /* if a dma resource is not available or a scribble buffer is not
594 ++ * available punt to the synchronous path. In the 'dma not
595 ++ * available' case be sure to use the scribble buffer to
596 ++ * preserve the content of 'blocks' as the caller intended.
597 + */
598 +- if (!scribble) {
599 +- void **ptrs = (void **) blocks;
600 ++ if (!async_dma_find_channel(DMA_PQ) || !scribble) {
601 ++ void **ptrs = scribble ? scribble : (void **) blocks;
602 +
603 + async_tx_quiesce(&submit->depend_tx);
604 + for (i = 0; i < disks; i++)
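Note: both raid6 recovery paths now fall back to the synchronous code when either no DMA channel supports P+Q or no scribble buffer exists; when a scribble buffer is available, the synchronous path works there, leaving the caller's 'blocks' array intact. In outline:

    if (!async_dma_find_channel(DMA_PQ) || !scribble) {
            /* prefer scratch space so 'blocks' is preserved */
            void **ptrs = scribble ? scribble : (void **) blocks;
            /* ... synchronous recovery using ptrs ... */
    }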
605 +diff --git a/drivers/Makefile b/drivers/Makefile
606 +index 6ee53c7..8b0b948 100644
607 +--- a/drivers/Makefile
608 ++++ b/drivers/Makefile
609 +@@ -17,6 +17,7 @@ obj-$(CONFIG_SFI) += sfi/
610 + obj-$(CONFIG_PNP) += pnp/
611 + obj-$(CONFIG_ARM_AMBA) += amba/
612 +
613 ++obj-$(CONFIG_VIRTIO) += virtio/
614 + obj-$(CONFIG_XEN) += xen/
615 +
616 + # regulators early, since some subsystems rely on them to initialize
617 +@@ -106,7 +107,6 @@ obj-$(CONFIG_HID) += hid/
618 + obj-$(CONFIG_PPC_PS3) += ps3/
619 + obj-$(CONFIG_OF) += of/
620 + obj-$(CONFIG_SSB) += ssb/
621 +-obj-$(CONFIG_VIRTIO) += virtio/
622 + obj-$(CONFIG_VLYNQ) += vlynq/
623 + obj-$(CONFIG_STAGING) += staging/
624 + obj-y += platform/
625 +diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
626 +index dc4ffad..e02d93c 100644
627 +--- a/drivers/acpi/power_meter.c
628 ++++ b/drivers/acpi/power_meter.c
629 +@@ -34,7 +34,7 @@
630 + #define ACPI_POWER_METER_NAME "power_meter"
631 + ACPI_MODULE_NAME(ACPI_POWER_METER_NAME);
632 + #define ACPI_POWER_METER_DEVICE_NAME "Power Meter"
633 +-#define ACPI_POWER_METER_CLASS "power_meter_resource"
634 ++#define ACPI_POWER_METER_CLASS "pwr_meter_resource"
635 +
636 + #define NUM_SENSORS 17
637 +
638 +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
639 +index 79d33d9..7c85265 100644
640 +--- a/drivers/acpi/sleep.c
641 ++++ b/drivers/acpi/sleep.c
642 +@@ -450,6 +450,126 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
643 + },
644 + },
645 + {
646 ++ .callback = init_set_sci_en_on_resume,
647 ++ .ident = "Lenovo ThinkPad T410",
648 ++ .matches = {
649 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
650 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
651 ++ },
652 ++ },
653 ++ {
654 ++ .callback = init_set_sci_en_on_resume,
655 ++ .ident = "Lenovo ThinkPad T510",
656 ++ .matches = {
657 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
658 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
659 ++ },
660 ++ },
661 ++ {
662 ++ .callback = init_set_sci_en_on_resume,
663 ++ .ident = "Lenovo ThinkPad W510",
664 ++ .matches = {
665 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
666 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
667 ++ },
668 ++ },
669 ++ {
670 ++ .callback = init_set_sci_en_on_resume,
671 ++ .ident = "Lenovo ThinkPad X201",
672 ++ .matches = {
673 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
674 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
675 ++ },
676 ++ },
677 ++ {
678 ++ .callback = init_set_sci_en_on_resume,
679 ++ .ident = "Lenovo ThinkPad X201",
680 ++ .matches = {
681 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
682 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
683 ++ },
684 ++ },
685 ++ {
686 ++ .callback = init_set_sci_en_on_resume,
687 ++ .ident = "Lenovo ThinkPad T410",
688 ++ .matches = {
689 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
690 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
691 ++ },
692 ++ },
693 ++ {
694 ++ .callback = init_set_sci_en_on_resume,
695 ++ .ident = "Lenovo ThinkPad T510",
696 ++ .matches = {
697 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
698 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
699 ++ },
700 ++ },
701 ++ {
702 ++ .callback = init_set_sci_en_on_resume,
703 ++ .ident = "Lenovo ThinkPad W510",
704 ++ .matches = {
705 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
706 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
707 ++ },
708 ++ },
709 ++ {
710 ++ .callback = init_set_sci_en_on_resume,
711 ++ .ident = "Lenovo ThinkPad X201",
712 ++ .matches = {
713 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
714 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
715 ++ },
716 ++ },
717 ++ {
718 ++ .callback = init_set_sci_en_on_resume,
719 ++ .ident = "Lenovo ThinkPad X201",
720 ++ .matches = {
721 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
722 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
723 ++ },
724 ++ },
725 ++ {
726 ++ .callback = init_set_sci_en_on_resume,
727 ++ .ident = "Lenovo ThinkPad T410",
728 ++ .matches = {
729 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
730 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
731 ++ },
732 ++ },
733 ++ {
734 ++ .callback = init_set_sci_en_on_resume,
735 ++ .ident = "Lenovo ThinkPad T510",
736 ++ .matches = {
737 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
738 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
739 ++ },
740 ++ },
741 ++ {
742 ++ .callback = init_set_sci_en_on_resume,
743 ++ .ident = "Lenovo ThinkPad W510",
744 ++ .matches = {
745 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
746 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
747 ++ },
748 ++ },
749 ++ {
750 ++ .callback = init_set_sci_en_on_resume,
751 ++ .ident = "Lenovo ThinkPad X201",
752 ++ .matches = {
753 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
754 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
755 ++ },
756 ++ },
757 ++ {
758 ++ .callback = init_set_sci_en_on_resume,
759 ++ .ident = "Lenovo ThinkPad X201",
760 ++ .matches = {
761 ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
762 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
763 ++ },
764 ++ },
765 ++ {
766 + .callback = init_old_suspend_ordering,
767 + .ident = "Panasonic CF51-2L",
768 + .matches = {
769 +@@ -458,6 +578,30 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
770 + DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
771 + },
772 + },
773 ++ {
774 ++ .callback = init_set_sci_en_on_resume,
775 ++ .ident = "Dell Studio 1558",
776 ++ .matches = {
777 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
778 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"),
779 ++ },
780 ++ },
781 ++ {
782 ++ .callback = init_set_sci_en_on_resume,
783 ++ .ident = "Dell Studio 1557",
784 ++ .matches = {
785 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
786 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
787 ++ },
788 ++ },
789 ++ {
790 ++ .callback = init_set_sci_en_on_resume,
791 ++ .ident = "Dell Studio 1555",
792 ++ .matches = {
793 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
794 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"),
795 ++ },
796 ++ },
797 + {},
798 + };
799 + #endif /* CONFIG_SUSPEND */
800 +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
801 +index 9f6cfac..228740f 100644
802 +--- a/drivers/ata/libata-eh.c
803 ++++ b/drivers/ata/libata-eh.c
804 +@@ -879,6 +879,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
805 + void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
806 + {
807 + struct ata_port *ap = qc->ap;
808 ++ struct request_queue *q = qc->scsicmd->device->request_queue;
809 ++ unsigned long flags;
810 +
811 + WARN_ON(!ap->ops->error_handler);
812 +
813 +@@ -890,7 +892,9 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
814 + * Note that ATA_QCFLAG_FAILED is unconditionally set after
815 + * this function completes.
816 + */
817 ++ spin_lock_irqsave(q->queue_lock, flags);
818 + blk_abort_request(qc->scsicmd->request);
819 ++ spin_unlock_irqrestore(q->queue_lock, flags);
820 + }
821 +
822 + /**
823 +@@ -1624,6 +1628,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
824 + }
825 +
826 + /* okay, this error is ours */
827 ++ memset(&tf, 0, sizeof(tf));
828 + rc = ata_eh_read_log_10h(dev, &tag, &tf);
829 + if (rc) {
830 + ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
831 +diff --git a/drivers/base/memory.c b/drivers/base/memory.c
832 +index bd02505..d7d77d4 100644
833 +--- a/drivers/base/memory.c
834 ++++ b/drivers/base/memory.c
835 +@@ -311,7 +311,7 @@ static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);
836 + static ssize_t
837 + print_block_size(struct class *class, char *buf)
838 + {
839 +- return sprintf(buf, "%#lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
840 ++ return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
841 + }
842 +
843 + static CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
844 +diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
845 +index 2fb3a48..4b66c69 100644
846 +--- a/drivers/char/agp/Kconfig
847 ++++ b/drivers/char/agp/Kconfig
848 +@@ -57,7 +57,7 @@ config AGP_AMD
849 +
850 + config AGP_AMD64
851 + tristate "AMD Opteron/Athlon64 on-CPU GART support"
852 +- depends on AGP && X86
853 ++ depends on AGP && X86 && K8_NB
854 + help
855 + This option gives you AGP support for the GLX component of
856 + X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
857 +diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
858 +index 73655ae..f8e57c6 100644
859 +--- a/drivers/cpuidle/governors/menu.c
860 ++++ b/drivers/cpuidle/governors/menu.c
861 +@@ -101,7 +101,6 @@ struct menu_device {
862 +
863 + unsigned int expected_us;
864 + u64 predicted_us;
865 +- unsigned int measured_us;
866 + unsigned int exit_us;
867 + unsigned int bucket;
868 + u64 correction_factor[BUCKETS];
869 +@@ -187,14 +186,14 @@ static int menu_select(struct cpuidle_device *dev)
870 + int i;
871 + int multiplier;
872 +
873 +- data->last_state_idx = 0;
874 +- data->exit_us = 0;
875 +-
876 + if (data->needs_update) {
877 + menu_update(dev);
878 + data->needs_update = 0;
879 + }
880 +
881 ++ data->last_state_idx = 0;
882 ++ data->exit_us = 0;
883 ++
884 + /* Special case when user has set very strict latency requirement */
885 + if (unlikely(latency_req == 0))
886 + return 0;
887 +@@ -294,7 +293,7 @@ static void menu_update(struct cpuidle_device *dev)
888 + new_factor = data->correction_factor[data->bucket]
889 + * (DECAY - 1) / DECAY;
890 +
891 +- if (data->expected_us > 0 && data->measured_us < MAX_INTERESTING)
892 ++ if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
893 + new_factor += RESOLUTION * measured_us / data->expected_us;
894 + else
895 + /*
896 +diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
897 +index f5b6d9f..97e64bc 100644
898 +--- a/drivers/edac/edac_mce_amd.c
899 ++++ b/drivers/edac/edac_mce_amd.c
900 +@@ -294,7 +294,6 @@ wrong_ls_mce:
901 + void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
902 + {
903 + u32 ec = ERROR_CODE(regs->nbsl);
904 +- u32 xec = EXT_ERROR_CODE(regs->nbsl);
905 +
906 + if (!handle_errors)
907 + return;
908 +@@ -324,7 +323,7 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
909 + pr_cont("\n");
910 + }
911 +
912 +- pr_emerg("%s.\n", EXT_ERR_MSG(xec));
913 ++ pr_emerg("%s.\n", EXT_ERR_MSG(regs->nbsl));
914 +
915 + if (BUS_ERROR(ec) && nb_bus_decoder)
916 + nb_bus_decoder(node_id, regs);
917 +@@ -374,7 +373,7 @@ static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
918 + ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
919 +
920 + /* do the two bits[14:13] together */
921 +- ecc = m->status & (3ULL << 45);
922 ++ ecc = (m->status >> 45) & 0x3;
923 + if (ecc)
924 + pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));
925 +
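Note: the ecc fix matters because the old expression masked bits 46:45 in place: m->status & (3ULL << 45) evaluates to 0 or a value near 2^45, so the ecc == 2 comparison could never be true. Shifting before masking yields the two-bit field itself. A worked example:

    u64 status = 1ULL << 46;            /* CECC: bit 46 set, bit 45 clear */

    u64 wrong = status & (3ULL << 45);  /* 0x400000000000 -- never == 2 */
    u32 right = (status >> 45) & 0x3;   /* 2 -- the field value itself */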
926 +diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
927 +index a894ade..1372796 100644
928 +--- a/drivers/gpu/drm/i915/i915_debugfs.c
929 ++++ b/drivers/gpu/drm/i915/i915_debugfs.c
930 +@@ -162,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
931 + struct drm_device *dev = node->minor->dev;
932 + drm_i915_private_t *dev_priv = dev->dev_private;
933 +
934 +- if (!IS_IRONLAKE(dev)) {
935 ++ if (!HAS_PCH_SPLIT(dev)) {
936 + seq_printf(m, "Interrupt enable: %08x\n",
937 + I915_READ(IER));
938 + seq_printf(m, "Interrupt identity: %08x\n",
939 +diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
940 +index 2307f98..d642efd 100644
941 +--- a/drivers/gpu/drm/i915/i915_dma.c
942 ++++ b/drivers/gpu/drm/i915/i915_dma.c
943 +@@ -978,15 +978,21 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
944 + * Some of the preallocated space is taken by the GTT
945 + * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
946 + */
947 +- if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev))
948 ++ if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
949 + overhead = 4096;
950 + else
951 + overhead = (*aperture_size / 1024) + 4096;
952 +
953 + switch (tmp & INTEL_GMCH_GMS_MASK) {
954 + case INTEL_855_GMCH_GMS_DISABLED:
955 +- DRM_ERROR("video memory is disabled\n");
956 +- return -1;
957 ++ /* XXX: This is what my A1 silicon has. */
958 ++ if (IS_GEN6(dev)) {
959 ++ stolen = 64 * 1024 * 1024;
960 ++ } else {
961 ++ DRM_ERROR("video memory is disabled\n");
962 ++ return -1;
963 ++ }
964 ++ break;
965 + case INTEL_855_GMCH_GMS_STOLEN_1M:
966 + stolen = 1 * 1024 * 1024;
967 + break;
968 +@@ -1064,7 +1070,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
969 + int gtt_offset, gtt_size;
970 +
971 + if (IS_I965G(dev)) {
972 +- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
973 ++ if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
974 + gtt_offset = 2*1024*1024;
975 + gtt_size = 2*1024*1024;
976 + } else {
977 +@@ -1445,7 +1451,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
978 +
979 + dev->driver->get_vblank_counter = i915_get_vblank_counter;
980 + dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
981 +- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
982 ++ if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
983 + dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
984 + dev->driver->get_vblank_counter = gm45_get_vblank_counter;
985 + }
986 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
987 +index b99b6a8..16ce3ba 100644
988 +--- a/drivers/gpu/drm/i915/i915_drv.h
989 ++++ b/drivers/gpu/drm/i915/i915_drv.h
990 +@@ -1026,7 +1026,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
991 + #define IS_845G(dev) ((dev)->pci_device == 0x2562)
992 + #define IS_I85X(dev) ((dev)->pci_device == 0x3582)
993 + #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
994 +-#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
995 ++#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
996 + #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
997 + #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
998 + #define IS_I945G(dev) ((dev)->pci_device == 0x2772)
999 +@@ -1045,8 +1045,29 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1000 + #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
1001 + #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1002 +
1003 ++#define IS_GEN3(dev) (IS_I915G(dev) || \
1004 ++ IS_I915GM(dev) || \
1005 ++ IS_I945G(dev) || \
1006 ++ IS_I945GM(dev) || \
1007 ++ IS_G33(dev) || \
1008 ++ IS_PINEVIEW(dev))
1009 ++#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \
1010 ++ (dev)->pci_device == 0x2982 || \
1011 ++ (dev)->pci_device == 0x2992 || \
1012 ++ (dev)->pci_device == 0x29A2 || \
1013 ++ (dev)->pci_device == 0x2A02 || \
1014 ++ (dev)->pci_device == 0x2A12 || \
1015 ++ (dev)->pci_device == 0x2E02 || \
1016 ++ (dev)->pci_device == 0x2E12 || \
1017 ++ (dev)->pci_device == 0x2E22 || \
1018 ++ (dev)->pci_device == 0x2E32 || \
1019 ++ (dev)->pci_device == 0x2A42 || \
1020 ++ (dev)->pci_device == 0x2E42)
1021 ++
1022 + #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1023 +
1024 ++#define IS_GEN6(dev) ((dev)->pci_device == 0x0102)
1025 ++
1026 + /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1027 + * rows, which changed the alignment requirements and fence programming.
1028 + */
1029 +@@ -1067,6 +1088,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1030 + #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1031 + #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
1032 +
1033 ++#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
1034 ++ IS_GEN6(dev))
1035 ++
1036 + #define PRIMARY_RINGBUFFER_SIZE (128*1024)
1037 +
1038 + #endif
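Note: the new HAS_PCH_SPLIT() predicate names the hardware property the repeated IS_IRONLAKE() tests were really probing: display logic split between CPU and PCH (Ironlake and Sandybridge) rather than living in the northbridge. The mechanical rewrite it enables:

    /* before: every new PCH-based chip needs another || clause */
    if (IS_IRONLAKE(dev) || IS_GEN6(dev))
            reg = PCH_ADPA;

    /* after: one predicate captures the property */
    if (HAS_PCH_SPLIT(dev))
            reg = PCH_ADPA;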
1039 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1040 +index fd099a1..6458400 100644
1041 +--- a/drivers/gpu/drm/i915/i915_gem.c
1042 ++++ b/drivers/gpu/drm/i915/i915_gem.c
1043 +@@ -1819,7 +1819,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
1044 + return -EIO;
1045 +
1046 + if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1047 +- if (IS_IRONLAKE(dev))
1048 ++ if (HAS_PCH_SPLIT(dev))
1049 + ier = I915_READ(DEIER) | I915_READ(GTIER);
1050 + else
1051 + ier = I915_READ(IER);
1052 +@@ -2316,6 +2316,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
1053 + pitch_val = obj_priv->stride / tile_width;
1054 + pitch_val = ffs(pitch_val) - 1;
1055 +
1056 ++ if (obj_priv->tiling_mode == I915_TILING_Y &&
1057 ++ HAS_128_BYTE_Y_TILING(dev))
1058 ++ WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
1059 ++ else
1060 ++ WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
1061 ++
1062 + val = obj_priv->gtt_offset;
1063 + if (obj_priv->tiling_mode == I915_TILING_Y)
1064 + val |= 1 << I830_FENCE_TILING_Y_SHIFT;
1065 +diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
1066 +index df278b2..040e80c 100644
1067 +--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
1068 ++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
1069 +@@ -209,7 +209,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
1070 + uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
1071 + bool need_disable;
1072 +
1073 +- if (IS_IRONLAKE(dev)) {
1074 ++ if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
1075 + /* On Ironlake whatever DRAM config, GPU always do
1076 + * same swizzling setup.
1077 + */
1078 +@@ -357,21 +357,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
1079 + * reg, so dont bother to check the size */
1080 + if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
1081 + return false;
1082 +- } else if (IS_I9XX(dev)) {
1083 +- uint32_t pitch_val = ffs(stride / tile_width) - 1;
1084 +-
1085 +- /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
1086 +- * instead of 4 (2KB) on 945s.
1087 +- */
1088 +- if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
1089 +- size > (I830_FENCE_MAX_SIZE_VAL << 20))
1090 ++ } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
1091 ++ if (stride > 8192)
1092 + return false;
1093 +- } else {
1094 +- uint32_t pitch_val = ffs(stride / tile_width) - 1;
1095 +
1096 +- if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
1097 +- size > (I830_FENCE_MAX_SIZE_VAL << 19))
1098 +- return false;
1099 ++ if (IS_GEN3(dev)) {
1100 ++ if (size > I830_FENCE_MAX_SIZE_VAL << 20)
1101 ++ return false;
1102 ++ } else {
1103 ++ if (size > I830_FENCE_MAX_SIZE_VAL << 19)
1104 ++ return false;
1105 ++ }
1106 + }
1107 +
1108 + /* 965+ just needs multiples of tile width */
1109 +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
1110 +index a17d6bd..032f667 100644
1111 +--- a/drivers/gpu/drm/i915/i915_irq.c
1112 ++++ b/drivers/gpu/drm/i915/i915_irq.c
1113 +@@ -576,7 +576,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1114 +
1115 + atomic_inc(&dev_priv->irq_received);
1116 +
1117 +- if (IS_IRONLAKE(dev))
1118 ++ if (HAS_PCH_SPLIT(dev))
1119 + return ironlake_irq_handler(dev);
1120 +
1121 + iir = I915_READ(IIR);
1122 +@@ -737,7 +737,7 @@ void i915_user_irq_get(struct drm_device *dev)
1123 +
1124 + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1125 + if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
1126 +- if (IS_IRONLAKE(dev))
1127 ++ if (HAS_PCH_SPLIT(dev))
1128 + ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
1129 + else
1130 + i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
1131 +@@ -753,7 +753,7 @@ void i915_user_irq_put(struct drm_device *dev)
1132 + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1133 + BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
1134 + if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
1135 +- if (IS_IRONLAKE(dev))
1136 ++ if (HAS_PCH_SPLIT(dev))
1137 + ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
1138 + else
1139 + i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
1140 +@@ -861,7 +861,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
1141 + return -EINVAL;
1142 +
1143 + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1144 +- if (IS_IRONLAKE(dev))
1145 ++ if (HAS_PCH_SPLIT(dev))
1146 + ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1147 + DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1148 + else if (IS_I965G(dev))
1149 +@@ -883,7 +883,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
1150 + unsigned long irqflags;
1151 +
1152 + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1153 +- if (IS_IRONLAKE(dev))
1154 ++ if (HAS_PCH_SPLIT(dev))
1155 + ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1156 + DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1157 + else
1158 +@@ -897,7 +897,7 @@ void i915_enable_interrupt (struct drm_device *dev)
1159 + {
1160 + struct drm_i915_private *dev_priv = dev->dev_private;
1161 +
1162 +- if (!IS_IRONLAKE(dev))
1163 ++ if (!HAS_PCH_SPLIT(dev))
1164 + opregion_enable_asle(dev);
1165 + dev_priv->irq_enabled = 1;
1166 + }
1167 +@@ -1076,7 +1076,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1168 + INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1169 + INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1170 +
1171 +- if (IS_IRONLAKE(dev)) {
1172 ++ if (HAS_PCH_SPLIT(dev)) {
1173 + ironlake_irq_preinstall(dev);
1174 + return;
1175 + }
1176 +@@ -1108,7 +1108,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1177 +
1178 + dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1179 +
1180 +- if (IS_IRONLAKE(dev))
1181 ++ if (HAS_PCH_SPLIT(dev))
1182 + return ironlake_irq_postinstall(dev);
1183 +
1184 + /* Unmask the interrupts that we always want on. */
1185 +@@ -1196,7 +1196,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
1186 +
1187 + dev_priv->vblank_pipe = 0;
1188 +
1189 +- if (IS_IRONLAKE(dev)) {
1190 ++ if (HAS_PCH_SPLIT(dev)) {
1191 + ironlake_irq_uninstall(dev);
1192 + return;
1193 + }
1194 +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1195 +index ab1bd2d..fd95bdf 100644
1196 +--- a/drivers/gpu/drm/i915/i915_reg.h
1197 ++++ b/drivers/gpu/drm/i915/i915_reg.h
1198 +@@ -221,7 +221,7 @@
1199 + #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
1200 + #define I830_FENCE_PITCH_SHIFT 4
1201 + #define I830_FENCE_REG_VALID (1<<0)
1202 +-#define I915_FENCE_MAX_PITCH_VAL 0x10
1203 ++#define I915_FENCE_MAX_PITCH_VAL 4
1204 + #define I830_FENCE_MAX_PITCH_VAL 6
1205 + #define I830_FENCE_MAX_SIZE_VAL (1<<8)
1206 +
1207 +diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
1208 +index 15fbc1b..70c9d4b 100644
1209 +--- a/drivers/gpu/drm/i915/intel_bios.c
1210 ++++ b/drivers/gpu/drm/i915/intel_bios.c
1211 +@@ -247,6 +247,7 @@ static void
1212 + parse_general_features(struct drm_i915_private *dev_priv,
1213 + struct bdb_header *bdb)
1214 + {
1215 ++ struct drm_device *dev = dev_priv->dev;
1216 + struct bdb_general_features *general;
1217 +
1218 + /* Set sensible defaults in case we can't find the general block */
1219 +@@ -263,7 +264,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
1220 + if (IS_I85X(dev_priv->dev))
1221 + dev_priv->lvds_ssc_freq =
1222 + general->ssc_freq ? 66 : 48;
1223 +- else if (IS_IRONLAKE(dev_priv->dev))
1224 ++ else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
1225 + dev_priv->lvds_ssc_freq =
1226 + general->ssc_freq ? 100 : 120;
1227 + else
1228 +diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
1229 +index 79dd402..fccf074 100644
1230 +--- a/drivers/gpu/drm/i915/intel_crt.c
1231 ++++ b/drivers/gpu/drm/i915/intel_crt.c
1232 +@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
1233 + struct drm_i915_private *dev_priv = dev->dev_private;
1234 + u32 temp, reg;
1235 +
1236 +- if (IS_IRONLAKE(dev))
1237 ++ if (HAS_PCH_SPLIT(dev))
1238 + reg = PCH_ADPA;
1239 + else
1240 + reg = ADPA;
1241 +@@ -113,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
1242 + else
1243 + dpll_md_reg = DPLL_B_MD;
1244 +
1245 +- if (IS_IRONLAKE(dev))
1246 ++ if (HAS_PCH_SPLIT(dev))
1247 + adpa_reg = PCH_ADPA;
1248 + else
1249 + adpa_reg = ADPA;
1250 +@@ -122,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
1251 + * Disable separate mode multiplier used when cloning SDVO to CRT
1252 + * XXX this needs to be adjusted when we really are cloning
1253 + */
1254 +- if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
1255 ++ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
1256 + dpll_md = I915_READ(dpll_md_reg);
1257 + I915_WRITE(dpll_md_reg,
1258 + dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
1259 +@@ -136,11 +136,11 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
1260 +
1261 + if (intel_crtc->pipe == 0) {
1262 + adpa |= ADPA_PIPE_A_SELECT;
1263 +- if (!IS_IRONLAKE(dev))
1264 ++ if (!HAS_PCH_SPLIT(dev))
1265 + I915_WRITE(BCLRPAT_A, 0);
1266 + } else {
1267 + adpa |= ADPA_PIPE_B_SELECT;
1268 +- if (!IS_IRONLAKE(dev))
1269 ++ if (!HAS_PCH_SPLIT(dev))
1270 + I915_WRITE(BCLRPAT_B, 0);
1271 + }
1272 +
1273 +@@ -202,7 +202,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
1274 + u32 hotplug_en;
1275 + int i, tries = 0;
1276 +
1277 +- if (IS_IRONLAKE(dev))
1278 ++ if (HAS_PCH_SPLIT(dev))
1279 + return intel_ironlake_crt_detect_hotplug(connector);
1280 +
1281 + /*
1282 +@@ -524,7 +524,7 @@ void intel_crt_init(struct drm_device *dev)
1283 + &intel_output->enc);
1284 +
1285 + /* Set up the DDC bus. */
1286 +- if (IS_IRONLAKE(dev))
1287 ++ if (HAS_PCH_SPLIT(dev))
1288 + i2c_reg = PCH_GPIOA;
1289 + else {
1290 + i2c_reg = GPIOA;
1291 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1292 +index b27202d..4b2458d 100644
1293 +--- a/drivers/gpu/drm/i915/intel_display.c
1294 ++++ b/drivers/gpu/drm/i915/intel_display.c
1295 +@@ -232,7 +232,7 @@ struct intel_limit {
1296 + #define G4X_P2_DISPLAY_PORT_FAST 10
1297 + #define G4X_P2_DISPLAY_PORT_LIMIT 0
1298 +
1299 +-/* Ironlake */
1300 ++/* Ironlake / Sandybridge */
1301 + /* as we calculate clock using (register_value + 2) for
1302 + N/M1/M2, so here the range value for them is (actual_value-2).
1303 + */
1304 +@@ -690,7 +690,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
1305 + struct drm_device *dev = crtc->dev;
1306 + const intel_limit_t *limit;
1307 +
1308 +- if (IS_IRONLAKE(dev))
1309 ++ if (HAS_PCH_SPLIT(dev))
1310 + limit = intel_ironlake_limit(crtc);
1311 + else if (IS_G4X(dev)) {
1312 + limit = intel_g4x_limit(crtc);
1313 +@@ -1366,7 +1366,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1314 + dspcntr &= ~DISPPLANE_TILED;
1315 + }
1316 +
1317 +- if (IS_IRONLAKE(dev))
1318 ++ if (HAS_PCH_SPLIT(dev))
1319 + /* must disable */
1320 + dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1321 +
1322 +@@ -1427,7 +1427,7 @@ static void i915_disable_vga (struct drm_device *dev)
1323 + u8 sr1;
1324 + u32 vga_reg;
1325 +
1326 +- if (IS_IRONLAKE(dev))
1327 ++ if (HAS_PCH_SPLIT(dev))
1328 + vga_reg = CPU_VGACNTRL;
1329 + else
1330 + vga_reg = VGACNTRL;
1331 +@@ -2111,7 +2111,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
1332 + struct drm_display_mode *adjusted_mode)
1333 + {
1334 + struct drm_device *dev = crtc->dev;
1335 +- if (IS_IRONLAKE(dev)) {
1336 ++ if (HAS_PCH_SPLIT(dev)) {
1337 + /* FDI link clock is fixed at 2.7G */
1338 + if (mode->clock * 3 > 27000 * 4)
1339 + return MODE_CLOCK_HIGH;
1340 +@@ -2967,7 +2967,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1341 + refclk / 1000);
1342 + } else if (IS_I9XX(dev)) {
1343 + refclk = 96000;
1344 +- if (IS_IRONLAKE(dev))
1345 ++ if (HAS_PCH_SPLIT(dev))
1346 + refclk = 120000; /* 120Mhz refclk */
1347 + } else {
1348 + refclk = 48000;
1349 +@@ -3025,7 +3025,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1350 + }
1351 +
1352 + /* FDI link */
1353 +- if (IS_IRONLAKE(dev)) {
1354 ++ if (HAS_PCH_SPLIT(dev)) {
1355 + int lane, link_bw, bpp;
1356 + /* eDP doesn't require FDI link, so just set DP M/N
1357 + according to current link config */
1358 +@@ -3102,7 +3102,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1359 + * PCH B stepping, previous chipset stepping should be
1360 + * ignoring this setting.
1361 + */
1362 +- if (IS_IRONLAKE(dev)) {
1363 ++ if (HAS_PCH_SPLIT(dev)) {
1364 + temp = I915_READ(PCH_DREF_CONTROL);
1365 + /* Always enable nonspread source */
1366 + temp &= ~DREF_NONSPREAD_SOURCE_MASK;
1367 +@@ -3149,7 +3149,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1368 + reduced_clock.m2;
1369 + }
1370 +
1371 +- if (!IS_IRONLAKE(dev))
1372 ++ if (!HAS_PCH_SPLIT(dev))
1373 + dpll = DPLL_VGA_MODE_DIS;
1374 +
1375 + if (IS_I9XX(dev)) {
1376 +@@ -3162,7 +3162,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1377 + sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
1378 + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1379 + dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
1380 +- else if (IS_IRONLAKE(dev))
1381 ++ else if (HAS_PCH_SPLIT(dev))
1382 + dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
1383 + }
1384 + if (is_dp)
1385 +@@ -3174,7 +3174,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1386 + else {
1387 + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1388 + /* also FPA1 */
1389 +- if (IS_IRONLAKE(dev))
1390 ++ if (HAS_PCH_SPLIT(dev))
1391 + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1392 + if (IS_G4X(dev) && has_reduced_clock)
1393 + dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1394 +@@ -3193,7 +3193,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1395 + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1396 + break;
1397 + }
1398 +- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
1399 ++ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
1400 + dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
1401 + } else {
1402 + if (is_lvds) {
1403 +@@ -3227,7 +3227,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1404 +
1405 + /* Ironlake's plane is forced to pipe, bit 24 is to
1406 + enable color space conversion */
1407 +- if (!IS_IRONLAKE(dev)) {
1408 ++ if (!HAS_PCH_SPLIT(dev)) {
1409 + if (pipe == 0)
1410 + dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
1411 + else
1412 +@@ -3254,14 +3254,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1413 +
1414 +
1415 + /* Disable the panel fitter if it was on our pipe */
1416 +- if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe)
1417 ++ if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
1418 + I915_WRITE(PFIT_CONTROL, 0);
1419 +
1420 + DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
1421 + drm_mode_debug_printmodeline(mode);
1422 +
1423 + /* assign to Ironlake registers */
1424 +- if (IS_IRONLAKE(dev)) {
1425 ++ if (HAS_PCH_SPLIT(dev)) {
1426 + fp_reg = pch_fp_reg;
1427 + dpll_reg = pch_dpll_reg;
1428 + }
1429 +@@ -3282,7 +3282,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1430 + if (is_lvds) {
1431 + u32 lvds;
1432 +
1433 +- if (IS_IRONLAKE(dev))
1434 ++ if (HAS_PCH_SPLIT(dev))
1435 + lvds_reg = PCH_LVDS;
1436 +
1437 + lvds = I915_READ(lvds_reg);
1438 +@@ -3328,7 +3328,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1439 + /* Wait for the clocks to stabilize. */
1440 + udelay(150);
1441 +
1442 +- if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
1443 ++ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
1444 + if (is_sdvo) {
1445 + sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
1446 + I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
1447 +@@ -3375,14 +3375,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1448 + /* pipesrc and dspsize control the size that is scaled from, which should
1449 + * always be the user's requested size.
1450 + */
1451 +- if (!IS_IRONLAKE(dev)) {
1452 ++ if (!HAS_PCH_SPLIT(dev)) {
1453 + I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
1454 + (mode->hdisplay - 1));
1455 + I915_WRITE(dsppos_reg, 0);
1456 + }
1457 + I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
1458 +
1459 +- if (IS_IRONLAKE(dev)) {
1460 ++ if (HAS_PCH_SPLIT(dev)) {
1461 + I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
1462 + I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
1463 + I915_WRITE(link_m1_reg, m_n.link_m);
1464 +@@ -3403,7 +3403,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1465 +
1466 + intel_wait_for_vblank(dev);
1467 +
1468 +- if (IS_IRONLAKE(dev)) {
1469 ++ if (HAS_PCH_SPLIT(dev)) {
1470 + /* enable address swizzle for tiling buffer */
1471 + temp = I915_READ(DISP_ARB_CTL);
1472 + I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
1473 +@@ -3438,7 +3438,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
1474 + return;
1475 +
1476 + /* use legacy palette for Ironlake */
1477 +- if (IS_IRONLAKE(dev))
1478 ++ if (HAS_PCH_SPLIT(dev))
1479 + palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
1480 + LGC_PALETTE_B;
1481 +
1482 +@@ -3922,7 +3922,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
1483 + int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
1484 + int dpll = I915_READ(dpll_reg);
1485 +
1486 +- if (IS_IRONLAKE(dev))
1487 ++ if (HAS_PCH_SPLIT(dev))
1488 + return;
1489 +
1490 + if (!dev_priv->lvds_downclock_avail)
1491 +@@ -3961,7 +3961,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
1492 + int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
1493 + int dpll = I915_READ(dpll_reg);
1494 +
1495 +- if (IS_IRONLAKE(dev))
1496 ++ if (HAS_PCH_SPLIT(dev))
1497 + return;
1498 +
1499 + if (!dev_priv->lvds_downclock_avail)
1500 +@@ -4382,7 +4382,7 @@ static void intel_setup_outputs(struct drm_device *dev)
1501 + if (IS_MOBILE(dev) && !IS_I830(dev))
1502 + intel_lvds_init(dev);
1503 +
1504 +- if (IS_IRONLAKE(dev)) {
1505 ++ if (HAS_PCH_SPLIT(dev)) {
1506 + int found;
1507 +
1508 + if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
1509 +@@ -4451,7 +4451,7 @@ static void intel_setup_outputs(struct drm_device *dev)
1510 + DRM_DEBUG_KMS("probing DP_D\n");
1511 + intel_dp_init(dev, DP_D);
1512 + }
1513 +- } else if (IS_I8XX(dev))
1514 ++ } else if (IS_GEN2(dev))
1515 + intel_dvo_init(dev);
1516 +
1517 + if (SUPPORTS_TV(dev))
1518 +@@ -4599,7 +4599,7 @@ void intel_init_clock_gating(struct drm_device *dev)
1519 + * Disable clock gating that is reported to work incorrectly according to
1520 + * the specs, but enable as much else as we can.
1521 + */
1522 +- if (IS_IRONLAKE(dev)) {
1523 ++ if (HAS_PCH_SPLIT(dev)) {
1524 + return;
1525 + } else if (IS_G4X(dev)) {
1526 + uint32_t dspclk_gate;
1527 +@@ -4672,7 +4672,7 @@ static void intel_init_display(struct drm_device *dev)
1528 + struct drm_i915_private *dev_priv = dev->dev_private;
1529 +
1530 + /* We always want a DPMS function */
1531 +- if (IS_IRONLAKE(dev))
1532 ++ if (HAS_PCH_SPLIT(dev))
1533 + dev_priv->display.dpms = ironlake_crtc_dpms;
1534 + else
1535 + dev_priv->display.dpms = i9xx_crtc_dpms;
1536 +@@ -4715,7 +4715,7 @@ static void intel_init_display(struct drm_device *dev)
1537 + i830_get_display_clock_speed;
1538 +
1539 + /* For FIFO watermark updates */
1540 +- if (IS_IRONLAKE(dev))
1541 ++ if (HAS_PCH_SPLIT(dev))
1542 + dev_priv->display.update_wm = NULL;
1543 + else if (IS_G4X(dev))
1544 + dev_priv->display.update_wm = g4x_update_wm;
1545 +diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
1546 +index 1238bc9..66df0c3 100644
1547 +--- a/drivers/gpu/drm/i915/intel_lvds.c
1548 ++++ b/drivers/gpu/drm/i915/intel_lvds.c
1549 +@@ -661,7 +661,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
1550 + /* ACPI lid methods were generally unreliable in this generation, so
1551 + * don't even bother.
1552 + */
1553 +- if (IS_I8XX(dev))
1554 ++ if (IS_GEN2(dev))
1555 + return connector_status_connected;
1556 +
1557 + if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
1558 +diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
1559 +index 63f569b..6b89042 100644
1560 +--- a/drivers/gpu/drm/i915/intel_overlay.c
1561 ++++ b/drivers/gpu/drm/i915/intel_overlay.c
1562 +@@ -172,7 +172,7 @@ struct overlay_registers {
1563 + #define OFC_UPDATE 0x1
1564 +
1565 + #define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
1566 +-#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev))
1567 ++#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
1568 +
1569 +
1570 + static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1571 +diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
1572 +index 10be7b5..855911e 100644
1573 +--- a/drivers/i2c/i2c-core.c
1574 ++++ b/drivers/i2c/i2c-core.c
1575 +@@ -1210,12 +1210,23 @@ static int i2c_detect_address(struct i2c_client *temp_client,
1576 + return 0;
1577 +
1578 + /* Make sure there is something at this address */
1579 +- if (i2c_smbus_xfer(adapter, addr, 0, 0, 0, I2C_SMBUS_QUICK, NULL) < 0)
1580 +- return 0;
1581 ++ if (addr == 0x73 && (adapter->class & I2C_CLASS_HWMON)) {
1582 ++ /* Special probe for FSC hwmon chips */
1583 ++ union i2c_smbus_data dummy;
1584 +
1585 +- /* Prevent 24RF08 corruption */
1586 +- if ((addr & ~0x0f) == 0x50)
1587 +- i2c_smbus_xfer(adapter, addr, 0, 0, 0, I2C_SMBUS_QUICK, NULL);
1588 ++ if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_READ, 0,
1589 ++ I2C_SMBUS_BYTE_DATA, &dummy) < 0)
1590 ++ return 0;
1591 ++ } else {
1592 ++ if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_WRITE, 0,
1593 ++ I2C_SMBUS_QUICK, NULL) < 0)
1594 ++ return 0;
1595 ++
1596 ++ /* Prevent 24RF08 corruption */
1597 ++ if ((addr & ~0x0f) == 0x50)
1598 ++ i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_WRITE, 0,
1599 ++ I2C_SMBUS_QUICK, NULL);
1600 ++ }
1601 +
1602 + /* Finally call the custom detection function */
1603 + memset(&info, 0, sizeof(struct i2c_board_info));
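
The i2c-core hunk above splits the detection probe by address class: FSC hardware-monitoring chips at 0x73 reportedly dislike the usual SMBus quick write, so hwmon-class adapters probe that address with a harmless byte-data read instead, while every other address keeps the quick write plus the second quick write that resets 24RF08-style EEPROM address pointers. A minimal userspace sketch of the same decision logic; smbus_quick_write() and smbus_read_byte_data() are hypothetical stand-ins for the bus transfer, not kernel APIs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the SMBus primitives: each returns 0 when a
 * device acknowledges the transaction, negative otherwise. */
static int smbus_quick_write(uint8_t addr) { (void)addr; return 0; }
static int smbus_read_byte_data(uint8_t addr, uint8_t cmd)
{ (void)addr; (void)cmd; return 0; }

/* Mirrors the probe policy from the hunk above: probe with a quick write,
 * except for 0x73 on hwmon-class adapters, which gets a byte-data read
 * because the quick write reportedly upsets FSC chips.  A successful quick
 * write to 0x50..0x5f is repeated once, which resets the address pointer
 * of 24RF08-style EEPROMs and prevents corruption. */
static bool something_at_address(uint8_t addr, bool adapter_is_hwmon)
{
    if (addr == 0x73 && adapter_is_hwmon)
        return smbus_read_byte_data(addr, 0) >= 0;

    if (smbus_quick_write(addr) < 0)
        return false;

    if ((addr & ~0x0f) == 0x50)
        smbus_quick_write(addr);    /* second cycle: 24RF08 workaround */

    return true;
}

int main(void)
{
    printf("0x73 present: %d\n", something_at_address(0x73, true));
    return 0;
}
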
1604 +diff --git a/drivers/md/md.c b/drivers/md/md.c
1605 +index a20a71e..2ecd1d5 100644
1606 +--- a/drivers/md/md.c
1607 ++++ b/drivers/md/md.c
1608 +@@ -2108,12 +2108,18 @@ repeat:
1609 + if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1610 + /* .. if the array isn't clean, an 'even' event must also go
1611 + * to spares. */
1612 +- if ((mddev->events&1)==0)
1613 ++ if ((mddev->events&1)==0) {
1614 + nospares = 0;
1615 ++ sync_req = 2; /* force a second update to get the
1616 ++ * even/odd in sync */
1617 ++ }
1618 + } else {
1619 + /* otherwise an 'odd' event must go to spares */
1620 +- if ((mddev->events&1))
1621 ++ if ((mddev->events&1)) {
1622 + nospares = 0;
1623 ++ sync_req = 2; /* force a second update to get the
1624 ++ * even/odd in sync */
1625 ++ }
1626 + }
1627 + }
1628 +
1629 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1630 +index ceb24af..0468f5b 100644
1631 +--- a/drivers/md/raid5.c
1632 ++++ b/drivers/md/raid5.c
1633 +@@ -1526,7 +1526,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
1634 +
1635 + clear_bit(R5_UPTODATE, &sh->dev[i].flags);
1636 + atomic_inc(&rdev->read_errors);
1637 +- if (conf->mddev->degraded)
1638 ++ if (conf->mddev->degraded >= conf->max_degraded)
1639 + printk_rl(KERN_WARNING
1640 + "raid5:%s: read error not correctable "
1641 + "(sector %llu on %s).\n",
1642 +@@ -1649,8 +1649,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1643 + int previous, int *dd_idx,
1644 + struct stripe_head *sh)
1645 + {
1646 +- long stripe;
1647 +- unsigned long chunk_number;
1648 ++ sector_t stripe, stripe2;
1649 ++ sector_t chunk_number;
1650 + unsigned int chunk_offset;
1651 + int pd_idx, qd_idx;
1652 + int ddf_layout = 0;
1653 +@@ -1670,18 +1670,13 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1654 + */
1655 + chunk_offset = sector_div(r_sector, sectors_per_chunk);
1656 + chunk_number = r_sector;
1657 +- BUG_ON(r_sector != chunk_number);
1658 +
1659 + /*
1660 + * Compute the stripe number
1661 + */
1662 +- stripe = chunk_number / data_disks;
1663 +-
1664 +- /*
1665 +- * Compute the data disk and parity disk indexes inside the stripe
1666 +- */
1667 +- *dd_idx = chunk_number % data_disks;
1668 +-
1669 ++ stripe = chunk_number;
1670 ++ *dd_idx = sector_div(stripe, data_disks);
1671 ++ stripe2 = stripe;
1672 + /*
1673 + * Select the parity disk based on the user selected algorithm.
1674 + */
1675 +@@ -1693,21 +1688,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1676 + case 5:
1677 + switch (algorithm) {
1678 + case ALGORITHM_LEFT_ASYMMETRIC:
1679 +- pd_idx = data_disks - stripe % raid_disks;
1680 ++ pd_idx = data_disks - sector_div(stripe2, raid_disks);
1681 + if (*dd_idx >= pd_idx)
1682 + (*dd_idx)++;
1683 + break;
1684 + case ALGORITHM_RIGHT_ASYMMETRIC:
1685 +- pd_idx = stripe % raid_disks;
1686 ++ pd_idx = sector_div(stripe2, raid_disks);
1687 + if (*dd_idx >= pd_idx)
1688 + (*dd_idx)++;
1689 + break;
1690 + case ALGORITHM_LEFT_SYMMETRIC:
1691 +- pd_idx = data_disks - stripe % raid_disks;
1692 ++ pd_idx = data_disks - sector_div(stripe2, raid_disks);
1693 + *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1694 + break;
1695 + case ALGORITHM_RIGHT_SYMMETRIC:
1696 +- pd_idx = stripe % raid_disks;
1697 ++ pd_idx = sector_div(stripe2, raid_disks);
1698 + *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1699 + break;
1700 + case ALGORITHM_PARITY_0:
1701 +@@ -1727,7 +1722,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1702 +
1703 + switch (algorithm) {
1704 + case ALGORITHM_LEFT_ASYMMETRIC:
1705 +- pd_idx = raid_disks - 1 - (stripe % raid_disks);
1706 ++ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1707 + qd_idx = pd_idx + 1;
1708 + if (pd_idx == raid_disks-1) {
1709 + (*dd_idx)++; /* Q D D D P */
1710 +@@ -1736,7 +1731,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1711 + (*dd_idx) += 2; /* D D P Q D */
1712 + break;
1713 + case ALGORITHM_RIGHT_ASYMMETRIC:
1714 +- pd_idx = stripe % raid_disks;
1715 ++ pd_idx = sector_div(stripe2, raid_disks);
1716 + qd_idx = pd_idx + 1;
1717 + if (pd_idx == raid_disks-1) {
1718 + (*dd_idx)++; /* Q D D D P */
1719 +@@ -1745,12 +1740,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1720 + (*dd_idx) += 2; /* D D P Q D */
1721 + break;
1722 + case ALGORITHM_LEFT_SYMMETRIC:
1723 +- pd_idx = raid_disks - 1 - (stripe % raid_disks);
1724 ++ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1725 + qd_idx = (pd_idx + 1) % raid_disks;
1726 + *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1727 + break;
1728 + case ALGORITHM_RIGHT_SYMMETRIC:
1729 +- pd_idx = stripe % raid_disks;
1730 ++ pd_idx = sector_div(stripe2, raid_disks);
1731 + qd_idx = (pd_idx + 1) % raid_disks;
1732 + *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1733 + break;
1734 +@@ -1769,7 +1764,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1735 + /* Exactly the same as RIGHT_ASYMMETRIC, but the order
1736 + * of blocks used for computing Q is different.
1737 + */
1738 +- pd_idx = stripe % raid_disks;
1739 ++ pd_idx = sector_div(stripe2, raid_disks);
1740 + qd_idx = pd_idx + 1;
1741 + if (pd_idx == raid_disks-1) {
1742 + (*dd_idx)++; /* Q D D D P */
1743 +@@ -1784,7 +1779,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1744 + * D D D P Q rather than
1745 + * Q D D D P
1746 + */
1747 +- pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
1748 ++ stripe2 += 1;
1749 ++ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1750 + qd_idx = pd_idx + 1;
1751 + if (pd_idx == raid_disks-1) {
1752 + (*dd_idx)++; /* Q D D D P */
1753 +@@ -1796,7 +1792,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1754 +
1755 + case ALGORITHM_ROTATING_N_CONTINUE:
1756 + /* Same as left_symmetric but Q is before P */
1757 +- pd_idx = raid_disks - 1 - (stripe % raid_disks);
1758 ++ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1759 + qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
1760 + *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1761 + ddf_layout = 1;
1762 +@@ -1804,27 +1800,27 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1763 +
1764 + case ALGORITHM_LEFT_ASYMMETRIC_6:
1765 + /* RAID5 left_asymmetric, with Q on last device */
1766 +- pd_idx = data_disks - stripe % (raid_disks-1);
1767 ++ pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1768 + if (*dd_idx >= pd_idx)
1769 + (*dd_idx)++;
1770 + qd_idx = raid_disks - 1;
1771 + break;
1772 +
1773 + case ALGORITHM_RIGHT_ASYMMETRIC_6:
1774 +- pd_idx = stripe % (raid_disks-1);
1775 ++ pd_idx = sector_div(stripe2, raid_disks-1);
1776 + if (*dd_idx >= pd_idx)
1777 + (*dd_idx)++;
1778 + qd_idx = raid_disks - 1;
1779 + break;
1780 +
1781 + case ALGORITHM_LEFT_SYMMETRIC_6:
1782 +- pd_idx = data_disks - stripe % (raid_disks-1);
1783 ++ pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1784 + *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1785 + qd_idx = raid_disks - 1;
1786 + break;
1787 +
1788 + case ALGORITHM_RIGHT_SYMMETRIC_6:
1789 +- pd_idx = stripe % (raid_disks-1);
1790 ++ pd_idx = sector_div(stripe2, raid_disks-1);
1791 + *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1792 + qd_idx = raid_disks - 1;
1793 + break;
1794 +@@ -1869,14 +1865,14 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1795 + : conf->algorithm;
1796 + sector_t stripe;
1797 + int chunk_offset;
1798 +- int chunk_number, dummy1, dd_idx = i;
1799 ++ sector_t chunk_number;
1800 ++ int dummy1, dd_idx = i;
1801 + sector_t r_sector;
1802 + struct stripe_head sh2;
1803 +
1804 +
1805 + chunk_offset = sector_div(new_sector, sectors_per_chunk);
1806 + stripe = new_sector;
1807 +- BUG_ON(new_sector != stripe);
1808 +
1809 + if (i == sh->pd_idx)
1810 + return 0;
1811 +@@ -1969,7 +1965,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1812 + }
1813 +
1814 + chunk_number = stripe * data_disks + i;
1815 +- r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
1816 ++ r_sector = chunk_number * sectors_per_chunk + chunk_offset;
1817 +
1818 + check = raid5_compute_sector(conf, r_sector,
1819 + previous, &dummy1, &sh2);
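
The raid5_compute_sector()/compute_blocknr() hunks above replace plain '/' and '%' on variables that used to be long/unsigned long with sector_t arithmetic through sector_div(), so the layout math no longer truncates on arrays past 2^32 sectors and never emits a 64-bit modulo that a 32-bit kernel cannot do natively. sector_div(n, base) divides n in place and returns the remainder. A self-contained model of one placement (LEFT_ASYMMETRIC), with sector_div() re-implemented for userspace and purely illustrative geometry:

#include <inttypes.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Userspace stand-in for the kernel's sector_div(): divides the sector
 * count in place and hands back the remainder, which is how the hunks
 * above extract "index within a cycle" values without a 64-bit %. */
static uint32_t sector_div(sector_t *n, uint32_t base)
{
    uint32_t rem = (uint32_t)(*n % base);
    *n /= base;
    return rem;
}

int main(void)
{
    /* Example geometry: 5-disk RAID5, 128-sector chunks (values are
     * illustrative, not taken from the patch). */
    const uint32_t sectors_per_chunk = 128, raid_disks = 5;
    const uint32_t data_disks = raid_disks - 1;
    sector_t r_sector = 10000000000ULL;     /* well past the 32-bit limit */

    uint32_t chunk_offset = sector_div(&r_sector, sectors_per_chunk);
    sector_t stripe = r_sector;             /* now the chunk number */
    uint32_t dd_idx = sector_div(&stripe, data_disks);
    sector_t stripe2 = stripe;
    /* ALGORITHM_LEFT_ASYMMETRIC parity placement, as in the hunk: */
    uint32_t pd_idx = data_disks - sector_div(&stripe2, raid_disks);
    if (dd_idx >= pd_idx)
        dd_idx++;

    printf("stripe=%" PRIu64 " dd_idx=%u pd_idx=%u offset=%u\n",
           stripe, dd_idx, pd_idx, chunk_offset);
    return 0;
}
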
1820 +diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
1821 +index e48380c..95a463c 100644
1822 +--- a/drivers/media/dvb/ttpci/budget.c
1823 ++++ b/drivers/media/dvb/ttpci/budget.c
1824 +@@ -643,9 +643,6 @@ static void frontend_init(struct budget *budget)
1825 + &budget->i2c_adap,
1826 + &tt1600_isl6423_config);
1827 +
1828 +- } else {
1829 +- dvb_frontend_detach(budget->dvb_frontend);
1830 +- budget->dvb_frontend = NULL;
1831 + }
1832 + }
1833 + break;
1834 +diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
1835 +index 65df1de..a555c90 100644
1836 +--- a/drivers/net/bnx2.c
1837 ++++ b/drivers/net/bnx2.c
1838 +@@ -4772,8 +4772,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
1839 + rc = bnx2_alloc_bad_rbuf(bp);
1840 + }
1841 +
1842 +- if (bp->flags & BNX2_FLAG_USING_MSIX)
1843 ++ if (bp->flags & BNX2_FLAG_USING_MSIX) {
1844 + bnx2_setup_msix_tbl(bp);
1845 ++ /* Prevent MSIX table reads and writes from timing out */
1846 ++ REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
1847 ++ BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
1848 ++ }
1849 +
1850 + return rc;
1851 + }
1852 +diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
1853 +index 3db85da..787befc 100644
1854 +--- a/drivers/net/r8169.c
1855 ++++ b/drivers/net/r8169.c
1856 +@@ -2832,8 +2832,13 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
1857 + spin_lock_irq(&tp->lock);
1858 +
1859 + RTL_W8(Cfg9346, Cfg9346_Unlock);
1860 +- RTL_W32(MAC0, low);
1861 ++
1862 + RTL_W32(MAC4, high);
1863 ++ RTL_R32(MAC4);
1864 ++
1865 ++ RTL_W32(MAC0, low);
1866 ++ RTL_R32(MAC0);
1867 ++
1868 + RTL_W8(Cfg9346, Cfg9346_Lock);
1869 +
1870 + spin_unlock_irq(&tp->lock);
1871 +@@ -4316,7 +4321,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
1872 +
1873 + tp->cur_tx += frags + 1;
1874 +
1875 +- smp_wmb();
1876 ++ wmb();
1877 +
1878 + RTL_W8(TxPoll, NPQ); /* set polling bit */
1879 +
1880 +@@ -4675,7 +4680,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
1881 + * until it does.
1882 + */
1883 + tp->intr_mask = 0xffff;
1884 +- smp_wmb();
1885 ++ wmb();
1886 + RTL_W16(IntrMask, tp->intr_event);
1887 + }
1888 +
1889 +@@ -4813,8 +4818,8 @@ static void rtl_set_rx_mode(struct net_device *dev)
1890 + mc_filter[1] = swab32(data);
1891 + }
1892 +
1893 +- RTL_W32(MAR0 + 0, mc_filter[0]);
1894 + RTL_W32(MAR0 + 4, mc_filter[1]);
1895 ++ RTL_W32(MAR0 + 0, mc_filter[0]);
1896 +
1897 + RTL_W32(RxConfig, tmp);
1898 +
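
Both r8169 hunks enforce ordering against the device itself: the high half of the MAC and multicast-filter registers is now written before the low half, so the NIC never operates on a half-updated address, each write is flushed by reading the register back before the next one goes out, and smp_wmb() (a no-op on non-SMP builds) becomes wmb(), which also orders writes against DMA-capable hardware. A toy userspace model of the write-high, flush, write-low pattern; the register layout and accessors are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Toy model of a posted-write MMIO window: two 32-bit halves of a MAC
 * address register.  Names and layout are illustrative only. */
static volatile uint32_t mac_lo, mac_hi;

static void reg_write(volatile uint32_t *reg, uint32_t val) { *reg = val; }
static uint32_t reg_read(volatile uint32_t *reg) { return *reg; }

/* The pattern from the hunks above: program the high half first, read it
 * back to flush the posted write, then do the same for the low half.  If
 * the low half went out first, the device could briefly operate with a
 * mixed old-high/new-low address. */
static void set_mac(const uint8_t mac[6])
{
    uint32_t low  = (uint32_t)mac[0] | ((uint32_t)mac[1] << 8) |
                    ((uint32_t)mac[2] << 16) | ((uint32_t)mac[3] << 24);
    uint32_t high = (uint32_t)mac[4] | ((uint32_t)mac[5] << 8);

    reg_write(&mac_hi, high);
    (void)reg_read(&mac_hi);    /* flush before touching the low half */
    reg_write(&mac_lo, low);
    (void)reg_read(&mac_lo);
}

int main(void)
{
    const uint8_t mac[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };
    set_mac(mac);
    printf("lo=%08x hi=%08x\n", (unsigned)mac_lo, (unsigned)mac_hi);
    return 0;
}
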
1899 +diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
1900 +index 46997e1..fb52e47 100644
1901 +--- a/drivers/net/sfc/efx.c
1902 ++++ b/drivers/net/sfc/efx.c
1903 +@@ -1862,6 +1862,7 @@ out:
1904 + }
1905 +
1906 + if (disabled) {
1907 ++ dev_close(efx->net_dev);
1908 + EFX_ERR(efx, "has been disabled\n");
1909 + efx->state = STATE_DISABLED;
1910 + } else {
1911 +@@ -1885,8 +1886,7 @@ static void efx_reset_work(struct work_struct *data)
1912 + }
1913 +
1914 + rtnl_lock();
1915 +- if (efx_reset(efx, efx->reset_pending))
1916 +- dev_close(efx->net_dev);
1917 ++ (void)efx_reset(efx, efx->reset_pending);
1918 + rtnl_unlock();
1919 + }
1920 +
1921 +diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
1922 +index 9d009c4..e20a824 100644
1923 +--- a/drivers/net/sfc/falcon.c
1924 ++++ b/drivers/net/sfc/falcon.c
1925 +@@ -1317,7 +1317,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
1926 +
1927 + EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
1928 +
1929 +- falcon_probe_board(efx, board_rev);
1930 ++ rc = falcon_probe_board(efx, board_rev);
1931 ++ if (rc)
1932 ++ goto fail2;
1933 +
1934 + kfree(nvconfig);
1935 + return 0;
1936 +diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
1937 +index 5712fdd..c7a933a 100644
1938 +--- a/drivers/net/sfc/falcon_boards.c
1939 ++++ b/drivers/net/sfc/falcon_boards.c
1940 +@@ -728,15 +728,7 @@ static const struct falcon_board_type board_types[] = {
1941 + },
1942 + };
1943 +
1944 +-static const struct falcon_board_type falcon_dummy_board = {
1945 +- .init = efx_port_dummy_op_int,
1946 +- .init_phy = efx_port_dummy_op_void,
1947 +- .fini = efx_port_dummy_op_void,
1948 +- .set_id_led = efx_port_dummy_op_set_id_led,
1949 +- .monitor = efx_port_dummy_op_int,
1950 +-};
1951 +-
1952 +-void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
1953 ++int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
1954 + {
1955 + struct falcon_board *board = falcon_board(efx);
1956 + u8 type_id = FALCON_BOARD_TYPE(revision_info);
1957 +@@ -754,8 +746,9 @@ void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
1958 + (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
1959 + ? board->type->ref_model : board->type->gen_type,
1960 + 'A' + board->major, board->minor);
1961 ++ return 0;
1962 + } else {
1963 + EFX_ERR(efx, "unknown board type %d\n", type_id);
1964 +- board->type = &falcon_dummy_board;
1965 ++ return -ENODEV;
1966 + }
1967 + }
1968 +diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
1969 +index 9351c03..3166baf 100644
1970 +--- a/drivers/net/sfc/nic.h
1971 ++++ b/drivers/net/sfc/nic.h
1972 +@@ -156,7 +156,7 @@ extern struct efx_nic_type siena_a0_nic_type;
1973 + **************************************************************************
1974 + */
1975 +
1976 +-extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
1977 ++extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
1978 +
1979 + /* TX data path */
1980 + extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
1981 +diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
1982 +index f8c6771..afbac2d 100644
1983 +--- a/drivers/net/sfc/siena.c
1984 ++++ b/drivers/net/sfc/siena.c
1985 +@@ -454,8 +454,17 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
1986 +
1987 + static void siena_update_nic_stats(struct efx_nic *efx)
1988 + {
1989 +- while (siena_try_update_nic_stats(efx) == -EAGAIN)
1990 +- cpu_relax();
1991 ++ int retry;
1992 ++
1993 ++ /* If we're unlucky enough to read statistics during the DMA, wait
1994 ++ * up to 10ms for it to finish (typically takes <500us) */
1995 ++ for (retry = 0; retry < 100; ++retry) {
1996 ++ if (siena_try_update_nic_stats(efx) == 0)
1997 ++ return;
1998 ++ udelay(100);
1999 ++ }
2000 ++
2001 ++ /* Use the old values instead */
2002 + }
2003 +
2004 + static void siena_start_nic_stats(struct efx_nic *efx)
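
The siena hunk above turns an unbounded cpu_relax() spin into a bounded poll: up to 100 attempts spaced 100us apart, roughly 10ms in total, after which the driver simply keeps the previous statistics. The shape generalizes well; a small sketch of the bounded-poll idiom, in userspace with usleep() standing in for udelay():

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>     /* usleep(); POSIX, used here in place of udelay() */

/* Generic form of the loop in the hunk above: poll a condition at a fixed
 * interval, but give up after a bounded total wait instead of spinning
 * forever.  Returns true if the condition came good in time. */
static bool poll_bounded(bool (*done)(void), int tries, unsigned interval_us)
{
    for (int i = 0; i < tries; i++) {
        if (done())
            return true;
        usleep(interval_us);
    }
    return false;   /* caller falls back, e.g. keeps the old statistics */
}

static int calls;
static bool dma_finished(void) { return ++calls >= 3; }

int main(void)
{
    /* 100 tries x 100us ~= 10ms upper bound, as in the hunk above. */
    bool ok = poll_bounded(dma_finished, 100, 100);
    printf("ok=%d after %d polls\n", ok, calls);
    return 0;
}
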
2005 +diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
2006 +index 17d1493..8405fb8 100644
2007 +--- a/drivers/net/tg3.c
2008 ++++ b/drivers/net/tg3.c
2009 +@@ -8572,6 +8572,7 @@ static int tg3_test_msi(struct tg3 *tp)
2010 + pci_disable_msi(tp->pdev);
2011 +
2012 + tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
2013 ++ tp->napi[0].irq_vec = tp->pdev->irq;
2014 +
2015 + err = tg3_request_irq(tp, 0);
2016 + if (err)
2017 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
2018 +index 2834a01..909b73d 100644
2019 +--- a/drivers/net/tun.c
2020 ++++ b/drivers/net/tun.c
2021 +@@ -380,6 +380,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
2022 + }
2023 + }
2024 +
2025 ++ /* Orphan the skb - required as we might hang on to it
2026 ++ * for an indefinite time. */
2027 ++ skb_orphan(skb);
2028 ++
2029 + /* Enqueue packet */
2030 + skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
2031 + dev->trans_start = jiffies;
2032 +diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
2033 +index 5f3b9ea..8a6e027 100644
2034 +--- a/drivers/net/usb/cdc_ether.c
2035 ++++ b/drivers/net/usb/cdc_ether.c
2036 +@@ -433,6 +433,7 @@ static const struct driver_info mbm_info = {
2037 + .bind = cdc_bind,
2038 + .unbind = usbnet_cdc_unbind,
2039 + .status = cdc_status,
2040 ++ .manage_power = cdc_manage_power,
2041 + };
2042 +
2043 + /*-------------------------------------------------------------------------*/
2044 +diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
2045 +index 3d406f9..c60625b 100644
2046 +--- a/drivers/net/usb/dm9601.c
2047 ++++ b/drivers/net/usb/dm9601.c
2048 +@@ -238,7 +238,7 @@ static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 valu
2049 + goto out;
2050 +
2051 + dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg);
2052 +- dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1c : 0x14);
2053 ++ dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1a : 0x12);
2054 +
2055 + for (i = 0; i < DM_TIMEOUT; i++) {
2056 + u8 tmp;
2057 +diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
2058 +index b9b9d6b..941f053 100644
2059 +--- a/drivers/net/wan/hdlc_ppp.c
2060 ++++ b/drivers/net/wan/hdlc_ppp.c
2061 +@@ -628,9 +628,15 @@ static void ppp_stop(struct net_device *dev)
2062 + ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);
2063 + }
2064 +
2065 ++static void ppp_close(struct net_device *dev)
2066 ++{
2067 ++ ppp_tx_flush();
2068 ++}
2069 ++
2070 + static struct hdlc_proto proto = {
2071 + .start = ppp_start,
2072 + .stop = ppp_stop,
2073 ++ .close = ppp_close,
2074 + .type_trans = ppp_type_trans,
2075 + .ioctl = ppp_ioctl,
2076 + .netif_rx = ppp_rx,
2077 +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
2078 +index 7b1eab4..e55f718 100644
2079 +--- a/drivers/net/wireless/ath/ath9k/main.c
2080 ++++ b/drivers/net/wireless/ath/ath9k/main.c
2081 +@@ -1358,9 +1358,9 @@ void ath_cleanup(struct ath_softc *sc)
2082 + free_irq(sc->irq, sc);
2083 + ath_bus_cleanup(common);
2084 + kfree(sc->sec_wiphy);
2085 +- ieee80211_free_hw(sc->hw);
2086 +
2087 + ath9k_uninit_hw(sc);
2088 ++ ieee80211_free_hw(sc->hw);
2089 + }
2090 +
2091 + static int ath9k_reg_notifier(struct wiphy *wiphy,
2092 +diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
2093 +index 4bf4c21..41d33cd 100644
2094 +--- a/drivers/net/wireless/p54/p54pci.c
2095 ++++ b/drivers/net/wireless/p54/p54pci.c
2096 +@@ -245,7 +245,7 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
2097 + u32 idx, i;
2098 +
2099 + i = (*index) % ring_limit;
2100 +- (*index) = idx = le32_to_cpu(ring_control->device_idx[1]);
2101 ++ (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
2102 + idx %= ring_limit;
2103 +
2104 + while (i != idx) {
2105 +diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
2106 +index 8742640..b3c4fbd 100644
2107 +--- a/drivers/net/wireless/p54/p54usb.c
2108 ++++ b/drivers/net/wireless/p54/p54usb.c
2109 +@@ -36,6 +36,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
2110 + /* Version 1 devices (pci chip + net2280) */
2111 + {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
2112 + {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */
2113 ++ {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */
2114 + {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */
2115 + {USB_DEVICE(0x083a, 0x4502)}, /* Siemens Gigaset USB Adapter */
2116 + {USB_DEVICE(0x083a, 0x5501)}, /* Phillips CPWUA054 */
2117 +diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
2118 +index b6dda2b..9d147de 100644
2119 +--- a/drivers/net/wireless/p54/txrx.c
2120 ++++ b/drivers/net/wireless/p54/txrx.c
2121 +@@ -186,7 +186,7 @@ static int p54_tx_qos_accounting_alloc(struct p54_common *priv,
2122 + struct ieee80211_tx_queue_stats *queue;
2123 + unsigned long flags;
2124 +
2125 +- if (WARN_ON(p54_queue > P54_QUEUE_NUM))
2126 ++ if (WARN_ON(p54_queue >= P54_QUEUE_NUM))
2127 + return -EINVAL;
2128 +
2129 + queue = &priv->tx_stats[p54_queue];
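
The p54 guard above is a classic off-by-one: if tx_stats[] holds P54_QUEUE_NUM entries (which the indexing here suggests), an index equal to P54_QUEUE_NUM is already one past the last valid slot, so the check must be '>=' rather than '>'. The same rule in miniature, with an illustrative array size:

#include <stdio.h>

#define P54_QUEUE_NUM 8     /* illustrative; stands in for the driver's constant */
static int tx_stats[P54_QUEUE_NUM];

/* The hunk above tightens the guard from '>' to '>=': an index equal to
 * the array length is already out of bounds. */
static int queue_stat(unsigned queue)
{
    if (queue >= P54_QUEUE_NUM)    /* '>' would let queue == 8 through */
        return -1;
    return tx_stats[queue];
}

int main(void)
{
    printf("%d %d\n", queue_stat(7), queue_stat(8));
    return 0;
}
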
2130 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2131 +index c4fead1..b8eb5e7 100644
2132 +--- a/drivers/pci/pci.c
2133 ++++ b/drivers/pci/pci.c
2134 +@@ -624,7 +624,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
2135 + */
2136 + int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
2137 + {
2138 +- return state > PCI_D0 ?
2139 ++ return state >= PCI_D0 ?
2140 + pci_platform_power_transition(dev, state) : -EINVAL;
2141 + }
2142 + EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
2143 +@@ -661,10 +661,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
2144 + */
2145 + return 0;
2146 +
2147 +- /* Check if we're already there */
2148 +- if (dev->current_state == state)
2149 +- return 0;
2150 +-
2151 + __pci_start_power_transition(dev, state);
2152 +
2153 + /* This device is quirked not to be put into D3, so
2154 +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
2155 +index e6b67f2..741672f 100644
2156 +--- a/drivers/scsi/libiscsi.c
2157 ++++ b/drivers/scsi/libiscsi.c
2158 +@@ -470,12 +470,12 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
2159 +
2160 + WARN_ON(hdrlength >= 256);
2161 + hdr->hlength = hdrlength & 0xFF;
2162 ++ hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
2163 +
2164 + if (session->tt->init_task && session->tt->init_task(task))
2165 + return -EIO;
2166 +
2167 + task->state = ISCSI_TASK_RUNNING;
2168 +- hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
2169 + session->cmdsn++;
2170 +
2171 + conn->scsicmd_pdus_cnt++;
2172 +diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
2173 +index e155011..816ab97 100644
2174 +--- a/drivers/scsi/libsas/sas_ata.c
2175 ++++ b/drivers/scsi/libsas/sas_ata.c
2176 +@@ -394,11 +394,15 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev,
2177 + void sas_ata_task_abort(struct sas_task *task)
2178 + {
2179 + struct ata_queued_cmd *qc = task->uldd_task;
2180 ++ struct request_queue *q = qc->scsicmd->device->request_queue;
2181 + struct completion *waiting;
2182 ++ unsigned long flags;
2183 +
2184 + /* Bounce SCSI-initiated commands to the SCSI EH */
2185 + if (qc->scsicmd) {
2186 ++ spin_lock_irqsave(q->queue_lock, flags);
2187 + blk_abort_request(qc->scsicmd->request);
2188 ++ spin_unlock_irqrestore(q->queue_lock, flags);
2189 + scsi_schedule_eh(qc->scsicmd->device->host);
2190 + return;
2191 + }
2192 +diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
2193 +index 14b1319..b672d10 100644
2194 +--- a/drivers/scsi/libsas/sas_scsi_host.c
2195 ++++ b/drivers/scsi/libsas/sas_scsi_host.c
2196 +@@ -1029,6 +1029,8 @@ int __sas_task_abort(struct sas_task *task)
2197 + void sas_task_abort(struct sas_task *task)
2198 + {
2199 + struct scsi_cmnd *sc = task->uldd_task;
2200 ++ struct request_queue *q = sc->device->request_queue;
2201 ++ unsigned long flags;
2202 +
2203 + /* Escape for libsas internal commands */
2204 + if (!sc) {
2205 +@@ -1043,7 +1045,9 @@ void sas_task_abort(struct sas_task *task)
2206 + return;
2207 + }
2208 +
2209 ++ spin_lock_irqsave(q->queue_lock, flags);
2210 + blk_abort_request(sc->request);
2211 ++ spin_unlock_irqrestore(q->queue_lock, flags);
2212 + scsi_schedule_eh(sc->device->host);
2213 + }
2214 +
2215 +diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
2216 +index 0b575c8..aa2a2dc 100644
2217 +--- a/drivers/scsi/scsi_debug.c
2218 ++++ b/drivers/scsi/scsi_debug.c
2219 +@@ -956,7 +956,8 @@ static int resp_start_stop(struct scsi_cmnd * scp,
2220 + static sector_t get_sdebug_capacity(void)
2221 + {
2222 + if (scsi_debug_virtual_gb > 0)
2223 +- return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb;
2224 ++ return (sector_t)scsi_debug_virtual_gb *
2225 ++ (1073741824 / scsi_debug_sector_size);
2226 + else
2227 + return sdebug_store_sectors;
2228 + }
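
The scsi_debug capacity fix above matters once the emulated sector size is not 512 bytes: the old expression 2048 * 1024 * virtual_gb is just 2^30 / 512, i.e. it hard-codes 512-byte sectors, so a 4096-byte-sector disk reported a capacity eight times too large. The corrected arithmetic, runnable on its own:

#include <inttypes.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Capacity in sectors for a simulated disk of virtual_gb GiB, as computed
 * by the fixed hunk above: 2^30 bytes per GiB divided by the configured
 * sector size, instead of the old hard-coded 512-byte assumption
 * (2048 * 1024 sectors per GiB). */
static sector_t capacity_sectors(unsigned virtual_gb, unsigned sector_size)
{
    return (sector_t)virtual_gb * (1073741824u / sector_size);
}

int main(void)
{
    /* 512-byte sectors reproduce the old constant; 4096-byte sectors
     * previously came out 8x too large. */
    printf("1 GiB @512:  %" PRIu64 " sectors\n", capacity_sectors(1, 512));
    printf("1 GiB @4096: %" PRIu64 " sectors\n", capacity_sectors(1, 4096));
    return 0;
}
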
2229 +diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
2230 +index 08ed506..e46155b 100644
2231 +--- a/drivers/scsi/scsi_error.c
2232 ++++ b/drivers/scsi/scsi_error.c
2233 +@@ -301,7 +301,20 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
2234 + if (scmd->device->allow_restart &&
2235 + (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
2236 + return FAILED;
2237 +- return SUCCESS;
2238 ++
2239 ++ if (blk_barrier_rq(scmd->request))
2240 ++ /*
2241 ++ * barrier requests should always retry on UA;
2242 ++ * otherwise the block layer will get a spurious error
2243 ++ */
2244 ++ return NEEDS_RETRY;
2245 ++ else
2246 ++ /*
2247 ++ * for normal (non barrier) commands, pass the
2248 ++ * UA upwards for a determination in the
2249 ++ * completion functions
2250 ++ */
2251 ++ return SUCCESS;
2252 +
2253 + /* these three are not supported */
2254 + case COPY_ABORTED:
2255 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
2256 +index c664242..5697709 100644
2257 +--- a/drivers/scsi/scsi_lib.c
2258 ++++ b/drivers/scsi/scsi_lib.c
2259 +@@ -773,8 +773,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
2260 + * we already took a copy of the original into rq->errors which
2261 + * is what gets returned to the user
2262 + */
2263 +- if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) {
2264 +- if (!(req->cmd_flags & REQ_QUIET))
2265 ++ if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
2266 ++ /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
2267 ++ * print since caller wants ATA registers. Only occurs on
2268 ++ * SCSI ATA PASS_THROUGH commands when CK_COND=1
2269 ++ */
2270 ++ if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
2271 ++ ;
2272 ++ else if (!(req->cmd_flags & REQ_QUIET))
2273 + scsi_print_sense("", cmd);
2274 + result = 0;
2275 + /* BLOCK_PC may have set error */
2276 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2277 +index 255da53..bf15920 100644
2278 +--- a/drivers/scsi/sd.c
2279 ++++ b/drivers/scsi/sd.c
2280 +@@ -1039,6 +1039,7 @@ static void sd_prepare_flush(struct request_queue *q, struct request *rq)
2281 + {
2282 + rq->cmd_type = REQ_TYPE_BLOCK_PC;
2283 + rq->timeout = SD_TIMEOUT;
2284 ++ rq->retries = SD_MAX_RETRIES;
2285 + rq->cmd[0] = SYNCHRONIZE_CACHE;
2286 + rq->cmd_len = 10;
2287 + }
2288 +diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
2289 +index 24485cc..4822cb5 100644
2290 +--- a/drivers/serial/8250_pnp.c
2291 ++++ b/drivers/serial/8250_pnp.c
2292 +@@ -348,6 +348,8 @@ static const struct pnp_device_id pnp_dev_table[] = {
2293 + { "FUJ02E6", 0 },
2294 + /* Fujitsu Wacom 2FGT Tablet PC device */
2295 + { "FUJ02E7", 0 },
2296 ++ /* Fujitsu Wacom 1FGT Tablet PC device */
2297 ++ { "FUJ02E9", 0 },
2298 + /*
2299 + * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
2300 + * disguise)
2301 +diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
2302 +index c2809f2..b12237f 100644
2303 +--- a/drivers/staging/hv/Hv.c
2304 ++++ b/drivers/staging/hv/Hv.c
2305 +@@ -306,9 +306,9 @@ void HvCleanup(void)
2306 + DPRINT_ENTER(VMBUS);
2307 +
2308 + if (gHvContext.SignalEventBuffer) {
2309 ++ kfree(gHvContext.SignalEventBuffer);
2310 + gHvContext.SignalEventBuffer = NULL;
2311 + gHvContext.SignalEventParam = NULL;
2312 +- kfree(gHvContext.SignalEventBuffer);
2313 + }
2314 +
2315 + if (gHvContext.GuestId == HV_LINUX_GUEST_ID) {
2316 +diff --git a/drivers/staging/hv/RndisFilter.c b/drivers/staging/hv/RndisFilter.c
2317 +index 26d7997..f05f4e1 100644
2318 +--- a/drivers/staging/hv/RndisFilter.c
2319 ++++ b/drivers/staging/hv/RndisFilter.c
2320 +@@ -756,6 +756,7 @@ static int RndisFilterOpenDevice(struct rndis_device *Device)
2321 +
2322 + ret = RndisFilterSetPacketFilter(Device,
2323 + NDIS_PACKET_TYPE_BROADCAST |
2324 ++ NDIS_PACKET_TYPE_ALL_MULTICAST |
2325 + NDIS_PACKET_TYPE_DIRECTED);
2326 + if (ret == 0)
2327 + Device->State = RNDIS_DEV_DATAINITIALIZED;
2328 +diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
2329 +index 0d7459e..4c3c8bc 100644
2330 +--- a/drivers/staging/hv/netvsc_drv.c
2331 ++++ b/drivers/staging/hv/netvsc_drv.c
2332 +@@ -413,8 +413,7 @@ static int netvsc_probe(struct device *device)
2333 + if (!net_drv_obj->Base.OnDeviceAdd)
2334 + return -1;
2335 +
2336 +- net = alloc_netdev(sizeof(struct net_device_context), "seth%d",
2337 +- ether_setup);
2338 ++ net = alloc_etherdev(sizeof(struct net_device_context));
2339 + if (!net)
2340 + return -1;
2341 +
2342 +diff --git a/drivers/staging/usbip/usbip_event.c b/drivers/staging/usbip/usbip_event.c
2343 +index 6da1021..a2566f1 100644
2344 +--- a/drivers/staging/usbip/usbip_event.c
2345 ++++ b/drivers/staging/usbip/usbip_event.c
2346 +@@ -117,6 +117,9 @@ void usbip_stop_eh(struct usbip_device *ud)
2347 + {
2348 + struct usbip_task *eh = &ud->eh;
2349 +
2350 ++ if (eh->thread == current)
2351 ++ return; /* do not wait for myself */
2352 ++
2353 + wait_for_completion(&eh->thread_done);
2354 + usbip_dbg_eh("usbip_eh has finished\n");
2355 + }
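
The usbip guard above prevents a self-deadlock: if the event-handler thread itself ends up calling usbip_stop_eh(), waiting on its own completion would block forever. The same idea in a userspace sketch, with POSIX threads standing in for the kernel's kthread and completion (the names and structure here are mine, not the driver's):

#include <pthread.h>
#include <stdio.h>

static pthread_t worker;
static pthread_barrier_t ready;

/* Same guard as the hunk above: a thread must never block waiting for its
 * own termination. */
static void stop_and_wait(void)
{
    if (pthread_equal(pthread_self(), worker)) {
        puts("called from the worker itself; not waiting");
        return;
    }
    pthread_join(worker, NULL);
    puts("worker finished");
}

static void *worker_fn(void *arg)
{
    (void)arg;
    pthread_barrier_wait(&ready);   /* 'worker' is published past this point */
    stop_and_wait();                /* self-call: takes the early-return path */
    return NULL;
}

int main(void)
{
    pthread_barrier_init(&ready, NULL, 2);
    pthread_create(&worker, NULL, worker_fn, NULL);
    pthread_barrier_wait(&ready);
    stop_and_wait();                /* main thread: safe to block here */
    pthread_barrier_destroy(&ready);
    return 0;
}
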
2356 +diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
2357 +index ca479a7..d9d0bf5 100644
2358 +--- a/drivers/usb/core/driver.c
2359 ++++ b/drivers/usb/core/driver.c
2360 +@@ -1255,9 +1255,8 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
2361 + udev->state == USB_STATE_SUSPENDED)
2362 + goto done;
2363 +
2364 +- udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
2365 +-
2366 + if (msg.event & PM_EVENT_AUTO) {
2367 ++ udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
2368 + status = autosuspend_check(udev, 0);
2369 + if (status < 0)
2370 + goto done;
2371 +@@ -1789,6 +1788,34 @@ int usb_external_resume_device(struct usb_device *udev, pm_message_t msg)
2372 + return status;
2373 + }
2374 +
2375 ++static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
2376 ++{
2377 ++ int w, i;
2378 ++ struct usb_interface *intf;
2379 ++
2380 ++ /* Remote wakeup is needed only when we actually go to sleep.
2381 ++ * For things like FREEZE and QUIESCE, if the device is already
2382 ++ * autosuspended then its current wakeup setting is okay.
2383 ++ */
2384 ++ if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) {
2385 ++ udev->do_remote_wakeup = 0;
2386 ++ return;
2387 ++ }
2388 ++
2389 ++ /* If remote wakeup is permitted, see whether any interface drivers
2390 ++ * actually want it.
2391 ++ */
2392 ++ w = 0;
2393 ++ if (device_may_wakeup(&udev->dev) && udev->actconfig) {
2394 ++ for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
2395 ++ intf = udev->actconfig->interface[i];
2396 ++ w |= intf->needs_remote_wakeup;
2397 ++ }
2398 ++ }
2399 ++
2400 ++ udev->do_remote_wakeup = w;
2401 ++}
2402 ++
2403 + int usb_suspend(struct device *dev, pm_message_t msg)
2404 + {
2405 + struct usb_device *udev;
2406 +@@ -1808,6 +1835,7 @@ int usb_suspend(struct device *dev, pm_message_t msg)
2407 + }
2408 +
2409 + udev->skip_sys_resume = 0;
2410 ++ choose_wakeup(udev, msg);
2411 + return usb_external_suspend_device(udev, msg);
2412 + }
2413 +
2414 +diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
2415 +index bdf87a8..2c95153 100644
2416 +--- a/drivers/usb/core/generic.c
2417 ++++ b/drivers/usb/core/generic.c
2418 +@@ -120,7 +120,7 @@ int usb_choose_configuration(struct usb_device *udev)
2419 + * than a vendor-specific driver. */
2420 + else if (udev->descriptor.bDeviceClass !=
2421 + USB_CLASS_VENDOR_SPEC &&
2422 +- (!desc || desc->bInterfaceClass !=
2423 ++ (desc && desc->bInterfaceClass !=
2424 + USB_CLASS_VENDOR_SPEC)) {
2425 + best = c;
2426 + break;
2427 +diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
2428 +index 97b40ce..4a6366a 100644
2429 +--- a/drivers/usb/core/inode.c
2430 ++++ b/drivers/usb/core/inode.c
2431 +@@ -515,13 +515,13 @@ static int fs_create_by_name (const char *name, mode_t mode,
2432 + *dentry = NULL;
2433 + mutex_lock(&parent->d_inode->i_mutex);
2434 + *dentry = lookup_one_len(name, parent, strlen(name));
2435 +- if (!IS_ERR(dentry)) {
2436 ++ if (!IS_ERR(*dentry)) {
2437 + if ((mode & S_IFMT) == S_IFDIR)
2438 + error = usbfs_mkdir (parent->d_inode, *dentry, mode);
2439 + else
2440 + error = usbfs_create (parent->d_inode, *dentry, mode);
2441 + } else
2442 +- error = PTR_ERR(dentry);
2443 ++ error = PTR_ERR(*dentry);
2444 + mutex_unlock(&parent->d_inode->i_mutex);
2445 +
2446 + return error;
2447 +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
2448 +index 9bc95fe..1a48aac 100644
2449 +--- a/drivers/usb/core/message.c
2450 ++++ b/drivers/usb/core/message.c
2451 +@@ -1471,7 +1471,7 @@ int usb_reset_configuration(struct usb_device *dev)
2452 + /* If not, reinstate the old alternate settings */
2453 + if (retval < 0) {
2454 + reset_old_alts:
2455 +- for (; i >= 0; i--) {
2456 ++ for (i--; i >= 0; i--) {
2457 + struct usb_interface *intf = config->interface[i];
2458 + struct usb_host_interface *alt;
2459 +
2460 +diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
2461 +index 9c90b67..efa0372 100644
2462 +--- a/drivers/usb/host/ehci-hcd.c
2463 ++++ b/drivers/usb/host/ehci-hcd.c
2464 +@@ -543,6 +543,7 @@ static int ehci_init(struct usb_hcd *hcd)
2465 + */
2466 + ehci->periodic_size = DEFAULT_I_TDPS;
2467 + INIT_LIST_HEAD(&ehci->cached_itd_list);
2468 ++ INIT_LIST_HEAD(&ehci->cached_sitd_list);
2469 + if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
2470 + return retval;
2471 +
2472 +diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
2473 +index aeda96e..1f3f01e 100644
2474 +--- a/drivers/usb/host/ehci-mem.c
2475 ++++ b/drivers/usb/host/ehci-mem.c
2476 +@@ -136,7 +136,7 @@ static inline void qh_put (struct ehci_qh *qh)
2477 +
2478 + static void ehci_mem_cleanup (struct ehci_hcd *ehci)
2479 + {
2480 +- free_cached_itd_list(ehci);
2481 ++ free_cached_lists(ehci);
2482 + if (ehci->async)
2483 + qh_put (ehci->async);
2484 + ehci->async = NULL;
2485 +diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
2486 +index df533ce..2064045 100644
2487 +--- a/drivers/usb/host/ehci-sched.c
2488 ++++ b/drivers/usb/host/ehci-sched.c
2489 +@@ -2137,13 +2137,27 @@ sitd_complete (
2490 + (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
2491 + }
2492 + iso_stream_put (ehci, stream);
2493 +- /* OK to recycle this SITD now that its completion callback ran. */
2494 ++
2495 + done:
2496 + sitd->urb = NULL;
2497 +- sitd->stream = NULL;
2498 +- list_move(&sitd->sitd_list, &stream->free_list);
2499 +- iso_stream_put(ehci, stream);
2500 +-
2501 ++ if (ehci->clock_frame != sitd->frame) {
2502 ++ /* OK to recycle this SITD now. */
2503 ++ sitd->stream = NULL;
2504 ++ list_move(&sitd->sitd_list, &stream->free_list);
2505 ++ iso_stream_put(ehci, stream);
2506 ++ } else {
2507 ++ /* HW might remember this SITD, so we can't recycle it yet.
2508 ++ * Move it to a safe place until a new frame starts.
2509 ++ */
2510 ++ list_move(&sitd->sitd_list, &ehci->cached_sitd_list);
2511 ++ if (stream->refcount == 2) {
2512 ++ /* If iso_stream_put() were called here, stream
2513 ++ * would be freed. Instead, just prevent reuse.
2514 ++ */
2515 ++ stream->ep->hcpriv = NULL;
2516 ++ stream->ep = NULL;
2517 ++ }
2518 ++ }
2519 + return retval;
2520 + }
2521 +
2522 +@@ -2209,9 +2223,10 @@ done:
2523 +
2524 + /*-------------------------------------------------------------------------*/
2525 +
2526 +-static void free_cached_itd_list(struct ehci_hcd *ehci)
2527 ++static void free_cached_lists(struct ehci_hcd *ehci)
2528 + {
2529 + struct ehci_itd *itd, *n;
2530 ++ struct ehci_sitd *sitd, *sn;
2531 +
2532 + list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
2533 + struct ehci_iso_stream *stream = itd->stream;
2534 +@@ -2219,6 +2234,13 @@ static void free_cached_itd_list(struct ehci_hcd *ehci)
2535 + list_move(&itd->itd_list, &stream->free_list);
2536 + iso_stream_put(ehci, stream);
2537 + }
2538 ++
2539 ++ list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
2540 ++ struct ehci_iso_stream *stream = sitd->stream;
2541 ++ sitd->stream = NULL;
2542 ++ list_move(&sitd->sitd_list, &stream->free_list);
2543 ++ iso_stream_put(ehci, stream);
2544 ++ }
2545 + }
2546 +
2547 + /*-------------------------------------------------------------------------*/
2548 +@@ -2245,7 +2267,7 @@ scan_periodic (struct ehci_hcd *ehci)
2549 + clock_frame = -1;
2550 + }
2551 + if (ehci->clock_frame != clock_frame) {
2552 +- free_cached_itd_list(ehci);
2553 ++ free_cached_lists(ehci);
2554 + ehci->clock_frame = clock_frame;
2555 + }
2556 + clock %= mod;
2557 +@@ -2408,7 +2430,7 @@ restart:
2558 + clock = now;
2559 + clock_frame = clock >> 3;
2560 + if (ehci->clock_frame != clock_frame) {
2561 +- free_cached_itd_list(ehci);
2562 ++ free_cached_lists(ehci);
2563 + ehci->clock_frame = clock_frame;
2564 + }
2565 + } else {
2566 +diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
2567 +index b1dce96..556c0b4 100644
2568 +--- a/drivers/usb/host/ehci.h
2569 ++++ b/drivers/usb/host/ehci.h
2570 +@@ -87,8 +87,9 @@ struct ehci_hcd { /* one per controller */
2571 + int next_uframe; /* scan periodic, start here */
2572 + unsigned periodic_sched; /* periodic activity count */
2573 +
2574 +- /* list of itds completed while clock_frame was still active */
2575 ++ /* list of itds & sitds completed while clock_frame was still active */
2576 + struct list_head cached_itd_list;
2577 ++ struct list_head cached_sitd_list;
2578 + unsigned clock_frame;
2579 +
2580 + /* per root hub port */
2581 +@@ -195,7 +196,7 @@ timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action)
2582 + clear_bit (action, &ehci->actions);
2583 + }
2584 +
2585 +-static void free_cached_itd_list(struct ehci_hcd *ehci);
2586 ++static void free_cached_lists(struct ehci_hcd *ehci);
2587 +
2588 + /*-------------------------------------------------------------------------*/
2589 +
2590 +diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
2591 +index 32bbce9..65cac8c 100644
2592 +--- a/drivers/usb/host/ohci-hub.c
2593 ++++ b/drivers/usb/host/ohci-hub.c
2594 +@@ -697,7 +697,7 @@ static int ohci_hub_control (
2595 + u16 wLength
2596 + ) {
2597 + struct ohci_hcd *ohci = hcd_to_ohci (hcd);
2598 +- int ports = hcd_to_bus (hcd)->root_hub->maxchild;
2599 ++ int ports = ohci->num_ports;
2600 + u32 temp;
2601 + int retval = 0;
2602 +
2603 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
2604 +index bffcef7..6c1f673 100644
2605 +--- a/drivers/usb/host/xhci-mem.c
2606 ++++ b/drivers/usb/host/xhci-mem.c
2607 +@@ -549,6 +549,19 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
2608 + return EP_INTERVAL(interval);
2609 + }
2610 +
2611 ++/* The "Mult" field in the endpoint context is only set for SuperSpeed devices.
2612 ++ * High speed endpoint descriptors can define "the number of additional
2613 ++ * transaction opportunities per microframe", but that goes in the Max Burst
2614 ++ * endpoint context field.
2615 ++ */
2616 ++static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
2617 ++ struct usb_host_endpoint *ep)
2618 ++{
2619 ++ if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp)
2620 ++ return 0;
2621 ++ return ep->ss_ep_comp->desc.bmAttributes;
2622 ++}
2623 ++
2624 + static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
2625 + struct usb_host_endpoint *ep)
2626 + {
2627 +@@ -579,6 +592,36 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
2628 + return type;
2629 + }
2630 +
2631 ++/* Return the maximum endpoint service interval time (ESIT) payload.
2632 ++ * Basically, this is the maxpacket size, multiplied by the burst size
2633 ++ * and mult size.
2634 ++ */
2635 ++static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
2636 ++ struct usb_device *udev,
2637 ++ struct usb_host_endpoint *ep)
2638 ++{
2639 ++ int max_burst;
2640 ++ int max_packet;
2641 ++
2642 ++ /* Only applies for interrupt or isochronous endpoints */
2643 ++ if (usb_endpoint_xfer_control(&ep->desc) ||
2644 ++ usb_endpoint_xfer_bulk(&ep->desc))
2645 ++ return 0;
2646 ++
2647 ++ if (udev->speed == USB_SPEED_SUPER) {
2648 ++ if (ep->ss_ep_comp)
2649 ++ return ep->ss_ep_comp->desc.wBytesPerInterval;
2650 ++ xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
2651 ++ /* Assume no bursts, no multiple opportunities to send. */
2652 ++ return ep->desc.wMaxPacketSize;
2653 ++ }
2654 ++
2655 ++ max_packet = ep->desc.wMaxPacketSize & 0x3ff;
2656 ++ max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
2657 ++ /* A 0 in max burst means 1 transfer per ESIT */
2658 ++ return max_packet * (max_burst + 1);
2659 ++}
2660 ++
2661 + int xhci_endpoint_init(struct xhci_hcd *xhci,
2662 + struct xhci_virt_device *virt_dev,
2663 + struct usb_device *udev,
2664 +@@ -590,6 +633,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
2665 + struct xhci_ring *ep_ring;
2666 + unsigned int max_packet;
2667 + unsigned int max_burst;
2668 ++ u32 max_esit_payload;
2669 +
2670 + ep_index = xhci_get_endpoint_index(&ep->desc);
2671 + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
2672 +@@ -611,6 +655,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
2673 + ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
2674 +
2675 + ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
2676 ++ ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));
2677 +
2678 + /* FIXME dig Mult and streams info out of ep companion desc */
2679 +
2680 +@@ -656,6 +701,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
2681 + default:
2682 + BUG();
2683 + }
2684 ++ max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
2685 ++ ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);
2686 ++
2687 ++ /*
2688 ++ * XXX no idea how to calculate the average TRB buffer length for bulk
2689 ++ * endpoints, as the driver gives us no clue how big each scatter gather
2690 ++ * list entry (or buffer) is going to be.
2691 ++ *
2692 ++ * For isochronous and interrupt endpoints, we set it to the max
2693 ++ * available, until we have new API in the USB core to allow drivers to
2694 ++ * declare how much bandwidth they actually need.
2695 ++ *
2696 ++ * Normally, it would be calculated by taking the total of the buffer
2697 ++ * lengths in the TD and then dividing by the number of TRBs in a TD,
2698 ++ * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't
2699 ++ * use Event Data TRBs, and we don't chain in a link TRB on short
2700 ++ * transfers, we're basically dividing by 1.
2701 ++ */
2702 ++ ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);
2703 ++
2704 + /* FIXME Debug endpoint context */
2705 + return 0;
2706 + }
2707 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
2708 +index 8778135..9e904a6 100644
2709 +--- a/drivers/usb/host/xhci.h
2710 ++++ b/drivers/usb/host/xhci.h
2711 +@@ -609,6 +609,10 @@ struct xhci_ep_ctx {
2712 + #define MAX_PACKET_MASK (0xffff << 16)
2713 + #define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)
2714 +
2715 ++/* tx_info bitmasks */
2716 ++#define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff)
2717 ++#define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16)
2718 ++
2719 +
2720 + /**
2721 + * struct xhci_input_control_context
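
Beyond the new Mult field, the xhci hunks above start filling in the endpoint context's tx_info word: the max ESIT payload, and (as the comment explains, a stopgap) the same value as the average TRB length, packed with the MAX_ESIT_PAYLOAD_FOR_EP/AVG_TRB_LENGTH_FOR_EP masks added to xhci.h. For a high-speed periodic endpoint the payload is decoded straight out of wMaxPacketSize. The arithmetic stand-alone, with the masks copied from the hunk:

#include <stdint.h>
#include <stdio.h>

/* Max ESIT payload for a high-speed periodic endpoint, as computed in the
 * hunk above: the low bits of wMaxPacketSize carry the packet size and
 * bits 12:11 the number of *additional* transaction opportunities per
 * microframe, so the payload is max_packet * (max_burst + 1). */
static uint32_t hs_max_esit_payload(uint16_t wMaxPacketSize)
{
    uint32_t max_packet = wMaxPacketSize & 0x3ff;
    uint32_t max_burst  = (wMaxPacketSize & 0x1800) >> 11;
    return max_packet * (max_burst + 1);
}

int main(void)
{
    /* 512-byte packets with one extra opportunity: 2 x 512 per ESIT. */
    printf("%u\n", (unsigned)hs_max_esit_payload(0x0a00));
    return 0;
}
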
2722 +diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
2723 +index 0cfd621..a442989 100644
2724 +--- a/drivers/usb/serial/sierra.c
2725 ++++ b/drivers/usb/serial/sierra.c
2726 +@@ -229,6 +229,7 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = {
2727 + static struct usb_device_id id_table [] = {
2728 + { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
2729 + { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */
2730 ++ { USB_DEVICE(0x03F0, 0x211D) }, /* HP ev2210 a.k.a MC5725 */
2731 + { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */
2732 +
2733 + { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
2734 +diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
2735 +index 1ed3d55..17726a0 100644
2736 +--- a/drivers/w1/slaves/w1_therm.c
2737 ++++ b/drivers/w1/slaves/w1_therm.c
2738 +@@ -115,9 +115,8 @@ static struct w1_therm_family_converter w1_therm_families[] = {
2739 +
2740 + static inline int w1_DS18B20_convert_temp(u8 rom[9])
2741 + {
2742 +- int t = ((s16)rom[1] << 8) | rom[0];
2743 +- t = t*1000/16;
2744 +- return t;
2745 ++ s16 t = le16_to_cpup((__le16 *)rom);
2746 ++ return t*1000/16;
2747 + }
2748 +
2749 + static inline int w1_DS18S20_convert_temp(u8 rom[9])
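
The DS18B20 fix above is about sign: rom[0] and rom[1] hold a little-endian signed 16-bit count of 1/16 degC steps, and the old ((s16)rom[1] << 8) | rom[0] promoted everything to a plain non-negative int before the sign bit could take effect, so sub-zero readings came back as large positive temperatures. Reading the pair through le16_to_cpup() into an s16 keeps both the byte order and the sign. A userspace equivalent:

#include <stdint.h>
#include <stdio.h>

/* DS18B20 conversion as in the hunk above: scratchpad bytes 0 and 1 are a
 * little-endian signed 16-bit count of 1/16 degC steps.  Assembling them
 * into int16_t first (the userspace spelling of le16_to_cpup() + s16)
 * preserves the sign that the old open-coded version lost. */
static int ds18b20_millidegrees(const uint8_t rom[9])
{
    int16_t raw = (int16_t)(rom[0] | (rom[1] << 8));
    return raw * 1000 / 16;
}

int main(void)
{
    uint8_t plus25[9]  = { 0x91, 0x01 };    /* 0x0191 = +401 -> 25062 mdegC */
    uint8_t minus10[9] = { 0x60, 0xff };    /* 0xff60 = -160 -> -10000 mdegC */
    printf("%d %d\n", ds18b20_millidegrees(plus25),
           ds18b20_millidegrees(minus10));
    return 0;
}
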
2750 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
2751 +index c568779..cae75c1 100644
2752 +--- a/fs/ext4/extents.c
2753 ++++ b/fs/ext4/extents.c
2754 +@@ -3767,7 +3767,6 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2755 + __u64 start, __u64 len)
2756 + {
2757 + ext4_lblk_t start_blk;
2758 +- ext4_lblk_t len_blks;
2759 + int error = 0;
2760 +
2761 + /* fallback to generic here if not in extents fmt */
2762 +@@ -3781,8 +3780,14 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2763 + if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
2764 + error = ext4_xattr_fiemap(inode, fieinfo);
2765 + } else {
2766 ++ ext4_lblk_t len_blks;
2767 ++ __u64 last_blk;
2768 ++
2769 + start_blk = start >> inode->i_sb->s_blocksize_bits;
2770 +- len_blks = len >> inode->i_sb->s_blocksize_bits;
2771 ++ last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
2772 ++ if (last_blk >= EXT_MAX_BLOCK)
2773 ++ last_blk = EXT_MAX_BLOCK-1;
2774 ++ len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
2775 +
2776 + /*
2777 + * Walk the extent tree gathering extent information.
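
The ext4_fiemap() hunk above replaces "len >> blocksize_bits" with math on the last byte of the range: the old form rounded down, so a request ending mid-block lost its final block, and since ext4_lblk_t is 32-bit it could also wrap for huge ranges. Computing last_blk from start + len - 1, clamping it at EXT_MAX_BLOCK - 1, and taking an inclusive difference fixes both. The same arithmetic stand-alone; EXT_MAX_BLOCK's value is assumed here for illustration:

#include <inttypes.h>
#include <stdio.h>

typedef uint32_t ext4_lblk_t;
#define EXT_MAX_BLOCK 0xffffffffu   /* value assumed for illustration */

/* Block-range math from the hunk above: derive an inclusive block count
 * from a byte range, rounding the end up so a partial last block is still
 * covered, and clamping so the count fits the 32-bit logical block type. */
static ext4_lblk_t fiemap_len_blks(uint64_t start, uint64_t len,
                                   unsigned blkbits)
{
    ext4_lblk_t start_blk = (ext4_lblk_t)(start >> blkbits);
    uint64_t last_blk = (start + len - 1) >> blkbits;

    if (last_blk >= EXT_MAX_BLOCK)
        last_blk = EXT_MAX_BLOCK - 1;
    return (ext4_lblk_t)last_blk - start_blk + 1;
}

int main(void)
{
    /* 4 KiB blocks: a 1-byte range still spans one whole block, where the
     * old "len >> blkbits" computed zero. */
    printf("%" PRIu32 "\n", fiemap_len_blks(4096, 1, 12));
    /* A range ending past the addressable space clamps instead of
     * wrapping the 32-bit count. */
    printf("%" PRIu32 "\n", fiemap_len_blks(0, (uint64_t)1 << 45, 12));
    return 0;
}
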
2778 +diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
2779 +index 7f24a0b..1aba003 100644
2780 +--- a/fs/jfs/resize.c
2781 ++++ b/fs/jfs/resize.c
2782 +@@ -81,6 +81,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
2783 + struct inode *iplist[1];
2784 + struct jfs_superblock *j_sb, *j_sb2;
2785 + uint old_agsize;
2786 ++ int agsizechanged = 0;
2787 + struct buffer_head *bh, *bh2;
2788 +
2789 + /* If the volume hasn't grown, get out now */
2790 +@@ -333,6 +334,9 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
2791 + */
2792 + if ((rc = dbExtendFS(ipbmap, XAddress, nblocks)))
2793 + goto error_out;
2794 ++
2795 ++ agsizechanged |= (bmp->db_agsize != old_agsize);
2796 ++
2797 + /*
2798 + * the map now has extended to cover additional nblocks:
2799 + * dn_mapsize = oldMapsize + nblocks;
2800 +@@ -432,7 +436,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
2801 + * will correctly identify the new ag);
2802 + */
2803 + /* if new AG size the same as old AG size, done! */
2804 +- if (bmp->db_agsize != old_agsize) {
2805 ++ if (agsizechanged) {
2806 + if ((rc = diExtendFS(ipimap, ipbmap)))
2807 + goto error_out;
2808 +
2809 +diff --git a/fs/nfs/client.c b/fs/nfs/client.c
2810 +index bd39abc..37d555c 100644
2811 +--- a/fs/nfs/client.c
2812 ++++ b/fs/nfs/client.c
2813 +@@ -965,6 +965,8 @@ out_error:
2814 + static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *source)
2815 + {
2816 + target->flags = source->flags;
2817 ++ target->rsize = source->rsize;
2818 ++ target->wsize = source->wsize;
2819 + target->acregmin = source->acregmin;
2820 + target->acregmax = source->acregmax;
2821 + target->acdirmin = source->acdirmin;
2822 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
2823 +index af6948d..b5d55d3 100644
2824 +--- a/fs/nfs/dir.c
2825 ++++ b/fs/nfs/dir.c
2826 +@@ -837,6 +837,8 @@ out_zap_parent:
2827 + /* If we have submounts, don't unhash ! */
2828 + if (have_submounts(dentry))
2829 + goto out_valid;
2830 ++ if (dentry->d_flags & DCACHE_DISCONNECTED)
2831 ++ goto out_valid;
2832 + shrink_dcache_parent(dentry);
2833 + }
2834 + d_drop(dentry);
2835 +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
2836 +index bbf72d8..718f3fb 100644
2837 +--- a/fs/nfsd/nfs4xdr.c
2838 ++++ b/fs/nfsd/nfs4xdr.c
2839 +@@ -160,10 +160,10 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
2840 + argp->p = page_address(argp->pagelist[0]);
2841 + argp->pagelist++;
2842 + if (argp->pagelen < PAGE_SIZE) {
2843 +- argp->end = p + (argp->pagelen>>2);
2844 ++ argp->end = argp->p + (argp->pagelen>>2);
2845 + argp->pagelen = 0;
2846 + } else {
2847 +- argp->end = p + (PAGE_SIZE>>2);
2848 ++ argp->end = argp->p + (PAGE_SIZE>>2);
2849 + argp->pagelen -= PAGE_SIZE;
2850 + }
2851 + memcpy(((char*)p)+avail, argp->p, (nbytes - avail));
2852 +@@ -1425,10 +1425,10 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
2853 + argp->p = page_address(argp->pagelist[0]);
2854 + argp->pagelist++;
2855 + if (argp->pagelen < PAGE_SIZE) {
2856 +- argp->end = p + (argp->pagelen>>2);
2857 ++ argp->end = argp->p + (argp->pagelen>>2);
2858 + argp->pagelen = 0;
2859 + } else {
2860 +- argp->end = p + (PAGE_SIZE>>2);
2861 ++ argp->end = argp->p + (PAGE_SIZE>>2);
2862 + argp->pagelen -= PAGE_SIZE;
2863 + }
2864 + }
2865 +diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
2866 +index 21c808f..b18c6d6 100644
2867 +--- a/fs/ocfs2/buffer_head_io.c
2868 ++++ b/fs/ocfs2/buffer_head_io.c
2869 +@@ -407,6 +407,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
2870 + struct buffer_head *bh)
2871 + {
2872 + int ret = 0;
2873 ++ struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
2874 +
2875 + mlog_entry_void();
2876 +
2877 +@@ -426,6 +427,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
2878 +
2879 + get_bh(bh); /* for end_buffer_write_sync() */
2880 + bh->b_end_io = end_buffer_write_sync;
2881 ++ ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
2882 + submit_bh(WRITE, bh);
2883 +
2884 + wait_on_buffer(bh);
2885 +diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
2886 +index 02bf178..18bc101 100644
2887 +--- a/fs/ocfs2/dlm/dlmfs.c
2888 ++++ b/fs/ocfs2/dlm/dlmfs.c
2889 +@@ -205,7 +205,7 @@ static ssize_t dlmfs_file_read(struct file *filp,
2890 + if ((count + *ppos) > i_size_read(inode))
2891 + readlen = i_size_read(inode) - *ppos;
2892 + else
2893 +- readlen = count - *ppos;
2894 ++ readlen = count;
2895 +
2896 + lvb_buf = kmalloc(readlen, GFP_NOFS);
2897 + if (!lvb_buf)
2898 +diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
2899 +index 88459bd..ec4d97f 100644
2900 +--- a/fs/ocfs2/inode.c
2901 ++++ b/fs/ocfs2/inode.c
2902 +@@ -559,6 +559,7 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
2903 + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
2904 + if (IS_ERR(handle)) {
2905 + status = PTR_ERR(handle);
2906 ++ handle = NULL;
2907 + mlog_errno(status);
2908 + goto out;
2909 + }
2910 +diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
2911 +index 8ae65c9..a8e8572 100644
2912 +--- a/fs/ocfs2/refcounttree.c
2913 ++++ b/fs/ocfs2/refcounttree.c
2914 +@@ -4083,6 +4083,9 @@ static int ocfs2_complete_reflink(struct inode *s_inode,
2915 + di->i_attr = s_di->i_attr;
2916 +
2917 + if (preserve) {
2918 ++ t_inode->i_uid = s_inode->i_uid;
2919 ++ t_inode->i_gid = s_inode->i_gid;
2920 ++ t_inode->i_mode = s_inode->i_mode;
2921 + di->i_uid = s_di->i_uid;
2922 + di->i_gid = s_di->i_gid;
2923 + di->i_mode = s_di->i_mode;
2924 +diff --git a/fs/proc/base.c b/fs/proc/base.c
2925 +index 3cd449d..8dce96c 100644
2926 +--- a/fs/proc/base.c
2927 ++++ b/fs/proc/base.c
2928 +@@ -2910,7 +2910,7 @@ out_no_task:
2929 + */
2930 + static const struct pid_entry tid_base_stuff[] = {
2931 + DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
2932 +- DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fd_operations),
2933 ++ DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
2934 + REG("environ", S_IRUSR, proc_environ_operations),
2935 + INF("auxv", S_IRUSR, proc_pid_auxv),
2936 + ONE("status", S_IRUGO, proc_pid_status),
2937 +diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
2938 +index c094f58..1e686ee 100644
2939 +--- a/fs/reiserfs/dir.c
2940 ++++ b/fs/reiserfs/dir.c
2941 +@@ -45,8 +45,6 @@ static inline bool is_privroot_deh(struct dentry *dir,
2942 + struct reiserfs_de_head *deh)
2943 + {
2944 + struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root;
2945 +- if (reiserfs_expose_privroot(dir->d_sb))
2946 +- return 0;
2947 + return (dir == dir->d_parent && privroot->d_inode &&
2948 + deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid);
2949 + }
2950 +diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
2951 +index 81f09fa..0392e82 100644
2952 +--- a/fs/reiserfs/xattr.c
2953 ++++ b/fs/reiserfs/xattr.c
2954 +@@ -557,7 +557,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
2955 + if (!err && new_size < i_size_read(dentry->d_inode)) {
2956 + struct iattr newattrs = {
2957 + .ia_ctime = current_fs_time(inode->i_sb),
2958 +- .ia_size = buffer_size,
2959 ++ .ia_size = new_size,
2960 + .ia_valid = ATTR_SIZE | ATTR_CTIME,
2961 + };
2962 +
2963 +@@ -976,21 +976,13 @@ int reiserfs_permission(struct inode *inode, int mask)
2964 + return generic_permission(inode, mask, NULL);
2965 + }
2966 +
2967 +-/* This will catch lookups from the fs root to .reiserfs_priv */
2968 +-static int
2969 +-xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name)
2970 ++static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
2971 + {
2972 +- struct dentry *priv_root = REISERFS_SB(dentry->d_sb)->priv_root;
2973 +- if (container_of(q1, struct dentry, d_name) == priv_root)
2974 +- return -ENOENT;
2975 +- if (q1->len == name->len &&
2976 +- !memcmp(q1->name, name->name, name->len))
2977 +- return 0;
2978 +- return 1;
2979 ++ return -EPERM;
2980 + }
2981 +
2982 + static const struct dentry_operations xattr_lookup_poison_ops = {
2983 +- .d_compare = xattr_lookup_poison,
2984 ++ .d_revalidate = xattr_hide_revalidate,
2985 + };
2986 +
2987 + int reiserfs_lookup_privroot(struct super_block *s)
2988 +@@ -1004,8 +996,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
2989 + strlen(PRIVROOT_NAME));
2990 + if (!IS_ERR(dentry)) {
2991 + REISERFS_SB(s)->priv_root = dentry;
2992 +- if (!reiserfs_expose_privroot(s))
2993 +- s->s_root->d_op = &xattr_lookup_poison_ops;
2994 ++ dentry->d_op = &xattr_lookup_poison_ops;
2995 + if (dentry->d_inode)
2996 + dentry->d_inode->i_flags |= S_PRIVATE;
2997 + } else
2998 +diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
2999 +index 77414db..146d491 100644
3000 +--- a/fs/xfs/linux-2.6/xfs_super.c
3001 ++++ b/fs/xfs/linux-2.6/xfs_super.c
3002 +@@ -1160,6 +1160,7 @@ xfs_fs_put_super(
3003 +
3004 + xfs_unmountfs(mp);
3005 + xfs_freesb(mp);
3006 ++ xfs_inode_shrinker_unregister(mp);
3007 + xfs_icsb_destroy_counters(mp);
3008 + xfs_close_devices(mp);
3009 + xfs_dmops_put(mp);
3010 +@@ -1523,6 +1524,8 @@ xfs_fs_fill_super(
3011 + if (error)
3012 + goto fail_vnrele;
3013 +
3014 ++ xfs_inode_shrinker_register(mp);
3015 ++
3016 + kfree(mtpt);
3017 + return 0;
3018 +
3019 +@@ -1767,6 +1770,7 @@ init_xfs_fs(void)
3020 + goto out_cleanup_procfs;
3021 +
3022 + vfs_initquota();
3023 ++ xfs_inode_shrinker_init();
3024 +
3025 + error = register_filesystem(&xfs_fs_type);
3026 + if (error)
3027 +@@ -1794,6 +1798,7 @@ exit_xfs_fs(void)
3028 + {
3029 + vfs_exitquota();
3030 + unregister_filesystem(&xfs_fs_type);
3031 ++ xfs_inode_shrinker_destroy();
3032 + xfs_sysctl_unregister();
3033 + xfs_cleanup_procfs();
3034 + xfs_buf_terminate();
3035 +diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
3036 +index 6b6b394..57adf2d 100644
3037 +--- a/fs/xfs/linux-2.6/xfs_sync.c
3038 ++++ b/fs/xfs/linux-2.6/xfs_sync.c
3039 +@@ -95,7 +95,8 @@ xfs_inode_ag_walk(
3040 + struct xfs_perag *pag, int flags),
3041 + int flags,
3042 + int tag,
3043 +- int exclusive)
3044 ++ int exclusive,
3045 ++ int *nr_to_scan)
3046 + {
3047 + struct xfs_perag *pag = &mp->m_perag[ag];
3048 + uint32_t first_index;
3049 +@@ -135,7 +136,7 @@ restart:
3050 + if (error == EFSCORRUPTED)
3051 + break;
3052 +
3053 +- } while (1);
3054 ++ } while ((*nr_to_scan)--);
3055 +
3056 + if (skipped) {
3057 + delay(1);
3058 +@@ -153,23 +154,30 @@ xfs_inode_ag_iterator(
3059 + struct xfs_perag *pag, int flags),
3060 + int flags,
3061 + int tag,
3062 +- int exclusive)
3063 ++ int exclusive,
3064 ++ int *nr_to_scan)
3065 + {
3066 + int error = 0;
3067 + int last_error = 0;
3068 + xfs_agnumber_t ag;
3069 ++ int nr;
3070 +
3071 ++ nr = nr_to_scan ? *nr_to_scan : INT_MAX;
3072 + for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
3073 + if (!mp->m_perag[ag].pag_ici_init)
3074 + continue;
3075 + error = xfs_inode_ag_walk(mp, ag, execute, flags, tag,
3076 +- exclusive);
3077 ++ exclusive, &nr);
3078 + if (error) {
3079 + last_error = error;
3080 + if (error == EFSCORRUPTED)
3081 + break;
3082 + }
3083 ++ if (nr <= 0)
3084 ++ break;
3085 + }
3086 ++ if (nr_to_scan)
3087 ++ *nr_to_scan = nr;
3088 + return XFS_ERROR(last_error);
3089 + }
3090 +
3091 +@@ -289,7 +297,7 @@ xfs_sync_data(
3092 + ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);
3093 +
3094 + error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
3095 +- XFS_ICI_NO_TAG, 0);
3096 ++ XFS_ICI_NO_TAG, 0, NULL);
3097 + if (error)
3098 + return XFS_ERROR(error);
3099 +
3100 +@@ -311,7 +319,7 @@ xfs_sync_attr(
3101 + ASSERT((flags & ~SYNC_WAIT) == 0);
3102 +
3103 + return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
3104 +- XFS_ICI_NO_TAG, 0);
3105 ++ XFS_ICI_NO_TAG, 0, NULL);
3106 + }
3107 +
3108 + STATIC int
3109 +@@ -679,6 +687,7 @@ __xfs_inode_set_reclaim_tag(
3110 + radix_tree_tag_set(&pag->pag_ici_root,
3111 + XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
3112 + XFS_ICI_RECLAIM_TAG);
3113 ++ pag->pag_ici_reclaimable++;
3114 + }
3115 +
3116 + /*
3117 +@@ -710,6 +719,7 @@ __xfs_inode_clear_reclaim_tag(
3118 + {
3119 + radix_tree_tag_clear(&pag->pag_ici_root,
3120 + XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
3121 ++ pag->pag_ici_reclaimable--;
3122 + }
3123 +
3124 + STATIC int
3125 +@@ -770,5 +780,88 @@ xfs_reclaim_inodes(
3126 + int mode)
3127 + {
3128 + return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode,
3129 +- XFS_ICI_RECLAIM_TAG, 1);
3130 ++ XFS_ICI_RECLAIM_TAG, 1, NULL);
3131 ++}
3132 ++
3133 ++/*
3134 ++ * Shrinker infrastructure.
3135 ++ *
3136 ++ * This is all far more complex than it needs to be. It adds a global list of
3137 ++ * mounts because the shrinkers are only called with a global context. We need
3138 ++ * to make the shrinkers pass a context to avoid the need for global state.
3139 ++ */
3140 ++static LIST_HEAD(xfs_mount_list);
3141 ++static struct rw_semaphore xfs_mount_list_lock;
3142 ++
3143 ++static int
3144 ++xfs_reclaim_inode_shrink(
3145 ++ int nr_to_scan,
3146 ++ gfp_t gfp_mask)
3147 ++{
3148 ++ struct xfs_mount *mp;
3149 ++ xfs_agnumber_t ag;
3150 ++ int reclaimable = 0;
3151 ++
3152 ++ if (nr_to_scan) {
3153 ++ if (!(gfp_mask & __GFP_FS))
3154 ++ return -1;
3155 ++
3156 ++ down_read(&xfs_mount_list_lock);
3157 ++ list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
3158 ++ xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
3159 ++ XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
3160 ++ if (nr_to_scan <= 0)
3161 ++ break;
3162 ++ }
3163 ++ up_read(&xfs_mount_list_lock);
3164 ++ }
3165 ++
3166 ++ down_read(&xfs_mount_list_lock);
3167 ++ list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
3168 ++ for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
3169 ++
3170 ++ if (!mp->m_perag[ag].pag_ici_init)
3171 ++ continue;
3172 ++ reclaimable += mp->m_perag[ag].pag_ici_reclaimable;
3173 ++ }
3174 ++ }
3175 ++ up_read(&xfs_mount_list_lock);
3176 ++ return reclaimable;
3177 ++}
3178 ++
3179 ++static struct shrinker xfs_inode_shrinker = {
3180 ++ .shrink = xfs_reclaim_inode_shrink,
3181 ++ .seeks = DEFAULT_SEEKS,
3182 ++};
3183 ++
3184 ++void __init
3185 ++xfs_inode_shrinker_init(void)
3186 ++{
3187 ++ init_rwsem(&xfs_mount_list_lock);
3188 ++ register_shrinker(&xfs_inode_shrinker);
3189 ++}
3190 ++
3191 ++void
3192 ++xfs_inode_shrinker_destroy(void)
3193 ++{
3194 ++ ASSERT(list_empty(&xfs_mount_list));
3195 ++ unregister_shrinker(&xfs_inode_shrinker);
3196 ++}
3197 ++
3198 ++void
3199 ++xfs_inode_shrinker_register(
3200 ++ struct xfs_mount *mp)
3201 ++{
3202 ++ down_write(&xfs_mount_list_lock);
3203 ++ list_add_tail(&mp->m_mplist, &xfs_mount_list);
3204 ++ up_write(&xfs_mount_list_lock);
3205 ++}
3206 ++
3207 ++void
3208 ++xfs_inode_shrinker_unregister(
3209 ++ struct xfs_mount *mp)
3210 ++{
3211 ++ down_write(&xfs_mount_list_lock);
3212 ++ list_del(&mp->m_mplist);
3213 ++ up_write(&xfs_mount_list_lock);
3214 + }
3215 +diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
3216 +index ea932b4..0b28c13 100644
3217 +--- a/fs/xfs/linux-2.6/xfs_sync.h
3218 ++++ b/fs/xfs/linux-2.6/xfs_sync.h
3219 +@@ -54,6 +54,11 @@ void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
3220 + int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag);
3221 + int xfs_inode_ag_iterator(struct xfs_mount *mp,
3222 + int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
3223 +- int flags, int tag, int write_lock);
3224 ++ int flags, int tag, int write_lock, int *nr_to_scan);
3225 ++
3226 ++void xfs_inode_shrinker_init(void);
3227 ++void xfs_inode_shrinker_destroy(void);
3228 ++void xfs_inode_shrinker_register(struct xfs_mount *mp);
3229 ++void xfs_inode_shrinker_unregister(struct xfs_mount *mp);
3230 +
3231 + #endif
3232 +diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
3233 +index 873e07e..145f596 100644
3234 +--- a/fs/xfs/quota/xfs_qm_syscalls.c
3235 ++++ b/fs/xfs/quota/xfs_qm_syscalls.c
3236 +@@ -891,7 +891,8 @@ xfs_qm_dqrele_all_inodes(
3237 + uint flags)
3238 + {
3239 + ASSERT(mp->m_quotainfo);
3240 +- xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG, 0);
3241 ++ xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags,
3242 ++ XFS_ICI_NO_TAG, 0, NULL);
3243 + }
3244 +
3245 + /*------------------------------------------------------------------------*/
3246 +diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
3247 +index 6702bd8..1182604 100644
3248 +--- a/fs/xfs/xfs_ag.h
3249 ++++ b/fs/xfs/xfs_ag.h
3250 +@@ -229,6 +229,7 @@ typedef struct xfs_perag
3251 + int pag_ici_init; /* incore inode cache initialised */
3252 + rwlock_t pag_ici_lock; /* incore inode lock */
3253 + struct radix_tree_root pag_ici_root; /* incore inode cache root */
3254 ++ int pag_ici_reclaimable; /* reclaimable inodes */
3255 + #endif
3256 + } xfs_perag_t;
3257 +
3258 +diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
3259 +index 1df7e45..c95f81a 100644
3260 +--- a/fs/xfs/xfs_mount.h
3261 ++++ b/fs/xfs/xfs_mount.h
3262 +@@ -257,6 +257,7 @@ typedef struct xfs_mount {
3263 + wait_queue_head_t m_wait_single_sync_task;
3264 + __int64_t m_update_flags; /* sb flags we need to update
3265 + on the next remount,rw */
3266 ++ struct list_head m_mplist; /* inode shrinker mount list */
3267 + } xfs_mount_t;
3268 +
3269 + /*
3270 +diff --git a/include/linux/ata.h b/include/linux/ata.h
3271 +index 20f3156..f8bd0f9 100644
3272 +--- a/include/linux/ata.h
3273 ++++ b/include/linux/ata.h
3274 +@@ -1024,8 +1024,8 @@ static inline int ata_ok(u8 status)
3275 +
3276 + static inline int lba_28_ok(u64 block, u32 n_block)
3277 + {
3278 +- /* check the ending block number */
3279 +- return ((block + n_block) < ((u64)1 << 28)) && (n_block <= 256);
3280 ++ /* check the ending block number: must be LESS THAN 0x0fffffff */
3281 ++ return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256);
3282 + }
3283 +
3284 + static inline int lba_48_ok(u64 block, u32 n_block)
3285 +diff --git a/include/linux/poison.h b/include/linux/poison.h
3286 +index 2110a81..34066ff 100644
3287 +--- a/include/linux/poison.h
3288 ++++ b/include/linux/poison.h
3289 +@@ -48,6 +48,15 @@
3290 + #define POISON_FREE 0x6b /* for use-after-free poisoning */
3291 + #define POISON_END 0xa5 /* end-byte of poisoning */
3292 +
3293 ++/********** mm/hugetlb.c **********/
3294 ++/*
3295 ++ * Private mappings of hugetlb pages use this poisoned value for
3296 ++ * page->mapping. The core VM should not be doing anything with this mapping
3297 ++ * but futex requires the existence of some page->mapping value even though it
3298 ++ * is unused if PAGE_MAPPING_ANON is set.
3299 ++ */
3300 ++#define HUGETLB_POISON ((void *)(0x00300300 + POISON_POINTER_DELTA + PAGE_MAPPING_ANON))
3301 ++
3302 + /********** arch/$ARCH/mm/init.c **********/
3303 + #define POISON_FREE_INITMEM 0xcc
3304 +
3305 +diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
3306 +index 8be5135..2c55a7e 100644
3307 +--- a/include/net/sctp/command.h
3308 ++++ b/include/net/sctp/command.h
3309 +@@ -107,6 +107,7 @@ typedef enum {
3310 + SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
3311 + SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
3312 + SCTP_CMD_SEND_MSG, /* Send the whole use message */
3313 ++ SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
3314 + SCTP_CMD_LAST
3315 + } sctp_verb_t;
3316 +
3317 +diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
3318 +index 78740ec..fa6cde5 100644
3319 +--- a/include/net/sctp/sctp.h
3320 ++++ b/include/net/sctp/sctp.h
3321 +@@ -128,6 +128,7 @@ extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
3322 + int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
3323 + int sctp_inet_listen(struct socket *sock, int backlog);
3324 + void sctp_write_space(struct sock *sk);
3325 ++void sctp_data_ready(struct sock *sk, int len);
3326 + unsigned int sctp_poll(struct file *file, struct socket *sock,
3327 + poll_table *wait);
3328 + void sctp_sock_rfree(struct sk_buff *skb);
3329 +diff --git a/init/initramfs.c b/init/initramfs.c
3330 +index b37d34b..b27d045 100644
3331 +--- a/init/initramfs.c
3332 ++++ b/init/initramfs.c
3333 +@@ -457,7 +457,8 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len)
3334 + compress_name);
3335 + message = msg_buf;
3336 + }
3337 +- }
3338 ++ } else
3339 ++ error("junk in compressed archive");
3340 + if (state != Reset)
3341 + error("junk in compressed archive");
3342 + this_header = saved_offset + my_inptr;
3343 +diff --git a/kernel/cred.c b/kernel/cred.c
3344 +index 1ed8ca1..099f5e6 100644
3345 +--- a/kernel/cred.c
3346 ++++ b/kernel/cred.c
3347 +@@ -786,8 +786,6 @@ bool creds_are_invalid(const struct cred *cred)
3348 + {
3349 + if (cred->magic != CRED_MAGIC)
3350 + return true;
3351 +- if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
3352 +- return true;
3353 + #ifdef CONFIG_SECURITY_SELINUX
3354 + if (selinux_is_enabled()) {
3355 + if ((unsigned long) cred->security < PAGE_SIZE)
3356 +diff --git a/kernel/perf_event.c b/kernel/perf_event.c
3357 +index 32d0ae2..e928e1a 100644
3358 +--- a/kernel/perf_event.c
3359 ++++ b/kernel/perf_event.c
3360 +@@ -4811,7 +4811,7 @@ err_fput_free_put_context:
3361 +
3362 + err_free_put_context:
3363 + if (err < 0)
3364 +- kfree(event);
3365 ++ free_event(event);
3366 +
3367 + err_put_context:
3368 + if (err < 0)
3369 +diff --git a/lib/flex_array.c b/lib/flex_array.c
3370 +index 66eef2e..41b1804 100644
3371 +--- a/lib/flex_array.c
3372 ++++ b/lib/flex_array.c
3373 +@@ -99,7 +99,7 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
3374 + ret->element_size = element_size;
3375 + ret->total_nr_elements = total;
3376 + if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
3377 +- memset(ret->parts[0], FLEX_ARRAY_FREE,
3378 ++ memset(&ret->parts[0], FLEX_ARRAY_FREE,
3379 + FLEX_ARRAY_BASE_BYTES_LEFT);
3380 + return ret;
3381 + }
3382 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3383 +index 2d16fa6..fd9ba95 100644
3384 +--- a/mm/hugetlb.c
3385 ++++ b/mm/hugetlb.c
3386 +@@ -546,6 +546,7 @@ static void free_huge_page(struct page *page)
3387 +
3388 + mapping = (struct address_space *) page_private(page);
3389 + set_page_private(page, 0);
3390 ++ page->mapping = NULL;
3391 + BUG_ON(page_count(page));
3392 + INIT_LIST_HEAD(&page->lru);
3393 +
3394 +@@ -2447,8 +2448,10 @@ retry:
3395 + spin_lock(&inode->i_lock);
3396 + inode->i_blocks += blocks_per_huge_page(h);
3397 + spin_unlock(&inode->i_lock);
3398 +- } else
3399 ++ } else {
3400 + lock_page(page);
3401 ++ page->mapping = HUGETLB_POISON;
3402 ++ }
3403 + }
3404 +
3405 + /*
3406 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
3407 +index 954032b..dff3379 100644
3408 +--- a/mm/memcontrol.c
3409 ++++ b/mm/memcontrol.c
3410 +@@ -2215,12 +2215,12 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
3411 + }
3412 + unlock_page_cgroup(pc);
3413 +
3414 ++ *ptr = mem;
3415 + if (mem) {
3416 +- ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
3417 ++ ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false,
3418 + page);
3419 + css_put(&mem->css);
3420 + }
3421 +- *ptr = mem;
3422 + return ret;
3423 + }
3424 +
3425 +diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
3426 +index bad1c49..72340dd 100644
3427 +--- a/net/ieee802154/af_ieee802154.c
3428 ++++ b/net/ieee802154/af_ieee802154.c
3429 +@@ -147,6 +147,9 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
3430 + dev_load(sock_net(sk), ifr.ifr_name);
3431 + dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);
3432 +
3433 ++ if (!dev)
3434 ++ return -ENODEV;
3435 ++
3436 + if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl)
3437 + ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd);
3438 +
3439 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
3440 +index 564a0f8..03c55ac 100644
3441 +--- a/net/ipv4/tcp.c
3442 ++++ b/net/ipv4/tcp.c
3443 +@@ -1368,6 +1368,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
3444 + sk_eat_skb(sk, skb, 0);
3445 + if (!desc->count)
3446 + break;
3447 ++ tp->copied_seq = seq;
3448 + }
3449 + tp->copied_seq = seq;
3450 +
3451 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
3452 +index 112c611..16190ca 100644
3453 +--- a/net/ipv4/udp.c
3454 ++++ b/net/ipv4/udp.c
3455 +@@ -471,8 +471,8 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
3456 + if (hslot->count < hslot2->count)
3457 + goto begin;
3458 +
3459 +- result = udp4_lib_lookup2(net, INADDR_ANY, sport,
3460 +- daddr, hnum, dif,
3461 ++ result = udp4_lib_lookup2(net, saddr, sport,
3462 ++ INADDR_ANY, hnum, dif,
3463 + hslot2, slot2);
3464 + }
3465 + rcu_read_unlock();
3466 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
3467 +index 548a06e..d2ef3a3 100644
3468 +--- a/net/ipv6/tcp_ipv6.c
3469 ++++ b/net/ipv6/tcp_ipv6.c
3470 +@@ -1006,7 +1006,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
3471 + skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
3472 +
3473 + t1 = (struct tcphdr *) skb_push(buff, tot_len);
3474 +- skb_reset_transport_header(skb);
3475 ++ skb_reset_transport_header(buff);
3476 +
3477 + /* Swap the send and the receive. */
3478 + memset(t1, 0, sizeof(*t1));
3479 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
3480 +index d9714d2..4f57cd2 100644
3481 +--- a/net/ipv6/udp.c
3482 ++++ b/net/ipv6/udp.c
3483 +@@ -258,8 +258,8 @@ static struct sock *__udp6_lib_lookup(struct net *net,
3484 + if (hslot->count < hslot2->count)
3485 + goto begin;
3486 +
3487 +- result = udp6_lib_lookup2(net, &in6addr_any, sport,
3488 +- daddr, hnum, dif,
3489 ++ result = udp6_lib_lookup2(net, saddr, sport,
3490 ++ &in6addr_any, hnum, dif,
3491 + hslot2, slot2);
3492 + }
3493 + rcu_read_unlock();
3494 +diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
3495 +index 304b0b6..dfdc138 100644
3496 +--- a/net/mac80211/agg-tx.c
3497 ++++ b/net/mac80211/agg-tx.c
3498 +@@ -183,7 +183,6 @@ static void sta_addba_resp_timer_expired(unsigned long data)
3499 + HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
3500 + HT_ADDBA_REQUESTED_MSK) {
3501 + spin_unlock_bh(&sta->lock);
3502 +- *state = HT_AGG_STATE_IDLE;
3503 + #ifdef CONFIG_MAC80211_HT_DEBUG
3504 + printk(KERN_DEBUG "timer expired on tid %d but we are not "
3505 + "(or no longer) expecting addBA response there",
3506 +diff --git a/net/sctp/associola.c b/net/sctp/associola.c
3507 +index df5abbf..99c93ee 100644
3508 +--- a/net/sctp/associola.c
3509 ++++ b/net/sctp/associola.c
3510 +@@ -1194,8 +1194,10 @@ void sctp_assoc_update(struct sctp_association *asoc,
3511 + /* Remove any peer addresses not present in the new association. */
3512 + list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
3513 + trans = list_entry(pos, struct sctp_transport, transports);
3514 +- if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr))
3515 +- sctp_assoc_del_peer(asoc, &trans->ipaddr);
3516 ++ if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
3517 ++ sctp_assoc_rm_peer(asoc, trans);
3518 ++ continue;
3519 ++ }
3520 +
3521 + if (asoc->state >= SCTP_STATE_ESTABLISHED)
3522 + sctp_transport_reset(trans);
3523 +diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
3524 +index 905fda5..7ec09ba 100644
3525 +--- a/net/sctp/endpointola.c
3526 ++++ b/net/sctp/endpointola.c
3527 +@@ -144,6 +144,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
3528 + /* Use SCTP specific send buffer space queues. */
3529 + ep->sndbuf_policy = sctp_sndbuf_policy;
3530 +
3531 ++ sk->sk_data_ready = sctp_data_ready;
3532 + sk->sk_write_space = sctp_write_space;
3533 + sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
3534 +
3535 +diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
3536 +index 9e73291..224db01 100644
3537 +--- a/net/sctp/sm_make_chunk.c
3538 ++++ b/net/sctp/sm_make_chunk.c
3539 +@@ -207,7 +207,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
3540 + sp = sctp_sk(asoc->base.sk);
3541 + num_types = sp->pf->supported_addrs(sp, types);
3542 +
3543 +- chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types);
3544 ++ chunksize = sizeof(init) + addrs_len;
3545 ++ chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
3546 + chunksize += sizeof(ecap_param);
3547 +
3548 + if (sctp_prsctp_enable)
3549 +@@ -237,14 +238,14 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
3550 + /* Add HMACS parameter length if any were defined */
3551 + auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
3552 + if (auth_hmacs->length)
3553 +- chunksize += ntohs(auth_hmacs->length);
3554 ++ chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
3555 + else
3556 + auth_hmacs = NULL;
3557 +
3558 + /* Add CHUNKS parameter length */
3559 + auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
3560 + if (auth_chunks->length)
3561 +- chunksize += ntohs(auth_chunks->length);
3562 ++ chunksize += WORD_ROUND(ntohs(auth_chunks->length));
3563 + else
3564 + auth_chunks = NULL;
3565 +
3566 +@@ -254,7 +255,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
3567 +
3568 + /* If we have any extensions to report, account for that */
3569 + if (num_ext)
3570 +- chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
3571 ++ chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
3572 ++ num_ext);
3573 +
3574 + /* RFC 2960 3.3.2 Initiation (INIT) (1)
3575 + *
3576 +@@ -396,13 +398,13 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
3577 +
3578 + auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
3579 + if (auth_hmacs->length)
3580 +- chunksize += ntohs(auth_hmacs->length);
3581 ++ chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
3582 + else
3583 + auth_hmacs = NULL;
3584 +
3585 + auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
3586 + if (auth_chunks->length)
3587 +- chunksize += ntohs(auth_chunks->length);
3588 ++ chunksize += WORD_ROUND(ntohs(auth_chunks->length));
3589 + else
3590 + auth_chunks = NULL;
3591 +
3592 +@@ -411,7 +413,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
3593 + }
3594 +
3595 + if (num_ext)
3596 +- chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
3597 ++ chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
3598 ++ num_ext);
3599 +
3600 + /* Now allocate and fill out the chunk. */
3601 + retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
3602 +@@ -3314,21 +3317,6 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
3603 + sctp_chunk_free(asconf);
3604 + asoc->addip_last_asconf = NULL;
3605 +
3606 +- /* Send the next asconf chunk from the addip chunk queue. */
3607 +- if (!list_empty(&asoc->addip_chunk_list)) {
3608 +- struct list_head *entry = asoc->addip_chunk_list.next;
3609 +- asconf = list_entry(entry, struct sctp_chunk, list);
3610 +-
3611 +- list_del_init(entry);
3612 +-
3613 +- /* Hold the chunk until an ASCONF_ACK is received. */
3614 +- sctp_chunk_hold(asconf);
3615 +- if (sctp_primitive_ASCONF(asoc, asconf))
3616 +- sctp_chunk_free(asconf);
3617 +- else
3618 +- asoc->addip_last_asconf = asconf;
3619 +- }
3620 +-
3621 + return retval;
3622 + }
3623 +
3624 +diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
3625 +index 4e4ca65..42bbb24 100644
3626 +--- a/net/sctp/sm_sideeffect.c
3627 ++++ b/net/sctp/sm_sideeffect.c
3628 +@@ -961,6 +961,29 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc,
3629 + }
3630 +
3631 +
3632 ++/* Send the next ASCONF chunk currently stored in the association.
3633 ++ * This happens after the ASCONF_ACK was successfully processed.
3634 ++ */
3635 ++static void sctp_cmd_send_asconf(struct sctp_association *asoc)
3636 ++{
3637 ++ /* Send the next asconf chunk from the addip chunk
3638 ++ * queue.
3639 ++ */
3640 ++ if (!list_empty(&asoc->addip_chunk_list)) {
3641 ++ struct list_head *entry = asoc->addip_chunk_list.next;
3642 ++ struct sctp_chunk *asconf = list_entry(entry,
3643 ++ struct sctp_chunk, list);
3644 ++ list_del_init(entry);
3645 ++
3646 ++ /* Hold the chunk until an ASCONF_ACK is received. */
3647 ++ sctp_chunk_hold(asconf);
3648 ++ if (sctp_primitive_ASCONF(asoc, asconf))
3649 ++ sctp_chunk_free(asconf);
3650 ++ else
3651 ++ asoc->addip_last_asconf = asconf;
3652 ++ }
3653 ++}
3654 ++
3655 +
3656 + /* These three macros allow us to pull the debugging code out of the
3657 + * main flow of sctp_do_sm() to keep attention focused on the real
3658 +@@ -1616,6 +1639,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
3659 + }
3660 + error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
3661 + break;
3662 ++ case SCTP_CMD_SEND_NEXT_ASCONF:
3663 ++ sctp_cmd_send_asconf(asoc);
3664 ++ break;
3665 + default:
3666 + printk(KERN_WARNING "Impossible command: %u, %p\n",
3667 + cmd->verb, cmd->obj.ptr);
3668 +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
3669 +index 47bc20d..c3f75e7 100644
3670 +--- a/net/sctp/sm_statefuns.c
3671 ++++ b/net/sctp/sm_statefuns.c
3672 +@@ -3675,8 +3675,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3673 + SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
3674 +
3675 + if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
3676 +- asconf_ack))
3677 ++ asconf_ack)) {
3678 ++ /* Successfully processed ASCONF_ACK. We can
3679 ++ * release the next asconf if we have one.
3680 ++ */
3681 ++ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
3682 ++ SCTP_NULL());
3683 + return SCTP_DISPOSITION_CONSUME;
3684 ++ }
3685 +
3686 + abort = sctp_make_abort(asoc, asconf_ack,
3687 + sizeof(sctp_errhdr_t));
3688 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3689 +index 9bd9d82..aa3ba60 100644
3690 +--- a/net/sctp/socket.c
3691 ++++ b/net/sctp/socket.c
3692 +@@ -3718,12 +3718,12 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3693 + sp->hmac = NULL;
3694 +
3695 + SCTP_DBG_OBJCNT_INC(sock);
3696 +- percpu_counter_inc(&sctp_sockets_allocated);
3697 +
3698 + /* Set socket backlog limit. */
3699 + sk->sk_backlog.limit = sysctl_sctp_rmem[1];
3700 +
3701 + local_bh_disable();
3702 ++ percpu_counter_inc(&sctp_sockets_allocated);
3703 + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
3704 + local_bh_enable();
3705 +
3706 +@@ -3740,8 +3740,8 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
3707 + /* Release our hold on the endpoint. */
3708 + ep = sctp_sk(sk)->ep;
3709 + sctp_endpoint_free(ep);
3710 +- percpu_counter_dec(&sctp_sockets_allocated);
3711 + local_bh_disable();
3712 ++ percpu_counter_dec(&sctp_sockets_allocated);
3713 + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
3714 + local_bh_enable();
3715 + }
3716 +@@ -6188,6 +6188,16 @@ do_nonblock:
3717 + goto out;
3718 + }
3719 +
3720 ++void sctp_data_ready(struct sock *sk, int len)
3721 ++{
3722 ++ read_lock_bh(&sk->sk_callback_lock);
3723 ++ if (sk_has_sleeper(sk))
3724 ++ wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
3725 ++ POLLRDNORM | POLLRDBAND);
3726 ++ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
3727 ++ read_unlock_bh(&sk->sk_callback_lock);
3728 ++}
3729 ++
3730 + /* If socket sndbuf has changed, wake up all per association waiters. */
3731 + void sctp_write_space(struct sock *sk)
3732 + {
3733 +diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
3734 +index 327011f..7809137 100644
3735 +--- a/net/tipc/bearer.c
3736 ++++ b/net/tipc/bearer.c
3737 +@@ -45,10 +45,10 @@
3738 +
3739 + #define MAX_ADDR_STR 32
3740 +
3741 +-static struct media *media_list = NULL;
3742 ++static struct media media_list[MAX_MEDIA];
3743 + static u32 media_count = 0;
3744 +
3745 +-struct bearer *tipc_bearers = NULL;
3746 ++struct bearer tipc_bearers[MAX_BEARERS];
3747 +
3748 + /**
3749 + * media_name_valid - validate media name
3750 +@@ -108,9 +108,11 @@ int tipc_register_media(u32 media_type,
3751 + int res = -EINVAL;
3752 +
3753 + write_lock_bh(&tipc_net_lock);
3754 +- if (!media_list)
3755 +- goto exit;
3756 +
3757 ++ if (tipc_mode != TIPC_NET_MODE) {
3758 ++ warn("Media <%s> rejected, not in networked mode yet\n", name);
3759 ++ goto exit;
3760 ++ }
3761 + if (!media_name_valid(name)) {
3762 + warn("Media <%s> rejected, illegal name\n", name);
3763 + goto exit;
3764 +@@ -660,33 +662,10 @@ int tipc_disable_bearer(const char *name)
3765 +
3766 +
3767 +
3768 +-int tipc_bearer_init(void)
3769 +-{
3770 +- int res;
3771 +-
3772 +- write_lock_bh(&tipc_net_lock);
3773 +- tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
3774 +- media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
3775 +- if (tipc_bearers && media_list) {
3776 +- res = 0;
3777 +- } else {
3778 +- kfree(tipc_bearers);
3779 +- kfree(media_list);
3780 +- tipc_bearers = NULL;
3781 +- media_list = NULL;
3782 +- res = -ENOMEM;
3783 +- }
3784 +- write_unlock_bh(&tipc_net_lock);
3785 +- return res;
3786 +-}
3787 +-
3788 + void tipc_bearer_stop(void)
3789 + {
3790 + u32 i;
3791 +
3792 +- if (!tipc_bearers)
3793 +- return;
3794 +-
3795 + for (i = 0; i < MAX_BEARERS; i++) {
3796 + if (tipc_bearers[i].active)
3797 + tipc_bearers[i].publ.blocked = 1;
3798 +@@ -695,10 +674,6 @@ void tipc_bearer_stop(void)
3799 + if (tipc_bearers[i].active)
3800 + bearer_disable(tipc_bearers[i].publ.name);
3801 + }
3802 +- kfree(tipc_bearers);
3803 +- kfree(media_list);
3804 +- tipc_bearers = NULL;
3805 +- media_list = NULL;
3806 + media_count = 0;
3807 + }
3808 +
3809 +diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
3810 +index ca57348..000228e 100644
3811 +--- a/net/tipc/bearer.h
3812 ++++ b/net/tipc/bearer.h
3813 +@@ -114,7 +114,7 @@ struct bearer_name {
3814 +
3815 + struct link;
3816 +
3817 +-extern struct bearer *tipc_bearers;
3818 ++extern struct bearer tipc_bearers[];
3819 +
3820 + void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
3821 + struct sk_buff *tipc_media_get_names(void);
3822 +diff --git a/net/tipc/net.c b/net/tipc/net.c
3823 +index 7906608..f25b1cd 100644
3824 +--- a/net/tipc/net.c
3825 ++++ b/net/tipc/net.c
3826 +@@ -116,7 +116,8 @@
3827 + */
3828 +
3829 + DEFINE_RWLOCK(tipc_net_lock);
3830 +-struct network tipc_net = { NULL };
3831 ++struct _zone *tipc_zones[256] = { NULL, };
3832 ++struct network tipc_net = { tipc_zones };
3833 +
3834 + struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
3835 + {
3836 +@@ -158,28 +159,12 @@ void tipc_net_send_external_routes(u32 dest)
3837 + }
3838 + }
3839 +
3840 +-static int net_init(void)
3841 +-{
3842 +- memset(&tipc_net, 0, sizeof(tipc_net));
3843 +- tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
3844 +- if (!tipc_net.zones) {
3845 +- return -ENOMEM;
3846 +- }
3847 +- return 0;
3848 +-}
3849 +-
3850 + static void net_stop(void)
3851 + {
3852 + u32 z_num;
3853 +
3854 +- if (!tipc_net.zones)
3855 +- return;
3856 +-
3857 +- for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
3858 ++ for (z_num = 1; z_num <= tipc_max_zones; z_num++)
3859 + tipc_zone_delete(tipc_net.zones[z_num]);
3860 +- }
3861 +- kfree(tipc_net.zones);
3862 +- tipc_net.zones = NULL;
3863 + }
3864 +
3865 + static void net_route_named_msg(struct sk_buff *buf)
3866 +@@ -282,9 +267,7 @@ int tipc_net_start(u32 addr)
3867 + tipc_named_reinit();
3868 + tipc_port_reinit();
3869 +
3870 +- if ((res = tipc_bearer_init()) ||
3871 +- (res = net_init()) ||
3872 +- (res = tipc_cltr_init()) ||
3873 ++ if ((res = tipc_cltr_init()) ||
3874 + (res = tipc_bclink_init())) {
3875 + return res;
3876 + }
3877 +diff --git a/security/inode.c b/security/inode.c
3878 +index c3a7938..1c812e8 100644
3879 +--- a/security/inode.c
3880 ++++ b/security/inode.c
3881 +@@ -161,13 +161,13 @@ static int create_by_name(const char *name, mode_t mode,
3882 +
3883 + mutex_lock(&parent->d_inode->i_mutex);
3884 + *dentry = lookup_one_len(name, parent, strlen(name));
3885 +- if (!IS_ERR(dentry)) {
3886 ++ if (!IS_ERR(*dentry)) {
3887 + if ((mode & S_IFMT) == S_IFDIR)
3888 + error = mkdir(parent->d_inode, *dentry, mode);
3889 + else
3890 + error = create(parent->d_inode, *dentry, mode);
3891 + } else
3892 +- error = PTR_ERR(dentry);
3893 ++ error = PTR_ERR(*dentry);
3894 + mutex_unlock(&parent->d_inode->i_mutex);
3895 +
3896 + return error;
3897 +diff --git a/security/keys/request_key.c b/security/keys/request_key.c
3898 +index 03fe63e..9ac7bfd 100644
3899 +--- a/security/keys/request_key.c
3900 ++++ b/security/keys/request_key.c
3901 +@@ -336,8 +336,10 @@ static int construct_alloc_key(struct key_type *type,
3902 +
3903 + key_already_present:
3904 + mutex_unlock(&key_construction_mutex);
3905 +- if (dest_keyring)
3906 ++ if (dest_keyring) {
3907 ++ __key_link(dest_keyring, key_ref_to_ptr(key_ref));
3908 + up_write(&dest_keyring->sem);
3909 ++ }
3910 + mutex_unlock(&user->cons_lock);
3911 + key_put(key);
3912 + *_key = key = key_ref_to_ptr(key_ref);
3913 +@@ -428,6 +430,11 @@ struct key *request_key_and_link(struct key_type *type,
3914 +
3915 + if (!IS_ERR(key_ref)) {
3916 + key = key_ref_to_ptr(key_ref);
3917 ++ if (dest_keyring) {
3918 ++ construct_get_dest_keyring(&dest_keyring);
3919 ++ key_link(dest_keyring, key);
3920 ++ key_put(dest_keyring);
3921 ++ }
3922 + } else if (PTR_ERR(key_ref) != -EAGAIN) {
3923 + key = ERR_CAST(key_ref);
3924 + } else {
3925 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
3926 +index 062a8b0..fd831bd 100644
3927 +--- a/sound/pci/hda/hda_intel.c
3928 ++++ b/sound/pci/hda/hda_intel.c
3929 +@@ -2273,6 +2273,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
3930 + SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
3931 + SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
3932 + SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
3933 ++ SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB),
3934 + SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
3935 + {}
3936 + };
3937 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
3938 +index 71b7a96..1a97c81 100644
3939 +--- a/sound/pci/hda/patch_conexant.c
3940 ++++ b/sound/pci/hda/patch_conexant.c
3941 +@@ -1174,9 +1174,10 @@ static int patch_cxt5045(struct hda_codec *codec)
3942 +
3943 + switch (codec->subsystem_id >> 16) {
3944 + case 0x103c:
3945 ++ case 0x1631:
3946 + case 0x1734:
3947 +- /* HP & Fujitsu-Siemens laptops have really bad sound over 0dB
3948 +- * on NID 0x17. Fix max PCM level to 0 dB
3949 ++ /* HP, Packard Bell, & Fujitsu-Siemens laptops have really bad
3950 ++ * sound over 0dB on NID 0x17. Fix max PCM level to 0 dB
3951 + * (originally it has 0x2b steps with 0dB offset 0x14)
3952 + */
3953 + snd_hda_override_amp_caps(codec, 0x17, HDA_INPUT,
3954 +@@ -2471,6 +2472,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
3955 + CXT5066_DELL_LAPTOP),
3956 + SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
3957 + SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
3958 ++ SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
3959 ++ SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
3960 + {}
3961 + };
3962 +
3963 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3964 +index bd8a567..b486daa 100644
3965 +--- a/sound/pci/hda/patch_realtek.c
3966 ++++ b/sound/pci/hda/patch_realtek.c
3967 +@@ -4033,7 +4033,7 @@ static struct snd_pci_quirk alc880_cfg_tbl[] = {
3968 + SND_PCI_QUIRK(0x1695, 0x4012, "EPox EP-5LDA", ALC880_5ST_DIG),
3969 + SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_F1734),
3970 + SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FUJITSU),
3971 +- SND_PCI_QUIRK(0x1734, 0x10ac, "FSC", ALC880_UNIWILL),
3972 ++ SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_F1734),
3973 + SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU),
3974 + SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW),
3975 + SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG),
3976 +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
3977 +index 799ba25..ac2d528 100644
3978 +--- a/sound/pci/hda/patch_sigmatel.c
3979 ++++ b/sound/pci/hda/patch_sigmatel.c
3980 +@@ -1602,6 +1602,10 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
3981 + "Dell Studio 1555", STAC_DELL_M6_DMIC),
3982 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
3983 + "Dell Studio 1557", STAC_DELL_M6_DMIC),
3984 ++ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
3985 ++ "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
3986 ++ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
3987 ++ "Dell Studio 1558", STAC_DELL_M6_BOTH),
3988 + {} /* terminator */
3989 + };
3990 +
3991 +@@ -1725,6 +1729,8 @@ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = {
3992 + "HP HDX", STAC_HP_HDX), /* HDX16 */
3993 + SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3620,
3994 + "HP dv6", STAC_HP_DV5),
3995 ++ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3061,
3996 ++ "HP dv6", STAC_HP_DV5), /* HP dv6-1110ax */
3997 + SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010,
3998 + "HP", STAC_HP_DV5),
3999 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233,
4000 +diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
4001 +index 75283fb..c2311f8 100644
4002 +--- a/sound/pci/maestro3.c
4003 ++++ b/sound/pci/maestro3.c
4004 +@@ -849,6 +849,7 @@ struct snd_m3 {
4005 + struct snd_kcontrol *master_switch;
4006 + struct snd_kcontrol *master_volume;
4007 + struct tasklet_struct hwvol_tq;
4008 ++ unsigned int in_suspend;
4009 +
4010 + #ifdef CONFIG_PM
4011 + u16 *suspend_mem;
4012 +@@ -884,6 +885,7 @@ static struct pci_device_id snd_m3_ids[] = {
4013 + MODULE_DEVICE_TABLE(pci, snd_m3_ids);
4014 +
4015 + static struct snd_pci_quirk m3_amp_quirk_list[] __devinitdata = {
4016 ++ SND_PCI_QUIRK(0x0E11, 0x0094, "Compaq Evo N600c", 0x0c),
4017 + SND_PCI_QUIRK(0x10f7, 0x833e, "Panasonic CF-28", 0x0d),
4018 + SND_PCI_QUIRK(0x10f7, 0x833d, "Panasonic CF-72", 0x0d),
4019 + SND_PCI_QUIRK(0x1033, 0x80f1, "NEC LM800J/7", 0x03),
4020 +@@ -1613,6 +1615,11 @@ static void snd_m3_update_hw_volume(unsigned long private_data)
4021 + outb(0x88, chip->iobase + SHADOW_MIX_REG_MASTER);
4022 + outb(0x88, chip->iobase + HW_VOL_COUNTER_MASTER);
4023 +
4024 ++ /* Ignore spurious HV interrupts during suspend / resume; this avoids
4025 ++ mistaking them for a mute button press. */
4026 ++ if (chip->in_suspend)
4027 ++ return;
4028 ++
4029 + if (!chip->master_switch || !chip->master_volume)
4030 + return;
4031 +
4032 +@@ -2424,6 +2431,7 @@ static int m3_suspend(struct pci_dev *pci, pm_message_t state)
4033 + if (chip->suspend_mem == NULL)
4034 + return 0;
4035 +
4036 ++ chip->in_suspend = 1;
4037 + snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
4038 + snd_pcm_suspend_all(chip->pcm);
4039 + snd_ac97_suspend(chip->ac97);
4040 +@@ -2497,6 +2505,7 @@ static int m3_resume(struct pci_dev *pci)
4041 + snd_m3_hv_init(chip);
4042 +
4043 + snd_power_change_state(card, SNDRV_CTL_POWER_D0);
4044 ++ chip->in_suspend = 0;
4045 + return 0;
4046 + }
4047 + #endif /* CONFIG_PM */
4048
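The XFS hunks above wire xfs_reclaim_inode_shrink() into the kernel's memory
shrinker mechanism. As a point of reference, below is a minimal sketch of the
2.6.33-era shrinker contract that code implements; it is illustrative only and
not part of the patch, and all demo_* names are hypothetical. A ->shrink
callback is queried with nr_to_scan == 0 for the number of reclaimable
objects, is asked to reclaim when nr_to_scan > 0, and returns -1 when the
gfp_mask forbids recursing into the filesystem.

	#include <linux/kernel.h>
	#include <linux/mm.h>
	#include <linux/module.h>

	static int demo_count;	/* hypothetical count of reclaimable objects */

	static int demo_shrink(int nr_to_scan, gfp_t gfp_mask)
	{
		if (nr_to_scan) {
			/* refuse to reclaim from contexts that cannot re-enter the fs */
			if (!(gfp_mask & __GFP_FS))
				return -1;
			/* "reclaim" up to nr_to_scan objects */
			demo_count -= min(nr_to_scan, demo_count);
		}
		return demo_count;	/* objects still reclaimable */
	}

	static struct shrinker demo_shrinker = {
		.shrink	= demo_shrink,
		.seeks	= DEFAULT_SEEKS,
	};

	static int __init demo_init(void)
	{
		register_shrinker(&demo_shrinker);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		unregister_shrinker(&demo_shrinker);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
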
4049 Added: genpatches-2.6/trunk/2.6.33/1003_linux-2.6.33.5.patch
4050 ===================================================================
4051 --- genpatches-2.6/trunk/2.6.33/1003_linux-2.6.33.5.patch (rev 0)
4052 +++ genpatches-2.6/trunk/2.6.33/1003_linux-2.6.33.5.patch 2010-05-27 08:50:54 UTC (rev 1710)
4053 @@ -0,0 +1,1623 @@
4054 +diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
4055 +index 0d07513..bf241be 100644
4056 +--- a/Documentation/filesystems/proc.txt
4057 ++++ b/Documentation/filesystems/proc.txt
4058 +@@ -308,7 +308,7 @@ address perms offset dev inode pathname
4059 + 08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test
4060 + 0804a000-0806b000 rw-p 00000000 00:00 0 [heap]
4061 + a7cb1000-a7cb2000 ---p 00000000 00:00 0
4062 +-a7cb2000-a7eb2000 rw-p 00000000 00:00 0 [threadstack:001ff4b4]
4063 ++a7cb2000-a7eb2000 rw-p 00000000 00:00 0
4064 + a7eb2000-a7eb3000 ---p 00000000 00:00 0
4065 + a7eb3000-a7ed5000 rw-p 00000000 00:00 0
4066 + a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6
4067 +@@ -344,7 +344,6 @@ is not associated with a file:
4068 + [stack] = the stack of the main process
4069 + [vdso] = the "virtual dynamic shared object",
4070 + the kernel system call handler
4071 +- [threadstack:xxxxxxxx] = the stack of the thread, xxxxxxxx is the stack size
4072 +
4073 + or if empty, the mapping is anonymous.
4074 +
4075 +diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
4076 +index 9f4c9d4..bd100fc 100644
4077 +--- a/arch/powerpc/include/asm/hw_irq.h
4078 ++++ b/arch/powerpc/include/asm/hw_irq.h
4079 +@@ -130,43 +130,5 @@ static inline int irqs_disabled_flags(unsigned long flags)
4080 + */
4081 + struct irq_chip;
4082 +
4083 +-#ifdef CONFIG_PERF_EVENTS
4084 +-
4085 +-#ifdef CONFIG_PPC64
4086 +-static inline unsigned long test_perf_event_pending(void)
4087 +-{
4088 +- unsigned long x;
4089 +-
4090 +- asm volatile("lbz %0,%1(13)"
4091 +- : "=r" (x)
4092 +- : "i" (offsetof(struct paca_struct, perf_event_pending)));
4093 +- return x;
4094 +-}
4095 +-
4096 +-static inline void set_perf_event_pending(void)
4097 +-{
4098 +- asm volatile("stb %0,%1(13)" : :
4099 +- "r" (1),
4100 +- "i" (offsetof(struct paca_struct, perf_event_pending)));
4101 +-}
4102 +-
4103 +-static inline void clear_perf_event_pending(void)
4104 +-{
4105 +- asm volatile("stb %0,%1(13)" : :
4106 +- "r" (0),
4107 +- "i" (offsetof(struct paca_struct, perf_event_pending)));
4108 +-}
4109 +-#endif /* CONFIG_PPC64 */
4110 +-
4111 +-#else /* CONFIG_PERF_EVENTS */
4112 +-
4113 +-static inline unsigned long test_perf_event_pending(void)
4114 +-{
4115 +- return 0;
4116 +-}
4117 +-
4118 +-static inline void clear_perf_event_pending(void) {}
4119 +-#endif /* CONFIG_PERF_EVENTS */
4120 +-
4121 + #endif /* __KERNEL__ */
4122 + #endif /* _ASM_POWERPC_HW_IRQ_H */
4123 +diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
4124 +index a6c2b63..11d0668 100644
4125 +--- a/arch/powerpc/kernel/asm-offsets.c
4126 ++++ b/arch/powerpc/kernel/asm-offsets.c
4127 +@@ -133,7 +133,6 @@ int main(void)
4128 + DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
4129 + DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
4130 + DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
4131 +- DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending));
4132 + DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
4133 + #ifdef CONFIG_PPC_MM_SLICES
4134 + DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
4135 +diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
4136 +index bdcb557..afbf400 100644
4137 +--- a/arch/powerpc/kernel/entry_64.S
4138 ++++ b/arch/powerpc/kernel/entry_64.S
4139 +@@ -556,15 +556,6 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
4140 + 2:
4141 + TRACE_AND_RESTORE_IRQ(r5);
4142 +
4143 +-#ifdef CONFIG_PERF_EVENTS
4144 +- /* check paca->perf_event_pending if we're enabling ints */
4145 +- lbz r3,PACAPERFPEND(r13)
4146 +- and. r3,r3,r5
4147 +- beq 27f
4148 +- bl .perf_event_do_pending
4149 +-27:
4150 +-#endif /* CONFIG_PERF_EVENTS */
4151 +-
4152 + /* extract EE bit and use it to restore paca->hard_enabled */
4153 + ld r3,_MSR(r1)
4154 + rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
4155 +diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
4156 +index 9040330..dee83b8 100644
4157 +--- a/arch/powerpc/kernel/irq.c
4158 ++++ b/arch/powerpc/kernel/irq.c
4159 +@@ -53,7 +53,6 @@
4160 + #include <linux/bootmem.h>
4161 + #include <linux/pci.h>
4162 + #include <linux/debugfs.h>
4163 +-#include <linux/perf_event.h>
4164 +
4165 + #include <asm/uaccess.h>
4166 + #include <asm/system.h>
4167 +@@ -143,11 +142,6 @@ notrace void raw_local_irq_restore(unsigned long en)
4168 + }
4169 + #endif /* CONFIG_PPC_STD_MMU_64 */
4170 +
4171 +- if (test_perf_event_pending()) {
4172 +- clear_perf_event_pending();
4173 +- perf_event_do_pending();
4174 +- }
4175 +-
4176 + /*
4177 + * if (get_paca()->hard_enabled) return;
4178 + * But again we need to take care that gcc gets hard_enabled directly
4179 +diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
4180 +index 6c6093d..6f174e7 100644
4181 +--- a/arch/powerpc/kernel/time.c
4182 ++++ b/arch/powerpc/kernel/time.c
4183 +@@ -532,25 +532,60 @@ void __init iSeries_time_init_early(void)
4184 + }
4185 + #endif /* CONFIG_PPC_ISERIES */
4186 +
4187 +-#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32)
4188 +-DEFINE_PER_CPU(u8, perf_event_pending);
4189 ++#ifdef CONFIG_PERF_EVENTS
4190 +
4191 +-void set_perf_event_pending(void)
4192 ++/*
4193 ++ * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
4194 ++ */
4195 ++#ifdef CONFIG_PPC64
4196 ++static inline unsigned long test_perf_event_pending(void)
4197 + {
4198 +- get_cpu_var(perf_event_pending) = 1;
4199 +- set_dec(1);
4200 +- put_cpu_var(perf_event_pending);
4201 ++ unsigned long x;
4202 ++
4203 ++ asm volatile("lbz %0,%1(13)"
4204 ++ : "=r" (x)
4205 ++ : "i" (offsetof(struct paca_struct, perf_event_pending)));
4206 ++ return x;
4207 + }
4208 +
4209 ++static inline void set_perf_event_pending_flag(void)
4210 ++{
4211 ++ asm volatile("stb %0,%1(13)" : :
4212 ++ "r" (1),
4213 ++ "i" (offsetof(struct paca_struct, perf_event_pending)));
4214 ++}
4215 ++
4216 ++static inline void clear_perf_event_pending(void)
4217 ++{
4218 ++ asm volatile("stb %0,%1(13)" : :
4219 ++ "r" (0),
4220 ++ "i" (offsetof(struct paca_struct, perf_event_pending)));
4221 ++}
4222 ++
4223 ++#else /* 32-bit */
4224 ++
4225 ++DEFINE_PER_CPU(u8, perf_event_pending);
4226 ++
4227 ++#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1
4228 + #define test_perf_event_pending() __get_cpu_var(perf_event_pending)
4229 + #define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
4230 +
4231 +-#else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
4232 ++#endif /* 32 vs 64 bit */
4233 ++
4234 ++void set_perf_event_pending(void)
4235 ++{
4236 ++ preempt_disable();
4237 ++ set_perf_event_pending_flag();
4238 ++ set_dec(1);
4239 ++ preempt_enable();
4240 ++}
4241 ++
4242 ++#else /* CONFIG_PERF_EVENTS */
4243 +
4244 + #define test_perf_event_pending() 0
4245 + #define clear_perf_event_pending()
4246 +
4247 +-#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
4248 ++#endif /* CONFIG_PERF_EVENTS */
4249 +
4250 + /*
4251 + * For iSeries shared processors, we have to let the hypervisor
4252 +@@ -580,10 +615,6 @@ void timer_interrupt(struct pt_regs * regs)
4253 + set_dec(DECREMENTER_MAX);
4254 +
4255 + #ifdef CONFIG_PPC32
4256 +- if (test_perf_event_pending()) {
4257 +- clear_perf_event_pending();
4258 +- perf_event_do_pending();
4259 +- }
4260 + if (atomic_read(&ppc_n_lost_interrupts) != 0)
4261 + do_IRQ(regs);
4262 + #endif
4263 +@@ -602,6 +633,11 @@ void timer_interrupt(struct pt_regs * regs)
4264 +
4265 + calculate_steal_time();
4266 +
4267 ++ if (test_perf_event_pending()) {
4268 ++ clear_perf_event_pending();
4269 ++ perf_event_do_pending();
4270 ++ }
4271 ++
4272 + #ifdef CONFIG_PPC_ISERIES
4273 + if (firmware_has_feature(FW_FEATURE_ISERIES))
4274 + get_lppaca()->int_dword.fields.decr_int = 0;
4275 +diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
4276 +index 7cf4642..11e94de 100644
4277 +--- a/arch/s390/kernel/ptrace.c
4278 ++++ b/arch/s390/kernel/ptrace.c
4279 +@@ -640,7 +640,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
4280 +
4281 + asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
4282 + {
4283 +- long ret;
4284 ++ long ret = 0;
4285 +
4286 + /* Do the secure computing check first. */
4287 + secure_computing(regs->gprs[2]);
4288 +@@ -649,7 +649,6 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
4289 + * The sysc_tracesys code in entry.S stored the system
4290 + * call number to gprs[2].
4291 + */
4292 +- ret = regs->gprs[2];
4293 + if (test_thread_flag(TIF_SYSCALL_TRACE) &&
4294 + (tracehook_report_syscall_entry(regs) ||
4295 + regs->gprs[2] >= NR_syscalls)) {
4296 +@@ -671,7 +670,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
4297 + regs->gprs[2], regs->orig_gpr2,
4298 + regs->gprs[3], regs->gprs[4],
4299 + regs->gprs[5]);
4300 +- return ret;
4301 ++ return ret ?: regs->gprs[2];
4302 + }
4303 +
4304 + asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
4305 +diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/k8.h
4306 +index f70e600..af00bd1 100644
4307 +--- a/arch/x86/include/asm/k8.h
4308 ++++ b/arch/x86/include/asm/k8.h
4309 +@@ -16,11 +16,16 @@ extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
4310 + extern int k8_scan_nodes(void);
4311 +
4312 + #ifdef CONFIG_K8_NB
4313 ++extern int num_k8_northbridges;
4314 ++
4315 + static inline struct pci_dev *node_to_k8_nb_misc(int node)
4316 + {
4317 + return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
4318 + }
4319 ++
4320 + #else
4321 ++#define num_k8_northbridges 0
4322 ++
4323 + static inline struct pci_dev *node_to_k8_nb_misc(int node)
4324 + {
4325 + return NULL;
4326 +diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
4327 +index d440123..581924b 100644
4328 +--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
4329 ++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
4330 +@@ -338,6 +338,10 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
4331 + (boot_cpu_data.x86_mask < 0x1)))
4332 + return;
4333 +
4334 ++ /* not in virtualized environments, where no K8 northbridge is visible */
4335 ++ if (num_k8_northbridges == 0)
4336 ++ return;
4337 ++
4338 + this_leaf->can_disable = true;
4339 + this_leaf->l3_indices = amd_calc_l3_indices();
4340 + }
4341 +diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
4342 +index 999c8a6..0571b72 100644
4343 +--- a/arch/x86/kernel/process.c
4344 ++++ b/arch/x86/kernel/process.c
4345 +@@ -539,11 +539,13 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
4346 + * check OSVW bit for CPUs that are not affected
4347 + * by erratum #400
4348 + */
4349 +- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
4350 +- if (val >= 2) {
4351 +- rdmsrl(MSR_AMD64_OSVW_STATUS, val);
4352 +- if (!(val & BIT(1)))
4353 +- goto no_c1e_idle;
4354 ++ if (cpu_has(c, X86_FEATURE_OSVW)) {
4355 ++ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
4356 ++ if (val >= 2) {
4357 ++ rdmsrl(MSR_AMD64_OSVW_STATUS, val);
4358 ++ if (!(val & BIT(1)))
4359 ++ goto no_c1e_idle;
4360 ++ }
4361 + }
4362 + return 1;
4363 + }
4364 +diff --git a/crypto/authenc.c b/crypto/authenc.c
4365 +index 4d6f49a..0d54de9 100644
4366 +--- a/crypto/authenc.c
4367 ++++ b/crypto/authenc.c
4368 +@@ -46,6 +46,12 @@ struct authenc_request_ctx {
4369 + char tail[];
4370 + };
4371 +
4372 ++static void authenc_request_complete(struct aead_request *req, int err)
4373 ++{
4374 ++ if (err != -EINPROGRESS)
4375 ++ aead_request_complete(req, err);
4376 ++}
4377 ++
4378 + static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
4379 + unsigned int keylen)
4380 + {
4381 +@@ -142,7 +148,7 @@ static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
4382 + crypto_aead_authsize(authenc), 1);
4383 +
4384 + out:
4385 +- aead_request_complete(req, err);
4386 ++ authenc_request_complete(req, err);
4387 + }
4388 +
4389 + static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
4390 +@@ -208,7 +214,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
4391 + err = crypto_ablkcipher_decrypt(abreq);
4392 +
4393 + out:
4394 +- aead_request_complete(req, err);
4395 ++ authenc_request_complete(req, err);
4396 + }
4397 +
4398 + static void authenc_verify_ahash_done(struct crypto_async_request *areq,
4399 +@@ -245,7 +251,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
4400 + err = crypto_ablkcipher_decrypt(abreq);
4401 +
4402 + out:
4403 +- aead_request_complete(req, err);
4404 ++ authenc_request_complete(req, err);
4405 + }
4406 +
4407 + static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags)
4408 +@@ -379,7 +385,7 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
4409 + err = crypto_authenc_genicv(areq, iv, 0);
4410 + }
4411 +
4412 +- aead_request_complete(areq, err);
4413 ++ authenc_request_complete(areq, err);
4414 + }
4415 +
4416 + static int crypto_authenc_encrypt(struct aead_request *req)
4417 +@@ -418,7 +424,7 @@ static void crypto_authenc_givencrypt_done(struct crypto_async_request *req,
4418 + err = crypto_authenc_genicv(areq, greq->giv, 0);
4419 + }
4420 +
4421 +- aead_request_complete(areq, err);
4422 ++ authenc_request_complete(areq, err);
4423 + }
4424 +
4425 + static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
4426 +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
4427 +index 7c85265..9ed9292 100644
4428 +--- a/drivers/acpi/sleep.c
4429 ++++ b/drivers/acpi/sleep.c
4430 +@@ -475,101 +475,13 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
4431 + },
4432 + {
4433 + .callback = init_set_sci_en_on_resume,
4434 +- .ident = "Lenovo ThinkPad X201",
4435 ++ .ident = "Lenovo ThinkPad X201[s]",
4436 + .matches = {
4437 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4438 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
4439 + },
4440 + },
4441 + {
4442 +- .callback = init_set_sci_en_on_resume,
4443 +- .ident = "Lenovo ThinkPad X201",
4444 +- .matches = {
4445 +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4446 +- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
4447 +- },
4448 +- },
4449 +- {
4450 +- .callback = init_set_sci_en_on_resume,
4451 +- .ident = "Lenovo ThinkPad T410",
4452 +- .matches = {
4453 +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4454 +- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
4455 +- },
4456 +- },
4457 +- {
4458 +- .callback = init_set_sci_en_on_resume,
4459 +- .ident = "Lenovo ThinkPad T510",
4460 +- .matches = {
4461 +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4462 +- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
4463 +- },
4464 +- },
4465 +- {
4466 +- .callback = init_set_sci_en_on_resume,
4467 +- .ident = "Lenovo ThinkPad W510",
4468 +- .matches = {
4469 +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4470 +- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
4471 +- },
4472 +- },
4473 +- {
4474 +- .callback = init_set_sci_en_on_resume,
4475 +- .ident = "Lenovo ThinkPad X201",
4476 +- .matches = {
4477 +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4478 +- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
4479 +- },
4480 +- },
4481 +- {
4482 +- .callback = init_set_sci_en_on_resume,
4483 +- .ident = "Lenovo ThinkPad X201",
4484 +- .matches = {
4485 +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4486 +- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
4487 +- },
4488 +- },
4489 +- {
4490 +- .callback = init_set_sci_en_on_resume,
4491 +- .ident = "Lenovo ThinkPad T410",
4492 +- .matches = {
4493 +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4494 +- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
4495 +- },
4496 +- },
4497 +- {
4498 +- .callback = init_set_sci_en_on_resume,
4499 +- .ident = "Lenovo ThinkPad T510",
4500 +- .matches = {
4501 +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4502 +- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
4503 +- },
4504 +- },
4505 +- {
4506 +- .callback = init_set_sci_en_on_resume,
4507 +- .ident = "Lenovo ThinkPad W510",
4508 +- .matches = {
4509 +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4510 +- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
4511 +- },
4512 +- },
4513 +- {
4514 +- .callback = init_set_sci_en_on_resume,
4515 +- .ident = "Lenovo ThinkPad X201",
4516 +- .matches = {
4517 +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4518 +- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
4519 +- },
4520 +- },
4521 +- {
4522 +- .callback = init_set_sci_en_on_resume,
4523 +- .ident = "Lenovo ThinkPad X201",
4524 +- .matches = {
4525 +- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4526 +- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
4527 +- },
4528 +- },
4529 +- {
4530 + .callback = init_old_suspend_ordering,
4531 + .ident = "Panasonic CF51-2L",
4532 + .matches = {
4533 +diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
4534 +index 76253cf..9af6766 100644
4535 +--- a/drivers/char/tty_io.c
4536 ++++ b/drivers/char/tty_io.c
4537 +@@ -1875,6 +1875,7 @@ got_driver:
4538 + */
4539 + if (filp->f_op == &hung_up_tty_fops)
4540 + filp->f_op = &tty_fops;
4541 ++ unlock_kernel();
4542 + goto retry_open;
4543 + }
4544 + unlock_kernel();
4545 +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
4546 +index cf4cb3e..4746bfe 100644
4547 +--- a/drivers/gpu/drm/i915/i915_drv.c
4548 ++++ b/drivers/gpu/drm/i915/i915_drv.c
4549 +@@ -79,14 +79,14 @@ const static struct intel_device_info intel_i915g_info = {
4550 + .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
4551 + };
4552 + const static struct intel_device_info intel_i915gm_info = {
4553 +- .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
4554 ++ .is_i9xx = 1, .is_mobile = 1,
4555 + .cursor_needs_physical = 1,
4556 + };
4557 + const static struct intel_device_info intel_i945g_info = {
4558 + .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
4559 + };
4560 + const static struct intel_device_info intel_i945gm_info = {
4561 +- .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
4562 ++ .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
4563 + .has_hotplug = 1, .cursor_needs_physical = 1,
4564 + };
4565 +
4566 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
4567 +index 16ce3ba..0b33757 100644
4568 +--- a/drivers/gpu/drm/i915/i915_drv.h
4569 ++++ b/drivers/gpu/drm/i915/i915_drv.h
4570 +@@ -206,11 +206,14 @@ typedef struct drm_i915_private {
4571 +
4572 + drm_dma_handle_t *status_page_dmah;
4573 + void *hw_status_page;
4574 ++ void *seqno_page;
4575 + dma_addr_t dma_status_page;
4576 + uint32_t counter;
4577 + unsigned int status_gfx_addr;
4578 ++ unsigned int seqno_gfx_addr;
4579 + drm_local_map_t hws_map;
4580 + struct drm_gem_object *hws_obj;
4581 ++ struct drm_gem_object *seqno_obj;
4582 + struct drm_gem_object *pwrctx;
4583 +
4584 + struct resource mch_res;
4585 +@@ -1090,6 +1093,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
4586 +
4587 + #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
4588 + IS_GEN6(dev))
4589 ++#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
4590 +
4591 + #define PRIMARY_RINGBUFFER_SIZE (128*1024)
4592 +
4593 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
4594 +index 6458400..c00c978 100644
4595 +--- a/drivers/gpu/drm/i915/i915_gem.c
4596 ++++ b/drivers/gpu/drm/i915/i915_gem.c
4597 +@@ -1559,6 +1559,13 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
4598 + i915_verify_inactive(dev, __FILE__, __LINE__);
4599 + }
4600 +
4601 ++#define PIPE_CONTROL_FLUSH(addr) \
4602 ++ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
4603 ++ PIPE_CONTROL_DEPTH_STALL); \
4604 ++ OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
4605 ++ OUT_RING(0); \
4606 ++ OUT_RING(0); \
4607 ++
4608 + /**
4609 + * Creates a new sequence number, emitting a write of it to the status page
4610 + * plus an interrupt, which will trigger i915_user_interrupt_handler.
4611 +@@ -1593,13 +1600,47 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
4612 + if (dev_priv->mm.next_gem_seqno == 0)
4613 + dev_priv->mm.next_gem_seqno++;
4614 +
4615 +- BEGIN_LP_RING(4);
4616 +- OUT_RING(MI_STORE_DWORD_INDEX);
4617 +- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
4618 +- OUT_RING(seqno);
4619 ++ if (HAS_PIPE_CONTROL(dev)) {
4620 ++ u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
4621 +
4622 +- OUT_RING(MI_USER_INTERRUPT);
4623 +- ADVANCE_LP_RING();
4624 ++ /*
4625 ++ * Workaround qword write incoherence by flushing the
4626 ++ * PIPE_NOTIFY buffers out to memory before requesting
4627 ++ * an interrupt.
4628 ++ */
4629 ++ BEGIN_LP_RING(32);
4630 ++ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
4631 ++ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
4632 ++ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
4633 ++ OUT_RING(seqno);
4634 ++ OUT_RING(0);
4635 ++ PIPE_CONTROL_FLUSH(scratch_addr);
4636 ++ scratch_addr += 128; /* write to separate cachelines */
4637 ++ PIPE_CONTROL_FLUSH(scratch_addr);
4638 ++ scratch_addr += 128;
4639 ++ PIPE_CONTROL_FLUSH(scratch_addr);
4640 ++ scratch_addr += 128;
4641 ++ PIPE_CONTROL_FLUSH(scratch_addr);
4642 ++ scratch_addr += 128;
4643 ++ PIPE_CONTROL_FLUSH(scratch_addr);
4644 ++ scratch_addr += 128;
4645 ++ PIPE_CONTROL_FLUSH(scratch_addr);
4646 ++ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
4647 ++ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
4648 ++ PIPE_CONTROL_NOTIFY);
4649 ++ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
4650 ++ OUT_RING(seqno);
4651 ++ OUT_RING(0);
4652 ++ ADVANCE_LP_RING();
4653 ++ } else {
4654 ++ BEGIN_LP_RING(4);
4655 ++ OUT_RING(MI_STORE_DWORD_INDEX);
4656 ++ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
4657 ++ OUT_RING(seqno);
4658 ++
4659 ++ OUT_RING(MI_USER_INTERRUPT);
4660 ++ ADVANCE_LP_RING();
4661 ++ }
4662 +
4663 + DRM_DEBUG_DRIVER("%d\n", seqno);
4664 +
4665 +@@ -1744,7 +1785,10 @@ i915_get_gem_seqno(struct drm_device *dev)
4666 + {
4667 + drm_i915_private_t *dev_priv = dev->dev_private;
4668 +
4669 +- return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
4670 ++ if (HAS_PIPE_CONTROL(dev))
4671 ++ return ((volatile u32 *)(dev_priv->seqno_page))[0];
4672 ++ else
4673 ++ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
4674 + }
4675 +
4676 + /**
4677 +@@ -4576,6 +4620,49 @@ i915_gem_idle(struct drm_device *dev)
4678 + return 0;
4679 + }
4680 +
4681 ++/*
4682 ++ * 965+ chipsets support PIPE_CONTROL commands, which provide finer-grained control
4683 ++ * over cache flushing.
4684 ++ */
4685 ++static int
4686 ++i915_gem_init_pipe_control(struct drm_device *dev)
4687 ++{
4688 ++ drm_i915_private_t *dev_priv = dev->dev_private;
4689 ++ struct drm_gem_object *obj;
4690 ++ struct drm_i915_gem_object *obj_priv;
4691 ++ int ret;
4692 ++
4693 ++ obj = drm_gem_object_alloc(dev, 4096);
4694 ++ if (obj == NULL) {
4695 ++ DRM_ERROR("Failed to allocate seqno page\n");
4696 ++ ret = -ENOMEM;
4697 ++ goto err;
4698 ++ }
4699 ++ obj_priv = obj->driver_private;
4700 ++ obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4701 ++
4702 ++ ret = i915_gem_object_pin(obj, 4096);
4703 ++ if (ret)
4704 ++ goto err_unref;
4705 ++
4706 ++ dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4707 ++ dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4708 ++ if (dev_priv->seqno_page == NULL)
4709 ++ goto err_unpin;
4710 ++
4711 ++ dev_priv->seqno_obj = obj;
4712 ++ memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4713 ++
4714 ++ return 0;
4715 ++
4716 ++err_unpin:
4717 ++ i915_gem_object_unpin(obj);
4718 ++err_unref:
4719 ++ drm_gem_object_unreference(obj);
4720 ++err:
4721 ++ return ret;
4722 ++}
4723 ++
4724 + static int
4725 + i915_gem_init_hws(struct drm_device *dev)
4726 + {
4727 +@@ -4593,7 +4680,8 @@ i915_gem_init_hws(struct drm_device *dev)
4728 + obj = drm_gem_object_alloc(dev, 4096);
4729 + if (obj == NULL) {
4730 + DRM_ERROR("Failed to allocate status page\n");
4731 +- return -ENOMEM;
4732 ++ ret = -ENOMEM;
4733 ++ goto err;
4734 + }
4735 + obj_priv = obj->driver_private;
4736 + obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4737 +@@ -4601,7 +4689,7 @@ i915_gem_init_hws(struct drm_device *dev)
4738 + ret = i915_gem_object_pin(obj, 4096);
4739 + if (ret != 0) {
4740 + drm_gem_object_unreference(obj);
4741 +- return ret;
4742 ++ goto err_unref;
4743 + }
4744 +
4745 + dev_priv->status_gfx_addr = obj_priv->gtt_offset;
4746 +@@ -4610,10 +4698,16 @@ i915_gem_init_hws(struct drm_device *dev)
4747 + if (dev_priv->hw_status_page == NULL) {
4748 + DRM_ERROR("Failed to map status page.\n");
4749 + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4750 +- i915_gem_object_unpin(obj);
4751 +- drm_gem_object_unreference(obj);
4752 +- return -EINVAL;
4753 ++ ret = -EINVAL;
4754 ++ goto err_unpin;
4755 + }
4756 ++
4757 ++ if (HAS_PIPE_CONTROL(dev)) {
4758 ++ ret = i915_gem_init_pipe_control(dev);
4759 ++ if (ret)
4760 ++ goto err_unpin;
4761 ++ }
4762 ++
4763 + dev_priv->hws_obj = obj;
4764 + memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4765 + I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
4766 +@@ -4621,6 +4715,30 @@ i915_gem_init_hws(struct drm_device *dev)
4767 + DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4768 +
4769 + return 0;
4770 ++
4771 ++err_unpin:
4772 ++ i915_gem_object_unpin(obj);
4773 ++err_unref:
4774 ++ drm_gem_object_unreference(obj);
4775 ++err:
4776 ++ return ret;
4777 ++}
4778 ++
4779 ++static void
4780 ++i915_gem_cleanup_pipe_control(struct drm_device *dev)
4781 ++{
4782 ++ drm_i915_private_t *dev_priv = dev->dev_private;
4783 ++ struct drm_gem_object *obj;
4784 ++ struct drm_i915_gem_object *obj_priv;
4785 ++
4786 ++ obj = dev_priv->seqno_obj;
4787 ++ obj_priv = obj->driver_private;
4788 ++ kunmap(obj_priv->pages[0]);
4789 ++ i915_gem_object_unpin(obj);
4790 ++ drm_gem_object_unreference(obj);
4791 ++ dev_priv->seqno_obj = NULL;
4792 ++
4793 ++ dev_priv->seqno_page = NULL;
4794 + }
4795 +
4796 + static void
4797 +@@ -4644,6 +4762,9 @@ i915_gem_cleanup_hws(struct drm_device *dev)
4798 + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4799 + dev_priv->hw_status_page = NULL;
4800 +
4801 ++ if (HAS_PIPE_CONTROL(dev))
4802 ++ i915_gem_cleanup_pipe_control(dev);
4803 ++
4804 + /* Write high address into HWS_PGA when disabling. */
4805 + I915_WRITE(HWS_PGA, 0x1ffff000);
4806 + }
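
One hazard worth flagging in the PIPE_CONTROL_FLUSH macro added above: it expands to four statements (and carries a stray line-continuation backslash after the last one) without a do { } while (0) wrapper. That is harmless at its call sites in i915_add_request(), which are all unconditional statements, but the pattern breaks silently under an unbraced if. A minimal stand-alone C sketch of the hazard (the EMIT2_* macros are illustrative stand-ins, not kernel code):

    #include <stdio.h>

    /* expands to two statements; an unbraced `if` guards only the first */
    #define EMIT2_BAD(a, b)  printf("%d\n", (a)); printf("%d\n", (b))
    /* the do { } while (0) wrapper keeps the expansion a single statement */
    #define EMIT2_OK(a, b)   do { printf("%d\n", (a)); printf("%d\n", (b)); } while (0)

    int main(void)
    {
        if (0)
            EMIT2_BAD(1, 2);   /* prints 2 anyway */
        if (0)
            EMIT2_OK(1, 2);    /* prints nothing, as intended */
        return 0;
    }
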
4807 +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
4808 +index 032f667..d6466d5 100644
4809 +--- a/drivers/gpu/drm/i915/i915_irq.c
4810 ++++ b/drivers/gpu/drm/i915/i915_irq.c
4811 +@@ -297,7 +297,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
4812 + READ_BREADCRUMB(dev_priv);
4813 + }
4814 +
4815 +- if (gt_iir & GT_USER_INTERRUPT) {
4816 ++ if (gt_iir & GT_PIPE_NOTIFY) {
4817 + u32 seqno = i915_get_gem_seqno(dev);
4818 + dev_priv->mm.irq_gem_seqno = seqno;
4819 + trace_i915_gem_request_complete(dev, seqno);
4820 +@@ -738,7 +738,7 @@ void i915_user_irq_get(struct drm_device *dev)
4821 + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
4822 + if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
4823 + if (HAS_PCH_SPLIT(dev))
4824 +- ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
4825 ++ ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
4826 + else
4827 + i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
4828 + }
4829 +@@ -754,7 +754,7 @@ void i915_user_irq_put(struct drm_device *dev)
4830 + BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
4831 + if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
4832 + if (HAS_PCH_SPLIT(dev))
4833 +- ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
4834 ++ ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
4835 + else
4836 + i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
4837 + }
4838 +@@ -1034,7 +1034,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
4839 + /* enable kind of interrupts always enabled */
4840 + u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
4841 + DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
4842 +- u32 render_mask = GT_USER_INTERRUPT;
4843 ++ u32 render_mask = GT_PIPE_NOTIFY;
4844 + u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
4845 + SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
4846 +
4847 +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
4848 +index fd95bdf..30a2322 100644
4849 +--- a/drivers/gpu/drm/i915/i915_reg.h
4850 ++++ b/drivers/gpu/drm/i915/i915_reg.h
4851 +@@ -210,6 +210,16 @@
4852 + #define ASYNC_FLIP (1<<22)
4853 + #define DISPLAY_PLANE_A (0<<20)
4854 + #define DISPLAY_PLANE_B (1<<20)
4855 ++#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
4856 ++#define PIPE_CONTROL_QW_WRITE (1<<14)
4857 ++#define PIPE_CONTROL_DEPTH_STALL (1<<13)
4858 ++#define PIPE_CONTROL_WC_FLUSH (1<<12)
4859 ++#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
4860 ++#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
4861 ++#define PIPE_CONTROL_ISP_DIS (1<<9)
4862 ++#define PIPE_CONTROL_NOTIFY (1<<8)
4863 ++#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
4864 ++#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
4865 +
4866 + /*
4867 + * Fence registers
4868 +@@ -2111,6 +2121,7 @@
4869 + #define DEIER 0x4400c
4870 +
4871 + /* GT interrupt */
4872 ++#define GT_PIPE_NOTIFY (1 << 4)
4873 + #define GT_SYNC_STATUS (1 << 2)
4874 + #define GT_USER_INTERRUPT (1 << 0)
4875 +
4876 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
4877 +index 4b2458d..3f00902 100644
4878 +--- a/drivers/gpu/drm/i915/intel_display.c
4879 ++++ b/drivers/gpu/drm/i915/intel_display.c
4880 +@@ -4683,7 +4683,7 @@ static void intel_init_display(struct drm_device *dev)
4881 + dev_priv->display.fbc_enabled = g4x_fbc_enabled;
4882 + dev_priv->display.enable_fbc = g4x_enable_fbc;
4883 + dev_priv->display.disable_fbc = g4x_disable_fbc;
4884 +- } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) {
4885 ++ } else if (IS_I965GM(dev)) {
4886 + dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
4887 + dev_priv->display.enable_fbc = i8xx_enable_fbc;
4888 + dev_priv->display.disable_fbc = i8xx_disable_fbc;
4889 +diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
4890 +index be475e8..f16d60f 100644
4891 +--- a/drivers/hwmon/hp_accel.c
4892 ++++ b/drivers/hwmon/hp_accel.c
4893 +@@ -324,8 +324,8 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
4894 + lis3lv02d_joystick_disable();
4895 + lis3lv02d_poweroff(&lis3_dev);
4896 +
4897 +- flush_work(&hpled_led.work);
4898 + led_classdev_unregister(&hpled_led.led_classdev);
4899 ++ flush_work(&hpled_led.work);
4900 +
4901 + return lis3lv02d_remove_fs(&lis3_dev);
4902 + }
4903 +diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
4904 +index 8072128..ee337df 100644
4905 +--- a/drivers/mmc/host/atmel-mci.c
4906 ++++ b/drivers/mmc/host/atmel-mci.c
4907 +@@ -568,9 +568,10 @@ static void atmci_dma_cleanup(struct atmel_mci *host)
4908 + {
4909 + struct mmc_data *data = host->data;
4910 +
4911 +- dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
4912 +- ((data->flags & MMC_DATA_WRITE)
4913 +- ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
4914 ++ if (data)
4915 ++ dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
4916 ++ ((data->flags & MMC_DATA_WRITE)
4917 ++ ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
4918 + }
4919 +
4920 + static void atmci_stop_dma(struct atmel_mci *host)
4921 +@@ -1098,8 +1099,8 @@ static void atmci_command_complete(struct atmel_mci *host,
4922 + "command error: status=0x%08x\n", status);
4923 +
4924 + if (cmd->data) {
4925 +- host->data = NULL;
4926 + atmci_stop_dma(host);
4927 ++ host->data = NULL;
4928 + mci_writel(host, IDR, MCI_NOTBUSY
4929 + | MCI_TXRDY | MCI_RXRDY
4930 + | ATMCI_DATA_ERROR_FLAGS);
4931 +@@ -1292,6 +1293,7 @@ static void atmci_tasklet_func(unsigned long priv)
4932 + } else {
4933 + data->bytes_xfered = data->blocks * data->blksz;
4934 + data->error = 0;
4935 ++ mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS);
4936 + }
4937 +
4938 + if (!data->stop) {
4939 +@@ -1750,13 +1752,13 @@ static int __init atmci_probe(struct platform_device *pdev)
4940 + ret = -ENODEV;
4941 + if (pdata->slot[0].bus_width) {
4942 + ret = atmci_init_slot(host, &pdata->slot[0],
4943 +- MCI_SDCSEL_SLOT_A, 0);
4944 ++ 0, MCI_SDCSEL_SLOT_A);
4945 + if (!ret)
4946 + nr_slots++;
4947 + }
4948 + if (pdata->slot[1].bus_width) {
4949 + ret = atmci_init_slot(host, &pdata->slot[1],
4950 +- MCI_SDCSEL_SLOT_B, 1);
4951 ++ 1, MCI_SDCSEL_SLOT_B);
4952 + if (!ret)
4953 + nr_slots++;
4954 + }
4955 +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
4956 +index c3ce920..8b7c267 100644
4957 +--- a/drivers/net/wireless/ath/ath9k/xmit.c
4958 ++++ b/drivers/net/wireless/ath/ath9k/xmit.c
4959 +@@ -2244,7 +2244,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
4960 + if (ATH_TXQ_SETUP(sc, i)) {
4961 + txq = &sc->tx.txq[i];
4962 +
4963 +- spin_lock_bh(&txq->axq_lock);
4964 ++ spin_lock(&txq->axq_lock);
4965 +
4966 + list_for_each_entry_safe(ac,
4967 + ac_tmp, &txq->axq_acq, list) {
4968 +@@ -2265,7 +2265,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
4969 + }
4970 + }
4971 +
4972 +- spin_unlock_bh(&txq->axq_lock);
4973 ++ spin_unlock(&txq->axq_lock);
4974 + }
4975 + }
4976 + }
4977 +diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
4978 +index 3b4c5a4..82c1d2e 100644
4979 +--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
4980 ++++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
4981 +@@ -581,6 +581,11 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
4982 +
4983 + iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
4984 +
4986 ++ /* make sure all queues are not stopped */
4986 ++ memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
4987 ++ for (i = 0; i < 4; i++)
4988 ++ atomic_set(&priv->queue_stop_count[i], 0);
4989 ++
4990 + /* reset to 0 to enable all the queue first */
4991 + priv->txq_ctx_active_msk = 0;
4992 + /* Map each Tx/cmd queue to its corresponding fifo */
4993 +diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
4994 +index c610e5f..f7d41c7 100644
4995 +--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
4996 ++++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
4997 +@@ -657,6 +657,11 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
4998 +
4999 + iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
5000 +
5002 ++ /* make sure all queues are not stopped */
5002 ++ memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
5003 ++ for (i = 0; i < 4; i++)
5004 ++ atomic_set(&priv->queue_stop_count[i], 0);
5005 ++
5006 + /* reset to 0 to enable all the queue first */
5007 + priv->txq_ctx_active_msk = 0;
5008 + /* map qos queues to fifos one-to-one */
5009 +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
5010 +index b93e491..3534d86 100644
5011 +--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
5012 ++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
5013 +@@ -298,10 +298,23 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
5014 + struct iwl_lq_sta *lq_data, u8 tid,
5015 + struct ieee80211_sta *sta)
5016 + {
5017 ++ int ret;
5018 ++
5019 + if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
5020 + IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
5021 + sta->addr, tid);
5022 +- ieee80211_start_tx_ba_session(sta, tid);
5023 ++ ret = ieee80211_start_tx_ba_session(sta, tid);
5024 ++ if (ret == -EAGAIN) {
5025 ++ /*
5026 ++ * driver and mac80211 are out of sync;
5027 ++ * this might be caused by reloading firmware.
5028 ++ * Stop the Tx BA session here.
5029 ++ */
5030 ++ IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n",
5031 ++ tid);
5032 ++ ret = ieee80211_stop_tx_ba_session(sta, tid,
5033 ++ WLAN_BACK_INITIATOR);
5034 ++ }
5035 + }
5036 + }
5037 +
5038 +diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
5039 +index 88470fb..e0ce039 100644
5040 +--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
5041 ++++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
5042 +@@ -821,8 +821,10 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
5043 + hdr->seq_ctrl |= cpu_to_le16(seq_number);
5044 + seq_number += 0x10;
5045 + /* aggregation is on for this <sta,tid> */
5046 +- if (info->flags & IEEE80211_TX_CTL_AMPDU)
5047 ++ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
5048 ++ priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
5049 + txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
5050 ++ }
5051 + }
5052 +
5053 + txq = &priv->txq[txq_id];
5054 +@@ -1347,7 +1349,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
5055 + {
5056 + int tx_fifo_id, txq_id, sta_id, ssn = -1;
5057 + struct iwl_tid_data *tid_data;
5058 +- int ret, write_ptr, read_ptr;
5059 ++ int write_ptr, read_ptr;
5060 + unsigned long flags;
5061 +
5062 + if (!ra) {
5063 +@@ -1399,13 +1401,17 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
5064 + priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
5065 +
5066 + spin_lock_irqsave(&priv->lock, flags);
5067 +- ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
5068 ++ /*
5069 ++ * The only reason this call can fail is a queue number out of range,
5070 ++ * which can happen if uCode is reloaded and all the station
5071 ++ * information is lost. If it is outside the range, there is no need
5072 ++ * to deactivate the uCode queue; just return "success" to allow
5073 ++ * mac80211 to clean up its own data.
5074 ++ */
5075 ++ priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
5076 + tx_fifo_id);
5077 + spin_unlock_irqrestore(&priv->lock, flags);
5078 +
5079 +- if (ret)
5080 +- return ret;
5081 +-
5082 + ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
5083 +
5084 + return 0;
5085 +diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
5086 +index 5905936..e4bd795 100644
5087 +--- a/drivers/s390/block/dasd.c
5088 ++++ b/drivers/s390/block/dasd.c
5089 +@@ -35,6 +35,9 @@
5090 + */
5091 + #define DASD_CHANQ_MAX_SIZE 4
5092 +
5093 ++#define DASD_SLEEPON_START_TAG (void *) 1
5094 ++#define DASD_SLEEPON_END_TAG (void *) 2
5095 ++
5096 + /*
5097 + * SECTION: exported variables of dasd.c
5098 + */
5099 +@@ -1460,7 +1463,10 @@ void dasd_add_request_tail(struct dasd_ccw_req *cqr)
5100 + */
5101 + static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
5102 + {
5103 +- wake_up((wait_queue_head_t *) data);
5104 ++ spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
5105 ++ cqr->callback_data = DASD_SLEEPON_END_TAG;
5106 ++ spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
5107 ++ wake_up(&generic_waitq);
5108 + }
5109 +
5110 + static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
5111 +@@ -1470,10 +1476,7 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
5112 +
5113 + device = cqr->startdev;
5114 + spin_lock_irq(get_ccwdev_lock(device->cdev));
5115 +- rc = ((cqr->status == DASD_CQR_DONE ||
5116 +- cqr->status == DASD_CQR_NEED_ERP ||
5117 +- cqr->status == DASD_CQR_TERMINATED) &&
5118 +- list_empty(&cqr->devlist));
5119 ++ rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
5120 + spin_unlock_irq(get_ccwdev_lock(device->cdev));
5121 + return rc;
5122 + }
5123 +@@ -1561,7 +1564,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
5124 + wait_event(generic_waitq, !(device->stopped));
5125 +
5126 + cqr->callback = dasd_wakeup_cb;
5127 +- cqr->callback_data = (void *) &generic_waitq;
5128 ++ cqr->callback_data = DASD_SLEEPON_START_TAG;
5129 + dasd_add_request_tail(cqr);
5130 + if (interruptible) {
5131 + rc = wait_event_interruptible(
5132 +@@ -1640,7 +1643,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
5133 + }
5134 +
5135 + cqr->callback = dasd_wakeup_cb;
5136 +- cqr->callback_data = (void *) &generic_waitq;
5137 ++ cqr->callback_data = DASD_SLEEPON_START_TAG;
5138 + cqr->status = DASD_CQR_QUEUED;
5139 + list_add(&cqr->devlist, &device->ccw_queue);
5140 +
5141 +diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
5142 +index d00fcf8..fd6b135 100644
5143 +--- a/drivers/serial/imx.c
5144 ++++ b/drivers/serial/imx.c
5145 +@@ -119,7 +119,8 @@
5146 + #define MX2_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select, on mx2/mx3 */
5147 + #define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
5148 + #define UCR3_BPEN (1<<0) /* Preset registers enable */
5149 +-#define UCR4_CTSTL_32 (32<<10) /* CTS trigger level (32 chars) */
5150 ++#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */
5151 ++#define UCR4_CTSTL_MASK 0x3F /* CTS trigger is 6 bits wide */
5152 + #define UCR4_INVR (1<<9) /* Inverted infrared reception */
5153 + #define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */
5154 + #define UCR4_WKEN (1<<7) /* Wake interrupt enable */
5155 +@@ -590,6 +591,9 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
5156 + return 0;
5157 + }
5158 +
5159 ++/* half the RX buffer size */
5160 ++#define CTSTL 16
5161 ++
5162 + static int imx_startup(struct uart_port *port)
5163 + {
5164 + struct imx_port *sport = (struct imx_port *)port;
5165 +@@ -606,6 +610,10 @@ static int imx_startup(struct uart_port *port)
5166 + if (USE_IRDA(sport))
5167 + temp |= UCR4_IRSC;
5168 +
5169 ++ /* set the trigger level for CTS */
5170 ++ temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
5171 ++ temp |= CTSTL << UCR4_CTSTL_SHF;
5172 ++
5173 + writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
5174 +
5175 + if (USE_IRDA(sport)) {
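
The imx.c fix works because it performs a full read-modify-write of the 6-bit CTSTL field: the old code could only OR in UCR4_CTSTL_32 and therefore could never lower a trigger level already programmed by the bootloader. A self-contained sketch of the same mask-then-set idiom (plain C, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define UCR4_CTSTL_SHF  10     /* CTS trigger level shift */
    #define UCR4_CTSTL_MASK 0x3F   /* 6-bit field */

    /* clear the whole field, then set the new level: correct no matter
     * what value the bootloader left in the register */
    static uint32_t set_ctstl(uint32_t ucr4, unsigned int level)
    {
        ucr4 &= ~((uint32_t)UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
        ucr4 |= (uint32_t)(level & UCR4_CTSTL_MASK) << UCR4_CTSTL_SHF;
        return ucr4;
    }

    int main(void)
    {
        printf("0x%08x\n", set_ctstl(0xffffffffu, 16));  /* 0xffff43ff */
        return 0;
    }
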
5176 +diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
5177 +index 2549c53..6c8b6b6 100644
5178 +--- a/drivers/video/bfin-t350mcqb-fb.c
5179 ++++ b/drivers/video/bfin-t350mcqb-fb.c
5180 +@@ -515,9 +515,9 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
5181 + fbinfo->fbops = &bfin_t350mcqb_fb_ops;
5182 + fbinfo->flags = FBINFO_FLAG_DEFAULT;
5183 +
5184 +- info->fb_buffer =
5185 +- dma_alloc_coherent(NULL, fbinfo->fix.smem_len, &info->dma_handle,
5186 +- GFP_KERNEL);
5187 ++ info->fb_buffer = dma_alloc_coherent(NULL, fbinfo->fix.smem_len +
5188 ++ ACTIVE_VIDEO_MEM_OFFSET,
5189 ++ &info->dma_handle, GFP_KERNEL);
5190 +
5191 + if (NULL == info->fb_buffer) {
5192 + printk(KERN_ERR DRIVER_NAME
5193 +@@ -587,8 +587,8 @@ out7:
5194 + out6:
5195 + fb_dealloc_cmap(&fbinfo->cmap);
5196 + out4:
5197 +- dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
5198 +- info->dma_handle);
5199 ++ dma_free_coherent(NULL, fbinfo->fix.smem_len + ACTIVE_VIDEO_MEM_OFFSET,
5200 ++ info->fb_buffer, info->dma_handle);
5201 + out3:
5202 + framebuffer_release(fbinfo);
5203 + out2:
5204 +@@ -611,8 +611,9 @@ static int __devexit bfin_t350mcqb_remove(struct platform_device *pdev)
5205 + free_irq(info->irq, info);
5206 +
5207 + if (info->fb_buffer != NULL)
5208 +- dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
5209 +- info->dma_handle);
5210 ++ dma_free_coherent(NULL, fbinfo->fix.smem_len +
5211 ++ ACTIVE_VIDEO_MEM_OFFSET, info->fb_buffer,
5212 ++ info->dma_handle);
5213 +
5214 + fb_dealloc_cmap(&fbinfo->cmap);
5215 +
5216 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
5217 +index 645a179..2c6ee6a 100644
5218 +--- a/fs/btrfs/ioctl.c
5219 ++++ b/fs/btrfs/ioctl.c
5220 +@@ -964,12 +964,17 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
5221 + ret = -EBADF;
5222 + goto out_drop_write;
5223 + }
5224 ++
5225 + src = src_file->f_dentry->d_inode;
5226 +
5227 + ret = -EINVAL;
5228 + if (src == inode)
5229 + goto out_fput;
5230 +
5231 ++ /* the src must be open for reading */
5232 ++ if (!(src_file->f_mode & FMODE_READ))
5233 ++ goto out_fput;
5234 ++
5235 + ret = -EISDIR;
5236 + if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
5237 + goto out_fput;
5238 +diff --git a/fs/cachefiles/security.c b/fs/cachefiles/security.c
5239 +index b5808cd..039b501 100644
5240 +--- a/fs/cachefiles/security.c
5241 ++++ b/fs/cachefiles/security.c
5242 +@@ -77,6 +77,8 @@ static int cachefiles_check_cache_dir(struct cachefiles_cache *cache,
5243 + /*
5244 + * check the security details of the on-disk cache
5245 + * - must be called with security override in force
5246 ++ * - must return with a security override in force - even in the case of an
5247 ++ * error
5248 + */
5249 + int cachefiles_determine_cache_security(struct cachefiles_cache *cache,
5250 + struct dentry *root,
5251 +@@ -99,6 +101,8 @@ int cachefiles_determine_cache_security(struct cachefiles_cache *cache,
5252 + * which create files */
5253 + ret = set_create_files_as(new, root->d_inode);
5254 + if (ret < 0) {
5255 ++ abort_creds(new);
5256 ++ cachefiles_begin_secure(cache, _saved_cred);
5257 + _leave(" = %d [cfa]", ret);
5258 + return ret;
5259 + }
5260 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
5261 +index ed751bb..2568889 100644
5262 +--- a/fs/cifs/cifsglob.h
5263 ++++ b/fs/cifs/cifsglob.h
5264 +@@ -500,6 +500,7 @@ struct dfs_info3_param {
5265 + #define CIFS_FATTR_DFS_REFERRAL 0x1
5266 + #define CIFS_FATTR_DELETE_PENDING 0x2
5267 + #define CIFS_FATTR_NEED_REVAL 0x4
5268 ++#define CIFS_FATTR_INO_COLLISION 0x8
5269 +
5270 + struct cifs_fattr {
5271 + u32 cf_flags;
5272 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
5273 +index e3fda97..7ec8555 100644
5274 +--- a/fs/cifs/inode.c
5275 ++++ b/fs/cifs/inode.c
5276 +@@ -610,6 +610,16 @@ cifs_find_inode(struct inode *inode, void *opaque)
5277 + if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
5278 + return 0;
5279 +
5280 ++ /*
5281 ++ * uh oh -- it's a directory. We can't use it since hardlinked dirs are
5282 ++ * verboten. Disable serverino and return it as if it were found; the
5283 ++ * caller can discard it, generate a uniqueid, and retry the find.
5284 ++ */
5285 ++ if (S_ISDIR(inode->i_mode) && !list_empty(&inode->i_dentry)) {
5286 ++ fattr->cf_flags |= CIFS_FATTR_INO_COLLISION;
5287 ++ cifs_autodisable_serverino(CIFS_SB(inode->i_sb));
5288 ++ }
5289 ++
5290 + return 1;
5291 + }
5292 +
5293 +@@ -629,15 +639,22 @@ cifs_iget(struct super_block *sb, struct cifs_fattr *fattr)
5294 + unsigned long hash;
5295 + struct inode *inode;
5296 +
5297 ++retry_iget5_locked:
5298 + cFYI(1, ("looking for uniqueid=%llu", fattr->cf_uniqueid));
5299 +
5300 + /* hash down to 32-bits on 32-bit arch */
5301 + hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
5302 +
5303 + inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode, fattr);
5304 +-
5305 +- /* we have fattrs in hand, update the inode */
5306 + if (inode) {
5307 ++ /* was there a problematic inode number collision? */
5308 ++ if (fattr->cf_flags & CIFS_FATTR_INO_COLLISION) {
5309 ++ iput(inode);
5310 ++ fattr->cf_uniqueid = iunique(sb, ROOT_I);
5311 ++ fattr->cf_flags &= ~CIFS_FATTR_INO_COLLISION;
5312 ++ goto retry_iget5_locked;
5313 ++ }
5314 ++
5315 + cifs_fattr_to_inode(inode, fattr);
5316 + if (sb->s_flags & MS_NOATIME)
5317 + inode->i_flags |= S_NOATIME | S_NOCMTIME;
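
The cifs change above is a collision-recovery loop: when the server-supplied uniqueid hashes to an existing directory inode, the driver stops trusting server inode numbers, generates a local unique id, and retries the lookup. A compact sketch of that fallback shape (lookup_collides() and the local id counter are hypothetical stand-ins for iget5_locked()/iunique(), not cifs code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool lookup_collides(uint64_t id) { return id == 42; }
    static uint64_t next_local_id = 1000;

    static uint64_t pick_inode_number(uint64_t server_id)
    {
        uint64_t id = server_id;

        /* if the server-supplied id collides, fall back to locally
         * generated unique numbers and retry until one is free */
        while (lookup_collides(id))
            id = next_local_id++;
        return id;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)pick_inode_number(42));
        return 0;
    }
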
5318 +diff --git a/fs/compat.c b/fs/compat.c
5319 +index 00d90c2..514b623 100644
5320 +--- a/fs/compat.c
5321 ++++ b/fs/compat.c
5322 +@@ -1530,8 +1530,6 @@ int compat_do_execve(char * filename,
5323 + if (retval < 0)
5324 + goto out;
5325 +
5326 +- current->stack_start = current->mm->start_stack;
5327 +-
5328 + /* execve succeeded */
5329 + current->fs->in_exec = 0;
5330 + current->in_execve = 0;
5331 +diff --git a/fs/exec.c b/fs/exec.c
5332 +index 9071360..332f781 100644
5333 +--- a/fs/exec.c
5334 ++++ b/fs/exec.c
5335 +@@ -1386,8 +1386,6 @@ int do_execve(char * filename,
5336 + if (retval < 0)
5337 + goto out;
5338 +
5339 +- current->stack_start = current->mm->start_stack;
5340 +-
5341 + /* execve succeeded */
5342 + current->fs->in_exec = 0;
5343 + current->in_execve = 0;
5344 +diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
5345 +index 8173fae..4d3ddcc 100644
5346 +--- a/fs/nilfs2/super.c
5347 ++++ b/fs/nilfs2/super.c
5348 +@@ -746,6 +746,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
5349 + sb->s_export_op = &nilfs_export_ops;
5350 + sb->s_root = NULL;
5351 + sb->s_time_gran = 1;
5352 ++ sb->s_bdi = nilfs->ns_bdi;
5353 +
5354 + err = load_nilfs(nilfs, sbi);
5355 + if (err)
5356 +diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
5357 +index 1afb0a1..e27960c 100644
5358 +--- a/fs/notify/inotify/inotify_fsnotify.c
5359 ++++ b/fs/notify/inotify/inotify_fsnotify.c
5360 +@@ -28,6 +28,7 @@
5361 + #include <linux/path.h> /* struct path */
5362 + #include <linux/slab.h> /* kmem_* */
5363 + #include <linux/types.h>
5364 ++#include <linux/sched.h>
5365 +
5366 + #include "inotify.h"
5367 +
5368 +@@ -146,6 +147,7 @@ static void inotify_free_group_priv(struct fsnotify_group *group)
5369 + idr_for_each(&group->inotify_data.idr, idr_callback, group);
5370 + idr_remove_all(&group->inotify_data.idr);
5371 + idr_destroy(&group->inotify_data.idr);
5372 ++ free_uid(group->inotify_data.user);
5373 + }
5374 +
5375 + void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
5376 +diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
5377 +index a94e8bd..75aa15a 100644
5378 +--- a/fs/notify/inotify/inotify_user.c
5379 ++++ b/fs/notify/inotify/inotify_user.c
5380 +@@ -550,21 +550,24 @@ retry:
5381 + if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
5382 + goto out_err;
5383 +
5384 ++ /* we are putting the mark on the idr, take a reference */
5385 ++ fsnotify_get_mark(&tmp_ientry->fsn_entry);
5386 ++
5387 + spin_lock(&group->inotify_data.idr_lock);
5388 + ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
5389 + group->inotify_data.last_wd+1,
5390 + &tmp_ientry->wd);
5391 + spin_unlock(&group->inotify_data.idr_lock);
5392 + if (ret) {
5393 ++ /* we didn't get on the idr, drop the idr reference */
5394 ++ fsnotify_put_mark(&tmp_ientry->fsn_entry);
5395 ++
5396 + /* idr was out of memory allocate and try again */
5397 + if (ret == -EAGAIN)
5398 + goto retry;
5399 + goto out_err;
5400 + }
5401 +
5402 +- /* we put the mark on the idr, take a reference */
5403 +- fsnotify_get_mark(&tmp_ientry->fsn_entry);
5404 +-
5405 + /* we are on the idr, now get on the inode */
5406 + ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
5407 + if (ret) {
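
The inotify reordering above closes a window: the mark must already hold the idr's reference before it becomes visible in the idr, otherwise a concurrent lookup could take and drop the only reference and free the mark while it is still being inserted; on failure, the now-unneeded reference is dropped again. A small C11 sketch of the take-ref-then-publish rule (publish() is a hypothetical stand-in for idr_get_new_above(); it fails here so the error path runs):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct mark { atomic_int refs; };

    static void get_mark(struct mark *m) { atomic_fetch_add(&m->refs, 1); }

    static void put_mark(struct mark *m)
    {
        if (atomic_fetch_sub(&m->refs, 1) == 1)
            free(m);   /* last reference gone */
    }

    static int publish(struct mark *m) { (void)m; return -1; }

    int main(void)
    {
        struct mark *m = malloc(sizeof(*m));

        if (!m)
            return 1;
        atomic_init(&m->refs, 1);   /* caller's reference */

        get_mark(m);                /* table's reference, taken FIRST */
        if (publish(m) != 0)
            put_mark(m);            /* insert failed: drop the table's ref */
        /* on success the table would keep its reference until removal */

        put_mark(m);                /* caller drops its own reference */
        return 0;
    }
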
5408 +diff --git a/fs/proc/array.c b/fs/proc/array.c
5409 +index 13b5d07..69eb4c4 100644
5410 +--- a/fs/proc/array.c
5411 ++++ b/fs/proc/array.c
5412 +@@ -82,7 +82,6 @@
5413 + #include <linux/pid_namespace.h>
5414 + #include <linux/ptrace.h>
5415 + #include <linux/tracehook.h>
5416 +-#include <linux/swapops.h>
5417 +
5418 + #include <asm/pgtable.h>
5419 + #include <asm/processor.h>
5420 +@@ -494,7 +493,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
5421 + rsslim,
5422 + mm ? mm->start_code : 0,
5423 + mm ? mm->end_code : 0,
5424 +- (permitted && mm) ? task->stack_start : 0,
5425 ++ (permitted && mm) ? mm->start_stack : 0,
5426 + esp,
5427 + eip,
5428 + /* The signal information here is obsolete.
5429 +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
5430 +index f277c4a..9df34a5 100644
5431 +--- a/fs/proc/task_mmu.c
5432 ++++ b/fs/proc/task_mmu.c
5433 +@@ -243,25 +243,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
5434 + } else if (vma->vm_start <= mm->start_stack &&
5435 + vma->vm_end >= mm->start_stack) {
5436 + name = "[stack]";
5437 +- } else {
5438 +- unsigned long stack_start;
5439 +- struct proc_maps_private *pmp;
5440 +-
5441 +- pmp = m->private;
5442 +- stack_start = pmp->task->stack_start;
5443 +-
5444 +- if (vma->vm_start <= stack_start &&
5445 +- vma->vm_end >= stack_start) {
5446 +- pad_len_spaces(m, len);
5447 +- seq_printf(m,
5448 +- "[threadstack:%08lx]",
5449 +-#ifdef CONFIG_STACK_GROWSUP
5450 +- vma->vm_end - stack_start
5451 +-#else
5452 +- stack_start - vma->vm_start
5453 +-#endif
5454 +- );
5455 +- }
5456 + }
5457 + } else {
5458 + name = "[vdso]";
5459 +diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
5460 +index e694263..6920695 100644
5461 +--- a/include/asm-generic/dma-mapping-common.h
5462 ++++ b/include/asm-generic/dma-mapping-common.h
5463 +@@ -131,7 +131,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
5464 + debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
5465 +
5466 + } else
5467 +- dma_sync_single_for_cpu(dev, addr, size, dir);
5468 ++ dma_sync_single_for_cpu(dev, addr + offset, size, dir);
5469 + }
5470 +
5471 + static inline void dma_sync_single_range_for_device(struct device *dev,
5472 +@@ -148,7 +148,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
5473 + debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
5474 +
5475 + } else
5476 +- dma_sync_single_for_device(dev, addr, size, dir);
5477 ++ dma_sync_single_for_device(dev, addr + offset, size, dir);
5478 + }
5479 +
5480 + static inline void
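
The two dma-mapping fixes above are the same one-token bug: the *_range_* helpers exist precisely so a driver can sync a sub-range of a mapping, yet the fallback path dropped the offset and always synced from the start of the buffer. A minimal sketch of the corrected arithmetic (sync_for_cpu() is a hypothetical stand-in for the architecture's cache-maintenance hook):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static void sync_for_cpu(uintptr_t addr, size_t size)
    {
        printf("sync [0x%lx, 0x%lx)\n",
               (unsigned long)addr, (unsigned long)(addr + size));
    }

    /* partial-buffer sync: the bug fixed above was dropping `offset` here */
    static void sync_range_for_cpu(uintptr_t addr, size_t offset, size_t size)
    {
        sync_for_cpu(addr + offset, size);   /* not just `addr` */
    }

    int main(void)
    {
        uintptr_t dma_addr = 0x10000;            /* pretend DMA mapping */
        sync_range_for_cpu(dma_addr, 512, 64);   /* syncs 0x10200..0x10240 */
        return 0;
    }
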
5481 +diff --git a/include/linux/sched.h b/include/linux/sched.h
5482 +index 1f5fa53..db821a4 100644
5483 +--- a/include/linux/sched.h
5484 ++++ b/include/linux/sched.h
5485 +@@ -1560,7 +1560,6 @@ struct task_struct {
5486 + /* bitmask of trace recursion */
5487 + unsigned long trace_recursion;
5488 + #endif /* CONFIG_TRACING */
5489 +- unsigned long stack_start;
5490 + #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
5491 + struct memcg_batch_info {
5492 + int do_batch; /* incremented when batch uncharge started */
5493 +diff --git a/kernel/fork.c b/kernel/fork.c
5494 +index f88bd98..0ea67a3 100644
5495 +--- a/kernel/fork.c
5496 ++++ b/kernel/fork.c
5497 +@@ -1134,8 +1134,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
5498 +
5499 + p->bts = NULL;
5500 +
5501 +- p->stack_start = stack_start;
5502 +-
5503 + /* Perform scheduler related setup. Assign this task to a CPU. */
5504 + sched_fork(p, clone_flags);
5505 +
5506 +diff --git a/kernel/profile.c b/kernel/profile.c
5507 +index a55d3a3..dfadc5b 100644
5508 +--- a/kernel/profile.c
5509 ++++ b/kernel/profile.c
5510 +@@ -127,8 +127,10 @@ int __ref profile_init(void)
5511 + return 0;
5512 +
5513 + prof_buffer = vmalloc(buffer_bytes);
5514 +- if (prof_buffer)
5515 ++ if (prof_buffer) {
5516 ++ memset(prof_buffer, 0, buffer_bytes);
5517 + return 0;
5518 ++ }
5519 +
5520 + free_cpumask_var(prof_cpu_mask);
5521 + return -ENOMEM;
5522 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
5523 +index fd9ba95..e8d9544 100644
5524 +--- a/mm/hugetlb.c
5525 ++++ b/mm/hugetlb.c
5526 +@@ -1039,7 +1039,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
5527 + page = alloc_buddy_huge_page(h, vma, addr);
5528 + if (!page) {
5529 + hugetlb_put_quota(inode->i_mapping, chg);
5530 +- return ERR_PTR(-VM_FAULT_OOM);
5531 ++ return ERR_PTR(-VM_FAULT_SIGBUS);
5532 + }
5533 + }
5534 +
5535 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
5536 +index 16190ca..955f0b2 100644
5537 +--- a/net/ipv4/udp.c
5538 ++++ b/net/ipv4/udp.c
5539 +@@ -1527,6 +1527,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
5540 +
5541 + uh = udp_hdr(skb);
5542 + ulen = ntohs(uh->len);
5543 ++ saddr = ip_hdr(skb)->saddr;
5544 ++ daddr = ip_hdr(skb)->daddr;
5545 ++
5546 + if (ulen > skb->len)
5547 + goto short_packet;
5548 +
5549 +@@ -1540,9 +1543,6 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
5550 + if (udp4_csum_init(skb, uh, proto))
5551 + goto csum_error;
5552 +
5553 +- saddr = ip_hdr(skb)->saddr;
5554 +- daddr = ip_hdr(skb)->daddr;
5555 +-
5556 + if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
5557 + return __udp4_lib_mcast_deliver(net, skb, uh,
5558 + saddr, daddr, udptable);
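
The udp.c hunks hoist the saddr/daddr reads above udp4_csum_init() because that call can pull and reshape the skb, leaving pointers into the old header location stale. The general rule is to copy out what you need before any call that may move the underlying buffer; a user-space analogue with realloc() (maybe_grow() is an illustrative stand-in, not networking code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* may reallocate the buffer, the way udp4_csum_init() may pull the skb */
    static char *maybe_grow(char *buf, size_t *len)
    {
        *len *= 2;
        return realloc(buf, *len);
    }

    int main(void)
    {
        size_t len = 16;
        char *buf = malloc(len);

        if (!buf)
            return 1;
        strcpy(buf, "hdr");
        char first = buf[0];            /* copy what you need first ... */

        buf = maybe_grow(buf, &len);    /* ... because this may move it */
        printf("%c %zu\n", first, len); /* `first` is still valid */

        free(buf);
        return 0;
    }
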
5559 +diff --git a/security/min_addr.c b/security/min_addr.c
5560 +index e86f297..f728728 100644
5561 +--- a/security/min_addr.c
5562 ++++ b/security/min_addr.c
5563 +@@ -33,7 +33,7 @@ int mmap_min_addr_handler(struct ctl_table *table, int write,
5564 + {
5565 + int ret;
5566 +
5567 +- if (!capable(CAP_SYS_RAWIO))
5568 ++ if (write && !capable(CAP_SYS_RAWIO))
5569 + return -EPERM;
5570 +
5571 + ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
5572 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
5573 +index fd831bd..a747871 100644
5574 +--- a/sound/pci/hda/hda_intel.c
5575 ++++ b/sound/pci/hda/hda_intel.c
5576 +@@ -2718,6 +2718,7 @@ static struct pci_device_id azx_ids[] = {
5577 + { PCI_DEVICE(0x8086, 0x3a6e), .driver_data = AZX_DRIVER_ICH },
5578 + /* PCH */
5579 + { PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_ICH },
5580 ++ { PCI_DEVICE(0x8086, 0x3b57), .driver_data = AZX_DRIVER_ICH },
5581 + /* CPT */
5582 + { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
5583 + /* SCH */
5584 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
5585 +index 1a97c81..a978645 100644
5586 +--- a/sound/pci/hda/patch_conexant.c
5587 ++++ b/sound/pci/hda/patch_conexant.c
5588 +@@ -1176,9 +1176,10 @@ static int patch_cxt5045(struct hda_codec *codec)
5589 + case 0x103c:
5590 + case 0x1631:
5591 + case 0x1734:
5592 +- /* HP, Packard Bell, & Fujitsu-Siemens laptops have really bad
5593 +- * sound over 0dB on NID 0x17. Fix max PCM level to 0 dB
5594 +- * (originally it has 0x2b steps with 0dB offset 0x14)
5595 ++ case 0x17aa:
5596 ++ /* HP, Packard Bell, Fujitsu-Siemens & Lenovo laptops have
5597 ++ * really bad sound over 0dB on NID 0x17. Fix max PCM level to
5598 ++ * 0 dB (originally it has 0x2b steps with 0dB offset 0x14)
5599 + */
5600 + snd_hda_override_amp_caps(codec, 0x17, HDA_INPUT,
5601 + (0x14 << AC_AMPCAP_OFFSET_SHIFT) |
5602 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5603 +index b486daa..abfc558 100644
5604 +--- a/sound/pci/hda/patch_realtek.c
5605 ++++ b/sound/pci/hda/patch_realtek.c
5606 +@@ -17348,7 +17348,6 @@ static struct snd_pci_quirk alc662_cfg_tbl[] = {
5607 + ALC662_3ST_6ch_DIG),
5608 + SND_PCI_QUIRK_MASK(0x1854, 0xf000, 0x2000, "ASUS H13-200x",
5609 + ALC663_ASUS_H13),
5610 +- SND_PCI_QUIRK(0x8086, 0xd604, "Intel mobo", ALC662_3ST_2ch_DIG),
5611 + {}
5612 + };
5613 +
5614 +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
5615 +index ac2d528..cb474c0 100644
5616 +--- a/sound/pci/hda/patch_sigmatel.c
5617 ++++ b/sound/pci/hda/patch_sigmatel.c
5618 +@@ -1539,11 +1539,9 @@ static unsigned int alienware_m17x_pin_configs[13] = {
5619 + 0x904601b0,
5620 + };
5621 +
5622 +-static unsigned int intel_dg45id_pin_configs[14] = {
5623 ++static unsigned int intel_dg45id_pin_configs[13] = {
5624 + 0x02214230, 0x02A19240, 0x01013214, 0x01014210,
5625 +- 0x01A19250, 0x01011212, 0x01016211, 0x40f000f0,
5626 +- 0x40f000f0, 0x40f000f0, 0x40f000f0, 0x014510A0,
5627 +- 0x074510B0, 0x40f000f0
5628 ++ 0x01A19250, 0x01011212, 0x01016211
5629 + };
5630 +
5631 + static unsigned int *stac92hd73xx_brd_tbl[STAC_92HD73XX_MODELS] = {
5632 +diff --git a/sound/pci/ice1712/maya44.c b/sound/pci/ice1712/maya44.c
5633 +index 3e1c20a..726fd4b 100644
5634 +--- a/sound/pci/ice1712/maya44.c
5635 ++++ b/sound/pci/ice1712/maya44.c
5636 +@@ -347,7 +347,7 @@ static int maya_gpio_sw_put(struct snd_kcontrol *kcontrol,
5637 +
5638 + /* known working input slots (0-4) */
5639 + #define MAYA_LINE_IN 1 /* in-2 */
5640 +-#define MAYA_MIC_IN 4 /* in-5 */
5641 ++#define MAYA_MIC_IN 3 /* in-4 */
5642 +
5643 + static void wm8776_select_input(struct snd_maya44 *chip, int idx, int line)
5644 + {
5645 +@@ -393,8 +393,8 @@ static int maya_rec_src_put(struct snd_kcontrol *kcontrol,
5646 + int changed;
5647 +
5648 + mutex_lock(&chip->mutex);
5649 +- changed = maya_set_gpio_bits(chip->ice, GPIO_MIC_RELAY,
5650 +- sel ? GPIO_MIC_RELAY : 0);
5651 ++ changed = maya_set_gpio_bits(chip->ice, 1 << GPIO_MIC_RELAY,
5652 ++ sel ? (1 << GPIO_MIC_RELAY) : 0);
5653 + wm8776_select_input(chip, 0, sel ? MAYA_MIC_IN : MAYA_LINE_IN);
5654 + mutex_unlock(&chip->mutex);
5655 + return changed;
5656 +diff --git a/sound/pci/oxygen/xonar_cs43xx.c b/sound/pci/oxygen/xonar_cs43xx.c
5657 +index 16c226b..7c4986b 100644
5658 +--- a/sound/pci/oxygen/xonar_cs43xx.c
5659 ++++ b/sound/pci/oxygen/xonar_cs43xx.c
5660 +@@ -56,6 +56,7 @@
5661 + #include <sound/pcm_params.h>
5662 + #include <sound/tlv.h>
5663 + #include "xonar.h"
5664 ++#include "cm9780.h"
5665 + #include "cs4398.h"
5666 + #include "cs4362a.h"
5667 +
5668 +@@ -172,6 +173,8 @@ static void xonar_d1_init(struct oxygen *chip)
5669 + oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA,
5670 + GPIO_D1_FRONT_PANEL | GPIO_D1_INPUT_ROUTE);
5671 +
5672 ++ oxygen_ac97_set_bits(chip, 0, CM9780_JACK, CM9780_FMIC2MIC);
5673 ++
5674 + xonar_init_cs53x1(chip);
5675 + xonar_enable_output(chip);
5676 +