Gentoo Archives: gentoo-commits

From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1351 - genpatches-2.6/trunk/2.6.27
Date: Sat, 11 Oct 2008 00:01:14
Message-Id: E1KoRuZ-0001NA-Hy@stork.gentoo.org
1 Author: mpagano
2 Date: 2008-10-11 00:00:47 +0000 (Sat, 11 Oct 2008)
3 New Revision: 1351
4
5 Removed:
6 genpatches-2.6/trunk/2.6.27/1000_linux-2.6.26.1.patch
7 genpatches-2.6/trunk/2.6.27/1001_linux-2.6.26.2.patch
8 genpatches-2.6/trunk/2.6.27/1002_linux-2.6.26.3.patch
9 genpatches-2.6/trunk/2.6.27/1003_linux-2.6.26.4.patch
10 genpatches-2.6/trunk/2.6.27/1004_linux-2.6.26.5.patch
11 genpatches-2.6/trunk/2.6.27/1005_linux-2.6.26.6.patch
12 Log:
13 Removing 2.6.26.x patches
14
15 Deleted: genpatches-2.6/trunk/2.6.27/1000_linux-2.6.26.1.patch
16 ===================================================================
17 --- genpatches-2.6/trunk/2.6.27/1000_linux-2.6.26.1.patch 2008-10-10 23:58:26 UTC (rev 1350)
18 +++ genpatches-2.6/trunk/2.6.27/1000_linux-2.6.26.1.patch 2008-10-11 00:00:47 UTC (rev 1351)
19 @@ -1,2185 +0,0 @@
20 -diff --git a/Documentation/networking/udplite.txt b/Documentation/networking/udplite.txt
21 -index 3870f28..855d8da 100644
22 ---- a/Documentation/networking/udplite.txt
23 -+++ b/Documentation/networking/udplite.txt
24 -@@ -148,7 +148,7 @@
25 - getsockopt(sockfd, SOL_SOCKET, SO_NO_CHECK, &value, ...);
26 -
27 - is meaningless (as in TCP). Packets with a zero checksum field are
28 -- illegal (cf. RFC 3828, sec. 3.1) will be silently discarded.
29 -+ illegal (cf. RFC 3828, sec. 3.1) and will be silently discarded.
30 -
31 - 4) Fragmentation
32 -
33 -diff --git a/Documentation/video4linux/cx18.txt b/Documentation/video4linux/cx18.txt
34 -index 6842c26..63f3aef 100644
35 ---- a/Documentation/video4linux/cx18.txt
36 -+++ b/Documentation/video4linux/cx18.txt
37 -@@ -23,14 +23,8 @@ encoder chip:
38 -
39 - Firmware:
40 -
41 --The firmware needs to be extracted from the Windows Hauppauge HVR-1600
42 --driver, available here:
43 -+You can obtain the firmware files here:
44 -
45 --http://hauppauge.lightpath.net/software/install_cd/hauppauge_cd_3.4d1.zip
46 -+http://dl.ivtvdriver.org/ivtv/firmware/cx18-firmware.tar.gz
47 -
48 --Unzip, then copy the following files to the firmware directory
49 --and rename them as follows:
50 --
51 --Drivers/Driver18/hcw18apu.rom -> v4l-cx23418-apu.fw
52 --Drivers/Driver18/hcw18enc.rom -> v4l-cx23418-cpu.fw
53 --Drivers/Driver18/hcw18mlC.rom -> v4l-cx23418-dig.fw
54 -+Untar and copy the .fw files to your firmware directory.
55 -diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
56 -index 318b811..5152ba0 100644
57 ---- a/arch/ia64/kvm/kvm-ia64.c
58 -+++ b/arch/ia64/kvm/kvm-ia64.c
59 -@@ -1460,6 +1460,9 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
60 - return 0;
61 - }
62 -
63 -+void kvm_arch_flush_shadow(struct kvm *kvm)
64 -+{
65 -+}
66 -
67 - long kvm_arch_dev_ioctl(struct file *filp,
68 - unsigned int ioctl, unsigned long arg)
69 -diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
70 -index 777e0f3..1eaa3e4 100644
71 ---- a/arch/powerpc/kvm/powerpc.c
72 -+++ b/arch/powerpc/kvm/powerpc.c
73 -@@ -167,6 +167,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
74 - return 0;
75 - }
76 -
77 -+void kvm_arch_flush_shadow(struct kvm *kvm)
78 -+{
79 -+}
80 -+
81 - struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
82 - {
83 - struct kvm_vcpu *vcpu;
84 -diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
85 -index 69288f6..3233fe8 100644
86 ---- a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
87 -+++ b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
88 -@@ -96,6 +96,12 @@ static int pmi_notifier(struct notifier_block *nb,
89 - struct cpufreq_frequency_table *cbe_freqs;
90 - u8 node;
91 -
92 -+ /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE
93 -+ * and CPUFREQ_NOTIFY policy events?)
94 -+ */
95 -+ if (event == CPUFREQ_START)
96 -+ return 0;
97 -+
98 - cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
99 - node = cbe_cpu_to_node(policy->cpu);
100 -
101 -diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
102 -index 6558b09..b19c170 100644
103 ---- a/arch/s390/kvm/kvm-s390.c
104 -+++ b/arch/s390/kvm/kvm-s390.c
105 -@@ -672,6 +672,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
106 - return 0;
107 - }
108 -
109 -+void kvm_arch_flush_shadow(struct kvm *kvm)
110 -+{
111 -+}
112 -+
113 - gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
114 - {
115 - return gfn;
116 -diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
117 -index b441a26..c481673 100644
118 ---- a/arch/sparc64/kernel/irq.c
119 -+++ b/arch/sparc64/kernel/irq.c
120 -@@ -621,8 +621,9 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
121 - unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
122 - {
123 - struct irq_handler_data *data;
124 -- struct ino_bucket *bucket;
125 - unsigned long hv_err, cookie;
126 -+ struct ino_bucket *bucket;
127 -+ struct irq_desc *desc;
128 - unsigned int virt_irq;
129 -
130 - bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
131 -@@ -643,6 +644,13 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
132 - if (unlikely(!data))
133 - return 0;
134 -
135 -+ /* In order to make the LDC channel startup sequence easier,
136 -+ * especially wrt. locking, we do not let request_irq() enable
137 -+ * the interrupt.
138 -+ */
139 -+ desc = irq_desc + virt_irq;
140 -+ desc->status |= IRQ_NOAUTOEN;
141 -+
142 - set_irq_chip_data(virt_irq, data);
143 -
144 - /* Catch accidental accesses to these things. IMAP/ICLR handling
145 -diff --git a/arch/sparc64/kernel/ldc.c b/arch/sparc64/kernel/ldc.c
146 -index 63969f6..d689823 100644
147 ---- a/arch/sparc64/kernel/ldc.c
148 -+++ b/arch/sparc64/kernel/ldc.c
149 -@@ -1,6 +1,6 @@
150 - /* ldc.c: Logical Domain Channel link-layer protocol driver.
151 - *
152 -- * Copyright (C) 2007 David S. Miller <davem@×××××××××.net>
153 -+ * Copyright (C) 2007, 2008 David S. Miller <davem@×××××××××.net>
154 - */
155 -
156 - #include <linux/kernel.h>
157 -@@ -23,8 +23,8 @@
158 -
159 - #define DRV_MODULE_NAME "ldc"
160 - #define PFX DRV_MODULE_NAME ": "
161 --#define DRV_MODULE_VERSION "1.0"
162 --#define DRV_MODULE_RELDATE "June 25, 2007"
163 -+#define DRV_MODULE_VERSION "1.1"
164 -+#define DRV_MODULE_RELDATE "July 22, 2008"
165 -
166 - static char version[] __devinitdata =
167 - DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
168 -@@ -1235,13 +1235,9 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
169 - unsigned long hv_err, flags;
170 - int err = -EINVAL;
171 -
172 -- spin_lock_irqsave(&lp->lock, flags);
173 --
174 -- if (!name)
175 -- goto out_err;
176 --
177 -- if (lp->state != LDC_STATE_INIT)
178 -- goto out_err;
179 -+ if (!name ||
180 -+ (lp->state != LDC_STATE_INIT))
181 -+ return -EINVAL;
182 -
183 - snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
184 - snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
185 -@@ -1250,25 +1246,32 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
186 - IRQF_SAMPLE_RANDOM | IRQF_SHARED,
187 - lp->rx_irq_name, lp);
188 - if (err)
189 -- goto out_err;
190 -+ return err;
191 -
192 - err = request_irq(lp->cfg.tx_irq, ldc_tx,
193 - IRQF_SAMPLE_RANDOM | IRQF_SHARED,
194 - lp->tx_irq_name, lp);
195 -- if (err)
196 -- goto out_free_rx_irq;
197 -+ if (err) {
198 -+ free_irq(lp->cfg.rx_irq, lp);
199 -+ return err;
200 -+ }
201 -+
202 -
203 -+ spin_lock_irqsave(&lp->lock, flags);
204 -+
205 -+ enable_irq(lp->cfg.rx_irq);
206 -+ enable_irq(lp->cfg.tx_irq);
207 -
208 - lp->flags |= LDC_FLAG_REGISTERED_IRQS;
209 -
210 - err = -ENODEV;
211 - hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
212 - if (hv_err)
213 -- goto out_free_tx_irq;
214 -+ goto out_free_irqs;
215 -
216 - hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
217 - if (hv_err)
218 -- goto out_free_tx_irq;
219 -+ goto out_free_irqs;
220 -
221 - hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
222 - if (hv_err)
223 -@@ -1304,14 +1307,11 @@ out_unmap_rx:
224 - out_unmap_tx:
225 - sun4v_ldc_tx_qconf(lp->id, 0, 0);
226 -
227 --out_free_tx_irq:
228 -+out_free_irqs:
229 - lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
230 - free_irq(lp->cfg.tx_irq, lp);
231 --
232 --out_free_rx_irq:
233 - free_irq(lp->cfg.rx_irq, lp);
234 -
235 --out_err:
236 - spin_unlock_irqrestore(&lp->lock, flags);
237 -
238 - return err;
239 -diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
240 -index e5d2389..f464023 100644
241 ---- a/arch/sparc64/kernel/time.c
242 -+++ b/arch/sparc64/kernel/time.c
243 -@@ -883,6 +883,16 @@ static struct notifier_block sparc64_cpufreq_notifier_block = {
244 - .notifier_call = sparc64_cpufreq_notifier
245 - };
246 -
247 -+static int __init register_sparc64_cpufreq_notifier(void)
248 -+{
249 -+
250 -+ cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
251 -+ CPUFREQ_TRANSITION_NOTIFIER);
252 -+ return 0;
253 -+}
254 -+
255 -+core_initcall(register_sparc64_cpufreq_notifier);
256 -+
257 - #endif /* CONFIG_CPU_FREQ */
258 -
259 - static int sparc64_next_event(unsigned long delta,
260 -@@ -1049,11 +1059,6 @@ void __init time_init(void)
261 - sparc64_clockevent.mult, sparc64_clockevent.shift);
262 -
263 - setup_sparc64_timer();
264 --
265 --#ifdef CONFIG_CPU_FREQ
266 -- cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
267 -- CPUFREQ_TRANSITION_NOTIFIER);
268 --#endif
269 - }
270 -
271 - unsigned long long sched_clock(void)
272 -diff --git a/arch/um/include/init.h b/arch/um/include/init.h
273 -index b00a957..37dd097 100644
274 ---- a/arch/um/include/init.h
275 -+++ b/arch/um/include/init.h
276 -@@ -45,6 +45,8 @@ typedef void (*exitcall_t)(void);
277 - # define __section(S) __attribute__ ((__section__(#S)))
278 - #endif
279 -
280 -+#if __GNUC__ == 3
281 -+
282 - #if __GNUC_MINOR__ >= 3
283 - # define __used __attribute__((__used__))
284 - #else
285 -@@ -52,6 +54,12 @@ typedef void (*exitcall_t)(void);
286 - #endif
287 -
288 - #else
289 -+#if __GNUC__ == 4
290 -+# define __used __attribute__((__used__))
291 -+#endif
292 -+#endif
293 -+
294 -+#else
295 - #include <linux/compiler.h>
296 - #endif
297 - /* These are for everybody (although not all archs will actually
298 -diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
299 -index 2ad6301..dd138a2 100644
300 ---- a/arch/x86/Kconfig.cpu
301 -+++ b/arch/x86/Kconfig.cpu
302 -@@ -414,4 +414,4 @@ config X86_MINIMUM_CPU_FAMILY
303 -
304 - config X86_DEBUGCTLMSR
305 - def_bool y
306 -- depends on !(M586MMX || M586TSC || M586 || M486 || M386)
307 -+ depends on !(MK6 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386)
308 -diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
309 -index 36af01f..130711f 100644
310 ---- a/arch/x86/kernel/acpi/sleep.c
311 -+++ b/arch/x86/kernel/acpi/sleep.c
312 -@@ -23,6 +23,15 @@ static unsigned long acpi_realmode;
313 - static char temp_stack[10240];
314 - #endif
315 -
316 -+/* XXX: this macro should move to asm-x86/segment.h and be shared with the
317 -+ boot code... */
318 -+#define GDT_ENTRY(flags, base, limit) \
319 -+ (((u64)(base & 0xff000000) << 32) | \
320 -+ ((u64)flags << 40) | \
321 -+ ((u64)(limit & 0x00ff0000) << 32) | \
322 -+ ((u64)(base & 0x00ffffff) << 16) | \
323 -+ ((u64)(limit & 0x0000ffff)))
324 -+
325 - /**
326 - * acpi_save_state_mem - save kernel state
327 - *
328 -@@ -58,11 +67,11 @@ int acpi_save_state_mem(void)
329 - ((char *)&header->wakeup_gdt - (char *)acpi_realmode))
330 - << 16);
331 - /* GDT[1]: real-mode-like code segment */
332 -- header->wakeup_gdt[1] = (0x009bULL << 40) +
333 -- ((u64)acpi_wakeup_address << 16) + 0xffff;
334 -+ header->wakeup_gdt[1] =
335 -+ GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff);
336 - /* GDT[2]: real-mode-like data segment */
337 -- header->wakeup_gdt[2] = (0x0093ULL << 40) +
338 -- ((u64)acpi_wakeup_address << 16) + 0xffff;
339 -+ header->wakeup_gdt[2] =
340 -+ GDT_ENTRY(0x8093, acpi_wakeup_address, 0xfffff);
341 -
342 - #ifndef CONFIG_64BIT
343 - store_gdt((struct desc_ptr *)&header->pmode_gdt);
344 -diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
345 -index 95e80e5..eb9ddd8 100644
346 ---- a/arch/x86/kernel/i387.c
347 -+++ b/arch/x86/kernel/i387.c
348 -@@ -162,7 +162,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
349 - int ret;
350 -
351 - if (!cpu_has_fxsr)
352 -- return -EIO;
353 -+ return -ENODEV;
354 -
355 - ret = init_fpu(target);
356 - if (ret)
357 -@@ -179,7 +179,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
358 - int ret;
359 -
360 - if (!cpu_has_fxsr)
361 -- return -EIO;
362 -+ return -ENODEV;
363 -
364 - ret = init_fpu(target);
365 - if (ret)
366 -diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
367 -index a7835f2..77040b6 100644
368 ---- a/arch/x86/kernel/ptrace.c
369 -+++ b/arch/x86/kernel/ptrace.c
370 -@@ -943,13 +943,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
371 - return copy_regset_to_user(child, &user_x86_32_view,
372 - REGSET_XFP,
373 - 0, sizeof(struct user_fxsr_struct),
374 -- datap);
375 -+ datap) ? -EIO : 0;
376 -
377 - case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */
378 - return copy_regset_from_user(child, &user_x86_32_view,
379 - REGSET_XFP,
380 - 0, sizeof(struct user_fxsr_struct),
381 -- datap);
382 -+ datap) ? -EIO : 0;
383 - #endif
384 -
385 - #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
386 -diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
387 -index f6be7d5..d834b36 100644
388 ---- a/arch/x86/kernel/reboot.c
389 -+++ b/arch/x86/kernel/reboot.c
390 -@@ -177,6 +177,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
391 - DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
392 - },
393 - },
394 -+ { /* Handle problems with rebooting on Dell T5400's */
395 -+ .callback = set_bios_reboot,
396 -+ .ident = "Dell Precision T5400",
397 -+ .matches = {
398 -+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
399 -+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T5400"),
400 -+ },
401 -+ },
402 - { /* Handle problems with rebooting on HP laptops */
403 - .callback = set_bios_reboot,
404 - .ident = "HP Compaq Laptop",
405 -diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
406 -index 7e7c396..c26d811 100644
407 ---- a/arch/x86/kvm/mmu.c
408 -+++ b/arch/x86/kvm/mmu.c
409 -@@ -1171,9 +1171,10 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
410 - return -ENOMEM;
411 - }
412 -
413 -- table[index] = __pa(new_table->spt)
414 -- | PT_PRESENT_MASK | PT_WRITABLE_MASK
415 -- | shadow_user_mask | shadow_x_mask;
416 -+ set_shadow_pte(&table[index],
417 -+ __pa(new_table->spt)
418 -+ | PT_PRESENT_MASK | PT_WRITABLE_MASK
419 -+ | shadow_user_mask | shadow_x_mask);
420 - }
421 - table_addr = table[index] & PT64_BASE_ADDR_MASK;
422 - }
423 -@@ -1968,6 +1969,8 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
424 - list_for_each_entry(kvm, &vm_list, vm_list) {
425 - int npages;
426 -
427 -+ if (!down_read_trylock(&kvm->slots_lock))
428 -+ continue;
429 - spin_lock(&kvm->mmu_lock);
430 - npages = kvm->arch.n_alloc_mmu_pages -
431 - kvm->arch.n_free_mmu_pages;
432 -@@ -1980,6 +1983,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
433 - nr_to_scan--;
434 -
435 - spin_unlock(&kvm->mmu_lock);
436 -+ up_read(&kvm->slots_lock);
437 - }
438 - if (kvm_freed)
439 - list_move_tail(&kvm_freed->vm_list, &vm_list);
440 -diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
441 -index 6b0d5fa..06992d6 100644
442 ---- a/arch/x86/kvm/svm.c
443 -+++ b/arch/x86/kvm/svm.c
444 -@@ -270,19 +270,11 @@ static int has_svm(void)
445 -
446 - static void svm_hardware_disable(void *garbage)
447 - {
448 -- struct svm_cpu_data *svm_data
449 -- = per_cpu(svm_data, raw_smp_processor_id());
450 --
451 -- if (svm_data) {
452 -- uint64_t efer;
453 -+ uint64_t efer;
454 -
455 -- wrmsrl(MSR_VM_HSAVE_PA, 0);
456 -- rdmsrl(MSR_EFER, efer);
457 -- wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
458 -- per_cpu(svm_data, raw_smp_processor_id()) = NULL;
459 -- __free_page(svm_data->save_area);
460 -- kfree(svm_data);
461 -- }
462 -+ wrmsrl(MSR_VM_HSAVE_PA, 0);
463 -+ rdmsrl(MSR_EFER, efer);
464 -+ wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
465 - }
466 -
467 - static void svm_hardware_enable(void *garbage)
468 -@@ -321,6 +313,19 @@ static void svm_hardware_enable(void *garbage)
469 - page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
470 - }
471 -
472 -+static void svm_cpu_uninit(int cpu)
473 -+{
474 -+ struct svm_cpu_data *svm_data
475 -+ = per_cpu(svm_data, raw_smp_processor_id());
476 -+
477 -+ if (!svm_data)
478 -+ return;
479 -+
480 -+ per_cpu(svm_data, raw_smp_processor_id()) = NULL;
481 -+ __free_page(svm_data->save_area);
482 -+ kfree(svm_data);
483 -+}
484 -+
485 - static int svm_cpu_init(int cpu)
486 - {
487 - struct svm_cpu_data *svm_data;
488 -@@ -458,6 +463,11 @@ err:
489 -
490 - static __exit void svm_hardware_unsetup(void)
491 - {
492 -+ int cpu;
493 -+
494 -+ for_each_online_cpu(cpu)
495 -+ svm_cpu_uninit(cpu);
496 -+
497 - __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
498 - iopm_base = 0;
499 - }
500 -diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
501 -index 540e951..2ce9063 100644
502 ---- a/arch/x86/kvm/vmx.c
503 -+++ b/arch/x86/kvm/vmx.c
504 -@@ -88,6 +88,7 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
505 - }
506 -
507 - static int init_rmode(struct kvm *kvm);
508 -+static u64 construct_eptp(unsigned long root_hpa);
509 -
510 - static DEFINE_PER_CPU(struct vmcs *, vmxarea);
511 - static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
512 -@@ -1389,6 +1390,8 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
513 - static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
514 - {
515 - vpid_sync_vcpu_all(to_vmx(vcpu));
516 -+ if (vm_need_ept())
517 -+ ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
518 - }
519 -
520 - static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
521 -@@ -1420,7 +1423,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
522 - if (!(cr0 & X86_CR0_PG)) {
523 - /* From paging/starting to nonpaging */
524 - vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
525 -- vmcs_config.cpu_based_exec_ctrl |
526 -+ vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
527 - (CPU_BASED_CR3_LOAD_EXITING |
528 - CPU_BASED_CR3_STORE_EXITING));
529 - vcpu->arch.cr0 = cr0;
530 -@@ -1430,7 +1433,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
531 - } else if (!is_paging(vcpu)) {
532 - /* From nonpaging to paging */
533 - vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
534 -- vmcs_config.cpu_based_exec_ctrl &
535 -+ vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
536 - ~(CPU_BASED_CR3_LOAD_EXITING |
537 - CPU_BASED_CR3_STORE_EXITING));
538 - vcpu->arch.cr0 = cr0;
539 -diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
540 -index 63a77ca..5a7406e 100644
541 ---- a/arch/x86/kvm/x86.c
542 -+++ b/arch/x86/kvm/x86.c
543 -@@ -4016,6 +4016,11 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
544 - return 0;
545 - }
546 -
547 -+void kvm_arch_flush_shadow(struct kvm *kvm)
548 -+{
549 -+ kvm_mmu_zap_all(kvm);
550 -+}
551 -+
552 - int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
553 - {
554 - return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
555 -diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
556 -index 932f216..d41b9bc 100644
557 ---- a/arch/x86/kvm/x86_emulate.c
558 -+++ b/arch/x86/kvm/x86_emulate.c
559 -@@ -1666,7 +1666,7 @@ special_insn:
560 - break;
561 - case 0xf4: /* hlt */
562 - ctxt->vcpu->arch.halt_request = 1;
563 -- goto done;
564 -+ break;
565 - case 0xf5: /* cmc */
566 - /* complement carry flag from eflags reg */
567 - ctxt->eflags ^= EFLG_CF;
568 -diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
569 -index 819dad9..7b27710 100644
570 ---- a/arch/x86/mm/init_64.c
571 -+++ b/arch/x86/mm/init_64.c
572 -@@ -579,7 +579,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned lon
573 - else
574 - pud = alloc_low_page(&pud_phys);
575 -
576 -- next = start + PGDIR_SIZE;
577 -+ next = (start + PGDIR_SIZE) & PGDIR_MASK;
578 - if (next > end)
579 - next = end;
580 - last_map_addr = phys_pud_init(pud, __pa(start), __pa(next));
581 -diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
582 -index d80b2d1..8c06a53 100644
583 ---- a/drivers/acpi/processor_perflib.c
584 -+++ b/drivers/acpi/processor_perflib.c
585 -@@ -64,7 +64,13 @@ static DEFINE_MUTEX(performance_mutex);
586 - * policy is adjusted accordingly.
587 - */
588 -
589 --static unsigned int ignore_ppc = 0;
590 -+/* ignore_ppc:
591 -+ * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
592 -+ * ignore _PPC
593 -+ * 0 -> cpufreq low level drivers initialized -> consider _PPC values
594 -+ * 1 -> ignore _PPC totally -> forced by user through boot param
595 -+ */
596 -+static unsigned int ignore_ppc = -1;
597 - module_param(ignore_ppc, uint, 0644);
598 - MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
599 - "limited by BIOS, this should help");
600 -@@ -72,7 +78,7 @@ MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
601 - #define PPC_REGISTERED 1
602 - #define PPC_IN_USE 2
603 -
604 --static int acpi_processor_ppc_status = 0;
605 -+static int acpi_processor_ppc_status;
606 -
607 - static int acpi_processor_ppc_notifier(struct notifier_block *nb,
608 - unsigned long event, void *data)
609 -@@ -81,6 +87,11 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
610 - struct acpi_processor *pr;
611 - unsigned int ppc = 0;
612 -
613 -+ if (event == CPUFREQ_START && ignore_ppc <= 0) {
614 -+ ignore_ppc = 0;
615 -+ return 0;
616 -+ }
617 -+
618 - if (ignore_ppc)
619 - return 0;
620 -
621 -diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
622 -index 1d41496..0471ef5 100644
623 ---- a/drivers/cpufreq/cpufreq.c
624 -+++ b/drivers/cpufreq/cpufreq.c
625 -@@ -825,6 +825,9 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
626 - policy->user_policy.min = policy->cpuinfo.min_freq;
627 - policy->user_policy.max = policy->cpuinfo.max_freq;
628 -
629 -+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
630 -+ CPUFREQ_START, policy);
631 -+
632 - #ifdef CONFIG_SMP
633 -
634 - #ifdef CONFIG_HOTPLUG_CPU
635 -diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
636 -index 0ec0f43..4e6b052 100644
637 ---- a/drivers/dma/iop-adma.c
638 -+++ b/drivers/dma/iop-adma.c
639 -@@ -1387,6 +1387,8 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
640 - spin_unlock_bh(&iop_chan->lock);
641 - }
642 -
643 -+MODULE_ALIAS("platform:iop-adma");
644 -+
645 - static struct platform_driver iop_adma_driver = {
646 - .probe = iop_adma_probe,
647 - .remove = iop_adma_remove,
648 -diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
649 -index 68e7f19..0cc854e 100644
650 ---- a/drivers/ide/ide-cd.c
651 -+++ b/drivers/ide/ide-cd.c
652 -@@ -1308,13 +1308,30 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
653 - req.cmd_flags |= REQ_QUIET;
654 -
655 - stat = ide_cd_queue_pc(drive, &req);
656 -- if (stat == 0) {
657 -- *capacity = 1 + be32_to_cpu(capbuf.lba);
658 -- *sectors_per_frame =
659 -- be32_to_cpu(capbuf.blocklen) >> SECTOR_BITS;
660 -+ if (stat)
661 -+ return stat;
662 -+
663 -+ /*
664 -+ * Sanity check the given block size
665 -+ */
666 -+ switch (capbuf.blocklen) {
667 -+ case __constant_cpu_to_be32(512):
668 -+ case __constant_cpu_to_be32(1024):
669 -+ case __constant_cpu_to_be32(2048):
670 -+ case __constant_cpu_to_be32(4096):
671 -+ break;
672 -+ default:
673 -+ printk(KERN_ERR "%s: weird block size %u\n",
674 -+ drive->name, capbuf.blocklen);
675 -+ printk(KERN_ERR "%s: default to 2kb block size\n",
676 -+ drive->name);
677 -+ capbuf.blocklen = __constant_cpu_to_be32(2048);
678 -+ break;
679 - }
680 -
681 -- return stat;
682 -+ *capacity = 1 + be32_to_cpu(capbuf.lba);
683 -+ *sectors_per_frame = be32_to_cpu(capbuf.blocklen) >> SECTOR_BITS;
684 -+ return 0;
685 - }
686 -
687 - static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
688 -diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
689 -index c4d40fe..3dd20bf 100644
690 ---- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
691 -+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
692 -@@ -1117,6 +1117,7 @@ struct usb_device_id dib0700_usb_id_table[] = {
693 - { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_HT_EXPRESS) },
694 - { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_XXS) },
695 - { USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_STK7700P_2) },
696 -+ { USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_HAUPPAUGE_NOVA_TD_STICK_52009) },
697 - { 0 } /* Terminating entry */
698 - };
699 - MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
700 -@@ -1372,7 +1373,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
701 - }
702 - },
703 -
704 -- .num_device_descs = 2,
705 -+ .num_device_descs = 3,
706 - .devices = {
707 - { "DiBcom STK7070PD reference design",
708 - { &dib0700_usb_id_table[17], NULL },
709 -@@ -1381,6 +1382,10 @@ struct dvb_usb_device_properties dib0700_devices[] = {
710 - { "Pinnacle PCTV Dual DVB-T Diversity Stick",
711 - { &dib0700_usb_id_table[18], NULL },
712 - { NULL },
713 -+ },
714 -+ { "Hauppauge Nova-TD Stick (52009)",
715 -+ { &dib0700_usb_id_table[35], NULL },
716 -+ { NULL },
717 - }
718 - }
719 - }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
720 -diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
721 -index 34245d1..31ded10 100644
722 ---- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
723 -+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
724 -@@ -132,6 +132,7 @@
725 - #define USB_PID_HAUPPAUGE_NOVA_T_STICK_3 0x7070
726 - #define USB_PID_HAUPPAUGE_MYTV_T 0x7080
727 - #define USB_PID_HAUPPAUGE_NOVA_TD_STICK 0x9580
728 -+#define USB_PID_HAUPPAUGE_NOVA_TD_STICK_52009 0x5200
729 - #define USB_PID_AVERMEDIA_EXPRESS 0xb568
730 - #define USB_PID_AVERMEDIA_VOLAR 0xa807
731 - #define USB_PID_AVERMEDIA_VOLAR_2 0xb808
732 -diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
733 -index 5ccb0ae..e00717d 100644
734 ---- a/drivers/media/video/Kconfig
735 -+++ b/drivers/media/video/Kconfig
736 -@@ -793,13 +793,7 @@ menuconfig V4L_USB_DRIVERS
737 -
738 - if V4L_USB_DRIVERS && USB
739 -
740 --config USB_VIDEO_CLASS
741 -- tristate "USB Video Class (UVC)"
742 -- ---help---
743 -- Support for the USB Video Class (UVC). Currently only video
744 -- input devices, such as webcams, are supported.
745 --
746 -- For more information see: <http://linux-uvc.berlios.de/>
747 -+source "drivers/media/video/uvc/Kconfig"
748 -
749 - source "drivers/media/video/pvrusb2/Kconfig"
750 -
751 -diff --git a/drivers/media/video/cx18/cx18-firmware.c b/drivers/media/video/cx18/cx18-firmware.c
752 -index 2694ce3..9dda2ca 100644
753 ---- a/drivers/media/video/cx18/cx18-firmware.c
754 -+++ b/drivers/media/video/cx18/cx18-firmware.c
755 -@@ -90,7 +90,7 @@
756 - #define CX18_DSP0_INTERRUPT_MASK 0xd0004C
757 -
758 - /* Encoder/decoder firmware sizes */
759 --#define CX18_FW_CPU_SIZE (174716)
760 -+#define CX18_FW_CPU_SIZE (158332)
761 - #define CX18_FW_APU_SIZE (141200)
762 -
763 - #define APU_ROM_SYNC1 0x6D676553 /* "mgeS" */
764 -diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
765 -index c4cc2f3..ad143f1 100644
766 ---- a/drivers/media/video/cx23885/cx23885-core.c
767 -+++ b/drivers/media/video/cx23885/cx23885-core.c
768 -@@ -76,6 +76,117 @@ LIST_HEAD(cx23885_devlist);
769 - * 0x00010ea0 0x00010xxx Free
770 - */
771 -
772 -+static struct sram_channel cx23885_sram_channels[] = {
773 -+ [SRAM_CH01] = {
774 -+ .name = "VID A",
775 -+ .cmds_start = 0x10000,
776 -+ .ctrl_start = 0x10380,
777 -+ .cdt = 0x104c0,
778 -+ .fifo_start = 0x40,
779 -+ .fifo_size = 0x2800,
780 -+ .ptr1_reg = DMA1_PTR1,
781 -+ .ptr2_reg = DMA1_PTR2,
782 -+ .cnt1_reg = DMA1_CNT1,
783 -+ .cnt2_reg = DMA1_CNT2,
784 -+ },
785 -+ [SRAM_CH02] = {
786 -+ .name = "ch2",
787 -+ .cmds_start = 0x0,
788 -+ .ctrl_start = 0x0,
789 -+ .cdt = 0x0,
790 -+ .fifo_start = 0x0,
791 -+ .fifo_size = 0x0,
792 -+ .ptr1_reg = DMA2_PTR1,
793 -+ .ptr2_reg = DMA2_PTR2,
794 -+ .cnt1_reg = DMA2_CNT1,
795 -+ .cnt2_reg = DMA2_CNT2,
796 -+ },
797 -+ [SRAM_CH03] = {
798 -+ .name = "TS1 B",
799 -+ .cmds_start = 0x100A0,
800 -+ .ctrl_start = 0x10400,
801 -+ .cdt = 0x10580,
802 -+ .fifo_start = 0x5000,
803 -+ .fifo_size = 0x1000,
804 -+ .ptr1_reg = DMA3_PTR1,
805 -+ .ptr2_reg = DMA3_PTR2,
806 -+ .cnt1_reg = DMA3_CNT1,
807 -+ .cnt2_reg = DMA3_CNT2,
808 -+ },
809 -+ [SRAM_CH04] = {
810 -+ .name = "ch4",
811 -+ .cmds_start = 0x0,
812 -+ .ctrl_start = 0x0,
813 -+ .cdt = 0x0,
814 -+ .fifo_start = 0x0,
815 -+ .fifo_size = 0x0,
816 -+ .ptr1_reg = DMA4_PTR1,
817 -+ .ptr2_reg = DMA4_PTR2,
818 -+ .cnt1_reg = DMA4_CNT1,
819 -+ .cnt2_reg = DMA4_CNT2,
820 -+ },
821 -+ [SRAM_CH05] = {
822 -+ .name = "ch5",
823 -+ .cmds_start = 0x0,
824 -+ .ctrl_start = 0x0,
825 -+ .cdt = 0x0,
826 -+ .fifo_start = 0x0,
827 -+ .fifo_size = 0x0,
828 -+ .ptr1_reg = DMA5_PTR1,
829 -+ .ptr2_reg = DMA5_PTR2,
830 -+ .cnt1_reg = DMA5_CNT1,
831 -+ .cnt2_reg = DMA5_CNT2,
832 -+ },
833 -+ [SRAM_CH06] = {
834 -+ .name = "TS2 C",
835 -+ .cmds_start = 0x10140,
836 -+ .ctrl_start = 0x10440,
837 -+ .cdt = 0x105e0,
838 -+ .fifo_start = 0x6000,
839 -+ .fifo_size = 0x1000,
840 -+ .ptr1_reg = DMA5_PTR1,
841 -+ .ptr2_reg = DMA5_PTR2,
842 -+ .cnt1_reg = DMA5_CNT1,
843 -+ .cnt2_reg = DMA5_CNT2,
844 -+ },
845 -+ [SRAM_CH07] = {
846 -+ .name = "ch7",
847 -+ .cmds_start = 0x0,
848 -+ .ctrl_start = 0x0,
849 -+ .cdt = 0x0,
850 -+ .fifo_start = 0x0,
851 -+ .fifo_size = 0x0,
852 -+ .ptr1_reg = DMA6_PTR1,
853 -+ .ptr2_reg = DMA6_PTR2,
854 -+ .cnt1_reg = DMA6_CNT1,
855 -+ .cnt2_reg = DMA6_CNT2,
856 -+ },
857 -+ [SRAM_CH08] = {
858 -+ .name = "ch8",
859 -+ .cmds_start = 0x0,
860 -+ .ctrl_start = 0x0,
861 -+ .cdt = 0x0,
862 -+ .fifo_start = 0x0,
863 -+ .fifo_size = 0x0,
864 -+ .ptr1_reg = DMA7_PTR1,
865 -+ .ptr2_reg = DMA7_PTR2,
866 -+ .cnt1_reg = DMA7_CNT1,
867 -+ .cnt2_reg = DMA7_CNT2,
868 -+ },
869 -+ [SRAM_CH09] = {
870 -+ .name = "ch9",
871 -+ .cmds_start = 0x0,
872 -+ .ctrl_start = 0x0,
873 -+ .cdt = 0x0,
874 -+ .fifo_start = 0x0,
875 -+ .fifo_size = 0x0,
876 -+ .ptr1_reg = DMA8_PTR1,
877 -+ .ptr2_reg = DMA8_PTR2,
878 -+ .cnt1_reg = DMA8_CNT1,
879 -+ .cnt2_reg = DMA8_CNT2,
880 -+ },
881 -+};
882 -+
883 - static struct sram_channel cx23887_sram_channels[] = {
884 - [SRAM_CH01] = {
885 - .name = "VID A",
886 -@@ -104,8 +215,8 @@ static struct sram_channel cx23887_sram_channels[] = {
887 - [SRAM_CH03] = {
888 - .name = "TS1 B",
889 - .cmds_start = 0x100A0,
890 -- .ctrl_start = 0x10780,
891 -- .cdt = 0x10400,
892 -+ .ctrl_start = 0x10630,
893 -+ .cdt = 0x10870,
894 - .fifo_start = 0x5000,
895 - .fifo_size = 0x1000,
896 - .ptr1_reg = DMA3_PTR1,
897 -@@ -140,7 +251,7 @@ static struct sram_channel cx23887_sram_channels[] = {
898 - [SRAM_CH06] = {
899 - .name = "TS2 C",
900 - .cmds_start = 0x10140,
901 -- .ctrl_start = 0x10680,
902 -+ .ctrl_start = 0x10670,
903 - .cdt = 0x108d0,
904 - .fifo_start = 0x6000,
905 - .fifo_size = 0x1000,
906 -@@ -460,6 +571,7 @@ static void cx23885_reset(struct cx23885_dev *dev)
907 - cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
908 - cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
909 - cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
910 -+ cx_write(PAD_CTRL, 0x00500300);
911 -
912 - mdelay(100);
913 -
914 -@@ -625,7 +737,6 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
915 - atomic_inc(&dev->refcount);
916 -
917 - dev->nr = cx23885_devcount++;
918 -- dev->sram_channels = cx23887_sram_channels;
919 - sprintf(dev->name, "cx23885[%d]", dev->nr);
920 -
921 - mutex_lock(&devlist);
922 -@@ -637,11 +748,13 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
923 - dev->bridge = CX23885_BRIDGE_887;
924 - /* Apply a sensible clock frequency for the PCIe bridge */
925 - dev->clk_freq = 25000000;
926 -+ dev->sram_channels = cx23887_sram_channels;
927 - } else
928 - if(dev->pci->device == 0x8852) {
929 - dev->bridge = CX23885_BRIDGE_885;
930 - /* Apply a sensible clock frequency for the PCIe bridge */
931 - dev->clk_freq = 28000000;
932 -+ dev->sram_channels = cx23885_sram_channels;
933 - } else
934 - BUG();
935 -
936 -@@ -1042,6 +1155,9 @@ static int cx23885_start_dma(struct cx23885_tsport *port,
937 - dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
938 - buf->vb.width, buf->vb.height, buf->vb.field);
939 -
940 -+ /* Stop the fifo and risc engine for this port */
941 -+ cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
942 -+
943 - /* setup fifo + format */
944 - cx23885_sram_channel_setup(dev,
945 - &dev->sram_channels[ port->sram_chno ],
946 -@@ -1083,7 +1199,21 @@ static int cx23885_start_dma(struct cx23885_tsport *port,
947 - cx_write(port->reg_gpcnt_ctl, 3);
948 - q->count = 1;
949 -
950 -- if (cx23885_boards[dev->board].portb & CX23885_MPEG_ENCODER) {
951 -+ /* Set VIDB pins to input */
952 -+ if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
953 -+ reg = cx_read(PAD_CTRL);
954 -+ reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
955 -+ cx_write(PAD_CTRL, reg);
956 -+ }
957 -+
958 -+ /* Set VIDC pins to input */
959 -+ if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
960 -+ reg = cx_read(PAD_CTRL);
961 -+ reg &= ~0x4; /* Clear TS2_SOP_OE */
962 -+ cx_write(PAD_CTRL, reg);
963 -+ }
964 -+
965 -+ if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
966 -
967 - reg = cx_read(PAD_CTRL);
968 - reg = reg & ~0x1; /* Clear TS1_OE */
969 -@@ -1133,7 +1263,7 @@ static int cx23885_stop_dma(struct cx23885_tsport *port)
970 - cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
971 - cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
972 -
973 -- if (cx23885_boards[dev->board].portb & CX23885_MPEG_ENCODER) {
974 -+ if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
975 -
976 - reg = cx_read(PAD_CTRL);
977 -
978 -diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
979 -index 2618cfa..0227cf9 100644
980 ---- a/drivers/media/video/saa7134/saa7134-cards.c
981 -+++ b/drivers/media/video/saa7134/saa7134-cards.c
982 -@@ -5703,9 +5703,6 @@ int saa7134_board_init2(struct saa7134_dev *dev)
983 - unsigned char buf;
984 - int board;
985 -
986 -- dev->tuner_type = saa7134_boards[dev->board].tuner_type;
987 -- dev->tuner_addr = saa7134_boards[dev->board].tuner_addr;
988 --
989 - switch (dev->board) {
990 - case SAA7134_BOARD_BMK_MPEX_NOTUNER:
991 - case SAA7134_BOARD_BMK_MPEX_TUNER:
992 -diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
993 -index 2c19cd0..69f340d 100644
994 ---- a/drivers/media/video/saa7134/saa7134-core.c
995 -+++ b/drivers/media/video/saa7134/saa7134-core.c
996 -@@ -946,11 +946,12 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
997 - dev->board = SAA7134_BOARD_UNKNOWN;
998 - }
999 - dev->autodetected = card[dev->nr] != dev->board;
1000 -- dev->tuner_type = saa7134_boards[dev->board].tuner_type;
1001 -+ dev->tuner_type = saa7134_boards[dev->board].tuner_type;
1002 -+ dev->tuner_addr = saa7134_boards[dev->board].tuner_addr;
1003 - dev->tda9887_conf = saa7134_boards[dev->board].tda9887_conf;
1004 - if (UNSET != tuner[dev->nr])
1005 - dev->tuner_type = tuner[dev->nr];
1006 -- printk(KERN_INFO "%s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
1007 -+ printk(KERN_INFO "%s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
1008 - dev->name,pci_dev->subsystem_vendor,
1009 - pci_dev->subsystem_device,saa7134_boards[dev->board].name,
1010 - dev->board, dev->autodetected ?
1011 -diff --git a/drivers/media/video/uvc/Kconfig b/drivers/media/video/uvc/Kconfig
1012 -new file mode 100644
1013 -index 0000000..c2d9760
1014 ---- /dev/null
1015 -+++ b/drivers/media/video/uvc/Kconfig
1016 -@@ -0,0 +1,17 @@
1017 -+config USB_VIDEO_CLASS
1018 -+ tristate "USB Video Class (UVC)"
1019 -+ ---help---
1020 -+ Support for the USB Video Class (UVC). Currently only video
1021 -+ input devices, such as webcams, are supported.
1022 -+
1023 -+ For more information see: <http://linux-uvc.berlios.de/>
1024 -+
1025 -+config USB_VIDEO_CLASS_INPUT_EVDEV
1026 -+ bool "UVC input events device support"
1027 -+ default y
1028 -+ depends on USB_VIDEO_CLASS && INPUT
1029 -+ ---help---
1030 -+ This option makes USB Video Class devices register an input device
1031 -+ to report button events.
1032 -+
1033 -+ If you are in doubt, say Y.
1034 -diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
1035 -index 60ced58..9d954d2 100644
1036 ---- a/drivers/media/video/uvc/uvc_driver.c
1037 -+++ b/drivers/media/video/uvc/uvc_driver.c
1038 -@@ -298,7 +298,8 @@ static int uvc_parse_format(struct uvc_device *dev,
1039 - switch (buffer[2]) {
1040 - case VS_FORMAT_UNCOMPRESSED:
1041 - case VS_FORMAT_FRAME_BASED:
1042 -- if (buflen < 27) {
1043 -+ n = buffer[2] == VS_FORMAT_UNCOMPRESSED ? 27 : 28;
1044 -+ if (buflen < n) {
1045 - uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming"
1046 - "interface %d FORMAT error\n",
1047 - dev->udev->devnum,
1048 -@@ -1891,6 +1892,15 @@ static struct usb_device_id uvc_ids[] = {
1049 - .bInterfaceSubClass = 1,
1050 - .bInterfaceProtocol = 0,
1051 - .driver_info = UVC_QUIRK_PROBE_MINMAX },
1052 -+ /* Medion Akoya Mini E1210 */
1053 -+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1054 -+ | USB_DEVICE_ID_MATCH_INT_INFO,
1055 -+ .idVendor = 0x5986,
1056 -+ .idProduct = 0x0141,
1057 -+ .bInterfaceClass = USB_CLASS_VIDEO,
1058 -+ .bInterfaceSubClass = 1,
1059 -+ .bInterfaceProtocol = 0,
1060 -+ .driver_info = UVC_QUIRK_PROBE_MINMAX },
1061 - /* Acer OrbiCam - Unknown vendor */
1062 - { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1063 - | USB_DEVICE_ID_MATCH_INT_INFO,
1064 -diff --git a/drivers/media/video/uvc/uvc_status.c b/drivers/media/video/uvc/uvc_status.c
1065 -index be9084e..eb2f970 100644
1066 ---- a/drivers/media/video/uvc/uvc_status.c
1067 -+++ b/drivers/media/video/uvc/uvc_status.c
1068 -@@ -22,6 +22,7 @@
1069 - /* --------------------------------------------------------------------------
1070 - * Input device
1071 - */
1072 -+#ifdef CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV
1073 - static int uvc_input_init(struct uvc_device *dev)
1074 - {
1075 - struct usb_device *udev = dev->udev;
1076 -@@ -67,6 +68,19 @@ static void uvc_input_cleanup(struct uvc_device *dev)
1077 - input_unregister_device(dev->input);
1078 - }
1079 -
1080 -+static void uvc_input_report_key(struct uvc_device *dev, unsigned int code,
1081 -+ int value)
1082 -+{
1083 -+ if (dev->input)
1084 -+ input_report_key(dev->input, code, value);
1085 -+}
1086 -+
1087 -+#else
1088 -+#define uvc_input_init(dev)
1089 -+#define uvc_input_cleanup(dev)
1090 -+#define uvc_input_report_key(dev, code, value)
1091 -+#endif /* CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV */
1092 -+
1093 - /* --------------------------------------------------------------------------
1094 - * Status interrupt endpoint
1095 - */
1096 -@@ -83,8 +97,7 @@ static void uvc_event_streaming(struct uvc_device *dev, __u8 *data, int len)
1097 - return;
1098 - uvc_trace(UVC_TRACE_STATUS, "Button (intf %u) %s len %d\n",
1099 - data[1], data[3] ? "pressed" : "released", len);
1100 -- if (dev->input)
1101 -- input_report_key(dev->input, BTN_0, data[3]);
1102 -+ uvc_input_report_key(dev, BTN_0, data[3]);
1103 - } else {
1104 - uvc_trace(UVC_TRACE_STATUS, "Stream %u error event %02x %02x "
1105 - "len %d.\n", data[1], data[2], data[3], len);
1106 -@@ -203,5 +216,5 @@ int uvc_status_resume(struct uvc_device *dev)
1107 - if (dev->int_urb == NULL)
1108 - return 0;
1109 -
1110 -- return usb_submit_urb(dev->int_urb, GFP_KERNEL);
1111 -+ return usb_submit_urb(dev->int_urb, GFP_NOIO);
1112 - }
1113 -diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
1114 -index 6faf1fb..817af2e 100644
1115 ---- a/drivers/media/video/uvc/uvc_video.c
1116 -+++ b/drivers/media/video/uvc/uvc_video.c
1117 -@@ -554,9 +554,56 @@ static void uvc_video_complete(struct urb *urb)
1118 - }
1119 -
1120 - /*
1121 -+ * Free transfer buffers.
1122 -+ */
1123 -+static void uvc_free_urb_buffers(struct uvc_video_device *video)
1124 -+{
1125 -+ unsigned int i;
1126 -+
1127 -+ for (i = 0; i < UVC_URBS; ++i) {
1128 -+ if (video->urb_buffer[i]) {
1129 -+ usb_buffer_free(video->dev->udev, video->urb_size,
1130 -+ video->urb_buffer[i], video->urb_dma[i]);
1131 -+ video->urb_buffer[i] = NULL;
1132 -+ }
1133 -+ }
1134 -+
1135 -+ video->urb_size = 0;
1136 -+}
1137 -+
1138 -+/*
1139 -+ * Allocate transfer buffers. This function can be called with buffers
1140 -+ * already allocated when resuming from suspend, in which case it will
1141 -+ * return without touching the buffers.
1142 -+ *
1143 -+ * Return 0 on success or -ENOMEM when out of memory.
1144 -+ */
1145 -+static int uvc_alloc_urb_buffers(struct uvc_video_device *video,
1146 -+ unsigned int size)
1147 -+{
1148 -+ unsigned int i;
1149 -+
1150 -+ /* Buffers are already allocated, bail out. */
1151 -+ if (video->urb_size)
1152 -+ return 0;
1153 -+
1154 -+ for (i = 0; i < UVC_URBS; ++i) {
1155 -+ video->urb_buffer[i] = usb_buffer_alloc(video->dev->udev,
1156 -+ size, GFP_KERNEL, &video->urb_dma[i]);
1157 -+ if (video->urb_buffer[i] == NULL) {
1158 -+ uvc_free_urb_buffers(video);
1159 -+ return -ENOMEM;
1160 -+ }
1161 -+ }
1162 -+
1163 -+ video->urb_size = size;
1164 -+ return 0;
1165 -+}
1166 -+
1167 -+/*
1168 - * Uninitialize isochronous/bulk URBs and free transfer buffers.
1169 - */
1170 --static void uvc_uninit_video(struct uvc_video_device *video)
1171 -+static void uvc_uninit_video(struct uvc_video_device *video, int free_buffers)
1172 - {
1173 - struct urb *urb;
1174 - unsigned int i;
1175 -@@ -566,19 +613,12 @@ static void uvc_uninit_video(struct uvc_video_device *video)
1176 - continue;
1177 -
1178 - usb_kill_urb(urb);
1179 -- /* urb->transfer_buffer_length is not touched by USB core, so
1180 -- * we can use it here as the buffer length.
1181 -- */
1182 -- if (video->urb_buffer[i]) {
1183 -- usb_buffer_free(video->dev->udev,
1184 -- urb->transfer_buffer_length,
1185 -- video->urb_buffer[i], urb->transfer_dma);
1186 -- video->urb_buffer[i] = NULL;
1187 -- }
1188 --
1189 - usb_free_urb(urb);
1190 - video->urb[i] = NULL;
1191 - }
1192 -+
1193 -+ if (free_buffers)
1194 -+ uvc_free_urb_buffers(video);
1195 - }
1196 -
1197 - /*
1198 -@@ -586,7 +626,7 @@ static void uvc_uninit_video(struct uvc_video_device *video)
1199 - * is given by the endpoint.
1200 - */
1201 - static int uvc_init_video_isoc(struct uvc_video_device *video,
1202 -- struct usb_host_endpoint *ep)
1203 -+ struct usb_host_endpoint *ep, gfp_t gfp_flags)
1204 - {
1205 - struct urb *urb;
1206 - unsigned int npackets, i, j;
1207 -@@ -610,18 +650,13 @@ static int uvc_init_video_isoc(struct uvc_video_device *video,
1208 -
1209 - size = npackets * psize;
1210 -
1211 -+ if (uvc_alloc_urb_buffers(video, size) < 0)
1212 -+ return -ENOMEM;
1213 -+
1214 - for (i = 0; i < UVC_URBS; ++i) {
1215 -- urb = usb_alloc_urb(npackets, GFP_KERNEL);
1216 -+ urb = usb_alloc_urb(npackets, gfp_flags);
1217 - if (urb == NULL) {
1218 -- uvc_uninit_video(video);
1219 -- return -ENOMEM;
1220 -- }
1221 --
1222 -- video->urb_buffer[i] = usb_buffer_alloc(video->dev->udev,
1223 -- size, GFP_KERNEL, &urb->transfer_dma);
1224 -- if (video->urb_buffer[i] == NULL) {
1225 -- usb_free_urb(urb);
1226 -- uvc_uninit_video(video);
1227 -+ uvc_uninit_video(video, 1);
1228 - return -ENOMEM;
1229 - }
1230 -
1231 -@@ -632,6 +667,7 @@ static int uvc_init_video_isoc(struct uvc_video_device *video,
1232 - urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
1233 - urb->interval = ep->desc.bInterval;
1234 - urb->transfer_buffer = video->urb_buffer[i];
1235 -+ urb->transfer_dma = video->urb_dma[i];
1236 - urb->complete = uvc_video_complete;
1237 - urb->number_of_packets = npackets;
1238 - urb->transfer_buffer_length = size;
1239 -@@ -652,7 +688,7 @@ static int uvc_init_video_isoc(struct uvc_video_device *video,
1240 - * given by the endpoint.
1241 - */
1242 - static int uvc_init_video_bulk(struct uvc_video_device *video,
1243 -- struct usb_host_endpoint *ep)
1244 -+ struct usb_host_endpoint *ep, gfp_t gfp_flags)
1245 - {
1246 - struct urb *urb;
1247 - unsigned int pipe, i;
1248 -@@ -671,20 +707,15 @@ static int uvc_init_video_bulk(struct uvc_video_device *video,
1249 - if (size > psize * UVC_MAX_ISO_PACKETS)
1250 - size = psize * UVC_MAX_ISO_PACKETS;
1251 -
1252 -+ if (uvc_alloc_urb_buffers(video, size) < 0)
1253 -+ return -ENOMEM;
1254 -+
1255 - pipe = usb_rcvbulkpipe(video->dev->udev, ep->desc.bEndpointAddress);
1256 -
1257 - for (i = 0; i < UVC_URBS; ++i) {
1258 -- urb = usb_alloc_urb(0, GFP_KERNEL);
1259 -+ urb = usb_alloc_urb(0, gfp_flags);
1260 - if (urb == NULL) {
1261 -- uvc_uninit_video(video);
1262 -- return -ENOMEM;
1263 -- }
1264 --
1265 -- video->urb_buffer[i] = usb_buffer_alloc(video->dev->udev,
1266 -- size, GFP_KERNEL, &urb->transfer_dma);
1267 -- if (video->urb_buffer[i] == NULL) {
1268 -- usb_free_urb(urb);
1269 -- uvc_uninit_video(video);
1270 -+ uvc_uninit_video(video, 1);
1271 - return -ENOMEM;
1272 - }
1273 -
1274 -@@ -692,6 +723,7 @@ static int uvc_init_video_bulk(struct uvc_video_device *video,
1275 - video->urb_buffer[i], size, uvc_video_complete,
1276 - video);
1277 - urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
1278 -+ urb->transfer_dma = video->urb_dma[i];
1279 -
1280 - video->urb[i] = urb;
1281 - }
1282 -@@ -702,7 +734,7 @@ static int uvc_init_video_bulk(struct uvc_video_device *video,
1283 - /*
1284 - * Initialize isochronous/bulk URBs and allocate transfer buffers.
1285 - */
1286 --static int uvc_init_video(struct uvc_video_device *video)
1287 -+static int uvc_init_video(struct uvc_video_device *video, gfp_t gfp_flags)
1288 - {
1289 - struct usb_interface *intf = video->streaming->intf;
1290 - struct usb_host_interface *alts;
1291 -@@ -747,7 +779,7 @@ static int uvc_init_video(struct uvc_video_device *video)
1292 - if ((ret = usb_set_interface(video->dev->udev, intfnum, i)) < 0)
1293 - return ret;
1294 -
1295 -- ret = uvc_init_video_isoc(video, ep);
1296 -+ ret = uvc_init_video_isoc(video, ep, gfp_flags);
1297 - } else {
1298 - /* Bulk endpoint, proceed to URB initialization. */
1299 - ep = uvc_find_endpoint(&intf->altsetting[0],
1300 -@@ -755,7 +787,7 @@ static int uvc_init_video(struct uvc_video_device *video)
1301 - if (ep == NULL)
1302 - return -EIO;
1303 -
1304 -- ret = uvc_init_video_bulk(video, ep);
1305 -+ ret = uvc_init_video_bulk(video, ep, gfp_flags);
1306 - }
1307 -
1308 - if (ret < 0)
1309 -@@ -763,10 +795,10 @@ static int uvc_init_video(struct uvc_video_device *video)
1310 -
1311 - /* Submit the URBs. */
1312 - for (i = 0; i < UVC_URBS; ++i) {
1313 -- if ((ret = usb_submit_urb(video->urb[i], GFP_KERNEL)) < 0) {
1314 -+ if ((ret = usb_submit_urb(video->urb[i], gfp_flags)) < 0) {
1315 - uvc_printk(KERN_ERR, "Failed to submit URB %u "
1316 - "(%d).\n", i, ret);
1317 -- uvc_uninit_video(video);
1318 -+ uvc_uninit_video(video, 1);
1319 - return ret;
1320 - }
1321 - }
1322 -@@ -791,7 +823,7 @@ int uvc_video_suspend(struct uvc_video_device *video)
1323 - return 0;
1324 -
1325 - video->frozen = 1;
1326 -- uvc_uninit_video(video);
1327 -+ uvc_uninit_video(video, 0);
1328 - usb_set_interface(video->dev->udev, video->streaming->intfnum, 0);
1329 - return 0;
1330 - }
1331 -@@ -818,7 +850,7 @@ int uvc_video_resume(struct uvc_video_device *video)
1332 - if (!uvc_queue_streaming(&video->queue))
1333 - return 0;
1334 -
1335 -- if ((ret = uvc_init_video(video)) < 0)
1336 -+ if ((ret = uvc_init_video(video, GFP_NOIO)) < 0)
1337 - uvc_queue_enable(&video->queue, 0);
1338 -
1339 - return ret;
1340 -@@ -920,7 +952,7 @@ int uvc_video_enable(struct uvc_video_device *video, int enable)
1341 - int ret;
1342 -
1343 - if (!enable) {
1344 -- uvc_uninit_video(video);
1345 -+ uvc_uninit_video(video, 1);
1346 - usb_set_interface(video->dev->udev,
1347 - video->streaming->intfnum, 0);
1348 - uvc_queue_enable(&video->queue, 0);
1349 -@@ -930,5 +962,5 @@ int uvc_video_enable(struct uvc_video_device *video, int enable)
1350 - if ((ret = uvc_queue_enable(&video->queue, 1)) < 0)
1351 - return ret;
1352 -
1353 -- return uvc_init_video(video);
1354 -+ return uvc_init_video(video, GFP_KERNEL);
1355 - }
1356 -diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
1357 -index a995a78..2444b8a 100644
1358 ---- a/drivers/media/video/uvc/uvcvideo.h
1359 -+++ b/drivers/media/video/uvc/uvcvideo.h
1360 -@@ -602,6 +602,8 @@ struct uvc_video_device {
1361 -
1362 - struct urb *urb[UVC_URBS];
1363 - char *urb_buffer[UVC_URBS];
1364 -+ dma_addr_t urb_dma[UVC_URBS];
1365 -+ unsigned int urb_size;
1366 -
1367 - __u8 last_fid;
1368 - };
1369 -diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
1370 -index d89475d..5c35e63 100644
1371 ---- a/drivers/mmc/host/pxamci.c
1372 -+++ b/drivers/mmc/host/pxamci.c
1373 -@@ -177,7 +177,7 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
1374 - if (dalgn)
1375 - DALGN |= (1 << host->dma);
1376 - else
1377 -- DALGN &= (1 << host->dma);
1378 -+ DALGN &= ~(1 << host->dma);
1379 - DDADR(host->dma) = host->sg_dma;
1380 - DCSR(host->dma) = DCSR_RUN;
1381 - }
1382 -diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
1383 -index ae9629f..c258a05 100644
1384 ---- a/drivers/net/hamradio/hdlcdrv.c
1385 -+++ b/drivers/net/hamradio/hdlcdrv.c
1386 -@@ -88,6 +88,7 @@
1387 - static inline void append_crc_ccitt(unsigned char *buffer, int len)
1388 - {
1389 - unsigned int crc = crc_ccitt(0xffff, buffer, len) ^ 0xffff;
1390 -+ buffer += len;
1391 - *buffer++ = crc;
1392 - *buffer++ = crc >> 8;
1393 - }
1394 -diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
1395 -index 8f04609..e248f80 100644
1396 ---- a/drivers/net/ixgbe/ixgbe_main.c
1397 -+++ b/drivers/net/ixgbe/ixgbe_main.c
1398 -@@ -70,8 +70,6 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
1399 - board_82598 },
1400 - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
1401 - board_82598 },
1402 -- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT_DUAL_PORT),
1403 -- board_82598 },
1404 - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
1405 - board_82598 },
1406 -
1407 -diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
1408 -index e0d76c7..9e28d91 100644
1409 ---- a/drivers/net/myri10ge/myri10ge.c
1410 -+++ b/drivers/net/myri10ge/myri10ge.c
1411 -@@ -3126,6 +3126,8 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1412 -
1413 - mgp = netdev_priv(netdev);
1414 - mgp->dev = netdev;
1415 -+ mgp->ss.mgp = mgp;
1416 -+ mgp->ss.dev = mgp->dev;
1417 - netif_napi_add(netdev, &mgp->ss.napi, myri10ge_poll, myri10ge_napi_weight);
1418 - mgp->pdev = pdev;
1419 - mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
1420 -@@ -3213,26 +3215,26 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1421 - for (i = 0; i < ETH_ALEN; i++)
1422 - netdev->dev_addr[i] = mgp->mac_addr[i];
1423 -
1424 -- /* allocate rx done ring */
1425 -- bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
1426 -- mgp->ss.rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
1427 -- &mgp->ss.rx_done.bus, GFP_KERNEL);
1428 -- if (mgp->ss.rx_done.entry == NULL)
1429 -- goto abort_with_ioremap;
1430 -- memset(mgp->ss.rx_done.entry, 0, bytes);
1431 --
1432 - myri10ge_select_firmware(mgp);
1433 -
1434 - status = myri10ge_load_firmware(mgp);
1435 - if (status != 0) {
1436 - dev_err(&pdev->dev, "failed to load firmware\n");
1437 -- goto abort_with_rx_done;
1438 -+ goto abort_with_ioremap;
1439 - }
1440 -
1441 -+ /* allocate rx done ring */
1442 -+ bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
1443 -+ mgp->ss.rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
1444 -+ &mgp->ss.rx_done.bus, GFP_KERNEL);
1445 -+ if (mgp->ss.rx_done.entry == NULL)
1446 -+ goto abort_with_firmware;
1447 -+ memset(mgp->ss.rx_done.entry, 0, bytes);
1448 -+
1449 - status = myri10ge_reset(mgp);
1450 - if (status != 0) {
1451 - dev_err(&pdev->dev, "failed reset\n");
1452 -- goto abort_with_firmware;
1453 -+ goto abort_with_rx_done;
1454 - }
1455 -
1456 - pci_set_drvdata(pdev, mgp);
1457 -@@ -3258,7 +3260,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1458 - * is set to correct value if MSI is enabled */
1459 - status = myri10ge_request_irq(mgp);
1460 - if (status != 0)
1461 -- goto abort_with_firmware;
1462 -+ goto abort_with_rx_done;
1463 - netdev->irq = pdev->irq;
1464 - myri10ge_free_irq(mgp);
1465 -
1466 -@@ -3287,14 +3289,14 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1467 - abort_with_state:
1468 - pci_restore_state(pdev);
1469 -
1470 --abort_with_firmware:
1471 -- myri10ge_dummy_rdma(mgp, 0);
1472 --
1473 - abort_with_rx_done:
1474 - bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
1475 - dma_free_coherent(&pdev->dev, bytes,
1476 - mgp->ss.rx_done.entry, mgp->ss.rx_done.bus);
1477 -
1478 -+abort_with_firmware:
1479 -+ myri10ge_dummy_rdma(mgp, 0);
1480 -+
1481 - abort_with_ioremap:
1482 - iounmap(mgp->sram);
1483 -
1484 -diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
1485 -index 635b9ac..e57905c 100644
1486 ---- a/drivers/net/wireless/ath5k/base.c
1487 -+++ b/drivers/net/wireless/ath5k/base.c
1488 -@@ -487,9 +487,6 @@ ath5k_pci_probe(struct pci_dev *pdev,
1489 - /* Set private data */
1490 - pci_set_drvdata(pdev, hw);
1491 -
1492 -- /* Enable msi for devices that support it */
1493 -- pci_enable_msi(pdev);
1494 --
1495 - /* Setup interrupt handler */
1496 - ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
1497 - if (ret) {
1498 -@@ -567,7 +564,6 @@ err_ah:
1499 - err_irq:
1500 - free_irq(pdev->irq, sc);
1501 - err_free:
1502 -- pci_disable_msi(pdev);
1503 - ieee80211_free_hw(hw);
1504 - err_map:
1505 - pci_iounmap(pdev, mem);
1506 -@@ -589,7 +585,6 @@ ath5k_pci_remove(struct pci_dev *pdev)
1507 - ath5k_detach(pdev, hw);
1508 - ath5k_hw_detach(sc->ah);
1509 - free_irq(pdev->irq, sc);
1510 -- pci_disable_msi(pdev);
1511 - pci_iounmap(pdev, sc->iobase);
1512 - pci_release_region(pdev, 0);
1513 - pci_disable_device(pdev);
1514 -diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
1515 -index 3e612d0..9bba3ec 100644
1516 ---- a/drivers/net/wireless/b43legacy/main.c
1517 -+++ b/drivers/net/wireless/b43legacy/main.c
1518 -@@ -3862,10 +3862,10 @@ static int b43legacy_resume(struct ssb_device *dev)
1519 - goto out;
1520 - }
1521 - }
1522 -- mutex_unlock(&wl->mutex);
1523 -
1524 - b43legacydbg(wl, "Device resumed.\n");
1525 - out:
1526 -+ mutex_unlock(&wl->mutex);
1527 - return err;
1528 - }
1529 -
1530 -diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
1531 -index 39e64ab..67c7a01 100644
1532 ---- a/drivers/rtc/rtc-at91rm9200.c
1533 -+++ b/drivers/rtc/rtc-at91rm9200.c
1534 -@@ -175,8 +175,10 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
1535 - | BIN2BCD(tm.tm_mday) << 24
1536 - | AT91_RTC_DATEEN | AT91_RTC_MTHEN);
1537 -
1538 -- if (alrm->enabled)
1539 -+ if (alrm->enabled) {
1540 -+ at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
1541 - at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
1542 -+ }
1543 -
1544 - pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
1545 - at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
1546 -@@ -195,28 +197,22 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
1547 -
1548 - pr_debug("%s(): cmd=%08x, arg=%08lx.\n", __func__, cmd, arg);
1549 -
1550 -+ /* important: scrub old status before enabling IRQs */
1551 - switch (cmd) {
1552 - case RTC_AIE_OFF: /* alarm off */
1553 - at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
1554 - break;
1555 - case RTC_AIE_ON: /* alarm on */
1556 -+ at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
1557 - at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
1558 - break;
1559 - case RTC_UIE_OFF: /* update off */
1560 -- case RTC_PIE_OFF: /* periodic off */
1561 - at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV);
1562 - break;
1563 - case RTC_UIE_ON: /* update on */
1564 -- case RTC_PIE_ON: /* periodic on */
1565 -+ at91_sys_write(AT91_RTC_SCCR, AT91_RTC_SECEV);
1566 - at91_sys_write(AT91_RTC_IER, AT91_RTC_SECEV);
1567 - break;
1568 -- case RTC_IRQP_READ: /* read periodic alarm frequency */
1569 -- ret = put_user(AT91_RTC_FREQ, (unsigned long *) arg);
1570 -- break;
1571 -- case RTC_IRQP_SET: /* set periodic alarm frequency */
1572 -- if (arg != AT91_RTC_FREQ)
1573 -- ret = -EINVAL;
1574 -- break;
1575 - default:
1576 - ret = -ENOIOCTLCMD;
1577 - break;
1578 -diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
1579 -index 681d623..d8a1f91 100644
1580 ---- a/drivers/spi/mpc52xx_psc_spi.c
1581 -+++ b/drivers/spi/mpc52xx_psc_spi.c
1582 -@@ -148,7 +148,6 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi,
1583 - unsigned rfalarm;
1584 - unsigned send_at_once = MPC52xx_PSC_BUFSIZE;
1585 - unsigned recv_at_once;
1586 -- unsigned bpw = mps->bits_per_word / 8;
1587 -
1588 - if (!t->tx_buf && !t->rx_buf && t->len)
1589 - return -EINVAL;
1590 -@@ -164,22 +163,15 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi,
1591 - }
1592 -
1593 - dev_dbg(&spi->dev, "send %d bytes...\n", send_at_once);
1594 -- if (tx_buf) {
1595 -- for (; send_at_once; sb++, send_at_once--) {
1596 -- /* set EOF flag */
1597 -- if (mps->bits_per_word
1598 -- && (sb + 1) % bpw == 0)
1599 -- out_8(&psc->ircr2, 0x01);
1600 -+ for (; send_at_once; sb++, send_at_once--) {
1601 -+ /* set EOF flag before the last word is sent */
1602 -+ if (send_at_once == 1)
1603 -+ out_8(&psc->ircr2, 0x01);
1604 -+
1605 -+ if (tx_buf)
1606 - out_8(&psc->mpc52xx_psc_buffer_8, tx_buf[sb]);
1607 -- }
1608 -- } else {
1609 -- for (; send_at_once; sb++, send_at_once--) {
1610 -- /* set EOF flag */
1611 -- if (mps->bits_per_word
1612 -- && ((sb + 1) % bpw) == 0)
1613 -- out_8(&psc->ircr2, 0x01);
1614 -+ else
1615 - out_8(&psc->mpc52xx_psc_buffer_8, 0);
1616 -- }
1617 - }
1618 -
1619 -
1620 -diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
1621 -index 0cb0d77..db1db4c 100644
1622 ---- a/drivers/usb/serial/usb-serial.c
1623 -+++ b/drivers/usb/serial/usb-serial.c
1624 -@@ -283,7 +283,10 @@ static void serial_close(struct tty_struct *tty, struct file * filp)
1625 - }
1626 -
1627 - if (port->open_count == 0) {
1628 -- usb_autopm_put_interface(port->serial->interface);
1629 -+ mutex_lock(&port->serial->disc_mutex);
1630 -+ if (!port->serial->disconnected)
1631 -+ usb_autopm_put_interface(port->serial->interface);
1632 -+ mutex_unlock(&port->serial->disc_mutex);
1633 - module_put(port->serial->type->driver.owner);
1634 - }
1635 -
1636 -diff --git a/fs/dquot.c b/fs/dquot.c
1637 -index 5ac77da..ad88cf6 100644
1638 ---- a/fs/dquot.c
1639 -+++ b/fs/dquot.c
1640 -@@ -562,6 +562,8 @@ static struct shrinker dqcache_shrinker = {
1641 - */
1642 - static void dqput(struct dquot *dquot)
1643 - {
1644 -+ int ret;
1645 -+
1646 - if (!dquot)
1647 - return;
1648 - #ifdef __DQUOT_PARANOIA
1649 -@@ -594,7 +596,19 @@ we_slept:
1650 - if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
1651 - spin_unlock(&dq_list_lock);
1652 - /* Commit dquot before releasing */
1653 -- dquot->dq_sb->dq_op->write_dquot(dquot);
1654 -+ ret = dquot->dq_sb->dq_op->write_dquot(dquot);
1655 -+ if (ret < 0) {
1656 -+ printk(KERN_ERR "VFS: cannot write quota structure on "
1657 -+ "device %s (error %d). Quota may get out of "
1658 -+ "sync!\n", dquot->dq_sb->s_id, ret);
1659 -+ /*
1660 -+ * We clear dirty bit anyway, so that we avoid
1661 -+ * infinite loop here
1662 -+ */
1663 -+ spin_lock(&dq_list_lock);
1664 -+ clear_dquot_dirty(dquot);
1665 -+ spin_unlock(&dq_list_lock);
1666 -+ }
1667 - goto we_slept;
1668 - }
1669 - /* Clear flag in case dquot was inactive (something bad happened) */
1670 -diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
1671 -index e2832bc..a82e0cb 100644
1672 ---- a/fs/ecryptfs/crypto.c
1673 -+++ b/fs/ecryptfs/crypto.c
1674 -@@ -474,8 +474,8 @@ int ecryptfs_encrypt_page(struct page *page)
1675 - {
1676 - struct inode *ecryptfs_inode;
1677 - struct ecryptfs_crypt_stat *crypt_stat;
1678 -- char *enc_extent_virt = NULL;
1679 -- struct page *enc_extent_page;
1680 -+ char *enc_extent_virt;
1681 -+ struct page *enc_extent_page = NULL;
1682 - loff_t extent_offset;
1683 - int rc = 0;
1684 -
1685 -@@ -491,14 +491,14 @@ int ecryptfs_encrypt_page(struct page *page)
1686 - page->index);
1687 - goto out;
1688 - }
1689 -- enc_extent_virt = kmalloc(PAGE_CACHE_SIZE, GFP_USER);
1690 -- if (!enc_extent_virt) {
1691 -+ enc_extent_page = alloc_page(GFP_USER);
1692 -+ if (!enc_extent_page) {
1693 - rc = -ENOMEM;
1694 - ecryptfs_printk(KERN_ERR, "Error allocating memory for "
1695 - "encrypted extent\n");
1696 - goto out;
1697 - }
1698 -- enc_extent_page = virt_to_page(enc_extent_virt);
1699 -+ enc_extent_virt = kmap(enc_extent_page);
1700 - for (extent_offset = 0;
1701 - extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
1702 - extent_offset++) {
1703 -@@ -526,7 +526,10 @@ int ecryptfs_encrypt_page(struct page *page)
1704 - }
1705 - }
1706 - out:
1707 -- kfree(enc_extent_virt);
1708 -+ if (enc_extent_page) {
1709 -+ kunmap(enc_extent_page);
1710 -+ __free_page(enc_extent_page);
1711 -+ }
1712 - return rc;
1713 - }
1714 -
1715 -@@ -608,8 +611,8 @@ int ecryptfs_decrypt_page(struct page *page)
1716 - {
1717 - struct inode *ecryptfs_inode;
1718 - struct ecryptfs_crypt_stat *crypt_stat;
1719 -- char *enc_extent_virt = NULL;
1720 -- struct page *enc_extent_page;
1721 -+ char *enc_extent_virt;
1722 -+ struct page *enc_extent_page = NULL;
1723 - unsigned long extent_offset;
1724 - int rc = 0;
1725 -
1726 -@@ -626,14 +629,14 @@ int ecryptfs_decrypt_page(struct page *page)
1727 - page->index);
1728 - goto out;
1729 - }
1730 -- enc_extent_virt = kmalloc(PAGE_CACHE_SIZE, GFP_USER);
1731 -- if (!enc_extent_virt) {
1732 -+ enc_extent_page = alloc_page(GFP_USER);
1733 -+ if (!enc_extent_page) {
1734 - rc = -ENOMEM;
1735 - ecryptfs_printk(KERN_ERR, "Error allocating memory for "
1736 - "encrypted extent\n");
1737 - goto out;
1738 - }
1739 -- enc_extent_page = virt_to_page(enc_extent_virt);
1740 -+ enc_extent_virt = kmap(enc_extent_page);
1741 - for (extent_offset = 0;
1742 - extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
1743 - extent_offset++) {
1744 -@@ -661,7 +664,10 @@ int ecryptfs_decrypt_page(struct page *page)
1745 - }
1746 - }
1747 - out:
1748 -- kfree(enc_extent_virt);
1749 -+ if (enc_extent_page) {
1750 -+ kunmap(enc_extent_page);
1751 -+ __free_page(enc_extent_page);
1752 -+ }
1753 - return rc;
1754 - }
1755 -
1756 -diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
1757 -index 6bd48f0..c2fb2dd 100644
1758 ---- a/fs/isofs/rock.c
1759 -+++ b/fs/isofs/rock.c
1760 -@@ -209,6 +209,11 @@ repeat:
1761 -
1762 - while (rs.len > 2) { /* There may be one byte for padding somewhere */
1763 - rr = (struct rock_ridge *)rs.chr;
1764 -+ /*
1765 -+ * Ignore rock ridge info if rr->len is out of range, but
1766 -+ * don't return -EIO because that would make the file
1767 -+ * invisible.
1768 -+ */
1769 - if (rr->len < 3)
1770 - goto out; /* Something got screwed up here */
1771 - sig = isonum_721(rs.chr);
1772 -@@ -216,8 +221,12 @@ repeat:
1773 - goto eio;
1774 - rs.chr += rr->len;
1775 - rs.len -= rr->len;
1776 -+ /*
1777 -+ * As above, just ignore the rock ridge info if rr->len
1778 -+ * is bogus.
1779 -+ */
1780 - if (rs.len < 0)
1781 -- goto eio; /* corrupted isofs */
1782 -+ goto out; /* Something got screwed up here */
1783 -
1784 - switch (sig) {
1785 - case SIG('R', 'R'):
1786 -@@ -307,6 +316,11 @@ parse_rock_ridge_inode_internal(struct iso_directory_record *de,
1787 - repeat:
1788 - while (rs.len > 2) { /* There may be one byte for padding somewhere */
1789 - rr = (struct rock_ridge *)rs.chr;
1790 -+ /*
1791 -+ * Ignore rock ridge info if rr->len is out of range, but
1792 -+ * don't return -EIO because that would make the file
1793 -+ * invisible.
1794 -+ */
1795 - if (rr->len < 3)
1796 - goto out; /* Something got screwed up here */
1797 - sig = isonum_721(rs.chr);
1798 -@@ -314,8 +328,12 @@ repeat:
1799 - goto eio;
1800 - rs.chr += rr->len;
1801 - rs.len -= rr->len;
1802 -+ /*
1803 -+ * As above, just ignore the rock ridge info if rr->len
1804 -+ * is bogus.
1805 -+ */
1806 - if (rs.len < 0)
1807 -- goto eio; /* corrupted isofs */
1808 -+ goto out; /* Something got screwed up here */
1809 -
1810 - switch (sig) {
1811 - #ifndef CONFIG_ZISOFS /* No flag for SF or ZF */
1812 -diff --git a/fs/libfs.c b/fs/libfs.c
1813 -index baeb71e..1add676 100644
1814 ---- a/fs/libfs.c
1815 -+++ b/fs/libfs.c
1816 -@@ -216,8 +216,8 @@ int get_sb_pseudo(struct file_system_type *fs_type, char *name,
1817 -
1818 - s->s_flags = MS_NOUSER;
1819 - s->s_maxbytes = ~0ULL;
1820 -- s->s_blocksize = 1024;
1821 -- s->s_blocksize_bits = 10;
1822 -+ s->s_blocksize = PAGE_SIZE;
1823 -+ s->s_blocksize_bits = PAGE_SHIFT;
1824 - s->s_magic = magic;
1825 - s->s_op = ops ? ops : &simple_super_operations;
1826 - s->s_time_gran = 1;
1827 -diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
1828 -index c492449..1eaa39a 100644
1829 ---- a/fs/proc/task_mmu.c
1830 -+++ b/fs/proc/task_mmu.c
1831 -@@ -636,7 +636,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
1832 - struct pagemapread pm;
1833 - int pagecount;
1834 - int ret = -ESRCH;
1835 -- struct mm_walk pagemap_walk;
1836 -+ struct mm_walk pagemap_walk = {};
1837 - unsigned long src;
1838 - unsigned long svpfn;
1839 - unsigned long start_vaddr;
1840 -diff --git a/include/asm-arm/bitops.h b/include/asm-arm/bitops.h
1841 -index 5c60bfc..9a1db20 100644
1842 ---- a/include/asm-arm/bitops.h
1843 -+++ b/include/asm-arm/bitops.h
1844 -@@ -277,9 +277,16 @@ static inline int constant_fls(int x)
1845 - * the clz instruction for much better code efficiency.
1846 - */
1847 -
1848 --#define fls(x) \
1849 -+#define __fls(x) \
1850 - ( __builtin_constant_p(x) ? constant_fls(x) : \
1851 - ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) )
1852 -+
1853 -+/* Implement fls() in C so that 64-bit args are suitably truncated */
1854 -+static inline int fls(int x)
1855 -+{
1856 -+ return __fls(x);
1857 -+}
1858 -+
1859 - #define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
1860 - #define __ffs(x) (ffs(x) - 1)
1861 - #define ffz(x) __ffs( ~(x) )
1862 -diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
1863 -index f054778..5a544bf 100644
1864 ---- a/include/asm-generic/vmlinux.lds.h
1865 -+++ b/include/asm-generic/vmlinux.lds.h
1866 -@@ -204,6 +204,7 @@
1867 - * during second ld run in second ld pass when generating System.map */
1868 - #define TEXT_TEXT \
1869 - ALIGN_FUNCTION(); \
1870 -+ *(.text.hot) \
1871 - *(.text) \
1872 - *(.ref.text) \
1873 - *(.text.init.refok) \
1874 -@@ -213,7 +214,8 @@
1875 - CPU_KEEP(init.text) \
1876 - CPU_KEEP(exit.text) \
1877 - MEM_KEEP(init.text) \
1878 -- MEM_KEEP(exit.text)
1879 -+ MEM_KEEP(exit.text) \
1880 -+ *(.text.unlikely)
1881 -
1882 -
1883 - /* sched.text is aling to function alignment to secure we have same
1884 -diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h
1885 -index 3158960..38e7ba6 100644
1886 ---- a/include/asm-sparc64/io.h
1887 -+++ b/include/asm-sparc64/io.h
1888 -@@ -16,7 +16,6 @@
1889 - /* BIO layer definitions. */
1890 - extern unsigned long kern_base, kern_size;
1891 - #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
1892 --#define BIO_VMERGE_BOUNDARY 8192
1893 -
1894 - static inline u8 _inb(unsigned long addr)
1895 - {
1896 -diff --git a/include/asm-x86/signal.h b/include/asm-x86/signal.h
1897 -index f15186d..6dac493 100644
1898 ---- a/include/asm-x86/signal.h
1899 -+++ b/include/asm-x86/signal.h
1900 -@@ -181,12 +181,12 @@ typedef struct sigaltstack {
1901 - #ifdef __KERNEL__
1902 - #include <asm/sigcontext.h>
1903 -
1904 --#ifdef __386__
1905 -+#ifdef __i386__
1906 -
1907 - #define __HAVE_ARCH_SIG_BITOPS
1908 -
1909 - #define sigaddset(set,sig) \
1910 -- (__builtin_constantp(sig) \
1911 -+ (__builtin_constant_p(sig) \
1912 - ? __const_sigaddset((set), (sig)) \
1913 - : __gen_sigaddset((set), (sig)))
1914 -
1915 -diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
1916 -index e7e91db..07cb761 100644
1917 ---- a/include/linux/cpufreq.h
1918 -+++ b/include/linux/cpufreq.h
1919 -@@ -109,6 +109,7 @@ struct cpufreq_policy {
1920 - #define CPUFREQ_ADJUST (0)
1921 - #define CPUFREQ_INCOMPATIBLE (1)
1922 - #define CPUFREQ_NOTIFY (2)
1923 -+#define CPUFREQ_START (3)
1924 -
1925 - #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
1926 - #define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */
1927 -diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
1928 -index cde056e..0bfd97f 100644
1929 ---- a/include/linux/ipv6.h
1930 -+++ b/include/linux/ipv6.h
1931 -@@ -123,6 +123,7 @@ struct ipv6hdr {
1932 - struct in6_addr daddr;
1933 - };
1934 -
1935 -+#ifdef __KERNEL__
1936 - /*
1937 - * This structure contains configuration options per IPv6 link.
1938 - */
1939 -@@ -165,6 +166,7 @@ struct ipv6_devconf {
1940 - #endif
1941 - void *sysctl;
1942 - };
1943 -+#endif
1944 -
1945 - /* index values for the variables in ipv6_devconf */
1946 - enum {
1947 -diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
1948 -index de9d1df..d413e89 100644
1949 ---- a/include/linux/kvm_host.h
1950 -+++ b/include/linux/kvm_host.h
1951 -@@ -166,6 +166,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
1952 - struct kvm_userspace_memory_region *mem,
1953 - struct kvm_memory_slot old,
1954 - int user_alloc);
1955 -+void kvm_arch_flush_shadow(struct kvm *kvm);
1956 - gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
1957 - struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
1958 - unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
1959 -diff --git a/kernel/cpuset.c b/kernel/cpuset.c
1960 -index 798b3ab..2a028f5 100644
1961 ---- a/kernel/cpuset.c
1962 -+++ b/kernel/cpuset.c
1963 -@@ -679,7 +679,9 @@ restart:
1964 - if (apn == b->pn) {
1965 - cpus_or(*dp, *dp, b->cpus_allowed);
1966 - b->pn = -1;
1967 -- update_domain_attr(dattr, b);
1968 -+ if (dattr)
1969 -+ update_domain_attr(dattr
1970 -+ + nslot, b);
1971 - }
1972 - }
1973 - nslot++;
1974 -diff --git a/kernel/marker.c b/kernel/marker.c
1975 -index b5a9fe1..39e7596 100644
1976 ---- a/kernel/marker.c
1977 -+++ b/kernel/marker.c
1978 -@@ -127,6 +127,11 @@ void marker_probe_cb(const struct marker *mdata, void *call_private,
1979 - struct marker_probe_closure *multi;
1980 - int i;
1981 - /*
1982 -+ * Read mdata->ptype before mdata->multi.
1983 -+ */
1984 -+ smp_rmb();
1985 -+ multi = mdata->multi;
1986 -+ /*
1987 - * multi points to an array, therefore accessing the array
1988 - * depends on reading multi. However, even in this case,
1989 - * we must insure that the pointer is read _before_ the array
1990 -@@ -134,7 +139,6 @@ void marker_probe_cb(const struct marker *mdata, void *call_private,
1991 - * in the fast path, so put the explicit barrier here.
1992 - */
1993 - smp_read_barrier_depends();
1994 -- multi = mdata->multi;
1995 - for (i = 0; multi[i].func; i++) {
1996 - va_start(args, fmt);
1997 - multi[i].func(multi[i].probe_private, call_private, fmt,
1998 -@@ -177,6 +181,11 @@ void marker_probe_cb_noarg(const struct marker *mdata,
1999 - struct marker_probe_closure *multi;
2000 - int i;
2001 - /*
2002 -+ * Read mdata->ptype before mdata->multi.
2003 -+ */
2004 -+ smp_rmb();
2005 -+ multi = mdata->multi;
2006 -+ /*
2007 - * multi points to an array, therefore accessing the array
2008 - * depends on reading multi. However, even in this case,
2009 - * we must insure that the pointer is read _before_ the array
2010 -@@ -184,7 +193,6 @@ void marker_probe_cb_noarg(const struct marker *mdata,
2011 - * in the fast path, so put the explicit barrier here.
2012 - */
2013 - smp_read_barrier_depends();
2014 -- multi = mdata->multi;
2015 - for (i = 0; multi[i].func; i++)
2016 - multi[i].func(multi[i].probe_private, call_private, fmt,
2017 - &args);
2018 -diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
2019 -index 41d275a..379c25e 100644
2020 ---- a/kernel/rcupreempt.c
2021 -+++ b/kernel/rcupreempt.c
2022 -@@ -567,7 +567,7 @@ rcu_try_flip_waitack_needed(int cpu)
2023 - * that this CPU already acknowledged the counter.
2024 - */
2025 -
2026 -- if ((curr - snap) > 2 || (snap & 0x1) == 0)
2027 -+ if ((curr - snap) > 2 || (curr & 0x1) == 0)
2028 - return 0;
2029 -
2030 - /* We need this CPU to explicitly acknowledge the counter flip. */
2031 -diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
2032 -index 5b9b467..0fea0ee 100644
2033 ---- a/kernel/sys_ni.c
2034 -+++ b/kernel/sys_ni.c
2035 -@@ -59,6 +59,7 @@ cond_syscall(sys_epoll_create);
2036 - cond_syscall(sys_epoll_ctl);
2037 - cond_syscall(sys_epoll_wait);
2038 - cond_syscall(sys_epoll_pwait);
2039 -+cond_syscall(compat_sys_epoll_pwait);
2040 - cond_syscall(sys_semget);
2041 - cond_syscall(sys_semop);
2042 - cond_syscall(sys_semtimedop);
2043 -diff --git a/mm/filemap.c b/mm/filemap.c
2044 -index 1e6a7d3..4f32423 100644
2045 ---- a/mm/filemap.c
2046 -+++ b/mm/filemap.c
2047 -@@ -1778,7 +1778,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
2048 - * The !iov->iov_len check ensures we skip over unlikely
2049 - * zero-length segments (without overruning the iovec).
2050 - */
2051 -- while (bytes || unlikely(!iov->iov_len && i->count)) {
2052 -+ while (bytes || unlikely(i->count && !iov->iov_len)) {
2053 - int copy;
2054 -
2055 - copy = min(bytes, iov->iov_len - base);
2056 -diff --git a/mm/shmem.c b/mm/shmem.c
2057 -index e2a6ae1..8f8412b 100644
2058 ---- a/mm/shmem.c
2059 -+++ b/mm/shmem.c
2060 -@@ -1503,7 +1503,6 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
2061 - inode->i_uid = current->fsuid;
2062 - inode->i_gid = current->fsgid;
2063 - inode->i_blocks = 0;
2064 -- inode->i_mapping->a_ops = &shmem_aops;
2065 - inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
2066 - inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2067 - inode->i_generation = get_seconds();
2068 -@@ -1518,6 +1517,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
2069 - init_special_inode(inode, mode, dev);
2070 - break;
2071 - case S_IFREG:
2072 -+ inode->i_mapping->a_ops = &shmem_aops;
2073 - inode->i_op = &shmem_inode_operations;
2074 - inode->i_fop = &shmem_file_operations;
2075 - mpol_shared_policy_init(&info->policy,
2076 -@@ -1907,6 +1907,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
2077 - return error;
2078 - }
2079 - unlock_page(page);
2080 -+ inode->i_mapping->a_ops = &shmem_aops;
2081 - inode->i_op = &shmem_symlink_inode_operations;
2082 - kaddr = kmap_atomic(page, KM_USER0);
2083 - memcpy(kaddr, symname, len);
2084 -diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2085 -index cad73b7..c443a7f 100644
2086 ---- a/net/ipv4/tcp_input.c
2087 -+++ b/net/ipv4/tcp_input.c
2088 -@@ -3273,6 +3273,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2089 - * log. Something worked...
2090 - */
2091 - sk->sk_err_soft = 0;
2092 -+ icsk->icsk_probes_out = 0;
2093 - tp->rcv_tstamp = tcp_time_stamp;
2094 - prior_packets = tp->packets_out;
2095 - if (!prior_packets)
2096 -@@ -3305,8 +3306,6 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2097 - return 1;
2098 -
2099 - no_queue:
2100 -- icsk->icsk_probes_out = 0;
2101 --
2102 - /* If this ack opens up a zero window, clear backoff. It was
2103 - * being used to time the probes, and is probably far higher than
2104 - * it needs to be for normal retransmission.
2105 -diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
2106 -index 56fcda3..9f3f7ba 100644
2107 ---- a/net/ipv4/udp.c
2108 -+++ b/net/ipv4/udp.c
2109 -@@ -1319,6 +1319,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
2110 - return -ENOPROTOOPT;
2111 - if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
2112 - val = 8;
2113 -+ else if (val > USHORT_MAX)
2114 -+ val = USHORT_MAX;
2115 - up->pcslen = val;
2116 - up->pcflag |= UDPLITE_SEND_CC;
2117 - break;
2118 -@@ -1331,6 +1333,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
2119 - return -ENOPROTOOPT;
2120 - if (val != 0 && val < 8) /* Avoid silly minimal values. */
2121 - val = 8;
2122 -+ else if (val > USHORT_MAX)
2123 -+ val = USHORT_MAX;
2124 - up->pcrlen = val;
2125 - up->pcflag |= UDPLITE_RECV_CC;
2126 - break;
2127 -diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
2128 -index 1ee4fa1..918fde4 100644
2129 ---- a/net/ipv6/ip6_fib.c
2130 -+++ b/net/ipv6/ip6_fib.c
2131 -@@ -663,7 +663,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
2132 -
2133 - static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt)
2134 - {
2135 -- if (net->ipv6.ip6_fib_timer->expires == 0 &&
2136 -+ if (!timer_pending(net->ipv6.ip6_fib_timer) &&
2137 - (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE)))
2138 - mod_timer(net->ipv6.ip6_fib_timer, jiffies +
2139 - net->ipv6.sysctl.ip6_rt_gc_interval);
2140 -@@ -671,7 +671,7 @@ static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt)
2141 -
2142 - void fib6_force_start_gc(struct net *net)
2143 - {
2144 -- if (net->ipv6.ip6_fib_timer->expires == 0)
2145 -+ if (!timer_pending(net->ipv6.ip6_fib_timer))
2146 - mod_timer(net->ipv6.ip6_fib_timer, jiffies +
2147 - net->ipv6.sysctl.ip6_rt_gc_interval);
2148 - }
2149 -diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
2150 -index 17092d6..9ee9783 100644
2151 ---- a/scripts/Makefile.modpost
2152 -+++ b/scripts/Makefile.modpost
2153 -@@ -101,6 +101,7 @@ quiet_cmd_kernel-mod = MODPOST $@
2154 - cmd_kernel-mod = $(modpost) $@
2155 -
2156 - vmlinux.o: FORCE
2157 -+ @rm -fr $(kernelmarkersfile)
2158 - $(call cmd,kernel-mod)
2159 -
2160 - # Declare generated files as targets for modpost
2161 -diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
2162 -index a07f91a..8f038e6 100644
2163 ---- a/scripts/mod/modpost.c
2164 -+++ b/scripts/mod/modpost.c
2165 -@@ -1992,7 +1992,8 @@ static void read_markers(const char *fname)
2166 - mod->skip = 1;
2167 - }
2168 -
2169 -- add_marker(mod, marker, fmt);
2170 -+ if (!mod->skip)
2171 -+ add_marker(mod, marker, fmt);
2172 - }
2173 - return;
2174 - fail:
2175 -diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
2176 -index bbcee2c..a69b420 100644
2177 ---- a/sound/pci/trident/trident_main.c
2178 -+++ b/sound/pci/trident/trident_main.c
2179 -@@ -1590,7 +1590,10 @@ static int snd_trident_trigger(struct snd_pcm_substream *substream,
2180 - if (spdif_flag) {
2181 - if (trident->device != TRIDENT_DEVICE_ID_SI7018) {
2182 - outl(trident->spdif_pcm_bits, TRID_REG(trident, NX_SPCSTATUS));
2183 -- outb(trident->spdif_pcm_ctrl, TRID_REG(trident, NX_SPCTRL_SPCSO + 3));
2184 -+ val = trident->spdif_pcm_ctrl;
2185 -+ if (!go)
2186 -+ val &= ~(0x28);
2187 -+ outb(val, TRID_REG(trident, NX_SPCTRL_SPCSO + 3));
2188 - } else {
2189 - outl(trident->spdif_pcm_bits, TRID_REG(trident, SI_SPDIF_CS));
2190 - val = inl(TRID_REG(trident, SI_SERIAL_INTF_CTRL)) | SPDIF_EN;
2191 -diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
2192 -index 2d29e26..0262847 100644
2193 ---- a/virt/kvm/kvm_main.c
2194 -+++ b/virt/kvm/kvm_main.c
2195 -@@ -377,6 +377,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
2196 - if (mem->slot >= kvm->nmemslots)
2197 - kvm->nmemslots = mem->slot + 1;
2198 -
2199 -+ if (!npages)
2200 -+ kvm_arch_flush_shadow(kvm);
2201 -+
2202 - *memslot = new;
2203 -
2204 - r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
2205
2206 Deleted: genpatches-2.6/trunk/2.6.27/1001_linux-2.6.26.2.patch
2207 ===================================================================
2208 --- genpatches-2.6/trunk/2.6.27/1001_linux-2.6.26.2.patch 2008-10-10 23:58:26 UTC (rev 1350)
2209 +++ genpatches-2.6/trunk/2.6.27/1001_linux-2.6.26.2.patch 2008-10-11 00:00:47 UTC (rev 1351)
2210 @@ -1,2510 +0,0 @@
2211 -diff --git a/Documentation/ftrace.txt b/Documentation/ftrace.txt
2212 -deleted file mode 100644
2213 -index 13e4bf0..0000000
2214 ---- a/Documentation/ftrace.txt
2215 -+++ /dev/null
2216 -@@ -1,1353 +0,0 @@
2217 -- ftrace - Function Tracer
2218 -- ========================
2219 --
2220 --Copyright 2008 Red Hat Inc.
2221 --Author: Steven Rostedt <srostedt@××××××.com>
2222 --
2223 --
2224 --Introduction
2225 --------------
2226 --
2227 --Ftrace is an internal tracer designed to help out developers and
2228 --designers of systems to find what is going on inside the kernel.
2229 --It can be used for debugging or analyzing latencies and performance
2230 --issues that take place outside of user-space.
2231 --
2232 --Although ftrace is the function tracer, it also includes an
2233 --infrastructure that allows for other types of tracing. Some of the
2234 --tracers that are currently in ftrace is a tracer to trace
2235 --context switches, the time it takes for a high priority task to
2236 --run after it was woken up, the time interrupts are disabled, and
2237 --more.
2238 --
2239 --
2240 --The File System
2241 -----------------
2242 --
2243 --Ftrace uses the debugfs file system to hold the control files as well
2244 --as the files to display output.
2245 --
2246 --To mount the debugfs system:
2247 --
2248 -- # mkdir /debug
2249 -- # mount -t debugfs nodev /debug
2250 --
2251 --
2252 --That's it! (assuming that you have ftrace configured into your kernel)
2253 --
2254 --After mounting the debugfs, you can see a directory called
2255 --"tracing". This directory contains the control and output files
2256 --of ftrace. Here is a list of some of the key files:
2257 --
2258 --
2259 -- Note: all time values are in microseconds.
2260 --
2261 -- current_tracer : This is used to set or display the current tracer
2262 -- that is configured.
2263 --
2264 -- available_tracers : This holds the different types of tracers that
2265 -- has been compiled into the kernel. The tracers
2266 -- listed here can be configured by echoing in their
2267 -- name into current_tracer.
2268 --
2269 -- tracing_enabled : This sets or displays whether the current_tracer
2270 -- is activated and tracing or not. Echo 0 into this
2271 -- file to disable the tracer or 1 (or non-zero) to
2272 -- enable it.
2273 --
2274 -- trace : This file holds the output of the trace in a human readable
2275 -- format.
2276 --
2277 -- latency_trace : This file shows the same trace but the information
2278 -- is organized more to display possible latencies
2279 -- in the system.
2280 --
2281 -- trace_pipe : The output is the same as the "trace" file but this
2282 -- file is meant to be streamed with live tracing.
2283 -- Reads from this file will block until new data
2284 -- is retrieved. Unlike the "trace" and "latency_trace"
2285 -- files, this file is a consumer. This means reading
2286 -- from this file causes sequential reads to display
2287 -- more current data. Once data is read from this
2288 -- file, it is consumed, and will not be read
2289 -- again with a sequential read. The "trace" and
2290 -- "latency_trace" files are static, and if the
2291 -- tracer isn't adding more data, they will display
2292 -- the same information every time they are read.
2293 --
2294 -- iter_ctrl : This file lets the user control the amount of data
2295 -- that is displayed in one of the above output
2296 -- files.
2297 --
2298 -- trace_max_latency : Some of the tracers record the max latency.
2299 -- For example, the time interrupts are disabled.
2300 -- This time is saved in this file. The max trace
2301 -- will also be stored, and displayed by either
2302 -- "trace" or "latency_trace". A new max trace will
2303 -- only be recorded if the latency is greater than
2304 -- the value in this file. (in microseconds)
2305 --
2306 -- trace_entries : This sets or displays the number of trace
2307 -- entries each CPU buffer can hold. The tracer buffers
2308 -- are the same size for each CPU, so care must be
2309 -- taken when modifying the trace_entries. The number
2310 -- of actually entries will be the number given
2311 -- times the number of possible CPUS. The buffers
2312 -- are saved as individual pages, and the actual entries
2313 -- will always be rounded up to entries per page.
2314 --
2315 -- This can only be updated when the current_tracer
2316 -- is set to "none".
2317 --
2318 -- NOTE: It is planned on changing the allocated buffers
2319 -- from being the number of possible CPUS to
2320 -- the number of online CPUS.
2321 --
2322 -- tracing_cpumask : This is a mask that lets the user only trace
2323 -- on specified CPUS. The format is a hex string
2324 -- representing the CPUS.
2325 --
2326 -- set_ftrace_filter : When dynamic ftrace is configured in, the
2327 -- code is dynamically modified to disable calling
2328 -- of the function profiler (mcount). This lets
2329 -- tracing be configured in with practically no overhead
2330 -- in performance. This also has a side effect of
2331 -- enabling or disabling specific functions to be
2332 -- traced. Echoing in names of functions into this
2333 -- file will limit the trace to only those files.
2334 --
2335 -- set_ftrace_notrace: This has the opposite effect that
2336 -- set_ftrace_filter has. Any function that is added
2337 -- here will not be traced. If a function exists
2338 -- in both set_ftrace_filter and set_ftrace_notrace
2339 -- the function will _not_ bet traced.
2340 --
2341 -- available_filter_functions : When a function is encountered the first
2342 -- time by the dynamic tracer, it is recorded and
2343 -- later the call is converted into a nop. This file
2344 -- lists the functions that have been recorded
2345 -- by the dynamic tracer and these functions can
2346 -- be used to set the ftrace filter by the above
2347 -- "set_ftrace_filter" file.
2348 --
2349 --
2350 --The Tracers
2351 -------------
2352 --
2353 --Here are the list of current tracers that can be configured.
2354 --
2355 -- ftrace - function tracer that uses mcount to trace all functions.
2356 -- It is possible to filter out which functions that are
2357 -- traced when dynamic ftrace is configured in.
2358 --
2359 -- sched_switch - traces the context switches between tasks.
2360 --
2361 -- irqsoff - traces the areas that disable interrupts and saves off
2362 -- the trace with the longest max latency.
2363 -- See tracing_max_latency. When a new max is recorded,
2364 -- it replaces the old trace. It is best to view this
2365 -- trace with the latency_trace file.
2366 --
2367 -- preemptoff - Similar to irqsoff but traces and records the time
2368 -- preemption is disabled.
2369 --
2370 -- preemptirqsoff - Similar to irqsoff and preemptoff, but traces and
2371 -- records the largest time irqs and/or preemption is
2372 -- disabled.
2373 --
2374 -- wakeup - Traces and records the max latency that it takes for
2375 -- the highest priority task to get scheduled after
2376 -- it has been woken up.
2377 --
2378 -- none - This is not a tracer. To remove all tracers from tracing
2379 -- simply echo "none" into current_tracer.
2380 --
2381 --
2382 --Examples of using the tracer
2383 ------------------------------
2384 --
2385 --Here are typical examples of using the tracers with only controlling
2386 --them with the debugfs interface (without using any user-land utilities).
2387 --
2388 --Output format:
2389 ----------------
2390 --
2391 --Here's an example of the output format of the file "trace"
2392 --
2393 -- --------
2394 --# tracer: ftrace
2395 --#
2396 --# TASK-PID CPU# TIMESTAMP FUNCTION
2397 --# | | | | |
2398 -- bash-4251 [01] 10152.583854: path_put <-path_walk
2399 -- bash-4251 [01] 10152.583855: dput <-path_put
2400 -- bash-4251 [01] 10152.583855: _atomic_dec_and_lock <-dput
2401 -- --------
2402 --
2403 --A header is printed with the trace that is represented. In this case
2404 --the tracer is "ftrace". Then a header showing the format. Task name
2405 --"bash", the task PID "4251", the CPU that it was running on
2406 --"01", the timestamp in <secs>.<usecs> format, the function name that was
2407 --traced "path_put" and the parent function that called this function
2408 --"path_walk".
2409 --
2410 --The sched_switch tracer also includes tracing of task wake ups and
2411 --context switches.
2412 --
2413 -- ksoftirqd/1-7 [01] 1453.070013: 7:115:R + 2916:115:S
2414 -- ksoftirqd/1-7 [01] 1453.070013: 7:115:R + 10:115:S
2415 -- ksoftirqd/1-7 [01] 1453.070013: 7:115:R ==> 10:115:R
2416 -- events/1-10 [01] 1453.070013: 10:115:S ==> 2916:115:R
2417 -- kondemand/1-2916 [01] 1453.070013: 2916:115:S ==> 7:115:R
2418 -- ksoftirqd/1-7 [01] 1453.070013: 7:115:S ==> 0:140:R
2419 --
2420 --Wake ups are represented by a "+" and the context switches show
2421 --"==>". The format is:
2422 --
2423 -- Context switches:
2424 --
2425 -- Previous task Next Task
2426 --
2427 -- <pid>:<prio>:<state> ==> <pid>:<prio>:<state>
2428 --
2429 -- Wake ups:
2430 --
2431 -- Current task Task waking up
2432 --
2433 -- <pid>:<prio>:<state> + <pid>:<prio>:<state>
2434 --
2435 --The prio is the internal kernel priority, which is inverse to the
2436 --priority that is usually displayed by user-space tools. Zero represents
2437 --the highest priority (99). Prio 100 starts the "nice" priorities with
2438 --100 being equal to nice -20 and 139 being nice 19. The prio "140" is
2439 --reserved for the idle task which is the lowest priority thread (pid 0).
2440 --
2441 --
2442 --Latency trace format
2443 ----------------------
2444 --
2445 --For traces that display latency times, the latency_trace file gives
2446 --a bit more information to see why a latency happened. Here's a typical
2447 --trace.
2448 --
2449 --# tracer: irqsoff
2450 --#
2451 --irqsoff latency trace v1.1.5 on 2.6.26-rc8
2452 ----------------------------------------------------------------------
2453 -- latency: 97 us, #3/3, CPU#0 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
2454 -- -----------------
2455 -- | task: swapper-0 (uid:0 nice:0 policy:0 rt_prio:0)
2456 -- -----------------
2457 -- => started at: apic_timer_interrupt
2458 -- => ended at: do_softirq
2459 --
2460 --# _------=> CPU#
2461 --# / _-----=> irqs-off
2462 --# | / _----=> need-resched
2463 --# || / _---=> hardirq/softirq
2464 --# ||| / _--=> preempt-depth
2465 --# |||| /
2466 --# ||||| delay
2467 --# cmd pid ||||| time | caller
2468 --# \ / ||||| \ | /
2469 -- <idle>-0 0d..1 0us+: trace_hardirqs_off_thunk (apic_timer_interrupt)
2470 -- <idle>-0 0d.s. 97us : __do_softirq (do_softirq)
2471 -- <idle>-0 0d.s1 98us : trace_hardirqs_on (do_softirq)
2472 --
2473 --
2474 --vim:ft=help
2475 --
2476 --
2477 --This shows that the current tracer is "irqsoff" tracing the time
2478 --interrupts are disabled. It gives the trace version and the kernel
2479 --this was executed on (2.6.26-rc8). Then it displays the max latency
2480 --in microsecs (97 us). The number of trace entries displayed
2481 --by the total number recorded (both are three: #3/3). The type of
2482 --preemption that was used (PREEMPT). VP, KP, SP, and HP are always zero
2483 --and reserved for later use. #P is the number of online CPUS (#P:2).
2484 --
2485 --The task is the process that was running when the latency happened.
2486 --(swapper pid: 0).
2487 --
2488 --The start and stop that caused the latencies:
2489 --
2490 -- apic_timer_interrupt is where the interrupts were disabled.
2491 -- do_softirq is where they were enabled again.
2492 --
2493 --The next lines after the header are the trace itself. The header
2494 --explains which is which.
2495 --
2496 -- cmd: The name of the process in the trace.
2497 --
2498 -- pid: The PID of that process.
2499 --
2500 -- CPU#: The CPU that the process was running on.
2501 --
2502 -- irqs-off: 'd' interrupts are disabled. '.' otherwise.
2503 --
2504 -- need-resched: 'N' task need_resched is set, '.' otherwise.
2505 --
2506 -- hardirq/softirq:
2507 -- 'H' - hard irq happened inside a softirq.
2508 -- 'h' - hard irq is running
2509 -- 's' - soft irq is running
2510 -- '.' - normal context.
2511 --
2512 -- preempt-depth: The level of preempt_disabled
2513 --
2514 --The above is mostly meaningful for kernel developers.
2515 --
2516 -- time: This differs from the trace output where as the trace output
2517 -- contained a absolute timestamp. This timestamp is relative
2518 -- to the start of the first entry in the the trace.
2519 --
2520 -- delay: This is just to help catch your eye a bit better. And
2521 -- needs to be fixed to be only relative to the same CPU.
2522 -- The marks is determined by the difference between this
2523 -- current trace and the next trace.
2524 -- '!' - greater than preempt_mark_thresh (default 100)
2525 -- '+' - greater than 1 microsecond
2526 -- ' ' - less than or equal to 1 microsecond.
2527 --
2528 -- The rest is the same as the 'trace' file.
2529 --
2530 --
2531 --iter_ctrl
2532 -----------
2533 --
2534 --The iter_ctrl file is used to control what gets printed in the trace
2535 --output. To see what is available, simply cat the file:
2536 --
2537 -- cat /debug/tracing/iter_ctrl
2538 -- print-parent nosym-offset nosym-addr noverbose noraw nohex nobin \
2539 -- noblock nostacktrace nosched-tree
2540 --
2541 --To disable one of the options, echo in the option appended with "no".
2542 --
2543 -- echo noprint-parent > /debug/tracing/iter_ctrl
2544 --
2545 --To enable an option, leave off the "no".
2546 --
2547 -- echo sym-offest > /debug/tracing/iter_ctrl
2548 --
2549 --Here are the available options:
2550 --
2551 -- print-parent - On function traces, display the calling function
2552 -- as well as the function being traced.
2553 --
2554 -- print-parent:
2555 -- bash-4000 [01] 1477.606694: simple_strtoul <-strict_strtoul
2556 --
2557 -- noprint-parent:
2558 -- bash-4000 [01] 1477.606694: simple_strtoul
2559 --
2560 --
2561 -- sym-offset - Display not only the function name, but also the offset
2562 -- in the function. For example, instead of seeing just
2563 -- "ktime_get" you will see "ktime_get+0xb/0x20"
2564 --
2565 -- sym-offset:
2566 -- bash-4000 [01] 1477.606694: simple_strtoul+0x6/0xa0
2567 --
2568 -- sym-addr - this will also display the function address as well as
2569 -- the function name.
2570 --
2571 -- sym-addr:
2572 -- bash-4000 [01] 1477.606694: simple_strtoul <c0339346>
2573 --
2574 -- verbose - This deals with the latency_trace file.
2575 --
2576 -- bash 4000 1 0 00000000 00010a95 [58127d26] 1720.415ms \
2577 -- (+0.000ms): simple_strtoul (strict_strtoul)
2578 --
2579 -- raw - This will display raw numbers. This option is best for use with
2580 -- user applications that can translate the raw numbers better than
2581 -- having it done in the kernel.
2582 --
2583 -- hex - similar to raw, but the numbers will be in a hexadecimal format.
2584 --
2585 -- bin - This will print out the formats in raw binary.
2586 --
2587 -- block - TBD (needs update)
2588 --
2589 -- stacktrace - This is one of the options that changes the trace itself.
2590 -- When a trace is recorded, so is the stack of functions.
2591 -- This allows for back traces of trace sites.
2592 --
2593 -- sched-tree - TBD (any users??)
2594 --
2595 --
2596 --sched_switch
2597 --------------
2598 --
2599 --This tracer simply records schedule switches. Here's an example
2600 --on how to implement it.
2601 --
2602 -- # echo sched_switch > /debug/tracing/current_tracer
2603 -- # echo 1 > /debug/tracing/tracing_enabled
2604 -- # sleep 1
2605 -- # echo 0 > /debug/tracing/tracing_enabled
2606 -- # cat /debug/tracing/trace
2607 --
2608 --# tracer: sched_switch
2609 --#
2610 --# TASK-PID CPU# TIMESTAMP FUNCTION
2611 --# | | | | |
2612 -- bash-3997 [01] 240.132281: 3997:120:R + 4055:120:R
2613 -- bash-3997 [01] 240.132284: 3997:120:R ==> 4055:120:R
2614 -- sleep-4055 [01] 240.132371: 4055:120:S ==> 3997:120:R
2615 -- bash-3997 [01] 240.132454: 3997:120:R + 4055:120:S
2616 -- bash-3997 [01] 240.132457: 3997:120:R ==> 4055:120:R
2617 -- sleep-4055 [01] 240.132460: 4055:120:D ==> 3997:120:R
2618 -- bash-3997 [01] 240.132463: 3997:120:R + 4055:120:D
2619 -- bash-3997 [01] 240.132465: 3997:120:R ==> 4055:120:R
2620 -- <idle>-0 [00] 240.132589: 0:140:R + 4:115:S
2621 -- <idle>-0 [00] 240.132591: 0:140:R ==> 4:115:R
2622 -- ksoftirqd/0-4 [00] 240.132595: 4:115:S ==> 0:140:R
2623 -- <idle>-0 [00] 240.132598: 0:140:R + 4:115:S
2624 -- <idle>-0 [00] 240.132599: 0:140:R ==> 4:115:R
2625 -- ksoftirqd/0-4 [00] 240.132603: 4:115:S ==> 0:140:R
2626 -- sleep-4055 [01] 240.133058: 4055:120:S ==> 3997:120:R
2627 -- [...]
2628 --
2629 --
2630 --As we have discussed previously about this format, the header shows
2631 --the name of the trace and points to the options. The "FUNCTION"
2632 --is a misnomer since here it represents the wake ups and context
2633 --switches.
2634 --
2635 --The sched_switch only lists the wake ups (represented with '+')
2636 --and context switches ('==>') with the previous task or current
2637 --first followed by the next task or task waking up. The format for both
2638 --of these is PID:KERNEL-PRIO:TASK-STATE. Remember that the KERNEL-PRIO
2639 --is the inverse of the actual priority with zero (0) being the highest
2640 --priority and the nice values starting at 100 (nice -20). Below is
2641 --a quick chart to map the kernel priority to user land priorities.
2642 --
2643 -- Kernel priority: 0 to 99 ==> user RT priority 99 to 0
2644 -- Kernel priority: 100 to 139 ==> user nice -20 to 19
2645 -- Kernel priority: 140 ==> idle task priority
2646 --
2647 --The task states are:
2648 --
2649 -- R - running : wants to run, may not actually be running
2650 -- S - sleep : process is waiting to be woken up (handles signals)
2651 -- D - deep sleep : process must be woken up (ignores signals)
2652 -- T - stopped : process suspended
2653 -- t - traced : process is being traced (with something like gdb)
2654 -- Z - zombie : process waiting to be cleaned up
2655 -- X - unknown
2656 --
2657 --
2658 --ftrace_enabled
2659 ----------------
2660 --
2661 --The following tracers give different output depending on whether
2662 --or not the sysctl ftrace_enabled is set. To set ftrace_enabled,
2663 --one can either use the sysctl function or set it via the proc
2664 --file system interface.
2665 --
2666 -- sysctl kernel.ftrace_enabled=1
2667 --
2668 -- or
2669 --
2670 -- echo 1 > /proc/sys/kernel/ftrace_enabled
2671 --
2672 --To disable ftrace_enabled simply replace the '1' with '0' in
2673 --the above commands.
2674 --
2675 --When ftrace_enabled is set the tracers will also record the functions
2676 --that are within the trace. The descriptions of the tracers
2677 --will also show an example with ftrace enabled.
2678 --
2679 --
2680 --irqsoff
2681 ---------
2682 --
2683 --When interrupts are disabled, the CPU can not react to any other
2684 --external event (besides NMIs and SMIs). This prevents the timer
2685 --interrupt from triggering or the mouse interrupt from letting the
2686 --kernel know of a new mouse event. The result is a latency with the
2687 --reaction time.
2688 --
2689 --The irqsoff tracer tracks the time interrupts are disabled and when
2690 --they are re-enabled. When a new maximum latency is hit, it saves off
2691 --the trace so that it may be retrieved at a later time. Every time a
2692 --new maximum is reached, the old saved trace is discarded and the new
2693 --trace is saved.
2694 --
2695 --To reset the maximum, echo 0 into tracing_max_latency. Here's an
2696 --example:
2697 --
2698 -- # echo irqsoff > /debug/tracing/current_tracer
2699 -- # echo 0 > /debug/tracing/tracing_max_latency
2700 -- # echo 1 > /debug/tracing/tracing_enabled
2701 -- # ls -ltr
2702 -- [...]
2703 -- # echo 0 > /debug/tracing/tracing_enabled
2704 -- # cat /debug/tracing/latency_trace
2705 --# tracer: irqsoff
2706 --#
2707 --irqsoff latency trace v1.1.5 on 2.6.26-rc8
2708 ----------------------------------------------------------------------
2709 -- latency: 6 us, #3/3, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
2710 -- -----------------
2711 -- | task: bash-4269 (uid:0 nice:0 policy:0 rt_prio:0)
2712 -- -----------------
2713 -- => started at: copy_page_range
2714 -- => ended at: copy_page_range
2715 --
2716 --# _------=> CPU#
2717 --# / _-----=> irqs-off
2718 --# | / _----=> need-resched
2719 --# || / _---=> hardirq/softirq
2720 --# ||| / _--=> preempt-depth
2721 --# |||| /
2722 --# ||||| delay
2723 --# cmd pid ||||| time | caller
2724 --# \ / ||||| \ | /
2725 -- bash-4269 1...1 0us+: _spin_lock (copy_page_range)
2726 -- bash-4269 1...1 7us : _spin_unlock (copy_page_range)
2727 -- bash-4269 1...2 7us : trace_preempt_on (copy_page_range)
2728 --
2729 --
2730 --vim:ft=help
2731 --
2732 --Here we see that we had a latency of 6 microsecs (which is
2733 --very good). The spin_lock in copy_page_range disabled interrupts.
2734 --The difference between the 6 and the displayed timestamp 7us is
2735 --because the clock must have incremented between the time of recording
2736 --the max latency and recording the function that had that latency.
2737 --
2738 --Note the above had ftrace_enabled not set. If we set the ftrace_enabled
2739 --we get a much larger output:
2740 --
2741 --# tracer: irqsoff
2742 --#
2743 --irqsoff latency trace v1.1.5 on 2.6.26-rc8
2744 ----------------------------------------------------------------------
2745 -- latency: 50 us, #101/101, CPU#0 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
2746 -- -----------------
2747 -- | task: ls-4339 (uid:0 nice:0 policy:0 rt_prio:0)
2748 -- -----------------
2749 -- => started at: __alloc_pages_internal
2750 -- => ended at: __alloc_pages_internal
2751 --
2752 --# _------=> CPU#
2753 --# / _-----=> irqs-off
2754 --# | / _----=> need-resched
2755 --# || / _---=> hardirq/softirq
2756 --# ||| / _--=> preempt-depth
2757 --# |||| /
2758 --# ||||| delay
2759 --# cmd pid ||||| time | caller
2760 --# \ / ||||| \ | /
2761 -- ls-4339 0...1 0us+: get_page_from_freelist (__alloc_pages_internal)
2762 -- ls-4339 0d..1 3us : rmqueue_bulk (get_page_from_freelist)
2763 -- ls-4339 0d..1 3us : _spin_lock (rmqueue_bulk)
2764 -- ls-4339 0d..1 4us : add_preempt_count (_spin_lock)
2765 -- ls-4339 0d..2 4us : __rmqueue (rmqueue_bulk)
2766 -- ls-4339 0d..2 5us : __rmqueue_smallest (__rmqueue)
2767 -- ls-4339 0d..2 5us : __mod_zone_page_state (__rmqueue_smallest)
2768 -- ls-4339 0d..2 6us : __rmqueue (rmqueue_bulk)
2769 -- ls-4339 0d..2 6us : __rmqueue_smallest (__rmqueue)
2770 -- ls-4339 0d..2 7us : __mod_zone_page_state (__rmqueue_smallest)
2771 -- ls-4339 0d..2 7us : __rmqueue (rmqueue_bulk)
2772 -- ls-4339 0d..2 8us : __rmqueue_smallest (__rmqueue)
2773 --[...]
2774 -- ls-4339 0d..2 46us : __rmqueue_smallest (__rmqueue)
2775 -- ls-4339 0d..2 47us : __mod_zone_page_state (__rmqueue_smallest)
2776 -- ls-4339 0d..2 47us : __rmqueue (rmqueue_bulk)
2777 -- ls-4339 0d..2 48us : __rmqueue_smallest (__rmqueue)
2778 -- ls-4339 0d..2 48us : __mod_zone_page_state (__rmqueue_smallest)
2779 -- ls-4339 0d..2 49us : _spin_unlock (rmqueue_bulk)
2780 -- ls-4339 0d..2 49us : sub_preempt_count (_spin_unlock)
2781 -- ls-4339 0d..1 50us : get_page_from_freelist (__alloc_pages_internal)
2782 -- ls-4339 0d..2 51us : trace_hardirqs_on (__alloc_pages_internal)
2783 --
2784 --
2785 --vim:ft=help
2786 --
2787 --
2788 --Here we traced a 50 microsecond latency. But we also see all the
2789 --functions that were called during that time. Note that enabling
2790 --function tracing we endure an added overhead. This overhead may
2791 --extend the latency times. But nevertheless, this trace has provided
2792 --some very helpful debugging.
2793 --
2794 --
2795 --preemptoff
2796 ------------
2797 --
2798 --When preemption is disabled we may be able to receive interrupts but
2799 --the task can not be preempted and a higher priority task must wait
2800 --for preemption to be enabled again before it can preempt a lower
2801 --priority task.
2802 --
2803 --The preemptoff tracer traces the places that disables preemption.
2804 --Like the irqsoff, it records the maximum latency that preemption
2805 --was disabled. The control of preemptoff is much like the irqsoff.
2806 --
2807 -- # echo preemptoff > /debug/tracing/current_tracer
2808 -- # echo 0 > /debug/tracing/tracing_max_latency
2809 -- # echo 1 > /debug/tracing/tracing_enabled
2810 -- # ls -ltr
2811 -- [...]
2812 -- # echo 0 > /debug/tracing/tracing_enabled
2813 -- # cat /debug/tracing/latency_trace
2814 --# tracer: preemptoff
2815 --#
2816 --preemptoff latency trace v1.1.5 on 2.6.26-rc8
2817 ----------------------------------------------------------------------
2818 -- latency: 29 us, #3/3, CPU#0 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
2819 -- -----------------
2820 -- | task: sshd-4261 (uid:0 nice:0 policy:0 rt_prio:0)
2821 -- -----------------
2822 -- => started at: do_IRQ
2823 -- => ended at: __do_softirq
2824 --
2825 --# _------=> CPU#
2826 --# / _-----=> irqs-off
2827 --# | / _----=> need-resched
2828 --# || / _---=> hardirq/softirq
2829 --# ||| / _--=> preempt-depth
2830 --# |||| /
2831 --# ||||| delay
2832 --# cmd pid ||||| time | caller
2833 --# \ / ||||| \ | /
2834 -- sshd-4261 0d.h. 0us+: irq_enter (do_IRQ)
2835 -- sshd-4261 0d.s. 29us : _local_bh_enable (__do_softirq)
2836 -- sshd-4261 0d.s1 30us : trace_preempt_on (__do_softirq)
2837 --
2838 --
2839 --vim:ft=help
2840 --
2841 --This has some more changes. Preemption was disabled when an interrupt
2842 --came in (notice the 'h'), and was enabled while doing a softirq.
2843 --(notice the 's'). But we also see that interrupts have been disabled
2844 --when entering the preempt off section and leaving it (the 'd').
2845 --We do not know if interrupts were enabled in the meantime.
2846 --
2847 --# tracer: preemptoff
2848 --#
2849 --preemptoff latency trace v1.1.5 on 2.6.26-rc8
2850 ----------------------------------------------------------------------
2851 -- latency: 63 us, #87/87, CPU#0 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
2852 -- -----------------
2853 -- | task: sshd-4261 (uid:0 nice:0 policy:0 rt_prio:0)
2854 -- -----------------
2855 -- => started at: remove_wait_queue
2856 -- => ended at: __do_softirq
2857 --
2858 --# _------=> CPU#
2859 --# / _-----=> irqs-off
2860 --# | / _----=> need-resched
2861 --# || / _---=> hardirq/softirq
2862 --# ||| / _--=> preempt-depth
2863 --# |||| /
2864 --# ||||| delay
2865 --# cmd pid ||||| time | caller
2866 --# \ / ||||| \ | /
2867 -- sshd-4261 0d..1 0us : _spin_lock_irqsave (remove_wait_queue)
2868 -- sshd-4261 0d..1 1us : _spin_unlock_irqrestore (remove_wait_queue)
2869 -- sshd-4261 0d..1 2us : do_IRQ (common_interrupt)
2870 -- sshd-4261 0d..1 2us : irq_enter (do_IRQ)
2871 -- sshd-4261 0d..1 2us : idle_cpu (irq_enter)
2872 -- sshd-4261 0d..1 3us : add_preempt_count (irq_enter)
2873 -- sshd-4261 0d.h1 3us : idle_cpu (irq_enter)
2874 -- sshd-4261 0d.h. 4us : handle_fasteoi_irq (do_IRQ)
2875 --[...]
2876 -- sshd-4261 0d.h. 12us : add_preempt_count (_spin_lock)
2877 -- sshd-4261 0d.h1 12us : ack_ioapic_quirk_irq (handle_fasteoi_irq)
2878 -- sshd-4261 0d.h1 13us : move_native_irq (ack_ioapic_quirk_irq)
2879 -- sshd-4261 0d.h1 13us : _spin_unlock (handle_fasteoi_irq)
2880 -- sshd-4261 0d.h1 14us : sub_preempt_count (_spin_unlock)
2881 -- sshd-4261 0d.h1 14us : irq_exit (do_IRQ)
2882 -- sshd-4261 0d.h1 15us : sub_preempt_count (irq_exit)
2883 -- sshd-4261 0d..2 15us : do_softirq (irq_exit)
2884 -- sshd-4261 0d... 15us : __do_softirq (do_softirq)
2885 -- sshd-4261 0d... 16us : __local_bh_disable (__do_softirq)
2886 -- sshd-4261 0d... 16us+: add_preempt_count (__local_bh_disable)
2887 -- sshd-4261 0d.s4 20us : add_preempt_count (__local_bh_disable)
2888 -- sshd-4261 0d.s4 21us : sub_preempt_count (local_bh_enable)
2889 -- sshd-4261 0d.s5 21us : sub_preempt_count (local_bh_enable)
2890 --[...]
2891 -- sshd-4261 0d.s6 41us : add_preempt_count (__local_bh_disable)
2892 -- sshd-4261 0d.s6 42us : sub_preempt_count (local_bh_enable)
2893 -- sshd-4261 0d.s7 42us : sub_preempt_count (local_bh_enable)
2894 -- sshd-4261 0d.s5 43us : add_preempt_count (__local_bh_disable)
2895 -- sshd-4261 0d.s5 43us : sub_preempt_count (local_bh_enable_ip)
2896 -- sshd-4261 0d.s6 44us : sub_preempt_count (local_bh_enable_ip)
2897 -- sshd-4261 0d.s5 44us : add_preempt_count (__local_bh_disable)
2898 -- sshd-4261 0d.s5 45us : sub_preempt_count (local_bh_enable)
2899 --[...]
2900 -- sshd-4261 0d.s. 63us : _local_bh_enable (__do_softirq)
2901 -- sshd-4261 0d.s1 64us : trace_preempt_on (__do_softirq)
2902 --
2903 --
2904 --The above is an example of the preemptoff trace with ftrace_enabled
2905 --set. Here we see that interrupts were disabled the entire time.
2906 --The irq_enter code lets us know that we entered an interrupt 'h'.
2907 --Before that, the functions being traced still show that it is not
2908 --in an interrupt, but we can see by the functions themselves that
2909 --this is not the case.
2910 --
2911 --Notice that the __do_softirq when called doesn't have a preempt_count.
2912 --It may seem that we missed a preempt enabled. What really happened
2913 --is that the preempt count is held on the thread's stack and we
2914 --switched to the softirq stack (4K stacks in effect). The code
2915 --does not copy the preempt count, but because interrupts are disabled
2916 --we don't need to worry about it. Having a tracer like this is good
2917 --to let people know what really happens inside the kernel.
2918 --
2919 --
2920 --preemptirqsoff
2921 ----------------
2922 --
2923 --Knowing the locations that have interrupts disabled or preemption
2924 --disabled for the longest times is helpful. But sometimes we would
2925 --like to know when either preemption and/or interrupts are disabled.
2926 --
2927 --The following code:
2928 --
2929 -- local_irq_disable();
2930 -- call_function_with_irqs_off();
2931 -- preempt_disable();
2932 -- call_function_with_irqs_and_preemption_off();
2933 -- local_irq_enable();
2934 -- call_function_with_preemption_off();
2935 -- preempt_enable();
2936 --
2937 --The irqsoff tracer will record the total length of
2938 --call_function_with_irqs_off() and
2939 --call_function_with_irqs_and_preemption_off().
2940 --
2941 --The preemptoff tracer will record the total length of
2942 --call_function_with_irqs_and_preemption_off() and
2943 --call_function_with_preemption_off().
2944 --
2945 --But neither will trace the time that interrupts and/or preemption
2946 --is disabled. This total time is the time that we can not schedule.
2947 --To record this time, use the preemptirqsoff tracer.
2948 --
2949 --Again, using this trace is much like the irqsoff and preemptoff tracers.
2950 --
2951 -- # echo preemptirqsoff > /debug/tracing/current_tracer
2952 -- # echo 0 > /debug/tracing/tracing_max_latency
2953 -- # echo 1 > /debug/tracing/tracing_enabled
2954 -- # ls -ltr
2955 -- [...]
2956 -- # echo 0 > /debug/tracing/tracing_enabled
2957 -- # cat /debug/tracing/latency_trace
2958 --# tracer: preemptirqsoff
2959 --#
2960 --preemptirqsoff latency trace v1.1.5 on 2.6.26-rc8
2961 ----------------------------------------------------------------------
2962 -- latency: 293 us, #3/3, CPU#0 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
2963 -- -----------------
2964 -- | task: ls-4860 (uid:0 nice:0 policy:0 rt_prio:0)
2965 -- -----------------
2966 -- => started at: apic_timer_interrupt
2967 -- => ended at: __do_softirq
2968 --
2969 --# _------=> CPU#
2970 --# / _-----=> irqs-off
2971 --# | / _----=> need-resched
2972 --# || / _---=> hardirq/softirq
2973 --# ||| / _--=> preempt-depth
2974 --# |||| /
2975 --# ||||| delay
2976 --# cmd pid ||||| time | caller
2977 --# \ / ||||| \ | /
2978 -- ls-4860 0d... 0us!: trace_hardirqs_off_thunk (apic_timer_interrupt)
2979 -- ls-4860 0d.s. 294us : _local_bh_enable (__do_softirq)
2980 -- ls-4860 0d.s1 294us : trace_preempt_on (__do_softirq)
2981 --
2982 --
2983 --vim:ft=help
2984 --
2985 --
2986 --The trace_hardirqs_off_thunk is called from assembly on x86 when
2987 --interrupts are disabled in the assembly code. Without the function
2988 --tracing, we don't know if interrupts were enabled within the preemption
2989 --points. We do see that it started with preemption enabled.
2990 --
2991 --Here is a trace with ftrace_enabled set:
2992 --
2993 --
2994 --# tracer: preemptirqsoff
2995 --#
2996 --preemptirqsoff latency trace v1.1.5 on 2.6.26-rc8
2997 ----------------------------------------------------------------------
2998 -- latency: 105 us, #183/183, CPU#0 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
2999 -- -----------------
3000 -- | task: sshd-4261 (uid:0 nice:0 policy:0 rt_prio:0)
3001 -- -----------------
3002 -- => started at: write_chan
3003 -- => ended at: __do_softirq
3004 --
3005 --# _------=> CPU#
3006 --# / _-----=> irqs-off
3007 --# | / _----=> need-resched
3008 --# || / _---=> hardirq/softirq
3009 --# ||| / _--=> preempt-depth
3010 --# |||| /
3011 --# ||||| delay
3012 --# cmd pid ||||| time | caller
3013 --# \ / ||||| \ | /
3014 -- ls-4473 0.N.. 0us : preempt_schedule (write_chan)
3015 -- ls-4473 0dN.1 1us : _spin_lock (schedule)
3016 -- ls-4473 0dN.1 2us : add_preempt_count (_spin_lock)
3017 -- ls-4473 0d..2 2us : put_prev_task_fair (schedule)
3018 --[...]
3019 -- ls-4473 0d..2 13us : set_normalized_timespec (ktime_get_ts)
3020 -- ls-4473 0d..2 13us : __switch_to (schedule)
3021 -- sshd-4261 0d..2 14us : finish_task_switch (schedule)
3022 -- sshd-4261 0d..2 14us : _spin_unlock_irq (finish_task_switch)
3023 -- sshd-4261 0d..1 15us : add_preempt_count (_spin_lock_irqsave)
3024 -- sshd-4261 0d..2 16us : _spin_unlock_irqrestore (hrtick_set)
3025 -- sshd-4261 0d..2 16us : do_IRQ (common_interrupt)
3026 -- sshd-4261 0d..2 17us : irq_enter (do_IRQ)
3027 -- sshd-4261 0d..2 17us : idle_cpu (irq_enter)
3028 -- sshd-4261 0d..2 18us : add_preempt_count (irq_enter)
3029 -- sshd-4261 0d.h2 18us : idle_cpu (irq_enter)
3030 -- sshd-4261 0d.h. 18us : handle_fasteoi_irq (do_IRQ)
3031 -- sshd-4261 0d.h. 19us : _spin_lock (handle_fasteoi_irq)
3032 -- sshd-4261 0d.h. 19us : add_preempt_count (_spin_lock)
3033 -- sshd-4261 0d.h1 20us : _spin_unlock (handle_fasteoi_irq)
3034 -- sshd-4261 0d.h1 20us : sub_preempt_count (_spin_unlock)
3035 --[...]
3036 -- sshd-4261 0d.h1 28us : _spin_unlock (handle_fasteoi_irq)
3037 -- sshd-4261 0d.h1 29us : sub_preempt_count (_spin_unlock)
3038 -- sshd-4261 0d.h2 29us : irq_exit (do_IRQ)
3039 -- sshd-4261 0d.h2 29us : sub_preempt_count (irq_exit)
3040 -- sshd-4261 0d..3 30us : do_softirq (irq_exit)
3041 -- sshd-4261 0d... 30us : __do_softirq (do_softirq)
3042 -- sshd-4261 0d... 31us : __local_bh_disable (__do_softirq)
3043 -- sshd-4261 0d... 31us+: add_preempt_count (__local_bh_disable)
3044 -- sshd-4261 0d.s4 34us : add_preempt_count (__local_bh_disable)
3045 --[...]
3046 -- sshd-4261 0d.s3 43us : sub_preempt_count (local_bh_enable_ip)
3047 -- sshd-4261 0d.s4 44us : sub_preempt_count (local_bh_enable_ip)
3048 -- sshd-4261 0d.s3 44us : smp_apic_timer_interrupt (apic_timer_interrupt)
3049 -- sshd-4261 0d.s3 45us : irq_enter (smp_apic_timer_interrupt)
3050 -- sshd-4261 0d.s3 45us : idle_cpu (irq_enter)
3051 -- sshd-4261 0d.s3 46us : add_preempt_count (irq_enter)
3052 -- sshd-4261 0d.H3 46us : idle_cpu (irq_enter)
3053 -- sshd-4261 0d.H3 47us : hrtimer_interrupt (smp_apic_timer_interrupt)
3054 -- sshd-4261 0d.H3 47us : ktime_get (hrtimer_interrupt)
3055 --[...]
3056 -- sshd-4261 0d.H3 81us : tick_program_event (hrtimer_interrupt)
3057 -- sshd-4261 0d.H3 82us : ktime_get (tick_program_event)
3058 -- sshd-4261 0d.H3 82us : ktime_get_ts (ktime_get)
3059 -- sshd-4261 0d.H3 83us : getnstimeofday (ktime_get_ts)
3060 -- sshd-4261 0d.H3 83us : set_normalized_timespec (ktime_get_ts)
3061 -- sshd-4261 0d.H3 84us : clockevents_program_event (tick_program_event)
3062 -- sshd-4261 0d.H3 84us : lapic_next_event (clockevents_program_event)
3063 -- sshd-4261 0d.H3 85us : irq_exit (smp_apic_timer_interrupt)
3064 -- sshd-4261 0d.H3 85us : sub_preempt_count (irq_exit)
3065 -- sshd-4261 0d.s4 86us : sub_preempt_count (irq_exit)
3066 -- sshd-4261 0d.s3 86us : add_preempt_count (__local_bh_disable)
3067 --[...]
3068 -- sshd-4261 0d.s1 98us : sub_preempt_count (net_rx_action)
3069 -- sshd-4261 0d.s. 99us : add_preempt_count (_spin_lock_irq)
3070 -- sshd-4261 0d.s1 99us+: _spin_unlock_irq (run_timer_softirq)
3071 -- sshd-4261 0d.s. 104us : _local_bh_enable (__do_softirq)
3072 -- sshd-4261 0d.s. 104us : sub_preempt_count (_local_bh_enable)
3073 -- sshd-4261 0d.s. 105us : _local_bh_enable (__do_softirq)
3074 -- sshd-4261 0d.s1 105us : trace_preempt_on (__do_softirq)
3075 --
3076 --
3077 --This is a very interesting trace. It started with the preemption of
3078 --the ls task. We see that the task had the "need_resched" bit set
3079 --with the 'N' in the trace. Interrupts are disabled in the spin_lock
3080 --and the trace started. We see that a schedule took place to run
3081 --sshd. When the interrupts were enabled we took an interrupt.
3082 --On return of the interrupt the softirq ran. We took another interrupt
3083 --while running the softirq as we see with the capital 'H'.
3084 --
3085 --
3086 --wakeup
3087 --------
3088 --
3089 --In Real-Time environment it is very important to know the wakeup
3090 --time it takes for the highest priority task that wakes up to the
3091 --time it executes. This is also known as "schedule latency".
3092 --I stress the point that this is about RT tasks. It is also important
3093 --to know the scheduling latency of non-RT tasks, but the average
3094 --schedule latency is better for non-RT tasks. Tools like
3095 --LatencyTop are more appropriate for such measurements.
3096 --
3097 --Real-Time environments are interested in the worst case latency.
3098 --That is the longest latency it takes for something to happen, and
3099 --not the average. We can have a very fast scheduler that may only
3100 --have a large latency once in a while, but that would not work well
3101 --with Real-Time tasks. The wakeup tracer was designed to record
3102 --the worst case wakeups of RT tasks. Non-RT tasks are not recorded
3103 --because the tracer only records one worst case and tracing non-RT
3104 --tasks that are unpredictable will overwrite the worst case latency
3105 --of RT tasks.
3106 --
3107 --Since this tracer only deals with RT tasks, we will run this slightly
3108 --different than we did with the previous tracers. Instead of performing
3109 --an 'ls' we will run 'sleep 1' under 'chrt' which changes the
3110 --priority of the task.
3111 --
3112 -- # echo wakeup > /debug/tracing/current_tracer
3113 -- # echo 0 > /debug/tracing/tracing_max_latency
3114 -- # echo 1 > /debug/tracing/tracing_enabled
3115 -- # chrt -f 5 sleep 1
3116 -- # echo 0 > /debug/tracing/tracing_enabled
3117 -- # cat /debug/tracing/latency_trace
3118 --# tracer: wakeup
3119 --#
3120 --wakeup latency trace v1.1.5 on 2.6.26-rc8
3121 ----------------------------------------------------------------------
3122 -- latency: 4 us, #2/2, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
3123 -- -----------------
3124 -- | task: sleep-4901 (uid:0 nice:0 policy:1 rt_prio:5)
3125 -- -----------------
3126 --
3127 --# _------=> CPU#
3128 --# / _-----=> irqs-off
3129 --# | / _----=> need-resched
3130 --# || / _---=> hardirq/softirq
3131 --# ||| / _--=> preempt-depth
3132 --# |||| /
3133 --# ||||| delay
3134 --# cmd pid ||||| time | caller
3135 --# \ / ||||| \ | /
3136 -- <idle>-0 1d.h4 0us+: try_to_wake_up (wake_up_process)
3137 -- <idle>-0 1d..4 4us : schedule (cpu_idle)
3138 --
3139 --
3140 --vim:ft=help
3141 --
3142 --
3143 --Running this on an idle system we see that it only took 4 microseconds
3144 --to perform the task switch. Note, since the trace marker in the
3145 --schedule is before the actual "switch" we stop the tracing when
3146 --the recorded task is about to schedule in. This may change if
3147 --we add a new marker at the end of the scheduler.
3148 --
3149 --Notice that the recorded task is 'sleep' with the PID of 4901 and it
3150 --has an rt_prio of 5. This priority is user-space priority and not
3151 --the internal kernel priority. The policy is 1 for SCHED_FIFO and 2
3152 --for SCHED_RR.
3153 --
3154 --Doing the same with chrt -r 5 and ftrace_enabled set.
3155 --
3156 --# tracer: wakeup
3157 --#
3158 --wakeup latency trace v1.1.5 on 2.6.26-rc8
3159 ----------------------------------------------------------------------
3160 -- latency: 50 us, #60/60, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
3161 -- -----------------
3162 -- | task: sleep-4068 (uid:0 nice:0 policy:2 rt_prio:5)
3163 -- -----------------
3164 --
3165 --# _------=> CPU#
3166 --# / _-----=> irqs-off
3167 --# | / _----=> need-resched
3168 --# || / _---=> hardirq/softirq
3169 --# ||| / _--=> preempt-depth
3170 --# |||| /
3171 --# ||||| delay
3172 --# cmd pid ||||| time | caller
3173 --# \ / ||||| \ | /
3174 --ksoftirq-7 1d.H3 0us : try_to_wake_up (wake_up_process)
3175 --ksoftirq-7 1d.H4 1us : sub_preempt_count (marker_probe_cb)
3176 --ksoftirq-7 1d.H3 2us : check_preempt_wakeup (try_to_wake_up)
3177 --ksoftirq-7 1d.H3 3us : update_curr (check_preempt_wakeup)
3178 --ksoftirq-7 1d.H3 4us : calc_delta_mine (update_curr)
3179 --ksoftirq-7 1d.H3 5us : __resched_task (check_preempt_wakeup)
3180 --ksoftirq-7 1d.H3 6us : task_wake_up_rt (try_to_wake_up)
3181 --ksoftirq-7 1d.H3 7us : _spin_unlock_irqrestore (try_to_wake_up)
3182 --[...]
3183 --ksoftirq-7 1d.H2 17us : irq_exit (smp_apic_timer_interrupt)
3184 --ksoftirq-7 1d.H2 18us : sub_preempt_count (irq_exit)
3185 --ksoftirq-7 1d.s3 19us : sub_preempt_count (irq_exit)
3186 --ksoftirq-7 1..s2 20us : rcu_process_callbacks (__do_softirq)
3187 --[...]
3188 --ksoftirq-7 1..s2 26us : __rcu_process_callbacks (rcu_process_callbacks)
3189 --ksoftirq-7 1d.s2 27us : _local_bh_enable (__do_softirq)
3190 --ksoftirq-7 1d.s2 28us : sub_preempt_count (_local_bh_enable)
3191 --ksoftirq-7 1.N.3 29us : sub_preempt_count (ksoftirqd)
3192 --ksoftirq-7 1.N.2 30us : _cond_resched (ksoftirqd)
3193 --ksoftirq-7 1.N.2 31us : __cond_resched (_cond_resched)
3194 --ksoftirq-7 1.N.2 32us : add_preempt_count (__cond_resched)
3195 --ksoftirq-7 1.N.2 33us : schedule (__cond_resched)
3196 --ksoftirq-7 1.N.2 33us : add_preempt_count (schedule)
3197 --ksoftirq-7 1.N.3 34us : hrtick_clear (schedule)
3198 --ksoftirq-7 1dN.3 35us : _spin_lock (schedule)
3199 --ksoftirq-7 1dN.3 36us : add_preempt_count (_spin_lock)
3200 --ksoftirq-7 1d..4 37us : put_prev_task_fair (schedule)
3201 --ksoftirq-7 1d..4 38us : update_curr (put_prev_task_fair)
3202 --[...]
3203 --ksoftirq-7 1d..5 47us : _spin_trylock (tracing_record_cmdline)
3204 --ksoftirq-7 1d..5 48us : add_preempt_count (_spin_trylock)
3205 --ksoftirq-7 1d..6 49us : _spin_unlock (tracing_record_cmdline)
3206 --ksoftirq-7 1d..6 49us : sub_preempt_count (_spin_unlock)
3207 --ksoftirq-7 1d..4 50us : schedule (__cond_resched)
3208 --
3209 --The interrupt went off while running ksoftirqd. This task runs at
3210 --SCHED_OTHER. Why didn't we see the 'N' set early? This may be
3211 --a harmless bug with x86_32 and 4K stacks. The need_resched() function
3212 --that tests if we need to reschedule looks on the actual stack.
3213 --Whereas the setting of the NEED_RESCHED bit happens on the
3214 --task's stack. But because we are in a hard interrupt, the test
3215 --is with the interrupt stack, which has it set to false. We don't
3216 --see the 'N' until we switch back to the task's stack.
3217 --
3218 --ftrace
3219 --------
3220 --
3221 --ftrace is not only the name of the tracing infrastructure, but it
3222 --is also a name of one of the tracers. The tracer is the function
3223 --tracer. Enabling the function tracer can be done from the
3224 --debug file system. Make sure the ftrace_enabled is set otherwise
3225 --this tracer is a nop.
3226 --
3227 -- # sysctl kernel.ftrace_enabled=1
3228 -- # echo ftrace > /debug/tracing/current_tracer
3229 -- # echo 1 > /debug/tracing/tracing_enabled
3230 -- # usleep 1
3231 -- # echo 0 > /debug/tracing/tracing_enabled
3232 -- # cat /debug/tracing/trace
3233 --# tracer: ftrace
3234 --#
3235 --# TASK-PID CPU# TIMESTAMP FUNCTION
3236 --# | | | | |
3237 -- bash-4003 [00] 123.638713: finish_task_switch <-schedule
3238 -- bash-4003 [00] 123.638714: _spin_unlock_irq <-finish_task_switch
3239 -- bash-4003 [00] 123.638714: sub_preempt_count <-_spin_unlock_irq
3240 -- bash-4003 [00] 123.638715: hrtick_set <-schedule
3241 -- bash-4003 [00] 123.638715: _spin_lock_irqsave <-hrtick_set
3242 -- bash-4003 [00] 123.638716: add_preempt_count <-_spin_lock_irqsave
3243 -- bash-4003 [00] 123.638716: _spin_unlock_irqrestore <-hrtick_set
3244 -- bash-4003 [00] 123.638717: sub_preempt_count <-_spin_unlock_irqrestore
3245 -- bash-4003 [00] 123.638717: hrtick_clear <-hrtick_set
3246 -- bash-4003 [00] 123.638718: sub_preempt_count <-schedule
3247 -- bash-4003 [00] 123.638718: sub_preempt_count <-preempt_schedule
3248 -- bash-4003 [00] 123.638719: wait_for_completion <-__stop_machine_run
3249 -- bash-4003 [00] 123.638719: wait_for_common <-wait_for_completion
3250 -- bash-4003 [00] 123.638720: _spin_lock_irq <-wait_for_common
3251 -- bash-4003 [00] 123.638720: add_preempt_count <-_spin_lock_irq
3252 --[...]
3253 --
3254 --
3255 --Note: It is sometimes better to enable or disable tracing directly from
3256 --a program, because the buffer may be overflowed by the echo commands
3257 --before you get to the point you want to trace. It is also easier to
3258 --stop the tracing at the point that you hit the part that you are
3259 --interested in. Since the ftrace buffer is a ring buffer with the
3260 --oldest data being overwritten, usually it is sufficient to start the
3261 --tracer with an echo command but have your code stop it. Something
3262 --like the following is usually appropriate for this.
3263 --
3264 --int trace_fd;
3265 --[...]
3266 --int main(int argc, char *argv[]) {
3267 -- [...]
3268 -- trace_fd = open("/debug/tracing/tracing_enabled", O_WRONLY);
3269 -- [...]
3270 -- if (condition_hit()) {
3271 -- write(trace_fd, "0", 1);
3272 -- }
3273 -- [...]
3274 --}
3275 --
3276 --
3277 --dynamic ftrace
3278 ----------------
3279 --
3280 --If CONFIG_DYNAMIC_FTRACE is set, then the system will run with
3281 --virtually no overhead when function tracing is disabled. The way
3282 --this works is the mcount function call (placed at the start of
3283 --every kernel function, produced by the -pg switch in gcc), starts
3284 --of pointing to a simple return.
3285 --
3286 --When dynamic ftrace is initialized, it calls kstop_machine to make it
3287 --act like a uniprocessor so that it can freely modify code without
3288 --worrying about other processors executing that same code. At
3289 --initialization, the mcount calls are change to call a "record_ip"
3290 --function. After this, the first time a kernel function is called,
3291 --it has the calling address saved in a hash table.
3292 --
3293 --Later on the ftraced kernel thread is awoken and will again call
3294 --kstop_machine if new functions have been recorded. The ftraced thread
3295 --will change all calls to mcount to "nop". Just calling mcount
3296 --and having mcount return has shown a 10% overhead. By converting
3297 --it to a nop, there is no recordable overhead to the system.
3298 --
3299 --One special side-effect to the recording of the functions being
3300 --traced, is that we can now selectively choose which functions we
3301 --want to trace and which ones we want the mcount calls to remain as
3302 --nops.
3303 --
3304 --Two files that control the enabling and disabling of recorded
3305 --functions are:
3306 --
3307 -- set_ftrace_filter
3308 --
3309 --and
3310 --
3311 -- set_ftrace_notrace
3312 --
3313 --A list of available functions that you can add to this files is listed
3314 --in:
3315 --
3316 -- available_filter_functions
3317 --
3318 -- # cat /debug/tracing/available_filter_functions
3319 --put_prev_task_idle
3320 --kmem_cache_create
3321 --pick_next_task_rt
3322 --get_online_cpus
3323 --pick_next_task_fair
3324 --mutex_lock
3325 --[...]
3326 --
3327 --If I'm only interested in sys_nanosleep and hrtimer_interrupt:
3328 --
3329 -- # echo sys_nanosleep hrtimer_interrupt \
3330 -- > /debug/tracing/set_ftrace_filter
3331 -- # echo ftrace > /debug/tracing/current_tracer
3332 -- # echo 1 > /debug/tracing/tracing_enabled
3333 -- # usleep 1
3334 -- # echo 0 > /debug/tracing/tracing_enabled
3335 -- # cat /debug/tracing/trace
3336 --# tracer: ftrace
3337 --#
3338 --# TASK-PID CPU# TIMESTAMP FUNCTION
3339 --# | | | | |
3340 -- usleep-4134 [00] 1317.070017: hrtimer_interrupt <-smp_apic_timer_interrupt
3341 -- usleep-4134 [00] 1317.070111: sys_nanosleep <-syscall_call
3342 -- <idle>-0 [00] 1317.070115: hrtimer_interrupt <-smp_apic_timer_interrupt
3343 --
3344 --To see what functions are being traced, you can cat the file:
3345 --
3346 -- # cat /debug/tracing/set_ftrace_filter
3347 --hrtimer_interrupt
3348 --sys_nanosleep
3349 --
3350 --
3351 --Perhaps this isn't enough. The filters also allow simple wild cards.
3352 --Only the following is currently available
3353 --
3354 -- <match>* - will match functions that begins with <match>
3355 -- *<match> - will match functions that end with <match>
3356 -- *<match>* - will match functions that have <match> in it
3357 --
3358 --Thats all the wild cards that are allowed.
3359 --
3360 -- <match>*<match> will not work.
3361 --
3362 -- # echo hrtimer_* > /debug/tracing/set_ftrace_filter
3363 --
3364 --Produces:
3365 --
3366 --# tracer: ftrace
3367 --#
3368 --# TASK-PID CPU# TIMESTAMP FUNCTION
3369 --# | | | | |
3370 -- bash-4003 [00] 1480.611794: hrtimer_init <-copy_process
3371 -- bash-4003 [00] 1480.611941: hrtimer_start <-hrtick_set
3372 -- bash-4003 [00] 1480.611956: hrtimer_cancel <-hrtick_clear
3373 -- bash-4003 [00] 1480.611956: hrtimer_try_to_cancel <-hrtimer_cancel
3374 -- <idle>-0 [00] 1480.612019: hrtimer_get_next_event <-get_next_timer_interrupt
3375 -- <idle>-0 [00] 1480.612025: hrtimer_get_next_event <-get_next_timer_interrupt
3376 -- <idle>-0 [00] 1480.612032: hrtimer_get_next_event <-get_next_timer_interrupt
3377 -- <idle>-0 [00] 1480.612037: hrtimer_get_next_event <-get_next_timer_interrupt
3378 -- <idle>-0 [00] 1480.612382: hrtimer_get_next_event <-get_next_timer_interrupt
3379 --
3380 --
3381 --Notice that we lost the sys_nanosleep.
3382 --
3383 -- # cat /debug/tracing/set_ftrace_filter
3384 --hrtimer_run_queues
3385 --hrtimer_run_pending
3386 --hrtimer_init
3387 --hrtimer_cancel
3388 --hrtimer_try_to_cancel
3389 --hrtimer_forward
3390 --hrtimer_start
3391 --hrtimer_reprogram
3392 --hrtimer_force_reprogram
3393 --hrtimer_get_next_event
3394 --hrtimer_interrupt
3395 --hrtimer_nanosleep
3396 --hrtimer_wakeup
3397 --hrtimer_get_remaining
3398 --hrtimer_get_res
3399 --hrtimer_init_sleeper
3400 --
3401 --
3402 --This is because the '>' and '>>' act just like they do in bash.
3403 --To rewrite the filters, use '>'
3404 --To append to the filters, use '>>'
3405 --
3406 --To clear out a filter so that all functions will be recorded again.
3407 --
3408 -- # echo > /debug/tracing/set_ftrace_filter
3409 -- # cat /debug/tracing/set_ftrace_filter
3410 -- #
3411 --
3412 --Again, now we want to append.
3413 --
3414 -- # echo sys_nanosleep > /debug/tracing/set_ftrace_filter
3415 -- # cat /debug/tracing/set_ftrace_filter
3416 --sys_nanosleep
3417 -- # echo hrtimer_* >> /debug/tracing/set_ftrace_filter
3418 -- # cat /debug/tracing/set_ftrace_filter
3419 --hrtimer_run_queues
3420 --hrtimer_run_pending
3421 --hrtimer_init
3422 --hrtimer_cancel
3423 --hrtimer_try_to_cancel
3424 --hrtimer_forward
3425 --hrtimer_start
3426 --hrtimer_reprogram
3427 --hrtimer_force_reprogram
3428 --hrtimer_get_next_event
3429 --hrtimer_interrupt
3430 --sys_nanosleep
3431 --hrtimer_nanosleep
3432 --hrtimer_wakeup
3433 --hrtimer_get_remaining
3434 --hrtimer_get_res
3435 --hrtimer_init_sleeper
3436 --
3437 --
3438 --The set_ftrace_notrace prevents those functions from being traced.
3439 --
3440 -- # echo '*preempt*' '*lock*' > /debug/tracing/set_ftrace_notrace
3441 --
3442 --Produces:
3443 --
3444 --# tracer: ftrace
3445 --#
3446 --# TASK-PID CPU# TIMESTAMP FUNCTION
3447 --# | | | | |
3448 -- bash-4043 [01] 115.281644: finish_task_switch <-schedule
3449 -- bash-4043 [01] 115.281645: hrtick_set <-schedule
3450 -- bash-4043 [01] 115.281645: hrtick_clear <-hrtick_set
3451 -- bash-4043 [01] 115.281646: wait_for_completion <-__stop_machine_run
3452 -- bash-4043 [01] 115.281647: wait_for_common <-wait_for_completion
3453 -- bash-4043 [01] 115.281647: kthread_stop <-stop_machine_run
3454 -- bash-4043 [01] 115.281648: init_waitqueue_head <-kthread_stop
3455 -- bash-4043 [01] 115.281648: wake_up_process <-kthread_stop
3456 -- bash-4043 [01] 115.281649: try_to_wake_up <-wake_up_process
3457 --
3458 --We can see that there's no more lock or preempt tracing.
3459 --
3460 --ftraced
3461 ---------
3462 --
3463 --As mentioned above, when dynamic ftrace is configured in, a kernel
3464 --thread wakes up once a second and checks to see if there are mcount
3465 --calls that need to be converted into nops. If there is not, then
3466 --it simply goes back to sleep. But if there is, it will call
3467 --kstop_machine to convert the calls to nops.
3468 --
3469 --There may be a case that you do not want this added latency.
3470 --Perhaps you are doing some audio recording and this activity might
3471 --cause skips in the playback. There is an interface to disable
3472 --and enable the ftraced kernel thread.
3473 --
3474 -- # echo 0 > /debug/tracing/ftraced_enabled
3475 --
3476 --This will disable the calling of the kstop_machine to update the
3477 --mcount calls to nops. Remember that there's a large overhead
3478 --to calling mcount. Without this kernel thread, that overhead will
3479 --exist.
3480 --
3481 --Any write to the ftraced_enabled file will cause the kstop_machine
3482 --to run if there are recorded calls to mcount. This means that a
3483 --user can manually perform the updates when they want to by simply
3484 --echoing a '0' into the ftraced_enabled file.
3485 --
3486 --The updates are also done at the beginning of enabling a tracer
3487 --that uses ftrace function recording.
3488 --
3489 --
3490 --trace_pipe
3491 ------------
3492 --
3493 --The trace_pipe outputs the same as trace, but the effect on the
3494 --tracing is different. Every read from trace_pipe is consumed.
3495 --This means that subsequent reads will be different. The trace
3496 --is live.
3497 --
3498 -- # echo ftrace > /debug/tracing/current_tracer
3499 -- # cat /debug/tracing/trace_pipe > /tmp/trace.out &
3500 --[1] 4153
3501 -- # echo 1 > /debug/tracing/tracing_enabled
3502 -- # usleep 1
3503 -- # echo 0 > /debug/tracing/tracing_enabled
3504 -- # cat /debug/tracing/trace
3505 --# tracer: ftrace
3506 --#
3507 --# TASK-PID CPU# TIMESTAMP FUNCTION
3508 --# | | | | |
3509 --
3510 -- #
3511 -- # cat /tmp/trace.out
3512 -- bash-4043 [00] 41.267106: finish_task_switch <-schedule
3513 -- bash-4043 [00] 41.267106: hrtick_set <-schedule
3514 -- bash-4043 [00] 41.267107: hrtick_clear <-hrtick_set
3515 -- bash-4043 [00] 41.267108: wait_for_completion <-__stop_machine_run
3516 -- bash-4043 [00] 41.267108: wait_for_common <-wait_for_completion
3517 -- bash-4043 [00] 41.267109: kthread_stop <-stop_machine_run
3518 -- bash-4043 [00] 41.267109: init_waitqueue_head <-kthread_stop
3519 -- bash-4043 [00] 41.267110: wake_up_process <-kthread_stop
3520 -- bash-4043 [00] 41.267110: try_to_wake_up <-wake_up_process
3521 -- bash-4043 [00] 41.267111: select_task_rq_rt <-try_to_wake_up
3522 --
3523 --
3524 --Note, reading the trace_pipe will block until more input is added.
3525 --By changing the tracer, trace_pipe will issue an EOF. We needed
3526 --to set the ftrace tracer _before_ cating the trace_pipe file.
3527 --
3528 --
3529 --trace entries
3530 ---------------
3531 --
3532 --Having too much or not enough data can be troublesome in diagnosing
3533 --some issue in the kernel. The file trace_entries is used to modify
3534 --the size of the internal trace buffers. The numbers listed
3535 --is the number of entries that can be recorded per CPU. To know
3536 --the full size, multiply the number of possible CPUS with the
3537 --number of entries.
3538 --
3539 -- # cat /debug/tracing/trace_entries
3540 --65620
3541 --
3542 --Note, to modify this you must have tracing fulling disabled. To do that,
3543 --echo "none" into the current_tracer.
3544 --
3545 -- # echo none > /debug/tracing/current_tracer
3546 -- # echo 100000 > /debug/tracing/trace_entries
3547 -- # cat /debug/tracing/trace_entries
3548 --100045
3549 --
3550 --
3551 --Notice that we echoed in 100,000 but the size is 100,045. The entries
3552 --are held by individual pages. It allocates the number of pages it takes
3553 --to fulfill the request. If more entries may fit on the last page
3554 --it will add them.
3555 --
3556 -- # echo 1 > /debug/tracing/trace_entries
3557 -- # cat /debug/tracing/trace_entries
3558 --85
3559 --
3560 --This shows us that 85 entries can fit on a single page.
3561 --
3562 --The number of pages that will be allocated is a percentage of available
3563 --memory. Allocating too much will produces an error.
3564 --
3565 -- # echo 1000000000000 > /debug/tracing/trace_entries
3566 ---bash: echo: write error: Cannot allocate memory
3567 -- # cat /debug/tracing/trace_entries
3568 --85
3569 --
3570 -diff --git a/arch/powerpc/kernel/ppc32.h b/arch/powerpc/kernel/ppc32.h
3571 -index 90e5627..fda05e2 100644
3572 ---- a/arch/powerpc/kernel/ppc32.h
3573 -+++ b/arch/powerpc/kernel/ppc32.h
3574 -@@ -135,4 +135,6 @@ struct ucontext32 {
3575 - struct mcontext32 uc_mcontext;
3576 - };
3577 -
3578 -+extern int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s);
3579 -+
3580 - #endif /* _PPC64_PPC32_H */
3581 -diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
3582 -index 4c1de6a..9d30e10 100644
3583 ---- a/arch/powerpc/kernel/ptrace32.c
3584 -+++ b/arch/powerpc/kernel/ptrace32.c
3585 -@@ -29,12 +29,15 @@
3586 - #include <linux/security.h>
3587 - #include <linux/signal.h>
3588 - #include <linux/compat.h>
3589 -+#include <linux/elf.h>
3590 -
3591 - #include <asm/uaccess.h>
3592 - #include <asm/page.h>
3593 - #include <asm/pgtable.h>
3594 - #include <asm/system.h>
3595 -
3596 -+#include "ppc32.h"
3597 -+
3598 - /*
3599 - * does not yet catch signals sent when the child dies.
3600 - * in exit.c or in signal.c.
3601 -@@ -64,6 +67,27 @@ static long compat_ptrace_old(struct task_struct *child, long request,
3602 - return -EPERM;
3603 - }
3604 -
3605 -+static int compat_ptrace_getsiginfo(struct task_struct *child, compat_siginfo_t __user *data)
3606 -+{
3607 -+ siginfo_t lastinfo;
3608 -+ int error = -ESRCH;
3609 -+
3610 -+ read_lock(&tasklist_lock);
3611 -+ if (likely(child->sighand != NULL)) {
3612 -+ error = -EINVAL;
3613 -+ spin_lock_irq(&child->sighand->siglock);
3614 -+ if (likely(child->last_siginfo != NULL)) {
3615 -+ lastinfo = *child->last_siginfo;
3616 -+ error = 0;
3617 -+ }
3618 -+ spin_unlock_irq(&child->sighand->siglock);
3619 -+ }
3620 -+ read_unlock(&tasklist_lock);
3621 -+ if (!error)
3622 -+ return copy_siginfo_to_user32(data, &lastinfo);
3623 -+ return error;
3624 -+}
3625 -+
3626 - long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
3627 - compat_ulong_t caddr, compat_ulong_t cdata)
3628 - {
3629 -@@ -282,6 +306,9 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
3630 - 0, PT_REGS_COUNT * sizeof(compat_long_t),
3631 - compat_ptr(data));
3632 -
3633 -+ case PTRACE_GETSIGINFO:
3634 -+ return compat_ptrace_getsiginfo(child, compat_ptr(data));
3635 -+
3636 - case PTRACE_GETFPREGS:
3637 - case PTRACE_SETFPREGS:
3638 - case PTRACE_GETVRREGS:
3639 -diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
3640 -index 5921e5f..1c3a66a 100644
3641 ---- a/arch/x86/kernel/io_delay.c
3642 -+++ b/arch/x86/kernel/io_delay.c
3643 -@@ -103,6 +103,9 @@ void __init io_delay_init(void)
3644 -
3645 - static int __init io_delay_param(char *s)
3646 - {
3647 -+ if (!s)
3648 -+ return -EINVAL;
3649 -+
3650 - if (!strcmp(s, "0x80"))
3651 - io_delay_type = CONFIG_IO_DELAY_TYPE_0X80;
3652 - else if (!strcmp(s, "0xed"))
3653 -diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
3654 -index b8c6743..43c019f 100644
3655 ---- a/arch/x86/kernel/kprobes.c
3656 -+++ b/arch/x86/kernel/kprobes.c
3657 -@@ -860,7 +860,6 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
3658 -
3659 - resume_execution(cur, regs, kcb);
3660 - regs->flags |= kcb->kprobe_saved_flags;
3661 -- trace_hardirqs_fixup_flags(regs->flags);
3662 -
3663 - if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
3664 - kcb->kprobe_status = KPROBE_HIT_SSDONE;
3665 -diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
3666 -index ba370dc..58325a6 100644
3667 ---- a/arch/x86/kernel/process.c
3668 -+++ b/arch/x86/kernel/process.c
3669 -@@ -164,6 +164,9 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
3670 -
3671 - static int __init idle_setup(char *str)
3672 - {
3673 -+ if (!str)
3674 -+ return -EINVAL;
3675 -+
3676 - if (!strcmp(str, "poll")) {
3677 - printk("using polling idle threads.\n");
3678 - pm_idle = poll_idle;
3679 -diff --git a/block/bsg.c b/block/bsg.c
3680 -index 54d617f..0526471 100644
3681 ---- a/block/bsg.c
3682 -+++ b/block/bsg.c
3683 -@@ -725,8 +725,13 @@ static int bsg_put_device(struct bsg_device *bd)
3684 - mutex_lock(&bsg_mutex);
3685 -
3686 - do_free = atomic_dec_and_test(&bd->ref_count);
3687 -- if (!do_free)
3688 -+ if (!do_free) {
3689 -+ mutex_unlock(&bsg_mutex);
3690 - goto out;
3691 -+ }
3692 -+
3693 -+ hlist_del(&bd->dev_list);
3694 -+ mutex_unlock(&bsg_mutex);
3695 -
3696 - dprintk("%s: tearing down\n", bd->name);
3697 -
3698 -@@ -742,10 +747,8 @@ static int bsg_put_device(struct bsg_device *bd)
3699 - */
3700 - ret = bsg_complete_all_commands(bd);
3701 -
3702 -- hlist_del(&bd->dev_list);
3703 - kfree(bd);
3704 - out:
3705 -- mutex_unlock(&bsg_mutex);
3706 - kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
3707 - if (do_free)
3708 - blk_put_queue(q);
3709 -diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
3710 -index 78eb784..7828ef2 100644
3711 ---- a/drivers/input/serio/i8042-x86ia64io.h
3712 -+++ b/drivers/input/serio/i8042-x86ia64io.h
3713 -@@ -63,7 +63,7 @@ static inline void i8042_write_command(int val)
3714 - outb(val, I8042_COMMAND_REG);
3715 - }
3716 -
3717 --#if defined(__i386__) || defined(__x86_64__)
3718 -+#ifdef CONFIG_X86
3719 -
3720 - #include <linux/dmi.h>
3721 -
3722 -@@ -291,17 +291,36 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
3723 - DMI_MATCH(DMI_PRODUCT_VERSION, "3000 N100"),
3724 - },
3725 - },
3726 -+ {
3727 -+ .ident = "Acer Aspire 1360",
3728 -+ .matches = {
3729 -+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
3730 -+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
3731 -+ },
3732 -+ },
3733 - { }
3734 - };
3735 -
3736 --
3737 --
3738 -+#ifdef CONFIG_PNP
3739 -+static struct dmi_system_id __initdata i8042_dmi_nopnp_table[] = {
3740 -+ {
3741 -+ .ident = "Intel MBO Desktop D845PESV",
3742 -+ .matches = {
3743 -+ DMI_MATCH(DMI_BOARD_NAME, "D845PESV"),
3744 -+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
3745 -+ },
3746 -+ },
3747 -+ {
3748 -+ .ident = "Gericom Bellagio",
3749 -+ .matches = {
3750 -+ DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
3751 -+ DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"),
3752 -+ },
3753 -+ },
3754 -+ { }
3755 -+};
3756 - #endif
3757 -
3758 --#ifdef CONFIG_X86
3759 --
3760 --#include <linux/dmi.h>
3761 --
3762 - /*
3763 - * Some Wistron based laptops need us to explicitly enable the 'Dritek
3764 - * keyboard extension' to make their extra keys start generating scancodes.
3765 -@@ -356,7 +375,6 @@ static struct dmi_system_id __initdata i8042_dmi_dritek_table[] = {
3766 -
3767 - #endif /* CONFIG_X86 */
3768 -
3769 --
3770 - #ifdef CONFIG_PNP
3771 - #include <linux/pnp.h>
3772 -
3773 -@@ -466,6 +484,11 @@ static int __init i8042_pnp_init(void)
3774 - int pnp_data_busted = 0;
3775 - int err;
3776 -
3777 -+#ifdef CONFIG_X86
3778 -+ if (dmi_check_system(i8042_dmi_nopnp_table))
3779 -+ i8042_nopnp = 1;
3780 -+#endif
3781 -+
3782 - if (i8042_nopnp) {
3783 - printk(KERN_INFO "i8042: PNP detection disabled\n");
3784 - return 0;
3785 -@@ -591,15 +614,13 @@ static int __init i8042_platform_init(void)
3786 - i8042_reset = 1;
3787 - #endif
3788 -
3789 --#if defined(__i386__) || defined(__x86_64__)
3790 -+#ifdef CONFIG_X86
3791 - if (dmi_check_system(i8042_dmi_noloop_table))
3792 - i8042_noloop = 1;
3793 -
3794 - if (dmi_check_system(i8042_dmi_nomux_table))
3795 - i8042_nomux = 1;
3796 --#endif
3797 -
3798 --#ifdef CONFIG_X86
3799 - if (dmi_check_system(i8042_dmi_dritek_table))
3800 - i8042_dritek = 1;
3801 - #endif /* CONFIG_X86 */
3802 -diff --git a/drivers/md/linear.c b/drivers/md/linear.c
3803 -index 1074824..ec921f5 100644
3804 ---- a/drivers/md/linear.c
3805 -+++ b/drivers/md/linear.c
3806 -@@ -126,7 +126,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
3807 - int j = rdev->raid_disk;
3808 - dev_info_t *disk = conf->disks + j;
3809 -
3810 -- if (j < 0 || j > raid_disks || disk->rdev) {
3811 -+ if (j < 0 || j >= raid_disks || disk->rdev) {
3812 - printk("linear: disk numbering problem. Aborting!\n");
3813 - goto out;
3814 - }
3815 -diff --git a/drivers/md/md.c b/drivers/md/md.c
3816 -index 2580ac1..9664511 100644
3817 ---- a/drivers/md/md.c
3818 -+++ b/drivers/md/md.c
3819 -@@ -3326,9 +3326,9 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
3820 - disk->queue = mddev->queue;
3821 - add_disk(disk);
3822 - mddev->gendisk = disk;
3823 -- mutex_unlock(&disks_mutex);
3824 - error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
3825 - "%s", "md");
3826 -+ mutex_unlock(&disks_mutex);
3827 - if (error)
3828 - printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
3829 - disk->disk_name);
3830 -diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
3831 -index e57905c..bc3ea09 100644
3832 ---- a/drivers/net/wireless/ath5k/base.c
3833 -+++ b/drivers/net/wireless/ath5k/base.c
3834 -@@ -1774,20 +1774,21 @@ ath5k_tasklet_rx(unsigned long data)
3835 - struct ath5k_rx_status rs = {};
3836 - struct sk_buff *skb;
3837 - struct ath5k_softc *sc = (void *)data;
3838 -- struct ath5k_buf *bf;
3839 -+ struct ath5k_buf *bf, *bf_last;
3840 - struct ath5k_desc *ds;
3841 - int ret;
3842 - int hdrlen;
3843 - int pad;
3844 -
3845 - spin_lock(&sc->rxbuflock);
3846 -+ if (list_empty(&sc->rxbuf)) {
3847 -+ ATH5K_WARN(sc, "empty rx buf pool\n");
3848 -+ goto unlock;
3849 -+ }
3850 -+ bf_last = list_entry(sc->rxbuf.prev, struct ath5k_buf, list);
3851 - do {
3852 - rxs.flag = 0;
3853 -
3854 -- if (unlikely(list_empty(&sc->rxbuf))) {
3855 -- ATH5K_WARN(sc, "empty rx buf pool\n");
3856 -- break;
3857 -- }
3858 - bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
3859 - BUG_ON(bf->skb == NULL);
3860 - skb = bf->skb;
3861 -@@ -1797,8 +1798,24 @@ ath5k_tasklet_rx(unsigned long data)
3862 - pci_dma_sync_single_for_cpu(sc->pdev, sc->desc_daddr,
3863 - sc->desc_len, PCI_DMA_FROMDEVICE);
3864 -
3865 -- if (unlikely(ds->ds_link == bf->daddr)) /* this is the end */
3866 -- break;
3867 -+ /*
3868 -+ * last buffer must not be freed to ensure proper hardware
3869 -+ * function. When the hardware finishes also a packet next to
3870 -+ * it, we are sure, it doesn't use it anymore and we can go on.
3871 -+ */
3872 -+ if (bf_last == bf)
3873 -+ bf->flags |= 1;
3874 -+ if (bf->flags) {
3875 -+ struct ath5k_buf *bf_next = list_entry(bf->list.next,
3876 -+ struct ath5k_buf, list);
3877 -+ ret = sc->ah->ah_proc_rx_desc(sc->ah, bf_next->desc,
3878 -+ &rs);
3879 -+ if (ret)
3880 -+ break;
3881 -+ bf->flags &= ~1;
3882 -+ /* skip the overwritten one (even status is martian) */
3883 -+ goto next;
3884 -+ }
3885 -
3886 - ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
3887 - if (unlikely(ret == -EINPROGRESS))
3888 -@@ -1921,6 +1938,7 @@ accept:
3889 - next:
3890 - list_move_tail(&bf->list, &sc->rxbuf);
3891 - } while (ath5k_rxbuf_setup(sc, bf) == 0);
3892 -+unlock:
3893 - spin_unlock(&sc->rxbuflock);
3894 - }
3895 -
3896 -@@ -2435,6 +2453,9 @@ ath5k_stop_hw(struct ath5k_softc *sc)
3897 - mutex_unlock(&sc->lock);
3898 -
3899 - del_timer_sync(&sc->calib_tim);
3900 -+ tasklet_kill(&sc->rxtq);
3901 -+ tasklet_kill(&sc->txtq);
3902 -+ tasklet_kill(&sc->restq);
3903 -
3904 - return ret;
3905 - }
3906 -diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
3907 -index 3a97558..4badca7 100644
3908 ---- a/drivers/net/wireless/ath5k/base.h
3909 -+++ b/drivers/net/wireless/ath5k/base.h
3910 -@@ -55,7 +55,7 @@
3911 -
3912 - struct ath5k_buf {
3913 - struct list_head list;
3914 -- unsigned int flags; /* tx descriptor flags */
3915 -+ unsigned int flags; /* rx descriptor flags */
3916 - struct ath5k_desc *desc; /* virtual addr of desc */
3917 - dma_addr_t daddr; /* physical addr of desc */
3918 - struct sk_buff *skb; /* skbuff for buf */
3919 -diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
3920 -index c4b938b..2be2da6 100644
3921 ---- a/drivers/scsi/ch.c
3922 -+++ b/drivers/scsi/ch.c
3923 -@@ -926,6 +926,7 @@ static int ch_probe(struct device *dev)
3924 - if (init)
3925 - ch_init_elem(ch);
3926 -
3927 -+ dev_set_drvdata(dev, ch);
3928 - sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
3929 -
3930 - return 0;
3931 -diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
3932 -index 67ff202..8dee320 100644
3933 ---- a/fs/jbd/transaction.c
3934 -+++ b/fs/jbd/transaction.c
3935 -@@ -1648,12 +1648,42 @@ out:
3936 - return;
3937 - }
3938 -
3939 -+/*
3940 -+ * journal_try_to_free_buffers() could race with journal_commit_transaction()
3941 -+ * The latter might still hold the a count on buffers when inspecting
3942 -+ * them on t_syncdata_list or t_locked_list.
3943 -+ *
3944 -+ * journal_try_to_free_buffers() will call this function to
3945 -+ * wait for the current transaction to finish syncing data buffers, before
3946 -+ * tryinf to free that buffer.
3947 -+ *
3948 -+ * Called with journal->j_state_lock held.
3949 -+ */
3950 -+static void journal_wait_for_transaction_sync_data(journal_t *journal)
3951 -+{
3952 -+ transaction_t *transaction = NULL;
3953 -+ tid_t tid;
3954 -+
3955 -+ spin_lock(&journal->j_state_lock);
3956 -+ transaction = journal->j_committing_transaction;
3957 -+
3958 -+ if (!transaction) {
3959 -+ spin_unlock(&journal->j_state_lock);
3960 -+ return;
3961 -+ }
3962 -+
3963 -+ tid = transaction->t_tid;
3964 -+ spin_unlock(&journal->j_state_lock);
3965 -+ log_wait_commit(journal, tid);
3966 -+}
3967 -
3968 - /**
3969 - * int journal_try_to_free_buffers() - try to free page buffers.
3970 - * @journal: journal for operation
3971 - * @page: to try and free
3972 -- * @unused_gfp_mask: unused
3973 -+ * @gfp_mask: we use the mask to detect how hard should we try to release
3974 -+ * buffers. If __GFP_WAIT and __GFP_FS is set, we wait for commit code to
3975 -+ * release the buffers.
3976 - *
3977 - *
3978 - * For all the buffers on this page,
3979 -@@ -1682,9 +1712,11 @@ out:
3980 - * journal_try_to_free_buffer() is changing its state. But that
3981 - * cannot happen because we never reallocate freed data as metadata
3982 - * while the data is part of a transaction. Yes?
3983 -+ *
3984 -+ * Return 0 on failure, 1 on success
3985 - */
3986 - int journal_try_to_free_buffers(journal_t *journal,
3987 -- struct page *page, gfp_t unused_gfp_mask)
3988 -+ struct page *page, gfp_t gfp_mask)
3989 - {
3990 - struct buffer_head *head;
3991 - struct buffer_head *bh;
3992 -@@ -1713,7 +1745,28 @@ int journal_try_to_free_buffers(journal_t *journal,
3993 - if (buffer_jbd(bh))
3994 - goto busy;
3995 - } while ((bh = bh->b_this_page) != head);
3996 -+
3997 - ret = try_to_free_buffers(page);
3998 -+
3999 -+ /*
4000 -+ * There are a number of places where journal_try_to_free_buffers()
4001 -+ * could race with journal_commit_transaction(), the later still
4002 -+ * holds the reference to the buffers to free while processing them.
4003 -+ * try_to_free_buffers() failed to free those buffers. Some of the
4004 -+ * caller of releasepage() request page buffers to be dropped, otherwise
4005 -+ * treat the fail-to-free as errors (such as generic_file_direct_IO())
4006 -+ *
4007 -+ * So, if the caller of try_to_release_page() wants the synchronous
4008 -+ * behaviour(i.e make sure buffers are dropped upon return),
4009 -+ * let's wait for the current transaction to finish flush of
4010 -+ * dirty data buffers, then try to free those buffers again,
4011 -+ * with the journal locked.
4012 -+ */
4013 -+ if (ret == 0 && (gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS)) {
4014 -+ journal_wait_for_transaction_sync_data(journal);
4015 -+ ret = try_to_free_buffers(page);
4016 -+ }
4017 -+
4018 - busy:
4019 - return ret;
4020 - }
4021 -diff --git a/fs/namei.c b/fs/namei.c
4022 -index 01e67dd..3b26a24 100644
4023 ---- a/fs/namei.c
4024 -+++ b/fs/namei.c
4025 -@@ -519,7 +519,14 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
4026 - */
4027 - result = d_lookup(parent, name);
4028 - if (!result) {
4029 -- struct dentry * dentry = d_alloc(parent, name);
4030 -+ struct dentry *dentry;
4031 -+
4032 -+ /* Don't create child dentry for a dead directory. */
4033 -+ result = ERR_PTR(-ENOENT);
4034 -+ if (IS_DEADDIR(dir))
4035 -+ goto out_unlock;
4036 -+
4037 -+ dentry = d_alloc(parent, name);
4038 - result = ERR_PTR(-ENOMEM);
4039 - if (dentry) {
4040 - result = dir->i_op->lookup(dir, dentry, nd);
4041 -@@ -528,6 +535,7 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
4042 - else
4043 - result = dentry;
4044 - }
4045 -+out_unlock:
4046 - mutex_unlock(&dir->i_mutex);
4047 - return result;
4048 - }
4049 -@@ -1317,7 +1325,14 @@ static struct dentry *__lookup_hash(struct qstr *name,
4050 -
4051 - dentry = cached_lookup(base, name, nd);
4052 - if (!dentry) {
4053 -- struct dentry *new = d_alloc(base, name);
4054 -+ struct dentry *new;
4055 -+
4056 -+ /* Don't create child dentry for a dead directory. */
4057 -+ dentry = ERR_PTR(-ENOENT);
4058 -+ if (IS_DEADDIR(inode))
4059 -+ goto out;
4060 -+
4061 -+ new = d_alloc(base, name);
4062 - dentry = ERR_PTR(-ENOMEM);
4063 - if (!new)
4064 - goto out;
4065 -diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
4066 -index 596c5d8..1d7ac64 100644
4067 ---- a/fs/nfs/inode.c
4068 -+++ b/fs/nfs/inode.c
4069 -@@ -57,8 +57,6 @@ static int enable_ino64 = NFS_64_BIT_INODE_NUMBERS_ENABLED;
4070 - static void nfs_invalidate_inode(struct inode *);
4071 - static int nfs_update_inode(struct inode *, struct nfs_fattr *);
4072 -
4073 --static void nfs_zap_acl_cache(struct inode *);
4074 --
4075 - static struct kmem_cache * nfs_inode_cachep;
4076 -
4077 - static inline unsigned long
4078 -@@ -167,7 +165,7 @@ void nfs_zap_mapping(struct inode *inode, struct address_space *mapping)
4079 - }
4080 - }
4081 -
4082 --static void nfs_zap_acl_cache(struct inode *inode)
4083 -+void nfs_zap_acl_cache(struct inode *inode)
4084 - {
4085 - void (*clear_acl_cache)(struct inode *);
4086 -
4087 -diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
4088 -index 04ae867..24241fc 100644
4089 ---- a/fs/nfs/internal.h
4090 -+++ b/fs/nfs/internal.h
4091 -@@ -150,6 +150,7 @@ extern void nfs_clear_inode(struct inode *);
4092 - #ifdef CONFIG_NFS_V4
4093 - extern void nfs4_clear_inode(struct inode *);
4094 - #endif
4095 -+void nfs_zap_acl_cache(struct inode *inode);
4096 -
4097 - /* super.c */
4098 - extern struct file_system_type nfs_xdev_fs_type;
4099 -diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
4100 -index 9b73625..423842f 100644
4101 ---- a/fs/nfs/nfs3acl.c
4102 -+++ b/fs/nfs/nfs3acl.c
4103 -@@ -5,6 +5,8 @@
4104 - #include <linux/posix_acl_xattr.h>
4105 - #include <linux/nfsacl.h>
4106 -
4107 -+#include "internal.h"
4108 -+
4109 - #define NFSDBG_FACILITY NFSDBG_PROC
4110 -
4111 - ssize_t nfs3_listxattr(struct dentry *dentry, char *buffer, size_t size)
4112 -@@ -205,6 +207,8 @@ struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
4113 - status = nfs_revalidate_inode(server, inode);
4114 - if (status < 0)
4115 - return ERR_PTR(status);
4116 -+ if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
4117 -+ nfs_zap_acl_cache(inode);
4118 - acl = nfs3_get_cached_acl(inode, type);
4119 - if (acl != ERR_PTR(-EAGAIN))
4120 - return acl;
4121 -@@ -319,9 +323,8 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
4122 - dprintk("NFS call setacl\n");
4123 - msg.rpc_proc = &server->client_acl->cl_procinfo[ACLPROC3_SETACL];
4124 - status = rpc_call_sync(server->client_acl, &msg, 0);
4125 -- spin_lock(&inode->i_lock);
4126 -- NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS;
4127 -- spin_unlock(&inode->i_lock);
4128 -+ nfs_access_zap_cache(inode);
4129 -+ nfs_zap_acl_cache(inode);
4130 - dprintk("NFS reply setacl: %d\n", status);
4131 -
4132 - /* pages may have been allocated at the xdr layer. */
4133 -diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
4134 -index 1293e0a..806d17f 100644
4135 ---- a/fs/nfs/nfs4proc.c
4136 -+++ b/fs/nfs/nfs4proc.c
4137 -@@ -2706,6 +2706,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
4138 - ret = nfs_revalidate_inode(server, inode);
4139 - if (ret < 0)
4140 - return ret;
4141 -+ if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
4142 -+ nfs_zap_acl_cache(inode);
4143 - ret = nfs4_read_cached_acl(inode, buf, buflen);
4144 - if (ret != -ENOENT)
4145 - return ret;
4146 -@@ -2733,7 +2735,8 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
4147 - nfs_inode_return_delegation(inode);
4148 - buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
4149 - ret = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
4150 -- nfs_zap_caches(inode);
4151 -+ nfs_access_zap_cache(inode);
4152 -+ nfs_zap_acl_cache(inode);
4153 - return ret;
4154 - }
4155 -
4156 -diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
4157 -index 3f13d49..35e5c6e 100644
4158 ---- a/fs/romfs/inode.c
4159 -+++ b/fs/romfs/inode.c
4160 -@@ -418,7 +418,8 @@ static int
4161 - romfs_readpage(struct file *file, struct page * page)
4162 - {
4163 - struct inode *inode = page->mapping->host;
4164 -- loff_t offset, avail, readlen;
4165 -+ loff_t offset, size;
4166 -+ unsigned long filled;
4167 - void *buf;
4168 - int result = -EIO;
4169 -
4170 -@@ -430,21 +431,29 @@ romfs_readpage(struct file *file, struct page * page)
4171 -
4172 - /* 32 bit warning -- but not for us :) */
4173 - offset = page_offset(page);
4174 -- if (offset < i_size_read(inode)) {
4175 -- avail = inode->i_size-offset;
4176 -- readlen = min_t(unsigned long, avail, PAGE_SIZE);
4177 -- if (romfs_copyfrom(inode, buf, ROMFS_I(inode)->i_dataoffset+offset, readlen) == readlen) {
4178 -- if (readlen < PAGE_SIZE) {
4179 -- memset(buf + readlen,0,PAGE_SIZE-readlen);
4180 -- }
4181 -- SetPageUptodate(page);
4182 -- result = 0;
4183 -+ size = i_size_read(inode);
4184 -+ filled = 0;
4185 -+ result = 0;
4186 -+ if (offset < size) {
4187 -+ unsigned long readlen;
4188 -+
4189 -+ size -= offset;
4190 -+ readlen = size > PAGE_SIZE ? PAGE_SIZE : size;
4191 -+
4192 -+ filled = romfs_copyfrom(inode, buf, ROMFS_I(inode)->i_dataoffset+offset, readlen);
4193 -+
4194 -+ if (filled != readlen) {
4195 -+ SetPageError(page);
4196 -+ filled = 0;
4197 -+ result = -EIO;
4198 - }
4199 - }
4200 -- if (result) {
4201 -- memset(buf, 0, PAGE_SIZE);
4202 -- SetPageError(page);
4203 -- }
4204 -+
4205 -+ if (filled < PAGE_SIZE)
4206 -+ memset(buf + filled, 0, PAGE_SIZE-filled);
4207 -+
4208 -+ if (!result)
4209 -+ SetPageUptodate(page);
4210 - flush_dcache_page(page);
4211 -
4212 - unlock_page(page);
4213 -diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
4214 -index 7b7b9b1..10ee28e 100644
4215 ---- a/include/sound/emu10k1.h
4216 -+++ b/include/sound/emu10k1.h
4217 -@@ -1670,6 +1670,7 @@ struct snd_emu_chip_details {
4218 - unsigned char spi_dac; /* SPI interface for DAC */
4219 - unsigned char i2c_adc; /* I2C interface for ADC */
4220 - unsigned char adc_1361t; /* Use Philips 1361T ADC */
4221 -+ unsigned char invert_shared_spdif; /* analog/digital switch inverted */
4222 - const char *driver;
4223 - const char *name;
4224 - const char *id; /* for backward compatibility - can be NULL if not needed */
4225 -diff --git a/mm/filemap.c b/mm/filemap.c
4226 -index 4f32423..afb991a 100644
4227 ---- a/mm/filemap.c
4228 -+++ b/mm/filemap.c
4229 -@@ -2581,9 +2581,8 @@ out:
4230 - * Otherwise return zero.
4231 - *
4232 - * The @gfp_mask argument specifies whether I/O may be performed to release
4233 -- * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
4234 -+ * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
4235 - *
4236 -- * NOTE: @gfp_mask may go away, and this function may become non-blocking.
4237 - */
4238 - int try_to_release_page(struct page *page, gfp_t gfp_mask)
4239 - {
4240 -diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
4241 -index f85d946..24e91eb 100644
4242 ---- a/net/bluetooth/bnep/core.c
4243 -+++ b/net/bluetooth/bnep/core.c
4244 -@@ -507,6 +507,11 @@ static int bnep_session(void *arg)
4245 - /* Delete network device */
4246 - unregister_netdev(dev);
4247 -
4248 -+ /* Wakeup user-space polling for socket errors */
4249 -+ s->sock->sk->sk_err = EUNATCH;
4250 -+
4251 -+ wake_up_interruptible(s->sock->sk->sk_sleep);
4252 -+
4253 - /* Release the socket */
4254 - fput(s->sock->file);
4255 -
4256 -diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
4257 -index 519cdb9..96434d7 100644
4258 ---- a/net/bluetooth/hidp/core.c
4259 -+++ b/net/bluetooth/hidp/core.c
4260 -@@ -581,6 +581,12 @@ static int hidp_session(void *arg)
4261 - hid_free_device(session->hid);
4262 - }
4263 -
4264 -+ /* Wakeup user-space polling for socket errors */
4265 -+ session->intr_sock->sk->sk_err = EUNATCH;
4266 -+ session->ctrl_sock->sk->sk_err = EUNATCH;
4267 -+
4268 -+ hidp_schedule(session);
4269 -+
4270 - fput(session->intr_sock->file);
4271 -
4272 - wait_event_timeout(*(ctrl_sk->sk_sleep),
4273 -@@ -879,6 +885,10 @@ int hidp_del_connection(struct hidp_conndel_req *req)
4274 - skb_queue_purge(&session->ctrl_transmit);
4275 - skb_queue_purge(&session->intr_transmit);
4276 -
4277 -+ /* Wakeup user-space polling for socket errors */
4278 -+ session->intr_sock->sk->sk_err = EUNATCH;
4279 -+ session->ctrl_sock->sk->sk_err = EUNATCH;
4280 -+
4281 - /* Kill session thread */
4282 - atomic_inc(&session->terminate);
4283 - hidp_schedule(session);
4284 -diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
4285 -index 4334d5c..1454432 100644
4286 ---- a/net/ipv4/netfilter/nf_nat_sip.c
4287 -+++ b/net/ipv4/netfilter/nf_nat_sip.c
4288 -@@ -318,11 +318,11 @@ static int mangle_content_len(struct sk_buff *skb,
4289 - buffer, buflen);
4290 - }
4291 -
4292 --static unsigned mangle_sdp_packet(struct sk_buff *skb, const char **dptr,
4293 -- unsigned int dataoff, unsigned int *datalen,
4294 -- enum sdp_header_types type,
4295 -- enum sdp_header_types term,
4296 -- char *buffer, int buflen)
4297 -+static int mangle_sdp_packet(struct sk_buff *skb, const char **dptr,
4298 -+ unsigned int dataoff, unsigned int *datalen,
4299 -+ enum sdp_header_types type,
4300 -+ enum sdp_header_types term,
4301 -+ char *buffer, int buflen)
4302 - {
4303 - enum ip_conntrack_info ctinfo;
4304 - struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
4305 -@@ -330,9 +330,9 @@ static unsigned mangle_sdp_packet(struct sk_buff *skb, const char **dptr,
4306 -
4307 - if (ct_sip_get_sdp_header(ct, *dptr, dataoff, *datalen, type, term,
4308 - &matchoff, &matchlen) <= 0)
4309 -- return 0;
4310 -+ return -ENOENT;
4311 - return mangle_packet(skb, dptr, datalen, matchoff, matchlen,
4312 -- buffer, buflen);
4313 -+ buffer, buflen) ? 0 : -EINVAL;
4314 - }
4315 -
4316 - static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr,
4317 -@@ -346,8 +346,8 @@ static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr,
4318 - unsigned int buflen;
4319 -
4320 - buflen = sprintf(buffer, NIPQUAD_FMT, NIPQUAD(addr->ip));
4321 -- if (!mangle_sdp_packet(skb, dptr, dataoff, datalen, type, term,
4322 -- buffer, buflen))
4323 -+ if (mangle_sdp_packet(skb, dptr, dataoff, datalen, type, term,
4324 -+ buffer, buflen))
4325 - return 0;
4326 -
4327 - return mangle_content_len(skb, dptr, datalen);
4328 -@@ -381,15 +381,27 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr,
4329 -
4330 - /* Mangle session description owner and contact addresses */
4331 - buflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(addr->ip));
4332 -- if (!mangle_sdp_packet(skb, dptr, dataoff, datalen,
4333 -+ if (mangle_sdp_packet(skb, dptr, dataoff, datalen,
4334 - SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA,
4335 - buffer, buflen))
4336 - return 0;
4337 -
4338 -- if (!mangle_sdp_packet(skb, dptr, dataoff, datalen,
4339 -- SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA,
4340 -- buffer, buflen))
4341 -+ switch (mangle_sdp_packet(skb, dptr, dataoff, datalen,
4342 -+ SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA,
4343 -+ buffer, buflen)) {
4344 -+ case 0:
4345 -+ /*
4346 -+ * RFC 2327:
4347 -+ *
4348 -+ * Session description
4349 -+ *
4350 -+ * c=* (connection information - not required if included in all media)
4351 -+ */
4352 -+ case -ENOENT:
4353 -+ break;
4354 -+ default:
4355 - return 0;
4356 -+ }
4357 -
4358 - return mangle_content_len(skb, dptr, datalen);
4359 - }
4360 -diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
4361 -index ed76baa..9f32859 100644
4362 ---- a/net/netfilter/xt_time.c
4363 -+++ b/net/netfilter/xt_time.c
4364 -@@ -173,7 +173,7 @@ time_mt(const struct sk_buff *skb, const struct net_device *in,
4365 - __net_timestamp((struct sk_buff *)skb);
4366 -
4367 - stamp = ktime_to_ns(skb->tstamp);
4368 -- do_div(stamp, NSEC_PER_SEC);
4369 -+ stamp = div_s64(stamp, NSEC_PER_SEC);
4370 -
4371 - if (info->flags & XT_TIME_LOCAL_TZ)
4372 - /* Adjust for local timezone */
4373 -diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
4374 -index 558dadb..e024e45 100644
4375 ---- a/sound/core/seq/oss/seq_oss_synth.c
4376 -+++ b/sound/core/seq/oss/seq_oss_synth.c
4377 -@@ -604,6 +604,9 @@ snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_in
4378 - {
4379 - struct seq_oss_synth *rec;
4380 -
4381 -+ if (dev < 0 || dev >= dp->max_synthdev)
4382 -+ return -ENXIO;
4383 -+
4384 - if (dp->synths[dev].is_midi) {
4385 - struct midi_info minf;
4386 - snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf);
4387 -diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
4388 -index 548c9cc..2f283ea 100644
4389 ---- a/sound/pci/emu10k1/emu10k1_main.c
4390 -+++ b/sound/pci/emu10k1/emu10k1_main.c
4391 -@@ -1528,6 +1528,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
4392 - .ca0151_chip = 1,
4393 - .spk71 = 1,
4394 - .spdif_bug = 1,
4395 -+ .invert_shared_spdif = 1, /* digital/analog switch swapped */
4396 - .adc_1361t = 1, /* 24 bit capture instead of 16bit. Fixes ALSA bug#324 */
4397 - .ac97_chip = 1} ,
4398 - {.vendor = 0x1102, .device = 0x0004, .revision = 0x04,
4399 -diff --git a/sound/pci/emu10k1/emumixer.c b/sound/pci/emu10k1/emumixer.c
4400 -index fd22120..9f77692 100644
4401 ---- a/sound/pci/emu10k1/emumixer.c
4402 -+++ b/sound/pci/emu10k1/emumixer.c
4403 -@@ -1578,6 +1578,10 @@ static int snd_emu10k1_shared_spdif_get(struct snd_kcontrol *kcontrol,
4404 - ucontrol->value.integer.value[0] = inl(emu->port + A_IOCFG) & A_IOCFG_GPOUT0 ? 1 : 0;
4405 - else
4406 - ucontrol->value.integer.value[0] = inl(emu->port + HCFG) & HCFG_GPOUT0 ? 1 : 0;
4407 -+ if (emu->card_capabilities->invert_shared_spdif)
4408 -+ ucontrol->value.integer.value[0] =
4409 -+ !ucontrol->value.integer.value[0];
4410 -+
4411 - return 0;
4412 - }
4413 -
4414 -@@ -1586,15 +1590,18 @@ static int snd_emu10k1_shared_spdif_put(struct snd_kcontrol *kcontrol,
4415 - {
4416 - unsigned long flags;
4417 - struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
4418 -- unsigned int reg, val;
4419 -+ unsigned int reg, val, sw;
4420 - int change = 0;
4421 -
4422 -+ sw = ucontrol->value.integer.value[0];
4423 -+ if (emu->card_capabilities->invert_shared_spdif)
4424 -+ sw = !sw;
4425 - spin_lock_irqsave(&emu->reg_lock, flags);
4426 - if ( emu->card_capabilities->i2c_adc) {
4427 - /* Do nothing for Audigy 2 ZS Notebook */
4428 - } else if (emu->audigy) {
4429 - reg = inl(emu->port + A_IOCFG);
4430 -- val = ucontrol->value.integer.value[0] ? A_IOCFG_GPOUT0 : 0;
4431 -+ val = sw ? A_IOCFG_GPOUT0 : 0;
4432 - change = (reg & A_IOCFG_GPOUT0) != val;
4433 - if (change) {
4434 - reg &= ~A_IOCFG_GPOUT0;
4435 -@@ -1603,7 +1610,7 @@ static int snd_emu10k1_shared_spdif_put(struct snd_kcontrol *kcontrol,
4436 - }
4437 - }
4438 - reg = inl(emu->port + HCFG);
4439 -- val = ucontrol->value.integer.value[0] ? HCFG_GPOUT0 : 0;
4440 -+ val = sw ? HCFG_GPOUT0 : 0;
4441 - change |= (reg & HCFG_GPOUT0) != val;
4442 - if (change) {
4443 - reg &= ~HCFG_GPOUT0;
4444 -diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4445 -index b3a618e..6ba7ac0 100644
4446 ---- a/sound/pci/hda/hda_intel.c
4447 -+++ b/sound/pci/hda/hda_intel.c
4448 -@@ -285,6 +285,7 @@ struct azx_dev {
4449 - u32 *posbuf; /* position buffer pointer */
4450 -
4451 - unsigned int bufsize; /* size of the play buffer in bytes */
4452 -+ unsigned int period_bytes; /* size of the period in bytes */
4453 - unsigned int frags; /* number for period in the play buffer */
4454 - unsigned int fifo_size; /* FIFO size */
4455 -
4456 -@@ -301,11 +302,10 @@ struct azx_dev {
4457 - */
4458 - unsigned char stream_tag; /* assigned stream */
4459 - unsigned char index; /* stream index */
4460 -- /* for sanity check of position buffer */
4461 -- unsigned int period_intr;
4462 -
4463 - unsigned int opened :1;
4464 - unsigned int running :1;
4465 -+ unsigned int irq_pending: 1;
4466 - };
4467 -
4468 - /* CORB/RIRB */
4469 -@@ -369,6 +369,9 @@ struct azx {
4470 -
4471 - /* for debugging */
4472 - unsigned int last_cmd; /* last issued command (to sync) */
4473 -+
4474 -+ /* for pending irqs */
4475 -+ struct work_struct irq_pending_work;
4476 - };
4477 -
4478 - /* driver types */
4479 -@@ -908,6 +911,8 @@ static void azx_init_pci(struct azx *chip)
4480 - }
4481 -
4482 -
4483 -+static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev);
4484 -+
4485 - /*
4486 - * interrupt handler
4487 - */
4488 -@@ -930,11 +935,18 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
4489 - azx_dev = &chip->azx_dev[i];
4490 - if (status & azx_dev->sd_int_sta_mask) {
4491 - azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK);
4492 -- if (azx_dev->substream && azx_dev->running) {
4493 -- azx_dev->period_intr++;
4494 -+ if (!azx_dev->substream || !azx_dev->running)
4495 -+ continue;
4496 -+ /* check whether this IRQ is really acceptable */
4497 -+ if (azx_position_ok(chip, azx_dev)) {
4498 -+ azx_dev->irq_pending = 0;
4499 - spin_unlock(&chip->reg_lock);
4500 - snd_pcm_period_elapsed(azx_dev->substream);
4501 - spin_lock(&chip->reg_lock);
4502 -+ } else {
4503 -+ /* bogus IRQ, process it later */
4504 -+ azx_dev->irq_pending = 1;
4505 -+ schedule_work(&chip->irq_pending_work);
4506 - }
4507 - }
4508 - }
4509 -@@ -973,6 +985,7 @@ static int azx_setup_periods(struct snd_pcm_substream *substream,
4510 - azx_sd_writel(azx_dev, SD_BDLPU, 0);
4511 -
4512 - period_bytes = snd_pcm_lib_period_bytes(substream);
4513 -+ azx_dev->period_bytes = period_bytes;
4514 - periods = azx_dev->bufsize / period_bytes;
4515 -
4516 - /* program the initial BDL entries */
4517 -@@ -1421,27 +1434,16 @@ static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
4518 - return 0;
4519 - }
4520 -
4521 --static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
4522 -+static unsigned int azx_get_position(struct azx *chip,
4523 -+ struct azx_dev *azx_dev)
4524 - {
4525 -- struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
4526 -- struct azx *chip = apcm->chip;
4527 -- struct azx_dev *azx_dev = get_azx_dev(substream);
4528 - unsigned int pos;
4529 -
4530 - if (chip->position_fix == POS_FIX_POSBUF ||
4531 - chip->position_fix == POS_FIX_AUTO) {
4532 - /* use the position buffer */
4533 - pos = le32_to_cpu(*azx_dev->posbuf);
4534 -- if (chip->position_fix == POS_FIX_AUTO &&
4535 -- azx_dev->period_intr == 1 && !pos) {
4536 -- printk(KERN_WARNING
4537 -- "hda-intel: Invalid position buffer, "
4538 -- "using LPIB read method instead.\n");
4539 -- chip->position_fix = POS_FIX_NONE;
4540 -- goto read_lpib;
4541 -- }
4542 - } else {
4543 -- read_lpib:
4544 - /* read LPIB */
4545 - pos = azx_sd_readl(azx_dev, SD_LPIB);
4546 - if (chip->position_fix == POS_FIX_FIFO)
4547 -@@ -1449,7 +1451,90 @@ static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
4548 - }
4549 - if (pos >= azx_dev->bufsize)
4550 - pos = 0;
4551 -- return bytes_to_frames(substream->runtime, pos);
4552 -+ return pos;
4553 -+}
4554 -+
4555 -+static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
4556 -+{
4557 -+ struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
4558 -+ struct azx *chip = apcm->chip;
4559 -+ struct azx_dev *azx_dev = get_azx_dev(substream);
4560 -+ return bytes_to_frames(substream->runtime,
4561 -+ azx_get_position(chip, azx_dev));
4562 -+}
4563 -+
4564 -+/*
4565 -+ * Check whether the current DMA position is acceptable for updating
4566 -+ * periods. Returns non-zero if it's OK.
4567 -+ *
4568 -+ * Many HD-audio controllers appear pretty inaccurate about
4569 -+ * the update-IRQ timing. The IRQ is issued before actually the
4570 -+ * data is processed. So, we need to process it afterwords in a
4571 -+ * workqueue.
4572 -+ */
4573 -+static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev)
4574 -+{
4575 -+ unsigned int pos;
4576 -+
4577 -+ pos = azx_get_position(chip, azx_dev);
4578 -+ if (chip->position_fix == POS_FIX_AUTO) {
4579 -+ if (!pos) {
4580 -+ printk(KERN_WARNING
4581 -+ "hda-intel: Invalid position buffer, "
4582 -+ "using LPIB read method instead.\n");
4583 -+ chip->position_fix = POS_FIX_NONE;
4584 -+ pos = azx_get_position(chip, azx_dev);
4585 -+ } else
4586 -+ chip->position_fix = POS_FIX_POSBUF;
4587 -+ }
4588 -+
4589 -+ if (pos % azx_dev->period_bytes > azx_dev->period_bytes / 2)
4590 -+ return 0; /* NG - it's below the period boundary */
4591 -+ return 1; /* OK, it's fine */
4592 -+}
4593 -+
4594 -+/*
4595 -+ * The work for pending PCM period updates.
4596 -+ */
4597 -+static void azx_irq_pending_work(struct work_struct *work)
4598 -+{
4599 -+ struct azx *chip = container_of(work, struct azx, irq_pending_work);
4600 -+ int i, pending;
4601 -+
4602 -+ for (;;) {
4603 -+ pending = 0;
4604 -+ spin_lock_irq(&chip->reg_lock);
4605 -+ for (i = 0; i < chip->num_streams; i++) {
4606 -+ struct azx_dev *azx_dev = &chip->azx_dev[i];
4607 -+ if (!azx_dev->irq_pending ||
4608 -+ !azx_dev->substream ||
4609 -+ !azx_dev->running)
4610 -+ continue;
4611 -+ if (azx_position_ok(chip, azx_dev)) {
4612 -+ azx_dev->irq_pending = 0;
4613 -+ spin_unlock(&chip->reg_lock);
4614 -+ snd_pcm_period_elapsed(azx_dev->substream);
4615 -+ spin_lock(&chip->reg_lock);
4616 -+ } else
4617 -+ pending++;
4618 -+ }
4619 -+ spin_unlock_irq(&chip->reg_lock);
4620 -+ if (!pending)
4621 -+ return;
4622 -+ cond_resched();
4623 -+ }
4624 -+}
4625 -+
4626 -+/* clear irq_pending flags and assure no on-going workq */
4627 -+static void azx_clear_irq_pending(struct azx *chip)
4628 -+{
4629 -+ int i;
4630 -+
4631 -+ spin_lock_irq(&chip->reg_lock);
4632 -+ for (i = 0; i < chip->num_streams; i++)
4633 -+ chip->azx_dev[i].irq_pending = 0;
4634 -+ spin_unlock_irq(&chip->reg_lock);
4635 -+ flush_scheduled_work();
4636 - }
4637 -
4638 - static struct snd_pcm_ops azx_pcm_ops = {
4639 -@@ -1676,6 +1761,7 @@ static int azx_suspend(struct pci_dev *pci, pm_message_t state)
4640 - int i;
4641 -
4642 - snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
4643 -+ azx_clear_irq_pending(chip);
4644 - for (i = 0; i < AZX_MAX_PCMS; i++)
4645 - snd_pcm_suspend_all(chip->pcm[i]);
4646 - if (chip->initialized)
4647 -@@ -1732,6 +1818,7 @@ static int azx_free(struct azx *chip)
4648 - int i;
4649 -
4650 - if (chip->initialized) {
4651 -+ azx_clear_irq_pending(chip);
4652 - for (i = 0; i < chip->num_streams; i++)
4653 - azx_stream_stop(chip, &chip->azx_dev[i]);
4654 - azx_stop_chip(chip);
4655 -@@ -1857,6 +1944,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
4656 - chip->irq = -1;
4657 - chip->driver_type = driver_type;
4658 - chip->msi = enable_msi;
4659 -+ INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work);
4660 -
4661 - chip->position_fix = check_position_fix(chip, position_fix[dev]);
4662 - check_probe_mask(chip, dev);
4663 -diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
4664 -index a99e86d..b5f655d 100644
4665 ---- a/sound/pci/hda/patch_analog.c
4666 -+++ b/sound/pci/hda/patch_analog.c
4667 -@@ -1618,6 +1618,7 @@ static const char *ad1981_models[AD1981_MODELS] = {
4668 -
4669 - static struct snd_pci_quirk ad1981_cfg_tbl[] = {
4670 - SND_PCI_QUIRK(0x1014, 0x0597, "Lenovo Z60", AD1981_THINKPAD),
4671 -+ SND_PCI_QUIRK(0x1014, 0x05b7, "Lenovo Z60m", AD1981_THINKPAD),
4672 - /* All HP models */
4673 - SND_PCI_QUIRK(0x103c, 0, "HP nx", AD1981_HP),
4674 - SND_PCI_QUIRK(0x1179, 0x0001, "Toshiba U205", AD1981_TOSHIBA),
4675 -@@ -2623,7 +2624,7 @@ static int ad1988_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
4676 - {
4677 - struct ad198x_spec *spec = codec->spec;
4678 - hda_nid_t nid;
4679 -- int idx, err;
4680 -+ int i, idx, err;
4681 - char name[32];
4682 -
4683 - if (! pin)
4684 -@@ -2631,16 +2632,26 @@ static int ad1988_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
4685 -
4686 - idx = ad1988_pin_idx(pin);
4687 - nid = ad1988_idx_to_dac(codec, idx);
4688 -- /* specify the DAC as the extra output */
4689 -- if (! spec->multiout.hp_nid)
4690 -- spec->multiout.hp_nid = nid;
4691 -- else
4692 -- spec->multiout.extra_out_nid[0] = nid;
4693 -- /* control HP volume/switch on the output mixer amp */
4694 -- sprintf(name, "%s Playback Volume", pfx);
4695 -- if ((err = add_control(spec, AD_CTL_WIDGET_VOL, name,
4696 -- HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT))) < 0)
4697 -- return err;
4698 -+ /* check whether the corresponding DAC was already taken */
4699 -+ for (i = 0; i < spec->autocfg.line_outs; i++) {
4700 -+ hda_nid_t pin = spec->autocfg.line_out_pins[i];
4701 -+ hda_nid_t dac = ad1988_idx_to_dac(codec, ad1988_pin_idx(pin));
4702 -+ if (dac == nid)
4703 -+ break;
4704 -+ }
4705 -+ if (i >= spec->autocfg.line_outs) {
4706 -+ /* specify the DAC as the extra output */
4707 -+ if (!spec->multiout.hp_nid)
4708 -+ spec->multiout.hp_nid = nid;
4709 -+ else
4710 -+ spec->multiout.extra_out_nid[0] = nid;
4711 -+ /* control HP volume/switch on the output mixer amp */
4712 -+ sprintf(name, "%s Playback Volume", pfx);
4713 -+ err = add_control(spec, AD_CTL_WIDGET_VOL, name,
4714 -+ HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT));
4715 -+ if (err < 0)
4716 -+ return err;
4717 -+ }
4718 - nid = ad1988_mixer_nids[idx];
4719 - sprintf(name, "%s Playback Switch", pfx);
4720 - if ((err = add_control(spec, AD_CTL_BIND_MUTE, name,
4721
4722 Deleted: genpatches-2.6/trunk/2.6.27/1002_linux-2.6.26.3.patch
4723 ===================================================================
4724 --- genpatches-2.6/trunk/2.6.27/1002_linux-2.6.26.3.patch 2008-10-10 23:58:26 UTC (rev 1350)
4725 +++ genpatches-2.6/trunk/2.6.27/1002_linux-2.6.26.3.patch 2008-10-11 00:00:47 UTC (rev 1351)
4726 @@ -1,3210 +0,0 @@
4727 -diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
4728 -index 5152ba0..778de8d 100644
4729 ---- a/arch/ia64/kvm/kvm-ia64.c
4730 -+++ b/arch/ia64/kvm/kvm-ia64.c
4731 -@@ -125,9 +125,9 @@ void kvm_arch_hardware_enable(void *garbage)
4732 - PAGE_KERNEL));
4733 - local_irq_save(saved_psr);
4734 - slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
4735 -+ local_irq_restore(saved_psr);
4736 - if (slot < 0)
4737 - return;
4738 -- local_irq_restore(saved_psr);
4739 -
4740 - spin_lock(&vp_lock);
4741 - status = ia64_pal_vp_init_env(kvm_vsa_base ?
4742 -@@ -160,9 +160,9 @@ void kvm_arch_hardware_disable(void *garbage)
4743 -
4744 - local_irq_save(saved_psr);
4745 - slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
4746 -+ local_irq_restore(saved_psr);
4747 - if (slot < 0)
4748 - return;
4749 -- local_irq_restore(saved_psr);
4750 -
4751 - status = ia64_pal_vp_exit_env(host_iva);
4752 - if (status)
4753 -@@ -1258,6 +1258,7 @@ static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
4754 - uninit:
4755 - kvm_vcpu_uninit(vcpu);
4756 - fail:
4757 -+ local_irq_restore(psr);
4758 - return r;
4759 - }
4760 -
4761 -diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
4762 -index c481673..acb0b97 100644
4763 ---- a/arch/sparc64/kernel/irq.c
4764 -+++ b/arch/sparc64/kernel/irq.c
4765 -@@ -682,10 +682,32 @@ void ack_bad_irq(unsigned int virt_irq)
4766 - ino, virt_irq);
4767 - }
4768 -
4769 -+void *hardirq_stack[NR_CPUS];
4770 -+void *softirq_stack[NR_CPUS];
4771 -+
4772 -+static __attribute__((always_inline)) void *set_hardirq_stack(void)
4773 -+{
4774 -+ void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
4775 -+
4776 -+ __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
4777 -+ if (orig_sp < sp ||
4778 -+ orig_sp > (sp + THREAD_SIZE)) {
4779 -+ sp += THREAD_SIZE - 192 - STACK_BIAS;
4780 -+ __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
4781 -+ }
4782 -+
4783 -+ return orig_sp;
4784 -+}
4785 -+static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
4786 -+{
4787 -+ __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
4788 -+}
4789 -+
4790 - void handler_irq(int irq, struct pt_regs *regs)
4791 - {
4792 - unsigned long pstate, bucket_pa;
4793 - struct pt_regs *old_regs;
4794 -+ void *orig_sp;
4795 -
4796 - clear_softint(1 << irq);
4797 -
4798 -@@ -703,6 +725,8 @@ void handler_irq(int irq, struct pt_regs *regs)
4799 - "i" (PSTATE_IE)
4800 - : "memory");
4801 -
4802 -+ orig_sp = set_hardirq_stack();
4803 -+
4804 - while (bucket_pa) {
4805 - struct irq_desc *desc;
4806 - unsigned long next_pa;
4807 -@@ -719,10 +743,38 @@ void handler_irq(int irq, struct pt_regs *regs)
4808 - bucket_pa = next_pa;
4809 - }
4810 -
4811 -+ restore_hardirq_stack(orig_sp);
4812 -+
4813 - irq_exit();
4814 - set_irq_regs(old_regs);
4815 - }
4816 -
4817 -+void do_softirq(void)
4818 -+{
4819 -+ unsigned long flags;
4820 -+
4821 -+ if (in_interrupt())
4822 -+ return;
4823 -+
4824 -+ local_irq_save(flags);
4825 -+
4826 -+ if (local_softirq_pending()) {
4827 -+ void *orig_sp, *sp = softirq_stack[smp_processor_id()];
4828 -+
4829 -+ sp += THREAD_SIZE - 192 - STACK_BIAS;
4830 -+
4831 -+ __asm__ __volatile__("mov %%sp, %0\n\t"
4832 -+ "mov %1, %%sp"
4833 -+ : "=&r" (orig_sp)
4834 -+ : "r" (sp));
4835 -+ __do_softirq();
4836 -+ __asm__ __volatile__("mov %0, %%sp"
4837 -+ : : "r" (orig_sp));
4838 -+ }
4839 -+
4840 -+ local_irq_restore(flags);
4841 -+}
4842 -+
4843 - #ifdef CONFIG_HOTPLUG_CPU
4844 - void fixup_irqs(void)
4845 - {
4846 -diff --git a/arch/sparc64/kernel/kstack.h b/arch/sparc64/kernel/kstack.h
4847 -new file mode 100644
4848 -index 0000000..4248d96
4849 ---- /dev/null
4850 -+++ b/arch/sparc64/kernel/kstack.h
4851 -@@ -0,0 +1,60 @@
4852 -+#ifndef _KSTACK_H
4853 -+#define _KSTACK_H
4854 -+
4855 -+#include <linux/thread_info.h>
4856 -+#include <linux/sched.h>
4857 -+#include <asm/ptrace.h>
4858 -+#include <asm/irq.h>
4859 -+
4860 -+/* SP must be STACK_BIAS adjusted already. */
4861 -+static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
4862 -+{
4863 -+ unsigned long base = (unsigned long) tp;
4864 -+
4865 -+ if (sp >= (base + sizeof(struct thread_info)) &&
4866 -+ sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
4867 -+ return true;
4868 -+
4869 -+ if (hardirq_stack[tp->cpu]) {
4870 -+ base = (unsigned long) hardirq_stack[tp->cpu];
4871 -+ if (sp >= base &&
4872 -+ sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
4873 -+ return true;
4874 -+ base = (unsigned long) softirq_stack[tp->cpu];
4875 -+ if (sp >= base &&
4876 -+ sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
4877 -+ return true;
4878 -+ }
4879 -+ return false;
4880 -+}
4881 -+
4882 -+/* Does "regs" point to a valid pt_regs trap frame? */
4883 -+static inline bool kstack_is_trap_frame(struct thread_info *tp, struct pt_regs *regs)
4884 -+{
4885 -+ unsigned long base = (unsigned long) tp;
4886 -+ unsigned long addr = (unsigned long) regs;
4887 -+
4888 -+ if (addr >= base &&
4889 -+ addr <= (base + THREAD_SIZE - sizeof(*regs)))
4890 -+ goto check_magic;
4891 -+
4892 -+ if (hardirq_stack[tp->cpu]) {
4893 -+ base = (unsigned long) hardirq_stack[tp->cpu];
4894 -+ if (addr >= base &&
4895 -+ addr <= (base + THREAD_SIZE - sizeof(*regs)))
4896 -+ goto check_magic;
4897 -+ base = (unsigned long) softirq_stack[tp->cpu];
4898 -+ if (addr >= base &&
4899 -+ addr <= (base + THREAD_SIZE - sizeof(*regs)))
4900 -+ goto check_magic;
4901 -+ }
4902 -+ return false;
4903 -+
4904 -+check_magic:
4905 -+ if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC)
4906 -+ return true;
4907 -+ return false;
4908 -+
4909 -+}
4910 -+
4911 -+#endif /* _KSTACK_H */
4912 -diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
4913 -index 2084f81..d9f4cd0 100644
4914 ---- a/arch/sparc64/kernel/process.c
4915 -+++ b/arch/sparc64/kernel/process.c
4916 -@@ -55,6 +55,8 @@
4917 -
4918 - /* #define VERBOSE_SHOWREGS */
4919 -
4920 -+#include "kstack.h"
4921 -+
4922 - static void sparc64_yield(int cpu)
4923 - {
4924 - if (tlb_type != hypervisor)
4925 -@@ -316,14 +318,22 @@ static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
4926 - global_reg_snapshot[this_cpu].o7 = regs->u_regs[UREG_I7];
4927 -
4928 - if (regs->tstate & TSTATE_PRIV) {
4929 -+ struct thread_info *tp = current_thread_info();
4930 - struct reg_window *rw;
4931 -
4932 - rw = (struct reg_window *)
4933 - (regs->u_regs[UREG_FP] + STACK_BIAS);
4934 -- global_reg_snapshot[this_cpu].i7 = rw->ins[6];
4935 -- } else
4936 -+ if (kstack_valid(tp, (unsigned long) rw)) {
4937 -+ global_reg_snapshot[this_cpu].i7 = rw->ins[7];
4938 -+ rw = (struct reg_window *)
4939 -+ (rw->ins[6] + STACK_BIAS);
4940 -+ if (kstack_valid(tp, (unsigned long) rw))
4941 -+ global_reg_snapshot[this_cpu].rpc = rw->ins[7];
4942 -+ }
4943 -+ } else {
4944 - global_reg_snapshot[this_cpu].i7 = 0;
4945 --
4946 -+ global_reg_snapshot[this_cpu].rpc = 0;
4947 -+ }
4948 - global_reg_snapshot[this_cpu].thread = tp;
4949 - }
4950 -
4951 -@@ -384,12 +394,14 @@ static void sysrq_handle_globreg(int key, struct tty_struct *tty)
4952 - sprint_symbol(buffer, gp->o7);
4953 - printk("O7[%s] ", buffer);
4954 - sprint_symbol(buffer, gp->i7);
4955 -- printk("I7[%s]\n", buffer);
4956 -+ printk("I7[%s] ", buffer);
4957 -+ sprint_symbol(buffer, gp->rpc);
4958 -+ printk("RPC[%s]\n", buffer);
4959 - } else
4960 - #endif
4961 - {
4962 -- printk(" TPC[%lx] O7[%lx] I7[%lx]\n",
4963 -- gp->tpc, gp->o7, gp->i7);
4964 -+ printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
4965 -+ gp->tpc, gp->o7, gp->i7, gp->rpc);
4966 - }
4967 - }
4968 -
4969 -@@ -876,7 +888,7 @@ out:
4970 - unsigned long get_wchan(struct task_struct *task)
4971 - {
4972 - unsigned long pc, fp, bias = 0;
4973 -- unsigned long thread_info_base;
4974 -+ struct thread_info *tp;
4975 - struct reg_window *rw;
4976 - unsigned long ret = 0;
4977 - int count = 0;
4978 -@@ -885,14 +897,12 @@ unsigned long get_wchan(struct task_struct *task)
4979 - task->state == TASK_RUNNING)
4980 - goto out;
4981 -
4982 -- thread_info_base = (unsigned long) task_stack_page(task);
4983 -+ tp = task_thread_info(task);
4984 - bias = STACK_BIAS;
4985 - fp = task_thread_info(task)->ksp + bias;
4986 -
4987 - do {
4988 -- /* Bogus frame pointer? */
4989 -- if (fp < (thread_info_base + sizeof(struct thread_info)) ||
4990 -- fp >= (thread_info_base + THREAD_SIZE))
4991 -+ if (!kstack_valid(tp, fp))
4992 - break;
4993 - rw = (struct reg_window *) fp;
4994 - pc = rw->ins[7];
4995 -diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
4996 -index 9667e96..10a12cb 100644
4997 ---- a/arch/sparc64/kernel/signal.c
4998 -+++ b/arch/sparc64/kernel/signal.c
4999 -@@ -2,7 +2,7 @@
5000 - * arch/sparc64/kernel/signal.c
5001 - *
5002 - * Copyright (C) 1991, 1992 Linus Torvalds
5003 -- * Copyright (C) 1995 David S. Miller (davem@××××××××××××.edu)
5004 -+ * Copyright (C) 1995, 2008 David S. Miller (davem@×××××××××.net)
5005 - * Copyright (C) 1996 Miguel de Icaza (miguel@××××××××××××.mx)
5006 - * Copyright (C) 1997 Eddie C. Dost (ecd@××××××.be)
5007 - * Copyright (C) 1997,1998 Jakub Jelinek (jj@××××××××××××××××.cz)
5008 -@@ -89,7 +89,9 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
5009 - err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
5010 - err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
5011 - err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
5012 -- err |= __get_user(regs->u_regs[UREG_G7], (&(*grp)[MC_G7]));
5013 -+
5014 -+ /* Skip %g7 as that's the thread register in userspace. */
5015 -+
5016 - err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
5017 - err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
5018 - err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
5019 -diff --git a/arch/sparc64/kernel/stacktrace.c b/arch/sparc64/kernel/stacktrace.c
5020 -index c73ce3f..8d749ef 100644
5021 ---- a/arch/sparc64/kernel/stacktrace.c
5022 -+++ b/arch/sparc64/kernel/stacktrace.c
5023 -@@ -4,10 +4,12 @@
5024 - #include <asm/ptrace.h>
5025 - #include <asm/stacktrace.h>
5026 -
5027 -+#include "kstack.h"
5028 -+
5029 - void save_stack_trace(struct stack_trace *trace)
5030 - {
5031 -- unsigned long ksp, fp, thread_base;
5032 - struct thread_info *tp = task_thread_info(current);
5033 -+ unsigned long ksp, fp;
5034 -
5035 - stack_trace_flush();
5036 -
5037 -@@ -17,21 +19,18 @@ void save_stack_trace(struct stack_trace *trace)
5038 - );
5039 -
5040 - fp = ksp + STACK_BIAS;
5041 -- thread_base = (unsigned long) tp;
5042 - do {
5043 - struct sparc_stackf *sf;
5044 - struct pt_regs *regs;
5045 - unsigned long pc;
5046 -
5047 -- /* Bogus frame pointer? */
5048 -- if (fp < (thread_base + sizeof(struct thread_info)) ||
5049 -- fp >= (thread_base + THREAD_SIZE))
5050 -+ if (!kstack_valid(tp, fp))
5051 - break;
5052 -
5053 - sf = (struct sparc_stackf *) fp;
5054 - regs = (struct pt_regs *) (sf + 1);
5055 -
5056 -- if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) {
5057 -+ if (kstack_is_trap_frame(tp, regs)) {
5058 - if (!(regs->tstate & TSTATE_PRIV))
5059 - break;
5060 - pc = regs->tpc;
5061 -diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
5062 -index 3697492..1389e38 100644
5063 ---- a/arch/sparc64/kernel/traps.c
5064 -+++ b/arch/sparc64/kernel/traps.c
5065 -@@ -43,6 +43,7 @@
5066 - #include <asm/prom.h>
5067 -
5068 - #include "entry.h"
5069 -+#include "kstack.h"
5070 -
5071 - /* When an irrecoverable trap occurs at tl > 0, the trap entry
5072 - * code logs the trap state registers at every level in the trap
5073 -@@ -2120,14 +2121,12 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5074 - struct pt_regs *regs;
5075 - unsigned long pc;
5076 -
5077 -- /* Bogus frame pointer? */
5078 -- if (fp < (thread_base + sizeof(struct thread_info)) ||
5079 -- fp >= (thread_base + THREAD_SIZE))
5080 -+ if (!kstack_valid(tp, fp))
5081 - break;
5082 - sf = (struct sparc_stackf *) fp;
5083 - regs = (struct pt_regs *) (sf + 1);
5084 -
5085 -- if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) {
5086 -+ if (kstack_is_trap_frame(tp, regs)) {
5087 - if (!(regs->tstate & TSTATE_PRIV))
5088 - break;
5089 - pc = regs->tpc;
5090 -diff --git a/arch/sparc64/lib/mcount.S b/arch/sparc64/lib/mcount.S
5091 -index 9e4534b..0935f84 100644
5092 ---- a/arch/sparc64/lib/mcount.S
5093 -+++ b/arch/sparc64/lib/mcount.S
5094 -@@ -45,12 +45,45 @@ _mcount:
5095 - sub %g3, STACK_BIAS, %g3
5096 - cmp %sp, %g3
5097 - bg,pt %xcc, 1f
5098 -- sethi %hi(panicstring), %g3
5099 -+ nop
5100 -+ lduh [%g6 + TI_CPU], %g1
5101 -+ sethi %hi(hardirq_stack), %g3
5102 -+ or %g3, %lo(hardirq_stack), %g3
5103 -+ sllx %g1, 3, %g1
5104 -+ ldx [%g3 + %g1], %g7
5105 -+ sub %g7, STACK_BIAS, %g7
5106 -+ cmp %sp, %g7
5107 -+ bleu,pt %xcc, 2f
5108 -+ sethi %hi(THREAD_SIZE), %g3
5109 -+ add %g7, %g3, %g7
5110 -+ cmp %sp, %g7
5111 -+ blu,pn %xcc, 1f
5112 -+2: sethi %hi(softirq_stack), %g3
5113 -+ or %g3, %lo(softirq_stack), %g3
5114 -+ ldx [%g3 + %g1], %g7
5115 -+ cmp %sp, %g7
5116 -+ bleu,pt %xcc, 2f
5117 -+ sethi %hi(THREAD_SIZE), %g3
5118 -+ add %g7, %g3, %g7
5119 -+ cmp %sp, %g7
5120 -+ blu,pn %xcc, 1f
5121 -+ nop
5122 -+ /* If we are already on ovstack, don't hop onto it
5123 -+ * again, we are already trying to output the stack overflow
5124 -+ * message.
5125 -+ */
5126 - sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough
5127 - or %g7, %lo(ovstack), %g7
5128 -- add %g7, OVSTACKSIZE, %g7
5129 -+ add %g7, OVSTACKSIZE, %g3
5130 -+ sub %g3, STACK_BIAS + 192, %g3
5131 - sub %g7, STACK_BIAS, %g7
5132 -- mov %g7, %sp
5133 -+ cmp %sp, %g7
5134 -+ blu,pn %xcc, 2f
5135 -+ cmp %sp, %g3
5136 -+ bleu,pn %xcc, 1f
5137 -+ nop
5138 -+2: mov %g3, %sp
5139 -+ sethi %hi(panicstring), %g3
5140 - call prom_printf
5141 - or %g3, %lo(panicstring), %o0
5142 - call prom_halt
5143 -diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
5144 -index 84898c4..e289a98 100644
5145 ---- a/arch/sparc64/mm/init.c
5146 -+++ b/arch/sparc64/mm/init.c
5147 -@@ -49,6 +49,7 @@
5148 - #include <asm/sstate.h>
5149 - #include <asm/mdesc.h>
5150 - #include <asm/cpudata.h>
5151 -+#include <asm/irq.h>
5152 -
5153 - #define MAX_PHYS_ADDRESS (1UL << 42UL)
5154 - #define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
5155 -@@ -1817,6 +1818,16 @@ void __init paging_init(void)
5156 - if (tlb_type == hypervisor)
5157 - sun4v_mdesc_init();
5158 -
5159 -+ /* Once the OF device tree and MDESC have been setup, we know
5160 -+ * the list of possible cpus. Therefore we can allocate the
5161 -+ * IRQ stacks.
5162 -+ */
5163 -+ for_each_possible_cpu(i) {
5164 -+ /* XXX Use node local allocations... XXX */
5165 -+ softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
5166 -+ hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
5167 -+ }
5168 -+
5169 - /* Setup bootmem... */
5170 - last_valid_pfn = end_pfn = bootmem_init(phys_base);
5171 -
5172 -diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
5173 -index 9bb2d90..db57686 100644
5174 ---- a/arch/sparc64/mm/ultra.S
5175 -+++ b/arch/sparc64/mm/ultra.S
5176 -@@ -531,6 +531,13 @@ xcall_fetch_glob_regs:
5177 - stx %g7, [%g1 + GR_SNAP_TNPC]
5178 - stx %o7, [%g1 + GR_SNAP_O7]
5179 - stx %i7, [%g1 + GR_SNAP_I7]
5180 -+ /* Don't try this at home kids... */
5181 -+ rdpr %cwp, %g2
5182 -+ sub %g2, 1, %g7
5183 -+ wrpr %g7, %cwp
5184 -+ mov %i7, %g7
5185 -+ wrpr %g2, %cwp
5186 -+ stx %g7, [%g1 + GR_SNAP_RPC]
5187 - sethi %hi(trap_block), %g7
5188 - or %g7, %lo(trap_block), %g7
5189 - sllx %g2, TRAP_BLOCK_SZ_SHIFT, %g2
5190 -diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
5191 -index a34b998..9d4b4b4 100644
5192 ---- a/arch/x86/boot/boot.h
5193 -+++ b/arch/x86/boot/boot.h
5194 -@@ -25,6 +25,8 @@
5195 - #include <asm/boot.h>
5196 - #include <asm/setup.h>
5197 -
5198 -+#define NCAPINTS 8
5199 -+
5200 - /* Useful macros */
5201 - #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
5202 -
5203 -@@ -242,6 +244,12 @@ int cmdline_find_option(const char *option, char *buffer, int bufsize);
5204 - int cmdline_find_option_bool(const char *option);
5205 -
5206 - /* cpu.c, cpucheck.c */
5207 -+struct cpu_features {
5208 -+ int level; /* Family, or 64 for x86-64 */
5209 -+ int model;
5210 -+ u32 flags[NCAPINTS];
5211 -+};
5212 -+extern struct cpu_features cpu;
5213 - int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr);
5214 - int validate_cpu(void);
5215 -
5216 -diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
5217 -index 7804389..c1ce030 100644
5218 ---- a/arch/x86/boot/cpucheck.c
5219 -+++ b/arch/x86/boot/cpucheck.c
5220 -@@ -30,13 +30,7 @@
5221 - #include <asm/required-features.h>
5222 - #include <asm/msr-index.h>
5223 -
5224 --struct cpu_features {
5225 -- int level; /* Family, or 64 for x86-64 */
5226 -- int model;
5227 -- u32 flags[NCAPINTS];
5228 --};
5229 --
5230 --static struct cpu_features cpu;
5231 -+struct cpu_features cpu;
5232 - static u32 cpu_vendor[3];
5233 - static u32 err_flags[NCAPINTS];
5234 -
5235 -diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c
5236 -index 77569a4..1b92cb6 100644
5237 ---- a/arch/x86/boot/main.c
5238 -+++ b/arch/x86/boot/main.c
5239 -@@ -73,6 +73,10 @@ static void keyboard_set_repeat(void)
5240 - */
5241 - static void query_ist(void)
5242 - {
5243 -+ /* Some 486 BIOSes apparently crash on this call */
5244 -+ if (cpu.level < 6)
5245 -+ return;
5246 -+
5247 - asm("int $0x15"
5248 - : "=a" (boot_params.ist_info.signature),
5249 - "=b" (boot_params.ist_info.command),
5250 -diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
5251 -index 5d241ce..75b14b1 100644
5252 ---- a/arch/x86/kernel/cpu/mtrr/generic.c
5253 -+++ b/arch/x86/kernel/cpu/mtrr/generic.c
5254 -@@ -219,7 +219,7 @@ void __init get_mtrr_state(void)
5255 - tom2 = hi;
5256 - tom2 <<= 32;
5257 - tom2 |= lo;
5258 -- tom2 &= 0xffffff8000000ULL;
5259 -+ tom2 &= 0xffffff800000ULL;
5260 - }
5261 - if (mtrr_show) {
5262 - int high_width;
5263 -diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
5264 -index c26d811..67d00bc 100644
5265 ---- a/arch/x86/kvm/mmu.c
5266 -+++ b/arch/x86/kvm/mmu.c
5267 -@@ -1792,6 +1792,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
5268 - spin_unlock(&vcpu->kvm->mmu_lock);
5269 - return r;
5270 - }
5271 -+EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
5272 -
5273 - void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
5274 - {
5275 -diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
5276 -index 06992d6..7d6071d 100644
5277 ---- a/arch/x86/kvm/svm.c
5278 -+++ b/arch/x86/kvm/svm.c
5279 -@@ -1007,13 +1007,18 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
5280 - struct kvm *kvm = svm->vcpu.kvm;
5281 - u64 fault_address;
5282 - u32 error_code;
5283 -+ bool event_injection = false;
5284 -
5285 - if (!irqchip_in_kernel(kvm) &&
5286 -- is_external_interrupt(exit_int_info))
5287 -+ is_external_interrupt(exit_int_info)) {
5288 -+ event_injection = true;
5289 - push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
5290 -+ }
5291 -
5292 - fault_address = svm->vmcb->control.exit_info_2;
5293 - error_code = svm->vmcb->control.exit_info_1;
5294 -+ if (event_injection)
5295 -+ kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
5296 - return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
5297 - }
5298 -
5299 -diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
5300 -index 2ce9063..3ff39c1 100644
5301 ---- a/arch/x86/kvm/vmx.c
5302 -+++ b/arch/x86/kvm/vmx.c
5303 -@@ -2258,6 +2258,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
5304 - cr2 = vmcs_readl(EXIT_QUALIFICATION);
5305 - KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
5306 - (u32)((u64)cr2 >> 32), handler);
5307 -+ if (vect_info & VECTORING_INFO_VALID_MASK)
5308 -+ kvm_mmu_unprotect_page_virt(vcpu, cr2);
5309 - return kvm_mmu_page_fault(vcpu, cr2, error_code);
5310 - }
5311 -
5312 -diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
5313 -index 5a7406e..8ab14ab 100644
5314 ---- a/arch/x86/kvm/x86.c
5315 -+++ b/arch/x86/kvm/x86.c
5316 -@@ -3168,6 +3168,10 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
5317 - kvm_desct->base |= seg_desc->base2 << 24;
5318 - kvm_desct->limit = seg_desc->limit0;
5319 - kvm_desct->limit |= seg_desc->limit << 16;
5320 -+ if (seg_desc->g) {
5321 -+ kvm_desct->limit <<= 12;
5322 -+ kvm_desct->limit |= 0xfff;
5323 -+ }
5324 - kvm_desct->selector = selector;
5325 - kvm_desct->type = seg_desc->type;
5326 - kvm_desct->present = seg_desc->p;
5327 -@@ -3207,6 +3211,7 @@ static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
5328 - static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
5329 - struct desc_struct *seg_desc)
5330 - {
5331 -+ gpa_t gpa;
5332 - struct descriptor_table dtable;
5333 - u16 index = selector >> 3;
5334 -
5335 -@@ -3216,13 +3221,16 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
5336 - kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
5337 - return 1;
5338 - }
5339 -- return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
5340 -+ gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
5341 -+ gpa += index * 8;
5342 -+ return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
5343 - }
5344 -
5345 - /* allowed just for 8 bytes segments */
5346 - static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
5347 - struct desc_struct *seg_desc)
5348 - {
5349 -+ gpa_t gpa;
5350 - struct descriptor_table dtable;
5351 - u16 index = selector >> 3;
5352 -
5353 -@@ -3230,7 +3238,9 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
5354 -
5355 - if (dtable.limit < index * 8 + 7)
5356 - return 1;
5357 -- return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
5358 -+ gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
5359 -+ gpa += index * 8;
5360 -+ return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
5361 - }
5362 -
5363 - static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
5364 -@@ -3242,55 +3252,7 @@ static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
5365 - base_addr |= (seg_desc->base1 << 16);
5366 - base_addr |= (seg_desc->base2 << 24);
5367 -
5368 -- return base_addr;
5369 --}
5370 --
5371 --static int load_tss_segment32(struct kvm_vcpu *vcpu,
5372 -- struct desc_struct *seg_desc,
5373 -- struct tss_segment_32 *tss)
5374 --{
5375 -- u32 base_addr;
5376 --
5377 -- base_addr = get_tss_base_addr(vcpu, seg_desc);
5378 --
5379 -- return kvm_read_guest(vcpu->kvm, base_addr, tss,
5380 -- sizeof(struct tss_segment_32));
5381 --}
5382 --
5383 --static int save_tss_segment32(struct kvm_vcpu *vcpu,
5384 -- struct desc_struct *seg_desc,
5385 -- struct tss_segment_32 *tss)
5386 --{
5387 -- u32 base_addr;
5388 --
5389 -- base_addr = get_tss_base_addr(vcpu, seg_desc);
5390 --
5391 -- return kvm_write_guest(vcpu->kvm, base_addr, tss,
5392 -- sizeof(struct tss_segment_32));
5393 --}
5394 --
5395 --static int load_tss_segment16(struct kvm_vcpu *vcpu,
5396 -- struct desc_struct *seg_desc,
5397 -- struct tss_segment_16 *tss)
5398 --{
5399 -- u32 base_addr;
5400 --
5401 -- base_addr = get_tss_base_addr(vcpu, seg_desc);
5402 --
5403 -- return kvm_read_guest(vcpu->kvm, base_addr, tss,
5404 -- sizeof(struct tss_segment_16));
5405 --}
5406 --
5407 --static int save_tss_segment16(struct kvm_vcpu *vcpu,
5408 -- struct desc_struct *seg_desc,
5409 -- struct tss_segment_16 *tss)
5410 --{
5411 -- u32 base_addr;
5412 --
5413 -- base_addr = get_tss_base_addr(vcpu, seg_desc);
5414 --
5415 -- return kvm_write_guest(vcpu->kvm, base_addr, tss,
5416 -- sizeof(struct tss_segment_16));
5417 -+ return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
5418 - }
5419 -
5420 - static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
5421 -@@ -3450,20 +3412,26 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
5422 - }
5423 -
5424 - int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
5425 -- struct desc_struct *cseg_desc,
5426 -+ u32 old_tss_base,
5427 - struct desc_struct *nseg_desc)
5428 - {
5429 - struct tss_segment_16 tss_segment_16;
5430 - int ret = 0;
5431 -
5432 -- if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
5433 -+ if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
5434 -+ sizeof tss_segment_16))
5435 - goto out;
5436 -
5437 - save_state_to_tss16(vcpu, &tss_segment_16);
5438 -- save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
5439 -
5440 -- if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
5441 -+ if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
5442 -+ sizeof tss_segment_16))
5443 - goto out;
5444 -+
5445 -+ if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
5446 -+ &tss_segment_16, sizeof tss_segment_16))
5447 -+ goto out;
5448 -+
5449 - if (load_state_from_tss16(vcpu, &tss_segment_16))
5450 - goto out;
5451 -
5452 -@@ -3473,20 +3441,26 @@ out:
5453 - }
5454 -
5455 - int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
5456 -- struct desc_struct *cseg_desc,
5457 -+ u32 old_tss_base,
5458 - struct desc_struct *nseg_desc)
5459 - {
5460 - struct tss_segment_32 tss_segment_32;
5461 - int ret = 0;
5462 -
5463 -- if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
5464 -+ if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
5465 -+ sizeof tss_segment_32))
5466 - goto out;
5467 -
5468 - save_state_to_tss32(vcpu, &tss_segment_32);
5469 -- save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
5470 -
5471 -- if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
5472 -+ if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
5473 -+ sizeof tss_segment_32))
5474 -+ goto out;
5475 -+
5476 -+ if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
5477 -+ &tss_segment_32, sizeof tss_segment_32))
5478 - goto out;
5479 -+
5480 - if (load_state_from_tss32(vcpu, &tss_segment_32))
5481 - goto out;
5482 -
5483 -@@ -3501,16 +3475,20 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
5484 - struct desc_struct cseg_desc;
5485 - struct desc_struct nseg_desc;
5486 - int ret = 0;
5487 -+ u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
5488 -+ u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
5489 -
5490 -- get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
5491 -+ old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
5492 -
5493 -+ /* FIXME: Handle errors. Failure to read either TSS or their
5494 -+ * descriptors should generate a pagefault.
5495 -+ */
5496 - if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
5497 - goto out;
5498 -
5499 -- if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
5500 -+ if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
5501 - goto out;
5502 -
5503 --
5504 - if (reason != TASK_SWITCH_IRET) {
5505 - int cpl;
5506 -
5507 -@@ -3528,8 +3506,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
5508 -
5509 - if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
5510 - cseg_desc.type &= ~(1 << 1); //clear the B flag
5511 -- save_guest_segment_descriptor(vcpu, tr_seg.selector,
5512 -- &cseg_desc);
5513 -+ save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
5514 - }
5515 -
5516 - if (reason == TASK_SWITCH_IRET) {
5517 -@@ -3541,10 +3518,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
5518 - kvm_x86_ops->cache_regs(vcpu);
5519 -
5520 - if (nseg_desc.type & 8)
5521 -- ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
5522 -+ ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
5523 - &nseg_desc);
5524 - else
5525 -- ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
5526 -+ ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
5527 - &nseg_desc);
5528 -
5529 - if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
5530 -diff --git a/arch/x86/pci/k8-bus_64.c b/arch/x86/pci/k8-bus_64.c
5531 -index 5c2799c..bfefdf0 100644
5532 ---- a/arch/x86/pci/k8-bus_64.c
5533 -+++ b/arch/x86/pci/k8-bus_64.c
5534 -@@ -384,7 +384,7 @@ static int __init early_fill_mp_bus_info(void)
5535 - /* need to take out [0, TOM) for RAM*/
5536 - address = MSR_K8_TOP_MEM1;
5537 - rdmsrl(address, val);
5538 -- end = (val & 0xffffff8000000ULL);
5539 -+ end = (val & 0xffffff800000ULL);
5540 - printk(KERN_INFO "TOM: %016lx aka %ldM\n", end, end>>20);
5541 - if (end < (1ULL<<32))
5542 - update_range(range, 0, end - 1);
5543 -@@ -478,7 +478,7 @@ static int __init early_fill_mp_bus_info(void)
5544 - /* TOP_MEM2 */
5545 - address = MSR_K8_TOP_MEM2;
5546 - rdmsrl(address, val);
5547 -- end = (val & 0xffffff8000000ULL);
5548 -+ end = (val & 0xffffff800000ULL);
5549 - printk(KERN_INFO "TOM2: %016lx aka %ldM\n", end, end>>20);
5550 - update_range(range, 1ULL<<32, end - 1);
5551 - }
5552 -diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
5553 -index 78199c0..f1d2e8a 100644
5554 ---- a/block/scsi_ioctl.c
5555 -+++ b/block/scsi_ioctl.c
5556 -@@ -629,7 +629,7 @@ int scsi_cmd_ioctl(struct file *file, struct request_queue *q,
5557 - hdr.sbp = cgc.sense;
5558 - if (hdr.sbp)
5559 - hdr.mx_sb_len = sizeof(struct request_sense);
5560 -- hdr.timeout = cgc.timeout;
5561 -+ hdr.timeout = jiffies_to_msecs(cgc.timeout);
5562 - hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
5563 - hdr.cmd_len = sizeof(cgc.cmd);
5564 -
5565 -diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
5566 -index f7feae4..128202e 100644
5567 ---- a/drivers/char/hw_random/via-rng.c
5568 -+++ b/drivers/char/hw_random/via-rng.c
5569 -@@ -31,6 +31,7 @@
5570 - #include <asm/io.h>
5571 - #include <asm/msr.h>
5572 - #include <asm/cpufeature.h>
5573 -+#include <asm/i387.h>
5574 -
5575 -
5576 - #define PFX KBUILD_MODNAME ": "
5577 -@@ -67,16 +68,23 @@ enum {
5578 - * Another possible performance boost may come from simply buffering
5579 - * until we have 4 bytes, thus returning a u32 at a time,
5580 - * instead of the current u8-at-a-time.
5581 -+ *
5582 -+ * Padlock instructions can generate a spurious DNA fault, so
5583 -+ * we have to call them in the context of irq_ts_save/restore()
5584 - */
5585 -
5586 - static inline u32 xstore(u32 *addr, u32 edx_in)
5587 - {
5588 - u32 eax_out;
5589 -+ int ts_state;
5590 -+
5591 -+ ts_state = irq_ts_save();
5592 -
5593 - asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
5594 - :"=m"(*addr), "=a"(eax_out)
5595 - :"D"(addr), "d"(edx_in));
5596 -
5597 -+ irq_ts_restore(ts_state);
5598 - return eax_out;
5599 - }
5600 -
5601 -diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
5602 -index bb30eb9..2a5c2db 100644
5603 ---- a/drivers/crypto/padlock-aes.c
5604 -+++ b/drivers/crypto/padlock-aes.c
5605 -@@ -16,6 +16,7 @@
5606 - #include <linux/interrupt.h>
5607 - #include <linux/kernel.h>
5608 - #include <asm/byteorder.h>
5609 -+#include <asm/i387.h>
5610 - #include "padlock.h"
5611 -
5612 - /* Control word. */
5613 -@@ -141,6 +142,12 @@ static inline void padlock_reset_key(void)
5614 - asm volatile ("pushfl; popfl");
5615 - }
5616 -
5617 -+/*
5618 -+ * While the padlock instructions don't use FP/SSE registers, they
5619 -+ * generate a spurious DNA fault when cr0.ts is '1'. These instructions
5620 -+ * should be used only inside the irq_ts_save/restore() context
5621 -+ */
5622 -+
5623 - static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
5624 - void *control_word)
5625 - {
5626 -@@ -205,15 +212,23 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
5627 - static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
5628 - {
5629 - struct aes_ctx *ctx = aes_ctx(tfm);
5630 -+ int ts_state;
5631 - padlock_reset_key();
5632 -+
5633 -+ ts_state = irq_ts_save();
5634 - aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
5635 -+ irq_ts_restore(ts_state);
5636 - }
5637 -
5638 - static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
5639 - {
5640 - struct aes_ctx *ctx = aes_ctx(tfm);
5641 -+ int ts_state;
5642 - padlock_reset_key();
5643 -+
5644 -+ ts_state = irq_ts_save();
5645 - aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
5646 -+ irq_ts_restore(ts_state);
5647 - }
5648 -
5649 - static struct crypto_alg aes_alg = {
5650 -@@ -244,12 +259,14 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
5651 - struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
5652 - struct blkcipher_walk walk;
5653 - int err;
5654 -+ int ts_state;
5655 -
5656 - padlock_reset_key();
5657 -
5658 - blkcipher_walk_init(&walk, dst, src, nbytes);
5659 - err = blkcipher_walk_virt(desc, &walk);
5660 -
5661 -+ ts_state = irq_ts_save();
5662 - while ((nbytes = walk.nbytes)) {
5663 - padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
5664 - ctx->E, &ctx->cword.encrypt,
5665 -@@ -257,6 +274,7 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
5666 - nbytes &= AES_BLOCK_SIZE - 1;
5667 - err = blkcipher_walk_done(desc, &walk, nbytes);
5668 - }
5669 -+ irq_ts_restore(ts_state);
5670 -
5671 - return err;
5672 - }
5673 -@@ -268,12 +286,14 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
5674 - struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
5675 - struct blkcipher_walk walk;
5676 - int err;
5677 -+ int ts_state;
5678 -
5679 - padlock_reset_key();
5680 -
5681 - blkcipher_walk_init(&walk, dst, src, nbytes);
5682 - err = blkcipher_walk_virt(desc, &walk);
5683 -
5684 -+ ts_state = irq_ts_save();
5685 - while ((nbytes = walk.nbytes)) {
5686 - padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
5687 - ctx->D, &ctx->cword.decrypt,
5688 -@@ -281,7 +301,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
5689 - nbytes &= AES_BLOCK_SIZE - 1;
5690 - err = blkcipher_walk_done(desc, &walk, nbytes);
5691 - }
5692 --
5693 -+ irq_ts_restore(ts_state);
5694 - return err;
5695 - }
5696 -
5697 -@@ -314,12 +334,14 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
5698 - struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
5699 - struct blkcipher_walk walk;
5700 - int err;
5701 -+ int ts_state;
5702 -
5703 - padlock_reset_key();
5704 -
5705 - blkcipher_walk_init(&walk, dst, src, nbytes);
5706 - err = blkcipher_walk_virt(desc, &walk);
5707 -
5708 -+ ts_state = irq_ts_save();
5709 - while ((nbytes = walk.nbytes)) {
5710 - u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
5711 - walk.dst.virt.addr, ctx->E,
5712 -@@ -329,6 +351,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
5713 - nbytes &= AES_BLOCK_SIZE - 1;
5714 - err = blkcipher_walk_done(desc, &walk, nbytes);
5715 - }
5716 -+ irq_ts_restore(ts_state);
5717 -
5718 - return err;
5719 - }
5720 -@@ -340,12 +363,14 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
5721 - struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
5722 - struct blkcipher_walk walk;
5723 - int err;
5724 -+ int ts_state;
5725 -
5726 - padlock_reset_key();
5727 -
5728 - blkcipher_walk_init(&walk, dst, src, nbytes);
5729 - err = blkcipher_walk_virt(desc, &walk);
5730 -
5731 -+ ts_state = irq_ts_save();
5732 - while ((nbytes = walk.nbytes)) {
5733 - padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
5734 - ctx->D, walk.iv, &ctx->cword.decrypt,
5735 -@@ -354,6 +379,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
5736 - err = blkcipher_walk_done(desc, &walk, nbytes);
5737 - }
5738 -
5739 -+ irq_ts_restore(ts_state);
5740 - return err;
5741 - }
5742 -
5743 -diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
5744 -index c666b4e..355f8c6 100644
5745 ---- a/drivers/crypto/padlock-sha.c
5746 -+++ b/drivers/crypto/padlock-sha.c
5747 -@@ -22,6 +22,7 @@
5748 - #include <linux/interrupt.h>
5749 - #include <linux/kernel.h>
5750 - #include <linux/scatterlist.h>
5751 -+#include <asm/i387.h>
5752 - #include "padlock.h"
5753 -
5754 - #define SHA1_DEFAULT_FALLBACK "sha1-generic"
5755 -@@ -102,6 +103,7 @@ static void padlock_do_sha1(const char *in, char *out, int count)
5756 - * PadLock microcode needs it that big. */
5757 - char buf[128+16];
5758 - char *result = NEAREST_ALIGNED(buf);
5759 -+ int ts_state;
5760 -
5761 - ((uint32_t *)result)[0] = SHA1_H0;
5762 - ((uint32_t *)result)[1] = SHA1_H1;
5763 -@@ -109,9 +111,12 @@ static void padlock_do_sha1(const char *in, char *out, int count)
5764 - ((uint32_t *)result)[3] = SHA1_H3;
5765 - ((uint32_t *)result)[4] = SHA1_H4;
5766 -
5767 -+ /* prevent taking the spurious DNA fault with padlock. */
5768 -+ ts_state = irq_ts_save();
5769 - asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
5770 - : "+S"(in), "+D"(result)
5771 - : "c"(count), "a"(0));
5772 -+ irq_ts_restore(ts_state);
5773 -
5774 - padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
5775 - }
5776 -@@ -123,6 +128,7 @@ static void padlock_do_sha256(const char *in, char *out, int count)
5777 - * PadLock microcode needs it that big. */
5778 - char buf[128+16];
5779 - char *result = NEAREST_ALIGNED(buf);
5780 -+ int ts_state;
5781 -
5782 - ((uint32_t *)result)[0] = SHA256_H0;
5783 - ((uint32_t *)result)[1] = SHA256_H1;
5784 -@@ -133,9 +139,12 @@ static void padlock_do_sha256(const char *in, char *out, int count)
5785 - ((uint32_t *)result)[6] = SHA256_H6;
5786 - ((uint32_t *)result)[7] = SHA256_H7;
5787 -
5788 -+ /* prevent taking the spurious DNA fault with padlock. */
5789 -+ ts_state = irq_ts_save();
5790 - asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
5791 - : "+S"(in), "+D"(result)
5792 - : "c"(count), "a"(0));
5793 -+ irq_ts_restore(ts_state);
5794 -
5795 - padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
5796 - }
5797 -diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
5798 -index 9686734..711ca08 100644
5799 ---- a/drivers/i2c/Kconfig
5800 -+++ b/drivers/i2c/Kconfig
5801 -@@ -38,6 +38,20 @@ config I2C_CHARDEV
5802 - This support is also available as a module. If so, the module
5803 - will be called i2c-dev.
5804 -
5805 -+config I2C_HELPER_AUTO
5806 -+ bool "Autoselect pertinent helper modules"
5807 -+ default y
5808 -+ help
5809 -+ Some I2C bus drivers require so-called "I2C algorithm" modules
5810 -+ to work. These are basically software-only abstractions of generic
5811 -+ I2C interfaces. This option will autoselect them so that you don't
5812 -+ have to care.
5813 -+
5814 -+ Unselect this only if you need to enable additional helper
5815 -+ modules, for example for use with external I2C bus drivers.
5816 -+
5817 -+ In doubt, say Y.
5818 -+
5819 - source drivers/i2c/algos/Kconfig
5820 - source drivers/i2c/busses/Kconfig
5821 - source drivers/i2c/chips/Kconfig
5822 -diff --git a/drivers/i2c/algos/Kconfig b/drivers/i2c/algos/Kconfig
5823 -index 7137a17..b788579 100644
5824 ---- a/drivers/i2c/algos/Kconfig
5825 -+++ b/drivers/i2c/algos/Kconfig
5826 -@@ -2,15 +2,20 @@
5827 - # I2C algorithm drivers configuration
5828 - #
5829 -
5830 -+menu "I2C Algorithms"
5831 -+ depends on !I2C_HELPER_AUTO
5832 -+
5833 - config I2C_ALGOBIT
5834 -- tristate
5835 -+ tristate "I2C bit-banging interfaces"
5836 -
5837 - config I2C_ALGOPCF
5838 -- tristate
5839 -+ tristate "I2C PCF 8584 interfaces"
5840 -
5841 - config I2C_ALGOPCA
5842 -- tristate
5843 -+ tristate "I2C PCA 9564 interfaces"
5844 -
5845 - config I2C_ALGO_SGI
5846 - tristate
5847 - depends on SGI_IP22 || SGI_IP32 || X86_VISWS
5848 -+
5849 -+endmenu
5850 -diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
5851 -index d0175f4..08a7384 100644
5852 ---- a/drivers/i2c/i2c-core.c
5853 -+++ b/drivers/i2c/i2c-core.c
5854 -@@ -1196,9 +1196,11 @@ i2c_new_probed_device(struct i2c_adapter *adap,
5855 - if ((addr_list[i] & ~0x07) == 0x30
5856 - || (addr_list[i] & ~0x0f) == 0x50
5857 - || !i2c_check_functionality(adap, I2C_FUNC_SMBUS_QUICK)) {
5858 -+ union i2c_smbus_data data;
5859 -+
5860 - if (i2c_smbus_xfer(adap, addr_list[i], 0,
5861 - I2C_SMBUS_READ, 0,
5862 -- I2C_SMBUS_BYTE, NULL) >= 0)
5863 -+ I2C_SMBUS_BYTE, &data) >= 0)
5864 - break;
5865 - } else {
5866 - if (i2c_smbus_xfer(adap, addr_list[i], 0,
5867 -diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
5868 -index 0cc854e..614f9ce 100644
5869 ---- a/drivers/ide/ide-cd.c
5870 -+++ b/drivers/ide/ide-cd.c
5871 -@@ -1298,6 +1298,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
5872 -
5873 - int stat;
5874 - struct request req;
5875 -+ u32 blocklen;
5876 -
5877 - ide_cd_init_rq(drive, &req);
5878 -
5879 -@@ -1314,23 +1315,24 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
5880 - /*
5881 - * Sanity check the given block size
5882 - */
5883 -- switch (capbuf.blocklen) {
5884 -- case __constant_cpu_to_be32(512):
5885 -- case __constant_cpu_to_be32(1024):
5886 -- case __constant_cpu_to_be32(2048):
5887 -- case __constant_cpu_to_be32(4096):
5888 -+ blocklen = be32_to_cpu(capbuf.blocklen);
5889 -+ switch (blocklen) {
5890 -+ case 512:
5891 -+ case 1024:
5892 -+ case 2048:
5893 -+ case 4096:
5894 - break;
5895 - default:
5896 - printk(KERN_ERR "%s: weird block size %u\n",
5897 -- drive->name, capbuf.blocklen);
5898 -+ drive->name, blocklen);
5899 - printk(KERN_ERR "%s: default to 2kb block size\n",
5900 - drive->name);
5901 -- capbuf.blocklen = __constant_cpu_to_be32(2048);
5902 -+ blocklen = 2048;
5903 - break;
5904 - }
5905 -
5906 - *capacity = 1 + be32_to_cpu(capbuf.lba);
5907 -- *sectors_per_frame = be32_to_cpu(capbuf.blocklen) >> SECTOR_BITS;
5908 -+ *sectors_per_frame = blocklen >> SECTOR_BITS;
5909 - return 0;
5910 - }
5911 -
5912 -diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
5913 -index 992b1cf..0cfddf4 100644
5914 ---- a/drivers/ide/pci/cs5520.c
5915 -+++ b/drivers/ide/pci/cs5520.c
5916 -@@ -123,6 +123,7 @@ static const struct ide_dma_ops cs5520_dma_ops = {
5917 - #define DECLARE_CS_DEV(name_str) \
5918 - { \
5919 - .name = name_str, \
5920 -+ .enablebits = { {0x60, 0x01, 0x01}, {0x60, 0x02, 0x02} }, \
5921 - .port_ops = &cs5520_port_ops, \
5922 - .dma_ops = &cs5520_dma_ops, \
5923 - .host_flags = IDE_HFLAG_ISA_PORTS | \
5924 -diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
5925 -index 6ab0411..cbf6472 100644
5926 ---- a/drivers/ide/pci/it821x.c
5927 -+++ b/drivers/ide/pci/it821x.c
5928 -@@ -512,8 +512,14 @@ static void __devinit it821x_quirkproc(ide_drive_t *drive)
5929 - }
5930 -
5931 - static struct ide_dma_ops it821x_pass_through_dma_ops = {
5932 -+ .dma_host_set = ide_dma_host_set,
5933 -+ .dma_setup = ide_dma_setup,
5934 -+ .dma_exec_cmd = ide_dma_exec_cmd,
5935 - .dma_start = it821x_dma_start,
5936 - .dma_end = it821x_dma_end,
5937 -+ .dma_test_irq = ide_dma_test_irq,
5938 -+ .dma_timeout = ide_dma_timeout,
5939 -+ .dma_lost_irq = ide_dma_lost_irq,
5940 - };
5941 -
5942 - /**
5943 -diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c
5944 -index dd13a37..3a3e4c1 100644
5945 ---- a/drivers/misc/acer-wmi.c
5946 -+++ b/drivers/misc/acer-wmi.c
5947 -@@ -742,11 +742,30 @@ static acpi_status get_u32(u32 *value, u32 cap)
5948 -
5949 - static acpi_status set_u32(u32 value, u32 cap)
5950 - {
5951 -+ acpi_status status;
5952 -+
5953 - if (interface->capability & cap) {
5954 - switch (interface->type) {
5955 - case ACER_AMW0:
5956 - return AMW0_set_u32(value, cap, interface);
5957 - case ACER_AMW0_V2:
5958 -+ if (cap == ACER_CAP_MAILLED)
5959 -+ return AMW0_set_u32(value, cap, interface);
5960 -+
5961 -+ /*
5962 -+ * On some models, some WMID methods don't toggle
5963 -+ * properly. For those cases, we want to run the AMW0
5964 -+ * method afterwards to be certain we've really toggled
5965 -+ * the device state.
5966 -+ */
5967 -+ if (cap == ACER_CAP_WIRELESS ||
5968 -+ cap == ACER_CAP_BLUETOOTH) {
5969 -+ status = WMID_set_u32(value, cap, interface);
5970 -+ if (ACPI_FAILURE(status))
5971 -+ return status;
5972 -+
5973 -+ return AMW0_set_u32(value, cap, interface);
5974 -+ }
5975 - case ACER_WMID:
5976 - return WMID_set_u32(value, cap, interface);
5977 - default:
5978 -diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
5979 -index 6572425..42d7c0a 100644
5980 ---- a/drivers/net/r8169.c
5981 -+++ b/drivers/net/r8169.c
5982 -@@ -1438,8 +1438,10 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
5983 -
5984 - rtl_hw_phy_config(dev);
5985 -
5986 -- dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
5987 -- RTL_W8(0x82, 0x01);
5988 -+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
5989 -+ dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
5990 -+ RTL_W8(0x82, 0x01);
5991 -+ }
5992 -
5993 - pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
5994 -
5995 -diff --git a/drivers/net/wireless/rtl8187.h b/drivers/net/wireless/rtl8187.h
5996 -index 076d88b..aefd4f6 100644
5997 ---- a/drivers/net/wireless/rtl8187.h
5998 -+++ b/drivers/net/wireless/rtl8187.h
5999 -@@ -67,6 +67,10 @@ struct rtl8187_priv {
6000 - const struct rtl818x_rf_ops *rf;
6001 - struct ieee80211_vif *vif;
6002 - int mode;
6003 -+ /* The mutex protects the TX loopback state.
6004 -+ * Any attempt to set channels concurrently locks the device.
6005 -+ */
6006 -+ struct mutex conf_mutex;
6007 -
6008 - /* rtl8187 specific */
6009 - struct ieee80211_channel channels[14];
6010 -diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
6011 -index 9223ada..d49d1c6 100644
6012 ---- a/drivers/net/wireless/rtl8187_dev.c
6013 -+++ b/drivers/net/wireless/rtl8187_dev.c
6014 -@@ -580,6 +580,7 @@ static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
6015 - struct rtl8187_priv *priv = dev->priv;
6016 - u32 reg;
6017 -
6018 -+ mutex_lock(&priv->conf_mutex);
6019 - reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
6020 - /* Enable TX loopback on MAC level to avoid TX during channel
6021 - * changes, as this has be seen to causes problems and the
6022 -@@ -610,6 +611,7 @@ static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
6023 - rtl818x_iowrite16(priv, &priv->map->ATIMTR_INTERVAL, 100);
6024 - rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100);
6025 - rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL_TIME, 100);
6026 -+ mutex_unlock(&priv->conf_mutex);
6027 - return 0;
6028 - }
6029 -
6030 -@@ -814,6 +816,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
6031 - printk(KERN_ERR "rtl8187: Cannot register device\n");
6032 - goto err_free_dev;
6033 - }
6034 -+ mutex_init(&priv->conf_mutex);
6035 -
6036 - printk(KERN_INFO "%s: hwaddr %s, rtl8187 V%d + %s\n",
6037 - wiphy_name(dev->wiphy), print_mac(mac, dev->wiphy->perm_addr),
6038 -diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
6039 -index 338a3f9..c14de8e 100644
6040 ---- a/drivers/pci/quirks.c
6041 -+++ b/drivers/pci/quirks.c
6042 -@@ -1683,9 +1683,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_c
6043 - */
6044 - static void __devinit quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
6045 - {
6046 -- /* Only disable the VPD capability for 5706, 5708, and 5709 rev. A */
6047 -+ /*
6048 -+ * Only disable the VPD capability for 5706, 5706S, 5708,
6049 -+ * 5708S and 5709 rev. A
6050 -+ */
6051 - if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
6052 -+ (dev->device == PCI_DEVICE_ID_NX2_5706S) ||
6053 - (dev->device == PCI_DEVICE_ID_NX2_5708) ||
6054 -+ (dev->device == PCI_DEVICE_ID_NX2_5708S) ||
6055 - ((dev->device == PCI_DEVICE_ID_NX2_5709) &&
6056 - (dev->revision & 0xf0) == 0x0)) {
6057 - if (dev->vpd)
6058 -diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
6059 -index da876d3..74d12b5 100644
6060 ---- a/drivers/scsi/hptiop.c
6061 -+++ b/drivers/scsi/hptiop.c
6062 -@@ -1249,6 +1249,13 @@ static struct pci_device_id hptiop_id_table[] = {
6063 - { PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
6064 - { PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
6065 - { PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
6066 -+ { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
6067 -+ { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
6068 -+ { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
6069 -+ { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
6070 -+ { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
6071 -+ { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
6072 -+ { PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
6073 - { PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
6074 - { PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
6075 - { PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
6076 -diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
6077 -index 8dd88fc..8728e87 100644
6078 ---- a/drivers/scsi/qla2xxx/qla_attr.c
6079 -+++ b/drivers/scsi/qla2xxx/qla_attr.c
6080 -@@ -972,26 +972,39 @@ qla2x00_get_starget_port_id(struct scsi_target *starget)
6081 - }
6082 -
6083 - static void
6084 --qla2x00_get_rport_loss_tmo(struct fc_rport *rport)
6085 -+qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
6086 - {
6087 -- struct Scsi_Host *host = rport_to_shost(rport);
6088 -- scsi_qla_host_t *ha = shost_priv(host);
6089 --
6090 -- rport->dev_loss_tmo = ha->port_down_retry_count + 5;
6091 -+ if (timeout)
6092 -+ rport->dev_loss_tmo = timeout;
6093 -+ else
6094 -+ rport->dev_loss_tmo = 1;
6095 - }
6096 -
6097 - static void
6098 --qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
6099 -+qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
6100 - {
6101 - struct Scsi_Host *host = rport_to_shost(rport);
6102 -- scsi_qla_host_t *ha = shost_priv(host);
6103 -+ fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
6104 -+
6105 -+ qla2x00_abort_fcport_cmds(fcport);
6106 -+
6107 -+ /*
6108 -+ * Transport has effectively 'deleted' the rport, clear
6109 -+ * all local references.
6110 -+ */
6111 -+ spin_lock_irq(host->host_lock);
6112 -+ fcport->rport = NULL;
6113 -+ *((fc_port_t **)rport->dd_data) = NULL;
6114 -+ spin_unlock_irq(host->host_lock);
6115 -+}
6116 -
6117 -- if (timeout)
6118 -- ha->port_down_retry_count = timeout;
6119 -- else
6120 -- ha->port_down_retry_count = 1;
6121 -+static void
6122 -+qla2x00_terminate_rport_io(struct fc_rport *rport)
6123 -+{
6124 -+ fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
6125 -
6126 -- rport->dev_loss_tmo = ha->port_down_retry_count + 5;
6127 -+ qla2x00_abort_fcport_cmds(fcport);
6128 -+ scsi_target_unblock(&rport->dev);
6129 - }
6130 -
6131 - static int
6132 -@@ -1248,11 +1261,12 @@ struct fc_function_template qla2xxx_transport_functions = {
6133 - .get_starget_port_id = qla2x00_get_starget_port_id,
6134 - .show_starget_port_id = 1,
6135 -
6136 -- .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
6137 - .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
6138 - .show_rport_dev_loss_tmo = 1,
6139 -
6140 - .issue_fc_host_lip = qla2x00_issue_lip,
6141 -+ .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
6142 -+ .terminate_rport_io = qla2x00_terminate_rport_io,
6143 - .get_fc_host_stats = qla2x00_get_fc_host_stats,
6144 -
6145 - .vport_create = qla24xx_vport_create,
6146 -@@ -1291,11 +1305,12 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
6147 - .get_starget_port_id = qla2x00_get_starget_port_id,
6148 - .show_starget_port_id = 1,
6149 -
6150 -- .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
6151 - .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
6152 - .show_rport_dev_loss_tmo = 1,
6153 -
6154 - .issue_fc_host_lip = qla2x00_issue_lip,
6155 -+ .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
6156 -+ .terminate_rport_io = qla2x00_terminate_rport_io,
6157 - .get_fc_host_stats = qla2x00_get_fc_host_stats,
6158 - };
6159 -
6160 -diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
6161 -index 8dd6000..7b0ddc8 100644
6162 ---- a/drivers/scsi/qla2xxx/qla_def.h
6163 -+++ b/drivers/scsi/qla2xxx/qla_def.h
6164 -@@ -1544,7 +1544,6 @@ typedef struct fc_port {
6165 - int login_retry;
6166 - atomic_t port_down_timer;
6167 -
6168 -- spinlock_t rport_lock;
6169 - struct fc_rport *rport, *drport;
6170 - u32 supported_classes;
6171 -
6172 -diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
6173 -index 9b4bebe..5a50fb7 100644
6174 ---- a/drivers/scsi/qla2xxx/qla_gbl.h
6175 -+++ b/drivers/scsi/qla2xxx/qla_gbl.h
6176 -@@ -71,6 +71,8 @@ extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
6177 - extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
6178 - uint16_t, uint16_t);
6179 -
6180 -+extern void qla2x00_abort_fcport_cmds(fc_port_t *);
6181 -+
6182 - /*
6183 - * Global Functions in qla_mid.c source file.
6184 - */
6185 -diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
6186 -index bbbc5a6..c7388fa 100644
6187 ---- a/drivers/scsi/qla2xxx/qla_init.c
6188 -+++ b/drivers/scsi/qla2xxx/qla_init.c
6189 -@@ -1864,12 +1864,11 @@ qla2x00_rport_del(void *data)
6190 - {
6191 - fc_port_t *fcport = data;
6192 - struct fc_rport *rport;
6193 -- unsigned long flags;
6194 -
6195 -- spin_lock_irqsave(&fcport->rport_lock, flags);
6196 -+ spin_lock_irq(fcport->ha->host->host_lock);
6197 - rport = fcport->drport;
6198 - fcport->drport = NULL;
6199 -- spin_unlock_irqrestore(&fcport->rport_lock, flags);
6200 -+ spin_unlock_irq(fcport->ha->host->host_lock);
6201 - if (rport)
6202 - fc_remote_port_delete(rport);
6203 - }
6204 -@@ -1898,7 +1897,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
6205 - atomic_set(&fcport->state, FCS_UNCONFIGURED);
6206 - fcport->flags = FCF_RLC_SUPPORT;
6207 - fcport->supported_classes = FC_COS_UNSPECIFIED;
6208 -- spin_lock_init(&fcport->rport_lock);
6209 -
6210 - return fcport;
6211 - }
6212 -@@ -2243,28 +2241,24 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
6213 - {
6214 - struct fc_rport_identifiers rport_ids;
6215 - struct fc_rport *rport;
6216 -- unsigned long flags;
6217 -
6218 - if (fcport->drport)
6219 - qla2x00_rport_del(fcport);
6220 -- if (fcport->rport)
6221 -- return;
6222 -
6223 - rport_ids.node_name = wwn_to_u64(fcport->node_name);
6224 - rport_ids.port_name = wwn_to_u64(fcport->port_name);
6225 - rport_ids.port_id = fcport->d_id.b.domain << 16 |
6226 - fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
6227 - rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
6228 -- rport = fc_remote_port_add(ha->host, 0, &rport_ids);
6229 -+ fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
6230 - if (!rport) {
6231 - qla_printk(KERN_WARNING, ha,
6232 - "Unable to allocate fc remote port!\n");
6233 - return;
6234 - }
6235 -- spin_lock_irqsave(&fcport->rport_lock, flags);
6236 -- fcport->rport = rport;
6237 -+ spin_lock_irq(fcport->ha->host->host_lock);
6238 - *((fc_port_t **)rport->dd_data) = fcport;
6239 -- spin_unlock_irqrestore(&fcport->rport_lock, flags);
6240 -+ spin_unlock_irq(fcport->ha->host->host_lock);
6241 -
6242 - rport->supported_classes = fcport->supported_classes;
6243 -
6244 -diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
6245 -index 48eaa3b..047ee64 100644
6246 ---- a/drivers/scsi/qla2xxx/qla_os.c
6247 -+++ b/drivers/scsi/qla2xxx/qla_os.c
6248 -@@ -388,7 +388,7 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
6249 - }
6250 -
6251 - /* Close window on fcport/rport state-transitioning. */
6252 -- if (!*(fc_port_t **)rport->dd_data) {
6253 -+ if (fcport->drport) {
6254 - cmd->result = DID_IMM_RETRY << 16;
6255 - goto qc_fail_command;
6256 - }
6257 -@@ -455,7 +455,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
6258 - }
6259 -
6260 - /* Close window on fcport/rport state-transitioning. */
6261 -- if (!*(fc_port_t **)rport->dd_data) {
6262 -+ if (fcport->drport) {
6263 - cmd->result = DID_IMM_RETRY << 16;
6264 - goto qc24_fail_command;
6265 - }
6266 -@@ -617,6 +617,40 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
6267 - return (return_status);
6268 - }
6269 -
6270 -+void
6271 -+qla2x00_abort_fcport_cmds(fc_port_t *fcport)
6272 -+{
6273 -+ int cnt;
6274 -+ unsigned long flags;
6275 -+ srb_t *sp;
6276 -+ scsi_qla_host_t *ha = fcport->ha;
6277 -+ scsi_qla_host_t *pha = to_qla_parent(ha);
6278 -+
6279 -+ spin_lock_irqsave(&pha->hardware_lock, flags);
6280 -+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
6281 -+ sp = pha->outstanding_cmds[cnt];
6282 -+ if (!sp)
6283 -+ continue;
6284 -+ if (sp->fcport != fcport)
6285 -+ continue;
6286 -+
6287 -+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
6288 -+ if (ha->isp_ops->abort_command(ha, sp)) {
6289 -+ DEBUG2(qla_printk(KERN_WARNING, ha,
6290 -+ "Abort failed -- %lx\n", sp->cmd->serial_number));
6291 -+ } else {
6292 -+ if (qla2x00_eh_wait_on_command(ha, sp->cmd) !=
6293 -+ QLA_SUCCESS)
6294 -+ DEBUG2(qla_printk(KERN_WARNING, ha,
6295 -+ "Abort failed while waiting -- %lx\n",
6296 -+ sp->cmd->serial_number));
6297 -+
6298 -+ }
6299 -+ spin_lock_irqsave(&pha->hardware_lock, flags);
6300 -+ }
6301 -+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
6302 -+}
6303 -+
6304 - static void
6305 - qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
6306 - {
6307 -@@ -1073,7 +1107,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
6308 - else
6309 - scsi_deactivate_tcq(sdev, ha->max_q_depth);
6310 -
6311 -- rport->dev_loss_tmo = ha->port_down_retry_count + 5;
6312 -+ rport->dev_loss_tmo = ha->port_down_retry_count;
6313 -
6314 - return 0;
6315 - }
6316 -@@ -1813,7 +1847,6 @@ static inline void
6317 - qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
6318 - int defer)
6319 - {
6320 -- unsigned long flags;
6321 - struct fc_rport *rport;
6322 -
6323 - if (!fcport->rport)
6324 -@@ -1821,19 +1854,13 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
6325 -
6326 - rport = fcport->rport;
6327 - if (defer) {
6328 -- spin_lock_irqsave(&fcport->rport_lock, flags);
6329 -+ spin_lock_irq(ha->host->host_lock);
6330 - fcport->drport = rport;
6331 -- fcport->rport = NULL;
6332 -- *(fc_port_t **)rport->dd_data = NULL;
6333 -- spin_unlock_irqrestore(&fcport->rport_lock, flags);
6334 -+ spin_unlock_irq(ha->host->host_lock);
6335 - set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
6336 -- } else {
6337 -- spin_lock_irqsave(&fcport->rport_lock, flags);
6338 -- fcport->rport = NULL;
6339 -- *(fc_port_t **)rport->dd_data = NULL;
6340 -- spin_unlock_irqrestore(&fcport->rport_lock, flags);
6341 -+ qla2xxx_wake_dpc(ha);
6342 -+ } else
6343 - fc_remote_port_delete(rport);
6344 -- }
6345 - }
6346 -
6347 - /*
6348 -diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
6349 -index 75a64a6..b29360e 100644
6350 ---- a/drivers/scsi/scsi_transport_spi.c
6351 -+++ b/drivers/scsi/scsi_transport_spi.c
6352 -@@ -366,12 +366,14 @@ spi_transport_rd_attr(rti, "%d\n");
6353 - spi_transport_rd_attr(pcomp_en, "%d\n");
6354 - spi_transport_rd_attr(hold_mcs, "%d\n");
6355 -
6356 --/* we only care about the first child device so we return 1 */
6357 -+/* we only care about the first child device that's a real SCSI device
6358 -+ * so we return 1 to terminate the iteration when we find it */
6359 - static int child_iter(struct device *dev, void *data)
6360 - {
6361 -- struct scsi_device *sdev = to_scsi_device(dev);
6362 -+ if (!scsi_is_sdev_device(dev))
6363 -+ return 0;
6364 -
6365 -- spi_dv_device(sdev);
6366 -+ spi_dv_device(to_scsi_device(dev));
6367 - return 1;
6368 - }
6369 -
6370 -diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
6371 -index 0fe031f..1bcf3c3 100644
6372 ---- a/drivers/scsi/ses.c
6373 -+++ b/drivers/scsi/ses.c
6374 -@@ -345,14 +345,14 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
6375 - return 0;
6376 - }
6377 -
6378 --#define VPD_INQUIRY_SIZE 512
6379 -+#define VPD_INQUIRY_SIZE 36
6380 -
6381 - static void ses_match_to_enclosure(struct enclosure_device *edev,
6382 - struct scsi_device *sdev)
6383 - {
6384 - unsigned char *buf = kmalloc(VPD_INQUIRY_SIZE, GFP_KERNEL);
6385 - unsigned char *desc;
6386 -- int len;
6387 -+ u16 vpd_len;
6388 - struct efd efd = {
6389 - .addr = 0,
6390 - };
6391 -@@ -372,9 +372,19 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
6392 - VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES))
6393 - goto free;
6394 -
6395 -- len = (buf[2] << 8) + buf[3];
6396 -+ vpd_len = (buf[2] << 8) + buf[3];
6397 -+ kfree(buf);
6398 -+ buf = kmalloc(vpd_len, GFP_KERNEL);
6399 -+ if (!buf)
6400 -+ return;
6401 -+ cmd[3] = vpd_len >> 8;
6402 -+ cmd[4] = vpd_len & 0xff;
6403 -+ if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
6404 -+ vpd_len, NULL, SES_TIMEOUT, SES_RETRIES))
6405 -+ goto free;
6406 -+
6407 - desc = buf + 4;
6408 -- while (desc < buf + len) {
6409 -+ while (desc < buf + vpd_len) {
6410 - enum scsi_protocol proto = desc[0] >> 4;
6411 - u8 code_set = desc[0] & 0x0f;
6412 - u8 piv = desc[1] & 0x80;
6413 -diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
6414 -index fe47d14..2fdbc10 100644
6415 ---- a/drivers/usb/core/message.c
6416 -+++ b/drivers/usb/core/message.c
6417 -@@ -1091,8 +1091,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
6418 - continue;
6419 - dev_dbg(&dev->dev, "unregistering interface %s\n",
6420 - interface->dev.bus_id);
6421 -- device_del(&interface->dev);
6422 - usb_remove_sysfs_intf_files(interface);
6423 -+ device_del(&interface->dev);
6424 - }
6425 -
6426 - /* Now that the interfaces are unbound, nobody should
6427 -diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
6428 -index 0ff4a39..7ee2abc 100644
6429 ---- a/drivers/usb/serial/ftdi_sio.c
6430 -+++ b/drivers/usb/serial/ftdi_sio.c
6431 -@@ -553,6 +553,7 @@ static struct usb_device_id id_table_combined [] = {
6432 - { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
6433 - { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
6434 - { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
6435 -+ { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
6436 - { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
6437 - { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
6438 - { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
6439 -@@ -636,6 +637,10 @@ static struct usb_device_id id_table_combined [] = {
6440 - .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
6441 - { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID),
6442 - .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
6443 -+ { USB_DEVICE(FTDI_VID, LMI_LM3S_DEVEL_BOARD_PID),
6444 -+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
6445 -+ { USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID),
6446 -+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
6447 - { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
6448 - { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
6449 - { }, /* Optional parameter entry */
6450 -diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
6451 -index 8302eca..ac23a3a 100644
6452 ---- a/drivers/usb/serial/ftdi_sio.h
6453 -+++ b/drivers/usb/serial/ftdi_sio.h
6454 -@@ -524,6 +524,7 @@
6455 - #define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */
6456 - #define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */
6457 - #define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */
6458 -+#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */
6459 - #define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */
6460 -
6461 - /*
6462 -@@ -815,6 +816,11 @@
6463 - #define OLIMEX_VID 0x15BA
6464 - #define OLIMEX_ARM_USB_OCD_PID 0x0003
6465 -
6466 -+/* Luminary Micro Stellaris Boards, VID = FTDI_VID */
6467 -+/* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */
6468 -+#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
6469 -+#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
6470 -+
6471 - /* www.elsterelectricity.com Elster Unicom III Optical Probe */
6472 - #define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */
6473 -
6474 -diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
6475 -index 2a0dd1b..63287ad 100644
6476 ---- a/drivers/usb/serial/pl2303.c
6477 -+++ b/drivers/usb/serial/pl2303.c
6478 -@@ -89,7 +89,6 @@ static struct usb_device_id id_table [] = {
6479 - { USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) },
6480 - { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) },
6481 - { USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) },
6482 -- { USB_DEVICE(HL340_VENDOR_ID, HL340_PRODUCT_ID) },
6483 - { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
6484 - { } /* Terminating entry */
6485 - };
6486 -diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
6487 -index 6ac3bbc..a3bd039 100644
6488 ---- a/drivers/usb/serial/pl2303.h
6489 -+++ b/drivers/usb/serial/pl2303.h
6490 -@@ -107,10 +107,6 @@
6491 - #define COREGA_VENDOR_ID 0x07aa
6492 - #define COREGA_PRODUCT_ID 0x002a
6493 -
6494 --/* HL HL-340 (ID: 4348:5523) */
6495 --#define HL340_VENDOR_ID 0x4348
6496 --#define HL340_PRODUCT_ID 0x5523
6497 --
6498 - /* Y.C. Cable U.S.A., Inc - USB to RS-232 */
6499 - #define YCCABLE_VENDOR_ID 0x05ad
6500 - #define YCCABLE_PRODUCT_ID 0x0fba
6501 -diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
6502 -index db1db4c..38034e2 100644
6503 ---- a/drivers/usb/serial/usb-serial.c
6504 -+++ b/drivers/usb/serial/usb-serial.c
6505 -@@ -119,9 +119,6 @@ static void return_serial(struct usb_serial *serial)
6506 -
6507 - dbg("%s", __func__);
6508 -
6509 -- if (serial == NULL)
6510 -- return;
6511 --
6512 - for (i = 0; i < serial->num_ports; ++i) {
6513 - serial_table[serial->minor + i] = NULL;
6514 - }
6515 -@@ -140,7 +137,8 @@ static void destroy_serial(struct kref *kref)
6516 - serial->type->shutdown(serial);
6517 -
6518 - /* return the minor range that this device had */
6519 -- return_serial(serial);
6520 -+ if (serial->minor != SERIAL_TTY_NO_MINOR)
6521 -+ return_serial(serial);
6522 -
6523 - for (i = 0; i < serial->num_ports; ++i)
6524 - serial->port[i]->open_count = 0;
6525 -@@ -562,6 +560,7 @@ static struct usb_serial * create_serial (struct usb_device *dev,
6526 - serial->interface = interface;
6527 - kref_init(&serial->kref);
6528 - mutex_init(&serial->disc_mutex);
6529 -+ serial->minor = SERIAL_TTY_NO_MINOR;
6530 -
6531 - return serial;
6532 - }
6533 -diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
6534 -index 3fcde9f..d8d6633 100644
6535 ---- a/drivers/usb/storage/scsiglue.c
6536 -+++ b/drivers/usb/storage/scsiglue.c
6537 -@@ -73,7 +73,6 @@ static const char* host_info(struct Scsi_Host *host)
6538 - static int slave_alloc (struct scsi_device *sdev)
6539 - {
6540 - struct us_data *us = host_to_us(sdev->host);
6541 -- struct usb_host_endpoint *bulk_in_ep;
6542 -
6543 - /*
6544 - * Set the INQUIRY transfer length to 36. We don't use any of
6545 -@@ -82,16 +81,22 @@ static int slave_alloc (struct scsi_device *sdev)
6546 - */
6547 - sdev->inquiry_len = 36;
6548 -
6549 -- /* Scatter-gather buffers (all but the last) must have a length
6550 -- * divisible by the bulk maxpacket size. Otherwise a data packet
6551 -- * would end up being short, causing a premature end to the data
6552 -- * transfer. We'll use the maxpacket value of the bulk-IN pipe
6553 -- * to set the SCSI device queue's DMA alignment mask.
6554 -+ /* USB has unusual DMA-alignment requirements: Although the
6555 -+ * starting address of each scatter-gather element doesn't matter,
6556 -+ * the length of each element except the last must be divisible
6557 -+ * by the Bulk maxpacket value. There's currently no way to
6558 -+ * express this by block-layer constraints, so we'll cop out
6559 -+ * and simply require addresses to be aligned at 512-byte
6560 -+ * boundaries. This is okay since most block I/O involves
6561 -+ * hardware sectors that are multiples of 512 bytes in length,
6562 -+ * and since host controllers up through USB 2.0 have maxpacket
6563 -+ * values no larger than 512.
6564 -+ *
6565 -+ * But it doesn't suffice for Wireless USB, where Bulk maxpacket
6566 -+ * values can be as large as 2048. To make that work properly
6567 -+ * will require changes to the block layer.
6568 - */
6569 -- bulk_in_ep = us->pusb_dev->ep_in[usb_pipeendpoint(us->recv_bulk_pipe)];
6570 -- blk_queue_update_dma_alignment(sdev->request_queue,
6571 -- le16_to_cpu(bulk_in_ep->desc.wMaxPacketSize) - 1);
6572 -- /* wMaxPacketSize must be a power of 2 */
6573 -+ blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
6574 -
6575 - /*
6576 - * The UFI spec treates the Peripheral Qualifier bits in an
6577 -diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
6578 -index 6610d2d..f2062e1 100644
6579 ---- a/drivers/usb/storage/transport.c
6580 -+++ b/drivers/usb/storage/transport.c
6581 -@@ -1034,8 +1034,21 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
6582 -
6583 - /* try to compute the actual residue, based on how much data
6584 - * was really transferred and what the device tells us */
6585 -- if (residue) {
6586 -- if (!(us->flags & US_FL_IGNORE_RESIDUE)) {
6587 -+ if (residue && !(us->flags & US_FL_IGNORE_RESIDUE)) {
6588 -+
6589 -+ /* Heuristically detect devices that generate bogus residues
6590 -+ * by seeing what happens with INQUIRY and READ CAPACITY
6591 -+ * commands.
6592 -+ */
6593 -+ if (bcs->Status == US_BULK_STAT_OK &&
6594 -+ scsi_get_resid(srb) == 0 &&
6595 -+ ((srb->cmnd[0] == INQUIRY &&
6596 -+ transfer_length == 36) ||
6597 -+ (srb->cmnd[0] == READ_CAPACITY &&
6598 -+ transfer_length == 8))) {
6599 -+ us->flags |= US_FL_IGNORE_RESIDUE;
6600 -+
6601 -+ } else {
6602 - residue = min(residue, transfer_length);
6603 - scsi_set_resid(srb, max(scsi_get_resid(srb),
6604 - (int) residue));
6605 -diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
6606 -index 39a7c11..6a04476 100644
6607 ---- a/drivers/usb/storage/unusual_devs.h
6608 -+++ b/drivers/usb/storage/unusual_devs.h
6609 -@@ -358,14 +358,14 @@ UNUSUAL_DEV( 0x04b0, 0x040f, 0x0100, 0x0200,
6610 - US_FL_FIX_CAPACITY),
6611 -
6612 - /* Reported by Emil Larsson <emil@××××.net> */
6613 --UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0110,
6614 -+UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0111,
6615 - "NIKON",
6616 - "NIKON DSC D80",
6617 - US_SC_DEVICE, US_PR_DEVICE, NULL,
6618 - US_FL_FIX_CAPACITY),
6619 -
6620 - /* Reported by Ortwin Glueck <odi@×××.ch> */
6621 --UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0110,
6622 -+UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0111,
6623 - "NIKON",
6624 - "NIKON DSC D40",
6625 - US_SC_DEVICE, US_PR_DEVICE, NULL,
6626 -@@ -1187,6 +1187,13 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff,
6627 - US_SC_DEVICE, US_PR_DEVICE, NULL,
6628 - US_FL_FIX_INQUIRY ),
6629 -
6630 -+/* Reported by Rauch Wolke <rauchwolke@×××.net> */
6631 -+UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
6632 -+ "Simple Tech/Datafab",
6633 -+ "CF+SM Reader",
6634 -+ US_SC_DEVICE, US_PR_DEVICE, NULL,
6635 -+ US_FL_IGNORE_RESIDUE ),
6636 -+
6637 - /* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant
6638 - * to the USB storage specification in two ways:
6639 - * - They tell us they are using transport protocol CBI. In reality they
6640 -@@ -1758,6 +1765,13 @@ UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
6641 - US_SC_DEVICE, US_PR_DEVICE, NULL,
6642 - US_FL_FIX_CAPACITY ),
6643 -
6644 -+/* Reported by Andrey Rahmatullin <wrar@××××××××.org> */
6645 -+UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
6646 -+ "iRiver",
6647 -+ "MP3 T10",
6648 -+ US_SC_DEVICE, US_PR_DEVICE, NULL,
6649 -+ US_FL_IGNORE_RESIDUE ),
6650 -+
6651 - /*
6652 - * David Härdeman <david@××××.com>
6653 - * The key makes the SCSI stack print confusing (but harmless) messages
6654 -diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
6655 -index 5001bd4..21d61b3 100644
6656 ---- a/drivers/video/arkfb.c
6657 -+++ b/drivers/video/arkfb.c
6658 -@@ -958,20 +958,20 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
6659 - /* Prepare PCI device */
6660 - rc = pci_enable_device(dev);
6661 - if (rc < 0) {
6662 -- dev_err(info->dev, "cannot enable PCI device\n");
6663 -+ dev_err(info->device, "cannot enable PCI device\n");
6664 - goto err_enable_device;
6665 - }
6666 -
6667 - rc = pci_request_regions(dev, "arkfb");
6668 - if (rc < 0) {
6669 -- dev_err(info->dev, "cannot reserve framebuffer region\n");
6670 -+ dev_err(info->device, "cannot reserve framebuffer region\n");
6671 - goto err_request_regions;
6672 - }
6673 -
6674 - par->dac = ics5342_init(ark_dac_read_regs, ark_dac_write_regs, info);
6675 - if (! par->dac) {
6676 - rc = -ENOMEM;
6677 -- dev_err(info->dev, "RAMDAC initialization failed\n");
6678 -+ dev_err(info->device, "RAMDAC initialization failed\n");
6679 - goto err_dac;
6680 - }
6681 -
6682 -@@ -982,7 +982,7 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
6683 - info->screen_base = pci_iomap(dev, 0, 0);
6684 - if (! info->screen_base) {
6685 - rc = -ENOMEM;
6686 -- dev_err(info->dev, "iomap for framebuffer failed\n");
6687 -+ dev_err(info->device, "iomap for framebuffer failed\n");
6688 - goto err_iomap;
6689 - }
6690 -
6691 -@@ -1004,19 +1004,19 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
6692 - rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
6693 - if (! ((rc == 1) || (rc == 2))) {
6694 - rc = -EINVAL;
6695 -- dev_err(info->dev, "mode %s not found\n", mode_option);
6696 -+ dev_err(info->device, "mode %s not found\n", mode_option);
6697 - goto err_find_mode;
6698 - }
6699 -
6700 - rc = fb_alloc_cmap(&info->cmap, 256, 0);
6701 - if (rc < 0) {
6702 -- dev_err(info->dev, "cannot allocate colormap\n");
6703 -+ dev_err(info->device, "cannot allocate colormap\n");
6704 - goto err_alloc_cmap;
6705 - }
6706 -
6707 - rc = register_framebuffer(info);
6708 - if (rc < 0) {
6709 -- dev_err(info->dev, "cannot register framebugger\n");
6710 -+ dev_err(info->device, "cannot register framebugger\n");
6711 - goto err_reg_fb;
6712 - }
6713 -
6714 -@@ -1090,7 +1090,7 @@ static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
6715 - struct fb_info *info = pci_get_drvdata(dev);
6716 - struct arkfb_info *par = info->par;
6717 -
6718 -- dev_info(info->dev, "suspend\n");
6719 -+ dev_info(info->device, "suspend\n");
6720 -
6721 - acquire_console_sem();
6722 - mutex_lock(&(par->open_lock));
6723 -@@ -1121,7 +1121,7 @@ static int ark_pci_resume (struct pci_dev* dev)
6724 - struct fb_info *info = pci_get_drvdata(dev);
6725 - struct arkfb_info *par = info->par;
6726 -
6727 -- dev_info(info->dev, "resume\n");
6728 -+ dev_info(info->device, "resume\n");
6729 -
6730 - acquire_console_sem();
6731 - mutex_lock(&(par->open_lock));
6732 -diff --git a/drivers/video/aty/radeon_accel.c b/drivers/video/aty/radeon_accel.c
6733 -index 3ca27cb..aa95f83 100644
6734 ---- a/drivers/video/aty/radeon_accel.c
6735 -+++ b/drivers/video/aty/radeon_accel.c
6736 -@@ -55,6 +55,10 @@ static void radeonfb_prim_fillrect(struct radeonfb_info *rinfo,
6737 - OUTREG(DP_WRITE_MSK, 0xffffffff);
6738 - OUTREG(DP_CNTL, (DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM));
6739 -
6740 -+ radeon_fifo_wait(2);
6741 -+ OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
6742 -+ OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
6743 -+
6744 - radeon_fifo_wait(2);
6745 - OUTREG(DST_Y_X, (region->dy << 16) | region->dx);
6746 - OUTREG(DST_WIDTH_HEIGHT, (region->width << 16) | region->height);
6747 -@@ -116,6 +120,10 @@ static void radeonfb_prim_copyarea(struct radeonfb_info *rinfo,
6748 - OUTREG(DP_CNTL, (xdir>=0 ? DST_X_LEFT_TO_RIGHT : 0)
6749 - | (ydir>=0 ? DST_Y_TOP_TO_BOTTOM : 0));
6750 -
6751 -+ radeon_fifo_wait(2);
6752 -+ OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
6753 -+ OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
6754 -+
6755 - radeon_fifo_wait(3);
6756 - OUTREG(SRC_Y_X, (sy << 16) | sx);
6757 - OUTREG(DST_Y_X, (dy << 16) | dx);
6758 -@@ -241,8 +249,8 @@ void radeonfb_engine_reset(struct radeonfb_info *rinfo)
6759 - INREG(HOST_PATH_CNTL);
6760 - OUTREG(HOST_PATH_CNTL, host_path_cntl);
6761 -
6762 -- if (rinfo->family != CHIP_FAMILY_R300 ||
6763 -- rinfo->family != CHIP_FAMILY_R350 ||
6764 -+ if (rinfo->family != CHIP_FAMILY_R300 &&
6765 -+ rinfo->family != CHIP_FAMILY_R350 &&
6766 - rinfo->family != CHIP_FAMILY_RV350)
6767 - OUTREG(RBBM_SOFT_RESET, rbbm_soft_reset);
6768 -
6769 -diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
6770 -index 89da27b..2ad06b0 100644
6771 ---- a/drivers/video/matrox/matroxfb_maven.c
6772 -+++ b/drivers/video/matrox/matroxfb_maven.c
6773 -@@ -1266,7 +1266,7 @@ static int maven_detect_client(struct i2c_adapter* adapter, int address, int kin
6774 - ERROR4:;
6775 - i2c_detach_client(new_client);
6776 - ERROR3:;
6777 -- kfree(new_client);
6778 -+ kfree(data);
6779 - ERROR0:;
6780 - return err;
6781 - }
6782 -diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
6783 -index 2972f11..8361bd0 100644
6784 ---- a/drivers/video/s3fb.c
6785 -+++ b/drivers/video/s3fb.c
6786 -@@ -903,13 +903,13 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
6787 - /* Prepare PCI device */
6788 - rc = pci_enable_device(dev);
6789 - if (rc < 0) {
6790 -- dev_err(info->dev, "cannot enable PCI device\n");
6791 -+ dev_err(info->device, "cannot enable PCI device\n");
6792 - goto err_enable_device;
6793 - }
6794 -
6795 - rc = pci_request_regions(dev, "s3fb");
6796 - if (rc < 0) {
6797 -- dev_err(info->dev, "cannot reserve framebuffer region\n");
6798 -+ dev_err(info->device, "cannot reserve framebuffer region\n");
6799 - goto err_request_regions;
6800 - }
6801 -
6802 -@@ -921,7 +921,7 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
6803 - info->screen_base = pci_iomap(dev, 0, 0);
6804 - if (! info->screen_base) {
6805 - rc = -ENOMEM;
6806 -- dev_err(info->dev, "iomap for framebuffer failed\n");
6807 -+ dev_err(info->device, "iomap for framebuffer failed\n");
6808 - goto err_iomap;
6809 - }
6810 -
6811 -@@ -965,19 +965,19 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
6812 - rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
6813 - if (! ((rc == 1) || (rc == 2))) {
6814 - rc = -EINVAL;
6815 -- dev_err(info->dev, "mode %s not found\n", mode_option);
6816 -+ dev_err(info->device, "mode %s not found\n", mode_option);
6817 - goto err_find_mode;
6818 - }
6819 -
6820 - rc = fb_alloc_cmap(&info->cmap, 256, 0);
6821 - if (rc < 0) {
6822 -- dev_err(info->dev, "cannot allocate colormap\n");
6823 -+ dev_err(info->device, "cannot allocate colormap\n");
6824 - goto err_alloc_cmap;
6825 - }
6826 -
6827 - rc = register_framebuffer(info);
6828 - if (rc < 0) {
6829 -- dev_err(info->dev, "cannot register framebuffer\n");
6830 -+ dev_err(info->device, "cannot register framebuffer\n");
6831 - goto err_reg_fb;
6832 - }
6833 -
6834 -@@ -1053,7 +1053,7 @@ static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state)
6835 - struct fb_info *info = pci_get_drvdata(dev);
6836 - struct s3fb_info *par = info->par;
6837 -
6838 -- dev_info(info->dev, "suspend\n");
6839 -+ dev_info(info->device, "suspend\n");
6840 -
6841 - acquire_console_sem();
6842 - mutex_lock(&(par->open_lock));
6843 -@@ -1085,7 +1085,7 @@ static int s3_pci_resume(struct pci_dev* dev)
6844 - struct s3fb_info *par = info->par;
6845 - int err;
6846 -
6847 -- dev_info(info->dev, "resume\n");
6848 -+ dev_info(info->device, "resume\n");
6849 -
6850 - acquire_console_sem();
6851 - mutex_lock(&(par->open_lock));
6852 -@@ -1102,7 +1102,7 @@ static int s3_pci_resume(struct pci_dev* dev)
6853 - if (err) {
6854 - mutex_unlock(&(par->open_lock));
6855 - release_console_sem();
6856 -- dev_err(info->dev, "error %d enabling device for resume\n", err);
6857 -+ dev_err(info->device, "error %d enabling device for resume\n", err);
6858 - return err;
6859 - }
6860 - pci_set_master(dev);
6861 -diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
6862 -index 536ab11..f5f282d 100644
6863 ---- a/drivers/video/vt8623fb.c
6864 -+++ b/drivers/video/vt8623fb.c
6865 -@@ -677,13 +677,13 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
6866 -
6867 - rc = pci_enable_device(dev);
6868 - if (rc < 0) {
6869 -- dev_err(info->dev, "cannot enable PCI device\n");
6870 -+ dev_err(info->device, "cannot enable PCI device\n");
6871 - goto err_enable_device;
6872 - }
6873 -
6874 - rc = pci_request_regions(dev, "vt8623fb");
6875 - if (rc < 0) {
6876 -- dev_err(info->dev, "cannot reserve framebuffer region\n");
6877 -+ dev_err(info->device, "cannot reserve framebuffer region\n");
6878 - goto err_request_regions;
6879 - }
6880 -
6881 -@@ -696,14 +696,14 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
6882 - info->screen_base = pci_iomap(dev, 0, 0);
6883 - if (! info->screen_base) {
6884 - rc = -ENOMEM;
6885 -- dev_err(info->dev, "iomap for framebuffer failed\n");
6886 -+ dev_err(info->device, "iomap for framebuffer failed\n");
6887 - goto err_iomap_1;
6888 - }
6889 -
6890 - par->mmio_base = pci_iomap(dev, 1, 0);
6891 - if (! par->mmio_base) {
6892 - rc = -ENOMEM;
6893 -- dev_err(info->dev, "iomap for MMIO failed\n");
6894 -+ dev_err(info->device, "iomap for MMIO failed\n");
6895 - goto err_iomap_2;
6896 - }
6897 -
6898 -@@ -714,7 +714,7 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
6899 - if ((16 <= memsize1) && (memsize1 <= 64) && (memsize1 == memsize2))
6900 - info->screen_size = memsize1 << 20;
6901 - else {
6902 -- dev_err(info->dev, "memory size detection failed (%x %x), suppose 16 MB\n", memsize1, memsize2);
6903 -+ dev_err(info->device, "memory size detection failed (%x %x), suppose 16 MB\n", memsize1, memsize2);
6904 - info->screen_size = 16 << 20;
6905 - }
6906 -
6907 -@@ -731,19 +731,19 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
6908 - rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
6909 - if (! ((rc == 1) || (rc == 2))) {
6910 - rc = -EINVAL;
6911 -- dev_err(info->dev, "mode %s not found\n", mode_option);
6912 -+ dev_err(info->device, "mode %s not found\n", mode_option);
6913 - goto err_find_mode;
6914 - }
6915 -
6916 - rc = fb_alloc_cmap(&info->cmap, 256, 0);
6917 - if (rc < 0) {
6918 -- dev_err(info->dev, "cannot allocate colormap\n");
6919 -+ dev_err(info->device, "cannot allocate colormap\n");
6920 - goto err_alloc_cmap;
6921 - }
6922 -
6923 - rc = register_framebuffer(info);
6924 - if (rc < 0) {
6925 -- dev_err(info->dev, "cannot register framebugger\n");
6926 -+ dev_err(info->device, "cannot register framebugger\n");
6927 - goto err_reg_fb;
6928 - }
6929 -
6930 -@@ -817,7 +817,7 @@ static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
6931 - struct fb_info *info = pci_get_drvdata(dev);
6932 - struct vt8623fb_info *par = info->par;
6933 -
6934 -- dev_info(info->dev, "suspend\n");
6935 -+ dev_info(info->device, "suspend\n");
6936 -
6937 - acquire_console_sem();
6938 - mutex_lock(&(par->open_lock));
6939 -@@ -848,7 +848,7 @@ static int vt8623_pci_resume(struct pci_dev* dev)
6940 - struct fb_info *info = pci_get_drvdata(dev);
6941 - struct vt8623fb_info *par = info->par;
6942 -
6943 -- dev_info(info->dev, "resume\n");
6944 -+ dev_info(info->device, "resume\n");
6945 -
6946 - acquire_console_sem();
6947 - mutex_lock(&(par->open_lock));
6948 -diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
6949 -index f58e41d..4276546 100644
6950 ---- a/fs/cifs/asn1.c
6951 -+++ b/fs/cifs/asn1.c
6952 -@@ -400,7 +400,7 @@ asn1_oid_decode(struct asn1_ctx *ctx,
6953 - size = eoc - ctx->pointer + 1;
6954 -
6955 - /* first subid actually encodes first two subids */
6956 -- if (size < 2 || size > ULONG_MAX/sizeof(unsigned long))
6957 -+ if (size < 2 || size > UINT_MAX/sizeof(unsigned long))
6958 - return 0;
6959 -
6960 - *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
6961 -diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
6962 -index 7013aaf..2434ab0 100644
6963 ---- a/fs/cifs/cifs_spnego.c
6964 -+++ b/fs/cifs/cifs_spnego.c
6965 -@@ -66,8 +66,8 @@ struct key_type cifs_spnego_key_type = {
6966 - .describe = user_describe,
6967 - };
6968 -
6969 --#define MAX_VER_STR_LEN 9 /* length of longest version string e.g.
6970 -- strlen(";ver=0xFF") */
6971 -+#define MAX_VER_STR_LEN 8 /* length of longest version string e.g.
6972 -+ strlen("ver=0xFF") */
6973 - #define MAX_MECH_STR_LEN 13 /* length of longest security mechanism name, eg
6974 - in future could have strlen(";sec=ntlmsspi") */
6975 - #define MAX_IPV6_ADDR_LEN 42 /* eg FEDC:BA98:7654:3210:FEDC:BA98:7654:3210/60 */
6976 -@@ -81,11 +81,15 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
6977 - struct key *spnego_key;
6978 - const char *hostname = server->hostname;
6979 -
6980 -- /* BB: come up with better scheme for determining length */
6981 -- /* length of fields (with semicolons): ver=0xyz ipv4= ipaddress host=
6982 -- hostname sec=mechanism uid=0x uid */
6983 -- desc_len = MAX_VER_STR_LEN + 5 + MAX_IPV6_ADDR_LEN + 1 + 6 +
6984 -- strlen(hostname) + MAX_MECH_STR_LEN + 8 + (sizeof(uid_t) * 2);
6985 -+ /* length of fields (with semicolons): ver=0xyz ip4=ipaddress
6986 -+ host=hostname sec=mechanism uid=0xFF user=username */
6987 -+ desc_len = MAX_VER_STR_LEN +
6988 -+ 6 /* len of "host=" */ + strlen(hostname) +
6989 -+ 5 /* len of ";ipv4=" */ + MAX_IPV6_ADDR_LEN +
6990 -+ MAX_MECH_STR_LEN +
6991 -+ 7 /* len of ";uid=0x" */ + (sizeof(uid_t) * 2) +
6992 -+ 6 /* len of ";user=" */ + strlen(sesInfo->userName) + 1;
6993 -+
6994 - spnego_key = ERR_PTR(-ENOMEM);
6995 - description = kzalloc(desc_len, GFP_KERNEL);
6996 - if (description == NULL)
6997 -diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
6998 -index 86b4d5f..6203609 100644
6999 ---- a/fs/cifs/cifsfs.c
7000 -+++ b/fs/cifs/cifsfs.c
7001 -@@ -175,6 +175,8 @@ out_no_root:
7002 - if (inode)
7003 - iput(inode);
7004 -
7005 -+ cifs_umount(sb, cifs_sb);
7006 -+
7007 - out_mount_failed:
7008 - if (cifs_sb) {
7009 - #ifdef CONFIG_CIFS_DFS_UPCALL
7010 -diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
7011 -index 2e904bd..227c553 100644
7012 ---- a/fs/cifs/inode.c
7013 -+++ b/fs/cifs/inode.c
7014 -@@ -649,6 +649,7 @@ struct inode *cifs_iget(struct super_block *sb, unsigned long ino)
7015 - inode->i_fop = &simple_dir_operations;
7016 - inode->i_uid = cifs_sb->mnt_uid;
7017 - inode->i_gid = cifs_sb->mnt_gid;
7018 -+ } else if (rc) {
7019 - _FreeXid(xid);
7020 - iget_failed(inode);
7021 - return ERR_PTR(rc);
7022 -diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
7023 -index d837893..47f9583 100644
7024 ---- a/include/asm-sparc64/futex.h
7025 -+++ b/include/asm-sparc64/futex.h
7026 -@@ -59,7 +59,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
7027 - __futex_cas_op("or\t%2, %4, %1", ret, oldval, uaddr, oparg);
7028 - break;
7029 - case FUTEX_OP_ANDN:
7030 -- __futex_cas_op("and\t%2, %4, %1", ret, oldval, uaddr, oparg);
7031 -+ __futex_cas_op("andn\t%2, %4, %1", ret, oldval, uaddr, oparg);
7032 - break;
7033 - case FUTEX_OP_XOR:
7034 - __futex_cas_op("xor\t%2, %4, %1", ret, oldval, uaddr, oparg);
7035 -diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h
7036 -index 0bb9bf5..630eb4e 100644
7037 ---- a/include/asm-sparc64/irq.h
7038 -+++ b/include/asm-sparc64/irq.h
7039 -@@ -90,4 +90,8 @@ static inline unsigned long get_softint(void)
7040 - return retval;
7041 - }
7042 -
7043 -+extern void *hardirq_stack[NR_CPUS];
7044 -+extern void *softirq_stack[NR_CPUS];
7045 -+#define __ARCH_HAS_DO_SOFTIRQ
7046 -+
7047 - #endif
7048 -diff --git a/include/asm-sparc64/ptrace.h b/include/asm-sparc64/ptrace.h
7049 -index b163da7..4f18096 100644
7050 ---- a/include/asm-sparc64/ptrace.h
7051 -+++ b/include/asm-sparc64/ptrace.h
7052 -@@ -134,9 +134,9 @@ struct global_reg_snapshot {
7053 - unsigned long tnpc;
7054 - unsigned long o7;
7055 - unsigned long i7;
7056 -+ unsigned long rpc;
7057 - struct thread_info *thread;
7058 - unsigned long pad1;
7059 -- unsigned long pad2;
7060 - };
7061 -
7062 - #define __ARCH_WANT_COMPAT_SYS_PTRACE
7063 -@@ -314,9 +314,9 @@ extern void __show_regs(struct pt_regs *);
7064 - #define GR_SNAP_TNPC 0x10
7065 - #define GR_SNAP_O7 0x18
7066 - #define GR_SNAP_I7 0x20
7067 --#define GR_SNAP_THREAD 0x28
7068 --#define GR_SNAP_PAD1 0x30
7069 --#define GR_SNAP_PAD2 0x38
7070 -+#define GR_SNAP_RPC 0x28
7071 -+#define GR_SNAP_THREAD 0x30
7072 -+#define GR_SNAP_PAD1 0x38
7073 -
7074 - #endif /* __KERNEL__ */
7075 -
7076 -diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
7077 -index 37672f7..4b683af 100644
7078 ---- a/include/asm-x86/i387.h
7079 -+++ b/include/asm-x86/i387.h
7080 -@@ -13,6 +13,7 @@
7081 - #include <linux/sched.h>
7082 - #include <linux/kernel_stat.h>
7083 - #include <linux/regset.h>
7084 -+#include <linux/hardirq.h>
7085 - #include <asm/asm.h>
7086 - #include <asm/processor.h>
7087 - #include <asm/sigcontext.h>
7088 -@@ -290,6 +291,37 @@ static inline void kernel_fpu_end(void)
7089 - preempt_enable();
7090 - }
7091 -
7092 -+/*
7093 -+ * Some instructions like VIA's padlock instructions generate a spurious
7094 -+ * DNA fault but don't modify SSE registers. And these instructions
7095 -+ * get used from interrupt context aswell. To prevent these kernel instructions
7096 -+ * in interrupt context interact wrongly with other user/kernel fpu usage, we
7097 -+ * should use them only in the context of irq_ts_save/restore()
7098 -+ */
7099 -+static inline int irq_ts_save(void)
7100 -+{
7101 -+ /*
7102 -+ * If we are in process context, we are ok to take a spurious DNA fault.
7103 -+ * Otherwise, doing clts() in process context require pre-emption to
7104 -+ * be disabled or some heavy lifting like kernel_fpu_begin()
7105 -+ */
7106 -+ if (!in_interrupt())
7107 -+ return 0;
7108 -+
7109 -+ if (read_cr0() & X86_CR0_TS) {
7110 -+ clts();
7111 -+ return 1;
7112 -+ }
7113 -+
7114 -+ return 0;
7115 -+}
7116 -+
7117 -+static inline void irq_ts_restore(int TS_state)
7118 -+{
7119 -+ if (TS_state)
7120 -+ stts();
7121 -+}
7122 -+
7123 - #ifdef CONFIG_X86_64
7124 -
7125 - static inline void save_init_fpu(struct task_struct *tsk)
7126 -diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
7127 -index 21e89bf..bf2a3d2 100644
7128 ---- a/include/asm-x86/spinlock.h
7129 -+++ b/include/asm-x86/spinlock.h
7130 -@@ -65,7 +65,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
7131 - {
7132 - int tmp = ACCESS_ONCE(lock->slock);
7133 -
7134 -- return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
7135 -+ return (((tmp >> 8) - tmp) & 0xff) > 1;
7136 - }
7137 -
7138 - static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
7139 -@@ -129,7 +129,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
7140 - {
7141 - int tmp = ACCESS_ONCE(lock->slock);
7142 -
7143 -- return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
7144 -+ return (((tmp >> 16) - tmp) & 0xffff) > 1;
7145 - }
7146 -
7147 - static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
7148 -diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
7149 -index 8f891cb..4a6583d 100644
7150 ---- a/include/linux/usb/serial.h
7151 -+++ b/include/linux/usb/serial.h
7152 -@@ -17,7 +17,8 @@
7153 - #include <linux/mutex.h>
7154 -
7155 - #define SERIAL_TTY_MAJOR 188 /* Nice legal number now */
7156 --#define SERIAL_TTY_MINORS 255 /* loads of devices :) */
7157 -+#define SERIAL_TTY_MINORS 254 /* loads of devices :) */
7158 -+#define SERIAL_TTY_NO_MINOR 255 /* No minor was assigned */
7159 -
7160 - /* The maximum number of ports one device can grab at once */
7161 - #define MAX_NUM_PORTS 8
7162 -diff --git a/include/video/radeon.h b/include/video/radeon.h
7163 -index 83467e1..099ffa5 100644
7164 ---- a/include/video/radeon.h
7165 -+++ b/include/video/radeon.h
7166 -@@ -527,8 +527,9 @@
7167 -
7168 -
7169 - /* DSTCACHE_CTLSTAT bit constants */
7170 --#define RB2D_DC_FLUSH (3 << 0)
7171 --#define RB2D_DC_FLUSH_ALL 0xf
7172 -+#define RB2D_DC_FLUSH_2D (1 << 0)
7173 -+#define RB2D_DC_FREE_2D (1 << 2)
7174 -+#define RB2D_DC_FLUSH_ALL (RB2D_DC_FLUSH_2D | RB2D_DC_FREE_2D)
7175 - #define RB2D_DC_BUSY (1 << 31)
7176 -
7177 -
7178 -@@ -741,6 +742,10 @@
7179 - #define SOFT_RESET_RB (1 << 6)
7180 - #define SOFT_RESET_HDP (1 << 7)
7181 -
7182 -+/* WAIT_UNTIL bit constants */
7183 -+#define WAIT_DMA_GUI_IDLE (1 << 9)
7184 -+#define WAIT_2D_IDLECLEAN (1 << 16)
7185 -+
7186 - /* SURFACE_CNTL bit consants */
7187 - #define SURF_TRANSLATION_DIS (1 << 8)
7188 - #define NONSURF_AP0_SWP_16BPP (1 << 20)
7189 -diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
7190 -index dbd8398..0ffaeb0 100644
7191 ---- a/kernel/posix-timers.c
7192 -+++ b/kernel/posix-timers.c
7193 -@@ -289,21 +289,29 @@ void do_schedule_next_timer(struct siginfo *info)
7194 - else
7195 - schedule_next_timer(timr);
7196 -
7197 -- info->si_overrun = timr->it_overrun_last;
7198 -+ info->si_overrun += timr->it_overrun_last;
7199 - }
7200 -
7201 - if (timr)
7202 - unlock_timer(timr, flags);
7203 - }
7204 -
7205 --int posix_timer_event(struct k_itimer *timr,int si_private)
7206 -+int posix_timer_event(struct k_itimer *timr, int si_private)
7207 - {
7208 -- memset(&timr->sigq->info, 0, sizeof(siginfo_t));
7209 -+ /*
7210 -+ * FIXME: if ->sigq is queued we can race with
7211 -+ * dequeue_signal()->do_schedule_next_timer().
7212 -+ *
7213 -+ * If dequeue_signal() sees the "right" value of
7214 -+ * si_sys_private it calls do_schedule_next_timer().
7215 -+ * We re-queue ->sigq and drop ->it_lock().
7216 -+ * do_schedule_next_timer() locks the timer
7217 -+ * and re-schedules it while ->sigq is pending.
7218 -+ * Not really bad, but not that we want.
7219 -+ */
7220 - timr->sigq->info.si_sys_private = si_private;
7221 -- /* Send signal to the process that owns this timer.*/
7222 -
7223 - timr->sigq->info.si_signo = timr->it_sigev_signo;
7224 -- timr->sigq->info.si_errno = 0;
7225 - timr->sigq->info.si_code = SI_TIMER;
7226 - timr->sigq->info.si_tid = timr->it_id;
7227 - timr->sigq->info.si_value = timr->it_sigev_value;
7228 -@@ -435,6 +443,7 @@ static struct k_itimer * alloc_posix_timer(void)
7229 - kmem_cache_free(posix_timers_cache, tmr);
7230 - tmr = NULL;
7231 - }
7232 -+ memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
7233 - return tmr;
7234 - }
7235 -
7236 -diff --git a/kernel/relay.c b/kernel/relay.c
7237 -index 7de644c..f5a5a96 100644
7238 ---- a/kernel/relay.c
7239 -+++ b/kernel/relay.c
7240 -@@ -832,6 +832,10 @@ static void relay_file_read_consume(struct rchan_buf *buf,
7241 - size_t n_subbufs = buf->chan->n_subbufs;
7242 - size_t read_subbuf;
7243 -
7244 -+ if (buf->subbufs_produced == buf->subbufs_consumed &&
7245 -+ buf->offset == buf->bytes_consumed)
7246 -+ return;
7247 -+
7248 - if (buf->bytes_consumed + bytes_consumed > subbuf_size) {
7249 - relay_subbufs_consumed(buf->chan, buf->cpu, 1);
7250 - buf->bytes_consumed = 0;
7251 -@@ -863,6 +867,8 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
7252 -
7253 - relay_file_read_consume(buf, read_pos, 0);
7254 -
7255 -+ consumed = buf->subbufs_consumed;
7256 -+
7257 - if (unlikely(buf->offset > subbuf_size)) {
7258 - if (produced == consumed)
7259 - return 0;
7260 -@@ -881,8 +887,12 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
7261 - if (consumed > produced)
7262 - produced += n_subbufs * subbuf_size;
7263 -
7264 -- if (consumed == produced)
7265 -+ if (consumed == produced) {
7266 -+ if (buf->offset == subbuf_size &&
7267 -+ buf->subbufs_produced > buf->subbufs_consumed)
7268 -+ return 1;
7269 - return 0;
7270 -+ }
7271 -
7272 - return 1;
7273 - }
7274 -diff --git a/kernel/signal.c b/kernel/signal.c
7275 -index 6c0958e..c5bf0c0 100644
7276 ---- a/kernel/signal.c
7277 -+++ b/kernel/signal.c
7278 -@@ -1319,6 +1319,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
7279 - q->info.si_overrun++;
7280 - goto out;
7281 - }
7282 -+ q->info.si_overrun = 0;
7283 -
7284 - signalfd_notify(t, sig);
7285 - pending = group ? &t->signal->shared_pending : &t->pending;
7286 -diff --git a/lib/random32.c b/lib/random32.c
7287 -index ca87d86..217d5c4 100644
7288 ---- a/lib/random32.c
7289 -+++ b/lib/random32.c
7290 -@@ -56,23 +56,12 @@ static u32 __random32(struct rnd_state *state)
7291 - return (state->s1 ^ state->s2 ^ state->s3);
7292 - }
7293 -
7294 --static void __set_random32(struct rnd_state *state, unsigned long s)
7295 -+/*
7296 -+ * Handle minimum values for seeds
7297 -+ */
7298 -+static inline u32 __seed(u32 x, u32 m)
7299 - {
7300 -- if (s == 0)
7301 -- s = 1; /* default seed is 1 */
7302 --
7303 --#define LCG(n) (69069 * n)
7304 -- state->s1 = LCG(s);
7305 -- state->s2 = LCG(state->s1);
7306 -- state->s3 = LCG(state->s2);
7307 --
7308 -- /* "warm it up" */
7309 -- __random32(state);
7310 -- __random32(state);
7311 -- __random32(state);
7312 -- __random32(state);
7313 -- __random32(state);
7314 -- __random32(state);
7315 -+ return (x < m) ? x + m : x;
7316 - }
7317 -
7318 - /**
7319 -@@ -107,7 +96,7 @@ void srandom32(u32 entropy)
7320 - */
7321 - for_each_possible_cpu (i) {
7322 - struct rnd_state *state = &per_cpu(net_rand_state, i);
7323 -- __set_random32(state, state->s1 ^ entropy);
7324 -+ state->s1 = __seed(state->s1 ^ entropy, 1);
7325 - }
7326 - }
7327 - EXPORT_SYMBOL(srandom32);
7328 -@@ -122,7 +111,19 @@ static int __init random32_init(void)
7329 -
7330 - for_each_possible_cpu(i) {
7331 - struct rnd_state *state = &per_cpu(net_rand_state,i);
7332 -- __set_random32(state, i + jiffies);
7333 -+
7334 -+#define LCG(x) ((x) * 69069) /* super-duper LCG */
7335 -+ state->s1 = __seed(LCG(i + jiffies), 1);
7336 -+ state->s2 = __seed(LCG(state->s1), 7);
7337 -+ state->s3 = __seed(LCG(state->s2), 15);
7338 -+
7339 -+ /* "warm it up" */
7340 -+ __random32(state);
7341 -+ __random32(state);
7342 -+ __random32(state);
7343 -+ __random32(state);
7344 -+ __random32(state);
7345 -+ __random32(state);
7346 - }
7347 - return 0;
7348 - }
7349 -@@ -135,13 +136,18 @@ core_initcall(random32_init);
7350 - static int __init random32_reseed(void)
7351 - {
7352 - int i;
7353 -- unsigned long seed;
7354 -
7355 - for_each_possible_cpu(i) {
7356 - struct rnd_state *state = &per_cpu(net_rand_state,i);
7357 -+ u32 seeds[3];
7358 -+
7359 -+ get_random_bytes(&seeds, sizeof(seeds));
7360 -+ state->s1 = __seed(seeds[0], 1);
7361 -+ state->s2 = __seed(seeds[1], 7);
7362 -+ state->s3 = __seed(seeds[2], 15);
7363 -
7364 -- get_random_bytes(&seed, sizeof(seed));
7365 -- __set_random32(state, seed);
7366 -+ /* mix it in */
7367 -+ __random32(state);
7368 - }
7369 - return 0;
7370 - }
7371 -diff --git a/mm/memory.c b/mm/memory.c
7372 -index 2302d22..0755c52 100644
7373 ---- a/mm/memory.c
7374 -+++ b/mm/memory.c
7375 -@@ -2748,16 +2748,26 @@ int make_pages_present(unsigned long addr, unsigned long end)
7376 -
7377 - vma = find_vma(current->mm, addr);
7378 - if (!vma)
7379 -- return -1;
7380 -+ return -ENOMEM;
7381 - write = (vma->vm_flags & VM_WRITE) != 0;
7382 - BUG_ON(addr >= end);
7383 - BUG_ON(end > vma->vm_end);
7384 - len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
7385 - ret = get_user_pages(current, current->mm, addr,
7386 - len, write, 0, NULL, NULL);
7387 -- if (ret < 0)
7388 -+ if (ret < 0) {
7389 -+ /*
7390 -+ SUS require strange return value to mlock
7391 -+ - invalid addr generate to ENOMEM.
7392 -+ - out of memory should generate EAGAIN.
7393 -+ */
7394 -+ if (ret == -EFAULT)
7395 -+ ret = -ENOMEM;
7396 -+ else if (ret == -ENOMEM)
7397 -+ ret = -EAGAIN;
7398 - return ret;
7399 -- return ret == len ? 0 : -1;
7400 -+ }
7401 -+ return ret == len ? 0 : -ENOMEM;
7402 - }
7403 -
7404 - #if !defined(__HAVE_ARCH_GATE_AREA)
7405 -diff --git a/mm/mlock.c b/mm/mlock.c
7406 -index 7b26560..01fbe93 100644
7407 ---- a/mm/mlock.c
7408 -+++ b/mm/mlock.c
7409 -@@ -78,8 +78,6 @@ success:
7410 -
7411 - mm->locked_vm -= pages;
7412 - out:
7413 -- if (ret == -ENOMEM)
7414 -- ret = -EAGAIN;
7415 - return ret;
7416 - }
7417 -
7418 -diff --git a/net/dccp/proto.c b/net/dccp/proto.c
7419 -index 9dfe247..ebfd56b 100644
7420 ---- a/net/dccp/proto.c
7421 -+++ b/net/dccp/proto.c
7422 -@@ -476,6 +476,11 @@ static int dccp_setsockopt_change(struct sock *sk, int type,
7423 -
7424 - if (copy_from_user(&opt, optval, sizeof(opt)))
7425 - return -EFAULT;
7426 -+ /*
7427 -+ * rfc4340: 6.1. Change Options
7428 -+ */
7429 -+ if (opt.dccpsf_len < 1)
7430 -+ return -EINVAL;
7431 -
7432 - val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
7433 - if (!val)
7434 -diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
7435 -index dfa0d71..f97ffc5 100644
7436 ---- a/net/ipv4/ipvs/ip_vs_est.c
7437 -+++ b/net/ipv4/ipvs/ip_vs_est.c
7438 -@@ -172,8 +172,11 @@ void ip_vs_kill_estimator(struct ip_vs_stats *stats)
7439 - kfree(est);
7440 - killed++;
7441 - }
7442 -- if (killed && est_list == NULL)
7443 -- del_timer_sync(&est_timer);
7444 -+ while (killed && !est_list && try_to_del_timer_sync(&est_timer) < 0) {
7445 -+ write_unlock_bh(&est_lock);
7446 -+ cpu_relax();
7447 -+ write_lock_bh(&est_lock);
7448 -+ }
7449 - write_unlock_bh(&est_lock);
7450 - }
7451 -
7452 -diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
7453 -index d182a2a..3872d4d 100644
7454 ---- a/net/ipv4/syncookies.c
7455 -+++ b/net/ipv4/syncookies.c
7456 -@@ -301,6 +301,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
7457 - ireq->rmt_port = th->source;
7458 - ireq->loc_addr = ip_hdr(skb)->daddr;
7459 - ireq->rmt_addr = ip_hdr(skb)->saddr;
7460 -+ ireq->ecn_ok = 0;
7461 - ireq->snd_wscale = tcp_opt.snd_wscale;
7462 - ireq->rcv_wscale = tcp_opt.rcv_wscale;
7463 - ireq->sack_ok = tcp_opt.sack_ok;
7464 -diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
7465 -index 48cdce9..4019770 100644
7466 ---- a/net/ipv6/ip6_output.c
7467 -+++ b/net/ipv6/ip6_output.c
7468 -@@ -231,6 +231,10 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
7469 - skb_reset_network_header(skb);
7470 - hdr = ipv6_hdr(skb);
7471 -
7472 -+ /* Allow local fragmentation. */
7473 -+ if (ipfragok)
7474 -+ skb->local_df = 1;
7475 -+
7476 - /*
7477 - * Fill in the IPv6 header
7478 - */
7479 -diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
7480 -index 3ecc115..c8d84e3 100644
7481 ---- a/net/ipv6/syncookies.c
7482 -+++ b/net/ipv6/syncookies.c
7483 -@@ -223,6 +223,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
7484 -
7485 - req->expires = 0UL;
7486 - req->retrans = 0;
7487 -+ ireq->ecn_ok = 0;
7488 - ireq->snd_wscale = tcp_opt.snd_wscale;
7489 - ireq->rcv_wscale = tcp_opt.rcv_wscale;
7490 - ireq->sack_ok = tcp_opt.sack_ok;
7491 -diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
7492 -index 78de716..9596331 100644
7493 ---- a/sound/soc/fsl/fsl_dma.c
7494 -+++ b/sound/soc/fsl/fsl_dma.c
7495 -@@ -132,12 +132,17 @@ struct fsl_dma_private {
7496 - * Since each link descriptor has a 32-bit byte count field, we set
7497 - * period_bytes_max to the largest 32-bit number. We also have no maximum
7498 - * number of periods.
7499 -+ *
7500 -+ * Note that we specify SNDRV_PCM_INFO_JOINT_DUPLEX here, but only because a
7501 -+ * limitation in the SSI driver requires the sample rates for playback and
7502 -+ * capture to be the same.
7503 - */
7504 - static const struct snd_pcm_hardware fsl_dma_hardware = {
7505 -
7506 - .info = SNDRV_PCM_INFO_INTERLEAVED |
7507 - SNDRV_PCM_INFO_MMAP |
7508 -- SNDRV_PCM_INFO_MMAP_VALID,
7509 -+ SNDRV_PCM_INFO_MMAP_VALID |
7510 -+ SNDRV_PCM_INFO_JOINT_DUPLEX,
7511 - .formats = FSLDMA_PCM_FORMATS,
7512 - .rates = FSLDMA_PCM_RATES,
7513 - .rate_min = 5512,
7514 -@@ -322,14 +327,75 @@ static int fsl_dma_new(struct snd_card *card, struct snd_soc_codec_dai *dai,
7515 - * fsl_dma_open: open a new substream.
7516 - *
7517 - * Each substream has its own DMA buffer.
7518 -+ *
7519 -+ * ALSA divides the DMA buffer into N periods. We create NUM_DMA_LINKS link
7520 -+ * descriptors that ping-pong from one period to the next. For example, if
7521 -+ * there are six periods and two link descriptors, this is how they look
7522 -+ * before playback starts:
7523 -+ *
7524 -+ * The last link descriptor
7525 -+ * ____________ points back to the first
7526 -+ * | |
7527 -+ * V |
7528 -+ * ___ ___ |
7529 -+ * | |->| |->|
7530 -+ * |___| |___|
7531 -+ * | |
7532 -+ * | |
7533 -+ * V V
7534 -+ * _________________________________________
7535 -+ * | | | | | | | The DMA buffer is
7536 -+ * | | | | | | | divided into 6 parts
7537 -+ * |______|______|______|______|______|______|
7538 -+ *
7539 -+ * and here's how they look after the first period is finished playing:
7540 -+ *
7541 -+ * ____________
7542 -+ * | |
7543 -+ * V |
7544 -+ * ___ ___ |
7545 -+ * | |->| |->|
7546 -+ * |___| |___|
7547 -+ * | |
7548 -+ * |______________
7549 -+ * | |
7550 -+ * V V
7551 -+ * _________________________________________
7552 -+ * | | | | | | |
7553 -+ * | | | | | | |
7554 -+ * |______|______|______|______|______|______|
7555 -+ *
7556 -+ * The first link descriptor now points to the third period. The DMA
7557 -+ * controller is currently playing the second period. When it finishes, it
7558 -+ * will jump back to the first descriptor and play the third period.
7559 -+ *
7560 -+ * There are four reasons we do this:
7561 -+ *
7562 -+ * 1. The only way to get the DMA controller to automatically restart the
7563 -+ * transfer when it gets to the end of the buffer is to use chaining
7564 -+ * mode. Basic direct mode doesn't offer that feature.
7565 -+ * 2. We need to receive an interrupt at the end of every period. The DMA
7566 -+ * controller can generate an interrupt at the end of every link transfer
7567 -+ * (aka segment). Making each period into a DMA segment will give us the
7568 -+ * interrupts we need.
7569 -+ * 3. By creating only two link descriptors, regardless of the number of
7570 -+ * periods, we do not need to reallocate the link descriptors if the
7571 -+ * number of periods changes.
7572 -+ * 4. All of the audio data is still stored in a single, contiguous DMA
7573 -+ * buffer, which is what ALSA expects. We're just dividing it into
7574 -+ * contiguous parts, and creating a link descriptor for each one.
7575 - */
7576 - static int fsl_dma_open(struct snd_pcm_substream *substream)
7577 - {
7578 - struct snd_pcm_runtime *runtime = substream->runtime;
7579 - struct fsl_dma_private *dma_private;
7580 -+ struct ccsr_dma_channel __iomem *dma_channel;
7581 - dma_addr_t ld_buf_phys;
7582 -+ u64 temp_link; /* Pointer to next link descriptor */
7583 -+ u32 mr;
7584 - unsigned int channel;
7585 - int ret = 0;
7586 -+ unsigned int i;
7587 -
7588 - /*
7589 - * Reject any DMA buffer whose size is not a multiple of the period
7590 -@@ -390,68 +456,74 @@ static int fsl_dma_open(struct snd_pcm_substream *substream)
7591 - snd_soc_set_runtime_hwparams(substream, &fsl_dma_hardware);
7592 - runtime->private_data = dma_private;
7593 -
7594 -+ /* Program the fixed DMA controller parameters */
7595 -+
7596 -+ dma_channel = dma_private->dma_channel;
7597 -+
7598 -+ temp_link = dma_private->ld_buf_phys +
7599 -+ sizeof(struct fsl_dma_link_descriptor);
7600 -+
7601 -+ for (i = 0; i < NUM_DMA_LINKS; i++) {
7602 -+ struct fsl_dma_link_descriptor *link = &dma_private->link[i];
7603 -+
7604 -+ link->source_attr = cpu_to_be32(CCSR_DMA_ATR_SNOOP);
7605 -+ link->dest_attr = cpu_to_be32(CCSR_DMA_ATR_SNOOP);
7606 -+ link->next = cpu_to_be64(temp_link);
7607 -+
7608 -+ temp_link += sizeof(struct fsl_dma_link_descriptor);
7609 -+ }
7610 -+ /* The last link descriptor points to the first */
7611 -+ dma_private->link[i - 1].next = cpu_to_be64(dma_private->ld_buf_phys);
7612 -+
7613 -+ /* Tell the DMA controller where the first link descriptor is */
7614 -+ out_be32(&dma_channel->clndar,
7615 -+ CCSR_DMA_CLNDAR_ADDR(dma_private->ld_buf_phys));
7616 -+ out_be32(&dma_channel->eclndar,
7617 -+ CCSR_DMA_ECLNDAR_ADDR(dma_private->ld_buf_phys));
7618 -+
7619 -+ /* The manual says the BCR must be clear before enabling EMP */
7620 -+ out_be32(&dma_channel->bcr, 0);
7621 -+
7622 -+ /*
7623 -+ * Program the mode register for interrupts, external master control,
7624 -+ * and source/destination hold. Also clear the Channel Abort bit.
7625 -+ */
7626 -+ mr = in_be32(&dma_channel->mr) &
7627 -+ ~(CCSR_DMA_MR_CA | CCSR_DMA_MR_DAHE | CCSR_DMA_MR_SAHE);
7628 -+
7629 -+ /*
7630 -+ * We want External Master Start and External Master Pause enabled,
7631 -+ * because the SSI is controlling the DMA controller. We want the DMA
7632 -+ * controller to be set up in advance, and then we signal only the SSI
7633 -+ * to start transferring.
7634 -+ *
7635 -+ * We want End-Of-Segment Interrupts enabled, because this will generate
7636 -+ * an interrupt at the end of each segment (each link descriptor
7637 -+ * represents one segment). Each DMA segment is the same thing as an
7638 -+ * ALSA period, so this is how we get an interrupt at the end of every
7639 -+ * period.
7640 -+ *
7641 -+ * We want Error Interrupt enabled, so that we can get an error if
7642 -+ * the DMA controller is mis-programmed somehow.
7643 -+ */
7644 -+ mr |= CCSR_DMA_MR_EOSIE | CCSR_DMA_MR_EIE | CCSR_DMA_MR_EMP_EN |
7645 -+ CCSR_DMA_MR_EMS_EN;
7646 -+
7647 -+ /* For playback, we want the destination address to be held. For
7648 -+ capture, set the source address to be held. */
7649 -+ mr |= (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
7650 -+ CCSR_DMA_MR_DAHE : CCSR_DMA_MR_SAHE;
7651 -+
7652 -+ out_be32(&dma_channel->mr, mr);
7653 -+
7654 - return 0;
7655 - }
7656 -
7657 - /**
7658 -- * fsl_dma_hw_params: allocate the DMA buffer and the DMA link descriptors.
7659 -+ * fsl_dma_hw_params: continue initializing the DMA links
7660 - *
7661 -- * ALSA divides the DMA buffer into N periods. We create NUM_DMA_LINKS link
7662 -- * descriptors that ping-pong from one period to the next. For example, if
7663 -- * there are six periods and two link descriptors, this is how they look
7664 -- * before playback starts:
7665 -- *
7666 -- * The last link descriptor
7667 -- * ____________ points back to the first
7668 -- * | |
7669 -- * V |
7670 -- * ___ ___ |
7671 -- * | |->| |->|
7672 -- * |___| |___|
7673 -- * | |
7674 -- * | |
7675 -- * V V
7676 -- * _________________________________________
7677 -- * | | | | | | | The DMA buffer is
7678 -- * | | | | | | | divided into 6 parts
7679 -- * |______|______|______|______|______|______|
7680 -- *
7681 -- * and here's how they look after the first period is finished playing:
7682 -- *
7683 -- * ____________
7684 -- * | |
7685 -- * V |
7686 -- * ___ ___ |
7687 -- * | |->| |->|
7688 -- * |___| |___|
7689 -- * | |
7690 -- * |______________
7691 -- * | |
7692 -- * V V
7693 -- * _________________________________________
7694 -- * | | | | | | |
7695 -- * | | | | | | |
7696 -- * |______|______|______|______|______|______|
7697 -- *
7698 -- * The first link descriptor now points to the third period. The DMA
7699 -- * controller is currently playing the second period. When it finishes, it
7700 -- * will jump back to the first descriptor and play the third period.
7701 -- *
7702 -- * There are four reasons we do this:
7703 -- *
7704 -- * 1. The only way to get the DMA controller to automatically restart the
7705 -- * transfer when it gets to the end of the buffer is to use chaining
7706 -- * mode. Basic direct mode doesn't offer that feature.
7707 -- * 2. We need to receive an interrupt at the end of every period. The DMA
7708 -- * controller can generate an interrupt at the end of every link transfer
7709 -- * (aka segment). Making each period into a DMA segment will give us the
7710 -- * interrupts we need.
7711 -- * 3. By creating only two link descriptors, regardless of the number of
7712 -- * periods, we do not need to reallocate the link descriptors if the
7713 -- * number of periods changes.
7714 -- * 4. All of the audio data is still stored in a single, contiguous DMA
7715 -- * buffer, which is what ALSA expects. We're just dividing it into
7716 -- * contiguous parts, and creating a link descriptor for each one.
7717 -+ * This function obtains hardware parameters about the opened stream and
7718 -+ * programs the DMA controller accordingly.
7719 - *
7720 - * Note that due to a quirk of the SSI's STX register, the target address
7721 - * for the DMA operations depends on the sample size. So we don't program
7722 -@@ -463,11 +535,8 @@ static int fsl_dma_hw_params(struct snd_pcm_substream *substream,
7723 - {
7724 - struct snd_pcm_runtime *runtime = substream->runtime;
7725 - struct fsl_dma_private *dma_private = runtime->private_data;
7726 -- struct ccsr_dma_channel __iomem *dma_channel = dma_private->dma_channel;
7727 -
7728 - dma_addr_t temp_addr; /* Pointer to next period */
7729 -- u64 temp_link; /* Pointer to next link descriptor */
7730 -- u32 mr; /* Temporary variable for MR register */
7731 -
7732 - unsigned int i;
7733 -
7734 -@@ -485,8 +554,6 @@ static int fsl_dma_hw_params(struct snd_pcm_substream *substream,
7735 - dma_private->dma_buf_next = dma_private->dma_buf_phys;
7736 -
7737 - /*
7738 -- * Initialize each link descriptor.
7739 -- *
7740 - * The actual address in STX0 (destination for playback, source for
7741 - * capture) is based on the sample size, but we don't know the sample
7742 - * size in this function, so we'll have to adjust that later. See
7743 -@@ -502,16 +569,11 @@ static int fsl_dma_hw_params(struct snd_pcm_substream *substream,
7744 - * buffer itself.
7745 - */
7746 - temp_addr = substream->dma_buffer.addr;
7747 -- temp_link = dma_private->ld_buf_phys +
7748 -- sizeof(struct fsl_dma_link_descriptor);
7749 -
7750 - for (i = 0; i < NUM_DMA_LINKS; i++) {
7751 - struct fsl_dma_link_descriptor *link = &dma_private->link[i];
7752 -
7753 - link->count = cpu_to_be32(period_size);
7754 -- link->source_attr = cpu_to_be32(CCSR_DMA_ATR_SNOOP);
7755 -- link->dest_attr = cpu_to_be32(CCSR_DMA_ATR_SNOOP);
7756 -- link->next = cpu_to_be64(temp_link);
7757 -
7758 - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
7759 - link->source_addr = cpu_to_be32(temp_addr);
7760 -@@ -519,51 +581,7 @@ static int fsl_dma_hw_params(struct snd_pcm_substream *substream,
7761 - link->dest_addr = cpu_to_be32(temp_addr);
7762 -
7763 - temp_addr += period_size;
7764 -- temp_link += sizeof(struct fsl_dma_link_descriptor);
7765 - }
7766 -- /* The last link descriptor points to the first */
7767 -- dma_private->link[i - 1].next = cpu_to_be64(dma_private->ld_buf_phys);
7768 --
7769 -- /* Tell the DMA controller where the first link descriptor is */
7770 -- out_be32(&dma_channel->clndar,
7771 -- CCSR_DMA_CLNDAR_ADDR(dma_private->ld_buf_phys));
7772 -- out_be32(&dma_channel->eclndar,
7773 -- CCSR_DMA_ECLNDAR_ADDR(dma_private->ld_buf_phys));
7774 --
7775 -- /* The manual says the BCR must be clear before enabling EMP */
7776 -- out_be32(&dma_channel->bcr, 0);
7777 --
7778 -- /*
7779 -- * Program the mode register for interrupts, external master control,
7780 -- * and source/destination hold. Also clear the Channel Abort bit.
7781 -- */
7782 -- mr = in_be32(&dma_channel->mr) &
7783 -- ~(CCSR_DMA_MR_CA | CCSR_DMA_MR_DAHE | CCSR_DMA_MR_SAHE);
7784 --
7785 -- /*
7786 -- * We want External Master Start and External Master Pause enabled,
7787 -- * because the SSI is controlling the DMA controller. We want the DMA
7788 -- * controller to be set up in advance, and then we signal only the SSI
7789 -- * to start transfering.
7790 -- *
7791 -- * We want End-Of-Segment Interrupts enabled, because this will generate
7792 -- * an interrupt at the end of each segment (each link descriptor
7793 -- * represents one segment). Each DMA segment is the same thing as an
7794 -- * ALSA period, so this is how we get an interrupt at the end of every
7795 -- * period.
7796 -- *
7797 -- * We want Error Interrupt enabled, so that we can get an error if
7798 -- * the DMA controller is mis-programmed somehow.
7799 -- */
7800 -- mr |= CCSR_DMA_MR_EOSIE | CCSR_DMA_MR_EIE | CCSR_DMA_MR_EMP_EN |
7801 -- CCSR_DMA_MR_EMS_EN;
7802 --
7803 -- /* For playback, we want the destination address to be held. For
7804 -- capture, set the source address to be held. */
7805 -- mr |= (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
7806 -- CCSR_DMA_MR_DAHE : CCSR_DMA_MR_SAHE;
7807 --
7808 -- out_be32(&dma_channel->mr, mr);
7809 -
7810 - return 0;
7811 - }
7812 -diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
7813 -index f588545..94f8567 100644
7814 ---- a/sound/soc/fsl/fsl_ssi.c
7815 -+++ b/sound/soc/fsl/fsl_ssi.c
7816 -@@ -67,6 +67,8 @@
7817 - * @ssi: pointer to the SSI's registers
7818 - * @ssi_phys: physical address of the SSI registers
7819 - * @irq: IRQ of this SSI
7820 -+ * @first_stream: pointer to the stream that was opened first
7821 -+ * @second_stream: pointer to second stream
7822 - * @dev: struct device pointer
7823 - * @playback: the number of playback streams opened
7824 - * @capture: the number of capture streams opened
7825 -@@ -79,6 +81,8 @@ struct fsl_ssi_private {
7826 - struct ccsr_ssi __iomem *ssi;
7827 - dma_addr_t ssi_phys;
7828 - unsigned int irq;
7829 -+ struct snd_pcm_substream *first_stream;
7830 -+ struct snd_pcm_substream *second_stream;
7831 - struct device *dev;
7832 - unsigned int playback;
7833 - unsigned int capture;
7834 -@@ -342,6 +346,49 @@ static int fsl_ssi_startup(struct snd_pcm_substream *substream)
7835 - */
7836 - }
7837 -
7838 -+ if (!ssi_private->first_stream)
7839 -+ ssi_private->first_stream = substream;
7840 -+ else {
7841 -+ /* This is the second stream open, so we need to impose sample
7842 -+ * rate and maybe sample size constraints. Note that this can
7843 -+ * cause a race condition if the second stream is opened before
7844 -+ * the first stream is fully initialized.
7845 -+ *
7846 -+ * We provide some protection by checking to make sure the first
7847 -+ * stream is initialized, but it's not perfect. ALSA sometimes
7848 -+ * re-initializes the driver with a different sample rate or
7849 -+ * size. If the second stream is opened before the first stream
7850 -+ * has received its final parameters, then the second stream may
7851 -+ * be constrained to the wrong sample rate or size.
7852 -+ *
7853 -+ * FIXME: This code does not handle opening and closing streams
7854 -+ * repeatedly. If you open two streams and then close the first
7855 -+ * one, you may not be able to open another stream until you
7856 -+ * close the second one as well.
7857 -+ */
7858 -+ struct snd_pcm_runtime *first_runtime =
7859 -+ ssi_private->first_stream->runtime;
7860 -+
7861 -+ if (!first_runtime->rate || !first_runtime->sample_bits) {
7862 -+ dev_err(substream->pcm->card->dev,
7863 -+ "set sample rate and size in %s stream first\n",
7864 -+ substream->stream == SNDRV_PCM_STREAM_PLAYBACK
7865 -+ ? "capture" : "playback");
7866 -+ return -EAGAIN;
7867 -+ }
7868 -+
7869 -+ snd_pcm_hw_constraint_minmax(substream->runtime,
7870 -+ SNDRV_PCM_HW_PARAM_RATE,
7871 -+ first_runtime->rate, first_runtime->rate);
7872 -+
7873 -+ snd_pcm_hw_constraint_minmax(substream->runtime,
7874 -+ SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
7875 -+ first_runtime->sample_bits,
7876 -+ first_runtime->sample_bits);
7877 -+
7878 -+ ssi_private->second_stream = substream;
7879 -+ }
7880 -+
7881 - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
7882 - ssi_private->playback++;
7883 -
7884 -@@ -371,18 +418,16 @@ static int fsl_ssi_prepare(struct snd_pcm_substream *substream)
7885 - struct fsl_ssi_private *ssi_private = rtd->dai->cpu_dai->private_data;
7886 -
7887 - struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
7888 -- u32 wl;
7889 -
7890 -- wl = CCSR_SSI_SxCCR_WL(snd_pcm_format_width(runtime->format));
7891 -+ if (substream == ssi_private->first_stream) {
7892 -+ u32 wl;
7893 -
7894 -- clrbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN);
7895 -+ /* The SSI should always be disabled at this points (SSIEN=0) */
7896 -+ wl = CCSR_SSI_SxCCR_WL(snd_pcm_format_width(runtime->format));
7897 -
7898 -- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
7899 -+ /* In synchronous mode, the SSI uses STCCR for capture */
7900 - clrsetbits_be32(&ssi->stccr, CCSR_SSI_SxCCR_WL_MASK, wl);
7901 -- else
7902 -- clrsetbits_be32(&ssi->srccr, CCSR_SSI_SxCCR_WL_MASK, wl);
7903 --
7904 -- setbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN);
7905 -+ }
7906 -
7907 - return 0;
7908 - }
7909 -@@ -407,9 +452,13 @@ static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd)
7910 - case SNDRV_PCM_TRIGGER_RESUME:
7911 - case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
7912 - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
7913 -- setbits32(&ssi->scr, CCSR_SSI_SCR_TE);
7914 -+ clrbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN);
7915 -+ setbits32(&ssi->scr,
7916 -+ CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE);
7917 - } else {
7918 -- setbits32(&ssi->scr, CCSR_SSI_SCR_RE);
7919 -+ clrbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN);
7920 -+ setbits32(&ssi->scr,
7921 -+ CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_RE);
7922 -
7923 - /*
7924 - * I think we need this delay to allow time for the SSI
7925 -@@ -452,6 +501,11 @@ static void fsl_ssi_shutdown(struct snd_pcm_substream *substream)
7926 - if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
7927 - ssi_private->capture--;
7928 -
7929 -+ if (ssi_private->first_stream == substream)
7930 -+ ssi_private->first_stream = ssi_private->second_stream;
7931 -+
7932 -+ ssi_private->second_stream = NULL;
7933 -+
7934 - /*
7935 - * If this is the last active substream, disable the SSI and release
7936 - * the IRQ.
7937
7938 Deleted: genpatches-2.6/trunk/2.6.27/1003_linux-2.6.26.4.patch
7939 ===================================================================
7940 --- genpatches-2.6/trunk/2.6.27/1003_linux-2.6.26.4.patch 2008-10-10 23:58:26 UTC (rev 1350)
7941 +++ genpatches-2.6/trunk/2.6.27/1003_linux-2.6.26.4.patch 2008-10-11 00:00:47 UTC (rev 1351)
7942 @@ -1,1952 +0,0 @@
7943 -diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
7944 -index e289a98..387d3f6 100644
7945 ---- a/arch/sparc64/mm/init.c
7946 -+++ b/arch/sparc64/mm/init.c
7947 -@@ -842,6 +842,9 @@ static unsigned long nid_range(unsigned long start, unsigned long end,
7948 - start += PAGE_SIZE;
7949 - }
7950 -
7951 -+ if (start > end)
7952 -+ start = end;
7953 -+
7954 - return start;
7955 - }
7956 - #else
7957 -@@ -1769,8 +1772,7 @@ void __init paging_init(void)
7958 -
7959 - find_ramdisk(phys_base);
7960 -
7961 -- if (cmdline_memory_size)
7962 -- lmb_enforce_memory_limit(phys_base + cmdline_memory_size);
7963 -+ lmb_enforce_memory_limit(cmdline_memory_size);
7964 -
7965 - lmb_analyze();
7966 - lmb_dump_all();
7967 -@@ -2007,6 +2009,15 @@ void __init mem_init(void)
7968 - void free_initmem(void)
7969 - {
7970 - unsigned long addr, initend;
7971 -+ int do_free = 1;
7972 -+
7973 -+ /* If the physical memory maps were trimmed by kernel command
7974 -+ * line options, don't even try freeing this initmem stuff up.
7975 -+ * The kernel image could have been in the trimmed out region
7976 -+ * and if so the freeing below will free invalid page structs.
7977 -+ */
7978 -+ if (cmdline_memory_size)
7979 -+ do_free = 0;
7980 -
7981 - /*
7982 - * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
7983 -@@ -2021,13 +2032,16 @@ void free_initmem(void)
7984 - ((unsigned long) __va(kern_base)) -
7985 - ((unsigned long) KERNBASE));
7986 - memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
7987 -- p = virt_to_page(page);
7988 -
7989 -- ClearPageReserved(p);
7990 -- init_page_count(p);
7991 -- __free_page(p);
7992 -- num_physpages++;
7993 -- totalram_pages++;
7994 -+ if (do_free) {
7995 -+ p = virt_to_page(page);
7996 -+
7997 -+ ClearPageReserved(p);
7998 -+ init_page_count(p);
7999 -+ __free_page(p);
8000 -+ num_physpages++;
8001 -+ totalram_pages++;
8002 -+ }
8003 - }
8004 - }
8005 -
8006 -diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
8007 -index 3fd7a67..e710a21 100644
8008 ---- a/arch/x86/kernel/cpu/cyrix.c
8009 -+++ b/arch/x86/kernel/cpu/cyrix.c
8010 -@@ -134,23 +134,6 @@ static void __cpuinit set_cx86_memwb(void)
8011 - setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
8012 - }
8013 -
8014 --static void __cpuinit set_cx86_inc(void)
8015 --{
8016 -- unsigned char ccr3;
8017 --
8018 -- printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");
8019 --
8020 -- ccr3 = getCx86(CX86_CCR3);
8021 -- setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
8022 -- /* PCR1 -- Performance Control */
8023 -- /* Incrementor on, whatever that is */
8024 -- setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
8025 -- /* PCR0 -- Performance Control */
8026 -- /* Incrementor Margin 10 */
8027 -- setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04);
8028 -- setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
8029 --}
8030 --
8031 - /*
8032 - * Configure later MediaGX and/or Geode processor.
8033 - */
8034 -@@ -174,7 +157,6 @@ static void __cpuinit geode_configure(void)
8035 -
8036 - set_cx86_memwb();
8037 - set_cx86_reorder();
8038 -- set_cx86_inc();
8039 -
8040 - local_irq_restore(flags);
8041 - }
8042 -diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
8043 -index 75b14b1..745b974 100644
8044 ---- a/arch/x86/kernel/cpu/mtrr/generic.c
8045 -+++ b/arch/x86/kernel/cpu/mtrr/generic.c
8046 -@@ -365,6 +365,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
8047 - unsigned long *size, mtrr_type *type)
8048 - {
8049 - unsigned int mask_lo, mask_hi, base_lo, base_hi;
8050 -+ unsigned int tmp, hi;
8051 -
8052 - rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
8053 - if ((mask_lo & 0x800) == 0) {
8054 -@@ -378,8 +379,23 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
8055 - rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
8056 -
8057 - /* Work out the shifted address mask. */
8058 -- mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
8059 -- | mask_lo >> PAGE_SHIFT;
8060 -+ tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
8061 -+ mask_lo = size_or_mask | tmp;
8062 -+ /* Expand tmp with high bits to all 1s*/
8063 -+ hi = fls(tmp);
8064 -+ if (hi > 0) {
8065 -+ tmp |= ~((1<<(hi - 1)) - 1);
8066 -+
8067 -+ if (tmp != mask_lo) {
8068 -+ static int once = 1;
8069 -+
8070 -+ if (once) {
8071 -+ printk(KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
8072 -+ once = 0;
8073 -+ }
8074 -+ mask_lo = tmp;
8075 -+ }
8076 -+ }
8077 -
8078 - /* This works correctly if size is a power of two, i.e. a
8079 - contiguous range. */
8080 -diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
8081 -index 934c7b6..d333a74 100644
8082 ---- a/arch/x86/kvm/paging_tmpl.h
8083 -+++ b/arch/x86/kvm/paging_tmpl.h
8084 -@@ -343,7 +343,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
8085 - shadow_addr = __pa(shadow_page->spt);
8086 - shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
8087 - | PT_WRITABLE_MASK | PT_USER_MASK;
8088 -- *shadow_ent = shadow_pte;
8089 -+ set_shadow_pte(shadow_ent, shadow_pte);
8090 - }
8091 -
8092 - mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
8093 -diff --git a/crypto/authenc.c b/crypto/authenc.c
8094 -index 4b22676..fd9f06c 100644
8095 ---- a/crypto/authenc.c
8096 -+++ b/crypto/authenc.c
8097 -@@ -174,8 +174,9 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
8098 - static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
8099 - int err)
8100 - {
8101 -+ struct aead_request *areq = req->data;
8102 -+
8103 - if (!err) {
8104 -- struct aead_request *areq = req->data;
8105 - struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
8106 - struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
8107 - struct ablkcipher_request *abreq = aead_request_ctx(areq);
8108 -@@ -185,7 +186,7 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
8109 - err = crypto_authenc_genicv(areq, iv, 0);
8110 - }
8111 -
8112 -- aead_request_complete(req->data, err);
8113 -+ aead_request_complete(areq, err);
8114 - }
8115 -
8116 - static int crypto_authenc_encrypt(struct aead_request *req)
8117 -@@ -216,14 +217,15 @@ static int crypto_authenc_encrypt(struct aead_request *req)
8118 - static void crypto_authenc_givencrypt_done(struct crypto_async_request *req,
8119 - int err)
8120 - {
8121 -+ struct aead_request *areq = req->data;
8122 -+
8123 - if (!err) {
8124 -- struct aead_request *areq = req->data;
8125 - struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
8126 -
8127 - err = crypto_authenc_genicv(areq, greq->giv, 0);
8128 - }
8129 -
8130 -- aead_request_complete(req->data, err);
8131 -+ aead_request_complete(areq, err);
8132 - }
8133 -
8134 - static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
8135 -diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
8136 -index ad169ff..80c655f 100644
8137 ---- a/drivers/ata/sata_mv.c
8138 -+++ b/drivers/ata/sata_mv.c
8139 -@@ -1134,30 +1134,16 @@ static int mv_qc_defer(struct ata_queued_cmd *qc)
8140 - if (ap->nr_active_links == 0)
8141 - return 0;
8142 -
8143 -- if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
8144 -- /*
8145 -- * The port is operating in host queuing mode (EDMA).
8146 -- * It can accomodate a new qc if the qc protocol
8147 -- * is compatible with the current host queue mode.
8148 -- */
8149 -- if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
8150 -- /*
8151 -- * The host queue (EDMA) is in NCQ mode.
8152 -- * If the new qc is also an NCQ command,
8153 -- * then allow the new qc.
8154 -- */
8155 -- if (qc->tf.protocol == ATA_PROT_NCQ)
8156 -- return 0;
8157 -- } else {
8158 -- /*
8159 -- * The host queue (EDMA) is in non-NCQ, DMA mode.
8160 -- * If the new qc is also a non-NCQ, DMA command,
8161 -- * then allow the new qc.
8162 -- */
8163 -- if (qc->tf.protocol == ATA_PROT_DMA)
8164 -- return 0;
8165 -- }
8166 -- }
8167 -+ /*
8168 -+ * The port is operating in host queuing mode (EDMA) with NCQ
8169 -+ * enabled, allow multiple NCQ commands. EDMA also allows
8170 -+ * queueing multiple DMA commands but libata core currently
8171 -+ * doesn't allow it.
8172 -+ */
8173 -+ if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
8174 -+ (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
8175 -+ return 0;
8176 -+
8177 - return ATA_DEFER_PORT;
8178 - }
8179 -
8180 -diff --git a/drivers/char/random.c b/drivers/char/random.c
8181 -index 0cf98bd..71320d2 100644
8182 ---- a/drivers/char/random.c
8183 -+++ b/drivers/char/random.c
8184 -@@ -406,7 +406,7 @@ struct entropy_store {
8185 - /* read-write data: */
8186 - spinlock_t lock;
8187 - unsigned add_ptr;
8188 -- int entropy_count;
8189 -+ int entropy_count; /* Must at no time exceed ->POOLBITS! */
8190 - int input_rotate;
8191 - };
8192 -
8193 -@@ -519,6 +519,7 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
8194 - static void credit_entropy_bits(struct entropy_store *r, int nbits)
8195 - {
8196 - unsigned long flags;
8197 -+ int entropy_count;
8198 -
8199 - if (!nbits)
8200 - return;
8201 -@@ -526,20 +527,20 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
8202 - spin_lock_irqsave(&r->lock, flags);
8203 -
8204 - DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
8205 -- r->entropy_count += nbits;
8206 -- if (r->entropy_count < 0) {
8207 -+ entropy_count = r->entropy_count;
8208 -+ entropy_count += nbits;
8209 -+ if (entropy_count < 0) {
8210 - DEBUG_ENT("negative entropy/overflow\n");
8211 -- r->entropy_count = 0;
8212 -- } else if (r->entropy_count > r->poolinfo->POOLBITS)
8213 -- r->entropy_count = r->poolinfo->POOLBITS;
8214 -+ entropy_count = 0;
8215 -+ } else if (entropy_count > r->poolinfo->POOLBITS)
8216 -+ entropy_count = r->poolinfo->POOLBITS;
8217 -+ r->entropy_count = entropy_count;
8218 -
8219 - /* should we wake readers? */
8220 -- if (r == &input_pool &&
8221 -- r->entropy_count >= random_read_wakeup_thresh) {
8222 -+ if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
8223 - wake_up_interruptible(&random_read_wait);
8224 - kill_fasync(&fasync, SIGIO, POLL_IN);
8225 - }
8226 --
8227 - spin_unlock_irqrestore(&r->lock, flags);
8228 - }
8229 -
8230 -diff --git a/drivers/misc/eeepc-laptop.c b/drivers/misc/eeepc-laptop.c
8231 -index 6d72760..3f3abf9 100644
8232 ---- a/drivers/misc/eeepc-laptop.c
8233 -+++ b/drivers/misc/eeepc-laptop.c
8234 -@@ -553,9 +553,9 @@ static void eeepc_hwmon_exit(void)
8235 - hwmon = eeepc_hwmon_device;
8236 - if (!hwmon)
8237 - return ;
8238 -- hwmon_device_unregister(hwmon);
8239 - sysfs_remove_group(&hwmon->kobj,
8240 - &hwmon_attribute_group);
8241 -+ hwmon_device_unregister(hwmon);
8242 - eeepc_hwmon_device = NULL;
8243 - }
8244 -
8245 -diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
8246 -index 3c798ae..8fe0a49 100644
8247 ---- a/drivers/net/atlx/atl1.c
8248 -+++ b/drivers/net/atlx/atl1.c
8249 -@@ -3019,7 +3019,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
8250 - netdev->features = NETIF_F_HW_CSUM;
8251 - netdev->features |= NETIF_F_SG;
8252 - netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
8253 -- netdev->features |= NETIF_F_TSO;
8254 - netdev->features |= NETIF_F_LLTX;
8255 -
8256 - /*
8257 -diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
8258 -index 20d4fe9..1652f10 100644
8259 ---- a/drivers/net/forcedeth.c
8260 -+++ b/drivers/net/forcedeth.c
8261 -@@ -5420,7 +5420,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
8262 - if (id->driver_data & DEV_HAS_CHECKSUM) {
8263 - np->rx_csum = 1;
8264 - np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
8265 -- dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
8266 -+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
8267 - dev->features |= NETIF_F_TSO;
8268 - }
8269 -
8270 -@@ -5728,7 +5728,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
8271 -
8272 - dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
8273 - dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
8274 -- dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ?
8275 -+ dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
8276 - "csum " : "",
8277 - dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
8278 - "vlan " : "",
8279 -diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
8280 -index 42d7c0a..0e4eb15 100644
8281 ---- a/drivers/net/r8169.c
8282 -+++ b/drivers/net/r8169.c
8283 -@@ -2822,7 +2822,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
8284 - pkt_size, PCI_DMA_FROMDEVICE);
8285 - rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
8286 - } else {
8287 -- pci_unmap_single(pdev, addr, pkt_size,
8288 -+ pci_unmap_single(pdev, addr, tp->rx_buf_sz,
8289 - PCI_DMA_FROMDEVICE);
8290 - tp->Rx_skbuff[entry] = NULL;
8291 - }
8292 -diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
8293 -index cc4bde8..1710e49 100644
8294 ---- a/drivers/net/tg3.c
8295 -+++ b/drivers/net/tg3.c
8296 -@@ -1672,15 +1672,43 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
8297 - }
8298 -
8299 - /* tp->lock is held. */
8300 -+static inline void tg3_generate_fw_event(struct tg3 *tp)
8301 -+{
8302 -+ u32 val;
8303 -+
8304 -+ val = tr32(GRC_RX_CPU_EVENT);
8305 -+ val |= GRC_RX_CPU_DRIVER_EVENT;
8306 -+ tw32_f(GRC_RX_CPU_EVENT, val);
8307 -+
8308 -+ tp->last_event_jiffies = jiffies;
8309 -+}
8310 -+
8311 -+#define TG3_FW_EVENT_TIMEOUT_USEC 2500
8312 -+
8313 -+/* tp->lock is held. */
8314 - static void tg3_wait_for_event_ack(struct tg3 *tp)
8315 - {
8316 - int i;
8317 -+ unsigned int delay_cnt;
8318 -+ long time_remain;
8319 -+
8320 -+ /* If enough time has passed, no wait is necessary. */
8321 -+ time_remain = (long)(tp->last_event_jiffies + 1 +
8322 -+ usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
8323 -+ (long)jiffies;
8324 -+ if (time_remain < 0)
8325 -+ return;
8326 -
8327 -- /* Wait for up to 2.5 milliseconds */
8328 -- for (i = 0; i < 250000; i++) {
8329 -+ /* Check if we can shorten the wait time. */
8330 -+ delay_cnt = jiffies_to_usecs(time_remain);
8331 -+ if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
8332 -+ delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
8333 -+ delay_cnt = (delay_cnt >> 3) + 1;
8334 -+
8335 -+ for (i = 0; i < delay_cnt; i++) {
8336 - if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
8337 - break;
8338 -- udelay(10);
8339 -+ udelay(8);
8340 - }
8341 - }
8342 -
8343 -@@ -1729,9 +1757,7 @@ static void tg3_ump_link_report(struct tg3 *tp)
8344 - val = 0;
8345 - tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
8346 -
8347 -- val = tr32(GRC_RX_CPU_EVENT);
8348 -- val |= GRC_RX_CPU_DRIVER_EVENT;
8349 -- tw32_f(GRC_RX_CPU_EVENT, val);
8350 -+ tg3_generate_fw_event(tp);
8351 - }
8352 -
8353 - static void tg3_link_report(struct tg3 *tp)
8354 -@@ -5565,6 +5591,7 @@ static int tg3_chip_reset(struct tg3 *tp)
8355 - tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8356 - if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8357 - tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
8358 -+ tp->last_event_jiffies = jiffies;
8359 - if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8360 - tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
8361 - }
8362 -@@ -5578,15 +5605,12 @@ static void tg3_stop_fw(struct tg3 *tp)
8363 - {
8364 - if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8365 - !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8366 -- u32 val;
8367 --
8368 - /* Wait for RX cpu to ACK the previous event. */
8369 - tg3_wait_for_event_ack(tp);
8370 -
8371 - tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
8372 -- val = tr32(GRC_RX_CPU_EVENT);
8373 -- val |= GRC_RX_CPU_DRIVER_EVENT;
8374 -- tw32(GRC_RX_CPU_EVENT, val);
8375 -+
8376 -+ tg3_generate_fw_event(tp);
8377 -
8378 - /* Wait for RX cpu to ACK this event. */
8379 - tg3_wait_for_event_ack(tp);
8380 -@@ -7477,8 +7501,6 @@ static void tg3_timer(unsigned long __opaque)
8381 - */
8382 - if (!--tp->asf_counter) {
8383 - if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
8384 -- u32 val;
8385 --
8386 - tg3_wait_for_event_ack(tp);
8387 -
8388 - tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8389 -@@ -7486,9 +7508,8 @@ static void tg3_timer(unsigned long __opaque)
8390 - tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8391 - /* 5 seconds timeout */
8392 - tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
8393 -- val = tr32(GRC_RX_CPU_EVENT);
8394 -- val |= GRC_RX_CPU_DRIVER_EVENT;
8395 -- tw32_f(GRC_RX_CPU_EVENT, val);
8396 -+
8397 -+ tg3_generate_fw_event(tp);
8398 - }
8399 - tp->asf_counter = tp->asf_multiplier;
8400 - }
8401 -diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
8402 -index 0404f93..d68b579 100644
8403 ---- a/drivers/net/tg3.h
8404 -+++ b/drivers/net/tg3.h
8405 -@@ -2404,7 +2404,10 @@ struct tg3 {
8406 - struct tg3_ethtool_stats estats;
8407 - struct tg3_ethtool_stats estats_prev;
8408 -
8409 -+ union {
8410 - unsigned long phy_crc_errors;
8411 -+ unsigned long last_event_jiffies;
8412 -+ };
8413 -
8414 - u32 rx_offset;
8415 - u32 tg3_flags;
8416 -diff --git a/drivers/pci/search.c b/drivers/pci/search.c
8417 -index 217814f..3b3b5f1 100644
8418 ---- a/drivers/pci/search.c
8419 -+++ b/drivers/pci/search.c
8420 -@@ -280,6 +280,8 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
8421 - match_pci_dev_by_id);
8422 - if (dev)
8423 - pdev = to_pci_dev(dev);
8424 -+ if (from)
8425 -+ pci_dev_put(from);
8426 - return pdev;
8427 - }
8428 -
8429 -diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
8430 -index 9f996ec..dd70bf7 100644
8431 ---- a/drivers/rtc/rtc-lib.c
8432 -+++ b/drivers/rtc/rtc-lib.c
8433 -@@ -51,10 +51,11 @@ EXPORT_SYMBOL(rtc_year_days);
8434 - */
8435 - void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
8436 - {
8437 -- unsigned int days, month, year;
8438 -+ unsigned int month, year;
8439 -+ int days;
8440 -
8441 - days = time / 86400;
8442 -- time -= days * 86400;
8443 -+ time -= (unsigned int) days * 86400;
8444 -
8445 - /* day of the week, 1970-01-01 was a Thursday */
8446 - tm->tm_wday = (days + 4) % 7;
8447 -diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
8448 -index fc2509c..a466820 100644
8449 ---- a/drivers/s390/block/dasd_eckd.h
8450 -+++ b/drivers/s390/block/dasd_eckd.h
8451 -@@ -379,7 +379,7 @@ struct dasd_psf_prssd_data {
8452 - unsigned char flags;
8453 - unsigned char reserved[4];
8454 - unsigned char suborder;
8455 -- unsigned char varies[9];
8456 -+ unsigned char varies[5];
8457 - } __attribute__ ((packed));
8458 -
8459 - /*
8460 -diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
8461 -index be95e55..4050845 100644
8462 ---- a/drivers/serial/8250.c
8463 -+++ b/drivers/serial/8250.c
8464 -@@ -1895,15 +1895,23 @@ static int serial8250_startup(struct uart_port *port)
8465 - * kick the UART on a regular basis.
8466 - */
8467 - if (!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) {
8468 -+ up->bugs |= UART_BUG_THRE;
8469 - pr_debug("ttyS%d - using backup timer\n", port->line);
8470 -- up->timer.function = serial8250_backup_timeout;
8471 -- up->timer.data = (unsigned long)up;
8472 -- mod_timer(&up->timer, jiffies +
8473 -- poll_timeout(up->port.timeout) + HZ / 5);
8474 - }
8475 - }
8476 -
8477 - /*
8478 -+ * The above check will only give an accurate result the first time
8479 -+ * the port is opened so this value needs to be preserved.
8480 -+ */
8481 -+ if (up->bugs & UART_BUG_THRE) {
8482 -+ up->timer.function = serial8250_backup_timeout;
8483 -+ up->timer.data = (unsigned long)up;
8484 -+ mod_timer(&up->timer, jiffies +
8485 -+ poll_timeout(up->port.timeout) + HZ / 5);
8486 -+ }
8487 -+
8488 -+ /*
8489 - * If the "interrupt" for this port doesn't correspond with any
8490 - * hardware interrupt, we use a timer-based system. The original
8491 - * driver used to do this with IRQ0.
8492 -diff --git a/drivers/serial/8250.h b/drivers/serial/8250.h
8493 -index 91bd28f..245288d 100644
8494 ---- a/drivers/serial/8250.h
8495 -+++ b/drivers/serial/8250.h
8496 -@@ -49,6 +49,7 @@ struct serial8250_config {
8497 - #define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */
8498 - #define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */
8499 - #define UART_BUG_NOMSR (1 << 2) /* UART has buggy MSR status bits (Au1x00) */
8500 -+#define UART_BUG_THRE (1 << 3) /* UART has buggy THRE reassertion */
8501 -
8502 - #define PROBE_RSA (1 << 0)
8503 - #define PROBE_ANY (~0)
8504 -diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
8505 -index c3201af..560337a 100644
8506 ---- a/drivers/usb/class/cdc-acm.c
8507 -+++ b/drivers/usb/class/cdc-acm.c
8508 -@@ -525,8 +525,8 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
8509 - tasklet_schedule(&acm->urb_task);
8510 -
8511 - done:
8512 --err_out:
8513 - mutex_unlock(&acm->mutex);
8514 -+err_out:
8515 - mutex_unlock(&open_mutex);
8516 - return rv;
8517 -
8518 -diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
8519 -index 59df132..4835bdc 100644
8520 ---- a/drivers/video/fb_defio.c
8521 -+++ b/drivers/video/fb_defio.c
8522 -@@ -114,6 +114,17 @@ static struct vm_operations_struct fb_deferred_io_vm_ops = {
8523 - .page_mkwrite = fb_deferred_io_mkwrite,
8524 - };
8525 -
8526 -+static int fb_deferred_io_set_page_dirty(struct page *page)
8527 -+{
8528 -+ if (!PageDirty(page))
8529 -+ SetPageDirty(page);
8530 -+ return 0;
8531 -+}
8532 -+
8533 -+static const struct address_space_operations fb_deferred_io_aops = {
8534 -+ .set_page_dirty = fb_deferred_io_set_page_dirty,
8535 -+};
8536 -+
8537 - static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
8538 - {
8539 - vma->vm_ops = &fb_deferred_io_vm_ops;
8540 -@@ -163,6 +174,14 @@ void fb_deferred_io_init(struct fb_info *info)
8541 - }
8542 - EXPORT_SYMBOL_GPL(fb_deferred_io_init);
8543 -
8544 -+void fb_deferred_io_open(struct fb_info *info,
8545 -+ struct inode *inode,
8546 -+ struct file *file)
8547 -+{
8548 -+ file->f_mapping->a_ops = &fb_deferred_io_aops;
8549 -+}
8550 -+EXPORT_SYMBOL_GPL(fb_deferred_io_open);
8551 -+
8552 - void fb_deferred_io_cleanup(struct fb_info *info)
8553 - {
8554 - void *screen_base = (void __force *) info->screen_base;
8555 -diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
8556 -index 776f7fc..ce6b5da 100644
8557 ---- a/drivers/video/fbmem.c
8558 -+++ b/drivers/video/fbmem.c
8559 -@@ -1340,6 +1340,10 @@ fb_open(struct inode *inode, struct file *file)
8560 - if (res)
8561 - module_put(info->fbops->owner);
8562 - }
8563 -+#ifdef CONFIG_FB_DEFERRED_IO
8564 -+ if (info->fbdefio)
8565 -+ fb_deferred_io_open(info, inode, file);
8566 -+#endif
8567 - return res;
8568 - }
8569 -
8570 -diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
8571 -index 7191306..a0a7157 100644
8572 ---- a/fs/binfmt_misc.c
8573 -+++ b/fs/binfmt_misc.c
8574 -@@ -119,8 +119,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
8575 - if (bprm->misc_bang)
8576 - goto _ret;
8577 -
8578 -- bprm->misc_bang = 1;
8579 --
8580 - /* to keep locking time low, we copy the interpreter string */
8581 - read_lock(&entries_lock);
8582 - fmt = check_file(bprm);
8583 -@@ -198,6 +196,8 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
8584 - if (retval < 0)
8585 - goto _error;
8586 -
8587 -+ bprm->misc_bang = 1;
8588 -+
8589 - retval = search_binary_handler (bprm, regs);
8590 - if (retval < 0)
8591 - goto _error;
8592 -diff --git a/fs/bio.c b/fs/bio.c
8593 -index 7856257..7db618c 100644
8594 ---- a/fs/bio.c
8595 -+++ b/fs/bio.c
8596 -@@ -464,20 +464,21 @@ static void bio_free_map_data(struct bio_map_data *bmd)
8597 - kfree(bmd);
8598 - }
8599 -
8600 --static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count)
8601 -+static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
8602 -+ gfp_t gfp_mask)
8603 - {
8604 -- struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);
8605 -+ struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);
8606 -
8607 - if (!bmd)
8608 - return NULL;
8609 -
8610 -- bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
8611 -+ bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
8612 - if (!bmd->iovecs) {
8613 - kfree(bmd);
8614 - return NULL;
8615 - }
8616 -
8617 -- bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, GFP_KERNEL);
8618 -+ bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
8619 - if (bmd->sgvecs)
8620 - return bmd;
8621 -
8622 -@@ -486,8 +487,8 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count)
8623 - return NULL;
8624 - }
8625 -
8626 --static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
8627 -- int uncopy)
8628 -+static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
8629 -+ struct sg_iovec *iov, int iov_count, int uncopy)
8630 - {
8631 - int ret = 0, i;
8632 - struct bio_vec *bvec;
8633 -@@ -497,7 +498,7 @@ static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
8634 -
8635 - __bio_for_each_segment(bvec, bio, i, 0) {
8636 - char *bv_addr = page_address(bvec->bv_page);
8637 -- unsigned int bv_len = bvec->bv_len;
8638 -+ unsigned int bv_len = iovecs[i].bv_len;
8639 -
8640 - while (bv_len && iov_idx < iov_count) {
8641 - unsigned int bytes;
8642 -@@ -549,7 +550,7 @@ int bio_uncopy_user(struct bio *bio)
8643 - struct bio_map_data *bmd = bio->bi_private;
8644 - int ret;
8645 -
8646 -- ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs, 1);
8647 -+ ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, bmd->nr_sgvecs, 1);
8648 -
8649 - bio_free_map_data(bmd);
8650 - bio_put(bio);
8651 -@@ -591,7 +592,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
8652 - len += iov[i].iov_len;
8653 - }
8654 -
8655 -- bmd = bio_alloc_map_data(nr_pages, iov_count);
8656 -+ bmd = bio_alloc_map_data(nr_pages, iov_count, GFP_KERNEL);
8657 - if (!bmd)
8658 - return ERR_PTR(-ENOMEM);
8659 -
8660 -@@ -628,7 +629,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
8661 - * success
8662 - */
8663 - if (!write_to_vm) {
8664 -- ret = __bio_copy_iov(bio, iov, iov_count, 0);
8665 -+ ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0);
8666 - if (ret)
8667 - goto cleanup;
8668 - }
8669 -@@ -941,19 +942,22 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
8670 - {
8671 - struct bio_vec *bvec;
8672 - const int read = bio_data_dir(bio) == READ;
8673 -- char *p = bio->bi_private;
8674 -+ struct bio_map_data *bmd = bio->bi_private;
8675 - int i;
8676 -+ char *p = bmd->sgvecs[0].iov_base;
8677 -
8678 - __bio_for_each_segment(bvec, bio, i, 0) {
8679 - char *addr = page_address(bvec->bv_page);
8680 -+ int len = bmd->iovecs[i].bv_len;
8681 -
8682 - if (read && !err)
8683 -- memcpy(p, addr, bvec->bv_len);
8684 -+ memcpy(p, addr, len);
8685 -
8686 - __free_page(bvec->bv_page);
8687 -- p += bvec->bv_len;
8688 -+ p += len;
8689 - }
8690 -
8691 -+ bio_free_map_data(bmd);
8692 - bio_put(bio);
8693 - }
8694 -
8695 -@@ -977,11 +981,21 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
8696 - const int nr_pages = end - start;
8697 - struct bio *bio;
8698 - struct bio_vec *bvec;
8699 -+ struct bio_map_data *bmd;
8700 - int i, ret;
8701 -+ struct sg_iovec iov;
8702 -+
8703 -+ iov.iov_base = data;
8704 -+ iov.iov_len = len;
8705 -+
8706 -+ bmd = bio_alloc_map_data(nr_pages, 1, gfp_mask);
8707 -+ if (!bmd)
8708 -+ return ERR_PTR(-ENOMEM);
8709 -
8710 -+ ret = -ENOMEM;
8711 - bio = bio_alloc(gfp_mask, nr_pages);
8712 - if (!bio)
8713 -- return ERR_PTR(-ENOMEM);
8714 -+ goto out_bmd;
8715 -
8716 - while (len) {
8717 - struct page *page;
8718 -@@ -1015,14 +1029,18 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
8719 - }
8720 - }
8721 -
8722 -- bio->bi_private = data;
8723 -+ bio->bi_private = bmd;
8724 - bio->bi_end_io = bio_copy_kern_endio;
8725 -+
8726 -+ bio_set_map_data(bmd, bio, &iov, 1);
8727 - return bio;
8728 - cleanup:
8729 - bio_for_each_segment(bvec, bio, i)
8730 - __free_page(bvec->bv_page);
8731 -
8732 - bio_put(bio);
8733 -+out_bmd:
8734 -+ bio_free_map_data(bmd);
8735 -
8736 - return ERR_PTR(ret);
8737 - }
8738 -diff --git a/fs/cifs/file.c b/fs/cifs/file.c
8739 -index 0aac824..8da903b 100644
8740 ---- a/fs/cifs/file.c
8741 -+++ b/fs/cifs/file.c
8742 -@@ -832,6 +832,10 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
8743 - return -EBADF;
8744 - open_file = (struct cifsFileInfo *) file->private_data;
8745 -
8746 -+ rc = generic_write_checks(file, poffset, &write_size, 0);
8747 -+ if (rc)
8748 -+ return rc;
8749 -+
8750 - xid = GetXid();
8751 -
8752 - if (*poffset > file->f_path.dentry->d_inode->i_size)
8753 -diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
8754 -index 0c3b618..f40423e 100644
8755 ---- a/fs/cramfs/inode.c
8756 -+++ b/fs/cramfs/inode.c
8757 -@@ -43,58 +43,13 @@ static DEFINE_MUTEX(read_mutex);
8758 - static int cramfs_iget5_test(struct inode *inode, void *opaque)
8759 - {
8760 - struct cramfs_inode *cramfs_inode = opaque;
8761 --
8762 -- if (inode->i_ino != CRAMINO(cramfs_inode))
8763 -- return 0; /* does not match */
8764 --
8765 -- if (inode->i_ino != 1)
8766 -- return 1;
8767 --
8768 -- /* all empty directories, char, block, pipe, and sock, share inode #1 */
8769 --
8770 -- if ((inode->i_mode != cramfs_inode->mode) ||
8771 -- (inode->i_gid != cramfs_inode->gid) ||
8772 -- (inode->i_uid != cramfs_inode->uid))
8773 -- return 0; /* does not match */
8774 --
8775 -- if ((S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) &&
8776 -- (inode->i_rdev != old_decode_dev(cramfs_inode->size)))
8777 -- return 0; /* does not match */
8778 --
8779 -- return 1; /* matches */
8780 -+ return inode->i_ino == CRAMINO(cramfs_inode) && inode->i_ino != 1;
8781 - }
8782 -
8783 - static int cramfs_iget5_set(struct inode *inode, void *opaque)
8784 - {
8785 -- static struct timespec zerotime;
8786 - struct cramfs_inode *cramfs_inode = opaque;
8787 -- inode->i_mode = cramfs_inode->mode;
8788 -- inode->i_uid = cramfs_inode->uid;
8789 -- inode->i_size = cramfs_inode->size;
8790 -- inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
8791 -- inode->i_gid = cramfs_inode->gid;
8792 -- /* Struct copy intentional */
8793 -- inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
8794 - inode->i_ino = CRAMINO(cramfs_inode);
8795 -- /* inode->i_nlink is left 1 - arguably wrong for directories,
8796 -- but it's the best we can do without reading the directory
8797 -- contents. 1 yields the right result in GNU find, even
8798 -- without -noleaf option. */
8799 -- if (S_ISREG(inode->i_mode)) {
8800 -- inode->i_fop = &generic_ro_fops;
8801 -- inode->i_data.a_ops = &cramfs_aops;
8802 -- } else if (S_ISDIR(inode->i_mode)) {
8803 -- inode->i_op = &cramfs_dir_inode_operations;
8804 -- inode->i_fop = &cramfs_directory_operations;
8805 -- } else if (S_ISLNK(inode->i_mode)) {
8806 -- inode->i_op = &page_symlink_inode_operations;
8807 -- inode->i_data.a_ops = &cramfs_aops;
8808 -- } else {
8809 -- inode->i_size = 0;
8810 -- inode->i_blocks = 0;
8811 -- init_special_inode(inode, inode->i_mode,
8812 -- old_decode_dev(cramfs_inode->size));
8813 -- }
8814 - return 0;
8815 - }
8816 -
8817 -@@ -104,12 +59,48 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
8818 - struct inode *inode = iget5_locked(sb, CRAMINO(cramfs_inode),
8819 - cramfs_iget5_test, cramfs_iget5_set,
8820 - cramfs_inode);
8821 -+ static struct timespec zerotime;
8822 -+
8823 - if (inode && (inode->i_state & I_NEW)) {
8824 -+ inode->i_mode = cramfs_inode->mode;
8825 -+ inode->i_uid = cramfs_inode->uid;
8826 -+ inode->i_size = cramfs_inode->size;
8827 -+ inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
8828 -+ inode->i_gid = cramfs_inode->gid;
8829 -+ /* Struct copy intentional */
8830 -+ inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
8831 -+ /* inode->i_nlink is left 1 - arguably wrong for directories,
8832 -+ but it's the best we can do without reading the directory
8833 -+ contents. 1 yields the right result in GNU find, even
8834 -+ without -noleaf option. */
8835 -+ if (S_ISREG(inode->i_mode)) {
8836 -+ inode->i_fop = &generic_ro_fops;
8837 -+ inode->i_data.a_ops = &cramfs_aops;
8838 -+ } else if (S_ISDIR(inode->i_mode)) {
8839 -+ inode->i_op = &cramfs_dir_inode_operations;
8840 -+ inode->i_fop = &cramfs_directory_operations;
8841 -+ } else if (S_ISLNK(inode->i_mode)) {
8842 -+ inode->i_op = &page_symlink_inode_operations;
8843 -+ inode->i_data.a_ops = &cramfs_aops;
8844 -+ } else {
8845 -+ inode->i_size = 0;
8846 -+ inode->i_blocks = 0;
8847 -+ init_special_inode(inode, inode->i_mode,
8848 -+ old_decode_dev(cramfs_inode->size));
8849 -+ }
8850 - unlock_new_inode(inode);
8851 - }
8852 - return inode;
8853 - }
8854 -
8855 -+static void cramfs_drop_inode(struct inode *inode)
8856 -+{
8857 -+ if (inode->i_ino == 1)
8858 -+ generic_delete_inode(inode);
8859 -+ else
8860 -+ generic_drop_inode(inode);
8861 -+}
8862 -+
8863 - /*
8864 - * We have our own block cache: don't fill up the buffer cache
8865 - * with the rom-image, because the way the filesystem is set
8866 -@@ -534,6 +525,7 @@ static const struct super_operations cramfs_ops = {
8867 - .put_super = cramfs_put_super,
8868 - .remount_fs = cramfs_remount,
8869 - .statfs = cramfs_statfs,
8870 -+ .drop_inode = cramfs_drop_inode,
8871 - };
8872 -
8873 - static int cramfs_get_sb(struct file_system_type *fs_type,
8874 -diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
8875 -index b6ed383..54b8b41 100644
8876 ---- a/fs/nfsd/nfs4acl.c
8877 -+++ b/fs/nfsd/nfs4acl.c
8878 -@@ -443,7 +443,7 @@ init_state(struct posix_acl_state *state, int cnt)
8879 - * enough space for either:
8880 - */
8881 - alloc = sizeof(struct posix_ace_state_array)
8882 -- + cnt*sizeof(struct posix_ace_state);
8883 -+ + cnt*sizeof(struct posix_user_ace_state);
8884 - state->users = kzalloc(alloc, GFP_KERNEL);
8885 - if (!state->users)
8886 - return -ENOMEM;
8887 -diff --git a/include/linux/Kbuild b/include/linux/Kbuild
8888 -index 71d70d1..27af0b8 100644
8889 ---- a/include/linux/Kbuild
8890 -+++ b/include/linux/Kbuild
8891 -@@ -293,7 +293,6 @@ unifdef-y += parport.h
8892 - unifdef-y += patchkey.h
8893 - unifdef-y += pci.h
8894 - unifdef-y += personality.h
8895 --unifdef-y += pim.h
8896 - unifdef-y += pktcdvd.h
8897 - unifdef-y += pmu.h
8898 - unifdef-y += poll.h
8899 -diff --git a/include/linux/fb.h b/include/linux/fb.h
8900 -index 72295b0..dd82c76 100644
8901 ---- a/include/linux/fb.h
8902 -+++ b/include/linux/fb.h
8903 -@@ -973,6 +973,9 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
8904 -
8905 - /* drivers/video/fb_defio.c */
8906 - extern void fb_deferred_io_init(struct fb_info *info);
8907 -+extern void fb_deferred_io_open(struct fb_info *info,
8908 -+ struct inode *inode,
8909 -+ struct file *file);
8910 - extern void fb_deferred_io_cleanup(struct fb_info *info);
8911 - extern int fb_deferred_io_fsync(struct file *file, struct dentry *dentry,
8912 - int datasync);
8913 -diff --git a/include/linux/mroute.h b/include/linux/mroute.h
8914 -index de4decf..35a8277 100644
8915 ---- a/include/linux/mroute.h
8916 -+++ b/include/linux/mroute.h
8917 -@@ -2,11 +2,7 @@
8918 - #define __LINUX_MROUTE_H
8919 -
8920 - #include <linux/sockios.h>
8921 --#include <linux/types.h>
8922 --#ifdef __KERNEL__
8923 - #include <linux/in.h>
8924 --#endif
8925 --#include <linux/pim.h>
8926 -
8927 - /*
8928 - * Based on the MROUTING 3.5 defines primarily to keep
8929 -@@ -214,6 +210,27 @@ struct mfc_cache
8930 - #define IGMPMSG_WHOLEPKT 3 /* For PIM Register processing */
8931 -
8932 - #ifdef __KERNEL__
8933 -+
8934 -+#define PIM_V1_VERSION __constant_htonl(0x10000000)
8935 -+#define PIM_V1_REGISTER 1
8936 -+
8937 -+#define PIM_VERSION 2
8938 -+#define PIM_REGISTER 1
8939 -+
8940 -+#define PIM_NULL_REGISTER __constant_htonl(0x40000000)
8941 -+
8942 -+/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */
8943 -+
8944 -+struct pimreghdr
8945 -+{
8946 -+ __u8 type;
8947 -+ __u8 reserved;
8948 -+ __be16 csum;
8949 -+ __be32 flags;
8950 -+};
8951 -+
8952 -+extern int pim_rcv_v1(struct sk_buff *);
8953 -+
8954 - struct rtmsg;
8955 - extern int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait);
8956 - #endif
8957 -diff --git a/include/linux/pim.h b/include/linux/pim.h
8958 -deleted file mode 100644
8959 -index 236ffd3..0000000
8960 ---- a/include/linux/pim.h
8961 -+++ /dev/null
8962 -@@ -1,45 +0,0 @@
8963 --#ifndef __LINUX_PIM_H
8964 --#define __LINUX_PIM_H
8965 --
8966 --#include <asm/byteorder.h>
8967 --
8968 --#ifndef __KERNEL__
8969 --struct pim {
8970 --#if defined(__LITTLE_ENDIAN_BITFIELD)
8971 -- __u8 pim_type:4, /* PIM message type */
8972 -- pim_ver:4; /* PIM version */
8973 --#elif defined(__BIG_ENDIAN_BITFIELD)
8974 -- __u8 pim_ver:4; /* PIM version */
8975 -- pim_type:4; /* PIM message type */
8976 --#endif
8977 -- __u8 pim_rsv; /* Reserved */
8978 -- __be16 pim_cksum; /* Checksum */
8979 --};
8980 --
8981 --#define PIM_MINLEN 8
8982 --#endif
8983 --
8984 --/* Message types - V1 */
8985 --#define PIM_V1_VERSION __constant_htonl(0x10000000)
8986 --#define PIM_V1_REGISTER 1
8987 --
8988 --/* Message types - V2 */
8989 --#define PIM_VERSION 2
8990 --#define PIM_REGISTER 1
8991 --
8992 --#if defined(__KERNEL__)
8993 --#define PIM_NULL_REGISTER __constant_htonl(0x40000000)
8994 --
8995 --/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */
8996 --struct pimreghdr
8997 --{
8998 -- __u8 type;
8999 -- __u8 reserved;
9000 -- __be16 csum;
9001 -- __be32 flags;
9002 --};
9003 --
9004 --struct sk_buff;
9005 --extern int pim_rcv_v1(struct sk_buff *);
9006 --#endif
9007 --#endif
9008 -diff --git a/include/net/addrconf.h b/include/net/addrconf.h
9009 -index bbd3d58..99ca7cd 100644
9010 ---- a/include/net/addrconf.h
9011 -+++ b/include/net/addrconf.h
9012 -@@ -80,7 +80,8 @@ extern struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
9013 - struct net_device *dev,
9014 - int strict);
9015 -
9016 --extern int ipv6_dev_get_saddr(struct net_device *dev,
9017 -+extern int ipv6_dev_get_saddr(struct net *net,
9018 -+ struct net_device *dev,
9019 - const struct in6_addr *daddr,
9020 - unsigned int srcprefs,
9021 - struct in6_addr *saddr);
9022 -diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
9023 -index 9313491..03462e5 100644
9024 ---- a/include/net/ip6_route.h
9025 -+++ b/include/net/ip6_route.h
9026 -@@ -112,6 +112,7 @@ struct rt6_rtnl_dump_arg
9027 - {
9028 - struct sk_buff *skb;
9029 - struct netlink_callback *cb;
9030 -+ struct net *net;
9031 - };
9032 -
9033 - extern int rt6_dump_route(struct rt6_info *rt, void *p_arg);
9034 -diff --git a/mm/page_alloc.c b/mm/page_alloc.c
9035 -index f32fae3..0d520dc 100644
9036 ---- a/mm/page_alloc.c
9037 -+++ b/mm/page_alloc.c
9038 -@@ -693,6 +693,9 @@ int move_freepages(struct zone *zone,
9039 - #endif
9040 -
9041 - for (page = start_page; page <= end_page;) {
9042 -+ /* Make sure we are not inadvertently changing nodes */
9043 -+ VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
9044 -+
9045 - if (!pfn_valid_within(page_to_pfn(page))) {
9046 - page++;
9047 - continue;
9048 -@@ -2475,6 +2478,10 @@ static void setup_zone_migrate_reserve(struct zone *zone)
9049 - continue;
9050 - page = pfn_to_page(pfn);
9051 -
9052 -+ /* Watch out for overlapping nodes */
9053 -+ if (page_to_nid(page) != zone_to_nid(zone))
9054 -+ continue;
9055 -+
9056 - /* Blocks with reserved pages will never free, skip them. */
9057 - if (PageReserved(page))
9058 - continue;
9059 -diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
9060 -index f597987..f288fc4 100644
9061 ---- a/net/ax25/sysctl_net_ax25.c
9062 -+++ b/net/ax25/sysctl_net_ax25.c
9063 -@@ -36,6 +36,7 @@ static struct ctl_path ax25_path[] = {
9064 - { .procname = "ax25", .ctl_name = NET_AX25, },
9065 - { }
9066 - };
9067 -+
9068 - static const ctl_table ax25_param_table[] = {
9069 - {
9070 - .ctl_name = NET_AX25_IP_DEFAULT_MODE,
9071 -@@ -167,6 +168,7 @@ static const ctl_table ax25_param_table[] = {
9072 - .extra1 = &min_proto,
9073 - .extra2 = &max_proto
9074 - },
9075 -+#ifdef CONFIG_AX25_DAMA_SLAVE
9076 - {
9077 - .ctl_name = NET_AX25_DAMA_SLAVE_TIMEOUT,
9078 - .procname = "dama_slave_timeout",
9079 -@@ -177,6 +179,8 @@ static const ctl_table ax25_param_table[] = {
9080 - .extra1 = &min_ds_timeout,
9081 - .extra2 = &max_ds_timeout
9082 - },
9083 -+#endif
9084 -+
9085 - { .ctl_name = 0 } /* that's all, folks! */
9086 - };
9087 -
9088 -@@ -210,16 +214,6 @@ void ax25_register_sysctl(void)
9089 - ax25_table[n].procname = ax25_dev->dev->name;
9090 - ax25_table[n].mode = 0555;
9091 -
9092 --#ifndef CONFIG_AX25_DAMA_SLAVE
9093 -- /*
9094 -- * We do not wish to have a representation of this parameter
9095 -- * in /proc/sys/ when configured *not* to include the
9096 -- * AX.25 DAMA slave code, do we?
9097 -- */
9098 --
9099 -- child[AX25_VALUES_DS_TIMEOUT].procname = NULL;
9100 --#endif
9101 --
9102 - child[AX25_MAX_VALUES].ctl_name = 0; /* just in case... */
9103 -
9104 - for (k = 0; k < AX25_MAX_VALUES; k++)
9105 -diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
9106 -index 9f3f7ba..b6e7ec0 100644
9107 ---- a/net/ipv4/udp.c
9108 -+++ b/net/ipv4/udp.c
9109 -@@ -988,7 +988,9 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
9110 - up->encap_rcv != NULL) {
9111 - int ret;
9112 -
9113 -+ bh_unlock_sock(sk);
9114 - ret = (*up->encap_rcv)(sk, skb);
9115 -+ bh_lock_sock(sk);
9116 - if (ret <= 0) {
9117 - UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS,
9118 - is_udplite);
9119 -@@ -1087,7 +1089,7 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
9120 - if (skb1) {
9121 - int ret = 0;
9122 -
9123 -- bh_lock_sock_nested(sk);
9124 -+ bh_lock_sock(sk);
9125 - if (!sock_owned_by_user(sk))
9126 - ret = udp_queue_rcv_skb(sk, skb1);
9127 - else
9128 -@@ -1187,7 +1189,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
9129 -
9130 - if (sk != NULL) {
9131 - int ret = 0;
9132 -- bh_lock_sock_nested(sk);
9133 -+ bh_lock_sock(sk);
9134 - if (!sock_owned_by_user(sk))
9135 - ret = udp_queue_rcv_skb(sk, skb);
9136 - else
9137 -diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
9138 -index ff61a5c..1a1d494 100644
9139 ---- a/net/ipv6/addrconf.c
9140 -+++ b/net/ipv6/addrconf.c
9141 -@@ -1076,13 +1076,12 @@ out:
9142 - return ret;
9143 - }
9144 -
9145 --int ipv6_dev_get_saddr(struct net_device *dst_dev,
9146 -+int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
9147 - const struct in6_addr *daddr, unsigned int prefs,
9148 - struct in6_addr *saddr)
9149 - {
9150 - struct ipv6_saddr_score scores[2],
9151 - *score = &scores[0], *hiscore = &scores[1];
9152 -- struct net *net = dev_net(dst_dev);
9153 - struct ipv6_saddr_dst dst;
9154 - struct net_device *dev;
9155 - int dst_type;
9156 -diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
9157 -index 8d05527..f5de3f9 100644
9158 ---- a/net/ipv6/fib6_rules.c
9159 -+++ b/net/ipv6/fib6_rules.c
9160 -@@ -93,7 +93,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
9161 - if (flags & RT6_LOOKUP_F_SRCPREF_COA)
9162 - srcprefs |= IPV6_PREFER_SRC_COA;
9163 -
9164 -- if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev,
9165 -+ if (ipv6_dev_get_saddr(net,
9166 -+ ip6_dst_idev(&rt->u.dst)->dev,
9167 - &flp->fl6_dst, srcprefs,
9168 - &saddr))
9169 - goto again;
9170 -diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
9171 -index 918fde4..fe80171 100644
9172 ---- a/net/ipv6/ip6_fib.c
9173 -+++ b/net/ipv6/ip6_fib.c
9174 -@@ -380,6 +380,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
9175 -
9176 - arg.skb = skb;
9177 - arg.cb = cb;
9178 -+ arg.net = net;
9179 - w->args = &arg;
9180 -
9181 - for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
9182 -diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
9183 -index 4019770..d99f094 100644
9184 ---- a/net/ipv6/ip6_output.c
9185 -+++ b/net/ipv6/ip6_output.c
9186 -@@ -925,7 +925,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
9187 - goto out_err_release;
9188 -
9189 - if (ipv6_addr_any(&fl->fl6_src)) {
9190 -- err = ipv6_dev_get_saddr(ip6_dst_idev(*dst)->dev,
9191 -+ err = ipv6_dev_get_saddr(net, ip6_dst_idev(*dst)->dev,
9192 - &fl->fl6_dst,
9193 - sk ? inet6_sk(sk)->srcprefs : 0,
9194 - &fl->fl6_src);
9195 -diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
9196 -index 282fdb3..efa84ae 100644
9197 ---- a/net/ipv6/ndisc.c
9198 -+++ b/net/ipv6/ndisc.c
9199 -@@ -549,7 +549,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
9200 - override = 0;
9201 - in6_ifa_put(ifp);
9202 - } else {
9203 -- if (ipv6_dev_get_saddr(dev, daddr,
9204 -+ if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
9205 - inet6_sk(dev_net(dev)->ipv6.ndisc_sk)->srcprefs,
9206 - &tmpaddr))
9207 - return;
9208 -diff --git a/net/ipv6/route.c b/net/ipv6/route.c
9209 -index 7ff6870..9deee59 100644
9210 ---- a/net/ipv6/route.c
9211 -+++ b/net/ipv6/route.c
9212 -@@ -2098,7 +2098,8 @@ static inline size_t rt6_nlmsg_size(void)
9213 - + nla_total_size(sizeof(struct rta_cacheinfo));
9214 - }
9215 -
9216 --static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
9217 -+static int rt6_fill_node(struct net *net,
9218 -+ struct sk_buff *skb, struct rt6_info *rt,
9219 - struct in6_addr *dst, struct in6_addr *src,
9220 - int iif, int type, u32 pid, u32 seq,
9221 - int prefix, int nowait, unsigned int flags)
9222 -@@ -2179,8 +2180,9 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
9223 - #endif
9224 - NLA_PUT_U32(skb, RTA_IIF, iif);
9225 - } else if (dst) {
9226 -+ struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst);
9227 - struct in6_addr saddr_buf;
9228 -- if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev,
9229 -+ if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
9230 - dst, 0, &saddr_buf) == 0)
9231 - NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
9232 - }
9233 -@@ -2225,7 +2227,8 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg)
9234 - } else
9235 - prefix = 0;
9236 -
9237 -- return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
9238 -+ return rt6_fill_node(arg->net,
9239 -+ arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
9240 - NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
9241 - prefix, 0, NLM_F_MULTI);
9242 - }
9243 -@@ -2291,7 +2294,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
9244 - rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl);
9245 - skb->dst = &rt->u.dst;
9246 -
9247 -- err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
9248 -+ err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
9249 - RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
9250 - nlh->nlmsg_seq, 0, 0, 0);
9251 - if (err < 0) {
9252 -@@ -2318,7 +2321,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
9253 - if (skb == NULL)
9254 - goto errout;
9255 -
9256 -- err = rt6_fill_node(skb, rt, NULL, NULL, 0,
9257 -+ err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
9258 - event, info->pid, seq, 0, 0, 0);
9259 - if (err < 0) {
9260 - /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
9261 -diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
9262 -index dd30962..e14aa66 100644
9263 ---- a/net/ipv6/udp.c
9264 -+++ b/net/ipv6/udp.c
9265 -@@ -376,7 +376,7 @@ static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr,
9266 - uh->source, saddr, dif))) {
9267 - struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
9268 - if (buff) {
9269 -- bh_lock_sock_nested(sk2);
9270 -+ bh_lock_sock(sk2);
9271 - if (!sock_owned_by_user(sk2))
9272 - udpv6_queue_rcv_skb(sk2, buff);
9273 - else
9274 -@@ -384,7 +384,7 @@ static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr,
9275 - bh_unlock_sock(sk2);
9276 - }
9277 - }
9278 -- bh_lock_sock_nested(sk);
9279 -+ bh_lock_sock(sk);
9280 - if (!sock_owned_by_user(sk))
9281 - udpv6_queue_rcv_skb(sk, skb);
9282 - else
9283 -@@ -502,7 +502,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
9284 -
9285 - /* deliver */
9286 -
9287 -- bh_lock_sock_nested(sk);
9288 -+ bh_lock_sock(sk);
9289 - if (!sock_owned_by_user(sk))
9290 - udpv6_queue_rcv_skb(sk, skb);
9291 - else
9292 -diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
9293 -index 8f1e054..08e4cbb 100644
9294 ---- a/net/ipv6/xfrm6_policy.c
9295 -+++ b/net/ipv6/xfrm6_policy.c
9296 -@@ -52,12 +52,14 @@ static struct dst_entry *xfrm6_dst_lookup(int tos, xfrm_address_t *saddr,
9297 - static int xfrm6_get_saddr(xfrm_address_t *saddr, xfrm_address_t *daddr)
9298 - {
9299 - struct dst_entry *dst;
9300 -+ struct net_device *dev;
9301 -
9302 - dst = xfrm6_dst_lookup(0, NULL, daddr);
9303 - if (IS_ERR(dst))
9304 - return -EHOSTUNREACH;
9305 -
9306 -- ipv6_dev_get_saddr(ip6_dst_idev(dst)->dev,
9307 -+ dev = ip6_dst_idev(dst)->dev;
9308 -+ ipv6_dev_get_saddr(dev_net(dev), dev,
9309 - (struct in6_addr *)&daddr->a6, 0,
9310 - (struct in6_addr *)&saddr->a6);
9311 - dst_release(dst);
9312 -diff --git a/net/sched/act_api.c b/net/sched/act_api.c
9313 -index 74e662c..b5e116c 100644
9314 ---- a/net/sched/act_api.c
9315 -+++ b/net/sched/act_api.c
9316 -@@ -205,10 +205,9 @@ struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
9317 - {
9318 - struct tcf_common *p = NULL;
9319 - if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
9320 -- if (bind) {
9321 -+ if (bind)
9322 - p->tcfc_bindcnt++;
9323 -- p->tcfc_refcnt++;
9324 -- }
9325 -+ p->tcfc_refcnt++;
9326 - a->priv = p;
9327 - }
9328 - return p;
9329 -diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
9330 -index 3fb58f4..51c3f68 100644
9331 ---- a/net/sched/sch_htb.c
9332 -+++ b/net/sched/sch_htb.c
9333 -@@ -595,11 +595,13 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
9334 - kfree_skb(skb);
9335 - return ret;
9336 - #endif
9337 -- } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
9338 -+ } else if ((ret = cl->un.leaf.q->enqueue(skb, cl->un.leaf.q)) !=
9339 - NET_XMIT_SUCCESS) {
9340 -- sch->qstats.drops++;
9341 -- cl->qstats.drops++;
9342 -- return NET_XMIT_DROP;
9343 -+ if (ret == NET_XMIT_DROP) {
9344 -+ sch->qstats.drops++;
9345 -+ cl->qstats.drops++;
9346 -+ }
9347 -+ return ret;
9348 - } else {
9349 - cl->bstats.packets +=
9350 - skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
9351 -@@ -639,11 +641,13 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
9352 - kfree_skb(skb);
9353 - return ret;
9354 - #endif
9355 -- } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
9356 -+ } else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
9357 - NET_XMIT_SUCCESS) {
9358 -- sch->qstats.drops++;
9359 -- cl->qstats.drops++;
9360 -- return NET_XMIT_DROP;
9361 -+ if (ret == NET_XMIT_DROP) {
9362 -+ sch->qstats.drops++;
9363 -+ cl->qstats.drops++;
9364 -+ }
9365 -+ return ret;
9366 - } else
9367 - htb_activate(q, cl);
9368 -
9369 -diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
9370 -index 5532f10..ec0c921 100644
9371 ---- a/net/sched/sch_prio.c
9372 -+++ b/net/sched/sch_prio.c
9373 -@@ -228,14 +228,20 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
9374 - {
9375 - struct prio_sched_data *q = qdisc_priv(sch);
9376 - struct tc_prio_qopt *qopt;
9377 -- struct nlattr *tb[TCA_PRIO_MAX + 1];
9378 -+ struct nlattr *tb[TCA_PRIO_MAX + 1] = {0};
9379 - int err;
9380 - int i;
9381 -
9382 -- err = nla_parse_nested_compat(tb, TCA_PRIO_MAX, opt, NULL, qopt,
9383 -- sizeof(*qopt));
9384 -- if (err < 0)
9385 -- return err;
9386 -+ qopt = nla_data(opt);
9387 -+ if (nla_len(opt) < sizeof(*qopt))
9388 -+ return -1;
9389 -+
9390 -+ if (nla_len(opt) >= sizeof(*qopt) + sizeof(struct nlattr)) {
9391 -+ err = nla_parse_nested(tb, TCA_PRIO_MAX,
9392 -+ (struct nlattr *) (qopt + 1), NULL);
9393 -+ if (err < 0)
9394 -+ return err;
9395 -+ }
9396 -
9397 - q->bands = qopt->bands;
9398 - /* If we're multiqueue, make sure the number of incoming bands
9399 -diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
9400 -index 0b7d78f..fc6f8f3 100644
9401 ---- a/net/sched/sch_tbf.c
9402 -+++ b/net/sched/sch_tbf.c
9403 -@@ -123,15 +123,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
9404 - struct tbf_sched_data *q = qdisc_priv(sch);
9405 - int ret;
9406 -
9407 -- if (skb->len > q->max_size) {
9408 -- sch->qstats.drops++;
9409 --#ifdef CONFIG_NET_CLS_ACT
9410 -- if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
9411 --#endif
9412 -- kfree_skb(skb);
9413 --
9414 -- return NET_XMIT_DROP;
9415 -- }
9416 -+ if (skb->len > q->max_size)
9417 -+ return qdisc_reshape_fail(skb, sch);
9418 -
9419 - if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) {
9420 - sch->qstats.drops++;
9421 -diff --git a/net/sctp/auth.c b/net/sctp/auth.c
9422 -index 675a5c3..52db5f6 100644
9423 ---- a/net/sctp/auth.c
9424 -+++ b/net/sctp/auth.c
9425 -@@ -80,6 +80,10 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
9426 - {
9427 - struct sctp_auth_bytes *key;
9428 -
9429 -+ /* Verify that we are not going to overflow INT_MAX */
9430 -+ if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
9431 -+ return NULL;
9432 -+
9433 - /* Allocate the shared key */
9434 - key = kmalloc(sizeof(struct sctp_auth_bytes) + key_len, gfp);
9435 - if (!key)
9436 -@@ -782,6 +786,9 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
9437 - for (i = 0; i < hmacs->shmac_num_idents; i++) {
9438 - id = hmacs->shmac_idents[i];
9439 -
9440 -+ if (id > SCTP_AUTH_HMAC_ID_MAX)
9441 -+ return -EOPNOTSUPP;
9442 -+
9443 - if (SCTP_AUTH_HMAC_ID_SHA1 == id)
9444 - has_sha1 = 1;
9445 -
9446 -diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
9447 -index e39a0cd..4c8d9f4 100644
9448 ---- a/net/sctp/endpointola.c
9449 -+++ b/net/sctp/endpointola.c
9450 -@@ -103,6 +103,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
9451 -
9452 - /* Initialize the CHUNKS parameter */
9453 - auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS;
9454 -+ auth_chunks->param_hdr.length = htons(sizeof(sctp_paramhdr_t));
9455 -
9456 - /* If the Add-IP functionality is enabled, we must
9457 - * authenticate, ASCONF and ASCONF-ACK chunks
9458 -@@ -110,8 +111,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
9459 - if (sctp_addip_enable) {
9460 - auth_chunks->chunks[0] = SCTP_CID_ASCONF;
9461 - auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
9462 -- auth_chunks->param_hdr.length =
9463 -- htons(sizeof(sctp_paramhdr_t) + 2);
9464 -+ auth_chunks->param_hdr.length += htons(2);
9465 - }
9466 - }
9467 -
9468 -diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
9469 -index a2f4d4d..38a5d80 100644
9470 ---- a/net/sctp/ipv6.c
9471 -+++ b/net/sctp/ipv6.c
9472 -@@ -317,7 +317,8 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk,
9473 - __func__, asoc, dst, NIP6(daddr->v6.sin6_addr));
9474 -
9475 - if (!asoc) {
9476 -- ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL,
9477 -+ ipv6_dev_get_saddr(sock_net(sctp_opt2sk(sk)),
9478 -+ dst ? ip6_dst_idev(dst)->dev : NULL,
9479 - &daddr->v6.sin6_addr,
9480 - inet6_sk(&sk->inet.sk)->srcprefs,
9481 - &saddr->v6.sin6_addr);
9482 -diff --git a/net/sctp/socket.c b/net/sctp/socket.c
9483 -index 0dbcde6..700d27d 100644
9484 ---- a/net/sctp/socket.c
9485 -+++ b/net/sctp/socket.c
9486 -@@ -2965,6 +2965,9 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
9487 - {
9488 - struct sctp_authchunk val;
9489 -
9490 -+ if (!sctp_auth_enable)
9491 -+ return -EACCES;
9492 -+
9493 - if (optlen != sizeof(struct sctp_authchunk))
9494 - return -EINVAL;
9495 - if (copy_from_user(&val, optval, optlen))
9496 -@@ -2993,8 +2996,12 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
9497 - int optlen)
9498 - {
9499 - struct sctp_hmacalgo *hmacs;
9500 -+ u32 idents;
9501 - int err;
9502 -
9503 -+ if (!sctp_auth_enable)
9504 -+ return -EACCES;
9505 -+
9506 - if (optlen < sizeof(struct sctp_hmacalgo))
9507 - return -EINVAL;
9508 -
9509 -@@ -3007,8 +3014,9 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
9510 - goto out;
9511 - }
9512 -
9513 -- if (hmacs->shmac_num_idents == 0 ||
9514 -- hmacs->shmac_num_idents > SCTP_AUTH_NUM_HMACS) {
9515 -+ idents = hmacs->shmac_num_idents;
9516 -+ if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
9517 -+ (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
9518 - err = -EINVAL;
9519 - goto out;
9520 - }
9521 -@@ -3033,6 +3041,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
9522 - struct sctp_association *asoc;
9523 - int ret;
9524 -
9525 -+ if (!sctp_auth_enable)
9526 -+ return -EACCES;
9527 -+
9528 - if (optlen <= sizeof(struct sctp_authkey))
9529 - return -EINVAL;
9530 -
9531 -@@ -3045,6 +3056,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
9532 - goto out;
9533 - }
9534 -
9535 -+ if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
9536 -+ ret = -EINVAL;
9537 -+ goto out;
9538 -+ }
9539 -+
9540 - asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
9541 - if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
9542 - ret = -EINVAL;
9543 -@@ -3070,6 +3086,9 @@ static int sctp_setsockopt_active_key(struct sock *sk,
9544 - struct sctp_authkeyid val;
9545 - struct sctp_association *asoc;
9546 -
9547 -+ if (!sctp_auth_enable)
9548 -+ return -EACCES;
9549 -+
9550 - if (optlen != sizeof(struct sctp_authkeyid))
9551 - return -EINVAL;
9552 - if (copy_from_user(&val, optval, optlen))
9553 -@@ -3095,6 +3114,9 @@ static int sctp_setsockopt_del_key(struct sock *sk,
9554 - struct sctp_authkeyid val;
9555 - struct sctp_association *asoc;
9556 -
9557 -+ if (!sctp_auth_enable)
9558 -+ return -EACCES;
9559 -+
9560 - if (optlen != sizeof(struct sctp_authkeyid))
9561 - return -EINVAL;
9562 - if (copy_from_user(&val, optval, optlen))
9563 -@@ -5053,19 +5075,29 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
9564 - static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
9565 - char __user *optval, int __user *optlen)
9566 - {
9567 -+ struct sctp_hmacalgo __user *p = (void __user *)optval;
9568 - struct sctp_hmac_algo_param *hmacs;
9569 -- __u16 param_len;
9570 -+ __u16 data_len = 0;
9571 -+ u32 num_idents;
9572 -+
9573 -+ if (!sctp_auth_enable)
9574 -+ return -EACCES;
9575 -
9576 - hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
9577 -- param_len = ntohs(hmacs->param_hdr.length);
9578 -+ data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
9579 -
9580 -- if (len < param_len)
9581 -+ if (len < sizeof(struct sctp_hmacalgo) + data_len)
9582 - return -EINVAL;
9583 -+
9584 -+ len = sizeof(struct sctp_hmacalgo) + data_len;
9585 -+ num_idents = data_len / sizeof(u16);
9586 -+
9587 - if (put_user(len, optlen))
9588 - return -EFAULT;
9589 -- if (copy_to_user(optval, hmacs->hmac_ids, len))
9590 -+ if (put_user(num_idents, &p->shmac_num_idents))
9591 -+ return -EFAULT;
9592 -+ if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
9593 - return -EFAULT;
9594 --
9595 - return 0;
9596 - }
9597 -
9598 -@@ -5075,6 +5107,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
9599 - struct sctp_authkeyid val;
9600 - struct sctp_association *asoc;
9601 -
9602 -+ if (!sctp_auth_enable)
9603 -+ return -EACCES;
9604 -+
9605 - if (len < sizeof(struct sctp_authkeyid))
9606 - return -EINVAL;
9607 - if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
9608 -@@ -5089,6 +5124,12 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
9609 - else
9610 - val.scact_keynumber = sctp_sk(sk)->ep->active_key_id;
9611 -
9612 -+ len = sizeof(struct sctp_authkeyid);
9613 -+ if (put_user(len, optlen))
9614 -+ return -EFAULT;
9615 -+ if (copy_to_user(optval, &val, len))
9616 -+ return -EFAULT;
9617 -+
9618 - return 0;
9619 - }
9620 -
9621 -@@ -5099,13 +5140,16 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
9622 - struct sctp_authchunks val;
9623 - struct sctp_association *asoc;
9624 - struct sctp_chunks_param *ch;
9625 -- u32 num_chunks;
9626 -+ u32 num_chunks = 0;
9627 - char __user *to;
9628 -
9629 -- if (len <= sizeof(struct sctp_authchunks))
9630 -+ if (!sctp_auth_enable)
9631 -+ return -EACCES;
9632 -+
9633 -+ if (len < sizeof(struct sctp_authchunks))
9634 - return -EINVAL;
9635 -
9636 -- if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
9637 -+ if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
9638 - return -EFAULT;
9639 -
9640 - to = p->gauth_chunks;
9641 -@@ -5114,20 +5158,21 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
9642 - return -EINVAL;
9643 -
9644 - ch = asoc->peer.peer_chunks;
9645 -+ if (!ch)
9646 -+ goto num;
9647 -
9648 - /* See if the user provided enough room for all the data */
9649 - num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
9650 - if (len < num_chunks)
9651 - return -EINVAL;
9652 -
9653 -- len = num_chunks;
9654 -- if (put_user(len, optlen))
9655 -+ if (copy_to_user(to, ch->chunks, num_chunks))
9656 - return -EFAULT;
9657 -+num:
9658 -+ len = sizeof(struct sctp_authchunks) + num_chunks;
9659 -+ if (put_user(len, optlen)) return -EFAULT;
9660 - if (put_user(num_chunks, &p->gauth_number_of_chunks))
9661 - return -EFAULT;
9662 -- if (copy_to_user(to, ch->chunks, len))
9663 -- return -EFAULT;
9664 --
9665 - return 0;
9666 - }
9667 -
9668 -@@ -5138,13 +5183,16 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
9669 - struct sctp_authchunks val;
9670 - struct sctp_association *asoc;
9671 - struct sctp_chunks_param *ch;
9672 -- u32 num_chunks;
9673 -+ u32 num_chunks = 0;
9674 - char __user *to;
9675 -
9676 -- if (len <= sizeof(struct sctp_authchunks))
9677 -+ if (!sctp_auth_enable)
9678 -+ return -EACCES;
9679 -+
9680 -+ if (len < sizeof(struct sctp_authchunks))
9681 - return -EINVAL;
9682 -
9683 -- if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
9684 -+ if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
9685 - return -EFAULT;
9686 -
9687 - to = p->gauth_chunks;
9688 -@@ -5157,17 +5205,21 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
9689 - else
9690 - ch = sctp_sk(sk)->ep->auth_chunk_list;
9691 -
9692 -+ if (!ch)
9693 -+ goto num;
9694 -+
9695 - num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
9696 -- if (len < num_chunks)
9697 -+ if (len < sizeof(struct sctp_authchunks) + num_chunks)
9698 - return -EINVAL;
9699 -
9700 -- len = num_chunks;
9701 -+ if (copy_to_user(to, ch->chunks, num_chunks))
9702 -+ return -EFAULT;
9703 -+num:
9704 -+ len = sizeof(struct sctp_authchunks) + num_chunks;
9705 - if (put_user(len, optlen))
9706 - return -EFAULT;
9707 - if (put_user(num_chunks, &p->gauth_number_of_chunks))
9708 - return -EFAULT;
9709 -- if (copy_to_user(to, ch->chunks, len))
9710 -- return -EFAULT;
9711 -
9712 - return 0;
9713 - }
9714 -diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
9715 -index 0f8c439..5231f7a 100644
9716 ---- a/net/sunrpc/sysctl.c
9717 -+++ b/net/sunrpc/sysctl.c
9718 -@@ -60,24 +60,14 @@ static int proc_do_xprt(ctl_table *table, int write, struct file *file,
9719 - void __user *buffer, size_t *lenp, loff_t *ppos)
9720 - {
9721 - char tmpbuf[256];
9722 -- int len;
9723 -+ size_t len;
9724 -+
9725 - if ((*ppos && !write) || !*lenp) {
9726 - *lenp = 0;
9727 - return 0;
9728 - }
9729 -- if (write)
9730 -- return -EINVAL;
9731 -- else {
9732 -- len = svc_print_xprts(tmpbuf, sizeof(tmpbuf));
9733 -- if (!access_ok(VERIFY_WRITE, buffer, len))
9734 -- return -EFAULT;
9735 --
9736 -- if (__copy_to_user(buffer, tmpbuf, len))
9737 -- return -EFAULT;
9738 -- }
9739 -- *lenp -= len;
9740 -- *ppos += len;
9741 -- return 0;
9742 -+ len = svc_print_xprts(tmpbuf, sizeof(tmpbuf));
9743 -+ return simple_read_from_buffer(buffer, *lenp, ppos, tmpbuf, len);
9744 - }
9745 -
9746 - static int
9747 -diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
9748 -index 72fddaf..391f456 100644
9749 ---- a/net/xfrm/xfrm_state.c
9750 -+++ b/net/xfrm/xfrm_state.c
9751 -@@ -780,11 +780,13 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
9752 - {
9753 - unsigned int h;
9754 - struct hlist_node *entry;
9755 -- struct xfrm_state *x, *x0;
9756 -+ struct xfrm_state *x, *x0, *to_put;
9757 - int acquire_in_progress = 0;
9758 - int error = 0;
9759 - struct xfrm_state *best = NULL;
9760 -
9761 -+ to_put = NULL;
9762 -+
9763 - spin_lock_bh(&xfrm_state_lock);
9764 - h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
9765 - hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
9766 -@@ -833,7 +835,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
9767 - if (tmpl->id.spi &&
9768 - (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
9769 - tmpl->id.proto, family)) != NULL) {
9770 -- xfrm_state_put(x0);
9771 -+ to_put = x0;
9772 - error = -EEXIST;
9773 - goto out;
9774 - }
9775 -@@ -849,7 +851,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
9776 - error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
9777 - if (error) {
9778 - x->km.state = XFRM_STATE_DEAD;
9779 -- xfrm_state_put(x);
9780 -+ to_put = x;
9781 - x = NULL;
9782 - goto out;
9783 - }
9784 -@@ -870,7 +872,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
9785 - xfrm_hash_grow_check(x->bydst.next != NULL);
9786 - } else {
9787 - x->km.state = XFRM_STATE_DEAD;
9788 -- xfrm_state_put(x);
9789 -+ to_put = x;
9790 - x = NULL;
9791 - error = -ESRCH;
9792 - }
9793 -@@ -881,6 +883,8 @@ out:
9794 - else
9795 - *err = acquire_in_progress ? -EAGAIN : error;
9796 - spin_unlock_bh(&xfrm_state_lock);
9797 -+ if (to_put)
9798 -+ xfrm_state_put(to_put);
9799 - return x;
9800 - }
9801 -
9802 -@@ -1067,18 +1071,20 @@ static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
9803 -
9804 - int xfrm_state_add(struct xfrm_state *x)
9805 - {
9806 -- struct xfrm_state *x1;
9807 -+ struct xfrm_state *x1, *to_put;
9808 - int family;
9809 - int err;
9810 - int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
9811 -
9812 - family = x->props.family;
9813 -
9814 -+ to_put = NULL;
9815 -+
9816 - spin_lock_bh(&xfrm_state_lock);
9817 -
9818 - x1 = __xfrm_state_locate(x, use_spi, family);
9819 - if (x1) {
9820 -- xfrm_state_put(x1);
9821 -+ to_put = x1;
9822 - x1 = NULL;
9823 - err = -EEXIST;
9824 - goto out;
9825 -@@ -1088,7 +1094,7 @@ int xfrm_state_add(struct xfrm_state *x)
9826 - x1 = __xfrm_find_acq_byseq(x->km.seq);
9827 - if (x1 && ((x1->id.proto != x->id.proto) ||
9828 - xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
9829 -- xfrm_state_put(x1);
9830 -+ to_put = x1;
9831 - x1 = NULL;
9832 - }
9833 - }
9834 -@@ -1110,6 +1116,9 @@ out:
9835 - xfrm_state_put(x1);
9836 - }
9837 -
9838 -+ if (to_put)
9839 -+ xfrm_state_put(to_put);
9840 -+
9841 - return err;
9842 - }
9843 - EXPORT_SYMBOL(xfrm_state_add);
9844 -@@ -1269,10 +1278,12 @@ EXPORT_SYMBOL(xfrm_state_migrate);
9845 -
9846 - int xfrm_state_update(struct xfrm_state *x)
9847 - {
9848 -- struct xfrm_state *x1;
9849 -+ struct xfrm_state *x1, *to_put;
9850 - int err;
9851 - int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
9852 -
9853 -+ to_put = NULL;
9854 -+
9855 - spin_lock_bh(&xfrm_state_lock);
9856 - x1 = __xfrm_state_locate(x, use_spi, x->props.family);
9857 -
9858 -@@ -1281,7 +1292,7 @@ int xfrm_state_update(struct xfrm_state *x)
9859 - goto out;
9860 -
9861 - if (xfrm_state_kern(x1)) {
9862 -- xfrm_state_put(x1);
9863 -+ to_put = x1;
9864 - err = -EEXIST;
9865 - goto out;
9866 - }
9867 -@@ -1295,6 +1306,9 @@ int xfrm_state_update(struct xfrm_state *x)
9868 - out:
9869 - spin_unlock_bh(&xfrm_state_lock);
9870 -
9871 -+ if (to_put)
9872 -+ xfrm_state_put(to_put);
9873 -+
9874 - if (err)
9875 - return err;
9876 -
9877 -diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
9878 -index 6facac5..05eb899 100644
9879 ---- a/sound/pci/oxygen/oxygen_mixer.c
9880 -+++ b/sound/pci/oxygen/oxygen_mixer.c
9881 -@@ -512,9 +512,12 @@ static int ac97_switch_get(struct snd_kcontrol *ctl,
9882 -
9883 - static void mute_ac97_ctl(struct oxygen *chip, unsigned int control)
9884 - {
9885 -- unsigned int priv_idx = chip->controls[control]->private_value & 0xff;
9886 -+ unsigned int priv_idx;
9887 - u16 value;
9888 -
9889 -+ if (!chip->controls[control])
9890 -+ return;
9891 -+ priv_idx = chip->controls[control]->private_value & 0xff;
9892 - value = oxygen_read_ac97(chip, 0, priv_idx);
9893 - if (!(value & 0x8000)) {
9894 - oxygen_write_ac97(chip, 0, priv_idx, value | 0x8000);
9895
9896 Deleted: genpatches-2.6/trunk/2.6.27/1004_linux-2.6.26.5.patch
9897 ===================================================================
9898 --- genpatches-2.6/trunk/2.6.27/1004_linux-2.6.26.5.patch 2008-10-10 23:58:26 UTC (rev 1350)
9899 +++ genpatches-2.6/trunk/2.6.27/1004_linux-2.6.26.5.patch 2008-10-11 00:00:47 UTC (rev 1351)
9900 @@ -1,96 +0,0 @@
9901 -diff --git a/include/linux/mroute.h b/include/linux/mroute.h
9902 -index 35a8277..5e30ac3 100644
9903 ---- a/include/linux/mroute.h
9904 -+++ b/include/linux/mroute.h
9905 -@@ -2,7 +2,10 @@
9906 - #define __LINUX_MROUTE_H
9907 -
9908 - #include <linux/sockios.h>
9909 -+#include <linux/types.h>
9910 -+#ifdef __KERNEL__
9911 - #include <linux/in.h>
9912 -+#endif
9913 -
9914 - /*
9915 - * Based on the MROUTING 3.5 defines primarily to keep
9916 -@@ -126,6 +129,7 @@ struct igmpmsg
9917 - */
9918 -
9919 - #ifdef __KERNEL__
9920 -+#include <linux/pim.h>
9921 - #include <net/sock.h>
9922 -
9923 - #ifdef CONFIG_IP_MROUTE
9924 -@@ -210,27 +214,6 @@ struct mfc_cache
9925 - #define IGMPMSG_WHOLEPKT 3 /* For PIM Register processing */
9926 -
9927 - #ifdef __KERNEL__
9928 --
9929 --#define PIM_V1_VERSION __constant_htonl(0x10000000)
9930 --#define PIM_V1_REGISTER 1
9931 --
9932 --#define PIM_VERSION 2
9933 --#define PIM_REGISTER 1
9934 --
9935 --#define PIM_NULL_REGISTER __constant_htonl(0x40000000)
9936 --
9937 --/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */
9938 --
9939 --struct pimreghdr
9940 --{
9941 -- __u8 type;
9942 -- __u8 reserved;
9943 -- __be16 csum;
9944 -- __be32 flags;
9945 --};
9946 --
9947 --extern int pim_rcv_v1(struct sk_buff *);
9948 --
9949 - struct rtmsg;
9950 - extern int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait);
9951 - #endif
9952 -diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
9953 -index e798959..90010dd 100644
9954 ---- a/include/linux/mroute6.h
9955 -+++ b/include/linux/mroute6.h
9956 -@@ -115,6 +115,7 @@ struct sioc_mif_req6
9957 -
9958 - #ifdef __KERNEL__
9959 -
9960 -+#include <linux/pim.h>
9961 - #include <linux/skbuff.h> /* for struct sk_buff_head */
9962 -
9963 - #ifdef CONFIG_IPV6_MROUTE
9964 -diff --git a/include/linux/pim.h b/include/linux/pim.h
9965 -new file mode 100644
9966 -index 0000000..1ba0661
9967 ---- /dev/null
9968 -+++ b/include/linux/pim.h
9969 -@@ -0,0 +1,27 @@
9970 -+#ifndef __LINUX_PIM_H
9971 -+#define __LINUX_PIM_H
9972 -+
9973 -+#include <asm/byteorder.h>
9974 -+
9975 -+/* Message types - V1 */
9976 -+#define PIM_V1_VERSION __constant_htonl(0x10000000)
9977 -+#define PIM_V1_REGISTER 1
9978 -+
9979 -+/* Message types - V2 */
9980 -+#define PIM_VERSION 2
9981 -+#define PIM_REGISTER 1
9982 -+
9983 -+#define PIM_NULL_REGISTER __constant_htonl(0x40000000)
9984 -+
9985 -+/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */
9986 -+struct pimreghdr
9987 -+{
9988 -+ __u8 type;
9989 -+ __u8 reserved;
9990 -+ __be16 csum;
9991 -+ __be32 flags;
9992 -+};
9993 -+
9994 -+struct sk_buff;
9995 -+extern int pim_rcv_v1(struct sk_buff *);
9996 -+#endif
9997
9998 Deleted: genpatches-2.6/trunk/2.6.27/1005_linux-2.6.26.6.patch
9999 ===================================================================
10000 --- genpatches-2.6/trunk/2.6.27/1005_linux-2.6.26.6.patch 2008-10-10 23:58:26 UTC (rev 1350)
10001 +++ genpatches-2.6/trunk/2.6.27/1005_linux-2.6.26.6.patch 2008-10-11 00:00:47 UTC (rev 1351)
10002 @@ -1,3097 +0,0 @@
10003 -diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
10004 -index 419aef9..7731b82 100644
10005 ---- a/arch/s390/kernel/compat_ptrace.h
10006 -+++ b/arch/s390/kernel/compat_ptrace.h
10007 -@@ -42,6 +42,7 @@ struct user_regs_struct32
10008 - u32 gprs[NUM_GPRS];
10009 - u32 acrs[NUM_ACRS];
10010 - u32 orig_gpr2;
10011 -+ /* nb: there's a 4-byte hole here */
10012 - s390_fp_regs fp_regs;
10013 - /*
10014 - * These per registers are in here so that gdb can modify them
10015 -diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
10016 -index 35827b9..75fea19 100644
10017 ---- a/arch/s390/kernel/ptrace.c
10018 -+++ b/arch/s390/kernel/ptrace.c
10019 -@@ -177,6 +177,13 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
10020 - */
10021 - tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
10022 -
10023 -+ } else if (addr < (addr_t) &dummy->regs.fp_regs) {
10024 -+ /*
10025 -+ * prevent reads of padding hole between
10026 -+ * orig_gpr2 and fp_regs on s390.
10027 -+ */
10028 -+ tmp = 0;
10029 -+
10030 - } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
10031 - /*
10032 - * floating point regs. are stored in the thread structure
10033 -@@ -268,6 +275,13 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
10034 - */
10035 - task_pt_regs(child)->orig_gpr2 = data;
10036 -
10037 -+ } else if (addr < (addr_t) &dummy->regs.fp_regs) {
10038 -+ /*
10039 -+ * prevent writes of padding hole between
10040 -+ * orig_gpr2 and fp_regs on s390.
10041 -+ */
10042 -+ return 0;
10043 -+
10044 - } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
10045 - /*
10046 - * floating point regs. are stored in the thread structure
10047 -@@ -409,6 +423,13 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
10048 - */
10049 - tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
10050 -
10051 -+ } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
10052 -+ /*
10053 -+ * prevent reads of padding hole between
10054 -+ * orig_gpr2 and fp_regs on s390.
10055 -+ */
10056 -+ tmp = 0;
10057 -+
10058 - } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
10059 - /*
10060 - * floating point regs. are stored in the thread structure
10061 -@@ -488,6 +509,13 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
10062 - */
10063 - *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
10064 -
10065 -+ } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
10066 -+ /*
10067 -+ * prevent writess of padding hole between
10068 -+ * orig_gpr2 and fp_regs on s390.
10069 -+ */
10070 -+ return 0;
10071 -+
10072 - } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
10073 - /*
10074 - * floating point regs. are stored in the thread structure
10075 -diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
10076 -index d569f60..b456609 100644
10077 ---- a/arch/sparc64/kernel/of_device.c
10078 -+++ b/arch/sparc64/kernel/of_device.c
10079 -@@ -170,7 +170,7 @@ static unsigned int of_bus_default_get_flags(const u32 *addr)
10080 -
10081 - static int of_bus_pci_match(struct device_node *np)
10082 - {
10083 -- if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) {
10084 -+ if (!strcmp(np->name, "pci")) {
10085 - const char *model = of_get_property(np, "model", NULL);
10086 -
10087 - if (model && !strcmp(model, "SUNW,simba"))
10088 -@@ -201,7 +201,7 @@ static int of_bus_simba_match(struct device_node *np)
10089 - /* Treat PCI busses lacking ranges property just like
10090 - * simba.
10091 - */
10092 -- if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) {
10093 -+ if (!strcmp(np->name, "pci")) {
10094 - if (!of_find_property(np, "ranges", NULL))
10095 - return 1;
10096 - }
10097 -@@ -426,7 +426,7 @@ static int __init use_1to1_mapping(struct device_node *pp)
10098 - * it lacks a ranges property, and this will include
10099 - * cases like Simba.
10100 - */
10101 -- if (!strcmp(pp->type, "pci") || !strcmp(pp->type, "pciex"))
10102 -+ if (!strcmp(pp->name, "pci"))
10103 - return 0;
10104 -
10105 - return 1;
10106 -@@ -709,8 +709,7 @@ static unsigned int __init build_one_device_irq(struct of_device *op,
10107 - break;
10108 - }
10109 - } else {
10110 -- if (!strcmp(pp->type, "pci") ||
10111 -- !strcmp(pp->type, "pciex")) {
10112 -+ if (!strcmp(pp->name, "pci")) {
10113 - unsigned int this_orig_irq = irq;
10114 -
10115 - irq = pci_irq_swizzle(dp, pp, irq);
10116 -diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
10117 -index 112b09f..2db2148 100644
10118 ---- a/arch/sparc64/kernel/pci.c
10119 -+++ b/arch/sparc64/kernel/pci.c
10120 -@@ -425,7 +425,7 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
10121 - dev->current_state = 4; /* unknown power state */
10122 - dev->error_state = pci_channel_io_normal;
10123 -
10124 -- if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
10125 -+ if (!strcmp(node->name, "pci")) {
10126 - /* a PCI-PCI bridge */
10127 - dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
10128 - dev->rom_base_reg = PCI_ROM_ADDRESS1;
10129 -diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
10130 -index 994dbe0..21128cf 100644
10131 ---- a/arch/sparc64/kernel/pci_psycho.c
10132 -+++ b/arch/sparc64/kernel/pci_psycho.c
10133 -@@ -575,7 +575,7 @@ static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm, int is_pbm
10134 - {
10135 - unsigned long csr_reg, csr, csr_error_bits;
10136 - irqreturn_t ret = IRQ_NONE;
10137 -- u16 stat;
10138 -+ u16 stat, *addr;
10139 -
10140 - if (is_pbm_a) {
10141 - csr_reg = pbm->controller_regs + PSYCHO_PCIA_CTRL;
10142 -@@ -597,7 +597,9 @@ static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm, int is_pbm
10143 - printk("%s: PCI SERR signal asserted.\n", pbm->name);
10144 - ret = IRQ_HANDLED;
10145 - }
10146 -- pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
10147 -+ addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
10148 -+ 0, PCI_STATUS);
10149 -+ pci_config_read16(addr, &stat);
10150 - if (stat & (PCI_STATUS_PARITY |
10151 - PCI_STATUS_SIG_TARGET_ABORT |
10152 - PCI_STATUS_REC_TARGET_ABORT |
10153 -@@ -605,7 +607,7 @@ static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm, int is_pbm
10154 - PCI_STATUS_SIG_SYSTEM_ERROR)) {
10155 - printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
10156 - pbm->name, stat);
10157 -- pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
10158 -+ pci_config_write16(addr, 0xffff);
10159 - ret = IRQ_HANDLED;
10160 - }
10161 - return ret;
10162 -@@ -744,16 +746,16 @@ static void psycho_register_error_handlers(struct pci_pbm_info *pbm)
10163 - * the second will just error out since we do not pass in
10164 - * IRQF_SHARED.
10165 - */
10166 -- err = request_irq(op->irqs[1], psycho_ue_intr, 0,
10167 -+ err = request_irq(op->irqs[1], psycho_ue_intr, IRQF_SHARED,
10168 - "PSYCHO_UE", pbm);
10169 -- err = request_irq(op->irqs[2], psycho_ce_intr, 0,
10170 -+ err = request_irq(op->irqs[2], psycho_ce_intr, IRQF_SHARED,
10171 - "PSYCHO_CE", pbm);
10172 -
10173 - /* This one, however, ought not to fail. We can just warn
10174 - * about it since the system can still operate properly even
10175 - * if this fails.
10176 - */
10177 -- err = request_irq(op->irqs[0], psycho_pcierr_intr, 0,
10178 -+ err = request_irq(op->irqs[0], psycho_pcierr_intr, IRQF_SHARED,
10179 - "PSYCHO_PCIERR", pbm);
10180 - if (err)
10181 - printk(KERN_WARNING "%s: Could not register PCIERR, "
10182 -diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
10183 -index ed03a18..a72f793 100644
10184 ---- a/arch/sparc64/kernel/prom.c
10185 -+++ b/arch/sparc64/kernel/prom.c
10186 -@@ -156,55 +156,11 @@ static unsigned long psycho_pcislot_imap_offset(unsigned long ino)
10187 - return PSYCHO_IMAP_B_SLOT0 + (slot * 8);
10188 - }
10189 -
10190 --#define PSYCHO_IMAP_SCSI 0x1000UL
10191 --#define PSYCHO_IMAP_ETH 0x1008UL
10192 --#define PSYCHO_IMAP_BPP 0x1010UL
10193 --#define PSYCHO_IMAP_AU_REC 0x1018UL
10194 --#define PSYCHO_IMAP_AU_PLAY 0x1020UL
10195 --#define PSYCHO_IMAP_PFAIL 0x1028UL
10196 --#define PSYCHO_IMAP_KMS 0x1030UL
10197 --#define PSYCHO_IMAP_FLPY 0x1038UL
10198 --#define PSYCHO_IMAP_SHW 0x1040UL
10199 --#define PSYCHO_IMAP_KBD 0x1048UL
10200 --#define PSYCHO_IMAP_MS 0x1050UL
10201 --#define PSYCHO_IMAP_SER 0x1058UL
10202 --#define PSYCHO_IMAP_TIM0 0x1060UL
10203 --#define PSYCHO_IMAP_TIM1 0x1068UL
10204 --#define PSYCHO_IMAP_UE 0x1070UL
10205 --#define PSYCHO_IMAP_CE 0x1078UL
10206 --#define PSYCHO_IMAP_A_ERR 0x1080UL
10207 --#define PSYCHO_IMAP_B_ERR 0x1088UL
10208 --#define PSYCHO_IMAP_PMGMT 0x1090UL
10209 --#define PSYCHO_IMAP_GFX 0x1098UL
10210 --#define PSYCHO_IMAP_EUPA 0x10a0UL
10211 --
10212 --static unsigned long __psycho_onboard_imap_off[] = {
10213 --/*0x20*/ PSYCHO_IMAP_SCSI,
10214 --/*0x21*/ PSYCHO_IMAP_ETH,
10215 --/*0x22*/ PSYCHO_IMAP_BPP,
10216 --/*0x23*/ PSYCHO_IMAP_AU_REC,
10217 --/*0x24*/ PSYCHO_IMAP_AU_PLAY,
10218 --/*0x25*/ PSYCHO_IMAP_PFAIL,
10219 --/*0x26*/ PSYCHO_IMAP_KMS,
10220 --/*0x27*/ PSYCHO_IMAP_FLPY,
10221 --/*0x28*/ PSYCHO_IMAP_SHW,
10222 --/*0x29*/ PSYCHO_IMAP_KBD,
10223 --/*0x2a*/ PSYCHO_IMAP_MS,
10224 --/*0x2b*/ PSYCHO_IMAP_SER,
10225 --/*0x2c*/ PSYCHO_IMAP_TIM0,
10226 --/*0x2d*/ PSYCHO_IMAP_TIM1,
10227 --/*0x2e*/ PSYCHO_IMAP_UE,
10228 --/*0x2f*/ PSYCHO_IMAP_CE,
10229 --/*0x30*/ PSYCHO_IMAP_A_ERR,
10230 --/*0x31*/ PSYCHO_IMAP_B_ERR,
10231 --/*0x32*/ PSYCHO_IMAP_PMGMT,
10232 --/*0x33*/ PSYCHO_IMAP_GFX,
10233 --/*0x34*/ PSYCHO_IMAP_EUPA,
10234 --};
10235 -+#define PSYCHO_OBIO_IMAP_BASE 0x1000UL
10236 -+
10237 - #define PSYCHO_ONBOARD_IRQ_BASE 0x20
10238 --#define PSYCHO_ONBOARD_IRQ_LAST 0x34
10239 - #define psycho_onboard_imap_offset(__ino) \
10240 -- __psycho_onboard_imap_off[(__ino) - PSYCHO_ONBOARD_IRQ_BASE]
10241 -+ (PSYCHO_OBIO_IMAP_BASE + (((__ino) & 0x1f) << 3))
10242 -
10243 - #define PSYCHO_ICLR_A_SLOT0 0x1400UL
10244 - #define PSYCHO_ICLR_SCSI 0x1800UL
10245 -@@ -228,10 +184,6 @@ static unsigned int psycho_irq_build(struct device_node *dp,
10246 - imap_off = psycho_pcislot_imap_offset(ino);
10247 - } else {
10248 - /* Onboard device */
10249 -- if (ino > PSYCHO_ONBOARD_IRQ_LAST) {
10250 -- prom_printf("psycho_irq_build: Wacky INO [%x]\n", ino);
10251 -- prom_halt();
10252 -- }
10253 - imap_off = psycho_onboard_imap_offset(ino);
10254 - }
10255 -
10256 -@@ -318,23 +270,6 @@ static void sabre_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
10257 -
10258 - #define SABRE_IMAP_A_SLOT0 0x0c00UL
10259 - #define SABRE_IMAP_B_SLOT0 0x0c20UL
10260 --#define SABRE_IMAP_SCSI 0x1000UL
10261 --#define SABRE_IMAP_ETH 0x1008UL
10262 --#define SABRE_IMAP_BPP 0x1010UL
10263 --#define SABRE_IMAP_AU_REC 0x1018UL
10264 --#define SABRE_IMAP_AU_PLAY 0x1020UL
10265 --#define SABRE_IMAP_PFAIL 0x1028UL
10266 --#define SABRE_IMAP_KMS 0x1030UL
10267 --#define SABRE_IMAP_FLPY 0x1038UL
10268 --#define SABRE_IMAP_SHW 0x1040UL
10269 --#define SABRE_IMAP_KBD 0x1048UL
10270 --#define SABRE_IMAP_MS 0x1050UL
10271 --#define SABRE_IMAP_SER 0x1058UL
10272 --#define SABRE_IMAP_UE 0x1070UL
10273 --#define SABRE_IMAP_CE 0x1078UL
10274 --#define SABRE_IMAP_PCIERR 0x1080UL
10275 --#define SABRE_IMAP_GFX 0x1098UL
10276 --#define SABRE_IMAP_EUPA 0x10a0UL
10277 - #define SABRE_ICLR_A_SLOT0 0x1400UL
10278 - #define SABRE_ICLR_B_SLOT0 0x1480UL
10279 - #define SABRE_ICLR_SCSI 0x1800UL
10280 -@@ -364,33 +299,10 @@ static unsigned long sabre_pcislot_imap_offset(unsigned long ino)
10281 - return SABRE_IMAP_B_SLOT0 + (slot * 8);
10282 - }
10283 -
10284 --static unsigned long __sabre_onboard_imap_off[] = {
10285 --/*0x20*/ SABRE_IMAP_SCSI,
10286 --/*0x21*/ SABRE_IMAP_ETH,
10287 --/*0x22*/ SABRE_IMAP_BPP,
10288 --/*0x23*/ SABRE_IMAP_AU_REC,
10289 --/*0x24*/ SABRE_IMAP_AU_PLAY,
10290 --/*0x25*/ SABRE_IMAP_PFAIL,
10291 --/*0x26*/ SABRE_IMAP_KMS,
10292 --/*0x27*/ SABRE_IMAP_FLPY,
10293 --/*0x28*/ SABRE_IMAP_SHW,
10294 --/*0x29*/ SABRE_IMAP_KBD,
10295 --/*0x2a*/ SABRE_IMAP_MS,
10296 --/*0x2b*/ SABRE_IMAP_SER,
10297 --/*0x2c*/ 0 /* reserved */,
10298 --/*0x2d*/ 0 /* reserved */,
10299 --/*0x2e*/ SABRE_IMAP_UE,
10300 --/*0x2f*/ SABRE_IMAP_CE,
10301 --/*0x30*/ SABRE_IMAP_PCIERR,
10302 --/*0x31*/ 0 /* reserved */,
10303 --/*0x32*/ 0 /* reserved */,
10304 --/*0x33*/ SABRE_IMAP_GFX,
10305 --/*0x34*/ SABRE_IMAP_EUPA,
10306 --};
10307 --#define SABRE_ONBOARD_IRQ_BASE 0x20
10308 --#define SABRE_ONBOARD_IRQ_LAST 0x30
10309 -+#define SABRE_OBIO_IMAP_BASE 0x1000UL
10310 -+#define SABRE_ONBOARD_IRQ_BASE 0x20
10311 - #define sabre_onboard_imap_offset(__ino) \
10312 -- __sabre_onboard_imap_off[(__ino) - SABRE_ONBOARD_IRQ_BASE]
10313 -+ (SABRE_OBIO_IMAP_BASE + (((__ino) & 0x1f) << 3))
10314 -
10315 - #define sabre_iclr_offset(ino) \
10316 - ((ino & 0x20) ? (SABRE_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
10317 -@@ -453,10 +365,6 @@ static unsigned int sabre_irq_build(struct device_node *dp,
10318 - imap_off = sabre_pcislot_imap_offset(ino);
10319 - } else {
10320 - /* onboard device */
10321 -- if (ino > SABRE_ONBOARD_IRQ_LAST) {
10322 -- prom_printf("sabre_irq_build: Wacky INO [%x]\n", ino);
10323 -- prom_halt();
10324 -- }
10325 - imap_off = sabre_onboard_imap_offset(ino);
10326 - }
10327 -
10328 -diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
10329 -index 65c7857..d5ccf42 100644
10330 ---- a/arch/x86/kernel/alternative.c
10331 -+++ b/arch/x86/kernel/alternative.c
10332 -@@ -1,6 +1,6 @@
10333 - #include <linux/module.h>
10334 - #include <linux/sched.h>
10335 --#include <linux/spinlock.h>
10336 -+#include <linux/mutex.h>
10337 - #include <linux/list.h>
10338 - #include <linux/kprobes.h>
10339 - #include <linux/mm.h>
10340 -@@ -279,7 +279,7 @@ struct smp_alt_module {
10341 - struct list_head next;
10342 - };
10343 - static LIST_HEAD(smp_alt_modules);
10344 --static DEFINE_SPINLOCK(smp_alt);
10345 -+static DEFINE_MUTEX(smp_alt);
10346 - static int smp_mode = 1; /* protected by smp_alt */
10347 -
10348 - void alternatives_smp_module_add(struct module *mod, char *name,
10349 -@@ -312,12 +312,12 @@ void alternatives_smp_module_add(struct module *mod, char *name,
10350 - __func__, smp->locks, smp->locks_end,
10351 - smp->text, smp->text_end, smp->name);
10352 -
10353 -- spin_lock(&smp_alt);
10354 -+ mutex_lock(&smp_alt);
10355 - list_add_tail(&smp->next, &smp_alt_modules);
10356 - if (boot_cpu_has(X86_FEATURE_UP))
10357 - alternatives_smp_unlock(smp->locks, smp->locks_end,
10358 - smp->text, smp->text_end);
10359 -- spin_unlock(&smp_alt);
10360 -+ mutex_unlock(&smp_alt);
10361 - }
10362 -
10363 - void alternatives_smp_module_del(struct module *mod)
10364 -@@ -327,17 +327,17 @@ void alternatives_smp_module_del(struct module *mod)
10365 - if (smp_alt_once || noreplace_smp)
10366 - return;
10367 -
10368 -- spin_lock(&smp_alt);
10369 -+ mutex_lock(&smp_alt);
10370 - list_for_each_entry(item, &smp_alt_modules, next) {
10371 - if (mod != item->mod)
10372 - continue;
10373 - list_del(&item->next);
10374 -- spin_unlock(&smp_alt);
10375 -+ mutex_unlock(&smp_alt);
10376 - DPRINTK("%s: %s\n", __func__, item->name);
10377 - kfree(item);
10378 - return;
10379 - }
10380 -- spin_unlock(&smp_alt);
10381 -+ mutex_unlock(&smp_alt);
10382 - }
10383 -
10384 - void alternatives_smp_switch(int smp)
10385 -@@ -359,7 +359,7 @@ void alternatives_smp_switch(int smp)
10386 - return;
10387 - BUG_ON(!smp && (num_online_cpus() > 1));
10388 -
10389 -- spin_lock(&smp_alt);
10390 -+ mutex_lock(&smp_alt);
10391 -
10392 - /*
10393 - * Avoid unnecessary switches because it forces JIT based VMs to
10394 -@@ -383,7 +383,7 @@ void alternatives_smp_switch(int smp)
10395 - mod->text, mod->text_end);
10396 - }
10397 - smp_mode = smp;
10398 -- spin_unlock(&smp_alt);
10399 -+ mutex_unlock(&smp_alt);
10400 - }
10401 -
10402 - #endif
10403 -diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
10404 -index 4b99b1b..c17fdb0 100644
10405 ---- a/arch/x86/kernel/apic_32.c
10406 -+++ b/arch/x86/kernel/apic_32.c
10407 -@@ -552,8 +552,31 @@ void __init setup_boot_APIC_clock(void)
10408 - setup_APIC_timer();
10409 - }
10410 -
10411 --void __devinit setup_secondary_APIC_clock(void)
10412 -+/*
10413 -+ * AMD C1E enabled CPUs have a real nasty problem: Some BIOSes set the
10414 -+ * C1E flag only in the secondary CPU, so when we detect the wreckage
10415 -+ * we already have enabled the boot CPU local apic timer. Check, if
10416 -+ * disable_apic_timer is set and the DUMMY flag is cleared. If yes,
10417 -+ * set the DUMMY flag again and force the broadcast mode in the
10418 -+ * clockevents layer.
10419 -+ */
10420 -+static void __cpuinit check_boot_apic_timer_broadcast(void)
10421 - {
10422 -+ if (!local_apic_timer_disabled ||
10423 -+ (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY))
10424 -+ return;
10425 -+
10426 -+ lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;
10427 -+
10428 -+ local_irq_enable();
10429 -+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
10430 -+ &boot_cpu_physical_apicid);
10431 -+ local_irq_disable();
10432 -+}
10433 -+
10434 -+void __cpuinit setup_secondary_APIC_clock(void)
10435 -+{
10436 -+ check_boot_apic_timer_broadcast();
10437 - setup_APIC_timer();
10438 - }
10439 -
10440 -@@ -1513,6 +1536,9 @@ void __cpuinit generic_processor_info(int apicid, int version)
10441 - */
10442 - cpu = 0;
10443 -
10444 -+ if (apicid > max_physical_apicid)
10445 -+ max_physical_apicid = apicid;
10446 -+
10447 - /*
10448 - * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
10449 - * but we need to work other dependencies like SMP_SUSPEND etc
10450 -@@ -1520,7 +1546,7 @@ void __cpuinit generic_processor_info(int apicid, int version)
10451 - * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
10452 - * - Ashok Raj <ashok.raj@×××××.com>
10453 - */
10454 -- if (num_processors > 8) {
10455 -+ if (max_physical_apicid >= 8) {
10456 - switch (boot_cpu_data.x86_vendor) {
10457 - case X86_VENDOR_INTEL:
10458 - if (!APIC_XAPIC(version)) {
10459 -diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
10460 -index 0633cfd..8472bdf 100644
10461 ---- a/arch/x86/kernel/apic_64.c
10462 -+++ b/arch/x86/kernel/apic_64.c
10463 -@@ -1090,6 +1090,9 @@ void __cpuinit generic_processor_info(int apicid, int version)
10464 - */
10465 - cpu = 0;
10466 - }
10467 -+ if (apicid > max_physical_apicid)
10468 -+ max_physical_apicid = apicid;
10469 -+
10470 - /* are we being called early in kernel startup? */
10471 - if (x86_cpu_to_apicid_early_ptr) {
10472 - u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
10473 -diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
10474 -index 170d2f5..912a84b 100644
10475 ---- a/arch/x86/kernel/cpu/bugs.c
10476 -+++ b/arch/x86/kernel/cpu/bugs.c
10477 -@@ -50,6 +50,8 @@ static double __initdata y = 3145727.0;
10478 - */
10479 - static void __init check_fpu(void)
10480 - {
10481 -+ s32 fdiv_bug;
10482 -+
10483 - if (!boot_cpu_data.hard_math) {
10484 - #ifndef CONFIG_MATH_EMULATION
10485 - printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
10486 -@@ -70,8 +72,10 @@ static void __init check_fpu(void)
10487 - "fistpl %0\n\t"
10488 - "fwait\n\t"
10489 - "fninit"
10490 -- : "=m" (*&boot_cpu_data.fdiv_bug)
10491 -+ : "=m" (*&fdiv_bug)
10492 - : "m" (*&x), "m" (*&y));
10493 -+
10494 -+ boot_cpu_data.fdiv_bug = fdiv_bug;
10495 - if (boot_cpu_data.fdiv_bug)
10496 - printk("Hmm, FPU with FDIV bug.\n");
10497 - }
10498 -diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c
10499 -index ed733e7..a540c4e 100644
10500 ---- a/arch/x86/kernel/e820_32.c
10501 -+++ b/arch/x86/kernel/e820_32.c
10502 -@@ -697,7 +697,7 @@ static int __init parse_memmap(char *arg)
10503 - if (!arg)
10504 - return -EINVAL;
10505 -
10506 -- if (strcmp(arg, "exactmap") == 0) {
10507 -+ if (strncmp(arg, "exactmap", 8) == 0) {
10508 - #ifdef CONFIG_CRASH_DUMP
10509 - /* If we are doing a crash dump, we
10510 - * still need to know the real mem
10511 -diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
10512 -index 124480c..4da8e2b 100644
10513 ---- a/arch/x86/kernel/e820_64.c
10514 -+++ b/arch/x86/kernel/e820_64.c
10515 -@@ -776,7 +776,7 @@ static int __init parse_memmap_opt(char *p)
10516 - char *oldp;
10517 - unsigned long long start_at, mem_size;
10518 -
10519 -- if (!strcmp(p, "exactmap")) {
10520 -+ if (!strncmp(p, "exactmap", 8)) {
10521 - #ifdef CONFIG_CRASH_DUMP
10522 - /*
10523 - * If we are doing a crash dump, we still need to know
10524 -diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
10525 -index cbaaf69..1fa8be5 100644
10526 ---- a/arch/x86/kernel/genapic_64.c
10527 -+++ b/arch/x86/kernel/genapic_64.c
10528 -@@ -51,7 +51,7 @@ void __init setup_apic_routing(void)
10529 - else
10530 - #endif
10531 -
10532 -- if (num_possible_cpus() <= 8)
10533 -+ if (max_physical_apicid < 8)
10534 - genapic = &apic_flat;
10535 - else
10536 - genapic = &apic_physflat;
10537 -diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
10538 -index e25c57b..d946c37 100644
10539 ---- a/arch/x86/kernel/head64.c
10540 -+++ b/arch/x86/kernel/head64.c
10541 -@@ -135,6 +135,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
10542 - BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
10543 - BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
10544 - (__START_KERNEL & PGDIR_MASK)));
10545 -+ BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);
10546 -
10547 - /* clear bss before set_intr_gate with early_idt_handler */
10548 - clear_bss();
10549 -diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
10550 -index 9b5cfcd..0f3e379 100644
10551 ---- a/arch/x86/kernel/hpet.c
10552 -+++ b/arch/x86/kernel/hpet.c
10553 -@@ -223,8 +223,8 @@ static void hpet_legacy_clockevent_register(void)
10554 - /* Calculate the min / max delta */
10555 - hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
10556 - &hpet_clockevent);
10557 -- hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30,
10558 -- &hpet_clockevent);
10559 -+ /* 5 usec minimum reprogramming delta. */
10560 -+ hpet_clockevent.min_delta_ns = 5000;
10561 -
10562 - /*
10563 - * Start hpet with the boot cpu mask and make it
10564 -@@ -283,15 +283,22 @@ static void hpet_legacy_set_mode(enum clock_event_mode mode,
10565 - }
10566 -
10567 - static int hpet_legacy_next_event(unsigned long delta,
10568 -- struct clock_event_device *evt)
10569 -+ struct clock_event_device *evt)
10570 - {
10571 -- unsigned long cnt;
10572 -+ u32 cnt;
10573 -
10574 - cnt = hpet_readl(HPET_COUNTER);
10575 -- cnt += delta;
10576 -+ cnt += (u32) delta;
10577 - hpet_writel(cnt, HPET_T0_CMP);
10578 -
10579 -- return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0) ? -ETIME : 0;
10580 -+ /*
10581 -+ * We need to read back the CMP register to make sure that
10582 -+ * what we wrote hit the chip before we compare it to the
10583 -+ * counter.
10584 -+ */
10585 -+ WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt);
10586 -+
10587 -+ return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
10588 - }
10589 -
10590 - /*
10591 -diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
10592 -index 1c3a66a..720d260 100644
10593 ---- a/arch/x86/kernel/io_delay.c
10594 -+++ b/arch/x86/kernel/io_delay.c
10595 -@@ -92,6 +92,14 @@ static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
10596 - DMI_MATCH(DMI_BOARD_NAME, "30BF")
10597 - }
10598 - },
10599 -+ {
10600 -+ .callback = dmi_io_delay_0xed_port,
10601 -+ .ident = "Presario F700",
10602 -+ .matches = {
10603 -+ DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
10604 -+ DMI_MATCH(DMI_BOARD_NAME, "30D3")
10605 -+ }
10606 -+ },
10607 - { }
10608 - };
10609 -
10610 -diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
10611 -index 404683b..d5b8691 100644
10612 ---- a/arch/x86/kernel/mpparse.c
10613 -+++ b/arch/x86/kernel/mpparse.c
10614 -@@ -402,6 +402,11 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
10615 - ++mpc_record;
10616 - #endif
10617 - }
10618 -+
10619 -+#ifdef CONFIG_X86_GENERICARCH
10620 -+ generic_bigsmp_probe();
10621 -+#endif
10622 -+
10623 - setup_apic_routing();
10624 - if (!num_processors)
10625 - printk(KERN_ERR "MPTABLE: no processors registered!\n");
10626 -diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
10627 -index 6f80b85..03e357a 100644
10628 ---- a/arch/x86/kernel/setup.c
10629 -+++ b/arch/x86/kernel/setup.c
10630 -@@ -17,6 +17,7 @@ unsigned int num_processors;
10631 - unsigned disabled_cpus __cpuinitdata;
10632 - /* Processor that is doing the boot up */
10633 - unsigned int boot_cpu_physical_apicid = -1U;
10634 -+unsigned int max_physical_apicid;
10635 - EXPORT_SYMBOL(boot_cpu_physical_apicid);
10636 -
10637 - DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
10638 -diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
10639 -index 5a2f8e0..3bf22f0 100644
10640 ---- a/arch/x86/kernel/setup_32.c
10641 -+++ b/arch/x86/kernel/setup_32.c
10642 -@@ -914,6 +914,12 @@ void __init setup_arch(char **cmdline_p)
10643 -
10644 - #ifdef CONFIG_ACPI
10645 - acpi_boot_init();
10646 -+#endif
10647 -+
10648 -+#ifdef CONFIG_X86_LOCAL_APIC
10649 -+ if (smp_found_config)
10650 -+ get_smp_config();
10651 -+#endif
10652 -
10653 - #if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
10654 - if (def_to_bigsmp)
10655 -@@ -921,11 +927,6 @@ void __init setup_arch(char **cmdline_p)
10656 - "CONFIG_X86_PC cannot handle it.\nUse "
10657 - "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
10658 - #endif
10659 --#endif
10660 --#ifdef CONFIG_X86_LOCAL_APIC
10661 -- if (smp_found_config)
10662 -- get_smp_config();
10663 --#endif
10664 -
10665 - e820_register_memory();
10666 - e820_mark_nosave_regions();
10667 -diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
10668 -index e53b267..c56034d 100644
10669 ---- a/arch/x86/kernel/signal_64.c
10670 -+++ b/arch/x86/kernel/signal_64.c
10671 -@@ -53,6 +53,68 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
10672 - return do_sigaltstack(uss, uoss, regs->sp);
10673 - }
10674 -
10675 -+/*
10676 -+ * Signal frame handlers.
10677 -+ */
10678 -+
10679 -+static inline int save_i387(struct _fpstate __user *buf)
10680 -+{
10681 -+ struct task_struct *tsk = current;
10682 -+ int err = 0;
10683 -+
10684 -+ BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
10685 -+ sizeof(tsk->thread.xstate->fxsave));
10686 -+
10687 -+ if ((unsigned long)buf % 16)
10688 -+ printk("save_i387: bad fpstate %p\n", buf);
10689 -+
10690 -+ if (!used_math())
10691 -+ return 0;
10692 -+ clear_used_math(); /* trigger finit */
10693 -+ if (task_thread_info(tsk)->status & TS_USEDFPU) {
10694 -+ err = save_i387_checking((struct i387_fxsave_struct __user *)
10695 -+ buf);
10696 -+ if (err)
10697 -+ return err;
10698 -+ task_thread_info(tsk)->status &= ~TS_USEDFPU;
10699 -+ stts();
10700 -+ } else {
10701 -+ if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
10702 -+ sizeof(struct i387_fxsave_struct)))
10703 -+ return -1;
10704 -+ }
10705 -+ return 1;
10706 -+}
10707 -+
10708 -+/*
10709 -+ * This restores directly out of user space. Exceptions are handled.
10710 -+ */
10711 -+static inline int restore_i387(struct _fpstate __user *buf)
10712 -+{
10713 -+ struct task_struct *tsk = current;
10714 -+ int err;
10715 -+
10716 -+ if (!used_math()) {
10717 -+ err = init_fpu(tsk);
10718 -+ if (err)
10719 -+ return err;
10720 -+ }
10721 -+
10722 -+ if (!(task_thread_info(current)->status & TS_USEDFPU)) {
10723 -+ clts();
10724 -+ task_thread_info(current)->status |= TS_USEDFPU;
10725 -+ }
10726 -+ err = restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
10727 -+ if (unlikely(err)) {
10728 -+ /*
10729 -+ * Encountered an error while doing the restore from the
10730 -+ * user buffer, clear the fpu state.
10731 -+ */
10732 -+ clear_fpu(tsk);
10733 -+ clear_used_math();
10734 -+ }
10735 -+ return err;
10736 -+}
10737 -
10738 - /*
10739 - * Do a signal return; undo the signal stack.
10740 -diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
10741 -index adff76e..9e26f39 100644
10742 ---- a/arch/x86/kernel/traps_64.c
10743 -+++ b/arch/x86/kernel/traps_64.c
10744 -@@ -1141,7 +1141,14 @@ asmlinkage void math_state_restore(void)
10745 - }
10746 -
10747 - clts(); /* Allow maths ops (or we recurse) */
10748 -- restore_fpu_checking(&me->thread.xstate->fxsave);
10749 -+ /*
10750 -+ * Paranoid restore. send a SIGSEGV if we fail to restore the state.
10751 -+ */
10752 -+ if (unlikely(restore_fpu_checking(&me->thread.xstate->fxsave))) {
10753 -+ stts();
10754 -+ force_sig(SIGSEGV, me);
10755 -+ return;
10756 -+ }
10757 - task_thread_info(me)->status |= TS_USEDFPU;
10758 - me->fpu_counter++;
10759 - }
10760 -diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
10761 -index 956f389..9b3e795 100644
10762 ---- a/arch/x86/kernel/vmi_32.c
10763 -+++ b/arch/x86/kernel/vmi_32.c
10764 -@@ -234,7 +234,7 @@ static void vmi_write_ldt_entry(struct desc_struct *dt, int entry,
10765 - const void *desc)
10766 - {
10767 - u32 *ldt_entry = (u32 *)desc;
10768 -- vmi_ops.write_idt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
10769 -+ vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
10770 - }
10771 -
10772 - static void vmi_load_sp0(struct tss_struct *tss,
10773 -diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
10774 -index ba8c0b7..a3c9869 100644
10775 ---- a/arch/x86/kernel/vsmp_64.c
10776 -+++ b/arch/x86/kernel/vsmp_64.c
10777 -@@ -58,7 +58,7 @@ static void vsmp_irq_enable(void)
10778 - native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
10779 - }
10780 -
10781 --static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf,
10782 -+static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
10783 - unsigned long addr, unsigned len)
10784 - {
10785 - switch (type) {
10786 -diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
10787 -index 7d6071d..45e2280 100644
10788 ---- a/arch/x86/kvm/svm.c
10789 -+++ b/arch/x86/kvm/svm.c
10790 -@@ -60,6 +60,7 @@ static int npt = 1;
10791 - module_param(npt, int, S_IRUGO);
10792 -
10793 - static void kvm_reput_irq(struct vcpu_svm *svm);
10794 -+static void svm_flush_tlb(struct kvm_vcpu *vcpu);
10795 -
10796 - static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
10797 - {
10798 -@@ -879,6 +880,10 @@ set:
10799 - static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
10800 - {
10801 - unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
10802 -+ unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
10803 -+
10804 -+ if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
10805 -+ force_new_asid(vcpu);
10806 -
10807 - vcpu->arch.cr4 = cr4;
10808 - if (!npt_enabled)
10809 -@@ -1017,6 +1022,15 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
10810 -
10811 - fault_address = svm->vmcb->control.exit_info_2;
10812 - error_code = svm->vmcb->control.exit_info_1;
10813 -+
10814 -+ /*
10815 -+ * FIXME: Tis shouldn't be necessary here, but there is a flush
10816 -+ * missing in the MMU code. Until we find this bug, flush the
10817 -+ * complete TLB here on an NPF
10818 -+ */
10819 -+ if (npt_enabled)
10820 -+ svm_flush_tlb(&svm->vcpu);
10821 -+
10822 - if (event_injection)
10823 - kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
10824 - return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
10825 -diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c
10826 -index 95fc463..2a24301 100644
10827 ---- a/arch/x86/mach-generic/bigsmp.c
10828 -+++ b/arch/x86/mach-generic/bigsmp.c
10829 -@@ -48,7 +48,7 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
10830 - static int probe_bigsmp(void)
10831 - {
10832 - if (def_to_bigsmp)
10833 -- dmi_bigsmp = 1;
10834 -+ dmi_bigsmp = 1;
10835 - else
10836 - dmi_check_system(bigsmp_dmi_table);
10837 - return dmi_bigsmp;
10838 -diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
10839 -index 60bcb5b..b384297 100644
10840 ---- a/arch/x86/mm/pageattr.c
10841 -+++ b/arch/x86/mm/pageattr.c
10842 -@@ -789,7 +789,7 @@ int set_memory_uc(unsigned long addr, int numpages)
10843 - /*
10844 - * for now UC MINUS. see comments in ioremap_nocache()
10845 - */
10846 -- if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
10847 -+ if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
10848 - _PAGE_CACHE_UC_MINUS, NULL))
10849 - return -EINVAL;
10850 -
10851 -@@ -808,7 +808,7 @@ int set_memory_wc(unsigned long addr, int numpages)
10852 - if (!pat_wc_enabled)
10853 - return set_memory_uc(addr, numpages);
10854 -
10855 -- if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
10856 -+ if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
10857 - _PAGE_CACHE_WC, NULL))
10858 - return -EINVAL;
10859 -
10860 -@@ -824,7 +824,7 @@ int _set_memory_wb(unsigned long addr, int numpages)
10861 -
10862 - int set_memory_wb(unsigned long addr, int numpages)
10863 - {
10864 -- free_memtype(addr, addr + numpages * PAGE_SIZE);
10865 -+ free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
10866 -
10867 - return _set_memory_wb(addr, numpages);
10868 - }
10869 -diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
10870 -index cc48d3f..d38d5d0 100644
10871 ---- a/arch/x86/oprofile/nmi_int.c
10872 -+++ b/arch/x86/oprofile/nmi_int.c
10873 -@@ -15,6 +15,7 @@
10874 - #include <linux/slab.h>
10875 - #include <linux/moduleparam.h>
10876 - #include <linux/kdebug.h>
10877 -+#include <linux/cpu.h>
10878 - #include <asm/nmi.h>
10879 - #include <asm/msr.h>
10880 - #include <asm/apic.h>
10881 -@@ -28,23 +29,48 @@ static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
10882 -
10883 - static int nmi_start(void);
10884 - static void nmi_stop(void);
10885 -+static void nmi_cpu_start(void *dummy);
10886 -+static void nmi_cpu_stop(void *dummy);
10887 -
10888 - /* 0 == registered but off, 1 == registered and on */
10889 - static int nmi_enabled = 0;
10890 -
10891 -+#ifdef CONFIG_SMP
10892 -+static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
10893 -+ void *data)
10894 -+{
10895 -+ int cpu = (unsigned long)data;
10896 -+ switch (action) {
10897 -+ case CPU_DOWN_FAILED:
10898 -+ case CPU_ONLINE:
10899 -+ smp_call_function_single(cpu, nmi_cpu_start, NULL, 0, 0);
10900 -+ break;
10901 -+ case CPU_DOWN_PREPARE:
10902 -+ smp_call_function_single(cpu, nmi_cpu_stop, NULL, 0, 1);
10903 -+ break;
10904 -+ }
10905 -+ return NOTIFY_DONE;
10906 -+}
10907 -+
10908 -+static struct notifier_block oprofile_cpu_nb = {
10909 -+ .notifier_call = oprofile_cpu_notifier
10910 -+};
10911 -+#endif
10912 -+
10913 - #ifdef CONFIG_PM
10914 -
10915 - static int nmi_suspend(struct sys_device *dev, pm_message_t state)
10916 - {
10917 -+ /* Only one CPU left, just stop that one */
10918 - if (nmi_enabled == 1)
10919 -- nmi_stop();
10920 -+ nmi_cpu_stop(NULL);
10921 - return 0;
10922 - }
10923 -
10924 - static int nmi_resume(struct sys_device *dev)
10925 - {
10926 - if (nmi_enabled == 1)
10927 -- nmi_start();
10928 -+ nmi_cpu_start(NULL);
10929 - return 0;
10930 - }
10931 -
10932 -@@ -448,6 +474,9 @@ int __init op_nmi_init(struct oprofile_operations *ops)
10933 - }
10934 -
10935 - init_sysfs();
10936 -+#ifdef CONFIG_SMP
10937 -+ register_cpu_notifier(&oprofile_cpu_nb);
10938 -+#endif
10939 - using_nmi = 1;
10940 - ops->create_files = nmi_create_files;
10941 - ops->setup = nmi_setup;
10942 -@@ -461,6 +490,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
10943 -
10944 - void op_nmi_exit(void)
10945 - {
10946 -- if (using_nmi)
10947 -+ if (using_nmi) {
10948 - exit_sysfs();
10949 -+#ifdef CONFIG_SMP
10950 -+ unregister_cpu_notifier(&oprofile_cpu_nb);
10951 -+#endif
10952 -+ }
10953 - }
10954 -diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
10955 -index c6e772f..bfffb3d 100644
10956 ---- a/crypto/async_tx/async_tx.c
10957 -+++ b/crypto/async_tx/async_tx.c
10958 -@@ -136,7 +136,8 @@ async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
10959 - spin_lock_bh(&next->lock);
10960 - next->parent = NULL;
10961 - _next = next->next;
10962 -- next->next = NULL;
10963 -+ if (_next && _next->chan == chan)
10964 -+ next->next = NULL;
10965 - spin_unlock_bh(&next->lock);
10966 -
10967 - next->tx_submit(next);
10968 -diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
10969 -index 0a5f6b2..d672cfe 100644
10970 ---- a/drivers/accessibility/braille/braille_console.c
10971 -+++ b/drivers/accessibility/braille/braille_console.c
10972 -@@ -376,6 +376,8 @@ int braille_register_console(struct console *console, int index,
10973 - console->flags |= CON_ENABLED;
10974 - console->index = index;
10975 - braille_co = console;
10976 -+ register_keyboard_notifier(&keyboard_notifier_block);
10977 -+ register_vt_notifier(&vt_notifier_block);
10978 - return 0;
10979 - }
10980 -
10981 -@@ -383,15 +385,8 @@ int braille_unregister_console(struct console *console)
10982 - {
10983 - if (braille_co != console)
10984 - return -EINVAL;
10985 -+ unregister_keyboard_notifier(&keyboard_notifier_block);
10986 -+ unregister_vt_notifier(&vt_notifier_block);
10987 - braille_co = NULL;
10988 - return 0;
10989 - }
10990 --
10991 --static int __init braille_init(void)
10992 --{
10993 -- register_keyboard_notifier(&keyboard_notifier_block);
10994 -- register_vt_notifier(&vt_notifier_block);
10995 -- return 0;
10996 --}
10997 --
10998 --console_initcall(braille_init);
10999 -diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
11000 -index 5622aee..5670178 100644
11001 ---- a/drivers/acpi/ec.c
11002 -+++ b/drivers/acpi/ec.c
11003 -@@ -196,6 +196,8 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
11004 - return 0;
11005 - msleep(1);
11006 - }
11007 -+ if (acpi_ec_check_status(ec,event))
11008 -+ return 0;
11009 - }
11010 - pr_err(PREFIX "acpi_ec_wait timeout, status = 0x%2.2x, event = %s\n",
11011 - acpi_ec_read_status(ec),
11012 -diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
11013 -index 8c06a53..6f4a5e1 100644
11014 ---- a/drivers/acpi/processor_perflib.c
11015 -+++ b/drivers/acpi/processor_perflib.c
11016 -@@ -70,7 +70,7 @@ static DEFINE_MUTEX(performance_mutex);
11017 - * 0 -> cpufreq low level drivers initialized -> consider _PPC values
11018 - * 1 -> ignore _PPC totally -> forced by user through boot param
11019 - */
11020 --static unsigned int ignore_ppc = -1;
11021 -+static int ignore_ppc = -1;
11022 - module_param(ignore_ppc, uint, 0644);
11023 - MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
11024 - "limited by BIOS, this should help");
11025 -diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
11026 -index d34c14c..436c7e1 100644
11027 ---- a/drivers/i2c/i2c-dev.c
11028 -+++ b/drivers/i2c/i2c-dev.c
11029 -@@ -581,8 +581,10 @@ static int __init i2c_dev_init(void)
11030 - goto out;
11031 -
11032 - i2c_dev_class = class_create(THIS_MODULE, "i2c-dev");
11033 -- if (IS_ERR(i2c_dev_class))
11034 -+ if (IS_ERR(i2c_dev_class)) {
11035 -+ res = PTR_ERR(i2c_dev_class);
11036 - goto out_unreg_chrdev;
11037 -+ }
11038 -
11039 - res = i2c_add_driver(&i2cdev_driver);
11040 - if (res)
11041 -diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
11042 -index f9ad960..55a104d 100644
11043 ---- a/drivers/mmc/card/block.c
11044 -+++ b/drivers/mmc/card/block.c
11045 -@@ -103,8 +103,10 @@ static int mmc_blk_open(struct inode *inode, struct file *filp)
11046 - check_disk_change(inode->i_bdev);
11047 - ret = 0;
11048 -
11049 -- if ((filp->f_mode & FMODE_WRITE) && md->read_only)
11050 -+ if ((filp->f_mode & FMODE_WRITE) && md->read_only) {
11051 -+ mmc_blk_put(md);
11052 - ret = -EROFS;
11053 -+ }
11054 - }
11055 -
11056 - return ret;
11057 -diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
11058 -index e248f80..6fbfaf0 100644
11059 ---- a/drivers/net/ixgbe/ixgbe_main.c
11060 -+++ b/drivers/net/ixgbe/ixgbe_main.c
11061 -@@ -2258,6 +2258,12 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
11062 - int vector, v_budget;
11063 -
11064 - /*
11065 -+ * Set the default interrupt throttle rate.
11066 -+ */
11067 -+ adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS);
11068 -+ adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS);
11069 -+
11070 -+ /*
11071 - * It's easy to be greedy for MSI-X vectors, but it really
11072 - * doesn't do us much good if we have a lot more vectors
11073 - * than CPU's. So let's be conservative and only ask for
11074 -diff --git a/drivers/net/niu.c b/drivers/net/niu.c
11075 -index 918f802..78d90eb 100644
11076 ---- a/drivers/net/niu.c
11077 -+++ b/drivers/net/niu.c
11078 -@@ -5978,6 +5978,56 @@ static void niu_netif_start(struct niu *np)
11079 - niu_enable_interrupts(np, 1);
11080 - }
11081 -
11082 -+static void niu_reset_buffers(struct niu *np)
11083 -+{
11084 -+ int i, j, k, err;
11085 -+
11086 -+ if (np->rx_rings) {
11087 -+ for (i = 0; i < np->num_rx_rings; i++) {
11088 -+ struct rx_ring_info *rp = &np->rx_rings[i];
11089 -+
11090 -+ for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
11091 -+ struct page *page;
11092 -+
11093 -+ page = rp->rxhash[j];
11094 -+ while (page) {
11095 -+ struct page *next =
11096 -+ (struct page *) page->mapping;
11097 -+ u64 base = page->index;
11098 -+ base = base >> RBR_DESCR_ADDR_SHIFT;
11099 -+ rp->rbr[k++] = cpu_to_le32(base);
11100 -+ page = next;
11101 -+ }
11102 -+ }
11103 -+ for (; k < MAX_RBR_RING_SIZE; k++) {
11104 -+ err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
11105 -+ if (unlikely(err))
11106 -+ break;
11107 -+ }
11108 -+
11109 -+ rp->rbr_index = rp->rbr_table_size - 1;
11110 -+ rp->rcr_index = 0;
11111 -+ rp->rbr_pending = 0;
11112 -+ rp->rbr_refill_pending = 0;
11113 -+ }
11114 -+ }
11115 -+ if (np->tx_rings) {
11116 -+ for (i = 0; i < np->num_tx_rings; i++) {
11117 -+ struct tx_ring_info *rp = &np->tx_rings[i];
11118 -+
11119 -+ for (j = 0; j < MAX_TX_RING_SIZE; j++) {
11120 -+ if (rp->tx_buffs[j].skb)
11121 -+ (void) release_tx_packet(np, rp, j);
11122 -+ }
11123 -+
11124 -+ rp->pending = MAX_TX_RING_SIZE;
11125 -+ rp->prod = 0;
11126 -+ rp->cons = 0;
11127 -+ rp->wrap_bit = 0;
11128 -+ }
11129 -+ }
11130 -+}
11131 -+
11132 - static void niu_reset_task(struct work_struct *work)
11133 - {
11134 - struct niu *np = container_of(work, struct niu, reset_task);
11135 -@@ -6000,6 +6050,12 @@ static void niu_reset_task(struct work_struct *work)
11136 -
11137 - niu_stop_hw(np);
11138 -
11139 -+ spin_unlock_irqrestore(&np->lock, flags);
11140 -+
11141 -+ niu_reset_buffers(np);
11142 -+
11143 -+ spin_lock_irqsave(&np->lock, flags);
11144 -+
11145 - err = niu_init_hw(np);
11146 - if (!err) {
11147 - np->timer.expires = jiffies + HZ;
11148 -diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
11149 -index b4bf1e0..10c92bd 100644
11150 ---- a/drivers/net/wireless/rt2x00/rt2x00.h
11151 -+++ b/drivers/net/wireless/rt2x00/rt2x00.h
11152 -@@ -820,8 +820,10 @@ struct rt2x00_dev {
11153 -
11154 - /*
11155 - * Scheduled work.
11156 -+ * NOTE: intf_work will use ieee80211_iterate_active_interfaces()
11157 -+ * which means it cannot be placed on the hw->workqueue
11158 -+ * due to RTNL locking requirements.
11159 - */
11160 -- struct workqueue_struct *workqueue;
11161 - struct work_struct intf_work;
11162 - struct work_struct filter_work;
11163 -
11164 -diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
11165 -index c997d4f..78fa714 100644
11166 ---- a/drivers/net/wireless/rt2x00/rt2x00dev.c
11167 -+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
11168 -@@ -75,7 +75,7 @@ static void rt2x00lib_start_link_tuner(struct rt2x00_dev *rt2x00dev)
11169 -
11170 - rt2x00lib_reset_link_tuner(rt2x00dev);
11171 -
11172 -- queue_delayed_work(rt2x00dev->workqueue,
11173 -+ queue_delayed_work(rt2x00dev->hw->workqueue,
11174 - &rt2x00dev->link.work, LINK_TUNE_INTERVAL);
11175 - }
11176 -
11177 -@@ -390,7 +390,7 @@ static void rt2x00lib_link_tuner(struct work_struct *work)
11178 - * Increase tuner counter, and reschedule the next link tuner run.
11179 - */
11180 - rt2x00dev->link.count++;
11181 -- queue_delayed_work(rt2x00dev->workqueue,
11182 -+ queue_delayed_work(rt2x00dev->hw->workqueue,
11183 - &rt2x00dev->link.work, LINK_TUNE_INTERVAL);
11184 - }
11185 -
11186 -@@ -488,7 +488,7 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
11187 - rt2x00lib_beacondone_iter,
11188 - rt2x00dev);
11189 -
11190 -- queue_work(rt2x00dev->workqueue, &rt2x00dev->intf_work);
11191 -+ schedule_work(&rt2x00dev->intf_work);
11192 - }
11193 - EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
11194 -
11195 -@@ -1131,10 +1131,6 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
11196 - /*
11197 - * Initialize configuration work.
11198 - */
11199 -- rt2x00dev->workqueue = create_singlethread_workqueue("rt2x00lib");
11200 -- if (!rt2x00dev->workqueue)
11201 -- goto exit;
11202 --
11203 - INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
11204 - INIT_WORK(&rt2x00dev->filter_work, rt2x00lib_packetfilter_scheduled);
11205 - INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00lib_link_tuner);
11206 -@@ -1195,13 +1191,6 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
11207 - rt2x00leds_unregister(rt2x00dev);
11208 -
11209 - /*
11210 -- * Stop all queued work. Note that most tasks will already be halted
11211 -- * during rt2x00lib_disable_radio() and rt2x00lib_uninitialize().
11212 -- */
11213 -- flush_workqueue(rt2x00dev->workqueue);
11214 -- destroy_workqueue(rt2x00dev->workqueue);
11215 --
11216 -- /*
11217 - * Free ieee80211_hw memory.
11218 - */
11219 - rt2x00lib_remove_hw(rt2x00dev);
11220 -diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
11221 -index 9cb023e..802ddba 100644
11222 ---- a/drivers/net/wireless/rt2x00/rt2x00mac.c
11223 -+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
11224 -@@ -428,7 +428,7 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
11225 - if (!test_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags))
11226 - rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags);
11227 - else
11228 -- queue_work(rt2x00dev->workqueue, &rt2x00dev->filter_work);
11229 -+ queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->filter_work);
11230 - }
11231 - EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter);
11232 -
11233 -@@ -509,7 +509,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
11234 - memcpy(&intf->conf, bss_conf, sizeof(*bss_conf));
11235 - if (delayed) {
11236 - intf->delayed_flags |= delayed;
11237 -- queue_work(rt2x00dev->workqueue, &rt2x00dev->intf_work);
11238 -+ schedule_work(&rt2x00dev->intf_work);
11239 - }
11240 - spin_unlock(&intf->lock);
11241 - }
11242 -diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
11243 -index e407754..7d82315 100644
11244 ---- a/drivers/pcmcia/ds.c
11245 -+++ b/drivers/pcmcia/ds.c
11246 -@@ -428,6 +428,18 @@ static int pcmcia_device_probe(struct device * dev)
11247 - p_drv = to_pcmcia_drv(dev->driver);
11248 - s = p_dev->socket;
11249 -
11250 -+ /* The PCMCIA code passes the match data in via dev->driver_data
11251 -+ * which is an ugly hack. Once the driver probe is called it may
11252 -+ * and often will overwrite the match data so we must save it first
11253 -+ *
11254 -+ * handle pseudo multifunction devices:
11255 -+ * there are at most two pseudo multifunction devices.
11256 -+ * if we're matching against the first, schedule a
11257 -+ * call which will then check whether there are two
11258 -+ * pseudo devices, and if not, add the second one.
11259 -+ */
11260 -+ did = p_dev->dev.driver_data;
11261 -+
11262 - ds_dbg(1, "trying to bind %s to %s\n", p_dev->dev.bus_id,
11263 - p_drv->drv.name);
11264 -
11265 -@@ -456,21 +468,14 @@ static int pcmcia_device_probe(struct device * dev)
11266 - goto put_module;
11267 - }
11268 -
11269 -- /* handle pseudo multifunction devices:
11270 -- * there are at most two pseudo multifunction devices.
11271 -- * if we're matching against the first, schedule a
11272 -- * call which will then check whether there are two
11273 -- * pseudo devices, and if not, add the second one.
11274 -- */
11275 -- did = p_dev->dev.driver_data;
11276 - if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) &&
11277 - (p_dev->socket->device_count == 1) && (p_dev->device_no == 0))
11278 - pcmcia_add_device_later(p_dev->socket, 0);
11279 -
11280 -- put_module:
11281 -+put_module:
11282 - if (ret)
11283 - module_put(p_drv->owner);
11284 -- put_dev:
11285 -+put_dev:
11286 - if (ret)
11287 - put_device(dev);
11288 - return (ret);
11289 -diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
11290 -index 90dfa0d..846582b 100644
11291 ---- a/drivers/rtc/rtc-dev.c
11292 -+++ b/drivers/rtc/rtc-dev.c
11293 -@@ -401,6 +401,12 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
11294 - return err;
11295 - }
11296 -
11297 -+static int rtc_dev_fasync(int fd, struct file *file, int on)
11298 -+{
11299 -+ struct rtc_device *rtc = file->private_data;
11300 -+ return fasync_helper(fd, file, on, &rtc->async_queue);
11301 -+}
11302 -+
11303 - static int rtc_dev_release(struct inode *inode, struct file *file)
11304 - {
11305 - struct rtc_device *rtc = file->private_data;
11306 -@@ -411,16 +417,13 @@ static int rtc_dev_release(struct inode *inode, struct file *file)
11307 - if (rtc->ops->release)
11308 - rtc->ops->release(rtc->dev.parent);
11309 -
11310 -+ if (file->f_flags & FASYNC)
11311 -+ rtc_dev_fasync(-1, file, 0);
11312 -+
11313 - clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
11314 - return 0;
11315 - }
11316 -
11317 --static int rtc_dev_fasync(int fd, struct file *file, int on)
11318 --{
11319 -- struct rtc_device *rtc = file->private_data;
11320 -- return fasync_helper(fd, file, on, &rtc->async_queue);
11321 --}
11322 --
11323 - static const struct file_operations rtc_dev_fops = {
11324 - .owner = THIS_MODULE,
11325 - .llseek = no_llseek,
11326 -diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
11327 -index ec63b79..d191cec 100644
11328 ---- a/drivers/scsi/qla2xxx/qla_isr.c
11329 -+++ b/drivers/scsi/qla2xxx/qla_isr.c
11330 -@@ -1838,7 +1838,6 @@ clear_risc_ints:
11331 - WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
11332 - }
11333 - spin_unlock_irq(&ha->hardware_lock);
11334 -- ha->isp_ops->enable_intrs(ha);
11335 -
11336 - fail:
11337 - return ret;
11338 -diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
11339 -index 047ee64..4c6b902 100644
11340 ---- a/drivers/scsi/qla2xxx/qla_os.c
11341 -+++ b/drivers/scsi/qla2xxx/qla_os.c
11342 -@@ -1740,6 +1740,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
11343 - if (ret)
11344 - goto probe_failed;
11345 -
11346 -+ ha->isp_ops->enable_intrs(ha);
11347 -+
11348 - scsi_scan_host(host);
11349 -
11350 - qla2x00_alloc_sysfs_attr(ha);
11351 -diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
11352 -index 0c452c4..2b7ba85 100644
11353 ---- a/drivers/spi/pxa2xx_spi.c
11354 -+++ b/drivers/spi/pxa2xx_spi.c
11355 -@@ -48,9 +48,10 @@ MODULE_ALIAS("platform:pxa2xx-spi");
11356 -
11357 - #define MAX_BUSES 3
11358 -
11359 --#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
11360 --#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
11361 --#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
11362 -+#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
11363 -+#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
11364 -+#define IS_DMA_ALIGNED(x) ((((u32)(x)) & 0x07) == 0)
11365 -+#define MAX_DMA_LEN 8191
11366 -
11367 - /*
11368 - * for testing SSCR1 changes that require SSP restart, basically
11369 -@@ -145,7 +146,6 @@ struct driver_data {
11370 - size_t tx_map_len;
11371 - u8 n_bytes;
11372 - u32 dma_width;
11373 -- int cs_change;
11374 - int (*write)(struct driver_data *drv_data);
11375 - int (*read)(struct driver_data *drv_data);
11376 - irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
11377 -@@ -407,8 +407,45 @@ static void giveback(struct driver_data *drv_data)
11378 - struct spi_transfer,
11379 - transfer_list);
11380 -
11381 -+ /* Delay if requested before any change in chip select */
11382 -+ if (last_transfer->delay_usecs)
11383 -+ udelay(last_transfer->delay_usecs);
11384 -+
11385 -+ /* Drop chip select UNLESS cs_change is true or we are returning
11386 -+ * a message with an error, or next message is for another chip
11387 -+ */
11388 - if (!last_transfer->cs_change)
11389 - drv_data->cs_control(PXA2XX_CS_DEASSERT);
11390 -+ else {
11391 -+ struct spi_message *next_msg;
11392 -+
11393 -+ /* Holding of cs was hinted, but we need to make sure
11394 -+ * the next message is for the same chip. Don't waste
11395 -+ * time with the following tests unless this was hinted.
11396 -+ *
11397 -+ * We cannot postpone this until pump_messages, because
11398 -+ * after calling msg->complete (below) the driver that
11399 -+ * sent the current message could be unloaded, which
11400 -+ * could invalidate the cs_control() callback...
11401 -+ */
11402 -+
11403 -+ /* get a pointer to the next message, if any */
11404 -+ spin_lock_irqsave(&drv_data->lock, flags);
11405 -+ if (list_empty(&drv_data->queue))
11406 -+ next_msg = NULL;
11407 -+ else
11408 -+ next_msg = list_entry(drv_data->queue.next,
11409 -+ struct spi_message, queue);
11410 -+ spin_unlock_irqrestore(&drv_data->lock, flags);
11411 -+
11412 -+ /* see if the next and current messages point
11413 -+ * to the same chip
11414 -+ */
11415 -+ if (next_msg && next_msg->spi != msg->spi)
11416 -+ next_msg = NULL;
11417 -+ if (!next_msg || msg->state == ERROR_STATE)
11418 -+ drv_data->cs_control(PXA2XX_CS_DEASSERT);
11419 -+ }
11420 -
11421 - msg->state = NULL;
11422 - if (msg->complete)
11423 -@@ -491,10 +528,9 @@ static void dma_transfer_complete(struct driver_data *drv_data)
11424 - msg->actual_length += drv_data->len -
11425 - (drv_data->rx_end - drv_data->rx);
11426 -
11427 -- /* Release chip select if requested, transfer delays are
11428 -- * handled in pump_transfers */
11429 -- if (drv_data->cs_change)
11430 -- drv_data->cs_control(PXA2XX_CS_DEASSERT);
11431 -+ /* Transfer delays and chip select release are
11432 -+ * handled in pump_transfers or giveback
11433 -+ */
11434 -
11435 - /* Move to next transfer */
11436 - msg->state = next_transfer(drv_data);
11437 -@@ -603,10 +639,9 @@ static void int_transfer_complete(struct driver_data *drv_data)
11438 - drv_data->cur_msg->actual_length += drv_data->len -
11439 - (drv_data->rx_end - drv_data->rx);
11440 -
11441 -- /* Release chip select if requested, transfer delays are
11442 -- * handled in pump_transfers */
11443 -- if (drv_data->cs_change)
11444 -- drv_data->cs_control(PXA2XX_CS_DEASSERT);
11445 -+ /* Transfer delays and chip select release are
11446 -+ * handled in pump_transfers or giveback
11447 -+ */
11448 -
11449 - /* Move to next transfer */
11450 - drv_data->cur_msg->state = next_transfer(drv_data);
11451 -@@ -841,23 +876,40 @@ static void pump_transfers(unsigned long data)
11452 - return;
11453 - }
11454 -
11455 -- /* Delay if requested at end of transfer*/
11456 -+ /* Delay if requested at end of transfer before CS change */
11457 - if (message->state == RUNNING_STATE) {
11458 - previous = list_entry(transfer->transfer_list.prev,
11459 - struct spi_transfer,
11460 - transfer_list);
11461 - if (previous->delay_usecs)
11462 - udelay(previous->delay_usecs);
11463 -+
11464 -+ /* Drop chip select only if cs_change is requested */
11465 -+ if (previous->cs_change)
11466 -+ drv_data->cs_control(PXA2XX_CS_DEASSERT);
11467 - }
11468 -
11469 -- /* Check transfer length */
11470 -- if (transfer->len > 8191)
11471 -- {
11472 -- dev_warn(&drv_data->pdev->dev, "pump_transfers: transfer "
11473 -- "length greater than 8191\n");
11474 -- message->status = -EINVAL;
11475 -- giveback(drv_data);
11476 -- return;
11477 -+ /* Check for transfers that need multiple DMA segments */
11478 -+ if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {
11479 -+
11480 -+ /* reject already-mapped transfers; PIO won't always work */
11481 -+ if (message->is_dma_mapped
11482 -+ || transfer->rx_dma || transfer->tx_dma) {
11483 -+ dev_err(&drv_data->pdev->dev,
11484 -+ "pump_transfers: mapped transfer length "
11485 -+ "of %u is greater than %d\n",
11486 -+ transfer->len, MAX_DMA_LEN);
11487 -+ message->status = -EINVAL;
11488 -+ giveback(drv_data);
11489 -+ return;
11490 -+ }
11491 -+
11492 -+ /* warn ... we force this to PIO mode */
11493 -+ if (printk_ratelimit())
11494 -+ dev_warn(&message->spi->dev, "pump_transfers: "
11495 -+ "DMA disabled for transfer length %ld "
11496 -+ "greater than %d\n",
11497 -+ (long)drv_data->len, MAX_DMA_LEN);
11498 - }
11499 -
11500 - /* Setup the transfer state based on the type of transfer */
11501 -@@ -879,7 +931,6 @@ static void pump_transfers(unsigned long data)
11502 - drv_data->len = transfer->len & DCMD_LENGTH;
11503 - drv_data->write = drv_data->tx ? chip->write : null_writer;
11504 - drv_data->read = drv_data->rx ? chip->read : null_reader;
11505 -- drv_data->cs_change = transfer->cs_change;
11506 -
11507 - /* Change speed and bit per word on a per transfer */
11508 - cr0 = chip->cr0;
11509 -@@ -926,7 +977,7 @@ static void pump_transfers(unsigned long data)
11510 - &dma_thresh))
11511 - if (printk_ratelimit())
11512 - dev_warn(&message->spi->dev,
11513 -- "pump_transfer: "
11514 -+ "pump_transfers: "
11515 - "DMA burst size reduced to "
11516 - "match bits_per_word\n");
11517 - }
11518 -@@ -940,8 +991,23 @@ static void pump_transfers(unsigned long data)
11519 -
11520 - message->state = RUNNING_STATE;
11521 -
11522 -- /* Try to map dma buffer and do a dma transfer if successful */
11523 -- if ((drv_data->dma_mapped = map_dma_buffers(drv_data))) {
11524 -+ /* Try to map dma buffer and do a dma transfer if successful, but
11525 -+ * only if the length is non-zero and less than MAX_DMA_LEN.
11526 -+ *
11527 -+ * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
11528 -+ * of PIO instead. Care is needed above because the transfer may
11529 -+ * have have been passed with buffers that are already dma mapped.
11530 -+ * A zero-length transfer in PIO mode will not try to write/read
11531 -+ * to/from the buffers
11532 -+ *
11533 -+ * REVISIT large transfers are exactly where we most want to be
11534 -+ * using DMA. If this happens much, split those transfers into
11535 -+ * multiple DMA segments rather than forcing PIO.
11536 -+ */
11537 -+ drv_data->dma_mapped = 0;
11538 -+ if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN)
11539 -+ drv_data->dma_mapped = map_dma_buffers(drv_data);
11540 -+ if (drv_data->dma_mapped) {
11541 -
11542 - /* Ensure we have the correct interrupt handler */
11543 - drv_data->transfer_handler = dma_transfer;
11544 -diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
11545 -index 42a4364..7e6130a 100644
11546 ---- a/drivers/usb/core/hcd.c
11547 -+++ b/drivers/usb/core/hcd.c
11548 -@@ -1885,7 +1885,8 @@ int usb_add_hcd(struct usb_hcd *hcd,
11549 - * with IRQF_SHARED. As usb_hcd_irq() will always disable
11550 - * interrupts we can remove it here.
11551 - */
11552 -- irqflags &= ~IRQF_DISABLED;
11553 -+ if (irqflags & IRQF_SHARED)
11554 -+ irqflags &= ~IRQF_DISABLED;
11555 -
11556 - snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
11557 - hcd->driver->description, hcd->self.busnum);
11558 -diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h
11559 -index 0135e03..e3437c4 100644
11560 ---- a/drivers/video/console/fbcon.h
11561 -+++ b/drivers/video/console/fbcon.h
11562 -@@ -110,7 +110,7 @@ static inline int mono_col(const struct fb_info *info)
11563 - __u32 max_len;
11564 - max_len = max(info->var.green.length, info->var.red.length);
11565 - max_len = max(info->var.blue.length, max_len);
11566 -- return ~(0xfff << (max_len & 0xff));
11567 -+ return (~(0xfff << max_len)) & 0xff;
11568 - }
11569 -
11570 - static inline int attr_col_ec(int shift, struct vc_data *vc,
11571 -diff --git a/fs/buffer.c b/fs/buffer.c
11572 -index 0f51c0f..42d2104 100644
11573 ---- a/fs/buffer.c
11574 -+++ b/fs/buffer.c
11575 -@@ -2868,14 +2868,17 @@ int submit_bh(int rw, struct buffer_head * bh)
11576 - BUG_ON(!buffer_mapped(bh));
11577 - BUG_ON(!bh->b_end_io);
11578 -
11579 -- if (buffer_ordered(bh) && (rw == WRITE))
11580 -- rw = WRITE_BARRIER;
11581 -+ /*
11582 -+ * Mask in barrier bit for a write (could be either a WRITE or a
11583 -+ * WRITE_SYNC
11584 -+ */
11585 -+ if (buffer_ordered(bh) && (rw & WRITE))
11586 -+ rw |= WRITE_BARRIER;
11587 -
11588 - /*
11589 -- * Only clear out a write error when rewriting, should this
11590 -- * include WRITE_SYNC as well?
11591 -+ * Only clear out a write error when rewriting
11592 - */
11593 -- if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
11594 -+ if (test_set_buffer_req(bh) && (rw & WRITE))
11595 - clear_buffer_write_io_error(bh);
11596 -
11597 - /*
11598 -diff --git a/fs/exec.c b/fs/exec.c
11599 -index fd92343..85e9948 100644
11600 ---- a/fs/exec.c
11601 -+++ b/fs/exec.c
11602 -@@ -740,11 +740,11 @@ static int exec_mmap(struct mm_struct *mm)
11603 - tsk->active_mm = mm;
11604 - activate_mm(active_mm, mm);
11605 - task_unlock(tsk);
11606 -- mm_update_next_owner(old_mm);
11607 - arch_pick_mmap_layout(mm);
11608 - if (old_mm) {
11609 - up_read(&old_mm->mmap_sem);
11610 - BUG_ON(active_mm != old_mm);
11611 -+ mm_update_next_owner(old_mm);
11612 - mmput(old_mm);
11613 - return 0;
11614 - }
11615 -diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
11616 -index 10e149a..07f348b 100644
11617 ---- a/fs/ocfs2/stackglue.c
11618 -+++ b/fs/ocfs2/stackglue.c
11619 -@@ -97,13 +97,14 @@ static int ocfs2_stack_driver_request(const char *stack_name,
11620 - goto out;
11621 - }
11622 -
11623 -- /* Ok, the stack is pinned */
11624 -- p->sp_count++;
11625 - active_stack = p;
11626 --
11627 - rc = 0;
11628 -
11629 - out:
11630 -+ /* If we found it, pin it */
11631 -+ if (!rc)
11632 -+ active_stack->sp_count++;
11633 -+
11634 - spin_unlock(&ocfs2_stack_lock);
11635 - return rc;
11636 - }
11637 -diff --git a/fs/proc/array.c b/fs/proc/array.c
11638 -index 797d775..0b2a88c 100644
11639 ---- a/fs/proc/array.c
11640 -+++ b/fs/proc/array.c
11641 -@@ -332,65 +332,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
11642 - return 0;
11643 - }
11644 -
11645 --/*
11646 -- * Use precise platform statistics if available:
11647 -- */
11648 --#ifdef CONFIG_VIRT_CPU_ACCOUNTING
11649 --static cputime_t task_utime(struct task_struct *p)
11650 --{
11651 -- return p->utime;
11652 --}
11653 --
11654 --static cputime_t task_stime(struct task_struct *p)
11655 --{
11656 -- return p->stime;
11657 --}
11658 --#else
11659 --static cputime_t task_utime(struct task_struct *p)
11660 --{
11661 -- clock_t utime = cputime_to_clock_t(p->utime),
11662 -- total = utime + cputime_to_clock_t(p->stime);
11663 -- u64 temp;
11664 --
11665 -- /*
11666 -- * Use CFS's precise accounting:
11667 -- */
11668 -- temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
11669 --
11670 -- if (total) {
11671 -- temp *= utime;
11672 -- do_div(temp, total);
11673 -- }
11674 -- utime = (clock_t)temp;
11675 --
11676 -- p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
11677 -- return p->prev_utime;
11678 --}
11679 --
11680 --static cputime_t task_stime(struct task_struct *p)
11681 --{
11682 -- clock_t stime;
11683 --
11684 -- /*
11685 -- * Use CFS's precise accounting. (we subtract utime from
11686 -- * the total, to make sure the total observed by userspace
11687 -- * grows monotonically - apps rely on that):
11688 -- */
11689 -- stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
11690 -- cputime_to_clock_t(task_utime(p));
11691 --
11692 -- if (stime >= 0)
11693 -- p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
11694 --
11695 -- return p->prev_stime;
11696 --}
11697 --#endif
11698 --
11699 --static cputime_t task_gtime(struct task_struct *p)
11700 --{
11701 -- return p->gtime;
11702 --}
11703 --
11704 - static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
11705 - struct pid *pid, struct task_struct *task, int whole)
11706 - {
11707 -diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
11708 -index be4af00..71ef3f0 100644
11709 ---- a/include/asm-generic/rtc.h
11710 -+++ b/include/asm-generic/rtc.h
11711 -@@ -15,6 +15,7 @@
11712 - #include <linux/mc146818rtc.h>
11713 - #include <linux/rtc.h>
11714 - #include <linux/bcd.h>
11715 -+#include <linux/delay.h>
11716 -
11717 - #define RTC_PIE 0x40 /* periodic interrupt enable */
11718 - #define RTC_AIE 0x20 /* alarm interrupt enable */
11719 -@@ -43,7 +44,6 @@ static inline unsigned char rtc_is_updating(void)
11720 -
11721 - static inline unsigned int get_rtc_time(struct rtc_time *time)
11722 - {
11723 -- unsigned long uip_watchdog = jiffies;
11724 - unsigned char ctrl;
11725 - unsigned long flags;
11726 -
11727 -@@ -53,19 +53,15 @@ static inline unsigned int get_rtc_time(struct rtc_time *time)
11728 -
11729 - /*
11730 - * read RTC once any update in progress is done. The update
11731 -- * can take just over 2ms. We wait 10 to 20ms. There is no need to
11732 -+ * can take just over 2ms. We wait 20ms. There is no need to
11733 - * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
11734 - * If you need to know *exactly* when a second has started, enable
11735 - * periodic update complete interrupts, (via ioctl) and then
11736 - * immediately read /dev/rtc which will block until you get the IRQ.
11737 - * Once the read clears, read the RTC time (again via ioctl). Easy.
11738 - */
11739 --
11740 -- if (rtc_is_updating() != 0)
11741 -- while (jiffies - uip_watchdog < 2*HZ/100) {
11742 -- barrier();
11743 -- cpu_relax();
11744 -- }
11745 -+ if (rtc_is_updating())
11746 -+ mdelay(20);
11747 -
11748 - /*
11749 - * Only the values that we read from the RTC are set. We leave
11750 -diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
11751 -index 4b683af..56d00e3 100644
11752 ---- a/include/asm-x86/i387.h
11753 -+++ b/include/asm-x86/i387.h
11754 -@@ -63,8 +63,6 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
11755 - #else
11756 - : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
11757 - #endif
11758 -- if (unlikely(err))
11759 -- init_fpu(current);
11760 - return err;
11761 - }
11762 -
11763 -@@ -138,60 +136,6 @@ static inline void __save_init_fpu(struct task_struct *tsk)
11764 - task_thread_info(tsk)->status &= ~TS_USEDFPU;
11765 - }
11766 -
11767 --/*
11768 -- * Signal frame handlers.
11769 -- */
11770 --
11771 --static inline int save_i387(struct _fpstate __user *buf)
11772 --{
11773 -- struct task_struct *tsk = current;
11774 -- int err = 0;
11775 --
11776 -- BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
11777 -- sizeof(tsk->thread.xstate->fxsave));
11778 --
11779 -- if ((unsigned long)buf % 16)
11780 -- printk("save_i387: bad fpstate %p\n", buf);
11781 --
11782 -- if (!used_math())
11783 -- return 0;
11784 -- clear_used_math(); /* trigger finit */
11785 -- if (task_thread_info(tsk)->status & TS_USEDFPU) {
11786 -- err = save_i387_checking((struct i387_fxsave_struct __user *)
11787 -- buf);
11788 -- if (err)
11789 -- return err;
11790 -- task_thread_info(tsk)->status &= ~TS_USEDFPU;
11791 -- stts();
11792 -- } else {
11793 -- if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
11794 -- sizeof(struct i387_fxsave_struct)))
11795 -- return -1;
11796 -- }
11797 -- return 1;
11798 --}
11799 --
11800 --/*
11801 -- * This restores directly out of user space. Exceptions are handled.
11802 -- */
11803 --static inline int restore_i387(struct _fpstate __user *buf)
11804 --{
11805 -- struct task_struct *tsk = current;
11806 -- int err;
11807 --
11808 -- if (!used_math()) {
11809 -- err = init_fpu(tsk);
11810 -- if (err)
11811 -- return err;
11812 -- }
11813 --
11814 -- if (!(task_thread_info(current)->status & TS_USEDFPU)) {
11815 -- clts();
11816 -- task_thread_info(current)->status |= TS_USEDFPU;
11817 -- }
11818 -- return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
11819 --}
11820 --
11821 - #else /* CONFIG_X86_32 */
11822 -
11823 - extern void finit(void);
11824 -diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h
11825 -index 57a991b..4c75587 100644
11826 ---- a/include/asm-x86/mpspec.h
11827 -+++ b/include/asm-x86/mpspec.h
11828 -@@ -35,6 +35,7 @@ extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
11829 - extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES];
11830 -
11831 - extern unsigned int boot_cpu_physical_apicid;
11832 -+extern unsigned int max_physical_apicid;
11833 - extern int smp_found_config;
11834 - extern int mpc_default_type;
11835 - extern unsigned long mp_lapic_addr;
11836 -diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
11837 -index 1cc50d2..3922eca 100644
11838 ---- a/include/asm-x86/pgtable_64.h
11839 -+++ b/include/asm-x86/pgtable_64.h
11840 -@@ -146,7 +146,7 @@ static inline void native_pgd_clear(pgd_t *pgd)
11841 - #define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
11842 - #define VMEMMAP_START _AC(0xffffe20000000000, UL)
11843 - #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11844 --#define MODULES_END _AC(0xfffffffffff00000, UL)
11845 -+#define MODULES_END _AC(0xffffffffff000000, UL)
11846 - #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11847 -
11848 - #ifndef __ASSEMBLY__
11849 -diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
11850 -index c33b0dc..ed3a5d4 100644
11851 ---- a/include/linux/clockchips.h
11852 -+++ b/include/linux/clockchips.h
11853 -@@ -127,6 +127,8 @@ extern int clockevents_register_notifier(struct notifier_block *nb);
11854 - extern int clockevents_program_event(struct clock_event_device *dev,
11855 - ktime_t expires, ktime_t now);
11856 -
11857 -+extern void clockevents_handle_noop(struct clock_event_device *dev);
11858 -+
11859 - #ifdef CONFIG_GENERIC_CLOCKEVENTS
11860 - extern void clockevents_notify(unsigned long reason, void *arg);
11861 - #else
11862 -diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
11863 -index 443bc7c..428328a 100644
11864 ---- a/include/linux/mmzone.h
11865 -+++ b/include/linux/mmzone.h
11866 -@@ -751,8 +751,9 @@ static inline int zonelist_node_idx(struct zoneref *zoneref)
11867 - *
11868 - * This function returns the next zone at or below a given zone index that is
11869 - * within the allowed nodemask using a cursor as the starting point for the
11870 -- * search. The zoneref returned is a cursor that is used as the next starting
11871 -- * point for future calls to next_zones_zonelist().
11872 -+ * search. The zoneref returned is a cursor that represents the current zone
11873 -+ * being examined. It should be advanced by one before calling
11874 -+ * next_zones_zonelist again.
11875 - */
11876 - struct zoneref *next_zones_zonelist(struct zoneref *z,
11877 - enum zone_type highest_zoneidx,
11878 -@@ -768,9 +769,8 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
11879 - *
11880 - * This function returns the first zone at or below a given zone index that is
11881 - * within the allowed nodemask. The zoneref returned is a cursor that can be
11882 -- * used to iterate the zonelist with next_zones_zonelist. The cursor should
11883 -- * not be used by the caller as it does not match the value of the zone
11884 -- * returned.
11885 -+ * used to iterate the zonelist with next_zones_zonelist by advancing it by
11886 -+ * one before calling.
11887 - */
11888 - static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
11889 - enum zone_type highest_zoneidx,
11890 -@@ -795,7 +795,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
11891 - #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
11892 - for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
11893 - zone; \
11894 -- z = next_zones_zonelist(z, highidx, nodemask, &zone)) \
11895 -+ z = next_zones_zonelist(++z, highidx, nodemask, &zone)) \
11896 -
11897 - /**
11898 - * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
11899 -diff --git a/include/linux/rmap.h b/include/linux/rmap.h
11900 -index 1383692..0e889fa 100644
11901 ---- a/include/linux/rmap.h
11902 -+++ b/include/linux/rmap.h
11903 -@@ -94,7 +94,7 @@ int try_to_unmap(struct page *, int ignore_refs);
11904 - * Called from mm/filemap_xip.c to unmap empty zero page
11905 - */
11906 - pte_t *page_check_address(struct page *, struct mm_struct *,
11907 -- unsigned long, spinlock_t **);
11908 -+ unsigned long, spinlock_t **, int);
11909 -
11910 - /*
11911 - * Used by swapoff to help locate where page is expected in vma.
11912 -diff --git a/include/linux/sched.h b/include/linux/sched.h
11913 -index c5d3f84..2103c73 100644
11914 ---- a/include/linux/sched.h
11915 -+++ b/include/linux/sched.h
11916 -@@ -1477,6 +1477,10 @@ static inline void put_task_struct(struct task_struct *t)
11917 - __put_task_struct(t);
11918 - }
11919 -
11920 -+extern cputime_t task_utime(struct task_struct *p);
11921 -+extern cputime_t task_stime(struct task_struct *p);
11922 -+extern cputime_t task_gtime(struct task_struct *p);
11923 -+
11924 - /*
11925 - * Per process flags
11926 - */
11927 -diff --git a/include/linux/smb.h b/include/linux/smb.h
11928 -index caa43b2..82fefdd 100644
11929 ---- a/include/linux/smb.h
11930 -+++ b/include/linux/smb.h
11931 -@@ -11,7 +11,9 @@
11932 -
11933 - #include <linux/types.h>
11934 - #include <linux/magic.h>
11935 -+#ifdef __KERNEL__
11936 - #include <linux/time.h>
11937 -+#endif
11938 -
11939 - enum smb_protocol {
11940 - SMB_PROTOCOL_NONE,
11941 -diff --git a/include/net/netlink.h b/include/net/netlink.h
11942 -index dfc3701..6a5fdd8 100644
11943 ---- a/include/net/netlink.h
11944 -+++ b/include/net/netlink.h
11945 -@@ -702,7 +702,7 @@ static inline int nla_len(const struct nlattr *nla)
11946 - */
11947 - static inline int nla_ok(const struct nlattr *nla, int remaining)
11948 - {
11949 -- return remaining >= sizeof(*nla) &&
11950 -+ return remaining >= (int) sizeof(*nla) &&
11951 - nla->nla_len >= sizeof(*nla) &&
11952 - nla->nla_len <= remaining;
11953 - }
11954 -diff --git a/kernel/cgroup.c b/kernel/cgroup.c
11955 -index 15ac0e1..d53caaa 100644
11956 ---- a/kernel/cgroup.c
11957 -+++ b/kernel/cgroup.c
11958 -@@ -2761,14 +2761,15 @@ void cgroup_fork_callbacks(struct task_struct *child)
11959 - */
11960 - void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
11961 - {
11962 -- struct cgroup *oldcgrp, *newcgrp;
11963 -+ struct cgroup *oldcgrp, *newcgrp = NULL;
11964 -
11965 - if (need_mm_owner_callback) {
11966 - int i;
11967 - for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
11968 - struct cgroup_subsys *ss = subsys[i];
11969 - oldcgrp = task_cgroup(old, ss->subsys_id);
11970 -- newcgrp = task_cgroup(new, ss->subsys_id);
11971 -+ if (new)
11972 -+ newcgrp = task_cgroup(new, ss->subsys_id);
11973 - if (oldcgrp == newcgrp)
11974 - continue;
11975 - if (ss->mm_owner_changed)
11976 -diff --git a/kernel/exit.c b/kernel/exit.c
11977 -index 8f6185e..f68b081 100644
11978 ---- a/kernel/exit.c
11979 -+++ b/kernel/exit.c
11980 -@@ -111,9 +111,9 @@ static void __exit_signal(struct task_struct *tsk)
11981 - * We won't ever get here for the group leader, since it
11982 - * will have been the last reference on the signal_struct.
11983 - */
11984 -- sig->utime = cputime_add(sig->utime, tsk->utime);
11985 -- sig->stime = cputime_add(sig->stime, tsk->stime);
11986 -- sig->gtime = cputime_add(sig->gtime, tsk->gtime);
11987 -+ sig->utime = cputime_add(sig->utime, task_utime(tsk));
11988 -+ sig->stime = cputime_add(sig->stime, task_stime(tsk));
11989 -+ sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
11990 - sig->min_flt += tsk->min_flt;
11991 - sig->maj_flt += tsk->maj_flt;
11992 - sig->nvcsw += tsk->nvcsw;
11993 -@@ -577,8 +577,6 @@ mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
11994 - * If there are other users of the mm and the owner (us) is exiting
11995 - * we need to find a new owner to take on the responsibility.
11996 - */
11997 -- if (!mm)
11998 -- return 0;
11999 - if (atomic_read(&mm->mm_users) <= 1)
12000 - return 0;
12001 - if (mm->owner != p)
12002 -@@ -621,6 +619,16 @@ retry:
12003 - } while_each_thread(g, c);
12004 -
12005 - read_unlock(&tasklist_lock);
12006 -+ /*
12007 -+ * We found no owner yet mm_users > 1: this implies that we are
12008 -+ * most likely racing with swapoff (try_to_unuse()) or /proc or
12009 -+ * ptrace or page migration (get_task_mm()). Mark owner as NULL,
12010 -+ * so that subsystems can understand the callback and take action.
12011 -+ */
12012 -+ down_write(&mm->mmap_sem);
12013 -+ cgroup_mm_owner_callbacks(mm->owner, NULL);
12014 -+ mm->owner = NULL;
12015 -+ up_write(&mm->mmap_sem);
12016 - return;
12017 -
12018 - assign_new_owner:
12019 -diff --git a/kernel/sched.c b/kernel/sched.c
12020 -index 4e2f603..0a50ee4 100644
12021 ---- a/kernel/sched.c
12022 -+++ b/kernel/sched.c
12023 -@@ -3995,6 +3995,65 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
12024 - }
12025 -
12026 - /*
12027 -+ * Use precise platform statistics if available:
12028 -+ */
12029 -+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
12030 -+cputime_t task_utime(struct task_struct *p)
12031 -+{
12032 -+ return p->utime;
12033 -+}
12034 -+
12035 -+cputime_t task_stime(struct task_struct *p)
12036 -+{
12037 -+ return p->stime;
12038 -+}
12039 -+#else
12040 -+cputime_t task_utime(struct task_struct *p)
12041 -+{
12042 -+ clock_t utime = cputime_to_clock_t(p->utime),
12043 -+ total = utime + cputime_to_clock_t(p->stime);
12044 -+ u64 temp;
12045 -+
12046 -+ /*
12047 -+ * Use CFS's precise accounting:
12048 -+ */
12049 -+ temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
12050 -+
12051 -+ if (total) {
12052 -+ temp *= utime;
12053 -+ do_div(temp, total);
12054 -+ }
12055 -+ utime = (clock_t)temp;
12056 -+
12057 -+ p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
12058 -+ return p->prev_utime;
12059 -+}
12060 -+
12061 -+cputime_t task_stime(struct task_struct *p)
12062 -+{
12063 -+ clock_t stime;
12064 -+
12065 -+ /*
12066 -+ * Use CFS's precise accounting. (we subtract utime from
12067 -+ * the total, to make sure the total observed by userspace
12068 -+ * grows monotonically - apps rely on that):
12069 -+ */
12070 -+ stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
12071 -+ cputime_to_clock_t(task_utime(p));
12072 -+
12073 -+ if (stime >= 0)
12074 -+ p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
12075 -+
12076 -+ return p->prev_stime;
12077 -+}
12078 -+#endif
12079 -+
12080 -+inline cputime_t task_gtime(struct task_struct *p)
12081 -+{
12082 -+ return p->gtime;
12083 -+}
12084 -+
12085 -+/*
12086 - * This function gets called by the timer code, with HZ frequency.
12087 - * We call it with interrupts disabled.
12088 - *
12089 -diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
12090 -index 3d1e3e1..1876b52 100644
12091 ---- a/kernel/time/clockevents.c
12092 -+++ b/kernel/time/clockevents.c
12093 -@@ -177,7 +177,7 @@ void clockevents_register_device(struct clock_event_device *dev)
12094 - /*
12095 - * Noop handler when we shut down an event device
12096 - */
12097 --static void clockevents_handle_noop(struct clock_event_device *dev)
12098 -+void clockevents_handle_noop(struct clock_event_device *dev)
12099 - {
12100 - }
12101 -
12102 -@@ -199,7 +199,6 @@ void clockevents_exchange_device(struct clock_event_device *old,
12103 - * released list and do a notify add later.
12104 - */
12105 - if (old) {
12106 -- old->event_handler = clockevents_handle_noop;
12107 - clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
12108 - list_del(&old->list);
12109 - list_add(&old->list, &clockevents_released);
12110 -diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
12111 -index 5125ddd..1ad46f3 100644
12112 ---- a/kernel/time/ntp.c
12113 -+++ b/kernel/time/ntp.c
12114 -@@ -245,7 +245,7 @@ static void sync_cmos_clock(unsigned long dummy)
12115 - if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
12116 - fail = update_persistent_clock(now);
12117 -
12118 -- next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec;
12119 -+ next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2);
12120 - if (next.tv_nsec <= 0)
12121 - next.tv_nsec += NSEC_PER_SEC;
12122 -
12123 -diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
12124 -index 57a1f02..e20a365 100644
12125 ---- a/kernel/time/tick-broadcast.c
12126 -+++ b/kernel/time/tick-broadcast.c
12127 -@@ -174,6 +174,8 @@ static void tick_do_periodic_broadcast(void)
12128 - */
12129 - static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
12130 - {
12131 -+ ktime_t next;
12132 -+
12133 - tick_do_periodic_broadcast();
12134 -
12135 - /*
12136 -@@ -184,10 +186,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
12137 -
12138 - /*
12139 - * Setup the next period for devices, which do not have
12140 -- * periodic mode:
12141 -+ * periodic mode. We read dev->next_event first and add to it
12142 -+ * when the event alrady expired. clockevents_program_event()
12143 -+ * sets dev->next_event only when the event is really
12144 -+ * programmed to the device.
12145 - */
12146 -- for (;;) {
12147 -- ktime_t next = ktime_add(dev->next_event, tick_period);
12148 -+ for (next = dev->next_event; ;) {
12149 -+ next = ktime_add(next, tick_period);
12150 -
12151 - if (!clockevents_program_event(dev, next, ktime_get()))
12152 - return;
12153 -@@ -204,7 +209,7 @@ static void tick_do_broadcast_on_off(void *why)
12154 - struct clock_event_device *bc, *dev;
12155 - struct tick_device *td;
12156 - unsigned long flags, *reason = why;
12157 -- int cpu;
12158 -+ int cpu, bc_stopped;
12159 -
12160 - spin_lock_irqsave(&tick_broadcast_lock, flags);
12161 -
12162 -@@ -222,6 +227,8 @@ static void tick_do_broadcast_on_off(void *why)
12163 - if (!tick_device_is_functional(dev))
12164 - goto out;
12165 -
12166 -+ bc_stopped = cpus_empty(tick_broadcast_mask);
12167 -+
12168 - switch (*reason) {
12169 - case CLOCK_EVT_NOTIFY_BROADCAST_ON:
12170 - case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
12171 -@@ -243,9 +250,10 @@ static void tick_do_broadcast_on_off(void *why)
12172 - break;
12173 - }
12174 -
12175 -- if (cpus_empty(tick_broadcast_mask))
12176 -- clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
12177 -- else {
12178 -+ if (cpus_empty(tick_broadcast_mask)) {
12179 -+ if (!bc_stopped)
12180 -+ clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
12181 -+ } else if (bc_stopped) {
12182 - if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
12183 - tick_broadcast_start_periodic(bc);
12184 - else
12185 -@@ -362,16 +370,8 @@ cpumask_t *tick_get_broadcast_oneshot_mask(void)
12186 - static int tick_broadcast_set_event(ktime_t expires, int force)
12187 - {
12188 - struct clock_event_device *bc = tick_broadcast_device.evtdev;
12189 -- ktime_t now = ktime_get();
12190 -- int res;
12191 --
12192 -- for(;;) {
12193 -- res = clockevents_program_event(bc, expires, now);
12194 -- if (!res || !force)
12195 -- return res;
12196 -- now = ktime_get();
12197 -- expires = ktime_add(now, ktime_set(0, bc->min_delta_ns));
12198 -- }
12199 -+
12200 -+ return tick_dev_program_event(bc, expires, force);
12201 - }
12202 -
12203 - int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
12204 -@@ -490,14 +490,52 @@ static void tick_broadcast_clear_oneshot(int cpu)
12205 - cpu_clear(cpu, tick_broadcast_oneshot_mask);
12206 - }
12207 -
12208 -+static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires)
12209 -+{
12210 -+ struct tick_device *td;
12211 -+ int cpu;
12212 -+
12213 -+ for_each_cpu_mask_nr(cpu, *mask) {
12214 -+ td = &per_cpu(tick_cpu_device, cpu);
12215 -+ if (td->evtdev)
12216 -+ td->evtdev->next_event = expires;
12217 -+ }
12218 -+}
12219 -+
12220 - /**
12221 - * tick_broadcast_setup_oneshot - setup the broadcast device
12222 - */
12223 - void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
12224 - {
12225 -- bc->event_handler = tick_handle_oneshot_broadcast;
12226 -- clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
12227 -- bc->next_event.tv64 = KTIME_MAX;
12228 -+ /* Set it up only once ! */
12229 -+ if (bc->event_handler != tick_handle_oneshot_broadcast) {
12230 -+ int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
12231 -+ int cpu = smp_processor_id();
12232 -+ cpumask_t mask;
12233 -+
12234 -+ bc->event_handler = tick_handle_oneshot_broadcast;
12235 -+ clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
12236 -+
12237 -+ /* Take the do_timer update */
12238 -+ tick_do_timer_cpu = cpu;
12239 -+
12240 -+ /*
12241 -+ * We must be careful here. There might be other CPUs
12242 -+ * waiting for periodic broadcast. We need to set the
12243 -+ * oneshot_mask bits for those and program the
12244 -+ * broadcast device to fire.
12245 -+ */
12246 -+ mask = tick_broadcast_mask;
12247 -+ cpu_clear(cpu, mask);
12248 -+ cpus_or(tick_broadcast_oneshot_mask,
12249 -+ tick_broadcast_oneshot_mask, mask);
12250 -+
12251 -+ if (was_periodic && !cpus_empty(mask)) {
12252 -+ tick_broadcast_init_next_event(&mask, tick_next_period);
12253 -+ tick_broadcast_set_event(tick_next_period, 1);
12254 -+ } else
12255 -+ bc->next_event.tv64 = KTIME_MAX;
12256 -+ }
12257 - }
12258 -
12259 - /*
12260 -diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
12261 -index 4f38865..5471cba 100644
12262 ---- a/kernel/time/tick-common.c
12263 -+++ b/kernel/time/tick-common.c
12264 -@@ -161,6 +161,7 @@ static void tick_setup_device(struct tick_device *td,
12265 - } else {
12266 - handler = td->evtdev->event_handler;
12267 - next_event = td->evtdev->next_event;
12268 -+ td->evtdev->event_handler = clockevents_handle_noop;
12269 - }
12270 -
12271 - td->evtdev = newdev;
12272 -diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
12273 -index f13f2b7..0ffc291 100644
12274 ---- a/kernel/time/tick-internal.h
12275 -+++ b/kernel/time/tick-internal.h
12276 -@@ -17,6 +17,8 @@ extern void tick_handle_periodic(struct clock_event_device *dev);
12277 - extern void tick_setup_oneshot(struct clock_event_device *newdev,
12278 - void (*handler)(struct clock_event_device *),
12279 - ktime_t nextevt);
12280 -+extern int tick_dev_program_event(struct clock_event_device *dev,
12281 -+ ktime_t expires, int force);
12282 - extern int tick_program_event(ktime_t expires, int force);
12283 - extern void tick_oneshot_notify(void);
12284 - extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *));
12285 -diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
12286 -index 450c049..2e8de67 100644
12287 ---- a/kernel/time/tick-oneshot.c
12288 -+++ b/kernel/time/tick-oneshot.c
12289 -@@ -23,24 +23,56 @@
12290 - #include "tick-internal.h"
12291 -
12292 - /**
12293 -- * tick_program_event
12294 -+ * tick_program_event internal worker function
12295 - */
12296 --int tick_program_event(ktime_t expires, int force)
12297 -+int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
12298 -+ int force)
12299 - {
12300 -- struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
12301 - ktime_t now = ktime_get();
12302 -+ int i;
12303 -
12304 -- while (1) {
12305 -+ for (i = 0;;) {
12306 - int ret = clockevents_program_event(dev, expires, now);
12307 -
12308 - if (!ret || !force)
12309 - return ret;
12310 -+
12311 -+ /*
12312 -+ * We tried 2 times to program the device with the given
12313 -+ * min_delta_ns. If that's not working then we double it
12314 -+ * and emit a warning.
12315 -+ */
12316 -+ if (++i > 2) {
12317 -+ /* Increase the min. delta and try again */
12318 -+ if (!dev->min_delta_ns)
12319 -+ dev->min_delta_ns = 5000;
12320 -+ else
12321 -+ dev->min_delta_ns += dev->min_delta_ns >> 1;
12322 -+
12323 -+ printk(KERN_WARNING
12324 -+ "CE: %s increasing min_delta_ns to %lu nsec\n",
12325 -+ dev->name ? dev->name : "?",
12326 -+ dev->min_delta_ns << 1);
12327 -+
12328 -+ i = 0;
12329 -+ }
12330 -+
12331 - now = ktime_get();
12332 -- expires = ktime_add(now, ktime_set(0, dev->min_delta_ns));
12333 -+ expires = ktime_add_ns(now, dev->min_delta_ns);
12334 - }
12335 - }
12336 -
12337 - /**
12338 -+ * tick_program_event
12339 -+ */
12340 -+int tick_program_event(ktime_t expires, int force)
12341 -+{
12342 -+ struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
12343 -+
12344 -+ return tick_dev_program_event(dev, expires, force);
12345 -+}
12346 -+
12347 -+/**
12348 - * tick_resume_onshot - resume oneshot mode
12349 - */
12350 - void tick_resume_oneshot(void)
12351 -@@ -61,7 +93,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
12352 - {
12353 - newdev->event_handler = handler;
12354 - clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT);
12355 -- clockevents_program_event(newdev, next_event, ktime_get());
12356 -+ tick_dev_program_event(newdev, next_event, 1);
12357 - }
12358 -
12359 - /**
12360 -diff --git a/lib/scatterlist.c b/lib/scatterlist.c
12361 -index b80c211..8c11004 100644
12362 ---- a/lib/scatterlist.c
12363 -+++ b/lib/scatterlist.c
12364 -@@ -312,8 +312,9 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
12365 - struct scatterlist *sg;
12366 - size_t buf_off = 0;
12367 - int i;
12368 -+ unsigned long flags;
12369 -
12370 -- WARN_ON(!irqs_disabled());
12371 -+ local_irq_save(flags);
12372 -
12373 - for_each_sg(sgl, sg, nents, i) {
12374 - struct page *page;
12375 -@@ -358,6 +359,8 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
12376 - break;
12377 - }
12378 -
12379 -+ local_irq_restore(flags);
12380 -+
12381 - return buf_off;
12382 - }
12383 -
12384 -diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
12385 -index 3e744ab..4e8bd50 100644
12386 ---- a/mm/filemap_xip.c
12387 -+++ b/mm/filemap_xip.c
12388 -@@ -184,7 +184,7 @@ __xip_unmap (struct address_space * mapping,
12389 - address = vma->vm_start +
12390 - ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
12391 - BUG_ON(address < vma->vm_start || address >= vma->vm_end);
12392 -- pte = page_check_address(page, mm, address, &ptl);
12393 -+ pte = page_check_address(page, mm, address, &ptl, 1);
12394 - if (pte) {
12395 - /* Nuke the page table entry. */
12396 - flush_cache_page(vma, address, pte_pfn(*pte));
12397 -diff --git a/mm/memcontrol.c b/mm/memcontrol.c
12398 -index e46451e..ed1cfb1 100644
12399 ---- a/mm/memcontrol.c
12400 -+++ b/mm/memcontrol.c
12401 -@@ -250,6 +250,14 @@ static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
12402 -
12403 - struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
12404 - {
12405 -+ /*
12406 -+ * mm_update_next_owner() may clear mm->owner to NULL
12407 -+ * if it races with swapoff, page migration, etc.
12408 -+ * So this can be called with p == NULL.
12409 -+ */
12410 -+ if (unlikely(!p))
12411 -+ return NULL;
12412 -+
12413 - return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
12414 - struct mem_cgroup, css);
12415 - }
12416 -@@ -574,6 +582,11 @@ retry:
12417 -
12418 - rcu_read_lock();
12419 - mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
12420 -+ if (unlikely(!mem)) {
12421 -+ rcu_read_unlock();
12422 -+ kmem_cache_free(page_cgroup_cache, pc);
12423 -+ return 0;
12424 -+ }
12425 - /*
12426 - * For every charge from the cgroup, increment reference count
12427 - */
12428 -diff --git a/mm/mmzone.c b/mm/mmzone.c
12429 -index 486ed59..16ce8b9 100644
12430 ---- a/mm/mmzone.c
12431 -+++ b/mm/mmzone.c
12432 -@@ -69,6 +69,6 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
12433 - (z->zone && !zref_in_nodemask(z, nodes)))
12434 - z++;
12435 -
12436 -- *zone = zonelist_zone(z++);
12437 -+ *zone = zonelist_zone(z);
12438 - return z;
12439 - }
12440 -diff --git a/mm/rmap.c b/mm/rmap.c
12441 -index bf0a5b7..ded8f9e 100644
12442 ---- a/mm/rmap.c
12443 -+++ b/mm/rmap.c
12444 -@@ -223,10 +223,14 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
12445 - /*
12446 - * Check that @page is mapped at @address into @mm.
12447 - *
12448 -+ * If @sync is false, page_check_address may perform a racy check to avoid
12449 -+ * the page table lock when the pte is not present (helpful when reclaiming
12450 -+ * highly shared pages).
12451 -+ *
12452 - * On success returns with pte mapped and locked.
12453 - */
12454 - pte_t *page_check_address(struct page *page, struct mm_struct *mm,
12455 -- unsigned long address, spinlock_t **ptlp)
12456 -+ unsigned long address, spinlock_t **ptlp, int sync)
12457 - {
12458 - pgd_t *pgd;
12459 - pud_t *pud;
12460 -@@ -248,7 +252,7 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
12461 -
12462 - pte = pte_offset_map(pmd, address);
12463 - /* Make a quick check before getting the lock */
12464 -- if (!pte_present(*pte)) {
12465 -+ if (!sync && !pte_present(*pte)) {
12466 - pte_unmap(pte);
12467 - return NULL;
12468 - }
12469 -@@ -280,7 +284,7 @@ static int page_referenced_one(struct page *page,
12470 - if (address == -EFAULT)
12471 - goto out;
12472 -
12473 -- pte = page_check_address(page, mm, address, &ptl);
12474 -+ pte = page_check_address(page, mm, address, &ptl, 0);
12475 - if (!pte)
12476 - goto out;
12477 -
12478 -@@ -449,7 +453,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
12479 - if (address == -EFAULT)
12480 - goto out;
12481 -
12482 -- pte = page_check_address(page, mm, address, &ptl);
12483 -+ pte = page_check_address(page, mm, address, &ptl, 1);
12484 - if (!pte)
12485 - goto out;
12486 -
12487 -@@ -707,7 +711,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
12488 - if (address == -EFAULT)
12489 - goto out;
12490 -
12491 -- pte = page_check_address(page, mm, address, &ptl);
12492 -+ pte = page_check_address(page, mm, address, &ptl, 0);
12493 - if (!pte)
12494 - goto out;
12495 -
12496 -diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
12497 -index b6e7ec0..9ca32e6 100644
12498 ---- a/net/ipv4/udp.c
12499 -+++ b/net/ipv4/udp.c
12500 -@@ -950,6 +950,27 @@ int udp_disconnect(struct sock *sk, int flags)
12501 - return 0;
12502 - }
12503 -
12504 -+static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
12505 -+{
12506 -+ int is_udplite = IS_UDPLITE(sk);
12507 -+ int rc;
12508 -+
12509 -+ if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
12510 -+ /* Note that an ENOMEM error is charged twice */
12511 -+ if (rc == -ENOMEM)
12512 -+ UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS,
12513 -+ is_udplite);
12514 -+ goto drop;
12515 -+ }
12516 -+
12517 -+ return 0;
12518 -+
12519 -+drop:
12520 -+ UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
12521 -+ kfree_skb(skb);
12522 -+ return -1;
12523 -+}
12524 -+
12525 - /* returns:
12526 - * -1: error
12527 - * 0: success
12528 -@@ -988,9 +1009,7 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
12529 - up->encap_rcv != NULL) {
12530 - int ret;
12531 -
12532 -- bh_unlock_sock(sk);
12533 - ret = (*up->encap_rcv)(sk, skb);
12534 -- bh_lock_sock(sk);
12535 - if (ret <= 0) {
12536 - UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS,
12537 - is_udplite);
12538 -@@ -1042,14 +1061,16 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
12539 - goto drop;
12540 - }
12541 -
12542 -- if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
12543 -- /* Note that an ENOMEM error is charged twice */
12544 -- if (rc == -ENOMEM)
12545 -- UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite);
12546 -- goto drop;
12547 -- }
12548 -+ rc = 0;
12549 -
12550 -- return 0;
12551 -+ bh_lock_sock(sk);
12552 -+ if (!sock_owned_by_user(sk))
12553 -+ rc = __udp_queue_rcv_skb(sk, skb);
12554 -+ else
12555 -+ sk_add_backlog(sk, skb);
12556 -+ bh_unlock_sock(sk);
12557 -+
12558 -+ return rc;
12559 -
12560 - drop:
12561 - UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
12562 -@@ -1087,15 +1108,7 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
12563 - skb1 = skb_clone(skb, GFP_ATOMIC);
12564 -
12565 - if (skb1) {
12566 -- int ret = 0;
12567 --
12568 -- bh_lock_sock(sk);
12569 -- if (!sock_owned_by_user(sk))
12570 -- ret = udp_queue_rcv_skb(sk, skb1);
12571 -- else
12572 -- sk_add_backlog(sk, skb1);
12573 -- bh_unlock_sock(sk);
12574 --
12575 -+ int ret = udp_queue_rcv_skb(sk, skb1);
12576 - if (ret > 0)
12577 - /* we should probably re-process instead
12578 - * of dropping packets here. */
12579 -@@ -1188,13 +1201,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
12580 - uh->dest, inet_iif(skb), udptable);
12581 -
12582 - if (sk != NULL) {
12583 -- int ret = 0;
12584 -- bh_lock_sock(sk);
12585 -- if (!sock_owned_by_user(sk))
12586 -- ret = udp_queue_rcv_skb(sk, skb);
12587 -- else
12588 -- sk_add_backlog(sk, skb);
12589 -- bh_unlock_sock(sk);
12590 -+ int ret = udp_queue_rcv_skb(sk, skb);
12591 - sock_put(sk);
12592 -
12593 - /* a return value > 0 means to resubmit the input, but
12594 -@@ -1487,7 +1494,7 @@ struct proto udp_prot = {
12595 - .sendmsg = udp_sendmsg,
12596 - .recvmsg = udp_recvmsg,
12597 - .sendpage = udp_sendpage,
12598 -- .backlog_rcv = udp_queue_rcv_skb,
12599 -+ .backlog_rcv = __udp_queue_rcv_skb,
12600 - .hash = udp_lib_hash,
12601 - .unhash = udp_lib_unhash,
12602 - .get_port = udp_v4_get_port,
12603 -diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
12604 -index d99f094..c3f6687 100644
12605 ---- a/net/ipv6/ip6_output.c
12606 -+++ b/net/ipv6/ip6_output.c
12607 -@@ -934,39 +934,39 @@ static int ip6_dst_lookup_tail(struct sock *sk,
12608 - }
12609 -
12610 - #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
12611 -- /*
12612 -- * Here if the dst entry we've looked up
12613 -- * has a neighbour entry that is in the INCOMPLETE
12614 -- * state and the src address from the flow is
12615 -- * marked as OPTIMISTIC, we release the found
12616 -- * dst entry and replace it instead with the
12617 -- * dst entry of the nexthop router
12618 -- */
12619 -- if (!((*dst)->neighbour->nud_state & NUD_VALID)) {
12620 -- struct inet6_ifaddr *ifp;
12621 -- struct flowi fl_gw;
12622 -- int redirect;
12623 --
12624 -- ifp = ipv6_get_ifaddr(net, &fl->fl6_src,
12625 -- (*dst)->dev, 1);
12626 --
12627 -- redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
12628 -- if (ifp)
12629 -- in6_ifa_put(ifp);
12630 --
12631 -- if (redirect) {
12632 -- /*
12633 -- * We need to get the dst entry for the
12634 -- * default router instead
12635 -- */
12636 -- dst_release(*dst);
12637 -- memcpy(&fl_gw, fl, sizeof(struct flowi));
12638 -- memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
12639 -- *dst = ip6_route_output(net, sk, &fl_gw);
12640 -- if ((err = (*dst)->error))
12641 -- goto out_err_release;
12642 -- }
12643 -+ /*
12644 -+ * Here if the dst entry we've looked up
12645 -+ * has a neighbour entry that is in the INCOMPLETE
12646 -+ * state and the src address from the flow is
12647 -+ * marked as OPTIMISTIC, we release the found
12648 -+ * dst entry and replace it instead with the
12649 -+ * dst entry of the nexthop router
12650 -+ */
12651 -+ if ((*dst)->neighbour && !((*dst)->neighbour->nud_state & NUD_VALID)) {
12652 -+ struct inet6_ifaddr *ifp;
12653 -+ struct flowi fl_gw;
12654 -+ int redirect;
12655 -+
12656 -+ ifp = ipv6_get_ifaddr(net, &fl->fl6_src,
12657 -+ (*dst)->dev, 1);
12658 -+
12659 -+ redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
12660 -+ if (ifp)
12661 -+ in6_ifa_put(ifp);
12662 -+
12663 -+ if (redirect) {
12664 -+ /*
12665 -+ * We need to get the dst entry for the
12666 -+ * default router instead
12667 -+ */
12668 -+ dst_release(*dst);
12669 -+ memcpy(&fl_gw, fl, sizeof(struct flowi));
12670 -+ memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
12671 -+ *dst = ip6_route_output(net, sk, &fl_gw);
12672 -+ if ((err = (*dst)->error))
12673 -+ goto out_err_release;
12674 - }
12675 -+ }
12676 - #endif
12677 -
12678 - return 0;
12679 -diff --git a/net/ipv6/route.c b/net/ipv6/route.c
12680 -index 9deee59..990fef2 100644
12681 ---- a/net/ipv6/route.c
12682 -+++ b/net/ipv6/route.c
12683 -@@ -2718,6 +2718,8 @@ int __init ip6_route_init(void)
12684 - if (ret)
12685 - goto out_kmem_cache;
12686 -
12687 -+ ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
12688 -+
12689 - /* Registering of the loopback is done before this portion of code,
12690 - * the loopback reference in rt6_info will not be taken, do it
12691 - * manually for init_net */
12692 -diff --git a/net/key/af_key.c b/net/key/af_key.c
12693 -index 7470e36..49805ec 100644
12694 ---- a/net/key/af_key.c
12695 -+++ b/net/key/af_key.c
12696 -@@ -73,22 +73,18 @@ static int pfkey_can_dump(struct sock *sk)
12697 - return 0;
12698 - }
12699 -
12700 --static int pfkey_do_dump(struct pfkey_sock *pfk)
12701 -+static void pfkey_terminate_dump(struct pfkey_sock *pfk)
12702 - {
12703 -- int rc;
12704 --
12705 -- rc = pfk->dump.dump(pfk);
12706 -- if (rc == -ENOBUFS)
12707 -- return 0;
12708 --
12709 -- pfk->dump.done(pfk);
12710 -- pfk->dump.dump = NULL;
12711 -- pfk->dump.done = NULL;
12712 -- return rc;
12713 -+ if (pfk->dump.dump) {
12714 -+ pfk->dump.done(pfk);
12715 -+ pfk->dump.dump = NULL;
12716 -+ pfk->dump.done = NULL;
12717 -+ }
12718 - }
12719 -
12720 - static void pfkey_sock_destruct(struct sock *sk)
12721 - {
12722 -+ pfkey_terminate_dump(pfkey_sk(sk));
12723 - skb_queue_purge(&sk->sk_receive_queue);
12724 -
12725 - if (!sock_flag(sk, SOCK_DEAD)) {
12726 -@@ -310,6 +306,18 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
12727 - return err;
12728 - }
12729 -
12730 -+static int pfkey_do_dump(struct pfkey_sock *pfk)
12731 -+{
12732 -+ int rc;
12733 -+
12734 -+ rc = pfk->dump.dump(pfk);
12735 -+ if (rc == -ENOBUFS)
12736 -+ return 0;
12737 -+
12738 -+ pfkey_terminate_dump(pfk);
12739 -+ return rc;
12740 -+}
12741 -+
12742 - static inline void pfkey_hdr_dup(struct sadb_msg *new, struct sadb_msg *orig)
12743 - {
12744 - *new = *orig;
12745 -diff --git a/net/sctp/associola.c b/net/sctp/associola.c
12746 -index 024c3eb..31ca4f4 100644
12747 ---- a/net/sctp/associola.c
12748 -+++ b/net/sctp/associola.c
12749 -@@ -597,11 +597,12 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
12750 - /* Check to see if this is a duplicate. */
12751 - peer = sctp_assoc_lookup_paddr(asoc, addr);
12752 - if (peer) {
12753 -+ /* An UNKNOWN state is only set on transports added by
12754 -+ * user in sctp_connectx() call. Such transports should be
12755 -+ * considered CONFIRMED per RFC 4960, Section 5.4.
12756 -+ */
12757 - if (peer->state == SCTP_UNKNOWN) {
12758 -- if (peer_state == SCTP_ACTIVE)
12759 -- peer->state = SCTP_ACTIVE;
12760 -- if (peer_state == SCTP_UNCONFIRMED)
12761 -- peer->state = SCTP_UNCONFIRMED;
12762 -+ peer->state = SCTP_ACTIVE;
12763 - }
12764 - return peer;
12765 - }
12766 -diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
12767 -index bbc7107..650f759 100644
12768 ---- a/net/sctp/sm_make_chunk.c
12769 -+++ b/net/sctp/sm_make_chunk.c
12770 -@@ -1886,11 +1886,13 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
12771 - /* if the peer reports AUTH, assume that he
12772 - * supports AUTH.
12773 - */
12774 -- asoc->peer.auth_capable = 1;
12775 -+ if (sctp_auth_enable)
12776 -+ asoc->peer.auth_capable = 1;
12777 - break;
12778 - case SCTP_CID_ASCONF:
12779 - case SCTP_CID_ASCONF_ACK:
12780 -- asoc->peer.asconf_capable = 1;
12781 -+ if (sctp_addip_enable)
12782 -+ asoc->peer.asconf_capable = 1;
12783 - break;
12784 - default:
12785 - break;
12786 -@@ -2319,12 +2321,10 @@ clean_up:
12787 - /* Release the transport structures. */
12788 - list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
12789 - transport = list_entry(pos, struct sctp_transport, transports);
12790 -- list_del_init(pos);
12791 -- sctp_transport_free(transport);
12792 -+ if (transport->state != SCTP_ACTIVE)
12793 -+ sctp_assoc_rm_peer(asoc, transport);
12794 - }
12795 -
12796 -- asoc->peer.transport_count = 0;
12797 --
12798 - nomem:
12799 - return 0;
12800 - }
12801 -@@ -2455,6 +2455,9 @@ static int sctp_process_param(struct sctp_association *asoc,
12802 - break;
12803 -
12804 - case SCTP_PARAM_SET_PRIMARY:
12805 -+ if (!sctp_addip_enable)
12806 -+ goto fall_through;
12807 -+
12808 - addr_param = param.v + sizeof(sctp_addip_param_t);
12809 -
12810 - af = sctp_get_af_specific(param_type2af(param.p->type));
12811 -diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
12812 -index 3f964db..5360c86 100644
12813 ---- a/net/xfrm/xfrm_output.c
12814 -+++ b/net/xfrm/xfrm_output.c
12815 -@@ -27,10 +27,14 @@ static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
12816 - - skb_headroom(skb);
12817 - int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);
12818 -
12819 -- if (nhead > 0 || ntail > 0)
12820 -- return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
12821 --
12822 -- return 0;
12823 -+ if (nhead <= 0) {
12824 -+ if (ntail <= 0)
12825 -+ return 0;
12826 -+ nhead = 0;
12827 -+ } else if (ntail < 0)
12828 -+ ntail = 0;
12829 -+
12830 -+ return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
12831 - }
12832 -
12833 - static int xfrm_output_one(struct sk_buff *skb, int err)
12834 -diff --git a/sound/core/pcm.c b/sound/core/pcm.c
12835 -index 9dd9bc7..ece25c7 100644
12836 ---- a/sound/core/pcm.c
12837 -+++ b/sound/core/pcm.c
12838 -@@ -781,7 +781,7 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
12839 - return -ENODEV;
12840 -
12841 - card = pcm->card;
12842 -- down_read(&card->controls_rwsem);
12843 -+ read_lock(&card->ctl_files_rwlock);
12844 - list_for_each_entry(kctl, &card->ctl_files, list) {
12845 - if (kctl->pid == current->pid) {
12846 - prefer_subdevice = kctl->prefer_pcm_subdevice;
12847 -@@ -789,7 +789,7 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
12848 - break;
12849 - }
12850 - }
12851 -- up_read(&card->controls_rwsem);
12852 -+ read_unlock(&card->ctl_files_rwlock);
12853 -
12854 - switch (stream) {
12855 - case SNDRV_PCM_STREAM_PLAYBACK:
12856 -diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
12857 -index 61f5d42..225112b 100644
12858 ---- a/sound/core/pcm_native.c
12859 -+++ b/sound/core/pcm_native.c
12860 -@@ -1545,16 +1545,10 @@ static int snd_pcm_drop(struct snd_pcm_substream *substream)
12861 - card = substream->pcm->card;
12862 -
12863 - if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
12864 -- runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
12865 -+ runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED ||
12866 -+ runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
12867 - return -EBADFD;
12868 -
12869 -- snd_power_lock(card);
12870 -- if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
12871 -- result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
12872 -- if (result < 0)
12873 -- goto _unlock;
12874 -- }
12875 --
12876 - snd_pcm_stream_lock_irq(substream);
12877 - /* resume pause */
12878 - if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
12879 -@@ -1563,8 +1557,7 @@ static int snd_pcm_drop(struct snd_pcm_substream *substream)
12880 - snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
12881 - /* runtime->control->appl_ptr = runtime->status->hw_ptr; */
12882 - snd_pcm_stream_unlock_irq(substream);
12883 -- _unlock:
12884 -- snd_power_unlock(card);
12885 -+
12886 - return result;
12887 - }
12888 -
12889 -diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
12890 -index f7ea728..b917a9f 100644
12891 ---- a/sound/core/rawmidi.c
12892 -+++ b/sound/core/rawmidi.c
12893 -@@ -418,7 +418,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
12894 - mutex_lock(&rmidi->open_mutex);
12895 - while (1) {
12896 - subdevice = -1;
12897 -- down_read(&card->controls_rwsem);
12898 -+ read_lock(&card->ctl_files_rwlock);
12899 - list_for_each_entry(kctl, &card->ctl_files, list) {
12900 - if (kctl->pid == current->pid) {
12901 - subdevice = kctl->prefer_rawmidi_subdevice;
12902 -@@ -426,7 +426,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
12903 - break;
12904 - }
12905 - }
12906 -- up_read(&card->controls_rwsem);
12907 -+ read_unlock(&card->ctl_files_rwlock);
12908 - err = snd_rawmidi_kernel_open(rmidi->card, rmidi->device,
12909 - subdevice, fflags, rawmidi_file);
12910 - if (err >= 0)
12911 -diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
12912 -index a4f44a0..7207759 100644
12913 ---- a/sound/pci/hda/patch_sigmatel.c
12914 -+++ b/sound/pci/hda/patch_sigmatel.c
12915 -@@ -1667,8 +1667,8 @@ static struct snd_pci_quirk stac927x_cfg_tbl[] = {
12916 - /* Dell 3 stack systems with verb table in BIOS */
12917 - SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS),
12918 - SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0227, "Dell Vostro 1400 ", STAC_DELL_BIOS),
12919 -- SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell ", STAC_DELL_BIOS),
12920 - SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022e, "Dell ", STAC_DELL_BIOS),
12921 -+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell Inspiron 1525", STAC_DELL_3ST),
12922 - SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0242, "Dell ", STAC_DELL_BIOS),
12923 - SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0243, "Dell ", STAC_DELL_BIOS),
12924 - SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ff, "Dell ", STAC_DELL_BIOS),
12925 -diff --git a/sound/pci/oxygen/hifier.c b/sound/pci/oxygen/hifier.c
12926 -index 090dd43..841e45d 100644
12927 ---- a/sound/pci/oxygen/hifier.c
12928 -+++ b/sound/pci/oxygen/hifier.c
12929 -@@ -17,6 +17,7 @@
12930 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
12931 - */
12932 -
12933 -+#include <linux/delay.h>
12934 - #include <linux/pci.h>
12935 - #include <sound/control.h>
12936 - #include <sound/core.h>
12937 -@@ -95,6 +96,9 @@ static void set_ak4396_params(struct oxygen *chip,
12938 - else
12939 - value |= AK4396_DFS_QUAD;
12940 - data->ak4396_ctl2 = value;
12941 -+
12942 -+ msleep(1); /* wait for the new MCLK to become stable */
12943 -+
12944 - ak4396_write(chip, AK4396_CONTROL_1, AK4396_DIF_24_MSB);
12945 - ak4396_write(chip, AK4396_CONTROL_2, value);
12946 - ak4396_write(chip, AK4396_CONTROL_1, AK4396_DIF_24_MSB | AK4396_RSTN);
12947 -diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c
12948 -index 63f185c..6a59041 100644
12949 ---- a/sound/pci/oxygen/oxygen.c
12950 -+++ b/sound/pci/oxygen/oxygen.c
12951 -@@ -28,6 +28,7 @@
12952 - * GPIO 1 -> DFS1 of AK5385
12953 - */
12954 -
12955 -+#include <linux/delay.h>
12956 - #include <linux/mutex.h>
12957 - #include <linux/pci.h>
12958 - #include <sound/ac97_codec.h>
12959 -@@ -173,6 +174,9 @@ static void set_ak4396_params(struct oxygen *chip,
12960 - else
12961 - value |= AK4396_DFS_QUAD;
12962 - data->ak4396_ctl2 = value;
12963 -+
12964 -+ msleep(1); /* wait for the new MCLK to become stable */
12965 -+
12966 - for (i = 0; i < 4; ++i) {
12967 - ak4396_write(chip, i,
12968 - AK4396_CONTROL_1, AK4396_DIF_24_MSB);
12969 -diff --git a/sound/ppc/awacs.c b/sound/ppc/awacs.c
12970 -index 566a6d0..106c482 100644
12971 ---- a/sound/ppc/awacs.c
12972 -+++ b/sound/ppc/awacs.c
12973 -@@ -621,6 +621,13 @@ static struct snd_kcontrol_new snd_pmac_screamer_mixers_imac[] __initdata = {
12974 - AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0),
12975 - };
12976 -
12977 -+static struct snd_kcontrol_new snd_pmac_screamer_mixers_g4agp[] __initdata = {
12978 -+ AWACS_VOLUME("Line out Playback Volume", 2, 6, 1),
12979 -+ AWACS_VOLUME("Master Playback Volume", 5, 6, 1),
12980 -+ AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0),
12981 -+ AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_MIC, 0),
12982 -+};
12983 -+
12984 - static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac7500[] __initdata = {
12985 - AWACS_VOLUME("Line out Playback Volume", 2, 6, 1),
12986 - AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0),
12987 -@@ -688,7 +695,10 @@ static struct snd_kcontrol_new snd_pmac_awacs_speaker_vol[] __initdata = {
12988 - static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw __initdata =
12989 - AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_SPKMUTE, 1);
12990 -
12991 --static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac __initdata =
12992 -+static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac1 __initdata =
12993 -+AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_PAROUT1, 1);
12994 -+
12995 -+static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac2 __initdata =
12996 - AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_PAROUT1, 0);
12997 -
12998 -
12999 -@@ -765,11 +775,12 @@ static void snd_pmac_awacs_resume(struct snd_pmac *chip)
13000 -
13001 - #define IS_PM7500 (machine_is_compatible("AAPL,7500"))
13002 - #define IS_BEIGE (machine_is_compatible("AAPL,Gossamer"))
13003 --#define IS_IMAC (machine_is_compatible("PowerMac2,1") \
13004 -- || machine_is_compatible("PowerMac2,2") \
13005 -+#define IS_IMAC1 (machine_is_compatible("PowerMac2,1"))
13006 -+#define IS_IMAC2 (machine_is_compatible("PowerMac2,2") \
13007 - || machine_is_compatible("PowerMac4,1"))
13008 -+#define IS_G4AGP (machine_is_compatible("PowerMac3,1"))
13009 -
13010 --static int imac;
13011 -+static int imac1, imac2;
13012 -
13013 - #ifdef PMAC_SUPPORT_AUTOMUTE
13014 - /*
13015 -@@ -815,13 +826,18 @@ static void snd_pmac_awacs_update_automute(struct snd_pmac *chip, int do_notify)
13016 - {
13017 - int reg = chip->awacs_reg[1]
13018 - | (MASK_HDMUTE | MASK_SPKMUTE);
13019 -- if (imac) {
13020 -+ if (imac1) {
13021 -+ reg &= ~MASK_SPKMUTE;
13022 -+ reg |= MASK_PAROUT1;
13023 -+ } else if (imac2) {
13024 - reg &= ~MASK_SPKMUTE;
13025 - reg &= ~MASK_PAROUT1;
13026 - }
13027 - if (snd_pmac_awacs_detect_headphone(chip))
13028 - reg &= ~MASK_HDMUTE;
13029 -- else if (imac)
13030 -+ else if (imac1)
13031 -+ reg &= ~MASK_PAROUT1;
13032 -+ else if (imac2)
13033 - reg |= MASK_PAROUT1;
13034 - else
13035 - reg &= ~MASK_SPKMUTE;
13036 -@@ -850,9 +866,13 @@ snd_pmac_awacs_init(struct snd_pmac *chip)
13037 - {
13038 - int pm7500 = IS_PM7500;
13039 - int beige = IS_BEIGE;
13040 -+ int g4agp = IS_G4AGP;
13041 -+ int imac;
13042 - int err, vol;
13043 -
13044 -- imac = IS_IMAC;
13045 -+ imac1 = IS_IMAC1;
13046 -+ imac2 = IS_IMAC2;
13047 -+ imac = imac1 || imac2;
13048 - /* looks like MASK_GAINLINE triggers something, so we set here
13049 - * as start-up
13050 - */
13051 -@@ -939,7 +959,7 @@ snd_pmac_awacs_init(struct snd_pmac *chip)
13052 - snd_pmac_awacs_mixers);
13053 - if (err < 0)
13054 - return err;
13055 -- if (beige)
13056 -+ if (beige || g4agp)
13057 - ;
13058 - else if (chip->model == PMAC_SCREAMER)
13059 - err = build_mixers(chip, ARRAY_SIZE(snd_pmac_screamer_mixers2),
13060 -@@ -961,13 +981,17 @@ snd_pmac_awacs_init(struct snd_pmac *chip)
13061 - err = build_mixers(chip,
13062 - ARRAY_SIZE(snd_pmac_screamer_mixers_imac),
13063 - snd_pmac_screamer_mixers_imac);
13064 -+ else if (g4agp)
13065 -+ err = build_mixers(chip,
13066 -+ ARRAY_SIZE(snd_pmac_screamer_mixers_g4agp),
13067 -+ snd_pmac_screamer_mixers_g4agp);
13068 - else
13069 - err = build_mixers(chip,
13070 - ARRAY_SIZE(snd_pmac_awacs_mixers_pmac),
13071 - snd_pmac_awacs_mixers_pmac);
13072 - if (err < 0)
13073 - return err;
13074 -- chip->master_sw_ctl = snd_ctl_new1((pm7500 || imac)
13075 -+ chip->master_sw_ctl = snd_ctl_new1((pm7500 || imac || g4agp)
13076 - ? &snd_pmac_awacs_master_sw_imac
13077 - : &snd_pmac_awacs_master_sw, chip);
13078 - err = snd_ctl_add(chip->card, chip->master_sw_ctl);
13079 -@@ -1004,15 +1028,17 @@ snd_pmac_awacs_init(struct snd_pmac *chip)
13080 - snd_pmac_awacs_speaker_vol);
13081 - if (err < 0)
13082 - return err;
13083 -- chip->speaker_sw_ctl = snd_ctl_new1(imac
13084 -- ? &snd_pmac_awacs_speaker_sw_imac
13085 -+ chip->speaker_sw_ctl = snd_ctl_new1(imac1
13086 -+ ? &snd_pmac_awacs_speaker_sw_imac1
13087 -+ : imac2
13088 -+ ? &snd_pmac_awacs_speaker_sw_imac2
13089 - : &snd_pmac_awacs_speaker_sw, chip);
13090 - err = snd_ctl_add(chip->card, chip->speaker_sw_ctl);
13091 - if (err < 0)
13092 - return err;
13093 - }
13094 -
13095 -- if (beige)
13096 -+ if (beige || g4agp)
13097 - err = build_mixers(chip,
13098 - ARRAY_SIZE(snd_pmac_screamer_mic_boost_beige),
13099 - snd_pmac_screamer_mic_boost_beige);