From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1819 - genpatches-2.6/trunk/2.6.35
Date: Sun, 31 Oct 2010 23:04:39
Message-Id: 20101031230430.8DC6C20054@flycatcher.gentoo.org
1 Author: mpagano
2 Date: 2010-10-31 23:04:30 +0000 (Sun, 31 Oct 2010)
3 New Revision: 1819
4
5 Added:
6 genpatches-2.6/trunk/2.6.35/1007_linux-2.6.35.8.patch
7 Removed:
8 genpatches-2.6/trunk/2.6.35/1500_CVE-2010-3904-RDS-Priv-Escal-fix.patch
9 Modified:
10 genpatches-2.6/trunk/2.6.35/0000_README
11 Log:
12 Linux patch 2.6.35.8 and removal of redundant patch.
13
14 Modified: genpatches-2.6/trunk/2.6.35/0000_README
15 ===================================================================
16 --- genpatches-2.6/trunk/2.6.35/0000_README 2010-10-31 22:55:31 UTC (rev 1818)
17 +++ genpatches-2.6/trunk/2.6.35/0000_README 2010-10-31 23:04:30 UTC (rev 1819)
18 @@ -67,9 +67,9 @@
19 From: http://www.kernel.org
20 Desc: Linux 2.6.35.7
21
22 -Patch: 1500_CVE-2010-3904-RDS-Priv-Escal-fix.patch
23 +Patch: 1007_linux-2.6.35.8.patch
24 From: http://www.kernel.org
25 -Desc: CVE-2010-3904 RDS Protocol priviledge escalation
26 +Desc: Linux 2.6.35.8
27
28 Patch: 1900_create-mount-for-cgroupsfs.patch
29 From: http://bugs.gentoo.org/show_bug.cgi?id=318365
30
31 Added: genpatches-2.6/trunk/2.6.35/1007_linux-2.6.35.8.patch
32 ===================================================================
33 --- genpatches-2.6/trunk/2.6.35/1007_linux-2.6.35.8.patch (rev 0)
34 +++ genpatches-2.6/trunk/2.6.35/1007_linux-2.6.35.8.patch 2010-10-31 23:04:30 UTC (rev 1819)
35 @@ -0,0 +1,3171 @@
36 +diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
37 +index dc25bb8..50faa7e 100644
38 +--- a/Documentation/sound/alsa/HD-Audio-Models.txt
39 ++++ b/Documentation/sound/alsa/HD-Audio-Models.txt
40 +@@ -282,6 +282,7 @@ Conexant 5051
41 + hp HP Spartan laptop
42 + hp-dv6736 HP dv6736
43 + hp-f700 HP Compaq Presario F700
44 ++ ideapad Lenovo IdeaPad laptop
45 + lenovo-x200 Lenovo X200 laptop
46 + toshiba Toshiba Satellite M300
47 +
48 +diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
49 +index 4824fb4..831acd06 100644
50 +--- a/arch/arm/Kconfig
51 ++++ b/arch/arm/Kconfig
52 +@@ -273,7 +273,6 @@ config ARCH_AT91
53 + bool "Atmel AT91"
54 + select ARCH_REQUIRE_GPIOLIB
55 + select HAVE_CLK
56 +- select ARCH_USES_GETTIMEOFFSET
57 + help
58 + This enables support for systems based on the Atmel AT91RM9200,
59 + AT91SAM9 and AT91CAP9 processors.
60 +diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
61 +index 6353459..d6dd6f6 100644
62 +--- a/arch/arm/mach-vexpress/ct-ca9x4.c
63 ++++ b/arch/arm/mach-vexpress/ct-ca9x4.c
64 +@@ -225,7 +225,7 @@ static void ct_ca9x4_init(void)
65 + int i;
66 +
67 + #ifdef CONFIG_CACHE_L2X0
68 +- l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff);
69 ++ l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00400000, 0xfe0fffff);
70 + #endif
71 +
72 + clkdev_add_table(lookups, ARRAY_SIZE(lookups));
73 +diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
74 +index 0691176..72e09eb 100644
75 +--- a/arch/arm/oprofile/common.c
76 ++++ b/arch/arm/oprofile/common.c
77 +@@ -102,6 +102,7 @@ static int op_create_counter(int cpu, int event)
78 + if (IS_ERR(pevent)) {
79 + ret = PTR_ERR(pevent);
80 + } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
81 ++ perf_event_release_kernel(pevent);
82 + pr_warning("oprofile: failed to enable event %d "
83 + "on CPU %d\n", event, cpu);
84 + ret = -EBUSY;
85 +@@ -365,6 +366,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
86 + ret = init_driverfs();
87 + if (ret) {
88 + kfree(counter_config);
89 ++ counter_config = NULL;
90 + return ret;
91 + }
92 +
93 +@@ -402,7 +404,6 @@ void oprofile_arch_exit(void)
94 + struct perf_event *event;
95 +
96 + if (*perf_events) {
97 +- exit_driverfs();
98 + for_each_possible_cpu(cpu) {
99 + for (id = 0; id < perf_num_counters; ++id) {
100 + event = perf_events[cpu][id];
101 +@@ -413,8 +414,10 @@ void oprofile_arch_exit(void)
102 + }
103 + }
104 +
105 +- if (counter_config)
106 ++ if (counter_config) {
107 + kfree(counter_config);
108 ++ exit_driverfs();
109 ++ }
110 + }
111 + #else
112 + int __init oprofile_arch_init(struct oprofile_operations *ops)
113 +diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
114 +index 844a44b..c571cd3 100644
115 +--- a/arch/powerpc/kernel/head_64.S
116 ++++ b/arch/powerpc/kernel/head_64.S
117 +@@ -572,15 +572,21 @@ __secondary_start:
118 + /* Set thread priority to MEDIUM */
119 + HMT_MEDIUM
120 +
121 +- /* Do early setup for that CPU (stab, slb, hash table pointer) */
122 +- bl .early_setup_secondary
123 +-
124 + /* Initialize the kernel stack. Just a repeat for iSeries. */
125 + LOAD_REG_ADDR(r3, current_set)
126 + sldi r28,r24,3 /* get current_set[cpu#] */
127 +- ldx r1,r3,r28
128 +- addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
129 +- std r1,PACAKSAVE(r13)
130 ++ ldx r14,r3,r28
131 ++ addi r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD
132 ++ std r14,PACAKSAVE(r13)
133 ++
134 ++ /* Do early setup for that CPU (stab, slb, hash table pointer) */
135 ++ bl .early_setup_secondary
136 ++
137 ++ /*
138 ++ * setup the new stack pointer, but *don't* use this until
139 ++ * translation is on.
140 ++ */
141 ++ mr r1, r14
142 +
143 + /* Clear backchain so we get nice backtraces */
144 + li r7,0
145 +diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
146 +index da992a3..3d63b83 100644
147 +--- a/arch/um/drivers/ubd_kern.c
148 ++++ b/arch/um/drivers/ubd_kern.c
149 +@@ -162,6 +162,7 @@ struct ubd {
150 + struct scatterlist sg[MAX_SG];
151 + struct request *request;
152 + int start_sg, end_sg;
153 ++ sector_t rq_pos;
154 + };
155 +
156 + #define DEFAULT_COW { \
157 +@@ -186,6 +187,7 @@ struct ubd {
158 + .request = NULL, \
159 + .start_sg = 0, \
160 + .end_sg = 0, \
161 ++ .rq_pos = 0, \
162 + }
163 +
164 + /* Protected by ubd_lock */
165 +@@ -1223,7 +1225,6 @@ static void do_ubd_request(struct request_queue *q)
166 + {
167 + struct io_thread_req *io_req;
168 + struct request *req;
169 +- sector_t sector;
170 + int n;
171 +
172 + while(1){
173 +@@ -1234,12 +1235,12 @@ static void do_ubd_request(struct request_queue *q)
174 + return;
175 +
176 + dev->request = req;
177 ++ dev->rq_pos = blk_rq_pos(req);
178 + dev->start_sg = 0;
179 + dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
180 + }
181 +
182 + req = dev->request;
183 +- sector = blk_rq_pos(req);
184 + while(dev->start_sg < dev->end_sg){
185 + struct scatterlist *sg = &dev->sg[dev->start_sg];
186 +
187 +@@ -1251,10 +1252,9 @@ static void do_ubd_request(struct request_queue *q)
188 + return;
189 + }
190 + prepare_request(req, io_req,
191 +- (unsigned long long)sector << 9,
192 ++ (unsigned long long)dev->rq_pos << 9,
193 + sg->offset, sg->length, sg_page(sg));
194 +
195 +- sector += sg->length >> 9;
196 + n = os_write_file(thread_fd, &io_req,
197 + sizeof(struct io_thread_req *));
198 + if(n != sizeof(struct io_thread_req *)){
199 +@@ -1267,6 +1267,7 @@ static void do_ubd_request(struct request_queue *q)
200 + return;
201 + }
202 +
203 ++ dev->rq_pos += sg->length >> 9;
204 + dev->start_sg++;
205 + }
206 + dev->end_sg = 0;
207 +diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
208 +index d2544f1..cb03037 100644
209 +--- a/arch/x86/include/asm/amd_iommu_proto.h
210 ++++ b/arch/x86/include/asm/amd_iommu_proto.h
211 +@@ -38,4 +38,10 @@ static inline void amd_iommu_stats_init(void) { }
212 +
213 + #endif /* !CONFIG_AMD_IOMMU_STATS */
214 +
215 ++static inline bool is_rd890_iommu(struct pci_dev *pdev)
216 ++{
217 ++ return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
218 ++ (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
219 ++}
220 ++
221 + #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
222 +diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
223 +index 7014e88..0861618 100644
224 +--- a/arch/x86/include/asm/amd_iommu_types.h
225 ++++ b/arch/x86/include/asm/amd_iommu_types.h
226 +@@ -368,6 +368,9 @@ struct amd_iommu {
227 + /* capabilities of that IOMMU read from ACPI */
228 + u32 cap;
229 +
230 ++ /* flags read from acpi table */
231 ++ u8 acpi_flags;
232 ++
233 + /*
234 + * Capability pointer. There could be more than one IOMMU per PCI
235 + * device function if there are more than one AMD IOMMU capability
236 +@@ -411,6 +414,15 @@ struct amd_iommu {
237 +
238 + /* default dma_ops domain for that IOMMU */
239 + struct dma_ops_domain *default_dom;
240 ++
241 ++ /*
242 ++ * This array is required to work around a potential BIOS bug.
243 ++ * The BIOS may miss to restore parts of the PCI configuration
244 ++ * space when the system resumes from S3. The result is that the
245 ++ * IOMMU does not execute commands anymore which leads to system
246 ++ * failure.
247 ++ */
248 ++ u32 cache_cfg[4];
249 + };
250 +
251 + /*
252 +diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
253 +index 2984a25..f686f49 100644
254 +--- a/arch/x86/include/asm/pgtable_32.h
255 ++++ b/arch/x86/include/asm/pgtable_32.h
256 +@@ -26,6 +26,7 @@ struct mm_struct;
257 + struct vm_area_struct;
258 +
259 + extern pgd_t swapper_pg_dir[1024];
260 ++extern pgd_t trampoline_pg_dir[1024];
261 +
262 + static inline void pgtable_cache_init(void) { }
263 + static inline void check_pgt_cache(void) { }
264 +diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
265 +index cb507bb..4dde797 100644
266 +--- a/arch/x86/include/asm/trampoline.h
267 ++++ b/arch/x86/include/asm/trampoline.h
268 +@@ -13,14 +13,17 @@ extern unsigned char *trampoline_base;
269 +
270 + extern unsigned long init_rsp;
271 + extern unsigned long initial_code;
272 ++extern unsigned long initial_page_table;
273 + extern unsigned long initial_gs;
274 +
275 + #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
276 +
277 + extern unsigned long setup_trampoline(void);
278 ++extern void __init setup_trampoline_page_table(void);
279 + extern void __init reserve_trampoline_memory(void);
280 + #else
281 +-static inline void reserve_trampoline_memory(void) {};
282 ++static inline void setup_trampoline_page_table(void) {}
283 ++static inline void reserve_trampoline_memory(void) {}
284 + #endif /* CONFIG_X86_TRAMPOLINE */
285 +
286 + #endif /* __ASSEMBLY__ */
287 +diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
288 +index e77b220..0357c51 100644
289 +--- a/arch/x86/kernel/Makefile
290 ++++ b/arch/x86/kernel/Makefile
291 +@@ -11,6 +11,8 @@ ifdef CONFIG_FUNCTION_TRACER
292 + CFLAGS_REMOVE_tsc.o = -pg
293 + CFLAGS_REMOVE_rtc.o = -pg
294 + CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
295 ++CFLAGS_REMOVE_pvclock.o = -pg
296 ++CFLAGS_REMOVE_kvmclock.o = -pg
297 + CFLAGS_REMOVE_ftrace.o = -pg
298 + CFLAGS_REMOVE_early_printk.o = -pg
299 + endif
300 +diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
301 +index 0d20286..4424c73 100644
302 +--- a/arch/x86/kernel/amd_iommu.c
303 ++++ b/arch/x86/kernel/amd_iommu.c
304 +@@ -1953,6 +1953,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
305 + size_t size,
306 + int dir)
307 + {
308 ++ dma_addr_t flush_addr;
309 + dma_addr_t i, start;
310 + unsigned int pages;
311 +
312 +@@ -1960,6 +1961,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
313 + (dma_addr + size > dma_dom->aperture_size))
314 + return;
315 +
316 ++ flush_addr = dma_addr;
317 + pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
318 + dma_addr &= PAGE_MASK;
319 + start = dma_addr;
320 +@@ -1974,7 +1976,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
321 + dma_ops_free_addresses(dma_dom, dma_addr, pages);
322 +
323 + if (amd_iommu_unmap_flush || dma_dom->need_flush) {
324 +- iommu_flush_pages(&dma_dom->domain, dma_addr, size);
325 ++ iommu_flush_pages(&dma_dom->domain, flush_addr, size);
326 + dma_dom->need_flush = false;
327 + }
328 + }
329 +diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
330 +index 3cc63e2..5a170cb 100644
331 +--- a/arch/x86/kernel/amd_iommu_init.c
332 ++++ b/arch/x86/kernel/amd_iommu_init.c
333 +@@ -632,6 +632,13 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
334 + iommu->last_device = calc_devid(MMIO_GET_BUS(range),
335 + MMIO_GET_LD(range));
336 + iommu->evt_msi_num = MMIO_MSI_NUM(misc);
337 ++
338 ++ if (is_rd890_iommu(iommu->dev)) {
339 ++ pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]);
340 ++ pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]);
341 ++ pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]);
342 ++ pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]);
343 ++ }
344 + }
345 +
346 + /*
347 +@@ -649,29 +656,9 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
348 + struct ivhd_entry *e;
349 +
350 + /*
351 +- * First set the recommended feature enable bits from ACPI
352 +- * into the IOMMU control registers
353 ++ * First save the recommended feature enable bits from ACPI
354 + */
355 +- h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
356 +- iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
357 +- iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
358 +-
359 +- h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
360 +- iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
361 +- iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
362 +-
363 +- h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
364 +- iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
365 +- iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
366 +-
367 +- h->flags & IVHD_FLAG_ISOC_EN_MASK ?
368 +- iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
369 +- iommu_feature_disable(iommu, CONTROL_ISOC_EN);
370 +-
371 +- /*
372 +- * make IOMMU memory accesses cache coherent
373 +- */
374 +- iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
375 ++ iommu->acpi_flags = h->flags;
376 +
377 + /*
378 + * Done. Now parse the device entries
379 +@@ -1116,6 +1103,40 @@ static void init_device_table(void)
380 + }
381 + }
382 +
383 ++static void iommu_init_flags(struct amd_iommu *iommu)
384 ++{
385 ++ iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
386 ++ iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
387 ++ iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
388 ++
389 ++ iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
390 ++ iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
391 ++ iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
392 ++
393 ++ iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
394 ++ iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
395 ++ iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
396 ++
397 ++ iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
398 ++ iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
399 ++ iommu_feature_disable(iommu, CONTROL_ISOC_EN);
400 ++
401 ++ /*
402 ++ * make IOMMU memory accesses cache coherent
403 ++ */
404 ++ iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
405 ++}
406 ++
407 ++static void iommu_apply_quirks(struct amd_iommu *iommu)
408 ++{
409 ++ if (is_rd890_iommu(iommu->dev)) {
410 ++ pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]);
411 ++ pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]);
412 ++ pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]);
413 ++ pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]);
414 ++ }
415 ++}
416 ++
417 + /*
418 + * This function finally enables all IOMMUs found in the system after
419 + * they have been initialized
420 +@@ -1126,6 +1147,8 @@ static void enable_iommus(void)
421 +
422 + for_each_iommu(iommu) {
423 + iommu_disable(iommu);
424 ++ iommu_apply_quirks(iommu);
425 ++ iommu_init_flags(iommu);
426 + iommu_set_device_table(iommu);
427 + iommu_enable_command_buffer(iommu);
428 + iommu_enable_event_buffer(iommu);
429 +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
430 +index f1efeba..5c5b8f3 100644
431 +--- a/arch/x86/kernel/apic/io_apic.c
432 ++++ b/arch/x86/kernel/apic/io_apic.c
433 +@@ -306,14 +306,19 @@ void arch_init_copy_chip_data(struct irq_desc *old_desc,
434 +
435 + old_cfg = old_desc->chip_data;
436 +
437 +- memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
438 ++ cfg->vector = old_cfg->vector;
439 ++ cfg->move_in_progress = old_cfg->move_in_progress;
440 ++ cpumask_copy(cfg->domain, old_cfg->domain);
441 ++ cpumask_copy(cfg->old_domain, old_cfg->old_domain);
442 +
443 + init_copy_irq_2_pin(old_cfg, cfg, node);
444 + }
445 +
446 +-static void free_irq_cfg(struct irq_cfg *old_cfg)
447 ++static void free_irq_cfg(struct irq_cfg *cfg)
448 + {
449 +- kfree(old_cfg);
450 ++ free_cpumask_var(cfg->domain);
451 ++ free_cpumask_var(cfg->old_domain);
452 ++ kfree(cfg);
453 + }
454 +
455 + void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
456 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
457 +index 68e4a6f..d938871 100644
458 +--- a/arch/x86/kernel/cpu/common.c
459 ++++ b/arch/x86/kernel/cpu/common.c
460 +@@ -537,7 +537,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
461 + }
462 + }
463 +
464 +-static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
465 ++void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
466 + {
467 + u32 tfms, xlvl;
468 + u32 ebx;
469 +@@ -576,6 +576,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
470 + if (c->extended_cpuid_level >= 0x80000007)
471 + c->x86_power = cpuid_edx(0x80000007);
472 +
473 ++ init_scattered_cpuid_features(c);
474 + }
475 +
476 + static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
477 +@@ -731,7 +732,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
478 +
479 + get_model_name(c); /* Default name */
480 +
481 +- init_scattered_cpuid_features(c);
482 + detect_nopl(c);
483 + }
484 +
485 +diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
486 +index 3624e8a..f668bb1 100644
487 +--- a/arch/x86/kernel/cpu/cpu.h
488 ++++ b/arch/x86/kernel/cpu/cpu.h
489 +@@ -33,5 +33,6 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
490 + *const __x86_cpu_dev_end[];
491 +
492 + extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
493 ++extern void get_cpu_cap(struct cpuinfo_x86 *c);
494 +
495 + #endif
496 +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
497 +index 85f69cd..b438944 100644
498 +--- a/arch/x86/kernel/cpu/intel.c
499 ++++ b/arch/x86/kernel/cpu/intel.c
500 +@@ -39,6 +39,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
501 + misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
502 + wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
503 + c->cpuid_level = cpuid_eax(0);
504 ++ get_cpu_cap(c);
505 + }
506 + }
507 +
508 +diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
509 +index 224392d..f80ff85 100644
510 +--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
511 ++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
512 +@@ -141,6 +141,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
513 + address = (low & MASK_BLKPTR_LO) >> 21;
514 + if (!address)
515 + break;
516 ++
517 + address += MCG_XBLK_ADDR;
518 + } else
519 + ++address;
520 +@@ -148,12 +149,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
521 + if (rdmsr_safe(address, &low, &high))
522 + break;
523 +
524 +- if (!(high & MASK_VALID_HI)) {
525 +- if (block)
526 +- continue;
527 +- else
528 +- break;
529 +- }
530 ++ if (!(high & MASK_VALID_HI))
531 ++ continue;
532 +
533 + if (!(high & MASK_CNTP_HI) ||
534 + (high & MASK_LOCKED_HI))
535 +diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
536 +index 37c3d4b..75e3981 100644
537 +--- a/arch/x86/kernel/head_32.S
538 ++++ b/arch/x86/kernel/head_32.S
539 +@@ -328,7 +328,7 @@ ENTRY(startup_32_smp)
540 + /*
541 + * Enable paging
542 + */
543 +- movl $pa(swapper_pg_dir),%eax
544 ++ movl pa(initial_page_table), %eax
545 + movl %eax,%cr3 /* set the page table pointer.. */
546 + movl %cr0,%eax
547 + orl $X86_CR0_PG,%eax
548 +@@ -608,6 +608,8 @@ ignore_int:
549 + .align 4
550 + ENTRY(initial_code)
551 + .long i386_start_kernel
552 ++ENTRY(initial_page_table)
553 ++ .long pa(swapper_pg_dir)
554 +
555 + /*
556 + * BSS section
557 +@@ -623,6 +625,10 @@ ENTRY(swapper_pg_dir)
558 + #endif
559 + swapper_pg_fixmap:
560 + .fill 1024,4,0
561 ++#ifdef CONFIG_X86_TRAMPOLINE
562 ++ENTRY(trampoline_pg_dir)
563 ++ .fill 1024,4,0
564 ++#endif
565 + ENTRY(empty_zero_page)
566 + .fill 4096,1,0
567 +
568 +diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
569 +index 7cd1886..917c66f 100644
570 +--- a/arch/x86/kernel/hpet.c
571 ++++ b/arch/x86/kernel/hpet.c
572 +@@ -507,7 +507,7 @@ static int hpet_assign_irq(struct hpet_dev *dev)
573 + {
574 + unsigned int irq;
575 +
576 +- irq = create_irq();
577 ++ irq = create_irq_nr(0, -1);
578 + if (!irq)
579 + return -EINVAL;
580 +
581 +diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
582 +index b4ae4ac..6600cfd 100644
583 +--- a/arch/x86/kernel/setup.c
584 ++++ b/arch/x86/kernel/setup.c
585 +@@ -1008,6 +1008,8 @@ void __init setup_arch(char **cmdline_p)
586 + paging_init();
587 + x86_init.paging.pagetable_setup_done(swapper_pg_dir);
588 +
589 ++ setup_trampoline_page_table();
590 ++
591 + tboot_probe();
592 +
593 + #ifdef CONFIG_X86_64
594 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
595 +index 0bf2ece..821ee1b 100644
596 +--- a/arch/x86/kernel/smpboot.c
597 ++++ b/arch/x86/kernel/smpboot.c
598 +@@ -73,7 +73,6 @@
599 +
600 + #ifdef CONFIG_X86_32
601 + u8 apicid_2_node[MAX_APICID];
602 +-static int low_mappings;
603 + #endif
604 +
605 + /* State of each CPU */
606 +@@ -300,6 +299,18 @@ notrace static void __cpuinit start_secondary(void *unused)
607 + * fragile that we want to limit the things done here to the
608 + * most necessary things.
609 + */
610 ++
611 ++#ifdef CONFIG_X86_32
612 ++ /*
613 ++ * Switch away from the trampoline page-table
614 ++ *
615 ++ * Do this before cpu_init() because it needs to access per-cpu
616 ++ * data which may not be mapped in the trampoline page-table.
617 ++ */
618 ++ load_cr3(swapper_pg_dir);
619 ++ __flush_tlb_all();
620 ++#endif
621 ++
622 + vmi_bringup();
623 + cpu_init();
624 + preempt_disable();
625 +@@ -318,12 +329,6 @@ notrace static void __cpuinit start_secondary(void *unused)
626 + legacy_pic->chip->unmask(0);
627 + }
628 +
629 +-#ifdef CONFIG_X86_32
630 +- while (low_mappings)
631 +- cpu_relax();
632 +- __flush_tlb_all();
633 +-#endif
634 +-
635 + /* This must be done before setting cpu_online_mask */
636 + set_cpu_sibling_map(raw_smp_processor_id());
637 + wmb();
638 +@@ -773,6 +778,7 @@ do_rest:
639 + #ifdef CONFIG_X86_32
640 + /* Stack for startup_32 can be just as for start_secondary onwards */
641 + irq_ctx_init(cpu);
642 ++ initial_page_table = __pa(&trampoline_pg_dir);
643 + #else
644 + clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
645 + initial_gs = per_cpu_offset(cpu);
646 +@@ -920,20 +926,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
647 +
648 + per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
649 +
650 +-#ifdef CONFIG_X86_32
651 +- /* init low mem mapping */
652 +- clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
653 +- min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
654 +- flush_tlb_all();
655 +- low_mappings = 1;
656 +-
657 + err = do_boot_cpu(apicid, cpu);
658 +
659 +- zap_low_mappings(false);
660 +- low_mappings = 0;
661 +-#else
662 +- err = do_boot_cpu(apicid, cpu);
663 +-#endif
664 + if (err) {
665 + pr_debug("do_boot_cpu failed %d\n", err);
666 + return -EIO;
667 +diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
668 +index c652ef6..e2a5952 100644
669 +--- a/arch/x86/kernel/trampoline.c
670 ++++ b/arch/x86/kernel/trampoline.c
671 +@@ -1,6 +1,7 @@
672 + #include <linux/io.h>
673 +
674 + #include <asm/trampoline.h>
675 ++#include <asm/pgtable.h>
676 + #include <asm/e820.h>
677 +
678 + #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
679 +@@ -37,3 +38,19 @@ unsigned long __trampinit setup_trampoline(void)
680 + memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
681 + return virt_to_phys(trampoline_base);
682 + }
683 ++
684 ++void __init setup_trampoline_page_table(void)
685 ++{
686 ++#ifdef CONFIG_X86_32
687 ++ /* Copy kernel address range */
688 ++ clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
689 ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
690 ++ KERNEL_PGD_PTRS);
691 ++
692 ++ /* Initialize low mappings */
693 ++ clone_pgd_range(trampoline_pg_dir,
694 ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
695 ++ min_t(unsigned long, KERNEL_PGD_PTRS,
696 ++ KERNEL_PGD_BOUNDARY));
697 ++#endif
698 ++}
699 +diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
700 +index cd1f362..4227b1a 100644
701 +--- a/arch/x86/kvm/irq.h
702 ++++ b/arch/x86/kvm/irq.h
703 +@@ -45,7 +45,6 @@ struct kvm_kpic_state {
704 + u8 irr; /* interrupt request register */
705 + u8 imr; /* interrupt mask register */
706 + u8 isr; /* interrupt service register */
707 +- u8 isr_ack; /* interrupt ack detection */
708 + u8 priority_add; /* highest irq priority */
709 + u8 irq_base;
710 + u8 read_reg_select;
711 +@@ -58,6 +57,7 @@ struct kvm_kpic_state {
712 + u8 init4; /* true if 4 byte init */
713 + u8 elcr; /* PIIX edge/trigger selection */
714 + u8 elcr_mask;
715 ++ u8 isr_ack; /* interrupt ack detection */
716 + struct kvm_pic *pics_state;
717 + };
718 +
719 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
720 +index ce438e0..93730fe 100644
721 +--- a/arch/x86/kvm/svm.c
722 ++++ b/arch/x86/kvm/svm.c
723 +@@ -766,7 +766,6 @@ static void init_vmcb(struct vcpu_svm *svm)
724 +
725 + control->iopm_base_pa = iopm_base;
726 + control->msrpm_base_pa = __pa(svm->msrpm);
727 +- control->tsc_offset = 0;
728 + control->int_ctl = V_INTR_MASKING_MASK;
729 +
730 + init_seg(&save->es);
731 +@@ -902,6 +901,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
732 + svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
733 + svm->asid_generation = 0;
734 + init_vmcb(svm);
735 ++ svm->vmcb->control.tsc_offset = 0-native_read_tsc();
736 +
737 + fx_init(&svm->vcpu);
738 + svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
739 +diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
740 +index f9897f7..9c0d0d3 100644
741 +--- a/arch/x86/mm/srat_64.c
742 ++++ b/arch/x86/mm/srat_64.c
743 +@@ -420,9 +420,11 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
744 + return -1;
745 + }
746 +
747 +- for_each_node_mask(i, nodes_parsed)
748 +- e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
749 +- nodes[i].end >> PAGE_SHIFT);
750 ++ for (i = 0; i < num_node_memblks; i++)
751 ++ e820_register_active_regions(memblk_nodeid[i],
752 ++ node_memblk_range[i].start >> PAGE_SHIFT,
753 ++ node_memblk_range[i].end >> PAGE_SHIFT);
754 ++
755 + /* for out of order entries in SRAT */
756 + sort_node_map();
757 + if (!nodes_cover_memory(nodes)) {
758 +diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
759 +index 009b819..f1575c9 100644
760 +--- a/arch/x86/oprofile/nmi_int.c
761 ++++ b/arch/x86/oprofile/nmi_int.c
762 +@@ -674,6 +674,7 @@ static int __init ppro_init(char **cpu_type)
763 + case 0x0f:
764 + case 0x16:
765 + case 0x17:
766 ++ case 0x1d:
767 + *cpu_type = "i386/core_2";
768 + break;
769 + case 0x1a:
770 +diff --git a/block/bsg.c b/block/bsg.c
771 +index 82d5882..0c00870 100644
772 +--- a/block/bsg.c
773 ++++ b/block/bsg.c
774 +@@ -426,7 +426,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
775 + /*
776 + * fill in all the output members
777 + */
778 +- hdr->device_status = status_byte(rq->errors);
779 ++ hdr->device_status = rq->errors & 0xff;
780 + hdr->transport_status = host_byte(rq->errors);
781 + hdr->driver_status = driver_byte(rq->errors);
782 + hdr->info = 0;
783 +diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
784 +index 147a7e6..9f82a38 100644
785 +--- a/drivers/acpi/acpica/aclocal.h
786 ++++ b/drivers/acpi/acpica/aclocal.h
787 +@@ -853,6 +853,7 @@ struct acpi_bit_register_info {
788 + ACPI_BITMASK_POWER_BUTTON_STATUS | \
789 + ACPI_BITMASK_SLEEP_BUTTON_STATUS | \
790 + ACPI_BITMASK_RT_CLOCK_STATUS | \
791 ++ ACPI_BITMASK_PCIEXP_WAKE_DISABLE | \
792 + ACPI_BITMASK_WAKE_STATUS)
793 +
794 + #define ACPI_BITMASK_TIMER_ENABLE 0x0001
795 +diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
796 +index 2bb28b9..51cada7 100644
797 +--- a/drivers/acpi/blacklist.c
798 ++++ b/drivers/acpi/blacklist.c
799 +@@ -226,6 +226,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
800 + },
801 + },
802 + {
803 ++ .callback = dmi_disable_osi_vista,
804 ++ .ident = "Toshiba Satellite L355",
805 ++ .matches = {
806 ++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
807 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
808 ++ },
809 ++ },
810 ++ {
811 + .callback = dmi_disable_osi_win7,
812 + .ident = "ASUS K50IJ",
813 + .matches = {
814 +@@ -233,6 +241,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
815 + DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
816 + },
817 + },
818 ++ {
819 ++ .callback = dmi_disable_osi_vista,
820 ++ .ident = "Toshiba P305D",
821 ++ .matches = {
822 ++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
823 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
824 ++ },
825 ++ },
826 +
827 + /*
828 + * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
829 +diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
830 +index c1d23cd..a33fbc9 100644
831 +--- a/drivers/acpi/bus.c
832 ++++ b/drivers/acpi/bus.c
833 +@@ -80,23 +80,15 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
834 +
835 + static struct dmi_system_id dsdt_dmi_table[] __initdata = {
836 + /*
837 +- * Insyde BIOS on some TOSHIBA machines corrupt the DSDT.
838 ++ * Invoke DSDT corruption work-around on all Toshiba Satellite.
839 + * https://bugzilla.kernel.org/show_bug.cgi?id=14679
840 + */
841 + {
842 + .callback = set_copy_dsdt,
843 +- .ident = "TOSHIBA Satellite A505",
844 ++ .ident = "TOSHIBA Satellite",
845 + .matches = {
846 + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
847 +- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"),
848 +- },
849 +- },
850 +- {
851 +- .callback = set_copy_dsdt,
852 +- .ident = "TOSHIBA Satellite L505D",
853 +- .matches = {
854 +- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
855 +- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"),
856 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"),
857 + },
858 + },
859 + {}
860 +diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
861 +index e9699aa..bec561c 100644
862 +--- a/drivers/acpi/processor_core.c
863 ++++ b/drivers/acpi/processor_core.c
864 +@@ -29,12 +29,6 @@ static int set_no_mwait(const struct dmi_system_id *id)
865 +
866 + static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
867 + {
868 +- set_no_mwait, "IFL91 board", {
869 +- DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
870 +- DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
871 +- DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
872 +- DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
873 +- {
874 + set_no_mwait, "Extensa 5220", {
875 + DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
876 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
877 +@@ -352,4 +346,5 @@ void __init acpi_early_processor_set_pdc(void)
878 + acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
879 + ACPI_UINT32_MAX,
880 + early_init_pdc, NULL, NULL, NULL);
881 ++ acpi_get_devices("ACPI0007", early_init_pdc, NULL, NULL);
882 + }
883 +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
884 +index 2862c78..4882bc1 100644
885 +--- a/drivers/acpi/sleep.c
886 ++++ b/drivers/acpi/sleep.c
887 +@@ -362,6 +362,12 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
888 + return 0;
889 + }
890 +
891 ++static int __init init_nvs_nosave(const struct dmi_system_id *d)
892 ++{
893 ++ acpi_nvs_nosave();
894 ++ return 0;
895 ++}
896 ++
897 + static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
898 + {
899 + .callback = init_old_suspend_ordering,
900 +@@ -396,6 +402,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
901 + DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
902 + },
903 + },
904 ++ {
905 ++ .callback = init_nvs_nosave,
906 ++ .ident = "Sony Vaio VGN-SR11M",
907 ++ .matches = {
908 ++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
909 ++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
910 ++ },
911 ++ },
912 ++ {
913 ++ .callback = init_nvs_nosave,
914 ++ .ident = "Everex StepNote Series",
915 ++ .matches = {
916 ++ DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
917 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
918 ++ },
919 ++ },
920 + {},
921 + };
922 + #endif /* CONFIG_SUSPEND */
923 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
924 +index f252253..d571e3f 100644
925 +--- a/drivers/ata/ahci.c
926 ++++ b/drivers/ata/ahci.c
927 +@@ -89,6 +89,10 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
928 + static int ahci_pci_device_resume(struct pci_dev *pdev);
929 + #endif
930 +
931 ++static struct scsi_host_template ahci_sht = {
932 ++ AHCI_SHT("ahci"),
933 ++};
934 ++
935 + static struct ata_port_operations ahci_vt8251_ops = {
936 + .inherits = &ahci_ops,
937 + .hardreset = ahci_vt8251_hardreset,
938 +diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
939 +index 7113c57..88aa888 100644
940 +--- a/drivers/ata/ahci.h
941 ++++ b/drivers/ata/ahci.h
942 +@@ -297,7 +297,17 @@ struct ahci_host_priv {
943 +
944 + extern int ahci_ignore_sss;
945 +
946 +-extern struct scsi_host_template ahci_sht;
947 ++extern struct device_attribute *ahci_shost_attrs[];
948 ++extern struct device_attribute *ahci_sdev_attrs[];
949 ++
950 ++#define AHCI_SHT(drv_name) \
951 ++ ATA_NCQ_SHT(drv_name), \
952 ++ .can_queue = AHCI_MAX_CMDS - 1, \
953 ++ .sg_tablesize = AHCI_MAX_SG, \
954 ++ .dma_boundary = AHCI_DMA_BOUNDARY, \
955 ++ .shost_attrs = ahci_shost_attrs, \
956 ++ .sdev_attrs = ahci_sdev_attrs
957 ++
958 + extern struct ata_port_operations ahci_ops;
959 +
960 + void ahci_save_initial_config(struct device *dev,
961 +diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
962 +index 5e11b16..9366743 100644
963 +--- a/drivers/ata/ahci_platform.c
964 ++++ b/drivers/ata/ahci_platform.c
965 +@@ -23,6 +23,10 @@
966 + #include <linux/ahci_platform.h>
967 + #include "ahci.h"
968 +
969 ++static struct scsi_host_template ahci_platform_sht = {
970 ++ AHCI_SHT("ahci_platform"),
971 ++};
972 ++
973 + static int __init ahci_probe(struct platform_device *pdev)
974 + {
975 + struct device *dev = &pdev->dev;
976 +@@ -140,7 +144,7 @@ static int __init ahci_probe(struct platform_device *pdev)
977 + ahci_print_info(host, "platform");
978 +
979 + rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
980 +- &ahci_sht);
981 ++ &ahci_platform_sht);
982 + if (rc)
983 + goto err0;
984 +
985 +diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
986 +index 98c80e1..b765a01 100644
987 +--- a/drivers/ata/libahci.c
988 ++++ b/drivers/ata/libahci.c
989 +@@ -121,7 +121,7 @@ static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
990 + static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
991 + ahci_read_em_buffer, ahci_store_em_buffer);
992 +
993 +-static struct device_attribute *ahci_shost_attrs[] = {
994 ++struct device_attribute *ahci_shost_attrs[] = {
995 + &dev_attr_link_power_management_policy,
996 + &dev_attr_em_message_type,
997 + &dev_attr_em_message,
998 +@@ -132,22 +132,14 @@ static struct device_attribute *ahci_shost_attrs[] = {
999 + &dev_attr_em_buffer,
1000 + NULL
1001 + };
1002 ++EXPORT_SYMBOL_GPL(ahci_shost_attrs);
1003 +
1004 +-static struct device_attribute *ahci_sdev_attrs[] = {
1005 ++struct device_attribute *ahci_sdev_attrs[] = {
1006 + &dev_attr_sw_activity,
1007 + &dev_attr_unload_heads,
1008 + NULL
1009 + };
1010 +-
1011 +-struct scsi_host_template ahci_sht = {
1012 +- ATA_NCQ_SHT("ahci"),
1013 +- .can_queue = AHCI_MAX_CMDS - 1,
1014 +- .sg_tablesize = AHCI_MAX_SG,
1015 +- .dma_boundary = AHCI_DMA_BOUNDARY,
1016 +- .shost_attrs = ahci_shost_attrs,
1017 +- .sdev_attrs = ahci_sdev_attrs,
1018 +-};
1019 +-EXPORT_SYMBOL_GPL(ahci_sht);
1020 ++EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
1021 +
1022 + struct ata_port_operations ahci_ops = {
1023 + .inherits = &sata_pmp_port_ops,
1024 +diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
1025 +index 258bc2a..3c108bc 100644
1026 +--- a/drivers/block/virtio_blk.c
1027 ++++ b/drivers/block/virtio_blk.c
1028 +@@ -203,6 +203,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
1029 + struct virtio_blk *vblk = disk->private_data;
1030 + struct request *req;
1031 + struct bio *bio;
1032 ++ int err;
1033 +
1034 + bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
1035 + GFP_KERNEL);
1036 +@@ -216,7 +217,10 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
1037 + }
1038 +
1039 + req->cmd_type = REQ_TYPE_SPECIAL;
1040 +- return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
1041 ++ err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
1042 ++ blk_put_request(req);
1043 ++
1044 ++ return err;
1045 + }
1046 +
1047 + static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
1048 +diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
1049 +index adc0f89..4187121 100644
1050 +--- a/drivers/char/agp/intel-agp.c
1051 ++++ b/drivers/char/agp/intel-agp.c
1052 +@@ -12,6 +12,7 @@
1053 + #include <asm/smp.h>
1054 + #include "agp.h"
1055 + #include "intel-agp.h"
1056 ++#include <linux/intel-gtt.h>
1057 +
1058 + #include "intel-gtt.c"
1059 +
1060 +diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
1061 +index bc9a4ad..4c8949a 100644
1062 +--- a/drivers/char/agp/intel-agp.h
1063 ++++ b/drivers/char/agp/intel-agp.h
1064 +@@ -60,6 +60,12 @@
1065 + #define I810_PTE_LOCAL 0x00000002
1066 + #define I810_PTE_VALID 0x00000001
1067 + #define I830_PTE_SYSTEM_CACHED 0x00000006
1068 ++/* GT PTE cache control fields */
1069 ++#define GEN6_PTE_UNCACHED 0x00000002
1070 ++#define GEN6_PTE_LLC 0x00000004
1071 ++#define GEN6_PTE_LLC_MLC 0x00000006
1072 ++#define GEN6_PTE_GFDT 0x00000008
1073 ++
1074 + #define I810_SMRAM_MISCC 0x70
1075 + #define I810_GFX_MEM_WIN_SIZE 0x00010000
1076 + #define I810_GFX_MEM_WIN_32M 0x00010000
1077 +diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
1078 +index ed11c77..86f5337 100644
1079 +--- a/drivers/char/agp/intel-gtt.c
1080 ++++ b/drivers/char/agp/intel-gtt.c
1081 +@@ -49,6 +49,26 @@ static struct gatt_mask intel_i810_masks[] =
1082 + .type = INTEL_AGP_CACHED_MEMORY}
1083 + };
1084 +
1085 ++#define INTEL_AGP_UNCACHED_MEMORY 0
1086 ++#define INTEL_AGP_CACHED_MEMORY_LLC 1
1087 ++#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2
1088 ++#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
1089 ++#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
1090 ++
1091 ++static struct gatt_mask intel_gen6_masks[] =
1092 ++{
1093 ++ {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
1094 ++ .type = INTEL_AGP_UNCACHED_MEMORY },
1095 ++ {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
1096 ++ .type = INTEL_AGP_CACHED_MEMORY_LLC },
1097 ++ {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
1098 ++ .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
1099 ++ {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
1100 ++ .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
1101 ++ {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
1102 ++ .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
1103 ++};
1104 ++
1105 + static struct _intel_private {
1106 + struct pci_dev *pcidev; /* device one */
1107 + u8 __iomem *registers;
1108 +@@ -175,13 +195,6 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
1109 + off_t pg_start, int mask_type)
1110 + {
1111 + int i, j;
1112 +- u32 cache_bits = 0;
1113 +-
1114 +- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
1115 +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
1116 +- {
1117 +- cache_bits = I830_PTE_SYSTEM_CACHED;
1118 +- }
1119 +
1120 + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
1121 + writel(agp_bridge->driver->mask_memory(agp_bridge,
1122 +@@ -314,6 +327,23 @@ static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
1123 + return 0;
1124 + }
1125 +
1126 ++static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
1127 ++ int type)
1128 ++{
1129 ++ unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
1130 ++ unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
1131 ++
1132 ++ if (type_mask == AGP_USER_UNCACHED_MEMORY)
1133 ++ return INTEL_AGP_UNCACHED_MEMORY;
1134 ++ else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
1135 ++ return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
1136 ++ INTEL_AGP_CACHED_MEMORY_LLC_MLC;
1137 ++ else /* set 'normal'/'cached' to LLC by default */
1138 ++ return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
1139 ++ INTEL_AGP_CACHED_MEMORY_LLC;
1140 ++}
1141 ++
1142 ++
1143 + static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
1144 + int type)
1145 + {
1146 +@@ -1155,7 +1185,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
1147 +
1148 + mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
1149 +
1150 +- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
1151 ++ if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
1152 + mask_type != INTEL_AGP_CACHED_MEMORY)
1153 + goto out_err;
1154 +
1155 +@@ -1546,7 +1576,7 @@ static const struct agp_bridge_driver intel_gen6_driver = {
1156 + .fetch_size = intel_i9xx_fetch_size,
1157 + .cleanup = intel_i915_cleanup,
1158 + .mask_memory = intel_gen6_mask_memory,
1159 +- .masks = intel_i810_masks,
1160 ++ .masks = intel_gen6_masks,
1161 + .agp_enable = intel_i810_agp_enable,
1162 + .cache_flush = global_cache_flush,
1163 + .create_gatt_table = intel_i965_create_gatt_table,
1164 +@@ -1559,7 +1589,7 @@ static const struct agp_bridge_driver intel_gen6_driver = {
1165 + .agp_alloc_pages = agp_generic_alloc_pages,
1166 + .agp_destroy_page = agp_generic_destroy_page,
1167 + .agp_destroy_pages = agp_generic_destroy_pages,
1168 +- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1169 ++ .agp_type_to_mask_type = intel_gen6_type_to_mask_type,
1170 + .chipset_flush = intel_i915_chipset_flush,
1171 + #ifdef USE_PCI_DMA_API
1172 + .agp_map_page = intel_agp_map_page,
1173 +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
1174 +index c810481..0f69c5e 100644
1175 +--- a/drivers/char/virtio_console.c
1176 ++++ b/drivers/char/virtio_console.c
1177 +@@ -459,9 +459,12 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
1178 +
1179 + /*
1180 + * Wait till the host acknowledges it pushed out the data we
1181 +- * sent. This is done for ports in blocking mode or for data
1182 +- * from the hvc_console; the tty operations are performed with
1183 +- * spinlocks held so we can't sleep here.
1184 ++ * sent. This is done for data from the hvc_console; the tty
1185 ++ * operations are performed with spinlocks held so we can't
1186 ++ * sleep here. An alternative would be to copy the data to a
1187 ++ * buffer and relax the spinning requirement. The downside is
1188 ++ * we need to kmalloc a GFP_ATOMIC buffer each time the
1189 ++ * console driver writes something out.
1190 + */
1191 + while (!virtqueue_get_buf(out_vq, &len))
1192 + cpu_relax();
1193 +@@ -626,6 +629,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
1194 + goto free_buf;
1195 + }
1196 +
1197 ++ /*
1198 ++ * We now ask send_buf() to not spin for generic ports -- we
1199 ++ * can re-use the same code path that non-blocking file
1200 ++ * descriptors take for blocking file descriptors since the
1201 ++ * wait is already done and we're certain the write will go
1202 ++ * through to the host.
1203 ++ */
1204 ++ nonblock = true;
1205 + ret = send_buf(port, buf, count, nonblock);
1206 +
1207 + if (nonblock && ret > 0)
1208 +diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
1209 +index 216f9d3..effd140 100644
1210 +--- a/drivers/dma/ioat/dma_v2.c
1211 ++++ b/drivers/dma/ioat/dma_v2.c
1212 +@@ -879,7 +879,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
1213 + dma->device_issue_pending = ioat2_issue_pending;
1214 + dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
1215 + dma->device_free_chan_resources = ioat2_free_chan_resources;
1216 +- dma->device_tx_status = ioat_tx_status;
1217 ++ dma->device_tx_status = ioat_dma_tx_status;
1218 +
1219 + err = ioat_probe(device);
1220 + if (err)
1221 +diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
1222 +index 86c5ae9..411d5bf 100644
1223 +--- a/drivers/dma/mv_xor.c
1224 ++++ b/drivers/dma/mv_xor.c
1225 +@@ -162,7 +162,7 @@ static int mv_is_err_intr(u32 intr_cause)
1226 +
1227 + static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
1228 + {
1229 +- u32 val = (1 << (1 + (chan->idx * 16)));
1230 ++ u32 val = ~(1 << (chan->idx * 16));
1231 + dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
1232 + __raw_writel(val, XOR_INTR_CAUSE(chan));
1233 + }
1234 +diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
1235 +index e0187d1..0fd5b85 100644
1236 +--- a/drivers/edac/i7core_edac.c
1237 ++++ b/drivers/edac/i7core_edac.c
1238 +@@ -1140,6 +1140,7 @@ static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
1239 + ATTR_COUNTER(0),
1240 + ATTR_COUNTER(1),
1241 + ATTR_COUNTER(2),
1242 ++ { .attr = { .name = NULL } }
1243 + };
1244 +
1245 + static struct mcidev_sysfs_group i7core_udimm_counters = {
1246 +diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
1247 +index 9f627e7..4b521dd 100644
1248 +--- a/drivers/firewire/ohci.c
1249 ++++ b/drivers/firewire/ohci.c
1250 +@@ -2439,7 +2439,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
1251 + const struct pci_device_id *ent)
1252 + {
1253 + struct fw_ohci *ohci;
1254 +- u32 bus_options, max_receive, link_speed, version, link_enh;
1255 ++ u32 bus_options, max_receive, link_speed, version;
1256 + u64 guid;
1257 + int i, err, n_ir, n_it;
1258 + size_t size;
1259 +@@ -2492,23 +2492,6 @@ static int __devinit pci_probe(struct pci_dev *dev,
1260 + if (param_quirks)
1261 + ohci->quirks = param_quirks;
1262 +
1263 +- /* TI OHCI-Lynx and compatible: set recommended configuration bits. */
1264 +- if (dev->vendor == PCI_VENDOR_ID_TI) {
1265 +- pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh);
1266 +-
1267 +- /* adjust latency of ATx FIFO: use 1.7 KB threshold */
1268 +- link_enh &= ~TI_LinkEnh_atx_thresh_mask;
1269 +- link_enh |= TI_LinkEnh_atx_thresh_1_7K;
1270 +-
1271 +- /* use priority arbitration for asynchronous responses */
1272 +- link_enh |= TI_LinkEnh_enab_unfair;
1273 +-
1274 +- /* required for aPhyEnhanceEnable to work */
1275 +- link_enh |= TI_LinkEnh_enab_accel;
1276 +-
1277 +- pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh);
1278 +- }
1279 +-
1280 + ar_context_init(&ohci->ar_request_ctx, ohci,
1281 + OHCI1394_AsReqRcvContextControlSet);
1282 +
1283 +diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h
1284 +index 3bc9a5d..d49e146 100644
1285 +--- a/drivers/firewire/ohci.h
1286 ++++ b/drivers/firewire/ohci.h
1287 +@@ -154,12 +154,4 @@
1288 +
1289 + #define OHCI1394_phy_tcode 0xe
1290 +
1291 +-/* TI extensions */
1292 +-
1293 +-#define PCI_CFG_TI_LinkEnh 0xf4
1294 +-#define TI_LinkEnh_enab_accel 0x00000002
1295 +-#define TI_LinkEnh_enab_unfair 0x00000080
1296 +-#define TI_LinkEnh_atx_thresh_mask 0x00003000
1297 +-#define TI_LinkEnh_atx_thresh_1_7K 0x00001000
1298 +-
1299 + #endif /* _FIREWIRE_OHCI_H */
1300 +diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
1301 +index 33dad3f..f353cdf 100644
1302 +--- a/drivers/gpu/drm/drm_gem.c
1303 ++++ b/drivers/gpu/drm/drm_gem.c
1304 +@@ -451,28 +451,6 @@ drm_gem_object_free(struct kref *kref)
1305 + }
1306 + EXPORT_SYMBOL(drm_gem_object_free);
1307 +
1308 +-/**
1309 +- * Called after the last reference to the object has been lost.
1310 +- * Must be called without holding struct_mutex
1311 +- *
1312 +- * Frees the object
1313 +- */
1314 +-void
1315 +-drm_gem_object_free_unlocked(struct kref *kref)
1316 +-{
1317 +- struct drm_gem_object *obj = (struct drm_gem_object *) kref;
1318 +- struct drm_device *dev = obj->dev;
1319 +-
1320 +- if (dev->driver->gem_free_object_unlocked != NULL)
1321 +- dev->driver->gem_free_object_unlocked(obj);
1322 +- else if (dev->driver->gem_free_object != NULL) {
1323 +- mutex_lock(&dev->struct_mutex);
1324 +- dev->driver->gem_free_object(obj);
1325 +- mutex_unlock(&dev->struct_mutex);
1326 +- }
1327 +-}
1328 +-EXPORT_SYMBOL(drm_gem_object_free_unlocked);
1329 +-
1330 + static void drm_gem_object_ref_bug(struct kref *list_kref)
1331 + {
1332 + BUG();
1333 +@@ -517,6 +495,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
1334 + struct drm_gem_object *obj = vma->vm_private_data;
1335 +
1336 + drm_gem_object_reference(obj);
1337 ++
1338 ++ mutex_lock(&obj->dev->struct_mutex);
1339 ++ drm_vm_open_locked(vma);
1340 ++ mutex_unlock(&obj->dev->struct_mutex);
1341 + }
1342 + EXPORT_SYMBOL(drm_gem_vm_open);
1343 +
1344 +@@ -524,7 +506,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
1345 + {
1346 + struct drm_gem_object *obj = vma->vm_private_data;
1347 +
1348 +- drm_gem_object_unreference_unlocked(obj);
1349 ++ mutex_lock(&obj->dev->struct_mutex);
1350 ++ drm_vm_close_locked(vma);
1351 ++ drm_gem_object_unreference(obj);
1352 ++ mutex_unlock(&obj->dev->struct_mutex);
1353 + }
1354 + EXPORT_SYMBOL(drm_gem_vm_close);
1355 +
1356 +diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
1357 +index c3b13fb..3813e84 100644
1358 +--- a/drivers/gpu/drm/drm_vm.c
1359 ++++ b/drivers/gpu/drm/drm_vm.c
1360 +@@ -433,15 +433,7 @@ static void drm_vm_open(struct vm_area_struct *vma)
1361 + mutex_unlock(&dev->struct_mutex);
1362 + }
1363 +
1364 +-/**
1365 +- * \c close method for all virtual memory types.
1366 +- *
1367 +- * \param vma virtual memory area.
1368 +- *
1369 +- * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
1370 +- * free it.
1371 +- */
1372 +-static void drm_vm_close(struct vm_area_struct *vma)
1373 ++void drm_vm_close_locked(struct vm_area_struct *vma)
1374 + {
1375 + struct drm_file *priv = vma->vm_file->private_data;
1376 + struct drm_device *dev = priv->minor->dev;
1377 +@@ -451,7 +443,6 @@ static void drm_vm_close(struct vm_area_struct *vma)
1378 + vma->vm_start, vma->vm_end - vma->vm_start);
1379 + atomic_dec(&dev->vma_count);
1380 +
1381 +- mutex_lock(&dev->struct_mutex);
1382 + list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
1383 + if (pt->vma == vma) {
1384 + list_del(&pt->head);
1385 +@@ -459,6 +450,23 @@ static void drm_vm_close(struct vm_area_struct *vma)
1386 + break;
1387 + }
1388 + }
1389 ++}
1390 ++
1391 ++/**
1392 ++ * \c close method for all virtual memory types.
1393 ++ *
1394 ++ * \param vma virtual memory area.
1395 ++ *
1396 ++ * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
1397 ++ * free it.
1398 ++ */
1399 ++static void drm_vm_close(struct vm_area_struct *vma)
1400 ++{
1401 ++ struct drm_file *priv = vma->vm_file->private_data;
1402 ++ struct drm_device *dev = priv->minor->dev;
1403 ++
1404 ++ mutex_lock(&dev->struct_mutex);
1405 ++ drm_vm_close_locked(vma);
1406 + mutex_unlock(&dev->struct_mutex);
1407 + }
1408 +
1409 +diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
1410 +index ce8ff0e..d396fe5 100644
1411 +--- a/drivers/gpu/drm/i915/i915_dma.c
1412 ++++ b/drivers/gpu/drm/i915/i915_dma.c
1413 +@@ -1778,9 +1778,9 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
1414 + }
1415 + }
1416 +
1417 +- div_u64(diff, diff1);
1418 ++ diff = div_u64(diff, diff1);
1419 + ret = ((m * diff) + c);
1420 +- div_u64(ret, 10);
1421 ++ ret = div_u64(ret, 10);
1422 +
1423 + dev_priv->last_count1 = total_count;
1424 + dev_priv->last_time1 = now;
1425 +@@ -1849,7 +1849,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
1426 +
1427 + /* More magic constants... */
1428 + diff = diff * 1181;
1429 +- div_u64(diff, diffms * 10);
1430 ++ diff = div_u64(diff, diffms * 10);
1431 + dev_priv->gfx_power = diff;
1432 + }
1433 +
1434 +@@ -2222,6 +2222,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1435 + dev_priv->mchdev_lock = &mchdev_lock;
1436 + spin_unlock(&mchdev_lock);
1437 +
1438 ++ /* XXX Prevent module unload due to memory corruption bugs. */
1439 ++ __module_get(THIS_MODULE);
1440 ++
1441 + return 0;
1442 +
1443 + out_workqueue_free:
1444 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1445 +index 06f8238..dc5d2ef 100644
1446 +--- a/drivers/gpu/drm/i915/i915_gem.c
1447 ++++ b/drivers/gpu/drm/i915/i915_gem.c
1448 +@@ -34,6 +34,7 @@
1449 + #include <linux/slab.h>
1450 + #include <linux/swap.h>
1451 + #include <linux/pci.h>
1452 ++#include <linux/intel-gtt.h>
1453 +
1454 + static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
1455 + static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
1456 +@@ -465,8 +466,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1457 + */
1458 + if (args->offset > obj->size || args->size > obj->size ||
1459 + args->offset + args->size > obj->size) {
1460 +- drm_gem_object_unreference_unlocked(obj);
1461 +- return -EINVAL;
1462 ++ ret = -EINVAL;
1463 ++ goto err;
1464 ++ }
1465 ++
1466 ++ if (!access_ok(VERIFY_WRITE,
1467 ++ (char __user *)(uintptr_t)args->data_ptr,
1468 ++ args->size)) {
1469 ++ ret = -EFAULT;
1470 ++ goto err;
1471 + }
1472 +
1473 + if (i915_gem_object_needs_bit17_swizzle(obj)) {
1474 +@@ -478,8 +486,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1475 + file_priv);
1476 + }
1477 +
1478 ++err:
1479 + drm_gem_object_unreference_unlocked(obj);
1480 +-
1481 + return ret;
1482 + }
1483 +
1484 +@@ -568,8 +576,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
1485 +
1486 + user_data = (char __user *) (uintptr_t) args->data_ptr;
1487 + remain = args->size;
1488 +- if (!access_ok(VERIFY_READ, user_data, remain))
1489 +- return -EFAULT;
1490 +
1491 +
1492 + mutex_lock(&dev->struct_mutex);
1493 +@@ -928,8 +934,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1494 + */
1495 + if (args->offset > obj->size || args->size > obj->size ||
1496 + args->offset + args->size > obj->size) {
1497 +- drm_gem_object_unreference_unlocked(obj);
1498 +- return -EINVAL;
1499 ++ ret = -EINVAL;
1500 ++ goto err;
1501 ++ }
1502 ++
1503 ++ if (!access_ok(VERIFY_READ,
1504 ++ (char __user *)(uintptr_t)args->data_ptr,
1505 ++ args->size)) {
1506 ++ ret = -EFAULT;
1507 ++ goto err;
1508 + }
1509 +
1510 + /* We can only do the GTT pwrite on untiled buffers, as otherwise
1511 +@@ -963,8 +976,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1512 + DRM_INFO("pwrite failed %d\n", ret);
1513 + #endif
1514 +
1515 ++err:
1516 + drm_gem_object_unreference_unlocked(obj);
1517 +-
1518 + return ret;
1519 + }
1520 +
1521 +@@ -3367,6 +3380,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1522 + (int) reloc->offset,
1523 + reloc->read_domains,
1524 + reloc->write_domain);
1525 ++ drm_gem_object_unreference(target_obj);
1526 ++ i915_gem_object_unpin(obj);
1527 + return -EINVAL;
1528 + }
1529 + if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
1530 +diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
1531 +index d448c25..82cc369 100644
1532 +--- a/drivers/gpu/drm/radeon/evergreen.c
1533 ++++ b/drivers/gpu/drm/radeon/evergreen.c
1534 +@@ -1120,7 +1120,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1535 +
1536 + WREG32(RCU_IND_INDEX, 0x203);
1537 + efuse_straps_3 = RREG32(RCU_IND_DATA);
1538 +- efuse_box_bit_127_124 = (u8)(efuse_straps_3 & 0xF0000000) >> 28;
1539 ++ efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
1540 +
1541 + switch(efuse_box_bit_127_124) {
1542 + case 0x0:
1543 +@@ -1389,6 +1389,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
1544 + rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
1545 + rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
1546 + rdev->mc.visible_vram_size = rdev->mc.aper_size;
1547 ++ /* limit it to the aperture size for now as there is no blit support in 2.6.35/36*/
1548 ++ rdev->mc.real_vram_size = rdev->mc.visible_vram_size;
1549 + r600_vram_gtt_location(rdev, &rdev->mc);
1550 + radeon_update_bandwidth_info(rdev);
1551 +
1552 +diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
1553 +index 952cc78..eddeff3 100644
1554 +--- a/drivers/gpu/drm/radeon/r600.c
1555 ++++ b/drivers/gpu/drm/radeon/r600.c
1556 +@@ -3522,7 +3522,8 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
1557 + /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
1558 + * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
1559 + */
1560 +- if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
1561 ++ if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
1562 ++ rdev->vram_scratch.ptr) {
1563 + void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
1564 + u32 tmp;
1565 +
1566 +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
1567 +index 6bfef51..d7c18e5 100644
1568 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c
1569 ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
1570 +@@ -226,6 +226,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
1571 + *connector_type = DRM_MODE_CONNECTOR_DVID;
1572 + }
1573 +
1574 ++ /* MSI K9A2GM V2/V3 board has no HDMI or DVI */
1575 ++ if ((dev->pdev->device == 0x796e) &&
1576 ++ (dev->pdev->subsystem_vendor == 0x1462) &&
1577 ++ (dev->pdev->subsystem_device == 0x7302)) {
1578 ++ if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
1579 ++ (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
1580 ++ return false;
1581 ++ }
1582 ++
1583 + /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
1584 + if ((dev->pdev->device == 0x7941) &&
1585 + (dev->pdev->subsystem_vendor == 0x147b) &&
1586 +diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
1587 +index 4eb67c0..f7ce8e1 100644
1588 +--- a/drivers/gpu/drm/radeon/radeon_cursor.c
1589 ++++ b/drivers/gpu/drm/radeon/radeon_cursor.c
1590 +@@ -203,6 +203,7 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
1591 + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1592 + struct radeon_device *rdev = crtc->dev->dev_private;
1593 + int xorigin = 0, yorigin = 0;
1594 ++ int w = radeon_crtc->cursor_width;
1595 +
1596 + if (x < 0)
1597 + xorigin = -x + 1;
1598 +@@ -213,22 +214,7 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
1599 + if (yorigin >= CURSOR_HEIGHT)
1600 + yorigin = CURSOR_HEIGHT - 1;
1601 +
1602 +- radeon_lock_cursor(crtc, true);
1603 +- if (ASIC_IS_DCE4(rdev)) {
1604 +- /* cursors are offset into the total surface */
1605 +- x += crtc->x;
1606 +- y += crtc->y;
1607 +- DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
1608 +-
1609 +- /* XXX: check if evergreen has the same issues as avivo chips */
1610 +- WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
1611 +- ((xorigin ? 0 : x) << 16) |
1612 +- (yorigin ? 0 : y));
1613 +- WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
1614 +- WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
1615 +- ((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1));
1616 +- } else if (ASIC_IS_AVIVO(rdev)) {
1617 +- int w = radeon_crtc->cursor_width;
1618 ++ if (ASIC_IS_AVIVO(rdev)) {
1619 + int i = 0;
1620 + struct drm_crtc *crtc_p;
1621 +
1622 +@@ -260,7 +246,17 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
1623 + if (w <= 0)
1624 + w = 1;
1625 + }
1626 ++ }
1627 +
1628 ++ radeon_lock_cursor(crtc, true);
1629 ++ if (ASIC_IS_DCE4(rdev)) {
1630 ++ WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
1631 ++ ((xorigin ? 0 : x) << 16) |
1632 ++ (yorigin ? 0 : y));
1633 ++ WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
1634 ++ WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
1635 ++ ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
1636 ++ } else if (ASIC_IS_AVIVO(rdev)) {
1637 + WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
1638 + ((xorigin ? 0 : x) << 16) |
1639 + (yorigin ? 0 : y));
1640 +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
1641 +index 8154cdf..2d95376 100644
1642 +--- a/drivers/gpu/drm/radeon/radeon_display.c
1643 ++++ b/drivers/gpu/drm/radeon/radeon_display.c
1644 +@@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev)
1645 + DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
1646 + if (devices & ATOM_DEVICE_DFP5_SUPPORT)
1647 + DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
1648 ++ if (devices & ATOM_DEVICE_DFP6_SUPPORT)
1649 ++ DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
1650 + if (devices & ATOM_DEVICE_TV1_SUPPORT)
1651 + DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
1652 + if (devices & ATOM_DEVICE_CV_SUPPORT)
1653 +diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
1654 +index 3ccd478..7dca2fa 100644
1655 +--- a/drivers/hid/hidraw.c
1656 ++++ b/drivers/hid/hidraw.c
1657 +@@ -111,6 +111,12 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t
1658 + int ret = 0;
1659 +
1660 + mutex_lock(&minors_lock);
1661 ++
1662 ++ if (!hidraw_table[minor]) {
1663 ++ ret = -ENODEV;
1664 ++ goto out;
1665 ++ }
1666 ++
1667 + dev = hidraw_table[minor]->hid;
1668 +
1669 + if (!dev->hid_output_raw_report) {
1670 +@@ -246,6 +252,10 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
1671 +
1672 + mutex_lock(&minors_lock);
1673 + dev = hidraw_table[minor];
1674 ++ if (!dev) {
1675 ++ ret = -ENODEV;
1676 ++ goto out;
1677 ++ }
1678 +
1679 + switch (cmd) {
1680 + case HIDIOCGRDESCSIZE:
1681 +@@ -319,6 +329,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
1682 +
1683 + ret = -ENOTTY;
1684 + }
1685 ++out:
1686 + mutex_unlock(&minors_lock);
1687 + return ret;
1688 + }
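
Both hidraw hunks apply the same discipline: look up hidraw_table[minor] only while minors_lock is held, and return -ENODEV if the slot has already been cleared (for example by a disconnect) instead of dereferencing it blindly. A standalone sketch of that check-under-the-lock pattern, with a pthread mutex standing in for the kernel mutex and an illustrative device table:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define MAX_MINORS 8

struct device { int id; };

static struct device *dev_table[MAX_MINORS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Dereference the slot only after checking it under the lock, so a
 * concurrent disconnect that NULLs the entry cannot race with us. */
static int device_write(int minor)
{
        struct device *dev;
        int ret = 0;

        pthread_mutex_lock(&table_lock);
        dev = dev_table[minor];
        if (!dev) {
                ret = -ENODEV;
                goto out;
        }

        printf("writing to device %d\n", dev->id);
out:
        pthread_mutex_unlock(&table_lock);
        return ret;
}

int main(void)
{
        struct device d = { .id = 42 };

        dev_table[0] = &d;
        printf("present: %d\n", device_write(0));
        printf("missing: %d\n", device_write(1));
        return 0;
}
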
1689 +diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
1690 +index 6138f03..fc591ae 100644
1691 +--- a/drivers/hwmon/lis3lv02d.c
1692 ++++ b/drivers/hwmon/lis3lv02d.c
1693 +@@ -277,7 +277,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
1694 + wake_up_interruptible(&lis3_dev.misc_wait);
1695 + kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
1696 + out:
1697 +- if (lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
1698 ++ if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
1699 + lis3_dev.idev->input->users)
1700 + return IRQ_WAKE_THREAD;
1701 + return IRQ_HANDLED;
1702 +@@ -718,7 +718,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
1703 + * io-apic is not configurable (and generates a warning) but I keep it
1704 + * in case of support for other hardware.
1705 + */
1706 +- if (dev->whoami == WAI_8B)
1707 ++ if (dev->pdata && dev->whoami == WAI_8B)
1708 + thread_fn = lis302dl_interrupt_thread1_8b;
1709 + else
1710 + thread_fn = NULL;
1711 +diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
1712 +index bbd7760..29933f8 100644
1713 +--- a/drivers/i2c/busses/i2c-pca-isa.c
1714 ++++ b/drivers/i2c/busses/i2c-pca-isa.c
1715 +@@ -71,8 +71,8 @@ static int pca_isa_readbyte(void *pd, int reg)
1716 +
1717 + static int pca_isa_waitforcompletion(void *pd)
1718 + {
1719 +- long ret = ~0;
1720 + unsigned long timeout;
1721 ++ long ret;
1722 +
1723 + if (irq > -1) {
1724 + ret = wait_event_timeout(pca_wait,
1725 +@@ -81,11 +81,15 @@ static int pca_isa_waitforcompletion(void *pd)
1726 + } else {
1727 + /* Do polling */
1728 + timeout = jiffies + pca_isa_ops.timeout;
1729 +- while (((pca_isa_readbyte(pd, I2C_PCA_CON)
1730 +- & I2C_PCA_CON_SI) == 0)
1731 +- && (ret = time_before(jiffies, timeout)))
1732 ++ do {
1733 ++ ret = time_before(jiffies, timeout);
1734 ++ if (pca_isa_readbyte(pd, I2C_PCA_CON)
1735 ++ & I2C_PCA_CON_SI)
1736 ++ break;
1737 + udelay(100);
1738 ++ } while (ret);
1739 + }
1740 ++
1741 + return ret > 0;
1742 + }
1743 +
1744 +diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
1745 +index ef5c784..5f6d7f8 100644
1746 +--- a/drivers/i2c/busses/i2c-pca-platform.c
1747 ++++ b/drivers/i2c/busses/i2c-pca-platform.c
1748 +@@ -80,8 +80,8 @@ static void i2c_pca_pf_writebyte32(void *pd, int reg, int val)
1749 + static int i2c_pca_pf_waitforcompletion(void *pd)
1750 + {
1751 + struct i2c_pca_pf_data *i2c = pd;
1752 +- long ret = ~0;
1753 + unsigned long timeout;
1754 ++ long ret;
1755 +
1756 + if (i2c->irq) {
1757 + ret = wait_event_timeout(i2c->wait,
1758 +@@ -90,10 +90,13 @@ static int i2c_pca_pf_waitforcompletion(void *pd)
1759 + } else {
1760 + /* Do polling */
1761 + timeout = jiffies + i2c->adap.timeout;
1762 +- while (((i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
1763 +- & I2C_PCA_CON_SI) == 0)
1764 +- && (ret = time_before(jiffies, timeout)))
1765 ++ do {
1766 ++ ret = time_before(jiffies, timeout);
1767 ++ if (i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
1768 ++ & I2C_PCA_CON_SI)
1769 ++ break;
1770 + udelay(100);
1771 ++ } while (ret);
1772 + }
1773 +
1774 + return ret > 0;
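
The two PCA bus-driver hunks rewrite the same polling idiom: instead of a while() condition that reads the status and assigns ret as a side effect, the do/while form records time_before() first and then reads the completion bit explicitly on every pass, so the status is sampled at least once and the return value is always well defined. A standalone sketch of the resulting poll-until-deadline shape; poll_status() stands in for the I2C_PCA_CON register read and clock_gettime() for jiffies:

#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for reading the controller's status register. */
static bool poll_status(void)
{
        static int calls;
        return ++calls >= 3;            /* "completes" on the third poll */
}

static double elapsed(const struct timespec *a, const struct timespec *b)
{
        return (b->tv_sec - a->tv_sec) + (b->tv_nsec - a->tv_nsec) / 1e9;
}

/* Poll until the status bit is seen or the deadline passes; the status is
 * read at least once, and the time check is made explicitly each pass. */
static bool wait_for_completion(double timeout_sec)
{
        struct timespec start, now;
        bool time_left;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
                clock_gettime(CLOCK_MONOTONIC, &now);
                time_left = elapsed(&start, &now) < timeout_sec;
                if (poll_status())
                        return true;
                /* the drivers udelay(100) here */
        } while (time_left);

        return false;
}

int main(void)
{
        printf("completed: %s\n", wait_for_completion(1.0) ? "yes" : "no");
        return 0;
}
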
1775 +diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
1776 +index ebfb117..9c05ddd 100644
1777 +--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
1778 ++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
1779 +@@ -463,7 +463,8 @@ static int send_connect(struct iwch_ep *ep)
1780 + V_MSS_IDX(mtu_idx) |
1781 + V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
1782 + opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
1783 +- opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
1784 ++ opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
1785 ++ V_CONG_CONTROL_FLAVOR(cong_flavor);
1786 + skb->priority = CPL_PRIORITY_SETUP;
1787 + set_arp_failure_handler(skb, act_open_req_arp_failure);
1788 +
1789 +@@ -1280,7 +1281,8 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
1790 + V_MSS_IDX(mtu_idx) |
1791 + V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
1792 + opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
1793 +- opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
1794 ++ opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
1795 ++ V_CONG_CONTROL_FLAVOR(cong_flavor);
1796 +
1797 + rpl = cplhdr(skb);
1798 + rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1799 +diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
1800 +index 34157bb..f5320b5 100644
1801 +--- a/drivers/input/joydev.c
1802 ++++ b/drivers/input/joydev.c
1803 +@@ -483,6 +483,9 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev,
1804 +
1805 + memcpy(joydev->abspam, abspam, len);
1806 +
1807 ++ for (i = 0; i < joydev->nabs; i++)
1808 ++ joydev->absmap[joydev->abspam[i]] = i;
1809 ++
1810 + out:
1811 + kfree(abspam);
1812 + return retval;
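
The joydev fix keeps two arrays consistent: abspam maps a slot index to an axis code, and absmap is its inverse, so after accepting a new abspam from the JSIOCSAXMAP ioctl the inverse has to be rebuilt as well. A standalone sketch of maintaining a forward table together with its reverse lookup; the sizes and names here are illustrative:

#include <stdio.h>
#include <string.h>

#define NUM_SLOTS 4
#define NUM_CODES 16

static unsigned char forward[NUM_SLOTS];   /* slot -> code (like abspam) */
static unsigned char reverse[NUM_CODES];   /* code -> slot (like absmap) */

/* Install a new slot->code table and rebuild the code->slot inverse so
 * both directions stay in sync. */
static void set_mapping(const unsigned char *new_map)
{
        memcpy(forward, new_map, sizeof(forward));
        for (int i = 0; i < NUM_SLOTS; i++)
                reverse[forward[i]] = i;
}

int main(void)
{
        unsigned char map[NUM_SLOTS] = { 3, 7, 1, 9 };

        set_mapping(map);
        printf("code 7 lives in slot %u\n", reverse[7]);   /* prints 1 */
        return 0;
}
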
1813 +diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
1814 +index 415f630..8a6a2a1 100644
1815 +--- a/drivers/input/tablet/wacom_wac.c
1816 ++++ b/drivers/input/tablet/wacom_wac.c
1817 +@@ -409,8 +409,10 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
1818 + /* general pen packet */
1819 + if ((data[1] & 0xb8) == 0xa0) {
1820 + t = (data[6] << 2) | ((data[7] >> 6) & 3);
1821 +- if (features->type >= INTUOS4S && features->type <= INTUOS4L)
1822 ++ if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
1823 ++ features->type == WACOM_21UX2) {
1824 + t = (t << 1) | (data[1] & 1);
1825 ++ }
1826 + input_report_abs(input, ABS_PRESSURE, t);
1827 + input_report_abs(input, ABS_TILT_X,
1828 + ((data[7] << 1) & 0x7e) | (data[8] >> 7));
1829 +diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c
1830 +index 94a8577..78eb8cb 100644
1831 +--- a/drivers/media/IR/ir-keytable.c
1832 ++++ b/drivers/media/IR/ir-keytable.c
1833 +@@ -319,7 +319,7 @@ static void ir_timer_keyup(unsigned long cookie)
1834 + * a keyup event might follow immediately after the keydown.
1835 + */
1836 + spin_lock_irqsave(&ir->keylock, flags);
1837 +- if (time_is_after_eq_jiffies(ir->keyup_jiffies))
1838 ++ if (time_is_before_eq_jiffies(ir->keyup_jiffies))
1839 + ir_keyup(ir);
1840 + spin_unlock_irqrestore(&ir->keylock, flags);
1841 + }
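
The ir-keytable change swaps one jiffies helper for its opposite so the keyup timer only releases the key once the stored keyup_jiffies deadline lies in the past. The kernel's time_is_*_jiffies() macros are wrap-safe because they compare through a signed subtraction rather than comparing raw counter values; a standalone illustration of that idea with a 32-bit tick counter:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "has the deadline passed?" check: subtract and look at the
 * sign instead of comparing the raw counter values with <. */
static bool deadline_passed(uint32_t now, uint32_t deadline)
{
        return (int32_t)(now - deadline) >= 0;
}

int main(void)
{
        uint32_t deadline = 0xfffffff0u;  /* just before the counter wraps */

        printf("%d\n", deadline_passed(0xffffffe0u, deadline)); /* 0: not yet */
        printf("%d\n", deadline_passed(0x00000010u, deadline)); /* 1: passed, across the wrap */
        return 0;
}
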
1842 +diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
1843 +index 800800a..33af683 100644
1844 +--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
1845 ++++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
1846 +@@ -1240,6 +1240,57 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap)
1847 + return adap->fe == NULL ? -ENODEV : 0;
1848 + }
1849 +
1850 ++/* STK7770P */
1851 ++static struct dib7000p_config dib7770p_dib7000p_config = {
1852 ++ .output_mpeg2_in_188_bytes = 1,
1853 ++
1854 ++ .agc_config_count = 1,
1855 ++ .agc = &dib7070_agc_config,
1856 ++ .bw = &dib7070_bw_config_12_mhz,
1857 ++ .tuner_is_baseband = 1,
1858 ++ .spur_protect = 1,
1859 ++
1860 ++ .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS,
1861 ++ .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES,
1862 ++ .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS,
1863 ++
1864 ++ .hostbus_diversity = 1,
1865 ++ .enable_current_mirror = 1,
1866 ++};
1867 ++
1868 ++static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap)
1869 ++{
1870 ++ struct usb_device_descriptor *p = &adap->dev->udev->descriptor;
1871 ++ if (p->idVendor == cpu_to_le16(USB_VID_PINNACLE) &&
1872 ++ p->idProduct == cpu_to_le16(USB_PID_PINNACLE_PCTV72E))
1873 ++ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0);
1874 ++ else
1875 ++ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
1876 ++ msleep(10);
1877 ++ dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
1878 ++ dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
1879 ++ dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
1880 ++ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
1881 ++
1882 ++ dib0700_ctrl_clock(adap->dev, 72, 1);
1883 ++
1884 ++ msleep(10);
1885 ++ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
1886 ++ msleep(10);
1887 ++ dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
1888 ++
1889 ++ if (dib7000p_i2c_enumeration(&adap->dev->i2c_adap, 1, 18,
1890 ++ &dib7770p_dib7000p_config) != 0) {
1891 ++ err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n",
1892 ++ __func__);
1893 ++ return -ENODEV;
1894 ++ }
1895 ++
1896 ++ adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80,
1897 ++ &dib7770p_dib7000p_config);
1898 ++ return adap->fe == NULL ? -ENODEV : 0;
1899 ++}
1900 ++
1901 + /* DIB807x generic */
1902 + static struct dibx000_agc_config dib807x_agc_config[2] = {
1903 + {
1904 +@@ -2081,7 +2132,7 @@ struct usb_device_id dib0700_usb_id_table[] = {
1905 + /* 60 */{ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_XXS_2) },
1906 + { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XPVR) },
1907 + { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XP) },
1908 +- { USB_DEVICE(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD) },
1909 ++ { USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x000, 0x3f00) },
1910 + { USB_DEVICE(USB_VID_EVOLUTEPC, USB_PID_TVWAY_PLUS) },
1911 + /* 65 */{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73ESE) },
1912 + { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV282E) },
1913 +@@ -2592,7 +2643,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
1914 + .pid_filter_count = 32,
1915 + .pid_filter = stk70x0p_pid_filter,
1916 + .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
1917 +- .frontend_attach = stk7070p_frontend_attach,
1918 ++ .frontend_attach = stk7770p_frontend_attach,
1919 + .tuner_attach = dib7770p_tuner_attach,
1920 +
1921 + DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
1922 +diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
1923 +index 2e28b97..73f59ab 100644
1924 +--- a/drivers/media/dvb/frontends/dib7000p.c
1925 ++++ b/drivers/media/dvb/frontends/dib7000p.c
1926 +@@ -260,6 +260,8 @@ static void dib7000p_set_adc_state(struct dib7000p_state *state, enum dibx000_ad
1927 +
1928 + // dprintk( "908: %x, 909: %x\n", reg_908, reg_909);
1929 +
1930 ++ reg_908 |= (state->cfg.enable_current_mirror & 1) << 7;
1931 ++
1932 + dib7000p_write_word(state, 908, reg_908);
1933 + dib7000p_write_word(state, 909, reg_909);
1934 + }
1935 +diff --git a/drivers/media/dvb/frontends/dib7000p.h b/drivers/media/dvb/frontends/dib7000p.h
1936 +index 805dd13..04a7449 100644
1937 +--- a/drivers/media/dvb/frontends/dib7000p.h
1938 ++++ b/drivers/media/dvb/frontends/dib7000p.h
1939 +@@ -33,6 +33,9 @@ struct dib7000p_config {
1940 + int (*agc_control) (struct dvb_frontend *, u8 before);
1941 +
1942 + u8 output_mode;
1943 ++
1944 ++ u8 enable_current_mirror : 1;
1945 ++
1946 + };
1947 +
1948 + #define DEFAULT_DIB7000P_I2C_ADDRESS 18
1949 +diff --git a/drivers/media/video/cx231xx/Makefile b/drivers/media/video/cx231xx/Makefile
1950 +index 755dd0c..6f2b573 100644
1951 +--- a/drivers/media/video/cx231xx/Makefile
1952 ++++ b/drivers/media/video/cx231xx/Makefile
1953 +@@ -11,4 +11,5 @@ EXTRA_CFLAGS += -Idrivers/media/video
1954 + EXTRA_CFLAGS += -Idrivers/media/common/tuners
1955 + EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
1956 + EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
1957 ++EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-usb
1958 +
1959 +diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
1960 +index 6bdc0ef..f2a4900 100644
1961 +--- a/drivers/media/video/cx231xx/cx231xx-cards.c
1962 ++++ b/drivers/media/video/cx231xx/cx231xx-cards.c
1963 +@@ -32,6 +32,7 @@
1964 + #include <media/v4l2-chip-ident.h>
1965 +
1966 + #include <media/cx25840.h>
1967 ++#include "dvb-usb-ids.h"
1968 + #include "xc5000.h"
1969 +
1970 + #include "cx231xx.h"
1971 +@@ -175,6 +176,8 @@ struct usb_device_id cx231xx_id_table[] = {
1972 + .driver_info = CX231XX_BOARD_CNXT_RDE_250},
1973 + {USB_DEVICE(0x0572, 0x58A1),
1974 + .driver_info = CX231XX_BOARD_CNXT_RDU_250},
1975 ++ {USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x4000,0x4fff),
1976 ++ .driver_info = CX231XX_BOARD_UNKNOWN},
1977 + {},
1978 + };
1979 +
1980 +@@ -226,14 +229,16 @@ void cx231xx_pre_card_setup(struct cx231xx *dev)
1981 + dev->board.name, dev->model);
1982 +
1983 + /* set the direction for GPIO pins */
1984 +- cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
1985 +- cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1);
1986 +- cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1);
1987 ++ if (dev->board.tuner_gpio) {
1988 ++ cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
1989 ++ cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1);
1990 ++ cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1);
1991 +
1992 +- /* request some modules if any required */
1993 ++ /* request some modules if any required */
1994 +
1995 +- /* reset the Tuner */
1996 +- cx231xx_gpio_set(dev, dev->board.tuner_gpio);
1997 ++ /* reset the Tuner */
1998 ++ cx231xx_gpio_set(dev, dev->board.tuner_gpio);
1999 ++ }
2000 +
2001 + /* set the mode to Analog mode initially */
2002 + cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
2003 +diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
2004 +index 678675b..59aba24 100644
2005 +--- a/drivers/media/video/gspca/gspca.c
2006 ++++ b/drivers/media/video/gspca/gspca.c
2007 +@@ -223,6 +223,7 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev,
2008 + usb_rcvintpipe(dev, ep->bEndpointAddress),
2009 + buffer, buffer_len,
2010 + int_irq, (void *)gspca_dev, interval);
2011 ++ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2012 + gspca_dev->int_urb = urb;
2013 + ret = usb_submit_urb(urb, GFP_KERNEL);
2014 + if (ret < 0) {
2015 +diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
2016 +index 644a7fd..74a005f 100644
2017 +--- a/drivers/media/video/gspca/sn9c20x.c
2018 ++++ b/drivers/media/video/gspca/sn9c20x.c
2019 +@@ -2368,8 +2368,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
2020 + (data[33] << 10);
2021 + avg_lum >>= 9;
2022 + atomic_set(&sd->avg_lum, avg_lum);
2023 +- gspca_frame_add(gspca_dev, LAST_PACKET,
2024 +- data, len);
2025 ++ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
2026 + return;
2027 + }
2028 + if (gspca_dev->last_packet_type == LAST_PACKET) {
2029 +diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
2030 +index 9004a5f..85b175d 100644
2031 +--- a/drivers/media/video/v4l2-compat-ioctl32.c
2032 ++++ b/drivers/media/video/v4l2-compat-ioctl32.c
2033 +@@ -193,17 +193,24 @@ static int put_video_window32(struct video_window *kp, struct video_window32 __u
2034 + struct video_code32 {
2035 + char loadwhat[16]; /* name or tag of file being passed */
2036 + compat_int_t datasize;
2037 +- unsigned char *data;
2038 ++ compat_uptr_t data;
2039 + };
2040 +
2041 +-static int get_microcode32(struct video_code *kp, struct video_code32 __user *up)
2042 ++static struct video_code __user *get_microcode32(struct video_code32 *kp)
2043 + {
2044 +- if (!access_ok(VERIFY_READ, up, sizeof(struct video_code32)) ||
2045 +- copy_from_user(kp->loadwhat, up->loadwhat, sizeof(up->loadwhat)) ||
2046 +- get_user(kp->datasize, &up->datasize) ||
2047 +- copy_from_user(kp->data, up->data, up->datasize))
2048 +- return -EFAULT;
2049 +- return 0;
2050 ++ struct video_code __user *up;
2051 ++
2052 ++ up = compat_alloc_user_space(sizeof(*up));
2053 ++
2054 ++ /*
2055 ++ * NOTE! We don't actually care if these fail. If the
2056 ++ * user address is invalid, the native ioctl will do
2057 ++ * the error handling for us
2058 ++ */
2059 ++ (void) copy_to_user(up->loadwhat, kp->loadwhat, sizeof(up->loadwhat));
2060 ++ (void) put_user(kp->datasize, &up->datasize);
2061 ++ (void) put_user(compat_ptr(kp->data), &up->data);
2062 ++ return up;
2063 + }
2064 +
2065 + #define VIDIOCGTUNER32 _IOWR('v', 4, struct video_tuner32)
2066 +@@ -744,7 +751,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
2067 + struct video_tuner vt;
2068 + struct video_buffer vb;
2069 + struct video_window vw;
2070 +- struct video_code vc;
2071 ++ struct video_code32 vc;
2072 + struct video_audio va;
2073 + #endif
2074 + struct v4l2_format v2f;
2075 +@@ -823,8 +830,11 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
2076 + break;
2077 +
2078 + case VIDIOCSMICROCODE:
2079 +- err = get_microcode32(&karg.vc, up);
2080 +- compatible_arg = 0;
2081 ++ /* Copy the 32-bit "video_code32" to kernel space */
2082 ++ if (copy_from_user(&karg.vc, up, sizeof(karg.vc)))
2083 ++ return -EFAULT;
2084 ++ /* Convert the 32-bit version to a 64-bit version in user space */
2085 ++ up = get_microcode32(&karg.vc);
2086 + break;
2087 +
2088 + case VIDIOCSFREQ:
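
The VIDIOCSMICROCODE path no longer copies the microcode payload inside the compat layer; it copies only the 32-bit video_code32 descriptor into the kernel, builds the native layout back in user space with compat_alloc_user_space(), and lets the regular ioctl perform all pointer validation. The conversion is needed because the 32-bit and native layouts differ in the pointer field; a standalone sketch of that layout difference, with plain integer and pointer types standing in for compat_uptr_t and the real user pointer:

#include <stdint.h>
#include <stdio.h>

/* What a 32-bit userspace process laid out: the pointer field is 4 bytes. */
struct video_code_compat {
        char     loadwhat[16];
        int32_t  datasize;
        uint32_t data;          /* 32-bit user pointer (compat_uptr_t) */
};

/* What the native 64-bit ioctl expects: the pointer field is 8 bytes. */
struct video_code_native {
        char     loadwhat[16];
        int32_t  datasize;
        unsigned char *data;    /* real pointer, 8 bytes on a 64-bit build */
};

int main(void)
{
        printf("compat struct: %zu bytes\n", sizeof(struct video_code_compat));
        printf("native struct: %zu bytes\n", sizeof(struct video_code_native));
        return 0;
}

On a 64-bit build the native struct is larger and differently aligned, which is why the 32-bit descriptor cannot simply be reinterpreted in place.
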
2089 +diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
2090 +index 7dabe4d..294183b 100644
2091 +--- a/drivers/mfd/wm831x-irq.c
2092 ++++ b/drivers/mfd/wm831x-irq.c
2093 +@@ -394,8 +394,13 @@ static int wm831x_irq_set_type(unsigned int irq, unsigned int type)
2094 +
2095 + irq = irq - wm831x->irq_base;
2096 +
2097 +- if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11)
2098 +- return -EINVAL;
2099 ++ if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
2100 ++ /* Ignore internal-only IRQs */
2101 ++ if (irq >= 0 && irq < WM831X_NUM_IRQS)
2102 ++ return 0;
2103 ++ else
2104 ++ return -EINVAL;
2105 ++ }
2106 +
2107 + switch (type) {
2108 + case IRQ_TYPE_EDGE_BOTH:
2109 +diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
2110 +index 83e7543..2876c3d 100644
2111 +--- a/drivers/mmc/core/core.c
2112 ++++ b/drivers/mmc/core/core.c
2113 +@@ -1277,6 +1277,19 @@ int mmc_suspend_host(struct mmc_host *host)
2114 + if (host->bus_ops && !host->bus_dead) {
2115 + if (host->bus_ops->suspend)
2116 + err = host->bus_ops->suspend(host);
2117 ++ if (err == -ENOSYS || !host->bus_ops->resume) {
2118 ++ /*
2119 ++ * We simply "remove" the card in this case.
2120 ++ * It will be redetected on resume.
2121 ++ */
2122 ++ if (host->bus_ops->remove)
2123 ++ host->bus_ops->remove(host);
2124 ++ mmc_claim_host(host);
2125 ++ mmc_detach_bus(host);
2126 ++ mmc_release_host(host);
2127 ++ host->pm_flags = 0;
2128 ++ err = 0;
2129 ++ }
2130 + }
2131 + mmc_bus_put(host);
2132 +
2133 +diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
2134 +index ad30f07..76c0ec3 100644
2135 +--- a/drivers/mmc/host/sdhci-s3c.c
2136 ++++ b/drivers/mmc/host/sdhci-s3c.c
2137 +@@ -372,8 +372,10 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
2138 + sdhci_remove_host(host, 1);
2139 +
2140 + for (ptr = 0; ptr < 3; ptr++) {
2141 +- clk_disable(sc->clk_bus[ptr]);
2142 +- clk_put(sc->clk_bus[ptr]);
2143 ++ if (sc->clk_bus[ptr]) {
2144 ++ clk_disable(sc->clk_bus[ptr]);
2145 ++ clk_put(sc->clk_bus[ptr]);
2146 ++ }
2147 + }
2148 + clk_disable(sc->clk_io);
2149 + clk_put(sc->clk_io);
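
The sdhci-s3c teardown now releases only the bus clocks that were actually obtained; some of the three slots can legitimately be NULL. A standalone sketch of the guarded teardown loop, with malloc()/free() standing in for the clock handles:

#include <stdio.h>
#include <stdlib.h>

#define NUM_SLOTS 3

/* Release only the slots that were populated; skipping NULL entries
 * mirrors calling clk_disable()/clk_put() only on clocks we hold. */
static void release_all(char *slots[NUM_SLOTS])
{
        for (int i = 0; i < NUM_SLOTS; i++) {
                if (slots[i]) {
                        printf("releasing slot %d\n", i);
                        free(slots[i]);
                        slots[i] = NULL;
                }
        }
}

int main(void)
{
        char *slots[NUM_SLOTS] = { malloc(16), NULL, malloc(16) };

        release_all(slots);
        return 0;
}
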
2150 +diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
2151 +index 63b9ba0..bbd6e30 100644
2152 +--- a/drivers/net/atlx/atl1.c
2153 ++++ b/drivers/net/atlx/atl1.c
2154 +@@ -2847,10 +2847,11 @@ static int atl1_resume(struct pci_dev *pdev)
2155 + pci_enable_wake(pdev, PCI_D3cold, 0);
2156 +
2157 + atl1_reset_hw(&adapter->hw);
2158 +- adapter->cmb.cmb->int_stats = 0;
2159 +
2160 +- if (netif_running(netdev))
2161 ++ if (netif_running(netdev)) {
2162 ++ adapter->cmb.cmb->int_stats = 0;
2163 + atl1_up(adapter);
2164 ++ }
2165 + netif_device_attach(netdev);
2166 +
2167 + return 0;
2168 +diff --git a/drivers/net/b44.c b/drivers/net/b44.c
2169 +index 293f9c1..4173759 100644
2170 +--- a/drivers/net/b44.c
2171 ++++ b/drivers/net/b44.c
2172 +@@ -2168,8 +2168,6 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
2173 + dev->irq = sdev->irq;
2174 + SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2175 +
2176 +- netif_carrier_off(dev);
2177 +-
2178 + err = ssb_bus_powerup(sdev->bus, 0);
2179 + if (err) {
2180 + dev_err(sdev->dev,
2181 +@@ -2209,6 +2207,8 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
2182 + goto err_out_powerdown;
2183 + }
2184 +
2185 ++ netif_carrier_off(dev);
2186 ++
2187 + ssb_set_drvdata(sdev, dev);
2188 +
2189 + /* Chip reset provides power to the b44 MAC & PCI cores, which
2190 +diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
2191 +index cdc6a5c..ad9833b 100644
2192 +--- a/drivers/net/r8169.c
2193 ++++ b/drivers/net/r8169.c
2194 +@@ -4008,7 +4008,7 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
2195 + static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
2196 + struct net_device *dev,
2197 + struct RxDesc *desc, int rx_buf_sz,
2198 +- unsigned int align)
2199 ++ unsigned int align, gfp_t gfp)
2200 + {
2201 + struct sk_buff *skb;
2202 + dma_addr_t mapping;
2203 +@@ -4016,7 +4016,7 @@ static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
2204 +
2205 + pad = align ? align : NET_IP_ALIGN;
2206 +
2207 +- skb = netdev_alloc_skb(dev, rx_buf_sz + pad);
2208 ++ skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp);
2209 + if (!skb)
2210 + goto err_out;
2211 +
2212 +@@ -4047,7 +4047,7 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp)
2213 + }
2214 +
2215 + static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
2216 +- u32 start, u32 end)
2217 ++ u32 start, u32 end, gfp_t gfp)
2218 + {
2219 + u32 cur;
2220 +
2221 +@@ -4062,7 +4062,7 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
2222 +
2223 + skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
2224 + tp->RxDescArray + i,
2225 +- tp->rx_buf_sz, tp->align);
2226 ++ tp->rx_buf_sz, tp->align, gfp);
2227 + if (!skb)
2228 + break;
2229 +
2230 +@@ -4090,7 +4090,7 @@ static int rtl8169_init_ring(struct net_device *dev)
2231 + memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
2232 + memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
2233 +
2234 +- if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
2235 ++ if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC)
2236 + goto err_out;
2237 +
2238 + rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
2239 +@@ -4591,7 +4591,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
2240 + count = cur_rx - tp->cur_rx;
2241 + tp->cur_rx = cur_rx;
2242 +
2243 +- delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
2244 ++ delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC);
2245 + if (!delta && count)
2246 + netif_info(tp, intr, dev, "no Rx buffer allocated\n");
2247 + tp->dirty_rx += delta;
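
The r8169 hunks thread an allocation-context argument through the receive-ring helpers: the initial fill at ring setup may sleep (GFP_KERNEL), while the refill done from the receive interrupt must not (GFP_ATOMIC), so the helper no longer hard-codes the choice. A standalone sketch of passing a may-sleep flag down through an allocation helper; the enum here is illustrative, where the kernel passes a gfp_t:

#include <stdio.h>
#include <stdlib.h>

enum alloc_ctx { CTX_MAY_SLEEP, CTX_ATOMIC };

/* The helper no longer decides the allocation context itself; the caller
 * that knows whether sleeping is allowed passes it down. */
static void *alloc_rx_buffer(size_t size, enum alloc_ctx ctx)
{
        printf("allocating %zu bytes (%s)\n", size,
               ctx == CTX_ATOMIC ? "atomic" : "may sleep");
        return malloc(size);
}

static int fill_ring(int count, enum alloc_ctx ctx)
{
        int filled = 0;

        for (int i = 0; i < count; i++) {
                void *buf = alloc_rx_buffer(2048, ctx);
                if (!buf)
                        break;
                free(buf);              /* a real ring would keep it */
                filled++;
        }
        return filled;
}

int main(void)
{
        fill_ring(2, CTX_MAY_SLEEP);    /* device open: sleeping is fine    */
        fill_ring(1, CTX_ATOMIC);       /* interrupt refill: must not sleep */
        return 0;
}
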
2248 +diff --git a/drivers/net/skge.c b/drivers/net/skge.c
2249 +index 40e5c46..465ae7e 100644
2250 +--- a/drivers/net/skge.c
2251 ++++ b/drivers/net/skge.c
2252 +@@ -43,6 +43,7 @@
2253 + #include <linux/seq_file.h>
2254 + #include <linux/mii.h>
2255 + #include <linux/slab.h>
2256 ++#include <linux/dmi.h>
2257 + #include <asm/irq.h>
2258 +
2259 + #include "skge.h"
2260 +@@ -3868,6 +3869,8 @@ static void __devinit skge_show_addr(struct net_device *dev)
2261 + netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
2262 + }
2263 +
2264 ++static int only_32bit_dma;
2265 ++
2266 + static int __devinit skge_probe(struct pci_dev *pdev,
2267 + const struct pci_device_id *ent)
2268 + {
2269 +@@ -3889,7 +3892,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
2270 +
2271 + pci_set_master(pdev);
2272 +
2273 +- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2274 ++ if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2275 + using_dac = 1;
2276 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2277 + } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
2278 +@@ -4147,8 +4150,21 @@ static struct pci_driver skge_driver = {
2279 + .shutdown = skge_shutdown,
2280 + };
2281 +
2282 ++static struct dmi_system_id skge_32bit_dma_boards[] = {
2283 ++ {
2284 ++ .ident = "Gigabyte nForce boards",
2285 ++ .matches = {
2286 ++ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
2287 ++ DMI_MATCH(DMI_BOARD_NAME, "nForce"),
2288 ++ },
2289 ++ },
2290 ++ {}
2291 ++};
2292 ++
2293 + static int __init skge_init_module(void)
2294 + {
2295 ++ if (dmi_check_system(skge_32bit_dma_boards))
2296 ++ only_32bit_dma = 1;
2297 + skge_debug_init();
2298 + return pci_register_driver(&skge_driver);
2299 + }
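
The skge change consults a board match table once at module init and, on the listed Gigabyte nForce boards, skips the 64-bit DMA mask entirely. A standalone sketch of the same look-up-once-then-flag pattern; the matcher here is a simple prefix comparison, whereas the kernel uses dmi_check_system() with DMI_MATCH entries:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct board_quirk {
        const char *vendor_prefix;
        const char *name_prefix;
};

/* Boards known to need the quirk; this sketch matches by prefix. */
static const struct board_quirk only_32bit_dma_boards[] = {
        { "Gigabyte Technology Co", "nForce" },
        { NULL, NULL }
};

static bool board_matches(const char *vendor, const char *name)
{
        for (const struct board_quirk *q = only_32bit_dma_boards;
             q->vendor_prefix; q++) {
                if (!strncmp(vendor, q->vendor_prefix, strlen(q->vendor_prefix)) &&
                    !strncmp(name, q->name_prefix, strlen(q->name_prefix)))
                        return true;
        }
        return false;
}

int main(void)
{
        bool only_32bit = board_matches("Gigabyte Technology Co., Ltd.",
                                        "nForce 430");

        printf("restrict to 32-bit DMA: %s\n", only_32bit ? "yes" : "no");
        return 0;
}
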
2300 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2301 +index e28524e..2a73ceb 100644
2302 +--- a/drivers/pci/quirks.c
2303 ++++ b/drivers/pci/quirks.c
2304 +@@ -150,6 +150,26 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_d
2305 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
2306 +
2307 + /*
2308 ++ * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
2309 ++ * for some HT machines to use C4 w/o hanging.
2310 ++ */
2311 ++static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev)
2312 ++{
2313 ++ u32 pmbase;
2314 ++ u16 pm1a;
2315 ++
2316 ++ pci_read_config_dword(dev, 0x40, &pmbase);
2317 ++ pmbase = pmbase & 0xff80;
2318 ++ pm1a = inw(pmbase);
2319 ++
2320 ++ if (pm1a & 0x10) {
2321 ++ dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
2322 ++ outw(0x10, pmbase);
2323 ++ }
2324 ++}
2325 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
2326 ++
2327 ++/*
2328 + * Chipsets where PCI->PCI transfers vanish or hang
2329 + */
2330 + static void __devinit quirk_nopcipci(struct pci_dev *dev)
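
The new PCI quirk derives the ACPI PM I/O base from config offset 0x40, reads the PM1a status word, and if the bus-master status bit (0x10) is set writes that bit back, since PM1 status bits are cleared by writing a one to them. A tiny standalone illustration of the write-one-to-clear idea on an in-memory value, with inw()/outw() on the real port replaced by a plain variable:

#include <stdint.h>
#include <stdio.h>

#define BM_STS 0x10     /* bit 4 of PM1 status, as tested in the hunk above */

int main(void)
{
        uint16_t pm1_status = 0x0013;   /* pretend bits 4, 1 and 0 are set */

        if (pm1_status & BM_STS) {
                /* Writing a 1 to a write-one-to-clear bit clears only that
                 * bit; simulated here by masking it out of the variable. */
                pm1_status &= (uint16_t)~BM_STS;
                printf("BM_STS cleared\n");
        }

        printf("status now 0x%04x\n", pm1_status);
        return 0;
}
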
2331 +diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
2332 +index ad0ed21..348fba0 100644
2333 +--- a/drivers/scsi/scsi.c
2334 ++++ b/drivers/scsi/scsi.c
2335 +@@ -1046,13 +1046,13 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
2336 +
2337 + /* If the user actually wanted this page, we can skip the rest */
2338 + if (page == 0)
2339 +- return -EINVAL;
2340 ++ return 0;
2341 +
2342 + for (i = 0; i < min((int)buf[3], buf_len - 4); i++)
2343 + if (buf[i + 4] == page)
2344 + goto found;
2345 +
2346 +- if (i < buf[3] && i > buf_len)
2347 ++ if (i < buf[3] && i >= buf_len - 4)
2348 + /* ran off the end of the buffer, give us benefit of doubt */
2349 + goto found;
2350 + /* The device claims it doesn't support the requested page */
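
The scsi_get_vpd_page() fix aligns the "ran off the end of the buffer" test with the loop bound actually used: the scan looks at no more than buf_len - 4 entries, so truncation is detected with i >= buf_len - 4 (the old i > buf_len could not trigger), and asking for page 0 now succeeds once the header has been fetched. A standalone sketch of scanning a length-limited list while distinguishing "not present" from "our buffer was too small to tell":

#include <stdbool.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Scan the first list_len entries, but never past what fits in our buffer.
 * Returns true if the value was found OR if we stopped early because the
 * buffer was too small to see the whole list (benefit of the doubt). */
static bool page_supported(const unsigned char *buf, int buf_len,
                           unsigned char page)
{
        int list_len = buf[3];          /* length byte, as in the VPD header */
        int i;

        for (i = 0; i < MIN(list_len, buf_len - 4); i++)
                if (buf[i + 4] == page)
                        return true;

        return i < list_len && i >= buf_len - 4;    /* truncated scan */
}

int main(void)
{
        unsigned char vpd[8] = { 0, 0, 0, 4, 0x00, 0x80, 0x83, 0xb0 };

        printf("page 0x83: %d\n", page_supported(vpd, sizeof(vpd), 0x83)); /* 1 */
        printf("page 0xb1: %d\n", page_supported(vpd, sizeof(vpd), 0xb1)); /* 0 */
        printf("page 0xb1 (short buffer): %d\n",
               page_supported(vpd, 6, 0xb1));       /* 1: list was cut off */
        return 0;
}
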
2351 +diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
2352 +index f06f5db..1e6ccef 100644
2353 +--- a/drivers/usb/core/file.c
2354 ++++ b/drivers/usb/core/file.c
2355 +@@ -159,9 +159,9 @@ void usb_major_cleanup(void)
2356 + int usb_register_dev(struct usb_interface *intf,
2357 + struct usb_class_driver *class_driver)
2358 + {
2359 +- int retval = -EINVAL;
2360 ++ int retval;
2361 + int minor_base = class_driver->minor_base;
2362 +- int minor = 0;
2363 ++ int minor;
2364 + char name[20];
2365 + char *temp;
2366 +
2367 +@@ -173,12 +173,17 @@ int usb_register_dev(struct usb_interface *intf,
2368 + */
2369 + minor_base = 0;
2370 + #endif
2371 +- intf->minor = -1;
2372 +-
2373 +- dbg ("looking for a minor, starting at %d", minor_base);
2374 +
2375 + if (class_driver->fops == NULL)
2376 +- goto exit;
2377 ++ return -EINVAL;
2378 ++ if (intf->minor >= 0)
2379 ++ return -EADDRINUSE;
2380 ++
2381 ++ retval = init_usb_class();
2382 ++ if (retval)
2383 ++ return retval;
2384 ++
2385 ++ dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base);
2386 +
2387 + down_write(&minor_rwsem);
2388 + for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
2389 +@@ -186,20 +191,12 @@ int usb_register_dev(struct usb_interface *intf,
2390 + continue;
2391 +
2392 + usb_minors[minor] = class_driver->fops;
2393 +-
2394 +- retval = 0;
2395 ++ intf->minor = minor;
2396 + break;
2397 + }
2398 + up_write(&minor_rwsem);
2399 +-
2400 +- if (retval)
2401 +- goto exit;
2402 +-
2403 +- retval = init_usb_class();
2404 +- if (retval)
2405 +- goto exit;
2406 +-
2407 +- intf->minor = minor;
2408 ++ if (intf->minor < 0)
2409 ++ return -EXFULL;
2410 +
2411 + /* create a usb class device for this usb interface */
2412 + snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
2413 +@@ -213,11 +210,11 @@ int usb_register_dev(struct usb_interface *intf,
2414 + "%s", temp);
2415 + if (IS_ERR(intf->usb_dev)) {
2416 + down_write(&minor_rwsem);
2417 +- usb_minors[intf->minor] = NULL;
2418 ++ usb_minors[minor] = NULL;
2419 ++ intf->minor = -1;
2420 + up_write(&minor_rwsem);
2421 + retval = PTR_ERR(intf->usb_dev);
2422 + }
2423 +-exit:
2424 + return retval;
2425 + }
2426 + EXPORT_SYMBOL_GPL(usb_register_dev);
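
The usb_register_dev() rework does its cheap checks first (no fops, minor already assigned), initializes the USB class before touching the minor table, claims the minor and records intf->minor inside the same write-locked section, and rolls both back if creating the class device fails. A standalone sketch of the claim-under-lock-then-roll-back pattern, with a pthread rwlock standing in for minor_rwsem and an illustrative slot table:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define MAX_SLOTS 4

static const void *slot_table[MAX_SLOTS];
static pthread_rwlock_t slot_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Claim a free slot under the write lock; return its index or -ENOSPC. */
static int claim_slot(const void *owner)
{
        int slot = -ENOSPC;

        pthread_rwlock_wrlock(&slot_lock);
        for (int i = 0; i < MAX_SLOTS; i++) {
                if (!slot_table[i]) {
                        slot_table[i] = owner;
                        slot = i;
                        break;
                }
        }
        pthread_rwlock_unlock(&slot_lock);
        return slot;
}

/* Undo the claim if a later setup step fails. */
static void release_slot(int slot)
{
        pthread_rwlock_wrlock(&slot_lock);
        slot_table[slot] = NULL;
        pthread_rwlock_unlock(&slot_lock);
}

int main(void)
{
        int owner = 1;
        int slot = claim_slot(&owner);

        if (slot < 0)
                return 1;
        printf("claimed slot %d\n", slot);

        /* pretend the next setup step failed, so roll back */
        release_slot(slot);
        printf("rolled back slot %d\n", slot);
        return 0;
}
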
2427 +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
2428 +index 844683e..9f0ce7d 100644
2429 +--- a/drivers/usb/core/message.c
2430 ++++ b/drivers/usb/core/message.c
2431 +@@ -1802,6 +1802,7 @@ free_interfaces:
2432 + intf->dev.groups = usb_interface_groups;
2433 + intf->dev.dma_mask = dev->dev.dma_mask;
2434 + INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
2435 ++ intf->minor = -1;
2436 + device_initialize(&intf->dev);
2437 + dev_set_name(&intf->dev, "%d-%s:%d.%d",
2438 + dev->bus->busnum, dev->devpath,
2439 +diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
2440 +index 6fca870..180da4e 100644
2441 +--- a/drivers/usb/musb/musb_gadget.c
2442 ++++ b/drivers/usb/musb/musb_gadget.c
2443 +@@ -568,11 +568,19 @@ static void rxstate(struct musb *musb, struct musb_request *req)
2444 + {
2445 + const u8 epnum = req->epnum;
2446 + struct usb_request *request = &req->request;
2447 +- struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
2448 ++ struct musb_ep *musb_ep;
2449 + void __iomem *epio = musb->endpoints[epnum].regs;
2450 + unsigned fifo_count = 0;
2451 +- u16 len = musb_ep->packet_sz;
2452 ++ u16 len;
2453 + u16 csr = musb_readw(epio, MUSB_RXCSR);
2454 ++ struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
2455 ++
2456 ++ if (hw_ep->is_shared_fifo)
2457 ++ musb_ep = &hw_ep->ep_in;
2458 ++ else
2459 ++ musb_ep = &hw_ep->ep_out;
2460 ++
2461 ++ len = musb_ep->packet_sz;
2462 +
2463 + /* We shouldn't get here while DMA is active, but we do... */
2464 + if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
2465 +@@ -740,9 +748,15 @@ void musb_g_rx(struct musb *musb, u8 epnum)
2466 + u16 csr;
2467 + struct usb_request *request;
2468 + void __iomem *mbase = musb->mregs;
2469 +- struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
2470 ++ struct musb_ep *musb_ep;
2471 + void __iomem *epio = musb->endpoints[epnum].regs;
2472 + struct dma_channel *dma;
2473 ++ struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
2474 ++
2475 ++ if (hw_ep->is_shared_fifo)
2476 ++ musb_ep = &hw_ep->ep_in;
2477 ++ else
2478 ++ musb_ep = &hw_ep->ep_out;
2479 +
2480 + musb_ep_select(mbase, epnum);
2481 +
2482 +@@ -1081,7 +1095,7 @@ struct free_record {
2483 + /*
2484 + * Context: controller locked, IRQs blocked.
2485 + */
2486 +-static void musb_ep_restart(struct musb *musb, struct musb_request *req)
2487 ++void musb_ep_restart(struct musb *musb, struct musb_request *req)
2488 + {
2489 + DBG(3, "<== %s request %p len %u on hw_ep%d\n",
2490 + req->tx ? "TX/IN" : "RX/OUT",
2491 +diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
2492 +index c8b1403..572b1da 100644
2493 +--- a/drivers/usb/musb/musb_gadget.h
2494 ++++ b/drivers/usb/musb/musb_gadget.h
2495 +@@ -105,4 +105,6 @@ extern void musb_gadget_cleanup(struct musb *);
2496 +
2497 + extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
2498 +
2499 ++extern void musb_ep_restart(struct musb *, struct musb_request *);
2500 ++
2501 + #endif /* __MUSB_GADGET_H */
2502 +diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
2503 +index 21b9788..ecae714 100644
2504 +--- a/drivers/usb/musb/musb_gadget_ep0.c
2505 ++++ b/drivers/usb/musb/musb_gadget_ep0.c
2506 +@@ -261,6 +261,7 @@ __acquires(musb->lock)
2507 + ctrlrequest->wIndex & 0x0f;
2508 + struct musb_ep *musb_ep;
2509 + struct musb_hw_ep *ep;
2510 ++ struct musb_request *request;
2511 + void __iomem *regs;
2512 + int is_in;
2513 + u16 csr;
2514 +@@ -302,6 +303,14 @@ __acquires(musb->lock)
2515 + musb_writew(regs, MUSB_RXCSR, csr);
2516 + }
2517 +
2518 ++ /* Maybe start the first request in the queue */
2519 ++ request = to_musb_request(
2520 ++ next_request(musb_ep));
2521 ++ if (!musb_ep->busy && request) {
2522 ++ DBG(3, "restarting the request\n");
2523 ++ musb_ep_restart(musb, request);
2524 ++ }
2525 ++
2526 + /* select ep0 again */
2527 + musb_ep_select(mbase, 0);
2528 + } break;
2529 +diff --git a/fs/exec.c b/fs/exec.c
2530 +index e19de6a..956bd37 100644
2531 +--- a/fs/exec.c
2532 ++++ b/fs/exec.c
2533 +@@ -377,6 +377,9 @@ static int count(char __user * __user * argv, int max)
2534 + argv++;
2535 + if (i++ >= max)
2536 + return -E2BIG;
2537 ++
2538 ++ if (fatal_signal_pending(current))
2539 ++ return -ERESTARTNOHAND;
2540 + cond_resched();
2541 + }
2542 + }
2543 +@@ -420,6 +423,12 @@ static int copy_strings(int argc, char __user * __user * argv,
2544 + while (len > 0) {
2545 + int offset, bytes_to_copy;
2546 +
2547 ++ if (fatal_signal_pending(current)) {
2548 ++ ret = -ERESTARTNOHAND;
2549 ++ goto out;
2550 ++ }
2551 ++ cond_resched();
2552 ++
2553 + offset = pos % PAGE_SIZE;
2554 + if (offset == 0)
2555 + offset = PAGE_SIZE;
2556 +@@ -594,6 +603,11 @@ int setup_arg_pages(struct linux_binprm *bprm,