Gentoo Archives: gentoo-commits

From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1767 - genpatches-2.6/trunk/2.6.35
Date: Fri, 27 Aug 2010 01:02:21
Message-Id: 20100827010205.AB5E420051@flycatcher.gentoo.org
1 Author: mpagano
2 Date: 2010-08-27 01:02:05 +0000 (Fri, 27 Aug 2010)
3 New Revision: 1767
4
5 Added:
6 genpatches-2.6/trunk/2.6.35/1003_linux-2.6.35.4.patch
7 Modified:
8 genpatches-2.6/trunk/2.6.35/0000_README
9 Log:
10 Linux patch 2.6.35.4
11
12 Modified: genpatches-2.6/trunk/2.6.35/0000_README
13 ===================================================================
14 --- genpatches-2.6/trunk/2.6.35/0000_README 2010-08-25 13:21:51 UTC (rev 1766)
15 +++ genpatches-2.6/trunk/2.6.35/0000_README 2010-08-27 01:02:05 UTC (rev 1767)
16 @@ -47,6 +47,14 @@
17 From: http://www.kernel.org
18 Desc: Linux 2.6.35.2
19
20 +Patch: 1002_linux-2.6.35.3.patch
21 +From: http://www.kernel.org
22 +Desc: Linux 2.6.35.3
23 +
24 +Patch: 1003_linux-2.6.35.4.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 2.6.35.4
27 +
28 Patch: 1800_page-table-unmap-for-stack-guard-fix.patch
29 From: https://bugzilla.kernel.org/show_bug.cgi?id=16588
30 Desc: Fix page table unmap for stack guard page properly
31
32 Added: genpatches-2.6/trunk/2.6.35/1003_linux-2.6.35.4.patch
33 ===================================================================
34 --- genpatches-2.6/trunk/2.6.35/1003_linux-2.6.35.4.patch (rev 0)
35 +++ genpatches-2.6/trunk/2.6.35/1003_linux-2.6.35.4.patch 2010-08-27 01:02:05 UTC (rev 1767)
36 @@ -0,0 +1,4732 @@
37 +diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
38 +index 9dcb11e..bf62c44 100644
39 +--- a/arch/arm/include/asm/ptrace.h
40 ++++ b/arch/arm/include/asm/ptrace.h
41 +@@ -158,15 +158,24 @@ struct pt_regs {
42 + */
43 + static inline int valid_user_regs(struct pt_regs *regs)
44 + {
45 +- if (user_mode(regs) && (regs->ARM_cpsr & PSR_I_BIT) == 0) {
46 +- regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
47 +- return 1;
48 ++ unsigned long mode = regs->ARM_cpsr & MODE_MASK;
49 ++
50 ++ /*
51 ++ * Always clear the F (FIQ) and A (delayed abort) bits
52 ++ */
53 ++ regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
54 ++
55 ++ if ((regs->ARM_cpsr & PSR_I_BIT) == 0) {
56 ++ if (mode == USR_MODE)
57 ++ return 1;
58 ++ if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE)
59 ++ return 1;
60 + }
61 +
62 + /*
63 + * Force CPSR to something logical...
64 + */
65 +- regs->ARM_cpsr &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | PSR_T_BIT | MODE32_BIT;
66 ++ regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
67 + if (!(elf_hwcap & HWCAP_26BIT))
68 + regs->ARM_cpsr |= USR_MODE;
69 +
70 +diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
71 +index 827cbc4..ea9ee4e 100644
72 +--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
73 ++++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
74 +@@ -100,6 +100,7 @@ ixdp425_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
75 +
76 + static struct platform_nand_data ixdp425_flash_nand_data = {
77 + .chip = {
78 ++ .nr_chips = 1,
79 + .chip_delay = 30,
80 + .options = NAND_NO_AUTOINCR,
81 + #ifdef CONFIG_MTD_PARTITIONS
82 +diff --git a/arch/arm/mach-mx3/mach-qong.c b/arch/arm/mach-mx3/mach-qong.c
83 +index e5b5b83..1f9363f 100644
84 +--- a/arch/arm/mach-mx3/mach-qong.c
85 ++++ b/arch/arm/mach-mx3/mach-qong.c
86 +@@ -169,6 +169,7 @@ static void qong_nand_select_chip(struct mtd_info *mtd, int chip)
87 +
88 + static struct platform_nand_data qong_nand_data = {
89 + .chip = {
90 ++ .nr_chips = 1,
91 + .chip_delay = 20,
92 + .options = 0,
93 + },
94 +diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
95 +index 5041d1b..696b1a9 100644
96 +--- a/arch/arm/mach-orion5x/ts78xx-setup.c
97 ++++ b/arch/arm/mach-orion5x/ts78xx-setup.c
98 +@@ -216,6 +216,7 @@ static struct mtd_partition ts78xx_ts_nand_parts[] = {
99 +
100 + static struct platform_nand_data ts78xx_ts_nand_data = {
101 + .chip = {
102 ++ .nr_chips = 1,
103 + .part_probe_types = ts_nand_part_probes,
104 + .partitions = ts78xx_ts_nand_parts,
105 + .nr_partitions = ARRAY_SIZE(ts78xx_ts_nand_parts),
106 +diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
107 +index 9eaf5b0..68a27bc 100644
108 +--- a/arch/blackfin/mach-bf537/boards/stamp.c
109 ++++ b/arch/blackfin/mach-bf537/boards/stamp.c
110 +@@ -400,6 +400,7 @@ static int bfin_plat_nand_dev_ready(struct mtd_info *mtd)
111 +
112 + static struct platform_nand_data bfin_plat_nand_data = {
113 + .chip = {
114 ++ .nr_chips = 1,
115 + .chip_delay = 30,
116 + #ifdef CONFIG_MTD_PARTITIONS
117 + .part_probe_types = part_probes,
118 +diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
119 +index bfcfa86..35b6d12 100644
120 +--- a/arch/blackfin/mach-bf561/boards/acvilon.c
121 ++++ b/arch/blackfin/mach-bf561/boards/acvilon.c
122 +@@ -284,6 +284,7 @@ static int bfin_plat_nand_dev_ready(struct mtd_info *mtd)
123 +
124 + static struct platform_nand_data bfin_plat_nand_data = {
125 + .chip = {
126 ++ .nr_chips = 1,
127 + .chip_delay = 30,
128 + #ifdef CONFIG_MTD_PARTITIONS
129 + .part_probe_types = part_probes,
130 +diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
131 +index 5d2f17d..b2e3635 100644
132 +--- a/arch/powerpc/Makefile
133 ++++ b/arch/powerpc/Makefile
134 +@@ -164,7 +164,7 @@ drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
135 + all: zImage
136 +
137 + # With make 3.82 we cannot mix normal and wildcard targets
138 +-BOOT_TARGETS1 := zImage zImage.initrd uImaged
139 ++BOOT_TARGETS1 := zImage zImage.initrd uImage
140 + BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
141 +
142 + PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
143 +diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
144 +index 2050ca0..bdb2ff8 100644
145 +--- a/arch/sparc/include/asm/atomic_64.h
146 ++++ b/arch/sparc/include/asm/atomic_64.h
147 +@@ -20,14 +20,14 @@
148 + #define atomic64_set(v, i) (((v)->counter) = i)
149 +
150 + extern void atomic_add(int, atomic_t *);
151 +-extern void atomic64_add(int, atomic64_t *);
152 ++extern void atomic64_add(long, atomic64_t *);
153 + extern void atomic_sub(int, atomic_t *);
154 +-extern void atomic64_sub(int, atomic64_t *);
155 ++extern void atomic64_sub(long, atomic64_t *);
156 +
157 + extern int atomic_add_ret(int, atomic_t *);
158 +-extern int atomic64_add_ret(int, atomic64_t *);
159 ++extern long atomic64_add_ret(long, atomic64_t *);
160 + extern int atomic_sub_ret(int, atomic_t *);
161 +-extern int atomic64_sub_ret(int, atomic64_t *);
162 ++extern long atomic64_sub_ret(long, atomic64_t *);
163 +
164 + #define atomic_dec_return(v) atomic_sub_ret(1, v)
165 + #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
166 +@@ -91,7 +91,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
167 + ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
168 + #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
169 +
170 +-static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
171 ++static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
172 + {
173 + long c, old;
174 + c = atomic64_read(v);
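The widening above matters because, with the old int prototypes, a 64-bit addend is silegally truncated at the call boundary before the atomic op ever runs. A minimal user-space sketch of the effect (plain functions stand in for the sparc atomic64 primitives; assumes an LP64 ABI with the usual two's-complement int conversion):

#include <stdio.h>

static void add_int(int i, long *v)   { *v += i; }  /* old prototype shape */
static void add_long(long i, long *v) { *v += i; }  /* fixed prototype shape */

int main(void)
{
	long a = 0, b = 0;

	add_int(1L << 32, &a);   /* addend truncated to 0 before the add */
	add_long(1L << 32, &b);  /* full 64-bit addend survives */

	printf("int param:  %ld\n", a);  /* prints 0 */
	printf("long param: %ld\n", b);  /* prints 4294967296 */
	return 0;
}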
175 +diff --git a/arch/sparc/include/asm/fb.h b/arch/sparc/include/asm/fb.h
176 +index e834880..2173432 100644
177 +--- a/arch/sparc/include/asm/fb.h
178 ++++ b/arch/sparc/include/asm/fb.h
179 +@@ -1,5 +1,6 @@
180 + #ifndef _SPARC_FB_H_
181 + #define _SPARC_FB_H_
182 ++#include <linux/console.h>
183 + #include <linux/fb.h>
184 + #include <linux/fs.h>
185 + #include <asm/page.h>
186 +@@ -18,6 +19,9 @@ static inline int fb_is_primary_device(struct fb_info *info)
187 + struct device *dev = info->device;
188 + struct device_node *node;
189 +
190 ++ if (console_set_on_cmdline)
191 ++ return 0;
192 ++
193 + node = dev->of_node;
194 + if (node &&
195 + node == of_console_device)
196 +diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
197 +index c333b8d..d21ad50 100644
198 +--- a/arch/sparc/include/asm/parport.h
199 ++++ b/arch/sparc/include/asm/parport.h
200 +@@ -228,6 +228,10 @@ static const struct of_device_id ecpp_match[] = {
201 + .name = "parallel",
202 + .compatible = "ns87317-ecpp",
203 + },
204 ++ {
205 ++ .name = "parallel",
206 ++ .compatible = "pnpALI,1533,3",
207 ++ },
208 + {},
209 + };
210 +
211 +diff --git a/arch/sparc/include/asm/rwsem-const.h b/arch/sparc/include/asm/rwsem-const.h
212 +index a303c9d..e4c61a1 100644
213 +--- a/arch/sparc/include/asm/rwsem-const.h
214 ++++ b/arch/sparc/include/asm/rwsem-const.h
215 +@@ -5,7 +5,7 @@
216 + #define RWSEM_UNLOCKED_VALUE 0x00000000
217 + #define RWSEM_ACTIVE_BIAS 0x00000001
218 + #define RWSEM_ACTIVE_MASK 0x0000ffff
219 +-#define RWSEM_WAITING_BIAS 0xffff0000
220 ++#define RWSEM_WAITING_BIAS (-0x00010000)
221 + #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
222 + #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
223 +
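The constant change above is a sign fix: written as an unsigned 32-bit literal, 0xffff0000 is not the negative bias the rwsem arithmetic expects, and on a 64-bit build it zero-extends instead of sign-extending. A quick user-space check (illustrative only, assumes 64-bit long):

#include <stdio.h>

int main(void)
{
	long old_bias = 0xffff0000;   /* unsigned literal: zero-extends to 4294901760 */
	long new_bias = -0x00010000;  /* stays -65536 at any width */

	printf("old waiting bias: %ld\n", old_bias);
	printf("new waiting bias: %ld\n", new_bias);
	printf("old write bias:   %ld\n", old_bias + 1);  /* huge positive value */
	printf("new write bias:   %ld\n", new_bias + 1);  /* -65535, as intended */
	return 0;
}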
224 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
225 +index dcb0593..f942bb7 100644
226 +--- a/arch/x86/Kconfig
227 ++++ b/arch/x86/Kconfig
228 +@@ -247,6 +247,11 @@ config ARCH_HWEIGHT_CFLAGS
229 +
230 + config KTIME_SCALAR
231 + def_bool X86_32
232 ++
233 ++config ARCH_CPU_PROBE_RELEASE
234 ++ def_bool y
235 ++ depends on HOTPLUG_CPU
236 ++
237 + source "init/Kconfig"
238 + source "kernel/Kconfig.freezer"
239 +
240 +diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
241 +index c1cf59d..20955ea 100644
242 +--- a/arch/x86/include/asm/cmpxchg_32.h
243 ++++ b/arch/x86/include/asm/cmpxchg_32.h
244 +@@ -53,60 +53,33 @@ struct __xchg_dummy {
245 + __xchg((v), (ptr), sizeof(*ptr))
246 +
247 + /*
248 +- * The semantics of XCHGCMP8B are a bit strange, this is why
249 +- * there is a loop and the loading of %%eax and %%edx has to
250 +- * be inside. This inlines well in most cases, the cached
251 +- * cost is around ~38 cycles. (in the future we might want
252 +- * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
253 +- * might have an implicit FPU-save as a cost, so it's not
254 +- * clear which path to go.)
255 ++ * CMPXCHG8B only writes to the target if we had the previous
256 ++ * value in registers; otherwise it acts as a read and gives us the
257 ++ * "new previous" value. That is why there is a loop. Preloading
258 ++ * EDX:EAX is a performance optimization: in the common case it means
259 ++ * we need only one locked operation.
260 + *
261 +- * cmpxchg8b must be used with the lock prefix here to allow
262 +- * the instruction to be executed atomically, see page 3-102
263 +- * of the instruction set reference 24319102.pdf. We need
264 +- * the reader side to see the coherent 64bit value.
265 ++ * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
266 ++ * least an FPU save and/or %cr0.ts manipulation.
267 ++ *
268 ++ * cmpxchg8b must be used with the lock prefix here to allow the
269 ++ * instruction to be executed atomically. We need the reader
270 ++ * side to see the coherent 64bit value.
271 + */
272 +-static inline void __set_64bit(unsigned long long *ptr,
273 +- unsigned int low, unsigned int high)
274 ++static inline void set_64bit(volatile u64 *ptr, u64 value)
275 + {
276 ++ u32 low = value;
277 ++ u32 high = value >> 32;
278 ++ u64 prev = *ptr;
279 ++
280 + asm volatile("\n1:\t"
281 +- "movl (%1), %%eax\n\t"
282 +- "movl 4(%1), %%edx\n\t"
283 +- LOCK_PREFIX "cmpxchg8b (%1)\n\t"
284 ++ LOCK_PREFIX "cmpxchg8b %0\n\t"
285 + "jnz 1b"
286 +- : "=m" (*ptr)
287 +- : "D" (ptr),
288 +- "b" (low),
289 +- "c" (high)
290 +- : "ax", "dx", "memory");
291 +-}
292 +-
293 +-static inline void __set_64bit_constant(unsigned long long *ptr,
294 +- unsigned long long value)
295 +-{
296 +- __set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
297 +-}
298 +-
299 +-#define ll_low(x) *(((unsigned int *)&(x)) + 0)
300 +-#define ll_high(x) *(((unsigned int *)&(x)) + 1)
301 +-
302 +-static inline void __set_64bit_var(unsigned long long *ptr,
303 +- unsigned long long value)
304 +-{
305 +- __set_64bit(ptr, ll_low(value), ll_high(value));
306 ++ : "=m" (*ptr), "+A" (prev)
307 ++ : "b" (low), "c" (high)
308 ++ : "memory");
309 + }
310 +
311 +-#define set_64bit(ptr, value) \
312 +- (__builtin_constant_p((value)) \
313 +- ? __set_64bit_constant((ptr), (value)) \
314 +- : __set_64bit_var((ptr), (value)))
315 +-
316 +-#define _set_64bit(ptr, value) \
317 +- (__builtin_constant_p(value) \
318 +- ? __set_64bit(ptr, (unsigned int)(value), \
319 +- (unsigned int)((value) >> 32)) \
320 +- : __set_64bit(ptr, ll_low((value)), ll_high((value))))
321 +-
322 + extern void __cmpxchg_wrong_size(void);
323 +
324 + /*
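The rewritten comment above describes a standard compare-exchange retry loop. The same idea in portable C11, as a sketch rather than the kernel's inline asm:

#include <stdatomic.h>
#include <stdint.h>

/* Keep retrying the 8-byte compare-exchange until the value we believe is
 * current actually matches memory. On failure, the compare-exchange writes
 * the observed value back into 'prev' -- the same "new previous" reload the
 * CMPXCHG8B loop gets in EDX:EAX -- so the common case is one locked op. */
static void set_64bit_sketch(_Atomic uint64_t *ptr, uint64_t value)
{
	uint64_t prev = atomic_load_explicit(ptr, memory_order_relaxed);

	while (!atomic_compare_exchange_weak(ptr, &prev, value))
		;
}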
325 +diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
326 +index b92f147..9596e7c 100644
327 +--- a/arch/x86/include/asm/cmpxchg_64.h
328 ++++ b/arch/x86/include/asm/cmpxchg_64.h
329 +@@ -5,13 +5,11 @@
330 +
331 + #define __xg(x) ((volatile long *)(x))
332 +
333 +-static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
334 ++static inline void set_64bit(volatile u64 *ptr, u64 val)
335 + {
336 + *ptr = val;
337 + }
338 +
339 +-#define _set_64bit set_64bit
340 +-
341 + extern void __xchg_wrong_size(void);
342 + extern void __cmpxchg_wrong_size(void);
343 +
344 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
345 +index a96489e..c07e513 100644
346 +--- a/arch/x86/kernel/apic/apic.c
347 ++++ b/arch/x86/kernel/apic/apic.c
348 +@@ -1606,7 +1606,7 @@ void __init init_apic_mappings(void)
349 + * acpi lapic path already maps that address in
350 + * acpi_register_lapic_address()
351 + */
352 +- if (!acpi_lapic)
353 ++ if (!acpi_lapic && !smp_found_config)
354 + set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
355 +
356 + apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
357 +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
358 +index e41ed24..2b18af1 100644
359 +--- a/arch/x86/kernel/apic/io_apic.c
360 ++++ b/arch/x86/kernel/apic/io_apic.c
361 +@@ -1728,6 +1728,8 @@ __apicdebuginit(void) print_IO_APIC(void)
362 + struct irq_pin_list *entry;
363 +
364 + cfg = desc->chip_data;
365 ++ if (!cfg)
366 ++ continue;
367 + entry = cfg->irq_2_pin;
368 + if (!entry)
369 + continue;
370 +diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
371 +index 214ac86..d8d86d0 100644
372 +--- a/arch/x86/kernel/cpu/perf_event_intel.c
373 ++++ b/arch/x86/kernel/cpu/perf_event_intel.c
374 +@@ -491,33 +491,78 @@ static void intel_pmu_enable_all(int added)
375 + * Intel Errata AAP53 (model 30)
376 + * Intel Errata BD53 (model 44)
377 + *
378 +- * These chips need to be 'reset' when adding counters by programming
379 +- * the magic three (non counting) events 0x4300D2, 0x4300B1 and 0x4300B5
380 +- * either in sequence on the same PMC or on different PMCs.
381 ++ * The official story:
382 ++ * These chips need to be 'reset' when adding counters by programming the
383 ++ * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
384 ++ * in sequence on the same PMC or on different PMCs.
385 ++ *
386 ++ * In practice it appears some of these events do in fact count, and
387 ++ * we need to program all 4 events.
388 + */
389 +-static void intel_pmu_nhm_enable_all(int added)
390 ++static void intel_pmu_nhm_workaround(void)
391 + {
392 +- if (added) {
393 +- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
394 +- int i;
395 ++ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
396 ++ static const unsigned long nhm_magic[4] = {
397 ++ 0x4300B5,
398 ++ 0x4300D2,
399 ++ 0x4300B1,
400 ++ 0x4300B1
401 ++ };
402 ++ struct perf_event *event;
403 ++ int i;
404 ++
405 ++ /*
406 ++ * The erratum requires the following steps:
407 ++ * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
408 ++ * 2) Configure 4 PERFEVTSELx with the magic events and clear
409 ++ * the corresponding PMCx;
410 ++ * 3) Set bits 0 through 3 of MSR_CORE_PERF_GLOBAL_CTRL;
411 ++ * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
412 ++ * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
413 ++ */
414 ++
415 ++ /*
416 ++ * The real steps we choose are a little different from above.
417 ++ * A) To reduce MSR operations, we don't run step 1) as they
418 ++ * are already cleared before this function is called;
419 ++ * B) Call x86_perf_event_update to save PMCx before configuring
420 ++ * PERFEVTSELx with magic number;
421 ++ * C) With step 5), we do clear only when the PERFEVTSELx is
422 ++ * not used currently.
423 ++ * D) Call x86_perf_event_set_period to restore PMCx;
424 ++ */
425 +
426 +- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2);
427 +- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1);
428 +- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5);
429 ++ /* We always operate 4 pairs of PERF Counters */
430 ++ for (i = 0; i < 4; i++) {
431 ++ event = cpuc->events[i];
432 ++ if (event)
433 ++ x86_perf_event_update(event);
434 ++ }
435 +
436 +- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
437 +- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
438 ++ for (i = 0; i < 4; i++) {
439 ++ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
440 ++ wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
441 ++ }
442 +
443 +- for (i = 0; i < 3; i++) {
444 +- struct perf_event *event = cpuc->events[i];
445 ++ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
446 ++ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
447 +
448 +- if (!event)
449 +- continue;
450 ++ for (i = 0; i < 4; i++) {
451 ++ event = cpuc->events[i];
452 +
453 ++ if (event) {
454 ++ x86_perf_event_set_period(event);
455 + __x86_pmu_enable_event(&event->hw,
456 +- ARCH_PERFMON_EVENTSEL_ENABLE);
457 +- }
458 ++ ARCH_PERFMON_EVENTSEL_ENABLE);
459 ++ } else
460 ++ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
461 + }
462 ++}
463 ++
464 ++static void intel_pmu_nhm_enable_all(int added)
465 ++{
466 ++ if (added)
467 ++ intel_pmu_nhm_workaround();
468 + intel_pmu_enable_all(added);
469 + }
470 +
471 +diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
472 +index ae85d69..0ffe19e 100644
473 +--- a/arch/x86/kernel/cpu/perf_event_p4.c
474 ++++ b/arch/x86/kernel/cpu/perf_event_p4.c
475 +@@ -581,6 +581,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
476 + cpuc = &__get_cpu_var(cpu_hw_events);
477 +
478 + for (idx = 0; idx < x86_pmu.num_counters; idx++) {
479 ++ int overflow;
480 +
481 + if (!test_bit(idx, cpuc->active_mask))
482 + continue;
483 +@@ -591,12 +592,14 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
484 + WARN_ON_ONCE(hwc->idx != idx);
485 +
486 + /* it might be unflagged overflow */
487 +- handled = p4_pmu_clear_cccr_ovf(hwc);
488 ++ overflow = p4_pmu_clear_cccr_ovf(hwc);
489 +
490 + val = x86_perf_event_update(event);
491 +- if (!handled && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
492 ++ if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
493 + continue;
494 +
495 ++ handled += overflow;
496 ++
497 + /* event overflow for sure */
498 + data.period = event->hw.last_period;
499 +
500 +@@ -612,7 +615,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
501 + inc_irq_stat(apic_perf_irqs);
502 + }
503 +
504 +- return handled;
505 ++ return handled > 0;
506 + }
507 +
508 + /*
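The change above separates two notions the old code conflated: the per-counter overflow flag and the handler's overall return value. A condensed sketch of the fixed control flow (names hypothetical, not the kernel code):

#include <stdbool.h>

/* Sum the per-counter overflow indications, then collapse the total to a
 * boolean: the NMI core only needs to know whether this PMU claimed the
 * interrupt, even if several counters overflowed at once. */
static bool pmu_handle_irq_sketch(const int *overflow, int ncounters)
{
	int handled = 0;

	for (int i = 0; i < ncounters; i++)
		handled += overflow[i];

	return handled > 0;
}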
509 +diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
510 +index d86dbf7..d7b6f7f 100644
511 +--- a/arch/x86/kernel/mpparse.c
512 ++++ b/arch/x86/kernel/mpparse.c
513 +@@ -274,6 +274,18 @@ static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
514 +
515 + void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }
516 +
517 ++static void __init smp_register_lapic_address(unsigned long address)
518 ++{
519 ++ mp_lapic_addr = address;
520 ++
521 ++ set_fixmap_nocache(FIX_APIC_BASE, address);
522 ++ if (boot_cpu_physical_apicid == -1U) {
523 ++ boot_cpu_physical_apicid = read_apic_id();
524 ++ apic_version[boot_cpu_physical_apicid] =
525 ++ GET_APIC_VERSION(apic_read(APIC_LVR));
526 ++ }
527 ++}
528 ++
529 + static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
530 + {
531 + char str[16];
532 +@@ -295,6 +307,10 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
533 + if (early)
534 + return 1;
535 +
536 ++ /* Initialize the lapic mapping */
537 ++ if (!acpi_lapic)
538 ++ smp_register_lapic_address(mpc->lapic);
539 ++
540 + if (mpc->oemptr)
541 + x86_init.mpparse.smp_read_mpc_oem(mpc);
542 +
543 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
544 +index 11015fd..0bf2ece 100644
545 +--- a/arch/x86/kernel/smpboot.c
546 ++++ b/arch/x86/kernel/smpboot.c
547 +@@ -91,6 +91,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
548 + static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
549 + #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
550 + #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
551 ++
552 ++/*
553 ++ * We need this for trampoline_base protection from concurrent accesses when
554 ++ * off- and onlining cores wildly.
555 ++ */
556 ++static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
557 ++
558 ++void cpu_hotplug_driver_lock()
559 ++{
560 ++ mutex_lock(&x86_cpu_hotplug_driver_mutex);
561 ++}
562 ++
563 ++void cpu_hotplug_driver_unlock()
564 ++{
565 ++ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
566 ++}
567 ++
568 ++ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
569 ++ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
570 + #else
571 + static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
572 + #define get_idle_for_cpu(x) (idle_thread_array[(x)])
573 +diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
574 +index 4a5979a..78ee8e0 100644
575 +--- a/arch/x86/lib/atomic64_386_32.S
576 ++++ b/arch/x86/lib/atomic64_386_32.S
577 +@@ -25,150 +25,170 @@
578 + CFI_ADJUST_CFA_OFFSET -4
579 + .endm
580 +
581 +-.macro BEGIN func reg
582 +-$v = \reg
583 +-
584 +-ENTRY(atomic64_\func\()_386)
585 +- CFI_STARTPROC
586 +- LOCK $v
587 +-
588 +-.macro RETURN
589 +- UNLOCK $v
590 ++#define BEGIN(op) \
591 ++.macro END; \
592 ++ CFI_ENDPROC; \
593 ++ENDPROC(atomic64_##op##_386); \
594 ++.purgem END; \
595 ++.endm; \
596 ++ENTRY(atomic64_##op##_386); \
597 ++ CFI_STARTPROC; \
598 ++ LOCK v;
599 ++
600 ++#define RET \
601 ++ UNLOCK v; \
602 + ret
603 +-.endm
604 +-
605 +-.macro END_
606 +- CFI_ENDPROC
607 +-ENDPROC(atomic64_\func\()_386)
608 +-.purgem RETURN
609 +-.purgem END_
610 +-.purgem END
611 +-.endm
612 +-
613 +-.macro END
614 +-RETURN
615 +-END_
616 +-.endm
617 +-.endm
618 +-
619 +-BEGIN read %ecx
620 +- movl ($v), %eax
621 +- movl 4($v), %edx
622 +-END
623 +-
624 +-BEGIN set %esi
625 +- movl %ebx, ($v)
626 +- movl %ecx, 4($v)
627 +-END
628 +-
629 +-BEGIN xchg %esi
630 +- movl ($v), %eax
631 +- movl 4($v), %edx
632 +- movl %ebx, ($v)
633 +- movl %ecx, 4($v)
634 +-END
635 +-
636 +-BEGIN add %ecx
637 +- addl %eax, ($v)
638 +- adcl %edx, 4($v)
639 +-END
640 +
641 +-BEGIN add_return %ecx
642 +- addl ($v), %eax
643 +- adcl 4($v), %edx
644 +- movl %eax, ($v)
645 +- movl %edx, 4($v)
646 +-END
647 +-
648 +-BEGIN sub %ecx
649 +- subl %eax, ($v)
650 +- sbbl %edx, 4($v)
651 +-END
652 +-
653 +-BEGIN sub_return %ecx
654 ++#define RET_END \
655 ++ RET; \
656 ++ END
657 ++
658 ++#define v %ecx
659 ++BEGIN(read)
660 ++ movl (v), %eax
661 ++ movl 4(v), %edx
662 ++RET_END
663 ++#undef v
664 ++
665 ++#define v %esi
666 ++BEGIN(set)
667 ++ movl %ebx, (v)
668 ++ movl %ecx, 4(v)
669 ++RET_END
670 ++#undef v
671 ++
672 ++#define v %esi
673 ++BEGIN(xchg)
674 ++ movl (v), %eax
675 ++ movl 4(v), %edx
676 ++ movl %ebx, (v)
677 ++ movl %ecx, 4(v)
678 ++RET_END
679 ++#undef v
680 ++
681 ++#define v %ecx
682 ++BEGIN(add)
683 ++ addl %eax, (v)
684 ++ adcl %edx, 4(v)
685 ++RET_END
686 ++#undef v
687 ++
688 ++#define v %ecx
689 ++BEGIN(add_return)
690 ++ addl (v), %eax
691 ++ adcl 4(v), %edx
692 ++ movl %eax, (v)
693 ++ movl %edx, 4(v)
694 ++RET_END
695 ++#undef v
696 ++
697 ++#define v %ecx
698 ++BEGIN(sub)
699 ++ subl %eax, (v)
700 ++ sbbl %edx, 4(v)
701 ++RET_END
702 ++#undef v
703 ++
704 ++#define v %ecx
705 ++BEGIN(sub_return)
706 + negl %edx
707 + negl %eax
708 + sbbl $0, %edx
709 +- addl ($v), %eax
710 +- adcl 4($v), %edx
711 +- movl %eax, ($v)
712 +- movl %edx, 4($v)
713 +-END
714 +-
715 +-BEGIN inc %esi
716 +- addl $1, ($v)
717 +- adcl $0, 4($v)
718 +-END
719 +-
720 +-BEGIN inc_return %esi
721 +- movl ($v), %eax
722 +- movl 4($v), %edx
723 ++ addl (v), %eax
724 ++ adcl 4(v), %edx
725 ++ movl %eax, (v)
726 ++ movl %edx, 4(v)
727 ++RET_END
728 ++#undef v
729 ++
730 ++#define v %esi
731 ++BEGIN(inc)
732 ++ addl $1, (v)
733 ++ adcl $0, 4(v)
734 ++RET_END
735 ++#undef v
736 ++
737 ++#define v %esi
738 ++BEGIN(inc_return)
739 ++ movl (v), %eax
740 ++ movl 4(v), %edx
741 + addl $1, %eax
742 + adcl $0, %edx
743 +- movl %eax, ($v)
744 +- movl %edx, 4($v)
745 +-END
746 +-
747 +-BEGIN dec %esi
748 +- subl $1, ($v)
749 +- sbbl $0, 4($v)
750 +-END
751 +-
752 +-BEGIN dec_return %esi
753 +- movl ($v), %eax
754 +- movl 4($v), %edx
755 ++ movl %eax, (v)
756 ++ movl %edx, 4(v)
757 ++RET_END
758 ++#undef v
759 ++
760 ++#define v %esi
761 ++BEGIN(dec)
762 ++ subl $1, (v)
763 ++ sbbl $0, 4(v)
764 ++RET_END
765 ++#undef v
766 ++
767 ++#define v %esi
768 ++BEGIN(dec_return)
769 ++ movl (v), %eax
770 ++ movl 4(v), %edx
771 + subl $1, %eax
772 + sbbl $0, %edx
773 +- movl %eax, ($v)
774 +- movl %edx, 4($v)
775 +-END
776 ++ movl %eax, (v)
777 ++ movl %edx, 4(v)
778 ++RET_END
779 ++#undef v
780 +
781 +-BEGIN add_unless %ecx
782 ++#define v %ecx
783 ++BEGIN(add_unless)
784 + addl %eax, %esi
785 + adcl %edx, %edi
786 +- addl ($v), %eax
787 +- adcl 4($v), %edx
788 ++ addl (v), %eax
789 ++ adcl 4(v), %edx
790 + cmpl %eax, %esi
791 + je 3f
792 + 1:
793 +- movl %eax, ($v)
794 +- movl %edx, 4($v)
795 ++ movl %eax, (v)
796 ++ movl %edx, 4(v)
797 + movl $1, %eax
798 + 2:
799 +-RETURN
800 ++ RET
801 + 3:
802 + cmpl %edx, %edi
803 + jne 1b
804 + xorl %eax, %eax
805 + jmp 2b
806 +-END_
807 ++END
808 ++#undef v
809 +
810 +-BEGIN inc_not_zero %esi
811 +- movl ($v), %eax
812 +- movl 4($v), %edx
813 ++#define v %esi
814 ++BEGIN(inc_not_zero)
815 ++ movl (v), %eax
816 ++ movl 4(v), %edx
817 + testl %eax, %eax
818 + je 3f
819 + 1:
820 + addl $1, %eax
821 + adcl $0, %edx
822 +- movl %eax, ($v)
823 +- movl %edx, 4($v)
824 ++ movl %eax, (v)
825 ++ movl %edx, 4(v)
826 + movl $1, %eax
827 + 2:
828 +-RETURN
829 ++ RET
830 + 3:
831 + testl %edx, %edx
832 + jne 1b
833 + jmp 2b
834 +-END_
835 ++END
836 ++#undef v
837 +
838 +-BEGIN dec_if_positive %esi
839 +- movl ($v), %eax
840 +- movl 4($v), %edx
841 ++#define v %esi
842 ++BEGIN(dec_if_positive)
843 ++ movl (v), %eax
844 ++ movl 4(v), %edx
845 + subl $1, %eax
846 + sbbl $0, %edx
847 + js 1f
848 +- movl %eax, ($v)
849 +- movl %edx, 4($v)
850 ++ movl %eax, (v)
851 ++ movl %edx, 4(v)
852 + 1:
853 +-END
854 ++RET_END
855 ++#undef v
856 +diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
857 +index b28d2f1..f6b48f6 100644
858 +--- a/arch/x86/oprofile/nmi_int.c
859 ++++ b/arch/x86/oprofile/nmi_int.c
860 +@@ -634,6 +634,18 @@ static int __init ppro_init(char **cpu_type)
861 + if (force_arch_perfmon && cpu_has_arch_perfmon)
862 + return 0;
863 +
864 ++ /*
865 ++ * Documentation on identifying Intel processors by CPU family
866 ++ * and model can be found in the Intel Software Developer's
867 ++ * Manuals (SDM):
868 ++ *
869 ++ * http://www.intel.com/products/processor/manuals/
870 ++ *
871 ++ * As of May 2010 the documentation for this was in the:
872 ++ * "Intel 64 and IA-32 Architectures Software Developer's
873 ++ * Manual Volume 3B: System Programming Guide", "Table B-1
874 ++ * CPUID Signature Values of DisplayFamily_DisplayModel".
875 ++ */
876 + switch (cpu_model) {
877 + case 0 ... 2:
878 + *cpu_type = "i386/ppro";
879 +@@ -655,12 +667,13 @@ static int __init ppro_init(char **cpu_type)
880 + case 15: case 23:
881 + *cpu_type = "i386/core_2";
882 + break;
883 ++ case 0x1a:
884 ++ case 0x1e:
885 + case 0x2e:
886 +- case 26:
887 + spec = &op_arch_perfmon_spec;
888 + *cpu_type = "i386/core_i7";
889 + break;
890 +- case 28:
891 ++ case 0x1c:
892 + *cpu_type = "i386/atom";
893 + break;
894 + default:
895 +diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
896 +index 864dd46..18645f4 100644
897 +--- a/drivers/acpi/apei/erst.c
898 ++++ b/drivers/acpi/apei/erst.c
899 +@@ -33,6 +33,7 @@
900 + #include <linux/uaccess.h>
901 + #include <linux/cper.h>
902 + #include <linux/nmi.h>
903 ++#include <linux/hardirq.h>
904 + #include <acpi/apei.h>
905 +
906 + #include "apei-internal.h"
907 +diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
908 +index a754715..d84af6c 100644
909 +--- a/drivers/char/agp/intel-gtt.c
910 ++++ b/drivers/char/agp/intel-gtt.c
911 +@@ -25,6 +25,10 @@
912 + #define USE_PCI_DMA_API 1
913 + #endif
914 +
915 ++/* Max amount of stolen space; anything above will be returned to Linux */
916 ++int intel_max_stolen = 32 * 1024 * 1024;
917 ++EXPORT_SYMBOL(intel_max_stolen);
918 ++
919 + static const struct aper_size_info_fixed intel_i810_sizes[] =
920 + {
921 + {64, 16384, 4},
922 +@@ -710,7 +714,12 @@ static void intel_i830_init_gtt_entries(void)
923 + break;
924 + }
925 + }
926 +- if (gtt_entries > 0) {
927 ++ if (!local && gtt_entries > intel_max_stolen) {
928 ++ dev_info(&agp_bridge->dev->dev,
929 ++ "detected %dK stolen memory, trimming to %dK\n",
930 ++ gtt_entries / KB(1), intel_max_stolen / KB(1));
931 ++ gtt_entries = intel_max_stolen / KB(4);
932 ++ } else if (gtt_entries > 0) {
933 + dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
934 + gtt_entries / KB(1), local ? "local" : "stolen");
935 + gtt_entries /= KB(4);
936 +diff --git a/drivers/char/mem.c b/drivers/char/mem.c
937 +index f54dab8..a398ecd 100644
938 +--- a/drivers/char/mem.c
939 ++++ b/drivers/char/mem.c
940 +@@ -916,7 +916,7 @@ static int __init chr_dev_init(void)
941 + NULL, devlist[minor].name);
942 + }
943 +
944 +- return 0;
945 ++ return tty_init();
946 + }
947 +
948 + fs_initcall(chr_dev_init);
949 +diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
950 +index d71f0fc..507441a 100644
951 +--- a/drivers/char/tty_io.c
952 ++++ b/drivers/char/tty_io.c
953 +@@ -3128,7 +3128,7 @@ static struct cdev tty_cdev, console_cdev;
954 + * Ok, now we can initialize the rest of the tty devices and can count
955 + * on memory allocations, interrupts etc..
956 + */
957 +-static int __init tty_init(void)
958 ++int __init tty_init(void)
959 + {
960 + cdev_init(&tty_cdev, &tty_fops);
961 + if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
962 +@@ -3149,4 +3149,4 @@ static int __init tty_init(void)
963 + #endif
964 + return 0;
965 + }
966 +-module_init(tty_init);
967 ++
968 +diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
969 +index 4a66201..c9736ed 100644
970 +--- a/drivers/gpu/drm/drm_drv.c
971 ++++ b/drivers/gpu/drm/drm_drv.c
972 +@@ -502,7 +502,9 @@ long drm_ioctl(struct file *filp,
973 + retcode = -EFAULT;
974 + goto err_i1;
975 + }
976 +- }
977 ++ } else
978 ++ memset(kdata, 0, _IOC_SIZE(cmd));
979 ++
980 + if (ioctl->flags & DRM_UNLOCKED)
981 + retcode = func(dev, kdata, file_priv);
982 + else {
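The added memset closes an information leak: for ioctls that copy no data in from userspace, kdata reached the handler uninitialized, and whatever the handler left untouched was later copied back out. A sketch of the general pattern (the function and parameter names here are hypothetical):

#include <string.h>

/* If userspace supplied no input, zero the scratch buffer before the
 * handler runs so a later copy-out cannot expose stale heap contents. */
static int dispatch_sketch(void *kdata, size_t size, int have_user_input,
			   int (*handler)(void *))
{
	if (!have_user_input)
		memset(kdata, 0, size);

	return handler(kdata);
}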
983 +diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
984 +index 2305a12..013a0ae 100644
985 +--- a/drivers/gpu/drm/i915/i915_dma.c
986 ++++ b/drivers/gpu/drm/i915/i915_dma.c
987 +@@ -40,6 +40,8 @@
988 + #include <linux/vga_switcheroo.h>
989 + #include <linux/slab.h>
990 +
991 ++extern int intel_max_stolen; /* from AGP driver */
992 ++
993 + /**
994 + * Sets up the hardware status page for devices that need a physical address
995 + * in the register.
996 +@@ -2104,6 +2106,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
997 + if (ret)
998 + goto out_iomapfree;
999 +
1000 ++ if (prealloc_size > intel_max_stolen) {
1001 ++ DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
1002 ++ prealloc_size >> 20, intel_max_stolen >> 20);
1003 ++ prealloc_size = intel_max_stolen;
1004 ++ }
1005 ++
1006 + dev_priv->wq = create_singlethread_workqueue("i915");
1007 + if (dev_priv->wq == NULL) {
1008 + DRM_ERROR("Failed to create our workqueue.\n");
1009 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1010 +index 8a84306..e9a4b12 100644
1011 +--- a/drivers/gpu/drm/i915/intel_display.c
1012 ++++ b/drivers/gpu/drm/i915/intel_display.c
1013 +@@ -1502,6 +1502,7 @@ static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
1014 + dpa_ctl = I915_READ(DP_A);
1015 + dpa_ctl |= DP_PLL_ENABLE;
1016 + I915_WRITE(DP_A, dpa_ctl);
1017 ++ POSTING_READ(DP_A);
1018 + udelay(200);
1019 + }
1020 +
1021 +@@ -4816,14 +4817,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
1022 + work->pending_flip_obj = obj;
1023 +
1024 + if (intel_crtc->plane)
1025 +- flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
1026 ++ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
1027 + else
1028 +- flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
1029 ++ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
1030 +
1031 +- /* Wait for any previous flip to finish */
1032 +- if (IS_GEN3(dev))
1033 +- while (I915_READ(ISR) & flip_mask)
1034 +- ;
1035 ++ if (IS_GEN3(dev) || IS_GEN2(dev)) {
1036 ++ BEGIN_LP_RING(2);
1037 ++ OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
1038 ++ OUT_RING(0);
1039 ++ ADVANCE_LP_RING();
1040 ++ }
1041 +
1042 + /* Offset into the new buffer for cases of shared fbs between CRTCs */
1043 + offset = obj_priv->gtt_offset;
1044 +@@ -4837,12 +4840,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
1045 + OUT_RING(offset | obj_priv->tiling_mode);
1046 + pipesrc = I915_READ(pipesrc_reg);
1047 + OUT_RING(pipesrc & 0x0fff0fff);
1048 +- } else {
1049 ++ } else if (IS_GEN3(dev)) {
1050 + OUT_RING(MI_DISPLAY_FLIP_I915 |
1051 + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
1052 + OUT_RING(fb->pitch);
1053 + OUT_RING(offset);
1054 + OUT_RING(MI_NOOP);
1055 ++ } else {
1056 ++ OUT_RING(MI_DISPLAY_FLIP |
1057 ++ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
1058 ++ OUT_RING(fb->pitch);
1059 ++ OUT_RING(offset);
1060 ++ OUT_RING(MI_NOOP);
1061 + }
1062 + ADVANCE_LP_RING();
1063 +
1064 +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
1065 +index 10673ae..6bfef51 100644
1066 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c
1067 ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
1068 +@@ -206,6 +206,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
1069 + uint16_t *line_mux,
1070 + struct radeon_hpd *hpd)
1071 + {
1072 ++ struct radeon_device *rdev = dev->dev_private;
1073 +
1074 + /* Asus M2A-VM HDMI board lists the DVI port as HDMI */
1075 + if ((dev->pdev->device == 0x791e) &&
1076 +@@ -308,13 +309,22 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
1077 + }
1078 + }
1079 +
1080 +- /* Acer laptop reports DVI-D as DVI-I */
1081 ++ /* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */
1082 + if ((dev->pdev->device == 0x95c4) &&
1083 + (dev->pdev->subsystem_vendor == 0x1025) &&
1084 + (dev->pdev->subsystem_device == 0x013c)) {
1085 ++ struct radeon_gpio_rec gpio;
1086 ++
1087 + if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
1088 +- (supported_device == ATOM_DEVICE_DFP1_SUPPORT))
1089 ++ (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
1090 ++ gpio = radeon_lookup_gpio(rdev, 6);
1091 ++ *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
1092 + *connector_type = DRM_MODE_CONNECTOR_DVID;
1093 ++ } else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
1094 ++ (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
1095 ++ gpio = radeon_lookup_gpio(rdev, 7);
1096 ++ *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
1097 ++ }
1098 + }
1099 +
1100 + /* XFX Pine Group device rv730 reports no VGA DDC lines
1101 +@@ -1049,7 +1059,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
1102 + }
1103 + break;
1104 + case 2:
1105 +- if (igp_info->info_2.ucMemoryType & 0x0f)
1106 ++ if (igp_info->info_2.ulBootUpSidePortClock)
1107 + return true;
1108 + break;
1109 + default:
1110 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
1111 +index dd279da..a718463 100644
1112 +--- a/drivers/gpu/drm/radeon/radeon_device.c
1113 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
1114 +@@ -199,7 +199,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
1115 + mc->mc_vram_size = mc->aper_size;
1116 + }
1117 + mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1118 +- if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
1119 ++ if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
1120 + dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
1121 + mc->real_vram_size = mc->aper_size;
1122 + mc->mc_vram_size = mc->aper_size;
1123 +diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
1124 +index 5def6f5..0cd2704 100644
1125 +--- a/drivers/gpu/drm/radeon/radeon_i2c.c
1126 ++++ b/drivers/gpu/drm/radeon/radeon_i2c.c
1127 +@@ -95,6 +95,13 @@ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
1128 + }
1129 + }
1130 +
1131 ++ /* switch the pads to ddc mode */
1132 ++ if (ASIC_IS_DCE3(rdev) && rec->hw_capable) {
1133 ++ temp = RREG32(rec->mask_clk_reg);
1134 ++ temp &= ~(1 << 16);
1135 ++ WREG32(rec->mask_clk_reg, temp);
1136 ++ }
1137 ++
1138 + /* clear the output pin values */
1139 + temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
1140 + WREG32(rec->a_clk_reg, temp);
1141 +diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
1142 +index 059bfa4..a108c7e 100644
1143 +--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
1144 ++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
1145 +@@ -121,11 +121,12 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
1146 + * chips. Disable MSI on them for now.
1147 + */
1148 + if ((rdev->family >= CHIP_RV380) &&
1149 +- (!(rdev->flags & RADEON_IS_IGP))) {
1150 ++ (!(rdev->flags & RADEON_IS_IGP)) &&
1151 ++ (!(rdev->flags & RADEON_IS_AGP))) {
1152 + int ret = pci_enable_msi(rdev->pdev);
1153 + if (!ret) {
1154 + rdev->msi_enabled = 1;
1155 +- DRM_INFO("radeon: using MSI.\n");
1156 ++ dev_info(rdev->dev, "radeon: using MSI.\n");
1157 + }
1158 + }
1159 + rdev->irq.installed = true;
1160 +diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
1161 +index ab389f8..b20379e 100644
1162 +--- a/drivers/gpu/drm/radeon/radeon_kms.c
1163 ++++ b/drivers/gpu/drm/radeon/radeon_kms.c
1164 +@@ -106,7 +106,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1165 +
1166 + info = data;
1167 + value_ptr = (uint32_t *)((unsigned long)info->value);
1168 +- value = *value_ptr;
1169 ++ if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value)))
1170 ++ return -EFAULT;
1171 ++
1172 + switch (info->request) {
1173 + case RADEON_INFO_DEVICE_ID:
1174 + value = dev->pci_device;
1175 +diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
1176 +index e1e5255..cf3a51f 100644
1177 +--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
1178 ++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
1179 +@@ -272,7 +272,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
1180 + if (!ref_div)
1181 + return 1;
1182 +
1183 +- vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div;
1184 ++ vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
1185 +
1186 + /*
1187 + * This is horribly crude: the VCO frequency range is divided into
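The one-character fix above replaces a bitwise AND with the intended multiplication in the VCO frequency calculation. With made-up but plausible divider values, the difference is stark (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned ref_freq = 2700;  /* reference clock, hypothetical units */
	unsigned fb_div   = 70;    /* feedback divider, hypothetical */
	unsigned ref_div  = 6;

	printf("buggy (&): %u\n", (ref_freq & fb_div) / ref_div);  /* 4/6 -> 0 */
	printf("fixed (*): %u\n", (ref_freq * fb_div) / ref_div);  /* 31500 */
	return 0;
}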
1188 +diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
1189 +index 3fa6984..c91b741 100644
1190 +--- a/drivers/gpu/drm/radeon/radeon_pm.c
1191 ++++ b/drivers/gpu/drm/radeon/radeon_pm.c
1192 +@@ -224,6 +224,11 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
1193 + {
1194 + int i;
1195 +
1196 ++ /* no need to take locks, etc. if nothing's going to change */
1197 ++ if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
1198 ++ (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
1199 ++ return;
1200 ++
1201 + mutex_lock(&rdev->ddev->struct_mutex);
1202 + mutex_lock(&rdev->vram_mutex);
1203 + mutex_lock(&rdev->cp.mutex);
1204 +diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
1205 +index 4a64b85..68e69a4 100644
1206 +--- a/drivers/hwmon/pc87360.c
1207 ++++ b/drivers/hwmon/pc87360.c
1208 +@@ -1610,11 +1610,8 @@ static struct pc87360_data *pc87360_update_device(struct device *dev)
1209 +
1210 + static int __init pc87360_device_add(unsigned short address)
1211 + {
1212 +- struct resource res = {
1213 +- .name = "pc87360",
1214 +- .flags = IORESOURCE_IO,
1215 +- };
1216 +- int err, i;
1217 ++ struct resource res[3];
1218 ++ int err, i, res_count;
1219 +
1220 + pdev = platform_device_alloc("pc87360", address);
1221 + if (!pdev) {
1222 +@@ -1623,22 +1620,28 @@ static int __init pc87360_device_add(unsigned short address)
1223 + goto exit;
1224 + }
1225 +
1226 ++ memset(res, 0, 3 * sizeof(struct resource));
1227 ++ res_count = 0;
1228 + for (i = 0; i < 3; i++) {
1229 + if (!extra_isa[i])
1230 + continue;
1231 +- res.start = extra_isa[i];
1232 +- res.end = extra_isa[i] + PC87360_EXTENT - 1;
1233 ++ res[res_count].start = extra_isa[i];
1234 ++ res[res_count].end = extra_isa[i] + PC87360_EXTENT - 1;
1235 ++ res[res_count].name = "pc87360";
1236 ++ res[res_count].flags = IORESOURCE_IO;
1237 +
1238 +- err = acpi_check_resource_conflict(&res);
1239 ++ err = acpi_check_resource_conflict(&res[res_count]);
1240 + if (err)
1241 + goto exit_device_put;
1242 +
1243 +- err = platform_device_add_resources(pdev, &res, 1);
1244 +- if (err) {
1245 +- printk(KERN_ERR "pc87360: Device resource[%d] "
1246 +- "addition failed (%d)\n", i, err);
1247 +- goto exit_device_put;
1248 +- }
1249 ++ res_count++;
1250 ++ }
1251 ++
1252 ++ err = platform_device_add_resources(pdev, res, res_count);
1253 ++ if (err) {
1254 ++ printk(KERN_ERR "pc87360: Device resources addition failed "
1255 ++ "(%d)\n", err);
1256 ++ goto exit_device_put;
1257 + }
1258 +
1259 + err = platform_device_add(pdev);
1260 +diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
1261 +index 6fbe899..05b15ed 100644
1262 +--- a/drivers/isdn/gigaset/capi.c
1263 ++++ b/drivers/isdn/gigaset/capi.c
1264 +@@ -378,13 +378,13 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb)
1265 + ++bcs->trans_up;
1266 +
1267 + if (!ap) {
1268 +- dev_err(cs->dev, "%s: no application\n", __func__);
1269 ++ gig_dbg(DEBUG_MCMD, "%s: application gone", __func__);
1270 + return;
1271 + }
1272 +
1273 + /* don't send further B3 messages if disconnected */
1274 + if (bcs->apconnstate < APCONN_ACTIVE) {
1275 +- gig_dbg(DEBUG_LLDATA, "disconnected, discarding ack");
1276 ++ gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__);
1277 + return;
1278 + }
1279 +
1280 +@@ -422,13 +422,14 @@ void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
1281 + bcs->trans_down++;
1282 +
1283 + if (!ap) {
1284 +- dev_err(cs->dev, "%s: no application\n", __func__);
1285 ++ gig_dbg(DEBUG_MCMD, "%s: application gone", __func__);
1286 ++ dev_kfree_skb_any(skb);
1287 + return;
1288 + }
1289 +
1290 + /* don't send further B3 messages if disconnected */
1291 + if (bcs->apconnstate < APCONN_ACTIVE) {
1292 +- gig_dbg(DEBUG_LLDATA, "disconnected, discarding data");
1293 ++ gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__);
1294 + dev_kfree_skb_any(skb);
1295 + return;
1296 + }
1297 +@@ -747,7 +748,7 @@ void gigaset_isdn_connD(struct bc_state *bcs)
1298 + ap = bcs->ap;
1299 + if (!ap) {
1300 + spin_unlock_irqrestore(&bcs->aplock, flags);
1301 +- dev_err(cs->dev, "%s: no application\n", __func__);
1302 ++ gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
1303 + return;
1304 + }
1305 + if (bcs->apconnstate == APCONN_NONE) {
1306 +@@ -843,7 +844,7 @@ void gigaset_isdn_connB(struct bc_state *bcs)
1307 + ap = bcs->ap;
1308 + if (!ap) {
1309 + spin_unlock_irqrestore(&bcs->aplock, flags);
1310 +- dev_err(cs->dev, "%s: no application\n", __func__);
1311 ++ gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
1312 + return;
1313 + }
1314 + if (!bcs->apconnstate) {
1315 +@@ -901,13 +902,12 @@ void gigaset_isdn_connB(struct bc_state *bcs)
1316 + */
1317 + void gigaset_isdn_hupB(struct bc_state *bcs)
1318 + {
1319 +- struct cardstate *cs = bcs->cs;
1320 + struct gigaset_capi_appl *ap = bcs->ap;
1321 +
1322 + /* ToDo: assure order of DISCONNECT_B3_IND and DISCONNECT_IND ? */
1323 +
1324 + if (!ap) {
1325 +- dev_err(cs->dev, "%s: no application\n", __func__);
1326 ++ gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
1327 + return;
1328 + }
1329 +
1330 +@@ -1044,6 +1044,7 @@ static inline void remove_appl_from_channel(struct bc_state *bcs,
1331 + do {
1332 + if (bcap->bcnext == ap) {
1333 + bcap->bcnext = bcap->bcnext->bcnext;
1334 ++ spin_unlock_irqrestore(&bcs->aplock, flags);
1335 + return;
1336 + }
1337 + bcap = bcap->bcnext;
1338 +diff --git a/drivers/isdn/sc/ioctl.c b/drivers/isdn/sc/ioctl.c
1339 +index 1081091..2655e3a 100644
1340 +--- a/drivers/isdn/sc/ioctl.c
1341 ++++ b/drivers/isdn/sc/ioctl.c
1342 +@@ -174,7 +174,7 @@ int sc_ioctl(int card, scs_ioctl *data)
1343 + pr_debug("%s: SCIOGETSPID: ioctl received\n",
1344 + sc_adapter[card]->devicename);
1345 +
1346 +- spid = kmalloc(SCIOC_SPIDSIZE, GFP_KERNEL);
1347 ++ spid = kzalloc(SCIOC_SPIDSIZE, GFP_KERNEL);
1348 + if (!spid) {
1349 + kfree(rcvmsg);
1350 + return -ENOMEM;
1351 +@@ -194,7 +194,7 @@ int sc_ioctl(int card, scs_ioctl *data)
1352 + kfree(rcvmsg);
1353 + return status;
1354 + }
1355 +- strcpy(spid, rcvmsg->msg_data.byte_array);
1356 ++ strlcpy(spid, rcvmsg->msg_data.byte_array, SCIOC_SPIDSIZE);
1357 +
1358 + /*
1359 + * Package the switch type and send to user space
1360 +@@ -272,12 +272,12 @@ int sc_ioctl(int card, scs_ioctl *data)
1361 + return status;
1362 + }
1363 +
1364 +- dn = kmalloc(SCIOC_DNSIZE, GFP_KERNEL);
1365 ++ dn = kzalloc(SCIOC_DNSIZE, GFP_KERNEL);
1366 + if (!dn) {
1367 + kfree(rcvmsg);
1368 + return -ENOMEM;
1369 + }
1370 +- strcpy(dn, rcvmsg->msg_data.byte_array);
1371 ++ strlcpy(dn, rcvmsg->msg_data.byte_array, SCIOC_DNSIZE);
1372 + kfree(rcvmsg);
1373 +
1374 + /*
1375 +@@ -348,7 +348,7 @@ int sc_ioctl(int card, scs_ioctl *data)
1376 + pr_debug("%s: SCIOSTAT: ioctl received\n",
1377 + sc_adapter[card]->devicename);
1378 +
1379 +- bi = kmalloc (sizeof(boardInfo), GFP_KERNEL);
1380 ++ bi = kzalloc(sizeof(boardInfo), GFP_KERNEL);
1381 + if (!bi) {
1382 + kfree(rcvmsg);
1383 + return -ENOMEM;
1384 +diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
1385 +index 2b7907b..0bdb201 100644
1386 +--- a/drivers/md/dm-exception-store.c
1387 ++++ b/drivers/md/dm-exception-store.c
1388 +@@ -173,7 +173,9 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
1389 +
1390 + /* Validate the chunk size against the device block size */
1391 + if (chunk_size %
1392 +- (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9)) {
1393 ++ (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9) ||
1394 ++ chunk_size %
1395 ++ (bdev_logical_block_size(dm_snap_origin(store->snap)->bdev) >> 9)) {
1396 + *error = "Chunk size is not a multiple of device blocksize";
1397 + return -EINVAL;
1398 + }
1399 +diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
1400 +index e8dfa06..0b25362 100644
1401 +--- a/drivers/md/dm-exception-store.h
1402 ++++ b/drivers/md/dm-exception-store.h
1403 +@@ -126,8 +126,9 @@ struct dm_exception_store {
1404 + };
1405 +
1406 + /*
1407 +- * Obtain the cow device used by a given snapshot.
1408 ++ * Obtain the origin or cow device used by a given snapshot.
1409 + */
1410 ++struct dm_dev *dm_snap_origin(struct dm_snapshot *snap);
1411 + struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);
1412 +
1413 + /*
1414 +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
1415 +index d7500e1..bb6bdc8 100644
1416 +--- a/drivers/md/dm-ioctl.c
1417 ++++ b/drivers/md/dm-ioctl.c
1418 +@@ -249,40 +249,50 @@ static void __hash_remove(struct hash_cell *hc)
1419 +
1420 + static void dm_hash_remove_all(int keep_open_devices)
1421 + {
1422 +- int i, dev_skipped, dev_removed;
1423 ++ int i, dev_skipped;
1424 + struct hash_cell *hc;
1425 +- struct list_head *tmp, *n;
1426 ++ struct mapped_device *md;
1427 ++
1428 ++retry:
1429 ++ dev_skipped = 0;
1430 +
1431 + down_write(&_hash_lock);
1432 +
1433 +-retry:
1434 +- dev_skipped = dev_removed = 0;
1435 + for (i = 0; i < NUM_BUCKETS; i++) {
1436 +- list_for_each_safe (tmp, n, _name_buckets + i) {
1437 +- hc = list_entry(tmp, struct hash_cell, name_list);
1438 ++ list_for_each_entry(hc, _name_buckets + i, name_list) {
1439 ++ md = hc->md;
1440 ++ dm_get(md);
1441 +
1442 +- if (keep_open_devices &&
1443 +- dm_lock_for_deletion(hc->md)) {
1444 ++ if (keep_open_devices && dm_lock_for_deletion(md)) {
1445 ++ dm_put(md);
1446 + dev_skipped++;
1447 + continue;
1448 + }
1449 ++
1450 + __hash_remove(hc);
1451 +- dev_removed = 1;
1452 +- }
1453 +- }
1454 +
1455 +- /*
1456 +- * Some mapped devices may be using other mapped devices, so if any
1457 +- * still exist, repeat until we make no further progress.
1458 +- */
1459 +- if (dev_skipped) {
1460 +- if (dev_removed)
1461 +- goto retry;
1462 ++ up_write(&_hash_lock);
1463 +
1464 +- DMWARN("remove_all left %d open device(s)", dev_skipped);
1465 ++ dm_put(md);
1466 ++ if (likely(keep_open_devices))
1467 ++ dm_destroy(md);
1468 ++ else
1469 ++ dm_destroy_immediate(md);
1470 ++
1471 ++ /*
1472 ++ * Some mapped devices may be using other mapped
1473 ++ * devices, so repeat until we make no further
1474 ++ * progress. If a new mapped device is created
1475 ++ * here it will also get removed.
1476 ++ */
1477 ++ goto retry;
1478 ++ }
1479 + }
1480 +
1481 + up_write(&_hash_lock);
1482 ++
1483 ++ if (dev_skipped)
1484 ++ DMWARN("remove_all left %d open device(s)", dev_skipped);
1485 + }
1486 +
1487 + static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
1488 +@@ -640,6 +650,7 @@ static int dev_create(struct dm_ioctl *param, size_t param_size)
1489 + r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
1490 + if (r) {
1491 + dm_put(md);
1492 ++ dm_destroy(md);
1493 + return r;
1494 + }
1495 +
1496 +@@ -742,6 +753,7 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
1497 + param->flags |= DM_UEVENT_GENERATED_FLAG;
1498 +
1499 + dm_put(md);
1500 ++ dm_destroy(md);
1501 + return 0;
1502 + }
1503 +
1504 +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
1505 +index 5485377..a1f2ab5 100644
1506 +--- a/drivers/md/dm-snap.c
1507 ++++ b/drivers/md/dm-snap.c
1508 +@@ -148,6 +148,12 @@ struct dm_snapshot {
1509 + #define RUNNING_MERGE 0
1510 + #define SHUTDOWN_MERGE 1
1511 +
1512 ++struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
1513 ++{
1514 ++ return s->origin;
1515 ++}
1516 ++EXPORT_SYMBOL(dm_snap_origin);
1517 ++
1518 + struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
1519 + {
1520 + return s->cow;
1521 +@@ -1065,10 +1071,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1522 + origin_mode = FMODE_WRITE;
1523 + }
1524 +
1525 +- origin_path = argv[0];
1526 +- argv++;
1527 +- argc--;
1528 +-
1529 + s = kmalloc(sizeof(*s), GFP_KERNEL);
1530 + if (!s) {
1531 + ti->error = "Cannot allocate snapshot context private "
1532 +@@ -1077,6 +1079,16 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1533 + goto bad;
1534 + }
1535 +
1536 ++ origin_path = argv[0];
1537 ++ argv++;
1538 ++ argc--;
1539 ++
1540 ++ r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
1541 ++ if (r) {
1542 ++ ti->error = "Cannot get origin device";
1543 ++ goto bad_origin;
1544 ++ }
1545 ++
1546 + cow_path = argv[0];
1547 + argv++;
1548 + argc--;
1549 +@@ -1097,12 +1109,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1550 + argv += args_used;
1551 + argc -= args_used;
1552 +
1553 +- r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
1554 +- if (r) {
1555 +- ti->error = "Cannot get origin device";
1556 +- goto bad_origin;
1557 +- }
1558 +-
1559 + s->ti = ti;
1560 + s->valid = 1;
1561 + s->active = 0;
1562 +@@ -1212,15 +1218,15 @@ bad_kcopyd:
1563 + dm_exception_table_exit(&s->complete, exception_cache);
1564 +
1565 + bad_hash_tables:
1566 +- dm_put_device(ti, s->origin);
1567 +-
1568 +-bad_origin:
1569 + dm_exception_store_destroy(s->store);
1570 +
1571 + bad_store:
1572 + dm_put_device(ti, s->cow);
1573 +
1574 + bad_cow:
1575 ++ dm_put_device(ti, s->origin);
1576 ++
1577 ++bad_origin:
1578 + kfree(s);
1579 +
1580 + bad:
1581 +@@ -1314,12 +1320,12 @@ static void snapshot_dtr(struct dm_target *ti)
1582 +
1583 + mempool_destroy(s->pending_pool);
1584 +
1585 +- dm_put_device(ti, s->origin);
1586 +-
1587 + dm_exception_store_destroy(s->store);
1588 +
1589 + dm_put_device(ti, s->cow);
1590 +
1591 ++ dm_put_device(ti, s->origin);
1592 ++
1593 + kfree(s);
1594 + }
1595 +
1596 +@@ -1899,8 +1905,14 @@ static int snapshot_iterate_devices(struct dm_target *ti,
1597 + iterate_devices_callout_fn fn, void *data)
1598 + {
1599 + struct dm_snapshot *snap = ti->private;
1600 ++ int r;
1601 ++
1602 ++ r = fn(ti, snap->origin, 0, ti->len, data);
1603 +
1604 +- return fn(ti, snap->origin, 0, ti->len, data);
1605 ++ if (!r)
1606 ++ r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
1607 ++
1608 ++ return r;
1609 + }
1610 +
1611 +
1612 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
1613 +index d21e128..e3a512d 100644
1614 +--- a/drivers/md/dm.c
1615 ++++ b/drivers/md/dm.c
1616 +@@ -19,6 +19,7 @@
1617 + #include <linux/slab.h>
1618 + #include <linux/idr.h>
1619 + #include <linux/hdreg.h>
1620 ++#include <linux/delay.h>
1621 +
1622 + #include <trace/events/block.h>
1623 +
1624 +@@ -2141,6 +2142,7 @@ static struct mapped_device *dm_find_md(dev_t dev)
1625 + md = idr_find(&_minor_idr, minor);
1626 + if (md && (md == MINOR_ALLOCED ||
1627 + (MINOR(disk_devt(dm_disk(md))) != minor) ||
1628 ++ dm_deleting_md(md) ||
1629 + test_bit(DMF_FREEING, &md->flags))) {
1630 + md = NULL;
1631 + goto out;
1632 +@@ -2175,6 +2177,7 @@ void dm_set_mdptr(struct mapped_device *md, void *ptr)
1633 + void dm_get(struct mapped_device *md)
1634 + {
1635 + atomic_inc(&md->holders);
1636 ++ BUG_ON(test_bit(DMF_FREEING, &md->flags));
1637 + }
1638 +
1639 + const char *dm_device_name(struct mapped_device *md)
1640 +@@ -2183,27 +2186,55 @@ const char *dm_device_name(struct mapped_device *md)
1641 + }
1642 + EXPORT_SYMBOL_GPL(dm_device_name);
1643 +
1644 +-void dm_put(struct mapped_device *md)
1645 ++static void __dm_destroy(struct mapped_device *md, bool wait)
1646 + {
1647 + struct dm_table *map;
1648 +
1649 +- BUG_ON(test_bit(DMF_FREEING, &md->flags));
1650 ++ might_sleep();
1651 +
1652 +- if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
1653 +- map = dm_get_live_table(md);
1654 +- idr_replace(&_minor_idr, MINOR_ALLOCED,
1655 +- MINOR(disk_devt(dm_disk(md))));
1656 +- set_bit(DMF_FREEING, &md->flags);
1657 +- spin_unlock(&_minor_lock);
1658 +- if (!dm_suspended_md(md)) {
1659 +- dm_table_presuspend_targets(map);
1660 +- dm_table_postsuspend_targets(map);
1661 +- }
1662 +- dm_sysfs_exit(md);
1663 +- dm_table_put(map);
1664 +- dm_table_destroy(__unbind(md));
1665 +- free_dev(md);
1666 ++ spin_lock(&_minor_lock);
1667 ++ map = dm_get_live_table(md);
1668 ++ idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
1669 ++ set_bit(DMF_FREEING, &md->flags);
1670 ++ spin_unlock(&_minor_lock);
1671 ++
1672 ++ if (!dm_suspended_md(md)) {
1673 ++ dm_table_presuspend_targets(map);
1674 ++ dm_table_postsuspend_targets(map);
1675 + }
1676 ++
1677 ++ /*
1678 ++ * Rare, but there may be I/O requests still going to complete,
1679 ++ * for example. Wait for all references to disappear.
1680 ++ * No one should increment the reference count of the mapped_device
1681 ++ * after the mapped_device state becomes DMF_FREEING.
1682 ++ */
1683 ++ if (wait)
1684 ++ while (atomic_read(&md->holders))
1685 ++ msleep(1);
1686 ++ else if (atomic_read(&md->holders))
1687 ++ DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
1688 ++ dm_device_name(md), atomic_read(&md->holders));
1689 ++
1690 ++ dm_sysfs_exit(md);
1691 ++ dm_table_put(map);
1692 ++ dm_table_destroy(__unbind(md));
1693 ++ free_dev(md);
1694 ++}
1695 ++
1696 ++void dm_destroy(struct mapped_device *md)
1697 ++{
1698 ++ __dm_destroy(md, true);
1699 ++}
1700 ++
1701 ++void dm_destroy_immediate(struct mapped_device *md)
1702 ++{
1703 ++ __dm_destroy(md, false);
1704 ++}
1705 ++
1706 ++void dm_put(struct mapped_device *md)
1707 ++{
1708 ++ atomic_dec(&md->holders);
1709 + }
1710 + EXPORT_SYMBOL_GPL(dm_put);
1711 +
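
After this change dm_put() is only a reference drop; teardown moves into dm_destroy() (waits for remaining holders) and dm_destroy_immediate() (warns and proceeds), both gated by DMF_FREEING so no new reference can appear mid-teardown. A compressed userspace sketch of that lifecycle, using C11 atomics as stand-ins for the kernel's atomic_t and bit flags:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy mapped_device: just the pieces the new lifecycle touches. */
struct mapped_device {
	atomic_int holders;
	atomic_bool freeing;	/* stands in for DMF_FREEING */
};

static void dm_get(struct mapped_device *md)
{
	atomic_fetch_add(&md->holders, 1);
	if (atomic_load(&md->freeing))	/* the patch BUG()s here */
		printf("BUG: dm_get() after DMF_FREEING\n");
}

static void dm_put(struct mapped_device *md)
{
	atomic_fetch_sub(&md->holders, 1);	/* no teardown any more */
}

static void dm_destroy(struct mapped_device *md, bool wait)
{
	atomic_store(&md->freeing, true);	/* forbid new dm_get() */
	if (wait)
		while (atomic_load(&md->holders))
			;	/* the kernel msleep(1)s in this loop */
	else if (atomic_load(&md->holders))
		printf("warn: forcibly destroying, %d user(s) left\n",
		       atomic_load(&md->holders));
	printf("freed\n");	/* dm_sysfs_exit/__unbind/free_dev here */
}

int main(void)
{
	struct mapped_device md = { 0 };

	dm_get(&md);
	dm_put(&md);
	dm_destroy(&md, true);	/* wait=false models dm_destroy_immediate */
	return 0;
}
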
1712 +diff --git a/drivers/md/dm.h b/drivers/md/dm.h
1713 +index bad1724..8223671 100644
1714 +--- a/drivers/md/dm.h
1715 ++++ b/drivers/md/dm.h
1716 +@@ -122,6 +122,11 @@ void dm_linear_exit(void);
1717 + int dm_stripe_init(void);
1718 + void dm_stripe_exit(void);
1719 +
1720 ++/*
1721 ++ * mapped_device operations
1722 ++ */
1723 ++void dm_destroy(struct mapped_device *md);
1724 ++void dm_destroy_immediate(struct mapped_device *md);
1725 + int dm_open_count(struct mapped_device *md);
1726 + int dm_lock_for_deletion(struct mapped_device *md);
1727 +
1728 +diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
1729 +index 8327e24..300ec15 100644
1730 +--- a/drivers/memstick/core/mspro_block.c
1731 ++++ b/drivers/memstick/core/mspro_block.c
1732 +@@ -1040,6 +1040,7 @@ static int mspro_block_read_attributes(struct memstick_dev *card)
1733 + snprintf(s_attr->name, sizeof(s_attr->name),
1734 + "attr_x%02x", attr->entries[cnt].id);
1735 +
1736 ++ sysfs_attr_init(&s_attr->dev_attr.attr);
1737 + s_attr->dev_attr.attr.name = s_attr->name;
1738 + s_attr->dev_attr.attr.mode = S_IRUGO;
1739 + s_attr->dev_attr.show = mspro_block_attr_show(s_attr->id);
1740 +@@ -1330,13 +1331,14 @@ static void mspro_block_remove(struct memstick_dev *card)
1741 + struct mspro_block_data *msb = memstick_get_drvdata(card);
1742 + unsigned long flags;
1743 +
1744 +- del_gendisk(msb->disk);
1745 +- dev_dbg(&card->dev, "mspro block remove\n");
1746 + spin_lock_irqsave(&msb->q_lock, flags);
1747 + msb->eject = 1;
1748 + blk_start_queue(msb->queue);
1749 + spin_unlock_irqrestore(&msb->q_lock, flags);
1750 +
1751 ++ del_gendisk(msb->disk);
1752 ++ dev_dbg(&card->dev, "mspro block remove\n");
1753 ++
1754 + blk_cleanup_queue(msb->queue);
1755 + msb->queue = NULL;
1756 +
1757 +diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
1758 +index 62f3ea9..3364061 100644
1759 +--- a/drivers/mtd/chips/cfi_cmdset_0001.c
1760 ++++ b/drivers/mtd/chips/cfi_cmdset_0001.c
1761 +@@ -717,7 +717,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
1762 + chip = &newcfi->chips[0];
1763 + for (i = 0; i < cfi->numchips; i++) {
1764 + shared[i].writing = shared[i].erasing = NULL;
1765 +- spin_lock_init(&shared[i].lock);
1766 ++ mutex_init(&shared[i].lock);
1767 + for (j = 0; j < numparts; j++) {
1768 + *chip = cfi->chips[i];
1769 + chip->start += j << partshift;
1770 +@@ -886,7 +886,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
1771 + */
1772 + struct flchip_shared *shared = chip->priv;
1773 + struct flchip *contender;
1774 +- spin_lock(&shared->lock);
1775 ++ mutex_lock(&shared->lock);
1776 + contender = shared->writing;
1777 + if (contender && contender != chip) {
1778 + /*
1779 +@@ -899,7 +899,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
1780 + * get_chip returns success we're clear to go ahead.
1781 + */
1782 + ret = mutex_trylock(&contender->mutex);
1783 +- spin_unlock(&shared->lock);
1784 ++ mutex_unlock(&shared->lock);
1785 + if (!ret)
1786 + goto retry;
1787 + mutex_unlock(&chip->mutex);
1788 +@@ -914,7 +914,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
1789 + mutex_unlock(&contender->mutex);
1790 + return ret;
1791 + }
1792 +- spin_lock(&shared->lock);
1793 ++ mutex_lock(&shared->lock);
1794 +
1795 + /* We should not own chip if it is already
1796 + * in FL_SYNCING state. Put contender and retry. */
1797 +@@ -930,7 +930,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
1798 + * on this chip. Sleep. */
1799 + if (mode == FL_ERASING && shared->erasing
1800 + && shared->erasing->oldstate == FL_ERASING) {
1801 +- spin_unlock(&shared->lock);
1802 ++ mutex_unlock(&shared->lock);
1803 + set_current_state(TASK_UNINTERRUPTIBLE);
1804 + add_wait_queue(&chip->wq, &wait);
1805 + mutex_unlock(&chip->mutex);
1806 +@@ -944,7 +944,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
1807 + shared->writing = chip;
1808 + if (mode == FL_ERASING)
1809 + shared->erasing = chip;
1810 +- spin_unlock(&shared->lock);
1811 ++ mutex_unlock(&shared->lock);
1812 + }
1813 + ret = chip_ready(map, chip, adr, mode);
1814 + if (ret == -EAGAIN)
1815 +@@ -959,7 +959,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
1816 +
1817 + if (chip->priv) {
1818 + struct flchip_shared *shared = chip->priv;
1819 +- spin_lock(&shared->lock);
1820 ++ mutex_lock(&shared->lock);
1821 + if (shared->writing == chip && chip->oldstate == FL_READY) {
1822 + /* We own the ability to write, but we're done */
1823 + shared->writing = shared->erasing;
1824 +@@ -967,7 +967,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
1825 + /* give back ownership to who we loaned it from */
1826 + struct flchip *loaner = shared->writing;
1827 + mutex_lock(&loaner->mutex);
1828 +- spin_unlock(&shared->lock);
1829 ++ mutex_unlock(&shared->lock);
1830 + mutex_unlock(&chip->mutex);
1831 + put_chip(map, loaner, loaner->start);
1832 + mutex_lock(&chip->mutex);
1833 +@@ -985,11 +985,11 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
1834 + * Don't let the switch below mess things up since
1835 + * we don't have ownership to resume anything.
1836 + */
1837 +- spin_unlock(&shared->lock);
1838 ++ mutex_unlock(&shared->lock);
1839 + wake_up(&chip->wq);
1840 + return;
1841 + }
1842 +- spin_unlock(&shared->lock);
1843 ++ mutex_unlock(&shared->lock);
1844 + }
1845 +
1846 + switch(chip->oldstate) {
1847 +diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
1848 +index fece5be..04fdfcc 100644
1849 +--- a/drivers/mtd/lpddr/lpddr_cmds.c
1850 ++++ b/drivers/mtd/lpddr/lpddr_cmds.c
1851 +@@ -98,7 +98,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
1852 + numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
1853 + for (i = 0; i < numchips; i++) {
1854 + shared[i].writing = shared[i].erasing = NULL;
1855 +- spin_lock_init(&shared[i].lock);
1856 ++ mutex_init(&shared[i].lock);
1857 + for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
1858 + *chip = lpddr->chips[i];
1859 + chip->start += j << lpddr->chipshift;
1860 +@@ -217,7 +217,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
1861 + */
1862 + struct flchip_shared *shared = chip->priv;
1863 + struct flchip *contender;
1864 +- spin_lock(&shared->lock);
1865 ++ mutex_lock(&shared->lock);
1866 + contender = shared->writing;
1867 + if (contender && contender != chip) {
1868 + /*
1869 +@@ -230,7 +230,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
1870 + * get_chip returns success we're clear to go ahead.
1871 + */
1872 + ret = mutex_trylock(&contender->mutex);
1873 +- spin_unlock(&shared->lock);
1874 ++ mutex_unlock(&shared->lock);
1875 + if (!ret)
1876 + goto retry;
1877 + mutex_unlock(&chip->mutex);
1878 +@@ -245,7 +245,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
1879 + mutex_unlock(&contender->mutex);
1880 + return ret;
1881 + }
1882 +- spin_lock(&shared->lock);
1883 ++ mutex_lock(&shared->lock);
1884 +
1885 + /* We should not own chip if it is already in FL_SYNCING
1886 + * state. Put contender and retry. */
1887 +@@ -261,7 +261,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
1888 + Must sleep in such a case. */
1889 + if (mode == FL_ERASING && shared->erasing
1890 + && shared->erasing->oldstate == FL_ERASING) {
1891 +- spin_unlock(&shared->lock);
1892 ++ mutex_unlock(&shared->lock);
1893 + set_current_state(TASK_UNINTERRUPTIBLE);
1894 + add_wait_queue(&chip->wq, &wait);
1895 + mutex_unlock(&chip->mutex);
1896 +@@ -275,7 +275,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
1897 + shared->writing = chip;
1898 + if (mode == FL_ERASING)
1899 + shared->erasing = chip;
1900 +- spin_unlock(&shared->lock);
1901 ++ mutex_unlock(&shared->lock);
1902 + }
1903 +
1904 + ret = chip_ready(map, chip, mode);
1905 +@@ -348,7 +348,7 @@ static void put_chip(struct map_info *map, struct flchip *chip)
1906 + {
1907 + if (chip->priv) {
1908 + struct flchip_shared *shared = chip->priv;
1909 +- spin_lock(&shared->lock);
1910 ++ mutex_lock(&shared->lock);
1911 + if (shared->writing == chip && chip->oldstate == FL_READY) {
1912 + /* We own the ability to write, but we're done */
1913 + shared->writing = shared->erasing;
1914 +@@ -356,7 +356,7 @@ static void put_chip(struct map_info *map, struct flchip *chip)
1915 + /* give back the ownership */
1916 + struct flchip *loaner = shared->writing;
1917 + mutex_lock(&loaner->mutex);
1918 +- spin_unlock(&shared->lock);
1919 ++ mutex_unlock(&shared->lock);
1920 + mutex_unlock(&chip->mutex);
1921 + put_chip(map, loaner);
1922 + mutex_lock(&chip->mutex);
1923 +@@ -374,11 +374,11 @@ static void put_chip(struct map_info *map, struct flchip *chip)
1924 + * Don't let the switch below mess things up since
1925 + * we don't have ownership to resume anything.
1926 + */
1927 +- spin_unlock(&shared->lock);
1928 ++ mutex_unlock(&shared->lock);
1929 + wake_up(&chip->wq);
1930 + return;
1931 + }
1932 +- spin_unlock(&shared->lock);
1933 ++ mutex_unlock(&shared->lock);
1934 + }
1935 +
1936 + switch (chip->oldstate) {
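
Both the cfi_cmdset_0001 and lpddr hunks convert flchip_shared->lock from a spinlock to a mutex: the lock is held across points that can block (put_chip() takes the loaner's mutex while still holding shared->lock), and sleeping under a spinlock is not allowed. A pthreads analogue of that nesting, purely for illustration:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Toy model of flchip_shared: an arbiter lock plus a per-chip lock.
 * The arbiter lock is held while blocking on the per-chip lock, so it
 * must itself be a sleepable lock (a mutex), not a spinlock. */
static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t chip_lock = PTHREAD_MUTEX_INITIALIZER;

static void get_chip(const char *who)
{
	pthread_mutex_lock(&shared_lock);
	/* May block here while shared_lock is held -- exactly the
	 * sleep-under-spinlock situation the patch eliminates. */
	pthread_mutex_lock(&chip_lock);
	pthread_mutex_unlock(&shared_lock);

	printf("%s owns the chip\n", who);
	usleep(1000);			/* pretend to program the flash */
	pthread_mutex_unlock(&chip_lock);
}

static void *worker(void *arg)
{
	get_chip(arg);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)"writer");
	pthread_create(&b, NULL, worker, (void *)"eraser");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
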
1937 +diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
1938 +index 4a7b864..5bcc34a 100644
1939 +--- a/drivers/mtd/nand/nand_base.c
1940 ++++ b/drivers/mtd/nand/nand_base.c
1941 +@@ -2852,6 +2852,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
1942 + */
1943 + if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
1944 + id_data[0] == NAND_MFR_SAMSUNG &&
1945 ++ (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
1946 + id_data[5] != 0x00) {
1947 + /* Calc pagesize */
1948 + mtd->writesize = 2048 << (extid & 0x03);
1949 +diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
1950 +index 90e143e..317aff4 100644
1951 +--- a/drivers/mtd/nand/plat_nand.c
1952 ++++ b/drivers/mtd/nand/plat_nand.c
1953 +@@ -37,6 +37,11 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
1954 + struct resource *res;
1955 + int err = 0;
1956 +
1957 ++ if (pdata->chip.nr_chips < 1) {
1958 ++ dev_err(&pdev->dev, "invalid number of chips specified\n");
1959 ++ return -EINVAL;
1960 ++ }
1961 ++
1962 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1963 + if (!res)
1964 + return -ENXIO;
1965 +diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
1966 +index e02fa4f..4d89f37 100644
1967 +--- a/drivers/mtd/nand/pxa3xx_nand.c
1968 ++++ b/drivers/mtd/nand/pxa3xx_nand.c
1969 +@@ -363,7 +363,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = {
1970 + #define tAR_NDTR1(r) (((r) >> 0) & 0xf)
1971 +
1972 + /* convert nano-seconds to nand flash controller clock cycles */
1973 +-#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1)
1974 ++#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
1975 +
1976 + /* convert nand flash controller clock cycles to nano-seconds */
1977 + #define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
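
The ns2cycle() fix above drops a stray "- 1": integer division already truncates, so the old macro waited one controller cycle less than the flash timing asked for and could even go negative for small values. A worked example, assuming an illustrative 156 MHz controller clock:

#include <stdio.h>

/* Old (buggy) and new conversions from nanoseconds to clock cycles. */
#define ns2cycle_old(ns, clk) (int)(((ns) * ((clk) / 1000000) / 1000) - 1)
#define ns2cycle_new(ns, clk) (int)((ns) * ((clk) / 1000000) / 1000)

int main(void)
{
	unsigned long clk = 156000000;	/* assumed 156 MHz for the example */

	/* 15 ns at 156 MHz is 2.34 cycles, truncated to 2; the old macro
	 * then subtracted 1 and waited only ~6.4 ns. */
	printf("15 ns: old=%d new=%d\n",
	       ns2cycle_old(15, clk), ns2cycle_new(15, clk));

	/* Small timings even went negative with the old macro. */
	printf(" 5 ns: old=%d new=%d\n",
	       ns2cycle_old(5, clk), ns2cycle_new(5, clk));
	return 0;
}
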
1978 +diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
1979 +index f654db9..d206f21 100644
1980 +--- a/drivers/net/e1000e/82571.c
1981 ++++ b/drivers/net/e1000e/82571.c
1982 +@@ -936,12 +936,14 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1983 + ew32(IMC, 0xffffffff);
1984 + icr = er32(ICR);
1985 +
1986 +- /* Install any alternate MAC address into RAR0 */
1987 +- ret_val = e1000_check_alt_mac_addr_generic(hw);
1988 +- if (ret_val)
1989 +- return ret_val;
1990 ++ if (hw->mac.type == e1000_82571) {
1991 ++ /* Install any alternate MAC address into RAR0 */
1992 ++ ret_val = e1000_check_alt_mac_addr_generic(hw);
1993 ++ if (ret_val)
1994 ++ return ret_val;
1995 +
1996 +- e1000e_set_laa_state_82571(hw, true);
1997 ++ e1000e_set_laa_state_82571(hw, true);
1998 ++ }
1999 +
2000 + /* Reinitialize the 82571 serdes link state machine */
2001 + if (hw->phy.media_type == e1000_media_type_internal_serdes)
2002 +@@ -1618,14 +1620,16 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
2003 + {
2004 + s32 ret_val = 0;
2005 +
2006 +- /*
2007 +- * If there's an alternate MAC address place it in RAR0
2008 +- * so that it will override the Si installed default perm
2009 +- * address.
2010 +- */
2011 +- ret_val = e1000_check_alt_mac_addr_generic(hw);
2012 +- if (ret_val)
2013 +- goto out;
2014 ++ if (hw->mac.type == e1000_82571) {
2015 ++ /*
2016 ++ * If there's an alternate MAC address place it in RAR0
2017 ++ * so that it will override the Si installed default perm
2018 ++ * address.
2019 ++ */
2020 ++ ret_val = e1000_check_alt_mac_addr_generic(hw);
2021 ++ if (ret_val)
2022 ++ goto out;
2023 ++ }
2024 +
2025 + ret_val = e1000_read_mac_addr_generic(hw);
2026 +
2027 +@@ -1833,6 +1837,7 @@ struct e1000_info e1000_82573_info = {
2028 + | FLAG_HAS_SMART_POWER_DOWN
2029 + | FLAG_HAS_AMT
2030 + | FLAG_HAS_SWSM_ON_LOAD,
2031 ++ .flags2 = FLAG2_DISABLE_ASPM_L1,
2032 + .pba = 20,
2033 + .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
2034 + .get_variants = e1000_get_variants_82571,
2035 +diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
2036 +index 4dc02c7..75289ca 100644
2037 +--- a/drivers/net/e1000e/defines.h
2038 ++++ b/drivers/net/e1000e/defines.h
2039 +@@ -620,6 +620,7 @@
2040 + #define E1000_FLASH_UPDATES 2000
2041 +
2042 + /* NVM Word Offsets */
2043 ++#define NVM_COMPAT 0x0003
2044 + #define NVM_ID_LED_SETTINGS 0x0004
2045 + #define NVM_INIT_CONTROL2_REG 0x000F
2046 + #define NVM_INIT_CONTROL3_PORT_B 0x0014
2047 +@@ -642,6 +643,9 @@
2048 + /* Mask bits for fields in Word 0x1a of the NVM */
2049 + #define NVM_WORD1A_ASPM_MASK 0x000C
2050 +
2051 ++/* Mask bits for fields in Word 0x03 of the EEPROM */
2052 ++#define NVM_COMPAT_LOM 0x0800
2053 ++
2054 + /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
2055 + #define NVM_SUM 0xBABA
2056 +
2057 +diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
2058 +index a968e3a..768c105 100644
2059 +--- a/drivers/net/e1000e/lib.c
2060 ++++ b/drivers/net/e1000e/lib.c
2061 +@@ -183,6 +183,16 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
2062 + u16 offset, nvm_alt_mac_addr_offset, nvm_data;
2063 + u8 alt_mac_addr[ETH_ALEN];
2064 +
2065 ++ ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
2066 ++ if (ret_val)
2067 ++ goto out;
2068 ++
2069 ++ /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
2070 ++ if (!((nvm_data & NVM_COMPAT_LOM) ||
2071 ++ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
2072 ++ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)))
2073 ++ goto out;
2074 ++
2075 + ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
2076 + &nvm_alt_mac_addr_offset);
2077 + if (ret_val) {
2078 +diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
2079 +index 648972d..ab9fe22 100644
2080 +--- a/drivers/net/wireless/ath/ath5k/base.c
2081 ++++ b/drivers/net/wireless/ath/ath5k/base.c
2082 +@@ -48,6 +48,7 @@
2083 + #include <linux/netdevice.h>
2084 + #include <linux/cache.h>
2085 + #include <linux/pci.h>
2086 ++#include <linux/pci-aspm.h>
2087 + #include <linux/ethtool.h>
2088 + #include <linux/uaccess.h>
2089 + #include <linux/slab.h>
2090 +@@ -472,6 +473,26 @@ ath5k_pci_probe(struct pci_dev *pdev,
2091 + int ret;
2092 + u8 csz;
2093 +
2094 ++ /*
2095 ++ * L0s needs to be disabled on all ath5k cards.
2096 ++ *
2097 ++ * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
2098 ++ * by default starting with 2.6.36) this will also mean both L1 and
2099 ++ * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
2100 ++ * know L1 works correctly even for all ath5k pre 1.1 PCIe devices,
2101 ++ * but we cannot currently undo the effect of a blacklist; for
2102 ++ * details you can read pcie_aspm_sanity_check() and see how it adjusts
2103 ++ * the device link capability.
2104 ++ *
2105 ++ * It may be possible in the future to implement some PCI API to allow
2106 ++ * drivers to override blacklists for pre 1.1 PCIe, but for now it is
2107 ++ * best to accept that both L0s and L1 will be disabled completely for
2108 ++ * distributions shipping with CONFIG_PCIEASPM rather than having this
2109 ++ * issue present. The motivation for adding this new API would be to
2110 ++ * help with power consumption for some of these devices.
2111 ++ */
2112 ++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
2113 ++
2114 + ret = pci_enable_device(pdev);
2115 + if (ret) {
2116 + dev_err(&pdev->dev, "can't enable device\n");
2117 +diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
2118 +index 2571b44..5fcbc2f 100644
2119 +--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
2120 ++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
2121 +@@ -68,18 +68,23 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
2122 + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2123 + struct ieee80211_sta *sta = tx_info->control.sta;
2124 + struct ath9k_htc_sta *ista;
2125 +- struct ath9k_htc_vif *avp;
2126 + struct ath9k_htc_tx_ctl tx_ctl;
2127 + enum htc_endpoint_id epid;
2128 + u16 qnum, hw_qnum;
2129 + __le16 fc;
2130 + u8 *tx_fhdr;
2131 +- u8 sta_idx;
2132 ++ u8 sta_idx, vif_idx;
2133 +
2134 + hdr = (struct ieee80211_hdr *) skb->data;
2135 + fc = hdr->frame_control;
2136 +
2137 +- avp = (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv;
2138 ++ if (tx_info->control.vif &&
2139 ++ (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv)
2140 ++ vif_idx = ((struct ath9k_htc_vif *)
2141 ++ tx_info->control.vif->drv_priv)->index;
2142 ++ else
2143 ++ vif_idx = priv->nvifs;
2144 ++
2145 + if (sta) {
2146 + ista = (struct ath9k_htc_sta *) sta->drv_priv;
2147 + sta_idx = ista->index;
2148 +@@ -96,7 +101,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
2149 + memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
2150 +
2151 + tx_hdr.node_idx = sta_idx;
2152 +- tx_hdr.vif_idx = avp->index;
2153 ++ tx_hdr.vif_idx = vif_idx;
2154 +
2155 + if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
2156 + tx_ctl.type = ATH9K_HTC_AMPDU;
2157 +@@ -156,7 +161,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
2158 + tx_ctl.type = ATH9K_HTC_NORMAL;
2159 +
2160 + mgmt_hdr.node_idx = sta_idx;
2161 +- mgmt_hdr.vif_idx = avp->index;
2162 ++ mgmt_hdr.vif_idx = vif_idx;
2163 + mgmt_hdr.tidno = 0;
2164 + mgmt_hdr.flags = 0;
2165 +
2166 +diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
2167 +index c44a303..2a9480d 100644
2168 +--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
2169 ++++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
2170 +@@ -915,22 +915,6 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
2171 + rts_retry_limit = data_retry_limit;
2172 + tx_cmd->rts_retry_limit = rts_retry_limit;
2173 +
2174 +- if (ieee80211_is_mgmt(fc)) {
2175 +- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2176 +- case cpu_to_le16(IEEE80211_STYPE_AUTH):
2177 +- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2178 +- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
2179 +- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
2180 +- if (tx_flags & TX_CMD_FLG_RTS_MSK) {
2181 +- tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2182 +- tx_flags |= TX_CMD_FLG_CTS_MSK;
2183 +- }
2184 +- break;
2185 +- default:
2186 +- break;
2187 +- }
2188 +- }
2189 +-
2190 + tx_cmd->rate = rate;
2191 + tx_cmd->tx_flags = tx_flags;
2192 +
2193 +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
2194 +index 01658cf..2a30397 100644
2195 +--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
2196 ++++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
2197 +@@ -209,10 +209,21 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
2198 + }
2199 + }
2200 +
2201 +-static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
2202 +- __le32 *tx_flags)
2203 ++static void iwlagn_rts_tx_cmd_flag(struct iwl_priv *priv,
2204 ++ struct ieee80211_tx_info *info,
2205 ++ __le16 fc, __le32 *tx_flags)
2206 + {
2207 +- *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
2208 ++ if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
2209 ++ info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
2210 ++ *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
2211 ++ return;
2212 ++ }
2213 ++
2214 ++ if (priv->cfg->use_rts_for_ht &&
2215 ++ info->flags & IEEE80211_TX_CTL_AMPDU) {
2216 ++ *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
2217 ++ return;
2218 ++ }
2219 + }
2220 +
2221 + /* Calc max signal level (dBm) among 3 possible receivers */
2222 +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
2223 +index cf4a95b..ca46831 100644
2224 +--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
2225 ++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
2226 +@@ -325,18 +325,11 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
2227 + struct iwl_lq_sta *lq_data,
2228 + struct ieee80211_sta *sta)
2229 + {
2230 +- if ((tid < TID_MAX_LOAD_COUNT) &&
2231 +- !rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta)) {
2232 +- if (priv->cfg->use_rts_for_ht) {
2233 +- /*
2234 +- * switch to RTS/CTS if it is the prefer protection
2235 +- * method for HT traffic
2236 +- */
2237 +- IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n");
2238 +- priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
2239 +- iwlcore_commit_rxon(priv);
2240 +- }
2241 +- }
2242 ++ if (tid < TID_MAX_LOAD_COUNT)
2243 ++ rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
2244 ++ else
2245 ++ IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
2246 ++ tid, TID_MAX_LOAD_COUNT);
2247 + }
2248 +
2249 + static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
2250 +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
2251 +index 7d614c4..3a3d27c 100644
2252 +--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
2253 ++++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
2254 +@@ -376,10 +376,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
2255 + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2256 + }
2257 +
2258 +- priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
2259 +-
2260 +- if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2261 +- tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2262 ++ priv->cfg->ops->utils->rts_tx_cmd_flag(priv, info, fc, &tx_flags);
2263 +
2264 + tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2265 + if (ieee80211_is_mgmt(fc)) {
2266 +@@ -453,21 +450,6 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
2267 + if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
2268 + rate_flags |= RATE_MCS_CCK_MSK;
2269 +
2270 +- /* Set up RTS and CTS flags for certain packets */
2271 +- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2272 +- case cpu_to_le16(IEEE80211_STYPE_AUTH):
2273 +- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2274 +- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
2275 +- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
2276 +- if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
2277 +- tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2278 +- tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
2279 +- }
2280 +- break;
2281 +- default:
2282 +- break;
2283 +- }
2284 +-
2285 + /* Set up antennas */
2286 + priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
2287 + rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
2288 +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
2289 +index 24aff65..c7f56b4 100644
2290 +--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
2291 ++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
2292 +@@ -200,13 +200,6 @@ int iwl_commit_rxon(struct iwl_priv *priv)
2293 +
2294 + priv->start_calib = 0;
2295 + if (new_assoc) {
2296 +- /*
2297 +- * allow CTS-to-self if possible for new association.
2298 +- * this is relevant only for 5000 series and up,
2299 +- * but will not damage 4965
2300 +- */
2301 +- priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
2302 +-
2303 + /* Apply the new configuration
2304 + * RXON assoc doesn't clear the station table in uCode,
2305 + */
2306 +@@ -3336,13 +3329,40 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
2307 + IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
2308 + priv->_agn.agg_tids_count);
2309 + }
2310 ++ if (priv->cfg->use_rts_for_ht) {
2311 ++ struct iwl_station_priv *sta_priv =
2312 ++ (void *) sta->drv_priv;
2313 ++ /*
2314 ++ * switch off RTS/CTS if it was previously enabled
2315 ++ */
2316 ++
2317 ++ sta_priv->lq_sta.lq.general_params.flags &=
2318 ++ ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
2319 ++ iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq,
2320 ++ CMD_ASYNC, false);
2321 ++ }
2322 ++ break;
2323 + if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2324 + return 0;
2325 + else
2326 + return ret;
2327 + case IEEE80211_AMPDU_TX_OPERATIONAL:
2328 +- /* do nothing */
2329 +- return -EOPNOTSUPP;
2330 ++ if (priv->cfg->use_rts_for_ht) {
2331 ++ struct iwl_station_priv *sta_priv =
2332 ++ (void *) sta->drv_priv;
2333 ++
2334 ++ /*
2335 ++ * switch to RTS/CTS if it is the preferred protection
2336 ++ * method for HT traffic
2337 ++ */
2338 ++
2339 ++ sta_priv->lq_sta.lq.general_params.flags |=
2340 ++ LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
2341 ++ iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq,
2342 ++ CMD_ASYNC, false);
2343 ++ }
2344 ++ ret = 0;
2345 ++ break;
2346 + default:
2347 + IWL_DEBUG_HT(priv, "unknown\n");
2348 + return -EINVAL;
2349 +@@ -3423,6 +3443,49 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
2350 + return 0;
2351 + }
2352 +
2353 ++static void iwlagn_configure_filter(struct ieee80211_hw *hw,
2354 ++ unsigned int changed_flags,
2355 ++ unsigned int *total_flags,
2356 ++ u64 multicast)
2357 ++{
2358 ++ struct iwl_priv *priv = hw->priv;
2359 ++ __le32 filter_or = 0, filter_nand = 0;
2360 ++
2361 ++#define CHK(test, flag) do { \
2362 ++ if (*total_flags & (test)) \
2363 ++ filter_or |= (flag); \
2364 ++ else \
2365 ++ filter_nand |= (flag); \
2366 ++ } while (0)
2367 ++
2368 ++ IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
2369 ++ changed_flags, *total_flags);
2370 ++
2371 ++ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
2372 ++ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
2373 ++ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
2374 ++
2375 ++#undef CHK
2376 ++
2377 ++ mutex_lock(&priv->mutex);
2378 ++
2379 ++ priv->staging_rxon.filter_flags &= ~filter_nand;
2380 ++ priv->staging_rxon.filter_flags |= filter_or;
2381 ++
2382 ++ iwlcore_commit_rxon(priv);
2383 ++
2384 ++ mutex_unlock(&priv->mutex);
2385 ++
2386 ++ /*
2387 ++ * Receiving all multicast frames is always enabled by the
2388 ++ * default flags setup in iwl_connection_init_rx_config()
2389 ++ * since we currently do not support programming multicast
2390 ++ * filters into the device.
2391 ++ */
2392 ++ *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
2393 ++ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
2394 ++}
2395 ++
2396 + /*****************************************************************************
2397 + *
2398 + * driver setup and teardown
2399 +@@ -3583,7 +3646,7 @@ static struct ieee80211_ops iwl_hw_ops = {
2400 + .add_interface = iwl_mac_add_interface,
2401 + .remove_interface = iwl_mac_remove_interface,
2402 + .config = iwl_mac_config,
2403 +- .configure_filter = iwl_configure_filter,
2404 ++ .configure_filter = iwlagn_configure_filter,
2405 + .set_key = iwl_mac_set_key,
2406 + .update_tkip_key = iwl_mac_update_tkip_key,
2407 + .conf_tx = iwl_mac_conf_tx,
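
The iwlagn_configure_filter() added above (its 3945 twin appears further down) folds each mac80211 filter flag into either a set mask (filter_or) or a clear mask (filter_nand) via the local CHK() macro, then applies both to the staging RXON filter flags in one step. The idiom in isolation, with made-up flag values:

#include <stdio.h>
#include <stdint.h>

/* Illustrative flag values; the real FIF_ and RXON_ bits live in
 * mac80211 and the driver headers. */
#define FIF_PROMISC	0x01
#define FIF_CONTROL	0x02
#define RXON_PROMISC	0x10
#define RXON_CTL2HOST	0x20

int main(void)
{
	unsigned int total_flags = FIF_CONTROL;	/* what mac80211 asked for */
	uint32_t filter_flags = RXON_PROMISC;	/* current device state */
	uint32_t filter_or = 0, filter_nand = 0;

	/* Same shape as the driver's CHK(): requested flags accumulate in
	 * filter_or, everything else in filter_nand. */
#define CHK(test, flag) do {			\
		if (total_flags & (test))	\
			filter_or |= (flag);	\
		else				\
			filter_nand |= (flag);	\
	} while (0)

	CHK(FIF_PROMISC, RXON_PROMISC);
	CHK(FIF_CONTROL, RXON_CTL2HOST);
#undef CHK

	/* Clear the unwanted bits, then set the wanted ones. */
	filter_flags &= ~filter_nand;
	filter_flags |= filter_or;
	printf("filter_flags = 0x%02x\n", (unsigned)filter_flags); /* 0x20 */
	return 0;
}
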
2408 +diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
2409 +index 5bbc529..cd5b664 100644
2410 +--- a/drivers/net/wireless/iwlwifi/iwl-core.c
2411 ++++ b/drivers/net/wireless/iwlwifi/iwl-core.c
2412 +@@ -403,19 +403,36 @@ EXPORT_SYMBOL(iwlcore_free_geos);
2413 + * iwlcore_rts_tx_cmd_flag: Set rts/cts. 3945 and 4965 only share this
2414 + * function.
2415 + */
2416 +-void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
2417 +- __le32 *tx_flags)
2418 ++void iwlcore_rts_tx_cmd_flag(struct iwl_priv *priv,
2419 ++ struct ieee80211_tx_info *info,
2420 ++ __le16 fc, __le32 *tx_flags)
2421 + {
2422 + if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
2423 + *tx_flags |= TX_CMD_FLG_RTS_MSK;
2424 + *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2425 ++ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2426 ++
2427 ++ if (!ieee80211_is_mgmt(fc))
2428 ++ return;
2429 ++
2430 ++ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2431 ++ case cpu_to_le16(IEEE80211_STYPE_AUTH):
2432 ++ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2433 ++ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
2434 ++ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
2435 ++ *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2436 ++ *tx_flags |= TX_CMD_FLG_CTS_MSK;
2437 ++ break;
2438 ++ }
2439 + } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
2440 + *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2441 + *tx_flags |= TX_CMD_FLG_CTS_MSK;
2442 ++ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2443 + }
2444 + }
2445 + EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
2446 +
2447 ++
2448 + static bool is_single_rx_stream(struct iwl_priv *priv)
2449 + {
2450 + return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
2451 +@@ -1294,51 +1311,6 @@ out:
2452 + EXPORT_SYMBOL(iwl_apm_init);
2453 +
2454 +
2455 +-
2456 +-void iwl_configure_filter(struct ieee80211_hw *hw,
2457 +- unsigned int changed_flags,
2458 +- unsigned int *total_flags,
2459 +- u64 multicast)
2460 +-{
2461 +- struct iwl_priv *priv = hw->priv;
2462 +- __le32 filter_or = 0, filter_nand = 0;
2463 +-
2464 +-#define CHK(test, flag) do { \
2465 +- if (*total_flags & (test)) \
2466 +- filter_or |= (flag); \
2467 +- else \
2468 +- filter_nand |= (flag); \
2469 +- } while (0)
2470 +-
2471 +- IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
2472 +- changed_flags, *total_flags);
2473 +-
2474 +- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
2475 +- CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
2476 +- CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
2477 +-
2478 +-#undef CHK
2479 +-
2480 +- mutex_lock(&priv->mutex);
2481 +-
2482 +- priv->staging_rxon.filter_flags &= ~filter_nand;
2483 +- priv->staging_rxon.filter_flags |= filter_or;
2484 +-
2485 +- iwlcore_commit_rxon(priv);
2486 +-
2487 +- mutex_unlock(&priv->mutex);
2488 +-
2489 +- /*
2490 +- * Receiving all multicast frames is always enabled by the
2491 +- * default flags setup in iwl_connection_init_rx_config()
2492 +- * since we currently do not support programming multicast
2493 +- * filters into the device.
2494 +- */
2495 +- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
2496 +- FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
2497 +-}
2498 +-EXPORT_SYMBOL(iwl_configure_filter);
2499 +-
2500 + int iwl_set_hw_params(struct iwl_priv *priv)
2501 + {
2502 + priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2503 +@@ -1936,6 +1908,10 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2504 + priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
2505 + else
2506 + priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
2507 ++ if (bss_conf->use_cts_prot)
2508 ++ priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
2509 ++ else
2510 ++ priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
2511 + }
2512 +
2513 + if (changes & BSS_CHANGED_BASIC_RATES) {
2514 +diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
2515 +index 31775bd..e8ef317 100644
2516 +--- a/drivers/net/wireless/iwlwifi/iwl-core.h
2517 ++++ b/drivers/net/wireless/iwlwifi/iwl-core.h
2518 +@@ -102,8 +102,9 @@ struct iwl_hcmd_utils_ops {
2519 + u32 min_average_noise,
2520 + u8 default_chain);
2521 + void (*chain_noise_reset)(struct iwl_priv *priv);
2522 +- void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info,
2523 +- __le32 *tx_flags);
2524 ++ void (*rts_tx_cmd_flag)(struct iwl_priv *priv,
2525 ++ struct ieee80211_tx_info *info,
2526 ++ __le16 fc, __le32 *tx_flags);
2527 + int (*calc_rssi)(struct iwl_priv *priv,
2528 + struct iwl_rx_phy_res *rx_resp);
2529 + void (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
2530 +@@ -355,9 +356,6 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
2531 + u32 decrypt_res,
2532 + struct ieee80211_rx_status *stats);
2533 + void iwl_irq_handle_error(struct iwl_priv *priv);
2534 +-void iwl_configure_filter(struct ieee80211_hw *hw,
2535 +- unsigned int changed_flags,
2536 +- unsigned int *total_flags, u64 multicast);
2537 + int iwl_set_hw_params(struct iwl_priv *priv);
2538 + void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif);
2539 + void iwl_bss_info_changed(struct ieee80211_hw *hw,
2540 +@@ -375,8 +373,9 @@ void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif);
2541 + void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
2542 + int iwl_alloc_txq_mem(struct iwl_priv *priv);
2543 + void iwl_free_txq_mem(struct iwl_priv *priv);
2544 +-void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
2545 +- __le32 *tx_flags);
2546 ++void iwlcore_rts_tx_cmd_flag(struct iwl_priv *priv,
2547 ++ struct ieee80211_tx_info *info,
2548 ++ __le16 fc, __le32 *tx_flags);
2549 + #ifdef CONFIG_IWLWIFI_DEBUGFS
2550 + int iwl_alloc_traffic_mem(struct iwl_priv *priv);
2551 + void iwl_free_traffic_mem(struct iwl_priv *priv);
2552 +diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
2553 +index a27872d..39c0d2d 100644
2554 +--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
2555 ++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
2556 +@@ -434,10 +434,7 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
2557 + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2558 + }
2559 +
2560 +- priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
2561 +-
2562 +- if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2563 +- tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2564 ++ priv->cfg->ops->utils->rts_tx_cmd_flag(priv, info, fc, &tx_flags);
2565 +
2566 + tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2567 + if (ieee80211_is_mgmt(fc)) {
2568 +@@ -3465,6 +3462,55 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
2569 +
2570 + return 0;
2571 + }
2572 ++
2573 ++static void iwl3945_configure_filter(struct ieee80211_hw *hw,
2574 ++ unsigned int changed_flags,
2575 ++ unsigned int *total_flags,
2576 ++ u64 multicast)
2577 ++{
2578 ++ struct iwl_priv *priv = hw->priv;
2579 ++ __le32 filter_or = 0, filter_nand = 0;
2580 ++
2581 ++#define CHK(test, flag) do { \
2582 ++ if (*total_flags & (test)) \
2583 ++ filter_or |= (flag); \
2584 ++ else \
2585 ++ filter_nand |= (flag); \
2586 ++ } while (0)
2587 ++
2588 ++ IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
2589 ++ changed_flags, *total_flags);
2590 ++
2591 ++ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
2592 ++ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
2593 ++ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
2594 ++
2595 ++#undef CHK
2596 ++
2597 ++ mutex_lock(&priv->mutex);
2598 ++
2599 ++ priv->staging_rxon.filter_flags &= ~filter_nand;
2600 ++ priv->staging_rxon.filter_flags |= filter_or;
2601 ++
2602 ++ /*
2603 ++ * Committing directly here breaks for some reason,
2604 ++ * but we'll eventually commit the filter flags
2605 ++ * change anyway.
2606 ++ */
2607 ++
2608 ++ mutex_unlock(&priv->mutex);
2609 ++
2610 ++ /*
2611 ++ * Receiving all multicast frames is always enabled by the
2612 ++ * default flags setup in iwl_connection_init_rx_config()
2613 ++ * since we currently do not support programming multicast
2614 ++ * filters into the device.
2615 ++ */
2616 ++ *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
2617 ++ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
2618 ++}
2619 ++
2620 ++
2621 + /*****************************************************************************
2622 + *
2623 + * sysfs attributes
2624 +@@ -3870,7 +3916,7 @@ static struct ieee80211_ops iwl3945_hw_ops = {
2625 + .add_interface = iwl_mac_add_interface,
2626 + .remove_interface = iwl_mac_remove_interface,
2627 + .config = iwl_mac_config,
2628 +- .configure_filter = iwl_configure_filter,
2629 ++ .configure_filter = iwl3945_configure_filter,
2630 + .set_key = iwl3945_mac_set_key,
2631 + .conf_tx = iwl_mac_conf_tx,
2632 + .reset_tsf = iwl_mac_reset_tsf,
2633 +diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c
2634 +index a37b30c..ce3722f 100644
2635 +--- a/drivers/net/wireless/wl12xx/wl1251_cmd.c
2636 ++++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c
2637 +@@ -484,7 +484,7 @@ int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout)
2638 +
2639 + cmd->timeout = timeout;
2640 +
2641 +- ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
2642 ++ ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, cmd, sizeof(*cmd));
2643 + if (ret < 0) {
2644 + wl1251_error("cmd trigger scan to failed: %d", ret);
2645 + goto out;
2646 +diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
2647 +index 71ff154..90111d7 100644
2648 +--- a/drivers/platform/x86/compal-laptop.c
2649 ++++ b/drivers/platform/x86/compal-laptop.c
2650 +@@ -259,6 +259,14 @@ static struct dmi_system_id __initdata compal_dmi_table[] = {
2651 + .callback = dmi_check_cb
2652 + },
2653 + {
2654 ++ .ident = "Dell Mini 1012",
2655 ++ .matches = {
2656 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2657 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
2658 ++ },
2659 ++ .callback = dmi_check_cb
2660 ++ },
2661 ++ {
2662 + .ident = "Dell Inspiron 11z",
2663 + .matches = {
2664 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2665 +@@ -375,5 +383,6 @@ MODULE_ALIAS("dmi:*:rnIFT00:rvrIFT00:*");
2666 + MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron910:*");
2667 + MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1010:*");
2668 + MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1011:*");
2669 ++MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1012:*");
2670 + MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1110:*");
2671 + MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1210:*");
2672 +diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
2673 +index 661e3ac..6110601 100644
2674 +--- a/drivers/platform/x86/dell-laptop.c
2675 ++++ b/drivers/platform/x86/dell-laptop.c
2676 +@@ -116,6 +116,13 @@ static struct dmi_system_id __devinitdata dell_blacklist[] = {
2677 + },
2678 + },
2679 + {
2680 ++ .ident = "Dell Mini 1012",
2681 ++ .matches = {
2682 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2683 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
2684 ++ },
2685 ++ },
2686 ++ {
2687 + .ident = "Dell Inspiron 11z",
2688 + .matches = {
2689 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2690 +diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
2691 +index 5a1dc8a..03713bc 100644
2692 +--- a/drivers/regulator/wm8994-regulator.c
2693 ++++ b/drivers/regulator/wm8994-regulator.c
2694 +@@ -219,8 +219,6 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
2695 +
2696 + ldo->wm8994 = wm8994;
2697 +
2698 +- ldo->is_enabled = true;
2699 +-
2700 + if (pdata->ldo[id].enable && gpio_is_valid(pdata->ldo[id].enable)) {
2701 + ldo->enable = pdata->ldo[id].enable;
2702 +
2703 +@@ -237,7 +235,8 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
2704 + ret);
2705 + goto err_gpio;
2706 + }
2707 +- }
2708 ++ } else
2709 ++ ldo->is_enabled = true;
2710 +
2711 + ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &pdev->dev,
2712 + pdata->ldo[id].init_data, ldo);
2713 +diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c
2714 +index 544f2e2..6381a02 100644
2715 +--- a/drivers/serial/suncore.c
2716 ++++ b/drivers/serial/suncore.c
2717 +@@ -55,7 +55,12 @@ EXPORT_SYMBOL(sunserial_unregister_minors);
2718 + int sunserial_console_match(struct console *con, struct device_node *dp,
2719 + struct uart_driver *drv, int line, bool ignore_line)
2720 + {
2721 +- if (!con || of_console_device != dp)
2722 ++ if (!con)
2723 ++ return 0;
2724 ++
2725 ++ drv->cons = con;
2726 ++
2727 ++ if (of_console_device != dp)
2728 + return 0;
2729 +
2730 + if (!ignore_line) {
2731 +@@ -69,12 +74,10 @@ int sunserial_console_match(struct console *con, struct device_node *dp,
2732 + return 0;
2733 + }
2734 +
2735 +- con->index = line;
2736 +- drv->cons = con;
2737 +-
2738 +- if (!console_set_on_cmdline)
2739 ++ if (!console_set_on_cmdline) {
2740 ++ con->index = line;
2741 + add_preferred_console(con->name, line, NULL);
2742 +-
2743 ++ }
2744 + return 1;
2745 + }
2746 + EXPORT_SYMBOL(sunserial_console_match);
2747 +diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
2748 +index 7a582e8..ce1d251 100644
2749 +--- a/drivers/staging/batman-adv/hard-interface.c
2750 ++++ b/drivers/staging/batman-adv/hard-interface.c
2751 +@@ -128,6 +128,9 @@ static bool hardif_is_iface_up(struct batman_if *batman_if)
2752 +
2753 + static void update_mac_addresses(struct batman_if *batman_if)
2754 + {
2755 ++ if (!batman_if || !batman_if->packet_buff)
2756 ++ return;
2757 ++
2758 + addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr);
2759 +
2760 + memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
2761 +@@ -194,8 +197,6 @@ static void hardif_activate_interface(struct bat_priv *bat_priv,
2762 + if (batman_if->if_status != IF_INACTIVE)
2763 + return;
2764 +
2765 +- dev_hold(batman_if->net_dev);
2766 +-
2767 + update_mac_addresses(batman_if);
2768 + batman_if->if_status = IF_TO_BE_ACTIVATED;
2769 +
2770 +@@ -222,8 +223,6 @@ static void hardif_deactivate_interface(struct batman_if *batman_if)
2771 + (batman_if->if_status != IF_TO_BE_ACTIVATED))
2772 + return;
2773 +
2774 +- dev_put(batman_if->net_dev);
2775 +-
2776 + batman_if->if_status = IF_INACTIVE;
2777 +
2778 + printk(KERN_INFO "batman-adv:Interface deactivated: %s\n",
2779 +@@ -321,12 +320,14 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
2780 + if (ret != 1)
2781 + goto out;
2782 +
2783 ++ dev_hold(net_dev);
2784 ++
2785 + batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
2786 + if (!batman_if) {
2787 + printk(KERN_ERR "batman-adv:"
2788 + "Can't add interface (%s): out of memory\n",
2789 + net_dev->name);
2790 +- goto out;
2791 ++ goto release_dev;
2792 + }
2793 +
2794 + batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC);
2795 +@@ -340,6 +341,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
2796 + batman_if->if_num = -1;
2797 + batman_if->net_dev = net_dev;
2798 + batman_if->if_status = IF_NOT_IN_USE;
2799 ++ batman_if->packet_buff = NULL;
2800 + INIT_LIST_HEAD(&batman_if->list);
2801 +
2802 + check_known_mac_addr(batman_if->net_dev->dev_addr);
2803 +@@ -350,6 +352,8 @@ free_dev:
2804 + kfree(batman_if->dev);
2805 + free_if:
2806 + kfree(batman_if);
2807 ++release_dev:
2808 ++ dev_put(net_dev);
2809 + out:
2810 + return NULL;
2811 + }
2812 +@@ -378,6 +382,7 @@ static void hardif_remove_interface(struct batman_if *batman_if)
2813 + batman_if->if_status = IF_TO_BE_REMOVED;
2814 + list_del_rcu(&batman_if->list);
2815 + sysfs_del_hardif(&batman_if->hardif_obj);
2816 ++ dev_put(batman_if->net_dev);
2817 + call_rcu(&batman_if->rcu, hardif_free_interface);
2818 + }
2819 +
2820 +@@ -397,15 +402,13 @@ static int hard_if_event(struct notifier_block *this,
2821 + /* FIXME: each batman_if will be attached to a softif */
2822 + struct bat_priv *bat_priv = netdev_priv(soft_device);
2823 +
2824 +- if (!batman_if)
2825 +- batman_if = hardif_add_interface(net_dev);
2826 ++ if (!batman_if && event == NETDEV_REGISTER)
2827 ++ batman_if = hardif_add_interface(net_dev);
2828 +
2829 + if (!batman_if)
2830 + goto out;
2831 +
2832 + switch (event) {
2833 +- case NETDEV_REGISTER:
2834 +- break;
2835 + case NETDEV_UP:
2836 + hardif_activate_interface(bat_priv, batman_if);
2837 + break;
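
The hard-interface.c hunks move dev_hold()/dev_put() off the activate/deactivate path and onto add/remove, so the net_device reference is taken exactly once where the pointer is stored and dropped exactly once where it is discarded. A toy model of that ownership rule — dev_hold()/dev_put() here are bare counters, not the kernel helpers:

#include <stdio.h>

/* Toy refcounted object standing in for struct net_device. */
struct net_device { const char *name; int refcnt; };

static void dev_hold(struct net_device *d) { d->refcnt++; }
static void dev_put(struct net_device *d)  { d->refcnt--; }

struct batman_if { struct net_device *net_dev; };

/* Take the reference where the pointer is stored... */
static struct batman_if *add_interface(struct net_device *dev)
{
	static struct batman_if bif;	/* simplified allocation */

	dev_hold(dev);
	bif.net_dev = dev;
	return &bif;
}

/* ...and drop it where the pointer is discarded. */
static void remove_interface(struct batman_if *bif)
{
	dev_put(bif->net_dev);
	bif->net_dev = NULL;
}

int main(void)
{
	struct net_device eth0 = { "eth0", 1 };
	struct batman_if *bif = add_interface(&eth0);

	/* activate/deactivate cycles no longer touch the refcount */
	remove_interface(bif);
	printf("%s refcnt back to %d\n", eth0.name, eth0.refcnt); /* 1 */
	return 0;
}
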
2838 +diff --git a/drivers/staging/batman-adv/originator.c b/drivers/staging/batman-adv/originator.c
2839 +index 568aef8..2177c50 100644
2840 +--- a/drivers/staging/batman-adv/originator.c
2841 ++++ b/drivers/staging/batman-adv/originator.c
2842 +@@ -401,11 +401,12 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
2843 + int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
2844 + {
2845 + struct orig_node *orig_node;
2846 ++ unsigned long flags;
2847 + HASHIT(hashit);
2848 +
2849 + /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
2850 + * if_num */
2851 +- spin_lock(&orig_hash_lock);
2852 ++ spin_lock_irqsave(&orig_hash_lock, flags);
2853 +
2854 + while (hash_iterate(orig_hash, &hashit)) {
2855 + orig_node = hashit.bucket->data;
2856 +@@ -414,11 +415,11 @@ int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
2857 + goto err;
2858 + }
2859 +
2860 +- spin_unlock(&orig_hash_lock);
2861 ++ spin_unlock_irqrestore(&orig_hash_lock, flags);
2862 + return 0;
2863 +
2864 + err:
2865 +- spin_unlock(&orig_hash_lock);
2866 ++ spin_unlock_irqrestore(&orig_hash_lock, flags);
2867 + return -ENOMEM;
2868 + }
2869 +
2870 +@@ -480,12 +481,13 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
2871 + {
2872 + struct batman_if *batman_if_tmp;
2873 + struct orig_node *orig_node;
2874 ++ unsigned long flags;
2875 + HASHIT(hashit);
2876 + int ret;
2877 +
2878 + /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
2879 + * if_num */
2880 +- spin_lock(&orig_hash_lock);
2881 ++ spin_lock_irqsave(&orig_hash_lock, flags);
2882 +
2883 + while (hash_iterate(orig_hash, &hashit)) {
2884 + orig_node = hashit.bucket->data;
2885 +@@ -512,10 +514,10 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
2886 + rcu_read_unlock();
2887 +
2888 + batman_if->if_num = -1;
2889 +- spin_unlock(&orig_hash_lock);
2890 ++ spin_unlock_irqrestore(&orig_hash_lock, flags);
2891 + return 0;
2892 +
2893 + err:
2894 +- spin_unlock(&orig_hash_lock);
2895 ++ spin_unlock_irqrestore(&orig_hash_lock, flags);
2896 + return -ENOMEM;
2897 + }
2898 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
2899 +index bfc99a9..221f999 100644
2900 +--- a/drivers/usb/host/xhci-ring.c
2901 ++++ b/drivers/usb/host/xhci-ring.c
2902 +@@ -131,7 +131,7 @@ static void next_trb(struct xhci_hcd *xhci,
2903 + *seg = (*seg)->next;
2904 + *trb = ((*seg)->trbs);
2905 + } else {
2906 +- *trb = (*trb)++;
2907 ++ (*trb)++;
2908 + }
2909 + }
2910 +
2911 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2912 +index 2bef441..80bf833 100644
2913 +--- a/drivers/usb/serial/cp210x.c
2914 ++++ b/drivers/usb/serial/cp210x.c
2915 +@@ -222,8 +222,8 @@ static struct usb_serial_driver cp210x_device = {
2916 + #define BITS_STOP_2 0x0002
2917 +
2918 + /* CP210X_SET_BREAK */
2919 +-#define BREAK_ON 0x0000
2920 +-#define BREAK_OFF 0x0001
2921 ++#define BREAK_ON 0x0001
2922 ++#define BREAK_OFF 0x0000
2923 +
2924 + /* CP210X_(SET_MHS|GET_MDMSTS) */
2925 + #define CONTROL_DTR 0x0001
2926 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2927 +index eb12d9b..63ddb2f 100644
2928 +--- a/drivers/usb/serial/ftdi_sio.c
2929 ++++ b/drivers/usb/serial/ftdi_sio.c
2930 +@@ -180,6 +180,7 @@ static struct usb_device_id id_table_combined [] = {
2931 + { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
2932 + { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
2933 + { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
2934 ++ { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
2935 + { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
2936 + { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
2937 + { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) },
2938 +@@ -750,6 +751,8 @@ static struct usb_device_id id_table_combined [] = {
2939 + { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
2940 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
2941 + { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
2942 ++ { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
2943 ++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
2944 + { }, /* Optional parameter entry */
2945 + { } /* Terminating entry */
2946 + };
2947 +@@ -1376,7 +1379,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
2948 + }
2949 +
2950 + /* set max packet size based on descriptor */
2951 +- priv->max_packet_size = ep_desc->wMaxPacketSize;
2952 ++ priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize);
2953 +
2954 + dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
2955 + }
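
The wMaxPacketSize fix above matters on big-endian hosts: USB descriptor fields are little-endian on the wire, so the raw __le16 must pass through le16_to_cpu() before being used as a number. A byte-level illustration with a userspace stand-in for the kernel helper:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's le16_to_cpu(): assemble the value
 * from explicit bytes so it is correct on any host endianness. */
static uint16_t le16_to_cpu_demo(const uint8_t *raw)
{
	return (uint16_t)(raw[0] | (raw[1] << 8));
}

int main(void)
{
	/* wMaxPacketSize = 64 as it appears in the descriptor: 40 00. */
	uint8_t raw[2] = { 0x40, 0x00 };

	/* Reading the bytes as a native u16 on a big-endian CPU would
	 * yield 0x4000 (16384) -- the bug the hunk fixes. */
	printf("max packet size = %u\n", le16_to_cpu_demo(raw)); /* 64 */
	return 0;
}
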
2956 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2957 +index 6e612c5..2e95857 100644
2958 +--- a/drivers/usb/serial/ftdi_sio_ids.h
2959 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
2960 +@@ -110,6 +110,9 @@
2961 + /* Propox devices */
2962 + #define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
2963 +
2964 ++/* Lenz LI-USB Computer Interface. */
2965 ++#define FTDI_LENZ_LIUSB_PID 0xD780
2966 ++
2967 + /*
2968 + * Xsens Technologies BV products (http://www.xsens.com).
2969 + */
2970 +@@ -989,6 +992,12 @@
2971 + #define ALTI2_N3_PID 0x6001 /* Neptune 3 */
2972 +
2973 + /*
2974 ++ * Ionics PlugComputer
2975 ++ */
2976 ++#define IONICS_VID 0x1c0c
2977 ++#define IONICS_PLUGCOMPUTER_PID 0x0102
2978 ++
2979 ++/*
2980 + * Dresden Elektronik Sensor Terminal Board
2981 + */
2982 + #define DE_VID 0x1cf1 /* Vendor ID */
2983 +diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
2984 +index 0fca265..9991063 100644
2985 +--- a/drivers/usb/serial/io_ti.c
2986 ++++ b/drivers/usb/serial/io_ti.c
2987 +@@ -1151,7 +1151,7 @@ static int download_fw(struct edgeport_serial *serial)
2988 +
2989 + /* Check if we have an old version in the I2C and
2990 + update if necessary */
2991 +- if (download_cur_ver != download_new_ver) {
2992 ++ if (download_cur_ver < download_new_ver) {
2993 + dbg("%s - Update I2C dld from %d.%d to %d.%d",
2994 + __func__,
2995 + firmware_version->Ver_Major,
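
Changing the io_ti version test from != to < means the driver only flashes the bundled firmware when it is genuinely newer, instead of "updating" a device back down to an older image. A sketch with an assumed (major << 8 | minor) packing, purely for illustration:

#include <stdio.h>

/* Assumed packing: major in the high byte, minor in the low byte. */
static int pack_ver(int major, int minor)
{
	return (major << 8) | minor;
}

int main(void)
{
	int cur = pack_ver(4, 80);	/* image already in the I2C */
	int avail = pack_ver(4, 10);	/* image bundled with the driver */

	/* Old test (cur != avail) would have "updated" 4.80 down to 4.10;
	 * the new test only flashes genuine upgrades. */
	if (cur < avail)
		printf("updating to %d.%d\n", avail >> 8, avail & 0xff);
	else
		printf("keeping %d.%d\n", cur >> 8, cur & 0xff);
	return 0;
}
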
2996 +diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
2997 +index a6b207c..1f00f24 100644
2998 +--- a/drivers/usb/serial/navman.c
2999 ++++ b/drivers/usb/serial/navman.c
3000 +@@ -25,6 +25,7 @@ static int debug;
3001 +
3002 + static const struct usb_device_id id_table[] = {
3003 + { USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
3004 ++ { USB_DEVICE(0x0df7, 0x0900) }, /* Mobile Action i-gotU */
3005 + { },
3006 + };
3007 + MODULE_DEVICE_TABLE(usb, id_table);
3008 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3009 +index 5c35b3a..80c74d4 100644
3010 +--- a/drivers/usb/serial/option.c
3011 ++++ b/drivers/usb/serial/option.c
3012 +@@ -368,6 +368,10 @@ static void option_instat_callback(struct urb *urb);
3013 + #define OLIVETTI_VENDOR_ID 0x0b3c
3014 + #define OLIVETTI_PRODUCT_OLICARD100 0xc000
3015 +
3016 ++/* Celot products */
3017 ++#define CELOT_VENDOR_ID 0x211f
3018 ++#define CELOT_PRODUCT_CT680M 0x6801
3019 ++
3020 + /* some devices interfaces need special handling due to a number of reasons */
3021 + enum option_blacklist_reason {
3022 + OPTION_BLACKLIST_NONE = 0,
3023 +@@ -891,10 +895,9 @@ static const struct usb_device_id option_ids[] = {
3024 + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
3025 + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
3026 + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
3027 +-
3028 + { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
3029 +-
3030 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
3031 ++ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
3032 + { } /* Terminating entry */
3033 + };
3034 + MODULE_DEVICE_TABLE(usb, option_ids);
3035 +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
3036 +index 6b60018..c98f0fb 100644
3037 +--- a/drivers/usb/serial/pl2303.c
3038 ++++ b/drivers/usb/serial/pl2303.c
3039 +@@ -86,6 +86,7 @@ static const struct usb_device_id id_table[] = {
3040 + { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
3041 + { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
3042 + { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
3043 ++ { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
3044 + { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
3045 + { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
3046 + { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
3047 +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
3048 +index a871645..43eb9bd 100644
3049 +--- a/drivers/usb/serial/pl2303.h
3050 ++++ b/drivers/usb/serial/pl2303.h
3051 +@@ -128,6 +128,10 @@
3052 + #define CRESSI_VENDOR_ID 0x04b8
3053 + #define CRESSI_EDY_PRODUCT_ID 0x0521
3054 +
3055 ++/* Zeagle dive computer interface */
3056 ++#define ZEAGLE_VENDOR_ID 0x04b8
3057 ++#define ZEAGLE_N2ITION3_PRODUCT_ID 0x0522
3058 ++
3059 + /* Sony, USB data cable for CMD-Jxx mobile phones */
3060 + #define SONY_VENDOR_ID 0x054c
3061 + #define SONY_QN3USB_PRODUCT_ID 0x0437
3062 +diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
3063 +index f3a4e15..f96a471 100644
3064 +--- a/drivers/video/matrox/matroxfb_base.h
3065 ++++ b/drivers/video/matrox/matroxfb_base.h
3066 +@@ -151,13 +151,13 @@ static inline void mga_writel(vaddr_t va, unsigned int offs, u_int32_t value) {
3067 + static inline void mga_memcpy_toio(vaddr_t va, const void* src, int len) {
3068 + #if defined(__alpha__) || defined(__i386__) || defined(__x86_64__)
3069 + /*
3070 +- * memcpy_toio works for us if:
3071 ++ * iowrite32_rep works for us if:
3072 + * (1) Copies data as 32bit quantities, not byte after byte,
3073 + * (2) Performs LE ordered stores, and
3074 + * (3) It copes with unaligned source (destination is guaranteed to be page
3075 + * aligned and length is guaranteed to be multiple of 4).
3076 + */
3077 +- memcpy_toio(va.vaddr, src, len);
3078 ++ iowrite32_rep(va.vaddr, src, len >> 2);
3079 + #else
3080 + u_int32_t __iomem* addr = va.vaddr;
3081 +
3082 +diff --git a/firmware/Makefile b/firmware/Makefile
3083 +index 020e629..99955ed 100644
3084 +--- a/firmware/Makefile
3085 ++++ b/firmware/Makefile
3086 +@@ -142,7 +142,7 @@ fw-shipped-$(CONFIG_YAM) += yam/1200.bin yam/9600.bin
3087 + fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-)
3088 +
3089 + # Directories which we _might_ need to create, so we have a rule for them.
3090 +-firmware-dirs := $(sort $(patsubst %,$(objtree)/$(obj)/%/,$(dir $(fw-external-y) $(fw-shipped-all))))
3091 ++firmware-dirs := $(sort $(addprefix $(objtree)/$(obj)/,$(dir $(fw-external-y) $(fw-shipped-all))))
3092 +
3093 + quiet_cmd_mkdir = MKDIR $(patsubst $(objtree)/%,%,$@)
3094 + cmd_mkdir = mkdir -p $@
3095 +diff --git a/fs/char_dev.c b/fs/char_dev.c
3096 +index d6db933..f80a4f2 100644
3097 +--- a/fs/char_dev.c
3098 ++++ b/fs/char_dev.c
3099 +@@ -20,6 +20,7 @@
3100 + #include <linux/cdev.h>
3101 + #include <linux/mutex.h>
3102 + #include <linux/backing-dev.h>
3103 ++#include <linux/tty.h>
3104 +
3105 + #include "internal.h"
3106 +
3107 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
3108 +index e60416d..d69551e 100644
3109 +--- a/fs/nfs/dir.c
3110 ++++ b/fs/nfs/dir.c
3111 +@@ -1103,7 +1103,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
3112 + if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
3113 + goto no_open_dput;
3114 + /* We can't create new files, or truncate existing ones here */
3115 +- openflags &= ~(O_CREAT|O_TRUNC);
3116 ++ openflags &= ~(O_CREAT|O_EXCL|O_TRUNC);
3117 +
3118 + /*
3119 + * Note: we're not holding inode->i_mutex and so may be racing with
3120 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3121 +index 70015dd..330a3c9 100644
3122 +--- a/fs/nfs/nfs4proc.c
3123 ++++ b/fs/nfs/nfs4proc.c
3124 +@@ -2023,7 +2023,8 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
3125 + struct rpc_cred *cred;
3126 + struct nfs4_state *state;
3127 + struct dentry *res;
3128 +- fmode_t fmode = nd->intent.open.flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
3129 ++ int open_flags = nd->intent.open.flags;
3130 ++ fmode_t fmode = open_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
3131 +
3132 + if (nd->flags & LOOKUP_CREATE) {
3133 + attr.ia_mode = nd->intent.open.create_mode;
3134 +@@ -2031,8 +2032,9 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
3135 + if (!IS_POSIXACL(dir))
3136 + attr.ia_mode &= ~current_umask();
3137 + } else {
3138 ++ open_flags &= ~O_EXCL;
3139 + attr.ia_valid = 0;
3140 +- BUG_ON(nd->intent.open.flags & O_CREAT);
3141 ++ BUG_ON(open_flags & O_CREAT);
3142 + }
3143 +
3144 + cred = rpc_lookup_cred();
3145 +@@ -2041,7 +2043,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
3146 + parent = dentry->d_parent;
3147 + /* Protect against concurrent sillydeletes */
3148 + nfs_block_sillyrename(parent);
3149 +- state = nfs4_do_open(dir, &path, fmode, nd->intent.open.flags, &attr, cred);
3150 ++ state = nfs4_do_open(dir, &path, fmode, open_flags, &attr, cred);
3151 + put_rpccred(cred);
3152 + if (IS_ERR(state)) {
3153 + if (PTR_ERR(state) == -ENOENT) {
3154 +diff --git a/fs/nfs/super.c b/fs/nfs/super.c
3155 +index f9df16d..6bf11d7 100644
3156 +--- a/fs/nfs/super.c
3157 ++++ b/fs/nfs/super.c
3158 +@@ -652,6 +652,13 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
3159 +
3160 + if (nfss->options & NFS_OPTION_FSCACHE)
3161 + seq_printf(m, ",fsc");
3162 ++
3163 ++ if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) {
3164 ++ if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
3165 ++ seq_printf(m, ",lookupcache=none");
3166 ++ else
3167 ++ seq_printf(m, ",lookupcache=pos");
3168 ++ }
3169 + }
3170 +
3171 + /*
3172 +diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
3173 +index 414ef68..fbb354c 100644
3174 +--- a/fs/nilfs2/super.c
3175 ++++ b/fs/nilfs2/super.c
3176 +@@ -336,9 +336,10 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno)
3177 + list_add(&sbi->s_list, &nilfs->ns_supers);
3178 + up_write(&nilfs->ns_super_sem);
3179 +
3180 ++ err = -ENOMEM;
3181 + sbi->s_ifile = nilfs_ifile_new(sbi, nilfs->ns_inode_size);
3182 + if (!sbi->s_ifile)
3183 +- return -ENOMEM;
3184 ++ goto delist;
3185 +
3186 + down_read(&nilfs->ns_segctor_sem);
3187 + err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp,
3188 +@@ -369,6 +370,7 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno)
3189 + nilfs_mdt_destroy(sbi->s_ifile);
3190 + sbi->s_ifile = NULL;
3191 +
3192 ++ delist:
3193 + down_write(&nilfs->ns_super_sem);
3194 + list_del_init(&sbi->s_list);
3195 + up_write(&nilfs->ns_super_sem);
3196 +diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
3197 +index da70229..a76e0aa 100644
3198 +--- a/fs/ocfs2/acl.c
3199 ++++ b/fs/ocfs2/acl.c
3200 +@@ -290,12 +290,30 @@ static int ocfs2_set_acl(handle_t *handle,
3201 +
3202 + int ocfs2_check_acl(struct inode *inode, int mask)
3203 + {
3204 +- struct posix_acl *acl = ocfs2_get_acl(inode, ACL_TYPE_ACCESS);
3205 ++ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3206 ++ struct buffer_head *di_bh = NULL;
3207 ++ struct posix_acl *acl;
3208 ++ int ret = -EAGAIN;
3209 +
3210 +- if (IS_ERR(acl))
3211 ++ if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
3212 ++ return ret;
3213 ++
3214 ++ ret = ocfs2_read_inode_block(inode, &di_bh);
3215 ++ if (ret < 0) {
3216 ++ mlog_errno(ret);
3217 ++ return ret;
3218 ++ }
3219 ++
3220 ++ acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, di_bh);
3221 ++
3222 ++ brelse(di_bh);
3223 ++
3224 ++ if (IS_ERR(acl)) {
3225 ++ mlog_errno(PTR_ERR(acl));
3226 + return PTR_ERR(acl);
3227 ++ }
3228 + if (acl) {
3229 +- int ret = posix_acl_permission(inode, acl, mask);
3230 ++ ret = posix_acl_permission(inode, acl, mask);
3231 + posix_acl_release(acl);
3232 + return ret;
3233 + }
3234 +@@ -344,7 +362,7 @@ int ocfs2_init_acl(handle_t *handle,
3235 + {
3236 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3237 + struct posix_acl *acl = NULL;
3238 +- int ret = 0;
3239 ++ int ret = 0, ret2;
3240 + mode_t mode;
3241 +
3242 + if (!S_ISLNK(inode->i_mode)) {
3243 +@@ -381,7 +399,12 @@ int ocfs2_init_acl(handle_t *handle,
3244 + mode = inode->i_mode;
3245 + ret = posix_acl_create_masq(clone, &mode);
3246 + if (ret >= 0) {
3247 +- ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
3248 ++ ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
3249 ++ if (ret2) {
3250 ++ mlog_errno(ret2);
3251 ++ ret = ret2;
3252 ++ goto cleanup;
3253 ++ }
3254 + if (ret > 0) {
3255 + ret = ocfs2_set_acl(handle, inode,
3256 + di_bh, ACL_TYPE_ACCESS,
3257 +diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
3258 +index 94b97fc..ffb4c68 100644
3259 +--- a/fs/ocfs2/dlm/dlmmaster.c
3260 ++++ b/fs/ocfs2/dlm/dlmmaster.c
3261 +@@ -511,8 +511,6 @@ static void dlm_lockres_release(struct kref *kref)
3262 +
3263 + atomic_dec(&dlm->res_cur_count);
3264 +
3265 +- dlm_put(dlm);
3266 +-
3267 + if (!hlist_unhashed(&res->hash_node) ||
3268 + !list_empty(&res->granted) ||
3269 + !list_empty(&res->converting) ||
3270 +@@ -585,8 +583,6 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
3271 + res->migration_pending = 0;
3272 + res->inflight_locks = 0;
3273 +
3274 +- /* put in dlm_lockres_release */
3275 +- dlm_grab(dlm);
3276 + res->dlm = dlm;
3277 +
3278 + kref_init(&res->refs);
3279 +@@ -3050,8 +3046,6 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3280 + /* check for pre-existing lock */
3281 + spin_lock(&dlm->spinlock);
3282 + res = __dlm_lookup_lockres(dlm, name, namelen, hash);
3283 +- spin_lock(&dlm->master_lock);
3284 +-
3285 + if (res) {
3286 + spin_lock(&res->spinlock);
3287 + if (res->state & DLM_LOCK_RES_RECOVERING) {
3288 +@@ -3069,14 +3063,15 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3289 + spin_unlock(&res->spinlock);
3290 + }
3291 +
3292 ++ spin_lock(&dlm->master_lock);
3293 + /* ignore status. only nonzero status would BUG. */
3294 + ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3295 + name, namelen,
3296 + migrate->new_master,
3297 + migrate->master);
3298 +
3299 +-unlock:
3300 + spin_unlock(&dlm->master_lock);
3301 ++unlock:
3302 + spin_unlock(&dlm->spinlock);
3303 +
3304 + if (oldmle) {
3305 +diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
3306 +index 9dfaac7..aaaffbc 100644
3307 +--- a/fs/ocfs2/dlm/dlmrecovery.c
3308 ++++ b/fs/ocfs2/dlm/dlmrecovery.c
3309 +@@ -1997,6 +1997,8 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
3310 + struct list_head *queue;
3311 + struct dlm_lock *lock, *next;
3312 +
3313 ++ assert_spin_locked(&dlm->spinlock);
3314 ++ assert_spin_locked(&res->spinlock);
3315 + res->state |= DLM_LOCK_RES_RECOVERING;
3316 + if (!list_empty(&res->recovering)) {
3317 + mlog(0,
3318 +@@ -2326,19 +2328,15 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
3319 + /* zero the lvb if necessary */
3320 + dlm_revalidate_lvb(dlm, res, dead_node);
3321 + if (res->owner == dead_node) {
3322 +- if (res->state & DLM_LOCK_RES_DROPPING_REF)
3323 +- mlog(0, "%s:%.*s: owned by "
3324 +- "dead node %u, this node was "
3325 +- "dropping its ref when it died. "
3326 +- "continue, dropping the flag.\n",
3327 +- dlm->name, res->lockname.len,
3328 +- res->lockname.name, dead_node);
3329 +-
3330 +- /* the wake_up for this will happen when the
3331 +- * RECOVERING flag is dropped later */
3332 +- res->state &= ~DLM_LOCK_RES_DROPPING_REF;
3333 ++ if (res->state & DLM_LOCK_RES_DROPPING_REF) {
3334 ++ mlog(ML_NOTICE, "Ignore %.*s for "
3335 ++ "recovery as it is being freed\n",
3336 ++ res->lockname.len,
3337 ++ res->lockname.name);
3338 ++ } else
3339 ++ dlm_move_lockres_to_recovery_list(dlm,
3340 ++ res);
3341 +
3342 +- dlm_move_lockres_to_recovery_list(dlm, res);
3343 + } else if (res->owner == dlm->node_num) {
3344 + dlm_free_dead_locks(dlm, res, dead_node);
3345 + __dlm_lockres_calc_usage(dlm, res);
3346 +diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
3347 +index d4f73ca..2211acf 100644
3348 +--- a/fs/ocfs2/dlm/dlmthread.c
3349 ++++ b/fs/ocfs2/dlm/dlmthread.c
3350 +@@ -92,19 +92,27 @@ int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
3351 + * truly ready to be freed. */
3352 + int __dlm_lockres_unused(struct dlm_lock_resource *res)
3353 + {
3354 +- if (!__dlm_lockres_has_locks(res) &&
3355 +- (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
3356 +- /* try not to scan the bitmap unless the first two
3357 +- * conditions are already true */
3358 +- int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
3359 +- if (bit >= O2NM_MAX_NODES) {
3360 +- /* since the bit for dlm->node_num is not
3361 +- * set, inflight_locks better be zero */
3362 +- BUG_ON(res->inflight_locks != 0);
3363 +- return 1;
3364 +- }
3365 +- }
3366 +- return 0;
3367 ++ int bit;
3368 ++
3369 ++ if (__dlm_lockres_has_locks(res))
3370 ++ return 0;
3371 ++
3372 ++ if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
3373 ++ return 0;
3374 ++
3375 ++ if (res->state & DLM_LOCK_RES_RECOVERING)
3376 ++ return 0;
3377 ++
3378 ++ bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
3379 ++ if (bit < O2NM_MAX_NODES)
3380 ++ return 0;
3381 ++
3382 ++ /*
3383 ++ * since the bit for dlm->node_num is not set, inflight_locks better
3384 ++ * be zero
3385 ++ */
3386 ++ BUG_ON(res->inflight_locks != 0);
3387 ++ return 1;
3388 + }
3389 +
3390 +
3391 +@@ -152,45 +160,25 @@ void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
3392 + spin_unlock(&dlm->spinlock);
3393 + }
3394 +
3395 +-static int dlm_purge_lockres(struct dlm_ctxt *dlm,
3396 ++static void dlm_purge_lockres(struct dlm_ctxt *dlm,
3397 + struct dlm_lock_resource *res)
3398 + {
3399 + int master;
3400 + int ret = 0;
3401 +
3402 +- spin_lock(&res->spinlock);
3403 +- if (!__dlm_lockres_unused(res)) {
3404 +- mlog(0, "%s:%.*s: tried to purge but not unused\n",
3405 +- dlm->name, res->lockname.len, res->lockname.name);
3406 +- __dlm_print_one_lock_resource(res);
3407 +- spin_unlock(&res->spinlock);
3408 +- BUG();
3409 +- }
3410 +-
3411 +- if (res->state & DLM_LOCK_RES_MIGRATING) {
3412 +- mlog(0, "%s:%.*s: Delay dropref as this lockres is "
3413 +- "being remastered\n", dlm->name, res->lockname.len,
3414 +- res->lockname.name);
3415 +- /* Re-add the lockres to the end of the purge list */
3416 +- if (!list_empty(&res->purge)) {
3417 +- list_del_init(&res->purge);
3418 +- list_add_tail(&res->purge, &dlm->purge_list);
3419 +- }
3420 +- spin_unlock(&res->spinlock);
3421 +- return 0;
3422 +- }
3423 ++ assert_spin_locked(&dlm->spinlock);
3424 ++ assert_spin_locked(&res->spinlock);
3425 +
3426 + master = (res->owner == dlm->node_num);
3427 +
3428 +- if (!master)
3429 +- res->state |= DLM_LOCK_RES_DROPPING_REF;
3430 +- spin_unlock(&res->spinlock);
3431 +
3432 + mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
3433 + res->lockname.name, master);
3434 +
3435 + if (!master) {
3436 ++ res->state |= DLM_LOCK_RES_DROPPING_REF;
3437 + /* drop spinlock... retake below */
3438 ++ spin_unlock(&res->spinlock);
3439 + spin_unlock(&dlm->spinlock);
3440 +
3441 + spin_lock(&res->spinlock);
3442 +@@ -208,31 +196,35 @@ static int dlm_purge_lockres(struct dlm_ctxt *dlm,
3443 + mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
3444 + dlm->name, res->lockname.len, res->lockname.name, ret);
3445 + spin_lock(&dlm->spinlock);
3446 ++ spin_lock(&res->spinlock);
3447 + }
3448 +
3449 +- spin_lock(&res->spinlock);
3450 + if (!list_empty(&res->purge)) {
3451 + mlog(0, "removing lockres %.*s:%p from purgelist, "
3452 + "master = %d\n", res->lockname.len, res->lockname.name,
3453 + res, master);
3454 + list_del_init(&res->purge);
3455 +- spin_unlock(&res->spinlock);
3456 + dlm_lockres_put(res);
3457 + dlm->purge_count--;
3458 +- } else
3459 +- spin_unlock(&res->spinlock);
3460 ++ }
3461 ++
3462 ++ if (!__dlm_lockres_unused(res)) {
3463 ++ mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n",
3464 ++ dlm->name, res->lockname.len, res->lockname.name);
3465 ++ __dlm_print_one_lock_resource(res);
3466 ++ BUG();
3467 ++ }
3468 +
3469 + __dlm_unhash_lockres(res);
3470 +
3471 + /* lockres is not in the hash now. drop the flag and wake up
3472 + * any processes waiting in dlm_get_lock_resource. */
3473 + if (!master) {
3474 +- spin_lock(&res->spinlock);
3475 + res->state &= ~DLM_LOCK_RES_DROPPING_REF;
3476 + spin_unlock(&res->spinlock);
3477 + wake_up(&res->wq);
3478 +- }
3479 +- return 0;
3480 ++ } else
3481 ++ spin_unlock(&res->spinlock);
3482 + }
3483 +
3484 + static void dlm_run_purge_list(struct dlm_ctxt *dlm,
3485 +@@ -251,17 +243,7 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
3486 + lockres = list_entry(dlm->purge_list.next,
3487 + struct dlm_lock_resource, purge);
3488 +
3489 +- /* Status of the lockres *might* change so double
3490 +- * check. If the lockres is unused, holding the dlm
3491 +- * spinlock will prevent people from getting and more
3492 +- * refs on it -- there's no need to keep the lockres
3493 +- * spinlock. */
3494 + spin_lock(&lockres->spinlock);
3495 +- unused = __dlm_lockres_unused(lockres);
3496 +- spin_unlock(&lockres->spinlock);
3497 +-
3498 +- if (!unused)
3499 +- continue;
3500 +
3501 + purge_jiffies = lockres->last_used +
3502 + msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);
3503 +@@ -273,15 +255,29 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
3504 + * in tail order, we can stop at the first
3505 + * unpurgable resource -- anyone added after
3506 + * him will have a greater last_used value */
3507 ++ spin_unlock(&lockres->spinlock);
3508 + break;
3509 + }
3510 +
3511 ++ /* Status of the lockres *might* change so double
3512 ++ * check. If the lockres is unused, holding the dlm
3513 ++ * spinlock will prevent people from getting any more
3514 ++ * refs on it. */
3515 ++ unused = __dlm_lockres_unused(lockres);
3516 ++ if (!unused ||
3517 ++ (lockres->state & DLM_LOCK_RES_MIGRATING)) {
3518 ++ mlog(0, "lockres %s:%.*s: is in use or "
3519 ++ "being remastered, used %d, state %d\n",
3520 ++ dlm->name, lockres->lockname.len,
3521 ++ lockres->lockname.name, !unused, lockres->state);
3522 ++ list_move_tail(&dlm->purge_list, &lockres->purge);
3523 ++ spin_unlock(&lockres->spinlock);
3524 ++ continue;
3525 ++ }
3526 ++
3527 + dlm_lockres_get(lockres);
3528 +
3529 +- /* This may drop and reacquire the dlm spinlock if it
3530 +- * has to do migration. */
3531 +- if (dlm_purge_lockres(dlm, lockres))
3532 +- BUG();
3533 ++ dlm_purge_lockres(dlm, lockres);
3534 +
3535 + dlm_lockres_put(lockres);
3536 +
3537 +diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
3538 +index 3ac5aa7..73a11cc 100644
3539 +--- a/fs/ocfs2/refcounttree.c
3540 ++++ b/fs/ocfs2/refcounttree.c
3541 +@@ -2436,16 +2436,26 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
3542 + len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
3543 + le32_to_cpu(rec.r_clusters)) - cpos;
3544 + /*
3545 +- * If the refcount rec already exist, cool. We just need
3546 +- * to check whether there is a split. Otherwise we just need
3547 +- * to increase the refcount.
3548 +- * If we will insert one, increases recs_add.
3549 +- *
3550 + * We record all the records which will be inserted to the
3551 + * same refcount block, so that we can tell exactly whether
3552 + * we need a new refcount block or not.
3553 ++ *
3554 ++ * If we insert a new one, this is easy and only happens
3555 ++ * while adding the refcounted flag to the extent, so we don't
3556 ++ * have a chance of splitting. We just need one record.
3557 ++ *
3558 ++ * If the refcount rec already exists, it is a little more
3559 ++ * complicated. We may have to:
3560 ++ * 1) split at the beginning if the start pos isn't aligned;
3561 ++ * we need 1 more record in this case.
3562 ++ * 2) split at the end if the end pos isn't aligned;
3563 ++ * we need 1 more record in this case.
3564 ++ * 3) split in the middle because of file system fragmentation;
3565 ++ * we need 2 more records in this case (we can't detect this
3566 ++ * beforehand, so always assume the worst case).
3567 + */
3568 + if (rec.r_refcount) {
3569 ++ recs_add += 2;
3570 + /* Check whether we need a split at the beginning. */
3571 + if (cpos == start_cpos &&
3572 + cpos != le64_to_cpu(rec.r_cpos))
3573 +diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
3574 +index e5039a2..103f08a 100644
3575 +--- a/include/acpi/platform/aclinux.h
3576 ++++ b/include/acpi/platform/aclinux.h
3577 +@@ -148,13 +148,17 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
3578 + #define ACPI_ALLOCATE_ZEROED(a) acpi_os_allocate_zeroed(a)
3579 + #define ACPI_FREE(a) kfree(a)
3580 +
3581 +-/* Used within ACPICA to show where it is safe to preempt execution */
3582 +-#include <linux/hardirq.h>
3583 ++#ifndef CONFIG_PREEMPT
3584 ++/*
3585 ++ * Used within ACPICA to show where it is safe to preempt execution
3586 ++ * when CONFIG_PREEMPT=n
3587 ++ */
3588 + #define ACPI_PREEMPTION_POINT() \
3589 + do { \
3590 +- if (!in_atomic_preempt_off() && !irqs_disabled()) \
3591 ++ if (!irqs_disabled()) \
3592 + cond_resched(); \
3593 + } while (0)
3594 ++#endif
3595 +
3596 + #endif /* __KERNEL__ */
3597 +
3598 +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
3599 +index b8bb9a6..ee7e258 100644
3600 +--- a/include/linux/mm_types.h
3601 ++++ b/include/linux/mm_types.h
3602 +@@ -134,7 +134,7 @@ struct vm_area_struct {
3603 + within vm_mm. */
3604 +
3605 + /* linked list of VM areas per task, sorted by address */
3606 +- struct vm_area_struct *vm_next;
3607 ++ struct vm_area_struct *vm_next, *vm_prev;
3608 +
3609 + pgprot_t vm_page_prot; /* Access permissions of this VMA. */
3610 + unsigned long vm_flags; /* Flags, see mm.h. */
3611 +diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
3612 +index f43e9b4..23cc10f 100644
3613 +--- a/include/linux/mtd/flashchip.h
3614 ++++ b/include/linux/mtd/flashchip.h
3615 +@@ -92,7 +92,7 @@ struct flchip {
3616 + /* This is used to handle contention on write/erase operations
3617 + between partitions of the same physical chip. */
3618 + struct flchip_shared {
3619 +- spinlock_t lock;
3620 ++ struct mutex lock;
3621 + struct flchip *writing;
3622 + struct flchip *erasing;
3623 + };
3624 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
3625 +index f89e7fd..eb674b7 100644
3626 +--- a/include/linux/skbuff.h
3627 ++++ b/include/linux/skbuff.h
3628 +@@ -169,6 +169,7 @@ struct skb_shared_hwtstamps {
3629 + * @software: generate software time stamp
3630 + * @in_progress: device driver is going to provide
3631 + * hardware time stamp
3632 ++ * @prevent_sk_orphan: make sk reference available on driver level
3633 + * @flags: all shared_tx flags
3634 + *
3635 + * These flags are attached to packets as part of the
3636 +@@ -178,7 +179,8 @@ union skb_shared_tx {
3637 + struct {
3638 + __u8 hardware:1,
3639 + software:1,
3640 +- in_progress:1;
3641 ++ in_progress:1,
3642 ++ prevent_sk_orphan:1;
3643 + };
3644 + __u8 flags;
3645 + };
3646 +diff --git a/include/linux/tty.h b/include/linux/tty.h
3647 +index 931078b..7802a24 100644
3648 +--- a/include/linux/tty.h
3649 ++++ b/include/linux/tty.h
3650 +@@ -552,6 +552,9 @@ static inline void tty_audit_push_task(struct task_struct *tsk,
3651 + }
3652 + #endif
3653 +
3654 ++/* tty_io.c */
3655 ++extern int __init tty_init(void);
3656 ++
3657 + /* tty_ioctl.c */
3658 + extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
3659 + unsigned int cmd, unsigned long arg);
3660 +diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
3661 +index 6a664c3..7dc97d1 100644
3662 +--- a/include/sound/emu10k1.h
3663 ++++ b/include/sound/emu10k1.h
3664 +@@ -1707,6 +1707,7 @@ struct snd_emu10k1 {
3665 + unsigned int card_type; /* EMU10K1_CARD_* */
3666 + unsigned int ecard_ctrl; /* ecard control bits */
3667 + unsigned long dma_mask; /* PCI DMA mask */
3668 ++ unsigned int delay_pcm_irq; /* in samples */
3669 + int max_cache_pages; /* max memory size / PAGE_SIZE */
3670 + struct snd_dma_buffer silent_page; /* silent page */
3671 + struct snd_dma_buffer ptb_pages; /* page table pages */
3672 +diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
3673 +index 9496b96..fa8223a 100644
3674 +--- a/include/trace/events/timer.h
3675 ++++ b/include/trace/events/timer.h
3676 +@@ -74,14 +74,16 @@ TRACE_EVENT(timer_expire_entry,
3677 + TP_STRUCT__entry(
3678 + __field( void *, timer )
3679 + __field( unsigned long, now )
3680 ++ __field( void *, function)
3681 + ),
3682 +
3683 + TP_fast_assign(
3684 + __entry->timer = timer;
3685 + __entry->now = jiffies;
3686 ++ __entry->function = timer->function;
3687 + ),
3688 +
3689 +- TP_printk("timer=%p now=%lu", __entry->timer, __entry->now)
3690 ++ TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now)
3691 + );
3692 +
3693 + /**
3694 +@@ -213,14 +215,16 @@ TRACE_EVENT(hrtimer_expire_entry,
3695 + TP_STRUCT__entry(
3696 + __field( void *, hrtimer )
3697 + __field( s64, now )
3698 ++ __field( void *, function)
3699 + ),
3700 +
3701 + TP_fast_assign(
3702 + __entry->hrtimer = hrtimer;
3703 + __entry->now = now->tv64;
3704 ++ __entry->function = hrtimer->function;
3705 + ),
3706 +
3707 +- TP_printk("hrtimer=%p now=%llu", __entry->hrtimer,
3708 ++ TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
3709 + (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
3710 + );
3711 +
3712 +diff --git a/kernel/fork.c b/kernel/fork.c
3713 +index b6cce14..e96c0cd 100644
3714 +--- a/kernel/fork.c
3715 ++++ b/kernel/fork.c
3716 +@@ -300,7 +300,7 @@ out:
3717 + #ifdef CONFIG_MMU
3718 + static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
3719 + {
3720 +- struct vm_area_struct *mpnt, *tmp, **pprev;
3721 ++ struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
3722 + struct rb_node **rb_link, *rb_parent;
3723 + int retval;
3724 + unsigned long charge;
3725 +@@ -328,6 +328,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
3726 + if (retval)
3727 + goto out;
3728 +
3729 ++ prev = NULL;
3730 + for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
3731 + struct file *file;
3732 +
3733 +@@ -359,7 +360,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
3734 + goto fail_nomem_anon_vma_fork;
3735 + tmp->vm_flags &= ~VM_LOCKED;
3736 + tmp->vm_mm = mm;
3737 +- tmp->vm_next = NULL;
3738 ++ tmp->vm_next = tmp->vm_prev = NULL;
3739 + file = tmp->vm_file;
3740 + if (file) {
3741 + struct inode *inode = file->f_path.dentry->d_inode;
3742 +@@ -392,6 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
3743 + */
3744 + *pprev = tmp;
3745 + pprev = &tmp->vm_next;
3746 ++ tmp->vm_prev = prev;
3747 ++ prev = tmp;
3748 +
3749 + __vma_link_rb(mm, tmp, rb_link, rb_parent);
3750 + rb_link = &tmp->vm_rb.rb_right;
3751 +diff --git a/kernel/sched.c b/kernel/sched.c
3752 +index 63b4a14..6d0dbeb 100644
3753 +--- a/kernel/sched.c
3754 ++++ b/kernel/sched.c
3755 +@@ -3694,8 +3694,16 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
3756 + /*
3757 + * Owner changed, break to re-assess state.
3758 + */
3759 +- if (lock->owner != owner)
3760 ++ if (lock->owner != owner) {
3761 ++ /*
3762 ++ * If the lock has switched to a different owner,
3763 ++ * we likely have heavy contention. Return 0 to quit
3764 ++ * optimistic spinning and not contend further:
3765 ++ */
3766 ++ if (lock->owner)
3767 ++ return 0;
3768 + break;
3769 ++ }
3770 +
3771 + /*
3772 + * Is that owner really running on that cpu?
3773 +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
3774 +index caf8d4d..b87c22f 100644
3775 +--- a/kernel/time/timekeeping.c
3776 ++++ b/kernel/time/timekeeping.c
3777 +@@ -736,6 +736,7 @@ static void timekeeping_adjust(s64 offset)
3778 + static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
3779 + {
3780 + u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
3781 ++ u64 raw_nsecs;
3782 +
3783 + /* If the offset is smaller then a shifted interval, do nothing */
3784 + if (offset < timekeeper.cycle_interval<<shift)
3785 +@@ -752,12 +753,15 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
3786 + second_overflow();
3787 + }
3788 +
3789 +- /* Accumulate into raw time */
3790 +- raw_time.tv_nsec += timekeeper.raw_interval << shift;;
3791 +- while (raw_time.tv_nsec >= NSEC_PER_SEC) {
3792 +- raw_time.tv_nsec -= NSEC_PER_SEC;
3793 +- raw_time.tv_sec++;
3794 ++ /* Accumulate raw time */
3795 ++ raw_nsecs = timekeeper.raw_interval << shift;
3796 ++ raw_nsecs += raw_time.tv_nsec;
3797 ++ if (raw_nsecs >= NSEC_PER_SEC) {
3798 ++ u64 raw_secs = raw_nsecs;
3799 ++ raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
3800 ++ raw_time.tv_sec += raw_secs;
3801 + }
3802 ++ raw_time.tv_nsec = raw_nsecs;
3803 +
3804 + /* Accumulate error between NTP and clock interval */
3805 + timekeeper.ntp_error += tick_length << shift;
3806 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
3807 +index 1da7b6e..5ec8f1d 100644
3808 +--- a/kernel/trace/ring_buffer.c
3809 ++++ b/kernel/trace/ring_buffer.c
3810 +@@ -3868,6 +3868,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
3811 + rpos = reader->read;
3812 + pos += size;
3813 +
3814 ++ if (rpos >= commit)
3815 ++ break;
3816 ++
3817 + event = rb_reader_event(cpu_buffer);
3818 + size = rb_event_length(event);
3819 + } while (len > size);
3820 +diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
3821 +index 79f4bac..b4c179a 100644
3822 +--- a/kernel/trace/trace_functions_graph.c
3823 ++++ b/kernel/trace/trace_functions_graph.c
3824 +@@ -507,7 +507,15 @@ get_return_for_leaf(struct trace_iterator *iter,
3825 + * if the output fails.
3826 + */
3827 + data->ent = *curr;
3828 +- data->ret = *next;
3829 ++ /*
3830 ++ * If the next event is not a return type, then
3831 ++ * we only care about what type it is. Otherwise we can
3832 ++ * safely copy the entire event.
3833 ++ */
3834 ++ if (next->ent.type == TRACE_GRAPH_RET)
3835 ++ data->ret = *next;
3836 ++ else
3837 ++ data->ret.ent.type = next->ent.type;
3838 + }
3839 + }
3840 +
3841 +diff --git a/mm/memory.c b/mm/memory.c
3842 +index 307bf77..53cf85d 100644
3843 +--- a/mm/memory.c
3844 ++++ b/mm/memory.c
3845 +@@ -2770,11 +2770,18 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
3846 + {
3847 + address &= PAGE_MASK;
3848 + if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
3849 +- address -= PAGE_SIZE;
3850 +- if (find_vma(vma->vm_mm, address) != vma)
3851 +- return -ENOMEM;
3852 ++ struct vm_area_struct *prev = vma->vm_prev;
3853 ++
3854 ++ /*
3855 ++ * Is there a mapping abutting this one below?
3856 ++ *
3857 ++ * That's only ok if it's the same stack mapping
3858 ++ * that has gotten split.
3859 ++ */
3860 ++ if (prev && prev->vm_end == address)
3861 ++ return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
3862 +
3863 +- expand_stack(vma, address);
3864 ++ expand_stack(vma, address - PAGE_SIZE);
3865 + }
3866 + return 0;
3867 + }
3868 +diff --git a/mm/mlock.c b/mm/mlock.c
3869 +index 49e5e4c..cbae7c5 100644
3870 +--- a/mm/mlock.c
3871 ++++ b/mm/mlock.c
3872 +@@ -135,6 +135,19 @@ void munlock_vma_page(struct page *page)
3873 + }
3874 + }
3875 +
3876 ++/* Is the vma a continuation of the stack vma above it? */
3877 ++static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
3878 ++{
3879 ++ return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
3880 ++}
3881 ++
3882 ++static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
3883 ++{
3884 ++ return (vma->vm_flags & VM_GROWSDOWN) &&
3885 ++ (vma->vm_start == addr) &&
3886 ++ !vma_stack_continue(vma->vm_prev, addr);
3887 ++}
3888 ++
3889 + /**
3890 + * __mlock_vma_pages_range() - mlock a range of pages in the vma.
3891 + * @vma: target vma
3892 +@@ -168,11 +181,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
3893 + gup_flags |= FOLL_WRITE;
3894 +
3895 + /* We don't try to access the guard page of a stack vma */
3896 +- if (vma->vm_flags & VM_GROWSDOWN) {
3897 +- if (start == vma->vm_start) {
3898 +- start += PAGE_SIZE;
3899 +- nr_pages--;
3900 +- }
3901 ++ if (stack_guard_page(vma, start)) {
3902 ++ addr += PAGE_SIZE;
3903 ++ nr_pages--;
3904 + }
3905 +
3906 + while (nr_pages > 0) {
3907 +diff --git a/mm/mmap.c b/mm/mmap.c
3908 +index 456ec6f..3867cfc 100644
3909 +--- a/mm/mmap.c
3910 ++++ b/mm/mmap.c
3911 +@@ -388,17 +388,23 @@ static inline void
3912 + __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
3913 + struct vm_area_struct *prev, struct rb_node *rb_parent)
3914 + {
3915 ++ struct vm_area_struct *next;
3916 ++
3917 ++ vma->vm_prev = prev;
3918 + if (prev) {
3919 +- vma->vm_next = prev->vm_next;
3920 ++ next = prev->vm_next;
3921 + prev->vm_next = vma;
3922 + } else {
3923 + mm->mmap = vma;
3924 + if (rb_parent)
3925 +- vma->vm_next = rb_entry(rb_parent,
3926 ++ next = rb_entry(rb_parent,
3927 + struct vm_area_struct, vm_rb);
3928 + else
3929 +- vma->vm_next = NULL;
3930 ++ next = NULL;
3931 + }
3932 ++ vma->vm_next = next;
3933 ++ if (next)
3934 ++ next->vm_prev = vma;
3935 + }
3936 +
3937 + void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
3938 +@@ -485,7 +491,11 @@ static inline void
3939 + __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
3940 + struct vm_area_struct *prev)
3941 + {
3942 +- prev->vm_next = vma->vm_next;
3943 ++ struct vm_area_struct *next = vma->vm_next;
3944 ++
3945 ++ prev->vm_next = next;
3946 ++ if (next)
3947 ++ next->vm_prev = prev;
3948 + rb_erase(&vma->vm_rb, &mm->mm_rb);
3949 + if (mm->mmap_cache == vma)
3950 + mm->mmap_cache = prev;
3951 +@@ -1900,6 +1910,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
3952 + unsigned long addr;
3953 +
3954 + insertion_point = (prev ? &prev->vm_next : &mm->mmap);
3955 ++ vma->vm_prev = NULL;
3956 + do {
3957 + rb_erase(&vma->vm_rb, &mm->mm_rb);
3958 + mm->map_count--;
3959 +@@ -1907,6 +1918,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
3960 + vma = vma->vm_next;
3961 + } while (vma && vma->vm_start < end);
3962 + *insertion_point = vma;
3963 ++ if (vma)
3964 ++ vma->vm_prev = prev;
3965 + tail_vma->vm_next = NULL;
3966 + if (mm->unmap_area == arch_unmap_area)
3967 + addr = prev ? prev->vm_end : mm->mmap_base;
3968 +diff --git a/mm/nommu.c b/mm/nommu.c
3969 +index b76f3ee..e48b38c 100644
3970 +--- a/mm/nommu.c
3971 ++++ b/mm/nommu.c
3972 +@@ -609,7 +609,7 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
3973 + */
3974 + static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
3975 + {
3976 +- struct vm_area_struct *pvma, **pp;
3977 ++ struct vm_area_struct *pvma, **pp, *next;
3978 + struct address_space *mapping;
3979 + struct rb_node **p, *parent;
3980 +
3981 +@@ -669,8 +669,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
3982 + break;
3983 + }
3984 +
3985 +- vma->vm_next = *pp;
3986 ++ next = *pp;
3987 + *pp = vma;
3988 ++ vma->vm_next = next;
3989 ++ if (next)
3990 ++ next->vm_prev = vma;
3991 + }
3992 +
3993 + /*
3994 +diff --git a/mm/slab.c b/mm/slab.c
3995 +index e49f8f4..e4f747f 100644
3996 +--- a/mm/slab.c
3997 ++++ b/mm/slab.c
3998 +@@ -2331,8 +2331,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
3999 + }
4000 + #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
4001 + if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
4002 +- && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
4003 +- cachep->obj_offset += PAGE_SIZE - size;
4004 ++ && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
4005 ++ cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
4006 + size = PAGE_SIZE;
4007 + }
4008 + #endif
4009 +diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
4010 +index 753fc42..f49bcd9 100644
4011 +--- a/net/bridge/br_device.c
4012 ++++ b/net/bridge/br_device.c
4013 +@@ -22,7 +22,7 @@
4014 + #include <asm/uaccess.h>
4015 + #include "br_private.h"
4016 +
4017 +-/* net device transmit always called with no BH (preempt_disabled) */
4018 ++/* net device transmit always called with BH disabled */
4019 + netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
4020 + {
4021 + struct net_bridge *br = netdev_priv(dev);
4022 +@@ -46,9 +46,12 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
4023 + skb_reset_mac_header(skb);
4024 + skb_pull(skb, ETH_HLEN);
4025 +
4026 ++ rcu_read_lock();
4027 + if (is_multicast_ether_addr(dest)) {
4028 +- if (br_multicast_rcv(br, NULL, skb))
4029 ++ if (br_multicast_rcv(br, NULL, skb)) {
4030 ++ kfree_skb(skb);
4031 + goto out;
4032 ++ }
4033 +
4034 + mdst = br_mdb_get(br, skb);
4035 + if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
4036 +@@ -61,6 +64,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
4037 + br_flood_deliver(br, skb);
4038 +
4039 + out:
4040 ++ rcu_read_unlock();
4041 + return NETDEV_TX_OK;
4042 + }
4043 +
4044 +diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
4045 +index b01dde3..7204ad3 100644
4046 +--- a/net/bridge/br_fdb.c
4047 ++++ b/net/bridge/br_fdb.c
4048 +@@ -214,7 +214,7 @@ void br_fdb_delete_by_port(struct net_bridge *br,
4049 + spin_unlock_bh(&br->hash_lock);
4050 + }
4051 +
4052 +-/* No locking or refcounting, assumes caller has no preempt (rcu_read_lock) */
4053 ++/* No locking or refcounting, assumes caller has rcu_read_lock */
4054 + struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
4055 + const unsigned char *addr)
4056 + {
4057 +diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
4058 +index d36e700..114365c 100644
4059 +--- a/net/bridge/br_input.c
4060 ++++ b/net/bridge/br_input.c
4061 +@@ -37,7 +37,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
4062 + netif_receive_skb);
4063 + }
4064 +
4065 +-/* note: already called with rcu_read_lock (preempt_disabled) */
4066 ++/* note: already called with rcu_read_lock */
4067 + int br_handle_frame_finish(struct sk_buff *skb)
4068 + {
4069 + const unsigned char *dest = eth_hdr(skb)->h_dest;
4070 +@@ -108,7 +108,7 @@ drop:
4071 + goto out;
4072 + }
4073 +
4074 +-/* note: already called with rcu_read_lock (preempt_disabled) */
4075 ++/* note: already called with rcu_read_lock */
4076 + static int br_handle_local_finish(struct sk_buff *skb)
4077 + {
4078 + struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
4079 +@@ -133,7 +133,7 @@ static inline int is_link_local(const unsigned char *dest)
4080 + /*
4081 + * Called via br_handle_frame_hook.
4082 + * Return NULL if skb is handled
4083 +- * note: already called with rcu_read_lock (preempt_disabled)
4084 ++ * note: already called with rcu_read_lock
4085 + */
4086 + struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
4087 + {
4088 +diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
4089 +index 217bd22..5854e82 100644
4090 +--- a/net/bridge/br_stp_bpdu.c
4091 ++++ b/net/bridge/br_stp_bpdu.c
4092 +@@ -131,7 +131,7 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
4093 + /*
4094 + * Called from llc.
4095 + *
4096 +- * NO locks, but rcu_read_lock (preempt_disabled)
4097 ++ * NO locks, but rcu_read_lock
4098 + */
4099 + void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
4100 + struct net_device *dev)
4101 +diff --git a/net/can/bcm.c b/net/can/bcm.c
4102 +index 9c65e9d..08ffe9e 100644
4103 +--- a/net/can/bcm.c
4104 ++++ b/net/can/bcm.c
4105 +@@ -60,6 +60,13 @@
4106 + #include <net/sock.h>
4107 + #include <net/net_namespace.h>
4108 +
4109 ++/*
4110 ++ * To send the content of multiple CAN frames within TX_SETUP, or to
4111 ++ * filter CAN messages with a multiplex index within RX_SETUP, the number
4112 ++ * of different filters is limited to 256 due to the one-byte index value.
4113 ++ */
4114 ++#define MAX_NFRAMES 256
4115 ++
4116 + /* use of last_frames[index].can_dlc */
4117 + #define RX_RECV 0x40 /* received data for this element */
4118 + #define RX_THR 0x80 /* element not been sent due to throttle feature */
4119 +@@ -89,16 +96,16 @@ struct bcm_op {
4120 + struct list_head list;
4121 + int ifindex;
4122 + canid_t can_id;
4123 +- int flags;
4124 ++ u32 flags;
4125 + unsigned long frames_abs, frames_filtered;
4126 + struct timeval ival1, ival2;
4127 + struct hrtimer timer, thrtimer;
4128 + struct tasklet_struct tsklet, thrtsklet;
4129 + ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
4130 + int rx_ifindex;
4131 +- int count;
4132 +- int nframes;
4133 +- int currframe;
4134 ++ u32 count;
4135 ++ u32 nframes;
4136 ++ u32 currframe;
4137 + struct can_frame *frames;
4138 + struct can_frame *last_frames;
4139 + struct can_frame sframe;
4140 +@@ -175,7 +182,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
4141 +
4142 + seq_printf(m, "rx_op: %03X %-5s ",
4143 + op->can_id, bcm_proc_getifname(ifname, op->ifindex));
4144 +- seq_printf(m, "[%d]%c ", op->nframes,
4145 ++ seq_printf(m, "[%u]%c ", op->nframes,
4146 + (op->flags & RX_CHECK_DLC)?'d':' ');
4147 + if (op->kt_ival1.tv64)
4148 + seq_printf(m, "timeo=%lld ",
4149 +@@ -198,7 +205,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
4150 +
4151 + list_for_each_entry(op, &bo->tx_ops, list) {
4152 +
4153 +- seq_printf(m, "tx_op: %03X %s [%d] ",
4154 ++ seq_printf(m, "tx_op: %03X %s [%u] ",
4155 + op->can_id,
4156 + bcm_proc_getifname(ifname, op->ifindex),
4157 + op->nframes);
4158 +@@ -283,7 +290,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
4159 + struct can_frame *firstframe;
4160 + struct sockaddr_can *addr;
4161 + struct sock *sk = op->sk;
4162 +- int datalen = head->nframes * CFSIZ;
4163 ++ unsigned int datalen = head->nframes * CFSIZ;
4164 + int err;
4165 +
4166 + skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
4167 +@@ -468,7 +475,7 @@ rx_changed_settime:
4168 + * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
4169 + * received data stored in op->last_frames[]
4170 + */
4171 +-static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
4172 ++static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
4173 + const struct can_frame *rxdata)
4174 + {
4175 + /*
4176 +@@ -554,7 +561,8 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
4177 + /*
4178 + * bcm_rx_do_flush - helper for bcm_rx_thr_flush
4179 + */
4180 +-static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
4181 ++static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
4182 ++ unsigned int index)
4183 + {
4184 + if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
4185 + if (update)
4186 +@@ -575,7 +583,7 @@ static int bcm_rx_thr_flush(struct bcm_op *op, int update)
4187 + int updated = 0;
4188 +
4189 + if (op->nframes > 1) {
4190 +- int i;
4191 ++ unsigned int i;
4192 +
4193 + /* for MUX filter we start at index 1 */
4194 + for (i = 1; i < op->nframes; i++)
4195 +@@ -624,7 +632,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
4196 + {
4197 + struct bcm_op *op = (struct bcm_op *)data;
4198 + const struct can_frame *rxframe = (struct can_frame *)skb->data;
4199 +- int i;
4200 ++ unsigned int i;
4201 +
4202 + /* disable timeout */
4203 + hrtimer_cancel(&op->timer);
4204 +@@ -822,14 +830,15 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
4205 + {
4206 + struct bcm_sock *bo = bcm_sk(sk);
4207 + struct bcm_op *op;
4208 +- int i, err;
4209 ++ unsigned int i;
4210 ++ int err;
4211 +
4212 + /* we need a real device to send frames */
4213 + if (!ifindex)
4214 + return -ENODEV;
4215 +
4216 +- /* we need at least one can_frame */
4217 +- if (msg_head->nframes < 1)
4218 ++ /* check nframes boundaries - we need at least one can_frame */
4219 ++ if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
4220 + return -EINVAL;
4221 +
4222 + /* check the given can_id */
4223 +@@ -993,6 +1002,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
4224 + msg_head->nframes = 0;
4225 + }
4226 +
4227 ++ /* the first element contains the mux-mask => MAX_NFRAMES + 1 */
4228 ++ if (msg_head->nframes > MAX_NFRAMES + 1)
4229 ++ return -EINVAL;
4230 ++
4231 + if ((msg_head->flags & RX_RTR_FRAME) &&
4232 + ((msg_head->nframes != 1) ||
4233 + (!(msg_head->can_id & CAN_RTR_FLAG))))
4234 +diff --git a/net/can/raw.c b/net/can/raw.c
4235 +index da99cf1..1650599 100644
4236 +--- a/net/can/raw.c
4237 ++++ b/net/can/raw.c
4238 +@@ -655,6 +655,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
4239 + err = sock_tx_timestamp(msg, sk, skb_tx(skb));
4240 + if (err < 0)
4241 + goto free_skb;
4242 ++
4243 ++ /* to be able to check the received tx sock reference in raw_rcv() */
4244 ++ skb_tx(skb)->prevent_sk_orphan = 1;
4245 ++
4246 + skb->dev = dev;
4247 + skb->sk = sk;
4248 +
4249 +diff --git a/net/core/dev.c b/net/core/dev.c
4250 +index 1f466e8..95cc486 100644
4251 +--- a/net/core/dev.c
4252 ++++ b/net/core/dev.c
4253 +@@ -2504,6 +2504,7 @@ int netif_rx(struct sk_buff *skb)
4254 + struct rps_dev_flow voidflow, *rflow = &voidflow;
4255 + int cpu;
4256 +
4257 ++ preempt_disable();
4258 + rcu_read_lock();
4259 +
4260 + cpu = get_rps_cpu(skb->dev, skb, &rflow);
4261 +@@ -2513,6 +2514,7 @@ int netif_rx(struct sk_buff *skb)
4262 + ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4263 +
4264 + rcu_read_unlock();
4265 ++ preempt_enable();
4266 + }
4267 + #else
4268 + {
4269 +@@ -3064,7 +3066,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4270 + int mac_len;
4271 + enum gro_result ret;
4272 +
4273 +- if (!(skb->dev->features & NETIF_F_GRO))
4274 ++ if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
4275 + goto normal;
4276 +
4277 + if (skb_is_gso(skb) || skb_has_frags(skb))
4278 +@@ -3133,7 +3135,7 @@ pull:
4279 + put_page(skb_shinfo(skb)->frags[0].page);
4280 + memmove(skb_shinfo(skb)->frags,
4281 + skb_shinfo(skb)->frags + 1,
4282 +- --skb_shinfo(skb)->nr_frags);
4283 ++ --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
4284 + }
4285 + }
4286 +
4287 +@@ -3151,9 +3153,6 @@ __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4288 + {
4289 + struct sk_buff *p;
4290 +
4291 +- if (netpoll_rx_on(skb))
4292 +- return GRO_NORMAL;
4293 +-
4294 + for (p = napi->gro_list; p; p = p->next) {
4295 + NAPI_GRO_CB(p)->same_flow =
4296 + (p->dev == skb->dev) &&
4297 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
4298 +index 65afeae..c259714 100644
4299 +--- a/net/ipv4/tcp.c
4300 ++++ b/net/ipv4/tcp.c
4301 +@@ -2176,6 +2176,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
4302 + GFP_KERNEL);
4303 + if (cvp == NULL)
4304 + return -ENOMEM;
4305 ++
4306 ++ kref_init(&cvp->kref);
4307 + }
4308 + lock_sock(sk);
4309 + tp->rx_opt.cookie_in_always =
4310 +@@ -2190,12 +2192,11 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
4311 + */
4312 + kref_put(&tp->cookie_values->kref,
4313 + tcp_cookie_values_release);
4314 +- kref_init(&cvp->kref);
4315 +- tp->cookie_values = cvp;
4316 + } else {
4317 + cvp = tp->cookie_values;
4318 + }
4319 + }
4320 ++
4321 + if (cvp != NULL) {
4322 + cvp->cookie_desired = ctd.tcpct_cookie_desired;
4323 +
4324 +@@ -2209,6 +2210,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
4325 + cvp->s_data_desired = ctd.tcpct_s_data_desired;
4326 + cvp->s_data_constant = 0; /* false */
4327 + }
4328 ++
4329 ++ tp->cookie_values = cvp;
4330 + }
4331 + release_sock(sk);
4332 + return err;
4333 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
4334 +index a2eb965..54d7308 100644
4335 +--- a/net/netlink/af_netlink.c
4336 ++++ b/net/netlink/af_netlink.c
4337 +@@ -1400,7 +1400,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
4338 + struct netlink_sock *nlk = nlk_sk(sk);
4339 + int noblock = flags&MSG_DONTWAIT;
4340 + size_t copied;
4341 +- struct sk_buff *skb, *frag __maybe_unused = NULL;
4342 ++ struct sk_buff *skb, *data_skb;
4343 + int err;
4344 +
4345 + if (flags&MSG_OOB)
4346 +@@ -1412,45 +1412,35 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
4347 + if (skb == NULL)
4348 + goto out;
4349 +
4350 ++ data_skb = skb;
4351 ++
4352 + #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
4353 + if (unlikely(skb_shinfo(skb)->frag_list)) {
4354 +- bool need_compat = !!(flags & MSG_CMSG_COMPAT);
4355 +-
4356 + /*
4357 +- * If this skb has a frag_list, then here that means that
4358 +- * we will have to use the frag_list skb for compat tasks
4359 +- * and the regular skb for non-compat tasks.
4360 ++ * If this skb has a frag_list, then here that means that we
4361 ++ * will have to use the frag_list skb's data for compat tasks
4362 ++ * and the regular skb's data for normal (non-compat) tasks.
4363 + *
4364 +- * The skb might (and likely will) be cloned, so we can't
4365 +- * just reset frag_list and go on with things -- we need to
4366 +- * keep that. For the compat case that's easy -- simply get
4367 +- * a reference to the compat skb and free the regular one
4368 +- * including the frag. For the non-compat case, we need to
4369 +- * avoid sending the frag to the user -- so assign NULL but
4370 +- * restore it below before freeing the skb.
4371 ++ * If we need to send the compat skb, assign it to the
4372 ++ * 'data_skb' variable so that it will be used below for data
4373 ++ * copying. We keep 'skb' for everything else, including
4374 ++ * freeing both later.
4375 + */
4376 +- if (need_compat) {
4377 +- struct sk_buff *compskb = skb_shinfo(skb)->frag_list;
4378 +- skb_get(compskb);
4379 +- kfree_skb(skb);
4380 +- skb = compskb;
4381 +- } else {
4382 +- frag = skb_shinfo(skb)->frag_list;
4383 +- skb_shinfo(skb)->frag_list = NULL;
4384 +- }
4385 ++ if (flags & MSG_CMSG_COMPAT)
4386 ++ data_skb = skb_shinfo(skb)->frag_list;
4387 + }
4388 + #endif
4389 +
4390 + msg->msg_namelen = 0;
4391 +
4392 +- copied = skb->len;
4393 ++ copied = data_skb->len;
4394 + if (len < copied) {
4395 + msg->msg_flags |= MSG_TRUNC;
4396 + copied = len;
4397 + }
4398 +
4399 +- skb_reset_transport_header(skb);
4400 +- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
4401 ++ skb_reset_transport_header(data_skb);
4402 ++ err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
4403 +
4404 + if (msg->msg_name) {
4405 + struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
4406 +@@ -1470,11 +1460,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
4407 + }
4408 + siocb->scm->creds = *NETLINK_CREDS(skb);
4409 + if (flags & MSG_TRUNC)
4410 +- copied = skb->len;
4411 +-
4412 +-#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
4413 +- skb_shinfo(skb)->frag_list = frag;
4414 +-#endif
4415 ++ copied = data_skb->len;
4416 +
4417 + skb_free_datagram(sk, skb);
4418 +
4419 +diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
4420 +index 724553e..abbf4fa 100644
4421 +--- a/net/sched/act_nat.c
4422 ++++ b/net/sched/act_nat.c
4423 +@@ -218,6 +218,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
4424 + if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
4425 + goto drop;
4426 +
4427 ++ icmph = (void *)(skb_network_header(skb) + ihl);
4428 + iph = (void *)(icmph + 1);
4429 + if (egress)
4430 + addr = iph->daddr;
4431 +@@ -246,7 +247,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
4432 + iph->saddr = new_addr;
4433 +
4434 + inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
4435 +- 1);
4436 ++ 0);
4437 + break;
4438 + }
4439 + default:
4440 +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
4441 +index c657628..a9be0ef 100644
4442 +--- a/net/sched/sch_sfq.c
4443 ++++ b/net/sched/sch_sfq.c
4444 +@@ -497,11 +497,22 @@ nla_put_failure:
4445 + return -1;
4446 + }
4447 +
4448 ++static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
4449 ++{
4450 ++ return NULL;
4451 ++}
4452 ++
4453 + static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
4454 + {
4455 + return 0;
4456 + }
4457 +
4458 ++static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
4459 ++ u32 classid)
4460 ++{
4461 ++ return 0;
4462 ++}
4463 ++
4464 + static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
4465 + {
4466 + struct sfq_sched_data *q = qdisc_priv(sch);
4467 +@@ -554,8 +565,10 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
4468 + }
4469 +
4470 + static const struct Qdisc_class_ops sfq_class_ops = {
4471 ++ .leaf = sfq_leaf,
4472 + .get = sfq_get,
4473 + .tcf_chain = sfq_find_tcf,
4474 ++ .bind_tcf = sfq_bind,
4475 + .dump = sfq_dump_class,
4476 + .dump_stats = sfq_dump_class_stats,
4477 + .walk = sfq_walk,
4478 +diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
4479 +index ef17fcf..e4be688 100644
4480 +--- a/net/wireless/mlme.c
4481 ++++ b/net/wireless/mlme.c
4482 +@@ -842,12 +842,18 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
4483 + return -EINVAL;
4484 + if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) {
4485 + /* Verify that we are associated with the destination AP */
4486 ++ wdev_lock(wdev);
4487 ++
4488 + if (!wdev->current_bss ||
4489 + memcmp(wdev->current_bss->pub.bssid, mgmt->bssid,
4490 + ETH_ALEN) != 0 ||
4491 + memcmp(wdev->current_bss->pub.bssid, mgmt->da,
4492 +- ETH_ALEN) != 0)
4493 ++ ETH_ALEN) != 0) {
4494 ++ wdev_unlock(wdev);
4495 + return -ENOTCONN;
4496 ++ }
4497 ++ wdev_unlock(wdev);
4498 ++
4499 + }
4500 +
4501 + if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0)
4502 +diff --git a/scripts/mkmakefile b/scripts/mkmakefile
4503 +index 67d59c7..5325423 100644
4504 +--- a/scripts/mkmakefile
4505 ++++ b/scripts/mkmakefile
4506 +@@ -44,7 +44,9 @@ all:
4507 +
4508 + Makefile:;
4509 +
4510 +-\$(all) %/: all
4511 ++\$(all): all
4512 + @:
4513 +
4514 ++%/: all
4515 ++ @:
4516 + EOF
4517 +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
4518 +index 303ac04..1990918 100644
4519 +--- a/sound/core/pcm_native.c
4520 ++++ b/sound/core/pcm_native.c
4521 +@@ -981,6 +981,10 @@ static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
4522 + {
4523 + if (substream->runtime->trigger_master != substream)
4524 + return 0;
4525 ++ /* some drivers might use hw_ptr to recover from the pause -
4526 ++ update the hw_ptr now */
4527 ++ if (push)
4528 ++ snd_pcm_update_hw_ptr(substream);
4529 + /* The jiffies check in snd_pcm_update_hw_ptr*() is done by
3530 + * a delta between the current jiffies, this gives a large enough
4531 + * delta, effectively to skip the check once.
4532 +diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
4533 +index 4203782..aff8387 100644
4534 +--- a/sound/pci/emu10k1/emu10k1.c
4535 ++++ b/sound/pci/emu10k1/emu10k1.c
4536 +@@ -52,6 +52,7 @@ static int max_synth_voices[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 64};
4537 + static int max_buffer_size[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 128};
4538 + static int enable_ir[SNDRV_CARDS];
4539 + static uint subsystem[SNDRV_CARDS]; /* Force card subsystem model */
4540 ++static uint delay_pcm_irq[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2};
4541 +
4542 + module_param_array(index, int, NULL, 0444);
4543 + MODULE_PARM_DESC(index, "Index value for the EMU10K1 soundcard.");
4544 +@@ -73,6 +74,8 @@ module_param_array(enable_ir, bool, NULL, 0444);
4545 + MODULE_PARM_DESC(enable_ir, "Enable IR.");
4546 + module_param_array(subsystem, uint, NULL, 0444);
4547 + MODULE_PARM_DESC(subsystem, "Force card subsystem model.");
4548 ++module_param_array(delay_pcm_irq, uint, NULL, 0444);
4549 ++MODULE_PARM_DESC(delay_pcm_irq, "Delay PCM interrupt by specified number of samples (default 2).");
4550 + /*
4551 + * Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value Model:SB0400
4552 + */
4553 +@@ -127,6 +130,7 @@ static int __devinit snd_card_emu10k1_probe(struct pci_dev *pci,
4554 + &emu)) < 0)
4555 + goto error;
4556 + card->private_data = emu;
4557 ++ emu->delay_pcm_irq = delay_pcm_irq[dev] & 0x1f;
4558 + if ((err = snd_emu10k1_pcm(emu, 0, NULL)) < 0)
4559 + goto error;
4560 + if ((err = snd_emu10k1_pcm_mic(emu, 1, NULL)) < 0)
4561 +diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
4562 +index 55b83ef..622bace 100644
4563 +--- a/sound/pci/emu10k1/emupcm.c
4564 ++++ b/sound/pci/emu10k1/emupcm.c
4565 +@@ -332,7 +332,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
4566 + evoice->epcm->ccca_start_addr = start_addr + ccis;
4567 + if (extra) {
4568 + start_addr += ccis;
4569 +- end_addr += ccis;
4570 ++ end_addr += ccis + emu->delay_pcm_irq;
4571 + }
4572 + if (stereo && !extra) {
4573 + snd_emu10k1_ptr_write(emu, CPF, voice, CPF_STEREO_MASK);
4574 +@@ -360,7 +360,9 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
4575 + /* Assumption that PT is already 0 so no harm overwriting */
4576 + snd_emu10k1_ptr_write(emu, PTRX, voice, (send_amount[0] << 8) | send_amount[1]);
4577 + snd_emu10k1_ptr_write(emu, DSL, voice, end_addr | (send_amount[3] << 24));
4578 +- snd_emu10k1_ptr_write(emu, PSST, voice, start_addr | (send_amount[2] << 24));
4579 ++ snd_emu10k1_ptr_write(emu, PSST, voice,
4580 ++ (start_addr + (extra ? emu->delay_pcm_irq : 0)) |
4581 ++ (send_amount[2] << 24));
4582 + if (emu->card_capabilities->emu_model)
4583 + pitch_target = PITCH_48000; /* Disable interpolators on emu1010 card */
4584 + else
4585 +@@ -732,6 +734,23 @@ static void snd_emu10k1_playback_stop_voice(struct snd_emu10k1 *emu, struct snd_
4586 + snd_emu10k1_ptr_write(emu, IP, voice, 0);
4587 + }
4588 +
4589 ++static inline void snd_emu10k1_playback_mangle_extra(struct snd_emu10k1 *emu,
4590 ++ struct snd_emu10k1_pcm *epcm,
4591 ++ struct snd_pcm_substream *substream,
4592 ++ struct snd_pcm_runtime *runtime)
4593 ++{
4594 ++ unsigned int ptr, period_pos;
4595 ++
4596 ++ /* try to synchronize the current position for the interrupt
4597 ++ source voice */
4598 ++ period_pos = runtime->status->hw_ptr - runtime->hw_ptr_interrupt;
4599 ++ period_pos %= runtime->period_size;
4600 ++ ptr = snd_emu10k1_ptr_read(emu, CCCA, epcm->extra->number);
4601 ++ ptr &= ~0x00ffffff;
4602 ++ ptr |= epcm->ccca_start_addr + period_pos;
4603 ++ snd_emu10k1_ptr_write(emu, CCCA, epcm->extra->number, ptr);
4604 ++}
4605 ++
4606 + static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream,
4607 + int cmd)
4608 + {
4609 +@@ -753,6 +772,8 @@ static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream,
4610 + /* follow thru */
4611 + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
4612 + case SNDRV_PCM_TRIGGER_RESUME:
4613 ++ if (cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE)
4614 ++ snd_emu10k1_playback_mangle_extra(emu, epcm, substream, runtime);
4615 + mix = &emu->pcm_mixer[substream->number];
4616 + snd_emu10k1_playback_prepare_voice(emu, epcm->voices[0], 1, 0, mix);
4617 + snd_emu10k1_playback_prepare_voice(emu, epcm->voices[1], 0, 0, mix);
4618 +@@ -869,8 +890,9 @@ static snd_pcm_uframes_t snd_emu10k1_playback_pointer(struct snd_pcm_substream *
4619 + #endif
4620 + /*
4621 + printk(KERN_DEBUG
4622 +- "ptr = 0x%x, buffer_size = 0x%x, period_size = 0x%x\n",
4623 +- ptr, runtime->buffer_size, runtime->period_size);
4624 ++ "ptr = 0x%lx, buffer_size = 0x%lx, period_size = 0x%lx\n",
4625 ++ (long)ptr, (long)runtime->buffer_size,
4626 ++ (long)runtime->period_size);
4627 + */
4628 + return ptr;
4629 + }
4630 +diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
4631 +index ffb1ddb..957a311 100644
4632 +--- a/sound/pci/emu10k1/memory.c
4633 ++++ b/sound/pci/emu10k1/memory.c
4634 +@@ -310,8 +310,10 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
4635 + if (snd_BUG_ON(!hdr))
4636 + return NULL;
4637 +
4638 ++ idx = runtime->period_size >= runtime->buffer_size ?
4639 ++ (emu->delay_pcm_irq * 2) : 0;
4640 + mutex_lock(&hdr->block_mutex);
4641 +- blk = search_empty(emu, runtime->dma_bytes);
4642 ++ blk = search_empty(emu, runtime->dma_bytes + idx);
4643 + if (blk == NULL) {
4644 + mutex_unlock(&hdr->block_mutex);
4645 + return NULL;
4646 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
4647 +index 2bf2cb5..baadda4 100644
4648 +--- a/sound/pci/hda/patch_conexant.c
4649 ++++ b/sound/pci/hda/patch_conexant.c
4650 +@@ -2970,6 +2970,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
4651 + SND_PCI_QUIRK(0x1028, 0x02f5, "Dell",
4652 + CXT5066_DELL_LAPTOP),
4653 + SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
4654 ++ SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO),
4655 + SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
4656 + SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
4657 + SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
4658 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4659 +index aa7cc51..6d9a542 100644
4660 +--- a/sound/pci/hda/patch_realtek.c
4661 ++++ b/sound/pci/hda/patch_realtek.c
4662 +@@ -6864,6 +6864,7 @@ static int patch_alc260(struct hda_codec *codec)
4663 +
4664 + spec->stream_analog_playback = &alc260_pcm_analog_playback;
4665 + spec->stream_analog_capture = &alc260_pcm_analog_capture;
4666 ++ spec->stream_analog_alt_capture = &alc260_pcm_analog_capture;
4667 +
4668 + spec->stream_digital_playback = &alc260_pcm_digital_playback;
4669 + spec->stream_digital_capture = &alc260_pcm_digital_capture;
4670 +diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
4671 +index 6433e65..4677492 100644
4672 +--- a/sound/pci/intel8x0.c
4673 ++++ b/sound/pci/intel8x0.c
4674 +@@ -1776,6 +1776,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
4675 + },
4676 + {
4677 + .subvendor = 0x1014,
4678 ++ .subdevice = 0x0534,
4679 ++ .name = "ThinkPad X31",
4680 ++ .type = AC97_TUNE_INV_EAPD
4681 ++ },
4682 ++ {
4683 ++ .subvendor = 0x1014,
4684 + .subdevice = 0x1f00,
4685 + .name = "MS-9128",
4686 + .type = AC97_TUNE_ALC_JACK
4687 +diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
4688 +index ad44626..c737287 100644
4689 +--- a/sound/pci/riptide/riptide.c
4690 ++++ b/sound/pci/riptide/riptide.c
4691 +@@ -1224,15 +1224,14 @@ static int try_to_load_firmware(struct cmdif *cif, struct snd_riptide *chip)
4692 + firmware.firmware.ASIC, firmware.firmware.CODEC,
4693 + firmware.firmware.AUXDSP, firmware.firmware.PROG);
4694 +
4695 ++ if (!chip)
4696 ++ return 1;
4697 ++
4698 + for (i = 0; i < FIRMWARE_VERSIONS; i++) {
4699 + if (!memcmp(&firmware_versions[i], &firmware, sizeof(firmware)))
4700 +- break;
4701 +- }
4702 +- if (i >= FIRMWARE_VERSIONS)
4703 +- return 0; /* no match */
4704 ++ return 1; /* OK */
4705 +
4706 +- if (!chip)
4707 +- return 1; /* OK */
4708 ++ }
4709 +
4710 + snd_printdd("Writing Firmware\n");
4711 + if (!chip->fw_entry) {
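
The riptide.c hunk above reorders try_to_load_firmware(): a probe-only call (no chip) reports success immediately, a firmware version found in the known-good list needs no reload, and only an unrecognized version falls through to the loader. A minimal sketch of that control flow, with the version data and the loader stubbed out:

#include <stdio.h>
#include <string.h>

#define FIRMWARE_VERSIONS 2

struct fw_version { unsigned char asic, codec, auxdsp, prog; };

static const struct fw_version known[FIRMWARE_VERSIONS] = {
    { 3, 2, 2, 0 }, { 4, 2, 2, 0 },
};

static int load_firmware(void) { puts("writing firmware"); return 1; }

static int try_to_load_firmware(const struct fw_version *cur, void *chip)
{
    if (!chip)
        return 1;                     /* probe-only call: report success */

    for (int i = 0; i < FIRMWARE_VERSIONS; i++)
        if (!memcmp(&known[i], cur, sizeof(*cur)))
            return 1;                 /* already running a known version */

    return load_firmware();           /* unknown version: (re)load */
}

int main(void)
{
    struct fw_version cur = { 9, 9, 9, 9 };
    return !try_to_load_firmware(&cur, (void *)1);
}
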
4712 +diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
4713 +index c3571ee..72deeab 100644
4714 +--- a/sound/soc/codecs/wm8580.c
4715 ++++ b/sound/soc/codecs/wm8580.c
4716 +@@ -269,9 +269,9 @@ SOC_DOUBLE("DAC2 Invert Switch", WM8580_DAC_CONTROL4, 2, 3, 1, 0),
4717 + SOC_DOUBLE("DAC3 Invert Switch", WM8580_DAC_CONTROL4, 4, 5, 1, 0),
4718 +
4719 + SOC_SINGLE("DAC ZC Switch", WM8580_DAC_CONTROL5, 5, 1, 0),
4720 +-SOC_SINGLE("DAC1 Switch", WM8580_DAC_CONTROL5, 0, 1, 0),
4721 +-SOC_SINGLE("DAC2 Switch", WM8580_DAC_CONTROL5, 1, 1, 0),
4722 +-SOC_SINGLE("DAC3 Switch", WM8580_DAC_CONTROL5, 2, 1, 0),
4723 ++SOC_SINGLE("DAC1 Switch", WM8580_DAC_CONTROL5, 0, 1, 1),
4724 ++SOC_SINGLE("DAC2 Switch", WM8580_DAC_CONTROL5, 1, 1, 1),
4725 ++SOC_SINGLE("DAC3 Switch", WM8580_DAC_CONTROL5, 2, 1, 1),
4726 +
4727 + SOC_DOUBLE("ADC Mute Switch", WM8580_ADC_CONTROL1, 0, 1, 1, 0),
4728 + SOC_SINGLE("ADC High-Pass Filter Switch", WM8580_ADC_CONTROL1, 4, 1, 0),
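
The wm8580.c hunk above flips the last SOC_SINGLE() argument, the invert flag, from 0 to 1: the DAC_CONTROL5 bits are mute bits, so a "Switch" control reporting 1 ("on") must clear the bit rather than set it. A sketch of what that flag does to a 1-bit register write, with the register value invented for illustration:

#include <stdio.h>

static unsigned reg_write_bit(unsigned reg, unsigned shift,
                              unsigned user_val, int invert)
{
    unsigned bit = invert ? !user_val : user_val;  /* flip if inverted */
    return (reg & ~(1u << shift)) | (bit << shift);
}

int main(void)
{
    /* "DAC1 Switch" = 1 (on) with invert=1 clears mute bit 0 */
    printf("0x%02x\n", reg_write_bit(0x07, 0, 1, 1));  /* 0x06 */
    /* the same write with the old invert=0 would set the mute bit */
    printf("0x%02x\n", reg_write_bit(0x06, 0, 1, 0));  /* 0x07 */
    return 0;
}
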
4729 +diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
4730 +index 4e212ed..f8154e6 100644
4731 +--- a/sound/soc/codecs/wm8776.c
4732 ++++ b/sound/soc/codecs/wm8776.c
4733 +@@ -178,13 +178,6 @@ static int wm8776_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
4734 + case SND_SOC_DAIFMT_LEFT_J:
4735 + iface |= 0x0001;
4736 + break;
4737 +- /* FIXME: CHECK A/B */
4738 +- case SND_SOC_DAIFMT_DSP_A:
4739 +- iface |= 0x0003;
4740 +- break;
4741 +- case SND_SOC_DAIFMT_DSP_B:
4742 +- iface |= 0x0007;
4743 +- break;
4744 + default:
4745 + return -EINVAL;
4746 + }
4747 +diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
4748 +index 472af38..adbc68c 100644
4749 +--- a/sound/soc/soc-cache.c
4750 ++++ b/sound/soc/soc-cache.c
4751 +@@ -340,7 +340,7 @@ static unsigned int snd_soc_16_8_read_i2c(struct snd_soc_codec *codec,
4752 + static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec,
4753 + unsigned int reg)
4754 + {
4755 +- u16 *cache = codec->reg_cache;
4756 ++ u8 *cache = codec->reg_cache;
4757 +
4758 + reg &= 0xff;
4759 + if (reg >= codec->reg_cache_size)
4760 +@@ -351,7 +351,7 @@ static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec,
4761 + static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg,
4762 + unsigned int value)
4763 + {
4764 +- u16 *cache = codec->reg_cache;
4765 ++ u8 *cache = codec->reg_cache;
4766 + u8 data[3];
4767 + int ret;
4768 +
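
The soc-cache.c hunk above fixes the cache element width for codecs with 16-bit register addresses and 8-bit values: the cache stores one byte per register, so indexing it through a u16 pointer strides two bytes per register, returns the wrong data, and walks past the end of the cache for the upper half of the register map. A standalone sketch of the width bug, with the cache contents faked:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    uint8_t cache[4] = { 0x11, 0x22, 0x33, 0x44 };  /* one byte per reg */

    /* correct: u8 indexing, one byte per register */
    printf("reg 1 via u8 indexing:  0x%02x\n", cache[1]);       /* 0x22 */

    /* the old u16 indexing reads two bytes starting at offset reg * 2 */
    uint16_t wrong;
    memcpy(&wrong, cache + 1 * sizeof(uint16_t), sizeof(wrong));
    printf("reg 1 via u16 indexing: 0x%04x\n", wrong);  /* 0x4433 on LE */
    return 0;
}
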