
From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1650 - genpatches-2.6/trunk/2.6.32
Date: Thu, 07 Jan 2010 02:44:00
Message-Id: E1NSiLl-0000eP-J8@stork.gentoo.org
1 Author: mpagano
2 Date: 2010-01-07 02:43:48 +0000 (Thu, 07 Jan 2010)
3 New Revision: 1650
4
5 Added:
6 genpatches-2.6/trunk/2.6.32/1002_linux-2.6.32.3.patch
7 Modified:
8 genpatches-2.6/trunk/2.6.32/0000_README
9 Log:
10 Linux patch 2.6.32.3
11
12 Modified: genpatches-2.6/trunk/2.6.32/0000_README
13 ===================================================================
14 --- genpatches-2.6/trunk/2.6.32/0000_README 2009-12-28 17:02:30 UTC (rev 1649)
15 +++ genpatches-2.6/trunk/2.6.32/0000_README 2010-01-07 02:43:48 UTC (rev 1650)
16 @@ -45,8 +45,12 @@
17
18 Patch: 1001_linux-2.6.32.2.patch
19 From: http://www.kernel.org
20 -Desc: Linux 2.6.32.1
21 +Desc: Linux 2.6.32.2
22
23 +Patch: 1002_linux-2.6.32.3.patch
24 +From: http://www.kernel.org
25 +Desc: Linux 2.6.32.3
26 +
27 Patch: 1900_EFI-GPT-header-read-fix.patch
28 From: http://bugs.gentoo.org/show_bug.cgi?id=296915
29 Desc: Read whole sector with EFI GPT header
30
31 Added: genpatches-2.6/trunk/2.6.32/1002_linux-2.6.32.3.patch
32 ===================================================================
33 --- genpatches-2.6/trunk/2.6.32/1002_linux-2.6.32.3.patch (rev 0)
34 +++ genpatches-2.6/trunk/2.6.32/1002_linux-2.6.32.3.patch 2010-01-07 02:43:48 UTC (rev 1650)
35 @@ -0,0 +1,4876 @@
36 +diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
37 +index af6885c..e1def17 100644
38 +--- a/Documentation/filesystems/ext4.txt
39 ++++ b/Documentation/filesystems/ext4.txt
40 +@@ -196,7 +196,7 @@ nobarrier This also requires an IO stack which can support
41 + also be used to enable or disable barriers, for
42 + consistency with other ext4 mount options.
43 +
44 +-inode_readahead=n This tuning parameter controls the maximum
45 ++inode_readahead_blks=n This tuning parameter controls the maximum
46 + number of inode table blocks that ext4's inode
47 + table readahead algorithm will pre-read into
48 + the buffer cache. The default value is 32 blocks.
49 +diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
50 +index a5b632e..f0c624f 100644
51 +--- a/arch/powerpc/kernel/align.c
52 ++++ b/arch/powerpc/kernel/align.c
53 +@@ -642,10 +642,14 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
54 + */
55 + static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
56 + unsigned int areg, struct pt_regs *regs,
57 +- unsigned int flags, unsigned int length)
58 ++ unsigned int flags, unsigned int length,
59 ++ unsigned int elsize)
60 + {
61 + char *ptr;
62 ++ unsigned long *lptr;
63 + int ret = 0;
64 ++ int sw = 0;
65 ++ int i, j;
66 +
67 + flush_vsx_to_thread(current);
68 +
69 +@@ -654,19 +658,35 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
70 + else
71 + ptr = (char *) &current->thread.vr[reg - 32];
72 +
73 +- if (flags & ST)
74 +- ret = __copy_to_user(addr, ptr, length);
75 +- else {
76 +- if (flags & SPLT){
77 +- ret = __copy_from_user(ptr, addr, length);
78 +- ptr += length;
79 ++ lptr = (unsigned long *) ptr;
80 ++
81 ++ if (flags & SW)
82 ++ sw = elsize-1;
83 ++
84 ++ for (j = 0; j < length; j += elsize) {
85 ++ for (i = 0; i < elsize; ++i) {
86 ++ if (flags & ST)
87 ++ ret |= __put_user(ptr[i^sw], addr + i);
88 ++ else
89 ++ ret |= __get_user(ptr[i^sw], addr + i);
90 + }
91 +- ret |= __copy_from_user(ptr, addr, length);
92 ++ ptr += elsize;
93 ++ addr += elsize;
94 + }
95 +- if (flags & U)
96 +- regs->gpr[areg] = regs->dar;
97 +- if (ret)
98 ++
99 ++ if (!ret) {
100 ++ if (flags & U)
101 ++ regs->gpr[areg] = regs->dar;
102 ++
103 ++ /* Splat load copies the same data to top and bottom 8 bytes */
104 ++ if (flags & SPLT)
105 ++ lptr[1] = lptr[0];
106 ++ /* For 8 byte loads, zero the top 8 bytes */
107 ++ else if (!(flags & ST) && (8 == length))
108 ++ lptr[1] = 0;
109 ++ } else
110 + return -EFAULT;
111 ++
112 + return 1;
113 + }
114 + #endif
115 +@@ -767,16 +787,25 @@ int fix_alignment(struct pt_regs *regs)
116 +
117 + #ifdef CONFIG_VSX
118 + if ((instruction & 0xfc00003e) == 0x7c000018) {
119 +- /* Additional register addressing bit (64 VSX vs 32 FPR/GPR */
120 ++ unsigned int elsize;
121 ++
122 ++ /* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */
123 + reg |= (instruction & 0x1) << 5;
124 + /* Simple inline decoder instead of a table */
125 ++ /* VSX has only 8 and 16 byte memory accesses */
126 ++ nb = 8;
127 + if (instruction & 0x200)
128 + nb = 16;
129 +- else if (instruction & 0x080)
130 +- nb = 8;
131 +- else
132 +- nb = 4;
133 ++
134 ++ /* Vector stores in little-endian mode swap individual
135 ++ elements, so process them separately */
136 ++ elsize = 4;
137 ++ if (instruction & 0x80)
138 ++ elsize = 8;
139 ++
140 + flags = 0;
141 ++ if (regs->msr & MSR_LE)
142 ++ flags |= SW;
143 + if (instruction & 0x100)
144 + flags |= ST;
145 + if (instruction & 0x040)
146 +@@ -787,7 +816,7 @@ int fix_alignment(struct pt_regs *regs)
147 + nb = 8;
148 + }
149 + PPC_WARN_EMULATED(vsx);
150 +- return emulate_vsx(addr, reg, areg, regs, flags, nb);
151 ++ return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
152 + }
153 + #endif
154 + /* A size of 0 indicates an instruction we don't support, with
155 +diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
156 +index 7e2b6ba..0e3e728 100644
157 +--- a/arch/x86/include/asm/msr.h
158 ++++ b/arch/x86/include/asm/msr.h
159 +@@ -27,6 +27,18 @@ struct msr {
160 + };
161 + };
162 +
163 ++struct msr_info {
164 ++ u32 msr_no;
165 ++ struct msr reg;
166 ++ struct msr *msrs;
167 ++ int err;
168 ++};
169 ++
170 ++struct msr_regs_info {
171 ++ u32 *regs;
172 ++ int err;
173 ++};
174 ++
175 + static inline unsigned long long native_read_tscp(unsigned int *aux)
176 + {
177 + unsigned long low, high;
178 +@@ -244,11 +256,14 @@ do { \
179 +
180 + #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
181 +
182 ++struct msr *msrs_alloc(void);
183 ++void msrs_free(struct msr *msrs);
184 ++
185 + #ifdef CONFIG_SMP
186 + int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
187 + int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
188 +-void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
189 +-void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
190 ++void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
191 ++void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
192 + int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
193 + int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
194 + int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
195 +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
196 +index c978648..13b1885 100644
197 +--- a/arch/x86/include/asm/processor.h
198 ++++ b/arch/x86/include/asm/processor.h
199 +@@ -180,7 +180,7 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
200 + unsigned int *ecx, unsigned int *edx)
201 + {
202 + /* ecx is often an input as well as an output. */
203 +- asm("cpuid"
204 ++ asm volatile("cpuid"
205 + : "=a" (*eax),
206 + "=b" (*ebx),
207 + "=c" (*ecx),
208 +diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
209 +index d1414af..e90a8a9 100644
210 +--- a/arch/x86/include/asm/uv/uv_hub.h
211 ++++ b/arch/x86/include/asm/uv/uv_hub.h
212 +@@ -31,20 +31,20 @@
213 + * contiguous (although various IO spaces may punch holes in
214 + * it)..
215 + *
216 +- * N - Number of bits in the node portion of a socket physical
217 +- * address.
218 ++ * N - Number of bits in the node portion of a socket physical
219 ++ * address.
220 + *
221 +- * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
222 +- * routers always have low bit of 1, C/MBricks have low bit
223 +- * equal to 0. Most addressing macros that target UV hub chips
224 +- * right shift the NASID by 1 to exclude the always-zero bit.
225 +- * NASIDs contain up to 15 bits.
226 ++ * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
227 ++ * routers always have low bit of 1, C/MBricks have low bit
228 ++ * equal to 0. Most addressing macros that target UV hub chips
229 ++ * right shift the NASID by 1 to exclude the always-zero bit.
230 ++ * NASIDs contain up to 15 bits.
231 + *
232 + * GNODE - NASID right shifted by 1 bit. Most mmrs contain gnodes instead
233 + * of nasids.
234 + *
235 +- * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
236 +- * of the nasid for socket usage.
237 ++ * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
238 ++ * of the nasid for socket usage.
239 + *
240 + *
241 + * NumaLink Global Physical Address Format:
242 +@@ -71,12 +71,12 @@
243 + *
244 + *
245 + * APICID format
246 +- * NOTE!!!!!! This is the current format of the APICID. However, code
247 +- * should assume that this will change in the future. Use functions
248 +- * in this file for all APICID bit manipulations and conversion.
249 ++ * NOTE!!!!!! This is the current format of the APICID. However, code
250 ++ * should assume that this will change in the future. Use functions
251 ++ * in this file for all APICID bit manipulations and conversion.
252 + *
253 +- * 1111110000000000
254 +- * 5432109876543210
255 ++ * 1111110000000000
256 ++ * 5432109876543210
257 + * pppppppppplc0cch
258 + * sssssssssss
259 + *
260 +@@ -89,9 +89,9 @@
261 + * Note: Processor only supports 12 bits in the APICID register. The ACPI
262 + * tables hold all 16 bits. Software needs to be aware of this.
263 + *
264 +- * Unless otherwise specified, all references to APICID refer to
265 +- * the FULL value contained in ACPI tables, not the subset in the
266 +- * processor APICID register.
267 ++ * Unless otherwise specified, all references to APICID refer to
268 ++ * the FULL value contained in ACPI tables, not the subset in the
269 ++ * processor APICID register.
270 + */
271 +
272 +
273 +@@ -151,16 +151,16 @@ struct uv_hub_info_s {
274 + };
275 +
276 + DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
277 +-#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
278 ++#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
279 + #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
280 +
281 + /*
282 + * Local & Global MMR space macros.
283 +- * Note: macros are intended to be used ONLY by inline functions
284 +- * in this file - not by other kernel code.
285 +- * n - NASID (full 15-bit global nasid)
286 +- * g - GNODE (full 15-bit global nasid, right shifted 1)
287 +- * p - PNODE (local part of nsids, right shifted 1)
288 ++ * Note: macros are intended to be used ONLY by inline functions
289 ++ * in this file - not by other kernel code.
290 ++ * n - NASID (full 15-bit global nasid)
291 ++ * g - GNODE (full 15-bit global nasid, right shifted 1)
292 ++ * p - PNODE (local part of nsids, right shifted 1)
293 + */
294 + #define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask)
295 + #define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra)
296 +@@ -213,8 +213,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
297 + /*
298 + * Macros for converting between kernel virtual addresses, socket local physical
299 + * addresses, and UV global physical addresses.
300 +- * Note: use the standard __pa() & __va() macros for converting
301 +- * between socket virtual and socket physical addresses.
302 ++ * Note: use the standard __pa() & __va() macros for converting
303 ++ * between socket virtual and socket physical addresses.
304 + */
305 +
306 + /* socket phys RAM --> UV global physical address */
307 +@@ -265,21 +265,18 @@ static inline int uv_apicid_to_pnode(int apicid)
308 + * Access global MMRs using the low memory MMR32 space. This region supports
309 + * faster MMR access but not all MMRs are accessible in this space.
310 + */
311 +-static inline unsigned long *uv_global_mmr32_address(int pnode,
312 +- unsigned long offset)
313 ++static inline unsigned long *uv_global_mmr32_address(int pnode, unsigned long offset)
314 + {
315 + return __va(UV_GLOBAL_MMR32_BASE |
316 + UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
317 + }
318 +
319 +-static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
320 +- unsigned long val)
321 ++static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val)
322 + {
323 + writeq(val, uv_global_mmr32_address(pnode, offset));
324 + }
325 +
326 +-static inline unsigned long uv_read_global_mmr32(int pnode,
327 +- unsigned long offset)
328 ++static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset)
329 + {
330 + return readq(uv_global_mmr32_address(pnode, offset));
331 + }
332 +@@ -288,25 +285,32 @@ static inline unsigned long uv_read_global_mmr32(int pnode,
333 + * Access Global MMR space using the MMR space located at the top of physical
334 + * memory.
335 + */
336 +-static inline unsigned long *uv_global_mmr64_address(int pnode,
337 +- unsigned long offset)
338 ++static inline unsigned long *uv_global_mmr64_address(int pnode, unsigned long offset)
339 + {
340 + return __va(UV_GLOBAL_MMR64_BASE |
341 + UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
342 + }
343 +
344 +-static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
345 +- unsigned long val)
346 ++static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val)
347 + {
348 + writeq(val, uv_global_mmr64_address(pnode, offset));
349 + }
350 +
351 +-static inline unsigned long uv_read_global_mmr64(int pnode,
352 +- unsigned long offset)
353 ++static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset)
354 + {
355 + return readq(uv_global_mmr64_address(pnode, offset));
356 + }
357 +
358 ++static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val)
359 ++{
360 ++ writeb(val, uv_global_mmr64_address(pnode, offset));
361 ++}
362 ++
363 ++static inline unsigned char uv_read_global_mmr8(int pnode, unsigned long offset)
364 ++{
365 ++ return readb(uv_global_mmr64_address(pnode, offset));
366 ++}
367 ++
368 + /*
369 + * Access hub local MMRs. Faster than using global space but only local MMRs
370 + * are accessible.
371 +@@ -426,11 +430,17 @@ static inline void uv_set_scir_bits(unsigned char value)
372 + }
373 + }
374 +
375 ++static inline unsigned long uv_scir_offset(int apicid)
376 ++{
377 ++ return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f);
378 ++}
379 ++
380 + static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
381 + {
382 + if (uv_cpu_hub_info(cpu)->scir.state != value) {
383 ++ uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
384 ++ uv_cpu_hub_info(cpu)->scir.offset, value);
385 + uv_cpu_hub_info(cpu)->scir.state = value;
386 +- uv_write_local_mmr8(uv_cpu_hub_info(cpu)->scir.offset, value);
387 + }
388 + }
389 +
390 +diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
391 +index e0b3130..c8243f0 100644
392 +--- a/arch/x86/kernel/amd_iommu_init.c
393 ++++ b/arch/x86/kernel/amd_iommu_init.c
394 +@@ -136,6 +136,11 @@ LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
395 + system */
396 +
397 + /*
398 ++ * Set to true if ACPI table parsing and hardware intialization went properly
399 ++ */
400 ++static bool amd_iommu_initialized;
401 ++
402 ++/*
403 + * Pointer to the device table which is shared by all AMD IOMMUs
404 + * it is indexed by the PCI device id or the HT unit id and contains
405 + * information about the domain the device belongs to as well as the
406 +@@ -913,6 +918,8 @@ static int __init init_iommu_all(struct acpi_table_header *table)
407 + }
408 + WARN_ON(p != end);
409 +
410 ++ amd_iommu_initialized = true;
411 ++
412 + return 0;
413 + }
414 +
415 +@@ -1263,6 +1270,9 @@ int __init amd_iommu_init(void)
416 + if (acpi_table_parse("IVRS", init_iommu_all) != 0)
417 + goto free;
418 +
419 ++ if (!amd_iommu_initialized)
420 ++ goto free;
421 ++
422 + if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
423 + goto free;
424 +
425 +diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
426 +index 326c254..2ab3535 100644
427 +--- a/arch/x86/kernel/apic/x2apic_uv_x.c
428 ++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
429 +@@ -607,8 +607,10 @@ void __init uv_system_init(void)
430 + uv_rtc_init();
431 +
432 + for_each_present_cpu(cpu) {
433 ++ int apicid = per_cpu(x86_cpu_to_apicid, cpu);
434 ++
435 + nid = cpu_to_node(cpu);
436 +- pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
437 ++ pnode = uv_apicid_to_pnode(apicid);
438 + blade = boot_pnode_to_blade(pnode);
439 + lcpu = uv_blade_info[blade].nr_possible_cpus;
440 + uv_blade_info[blade].nr_possible_cpus++;
441 +@@ -629,15 +631,13 @@ void __init uv_system_init(void)
442 + uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
443 + uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
444 + uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
445 +- uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
446 ++ uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
447 + uv_node_to_blade[nid] = blade;
448 + uv_cpu_to_blade[cpu] = blade;
449 + max_pnode = max(pnode, max_pnode);
450 +
451 +- printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, "
452 +- "lcpu %d, blade %d\n",
453 +- cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
454 +- lcpu, blade);
455 ++ printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
456 ++ cpu, apicid, pnode, nid, lcpu, blade);
457 + }
458 +
459 + /* Add blade/pnode info for nodes without cpus */
460 +diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
461 +index 7b058a2..c06acdd 100644
462 +--- a/arch/x86/kernel/ptrace.c
463 ++++ b/arch/x86/kernel/ptrace.c
464 +@@ -408,14 +408,14 @@ static int genregs_get(struct task_struct *target,
465 + {
466 + if (kbuf) {
467 + unsigned long *k = kbuf;
468 +- while (count > 0) {
469 ++ while (count >= sizeof(*k)) {
470 + *k++ = getreg(target, pos);
471 + count -= sizeof(*k);
472 + pos += sizeof(*k);
473 + }
474 + } else {
475 + unsigned long __user *u = ubuf;
476 +- while (count > 0) {
477 ++ while (count >= sizeof(*u)) {
478 + if (__put_user(getreg(target, pos), u++))
479 + return -EFAULT;
480 + count -= sizeof(*u);
481 +@@ -434,14 +434,14 @@ static int genregs_set(struct task_struct *target,
482 + int ret = 0;
483 + if (kbuf) {
484 + const unsigned long *k = kbuf;
485 +- while (count > 0 && !ret) {
486 ++ while (count >= sizeof(*k) && !ret) {
487 + ret = putreg(target, pos, *k++);
488 + count -= sizeof(*k);
489 + pos += sizeof(*k);
490 + }
491 + } else {
492 + const unsigned long __user *u = ubuf;
493 +- while (count > 0 && !ret) {
494 ++ while (count >= sizeof(*u) && !ret) {
495 + unsigned long word;
496 + ret = __get_user(word, u++);
497 + if (ret)
498 +@@ -1219,14 +1219,14 @@ static int genregs32_get(struct task_struct *target,
499 + {
500 + if (kbuf) {
501 + compat_ulong_t *k = kbuf;
502 +- while (count > 0) {
503 ++ while (count >= sizeof(*k)) {
504 + getreg32(target, pos, k++);
505 + count -= sizeof(*k);
506 + pos += sizeof(*k);
507 + }
508 + } else {
509 + compat_ulong_t __user *u = ubuf;
510 +- while (count > 0) {
511 ++ while (count >= sizeof(*u)) {
512 + compat_ulong_t word;
513 + getreg32(target, pos, &word);
514 + if (__put_user(word, u++))
515 +@@ -1247,14 +1247,14 @@ static int genregs32_set(struct task_struct *target,
516 + int ret = 0;
517 + if (kbuf) {
518 + const compat_ulong_t *k = kbuf;
519 +- while (count > 0 && !ret) {
520 ++ while (count >= sizeof(*k) && !ret) {
521 + ret = putreg32(target, pos, *k++);
522 + count -= sizeof(*k);
523 + pos += sizeof(*k);
524 + }
525 + } else {
526 + const compat_ulong_t __user *u = ubuf;
527 +- while (count > 0 && !ret) {
528 ++ while (count >= sizeof(*u) && !ret) {
529 + compat_ulong_t word;
530 + ret = __get_user(word, u++);
531 + if (ret)
532 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
533 +index 23c2176..41659fb 100644
534 +--- a/arch/x86/kvm/lapic.c
535 ++++ b/arch/x86/kvm/lapic.c
536 +@@ -1156,6 +1156,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
537 + hrtimer_cancel(&apic->lapic_timer.timer);
538 + update_divide_count(apic);
539 + start_apic_timer(apic);
540 ++ apic->irr_pending = true;
541 + }
542 +
543 + void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
544 +diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
545 +index 72558f8..85e12cd 100644
546 +--- a/arch/x86/kvm/paging_tmpl.h
547 ++++ b/arch/x86/kvm/paging_tmpl.h
548 +@@ -455,8 +455,6 @@ out_unlock:
549 + static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
550 + {
551 + struct kvm_shadow_walk_iterator iterator;
552 +- pt_element_t gpte;
553 +- gpa_t pte_gpa = -1;
554 + int level;
555 + u64 *sptep;
556 + int need_flush = 0;
557 +@@ -471,10 +469,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
558 + if (level == PT_PAGE_TABLE_LEVEL ||
559 + ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
560 + ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
561 +- struct kvm_mmu_page *sp = page_header(__pa(sptep));
562 +-
563 +- pte_gpa = (sp->gfn << PAGE_SHIFT);
564 +- pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
565 +
566 + if (is_shadow_present_pte(*sptep)) {
567 + rmap_remove(vcpu->kvm, sptep);
568 +@@ -493,18 +487,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
569 + if (need_flush)
570 + kvm_flush_remote_tlbs(vcpu->kvm);
571 + spin_unlock(&vcpu->kvm->mmu_lock);
572 +-
573 +- if (pte_gpa == -1)
574 +- return;
575 +- if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
576 +- sizeof(pt_element_t)))
577 +- return;
578 +- if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
579 +- if (mmu_topup_memory_caches(vcpu))
580 +- return;
581 +- kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
582 +- sizeof(pt_element_t), 0);
583 +- }
584 + }
585 +
586 + static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
587 +diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
588 +index 85f5db9..c2b6f39 100644
589 +--- a/arch/x86/lib/Makefile
590 ++++ b/arch/x86/lib/Makefile
591 +@@ -2,14 +2,14 @@
592 + # Makefile for x86 specific library files.
593 + #
594 +
595 +-obj-$(CONFIG_SMP) := msr.o
596 ++obj-$(CONFIG_SMP) += msr-smp.o
597 +
598 + lib-y := delay.o
599 + lib-y += thunk_$(BITS).o
600 + lib-y += usercopy_$(BITS).o getuser.o putuser.o
601 + lib-y += memcpy_$(BITS).o
602 +
603 +-obj-y += msr-reg.o msr-reg-export.o
604 ++obj-y += msr.o msr-reg.o msr-reg-export.o
605 +
606 + ifeq ($(CONFIG_X86_32),y)
607 + obj-y += atomic64_32.o
608 +diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
609 +new file mode 100644
610 +index 0000000..a6b1b86
611 +--- /dev/null
612 ++++ b/arch/x86/lib/msr-smp.c
613 +@@ -0,0 +1,204 @@
614 ++#include <linux/module.h>
615 ++#include <linux/preempt.h>
616 ++#include <linux/smp.h>
617 ++#include <asm/msr.h>
618 ++
619 ++static void __rdmsr_on_cpu(void *info)
620 ++{
621 ++ struct msr_info *rv = info;
622 ++ struct msr *reg;
623 ++ int this_cpu = raw_smp_processor_id();
624 ++
625 ++ if (rv->msrs)
626 ++ reg = per_cpu_ptr(rv->msrs, this_cpu);
627 ++ else
628 ++ reg = &rv->reg;
629 ++
630 ++ rdmsr(rv->msr_no, reg->l, reg->h);
631 ++}
632 ++
633 ++static void __wrmsr_on_cpu(void *info)
634 ++{
635 ++ struct msr_info *rv = info;
636 ++ struct msr *reg;
637 ++ int this_cpu = raw_smp_processor_id();
638 ++
639 ++ if (rv->msrs)
640 ++ reg = per_cpu_ptr(rv->msrs, this_cpu);
641 ++ else
642 ++ reg = &rv->reg;
643 ++
644 ++ wrmsr(rv->msr_no, reg->l, reg->h);
645 ++}
646 ++
647 ++int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
648 ++{
649 ++ int err;
650 ++ struct msr_info rv;
651 ++
652 ++ memset(&rv, 0, sizeof(rv));
653 ++
654 ++ rv.msr_no = msr_no;
655 ++ err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
656 ++ *l = rv.reg.l;
657 ++ *h = rv.reg.h;
658 ++
659 ++ return err;
660 ++}
661 ++EXPORT_SYMBOL(rdmsr_on_cpu);
662 ++
663 ++int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
664 ++{
665 ++ int err;
666 ++ struct msr_info rv;
667 ++
668 ++ memset(&rv, 0, sizeof(rv));
669 ++
670 ++ rv.msr_no = msr_no;
671 ++ rv.reg.l = l;
672 ++ rv.reg.h = h;
673 ++ err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
674 ++
675 ++ return err;
676 ++}
677 ++EXPORT_SYMBOL(wrmsr_on_cpu);
678 ++
679 ++static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
680 ++ struct msr *msrs,
681 ++ void (*msr_func) (void *info))
682 ++{
683 ++ struct msr_info rv;
684 ++ int this_cpu;
685 ++
686 ++ memset(&rv, 0, sizeof(rv));
687 ++
688 ++ rv.msrs = msrs;
689 ++ rv.msr_no = msr_no;
690 ++
691 ++ this_cpu = get_cpu();
692 ++
693 ++ if (cpumask_test_cpu(this_cpu, mask))
694 ++ msr_func(&rv);
695 ++
696 ++ smp_call_function_many(mask, msr_func, &rv, 1);
697 ++ put_cpu();
698 ++}
699 ++
700 ++/* rdmsr on a bunch of CPUs
701 ++ *
702 ++ * @mask: which CPUs
703 ++ * @msr_no: which MSR
704 ++ * @msrs: array of MSR values
705 ++ *
706 ++ */
707 ++void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
708 ++{
709 ++ __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
710 ++}
711 ++EXPORT_SYMBOL(rdmsr_on_cpus);
712 ++
713 ++/*
714 ++ * wrmsr on a bunch of CPUs
715 ++ *
716 ++ * @mask: which CPUs
717 ++ * @msr_no: which MSR
718 ++ * @msrs: array of MSR values
719 ++ *
720 ++ */
721 ++void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
722 ++{
723 ++ __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
724 ++}
725 ++EXPORT_SYMBOL(wrmsr_on_cpus);
726 ++
727 ++/* These "safe" variants are slower and should be used when the target MSR
728 ++ may not actually exist. */
729 ++static void __rdmsr_safe_on_cpu(void *info)
730 ++{
731 ++ struct msr_info *rv = info;
732 ++
733 ++ rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
734 ++}
735 ++
736 ++static void __wrmsr_safe_on_cpu(void *info)
737 ++{
738 ++ struct msr_info *rv = info;
739 ++
740 ++ rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
741 ++}
742 ++
743 ++int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
744 ++{
745 ++ int err;
746 ++ struct msr_info rv;
747 ++
748 ++ memset(&rv, 0, sizeof(rv));
749 ++
750 ++ rv.msr_no = msr_no;
751 ++ err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
752 ++ *l = rv.reg.l;
753 ++ *h = rv.reg.h;
754 ++
755 ++ return err ? err : rv.err;
756 ++}
757 ++EXPORT_SYMBOL(rdmsr_safe_on_cpu);
758 ++
759 ++int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
760 ++{
761 ++ int err;
762 ++ struct msr_info rv;
763 ++
764 ++ memset(&rv, 0, sizeof(rv));
765 ++
766 ++ rv.msr_no = msr_no;
767 ++ rv.reg.l = l;
768 ++ rv.reg.h = h;
769 ++ err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
770 ++
771 ++ return err ? err : rv.err;
772 ++}
773 ++EXPORT_SYMBOL(wrmsr_safe_on_cpu);
774 ++
775 ++/*
776 ++ * These variants are significantly slower, but allows control over
777 ++ * the entire 32-bit GPR set.
778 ++ */
779 ++static void __rdmsr_safe_regs_on_cpu(void *info)
780 ++{
781 ++ struct msr_regs_info *rv = info;
782 ++
783 ++ rv->err = rdmsr_safe_regs(rv->regs);
784 ++}
785 ++
786 ++static void __wrmsr_safe_regs_on_cpu(void *info)
787 ++{
788 ++ struct msr_regs_info *rv = info;
789 ++
790 ++ rv->err = wrmsr_safe_regs(rv->regs);
791 ++}
792 ++
793 ++int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
794 ++{
795 ++ int err;
796 ++ struct msr_regs_info rv;
797 ++
798 ++ rv.regs = regs;
799 ++ rv.err = -EIO;
800 ++ err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
801 ++
802 ++ return err ? err : rv.err;
803 ++}
804 ++EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
805 ++
806 ++int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
807 ++{
808 ++ int err;
809 ++ struct msr_regs_info rv;
810 ++
811 ++ rv.regs = regs;
812 ++ rv.err = -EIO;
813 ++ err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
814 ++
815 ++ return err ? err : rv.err;
816 ++}
817 ++EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
818 +diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
819 +index 33a1e3c..8f8eebd 100644
820 +--- a/arch/x86/lib/msr.c
821 ++++ b/arch/x86/lib/msr.c
822 +@@ -1,226 +1,23 @@
823 + #include <linux/module.h>
824 + #include <linux/preempt.h>
825 +-#include <linux/smp.h>
826 + #include <asm/msr.h>
827 +
828 +-struct msr_info {
829 +- u32 msr_no;
830 +- struct msr reg;
831 +- struct msr *msrs;
832 +- int off;
833 +- int err;
834 +-};
835 +-
836 +-static void __rdmsr_on_cpu(void *info)
837 +-{
838 +- struct msr_info *rv = info;
839 +- struct msr *reg;
840 +- int this_cpu = raw_smp_processor_id();
841 +-
842 +- if (rv->msrs)
843 +- reg = &rv->msrs[this_cpu - rv->off];
844 +- else
845 +- reg = &rv->reg;
846 +-
847 +- rdmsr(rv->msr_no, reg->l, reg->h);
848 +-}
849 +-
850 +-static void __wrmsr_on_cpu(void *info)
851 +-{
852 +- struct msr_info *rv = info;
853 +- struct msr *reg;
854 +- int this_cpu = raw_smp_processor_id();
855 +-
856 +- if (rv->msrs)
857 +- reg = &rv->msrs[this_cpu - rv->off];
858 +- else
859 +- reg = &rv->reg;
860 +-
861 +- wrmsr(rv->msr_no, reg->l, reg->h);
862 +-}
863 +-
864 +-int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
865 +-{
866 +- int err;
867 +- struct msr_info rv;
868 +-
869 +- memset(&rv, 0, sizeof(rv));
870 +-
871 +- rv.msr_no = msr_no;
872 +- err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
873 +- *l = rv.reg.l;
874 +- *h = rv.reg.h;
875 +-
876 +- return err;
877 +-}
878 +-EXPORT_SYMBOL(rdmsr_on_cpu);
879 +-
880 +-int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
881 +-{
882 +- int err;
883 +- struct msr_info rv;
884 +-
885 +- memset(&rv, 0, sizeof(rv));
886 +-
887 +- rv.msr_no = msr_no;
888 +- rv.reg.l = l;
889 +- rv.reg.h = h;
890 +- err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
891 +-
892 +- return err;
893 +-}
894 +-EXPORT_SYMBOL(wrmsr_on_cpu);
895 +-
896 +-/* rdmsr on a bunch of CPUs
897 +- *
898 +- * @mask: which CPUs
899 +- * @msr_no: which MSR
900 +- * @msrs: array of MSR values
901 +- *
902 +- */
903 +-void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
904 +-{
905 +- struct msr_info rv;
906 +- int this_cpu;
907 +-
908 +- memset(&rv, 0, sizeof(rv));
909 +-
910 +- rv.off = cpumask_first(mask);
911 +- rv.msrs = msrs;
912 +- rv.msr_no = msr_no;
913 +-
914 +- this_cpu = get_cpu();
915 +-
916 +- if (cpumask_test_cpu(this_cpu, mask))
917 +- __rdmsr_on_cpu(&rv);
918 +-
919 +- smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
920 +- put_cpu();
921 +-}
922 +-EXPORT_SYMBOL(rdmsr_on_cpus);
923 +-
924 +-/*
925 +- * wrmsr on a bunch of CPUs
926 +- *
927 +- * @mask: which CPUs
928 +- * @msr_no: which MSR
929 +- * @msrs: array of MSR values
930 +- *
931 +- */
932 +-void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
933 +-{
934 +- struct msr_info rv;
935 +- int this_cpu;
936 +-
937 +- memset(&rv, 0, sizeof(rv));
938 +-
939 +- rv.off = cpumask_first(mask);
940 +- rv.msrs = msrs;
941 +- rv.msr_no = msr_no;
942 +-
943 +- this_cpu = get_cpu();
944 +-
945 +- if (cpumask_test_cpu(this_cpu, mask))
946 +- __wrmsr_on_cpu(&rv);
947 +-
948 +- smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
949 +- put_cpu();
950 +-}
951 +-EXPORT_SYMBOL(wrmsr_on_cpus);
952 +-
953 +-/* These "safe" variants are slower and should be used when the target MSR
954 +- may not actually exist. */
955 +-static void __rdmsr_safe_on_cpu(void *info)
956 +-{
957 +- struct msr_info *rv = info;
958 +-
959 +- rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
960 +-}
961 +-
962 +-static void __wrmsr_safe_on_cpu(void *info)
963 +-{
964 +- struct msr_info *rv = info;
965 +-
966 +- rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
967 +-}
968 +-
969 +-int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
970 ++struct msr *msrs_alloc(void)
971 + {
972 +- int err;
973 +- struct msr_info rv;
974 ++ struct msr *msrs = NULL;
975 +
976 +- memset(&rv, 0, sizeof(rv));
977 ++ msrs = alloc_percpu(struct msr);
978 ++ if (!msrs) {
979 ++ pr_warning("%s: error allocating msrs\n", __func__);
980 ++ return NULL;
981 ++ }
982 +
983 +- rv.msr_no = msr_no;
984 +- err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
985 +- *l = rv.reg.l;
986 +- *h = rv.reg.h;
987 +-
988 +- return err ? err : rv.err;
989 ++ return msrs;
990 + }
991 +-EXPORT_SYMBOL(rdmsr_safe_on_cpu);
992 ++EXPORT_SYMBOL(msrs_alloc);
993 +
994 +-int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
995 ++void msrs_free(struct msr *msrs)
996 + {
997 +- int err;
998 +- struct msr_info rv;
999 +-
1000 +- memset(&rv, 0, sizeof(rv));
1001 +-
1002 +- rv.msr_no = msr_no;
1003 +- rv.reg.l = l;
1004 +- rv.reg.h = h;
1005 +- err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
1006 +-
1007 +- return err ? err : rv.err;
1008 +-}
1009 +-EXPORT_SYMBOL(wrmsr_safe_on_cpu);
1010 +-
1011 +-/*
1012 +- * These variants are significantly slower, but allows control over
1013 +- * the entire 32-bit GPR set.
1014 +- */
1015 +-struct msr_regs_info {
1016 +- u32 *regs;
1017 +- int err;
1018 +-};
1019 +-
1020 +-static void __rdmsr_safe_regs_on_cpu(void *info)
1021 +-{
1022 +- struct msr_regs_info *rv = info;
1023 +-
1024 +- rv->err = rdmsr_safe_regs(rv->regs);
1025 +-}
1026 +-
1027 +-static void __wrmsr_safe_regs_on_cpu(void *info)
1028 +-{
1029 +- struct msr_regs_info *rv = info;
1030 +-
1031 +- rv->err = wrmsr_safe_regs(rv->regs);
1032 +-}
1033 +-
1034 +-int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
1035 +-{
1036 +- int err;
1037 +- struct msr_regs_info rv;
1038 +-
1039 +- rv.regs = regs;
1040 +- rv.err = -EIO;
1041 +- err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
1042 +-
1043 +- return err ? err : rv.err;
1044 +-}
1045 +-EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
1046 +-
1047 +-int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
1048 +-{
1049 +- int err;
1050 +- struct msr_regs_info rv;
1051 +-
1052 +- rv.regs = regs;
1053 +- rv.err = -EIO;
1054 +- err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
1055 +-
1056 +- return err ? err : rv.err;
1057 ++ free_percpu(msrs);
1058 + }
1059 +-EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
1060 ++EXPORT_SYMBOL(msrs_free);
1061 +diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
1062 +index 0c9c6a9..8a95e83 100644
1063 +--- a/drivers/acpi/button.c
1064 ++++ b/drivers/acpi/button.c
1065 +@@ -282,6 +282,13 @@ static int acpi_lid_send_state(struct acpi_device *device)
1066 + if (ret == NOTIFY_DONE)
1067 + ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
1068 + device);
1069 ++ if (ret == NOTIFY_DONE || ret == NOTIFY_OK) {
1070 ++ /*
1071 ++ * It is also regarded as success if the notifier_chain
1072 ++ * returns NOTIFY_OK or NOTIFY_DONE.
1073 ++ */
1074 ++ ret = 0;
1075 ++ }
1076 + return ret;
1077 + }
1078 +
1079 +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
1080 +index baef28c..7511029 100644
1081 +--- a/drivers/acpi/ec.c
1082 ++++ b/drivers/acpi/ec.c
1083 +@@ -916,6 +916,7 @@ static int ec_validate_ecdt(const struct dmi_system_id *id)
1084 + /* MSI EC needs special treatment, enable it */
1085 + static int ec_flag_msi(const struct dmi_system_id *id)
1086 + {
1087 ++ printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n");
1088 + EC_FLAGS_MSI = 1;
1089 + EC_FLAGS_VALIDATE_ECDT = 1;
1090 + return 0;
1091 +@@ -928,8 +929,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
1092 + DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
1093 + {
1094 + ec_flag_msi, "MSI hardware", {
1095 +- DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"),
1096 +- DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL},
1097 ++ DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star")}, NULL},
1098 ++ {
1099 ++ ec_flag_msi, "MSI hardware", {
1100 ++ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star")}, NULL},
1101 ++ {
1102 ++ ec_flag_msi, "MSI hardware", {
1103 ++ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL},
1104 + {
1105 + ec_validate_ecdt, "ASUS hardware", {
1106 + DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
1107 +diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
1108 +index f98dffe..f0bad9b 100644
1109 +--- a/drivers/ata/pata_cmd64x.c
1110 ++++ b/drivers/ata/pata_cmd64x.c
1111 +@@ -219,7 +219,7 @@ static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
1112 + regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift;
1113 + /* Merge the control bits */
1114 + regU |= 1 << adev->devno; /* UDMA on */
1115 +- if (adev->dma_mode > 2) /* 15nS timing */
1116 ++ if (adev->dma_mode > XFER_UDMA_2) /* 15nS timing */
1117 + regU |= 4 << adev->devno;
1118 + } else {
1119 + regU &= ~ (1 << adev->devno); /* UDMA off */
1120 +diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
1121 +index 21c5bd6..d16e87e 100644
1122 +--- a/drivers/ata/pata_hpt3x2n.c
1123 ++++ b/drivers/ata/pata_hpt3x2n.c
1124 +@@ -8,7 +8,7 @@
1125 + * Copyright (C) 1999-2003 Andre Hedrick <andre@×××××××××.org>
1126 + * Portions Copyright (C) 2001 Sun Microsystems, Inc.
1127 + * Portions Copyright (C) 2003 Red Hat Inc
1128 +- * Portions Copyright (C) 2005-2007 MontaVista Software, Inc.
1129 ++ * Portions Copyright (C) 2005-2009 MontaVista Software, Inc.
1130 + *
1131 + *
1132 + * TODO
1133 +@@ -25,7 +25,7 @@
1134 + #include <linux/libata.h>
1135 +
1136 + #define DRV_NAME "pata_hpt3x2n"
1137 +-#define DRV_VERSION "0.3.7"
1138 ++#define DRV_VERSION "0.3.8"
1139 +
1140 + enum {
1141 + HPT_PCI_FAST = (1 << 31),
1142 +@@ -262,7 +262,7 @@ static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)
1143 +
1144 + static void hpt3x2n_set_clock(struct ata_port *ap, int source)
1145 + {
1146 +- void __iomem *bmdma = ap->ioaddr.bmdma_addr;
1147 ++ void __iomem *bmdma = ap->ioaddr.bmdma_addr - ap->port_no * 8;
1148 +
1149 + /* Tristate the bus */
1150 + iowrite8(0x80, bmdma+0x73);
1151 +@@ -272,9 +272,9 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source)
1152 + iowrite8(source, bmdma+0x7B);
1153 + iowrite8(0xC0, bmdma+0x79);
1154 +
1155 +- /* Reset state machines */
1156 +- iowrite8(0x37, bmdma+0x70);
1157 +- iowrite8(0x37, bmdma+0x74);
1158 ++ /* Reset state machines, avoid enabling the disabled channels */
1159 ++ iowrite8(ioread8(bmdma+0x70) | 0x32, bmdma+0x70);
1160 ++ iowrite8(ioread8(bmdma+0x74) | 0x32, bmdma+0x74);
1161 +
1162 + /* Complete reset */
1163 + iowrite8(0x00, bmdma+0x79);
1164 +@@ -284,21 +284,10 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source)
1165 + iowrite8(0x00, bmdma+0x77);
1166 + }
1167 +
1168 +-/* Check if our partner interface is busy */
1169 +-
1170 +-static int hpt3x2n_pair_idle(struct ata_port *ap)
1171 +-{
1172 +- struct ata_host *host = ap->host;
1173 +- struct ata_port *pair = host->ports[ap->port_no ^ 1];
1174 +-
1175 +- if (pair->hsm_task_state == HSM_ST_IDLE)
1176 +- return 1;
1177 +- return 0;
1178 +-}
1179 +-
1180 + static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
1181 + {
1182 + long flags = (long)ap->host->private_data;
1183 ++
1184 + /* See if we should use the DPLL */
1185 + if (writing)
1186 + return USE_DPLL; /* Needed for write */
1187 +@@ -307,20 +296,35 @@ static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
1188 + return 0;
1189 + }
1190 +
1191 ++static int hpt3x2n_qc_defer(struct ata_queued_cmd *qc)
1192 ++{
1193 ++ struct ata_port *ap = qc->ap;
1194 ++ struct ata_port *alt = ap->host->ports[ap->port_no ^ 1];
1195 ++ int rc, flags = (long)ap->host->private_data;
1196 ++ int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
1197 ++
1198 ++ /* First apply the usual rules */
1199 ++ rc = ata_std_qc_defer(qc);
1200 ++ if (rc != 0)
1201 ++ return rc;
1202 ++
1203 ++ if ((flags & USE_DPLL) != dpll && alt->qc_active)
1204 ++ return ATA_DEFER_PORT;
1205 ++ return 0;
1206 ++}
1207 ++
1208 + static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
1209 + {
1210 +- struct ata_taskfile *tf = &qc->tf;
1211 + struct ata_port *ap = qc->ap;
1212 + int flags = (long)ap->host->private_data;
1213 ++ int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
1214 +
1215 +- if (hpt3x2n_pair_idle(ap)) {
1216 +- int dpll = hpt3x2n_use_dpll(ap, (tf->flags & ATA_TFLAG_WRITE));
1217 +- if ((flags & USE_DPLL) != dpll) {
1218 +- if (dpll == 1)
1219 +- hpt3x2n_set_clock(ap, 0x21);
1220 +- else
1221 +- hpt3x2n_set_clock(ap, 0x23);
1222 +- }
1223 ++ if ((flags & USE_DPLL) != dpll) {
1224 ++ flags &= ~USE_DPLL;
1225 ++ flags |= dpll;
1226 ++ ap->host->private_data = (void *)(long)flags;
1227 ++
1228 ++ hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23);
1229 + }
1230 + return ata_sff_qc_issue(qc);
1231 + }
1232 +@@ -337,6 +341,8 @@ static struct ata_port_operations hpt3x2n_port_ops = {
1233 + .inherits = &ata_bmdma_port_ops,
1234 +
1235 + .bmdma_stop = hpt3x2n_bmdma_stop,
1236 ++
1237 ++ .qc_defer = hpt3x2n_qc_defer,
1238 + .qc_issue = hpt3x2n_qc_issue,
1239 +
1240 + .cable_detect = hpt3x2n_cable_detect,
1241 +@@ -454,7 +460,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
1242 + unsigned int f_low, f_high;
1243 + int adjust;
1244 + unsigned long iobase = pci_resource_start(dev, 4);
1245 +- void *hpriv = NULL;
1246 ++ void *hpriv = (void *)USE_DPLL;
1247 + int rc;
1248 +
1249 + rc = pcim_enable_device(dev);
1250 +@@ -542,7 +548,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
1251 + /* Set our private data up. We only need a few flags so we use
1252 + it directly */
1253 + if (pci_mhz > 60) {
1254 +- hpriv = (void *)PCI66;
1255 ++ hpriv = (void *)(PCI66 | USE_DPLL);
1256 + /*
1257 + * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
1258 + * the MISC. register to stretch the UltraDMA Tss timing.
1259 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1260 +index 44bc8bb..1be7631 100644
1261 +--- a/drivers/bluetooth/btusb.c
1262 ++++ b/drivers/bluetooth/btusb.c
1263 +@@ -307,6 +307,7 @@ static void btusb_bulk_complete(struct urb *urb)
1264 + return;
1265 +
1266 + usb_anchor_urb(urb, &data->bulk_anchor);
1267 ++ usb_mark_last_busy(data->udev);
1268 +
1269 + err = usb_submit_urb(urb, GFP_ATOMIC);
1270 + if (err < 0) {
1271 +diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
1272 +index 7585c41..c558fa1 100644
1273 +--- a/drivers/dma/at_hdmac.c
1274 ++++ b/drivers/dma/at_hdmac.c
1275 +@@ -815,7 +815,7 @@ atc_is_tx_complete(struct dma_chan *chan,
1276 + dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
1277 + cookie, done ? *done : 0, used ? *used : 0);
1278 +
1279 +- spin_lock_bh(atchan->lock);
1280 ++ spin_lock_bh(&atchan->lock);
1281 +
1282 + last_complete = atchan->completed_cookie;
1283 + last_used = chan->cookie;
1284 +@@ -830,7 +830,7 @@ atc_is_tx_complete(struct dma_chan *chan,
1285 + ret = dma_async_is_complete(cookie, last_complete, last_used);
1286 + }
1287 +
1288 +- spin_unlock_bh(atchan->lock);
1289 ++ spin_unlock_bh(&atchan->lock);
1290 +
1291 + if (done)
1292 + *done = last_complete;
1293 +diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
1294 +index c524d36..dcc4ab7 100644
1295 +--- a/drivers/dma/ioat/dma.c
1296 ++++ b/drivers/dma/ioat/dma.c
1297 +@@ -1032,7 +1032,7 @@ int __devinit ioat_probe(struct ioatdma_device *device)
1298 + dma->dev = &pdev->dev;
1299 +
1300 + if (!dma->chancnt) {
1301 +- dev_err(dev, "zero channels detected\n");
1302 ++ dev_err(dev, "channel enumeration error\n");
1303 + goto err_setup_interrupts;
1304 + }
1305 +
1306 +diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
1307 +index 45edde9..bbc3e78 100644
1308 +--- a/drivers/dma/ioat/dma.h
1309 ++++ b/drivers/dma/ioat/dma.h
1310 +@@ -60,6 +60,7 @@
1311 + * @dca: direct cache access context
1312 + * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
1313 + * @enumerate_channels: hw version specific channel enumeration
1314 ++ * @reset_hw: hw version specific channel (re)initialization
1315 + * @cleanup_tasklet: select between the v2 and v3 cleanup routines
1316 + * @timer_fn: select between the v2 and v3 timer watchdog routines
1317 + * @self_test: hardware version specific self test for each supported op type
1318 +@@ -78,6 +79,7 @@ struct ioatdma_device {
1319 + struct dca_provider *dca;
1320 + void (*intr_quirk)(struct ioatdma_device *device);
1321 + int (*enumerate_channels)(struct ioatdma_device *device);
1322 ++ int (*reset_hw)(struct ioat_chan_common *chan);
1323 + void (*cleanup_tasklet)(unsigned long data);
1324 + void (*timer_fn)(unsigned long data);
1325 + int (*self_test)(struct ioatdma_device *device);
1326 +@@ -264,6 +266,22 @@ static inline void ioat_suspend(struct ioat_chan_common *chan)
1327 + writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
1328 + }
1329 +
1330 ++static inline void ioat_reset(struct ioat_chan_common *chan)
1331 ++{
1332 ++ u8 ver = chan->device->version;
1333 ++
1334 ++ writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
1335 ++}
1336 ++
1337 ++static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
1338 ++{
1339 ++ u8 ver = chan->device->version;
1340 ++ u8 cmd;
1341 ++
1342 ++ cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
1343 ++ return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
1344 ++}
1345 ++
1346 + static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
1347 + {
1348 + struct ioat_chan_common *chan = &ioat->base;
1349 +diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
1350 +index 8f1f7f0..5f7a500 100644
1351 +--- a/drivers/dma/ioat/dma_v2.c
1352 ++++ b/drivers/dma/ioat/dma_v2.c
1353 +@@ -239,20 +239,50 @@ void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
1354 + __ioat2_start_null_desc(ioat);
1355 + }
1356 +
1357 +-static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
1358 ++int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
1359 + {
1360 +- struct ioat_chan_common *chan = &ioat->base;
1361 +- unsigned long phys_complete;
1362 ++ unsigned long end = jiffies + tmo;
1363 ++ int err = 0;
1364 + u32 status;
1365 +
1366 + status = ioat_chansts(chan);
1367 + if (is_ioat_active(status) || is_ioat_idle(status))
1368 + ioat_suspend(chan);
1369 + while (is_ioat_active(status) || is_ioat_idle(status)) {
1370 ++ if (end && time_after(jiffies, end)) {
1371 ++ err = -ETIMEDOUT;
1372 ++ break;
1373 ++ }
1374 + status = ioat_chansts(chan);
1375 + cpu_relax();
1376 + }
1377 +
1378 ++ return err;
1379 ++}
1380 ++
1381 ++int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
1382 ++{
1383 ++ unsigned long end = jiffies + tmo;
1384 ++ int err = 0;
1385 ++
1386 ++ ioat_reset(chan);
1387 ++ while (ioat_reset_pending(chan)) {
1388 ++ if (end && time_after(jiffies, end)) {
1389 ++ err = -ETIMEDOUT;
1390 ++ break;
1391 ++ }
1392 ++ cpu_relax();
1393 ++ }
1394 ++
1395 ++ return err;
1396 ++}
1397 ++
1398 ++static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
1399 ++{
1400 ++ struct ioat_chan_common *chan = &ioat->base;
1401 ++ unsigned long phys_complete;
1402 ++
1403 ++ ioat2_quiesce(chan, 0);
1404 + if (ioat_cleanup_preamble(chan, &phys_complete))
1405 + __cleanup(ioat, phys_complete);
1406 +
1407 +@@ -318,6 +348,19 @@ void ioat2_timer_event(unsigned long data)
1408 + spin_unlock_bh(&chan->cleanup_lock);
1409 + }
1410 +
1411 ++static int ioat2_reset_hw(struct ioat_chan_common *chan)
1412 ++{
1413 ++ /* throw away whatever the channel was doing and get it initialized */
1414 ++ u32 chanerr;
1415 ++
1416 ++ ioat2_quiesce(chan, msecs_to_jiffies(100));
1417 ++
1418 ++ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
1419 ++ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
1420 ++
1421 ++ return ioat2_reset_sync(chan, msecs_to_jiffies(200));
1422 ++}
1423 ++
1424 + /**
1425 + * ioat2_enumerate_channels - find and initialize the device's channels
1426 + * @device: the device to be enumerated
1427 +@@ -360,6 +403,10 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
1428 + (unsigned long) ioat);
1429 + ioat->xfercap_log = xfercap_log;
1430 + spin_lock_init(&ioat->ring_lock);
1431 ++ if (device->reset_hw(&ioat->base)) {
1432 ++ i = 0;
1433 ++ break;
1434 ++ }
1435 + }
1436 + dma->chancnt = i;
1437 + return i;
1438 +@@ -467,7 +514,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
1439 + struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
1440 + struct ioat_chan_common *chan = &ioat->base;
1441 + struct ioat_ring_ent **ring;
1442 +- u32 chanerr;
1443 + int order;
1444 +
1445 + /* have we already been set up? */
1446 +@@ -477,12 +523,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
1447 + /* Setup register to interrupt and write completion status on error */
1448 + writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
1449 +
1450 +- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
1451 +- if (chanerr) {
1452 +- dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
1453 +- writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
1454 +- }
1455 +-
1456 + /* allocate a completion writeback area */
1457 + /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
1458 + chan->completion = pci_pool_alloc(chan->device->completion_pool,
1459 +@@ -746,13 +786,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
1460 + tasklet_disable(&chan->cleanup_task);
1461 + del_timer_sync(&chan->timer);
1462 + device->cleanup_tasklet((unsigned long) ioat);
1463 +-
1464 +- /* Delay 100ms after reset to allow internal DMA logic to quiesce
1465 +- * before removing DMA descriptor resources.
1466 +- */
1467 +- writeb(IOAT_CHANCMD_RESET,
1468 +- chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
1469 +- mdelay(100);
1470 ++ device->reset_hw(chan);
1471 +
1472 + spin_lock_bh(&ioat->ring_lock);
1473 + descs = ioat2_ring_space(ioat);
1474 +@@ -839,6 +873,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
1475 + int err;
1476 +
1477 + device->enumerate_channels = ioat2_enumerate_channels;
1478 ++ device->reset_hw = ioat2_reset_hw;
1479 + device->cleanup_tasklet = ioat2_cleanup_tasklet;
1480 + device->timer_fn = ioat2_timer_event;
1481 + device->self_test = ioat_dma_self_test;
1482 +diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
1483 +index 1d849ef..3afad8d 100644
1484 +--- a/drivers/dma/ioat/dma_v2.h
1485 ++++ b/drivers/dma/ioat/dma_v2.h
1486 +@@ -185,6 +185,8 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
1487 + void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
1488 + void ioat2_cleanup_tasklet(unsigned long data);
1489 + void ioat2_timer_event(unsigned long data);
1490 ++int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
1491 ++int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
1492 + extern struct kobj_type ioat2_ktype;
1493 + extern struct kmem_cache *ioat2_cache;
1494 + #endif /* IOATDMA_V2_H */
1495 +diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
1496 +index 42f6f10..9908c9e 100644
1497 +--- a/drivers/dma/ioat/dma_v3.c
1498 ++++ b/drivers/dma/ioat/dma_v3.c
1499 +@@ -650,9 +650,11 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
1500 +
1501 + num_descs = ioat2_xferlen_to_descs(ioat, len);
1502 + /* we need 2x the number of descriptors to cover greater than 3
1503 +- * sources
1504 ++ * sources (we need 1 extra source in the q-only continuation
1505 ++ * case and 3 extra sources in the p+q continuation case.
1506 + */
1507 +- if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) {
1508 ++ if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
1509 ++ (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
1510 + with_ext = 1;
1511 + num_descs *= 2;
1512 + } else
1513 +@@ -1128,6 +1130,45 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
1514 + return 0;
1515 + }
1516 +
1517 ++static int ioat3_reset_hw(struct ioat_chan_common *chan)
1518 ++{
1519 ++ /* throw away whatever the channel was doing and get it
1520 ++ * initialized, with ioat3 specific workarounds
1521 ++ */
1522 ++ struct ioatdma_device *device = chan->device;
1523 ++ struct pci_dev *pdev = device->pdev;
1524 ++ u32 chanerr;
1525 ++ u16 dev_id;
1526 ++ int err;
1527 ++
1528 ++ ioat2_quiesce(chan, msecs_to_jiffies(100));
1529 ++
1530 ++ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
1531 ++ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
1532 ++
1533 ++ /* -= IOAT ver.3 workarounds =- */
1534 ++ /* Write CHANERRMSK_INT with 3E07h to mask out the errors
1535 ++ * that can cause stability issues for IOAT ver.3, and clear any
1536 ++ * pending errors
1537 ++ */
1538 ++ pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
1539 ++ err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
1540 ++ if (err) {
1541 ++ dev_err(&pdev->dev, "channel error register unreachable\n");
1542 ++ return err;
1543 ++ }
1544 ++ pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
1545 ++
1546 ++ /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
1547 ++ * (workaround for spurious config parity error after restart)
1548 ++ */
1549 ++ pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
1550 ++ if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
1551 ++ pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
1552 ++
1553 ++ return ioat2_reset_sync(chan, msecs_to_jiffies(200));
1554 ++}
1555 ++
1556 + int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1557 + {
1558 + struct pci_dev *pdev = device->pdev;
1559 +@@ -1137,10 +1178,10 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1560 + struct ioat_chan_common *chan;
1561 + bool is_raid_device = false;
1562 + int err;
1563 +- u16 dev_id;
1564 + u32 cap;
1565 +
1566 + device->enumerate_channels = ioat2_enumerate_channels;
1567 ++ device->reset_hw = ioat3_reset_hw;
1568 + device->self_test = ioat3_dma_self_test;
1569 + dma = &device->common;
1570 + dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
1571 +@@ -1216,19 +1257,6 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1572 + dma->device_prep_dma_xor_val = NULL;
1573 + #endif
1574 +
1575 +- /* -= IOAT ver.3 workarounds =- */
1576 +- /* Write CHANERRMSK_INT with 3E07h to mask out the errors
1577 +- * that can cause stability issues for IOAT ver.3
1578 +- */
1579 +- pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
1580 +-
1581 +- /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
1582 +- * (workaround for spurious config parity error after restart)
1583 +- */
1584 +- pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
1585 +- if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
1586 +- pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
1587 +-
1588 + err = ioat_probe(device);
1589 + if (err)
1590 + return err;
1591 +diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
1592 +index f015ec1..e8ae63b 100644
1593 +--- a/drivers/dma/ioat/registers.h
1594 ++++ b/drivers/dma/ioat/registers.h
1595 +@@ -27,6 +27,7 @@
1596 +
1597 + #define IOAT_PCI_DEVICE_ID_OFFSET 0x02
1598 + #define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148
1599 ++#define IOAT_PCI_CHANERR_INT_OFFSET 0x180
1600 + #define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184
1601 +
1602 + /* MMIO Device Registers */
1603 +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
1604 +index a38831c..a0bcfba 100644
1605 +--- a/drivers/edac/amd64_edac.c
1606 ++++ b/drivers/edac/amd64_edac.c
1607 +@@ -13,6 +13,8 @@ module_param(report_gart_errors, int, 0644);
1608 + static int ecc_enable_override;
1609 + module_param(ecc_enable_override, int, 0644);
1610 +
1611 ++static struct msr *msrs;
1612 ++
1613 + /* Lookup table for all possible MC control instances */
1614 + struct amd64_pvt;
1615 + static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
1616 +@@ -2618,6 +2620,90 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
1617 + return empty;
1618 + }
1619 +
1620 ++/* get all cores on this DCT */
1621 ++static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
1622 ++{
1623 ++ int cpu;
1624 ++
1625 ++ for_each_online_cpu(cpu)
1626 ++ if (amd_get_nb_id(cpu) == nid)
1627 ++ cpumask_set_cpu(cpu, mask);
1628 ++}
1629 ++
1630 ++/* check MCG_CTL on all the cpus on this node */
1631 ++static bool amd64_nb_mce_bank_enabled_on_node(int nid)
1632 ++{
1633 ++ cpumask_var_t mask;
1634 ++ int cpu, nbe;
1635 ++ bool ret = false;
1636 ++
1637 ++ if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
1638 ++ amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
1639 ++ __func__);
1640 ++ return false;
1641 ++ }
1642 ++
1643 ++ get_cpus_on_this_dct_cpumask(mask, nid);
1644 ++
1645 ++ rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
1646 ++
1647 ++ for_each_cpu(cpu, mask) {
1648 ++ struct msr *reg = per_cpu_ptr(msrs, cpu);
1649 ++ nbe = reg->l & K8_MSR_MCGCTL_NBE;
1650 ++
1651 ++ debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
1652 ++ cpu, reg->q,
1653 ++ (nbe ? "enabled" : "disabled"));
1654 ++
1655 ++ if (!nbe)
1656 ++ goto out;
1657 ++ }
1658 ++ ret = true;
1659 ++
1660 ++out:
1661 ++ free_cpumask_var(mask);
1662 ++ return ret;
1663 ++}
1664 ++
1665 ++static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
1666 ++{
1667 ++ cpumask_var_t cmask;
1668 ++ int cpu;
1669 ++
1670 ++ if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
1671 ++ amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
1672 ++ __func__);
1673 ++ return false;
1674 ++ }
1675 ++
1676 ++ get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
1677 ++
1678 ++ rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
1679 ++
1680 ++ for_each_cpu(cpu, cmask) {
1681 ++
1682 ++ struct msr *reg = per_cpu_ptr(msrs, cpu);
1683 ++
1684 ++ if (on) {
1685 ++ if (reg->l & K8_MSR_MCGCTL_NBE)
1686 ++ pvt->flags.ecc_report = 1;
1687 ++
1688 ++ reg->l |= K8_MSR_MCGCTL_NBE;
1689 ++ } else {
1690 ++ /*
1691 ++ * Turn off ECC reporting only when it was off before
1692 ++ */
1693 ++ if (!pvt->flags.ecc_report)
1694 ++ reg->l &= ~K8_MSR_MCGCTL_NBE;
1695 ++ }
1696 ++ }
1697 ++ wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
1698 ++
1699 ++ free_cpumask_var(cmask);
1700 ++
1701 ++ return 0;
1702 ++}
1703 ++
1704 + /*
1705 + * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
1706 + * enable it.
1707 +@@ -2625,17 +2711,12 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
1708 + static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
1709 + {
1710 + struct amd64_pvt *pvt = mci->pvt_info;
1711 +- const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
1712 +- int cpu, idx = 0, err = 0;
1713 +- struct msr msrs[cpumask_weight(cpumask)];
1714 +- u32 value;
1715 +- u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
1716 ++ int err = 0;
1717 ++ u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
1718 +
1719 + if (!ecc_enable_override)
1720 + return;
1721 +
1722 +- memset(msrs, 0, sizeof(msrs));
1723 +-
1724 + amd64_printk(KERN_WARNING,
1725 + "'ecc_enable_override' parameter is active, "
1726 + "Enabling AMD ECC hardware now: CAUTION\n");
1727 +@@ -2651,16 +2732,9 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
1728 + value |= mask;
1729 + pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
1730 +
1731 +- rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
1732 +-
1733 +- for_each_cpu(cpu, cpumask) {
1734 +- if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
1735 +- set_bit(idx, &pvt->old_mcgctl);
1736 +-
1737 +- msrs[idx].l |= K8_MSR_MCGCTL_NBE;
1738 +- idx++;
1739 +- }
1740 +- wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
1741 ++ if (amd64_toggle_ecc_err_reporting(pvt, ON))
1742 ++ amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
1743 ++ "MCGCTL!\n");
1744 +
1745 + err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
1746 + if (err)
1747 +@@ -2701,17 +2775,12 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
1748 +
1749 + static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
1750 + {
1751 +- const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
1752 +- int cpu, idx = 0, err = 0;
1753 +- struct msr msrs[cpumask_weight(cpumask)];
1754 +- u32 value;
1755 +- u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
1756 ++ int err = 0;
1757 ++ u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
1758 +
1759 + if (!pvt->nbctl_mcgctl_saved)
1760 + return;
1761 +
1762 +- memset(msrs, 0, sizeof(msrs));
1763 +-
1764 + err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
1765 + if (err)
1766 + debugf0("Reading K8_NBCTL failed\n");
1767 +@@ -2721,66 +2790,9 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
1768 + /* restore the NB Enable MCGCTL bit */
1769 + pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
1770 +
1771 +- rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
1772 +-
1773 +- for_each_cpu(cpu, cpumask) {
1774 +- msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
1775 +- msrs[idx].l |=
1776 +- test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE;
1777 +- idx++;
1778 +- }
1779 +-
1780 +- wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
1781 +-}
1782 +-
1783 +-/* get all cores on this DCT */
1784 +-static void get_cpus_on_this_dct_cpumask(cpumask_t *mask, int nid)
1785 +-{
1786 +- int cpu;
1787 +-
1788 +- for_each_online_cpu(cpu)
1789 +- if (amd_get_nb_id(cpu) == nid)
1790 +- cpumask_set_cpu(cpu, mask);
1791 +-}
1792 +-
1793 +-/* check MCG_CTL on all the cpus on this node */
1794 +-static bool amd64_nb_mce_bank_enabled_on_node(int nid)
1795 +-{
1796 +- cpumask_t mask;
1797 +- struct msr *msrs;
1798 +- int cpu, nbe, idx = 0;
1799 +- bool ret = false;
1800 +-
1801 +- cpumask_clear(&mask);
1802 +-
1803 +- get_cpus_on_this_dct_cpumask(&mask, nid);
1804 +-
1805 +- msrs = kzalloc(sizeof(struct msr) * cpumask_weight(&mask), GFP_KERNEL);
1806 +- if (!msrs) {
1807 +- amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
1808 +- __func__);
1809 +- return false;
1810 +- }
1811 +-
1812 +- rdmsr_on_cpus(&mask, MSR_IA32_MCG_CTL, msrs);
1813 +-
1814 +- for_each_cpu(cpu, &mask) {
1815 +- nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
1816 +-
1817 +- debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
1818 +- cpu, msrs[idx].q,
1819 +- (nbe ? "enabled" : "disabled"));
1820 +-
1821 +- if (!nbe)
1822 +- goto out;
1823 +-
1824 +- idx++;
1825 +- }
1826 +- ret = true;
1827 +-
1828 +-out:
1829 +- kfree(msrs);
1830 +- return ret;
1831 ++ if (amd64_toggle_ecc_err_reporting(pvt, OFF))
1832 ++ amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
1833 ++ "MCGCTL!\n");
1834 + }
1835 +
1836 + /*
1837 +@@ -2824,9 +2836,8 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
1838 + amd64_printk(KERN_WARNING, "%s", ecc_warning);
1839 + return -ENODEV;
1840 + }
1841 +- } else
1842 +- /* CLEAR the override, since BIOS controlled it */
1843 + ecc_enable_override = 0;
1844 ++ }
1845 +
1846 + return 0;
1847 + }
1848 +@@ -2909,7 +2920,6 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
1849 + pvt->ext_model = boot_cpu_data.x86_model >> 4;
1850 + pvt->mc_type_index = mc_type_index;
1851 + pvt->ops = family_ops(mc_type_index);
1852 +- pvt->old_mcgctl = 0;
1853 +
1854 + /*
1855 + * We have the dram_f2_ctl device as an argument, now go reserve its
1856 +@@ -3071,16 +3081,15 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
1857 +
1858 + amd64_free_mc_sibling_devices(pvt);
1859 +
1860 +- kfree(pvt);
1861 +- mci->pvt_info = NULL;
1862 +-
1863 +- mci_lookup[pvt->mc_node_id] = NULL;
1864 +-
1865 + /* unregister from EDAC MCE */
1866 + amd_report_gart_errors(false);
1867 + amd_unregister_ecc_decoder(amd64_decode_bus_error);
1868 +
1869 + /* Free the EDAC CORE resources */
1870 ++ mci->pvt_info = NULL;
1871 ++ mci_lookup[pvt->mc_node_id] = NULL;
1872 ++
1873 ++ kfree(pvt);
1874 + edac_mc_free(mci);
1875 + }
1876 +
1877 +@@ -3157,23 +3166,29 @@ static void amd64_setup_pci_device(void)
1878 + static int __init amd64_edac_init(void)
1879 + {
1880 + int nb, err = -ENODEV;
1881 ++ bool load_ok = false;
1882 +
1883 + edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
1884 +
1885 + opstate_init();
1886 +
1887 + if (cache_k8_northbridges() < 0)
1888 +- return err;
1889 ++ goto err_ret;
1890 ++
1891 ++ msrs = msrs_alloc();
1892 ++ if (!msrs)
1893 ++ goto err_ret;
1894 +
1895 + err = pci_register_driver(&amd64_pci_driver);
1896 + if (err)
1897 +- return err;
1898 ++ goto err_pci;
1899 +
1900 + /*
1901 + * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
1902 + * amd64_pvt structs. These will be used in the 2nd stage init function
1903 + * to finish initialization of the MC instances.
1904 + */
1905 ++ err = -ENODEV;
1906 + for (nb = 0; nb < num_k8_northbridges; nb++) {
1907 + if (!pvt_lookup[nb])
1908 + continue;
1909 +@@ -3181,16 +3196,21 @@ static int __init amd64_edac_init(void)
1910 + err = amd64_init_2nd_stage(pvt_lookup[nb]);
1911 + if (err)
1912 + goto err_2nd_stage;
1913 +- }
1914 +
1915 +- amd64_setup_pci_device();
1916 ++ load_ok = true;
1917 ++ }
1918 +
1919 +- return 0;
1920 ++ if (load_ok) {
1921 ++ amd64_setup_pci_device();
1922 ++ return 0;
1923 ++ }
1924 +
1925 + err_2nd_stage:
1926 +- debugf0("2nd stage failed\n");
1927 + pci_unregister_driver(&amd64_pci_driver);
1928 +-
1929 ++err_pci:
1930 ++ msrs_free(msrs);
1931 ++ msrs = NULL;
1932 ++err_ret:
1933 + return err;
1934 + }
1935 +
1936 +@@ -3200,6 +3220,9 @@ static void __exit amd64_edac_exit(void)
1937 + edac_pci_release_generic_ctl(amd64_ctl_pci);
1938 +
1939 + pci_unregister_driver(&amd64_pci_driver);
1940 ++
1941 ++ msrs_free(msrs);
1942 ++ msrs = NULL;
1943 + }
1944 +
1945 + module_init(amd64_edac_init);
1946 +diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
1947 +index c6f359a..bba6c94 100644
1948 +--- a/drivers/edac/amd64_edac.h
1949 ++++ b/drivers/edac/amd64_edac.h
1950 +@@ -147,6 +147,8 @@
1951 + #define MAX_CS_COUNT 8
1952 + #define DRAM_REG_COUNT 8
1953 +
1954 ++#define ON true
1955 ++#define OFF false
1956 +
1957 + /*
1958 + * PCI-defined configuration space registers
1959 +@@ -386,10 +388,7 @@ enum {
1960 + #define K8_NBCAP_DUAL_NODE BIT(1)
1961 + #define K8_NBCAP_DCT_DUAL BIT(0)
1962 +
1963 +-/*
1964 +- * MSR Regs
1965 +- */
1966 +-#define K8_MSR_MCGCTL 0x017b
1967 ++/* MSRs */
1968 + #define K8_MSR_MCGCTL_NBE BIT(4)
1969 +
1970 + #define K8_MSR_MC4CTL 0x0410
1971 +@@ -487,7 +486,6 @@ struct amd64_pvt {
1972 + /* Save old hw registers' values before we modified them */
1973 + u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */
1974 + u32 old_nbctl;
1975 +- unsigned long old_mcgctl; /* per core on this node */
1976 +
1977 + /* MC Type Index value: socket F vs Family 10h */
1978 + u32 mc_type_index;
1979 +@@ -495,6 +493,7 @@ struct amd64_pvt {
1980 + /* misc settings */
1981 + struct flags {
1982 + unsigned long cf8_extcfg:1;
1983 ++ unsigned long ecc_report:1;
1984 + } flags;
1985 + };
1986 +
1987 +diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
1988 +index bbfd110..afed886 100644
1989 +--- a/drivers/gpu/drm/drm_crtc_helper.c
1990 ++++ b/drivers/gpu/drm/drm_crtc_helper.c
1991 +@@ -1020,6 +1020,9 @@ bool drm_helper_initial_config(struct drm_device *dev)
1992 + {
1993 + int count = 0;
1994 +
1995 ++ /* disable all the possible outputs/crtcs before entering KMS mode */
1996 ++ drm_helper_disable_unused_functions(dev);
1997 ++
1998 + drm_fb_helper_parse_command_line(dev);
1999 +
2000 + count = drm_helper_probe_connector_modes(dev,
2001 +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
2002 +index c6777cb..19f93f2 100644
2003 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c
2004 ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
2005 +@@ -249,13 +249,15 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
2006 + if (ASIC_IS_DCE3(rdev))
2007 + atombios_enable_crtc_memreq(crtc, 1);
2008 + atombios_blank_crtc(crtc, 0);
2009 +- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
2010 ++ if (rdev->family < CHIP_R600)
2011 ++ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
2012 + radeon_crtc_load_lut(crtc);
2013 + break;
2014 + case DRM_MODE_DPMS_STANDBY:
2015 + case DRM_MODE_DPMS_SUSPEND:
2016 + case DRM_MODE_DPMS_OFF:
2017 +- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
2018 ++ if (rdev->family < CHIP_R600)
2019 ++ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
2020 + atombios_blank_crtc(crtc, 1);
2021 + if (ASIC_IS_DCE3(rdev))
2022 + atombios_enable_crtc_memreq(crtc, 0);
2023 +diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
2024 +index f8a465d..c8942ca 100644
2025 +--- a/drivers/gpu/drm/radeon/radeon_test.c
2026 ++++ b/drivers/gpu/drm/radeon/radeon_test.c
2027 +@@ -42,8 +42,8 @@ void radeon_test_moves(struct radeon_device *rdev)
2028 + /* Number of tests =
2029 + * (Total GTT - IB pool - writeback page - ring buffer) / test size
2030 + */
2031 +- n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
2032 +- rdev->cp.ring_size) / size;
2033 ++ n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
2034 ++ rdev->cp.ring_size)) / size;
2035 +
2036 + gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
2037 + if (!gtt_obj) {
2038 +diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
2039 +index ebe38b6..864a371 100644
2040 +--- a/drivers/hwmon/sht15.c
2041 ++++ b/drivers/hwmon/sht15.c
2042 +@@ -305,7 +305,7 @@ static inline int sht15_calc_temp(struct sht15_data *data)
2043 + int d1 = 0;
2044 + int i;
2045 +
2046 +- for (i = 1; i < ARRAY_SIZE(temppoints) - 1; i++)
2047 ++ for (i = 1; i < ARRAY_SIZE(temppoints); i++)
2048 + /* Find pointer to interpolate */
2049 + if (data->supply_uV > temppoints[i - 1].vdd) {
2050 + d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
2051 +@@ -332,12 +332,12 @@ static inline int sht15_calc_humid(struct sht15_data *data)
2052 +
2053 + const int c1 = -4;
2054 + const int c2 = 40500; /* x 10 ^ -6 */
2055 +- const int c3 = 2800; /* x10 ^ -9 */
2056 ++ const int c3 = -2800; /* x10 ^ -9 */
2057 +
2058 + RHlinear = c1*1000
2059 + + c2 * data->val_humid/1000
2060 + + (data->val_humid * data->val_humid * c3)/1000000;
2061 +- return (temp - 25000) * (10000 + 800 * data->val_humid)
2062 ++ return (temp - 25000) * (10000 + 80 * data->val_humid)
2063 + / 1000000 + RHlinear;
2064 + }
2065 +
2066 +diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
2067 +index 951c57b..ede4658 100644
2068 +--- a/drivers/lguest/segments.c
2069 ++++ b/drivers/lguest/segments.c
2070 +@@ -179,8 +179,10 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
2071 + * We assume the Guest has the same number of GDT entries as the
2072 + * Host, otherwise we'd have to dynamically allocate the Guest GDT.
2073 + */
2074 +- if (num >= ARRAY_SIZE(cpu->arch.gdt))
2075 ++ if (num >= ARRAY_SIZE(cpu->arch.gdt)) {
2076 + kill_guest(cpu, "too many gdt entries %i", num);
2077 ++ return;
2078 ++ }
2079 +
2080 + /* Set it up, then fix it. */
2081 + cpu->arch.gdt[num].a = lo;
2082 +diff --git a/drivers/md/md.c b/drivers/md/md.c
2083 +index 02e4551..c6a6685 100644
2084 +--- a/drivers/md/md.c
2085 ++++ b/drivers/md/md.c
2086 +@@ -282,7 +282,9 @@ static void mddev_put(mddev_t *mddev)
2087 + if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
2088 + return;
2089 + if (!mddev->raid_disks && list_empty(&mddev->disks) &&
2090 +- !mddev->hold_active) {
2091 ++ mddev->ctime == 0 && !mddev->hold_active) {
2092 ++ /* Array is not configured at all, and not held active,
2093 ++ * so destroy it */
2094 + list_del(&mddev->all_mddevs);
2095 + if (mddev->gendisk) {
2096 + /* we did a probe so need to clean up.
2097 +@@ -5071,6 +5073,10 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
2098 + mddev->minor_version = info->minor_version;
2099 + mddev->patch_version = info->patch_version;
2100 + mddev->persistent = !info->not_persistent;
2101 ++ /* ensure mddev_put doesn't delete this now that there
2102 ++ * is some minimal configuration.
2103 ++ */
2104 ++ mddev->ctime = get_seconds();
2105 + return 0;
2106 + }
2107 + mddev->major_version = MD_MAJOR_VERSION;
2108 +diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
2109 +index 0bc2cf5..2bed9e2 100644
2110 +--- a/drivers/media/video/ov511.c
2111 ++++ b/drivers/media/video/ov511.c
2112 +@@ -5878,7 +5878,7 @@ ov51x_probe(struct usb_interface *intf, const struct usb_device_id *id)
2113 + goto error;
2114 + }
2115 +
2116 +- mutex_lock(&ov->lock);
2117 ++ mutex_unlock(&ov->lock);
2118 +
2119 + return 0;
2120 +
2121 +diff --git a/drivers/net/e100.c b/drivers/net/e100.c
2122 +index d269a68..0c53c92 100644
2123 +--- a/drivers/net/e100.c
2124 ++++ b/drivers/net/e100.c
2125 +@@ -1817,6 +1817,7 @@ static int e100_alloc_cbs(struct nic *nic)
2126 + &nic->cbs_dma_addr);
2127 + if (!nic->cbs)
2128 + return -ENOMEM;
2129 ++ memset(nic->cbs, 0, count * sizeof(struct cb));
2130 +
2131 + for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
2132 + cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
2133 +@@ -1825,7 +1826,6 @@ static int e100_alloc_cbs(struct nic *nic)
2134 + cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
2135 + cb->link = cpu_to_le32(nic->cbs_dma_addr +
2136 + ((i+1) % count) * sizeof(struct cb));
2137 +- cb->skb = NULL;
2138 + }
2139 +
2140 + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
2141 +diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
2142 +index b091e20..f14d225 100644
2143 +--- a/drivers/net/usb/rtl8150.c
2144 ++++ b/drivers/net/usb/rtl8150.c
2145 +@@ -324,7 +324,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
2146 + dbg("%02X:", netdev->dev_addr[i]);
2147 + dbg("%02X\n", netdev->dev_addr[i]);
2148 + /* Set the IDR registers. */
2149 +- set_registers(dev, IDR, sizeof(netdev->dev_addr), netdev->dev_addr);
2150 ++ set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
2151 + #ifdef EEPROM_WRITE
2152 + {
2153 + u8 cr;
2154 +diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
2155 +index 95a8e23..8a82c75 100644
2156 +--- a/drivers/net/wireless/ath/ath5k/base.c
2157 ++++ b/drivers/net/wireless/ath/ath5k/base.c
2158 +@@ -2349,6 +2349,9 @@ ath5k_init(struct ath5k_softc *sc)
2159 + */
2160 + ath5k_stop_locked(sc);
2161 +
2162 ++ /* Set PHY calibration interval */
2163 ++ ah->ah_cal_intval = ath5k_calinterval;
2164 ++
2165 + /*
2166 + * The basic interface to setting the hardware in a good
2167 + * state is ``reset''. On return the hardware is known to
2168 +@@ -2376,10 +2379,6 @@ ath5k_init(struct ath5k_softc *sc)
2169 +
2170 + /* Set ack to be sent at low bit-rates */
2171 + ath5k_hw_set_ack_bitrate_high(ah, false);
2172 +-
2173 +- /* Set PHY calibration inteval */
2174 +- ah->ah_cal_intval = ath5k_calinterval;
2175 +-
2176 + ret = 0;
2177 + done:
2178 + mmiowb();
2179 +diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
2180 +index 57f1463..ff4383b 100644
2181 +--- a/drivers/net/wireless/ath/ath9k/hw.h
2182 ++++ b/drivers/net/wireless/ath/ath9k/hw.h
2183 +@@ -408,7 +408,7 @@ struct ath9k_hw_version {
2184 + * Using de Bruijin sequence to to look up 1's index in a 32 bit number
2185 + * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001
2186 + */
2187 +-#define debruijn32 0x077CB531UL
2188 ++#define debruijn32 0x077CB531U
2189 +
2190 + struct ath_gen_timer_configuration {
2191 + u32 next_addr;
2192 +diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
2193 +index d4d9d82..110c16d 100644
2194 +--- a/drivers/net/wireless/ath/ath9k/mac.c
2195 ++++ b/drivers/net/wireless/ath/ath9k/mac.c
2196 +@@ -155,7 +155,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
2197 + wait = wait_time;
2198 + while (ath9k_hw_numtxpending(ah, q)) {
2199 + if ((--wait) == 0) {
2200 +- DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
2201 ++ DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
2202 + "Failed to stop TX DMA in 100 "
2203 + "msec after killing last frame\n");
2204 + break;
2205 +diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
2206 +index ff65f85..9720c4d 100644
2207 +--- a/drivers/net/wireless/ath/ath9k/mac.h
2208 ++++ b/drivers/net/wireless/ath/ath9k/mac.h
2209 +@@ -77,6 +77,9 @@
2210 + #define ATH9K_TXERR_XTXOP 0x08
2211 + #define ATH9K_TXERR_TIMER_EXPIRED 0x10
2212 + #define ATH9K_TX_ACKED 0x20
2213 ++#define ATH9K_TXERR_MASK \
2214 ++ (ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO | \
2215 ++ ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED)
2216 +
2217 + #define ATH9K_TX_BA 0x01
2218 + #define ATH9K_TX_PWRMGMT 0x02
2219 +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
2220 +index 59359e3..80df8f3 100644
2221 +--- a/drivers/net/wireless/ath/ath9k/main.c
2222 ++++ b/drivers/net/wireless/ath/ath9k/main.c
2223 +@@ -2147,6 +2147,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2224 + return; /* another wiphy still in use */
2225 + }
2226 +
2227 ++ /* Ensure HW is awake when we try to shut it down. */
2228 ++ ath9k_ps_wakeup(sc);
2229 ++
2230 + if (sc->sc_flags & SC_OP_BTCOEX_ENABLED) {
2231 + ath9k_hw_btcoex_disable(sc->sc_ah);
2232 + if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE)
2233 +@@ -2167,6 +2170,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2234 + /* disable HAL and put h/w to sleep */
2235 + ath9k_hw_disable(sc->sc_ah);
2236 + ath9k_hw_configpcipowersave(sc->sc_ah, 1, 1);
2237 ++ ath9k_ps_restore(sc);
2238 ++
2239 ++ /* Finally, put the chip in FULL SLEEP mode */
2240 + ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
2241 +
2242 + sc->sc_flags |= SC_OP_INVALID;
2243 +@@ -2277,8 +2283,10 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
2244 + if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
2245 + (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
2246 + (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
2247 ++ ath9k_ps_wakeup(sc);
2248 + ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2249 + ath_beacon_return(sc, avp);
2250 ++ ath9k_ps_restore(sc);
2251 + }
2252 +
2253 + sc->sc_flags &= ~SC_OP_BEACONS;
2254 +@@ -2724,15 +2732,21 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2255 + case IEEE80211_AMPDU_RX_STOP:
2256 + break;
2257 + case IEEE80211_AMPDU_TX_START:
2258 ++ ath9k_ps_wakeup(sc);
2259 + ath_tx_aggr_start(sc, sta, tid, ssn);
2260 + ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
2261 ++ ath9k_ps_restore(sc);
2262 + break;
2263 + case IEEE80211_AMPDU_TX_STOP:
2264 ++ ath9k_ps_wakeup(sc);
2265 + ath_tx_aggr_stop(sc, sta, tid);
2266 + ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
2267 ++ ath9k_ps_restore(sc);
2268 + break;
2269 + case IEEE80211_AMPDU_TX_OPERATIONAL:
2270 ++ ath9k_ps_wakeup(sc);
2271 + ath_tx_aggr_resume(sc, sta, tid);
2272 ++ ath9k_ps_restore(sc);
2273 + break;
2274 + default:
2275 + DPRINTF(sc, ATH_DBG_FATAL, "Unknown AMPDU action\n");
2276 +diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
2277 +index d83b77f..c0d7e65 100644
2278 +--- a/drivers/net/wireless/ath/ath9k/reg.h
2279 ++++ b/drivers/net/wireless/ath/ath9k/reg.h
2280 +@@ -969,10 +969,10 @@ enum {
2281 + #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_S 4
2282 + #define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF 0x00000080
2283 + #define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S 7
2284 ++#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB 0x00000400
2285 ++#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S 10
2286 + #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB 0x00001000
2287 + #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB_S 12
2288 +-#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB 0x00001000
2289 +-#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S 1
2290 + #define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB 0x00008000
2291 + #define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S 15
2292 + #define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
2293 +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
2294 +index 4753909..2c6b063 100644
2295 +--- a/drivers/net/wireless/ath/ath9k/xmit.c
2296 ++++ b/drivers/net/wireless/ath/ath9k/xmit.c
2297 +@@ -1076,10 +1076,10 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
2298 + if (npend) {
2299 + int r;
2300 +
2301 +- DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n");
2302 ++ DPRINTF(sc, ATH_DBG_FATAL, "Unable to stop TxDMA. Reset HAL!\n");
2303 +
2304 + spin_lock_bh(&sc->sc_resetlock);
2305 +- r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true);
2306 ++ r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
2307 + if (r)
2308 + DPRINTF(sc, ATH_DBG_FATAL,
2309 + "Unable to reset hardware; reset status %d\n",
2310 +@@ -2020,7 +2020,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2311 + if (bf_isaggr(bf))
2312 + txq->axq_aggr_depth--;
2313 +
2314 +- txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_FILT);
2315 ++ txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
2316 + txq->axq_tx_inprogress = false;
2317 + spin_unlock_bh(&txq->axq_lock);
2318 +
2319 +diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
2320 +index ffdce6f..78016ae 100644
2321 +--- a/drivers/net/wireless/b43/rfkill.c
2322 ++++ b/drivers/net/wireless/b43/rfkill.c
2323 +@@ -33,8 +33,14 @@ bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
2324 + & B43_MMIO_RADIO_HWENABLED_HI_MASK))
2325 + return 1;
2326 + } else {
2327 +- if (b43_status(dev) >= B43_STAT_STARTED &&
2328 +- b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
2329 ++ /* To prevent CPU fault on PPC, do not read a register
2330 ++ * unless the interface is started; however, on resume
2331 ++ * for hibernation, this routine is entered early. When
2332 ++ * that happens, unconditionally return TRUE.
2333 ++ */
2334 ++ if (b43_status(dev) < B43_STAT_STARTED)
2335 ++ return 1;
2336 ++ if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
2337 + & B43_MMIO_RADIO_HWENABLED_LO_MASK)
2338 + return 1;
2339 + }
2340 +diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
2341 +index f059b49..9d60f6c 100644
2342 +--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
2343 ++++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
2344 +@@ -2895,6 +2895,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
2345 + .mod_params = &iwl3945_mod_params,
2346 + .use_isr_legacy = true,
2347 + .ht_greenfield_support = false,
2348 ++ .broken_powersave = true,
2349 + };
2350 +
2351 + static struct iwl_cfg iwl3945_abg_cfg = {
2352 +@@ -2909,6 +2910,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
2353 + .mod_params = &iwl3945_mod_params,
2354 + .use_isr_legacy = true,
2355 + .ht_greenfield_support = false,
2356 ++ .broken_powersave = true,
2357 + };
2358 +
2359 + struct pci_device_id iwl3945_hw_card_ids[] = {
2360 +diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
2361 +index 6f703a0..f4e2e84 100644
2362 +--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
2363 ++++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
2364 +@@ -1337,7 +1337,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2365 + iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
2366 +
2367 + /* calculate tx gain adjustment based on power supply voltage */
2368 +- voltage = priv->calib_info->voltage;
2369 ++ voltage = le16_to_cpu(priv->calib_info->voltage);
2370 + init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
2371 + voltage_compensation =
2372 + iwl4965_get_voltage_compensation(voltage, init_voltage);
2373 +diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
2374 +index 4ef6804..bc056e9 100644
2375 +--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
2376 ++++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
2377 +@@ -92,11 +92,15 @@
2378 +
2379 + static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
2380 + {
2381 +- u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv,
2382 +- EEPROM_5000_TEMPERATURE);
2383 +- /* offset = temperature - voltage / coef */
2384 +- s32 offset = (s32)(temp_calib[0] - temp_calib[1] / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
2385 +- return offset;
2386 ++ u16 temperature, voltage;
2387 ++ __le16 *temp_calib =
2388 ++ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE);
2389 ++
2390 ++ temperature = le16_to_cpu(temp_calib[0]);
2391 ++ voltage = le16_to_cpu(temp_calib[1]);
2392 ++
2393 ++ /* offset = temp - volt / coeff */
2394 ++ return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
2395 + }
2396 +
2397 + /* Fixed (non-configurable) rx data from phy */
2398 +diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
2399 +index 6e6f516..94a1225 100644
2400 +--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
2401 ++++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
2402 +@@ -460,14 +460,15 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
2403 + static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
2404 + {
2405 + struct iwl_calib_xtal_freq_cmd cmd;
2406 +- u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
2407 ++ __le16 *xtal_calib =
2408 ++ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
2409 +
2410 + cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
2411 + cmd.hdr.first_group = 0;
2412 + cmd.hdr.groups_num = 1;
2413 + cmd.hdr.data_valid = 1;
2414 +- cmd.cap_pin1 = (u8)xtal_calib[0];
2415 +- cmd.cap_pin2 = (u8)xtal_calib[1];
2416 ++ cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
2417 ++ cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
2418 + return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
2419 + (u8 *)&cmd, sizeof(cmd));
2420 + }
2421 +diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
2422 +index 028d505..c2d9b7a 100644
2423 +--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
2424 ++++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
2425 +@@ -1149,7 +1149,7 @@ struct iwl_priv {
2426 + u32 last_beacon_time;
2427 + u64 last_tsf;
2428 +
2429 +- /* eeprom */
2430 ++ /* eeprom -- this is in the card's little endian byte order */
2431 + u8 *eeprom;
2432 + int nvm_device_type;
2433 + struct iwl_eeprom_calib_info *calib_info;
2434 +diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
2435 +index e14c995..18dc3a4 100644
2436 +--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
2437 ++++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
2438 +@@ -337,7 +337,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
2439 + return ret;
2440 + }
2441 +
2442 +-static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
2443 ++static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
2444 + {
2445 + int ret = 0;
2446 + u32 r;
2447 +@@ -370,7 +370,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
2448 + CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
2449 + IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
2450 + }
2451 +- *eeprom_data = le16_to_cpu((__force __le16)(r >> 16));
2452 ++ *eeprom_data = cpu_to_le16(r >> 16);
2453 + return 0;
2454 + }
2455 +
2456 +@@ -379,7 +379,8 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
2457 + */
2458 + static bool iwl_is_otp_empty(struct iwl_priv *priv)
2459 + {
2460 +- u16 next_link_addr = 0, link_value;
2461 ++ u16 next_link_addr = 0;
2462 ++ __le16 link_value;
2463 + bool is_empty = false;
2464 +
2465 + /* locate the beginning of OTP link list */
2466 +@@ -409,7 +410,8 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv)
2467 + static int iwl_find_otp_image(struct iwl_priv *priv,
2468 + u16 *validblockaddr)
2469 + {
2470 +- u16 next_link_addr = 0, link_value = 0, valid_addr;
2471 ++ u16 next_link_addr = 0, valid_addr;
2472 ++ __le16 link_value = 0;
2473 + int usedblocks = 0;
2474 +
2475 + /* set addressing mode to absolute to traverse the link list */
2476 +@@ -429,7 +431,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
2477 + * check for more block on the link list
2478 + */
2479 + valid_addr = next_link_addr;
2480 +- next_link_addr = link_value * sizeof(u16);
2481 ++ next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
2482 + IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n",
2483 + usedblocks, next_link_addr);
2484 + if (iwl_read_otp_word(priv, next_link_addr, &link_value))
2485 +@@ -463,7 +465,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
2486 + */
2487 + int iwl_eeprom_init(struct iwl_priv *priv)
2488 + {
2489 +- u16 *e;
2490 ++ __le16 *e;
2491 + u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
2492 + int sz;
2493 + int ret;
2494 +@@ -482,7 +484,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
2495 + ret = -ENOMEM;
2496 + goto alloc_err;
2497 + }
2498 +- e = (u16 *)priv->eeprom;
2499 ++ e = (__le16 *)priv->eeprom;
2500 +
2501 + ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
2502 + if (ret < 0) {
2503 +@@ -521,7 +523,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
2504 + }
2505 + for (addr = validblockaddr; addr < validblockaddr + sz;
2506 + addr += sizeof(u16)) {
2507 +- u16 eeprom_data;
2508 ++ __le16 eeprom_data;
2509 +
2510 + ret = iwl_read_otp_word(priv, addr, &eeprom_data);
2511 + if (ret)
2512 +@@ -545,7 +547,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
2513 + goto done;
2514 + }
2515 + r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
2516 +- e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
2517 ++ e[addr / 2] = cpu_to_le16(r >> 16);
2518 + }
2519 + }
2520 + ret = 0;
2521 +@@ -709,7 +711,8 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
2522 + ch_info->ht40_min_power = 0;
2523 + ch_info->ht40_scan_power = eeprom_ch->max_power_avg;
2524 + ch_info->ht40_flags = eeprom_ch->flags;
2525 +- ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
2526 ++ if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
2527 ++ ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
2528 +
2529 + return 0;
2530 + }
2531 +diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
2532 +index 80b9e45..fc93f12 100644
2533 +--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
2534 ++++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
2535 +@@ -133,7 +133,7 @@ struct iwl_eeprom_channel {
2536 + *
2537 + */
2538 + struct iwl_eeprom_enhanced_txpwr {
2539 +- u16 reserved;
2540 ++ __le16 common;
2541 + s8 chain_a_max;
2542 + s8 chain_b_max;
2543 + s8 chain_c_max;
2544 +@@ -347,7 +347,7 @@ struct iwl_eeprom_calib_subband_info {
2545 + struct iwl_eeprom_calib_info {
2546 + u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
2547 + u8 saturation_power52; /* half-dBm */
2548 +- s16 voltage; /* signed */
2549 ++ __le16 voltage; /* signed */
2550 + struct iwl_eeprom_calib_subband_info
2551 + band_info[EEPROM_TX_POWER_BANDS];
2552 + } __attribute__ ((packed));
2553 +diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
2554 +index d00a803..5f26c93 100644
2555 +--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
2556 ++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
2557 +@@ -562,6 +562,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
2558 + txq = &priv->txq[txq_id];
2559 + q = &txq->q;
2560 +
2561 ++ if ((iwl_queue_space(q) < q->high_mark))
2562 ++ goto drop;
2563 ++
2564 + spin_lock_irqsave(&priv->lock, flags);
2565 +
2566 + idx = get_cmd_index(q, q->write_ptr, 0);
2567 +@@ -3854,9 +3857,11 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
2568 + /* Tell mac80211 our characteristics */
2569 + hw->flags = IEEE80211_HW_SIGNAL_DBM |
2570 + IEEE80211_HW_NOISE_DBM |
2571 +- IEEE80211_HW_SPECTRUM_MGMT |
2572 +- IEEE80211_HW_SUPPORTS_PS |
2573 +- IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
2574 ++ IEEE80211_HW_SPECTRUM_MGMT;
2575 ++
2576 ++ if (!priv->cfg->broken_powersave)
2577 ++ hw->flags |= IEEE80211_HW_SUPPORTS_PS |
2578 ++ IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
2579 +
2580 + hw->wiphy->interface_modes =
2581 + BIT(NL80211_IFTYPE_STATION) |
2582 +diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
2583 +index 1b02a4e..93c8989 100644
2584 +--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
2585 ++++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
2586 +@@ -258,7 +258,7 @@ struct iwm_priv {
2587 +
2588 + struct sk_buff_head rx_list;
2589 + struct list_head rx_tickets;
2590 +- struct list_head rx_packets[IWM_RX_ID_HASH];
2591 ++ struct list_head rx_packets[IWM_RX_ID_HASH + 1];
2592 + struct workqueue_struct *rx_wq;
2593 + struct work_struct rx_worker;
2594 +
2595 +diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
2596 +index be837a0..01c738b 100644
2597 +--- a/drivers/net/wireless/libertas/wext.c
2598 ++++ b/drivers/net/wireless/libertas/wext.c
2599 +@@ -1953,10 +1953,8 @@ static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
2600 + if (priv->connect_status == LBS_CONNECTED) {
2601 + memcpy(extra, priv->curbssparams.ssid,
2602 + priv->curbssparams.ssid_len);
2603 +- extra[priv->curbssparams.ssid_len] = '\0';
2604 + } else {
2605 + memset(extra, 0, 32);
2606 +- extra[priv->curbssparams.ssid_len] = '\0';
2607 + }
2608 + /*
2609 + * If none, we may want to get the one that was set
2610 +diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
2611 +index 7698fdd..31ca241 100644
2612 +--- a/drivers/net/wireless/orinoco/wext.c
2613 ++++ b/drivers/net/wireless/orinoco/wext.c
2614 +@@ -23,7 +23,7 @@
2615 + #define MAX_RID_LEN 1024
2616 +
2617 + /* Helper routine to record keys
2618 +- * Do not call from interrupt context */
2619 ++ * It is called under orinoco_lock so it may not sleep */
2620 + static int orinoco_set_key(struct orinoco_private *priv, int index,
2621 + enum orinoco_alg alg, const u8 *key, int key_len,
2622 + const u8 *seq, int seq_len)
2623 +@@ -32,14 +32,14 @@ static int orinoco_set_key(struct orinoco_private *priv, int index,
2624 + kzfree(priv->keys[index].seq);
2625 +
2626 + if (key_len) {
2627 +- priv->keys[index].key = kzalloc(key_len, GFP_KERNEL);
2628 ++ priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC);
2629 + if (!priv->keys[index].key)
2630 + goto nomem;
2631 + } else
2632 + priv->keys[index].key = NULL;
2633 +
2634 + if (seq_len) {
2635 +- priv->keys[index].seq = kzalloc(seq_len, GFP_KERNEL);
2636 ++ priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC);
2637 + if (!priv->keys[index].seq)
2638 + goto free_key;
2639 + } else
2640 +diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
2641 +index b20e3ea..9a6ceb4 100644
2642 +--- a/drivers/net/wireless/rt2x00/rt61pci.c
2643 ++++ b/drivers/net/wireless/rt2x00/rt61pci.c
2644 +@@ -2538,6 +2538,11 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2645 + unsigned int i;
2646 +
2647 + /*
2648 ++ * Disable powersaving as default.
2649 ++ */
2650 ++ rt2x00dev->hw->wiphy->ps_default = false;
2651 ++
2652 ++ /*
2653 + * Initialize all hw fields.
2654 + */
2655 + rt2x00dev->hw->flags =
2656 +diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
2657 +index d8b4229..4d922e4 100644
2658 +--- a/drivers/platform/x86/acerhdf.c
2659 ++++ b/drivers/platform/x86/acerhdf.c
2660 +@@ -640,9 +640,10 @@ static void __exit acerhdf_exit(void)
2661 + MODULE_LICENSE("GPL");
2662 + MODULE_AUTHOR("Peter Feuerer");
2663 + MODULE_DESCRIPTION("Aspire One temperature and fan driver");
2664 +-MODULE_ALIAS("dmi:*:*Acer*:*:");
2665 +-MODULE_ALIAS("dmi:*:*Gateway*:*:");
2666 +-MODULE_ALIAS("dmi:*:*Packard Bell*:*:");
2667 ++MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
2668 ++MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
2669 ++MODULE_ALIAS("dmi:*:*Packard Bell*:pnAOA*:");
2670 ++MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOA*:");
2671 +
2672 + module_init(acerhdf_init);
2673 + module_exit(acerhdf_exit);
2674 +diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
2675 +index 4e49b4a..8174ec9 100644
2676 +--- a/drivers/s390/block/dasd_diag.c
2677 ++++ b/drivers/s390/block/dasd_diag.c
2678 +@@ -145,6 +145,15 @@ dasd_diag_erp(struct dasd_device *device)
2679 +
2680 + mdsk_term_io(device);
2681 + rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
2682 ++ if (rc == 4) {
2683 ++ if (!(device->features & DASD_FEATURE_READONLY)) {
2684 ++ dev_warn(&device->cdev->dev,
2685 ++ "The access mode of a DIAG device changed"
2686 ++ " to read-only");
2687 ++ device->features |= DASD_FEATURE_READONLY;
2688 ++ }
2689 ++ rc = 0;
2690 ++ }
2691 + if (rc)
2692 + dev_warn(&device->cdev->dev, "DIAG ERP failed with "
2693 + "rc=%d\n", rc);
2694 +@@ -433,16 +442,20 @@ dasd_diag_check_device(struct dasd_device *device)
2695 + for (sb = 512; sb < bsize; sb = sb << 1)
2696 + block->s2b_shift++;
2697 + rc = mdsk_init_io(device, block->bp_block, 0, NULL);
2698 +- if (rc) {
2699 ++ if (rc && (rc != 4)) {
2700 + dev_warn(&device->cdev->dev, "DIAG initialization "
2701 + "failed with rc=%d\n", rc);
2702 + rc = -EIO;
2703 + } else {
2704 ++ if (rc == 4)
2705 ++ device->features |= DASD_FEATURE_READONLY;
2706 + dev_info(&device->cdev->dev,
2707 +- "New DASD with %ld byte/block, total size %ld KB\n",
2708 ++ "New DASD with %ld byte/block, total size %ld KB%s\n",
2709 + (unsigned long) block->bp_block,
2710 + (unsigned long) (block->blocks <<
2711 +- block->s2b_shift) >> 1);
2712 ++ block->s2b_shift) >> 1,
2713 ++ (rc == 4) ? ", read-only device" : "");
2714 ++ rc = 0;
2715 + }
2716 + out_label:
2717 + free_page((long) label);
2718 +diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
2719 +index 76d294f..c3ff9a6 100644
2720 +--- a/drivers/scsi/ipr.c
2721 ++++ b/drivers/scsi/ipr.c
2722 +@@ -6516,6 +6516,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
2723 + int rc;
2724 +
2725 + ENTER;
2726 ++ ioa_cfg->pdev->state_saved = true;
2727 + rc = pci_restore_state(ioa_cfg->pdev);
2728 +
2729 + if (rc != PCIBIOS_SUCCESSFUL) {
2730 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
2731 +index b79fca7..06bbe0d 100644
2732 +--- a/drivers/scsi/qla2xxx/qla_os.c
2733 ++++ b/drivers/scsi/qla2xxx/qla_os.c
2734 +@@ -2016,13 +2016,13 @@ skip_dpc:
2735 + DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
2736 + base_vha->host_no, ha));
2737 +
2738 +- base_vha->flags.init_done = 1;
2739 +- base_vha->flags.online = 1;
2740 +-
2741 + ret = scsi_add_host(host, &pdev->dev);
2742 + if (ret)
2743 + goto probe_failed;
2744 +
2745 ++ base_vha->flags.init_done = 1;
2746 ++ base_vha->flags.online = 1;
2747 ++
2748 + ha->isp_ops->enable_intrs(ha);
2749 +
2750 + scsi_scan_host(host);
2751 +diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
2752 +index c6f70da..45be82f 100644
2753 +--- a/drivers/scsi/scsi_transport_fc.c
2754 ++++ b/drivers/scsi/scsi_transport_fc.c
2755 +@@ -648,11 +648,22 @@ static __init int fc_transport_init(void)
2756 + return error;
2757 + error = transport_class_register(&fc_vport_class);
2758 + if (error)
2759 +- return error;
2760 ++ goto unreg_host_class;
2761 + error = transport_class_register(&fc_rport_class);
2762 + if (error)
2763 +- return error;
2764 +- return transport_class_register(&fc_transport_class);
2765 ++ goto unreg_vport_class;
2766 ++ error = transport_class_register(&fc_transport_class);
2767 ++ if (error)
2768 ++ goto unreg_rport_class;
2769 ++ return 0;
2770 ++
2771 ++unreg_rport_class:
2772 ++ transport_class_unregister(&fc_rport_class);
2773 ++unreg_vport_class:
2774 ++ transport_class_unregister(&fc_vport_class);
2775 ++unreg_host_class:
2776 ++ transport_class_unregister(&fc_host_class);
2777 ++ return error;
2778 + }
2779 +
2780 + static void __exit fc_transport_exit(void)
2781 +diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
2782 +index 12d58a7..5081f97 100644
2783 +--- a/drivers/scsi/st.c
2784 ++++ b/drivers/scsi/st.c
2785 +@@ -552,13 +552,15 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
2786 + SRpnt->waiting = waiting;
2787 +
2788 + if (STp->buffer->do_dio) {
2789 ++ mdata->page_order = 0;
2790 + mdata->nr_entries = STp->buffer->sg_segs;
2791 + mdata->pages = STp->buffer->mapped_pages;
2792 + } else {
2793 ++ mdata->page_order = STp->buffer->reserved_page_order;
2794 + mdata->nr_entries =
2795 + DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
2796 +- STp->buffer->map_data.pages = STp->buffer->reserved_pages;
2797 +- STp->buffer->map_data.offset = 0;
2798 ++ mdata->pages = STp->buffer->reserved_pages;
2799 ++ mdata->offset = 0;
2800 + }
2801 +
2802 + memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
2803 +@@ -3718,7 +3720,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
2804 + priority |= __GFP_ZERO;
2805 +
2806 + if (STbuffer->frp_segs) {
2807 +- order = STbuffer->map_data.page_order;
2808 ++ order = STbuffer->reserved_page_order;
2809 + b_size = PAGE_SIZE << order;
2810 + } else {
2811 + for (b_size = PAGE_SIZE, order = 0;
2812 +@@ -3751,7 +3753,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
2813 + segs++;
2814 + }
2815 + STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
2816 +- STbuffer->map_data.page_order = order;
2817 ++ STbuffer->reserved_page_order = order;
2818 +
2819 + return 1;
2820 + }
2821 +@@ -3764,7 +3766,7 @@ static void clear_buffer(struct st_buffer * st_bp)
2822 +
2823 + for (i=0; i < st_bp->frp_segs; i++)
2824 + memset(page_address(st_bp->reserved_pages[i]), 0,
2825 +- PAGE_SIZE << st_bp->map_data.page_order);
2826 ++ PAGE_SIZE << st_bp->reserved_page_order);
2827 + st_bp->cleared = 1;
2828 + }
2829 +
2830 +@@ -3772,7 +3774,7 @@ static void clear_buffer(struct st_buffer * st_bp)
2831 + /* Release the extra buffer */
2832 + static void normalize_buffer(struct st_buffer * STbuffer)
2833 + {
2834 +- int i, order = STbuffer->map_data.page_order;
2835 ++ int i, order = STbuffer->reserved_page_order;
2836 +
2837 + for (i = 0; i < STbuffer->frp_segs; i++) {
2838 + __free_pages(STbuffer->reserved_pages[i], order);
2839 +@@ -3780,7 +3782,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
2840 + }
2841 + STbuffer->frp_segs = 0;
2842 + STbuffer->sg_segs = 0;
2843 +- STbuffer->map_data.page_order = 0;
2844 ++ STbuffer->reserved_page_order = 0;
2845 + STbuffer->map_data.offset = 0;
2846 + }
2847 +
2848 +@@ -3790,7 +3792,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
2849 + static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
2850 + {
2851 + int i, cnt, res, offset;
2852 +- int length = PAGE_SIZE << st_bp->map_data.page_order;
2853 ++ int length = PAGE_SIZE << st_bp->reserved_page_order;
2854 +
2855 + for (i = 0, offset = st_bp->buffer_bytes;
2856 + i < st_bp->frp_segs && offset >= length; i++)
2857 +@@ -3822,7 +3824,7 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in
2858 + static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
2859 + {
2860 + int i, cnt, res, offset;
2861 +- int length = PAGE_SIZE << st_bp->map_data.page_order;
2862 ++ int length = PAGE_SIZE << st_bp->reserved_page_order;
2863 +
2864 + for (i = 0, offset = st_bp->read_pointer;
2865 + i < st_bp->frp_segs && offset >= length; i++)
2866 +@@ -3855,7 +3857,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
2867 + {
2868 + int src_seg, dst_seg, src_offset = 0, dst_offset;
2869 + int count, total;
2870 +- int length = PAGE_SIZE << st_bp->map_data.page_order;
2871 ++ int length = PAGE_SIZE << st_bp->reserved_page_order;
2872 +
2873 + if (offset == 0)
2874 + return;
2875 +@@ -4577,7 +4579,6 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
2876 + }
2877 +
2878 + mdata->offset = uaddr & ~PAGE_MASK;
2879 +- mdata->page_order = 0;
2880 + STbp->mapped_pages = pages;
2881 +
2882 + return nr_pages;
2883 +diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
2884 +index 544dc6b..f91a67c 100644
2885 +--- a/drivers/scsi/st.h
2886 ++++ b/drivers/scsi/st.h
2887 +@@ -46,6 +46,7 @@ struct st_buffer {
2888 + struct st_request *last_SRpnt;
2889 + struct st_cmdstatus cmdstat;
2890 + struct page **reserved_pages;
2891 ++ int reserved_page_order;
2892 + struct page **mapped_pages;
2893 + struct rq_map_data map_data;
2894 + unsigned char *b_data;
2895 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2896 +index 0f857e6..8b0c235 100644
2897 +--- a/drivers/usb/core/hub.c
2898 ++++ b/drivers/usb/core/hub.c
2899 +@@ -1612,12 +1612,12 @@ static inline void announce_device(struct usb_device *udev) { }
2900 + #endif
2901 +
2902 + /**
2903 +- * usb_configure_device_otg - FIXME (usbcore-internal)
2904 ++ * usb_enumerate_device_otg - FIXME (usbcore-internal)
2905 + * @udev: newly addressed device (in ADDRESS state)
2906 + *
2907 +- * Do configuration for On-The-Go devices
2908 ++ * Finish enumeration for On-The-Go devices
2909 + */
2910 +-static int usb_configure_device_otg(struct usb_device *udev)
2911 ++static int usb_enumerate_device_otg(struct usb_device *udev)
2912 + {
2913 + int err = 0;
2914 +
2915 +@@ -1688,7 +1688,7 @@ fail:
2916 +
2917 +
2918 + /**
2919 +- * usb_configure_device - Detect and probe device intfs/otg (usbcore-internal)
2920 ++ * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
2921 + * @udev: newly addressed device (in ADDRESS state)
2922 + *
2923 + * This is only called by usb_new_device() and usb_authorize_device()
2924 +@@ -1699,7 +1699,7 @@ fail:
2925 + * the string descriptors, as they will be errored out by the device
2926 + * until it has been authorized.
2927 + */
2928 +-static int usb_configure_device(struct usb_device *udev)
2929 ++static int usb_enumerate_device(struct usb_device *udev)
2930 + {
2931 + int err;
2932 +
2933 +@@ -1723,7 +1723,7 @@ static int usb_configure_device(struct usb_device *udev)
2934 + udev->descriptor.iManufacturer);
2935 + udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
2936 + }
2937 +- err = usb_configure_device_otg(udev);
2938 ++ err = usb_enumerate_device_otg(udev);
2939 + fail:
2940 + return err;
2941 + }
2942 +@@ -1733,8 +1733,8 @@ fail:
2943 + * usb_new_device - perform initial device setup (usbcore-internal)
2944 + * @udev: newly addressed device (in ADDRESS state)
2945 + *
2946 +- * This is called with devices which have been enumerated, but not yet
2947 +- * configured. The device descriptor is available, but not descriptors
2948 ++ * This is called with devices which have been detected but not fully
2949 ++ * enumerated. The device descriptor is available, but not descriptors
2950 + * for any device configuration. The caller must have locked either
2951 + * the parent hub (if udev is a normal device) or else the
2952 + * usb_bus_list_lock (if udev is a root hub). The parent's pointer to
2953 +@@ -1757,8 +1757,8 @@ int usb_new_device(struct usb_device *udev)
2954 + if (udev->parent)
2955 + usb_autoresume_device(udev->parent);
2956 +
2957 +- usb_detect_quirks(udev); /* Determine quirks */
2958 +- err = usb_configure_device(udev); /* detect & probe dev/intfs */
2959 ++ usb_detect_quirks(udev);
2960 ++ err = usb_enumerate_device(udev); /* Read descriptors */
2961 + if (err < 0)
2962 + goto fail;
2963 + dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n",
2964 +@@ -1803,21 +1803,23 @@ fail:
2965 + */
2966 + int usb_deauthorize_device(struct usb_device *usb_dev)
2967 + {
2968 +- unsigned cnt;
2969 + usb_lock_device(usb_dev);
2970 + if (usb_dev->authorized == 0)
2971 + goto out_unauthorized;
2972 ++
2973 + usb_dev->authorized = 0;
2974 + usb_set_configuration(usb_dev, -1);
2975 ++
2976 ++ kfree(usb_dev->product);
2977 + usb_dev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL);
2978 ++ kfree(usb_dev->manufacturer);
2979 + usb_dev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL);
2980 ++ kfree(usb_dev->serial);
2981 + usb_dev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL);
2982 +- kfree(usb_dev->config);
2983 +- usb_dev->config = NULL;
2984 +- for (cnt = 0; cnt < usb_dev->descriptor.bNumConfigurations; cnt++)
2985 +- kfree(usb_dev->rawdescriptors[cnt]);
2986 ++
2987 ++ usb_destroy_configuration(usb_dev);
2988 + usb_dev->descriptor.bNumConfigurations = 0;
2989 +- kfree(usb_dev->rawdescriptors);
2990 ++
2991 + out_unauthorized:
2992 + usb_unlock_device(usb_dev);
2993 + return 0;
2994 +@@ -1827,15 +1829,11 @@ out_unauthorized:
2995 + int usb_authorize_device(struct usb_device *usb_dev)
2996 + {
2997 + int result = 0, c;
2998 ++
2999 + usb_lock_device(usb_dev);
3000 + if (usb_dev->authorized == 1)
3001 + goto out_authorized;
3002 +- kfree(usb_dev->product);
3003 +- usb_dev->product = NULL;
3004 +- kfree(usb_dev->manufacturer);
3005 +- usb_dev->manufacturer = NULL;
3006 +- kfree(usb_dev->serial);
3007 +- usb_dev->serial = NULL;
3008 ++
3009 + result = usb_autoresume_device(usb_dev);
3010 + if (result < 0) {
3011 + dev_err(&usb_dev->dev,
3012 +@@ -1848,10 +1846,18 @@ int usb_authorize_device(struct usb_device *usb_dev)
3013 + "authorization: %d\n", result);
3014 + goto error_device_descriptor;
3015 + }
3016 ++
3017 ++ kfree(usb_dev->product);
3018 ++ usb_dev->product = NULL;
3019 ++ kfree(usb_dev->manufacturer);
3020 ++ usb_dev->manufacturer = NULL;
3021 ++ kfree(usb_dev->serial);
3022 ++ usb_dev->serial = NULL;
3023 ++
3024 + usb_dev->authorized = 1;
3025 +- result = usb_configure_device(usb_dev);
3026 ++ result = usb_enumerate_device(usb_dev);
3027 + if (result < 0)
3028 +- goto error_configure;
3029 ++ goto error_enumerate;
3030 + /* Choose and set the configuration. This registers the interfaces
3031 + * with the driver core and lets interface drivers bind to them.
3032 + */
3033 +@@ -1866,8 +1872,10 @@ int usb_authorize_device(struct usb_device *usb_dev)
3034 + }
3035 + }
3036 + dev_info(&usb_dev->dev, "authorized to connect\n");
3037 +-error_configure:
3038 ++
3039 ++error_enumerate:
3040 + error_device_descriptor:
3041 ++ usb_autosuspend_device(usb_dev);
3042 + error_autoresume:
3043 + out_authorized:
3044 + usb_unlock_device(usb_dev); // complements locktree
3045 +diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
3046 +index 7ec3041..8752e55 100644
3047 +--- a/drivers/usb/core/sysfs.c
3048 ++++ b/drivers/usb/core/sysfs.c
3049 +@@ -82,9 +82,13 @@ static ssize_t show_##name(struct device *dev, \
3050 + struct device_attribute *attr, char *buf) \
3051 + { \
3052 + struct usb_device *udev; \
3053 ++ int retval; \
3054 + \
3055 + udev = to_usb_device(dev); \
3056 +- return sprintf(buf, "%s\n", udev->name); \
3057 ++ usb_lock_device(udev); \
3058 ++ retval = sprintf(buf, "%s\n", udev->name); \
3059 ++ usb_unlock_device(udev); \
3060 ++ return retval; \
3061 + } \
3062 + static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
3063 +
3064 +diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
3065 +index 1d8e39a..62ff5e7 100644
3066 +--- a/drivers/usb/misc/appledisplay.c
3067 ++++ b/drivers/usb/misc/appledisplay.c
3068 +@@ -72,8 +72,8 @@ struct appledisplay {
3069 + struct usb_device *udev; /* usb device */
3070 + struct urb *urb; /* usb request block */
3071 + struct backlight_device *bd; /* backlight device */
3072 +- char *urbdata; /* interrupt URB data buffer */
3073 +- char *msgdata; /* control message data buffer */
3074 ++ u8 *urbdata; /* interrupt URB data buffer */
3075 ++ u8 *msgdata; /* control message data buffer */
3076 +
3077 + struct delayed_work work;
3078 + int button_pressed;
3079 +diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
3080 +index 602ee05..59860b3 100644
3081 +--- a/drivers/usb/misc/emi62.c
3082 ++++ b/drivers/usb/misc/emi62.c
3083 +@@ -167,7 +167,7 @@ static int emi62_load_firmware (struct usb_device *dev)
3084 + err("%s - error loading firmware: error = %d", __func__, err);
3085 + goto wraperr;
3086 + }
3087 +- } while (i > 0);
3088 ++ } while (rec);
3089 +
3090 + /* Assert reset (stop the CPU in the EMI) */
3091 + err = emi62_set_reset(dev,1);
3092 +diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
3093 +index 1c44b97..067e5a9 100644
3094 +--- a/drivers/usb/musb/musb_gadget_ep0.c
3095 ++++ b/drivers/usb/musb/musb_gadget_ep0.c
3096 +@@ -647,7 +647,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
3097 + musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
3098 + break;
3099 + default:
3100 +- ERR("SetupEnd came in a wrong ep0stage %s",
3101 ++ ERR("SetupEnd came in a wrong ep0stage %s\n",
3102 + decode_ep0stage(musb->ep0_state));
3103 + }
3104 + csr = musb_readw(regs, MUSB_CSR0);
3105 +@@ -770,12 +770,18 @@ setup:
3106 + handled = service_zero_data_request(
3107 + musb, &setup);
3108 +
3109 ++ /*
3110 ++ * We're expecting no data in any case, so
3111 ++ * always set the DATAEND bit -- doing this
3112 ++ * here helps avoid SetupEnd interrupt coming
3113 ++ * in the idle stage when we're stalling...
3114 ++ */
3115 ++ musb->ackpend |= MUSB_CSR0_P_DATAEND;
3116 ++
3117 + /* status stage might be immediate */
3118 +- if (handled > 0) {
3119 +- musb->ackpend |= MUSB_CSR0_P_DATAEND;
3120 ++ if (handled > 0)
3121 + musb->ep0_state =
3122 + MUSB_EP0_STAGE_STATUSIN;
3123 +- }
3124 + break;
3125 +
3126 + /* sequence #1 (IN to host), includes GET_STATUS
3127 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3128 +index dffc8a1..be3dff1 100644
3129 +--- a/drivers/usb/serial/option.c
3130 ++++ b/drivers/usb/serial/option.c
3131 +@@ -340,6 +340,10 @@ static int option_resume(struct usb_serial *serial);
3132 + #define FOUR_G_SYSTEMS_VENDOR_ID 0x1c9e
3133 + #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
3134 +
3135 ++/* Haier products */
3136 ++#define HAIER_VENDOR_ID 0x201e
3137 ++#define HAIER_PRODUCT_CE100 0x2009
3138 ++
3139 + static struct usb_device_id option_ids[] = {
3140 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
3141 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
3142 +@@ -641,6 +645,7 @@ static struct usb_device_id option_ids[] = {
3143 + { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
3144 + { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
3145 + { USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) },
3146 ++ { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
3147 + { } /* Terminating entry */
3148 + };
3149 + MODULE_DEVICE_TABLE(usb, option_ids);
3150 +diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
3151 +index 3800da7..649fcdf 100644
3152 +--- a/drivers/xen/xenbus/xenbus_probe.c
3153 ++++ b/drivers/xen/xenbus/xenbus_probe.c
3154 +@@ -843,7 +843,7 @@ postcore_initcall(xenbus_probe_init);
3155 +
3156 + MODULE_LICENSE("GPL");
3157 +
3158 +-static int is_disconnected_device(struct device *dev, void *data)
3159 ++static int is_device_connecting(struct device *dev, void *data)
3160 + {
3161 + struct xenbus_device *xendev = to_xenbus_device(dev);
3162 + struct device_driver *drv = data;
3163 +@@ -861,14 +861,15 @@ static int is_disconnected_device(struct device *dev, void *data)
3164 + return 0;
3165 +
3166 + xendrv = to_xenbus_driver(dev->driver);
3167 +- return (xendev->state != XenbusStateConnected ||
3168 +- (xendrv->is_ready && !xendrv->is_ready(xendev)));
3169 ++ return (xendev->state < XenbusStateConnected ||
3170 ++ (xendev->state == XenbusStateConnected &&
3171 ++ xendrv->is_ready && !xendrv->is_ready(xendev)));
3172 + }
3173 +
3174 +-static int exists_disconnected_device(struct device_driver *drv)
3175 ++static int exists_connecting_device(struct device_driver *drv)
3176 + {
3177 + return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
3178 +- is_disconnected_device);
3179 ++ is_device_connecting);
3180 + }
3181 +
3182 + static int print_device_status(struct device *dev, void *data)
3183 +@@ -884,10 +885,13 @@ static int print_device_status(struct device *dev, void *data)
3184 + /* Information only: is this too noisy? */
3185 + printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
3186 + xendev->nodename);
3187 +- } else if (xendev->state != XenbusStateConnected) {
3188 ++ } else if (xendev->state < XenbusStateConnected) {
3189 ++ enum xenbus_state rstate = XenbusStateUnknown;
3190 ++ if (xendev->otherend)
3191 ++ rstate = xenbus_read_driver_state(xendev->otherend);
3192 + printk(KERN_WARNING "XENBUS: Timeout connecting "
3193 +- "to device: %s (state %d)\n",
3194 +- xendev->nodename, xendev->state);
3195 ++ "to device: %s (local state %d, remote state %d)\n",
3196 ++ xendev->nodename, xendev->state, rstate);
3197 + }
3198 +
3199 + return 0;
3200 +@@ -897,7 +901,7 @@ static int print_device_status(struct device *dev, void *data)
3201 + static int ready_to_wait_for_devices;
3202 +
3203 + /*
3204 +- * On a 10 second timeout, wait for all devices currently configured. We need
3205 ++ * On a 5-minute timeout, wait for all devices currently configured. We need
3206 + * to do this to guarantee that the filesystems and / or network devices
3207 + * needed for boot are available, before we can allow the boot to proceed.
3208 + *
3209 +@@ -912,18 +916,30 @@ static int ready_to_wait_for_devices;
3210 + */
3211 + static void wait_for_devices(struct xenbus_driver *xendrv)
3212 + {
3213 +- unsigned long timeout = jiffies + 10*HZ;
3214 ++ unsigned long start = jiffies;
3215 + struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
3216 ++ unsigned int seconds_waited = 0;
3217 +
3218 + if (!ready_to_wait_for_devices || !xen_domain())
3219 + return;
3220 +
3221 +- while (exists_disconnected_device(drv)) {
3222 +- if (time_after(jiffies, timeout))
3223 +- break;
3224 ++ while (exists_connecting_device(drv)) {
3225 ++ if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
3226 ++ if (!seconds_waited)
3227 ++ printk(KERN_WARNING "XENBUS: Waiting for "
3228 ++ "devices to initialise: ");
3229 ++ seconds_waited += 5;
3230 ++ printk("%us...", 300 - seconds_waited);
3231 ++ if (seconds_waited == 300)
3232 ++ break;
3233 ++ }
3234 ++
3235 + schedule_timeout_interruptible(HZ/10);
3236 + }
3237 +
3238 ++ if (seconds_waited)
3239 ++ printk("\n");
3240 ++
3241 + bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
3242 + print_device_status);
3243 + }
3244 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3245 +index 63ea83f..3bbcaa7 100644
3246 +--- a/fs/cifs/connect.c
3247 ++++ b/fs/cifs/connect.c
3248 +@@ -2287,12 +2287,12 @@ int
3249 + cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
3250 + char *mount_data_global, const char *devname)
3251 + {
3252 +- int rc = 0;
3253 ++ int rc;
3254 + int xid;
3255 + struct smb_vol *volume_info;
3256 +- struct cifsSesInfo *pSesInfo = NULL;
3257 +- struct cifsTconInfo *tcon = NULL;
3258 +- struct TCP_Server_Info *srvTcp = NULL;
3259 ++ struct cifsSesInfo *pSesInfo;
3260 ++ struct cifsTconInfo *tcon;
3261 ++ struct TCP_Server_Info *srvTcp;
3262 + char *full_path;
3263 + char *mount_data = mount_data_global;
3264 + #ifdef CONFIG_CIFS_DFS_UPCALL
3265 +@@ -2301,6 +2301,10 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
3266 + int referral_walks_count = 0;
3267 + try_mount_again:
3268 + #endif
3269 ++ rc = 0;
3270 ++ tcon = NULL;
3271 ++ pSesInfo = NULL;
3272 ++ srvTcp = NULL;
3273 + full_path = NULL;
3274 +
3275 + xid = GetXid();
3276 +@@ -2597,6 +2601,7 @@ remote_path_check:
3277 +
3278 + cleanup_volume_info(&volume_info);
3279 + referral_walks_count++;
3280 ++ FreeXid(xid);
3281 + goto try_mount_again;
3282 + }
3283 + #else /* No DFS support, return error on mount */
3284 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3285 +index bd2a9dd..d0a2afb 100644
3286 +--- a/fs/ext4/ext4.h
3287 ++++ b/fs/ext4/ext4.h
3288 +@@ -698,6 +698,10 @@ struct ext4_inode_info {
3289 + __u16 i_extra_isize;
3290 +
3291 + spinlock_t i_block_reservation_lock;
3292 ++#ifdef CONFIG_QUOTA
3293 ++ /* quota space reservation, managed internally by quota code */
3294 ++ qsize_t i_reserved_quota;
3295 ++#endif
3296 +
3297 + /* completed async DIOs that might need unwritten extents handling */
3298 + struct list_head i_aio_dio_complete_list;
3299 +@@ -1432,7 +1436,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
3300 + extern int ext4_block_truncate_page(handle_t *handle,
3301 + struct address_space *mapping, loff_t from);
3302 + extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
3303 +-extern qsize_t ext4_get_reserved_space(struct inode *inode);
3304 ++extern qsize_t *ext4_get_reserved_space(struct inode *inode);
3305 + extern int flush_aio_dio_completed_IO(struct inode *inode);
3306 + /* ioctl.c */
3307 + extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
3308 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3309 +index 1dae9a4..e233879 100644
3310 +--- a/fs/ext4/inode.c
3311 ++++ b/fs/ext4/inode.c
3312 +@@ -1045,17 +1045,12 @@ out:
3313 + return err;
3314 + }
3315 +
3316 +-qsize_t ext4_get_reserved_space(struct inode *inode)
3317 ++#ifdef CONFIG_QUOTA
3318 ++qsize_t *ext4_get_reserved_space(struct inode *inode)
3319 + {
3320 +- unsigned long long total;
3321 +-
3322 +- spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
3323 +- total = EXT4_I(inode)->i_reserved_data_blocks +
3324 +- EXT4_I(inode)->i_reserved_meta_blocks;
3325 +- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
3326 +-
3327 +- return (total << inode->i_blkbits);
3328 ++ return &EXT4_I(inode)->i_reserved_quota;
3329 + }
3330 ++#endif
3331 + /*
3332 + * Calculate the number of metadata blocks need to reserve
3333 + * to allocate @blocks for non extent file based file
3334 +@@ -1858,19 +1853,17 @@ repeat:
3335 +
3336 + md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
3337 + total = md_needed + nrblocks;
3338 ++ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
3339 +
3340 + /*
3341 + * Make quota reservation here to prevent quota overflow
3342 + * later. Real quota accounting is done at pages writeout
3343 + * time.
3344 + */
3345 +- if (vfs_dq_reserve_block(inode, total)) {
3346 +- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
3347 ++ if (vfs_dq_reserve_block(inode, total))
3348 + return -EDQUOT;
3349 +- }
3350 +
3351 + if (ext4_claim_free_blocks(sbi, total)) {
3352 +- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
3353 + vfs_dq_release_reservation_block(inode, total);
3354 + if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
3355 + yield();
3356 +@@ -1878,10 +1871,11 @@ repeat:
3357 + }
3358 + return -ENOSPC;
3359 + }
3360 ++ spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
3361 + EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
3362 +- EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;
3363 +-
3364 ++ EXT4_I(inode)->i_reserved_meta_blocks += md_needed;
3365 + spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
3366 ++
3367 + return 0; /* success */
3368 + }
3369 +
3370 +@@ -4850,6 +4844,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3371 + ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
3372 + inode->i_size = ext4_isize(raw_inode);
3373 + ei->i_disksize = inode->i_size;
3374 ++#ifdef CONFIG_QUOTA
3375 ++ ei->i_reserved_quota = 0;
3376 ++#endif
3377 + inode->i_generation = le32_to_cpu(raw_inode->i_generation);
3378 + ei->i_block_group = iloc.block_group;
3379 + ei->i_last_alloc_group = ~0;
3380 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3381 +index 9ae5217..92943f2 100644
3382 +--- a/fs/ext4/super.c
3383 ++++ b/fs/ext4/super.c
3384 +@@ -704,6 +704,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
3385 + ei->i_allocated_meta_blocks = 0;
3386 + ei->i_delalloc_reserved_flag = 0;
3387 + spin_lock_init(&(ei->i_block_reservation_lock));
3388 ++#ifdef CONFIG_QUOTA
3389 ++ ei->i_reserved_quota = 0;
3390 ++#endif
3391 + INIT_LIST_HEAD(&ei->i_aio_dio_complete_list);
3392 + ei->cur_aio_dio = NULL;
3393 + ei->i_sync_tid = 0;
3394 +@@ -1001,7 +1004,9 @@ static const struct dquot_operations ext4_quota_operations = {
3395 + .reserve_space = dquot_reserve_space,
3396 + .claim_space = dquot_claim_space,
3397 + .release_rsv = dquot_release_reserved_space,
3398 ++#ifdef CONFIG_QUOTA
3399 + .get_reserved_space = ext4_get_reserved_space,
3400 ++#endif
3401 + .alloc_inode = dquot_alloc_inode,
3402 + .free_space = dquot_free_space,
3403 + .free_inode = dquot_free_inode,
3404 +diff --git a/fs/namei.c b/fs/namei.c
3405 +index d11f404..a2b3c28 100644
3406 +--- a/fs/namei.c
3407 ++++ b/fs/namei.c
3408 +@@ -234,6 +234,7 @@ int generic_permission(struct inode *inode, int mask,
3409 + /*
3410 + * Searching includes executable on directories, else just read.
3411 + */
3412 ++ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
3413 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
3414 + if (capable(CAP_DAC_READ_SEARCH))
3415 + return 0;
3416 +diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
3417 +index 39b49c4..c4d07a8 100644
3418 +--- a/fs/quota/dquot.c
3419 ++++ b/fs/quota/dquot.c
3420 +@@ -1388,6 +1388,67 @@ void vfs_dq_drop(struct inode *inode)
3421 + EXPORT_SYMBOL(vfs_dq_drop);
3422 +
3423 + /*
3424 ++ * inode_reserved_space is managed internally by quota, and protected by
3425 ++ * i_lock similar to i_blocks+i_bytes.
3426 ++ */
3427 ++static qsize_t *inode_reserved_space(struct inode * inode)
3428 ++{
3429 ++ /* Filesystem must explicitly define its own method in order to use
3430 ++ * quota reservation interface */
3431 ++ BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
3432 ++ return inode->i_sb->dq_op->get_reserved_space(inode);
3433 ++}
3434 ++
3435 ++static void inode_add_rsv_space(struct inode *inode, qsize_t number)
3436 ++{
3437 ++ spin_lock(&inode->i_lock);
3438 ++ *inode_reserved_space(inode) += number;
3439 ++ spin_unlock(&inode->i_lock);
3440 ++}
3441 ++
3442 ++
3443 ++static void inode_claim_rsv_space(struct inode *inode, qsize_t number)
3444 ++{
3445 ++ spin_lock(&inode->i_lock);
3446 ++ *inode_reserved_space(inode) -= number;
3447 ++ __inode_add_bytes(inode, number);
3448 ++ spin_unlock(&inode->i_lock);
3449 ++}
3450 ++
3451 ++static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
3452 ++{
3453 ++ spin_lock(&inode->i_lock);
3454 ++ *inode_reserved_space(inode) -= number;
3455 ++ spin_unlock(&inode->i_lock);
3456 ++}
3457 ++
3458 ++static qsize_t inode_get_rsv_space(struct inode *inode)
3459 ++{
3460 ++ qsize_t ret;
3461 ++ spin_lock(&inode->i_lock);
3462 ++ ret = *inode_reserved_space(inode);
3463 ++ spin_unlock(&inode->i_lock);
3464 ++ return ret;
3465 ++}
3466 ++
3467 ++static void inode_incr_space(struct inode *inode, qsize_t number,
3468 ++ int reserve)
3469 ++{
3470 ++ if (reserve)
3471 ++ inode_add_rsv_space(inode, number);
3472 ++ else
3473 ++ inode_add_bytes(inode, number);
3474 ++}
3475 ++
3476 ++static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
3477 ++{
3478 ++ if (reserve)
3479 ++ inode_sub_rsv_space(inode, number);
3480 ++ else
3481 ++ inode_sub_bytes(inode, number);
3482 ++}
3483 ++
3484 ++/*
3485 + * Following four functions update i_blocks+i_bytes fields and
3486 + * quota information (together with appropriate checks)
3487 + * NOTE: We absolutely rely on the fact that caller dirties
3488 +@@ -1405,6 +1466,21 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
3489 + int cnt, ret = QUOTA_OK;
3490 + char warntype[MAXQUOTAS];
3491 +
3492 ++ /*
3493 ++ * First test before acquiring mutex - solves deadlocks when we
3494 ++ * re-enter the quota code and are already holding the mutex
3495 ++ */
3496 ++ if (IS_NOQUOTA(inode)) {
3497 ++ inode_incr_space(inode, number, reserve);
3498 ++ goto out;
3499 ++ }
3500 ++
3501 ++ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3502 ++ if (IS_NOQUOTA(inode)) {
3503 ++ inode_incr_space(inode, number, reserve);
3504 ++ goto out_unlock;
3505 ++ }
3506 ++
3507 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
3508 + warntype[cnt] = QUOTA_NL_NOWARN;
3509 +
3510 +@@ -1415,7 +1491,8 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
3511 + if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
3512 + == NO_QUOTA) {
3513 + ret = NO_QUOTA;
3514 +- goto out_unlock;
3515 ++ spin_unlock(&dq_data_lock);
3516 ++ goto out_flush_warn;
3517 + }
3518 + }
3519 + for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
3520 +@@ -1426,64 +1503,32 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
3521 + else
3522 + dquot_incr_space(inode->i_dquot[cnt], number);
3523 + }
3524 +- if (!reserve)
3525 +- inode_add_bytes(inode, number);
3526 +-out_unlock:
3527 ++ inode_incr_space(inode, number, reserve);
3528 + spin_unlock(&dq_data_lock);
3529 +- flush_warnings(inode->i_dquot, warntype);
3530 +- return ret;
3531 +-}
3532 +-
3533 +-int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
3534 +-{
3535 +- int cnt, ret = QUOTA_OK;
3536 +-
3537 +- /*
3538 +- * First test before acquiring mutex - solves deadlocks when we
3539 +- * re-enter the quota code and are already holding the mutex
3540 +- */
3541 +- if (IS_NOQUOTA(inode)) {
3542 +- inode_add_bytes(inode, number);
3543 +- goto out;
3544 +- }
3545 +-
3546 +- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3547 +- if (IS_NOQUOTA(inode)) {
3548 +- inode_add_bytes(inode, number);
3549 +- goto out_unlock;
3550 +- }
3551 +-
3552 +- ret = __dquot_alloc_space(inode, number, warn, 0);
3553 +- if (ret == NO_QUOTA)
3554 +- goto out_unlock;
3555 +
3556 ++ if (reserve)
3557 ++ goto out_flush_warn;
3558 + /* Dirtify all the dquots - this can block when journalling */
3559 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
3560 + if (inode->i_dquot[cnt])
3561 + mark_dquot_dirty(inode->i_dquot[cnt]);
3562 ++out_flush_warn:
3563 ++ flush_warnings(inode->i_dquot, warntype);
3564 + out_unlock:
3565 + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3566 + out:
3567 + return ret;
3568 + }
3569 ++
3570 ++int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
3571 ++{
3572 ++ return __dquot_alloc_space(inode, number, warn, 0);
3573 ++}
3574 + EXPORT_SYMBOL(dquot_alloc_space);
3575 +
3576 + int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
3577 + {
3578 +- int ret = QUOTA_OK;
3579 +-
3580 +- if (IS_NOQUOTA(inode))
3581 +- goto out;
3582 +-
3583 +- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3584 +- if (IS_NOQUOTA(inode))
3585 +- goto out_unlock;
3586 +-
3587 +- ret = __dquot_alloc_space(inode, number, warn, 1);
3588 +-out_unlock:
3589 +- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3590 +-out:
3591 +- return ret;
3592 ++ return __dquot_alloc_space(inode, number, warn, 1);
3593 + }
3594 + EXPORT_SYMBOL(dquot_reserve_space);
3595 +
3596 +@@ -1540,14 +1585,14 @@ int dquot_claim_space(struct inode *inode, qsize_t number)
3597 + int ret = QUOTA_OK;
3598 +
3599 + if (IS_NOQUOTA(inode)) {
3600 +- inode_add_bytes(inode, number);
3601 ++ inode_claim_rsv_space(inode, number);
3602 + goto out;
3603 + }
3604 +
3605 + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3606 + if (IS_NOQUOTA(inode)) {
3607 + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3608 +- inode_add_bytes(inode, number);
3609 ++ inode_claim_rsv_space(inode, number);
3610 + goto out;
3611 + }
3612 +
3613 +@@ -1559,7 +1604,7 @@ int dquot_claim_space(struct inode *inode, qsize_t number)
3614 + number);
3615 + }
3616 + /* Update inode bytes */
3617 +- inode_add_bytes(inode, number);
3618 ++ inode_claim_rsv_space(inode, number);
3619 + spin_unlock(&dq_data_lock);
3620 + /* Dirtify all the dquots - this can block when journalling */
3621 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
3622 +@@ -1572,38 +1617,9 @@ out:
3623 + EXPORT_SYMBOL(dquot_claim_space);
3624 +
3625 + /*
3626 +- * Release reserved quota space
3627 +- */
3628 +-void dquot_release_reserved_space(struct inode *inode, qsize_t number)
3629 +-{
3630 +- int cnt;
3631 +-
3632 +- if (IS_NOQUOTA(inode))
3633 +- goto out;
3634 +-
3635 +- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3636 +- if (IS_NOQUOTA(inode))
3637 +- goto out_unlock;
3638 +-
3639 +- spin_lock(&dq_data_lock);
3640 +- /* Release reserved dquots */
3641 +- for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
3642 +- if (inode->i_dquot[cnt])
3643 +- dquot_free_reserved_space(inode->i_dquot[cnt], number);
3644 +- }
3645 +- spin_unlock(&dq_data_lock);
3646 +-
3647 +-out_unlock:
3648 +- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3649 +-out:
3650 +- return;
3651 +-}
3652 +-EXPORT_SYMBOL(dquot_release_reserved_space);
3653 +-
3654 +-/*
3655 + * This operation can block, but only after everything is updated
3656 + */
3657 +-int dquot_free_space(struct inode *inode, qsize_t number)
3658 ++int __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
3659 + {
3660 + unsigned int cnt;
3661 + char warntype[MAXQUOTAS];
3662 +@@ -1612,7 +1628,7 @@ int dquot_free_space(struct inode *inode, qsize_t number)
3663 + * re-enter the quota code and are already holding the mutex */
3664 + if (IS_NOQUOTA(inode)) {
3665 + out_sub:
3666 +- inode_sub_bytes(inode, number);
3667 ++ inode_decr_space(inode, number, reserve);
3668 + return QUOTA_OK;
3669 + }
3670 +
3671 +@@ -1627,21 +1643,43 @@ out_sub:
3672 + if (!inode->i_dquot[cnt])
3673 + continue;
3674 + warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
3675 +- dquot_decr_space(inode->i_dquot[cnt], number);
3676 ++ if (reserve)
3677 ++ dquot_free_reserved_space(inode->i_dquot[cnt], number);
3678 ++ else
3679 ++ dquot_decr_space(inode->i_dquot[cnt], number);
3680 + }
3681 +- inode_sub_bytes(inode, number);
3682 ++ inode_decr_space(inode, number, reserve);
3683 + spin_unlock(&dq_data_lock);
3684 ++
3685 ++ if (reserve)
3686 ++ goto out_unlock;
3687 + /* Dirtify all the dquots - this can block when journalling */
3688 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
3689 + if (inode->i_dquot[cnt])
3690 + mark_dquot_dirty(inode->i_dquot[cnt]);
3691 ++out_unlock:
3692 + flush_warnings(inode->i_dquot, warntype);
3693 + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3694 + return QUOTA_OK;
3695 + }
3696 ++
3697 ++int dquot_free_space(struct inode *inode, qsize_t number)
3698 ++{
3699 ++ return __dquot_free_space(inode, number, 0);
3700 ++}
3701 + EXPORT_SYMBOL(dquot_free_space);
3702 +
3703 + /*
3704 ++ * Release reserved quota space
3705 ++ */
3706 ++void dquot_release_reserved_space(struct inode *inode, qsize_t number)
3707 ++{
3708 ++ __dquot_free_space(inode, number, 1);
3709 ++
3710 ++}
3711 ++EXPORT_SYMBOL(dquot_release_reserved_space);
3712 ++
3713 ++/*
3714 + * This operation can block, but only after everything is updated
3715 + */
3716 + int dquot_free_inode(const struct inode *inode, qsize_t number)
3717 +@@ -1679,19 +1717,6 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
3718 + EXPORT_SYMBOL(dquot_free_inode);
3719 +
3720 + /*
3721 +- * call back function, get reserved quota space from underlying fs
3722 +- */
3723 +-qsize_t dquot_get_reserved_space(struct inode *inode)
3724 +-{
3725 +- qsize_t reserved_space = 0;
3726 +-
3727 +- if (sb_any_quota_active(inode->i_sb) &&
3728 +- inode->i_sb->dq_op->get_reserved_space)
3729 +- reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
3730 +- return reserved_space;
3731 +-}
3732 +-
3733 +-/*
3734 + * Transfer the number of inode and blocks from one diskquota to an other.
3735 + *
3736 + * This operation can block, but only after everything is updated
3737 +@@ -1734,7 +1759,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
3738 + }
3739 + spin_lock(&dq_data_lock);
3740 + cur_space = inode_get_bytes(inode);
3741 +- rsv_space = dquot_get_reserved_space(inode);
3742 ++ rsv_space = inode_get_rsv_space(inode);
3743 + space = cur_space + rsv_space;
3744 + /* Build the transfer_from list and check the limits */
3745 + for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
3746 +diff --git a/fs/stat.c b/fs/stat.c
3747 +index 075694e..c4ecd52 100644
3748 +--- a/fs/stat.c
3749 ++++ b/fs/stat.c
3750 +@@ -401,9 +401,9 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, char __user *, filename,
3751 + }
3752 + #endif /* __ARCH_WANT_STAT64 */
3753 +
3754 +-void inode_add_bytes(struct inode *inode, loff_t bytes)
3755 ++/* Caller is responsible here for sufficient locking (i.e. inode->i_lock) */
3756 ++void __inode_add_bytes(struct inode *inode, loff_t bytes)
3757 + {
3758 +- spin_lock(&inode->i_lock);
3759 + inode->i_blocks += bytes >> 9;
3760 + bytes &= 511;
3761 + inode->i_bytes += bytes;
3762 +@@ -411,6 +411,12 @@ void inode_add_bytes(struct inode *inode, loff_t bytes)
3763 + inode->i_blocks++;
3764 + inode->i_bytes -= 512;
3765 + }
3766 ++}
3767 ++
3768 ++void inode_add_bytes(struct inode *inode, loff_t bytes)
3769 ++{
3770 ++ spin_lock(&inode->i_lock);
3771 ++ __inode_add_bytes(inode, bytes);
3772 + spin_unlock(&inode->i_lock);
3773 + }
3774 +
3775 +diff --git a/fs/udf/super.c b/fs/udf/super.c
3776 +index 9d1b8c2..1e4543c 100644
3777 +--- a/fs/udf/super.c
3778 ++++ b/fs/udf/super.c
3779 +@@ -1078,21 +1078,39 @@ static int udf_fill_partdesc_info(struct super_block *sb,
3780 + return 0;
3781 + }
3782 +
3783 +-static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
3784 ++static void udf_find_vat_block(struct super_block *sb, int p_index,
3785 ++ int type1_index, sector_t start_block)
3786 + {
3787 + struct udf_sb_info *sbi = UDF_SB(sb);
3788 + struct udf_part_map *map = &sbi->s_partmaps[p_index];
3789 ++ sector_t vat_block;
3790 + struct kernel_lb_addr ino;
3791 ++
3792 ++ /*
3793 ++ * VAT file entry is in the last recorded block. Some broken disks have
3794 ++ * it a few blocks before so try a bit harder...
3795 ++ */
3796 ++ ino.partitionReferenceNum = type1_index;
3797 ++ for (vat_block = start_block;
3798 ++ vat_block >= map->s_partition_root &&
3799 ++ vat_block >= start_block - 3 &&
3800 ++ !sbi->s_vat_inode; vat_block--) {
3801 ++ ino.logicalBlockNum = vat_block - map->s_partition_root;
3802 ++ sbi->s_vat_inode = udf_iget(sb, &ino);
3803 ++ }
3804 ++}
3805 ++
3806 ++static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
3807 ++{
3808 ++ struct udf_sb_info *sbi = UDF_SB(sb);
3809 ++ struct udf_part_map *map = &sbi->s_partmaps[p_index];
3810 + struct buffer_head *bh = NULL;
3811 + struct udf_inode_info *vati;
3812 + uint32_t pos;
3813 + struct virtualAllocationTable20 *vat20;
3814 + sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
3815 +
3816 +- /* VAT file entry is in the last recorded block */
3817 +- ino.partitionReferenceNum = type1_index;
3818 +- ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root;
3819 +- sbi->s_vat_inode = udf_iget(sb, &ino);
3820 ++ udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
3821 + if (!sbi->s_vat_inode &&
3822 + sbi->s_last_block != blocks - 1) {
3823 + printk(KERN_NOTICE "UDF-fs: Failed to read VAT inode from the"
3824 +@@ -1100,9 +1118,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
3825 + "block of the device (%lu).\n",
3826 + (unsigned long)sbi->s_last_block,
3827 + (unsigned long)blocks - 1);
3828 +- ino.partitionReferenceNum = type1_index;
3829 +- ino.logicalBlockNum = blocks - 1 - map->s_partition_root;
3830 +- sbi->s_vat_inode = udf_iget(sb, &ino);
3831 ++ udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
3832 + }
3833 + if (!sbi->s_vat_inode)
3834 + return 1;
3835 +diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
3836 +index 789cf5f..d77b547 100644
3837 +--- a/include/linux/cpumask.h
3838 ++++ b/include/linux/cpumask.h
3839 +@@ -84,6 +84,7 @@ extern const struct cpumask *const cpu_active_mask;
3840 + #define num_online_cpus() cpumask_weight(cpu_online_mask)
3841 + #define num_possible_cpus() cpumask_weight(cpu_possible_mask)
3842 + #define num_present_cpus() cpumask_weight(cpu_present_mask)
3843 ++#define num_active_cpus() cpumask_weight(cpu_active_mask)
3844 + #define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
3845 + #define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
3846 + #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
3847 +@@ -92,6 +93,7 @@ extern const struct cpumask *const cpu_active_mask;
3848 + #define num_online_cpus() 1
3849 + #define num_possible_cpus() 1
3850 + #define num_present_cpus() 1
3851 ++#define num_active_cpus() 1
3852 + #define cpu_online(cpu) ((cpu) == 0)
3853 + #define cpu_possible(cpu) ((cpu) == 0)
3854 + #define cpu_present(cpu) ((cpu) == 0)
3855 +diff --git a/include/linux/fs.h b/include/linux/fs.h
3856 +index 2620a8c..98ea200 100644
3857 +--- a/include/linux/fs.h
3858 ++++ b/include/linux/fs.h
3859 +@@ -2314,6 +2314,7 @@ extern const struct inode_operations page_symlink_inode_operations;
3860 + extern int generic_readlink(struct dentry *, char __user *, int);
3861 + extern void generic_fillattr(struct inode *, struct kstat *);
3862 + extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
3863 ++void __inode_add_bytes(struct inode *inode, loff_t bytes);
3864 + void inode_add_bytes(struct inode *inode, loff_t bytes);
3865 + void inode_sub_bytes(struct inode *inode, loff_t bytes);
3866 + loff_t inode_get_bytes(struct inode *inode);
3867 +diff --git a/include/linux/quota.h b/include/linux/quota.h
3868 +index 78c4889..8fd8efc 100644
3869 +--- a/include/linux/quota.h
3870 ++++ b/include/linux/quota.h
3871 +@@ -313,8 +313,9 @@ struct dquot_operations {
3872 + int (*claim_space) (struct inode *, qsize_t);
3873 + /* release rsved quota for delayed alloc */
3874 + void (*release_rsv) (struct inode *, qsize_t);
3875 +- /* get reserved quota for delayed alloc */
3876 +- qsize_t (*get_reserved_space) (struct inode *);
3877 ++ /* get reserved quota for delayed alloc, value returned is managed by
3878 ++ * quota code only */
3879 ++ qsize_t *(*get_reserved_space) (struct inode *);
3880 + };
3881 +
3882 + /* Operations handling requests from userspace */
3883 +diff --git a/include/linux/security.h b/include/linux/security.h
3884 +index 239e40d..d40d23f 100644
3885 +--- a/include/linux/security.h
3886 ++++ b/include/linux/security.h
3887 +@@ -95,8 +95,13 @@ struct seq_file;
3888 + extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
3889 + extern int cap_netlink_recv(struct sk_buff *skb, int cap);
3890 +
3891 ++#ifdef CONFIG_MMU
3892 + extern unsigned long mmap_min_addr;
3893 + extern unsigned long dac_mmap_min_addr;
3894 ++#else
3895 ++#define dac_mmap_min_addr 0UL
3896 ++#endif
3897 ++
3898 + /*
3899 + * Values used in the task_security_ops calls
3900 + */
3901 +@@ -121,6 +126,7 @@ struct request_sock;
3902 + #define LSM_UNSAFE_PTRACE 2
3903 + #define LSM_UNSAFE_PTRACE_CAP 4
3904 +
3905 ++#ifdef CONFIG_MMU
3906 + /*
3907 + * If a hint addr is less than mmap_min_addr change hint to be as
3908 + * low as possible but still greater than mmap_min_addr
3909 +@@ -135,6 +141,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
3910 + }
3911 + extern int mmap_min_addr_handler(struct ctl_table *table, int write,
3912 + void __user *buffer, size_t *lenp, loff_t *ppos);
3913 ++#endif
3914 +
3915 + #ifdef CONFIG_SECURITY
3916 +
3917 +diff --git a/include/net/ip.h b/include/net/ip.h
3918 +index 2f47e54..69db943 100644
3919 +--- a/include/net/ip.h
3920 ++++ b/include/net/ip.h
3921 +@@ -342,6 +342,7 @@ enum ip_defrag_users
3922 + IP_DEFRAG_CALL_RA_CHAIN,
3923 + IP_DEFRAG_CONNTRACK_IN,
3924 + IP_DEFRAG_CONNTRACK_OUT,
3925 ++ IP_DEFRAG_CONNTRACK_BRIDGE_IN,
3926 + IP_DEFRAG_VS_IN,
3927 + IP_DEFRAG_VS_OUT,
3928 + IP_DEFRAG_VS_FWD
3929 +diff --git a/include/net/ipv6.h b/include/net/ipv6.h
3930 +index 8c31d8a..639bbf0 100644
3931 +--- a/include/net/ipv6.h
3932 ++++ b/include/net/ipv6.h
3933 +@@ -354,8 +354,16 @@ static inline int ipv6_prefix_equal(const struct in6_addr *a1,
3934 +
3935 + struct inet_frag_queue;
3936 +
3937 ++enum ip6_defrag_users {
3938 ++ IP6_DEFRAG_LOCAL_DELIVER,
3939 ++ IP6_DEFRAG_CONNTRACK_IN,
3940 ++ IP6_DEFRAG_CONNTRACK_OUT,
3941 ++ IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
3942 ++};
3943 ++
3944 + struct ip6_create_arg {
3945 + __be32 id;
3946 ++ u32 user;
3947 + struct in6_addr *src;
3948 + struct in6_addr *dst;
3949 + };
3950 +diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
3951 +index abc55ad..1ee717e 100644
3952 +--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
3953 ++++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
3954 +@@ -9,7 +9,7 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
3955 +
3956 + extern int nf_ct_frag6_init(void);
3957 + extern void nf_ct_frag6_cleanup(void);
3958 +-extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb);
3959 ++extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
3960 + extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
3961 + struct net_device *in,
3962 + struct net_device *out,
3963 +diff --git a/kernel/cpu.c b/kernel/cpu.c
3964 +index 6ba0f1e..b216886 100644
3965 +--- a/kernel/cpu.c
3966 ++++ b/kernel/cpu.c
3967 +@@ -212,6 +212,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
3968 + err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
3969 + hcpu, -1, &nr_calls);
3970 + if (err == NOTIFY_BAD) {
3971 ++ set_cpu_active(cpu, true);
3972 ++
3973 + nr_calls--;
3974 + __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
3975 + hcpu, nr_calls, NULL);
3976 +@@ -223,11 +225,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
3977 +
3978 + /* Ensure that we are not runnable on dying cpu */
3979 + cpumask_copy(old_allowed, &current->cpus_allowed);
3980 +- set_cpus_allowed_ptr(current,
3981 +- cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
3982 ++ set_cpus_allowed_ptr(current, cpu_active_mask);
3983 +
3984 + err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
3985 + if (err) {
3986 ++ set_cpu_active(cpu, true);
3987 + /* CPU didn't die: tell everyone. Can't complain. */
3988 + if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
3989 + hcpu) == NOTIFY_BAD)
3990 +@@ -292,9 +294,6 @@ int __ref cpu_down(unsigned int cpu)
3991 +
3992 + err = _cpu_down(cpu, 0);
3993 +
3994 +- if (cpu_online(cpu))
3995 +- set_cpu_active(cpu, true);
3996 +-
3997 + out:
3998 + cpu_maps_update_done();
3999 + stop_machine_destroy();
4000 +@@ -387,6 +386,15 @@ int disable_nonboot_cpus(void)
4001 + * with the userspace trying to use the CPU hotplug at the same time
4002 + */
4003 + cpumask_clear(frozen_cpus);
4004 ++
4005 ++ for_each_online_cpu(cpu) {
4006 ++ if (cpu == first_cpu)
4007 ++ continue;
4008 ++ set_cpu_active(cpu, false);
4009 ++ }
4010 ++
4011 ++ synchronize_sched();
4012 ++
4013 + printk("Disabling non-boot CPUs ...\n");
4014 + for_each_online_cpu(cpu) {
4015 + if (cpu == first_cpu)
4016 +diff --git a/kernel/cpuset.c b/kernel/cpuset.c
4017 +index b5cb469..39e5121 100644
4018 +--- a/kernel/cpuset.c
4019 ++++ b/kernel/cpuset.c
4020 +@@ -873,7 +873,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
4021 + if (retval < 0)
4022 + return retval;
4023 +
4024 +- if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask))
4025 ++ if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
4026 + return -EINVAL;
4027 + }
4028 + retval = validate_change(cs, trialcs);
4029 +@@ -2011,7 +2011,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
4030 + }
4031 +
4032 + /* Continue past cpusets with all cpus, mems online */
4033 +- if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) &&
4034 ++ if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) &&
4035 + nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
4036 + continue;
4037 +
4038 +@@ -2020,7 +2020,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
4039 + /* Remove offline cpus and mems from this cpuset. */
4040 + mutex_lock(&callback_mutex);
4041 + cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
4042 +- cpu_online_mask);
4043 ++ cpu_active_mask);
4044 + nodes_and(cp->mems_allowed, cp->mems_allowed,
4045 + node_states[N_HIGH_MEMORY]);
4046 + mutex_unlock(&callback_mutex);
4047 +@@ -2058,8 +2058,10 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
4048 + switch (phase) {
4049 + case CPU_ONLINE:
4050 + case CPU_ONLINE_FROZEN:
4051 +- case CPU_DEAD:
4052 +- case CPU_DEAD_FROZEN:
4053 ++ case CPU_DOWN_PREPARE:
4054 ++ case CPU_DOWN_PREPARE_FROZEN:
4055 ++ case CPU_DOWN_FAILED:
4056 ++ case CPU_DOWN_FAILED_FROZEN:
4057 + break;
4058 +
4059 + default:
4060 +@@ -2068,7 +2070,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
4061 +
4062 + cgroup_lock();
4063 + mutex_lock(&callback_mutex);
4064 +- cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
4065 ++ cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
4066 + mutex_unlock(&callback_mutex);
4067 + scan_for_empty_cpusets(&top_cpuset);
4068 + ndoms = generate_sched_domains(&doms, &attr);
4069 +@@ -2115,7 +2117,7 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
4070 +
4071 + void __init cpuset_init_smp(void)
4072 + {
4073 +- cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
4074 ++ cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
4075 + top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
4076 +
4077 + hotcpu_notifier(cpuset_track_online_cpus, 0);
4078 +diff --git a/kernel/sched.c b/kernel/sched.c
4079 +index d079a9f..dd0dccd 100644
4080 +--- a/kernel/sched.c
4081 ++++ b/kernel/sched.c
4082 +@@ -2036,6 +2036,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
4083 + {
4084 + s64 delta;
4085 +
4086 ++ if (p->sched_class != &fair_sched_class)
4087 ++ return 0;
4088 ++
4089 + /*
4090 + * Buddy candidates are cache hot:
4091 + */
4092 +@@ -2044,9 +2047,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
4093 + &p->se == cfs_rq_of(&p->se)->last))
4094 + return 1;
4095 +
4096 +- if (p->sched_class != &fair_sched_class)
4097 +- return 0;
4098 +-
4099 + if (sysctl_sched_migration_cost == -1)
4100 + return 1;
4101 + if (sysctl_sched_migration_cost == 0)
4102 +@@ -4139,7 +4139,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
4103 + unsigned long flags;
4104 + struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
4105 +
4106 +- cpumask_copy(cpus, cpu_online_mask);
4107 ++ cpumask_copy(cpus, cpu_active_mask);
4108 +
4109 + /*
4110 + * When power savings policy is enabled for the parent domain, idle
4111 +@@ -4302,7 +4302,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
4112 + int all_pinned = 0;
4113 + struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
4114 +
4115 +- cpumask_copy(cpus, cpu_online_mask);
4116 ++ cpumask_copy(cpus, cpu_active_mask);
4117 +
4118 + /*
4119 + * When power savings policy is enabled for the parent domain, idle
4120 +@@ -4699,7 +4699,7 @@ int select_nohz_load_balancer(int stop_tick)
4121 + cpumask_set_cpu(cpu, nohz.cpu_mask);
4122 +
4123 + /* time for ilb owner also to sleep */
4124 +- if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
4125 ++ if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
4126 + if (atomic_read(&nohz.load_balancer) == cpu)
4127 + atomic_set(&nohz.load_balancer, -1);
4128 + return 0;
4129 +@@ -7075,7 +7075,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
4130 + int ret = 0;
4131 +
4132 + rq = task_rq_lock(p, &flags);
4133 +- if (!cpumask_intersects(new_mask, cpu_online_mask)) {
4134 ++ if (!cpumask_intersects(new_mask, cpu_active_mask)) {
4135 + ret = -EINVAL;
4136 + goto out;
4137 + }
4138 +@@ -7097,7 +7097,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
4139 + if (cpumask_test_cpu(task_cpu(p), new_mask))
4140 + goto out;
4141 +
4142 +- if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
4143 ++ if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
4144 + /* Need help from migration thread: drop lock and wait. */
4145 + struct task_struct *mt = rq->migration_thread;
4146 +
4147 +@@ -7251,19 +7251,19 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
4148 +
4149 + again:
4150 + /* Look for allowed, online CPU in same node. */
4151 +- for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
4152 ++ for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
4153 + if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
4154 + goto move;
4155 +
4156 + /* Any allowed, online CPU? */
4157 +- dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
4158 ++ dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
4159 + if (dest_cpu < nr_cpu_ids)
4160 + goto move;
4161 +
4162 + /* No more Mr. Nice Guy. */
4163 + if (dest_cpu >= nr_cpu_ids) {
4164 + cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
4165 +- dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
4166 ++ dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
4167 +
4168 + /*
4169 + * Don't tell them about moving exiting tasks or
4170 +@@ -7292,7 +7292,7 @@ move:
4171 + */
4172 + static void migrate_nr_uninterruptible(struct rq *rq_src)
4173 + {
4174 +- struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
4175 ++ struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
4176 + unsigned long flags;
4177 +
4178 + local_irq_save(flags);
4179 +@@ -7546,7 +7546,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
4180 + static struct ctl_table_header *sd_sysctl_header;
4181 + static void register_sched_domain_sysctl(void)
4182 + {
4183 +- int i, cpu_num = num_online_cpus();
4184 ++ int i, cpu_num = num_possible_cpus();
4185 + struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
4186 + char buf[32];
4187 +
4188 +@@ -7556,7 +7556,7 @@ static void register_sched_domain_sysctl(void)
4189 + if (entry == NULL)
4190 + return;
4191 +
4192 +- for_each_online_cpu(i) {
4193 ++ for_each_possible_cpu(i) {
4194 + snprintf(buf, 32, "cpu%d", i);
4195 + entry->procname = kstrdup(buf, GFP_KERNEL);
4196 + entry->mode = 0555;
4197 +@@ -7925,6 +7925,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
4198 +
4199 + static void free_rootdomain(struct root_domain *rd)
4200 + {
4201 ++ synchronize_sched();
4202 ++
4203 + cpupri_cleanup(&rd->cpupri);
4204 +
4205 + free_cpumask_var(rd->rto_mask);
4206 +@@ -9042,7 +9044,7 @@ match1:
4207 + if (doms_new == NULL) {
4208 + ndoms_cur = 0;
4209 + doms_new = fallback_doms;
4210 +- cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
4211 ++ cpumask_andnot(&doms_new[0], cpu_active_mask, cpu_isolated_map);
4212 + WARN_ON_ONCE(dattr_new);
4213 + }
4214 +
4215 +@@ -9173,8 +9175,10 @@ static int update_sched_domains(struct notifier_block *nfb,
4216 + switch (action) {
4217 + case CPU_ONLINE:
4218 + case CPU_ONLINE_FROZEN:
4219 +- case CPU_DEAD:
4220 +- case CPU_DEAD_FROZEN:
4221 ++ case CPU_DOWN_PREPARE:
4222 ++ case CPU_DOWN_PREPARE_FROZEN:
4223 ++ case CPU_DOWN_FAILED:
4224 ++ case CPU_DOWN_FAILED_FROZEN:
4225 + partition_sched_domains(1, NULL, NULL);
4226 + return NOTIFY_OK;
4227 +
4228 +@@ -9221,7 +9225,7 @@ void __init sched_init_smp(void)
4229 + #endif
4230 + get_online_cpus();
4231 + mutex_lock(&sched_domains_mutex);
4232 +- arch_init_sched_domains(cpu_online_mask);
4233 ++ arch_init_sched_domains(cpu_active_mask);
4234 + cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
4235 + if (cpumask_empty(non_isolated_cpus))
4236 + cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
4237 +diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
4238 +index 5488a5d..199228b 100644
4239 +--- a/kernel/sched_fair.c
4240 ++++ b/kernel/sched_fair.c
4241 +@@ -1374,6 +1374,9 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
4242 +
4243 + rcu_read_lock();
4244 + for_each_domain(cpu, tmp) {
4245 ++ if (!(tmp->flags & SD_LOAD_BALANCE))
4246 ++ continue;
4247 ++
4248 + /*
4249 + * If power savings logic is enabled for a domain, see if we
4250 + * are not overloaded, if so, don't balance wider.
4251 +diff --git a/kernel/sysctl.c b/kernel/sysctl.c
4252 +index 0d949c5..dd84be9 100644
4253 +--- a/kernel/sysctl.c
4254 ++++ b/kernel/sysctl.c
4255 +@@ -1200,6 +1200,7 @@ static struct ctl_table vm_table[] = {
4256 + .extra2 = (void *)&hugetlb_infinity,
4257 + },
4258 + #endif
4259 ++#ifdef CONFIG_MMU
4260 + {
4261 + .ctl_name = VM_LOWMEM_RESERVE_RATIO,
4262 + .procname = "lowmem_reserve_ratio",
4263 +@@ -1353,6 +1354,7 @@ static struct ctl_table vm_table[] = {
4264 + .mode = 0644,
4265 + .proc_handler = &mmap_min_addr_handler,
4266 + },
4267 ++#endif
4268 + #ifdef CONFIG_NUMA
4269 + {
4270 + .ctl_name = CTL_UNNUMBERED,
4271 +@@ -1605,7 +1607,8 @@ static struct ctl_table debug_table[] = {
4272 + .data = &show_unhandled_signals,
4273 + .maxlen = sizeof(int),
4274 + .mode = 0644,
4275 +- .proc_handler = proc_dointvec
4276 ++ .proc_handler = proc_dointvec_minmax,
4277 ++ .extra1 = &zero,
4278 + },
4279 + #endif
4280 + { .ctl_name = 0 }
4281 +diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
4282 +index 620b58a..9484be4 100644
4283 +--- a/kernel/time/clockevents.c
4284 ++++ b/kernel/time/clockevents.c
4285 +@@ -237,8 +237,9 @@ void clockevents_exchange_device(struct clock_event_device *old,
4286 + */
4287 + void clockevents_notify(unsigned long reason, void *arg)
4288 + {
4289 +- struct list_head *node, *tmp;
4290 ++ struct clock_event_device *dev, *tmp;
4291 + unsigned long flags;
4292 ++ int cpu;
4293 +
4294 + spin_lock_irqsave(&clockevents_lock, flags);
4295 + clockevents_do_notify(reason, arg);
4296 +@@ -249,8 +250,19 @@ void clockevents_notify(unsigned long reason, void *arg)
4297 + * Unregister the clock event devices which were
4298 + * released from the users in the notify chain.
4299 + */
4300 +- list_for_each_safe(node, tmp, &clockevents_released)
4301 +- list_del(node);
4302 ++ list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
4303 ++ list_del(&dev->list);
4304 ++ /*
4305 ++ * Now check whether the CPU has left unused per cpu devices
4306 ++ */
4307 ++ cpu = *((int *)arg);
4308 ++ list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
4309 ++ if (cpumask_test_cpu(cpu, dev->cpumask) &&
4310 ++ cpumask_weight(dev->cpumask) == 1) {
4311 ++ BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
4312 ++ list_del(&dev->list);
4313 ++ }
4314 ++ }
4315 + break;
4316 + default:
4317 + break;
4318 +diff --git a/lib/dma-debug.c b/lib/dma-debug.c
4319 +index ce6b7ea..5a77c7c 100644
4320 +--- a/lib/dma-debug.c
4321 ++++ b/lib/dma-debug.c
4322 +@@ -670,12 +670,13 @@ static int device_dma_allocations(struct device *dev)
4323 + return count;
4324 + }
4325 +
4326 +-static int dma_debug_device_change(struct notifier_block *nb,
4327 +- unsigned long action, void *data)
4328 ++static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
4329 + {
4330 + struct device *dev = data;
4331 + int count;
4332 +
4333 ++ if (global_disable)
4334 ++ return 0;
4335 +
4336 + switch (action) {
4337 + case BUS_NOTIFY_UNBOUND_DRIVER:
4338 +@@ -697,6 +698,9 @@ void dma_debug_add_bus(struct bus_type *bus)
4339 + {
4340 + struct notifier_block *nb;
4341 +
4342 ++ if (global_disable)
4343 ++ return;
4344 ++
4345 + nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
4346 + if (nb == NULL) {
4347 + pr_err("dma_debug_add_bus: out of memory\n");
4348 +diff --git a/mm/Kconfig b/mm/Kconfig
4349 +index 44cf6f0..2c19c0b 100644
4350 +--- a/mm/Kconfig
4351 ++++ b/mm/Kconfig
4352 +@@ -227,6 +227,7 @@ config KSM
4353 +
4354 + config DEFAULT_MMAP_MIN_ADDR
4355 + int "Low address space to protect from user allocation"
4356 ++ depends on MMU
4357 + default 4096
4358 + help
4359 + This is the portion of low virtual memory which should be protected
4360 +diff --git a/mm/internal.h b/mm/internal.h
4361 +index 22ec8d2..17bc0df 100644
4362 +--- a/mm/internal.h
4363 ++++ b/mm/internal.h
4364 +@@ -107,9 +107,10 @@ static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
4365 + }
4366 +
4367 + /*
4368 +- * must be called with vma's mmap_sem held for read, and page locked.
4369 ++ * must be called with vma's mmap_sem held for read or write, and page locked.
4370 + */
4371 + extern void mlock_vma_page(struct page *page);
4372 ++extern void munlock_vma_page(struct page *page);
4373 +
4374 + /*
4375 + * Clear the page's PageMlocked(). This can be useful in a situation where
4376 +diff --git a/mm/ksm.c b/mm/ksm.c
4377 +index 5575f86..e9501f8 100644
4378 +--- a/mm/ksm.c
4379 ++++ b/mm/ksm.c
4380 +@@ -34,6 +34,7 @@
4381 + #include <linux/ksm.h>
4382 +
4383 + #include <asm/tlbflush.h>
4384 ++#include "internal.h"
4385 +
4386 + /*
4387 + * A few notes about the KSM scanning process,
4388 +@@ -767,15 +768,14 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
4389 + * ptes are necessarily already write-protected. But in either
4390 + * case, we need to lock and check page_count is not raised.
4391 + */
4392 +- if (write_protect_page(vma, oldpage, &orig_pte)) {
4393 +- unlock_page(oldpage);
4394 +- goto out_putpage;
4395 +- }
4396 +- unlock_page(oldpage);
4397 +-
4398 +- if (pages_identical(oldpage, newpage))
4399 ++ if (write_protect_page(vma, oldpage, &orig_pte) == 0 &&
4400 ++ pages_identical(oldpage, newpage))
4401 + err = replace_page(vma, oldpage, newpage, orig_pte);
4402 +
4403 ++ if ((vma->vm_flags & VM_LOCKED) && !err)
4404 ++ munlock_vma_page(oldpage);
4405 ++
4406 ++ unlock_page(oldpage);
4407 + out_putpage:
4408 + put_page(oldpage);
4409 + put_page(newpage);
4410 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4411 +index 6314015..5dc1037 100644
4412 +--- a/mm/memcontrol.c
4413 ++++ b/mm/memcontrol.c
4414 +@@ -758,7 +758,13 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
4415 + task_unlock(task);
4416 + if (!curr)
4417 + return 0;
4418 +- if (curr->use_hierarchy)
4419 ++ /*
4420 ++ * We should check use_hierarchy of "mem", not "curr". Checking
4421 ++ * use_hierarchy of "curr" here would make this function return true if
4422 ++ * hierarchy is enabled in "curr" and "curr" is a child of "mem" in the
4423 ++ * *cgroup* hierarchy (even if use_hierarchy is disabled in "mem").
4424 ++ */
4425 ++ if (mem->use_hierarchy)
4426 + ret = css_is_ancestor(&curr->css, &mem->css);
4427 + else
4428 + ret = (curr == mem);
4429 +diff --git a/mm/mlock.c b/mm/mlock.c
4430 +index bd6f0e4..2e05c97 100644
4431 +--- a/mm/mlock.c
4432 ++++ b/mm/mlock.c
4433 +@@ -99,14 +99,14 @@ void mlock_vma_page(struct page *page)
4434 + * not get another chance to clear PageMlocked. If we successfully
4435 + * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
4436 + * mapping the page, it will restore the PageMlocked state, unless the page
4437 +- * is mapped in a non-linear vma. So, we go ahead and SetPageMlocked(),
4438 ++ * is mapped in a non-linear vma. So, we go ahead and ClearPageMlocked(),
4439 + * perhaps redundantly.
4440 + * If we lose the isolation race, and the page is mapped by other VM_LOCKED
4441 + * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
4442 + * either of which will restore the PageMlocked state by calling
4443 + * mlock_vma_page() above, if it can grab the vma's mmap sem.
4444 + */
4445 +-static void munlock_vma_page(struct page *page)
4446 ++void munlock_vma_page(struct page *page)
4447 + {
4448 + BUG_ON(!PageLocked(page));
4449 +
4450 +diff --git a/mm/oom_kill.c b/mm/oom_kill.c
4451 +index ea2147d..9092b43 100644
4452 +--- a/mm/oom_kill.c
4453 ++++ b/mm/oom_kill.c
4454 +@@ -404,7 +404,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
4455 + cpuset_print_task_mems_allowed(current);
4456 + task_unlock(current);
4457 + dump_stack();
4458 +- mem_cgroup_print_oom_info(mem, current);
4459 ++ mem_cgroup_print_oom_info(mem, p);
4460 + show_mem();
4461 + if (sysctl_oom_dump_tasks)
4462 + dump_tasks(mem);
4463 +diff --git a/mm/vmscan.c b/mm/vmscan.c
4464 +index 777af57..692807f 100644
4465 +--- a/mm/vmscan.c
4466 ++++ b/mm/vmscan.c
4467 +@@ -1464,20 +1464,26 @@ static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
4468 + return low;
4469 + }
4470 +
4471 ++static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
4472 ++ int file)
4473 ++{
4474 ++ if (file)
4475 ++ return inactive_file_is_low(zone, sc);
4476 ++ else
4477 ++ return inactive_anon_is_low(zone, sc);
4478 ++}
4479 ++
4480 + static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
4481 + struct zone *zone, struct scan_control *sc, int priority)
4482 + {
4483 + int file = is_file_lru(lru);
4484 +
4485 +- if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
4486 +- shrink_active_list(nr_to_scan, zone, sc, priority, file);
4487 ++ if (is_active_lru(lru)) {
4488 ++ if (inactive_list_is_low(zone, sc, file))
4489 ++ shrink_active_list(nr_to_scan, zone, sc, priority, file);
4490 + return 0;
4491 + }
4492 +
4493 +- if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
4494 +- shrink_active_list(nr_to_scan, zone, sc, priority, file);
4495 +- return 0;
4496 +- }
4497 + return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
4498 + }
4499 +
4500 +diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
4501 +index fa2d6b6..331ead3 100644
4502 +--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
4503 ++++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
4504 +@@ -14,6 +14,7 @@
4505 + #include <net/route.h>
4506 + #include <net/ip.h>
4507 +
4508 ++#include <linux/netfilter_bridge.h>
4509 + #include <linux/netfilter_ipv4.h>
4510 + #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
4511 +
4512 +@@ -34,6 +35,20 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
4513 + return err;
4514 + }
4515 +
4516 ++static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
4517 ++ struct sk_buff *skb)
4518 ++{
4519 ++#ifdef CONFIG_BRIDGE_NETFILTER
4520 ++ if (skb->nf_bridge &&
4521 ++ skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
4522 ++ return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
4523 ++#endif
4524 ++ if (hooknum == NF_INET_PRE_ROUTING)
4525 ++ return IP_DEFRAG_CONNTRACK_IN;
4526 ++ else
4527 ++ return IP_DEFRAG_CONNTRACK_OUT;
4528 ++}
4529 ++
4530 + static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
4531 + struct sk_buff *skb,
4532 + const struct net_device *in,
4533 +@@ -50,10 +65,8 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
4534 + #endif
4535 + /* Gather fragments. */
4536 + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
4537 +- if (nf_ct_ipv4_gather_frags(skb,
4538 +- hooknum == NF_INET_PRE_ROUTING ?
4539 +- IP_DEFRAG_CONNTRACK_IN :
4540 +- IP_DEFRAG_CONNTRACK_OUT))
4541 ++ enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
4542 ++ if (nf_ct_ipv4_gather_frags(skb, user))
4543 + return NF_STOLEN;
4544 + }
4545 + return NF_ACCEPT;
4546 +diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
4547 +index 5f2ec20..0956eba 100644
4548 +--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
4549 ++++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
4550 +@@ -20,6 +20,7 @@
4551 + #include <net/ipv6.h>
4552 + #include <net/inet_frag.h>
4553 +
4554 ++#include <linux/netfilter_bridge.h>
4555 + #include <linux/netfilter_ipv6.h>
4556 + #include <net/netfilter/nf_conntrack.h>
4557 + #include <net/netfilter/nf_conntrack_helper.h>
4558 +@@ -187,6 +188,21 @@ out:
4559 + return nf_conntrack_confirm(skb);
4560 + }
4561 +
4562 ++static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
4563 ++ struct sk_buff *skb)
4564 ++{
4565 ++#ifdef CONFIG_BRIDGE_NETFILTER
4566 ++ if (skb->nf_bridge &&
4567 ++ skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
4568 ++ return IP6_DEFRAG_CONNTRACK_BRIDGE_IN;
4569 ++#endif
4570 ++ if (hooknum == NF_INET_PRE_ROUTING)
4571 ++ return IP6_DEFRAG_CONNTRACK_IN;
4572 ++ else
4573 ++ return IP6_DEFRAG_CONNTRACK_OUT;
4574 ++
4575 ++}
4576 ++
4577 + static unsigned int ipv6_defrag(unsigned int hooknum,
4578 + struct sk_buff *skb,
4579 + const struct net_device *in,
4580 +@@ -199,8 +215,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
4581 + if (skb->nfct)
4582 + return NF_ACCEPT;
4583 +
4584 +- reasm = nf_ct_frag6_gather(skb);
4585 +-
4586 ++ reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
4587 + /* queued */
4588 + if (reasm == NULL)
4589 + return NF_STOLEN;
4590 +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
4591 +index f3aba25..4b6a539 100644
4592 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
4593 ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
4594 +@@ -170,13 +170,14 @@ out:
4595 + /* Creation primitives. */
4596 +
4597 + static __inline__ struct nf_ct_frag6_queue *
4598 +-fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
4599 ++fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
4600 + {
4601 + struct inet_frag_queue *q;
4602 + struct ip6_create_arg arg;
4603 + unsigned int hash;
4604 +
4605 + arg.id = id;
4606 ++ arg.user = user;
4607 + arg.src = src;
4608 + arg.dst = dst;
4609 +
4610 +@@ -561,7 +562,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
4611 + return 0;
4612 + }
4613 +
4614 +-struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
4615 ++struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
4616 + {
4617 + struct sk_buff *clone;
4618 + struct net_device *dev = skb->dev;
4619 +@@ -607,7 +608,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
4620 + if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
4621 + nf_ct_frag6_evictor();
4622 +
4623 +- fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
4624 ++ fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr);
4625 + if (fq == NULL) {
4626 + pr_debug("Can't find and can't create new queue\n");
4627 + goto ret_orig;
4628 +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
4629 +index da5bd0e..4d18699 100644
4630 +--- a/net/ipv6/reassembly.c
4631 ++++ b/net/ipv6/reassembly.c
4632 +@@ -72,6 +72,7 @@ struct frag_queue
4633 + struct inet_frag_queue q;
4634 +
4635 + __be32 id; /* fragment id */
4636 ++ u32 user;
4637 + struct in6_addr saddr;
4638 + struct in6_addr daddr;
4639 +
4640 +@@ -141,7 +142,7 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
4641 + struct ip6_create_arg *arg = a;
4642 +
4643 + fq = container_of(q, struct frag_queue, q);
4644 +- return (fq->id == arg->id &&
4645 ++ return (fq->id == arg->id && fq->user == arg->user &&
4646 + ipv6_addr_equal(&fq->saddr, arg->src) &&
4647 + ipv6_addr_equal(&fq->daddr, arg->dst));
4648 + }
4649 +@@ -163,6 +164,7 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
4650 + struct ip6_create_arg *arg = a;
4651 +
4652 + fq->id = arg->id;
4653 ++ fq->user = arg->user;
4654 + ipv6_addr_copy(&fq->saddr, arg->src);
4655 + ipv6_addr_copy(&fq->daddr, arg->dst);
4656 + }
4657 +@@ -244,6 +246,7 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
4658 + unsigned int hash;
4659 +
4660 + arg.id = id;
4661 ++ arg.user = IP6_DEFRAG_LOCAL_DELIVER;
4662 + arg.src = src;
4663 + arg.dst = dst;
4664 +
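For illustration only (not part of the patch): the hunks above give each IPv6
reassembly queue an owner, so conntrack defragmentation (PRE_ROUTING, OUTPUT,
and the bridged case) and local delivery each use their own IP6_DEFRAG_* user,
and ip6_frag_match() only merges fragments whose queue shares that user. A
minimal user-space model of the new matching rule, with a simplified stand-in
for the kernel's queue structures:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct in6_addr_model { uint8_t s6_addr[16]; };

    struct frag_key {
            uint32_t id;                    /* fragment id */
            uint32_t user;                  /* IP6_DEFRAG_* owner of the queue */
            struct in6_addr_model saddr;
            struct in6_addr_model daddr;
    };

    /* Same test as ip6_frag_match() after the patch: user is part of the key. */
    static bool frag_match(const struct frag_key *q, const struct frag_key *arg)
    {
            return q->id == arg->id && q->user == arg->user &&
                   memcmp(&q->saddr, &arg->saddr, sizeof(q->saddr)) == 0 &&
                   memcmp(&q->daddr, &arg->daddr, sizeof(q->daddr)) == 0;
    }

    int main(void)
    {
            struct frag_key a = { .id = 0x1234, .user = 0 /* e.g. local delivery */ };
            struct frag_key b = a;

            b.user = 1;                     /* e.g. conntrack PRE_ROUTING */
            printf("same user:      %d\n", frag_match(&a, &a));   /* prints 1 */
            printf("different user: %d\n", frag_match(&a, &b));   /* prints 0 */
            return 0;
    }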
4665 +diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
4666 +index f1362f3..fbffce9 100644
4667 +--- a/net/mac80211/ibss.c
4668 ++++ b/net/mac80211/ibss.c
4669 +@@ -455,6 +455,10 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
4670 +
4671 + ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
4672 +
4673 ++ if (time_before(jiffies, ifibss->last_scan_completed +
4674 ++ IEEE80211_IBSS_MERGE_INTERVAL))
4675 ++ return;
4676 ++
4677 + if (ieee80211_sta_active_ibss(sdata))
4678 + return;
4679 +
4680 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
4681 +index dc5049d..f13d181 100644
4682 +--- a/net/mac80211/mlme.c
4683 ++++ b/net/mac80211/mlme.c
4684 +@@ -904,6 +904,14 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
4685 + sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
4686 + IEEE80211_STA_BEACON_POLL);
4687 +
4688 ++ /*
4689 ++ * Always handle WMM once after association regardless
4690 ++ * of the first value the AP uses. Setting -1 here has
4691 ++ * that effect because the AP value is an unsigned
4692 ++ * 4-bit value.
4693 ++ */
4694 ++ sdata->u.mgd.wmm_last_param_set = -1;
4695 ++
4696 + ieee80211_led_assoc(local, 1);
4697 +
4698 + sdata->vif.bss_conf.assoc = 1;
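Side note on the sentinel added above (illustration, not mac80211 code): the
WMM parameter-set count carried by the AP is an unsigned 4-bit field, so a
wider signed cache initialised to -1 can never match it, and the first
beacon/association response after (re)association always triggers a WMM
update. A stand-alone model of that pattern:

    #include <stdio.h>

    static int wmm_last_param_set = -1;     /* reset on every new association */

    static int wmm_needs_update(unsigned int ap_count)
    {
            ap_count &= 0x0f;               /* the AP field is only 4 bits wide */
            if ((int)ap_count == wmm_last_param_set)
                    return 0;               /* unchanged, nothing to do */
            wmm_last_param_set = ap_count;
            return 1;
    }

    int main(void)
    {
            printf("%d\n", wmm_needs_update(0));  /* 1: -1 never equals 0..15 */
            printf("%d\n", wmm_needs_update(0));  /* 0: same count, skip */
            printf("%d\n", wmm_needs_update(1));  /* 1: AP changed its parameters */
            return 0;
    }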
4699 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
4700 +index eaa4118..d398197 100644
4701 +--- a/net/mac80211/tx.c
4702 ++++ b/net/mac80211/tx.c
4703 +@@ -1401,6 +1401,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
4704 +
4705 + if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
4706 + local->hw.conf.dynamic_ps_timeout > 0 &&
4707 ++ !local->quiescing &&
4708 + !(local->scanning) && local->ps_sdata) {
4709 + if (local->hw.conf.flags & IEEE80211_CONF_PS) {
4710 + ieee80211_stop_queues_by_reason(&local->hw,
4711 +diff --git a/net/mac80211/util.c b/net/mac80211/util.c
4712 +index cbc5d20..51e0bd2 100644
4713 +--- a/net/mac80211/util.c
4714 ++++ b/net/mac80211/util.c
4715 +@@ -1031,7 +1031,19 @@ int ieee80211_reconfig(struct ieee80211_local *local)
4716 +
4717 + /* restart hardware */
4718 + if (local->open_count) {
4719 ++ /*
4720 ++ * Upon resume hardware can sometimes be goofy due to
4721 ++ * various platform / driver / bus issues, so restarting
4722 ++ * the device may at times not work immediately. Propagate
4723 ++ * the error.
4724 ++ */
4725 + res = drv_start(local);
4726 ++ if (res) {
4727 ++ WARN(local->suspended, "Harware became unavailable "
4728 ++ "upon resume. This is could be a software issue"
4729 ++ "prior to suspend or a harware issue\n");
4730 ++ return res;
4731 ++ }
4732 +
4733 + ieee80211_led_radio(local, true);
4734 + }
4735 +diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
4736 +index 0a6b7a0..0d86248 100644
4737 +--- a/net/wireless/mlme.c
4738 ++++ b/net/wireless/mlme.c
4739 +@@ -94,7 +94,18 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
4740 + }
4741 + }
4742 +
4743 +- WARN_ON(!bss);
4744 ++ /*
4745 ++ * We might be coming here because the driver reported
4746 ++ * a successful association at the same time as the
4747 ++ * user requested a deauth. In that case, we will have
4748 ++ * removed the BSS from the auth_bsses list due to the
4749 ++ * deauth request when the assoc response makes it. If
4750 ++ * the two code paths acquire the lock the other way
4751 ++ * around, that's just the standard situation of a
4752 ++ * deauth being requested while connected.
4753 ++ */
4754 ++ if (!bss)
4755 ++ goto out;
4756 + } else if (wdev->conn) {
4757 + cfg80211_sme_failed_assoc(wdev);
4758 + need_connect_result = false;
4759 +diff --git a/security/Makefile b/security/Makefile
4760 +index 95ecc06..510bbc8 100644
4761 +--- a/security/Makefile
4762 ++++ b/security/Makefile
4763 +@@ -8,7 +8,8 @@ subdir-$(CONFIG_SECURITY_SMACK) += smack
4764 + subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo
4765 +
4766 + # always enable default capabilities
4767 +-obj-y += commoncap.o min_addr.o
4768 ++obj-y += commoncap.o
4769 ++obj-$(CONFIG_MMU) += min_addr.o
4770 +
4771 + # Object file lists
4772 + obj-$(CONFIG_SECURITY) += security.o capability.o
4773 +diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
4774 +index 06ec722..1cad4c7 100644
4775 +--- a/security/keys/keyctl.c
4776 ++++ b/security/keys/keyctl.c
4777 +@@ -1236,6 +1236,7 @@ long keyctl_get_security(key_serial_t keyid,
4778 + */
4779 + long keyctl_session_to_parent(void)
4780 + {
4781 ++#ifdef TIF_NOTIFY_RESUME
4782 + struct task_struct *me, *parent;
4783 + const struct cred *mycred, *pcred;
4784 + struct cred *cred, *oldcred;
4785 +@@ -1326,6 +1327,15 @@ not_permitted:
4786 + error_keyring:
4787 + key_ref_put(keyring_r);
4788 + return ret;
4789 ++
4790 ++#else /* !TIF_NOTIFY_RESUME */
4791 ++ /*
4792 ++ * To be removed when TIF_NOTIFY_RESUME has been implemented on
4793 ++ * m68k/xtensa
4794 ++ */
4795 ++#warning TIF_NOTIFY_RESUME not implemented
4796 ++ return -EOPNOTSUPP;
4797 ++#endif /* !TIF_NOTIFY_RESUME */
4798 + }
4799 +
4800 + /*****************************************************************************/
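The TIF_NOTIFY_RESUME guard above makes keyctl_session_to_parent() fail
gracefully with -EOPNOTSUPP on architectures that do not yet implement the
flag (m68k/xtensa at the time) instead of breaking their builds. A tiny
stand-alone model of that compile-time fallback pattern (names are
illustrative, not kernel code):

    #include <errno.h>
    #include <stdio.h>

    /* #define HAVE_NOTIFY_RESUME 1 */     /* the "arch" defines this when supported */

    static long session_to_parent(void)
    {
    #ifdef HAVE_NOTIFY_RESUME
            return 0;                       /* the real work would happen here */
    #else
            return -EOPNOTSUPP;             /* facility missing on this arch */
    #endif
    }

    int main(void)
    {
            printf("%ld\n", session_to_parent());
            return 0;
    }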
4801 +diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
4802 +index 8691f4c..f1d9d16 100644
4803 +--- a/sound/mips/sgio2audio.c
4804 ++++ b/sound/mips/sgio2audio.c
4805 +@@ -609,7 +609,7 @@ static int snd_sgio2audio_pcm_hw_params(struct snd_pcm_substream *substream,
4806 + /* alloc virtual 'dma' area */
4807 + if (runtime->dma_area)
4808 + vfree(runtime->dma_area);
4809 +- runtime->dma_area = vmalloc(size);
4810 ++ runtime->dma_area = vmalloc_user(size);
4811 + if (runtime->dma_area == NULL)
4812 + return -ENOMEM;
4813 + runtime->dma_bytes = size;
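Here, and in the pdaudiocf and usbaudio hunks further down, the intermediate
PCM buffer is switched to vmalloc_user()/vmalloc_32_user() because it is later
mmap()ed to user space: the _user variants return zeroed memory flagged for
remap_vmalloc_range(), so stale kernel data cannot leak through the mapping.
A kernel-side sketch of the resulting allocation pattern (illustration only,
not a standalone program):

    #include <linux/vmalloc.h>

    /* Allocate a PCM intermediate buffer that will be mmap()ed to user space. */
    static void *alloc_mmapable_pcm_buffer(unsigned long size)
    {
            /* zeroed and flagged VM_USERMAP, so remap_vmalloc_range() accepts it */
            return vmalloc_user(size);
    }

    static void free_pcm_buffer(void *buf)
    {
            vfree(buf);                     /* vfree(NULL) is a no-op */
    }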
4814 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4815 +index 7058371..e40d31f 100644
4816 +--- a/sound/pci/hda/patch_realtek.c
4817 ++++ b/sound/pci/hda/patch_realtek.c
4818 +@@ -9141,6 +9141,8 @@ static struct alc_config_preset alc882_presets[] = {
4819 + .dac_nids = alc883_dac_nids,
4820 + .num_adc_nids = ARRAY_SIZE(alc889_adc_nids),
4821 + .adc_nids = alc889_adc_nids,
4822 ++ .capsrc_nids = alc889_capsrc_nids,
4823 ++ .capsrc_nids = alc889_capsrc_nids,
4824 + .dig_out_nid = ALC883_DIGOUT_NID,
4825 + .dig_in_nid = ALC883_DIGIN_NID,
4826 + .slave_dig_outs = alc883_slave_dig_outs,
4827 +@@ -9187,6 +9189,7 @@ static struct alc_config_preset alc882_presets[] = {
4828 + .dac_nids = alc883_dac_nids,
4829 + .adc_nids = alc883_adc_nids_alt,
4830 + .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
4831 ++ .capsrc_nids = alc883_capsrc_nids,
4832 + .dig_out_nid = ALC883_DIGOUT_NID,
4833 + .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
4834 + .channel_mode = alc883_3ST_2ch_modes,
4835 +@@ -9333,6 +9336,7 @@ static struct alc_config_preset alc882_presets[] = {
4836 + .dac_nids = alc883_dac_nids,
4837 + .adc_nids = alc883_adc_nids_alt,
4838 + .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
4839 ++ .capsrc_nids = alc883_capsrc_nids,
4840 + .num_channel_mode = ARRAY_SIZE(alc883_sixstack_modes),
4841 + .channel_mode = alc883_sixstack_modes,
4842 + .input_mux = &alc883_capture_source,
4843 +@@ -9394,6 +9398,7 @@ static struct alc_config_preset alc882_presets[] = {
4844 + .dac_nids = alc883_dac_nids,
4845 + .adc_nids = alc883_adc_nids_alt,
4846 + .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
4847 ++ .capsrc_nids = alc883_capsrc_nids,
4848 + .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
4849 + .channel_mode = alc883_3ST_2ch_modes,
4850 + .input_mux = &alc883_lenovo_101e_capture_source,
4851 +@@ -9573,6 +9578,7 @@ static struct alc_config_preset alc882_presets[] = {
4852 + alc880_gpio1_init_verbs },
4853 + .adc_nids = alc883_adc_nids,
4854 + .num_adc_nids = ARRAY_SIZE(alc883_adc_nids),
4855 ++ .capsrc_nids = alc883_capsrc_nids,
4856 + .dac_nids = alc883_dac_nids,
4857 + .num_dacs = ARRAY_SIZE(alc883_dac_nids),
4858 + .channel_mode = alc889A_mb31_6ch_modes,
4859 +diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
4860 +index d057e64..5cfa608 100644
4861 +--- a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
4862 ++++ b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
4863 +@@ -51,7 +51,7 @@ static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs, size_t s
4864 + return 0; /* already enough large */
4865 + vfree(runtime->dma_area);
4866 + }
4867 +- runtime->dma_area = vmalloc_32(size);
4868 ++ runtime->dma_area = vmalloc_32_user(size);
4869 + if (! runtime->dma_area)
4870 + return -ENOMEM;
4871 + runtime->dma_bytes = size;
4872 +diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
4873 +index 98d663a..b0bd1c0 100644
4874 +--- a/sound/soc/codecs/wm8974.c
4875 ++++ b/sound/soc/codecs/wm8974.c
4876 +@@ -47,7 +47,7 @@ static const u16 wm8974_reg[WM8974_CACHEREGNUM] = {
4877 + };
4878 +
4879 + #define WM8974_POWER1_BIASEN 0x08
4880 +-#define WM8974_POWER1_BUFIOEN 0x10
4881 ++#define WM8974_POWER1_BUFIOEN 0x04
4882 +
4883 + struct wm8974_priv {
4884 + struct snd_soc_codec codec;
4885 +diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
4886 +index 1fd4e88..e9123f5 100644
4887 +--- a/sound/soc/codecs/wm9712.c
4888 ++++ b/sound/soc/codecs/wm9712.c
4889 +@@ -464,7 +464,8 @@ static int ac97_write(struct snd_soc_codec *codec, unsigned int reg,
4890 + {
4891 + u16 *cache = codec->reg_cache;
4892 +
4893 +- soc_ac97_ops.write(codec->ac97, reg, val);
4894 ++ if (reg < 0x7c)
4895 ++ soc_ac97_ops.write(codec->ac97, reg, val);
4896 + reg = reg >> 1;
4897 + if (reg < (ARRAY_SIZE(wm9712_reg)))
4898 + cache[reg] = val;
4899 +diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
4900 +index 8db0374..8803d9d 100644
4901 +--- a/sound/usb/usbaudio.c
4902 ++++ b/sound/usb/usbaudio.c
4903 +@@ -752,7 +752,7 @@ static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs, size_t s
4904 + return 0; /* already large enough */
4905 + vfree(runtime->dma_area);
4906 + }
4907 +- runtime->dma_area = vmalloc(size);
4908 ++ runtime->dma_area = vmalloc_user(size);
4909 + if (!runtime->dma_area)
4910 + return -ENOMEM;
4911 + runtime->dma_bytes = size;