From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1347 - genpatches-2.6/trunk/2.6.26
Date: Fri, 10 Oct 2008 23:49:42
Message-Id: E1KoRjl-0001Dd-Sj@stork.gentoo.org
1 Author: mpagano
2 Date: 2008-10-10 23:49:37 +0000 (Fri, 10 Oct 2008)
3 New Revision: 1347
4
5 Added:
6 genpatches-2.6/trunk/2.6.26/1005_linux-2.6.26.6.patch
7 Modified:
8 genpatches-2.6/trunk/2.6.26/0000_README
9 Log:
10 Adding linux-2.6.26.6
11
12 Modified: genpatches-2.6/trunk/2.6.26/0000_README
13 ===================================================================
14 --- genpatches-2.6/trunk/2.6.26/0000_README 2008-10-06 20:45:07 UTC (rev 1346)
15 +++ genpatches-2.6/trunk/2.6.26/0000_README 2008-10-10 23:49:37 UTC (rev 1347)
16 @@ -59,6 +59,10 @@
17 From: http://www.kernel.org
18 Desc: Linux 2.6.26.5
19
20 +Patch: 1005_linux-2.6.26.6.patch
21 +From: http://www.kernel.org
22 +Desc: Linux 2.6.26.6
23 +
24 Patch: 1900_UTC-timestamp-option.patch
25 From: http://bugs.gentoo.org/233307
26 Desc: Fix to add UTC timestamp option
27
28 Added: genpatches-2.6/trunk/2.6.26/1005_linux-2.6.26.6.patch
29 ===================================================================
30 --- genpatches-2.6/trunk/2.6.26/1005_linux-2.6.26.6.patch (rev 0)
31 +++ genpatches-2.6/trunk/2.6.26/1005_linux-2.6.26.6.patch 2008-10-10 23:49:37 UTC (rev 1347)
32 @@ -0,0 +1,3097 @@
33 +diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
34 +index 419aef9..7731b82 100644
35 +--- a/arch/s390/kernel/compat_ptrace.h
36 ++++ b/arch/s390/kernel/compat_ptrace.h
37 +@@ -42,6 +42,7 @@ struct user_regs_struct32
38 + u32 gprs[NUM_GPRS];
39 + u32 acrs[NUM_ACRS];
40 + u32 orig_gpr2;
41 ++ /* nb: there's a 4-byte hole here */
42 + s390_fp_regs fp_regs;
43 + /*
44 + * These PER registers are in here so that gdb can modify them
45 +diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
46 +index 35827b9..75fea19 100644
47 +--- a/arch/s390/kernel/ptrace.c
48 ++++ b/arch/s390/kernel/ptrace.c
49 +@@ -177,6 +177,13 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
50 + */
51 + tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
52 +
53 ++ } else if (addr < (addr_t) &dummy->regs.fp_regs) {
54 ++ /*
55 ++ * prevent reads of padding hole between
56 ++ * orig_gpr2 and fp_regs on s390.
57 ++ */
58 ++ tmp = 0;
59 ++
60 + } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
61 + /*
62 + * floating point regs. are stored in the thread structure
63 +@@ -268,6 +275,13 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
64 + */
65 + task_pt_regs(child)->orig_gpr2 = data;
66 +
67 ++ } else if (addr < (addr_t) &dummy->regs.fp_regs) {
68 ++ /*
69 ++ * prevent writes of padding hole between
70 ++ * orig_gpr2 and fp_regs on s390.
71 ++ */
72 ++ return 0;
73 ++
74 + } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
75 + /*
76 + * floating point regs. are stored in the thread structure
77 +@@ -409,6 +423,13 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
78 + */
79 + tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
80 +
81 ++ } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
82 ++ /*
83 ++ * prevent reads of padding hole between
84 ++ * orig_gpr2 and fp_regs on s390.
85 ++ */
86 ++ tmp = 0;
87 ++
88 + } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
89 + /*
90 + * floating point regs. are stored in the thread structure
91 +@@ -488,6 +509,13 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
92 + */
93 + *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
94 +
95 ++ } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
96 ++ /*
96 ++ * prevent writes of padding hole between
98 ++ * orig_gpr2 and fp_regs on s390.
99 ++ */
100 ++ return 0;
101 ++
102 + } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
103 + /*
104 + * floating point regs. are stored in the thread structure
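
An illustrative, standalone sketch of the layout issue guarded against above (hypothetical struct, not the kernel's): a 4-byte member followed by an 8-byte-aligned member leaves a compiler-inserted hole, which is the gap between orig_gpr2 and fp_regs that ptrace must now refuse to expose.

	#include <stddef.h>

	struct layout_demo {
		unsigned int orig_gpr2;		/* 4 bytes, ends on a 4-byte boundary */
		_Alignas(8) double fp_regs;	/* forced to the next 8-byte boundary,
						 * mirroring s390_fp_regs alignment on
						 * a 64-bit kernel */
	};

	/* the compiler inserts 4 bytes of padding between the members */
	_Static_assert(offsetof(struct layout_demo, fp_regs) == 8,
		       "4-byte hole between orig_gpr2 and fp_regs");
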
105 +diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
106 +index d569f60..b456609 100644
107 +--- a/arch/sparc64/kernel/of_device.c
108 ++++ b/arch/sparc64/kernel/of_device.c
109 +@@ -170,7 +170,7 @@ static unsigned int of_bus_default_get_flags(const u32 *addr)
110 +
111 + static int of_bus_pci_match(struct device_node *np)
112 + {
113 +- if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) {
114 ++ if (!strcmp(np->name, "pci")) {
115 + const char *model = of_get_property(np, "model", NULL);
116 +
117 + if (model && !strcmp(model, "SUNW,simba"))
118 +@@ -201,7 +201,7 @@ static int of_bus_simba_match(struct device_node *np)
119 + /* Treat PCI busses lacking ranges property just like
120 + * simba.
121 + */
122 +- if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) {
123 ++ if (!strcmp(np->name, "pci")) {
124 + if (!of_find_property(np, "ranges", NULL))
125 + return 1;
126 + }
127 +@@ -426,7 +426,7 @@ static int __init use_1to1_mapping(struct device_node *pp)
128 + * it lacks a ranges property, and this will include
129 + * cases like Simba.
130 + */
131 +- if (!strcmp(pp->type, "pci") || !strcmp(pp->type, "pciex"))
132 ++ if (!strcmp(pp->name, "pci"))
133 + return 0;
134 +
135 + return 1;
136 +@@ -709,8 +709,7 @@ static unsigned int __init build_one_device_irq(struct of_device *op,
137 + break;
138 + }
139 + } else {
140 +- if (!strcmp(pp->type, "pci") ||
141 +- !strcmp(pp->type, "pciex")) {
142 ++ if (!strcmp(pp->name, "pci")) {
143 + unsigned int this_orig_irq = irq;
144 +
145 + irq = pci_irq_swizzle(dp, pp, irq);
146 +diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
147 +index 112b09f..2db2148 100644
148 +--- a/arch/sparc64/kernel/pci.c
149 ++++ b/arch/sparc64/kernel/pci.c
150 +@@ -425,7 +425,7 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
151 + dev->current_state = 4; /* unknown power state */
152 + dev->error_state = pci_channel_io_normal;
153 +
154 +- if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
155 ++ if (!strcmp(node->name, "pci")) {
156 + /* a PCI-PCI bridge */
157 + dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
158 + dev->rom_base_reg = PCI_ROM_ADDRESS1;
159 +diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
160 +index 994dbe0..21128cf 100644
161 +--- a/arch/sparc64/kernel/pci_psycho.c
162 ++++ b/arch/sparc64/kernel/pci_psycho.c
163 +@@ -575,7 +575,7 @@ static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm, int is_pbm
164 + {
165 + unsigned long csr_reg, csr, csr_error_bits;
166 + irqreturn_t ret = IRQ_NONE;
167 +- u16 stat;
168 ++ u16 stat, *addr;
169 +
170 + if (is_pbm_a) {
171 + csr_reg = pbm->controller_regs + PSYCHO_PCIA_CTRL;
172 +@@ -597,7 +597,9 @@ static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm, int is_pbm
173 + printk("%s: PCI SERR signal asserted.\n", pbm->name);
174 + ret = IRQ_HANDLED;
175 + }
176 +- pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
177 ++ addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
178 ++ 0, PCI_STATUS);
179 ++ pci_config_read16(addr, &stat);
180 + if (stat & (PCI_STATUS_PARITY |
181 + PCI_STATUS_SIG_TARGET_ABORT |
182 + PCI_STATUS_REC_TARGET_ABORT |
183 +@@ -605,7 +607,7 @@ static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm, int is_pbm
184 + PCI_STATUS_SIG_SYSTEM_ERROR)) {
185 + printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
186 + pbm->name, stat);
187 +- pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
188 ++ pci_config_write16(addr, 0xffff);
189 + ret = IRQ_HANDLED;
190 + }
191 + return ret;
192 +@@ -744,16 +746,16 @@ static void psycho_register_error_handlers(struct pci_pbm_info *pbm)
193 + * the second will just error out since we do not pass in
194 + * IRQF_SHARED.
195 + */
196 +- err = request_irq(op->irqs[1], psycho_ue_intr, 0,
197 ++ err = request_irq(op->irqs[1], psycho_ue_intr, IRQF_SHARED,
198 + "PSYCHO_UE", pbm);
199 +- err = request_irq(op->irqs[2], psycho_ce_intr, 0,
200 ++ err = request_irq(op->irqs[2], psycho_ce_intr, IRQF_SHARED,
201 + "PSYCHO_CE", pbm);
202 +
203 + /* This one, however, ought not to fail. We can just warn
204 + * about it since the system can still operate properly even
205 + * if this fails.
206 + */
207 +- err = request_irq(op->irqs[0], psycho_pcierr_intr, 0,
208 ++ err = request_irq(op->irqs[0], psycho_pcierr_intr, IRQF_SHARED,
209 + "PSYCHO_PCIERR", pbm);
210 + if (err)
211 + printk(KERN_WARNING "%s: Could not register PCIERR, "
212 +diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
213 +index ed03a18..a72f793 100644
214 +--- a/arch/sparc64/kernel/prom.c
215 ++++ b/arch/sparc64/kernel/prom.c
216 +@@ -156,55 +156,11 @@ static unsigned long psycho_pcislot_imap_offset(unsigned long ino)
217 + return PSYCHO_IMAP_B_SLOT0 + (slot * 8);
218 + }
219 +
220 +-#define PSYCHO_IMAP_SCSI 0x1000UL
221 +-#define PSYCHO_IMAP_ETH 0x1008UL
222 +-#define PSYCHO_IMAP_BPP 0x1010UL
223 +-#define PSYCHO_IMAP_AU_REC 0x1018UL
224 +-#define PSYCHO_IMAP_AU_PLAY 0x1020UL
225 +-#define PSYCHO_IMAP_PFAIL 0x1028UL
226 +-#define PSYCHO_IMAP_KMS 0x1030UL
227 +-#define PSYCHO_IMAP_FLPY 0x1038UL
228 +-#define PSYCHO_IMAP_SHW 0x1040UL
229 +-#define PSYCHO_IMAP_KBD 0x1048UL
230 +-#define PSYCHO_IMAP_MS 0x1050UL
231 +-#define PSYCHO_IMAP_SER 0x1058UL
232 +-#define PSYCHO_IMAP_TIM0 0x1060UL
233 +-#define PSYCHO_IMAP_TIM1 0x1068UL
234 +-#define PSYCHO_IMAP_UE 0x1070UL
235 +-#define PSYCHO_IMAP_CE 0x1078UL
236 +-#define PSYCHO_IMAP_A_ERR 0x1080UL
237 +-#define PSYCHO_IMAP_B_ERR 0x1088UL
238 +-#define PSYCHO_IMAP_PMGMT 0x1090UL
239 +-#define PSYCHO_IMAP_GFX 0x1098UL
240 +-#define PSYCHO_IMAP_EUPA 0x10a0UL
241 +-
242 +-static unsigned long __psycho_onboard_imap_off[] = {
243 +-/*0x20*/ PSYCHO_IMAP_SCSI,
244 +-/*0x21*/ PSYCHO_IMAP_ETH,
245 +-/*0x22*/ PSYCHO_IMAP_BPP,
246 +-/*0x23*/ PSYCHO_IMAP_AU_REC,
247 +-/*0x24*/ PSYCHO_IMAP_AU_PLAY,
248 +-/*0x25*/ PSYCHO_IMAP_PFAIL,
249 +-/*0x26*/ PSYCHO_IMAP_KMS,
250 +-/*0x27*/ PSYCHO_IMAP_FLPY,
251 +-/*0x28*/ PSYCHO_IMAP_SHW,
252 +-/*0x29*/ PSYCHO_IMAP_KBD,
253 +-/*0x2a*/ PSYCHO_IMAP_MS,
254 +-/*0x2b*/ PSYCHO_IMAP_SER,
255 +-/*0x2c*/ PSYCHO_IMAP_TIM0,
256 +-/*0x2d*/ PSYCHO_IMAP_TIM1,
257 +-/*0x2e*/ PSYCHO_IMAP_UE,
258 +-/*0x2f*/ PSYCHO_IMAP_CE,
259 +-/*0x30*/ PSYCHO_IMAP_A_ERR,
260 +-/*0x31*/ PSYCHO_IMAP_B_ERR,
261 +-/*0x32*/ PSYCHO_IMAP_PMGMT,
262 +-/*0x33*/ PSYCHO_IMAP_GFX,
263 +-/*0x34*/ PSYCHO_IMAP_EUPA,
264 +-};
265 ++#define PSYCHO_OBIO_IMAP_BASE 0x1000UL
266 ++
267 + #define PSYCHO_ONBOARD_IRQ_BASE 0x20
268 +-#define PSYCHO_ONBOARD_IRQ_LAST 0x34
269 + #define psycho_onboard_imap_offset(__ino) \
270 +- __psycho_onboard_imap_off[(__ino) - PSYCHO_ONBOARD_IRQ_BASE]
271 ++ (PSYCHO_OBIO_IMAP_BASE + (((__ino) & 0x1f) << 3))
272 +
273 + #define PSYCHO_ICLR_A_SLOT0 0x1400UL
274 + #define PSYCHO_ICLR_SCSI 0x1800UL
275 +@@ -228,10 +184,6 @@ static unsigned int psycho_irq_build(struct device_node *dp,
276 + imap_off = psycho_pcislot_imap_offset(ino);
277 + } else {
278 + /* Onboard device */
279 +- if (ino > PSYCHO_ONBOARD_IRQ_LAST) {
280 +- prom_printf("psycho_irq_build: Wacky INO [%x]\n", ino);
281 +- prom_halt();
282 +- }
283 + imap_off = psycho_onboard_imap_offset(ino);
284 + }
285 +
286 +@@ -318,23 +270,6 @@ static void sabre_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
287 +
288 + #define SABRE_IMAP_A_SLOT0 0x0c00UL
289 + #define SABRE_IMAP_B_SLOT0 0x0c20UL
290 +-#define SABRE_IMAP_SCSI 0x1000UL
291 +-#define SABRE_IMAP_ETH 0x1008UL
292 +-#define SABRE_IMAP_BPP 0x1010UL
293 +-#define SABRE_IMAP_AU_REC 0x1018UL
294 +-#define SABRE_IMAP_AU_PLAY 0x1020UL
295 +-#define SABRE_IMAP_PFAIL 0x1028UL
296 +-#define SABRE_IMAP_KMS 0x1030UL
297 +-#define SABRE_IMAP_FLPY 0x1038UL
298 +-#define SABRE_IMAP_SHW 0x1040UL
299 +-#define SABRE_IMAP_KBD 0x1048UL
300 +-#define SABRE_IMAP_MS 0x1050UL
301 +-#define SABRE_IMAP_SER 0x1058UL
302 +-#define SABRE_IMAP_UE 0x1070UL
303 +-#define SABRE_IMAP_CE 0x1078UL
304 +-#define SABRE_IMAP_PCIERR 0x1080UL
305 +-#define SABRE_IMAP_GFX 0x1098UL
306 +-#define SABRE_IMAP_EUPA 0x10a0UL
307 + #define SABRE_ICLR_A_SLOT0 0x1400UL
308 + #define SABRE_ICLR_B_SLOT0 0x1480UL
309 + #define SABRE_ICLR_SCSI 0x1800UL
310 +@@ -364,33 +299,10 @@ static unsigned long sabre_pcislot_imap_offset(unsigned long ino)
311 + return SABRE_IMAP_B_SLOT0 + (slot * 8);
312 + }
313 +
314 +-static unsigned long __sabre_onboard_imap_off[] = {
315 +-/*0x20*/ SABRE_IMAP_SCSI,
316 +-/*0x21*/ SABRE_IMAP_ETH,
317 +-/*0x22*/ SABRE_IMAP_BPP,
318 +-/*0x23*/ SABRE_IMAP_AU_REC,
319 +-/*0x24*/ SABRE_IMAP_AU_PLAY,
320 +-/*0x25*/ SABRE_IMAP_PFAIL,
321 +-/*0x26*/ SABRE_IMAP_KMS,
322 +-/*0x27*/ SABRE_IMAP_FLPY,
323 +-/*0x28*/ SABRE_IMAP_SHW,
324 +-/*0x29*/ SABRE_IMAP_KBD,
325 +-/*0x2a*/ SABRE_IMAP_MS,
326 +-/*0x2b*/ SABRE_IMAP_SER,
327 +-/*0x2c*/ 0 /* reserved */,
328 +-/*0x2d*/ 0 /* reserved */,
329 +-/*0x2e*/ SABRE_IMAP_UE,
330 +-/*0x2f*/ SABRE_IMAP_CE,
331 +-/*0x30*/ SABRE_IMAP_PCIERR,
332 +-/*0x31*/ 0 /* reserved */,
333 +-/*0x32*/ 0 /* reserved */,
334 +-/*0x33*/ SABRE_IMAP_GFX,
335 +-/*0x34*/ SABRE_IMAP_EUPA,
336 +-};
337 +-#define SABRE_ONBOARD_IRQ_BASE 0x20
338 +-#define SABRE_ONBOARD_IRQ_LAST 0x30
339 ++#define SABRE_OBIO_IMAP_BASE 0x1000UL
340 ++#define SABRE_ONBOARD_IRQ_BASE 0x20
341 + #define sabre_onboard_imap_offset(__ino) \
342 +- __sabre_onboard_imap_off[(__ino) - SABRE_ONBOARD_IRQ_BASE]
343 ++ (SABRE_OBIO_IMAP_BASE + (((__ino) & 0x1f) << 3))
344 +
345 + #define sabre_iclr_offset(ino) \
346 + ((ino & 0x20) ? (SABRE_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
347 +@@ -453,10 +365,6 @@ static unsigned int sabre_irq_build(struct device_node *dp,
348 + imap_off = sabre_pcislot_imap_offset(ino);
349 + } else {
350 + /* onboard device */
351 +- if (ino > SABRE_ONBOARD_IRQ_LAST) {
352 +- prom_printf("sabre_irq_build: Wacky INO [%x]\n", ino);
353 +- prom_halt();
354 +- }
355 + imap_off = sabre_onboard_imap_offset(ino);
356 + }
357 +
358 +diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
359 +index 65c7857..d5ccf42 100644
360 +--- a/arch/x86/kernel/alternative.c
361 ++++ b/arch/x86/kernel/alternative.c
362 +@@ -1,6 +1,6 @@
363 + #include <linux/module.h>
364 + #include <linux/sched.h>
365 +-#include <linux/spinlock.h>
366 ++#include <linux/mutex.h>
367 + #include <linux/list.h>
368 + #include <linux/kprobes.h>
369 + #include <linux/mm.h>
370 +@@ -279,7 +279,7 @@ struct smp_alt_module {
371 + struct list_head next;
372 + };
373 + static LIST_HEAD(smp_alt_modules);
374 +-static DEFINE_SPINLOCK(smp_alt);
375 ++static DEFINE_MUTEX(smp_alt);
376 + static int smp_mode = 1; /* protected by smp_alt */
377 +
378 + void alternatives_smp_module_add(struct module *mod, char *name,
379 +@@ -312,12 +312,12 @@ void alternatives_smp_module_add(struct module *mod, char *name,
380 + __func__, smp->locks, smp->locks_end,
381 + smp->text, smp->text_end, smp->name);
382 +
383 +- spin_lock(&smp_alt);
384 ++ mutex_lock(&smp_alt);
385 + list_add_tail(&smp->next, &smp_alt_modules);
386 + if (boot_cpu_has(X86_FEATURE_UP))
387 + alternatives_smp_unlock(smp->locks, smp->locks_end,
388 + smp->text, smp->text_end);
389 +- spin_unlock(&smp_alt);
390 ++ mutex_unlock(&smp_alt);
391 + }
392 +
393 + void alternatives_smp_module_del(struct module *mod)
394 +@@ -327,17 +327,17 @@ void alternatives_smp_module_del(struct module *mod)
395 + if (smp_alt_once || noreplace_smp)
396 + return;
397 +
398 +- spin_lock(&smp_alt);
399 ++ mutex_lock(&smp_alt);
400 + list_for_each_entry(item, &smp_alt_modules, next) {
401 + if (mod != item->mod)
402 + continue;
403 + list_del(&item->next);
404 +- spin_unlock(&smp_alt);
405 ++ mutex_unlock(&smp_alt);
406 + DPRINTK("%s: %s\n", __func__, item->name);
407 + kfree(item);
408 + return;
409 + }
410 +- spin_unlock(&smp_alt);
411 ++ mutex_unlock(&smp_alt);
412 + }
413 +
414 + void alternatives_smp_switch(int smp)
415 +@@ -359,7 +359,7 @@ void alternatives_smp_switch(int smp)
416 + return;
417 + BUG_ON(!smp && (num_online_cpus() > 1));
418 +
419 +- spin_lock(&smp_alt);
420 ++ mutex_lock(&smp_alt);
421 +
422 + /*
423 + * Avoid unnecessary switches because it forces JIT based VMs to
424 +@@ -383,7 +383,7 @@ void alternatives_smp_switch(int smp)
425 + mod->text, mod->text_end);
426 + }
427 + smp_mode = smp;
428 +- spin_unlock(&smp_alt);
429 ++ mutex_unlock(&smp_alt);
430 + }
431 +
432 + #endif
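
For context on the spinlock-to-mutex conversion: the usual reason for such a change is that the critical section can sleep, which is illegal while holding a spinlock. A schematic of the rule, assuming standard kernel locking semantics rather than quoting this file:

	static DEFINE_MUTEX(smp_alt);	/* holder may sleep */

	mutex_lock(&smp_alt);
	/* code here may block -- e.g. memory allocation or text patching.
	 * Under the old DEFINE_SPINLOCK(smp_alt) the same code would risk
	 * "scheduling while atomic" */
	mutex_unlock(&smp_alt);
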
433 +diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
434 +index 4b99b1b..c17fdb0 100644
435 +--- a/arch/x86/kernel/apic_32.c
436 ++++ b/arch/x86/kernel/apic_32.c
437 +@@ -552,8 +552,31 @@ void __init setup_boot_APIC_clock(void)
438 + setup_APIC_timer();
439 + }
440 +
441 +-void __devinit setup_secondary_APIC_clock(void)
442 ++/*
443 ++ * AMD C1E enabled CPUs have a real nasty problem: Some BIOSes set the
444 ++ * C1E flag only in the secondary CPU, so when we detect the wreckage
445 ++ * we already have enabled the boot CPU local apic timer. Check if
446 ++ * disable_apic_timer is set and the DUMMY flag is cleared. If yes,
447 ++ * set the DUMMY flag again and force the broadcast mode in the
448 ++ * clockevents layer.
449 ++ */
450 ++static void __cpuinit check_boot_apic_timer_broadcast(void)
451 + {
452 ++ if (!local_apic_timer_disabled ||
453 ++ (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY))
454 ++ return;
455 ++
456 ++ lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;
457 ++
458 ++ local_irq_enable();
459 ++ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
460 ++ &boot_cpu_physical_apicid);
461 ++ local_irq_disable();
462 ++}
463 ++
464 ++void __cpuinit setup_secondary_APIC_clock(void)
465 ++{
466 ++ check_boot_apic_timer_broadcast();
467 + setup_APIC_timer();
468 + }
469 +
470 +@@ -1513,6 +1536,9 @@ void __cpuinit generic_processor_info(int apicid, int version)
471 + */
472 + cpu = 0;
473 +
474 ++ if (apicid > max_physical_apicid)
475 ++ max_physical_apicid = apicid;
476 ++
477 + /*
478 + * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
479 + * but we need to work other dependencies like SMP_SUSPEND etc
480 +@@ -1520,7 +1546,7 @@ void __cpuinit generic_processor_info(int apicid, int version)
481 + * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
482 + * - Ashok Raj <ashok.raj@×××××.com>
483 + */
484 +- if (num_processors > 8) {
485 ++ if (max_physical_apicid >= 8) {
486 + switch (boot_cpu_data.x86_vendor) {
487 + case X86_VENDOR_INTEL:
488 + if (!APIC_XAPIC(version)) {
489 +diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
490 +index 0633cfd..8472bdf 100644
491 +--- a/arch/x86/kernel/apic_64.c
492 ++++ b/arch/x86/kernel/apic_64.c
493 +@@ -1090,6 +1090,9 @@ void __cpuinit generic_processor_info(int apicid, int version)
494 + */
495 + cpu = 0;
496 + }
497 ++ if (apicid > max_physical_apicid)
498 ++ max_physical_apicid = apicid;
499 ++
500 + /* are we being called early in kernel startup? */
501 + if (x86_cpu_to_apicid_early_ptr) {
502 + u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
503 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
504 +index 170d2f5..912a84b 100644
505 +--- a/arch/x86/kernel/cpu/bugs.c
506 ++++ b/arch/x86/kernel/cpu/bugs.c
507 +@@ -50,6 +50,8 @@ static double __initdata y = 3145727.0;
508 + */
509 + static void __init check_fpu(void)
510 + {
511 ++ s32 fdiv_bug;
512 ++
513 + if (!boot_cpu_data.hard_math) {
514 + #ifndef CONFIG_MATH_EMULATION
515 + printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
516 +@@ -70,8 +72,10 @@ static void __init check_fpu(void)
517 + "fistpl %0\n\t"
518 + "fwait\n\t"
519 + "fninit"
520 +- : "=m" (*&boot_cpu_data.fdiv_bug)
521 ++ : "=m" (*&fdiv_bug)
522 + : "m" (*&x), "m" (*&y));
523 ++
524 ++ boot_cpu_data.fdiv_bug = fdiv_bug;
525 + if (boot_cpu_data.fdiv_bug)
526 + printk("Hmm, FPU with FDIV bug.\n");
527 + }
528 +diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c
529 +index ed733e7..a540c4e 100644
530 +--- a/arch/x86/kernel/e820_32.c
531 ++++ b/arch/x86/kernel/e820_32.c
532 +@@ -697,7 +697,7 @@ static int __init parse_memmap(char *arg)
533 + if (!arg)
534 + return -EINVAL;
535 +
536 +- if (strcmp(arg, "exactmap") == 0) {
537 ++ if (strncmp(arg, "exactmap", 8) == 0) {
538 + #ifdef CONFIG_CRASH_DUMP
539 + /* If we are doing a crash dump, we
540 + * still need to know the real mem
541 +diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
542 +index 124480c..4da8e2b 100644
543 +--- a/arch/x86/kernel/e820_64.c
544 ++++ b/arch/x86/kernel/e820_64.c
545 +@@ -776,7 +776,7 @@ static int __init parse_memmap_opt(char *p)
546 + char *oldp;
547 + unsigned long long start_at, mem_size;
548 +
549 +- if (!strcmp(p, "exactmap")) {
550 ++ if (!strncmp(p, "exactmap", 8)) {
551 + #ifdef CONFIG_CRASH_DUMP
552 + /*
553 + * If we are doing a crash dump, we still need to know
554 +diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
555 +index cbaaf69..1fa8be5 100644
556 +--- a/arch/x86/kernel/genapic_64.c
557 ++++ b/arch/x86/kernel/genapic_64.c
558 +@@ -51,7 +51,7 @@ void __init setup_apic_routing(void)
559 + else
560 + #endif
561 +
562 +- if (num_possible_cpus() <= 8)
563 ++ if (max_physical_apicid < 8)
564 + genapic = &apic_flat;
565 + else
566 + genapic = &apic_physflat;
567 +diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
568 +index e25c57b..d946c37 100644
569 +--- a/arch/x86/kernel/head64.c
570 ++++ b/arch/x86/kernel/head64.c
571 +@@ -135,6 +135,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
572 + BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
573 + BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
574 + (__START_KERNEL & PGDIR_MASK)));
575 ++ BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);
576 +
577 + /* clear bss before set_intr_gate with early_idt_handler */
578 + clear_bss();
579 +diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
580 +index 9b5cfcd..0f3e379 100644
581 +--- a/arch/x86/kernel/hpet.c
582 ++++ b/arch/x86/kernel/hpet.c
583 +@@ -223,8 +223,8 @@ static void hpet_legacy_clockevent_register(void)
584 + /* Calculate the min / max delta */
585 + hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
586 + &hpet_clockevent);
587 +- hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30,
588 +- &hpet_clockevent);
589 ++ /* 5 usec minimum reprogramming delta. */
590 ++ hpet_clockevent.min_delta_ns = 5000;
591 +
592 + /*
593 + * Start hpet with the boot cpu mask and make it
594 +@@ -283,15 +283,22 @@ static void hpet_legacy_set_mode(enum clock_event_mode mode,
595 + }
596 +
597 + static int hpet_legacy_next_event(unsigned long delta,
598 +- struct clock_event_device *evt)
599 ++ struct clock_event_device *evt)
600 + {
601 +- unsigned long cnt;
602 ++ u32 cnt;
603 +
604 + cnt = hpet_readl(HPET_COUNTER);
605 +- cnt += delta;
606 ++ cnt += (u32) delta;
607 + hpet_writel(cnt, HPET_T0_CMP);
608 +
609 +- return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0) ? -ETIME : 0;
610 ++ /*
611 ++ * We need to read back the CMP register to make sure that
612 ++ * what we wrote hit the chip before we compare it to the
613 ++ * counter.
614 ++ */
615 ++ WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt);
616 ++
617 ++ return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
618 + }
619 +
620 + /*
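
The rewritten return statement leans on wrap-safe modular arithmetic: subtracting two free-running 32-bit counter values gives the signed distance between them even across the 0xffffffff -> 0 rollover. A standalone illustration of the idiom:

	#include <assert.h>
	#include <stdint.h>

	/* "has counter already reached/passed cnt?", valid across wraparound */
	static int counter_passed(uint32_t counter, uint32_t cnt)
	{
		return (int32_t)(counter - cnt) >= 0;
	}

	int main(void)
	{
		assert(counter_passed(0x00000005, 0xfffffffb));	 /* wrapped and passed */
		assert(!counter_passed(0xfffffffb, 0x00000005)); /* not reached yet */
		return 0;
	}
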
621 +diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
622 +index 1c3a66a..720d260 100644
623 +--- a/arch/x86/kernel/io_delay.c
624 ++++ b/arch/x86/kernel/io_delay.c
625 +@@ -92,6 +92,14 @@ static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
626 + DMI_MATCH(DMI_BOARD_NAME, "30BF")
627 + }
628 + },
629 ++ {
630 ++ .callback = dmi_io_delay_0xed_port,
631 ++ .ident = "Presario F700",
632 ++ .matches = {
633 ++ DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
634 ++ DMI_MATCH(DMI_BOARD_NAME, "30D3")
635 ++ }
636 ++ },
637 + { }
638 + };
639 +
640 +diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
641 +index 404683b..d5b8691 100644
642 +--- a/arch/x86/kernel/mpparse.c
643 ++++ b/arch/x86/kernel/mpparse.c
644 +@@ -402,6 +402,11 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
645 + ++mpc_record;
646 + #endif
647 + }
648 ++
649 ++#ifdef CONFIG_X86_GENERICARCH
650 ++ generic_bigsmp_probe();
651 ++#endif
652 ++
653 + setup_apic_routing();
654 + if (!num_processors)
655 + printk(KERN_ERR "MPTABLE: no processors registered!\n");
656 +diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
657 +index 6f80b85..03e357a 100644
658 +--- a/arch/x86/kernel/setup.c
659 ++++ b/arch/x86/kernel/setup.c
660 +@@ -17,6 +17,7 @@ unsigned int num_processors;
661 + unsigned disabled_cpus __cpuinitdata;
662 + /* Processor that is doing the boot up */
663 + unsigned int boot_cpu_physical_apicid = -1U;
664 ++unsigned int max_physical_apicid;
665 + EXPORT_SYMBOL(boot_cpu_physical_apicid);
666 +
667 + DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
668 +diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
669 +index 5a2f8e0..3bf22f0 100644
670 +--- a/arch/x86/kernel/setup_32.c
671 ++++ b/arch/x86/kernel/setup_32.c
672 +@@ -914,6 +914,12 @@ void __init setup_arch(char **cmdline_p)
673 +
674 + #ifdef CONFIG_ACPI
675 + acpi_boot_init();
676 ++#endif
677 ++
678 ++#ifdef CONFIG_X86_LOCAL_APIC
679 ++ if (smp_found_config)
680 ++ get_smp_config();
681 ++#endif
682 +
683 + #if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
684 + if (def_to_bigsmp)
685 +@@ -921,11 +927,6 @@ void __init setup_arch(char **cmdline_p)
686 + "CONFIG_X86_PC cannot handle it.\nUse "
687 + "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
688 + #endif
689 +-#endif
690 +-#ifdef CONFIG_X86_LOCAL_APIC
691 +- if (smp_found_config)
692 +- get_smp_config();
693 +-#endif
694 +
695 + e820_register_memory();
696 + e820_mark_nosave_regions();
697 +diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
698 +index e53b267..c56034d 100644
699 +--- a/arch/x86/kernel/signal_64.c
700 ++++ b/arch/x86/kernel/signal_64.c
701 +@@ -53,6 +53,68 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
702 + return do_sigaltstack(uss, uoss, regs->sp);
703 + }
704 +
705 ++/*
706 ++ * Signal frame handlers.
707 ++ */
708 ++
709 ++static inline int save_i387(struct _fpstate __user *buf)
710 ++{
711 ++ struct task_struct *tsk = current;
712 ++ int err = 0;
713 ++
714 ++ BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
715 ++ sizeof(tsk->thread.xstate->fxsave));
716 ++
717 ++ if ((unsigned long)buf % 16)
718 ++ printk("save_i387: bad fpstate %p\n", buf);
719 ++
720 ++ if (!used_math())
721 ++ return 0;
722 ++ clear_used_math(); /* trigger finit */
723 ++ if (task_thread_info(tsk)->status & TS_USEDFPU) {
724 ++ err = save_i387_checking((struct i387_fxsave_struct __user *)
725 ++ buf);
726 ++ if (err)
727 ++ return err;
728 ++ task_thread_info(tsk)->status &= ~TS_USEDFPU;
729 ++ stts();
730 ++ } else {
731 ++ if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
732 ++ sizeof(struct i387_fxsave_struct)))
733 ++ return -1;
734 ++ }
735 ++ return 1;
736 ++}
737 ++
738 ++/*
739 ++ * This restores directly out of user space. Exceptions are handled.
740 ++ */
741 ++static inline int restore_i387(struct _fpstate __user *buf)
742 ++{
743 ++ struct task_struct *tsk = current;
744 ++ int err;
745 ++
746 ++ if (!used_math()) {
747 ++ err = init_fpu(tsk);
748 ++ if (err)
749 ++ return err;
750 ++ }
751 ++
752 ++ if (!(task_thread_info(current)->status & TS_USEDFPU)) {
753 ++ clts();
754 ++ task_thread_info(current)->status |= TS_USEDFPU;
755 ++ }
756 ++ err = restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
757 ++ if (unlikely(err)) {
758 ++ /*
759 ++ * Encountered an error while doing the restore from the
760 ++ * user buffer; clear the fpu state.
761 ++ */
762 ++ clear_fpu(tsk);
763 ++ clear_used_math();
764 ++ }
765 ++ return err;
766 ++}
767 +
768 + /*
769 + * Do a signal return; undo the signal stack.
770 +diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
771 +index adff76e..9e26f39 100644
772 +--- a/arch/x86/kernel/traps_64.c
773 ++++ b/arch/x86/kernel/traps_64.c
774 +@@ -1141,7 +1141,14 @@ asmlinkage void math_state_restore(void)
775 + }
776 +
777 + clts(); /* Allow maths ops (or we recurse) */
778 +- restore_fpu_checking(&me->thread.xstate->fxsave);
779 ++ /*
780 ++ * Paranoid restore: send a SIGSEGV if we fail to restore the state.
781 ++ */
782 ++ if (unlikely(restore_fpu_checking(&me->thread.xstate->fxsave))) {
783 ++ stts();
784 ++ force_sig(SIGSEGV, me);
785 ++ return;
786 ++ }
787 + task_thread_info(me)->status |= TS_USEDFPU;
788 + me->fpu_counter++;
789 + }
790 +diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
791 +index 956f389..9b3e795 100644
792 +--- a/arch/x86/kernel/vmi_32.c
793 ++++ b/arch/x86/kernel/vmi_32.c
794 +@@ -234,7 +234,7 @@ static void vmi_write_ldt_entry(struct desc_struct *dt, int entry,
795 + const void *desc)
796 + {
797 + u32 *ldt_entry = (u32 *)desc;
798 +- vmi_ops.write_idt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
799 ++ vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
800 + }
801 +
802 + static void vmi_load_sp0(struct tss_struct *tss,
803 +diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
804 +index ba8c0b7..a3c9869 100644
805 +--- a/arch/x86/kernel/vsmp_64.c
806 ++++ b/arch/x86/kernel/vsmp_64.c
807 +@@ -58,7 +58,7 @@ static void vsmp_irq_enable(void)
808 + native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
809 + }
810 +
811 +-static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf,
812 ++static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
813 + unsigned long addr, unsigned len)
814 + {
815 + switch (type) {
816 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
817 +index 7d6071d..45e2280 100644
818 +--- a/arch/x86/kvm/svm.c
819 ++++ b/arch/x86/kvm/svm.c
820 +@@ -60,6 +60,7 @@ static int npt = 1;
821 + module_param(npt, int, S_IRUGO);
822 +
823 + static void kvm_reput_irq(struct vcpu_svm *svm);
824 ++static void svm_flush_tlb(struct kvm_vcpu *vcpu);
825 +
826 + static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
827 + {
828 +@@ -879,6 +880,10 @@ set:
829 + static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
830 + {
831 + unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
832 ++ unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
833 ++
834 ++ if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
835 ++ force_new_asid(vcpu);
836 +
837 + vcpu->arch.cr4 = cr4;
838 + if (!npt_enabled)
839 +@@ -1017,6 +1022,15 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
840 +
841 + fault_address = svm->vmcb->control.exit_info_2;
842 + error_code = svm->vmcb->control.exit_info_1;
843 ++
844 ++ /*
845 ++ * FIXME: This shouldn't be necessary here, but there is a flush
846 ++ * missing in the MMU code. Until we find this bug, flush the
847 ++ * complete TLB here on an NPF
848 ++ */
849 ++ if (npt_enabled)
850 ++ svm_flush_tlb(&svm->vcpu);
851 ++
852 + if (event_injection)
853 + kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
854 + return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
855 +diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c
856 +index 95fc463..2a24301 100644
857 +--- a/arch/x86/mach-generic/bigsmp.c
858 ++++ b/arch/x86/mach-generic/bigsmp.c
859 +@@ -48,7 +48,7 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
860 + static int probe_bigsmp(void)
861 + {
862 + if (def_to_bigsmp)
863 +- dmi_bigsmp = 1;
864 ++ dmi_bigsmp = 1;
865 + else
866 + dmi_check_system(bigsmp_dmi_table);
867 + return dmi_bigsmp;
868 +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
869 +index 60bcb5b..b384297 100644
870 +--- a/arch/x86/mm/pageattr.c
871 ++++ b/arch/x86/mm/pageattr.c
872 +@@ -789,7 +789,7 @@ int set_memory_uc(unsigned long addr, int numpages)
873 + /*
874 + * for now UC MINUS. see comments in ioremap_nocache()
875 + */
876 +- if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
877 ++ if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
878 + _PAGE_CACHE_UC_MINUS, NULL))
879 + return -EINVAL;
880 +
881 +@@ -808,7 +808,7 @@ int set_memory_wc(unsigned long addr, int numpages)
882 + if (!pat_wc_enabled)
883 + return set_memory_uc(addr, numpages);
884 +
885 +- if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
886 ++ if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
887 + _PAGE_CACHE_WC, NULL))
888 + return -EINVAL;
889 +
890 +@@ -824,7 +824,7 @@ int _set_memory_wb(unsigned long addr, int numpages)
891 +
892 + int set_memory_wb(unsigned long addr, int numpages)
893 + {
894 +- free_memtype(addr, addr + numpages * PAGE_SIZE);
895 ++ free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
896 +
897 + return _set_memory_wb(addr, numpages);
898 + }
899 +diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
900 +index cc48d3f..d38d5d0 100644
901 +--- a/arch/x86/oprofile/nmi_int.c
902 ++++ b/arch/x86/oprofile/nmi_int.c
903 +@@ -15,6 +15,7 @@
904 + #include <linux/slab.h>
905 + #include <linux/moduleparam.h>
906 + #include <linux/kdebug.h>
907 ++#include <linux/cpu.h>
908 + #include <asm/nmi.h>
909 + #include <asm/msr.h>
910 + #include <asm/apic.h>
911 +@@ -28,23 +29,48 @@ static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
912 +
913 + static int nmi_start(void);
914 + static void nmi_stop(void);
915 ++static void nmi_cpu_start(void *dummy);
916 ++static void nmi_cpu_stop(void *dummy);
917 +
918 + /* 0 == registered but off, 1 == registered and on */
919 + static int nmi_enabled = 0;
920 +
921 ++#ifdef CONFIG_SMP
922 ++static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
923 ++ void *data)
924 ++{
925 ++ int cpu = (unsigned long)data;
926 ++ switch (action) {
927 ++ case CPU_DOWN_FAILED:
928 ++ case CPU_ONLINE:
929 ++ smp_call_function_single(cpu, nmi_cpu_start, NULL, 0, 0);
930 ++ break;
931 ++ case CPU_DOWN_PREPARE:
932 ++ smp_call_function_single(cpu, nmi_cpu_stop, NULL, 0, 1);
933 ++ break;
934 ++ }
935 ++ return NOTIFY_DONE;
936 ++}
937 ++
938 ++static struct notifier_block oprofile_cpu_nb = {
939 ++ .notifier_call = oprofile_cpu_notifier
940 ++};
941 ++#endif
942 ++
943 + #ifdef CONFIG_PM
944 +
945 + static int nmi_suspend(struct sys_device *dev, pm_message_t state)
946 + {
947 ++ /* Only one CPU left, just stop that one */
948 + if (nmi_enabled == 1)
949 +- nmi_stop();
950 ++ nmi_cpu_stop(NULL);
951 + return 0;
952 + }
953 +
954 + static int nmi_resume(struct sys_device *dev)
955 + {
956 + if (nmi_enabled == 1)
957 +- nmi_start();
958 ++ nmi_cpu_start(NULL);
959 + return 0;
960 + }
961 +
962 +@@ -448,6 +474,9 @@ int __init op_nmi_init(struct oprofile_operations *ops)
963 + }
964 +
965 + init_sysfs();
966 ++#ifdef CONFIG_SMP
967 ++ register_cpu_notifier(&oprofile_cpu_nb);
968 ++#endif
969 + using_nmi = 1;
970 + ops->create_files = nmi_create_files;
971 + ops->setup = nmi_setup;
972 +@@ -461,6 +490,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
973 +
974 + void op_nmi_exit(void)
975 + {
976 +- if (using_nmi)
977 ++ if (using_nmi) {
978 + exit_sysfs();
979 ++#ifdef CONFIG_SMP
980 ++ unregister_cpu_notifier(&oprofile_cpu_nb);
981 ++#endif
982 ++ }
983 + }
984 +diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
985 +index c6e772f..bfffb3d 100644
986 +--- a/crypto/async_tx/async_tx.c
987 ++++ b/crypto/async_tx/async_tx.c
988 +@@ -136,7 +136,8 @@ async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
989 + spin_lock_bh(&next->lock);
990 + next->parent = NULL;
991 + _next = next->next;
992 +- next->next = NULL;
993 ++ if (_next && _next->chan == chan)
994 ++ next->next = NULL;
995 + spin_unlock_bh(&next->lock);
996 +
997 + next->tx_submit(next);
998 +diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
999 +index 0a5f6b2..d672cfe 100644
1000 +--- a/drivers/accessibility/braille/braille_console.c
1001 ++++ b/drivers/accessibility/braille/braille_console.c
1002 +@@ -376,6 +376,8 @@ int braille_register_console(struct console *console, int index,
1003 + console->flags |= CON_ENABLED;
1004 + console->index = index;
1005 + braille_co = console;
1006 ++ register_keyboard_notifier(&keyboard_notifier_block);
1007 ++ register_vt_notifier(&vt_notifier_block);
1008 + return 0;
1009 + }
1010 +
1011 +@@ -383,15 +385,8 @@ int braille_unregister_console(struct console *console)
1012 + {
1013 + if (braille_co != console)
1014 + return -EINVAL;
1015 ++ unregister_keyboard_notifier(&keyboard_notifier_block);
1016 ++ unregister_vt_notifier(&vt_notifier_block);
1017 + braille_co = NULL;
1018 + return 0;
1019 + }
1020 +-
1021 +-static int __init braille_init(void)
1022 +-{
1023 +- register_keyboard_notifier(&keyboard_notifier_block);
1024 +- register_vt_notifier(&vt_notifier_block);
1025 +- return 0;
1026 +-}
1027 +-
1028 +-console_initcall(braille_init);
1029 +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
1030 +index 5622aee..5670178 100644
1031 +--- a/drivers/acpi/ec.c
1032 ++++ b/drivers/acpi/ec.c
1033 +@@ -196,6 +196,8 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
1034 + return 0;
1035 + msleep(1);
1036 + }
1037 ++ if (acpi_ec_check_status(ec,event))
1038 ++ return 0;
1039 + }
1040 + pr_err(PREFIX "acpi_ec_wait timeout, status = 0x%2.2x, event = %s\n",
1041 + acpi_ec_read_status(ec),
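
The added check is an instance of a general polling pattern: after the loop gives up, test the condition one final time so an event that arrived during the last sleep is not misreported as a timeout. Schematically, with hypothetical names:

	while (retries--) {
		if (done())		/* hypothetical completion predicate */
			return 0;
		msleep(1);		/* the event may land during this sleep */
	}
	if (done())			/* final re-check closes that window */
		return 0;
	return -ETIMEDOUT;
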
1042 +diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
1043 +index 8c06a53..6f4a5e1 100644
1044 +--- a/drivers/acpi/processor_perflib.c
1045 ++++ b/drivers/acpi/processor_perflib.c
1046 +@@ -70,7 +70,7 @@ static DEFINE_MUTEX(performance_mutex);
1047 + * 0 -> cpufreq low level drivers initialized -> consider _PPC values
1048 + * 1 -> ignore _PPC totally -> forced by user through boot param
1049 + */
1050 +-static unsigned int ignore_ppc = -1;
1051 ++static int ignore_ppc = -1;
1052 + module_param(ignore_ppc, uint, 0644);
1053 + MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
1054 + "limited by BIOS, this should help");
1055 +diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
1056 +index d34c14c..436c7e1 100644
1057 +--- a/drivers/i2c/i2c-dev.c
1058 ++++ b/drivers/i2c/i2c-dev.c
1059 +@@ -581,8 +581,10 @@ static int __init i2c_dev_init(void)
1060 + goto out;
1061 +
1062 + i2c_dev_class = class_create(THIS_MODULE, "i2c-dev");
1063 +- if (IS_ERR(i2c_dev_class))
1064 ++ if (IS_ERR(i2c_dev_class)) {
1065 ++ res = PTR_ERR(i2c_dev_class);
1066 + goto out_unreg_chrdev;
1067 ++ }
1068 +
1069 + res = i2c_add_driver(&i2cdev_driver);
1070 + if (res)
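
The point of the two added lines: on class_create() failure the function previously jumped to the error path with res still holding 0 from the successful chrdev registration, so a failed init reported success. The kernel idiom in play, sketched with hypothetical names, decodes the errno from the returned pointer:

	ptr = create_something();	/* returns ERR_PTR(-errno) on failure */
	if (IS_ERR(ptr)) {
		res = PTR_ERR(ptr);	/* recover the negative errno */
		goto out_cleanup;
	}
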
1071 +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
1072 +index f9ad960..55a104d 100644
1073 +--- a/drivers/mmc/card/block.c
1074 ++++ b/drivers/mmc/card/block.c
1075 +@@ -103,8 +103,10 @@ static int mmc_blk_open(struct inode *inode, struct file *filp)
1076 + check_disk_change(inode->i_bdev);
1077 + ret = 0;
1078 +
1079 +- if ((filp->f_mode & FMODE_WRITE) && md->read_only)
1080 ++ if ((filp->f_mode & FMODE_WRITE) && md->read_only) {
1081 ++ mmc_blk_put(md);
1082 + ret = -EROFS;
1083 ++ }
1084 + }
1085 +
1086 + return ret;
1087 +diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
1088 +index e248f80..6fbfaf0 100644
1089 +--- a/drivers/net/ixgbe/ixgbe_main.c
1090 ++++ b/drivers/net/ixgbe/ixgbe_main.c
1091 +@@ -2258,6 +2258,12 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
1092 + int vector, v_budget;
1093 +
1094 + /*
1095 ++ * Set the default interrupt throttle rate.
1096 ++ */
1097 ++ adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS);
1098 ++ adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS);
1099 ++
1100 ++ /*
1101 + * It's easy to be greedy for MSI-X vectors, but it really
1102 + * doesn't do us much good if we have a lot more vectors
1103 + * than CPU's. So let's be conservative and only ask for
1104 +diff --git a/drivers/net/niu.c b/drivers/net/niu.c
1105 +index 918f802..78d90eb 100644
1106 +--- a/drivers/net/niu.c
1107 ++++ b/drivers/net/niu.c
1108 +@@ -5978,6 +5978,56 @@ static void niu_netif_start(struct niu *np)
1109 + niu_enable_interrupts(np, 1);
1110 + }
1111 +
1112 ++static void niu_reset_buffers(struct niu *np)
1113 ++{
1114 ++ int i, j, k, err;
1115 ++
1116 ++ if (np->rx_rings) {
1117 ++ for (i = 0; i < np->num_rx_rings; i++) {
1118 ++ struct rx_ring_info *rp = &np->rx_rings[i];
1119 ++
1120 ++ for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
1121 ++ struct page *page;
1122 ++
1123 ++ page = rp->rxhash[j];
1124 ++ while (page) {
1125 ++ struct page *next =
1126 ++ (struct page *) page->mapping;
1127 ++ u64 base = page->index;
1128 ++ base = base >> RBR_DESCR_ADDR_SHIFT;
1129 ++ rp->rbr[k++] = cpu_to_le32(base);
1130 ++ page = next;
1131 ++ }
1132 ++ }
1133 ++ for (; k < MAX_RBR_RING_SIZE; k++) {
1134 ++ err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
1135 ++ if (unlikely(err))
1136 ++ break;
1137 ++ }
1138 ++
1139 ++ rp->rbr_index = rp->rbr_table_size - 1;
1140 ++ rp->rcr_index = 0;
1141 ++ rp->rbr_pending = 0;
1142 ++ rp->rbr_refill_pending = 0;
1143 ++ }
1144 ++ }
1145 ++ if (np->tx_rings) {
1146 ++ for (i = 0; i < np->num_tx_rings; i++) {
1147 ++ struct tx_ring_info *rp = &np->tx_rings[i];
1148 ++
1149 ++ for (j = 0; j < MAX_TX_RING_SIZE; j++) {
1150 ++ if (rp->tx_buffs[j].skb)
1151 ++ (void) release_tx_packet(np, rp, j);
1152 ++ }
1153 ++
1154 ++ rp->pending = MAX_TX_RING_SIZE;
1155 ++ rp->prod = 0;
1156 ++ rp->cons = 0;
1157 ++ rp->wrap_bit = 0;
1158 ++ }
1159 ++ }
1160 ++}
1161 ++
1162 + static void niu_reset_task(struct work_struct *work)
1163 + {
1164 + struct niu *np = container_of(work, struct niu, reset_task);
1165 +@@ -6000,6 +6050,12 @@ static void niu_reset_task(struct work_struct *work)
1166 +
1167 + niu_stop_hw(np);
1168 +
1169 ++ spin_unlock_irqrestore(&np->lock, flags);
1170 ++
1171 ++ niu_reset_buffers(np);
1172 ++
1173 ++ spin_lock_irqsave(&np->lock, flags);
1174 ++
1175 + err = niu_init_hw(np);
1176 + if (!err) {
1177 + np->timer.expires = jiffies + HZ;
1178 +diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
1179 +index b4bf1e0..10c92bd 100644
1180 +--- a/drivers/net/wireless/rt2x00/rt2x00.h
1181 ++++ b/drivers/net/wireless/rt2x00/rt2x00.h
1182 +@@ -820,8 +820,10 @@ struct rt2x00_dev {
1183 +
1184 + /*
1185 + * Scheduled work.
1186 ++ * NOTE: intf_work will use ieee80211_iterate_active_interfaces()
1187 ++ * which means it cannot be placed on the hw->workqueue
1188 ++ * due to RTNL locking requirements.
1189 + */
1190 +- struct workqueue_struct *workqueue;
1191 + struct work_struct intf_work;
1192 + struct work_struct filter_work;
1193 +
1194 +diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
1195 +index c997d4f..78fa714 100644
1196 +--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
1197 ++++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
1198 +@@ -75,7 +75,7 @@ static void rt2x00lib_start_link_tuner(struct rt2x00_dev *rt2x00dev)
1199 +
1200 + rt2x00lib_reset_link_tuner(rt2x00dev);
1201 +
1202 +- queue_delayed_work(rt2x00dev->workqueue,
1203 ++ queue_delayed_work(rt2x00dev->hw->workqueue,
1204 + &rt2x00dev->link.work, LINK_TUNE_INTERVAL);
1205 + }
1206 +
1207 +@@ -390,7 +390,7 @@ static void rt2x00lib_link_tuner(struct work_struct *work)
1208 + * Increase tuner counter, and reschedule the next link tuner run.
1209 + */
1210 + rt2x00dev->link.count++;
1211 +- queue_delayed_work(rt2x00dev->workqueue,
1212 ++ queue_delayed_work(rt2x00dev->hw->workqueue,
1213 + &rt2x00dev->link.work, LINK_TUNE_INTERVAL);
1214 + }
1215 +
1216 +@@ -488,7 +488,7 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
1217 + rt2x00lib_beacondone_iter,
1218 + rt2x00dev);
1219 +
1220 +- queue_work(rt2x00dev->workqueue, &rt2x00dev->intf_work);
1221 ++ schedule_work(&rt2x00dev->intf_work);
1222 + }
1223 + EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
1224 +
1225 +@@ -1131,10 +1131,6 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1226 + /*
1227 + * Initialize configuration work.
1228 + */
1229 +- rt2x00dev->workqueue = create_singlethread_workqueue("rt2x00lib");
1230 +- if (!rt2x00dev->workqueue)
1231 +- goto exit;
1232 +-
1233 + INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
1234 + INIT_WORK(&rt2x00dev->filter_work, rt2x00lib_packetfilter_scheduled);
1235 + INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00lib_link_tuner);
1236 +@@ -1195,13 +1191,6 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1237 + rt2x00leds_unregister(rt2x00dev);
1238 +
1239 + /*
1240 +- * Stop all queued work. Note that most tasks will already be halted
1241 +- * during rt2x00lib_disable_radio() and rt2x00lib_uninitialize().
1242 +- */
1243 +- flush_workqueue(rt2x00dev->workqueue);
1244 +- destroy_workqueue(rt2x00dev->workqueue);
1245 +-
1246 +- /*
1247 + * Free ieee80211_hw memory.
1248 + */
1249 + rt2x00lib_remove_hw(rt2x00dev);
1250 +diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
1251 +index 9cb023e..802ddba 100644
1252 +--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
1253 ++++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
1254 +@@ -428,7 +428,7 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
1255 + if (!test_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags))
1256 + rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags);
1257 + else
1258 +- queue_work(rt2x00dev->workqueue, &rt2x00dev->filter_work);
1259 ++ queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->filter_work);
1260 + }
1261 + EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter);
1262 +
1263 +@@ -509,7 +509,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
1264 + memcpy(&intf->conf, bss_conf, sizeof(*bss_conf));
1265 + if (delayed) {
1266 + intf->delayed_flags |= delayed;
1267 +- queue_work(rt2x00dev->workqueue, &rt2x00dev->intf_work);
1268 ++ schedule_work(&rt2x00dev->intf_work);
1269 + }
1270 + spin_unlock(&intf->lock);
1271 + }
1272 +diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
1273 +index e407754..7d82315 100644
1274 +--- a/drivers/pcmcia/ds.c
1275 ++++ b/drivers/pcmcia/ds.c
1276 +@@ -428,6 +428,18 @@ static int pcmcia_device_probe(struct device * dev)
1277 + p_drv = to_pcmcia_drv(dev->driver);
1278 + s = p_dev->socket;
1279 +
1280 ++ /* The PCMCIA code passes the match data in via dev->driver_data
1281 ++ * which is an ugly hack. Once the driver probe is called it may
1282 ++ * and often will overwrite the match data so we must save it first
1283 ++ *
1284 ++ * handle pseudo multifunction devices:
1285 ++ * there are at most two pseudo multifunction devices.
1286 ++ * if we're matching against the first, schedule a
1287 ++ * call which will then check whether there are two
1288 ++ * pseudo devices, and if not, add the second one.
1289 ++ */
1290 ++ did = p_dev->dev.driver_data;
1291 ++
1292 + ds_dbg(1, "trying to bind %s to %s\n", p_dev->dev.bus_id,
1293 + p_drv->drv.name);
1294 +
1295 +@@ -456,21 +468,14 @@ static int pcmcia_device_probe(struct device * dev)
1296 + goto put_module;
1297 + }
1298 +
1299 +- /* handle pseudo multifunction devices:
1300 +- * there are at most two pseudo multifunction devices.
1301 +- * if we're matching against the first, schedule a
1302 +- * call which will then check whether there are two
1303 +- * pseudo devices, and if not, add the second one.
1304 +- */
1305 +- did = p_dev->dev.driver_data;
1306 + if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) &&
1307 + (p_dev->socket->device_count == 1) && (p_dev->device_no == 0))
1308 + pcmcia_add_device_later(p_dev->socket, 0);
1309 +
1310 +- put_module:
1311 ++put_module:
1312 + if (ret)
1313 + module_put(p_drv->owner);
1314 +- put_dev:
1315 ++put_dev:
1316 + if (ret)
1317 + put_device(dev);
1318 + return (ret);
1319 +diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
1320 +index 90dfa0d..846582b 100644
1321 +--- a/drivers/rtc/rtc-dev.c
1322 ++++ b/drivers/rtc/rtc-dev.c
1323 +@@ -401,6 +401,12 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
1324 + return err;
1325 + }
1326 +
1327 ++static int rtc_dev_fasync(int fd, struct file *file, int on)
1328 ++{
1329 ++ struct rtc_device *rtc = file->private_data;
1330 ++ return fasync_helper(fd, file, on, &rtc->async_queue);
1331 ++}
1332 ++
1333 + static int rtc_dev_release(struct inode *inode, struct file *file)
1334 + {
1335 + struct rtc_device *rtc = file->private_data;
1336 +@@ -411,16 +417,13 @@ static int rtc_dev_release(struct inode *inode, struct file *file)
1337 + if (rtc->ops->release)
1338 + rtc->ops->release(rtc->dev.parent);
1339 +
1340 ++ if (file->f_flags & FASYNC)
1341 ++ rtc_dev_fasync(-1, file, 0);
1342 ++
1343 + clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
1344 + return 0;
1345 + }
1346 +
1347 +-static int rtc_dev_fasync(int fd, struct file *file, int on)
1348 +-{
1349 +- struct rtc_device *rtc = file->private_data;
1350 +- return fasync_helper(fd, file, on, &rtc->async_queue);
1351 +-}
1352 +-
1353 + static const struct file_operations rtc_dev_fops = {
1354 + .owner = THIS_MODULE,
1355 + .llseek = no_llseek,
1356 +diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
1357 +index ec63b79..d191cec 100644
1358 +--- a/drivers/scsi/qla2xxx/qla_isr.c
1359 ++++ b/drivers/scsi/qla2xxx/qla_isr.c
1360 +@@ -1838,7 +1838,6 @@ clear_risc_ints:
1361 + WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
1362 + }
1363 + spin_unlock_irq(&ha->hardware_lock);
1364 +- ha->isp_ops->enable_intrs(ha);
1365 +
1366 + fail:
1367 + return ret;
1368 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
1369 +index 047ee64..4c6b902 100644
1370 +--- a/drivers/scsi/qla2xxx/qla_os.c
1371 ++++ b/drivers/scsi/qla2xxx/qla_os.c
1372 +@@ -1740,6 +1740,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1373 + if (ret)
1374 + goto probe_failed;
1375 +
1376 ++ ha->isp_ops->enable_intrs(ha);
1377 ++
1378 + scsi_scan_host(host);
1379 +
1380 + qla2x00_alloc_sysfs_attr(ha);
1381 +diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
1382 +index 0c452c4..2b7ba85 100644
1383 +--- a/drivers/spi/pxa2xx_spi.c
1384 ++++ b/drivers/spi/pxa2xx_spi.c
1385 +@@ -48,9 +48,10 @@ MODULE_ALIAS("platform:pxa2xx-spi");
1386 +
1387 + #define MAX_BUSES 3
1388 +
1389 +-#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
1390 +-#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
1391 +-#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
1392 ++#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
1393 ++#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
1394 ++#define IS_DMA_ALIGNED(x) ((((u32)(x)) & 0x07) == 0)
1395 ++#define MAX_DMA_LEN 8191
1396 +
1397 + /*
1398 + * for testing SSCR1 changes that require SSP restart, basically
1399 +@@ -145,7 +146,6 @@ struct driver_data {
1400 + size_t tx_map_len;
1401 + u8 n_bytes;
1402 + u32 dma_width;
1403 +- int cs_change;
1404 + int (*write)(struct driver_data *drv_data);
1405 + int (*read)(struct driver_data *drv_data);
1406 + irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
1407 +@@ -407,8 +407,45 @@ static void giveback(struct driver_data *drv_data)
1408 + struct spi_transfer,
1409 + transfer_list);
1410 +
1411 ++ /* Delay if requested before any change in chip select */
1412 ++ if (last_transfer->delay_usecs)
1413 ++ udelay(last_transfer->delay_usecs);
1414 ++
1415 ++ /* Drop chip select UNLESS cs_change is true or we are returning
1416 ++ * a message with an error, or next message is for another chip
1417 ++ */
1418 + if (!last_transfer->cs_change)
1419 + drv_data->cs_control(PXA2XX_CS_DEASSERT);
1420 ++ else {
1421 ++ struct spi_message *next_msg;
1422 ++
1423 ++ /* Holding of cs was hinted, but we need to make sure
1424 ++ * the next message is for the same chip. Don't waste
1425 ++ * time with the following tests unless this was hinted.
1426 ++ *
1427 ++ * We cannot postpone this until pump_messages, because
1428 ++ * after calling msg->complete (below) the driver that
1429 ++ * sent the current message could be unloaded, which
1430 ++ * could invalidate the cs_control() callback...
1431 ++ */
1432 ++
1433 ++ /* get a pointer to the next message, if any */
1434 ++ spin_lock_irqsave(&drv_data->lock, flags);
1435 ++ if (list_empty(&drv_data->queue))
1436 ++ next_msg = NULL;
1437 ++ else
1438 ++ next_msg = list_entry(drv_data->queue.next,
1439 ++ struct spi_message, queue);
1440 ++ spin_unlock_irqrestore(&drv_data->lock, flags);
1441 ++
1442 ++ /* see if the next and current messages point
1443 ++ * to the same chip
1444 ++ */
1445 ++ if (next_msg && next_msg->spi != msg->spi)
1446 ++ next_msg = NULL;
1447 ++ if (!next_msg || msg->state == ERROR_STATE)
1448 ++ drv_data->cs_control(PXA2XX_CS_DEASSERT);
1449 ++ }
1450 +
1451 + msg->state = NULL;
1452 + if (msg->complete)
1453 +@@ -491,10 +528,9 @@ static void dma_transfer_complete(struct driver_data *drv_data)
1454 + msg->actual_length += drv_data->len -
1455 + (drv_data->rx_end - drv_data->rx);
1456 +
1457 +- /* Release chip select if requested, transfer delays are
1458 +- * handled in pump_transfers */
1459 +- if (drv_data->cs_change)
1460 +- drv_data->cs_control(PXA2XX_CS_DEASSERT);
1461 ++ /* Transfer delays and chip select release are
1462 ++ * handled in pump_transfers or giveback
1463 ++ */
1464 +
1465 + /* Move to next transfer */
1466 + msg->state = next_transfer(drv_data);
1467 +@@ -603,10 +639,9 @@ static void int_transfer_complete(struct driver_data *drv_data)
1468 + drv_data->cur_msg->actual_length += drv_data->len -
1469 + (drv_data->rx_end - drv_data->rx);
1470 +
1471 +- /* Release chip select if requested, transfer delays are
1472 +- * handled in pump_transfers */
1473 +- if (drv_data->cs_change)
1474 +- drv_data->cs_control(PXA2XX_CS_DEASSERT);
1475 ++ /* Transfer delays and chip select release are
1476 ++ * handled in pump_transfers or giveback
1477 ++ */
1478 +
1479 + /* Move to next transfer */
1480 + drv_data->cur_msg->state = next_transfer(drv_data);
1481 +@@ -841,23 +876,40 @@ static void pump_transfers(unsigned long data)
1482 + return;
1483 + }
1484 +
1485 +- /* Delay if requested at end of transfer*/
1486 ++ /* Delay if requested at end of transfer before CS change */
1487 + if (message->state == RUNNING_STATE) {
1488 + previous = list_entry(transfer->transfer_list.prev,
1489 + struct spi_transfer,
1490 + transfer_list);
1491 + if (previous->delay_usecs)
1492 + udelay(previous->delay_usecs);
1493 ++
1494 ++ /* Drop chip select only if cs_change is requested */
1495 ++ if (previous->cs_change)
1496 ++ drv_data->cs_control(PXA2XX_CS_DEASSERT);
1497 + }
1498 +
1499 +- /* Check transfer length */
1500 +- if (transfer->len > 8191)
1501 +- {
1502 +- dev_warn(&drv_data->pdev->dev, "pump_transfers: transfer "
1503 +- "length greater than 8191\n");
1504 +- message->status = -EINVAL;
1505 +- giveback(drv_data);
1506 +- return;
1507 ++ /* Check for transfers that need multiple DMA segments */
1508 ++ if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {
1509 ++
1510 ++ /* reject already-mapped transfers; PIO won't always work */
1511 ++ if (message->is_dma_mapped
1512 ++ || transfer->rx_dma || transfer->tx_dma) {
1513 ++ dev_err(&drv_data->pdev->dev,
1514 ++ "pump_transfers: mapped transfer length "
1515 ++ "of %u is greater than %d\n",
1516 ++ transfer->len, MAX_DMA_LEN);
1517 ++ message->status = -EINVAL;
1518 ++ giveback(drv_data);
1519 ++ return;
1520 ++ }
1521 ++
1522 ++ /* warn ... we force this to PIO mode */
1523 ++ if (printk_ratelimit())
1524 ++ dev_warn(&message->spi->dev, "pump_transfers: "
1525 ++ "DMA disabled for transfer length %ld "
1526 ++ "greater than %d\n",
1527 ++ (long)drv_data->len, MAX_DMA_LEN);
1528 + }
1529 +
1530 + /* Setup the transfer state based on the type of transfer */
1531 +@@ -879,7 +931,6 @@ static void pump_transfers(unsigned long data)
1532 + drv_data->len = transfer->len & DCMD_LENGTH;
1533 + drv_data->write = drv_data->tx ? chip->write : null_writer;
1534 + drv_data->read = drv_data->rx ? chip->read : null_reader;
1535 +- drv_data->cs_change = transfer->cs_change;
1536 +
1537 + /* Change speed and bit per word on a per transfer */
1538 + cr0 = chip->cr0;
1539 +@@ -926,7 +977,7 @@ static void pump_transfers(unsigned long data)
1540 + &dma_thresh))
1541 + if (printk_ratelimit())
1542 + dev_warn(&message->spi->dev,
1543 +- "pump_transfer: "
1544 ++ "pump_transfers: "
1545 + "DMA burst size reduced to "
1546 + "match bits_per_word\n");
1547 + }
1548 +@@ -940,8 +991,23 @@ static void pump_transfers(unsigned long data)
1549 +
1550 + message->state = RUNNING_STATE;
1551 +
1552 +- /* Try to map dma buffer and do a dma transfer if successful */
1553 +- if ((drv_data->dma_mapped = map_dma_buffers(drv_data))) {
1554 ++ /* Try to map dma buffer and do a dma transfer if successful, but
1555 ++ * only if the length is non-zero and less than MAX_DMA_LEN.
1556 ++ *
1557 ++ * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
1558 ++ * of PIO instead. Care is needed above because the transfer may
1559 ++ * have been passed with buffers that are already dma mapped.
1560 ++ * A zero-length transfer in PIO mode will not try to write/read
1561 ++ * to/from the buffers.
1562 ++ *
1563 ++ * REVISIT large transfers are exactly where we most want to be
1564 ++ * using DMA. If this happens much, split those transfers into
1565 ++ * multiple DMA segments rather than forcing PIO.
1566 ++ */
1567 ++ drv_data->dma_mapped = 0;
1568 ++ if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN)
1569 ++ drv_data->dma_mapped = map_dma_buffers(drv_data);
1570 ++ if (drv_data->dma_mapped) {
1571 +
1572 + /* Ensure we have the correct interrupt handler */
1573 + drv_data->transfer_handler = dma_transfer;
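
The hunk above stops hard-failing transfers longer than MAX_DMA_LEN (the 8191-byte limit of the PXA2xx DCMD length field) and instead falls back to PIO, refusing only transfers that arrive pre-mapped for DMA. The REVISIT note points at the better long-term fix; a rough sketch of what splitting could look like at the caller, using the stock spi_sync() message API (the chunking helper itself is hypothetical, not part of this driver):

    /* Hypothetical helper, illustration only: bound each transfer at
     * MAX_DMA_LEN so the controller can keep using DMA per segment.
     */
    static int spi_write_chunked(struct spi_device *spi,
                                 const u8 *buf, size_t len)
    {
            size_t done = 0;
            int ret = 0;

            while (done < len && !ret) {
                    struct spi_transfer t = {
                            .tx_buf = buf + done,
                            .len = min_t(size_t, len - done, MAX_DMA_LEN),
                    };
                    struct spi_message m;

                    spi_message_init(&m);
                    spi_message_add_tail(&t, &m);
                    ret = spi_sync(spi, &m);    /* one DMA-sized segment */
                    done += t.len;
            }
            return ret;
    }
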
1574 +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
1575 +index 42a4364..7e6130a 100644
1576 +--- a/drivers/usb/core/hcd.c
1577 ++++ b/drivers/usb/core/hcd.c
1578 +@@ -1885,7 +1885,8 @@ int usb_add_hcd(struct usb_hcd *hcd,
1579 + * with IRQF_SHARED. As usb_hcd_irq() will always disable
1580 + * interrupts we can remove it here.
1581 + */
1582 +- irqflags &= ~IRQF_DISABLED;
1583 ++ if (irqflags & IRQF_SHARED)
1584 ++ irqflags &= ~IRQF_DISABLED;
1585 +
1586 + snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
1587 + hcd->driver->description, hcd->self.busnum);
1588 +diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h
1589 +index 0135e03..e3437c4 100644
1590 +--- a/drivers/video/console/fbcon.h
1591 ++++ b/drivers/video/console/fbcon.h
1592 +@@ -110,7 +110,7 @@ static inline int mono_col(const struct fb_info *info)
1593 + __u32 max_len;
1594 + max_len = max(info->var.green.length, info->var.red.length);
1595 + max_len = max(info->var.blue.length, max_len);
1596 +- return ~(0xfff << (max_len & 0xff));
1597 ++ return (~(0xfff << max_len)) & 0xff;
1598 + }
1599 +
1600 + static inline int attr_col_ec(int shift, struct vc_data *vc,
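
The mono_col() fix is easiest to see with numbers. For an 8-bit colour component, the old expression ~(0xfff << (8 & 0xff)) evaluates to 0xfff000ff on a 32-bit int, setting bits that are not colour bits at all; the new form builds the low-bit mask first and only then truncates to 8 bits. A standalone check (plain userspace C, depths chosen for illustration):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* old vs. new mono_col() mask for a component depth of max_len bits */
    static uint32_t old_mask(uint32_t max_len)
    {
            return ~(0xfff << (max_len & 0xff));
    }

    static uint32_t new_mask(uint32_t max_len)
    {
            return (~(0xfff << max_len)) & 0xff;
    }

    int main(void)
    {
            printf("old(8) = %#x\n", old_mask(8)); /* 0xfff000ff: bogus high bits */
            printf("new(8) = %#x\n", new_mask(8)); /* 0xff: exactly 8 colour bits */
            assert(new_mask(4) == 0x0f);           /* shallower depths still work */
            return 0;
    }
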
1601 +diff --git a/fs/buffer.c b/fs/buffer.c
1602 +index 0f51c0f..42d2104 100644
1603 +--- a/fs/buffer.c
1604 ++++ b/fs/buffer.c
1605 +@@ -2868,14 +2868,17 @@ int submit_bh(int rw, struct buffer_head * bh)
1606 + BUG_ON(!buffer_mapped(bh));
1607 + BUG_ON(!bh->b_end_io);
1608 +
1609 +- if (buffer_ordered(bh) && (rw == WRITE))
1610 +- rw = WRITE_BARRIER;
1611 ++ /*
1612 ++ * Mask in barrier bit for a write (could be either a WRITE or a
1613 ++ * WRITE_SYNC
1614 ++ */
1615 ++ if (buffer_ordered(bh) && (rw & WRITE))
1616 ++ rw |= WRITE_BARRIER;
1617 +
1618 + /*
1619 +- * Only clear out a write error when rewriting, should this
1620 +- * include WRITE_SYNC as well?
1621 ++ * Only clear out a write error when rewriting
1622 + */
1623 +- if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
1624 ++ if (test_set_buffer_req(bh) && (rw & WRITE))
1625 + clear_buffer_write_io_error(bh);
1626 +
1627 + /*
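
Context for the submit_bh() change: in 2.6.26 WRITE_SYNC is not a standalone value but the WRITE bit plus a sync modifier, so the old equality tests (rw == WRITE) silently skipped sync writes, losing both the barrier promotion and the write-error reset. A minimal model of the flag arithmetic (the bit positions below are illustrative, not the kernel's exact BIO_RW_* values):

    #include <stdio.h>

    #define WRITE           (1 << 0)
    #define SYNC_FLAG       (1 << 4)   /* stand-in for 1 << BIO_RW_SYNC */
    #define BARRIER_FLAG    (1 << 2)   /* stand-in for 1 << BIO_RW_BARRIER */
    #define WRITE_SYNC      (WRITE | SYNC_FLAG)

    int main(void)
    {
            int rw = WRITE_SYNC;

            /* old test: a sync write is not "== WRITE", so it was missed */
            printf("rw == WRITE -> %d\n", rw == WRITE);        /* 0 */
            /* new test: any request carrying the WRITE bit qualifies */
            printf("rw & WRITE  -> %d\n", !!(rw & WRITE));     /* 1 */

            /* and OR-ing the barrier bit in preserves the sync bit,
             * where the old assignment rw = WRITE_BARRIER clobbered it
             */
            rw |= BARRIER_FLAG;
            printf("sync kept   -> %d\n", !!(rw & SYNC_FLAG)); /* 1 */
            return 0;
    }
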
1628 +diff --git a/fs/exec.c b/fs/exec.c
1629 +index fd92343..85e9948 100644
1630 +--- a/fs/exec.c
1631 ++++ b/fs/exec.c
1632 +@@ -740,11 +740,11 @@ static int exec_mmap(struct mm_struct *mm)
1633 + tsk->active_mm = mm;
1634 + activate_mm(active_mm, mm);
1635 + task_unlock(tsk);
1636 +- mm_update_next_owner(old_mm);
1637 + arch_pick_mmap_layout(mm);
1638 + if (old_mm) {
1639 + up_read(&old_mm->mmap_sem);
1640 + BUG_ON(active_mm != old_mm);
1641 ++ mm_update_next_owner(old_mm);
1642 + mmput(old_mm);
1643 + return 0;
1644 + }
1645 +diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
1646 +index 10e149a..07f348b 100644
1647 +--- a/fs/ocfs2/stackglue.c
1648 ++++ b/fs/ocfs2/stackglue.c
1649 +@@ -97,13 +97,14 @@ static int ocfs2_stack_driver_request(const char *stack_name,
1650 + goto out;
1651 + }
1652 +
1653 +- /* Ok, the stack is pinned */
1654 +- p->sp_count++;
1655 + active_stack = p;
1656 +-
1657 + rc = 0;
1658 +
1659 + out:
1660 ++ /* If we found it, pin it */
1661 ++ if (!rc)
1662 ++ active_stack->sp_count++;
1663 ++
1664 + spin_unlock(&ocfs2_stack_lock);
1665 + return rc;
1666 + }
1667 +diff --git a/fs/proc/array.c b/fs/proc/array.c
1668 +index 797d775..0b2a88c 100644
1669 +--- a/fs/proc/array.c
1670 ++++ b/fs/proc/array.c
1671 +@@ -332,65 +332,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
1672 + return 0;
1673 + }
1674 +
1675 +-/*
1676 +- * Use precise platform statistics if available:
1677 +- */
1678 +-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
1679 +-static cputime_t task_utime(struct task_struct *p)
1680 +-{
1681 +- return p->utime;
1682 +-}
1683 +-
1684 +-static cputime_t task_stime(struct task_struct *p)
1685 +-{
1686 +- return p->stime;
1687 +-}
1688 +-#else
1689 +-static cputime_t task_utime(struct task_struct *p)
1690 +-{
1691 +- clock_t utime = cputime_to_clock_t(p->utime),
1692 +- total = utime + cputime_to_clock_t(p->stime);
1693 +- u64 temp;
1694 +-
1695 +- /*
1696 +- * Use CFS's precise accounting:
1697 +- */
1698 +- temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
1699 +-
1700 +- if (total) {
1701 +- temp *= utime;
1702 +- do_div(temp, total);
1703 +- }
1704 +- utime = (clock_t)temp;
1705 +-
1706 +- p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
1707 +- return p->prev_utime;
1708 +-}
1709 +-
1710 +-static cputime_t task_stime(struct task_struct *p)
1711 +-{
1712 +- clock_t stime;
1713 +-
1714 +- /*
1715 +- * Use CFS's precise accounting. (we subtract utime from
1716 +- * the total, to make sure the total observed by userspace
1717 +- * grows monotonically - apps rely on that):
1718 +- */
1719 +- stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
1720 +- cputime_to_clock_t(task_utime(p));
1721 +-
1722 +- if (stime >= 0)
1723 +- p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
1724 +-
1725 +- return p->prev_stime;
1726 +-}
1727 +-#endif
1728 +-
1729 +-static cputime_t task_gtime(struct task_struct *p)
1730 +-{
1731 +- return p->gtime;
1732 +-}
1733 +-
1734 + static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
1735 + struct pid *pid, struct task_struct *task, int whole)
1736 + {
1737 +diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
1738 +index be4af00..71ef3f0 100644
1739 +--- a/include/asm-generic/rtc.h
1740 ++++ b/include/asm-generic/rtc.h
1741 +@@ -15,6 +15,7 @@
1742 + #include <linux/mc146818rtc.h>
1743 + #include <linux/rtc.h>
1744 + #include <linux/bcd.h>
1745 ++#include <linux/delay.h>
1746 +
1747 + #define RTC_PIE 0x40 /* periodic interrupt enable */
1748 + #define RTC_AIE 0x20 /* alarm interrupt enable */
1749 +@@ -43,7 +44,6 @@ static inline unsigned char rtc_is_updating(void)
1750 +
1751 + static inline unsigned int get_rtc_time(struct rtc_time *time)
1752 + {
1753 +- unsigned long uip_watchdog = jiffies;
1754 + unsigned char ctrl;
1755 + unsigned long flags;
1756 +
1757 +@@ -53,19 +53,15 @@ static inline unsigned int get_rtc_time(struct rtc_time *time)
1758 +
1759 + /*
1760 + * read RTC once any update in progress is done. The update
1761 +- * can take just over 2ms. We wait 10 to 20ms. There is no need to
1762 ++ * can take just over 2ms. We wait 20ms. There is no need
1763 + * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
1764 + * If you need to know *exactly* when a second has started, enable
1765 + * periodic update complete interrupts, (via ioctl) and then
1766 + * immediately read /dev/rtc which will block until you get the IRQ.
1767 + * Once the read clears, read the RTC time (again via ioctl). Easy.
1768 + */
1769 +-
1770 +- if (rtc_is_updating() != 0)
1771 +- while (jiffies - uip_watchdog < 2*HZ/100) {
1772 +- barrier();
1773 +- cpu_relax();
1774 +- }
1775 ++ if (rtc_is_updating())
1776 ++ mdelay(20);
1777 +
1778 + /*
1779 + * Only the values that we read from the RTC are set. We leave
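
The dropped watchdog loop waited on jiffies, which only advances while timer interrupts are being serviced; on paths where get_rtc_time() can run with interrupts off (early boot, suspend and resume), jiffies - uip_watchdog may sit at zero and the loop never terminates. mdelay() instead burns a calibrated number of CPU loops, so the 20ms bound holds with or without a running tick. A toy model of the hazard (userspace C; the frozen counter stands in for jiffies with the tick stopped):

    #include <stdio.h>

    static volatile unsigned long jiffies_sim;  /* tick stopped: never moves */

    static void calibrated_delay(unsigned long loops)
    {
            while (loops--)                     /* no tick counter involved */
                    __asm__ volatile("" ::: "memory");
    }

    int main(void)
    {
            unsigned long start = jiffies_sim;

            /* old style: while (jiffies_sim - start < 2) would spin
             * forever here, since 0 < 2 stays true with the tick off
             */
            printf("ticks elapsed: %lu\n", jiffies_sim - start);

            calibrated_delay(20UL * 1000 * 1000);   /* mdelay(20) stand-in */
            puts("calibrated delay returned without a single tick");
            return 0;
    }
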
1780 +diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
1781 +index 4b683af..56d00e3 100644
1782 +--- a/include/asm-x86/i387.h
1783 ++++ b/include/asm-x86/i387.h
1784 +@@ -63,8 +63,6 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
1785 + #else
1786 + : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
1787 + #endif
1788 +- if (unlikely(err))
1789 +- init_fpu(current);
1790 + return err;
1791 + }
1792 +
1793 +@@ -138,60 +136,6 @@ static inline void __save_init_fpu(struct task_struct *tsk)
1794 + task_thread_info(tsk)->status &= ~TS_USEDFPU;
1795 + }
1796 +
1797 +-/*
1798 +- * Signal frame handlers.
1799 +- */
1800 +-
1801 +-static inline int save_i387(struct _fpstate __user *buf)
1802 +-{
1803 +- struct task_struct *tsk = current;
1804 +- int err = 0;
1805 +-
1806 +- BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
1807 +- sizeof(tsk->thread.xstate->fxsave));
1808 +-
1809 +- if ((unsigned long)buf % 16)
1810 +- printk("save_i387: bad fpstate %p\n", buf);
1811 +-
1812 +- if (!used_math())
1813 +- return 0;
1814 +- clear_used_math(); /* trigger finit */
1815 +- if (task_thread_info(tsk)->status & TS_USEDFPU) {
1816 +- err = save_i387_checking((struct i387_fxsave_struct __user *)
1817 +- buf);
1818 +- if (err)
1819 +- return err;
1820 +- task_thread_info(tsk)->status &= ~TS_USEDFPU;
1821 +- stts();
1822 +- } else {
1823 +- if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
1824 +- sizeof(struct i387_fxsave_struct)))
1825 +- return -1;
1826 +- }
1827 +- return 1;
1828 +-}
1829 +-
1830 +-/*
1831 +- * This restores directly out of user space. Exceptions are handled.
1832 +- */
1833 +-static inline int restore_i387(struct _fpstate __user *buf)
1834 +-{
1835 +- struct task_struct *tsk = current;
1836 +- int err;
1837 +-
1838 +- if (!used_math()) {
1839 +- err = init_fpu(tsk);
1840 +- if (err)
1841 +- return err;
1842 +- }
1843 +-
1844 +- if (!(task_thread_info(current)->status & TS_USEDFPU)) {
1845 +- clts();
1846 +- task_thread_info(current)->status |= TS_USEDFPU;
1847 +- }
1848 +- return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
1849 +-}
1850 +-
1851 + #else /* CONFIG_X86_32 */
1852 +
1853 + extern void finit(void);
1854 +diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h
1855 +index 57a991b..4c75587 100644
1856 +--- a/include/asm-x86/mpspec.h
1857 ++++ b/include/asm-x86/mpspec.h
1858 +@@ -35,6 +35,7 @@ extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
1859 + extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES];
1860 +
1861 + extern unsigned int boot_cpu_physical_apicid;
1862 ++extern unsigned int max_physical_apicid;
1863 + extern int smp_found_config;
1864 + extern int mpc_default_type;
1865 + extern unsigned long mp_lapic_addr;
1866 +diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
1867 +index 1cc50d2..3922eca 100644
1868 +--- a/include/asm-x86/pgtable_64.h
1869 ++++ b/include/asm-x86/pgtable_64.h
1870 +@@ -146,7 +146,7 @@ static inline void native_pgd_clear(pgd_t *pgd)
1871 + #define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
1872 + #define VMEMMAP_START _AC(0xffffe20000000000, UL)
1873 + #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
1874 +-#define MODULES_END _AC(0xfffffffffff00000, UL)
1875 ++#define MODULES_END _AC(0xffffffffff000000, UL)
1876 + #define MODULES_LEN (MODULES_END - MODULES_VADDR)
1877 +
1878 + #ifndef __ASSEMBLY__
1879 +diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
1880 +index c33b0dc..ed3a5d4 100644
1881 +--- a/include/linux/clockchips.h
1882 ++++ b/include/linux/clockchips.h
1883 +@@ -127,6 +127,8 @@ extern int clockevents_register_notifier(struct notifier_block *nb);
1884 + extern int clockevents_program_event(struct clock_event_device *dev,
1885 + ktime_t expires, ktime_t now);
1886 +
1887 ++extern void clockevents_handle_noop(struct clock_event_device *dev);
1888 ++
1889 + #ifdef CONFIG_GENERIC_CLOCKEVENTS
1890 + extern void clockevents_notify(unsigned long reason, void *arg);
1891 + #else
1892 +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
1893 +index 443bc7c..428328a 100644
1894 +--- a/include/linux/mmzone.h
1895 ++++ b/include/linux/mmzone.h
1896 +@@ -751,8 +751,9 @@ static inline int zonelist_node_idx(struct zoneref *zoneref)
1897 + *
1898 + * This function returns the next zone at or below a given zone index that is
1899 + * within the allowed nodemask using a cursor as the starting point for the
1900 +- * search. The zoneref returned is a cursor that is used as the next starting
1901 +- * point for future calls to next_zones_zonelist().
1902 ++ * search. The zoneref returned is a cursor that represents the current zone
1903 ++ * being examined. It should be advanced by one before calling
1904 ++ * next_zones_zonelist again.
1905 + */
1906 + struct zoneref *next_zones_zonelist(struct zoneref *z,
1907 + enum zone_type highest_zoneidx,
1908 +@@ -768,9 +769,8 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
1909 + *
1910 + * This function returns the first zone at or below a given zone index that is
1911 + * within the allowed nodemask. The zoneref returned is a cursor that can be
1912 +- * used to iterate the zonelist with next_zones_zonelist. The cursor should
1913 +- * not be used by the caller as it does not match the value of the zone
1914 +- * returned.
1915 ++ * used to iterate the zonelist with next_zones_zonelist; advance it
1916 ++ * by one before each subsequent call.
1917 + */
1918 + static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
1919 + enum zone_type highest_zoneidx,
1920 +@@ -795,7 +795,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
1921 + #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
1922 + for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
1923 + zone; \
1924 +- z = next_zones_zonelist(z, highidx, nodemask, &zone)) \
1925 ++ z = next_zones_zonelist(++z, highidx, nodemask, &zone)) \
1926 +
1927 + /**
1928 + * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
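
The doc-comment rewrite reflects a real contract change, paired with the mm/mmzone.c hunk further down that drops the internal z++: the returned zoneref now points at the zone just reported, and the iterator macro advances the cursor itself (the ++z above) before asking for the next match. A toy version of the new contract (illustrative types, not the kernel's):

    #include <stdio.h>

    struct zoneref { int zone; };   /* 0 terminates the list */

    /* returns a cursor pointing AT the matching zone, not one past it */
    static struct zoneref *next_zones(struct zoneref *z, int highest)
    {
            while (z->zone && z->zone > highest)
                    z++;
            return z;
    }

    int main(void)
    {
            struct zoneref zl[] = { {3}, {2}, {1}, {0} };
            struct zoneref *z;

            /* for_each_zone_zonelist style: note the ++z on advance */
            for (z = next_zones(zl, 2); z->zone; z = next_zones(++z, 2))
                    printf("zone %d\n", z->zone);   /* 2, then 1 */
            return 0;
    }
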
1929 +diff --git a/include/linux/rmap.h b/include/linux/rmap.h
1930 +index 1383692..0e889fa 100644
1931 +--- a/include/linux/rmap.h
1932 ++++ b/include/linux/rmap.h
1933 +@@ -94,7 +94,7 @@ int try_to_unmap(struct page *, int ignore_refs);
1934 + * Called from mm/filemap_xip.c to unmap empty zero page
1935 + */
1936 + pte_t *page_check_address(struct page *, struct mm_struct *,
1937 +- unsigned long, spinlock_t **);
1938 ++ unsigned long, spinlock_t **, int);
1939 +
1940 + /*
1941 + * Used by swapoff to help locate where page is expected in vma.
1942 +diff --git a/include/linux/sched.h b/include/linux/sched.h
1943 +index c5d3f84..2103c73 100644
1944 +--- a/include/linux/sched.h
1945 ++++ b/include/linux/sched.h
1946 +@@ -1477,6 +1477,10 @@ static inline void put_task_struct(struct task_struct *t)
1947 + __put_task_struct(t);
1948 + }
1949 +
1950 ++extern cputime_t task_utime(struct task_struct *p);
1951 ++extern cputime_t task_stime(struct task_struct *p);
1952 ++extern cputime_t task_gtime(struct task_struct *p);
1953 ++
1954 + /*
1955 + * Per process flags
1956 + */
1957 +diff --git a/include/linux/smb.h b/include/linux/smb.h
1958 +index caa43b2..82fefdd 100644
1959 +--- a/include/linux/smb.h
1960 ++++ b/include/linux/smb.h
1961 +@@ -11,7 +11,9 @@
1962 +
1963 + #include <linux/types.h>
1964 + #include <linux/magic.h>
1965 ++#ifdef __KERNEL__
1966 + #include <linux/time.h>
1967 ++#endif
1968 +
1969 + enum smb_protocol {
1970 + SMB_PROTOCOL_NONE,
1971 +diff --git a/include/net/netlink.h b/include/net/netlink.h
1972 +index dfc3701..6a5fdd8 100644
1973 +--- a/include/net/netlink.h
1974 ++++ b/include/net/netlink.h
1975 +@@ -702,7 +702,7 @@ static inline int nla_len(const struct nlattr *nla)
1976 + */
1977 + static inline int nla_ok(const struct nlattr *nla, int remaining)
1978 + {
1979 +- return remaining >= sizeof(*nla) &&
1980 ++ return remaining >= (int) sizeof(*nla) &&
1981 + nla->nla_len >= sizeof(*nla) &&
1982 + nla->nla_len <= remaining;
1983 + }
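
The cast matters because of C's usual arithmetic conversions: remaining is a signed int while sizeof(*nla) is an unsigned size_t, so a negative remaining (possible once an attribute stream has been over-consumed) converted to a huge unsigned value and the bounds check passed, letting nla_ok() walk past the end of the buffer. A quick demonstration:

    #include <stdio.h>

    struct nlattr { unsigned short nla_len, nla_type; };  /* minimal copy */

    int main(void)
    {
            int remaining = -2;     /* stream over-consumed */

            /* old: remaining converts to size_t, (size_t)-2 is huge */
            printf("unsigned cmp: %d\n",
                   remaining >= sizeof(struct nlattr));         /* 1: "ok" */
            /* new: the comparison stays signed */
            printf("signed cmp:   %d\n",
                   remaining >= (int) sizeof(struct nlattr));   /* 0 */
            return 0;
    }
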
1984 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
1985 +index 15ac0e1..d53caaa 100644
1986 +--- a/kernel/cgroup.c
1987 ++++ b/kernel/cgroup.c
1988 +@@ -2761,14 +2761,15 @@ void cgroup_fork_callbacks(struct task_struct *child)
1989 + */
1990 + void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
1991 + {
1992 +- struct cgroup *oldcgrp, *newcgrp;
1993 ++ struct cgroup *oldcgrp, *newcgrp = NULL;
1994 +
1995 + if (need_mm_owner_callback) {
1996 + int i;
1997 + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
1998 + struct cgroup_subsys *ss = subsys[i];
1999 + oldcgrp = task_cgroup(old, ss->subsys_id);
2000 +- newcgrp = task_cgroup(new, ss->subsys_id);
2001 ++ if (new)
2002 ++ newcgrp = task_cgroup(new, ss->subsys_id);
2003 + if (oldcgrp == newcgrp)
2004 + continue;
2005 + if (ss->mm_owner_changed)
2006 +diff --git a/kernel/exit.c b/kernel/exit.c
2007 +index 8f6185e..f68b081 100644
2008 +--- a/kernel/exit.c
2009 ++++ b/kernel/exit.c
2010 +@@ -111,9 +111,9 @@ static void __exit_signal(struct task_struct *tsk)
2011 + * We won't ever get here for the group leader, since it
2012 + * will have been the last reference on the signal_struct.
2013 + */
2014 +- sig->utime = cputime_add(sig->utime, tsk->utime);
2015 +- sig->stime = cputime_add(sig->stime, tsk->stime);
2016 +- sig->gtime = cputime_add(sig->gtime, tsk->gtime);
2017 ++ sig->utime = cputime_add(sig->utime, task_utime(tsk));
2018 ++ sig->stime = cputime_add(sig->stime, task_stime(tsk));
2019 ++ sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
2020 + sig->min_flt += tsk->min_flt;
2021 + sig->maj_flt += tsk->maj_flt;
2022 + sig->nvcsw += tsk->nvcsw;
2023 +@@ -577,8 +577,6 @@ mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
2024 + * If there are other users of the mm and the owner (us) is exiting
2025 + * we need to find a new owner to take on the responsibility.
2026 + */
2027 +- if (!mm)
2028 +- return 0;
2029 + if (atomic_read(&mm->mm_users) <= 1)
2030 + return 0;
2031 + if (mm->owner != p)
2032 +@@ -621,6 +619,16 @@ retry:
2033 + } while_each_thread(g, c);
2034 +
2035 + read_unlock(&tasklist_lock);
2036 ++ /*
2037 ++ * We found no owner, yet mm_users > 1: this implies that we are
2038 ++ * most likely racing with swapoff (try_to_unuse()) or /proc or
2039 ++ * ptrace or page migration (get_task_mm()). Mark owner as NULL,
2040 ++ * so that subsystems can understand the callback and take action.
2041 ++ */
2042 ++ down_write(&mm->mmap_sem);
2043 ++ cgroup_mm_owner_callbacks(mm->owner, NULL);
2044 ++ mm->owner = NULL;
2045 ++ up_write(&mm->mmap_sem);
2046 + return;
2047 +
2048 + assign_new_owner:
2049 +diff --git a/kernel/sched.c b/kernel/sched.c
2050 +index 4e2f603..0a50ee4 100644
2051 +--- a/kernel/sched.c
2052 ++++ b/kernel/sched.c
2053 +@@ -3995,6 +3995,65 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
2054 + }
2055 +
2056 + /*
2057 ++ * Use precise platform statistics if available:
2058 ++ */
2059 ++#ifdef CONFIG_VIRT_CPU_ACCOUNTING
2060 ++cputime_t task_utime(struct task_struct *p)
2061 ++{
2062 ++ return p->utime;
2063 ++}
2064 ++
2065 ++cputime_t task_stime(struct task_struct *p)
2066 ++{
2067 ++ return p->stime;
2068 ++}
2069 ++#else
2070 ++cputime_t task_utime(struct task_struct *p)
2071 ++{
2072 ++ clock_t utime = cputime_to_clock_t(p->utime),
2073 ++ total = utime + cputime_to_clock_t(p->stime);
2074 ++ u64 temp;
2075 ++
2076 ++ /*
2077 ++ * Use CFS's precise accounting:
2078 ++ */
2079 ++ temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
2080 ++
2081 ++ if (total) {
2082 ++ temp *= utime;
2083 ++ do_div(temp, total);
2084 ++ }
2085 ++ utime = (clock_t)temp;
2086 ++
2087 ++ p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
2088 ++ return p->prev_utime;
2089 ++}
2090 ++
2091 ++cputime_t task_stime(struct task_struct *p)
2092 ++{
2093 ++ clock_t stime;
2094 ++
2095 ++ /*
2096 ++ * Use CFS's precise accounting. (we subtract utime from
2097 ++ * the total, to make sure the total observed by userspace
2098 ++ * grows monotonically - apps rely on that):
2099 ++ */
2100 ++ stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
2101 ++ cputime_to_clock_t(task_utime(p));
2102 ++
2103 ++ if (stime >= 0)
2104 ++ p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
2105 ++
2106 ++ return p->prev_stime;
2107 ++}
2108 ++#endif
2109 ++
2110 ++inline cputime_t task_gtime(struct task_struct *p)
2111 ++{
2112 ++ return p->gtime;
2113 ++}
2114 ++
2115 ++/*
2116 + * This function gets called by the timer code, with HZ frequency.
2117 + * We call it with interrupts disabled.
2118 + *
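
These helpers move here from fs/proc/array.c (removed above) and gain extern declarations in sched.h so that __exit_signal() can reuse them: the times folded into the signal struct at exit now match what /proc reported while the task was alive, keeping the totals userspace sees monotonic. Numerically, the non-VIRT_CPU_ACCOUNTING path splits the precise CFS runtime in proportion to the sampled tick counters; with illustrative numbers (utime 30 ticks, stime 10, sum_exec_runtime worth 48 ticks) the scaled utime is 48 * 30 / 40 = 36 and task_stime() reports the remaining 12:

    #include <stdint.h>
    #include <stdio.h>

    /* userspace rehearsal of the split; inputs are made-up tick counts */
    int main(void)
    {
            uint64_t utime = 30, stime = 10;    /* sampled at timer ticks */
            uint64_t runtime = 48;              /* precise CFS runtime */
            uint64_t total = utime + stime;
            uint64_t scaled_utime, scaled_stime;

            scaled_utime = total ? runtime * utime / total : runtime;
            scaled_stime = runtime - scaled_utime;  /* sum stays exact */

            printf("utime=%llu stime=%llu\n",       /* 36 and 12 */
                   (unsigned long long)scaled_utime,
                   (unsigned long long)scaled_stime);
            return 0;
    }
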
2119 +diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
2120 +index 3d1e3e1..1876b52 100644
2121 +--- a/kernel/time/clockevents.c
2122 ++++ b/kernel/time/clockevents.c
2123 +@@ -177,7 +177,7 @@ void clockevents_register_device(struct clock_event_device *dev)
2124 + /*
2125 + * Noop handler when we shut down an event device
2126 + */
2127 +-static void clockevents_handle_noop(struct clock_event_device *dev)
2128 ++void clockevents_handle_noop(struct clock_event_device *dev)
2129 + {
2130 + }
2131 +
2132 +@@ -199,7 +199,6 @@ void clockevents_exchange_device(struct clock_event_device *old,
2133 + * released list and do a notify add later.
2134 + */
2135 + if (old) {
2136 +- old->event_handler = clockevents_handle_noop;
2137 + clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
2138 + list_del(&old->list);
2139 + list_add(&old->list, &clockevents_released);
2140 +diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
2141 +index 5125ddd..1ad46f3 100644
2142 +--- a/kernel/time/ntp.c
2143 ++++ b/kernel/time/ntp.c
2144 +@@ -245,7 +245,7 @@ static void sync_cmos_clock(unsigned long dummy)
2145 + if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
2146 + fail = update_persistent_clock(now);
2147 +
2148 +- next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec;
2149 ++ next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2);
2150 + if (next.tv_nsec <= 0)
2151 + next.tv_nsec += NSEC_PER_SEC;
2152 +
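
The half-tick bias is about landing inside the update window: as the hunk's context shows, update_persistent_clock() only runs when now.tv_nsec is within tick_nsec/2 of the half-second mark, and re-arming the timer for exactly +500ms meant that, once the timer core rounded the expiry to a tick boundary, the handler could fire just outside that window on every attempt. Subtracting TICK_NSEC/2 aims half a tick early instead. Worked with HZ = 100 (10ms ticks, chosen for illustration):

    #include <stdio.h>

    #define NSEC_PER_SEC    1000000000L
    #define TICK_NSEC       10000000L   /* HZ = 100, for illustration */

    int main(void)
    {
            long now_nsec = 500000000L; /* handler ran exactly at +500ms */

            long next = (NSEC_PER_SEC / 2) - now_nsec - (TICK_NSEC / 2);
            if (next <= 0)
                    next += NSEC_PER_SEC;

            /* 995ms: we arrive half a tick before the next half-second,
             * so tick rounding can no longer push us out of the window
             */
            printf("next attempt in %ld ms\n", next / 1000000);
            return 0;
    }
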
2153 +diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
2154 +index 57a1f02..e20a365 100644
2155 +--- a/kernel/time/tick-broadcast.c
2156 ++++ b/kernel/time/tick-broadcast.c
2157 +@@ -174,6 +174,8 @@ static void tick_do_periodic_broadcast(void)
2158 + */
2159 + static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
2160 + {
2161 ++ ktime_t next;
2162 ++
2163 + tick_do_periodic_broadcast();
2164 +
2165 + /*
2166 +@@ -184,10 +186,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
2167 +
2168 + /*
2169 + * Setup the next period for devices, which do not have
2170 +- * periodic mode:
2171 ++ * periodic mode. We read dev->next_event first and add to it
2172 ++ * when the event already expired. clockevents_program_event()
2173 ++ * sets dev->next_event only when the event is really
2174 ++ * programmed to the device.
2175 + */
2176 +- for (;;) {
2177 +- ktime_t next = ktime_add(dev->next_event, tick_period);
2178 ++ for (next = dev->next_event; ;) {
2179 ++ next = ktime_add(next, tick_period);
2180 +
2181 + if (!clockevents_program_event(dev, next, ktime_get()))
2182 + return;
2183 +@@ -204,7 +209,7 @@ static void tick_do_broadcast_on_off(void *why)
2184 + struct clock_event_device *bc, *dev;
2185 + struct tick_device *td;
2186 + unsigned long flags, *reason = why;
2187 +- int cpu;
2188 ++ int cpu, bc_stopped;
2189 +
2190 + spin_lock_irqsave(&tick_broadcast_lock, flags);
2191 +
2192 +@@ -222,6 +227,8 @@ static void tick_do_broadcast_on_off(void *why)
2193 + if (!tick_device_is_functional(dev))
2194 + goto out;
2195 +
2196 ++ bc_stopped = cpus_empty(tick_broadcast_mask);
2197 ++
2198 + switch (*reason) {
2199 + case CLOCK_EVT_NOTIFY_BROADCAST_ON:
2200 + case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
2201 +@@ -243,9 +250,10 @@ static void tick_do_broadcast_on_off(void *why)
2202 + break;
2203 + }
2204 +
2205 +- if (cpus_empty(tick_broadcast_mask))
2206 +- clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
2207 +- else {
2208 ++ if (cpus_empty(tick_broadcast_mask)) {
2209 ++ if (!bc_stopped)
2210 ++ clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
2211 ++ } else if (bc_stopped) {
2212 + if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
2213 + tick_broadcast_start_periodic(bc);
2214 + else
2215 +@@ -362,16 +370,8 @@ cpumask_t *tick_get_broadcast_oneshot_mask(void)
2216 + static int tick_broadcast_set_event(ktime_t expires, int force)
2217 + {
2218 + struct clock_event_device *bc = tick_broadcast_device.evtdev;
2219 +- ktime_t now = ktime_get();
2220 +- int res;
2221 +-
2222 +- for(;;) {
2223 +- res = clockevents_program_event(bc, expires, now);
2224 +- if (!res || !force)
2225 +- return res;
2226 +- now = ktime_get();
2227 +- expires = ktime_add(now, ktime_set(0, bc->min_delta_ns));
2228 +- }
2229 ++
2230 ++ return tick_dev_program_event(bc, expires, force);
2231 + }
2232 +
2233 + int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
2234 +@@ -490,14 +490,52 @@ static void tick_broadcast_clear_oneshot(int cpu)
2235 + cpu_clear(cpu, tick_broadcast_oneshot_mask);
2236 + }
2237 +
2238 ++static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires)
2239 ++{
2240 ++ struct tick_device *td;
2241 ++ int cpu;
2242 ++
2243 ++ for_each_cpu_mask_nr(cpu, *mask) {
2244 ++ td = &per_cpu(tick_cpu_device, cpu);
2245 ++ if (td->evtdev)
2246 ++ td->evtdev->next_event = expires;
2247 ++ }
2248 ++}
2249 ++
2250 + /**
2251 + * tick_broadcast_setup_oneshot - setup the broadcast device
2252 + */
2253 + void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
2254 + {
2255 +- bc->event_handler = tick_handle_oneshot_broadcast;
2256 +- clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
2257 +- bc->next_event.tv64 = KTIME_MAX;
2258 ++ /* Set it up only once ! */
2259 ++ if (bc->event_handler != tick_handle_oneshot_broadcast) {
2260 ++ int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
2261 ++ int cpu = smp_processor_id();
2262 ++ cpumask_t mask;
2263 ++
2264 ++ bc->event_handler = tick_handle_oneshot_broadcast;
2265 ++ clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
2266 ++
2267 ++ /* Take the do_timer update */
2268 ++ tick_do_timer_cpu = cpu;
2269 ++
2270 ++ /*
2271 ++ * We must be careful here. There might be other CPUs
2272 ++ * waiting for periodic broadcast. We need to set the
2273 ++ * oneshot_mask bits for those and program the
2274 ++ * broadcast device to fire.
2275 ++ */
2276 ++ mask = tick_broadcast_mask;
2277 ++ cpu_clear(cpu, mask);
2278 ++ cpus_or(tick_broadcast_oneshot_mask,
2279 ++ tick_broadcast_oneshot_mask, mask);
2280 ++
2281 ++ if (was_periodic && !cpus_empty(mask)) {
2282 ++ tick_broadcast_init_next_event(&mask, tick_next_period);
2283 ++ tick_broadcast_set_event(tick_next_period, 1);
2284 ++ } else
2285 ++ bc->next_event.tv64 = KTIME_MAX;
2286 ++ }
2287 + }
2288 +
2289 + /*
2290 +diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
2291 +index 4f38865..5471cba 100644
2292 +--- a/kernel/time/tick-common.c
2293 ++++ b/kernel/time/tick-common.c
2294 +@@ -161,6 +161,7 @@ static void tick_setup_device(struct tick_device *td,
2295 + } else {
2296 + handler = td->evtdev->event_handler;
2297 + next_event = td->evtdev->next_event;
2298 ++ td->evtdev->event_handler = clockevents_handle_noop;
2299 + }
2300 +
2301 + td->evtdev = newdev;
2302 +diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
2303 +index f13f2b7..0ffc291 100644
2304 +--- a/kernel/time/tick-internal.h
2305 ++++ b/kernel/time/tick-internal.h
2306 +@@ -17,6 +17,8 @@ extern void tick_handle_periodic(struct clock_event_device *dev);
2307 + extern void tick_setup_oneshot(struct clock_event_device *newdev,
2308 + void (*handler)(struct clock_event_device *),
2309 + ktime_t nextevt);
2310 ++extern int tick_dev_program_event(struct clock_event_device *dev,
2311 ++ ktime_t expires, int force);
2312 + extern int tick_program_event(ktime_t expires, int force);
2313 + extern void tick_oneshot_notify(void);
2314 + extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *));
2315 +diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
2316 +index 450c049..2e8de67 100644
2317 +--- a/kernel/time/tick-oneshot.c
2318 ++++ b/kernel/time/tick-oneshot.c
2319 +@@ -23,24 +23,56 @@
2320 + #include "tick-internal.h"
2321 +
2322 + /**
2323 +- * tick_program_event
2324 ++ * tick_program_event internal worker function
2325 + */
2326 +-int tick_program_event(ktime_t expires, int force)
2327 ++int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
2328 ++ int force)
2329 + {
2330 +- struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
2331 + ktime_t now = ktime_get();
2332 ++ int i;
2333 +
2334 +- while (1) {
2335 ++ for (i = 0;;) {
2336 + int ret = clockevents_program_event(dev, expires, now);
2337 +
2338 + if (!ret || !force)
2339 + return ret;
2340 ++
2341 ++ /*
2342 ++ * We tried 2 times to program the device with the given
2343 ++ * min_delta_ns. If that's not working then we increase it
2344 ++ * by 50% and emit a warning.
2345 ++ */
2346 ++ if (++i > 2) {
2347 ++ /* Increase the min. delta and try again */
2348 ++ if (!dev->min_delta_ns)
2349 ++ dev->min_delta_ns = 5000;
2350 ++ else
2351 ++ dev->min_delta_ns += dev->min_delta_ns >> 1;
2352 ++
2353 ++ printk(KERN_WARNING
2354 ++ "CE: %s increasing min_delta_ns to %lu nsec\n",
2355 ++ dev->name ? dev->name : "?",
2356 ++ dev->min_delta_ns);
2357 ++
2358 ++ i = 0;
2359 ++ }
2360 ++
2361 + now = ktime_get();
2362 +- expires = ktime_add(now, ktime_set(0, dev->min_delta_ns));
2363 ++ expires = ktime_add_ns(now, dev->min_delta_ns);
2364 + }
2365 + }
2366 +
2367 + /**
2368 ++ * tick_program_event
2369 ++ */
2370 ++int tick_program_event(ktime_t expires, int force)
2371 ++{
2372 ++ struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
2373 ++
2374 ++ return tick_dev_program_event(dev, expires, force);
2375 ++}
2376 ++
2377 ++/**
2378 + * tick_resume_oneshot - resume oneshot mode
2379 + */
2380 + void tick_resume_oneshot(void)
2381 +@@ -61,7 +93,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
2382 + {
2383 + newdev->event_handler = handler;
2384 + clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT);
2385 +- clockevents_program_event(newdev, next_event, ktime_get());
2386 ++ tick_dev_program_event(newdev, next_event, 1);
2387 + }
2388 +
2389 + /**
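
tick_dev_program_event() also gains self-correction: if the device keeps rejecting events at its advertised minimum, every two failed attempts raise min_delta_ns by half (seeding it at 5000ns when it was zero), so the retry loop converges on a workable bound instead of spinning forever on hardware that understated it. The escalation sequence, rehearsed in plain C:

    #include <stdio.h>

    int main(void)
    {
            unsigned long min_delta_ns = 0;
            int round;

            /* applied after every 2 failed programming attempts */
            for (round = 0; round < 4; round++) {
                    if (!min_delta_ns)
                            min_delta_ns = 5000;
                    else
                            min_delta_ns += min_delta_ns >> 1;
                    printf("round %d: min_delta_ns=%lu\n",
                           round, min_delta_ns);
            }
            return 0;   /* prints 5000, 7500, 11250, 16875 */
    }
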
2390 +diff --git a/lib/scatterlist.c b/lib/scatterlist.c
2391 +index b80c211..8c11004 100644
2392 +--- a/lib/scatterlist.c
2393 ++++ b/lib/scatterlist.c
2394 +@@ -312,8 +312,9 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
2395 + struct scatterlist *sg;
2396 + size_t buf_off = 0;
2397 + int i;
2398 ++ unsigned long flags;
2399 +
2400 +- WARN_ON(!irqs_disabled());
2401 ++ local_irq_save(flags);
2402 +
2403 + for_each_sg(sgl, sg, nents, i) {
2404 + struct page *page;
2405 +@@ -358,6 +359,8 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
2406 + break;
2407 + }
2408 +
2409 ++ local_irq_restore(flags);
2410 ++
2411 + return buf_off;
2412 + }
2413 +
2414 +diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
2415 +index 3e744ab..4e8bd50 100644
2416 +--- a/mm/filemap_xip.c
2417 ++++ b/mm/filemap_xip.c
2418 +@@ -184,7 +184,7 @@ __xip_unmap (struct address_space * mapping,
2419 + address = vma->vm_start +
2420 + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
2421 + BUG_ON(address < vma->vm_start || address >= vma->vm_end);
2422 +- pte = page_check_address(page, mm, address, &ptl);
2423 ++ pte = page_check_address(page, mm, address, &ptl, 1);
2424 + if (pte) {
2425 + /* Nuke the page table entry. */
2426 + flush_cache_page(vma, address, pte_pfn(*pte));
2427 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2428 +index e46451e..ed1cfb1 100644
2429 +--- a/mm/memcontrol.c
2430 ++++ b/mm/memcontrol.c
2431 +@@ -250,6 +250,14 @@ static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
2432 +
2433 + struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
2434 + {
2435 ++ /*
2436 ++ * mm_update_next_owner() may clear mm->owner to NULL
2437 ++ * if it races with swapoff, page migration, etc.
2438 ++ * So this can be called with p == NULL.
2439 ++ */
2440 ++ if (unlikely(!p))
2441 ++ return NULL;
2442 ++
2443 + return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
2444 + struct mem_cgroup, css);
2445 + }
2446 +@@ -574,6 +582,11 @@ retry:
2447 +
2448 + rcu_read_lock();
2449 + mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
2450 ++ if (unlikely(!mem)) {
2451 ++ rcu_read_unlock();
2452 ++ kmem_cache_free(page_cgroup_cache, pc);
2453 ++ return 0;
2454 ++ }
2455 + /*
2456 + * For every charge from the cgroup, increment reference count
2457 + */
2458 +diff --git a/mm/mmzone.c b/mm/mmzone.c
2459 +index 486ed59..16ce8b9 100644
2460 +--- a/mm/mmzone.c
2461 ++++ b/mm/mmzone.c
2462 +@@ -69,6 +69,6 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
2463 + (z->zone && !zref_in_nodemask(z, nodes)))
2464 + z++;
2465 +
2466 +- *zone = zonelist_zone(z++);
2467 ++ *zone = zonelist_zone(z);
2468 + return z;
2469 + }
2470 +diff --git a/mm/rmap.c b/mm/rmap.c
2471 +index bf0a5b7..ded8f9e 100644
2472 +--- a/mm/rmap.c
2473 ++++ b/mm/rmap.c
2474 +@@ -223,10 +223,14 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
2475 + /*
2476 + * Check that @page is mapped at @address into @mm.
2477 + *
2478 ++ * If @sync is false, page_check_address may perform a racy check to avoid
2479 ++ * the page table lock when the pte is not present (helpful when reclaiming
2480 ++ * highly shared pages).
2481 ++ *
2482 + * On success returns with pte mapped and locked.
2483 + */
2484 + pte_t *page_check_address(struct page *page, struct mm_struct *mm,
2485 +- unsigned long address, spinlock_t **ptlp)
2486 ++ unsigned long address, spinlock_t **ptlp, int sync)
2487 + {
2488 + pgd_t *pgd;
2489 + pud_t *pud;
2490 +@@ -248,7 +252,7 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
2491 +
2492 + pte = pte_offset_map(pmd, address);
2493 + /* Make a quick check before getting the lock */
2494 +- if (!pte_present(*pte)) {
2495 ++ if (!sync && !pte_present(*pte)) {
2496 + pte_unmap(pte);
2497 + return NULL;
2498 + }
2499 +@@ -280,7 +284,7 @@ static int page_referenced_one(struct page *page,
2500 + if (address == -EFAULT)
2501 + goto out;
2502 +
2503 +- pte = page_check_address(page, mm, address, &ptl);
2504 ++ pte = page_check_address(page, mm, address, &ptl, 0);
2505 + if (!pte)
2506 + goto out;
2507 +
2508 +@@ -449,7 +453,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
2509 + if (address == -EFAULT)
2510 + goto out;
2511 +
2512 +- pte = page_check_address(page, mm, address, &ptl);
2513 ++ pte = page_check_address(page, mm, address, &ptl, 1);
2514 + if (!pte)
2515 + goto out;
2516 +
2517 +@@ -707,7 +711,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
2518 + if (address == -EFAULT)
2519 + goto out;
2520 +
2521 +- pte = page_check_address(page, mm, address, &ptl);
2522 ++ pte = page_check_address(page, mm, address, &ptl, 0);
2523 + if (!pte)
2524 + goto out;
2525 +
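
The new sync argument lets each caller choose between speed and certainty. Passing 0 keeps the old racy fast path: if the pte looks non-present in an unlocked peek, give up without taking the page table lock, which is cheap on highly shared pages and acceptable for best-effort walkers like page_referenced_one() and try_to_unmap_one(). Passing 1 forces the locked lookup so a pte that is merely being modified concurrently is still found, which page_mkclean_one() and __xip_unmap() need for correctness. The shape of the two call-site flavours (kernel context assumed, not standalone code):

    spinlock_t *ptl;
    pte_t *pte;

    /* best effort: may miss a transiently non-present pte */
    pte = page_check_address(page, mm, address, &ptl, 0);

    /* data integrity: always decide under the page table lock */
    pte = page_check_address(page, mm, address, &ptl, 1);
    if (pte) {
            /* ... operate on *pte ... */
            pte_unmap_unlock(pte, ptl);
    }
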
2526 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
2527 +index b6e7ec0..9ca32e6 100644
2528 +--- a/net/ipv4/udp.c
2529 ++++ b/net/ipv4/udp.c
2530 +@@ -950,6 +950,27 @@ int udp_disconnect(struct sock *sk, int flags)
2531 + return 0;
2532 + }
2533 +
2534 ++static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
2535 ++{
2536 ++ int is_udplite = IS_UDPLITE(sk);
2537 ++ int rc;
2538 ++
2539 ++ if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
2540 ++ /* Note that an ENOMEM error is charged twice */
2541 ++ if (rc == -ENOMEM)
2542 ++ UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS,
2543 ++ is_udplite);
2544 ++ goto drop;
2545 ++ }
2546 ++
2547 ++ return 0;
2548 ++
2549 ++drop:
2550 ++ UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
2551 ++ kfree_skb(skb);
2552 ++ return -1;
2553 ++}
2554 ++
2555 + /* returns:
2556 + * -1: error
2557 + * 0: success
2558 +@@ -988,9 +1009,7 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
2559 + up->encap_rcv != NULL) {
2560 + int ret;
2561 +
2562 +- bh_unlock_sock(sk);
2563 + ret = (*up->encap_rcv)(sk, skb);
2564 +- bh_lock_sock(sk);
2565 + if (ret <= 0) {
2566 + UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS,
2567 + is_udplite);
2568 +@@ -1042,14 +1061,16 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
2569 + goto drop;
2570 + }
2571 +
2572 +- if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
2573 +- /* Note that an ENOMEM error is charged twice */
2574 +- if (rc == -ENOMEM)
2575 +- UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite);
2576 +- goto drop;
2577 +- }
2578 ++ rc = 0;
2579 +
2580 +- return 0;
2581 ++ bh_lock_sock(sk);
2582 ++ if (!sock_owned_by_user(sk))
2583 ++ rc = __udp_queue_rcv_skb(sk, skb);
2584 ++ else
2585 ++ sk_add_backlog(sk, skb);
2586 ++ bh_unlock_sock(sk);
2587 ++
2588 ++ return rc;
2589 +
2590 + drop:
2591 + UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
2592 +@@ -1087,15 +1108,7 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
2593 + skb1 = skb_clone(skb, GFP_ATOMIC);
2594 +
2595 + if (skb1) {
2596 +- int ret = 0;
2597 +-
2598 +- bh_lock_sock(sk);
2599 +- if (!sock_owned_by_user(sk))
2600 +- ret = udp_queue_rcv_skb(sk, skb1);
2601 +- else
2602 +- sk_add_backlog(sk, skb1);
2603 +- bh_unlock_sock(sk);
2604 +-
2605 ++ int ret = udp_queue_rcv_skb(sk, skb1);
2606 + if (ret > 0)
2607 + /* we should probably re-process instead
2608 + * of dropping packets here. */
2609 +@@ -1188,13 +1201,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
2610 + uh->dest, inet_iif(skb), udptable);
2611 +
2612 + if (sk != NULL) {
2613 +- int ret = 0;
2614 +- bh_lock_sock(sk);
2615 +- if (!sock_owned_by_user(sk))
2616 +- ret = udp_queue_rcv_skb(sk, skb);
2617 +- else
2618 +- sk_add_backlog(sk, skb);
2619 +- bh_unlock_sock(sk);
2620 ++ int ret = udp_queue_rcv_skb(sk, skb);
2621 + sock_put(sk);
2622 +
2623 + /* a return value > 0 means to resubmit the input, but
2624 +@@ -1487,7 +1494,7 @@ struct proto udp_prot = {
2625 + .sendmsg = udp_sendmsg,
2626 + .recvmsg = udp_recvmsg,
2627 + .sendpage = udp_sendpage,
2628 +- .backlog_rcv = udp_queue_rcv_skb,
2629 ++ .backlog_rcv = __udp_queue_rcv_skb,
2630 + .hash = udp_lib_hash,
2631 + .unhash = udp_lib_unhash,
2632 + .get_port = udp_v4_get_port,
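
The UDP rework centralises the socket-lock/backlog dance that the unicast, multicast and encapsulation paths each open-coded, and it stops dropping the socket lock around the encap_rcv() callback (the lock is simply no longer held at that point). Deferred packets queued with sk_add_backlog() are replayed through the socket's .backlog_rcv hook once the owner releases the lock, which is why that hook now points at __udp_queue_rcv_skb, the half that does only the final queueing and statistics. The pattern in one place, annotated (these are the stock 2.6.26 socket-locking primitives):

    bh_lock_sock(sk);
    if (!sock_owned_by_user(sk))
            rc = __udp_queue_rcv_skb(sk, skb);  /* deliver right away */
    else
            sk_add_backlog(sk, skb);    /* a process owns the socket:
                                         * defer; replayed later via
                                         * sk->sk_prot->backlog_rcv */
    bh_unlock_sock(sk);
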
2633 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2634 +index d99f094..c3f6687 100644
2635 +--- a/net/ipv6/ip6_output.c
2636 ++++ b/net/ipv6/ip6_output.c
2637 +@@ -934,39 +934,39 @@ static int ip6_dst_lookup_tail(struct sock *sk,
2638 + }
2639 +
2640 + #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2641 +- /*
2642 +- * Here if the dst entry we've looked up
2643 +- * has a neighbour entry that is in the INCOMPLETE
2644 +- * state and the src address from the flow is
2645 +- * marked as OPTIMISTIC, we release the found
2646 +- * dst entry and replace it instead with the
2647 +- * dst entry of the nexthop router
2648 +- */
2649 +- if (!((*dst)->neighbour->nud_state & NUD_VALID)) {
2650 +- struct inet6_ifaddr *ifp;
2651 +- struct flowi fl_gw;
2652 +- int redirect;
2653 +-
2654 +- ifp = ipv6_get_ifaddr(net, &fl->fl6_src,
2655 +- (*dst)->dev, 1);
2656 +-
2657 +- redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
2658 +- if (ifp)
2659 +- in6_ifa_put(ifp);
2660 +-
2661 +- if (redirect) {
2662 +- /*
2663 +- * We need to get the dst entry for the
2664 +- * default router instead
2665 +- */
2666 +- dst_release(*dst);
2667 +- memcpy(&fl_gw, fl, sizeof(struct flowi));
2668 +- memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
2669 +- *dst = ip6_route_output(net, sk, &fl_gw);
2670 +- if ((err = (*dst)->error))
2671 +- goto out_err_release;
2672 +- }
2673 ++ /*
2674 ++ * Here if the dst entry we've looked up
2675 ++ * has a neighbour entry that is in the INCOMPLETE
2676 ++ * state and the src address from the flow is
2677 ++ * marked as OPTIMISTIC, we release the found
2678 ++ * dst entry and replace it instead with the
2679 ++ * dst entry of the nexthop router
2680 ++ */
2681 ++ if ((*dst)->neighbour && !((*dst)->neighbour->nud_state & NUD_VALID)) {
2682 ++ struct inet6_ifaddr *ifp;
2683 ++ struct flowi fl_gw;
2684 ++ int redirect;
2685 ++
2686 ++ ifp = ipv6_get_ifaddr(net, &fl->fl6_src,
2687 ++ (*dst)->dev, 1);
2688 ++
2689 ++ redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
2690 ++ if (ifp)
2691 ++ in6_ifa_put(ifp);
2692 ++
2693 ++ if (redirect) {
2694 ++ /*
2695 ++ * We need to get the dst entry for the
2696 ++ * default router instead
2697 ++ */
2698 ++ dst_release(*dst);
2699 ++ memcpy(&fl_gw, fl, sizeof(struct flowi));
2700 ++ memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
2701 ++ *dst = ip6_route_output(net, sk, &fl_gw);
2702 ++ if ((err = (*dst)->error))
2703 ++ goto out_err_release;
2704 + }
2705 ++ }
2706 + #endif
2707 +
2708 + return 0;
2709 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2710 +index 9deee59..990fef2 100644
2711 +--- a/net/ipv6/route.c
2712 ++++ b/net/ipv6/route.c
2713 +@@ -2718,6 +2718,8 @@ int __init ip6_route_init(void)
2714 + if (ret)
2715 + goto out_kmem_cache;
2716 +
2717 ++ ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
2718 ++
2719 + /* Registering of the loopback is done before this portion of code,
2720 + * the loopback reference in rt6_info will not be taken, do it
2721 + * manually for init_net */
2722 +diff --git a/net/key/af_key.c b/net/key/af_key.c
2723 +index 7470e36..49805ec 100644
2724 +--- a/net/key/af_key.c
2725 ++++ b/net/key/af_key.c
2726 +@@ -73,22 +73,18 @@ static int pfkey_can_dump(struct sock *sk)
2727 + return 0;
2728 + }
2729 +
2730 +-static int pfkey_do_dump(struct pfkey_sock *pfk)
2731 ++static void pfkey_terminate_dump(struct pfkey_sock *pfk)
2732 + {
2733 +- int rc;
2734 +-
2735 +- rc = pfk->dump.dump(pfk);
2736 +- if (rc == -ENOBUFS)
2737 +- return 0;
2738 +-
2739 +- pfk->dump.done(pfk);
2740 +- pfk->dump.dump = NULL;
2741 +- pfk->dump.done = NULL;
2742 +- return rc;
2743 ++ if (pfk->dump.dump) {
2744 ++ pfk->dump.done(pfk);
2745 ++ pfk->dump.dump = NULL;
2746 ++ pfk->dump.done = NULL;
2747 ++ }
2748 + }
2749 +
2750 + static void pfkey_sock_destruct(struct sock *sk)
2751 + {
2752 ++ pfkey_terminate_dump(pfkey_sk(sk));
2753 + skb_queue_purge(&sk->sk_receive_queue);
2754 +
2755 + if (!sock_flag(sk, SOCK_DEAD)) {
2756 +@@ -310,6 +306,18 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
2757 + return err;
2758 + }
2759 +
2760 ++static int pfkey_do_dump(struct pfkey_sock *pfk)
2761 ++{
2762 ++ int rc;
2763 ++
2764 ++ rc = pfk->dump.dump(pfk);
2765 ++ if (rc == -ENOBUFS)
2766 ++ return 0;
2767 ++
2768 ++ pfkey_terminate_dump(pfk);
2769 ++ return rc;
2770 ++}
2771 ++
2772 + static inline void pfkey_hdr_dup(struct sadb_msg *new, struct sadb_msg *orig)
2773 + {
2774 + *new = *orig;
2775 +diff --git a/net/sctp/associola.c b/net/sctp/associola.c
2776 +index 024c3eb..31ca4f4 100644
2777 +--- a/net/sctp/associola.c
2778 ++++ b/net/sctp/associola.c
2779 +@@ -597,11 +597,12 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
2780 + /* Check to see if this is a duplicate. */
2781 + peer = sctp_assoc_lookup_paddr(asoc, addr);
2782 + if (peer) {
2783 ++ /* An UNKNOWN state is only set on transports added by the
2784 ++ * user in an sctp_connectx() call. Such transports should be
2785 ++ * considered CONFIRMED per RFC 4960, Section 5.4.
2786 ++ */
2787 + if (peer->state == SCTP_UNKNOWN) {
2788 +- if (peer_state == SCTP_ACTIVE)
2789 +- peer->state = SCTP_ACTIVE;
2790 +- if (peer_state == SCTP_UNCONFIRMED)
2791 +- peer->state = SCTP_UNCONFIRMED;
2792 ++ peer->state = SCTP_ACTIVE;
2793 + }
2794 + return peer;
2795 + }
2796 +diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
2797 +index bbc7107..650f759 100644
2798 +--- a/net/sctp/sm_make_chunk.c
2799 ++++ b/net/sctp/sm_make_chunk.c
2800 +@@ -1886,11 +1886,13 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
2801 + /* if the peer reports AUTH, assume that he
2802 + * supports AUTH.
2803 + */
2804 +- asoc->peer.auth_capable = 1;
2805 ++ if (sctp_auth_enable)
2806 ++ asoc->peer.auth_capable = 1;
2807 + break;
2808 + case SCTP_CID_ASCONF:
2809 + case SCTP_CID_ASCONF_ACK:
2810 +- asoc->peer.asconf_capable = 1;
2811 ++ if (sctp_addip_enable)
2812 ++ asoc->peer.asconf_capable = 1;
2813 + break;
2814 + default:
2815 + break;
2816 +@@ -2319,12 +2321,10 @@ clean_up:
2817 + /* Release the transport structures. */
2818 + list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
2819 + transport = list_entry(pos, struct sctp_transport, transports);
2820 +- list_del_init(pos);
2821 +- sctp_transport_free(transport);
2822 ++ if (transport->state != SCTP_ACTIVE)
2823 ++ sctp_assoc_rm_peer(asoc, transport);
2824 + }
2825 +
2826 +- asoc->peer.transport_count = 0;
2827 +-
2828 + nomem:
2829 + return 0;
2830 + }
2831 +@@ -2455,6 +2455,9 @@ static int sctp_process_param(struct sctp_association *asoc,
2832 + break;
2833 +
2834 + case SCTP_PARAM_SET_PRIMARY:
2835 ++ if (!sctp_addip_enable)
2836 ++ goto fall_through;
2837 ++
2838 + addr_param = param.v + sizeof(sctp_addip_param_t);
2839 +
2840 + af = sctp_get_af_specific(param_type2af(param.p->type));
2841 +diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
2842 +index 3f964db..5360c86 100644
2843 +--- a/net/xfrm/xfrm_output.c
2844 ++++ b/net/xfrm/xfrm_output.c
2845 +@@ -27,10 +27,14 @@ static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
2846 + - skb_headroom(skb);
2847 + int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);
2848 +
2849 +- if (nhead > 0 || ntail > 0)
2850 +- return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
2851 +-
2852 +- return 0;
2853 ++ if (nhead <= 0) {
2854 ++ if (ntail <= 0)
2855 ++ return 0;
2856 ++ nhead = 0;
2857 ++ } else if (ntail < 0)
2858 ++ ntail = 0;
2859 ++
2860 ++ return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
2861 + }
2862 +
2863 + static int xfrm_output_one(struct sk_buff *skb, int err)
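
pskb_expand_head() takes amounts of head- and tailroom to add and expects both to be non-negative; the old code forwarded nhead and ntail as computed, so a transform that was short on headroom but had surplus tailroom (say nhead = 16, ntail = -8) requested a negative tail expansion. The rewrite clamps each deficit at zero before expanding. Rehearsing the branch logic on those cases:

    #include <stdio.h>

    static void check(int nhead, int ntail)
    {
            /* the clamping logic from the hunk above */
            if (nhead <= 0) {
                    if (ntail <= 0) {
                            printf("no expansion needed\n");
                            return;
                    }
                    nhead = 0;
            } else if (ntail < 0)
                    ntail = 0;

            printf("expand head by %d, tail by %d\n", nhead, ntail);
    }

    int main(void)
    {
            check(16, -8);  /* old code passed ntail = -8 through */
            check(-4, 12);  /* old code passed nhead = -4 through */
            check(-1, -1);  /* nothing to do */
            return 0;
    }
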
2864 +diff --git a/sound/core/pcm.c b/sound/core/pcm.c
2865 +index 9dd9bc7..ece25c7 100644
2866 +--- a/sound/core/pcm.c
2867 ++++ b/sound/core/pcm.c
2868 +@@ -781,7 +781,7 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
2869 + return -ENODEV;
2870 +
2871 + card = pcm->card;
2872 +- down_read(&card->controls_rwsem);
2873 ++ read_lock(&card->ctl_files_rwlock);
2874 + list_for_each_entry(kctl, &card->ctl_files, list) {
2875 + if (kctl->pid == current->pid) {
2876 + prefer_subdevice = kctl->prefer_pcm_subdevice;
2877 +@@ -789,7 +789,7 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
2878 + break;
2879 + }
2880 + }
2881 +- up_read(&card->controls_rwsem);
2882 ++ read_unlock(&card->ctl_files_rwlock);
2883 +
2884 + switch (stream) {
2885 + case SNDRV_PCM_STREAM_PLAYBACK:
2886 +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
2887 +index 61f5d42..225112b 100644
2888 +--- a/sound/core/pcm_native.c
2889 ++++ b/sound/core/pcm_native.c
2890 +@@ -1545,16 +1545,10 @@ static int snd_pcm_drop(struct snd_pcm_substream *substream)
2891 + card = substream->pcm->card;
2892 +
2893 + if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
2894 +- runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
2895 ++ runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED ||
2896 ++ runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
2897 + return -EBADFD;
2898 +
2899 +- snd_power_lock(card);
2900 +- if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
2901 +- result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
2902 +- if (result < 0)
2903 +- goto _unlock;
2904 +- }
2905 +-
2906 + snd_pcm_stream_lock_irq(substream);
2907 + /* resume pause */
2908 + if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
2909 +@@ -1563,8 +1557,7 @@ static int snd_pcm_drop(struct snd_pcm_substream *substream)
2910 + snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2911 + /* runtime->control->appl_ptr = runtime->status->hw_ptr; */
2912 + snd_pcm_stream_unlock_irq(substream);
2913 +- _unlock:
2914 +- snd_power_unlock(card);
2915 ++
2916 + return result;
2917 + }
2918 +
2919 +diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
2920 +index f7ea728..b917a9f 100644
2921 +--- a/sound/core/rawmidi.c
2922 ++++ b/sound/core/rawmidi.c
2923 +@@ -418,7 +418,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
2924 + mutex_lock(&rmidi->open_mutex);
2925 + while (1) {
2926 + subdevice = -1;
2927 +- down_read(&card->controls_rwsem);
2928 ++ read_lock(&card->ctl_files_rwlock);
2929 + list_for_each_entry(kctl, &card->ctl_files, list) {
2930 + if (kctl->pid == current->pid) {
2931 + subdevice = kctl->prefer_rawmidi_subdevice;
2932 +@@ -426,7 +426,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
2933 + break;
2934 + }
2935 + }
2936 +- up_read(&card->controls_rwsem);
2937 ++ read_unlock(&card->ctl_files_rwlock);
2938 + err = snd_rawmidi_kernel_open(rmidi->card, rmidi->device,
2939 + subdevice, fflags, rawmidi_file);
2940 + if (err >= 0)
2941 +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
2942 +index a4f44a0..7207759 100644
2943 +--- a/sound/pci/hda/patch_sigmatel.c
2944 ++++ b/sound/pci/hda/patch_sigmatel.c
2945 +@@ -1667,8 +1667,8 @@ static struct snd_pci_quirk stac927x_cfg_tbl[] = {
2946 + /* Dell 3 stack systems with verb table in BIOS */
2947 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS),
2948 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0227, "Dell Vostro 1400 ", STAC_DELL_BIOS),
2949 +- SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell ", STAC_DELL_BIOS),
2950 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022e, "Dell ", STAC_DELL_BIOS),
2951 ++ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell Inspiron 1525", STAC_DELL_3ST),
2952 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0242, "Dell ", STAC_DELL_BIOS),
2953 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0243, "Dell ", STAC_DELL_BIOS),
2954 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ff, "Dell ", STAC_DELL_BIOS),
2955 +diff --git a/sound/pci/oxygen/hifier.c b/sound/pci/oxygen/hifier.c
2956 +index 090dd43..841e45d 100644
2957 +--- a/sound/pci/oxygen/hifier.c
2958 ++++ b/sound/pci/oxygen/hifier.c
2959 +@@ -17,6 +17,7 @@
2960 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
2961 + */
2962 +
2963 ++#include <linux/delay.h>
2964 + #include <linux/pci.h>
2965 + #include <sound/control.h>
2966 + #include <sound/core.h>
2967 +@@ -95,6 +96,9 @@ static void set_ak4396_params(struct oxygen *chip,
2968 + else
2969 + value |= AK4396_DFS_QUAD;
2970 + data->ak4396_ctl2 = value;
2971 ++
2972 ++ msleep(1); /* wait for the new MCLK to become stable */
2973 ++
2974 + ak4396_write(chip, AK4396_CONTROL_1, AK4396_DIF_24_MSB);
2975 + ak4396_write(chip, AK4396_CONTROL_2, value);
2976 + ak4396_write(chip, AK4396_CONTROL_1, AK4396_DIF_24_MSB | AK4396_RSTN);
2977 +diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c
2978 +index 63f185c..6a59041 100644
2979 +--- a/sound/pci/oxygen/oxygen.c
2980 ++++ b/sound/pci/oxygen/oxygen.c
2981 +@@ -28,6 +28,7 @@
2982 + * GPIO 1 -> DFS1 of AK5385
2983 + */
2984 +
2985 ++#include <linux/delay.h>
2986 + #include <linux/mutex.h>
2987 + #include <linux/pci.h>
2988 + #include <sound/ac97_codec.h>
2989 +@@ -173,6 +174,9 @@ static void set_ak4396_params(struct oxygen *chip,
2990 + else
2991 + value |= AK4396_DFS_QUAD;
2992 + data->ak4396_ctl2 = value;
2993 ++
2994 ++ msleep(1); /* wait for the new MCLK to become stable */
2995 ++
2996 + for (i = 0; i < 4; ++i) {
2997 + ak4396_write(chip, i,
2998 + AK4396_CONTROL_1, AK4396_DIF_24_MSB);
2999 +diff --git a/sound/ppc/awacs.c b/sound/ppc/awacs.c
3000 +index 566a6d0..106c482 100644
3001 +--- a/sound/ppc/awacs.c
3002 ++++ b/sound/ppc/awacs.c
3003 +@@ -621,6 +621,13 @@ static struct snd_kcontrol_new snd_pmac_screamer_mixers_imac[] __initdata = {
3004 + AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0),
3005 + };
3006 +
3007 ++static struct snd_kcontrol_new snd_pmac_screamer_mixers_g4agp[] __initdata = {
3008 ++ AWACS_VOLUME("Line out Playback Volume", 2, 6, 1),
3009 ++ AWACS_VOLUME("Master Playback Volume", 5, 6, 1),
3010 ++ AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0),
3011 ++ AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_MIC, 0),
3012 ++};
3013 ++
3014 + static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac7500[] __initdata = {
3015 + AWACS_VOLUME("Line out Playback Volume", 2, 6, 1),
3016 + AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0),
3017 +@@ -688,7 +695,10 @@ static struct snd_kcontrol_new snd_pmac_awacs_speaker_vol[] __initdata = {
3018 + static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw __initdata =
3019 + AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_SPKMUTE, 1);
3020 +
3021 +-static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac __initdata =
3022 ++static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac1 __initdata =
3023 ++AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_PAROUT1, 1);
3024 ++
3025 ++static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac2 __initdata =
3026 + AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_PAROUT1, 0);
3027 +
3028 +
3029 +@@ -765,11 +775,12 @@ static void snd_pmac_awacs_resume(struct snd_pmac *chip)
3030 +
3031 + #define IS_PM7500 (machine_is_compatible("AAPL,7500"))
3032 + #define IS_BEIGE (machine_is_compatible("AAPL,Gossamer"))
3033 +-#define IS_IMAC (machine_is_compatible("PowerMac2,1") \
3034 +- || machine_is_compatible("PowerMac2,2") \
3035 ++#define IS_IMAC1 (machine_is_compatible("PowerMac2,1"))
3036 ++#define IS_IMAC2 (machine_is_compatible("PowerMac2,2") \
3037 + || machine_is_compatible("PowerMac4,1"))
3038 ++#define IS_G4AGP (machine_is_compatible("PowerMac3,1"))
3039 +
3040 +-static int imac;
3041 ++static int imac1, imac2;
3042 +
3043 + #ifdef PMAC_SUPPORT_AUTOMUTE
3044 + /*
3045 +@@ -815,13 +826,18 @@ static void snd_pmac_awacs_update_automute(struct snd_pmac *chip, int do_notify)
3046 + {
3047 + int reg = chip->awacs_reg[1]
3048 + | (MASK_HDMUTE | MASK_SPKMUTE);
3049 +- if (imac) {
3050 ++ if (imac1) {
3051 ++ reg &= ~MASK_SPKMUTE;
3052 ++ reg |= MASK_PAROUT1;
3053 ++ } else if (imac2) {
3054 + reg &= ~MASK_SPKMUTE;
3055 + reg &= ~MASK_PAROUT1;
3056 + }
3057 + if (snd_pmac_awacs_detect_headphone(chip))
3058 + reg &= ~MASK_HDMUTE;
3059 +- else if (imac)
3060 ++ else if (imac1)
3061 ++ reg &= ~MASK_PAROUT1;
3062 ++ else if (imac2)
3063 + reg |= MASK_PAROUT1;
3064 + else
3065 + reg &= ~MASK_SPKMUTE;
3066 +@@ -850,9 +866,13 @@ snd_pmac_awacs_init(struct snd_pmac *chip)
3067 + {
3068 + int pm7500 = IS_PM7500;
3069 + int beige = IS_BEIGE;
3070 ++ int g4agp = IS_G4AGP;
3071 ++ int imac;
3072 + int err, vol;
3073 +
3074 +- imac = IS_IMAC;
3075 ++ imac1 = IS_IMAC1;
3076 ++ imac2 = IS_IMAC2;
3077 ++ imac = imac1 || imac2;
3078 + /* looks like MASK_GAINLINE triggers something, so we set here
3079 + * as start-up
3080 + */
3081 +@@ -939,7 +959,7 @@ snd_pmac_awacs_init(struct snd_pmac *chip)
3082 + snd_pmac_awacs_mixers);
3083 + if (err < 0)
3084 + return err;
3085 +- if (beige)
3086 ++ if (beige || g4agp)
3087 + ;
3088 + else if (chip->model == PMAC_SCREAMER)
3089 + err = build_mixers(chip, ARRAY_SIZE(snd_pmac_screamer_mixers2),
3090 +@@ -961,13 +981,17 @@ snd_pmac_awacs_init(struct snd_pmac *chip)
3091 + err = build_mixers(chip,
3092 + ARRAY_SIZE(snd_pmac_screamer_mixers_imac),
3093 + snd_pmac_screamer_mixers_imac);
3094 ++ else if (g4agp)
3095 ++ err = build_mixers(chip,
3096 ++ ARRAY_SIZE(snd_pmac_screamer_mixers_g4agp),
3097 ++ snd_pmac_screamer_mixers_g4agp);
3098 + else
3099 + err = build_mixers(chip,
3100 + ARRAY_SIZE(snd_pmac_awacs_mixers_pmac),
3101 + snd_pmac_awacs_mixers_pmac);
3102 + if (err < 0)
3103 + return err;
3104 +- chip->master_sw_ctl = snd_ctl_new1((pm7500 || imac)
3105 ++ chip->master_sw_ctl = snd_ctl_new1((pm7500 || imac || g4agp)
3106 + ? &snd_pmac_awacs_master_sw_imac
3107 + : &snd_pmac_awacs_master_sw, chip);
3108 + err = snd_ctl_add(chip->card, chip->master_sw_ctl);
3109 +@@ -1004,15 +1028,17 @@ snd_pmac_awacs_init(struct snd_pmac *chip)
3110 + snd_pmac_awacs_speaker_vol);
3111 + if (err < 0)
3112 + return err;
3113 +- chip->speaker_sw_ctl = snd_ctl_new1(imac
3114 +- ? &snd_pmac_awacs_speaker_sw_imac
3115 ++ chip->speaker_sw_ctl = snd_ctl_new1(imac1
3116 ++ ? &snd_pmac_awacs_speaker_sw_imac1
3117 ++ : imac2
3118 ++ ? &snd_pmac_awacs_speaker_sw_imac2
3119 + : &snd_pmac_awacs_speaker_sw, chip);
3120 + err = snd_ctl_add(chip->card, chip->speaker_sw_ctl);
3121 + if (err < 0)
3122 + return err;
3123 + }
3124 +
3125 +- if (beige)
3126 ++ if (beige || g4agp)
3127 + err = build_mixers(chip,
3128 + ARRAY_SIZE(snd_pmac_screamer_mic_boost_beige),
3129 + snd_pmac_screamer_mic_boost_beige);