Gentoo Archives: gentoo-commits

From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r2050 - genpatches-2.6/trunk/3.0
Date: Sat, 07 Jan 2012 20:52:12
Message-Id: 20120107205159.8140E2004B@flycatcher.gentoo.org
1 Author: mpagano
2 Date: 2012-01-07 20:51:58 +0000 (Sat, 07 Jan 2012)
3 New Revision: 2050
4
5 Added:
6 genpatches-2.6/trunk/3.0/1015_linux-3.0.16.patch
7 Modified:
8 genpatches-2.6/trunk/3.0/0000_README
9 Log:
10 Linux patch 3.0.16
11
12 Modified: genpatches-2.6/trunk/3.0/0000_README
13 ===================================================================
14 --- genpatches-2.6/trunk/3.0/0000_README 2012-01-07 20:44:34 UTC (rev 2049)
15 +++ genpatches-2.6/trunk/3.0/0000_README 2012-01-07 20:51:58 UTC (rev 2050)
16 @@ -99,6 +99,10 @@
17 From: http://www.kernel.org
18 Desc: Linux 3.0.15
19
20 +Patch: 1015_linux-3.0.16.patch
21 +From: http://www.kernel.org
22 +Desc: Linux 3.0.16
23 +
24 Patch: 1800_fix-zcache-build.patch
25 From: http://bugs.gentoo.org/show_bug.cgi?id=376325
26 Desc: Fix zcache build error
27
28 Added: genpatches-2.6/trunk/3.0/1015_linux-3.0.16.patch
29 ===================================================================
30 --- genpatches-2.6/trunk/3.0/1015_linux-3.0.16.patch (rev 0)
31 +++ genpatches-2.6/trunk/3.0/1015_linux-3.0.16.patch 2012-01-07 20:51:58 UTC (rev 2050)
32 @@ -0,0 +1,3890 @@
33 +diff --git a/Makefile b/Makefile
34 +index 5b8c185..7f0d8e2 100644
35 +--- a/Makefile
36 ++++ b/Makefile
37 +@@ -1,6 +1,6 @@
38 + VERSION = 3
39 + PATCHLEVEL = 0
40 +-SUBLEVEL = 15
41 ++SUBLEVEL = 16
42 + EXTRAVERSION =
43 + NAME = Sneaky Weasel
44 +
45 +diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
46 +index 88bd6f7..c565971 100644
47 +--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
48 ++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
49 +@@ -133,7 +133,7 @@ static struct platform_device rx51_charger_device = {
50 + static void __init rx51_charger_init(void)
51 + {
52 + WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
53 +- GPIOF_OUT_INIT_LOW, "isp1704_reset"));
54 ++ GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
55 +
56 + platform_device_register(&rx51_charger_device);
57 + }
58 +diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
59 +index c074e66..4e0a371 100644
60 +--- a/arch/arm/oprofile/common.c
61 ++++ b/arch/arm/oprofile/common.c
62 +@@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
63 + return oprofile_perf_init(ops);
64 + }
65 +
66 +-void __exit oprofile_arch_exit(void)
67 ++void oprofile_arch_exit(void)
68 + {
69 + oprofile_perf_exit();
70 + }
71 +diff --git a/arch/arm/plat-mxc/pwm.c b/arch/arm/plat-mxc/pwm.c
72 +index 7a61ef8..f4b68be 100644
73 +--- a/arch/arm/plat-mxc/pwm.c
74 ++++ b/arch/arm/plat-mxc/pwm.c
75 +@@ -32,6 +32,9 @@
76 + #define MX3_PWMSAR 0x0C /* PWM Sample Register */
77 + #define MX3_PWMPR 0x10 /* PWM Period Register */
78 + #define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4)
79 ++#define MX3_PWMCR_DOZEEN (1 << 24)
80 ++#define MX3_PWMCR_WAITEN (1 << 23)
81 ++#define MX3_PWMCR_DBGEN (1 << 22)
82 + #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
83 + #define MX3_PWMCR_CLKSRC_IPG (1 << 16)
84 + #define MX3_PWMCR_EN (1 << 0)
85 +@@ -74,10 +77,21 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
86 + do_div(c, period_ns);
87 + duty_cycles = c;
88 +
89 ++ /*
90 ++ * according to imx pwm RM, the real period value should be
91 ++ * PERIOD value in PWMPR plus 2.
92 ++ */
93 ++ if (period_cycles > 2)
94 ++ period_cycles -= 2;
95 ++ else
96 ++ period_cycles = 0;
97 ++
98 + writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
99 + writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
100 +
101 +- cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
102 ++ cr = MX3_PWMCR_PRESCALER(prescale) |
103 ++ MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
104 ++ MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
105 +
106 + if (cpu_is_mx25())
107 + cr |= MX3_PWMCR_CLKSRC_IPG;
108 +diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
109 +index 0e358c2..422110a 100644
110 +--- a/arch/s390/oprofile/init.c
111 ++++ b/arch/s390/oprofile/init.c
112 +@@ -90,7 +90,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf,
113 + return -EINVAL;
114 +
115 + retval = oprofilefs_ulong_from_user(&val, buf, count);
116 +- if (retval)
117 ++ if (retval <= 0)
118 + return retval;
119 +
120 + if (oprofile_started)
121 +diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c
122 +index b4c2d2b..e4dd5d5 100644
123 +--- a/arch/sh/oprofile/common.c
124 ++++ b/arch/sh/oprofile/common.c
125 +@@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
126 + return oprofile_perf_init(ops);
127 + }
128 +
129 +-void __exit oprofile_arch_exit(void)
130 ++void oprofile_arch_exit(void)
131 + {
132 + oprofile_perf_exit();
133 + kfree(sh_pmu_op_name);
134 +@@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
135 + ops->backtrace = sh_backtrace;
136 + return -ENODEV;
137 + }
138 +-void __exit oprofile_arch_exit(void) {}
139 ++void oprofile_arch_exit(void) {}
140 + #endif /* CONFIG_HW_PERF_EVENTS */
141 +diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
142 +index 5b31a8e..a790cc6 100644
143 +--- a/arch/sparc/include/asm/pgtable_32.h
144 ++++ b/arch/sparc/include/asm/pgtable_32.h
145 +@@ -431,10 +431,6 @@ extern unsigned long *sparc_valid_addr_bitmap;
146 + #define kern_addr_valid(addr) \
147 + (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
148 +
149 +-extern int io_remap_pfn_range(struct vm_area_struct *vma,
150 +- unsigned long from, unsigned long pfn,
151 +- unsigned long size, pgprot_t prot);
152 +-
153 + /*
154 + * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
155 + * its high 4 bits. These macros/functions put it there or get it from there.
156 +@@ -443,6 +439,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma,
157 + #define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
158 + #define GET_PFN(pfn) (pfn & 0x0fffffffUL)
159 +
160 ++extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
161 ++ unsigned long, pgprot_t);
162 ++
163 ++static inline int io_remap_pfn_range(struct vm_area_struct *vma,
164 ++ unsigned long from, unsigned long pfn,
165 ++ unsigned long size, pgprot_t prot)
166 ++{
167 ++ unsigned long long offset, space, phys_base;
168 ++
169 ++ offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
170 ++ space = GET_IOSPACE(pfn);
171 ++ phys_base = offset | (space << 32ULL);
172 ++
173 ++ return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
174 ++}
175 ++
176 + #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
177 + #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
178 + ({ \
179 +diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
180 +index 1e03c5a..9822628 100644
181 +--- a/arch/sparc/include/asm/pgtable_64.h
182 ++++ b/arch/sparc/include/asm/pgtable_64.h
183 +@@ -750,10 +750,6 @@ static inline bool kern_addr_valid(unsigned long addr)
184 +
185 + extern int page_in_phys_avail(unsigned long paddr);
186 +
187 +-extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
188 +- unsigned long pfn,
189 +- unsigned long size, pgprot_t prot);
190 +-
191 + /*
192 + * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
193 + * its high 4 bits. These macros/functions put it there or get it from there.
194 +@@ -762,6 +758,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
195 + #define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
196 + #define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
197 +
198 ++extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
199 ++ unsigned long, pgprot_t);
200 ++
201 ++static inline int io_remap_pfn_range(struct vm_area_struct *vma,
202 ++ unsigned long from, unsigned long pfn,
203 ++ unsigned long size, pgprot_t prot)
204 ++{
205 ++ unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
206 ++ int space = GET_IOSPACE(pfn);
207 ++ unsigned long phys_base;
208 ++
209 ++ phys_base = offset | (((unsigned long) space) << 32UL);
210 ++
211 ++ return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
212 ++}
213 ++
214 + #include <asm-generic/pgtable.h>
215 +
216 + /* We provide our own get_unmapped_area to cope with VA holes and
217 +diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
218 +index e27f8ea..0c218e4 100644
219 +--- a/arch/sparc/kernel/entry.h
220 ++++ b/arch/sparc/kernel/entry.h
221 +@@ -42,6 +42,9 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
222 + extern void fpload(unsigned long *fpregs, unsigned long *fsr);
223 +
224 + #else /* CONFIG_SPARC32 */
225 ++
226 ++#include <asm/trap_block.h>
227 ++
228 + struct popc_3insn_patch_entry {
229 + unsigned int addr;
230 + unsigned int insns[3];
231 +@@ -57,6 +60,10 @@ extern struct popc_6insn_patch_entry __popc_6insn_patch,
232 + __popc_6insn_patch_end;
233 +
234 + extern void __init per_cpu_patch(void);
235 ++extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
236 ++ struct sun4v_1insn_patch_entry *);
237 ++extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
238 ++ struct sun4v_2insn_patch_entry *);
239 + extern void __init sun4v_patch(void);
240 + extern void __init boot_cpu_id_too_large(int cpu);
241 + extern unsigned int dcache_parity_tl1_occurred;
242 +diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
243 +index 99ba5ba..8172c18 100644
244 +--- a/arch/sparc/kernel/module.c
245 ++++ b/arch/sparc/kernel/module.c
246 +@@ -17,6 +17,8 @@
247 + #include <asm/processor.h>
248 + #include <asm/spitfire.h>
249 +
250 ++#include "entry.h"
251 ++
252 + #ifdef CONFIG_SPARC64
253 +
254 + #include <linux/jump_label.h>
255 +@@ -220,6 +222,29 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
256 + }
257 +
258 + #ifdef CONFIG_SPARC64
259 ++static void do_patch_sections(const Elf_Ehdr *hdr,
260 ++ const Elf_Shdr *sechdrs)
261 ++{
262 ++ const Elf_Shdr *s, *sun4v_1insn = NULL, *sun4v_2insn = NULL;
263 ++ char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
264 ++
265 ++ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
266 ++ if (!strcmp(".sun4v_1insn_patch", secstrings + s->sh_name))
267 ++ sun4v_1insn = s;
268 ++ if (!strcmp(".sun4v_2insn_patch", secstrings + s->sh_name))
269 ++ sun4v_2insn = s;
270 ++ }
271 ++
272 ++ if (sun4v_1insn && tlb_type == hypervisor) {
273 ++ void *p = (void *) sun4v_1insn->sh_addr;
274 ++ sun4v_patch_1insn_range(p, p + sun4v_1insn->sh_size);
275 ++ }
276 ++ if (sun4v_2insn && tlb_type == hypervisor) {
277 ++ void *p = (void *) sun4v_2insn->sh_addr;
278 ++ sun4v_patch_2insn_range(p, p + sun4v_2insn->sh_size);
279 ++ }
280 ++}
281 ++
282 + int module_finalize(const Elf_Ehdr *hdr,
283 + const Elf_Shdr *sechdrs,
284 + struct module *me)
285 +@@ -227,6 +252,8 @@ int module_finalize(const Elf_Ehdr *hdr,
286 + /* make jump label nops */
287 + jump_label_apply_nops(me);
288 +
289 ++ do_patch_sections(hdr, sechdrs);
290 ++
291 + /* Cheetah's I-cache is fully coherent. */
292 + if (tlb_type == spitfire) {
293 + unsigned long va;
294 +diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
295 +index b01a06e..9e73c4a 100644
296 +--- a/arch/sparc/kernel/pci_sun4v.c
297 ++++ b/arch/sparc/kernel/pci_sun4v.c
298 +@@ -848,10 +848,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
299 + if (!irq)
300 + return -ENOMEM;
301 +
302 +- if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
303 +- return -EINVAL;
304 + if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
305 + return -EINVAL;
306 ++ if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
307 ++ return -EINVAL;
308 +
309 + return irq;
310 + }
311 +diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
312 +index 3c5bb78..4e7d3ff 100644
313 +--- a/arch/sparc/kernel/setup_64.c
314 ++++ b/arch/sparc/kernel/setup_64.c
315 +@@ -234,40 +234,50 @@ void __init per_cpu_patch(void)
316 + }
317 + }
318 +
319 +-void __init sun4v_patch(void)
320 ++void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
321 ++ struct sun4v_1insn_patch_entry *end)
322 + {
323 +- extern void sun4v_hvapi_init(void);
324 +- struct sun4v_1insn_patch_entry *p1;
325 +- struct sun4v_2insn_patch_entry *p2;
326 +-
327 +- if (tlb_type != hypervisor)
328 +- return;
329 ++ while (start < end) {
330 ++ unsigned long addr = start->addr;
331 +
332 +- p1 = &__sun4v_1insn_patch;
333 +- while (p1 < &__sun4v_1insn_patch_end) {
334 +- unsigned long addr = p1->addr;
335 +-
336 +- *(unsigned int *) (addr + 0) = p1->insn;
337 ++ *(unsigned int *) (addr + 0) = start->insn;
338 + wmb();
339 + __asm__ __volatile__("flush %0" : : "r" (addr + 0));
340 +
341 +- p1++;
342 ++ start++;
343 + }
344 ++}
345 +
346 +- p2 = &__sun4v_2insn_patch;
347 +- while (p2 < &__sun4v_2insn_patch_end) {
348 +- unsigned long addr = p2->addr;
349 ++void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
350 ++ struct sun4v_2insn_patch_entry *end)
351 ++{
352 ++ while (start < end) {
353 ++ unsigned long addr = start->addr;
354 +
355 +- *(unsigned int *) (addr + 0) = p2->insns[0];
356 ++ *(unsigned int *) (addr + 0) = start->insns[0];
357 + wmb();
358 + __asm__ __volatile__("flush %0" : : "r" (addr + 0));
359 +
360 +- *(unsigned int *) (addr + 4) = p2->insns[1];
361 ++ *(unsigned int *) (addr + 4) = start->insns[1];
362 + wmb();
363 + __asm__ __volatile__("flush %0" : : "r" (addr + 4));
364 +
365 +- p2++;
366 ++ start++;
367 + }
368 ++}
369 ++
370 ++void __init sun4v_patch(void)
371 ++{
372 ++ extern void sun4v_hvapi_init(void);
373 ++
374 ++ if (tlb_type != hypervisor)
375 ++ return;
376 ++
377 ++ sun4v_patch_1insn_range(&__sun4v_1insn_patch,
378 ++ &__sun4v_1insn_patch_end);
379 ++
380 ++ sun4v_patch_2insn_range(&__sun4v_2insn_patch,
381 ++ &__sun4v_2insn_patch_end);
382 +
383 + sun4v_hvapi_init();
384 + }
385 +diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
386 +index 5d92488..2e58328 100644
387 +--- a/arch/sparc/kernel/signal32.c
388 ++++ b/arch/sparc/kernel/signal32.c
389 +@@ -829,21 +829,23 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
390 + * want to handle. Thus you cannot kill init even with a SIGKILL even by
391 + * mistake.
392 + */
393 +-void do_signal32(sigset_t *oldset, struct pt_regs * regs,
394 +- int restart_syscall, unsigned long orig_i0)
395 ++void do_signal32(sigset_t *oldset, struct pt_regs * regs)
396 + {
397 + struct k_sigaction ka;
398 ++ unsigned long orig_i0;
399 ++ int restart_syscall;
400 + siginfo_t info;
401 + int signr;
402 +
403 + signr = get_signal_to_deliver(&info, &ka, regs, NULL);
404 +
405 +- /* If the debugger messes with the program counter, it clears
406 +- * the "in syscall" bit, directing us to not perform a syscall
407 +- * restart.
408 +- */
409 +- if (restart_syscall && !pt_regs_is_syscall(regs))
410 +- restart_syscall = 0;
411 ++ restart_syscall = 0;
412 ++ orig_i0 = 0;
413 ++ if (pt_regs_is_syscall(regs) &&
414 ++ (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
415 ++ restart_syscall = 1;
416 ++ orig_i0 = regs->u_regs[UREG_G6];
417 ++ }
418 +
419 + if (signr > 0) {
420 + if (restart_syscall)
421 +diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
422 +index 04ede8f..2302567 100644
423 +--- a/arch/sparc/kernel/signal_32.c
424 ++++ b/arch/sparc/kernel/signal_32.c
425 +@@ -525,10 +525,26 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
426 + siginfo_t info;
427 + int signr;
428 +
429 ++ /* It's a lot of work and synchronization to add a new ptrace
430 ++ * register for GDB to save and restore in order to get
431 ++ * orig_i0 correct for syscall restarts when debugging.
432 ++ *
433 ++ * Although it should be the case that most of the global
434 ++ * registers are volatile across a system call, glibc already
435 ++ * depends upon that fact that we preserve them. So we can't
436 ++ * just use any global register to save away the orig_i0 value.
437 ++ *
438 ++ * In particular %g2, %g3, %g4, and %g5 are all assumed to be
439 ++ * preserved across a system call trap by various pieces of
440 ++ * code in glibc.
441 ++ *
442 ++ * %g7 is used as the "thread register". %g6 is not used in
443 ++ * any fixed manner. %g6 is used as a scratch register and
444 ++ * a compiler temporary, but it's value is never used across
445 ++ * a system call. Therefore %g6 is usable for orig_i0 storage.
446 ++ */
447 + if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C))
448 +- restart_syscall = 1;
449 +- else
450 +- restart_syscall = 0;
451 ++ regs->u_regs[UREG_G6] = orig_i0;
452 +
453 + if (test_thread_flag(TIF_RESTORE_SIGMASK))
454 + oldset = &current->saved_sigmask;
455 +@@ -541,8 +557,12 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
456 + * the software "in syscall" bit, directing us to not perform
457 + * a syscall restart.
458 + */
459 +- if (restart_syscall && !pt_regs_is_syscall(regs))
460 +- restart_syscall = 0;
461 ++ restart_syscall = 0;
462 ++ if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) {
463 ++ restart_syscall = 1;
464 ++ orig_i0 = regs->u_regs[UREG_G6];
465 ++ }
466 ++
467 +
468 + if (signr > 0) {
469 + if (restart_syscall)
470 +diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
471 +index 47509df..d58260b 100644
472 +--- a/arch/sparc/kernel/signal_64.c
473 ++++ b/arch/sparc/kernel/signal_64.c
474 +@@ -535,11 +535,27 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
475 + siginfo_t info;
476 + int signr;
477 +
478 ++ /* It's a lot of work and synchronization to add a new ptrace
479 ++ * register for GDB to save and restore in order to get
480 ++ * orig_i0 correct for syscall restarts when debugging.
481 ++ *
482 ++ * Although it should be the case that most of the global
483 ++ * registers are volatile across a system call, glibc already
484 ++ * depends upon that fact that we preserve them. So we can't
485 ++ * just use any global register to save away the orig_i0 value.
486 ++ *
487 ++ * In particular %g2, %g3, %g4, and %g5 are all assumed to be
488 ++ * preserved across a system call trap by various pieces of
489 ++ * code in glibc.
490 ++ *
491 ++ * %g7 is used as the "thread register". %g6 is not used in
492 ++ * any fixed manner. %g6 is used as a scratch register and
493 ++ * a compiler temporary, but it's value is never used across
494 ++ * a system call. Therefore %g6 is usable for orig_i0 storage.
495 ++ */
496 + if (pt_regs_is_syscall(regs) &&
497 +- (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
498 +- restart_syscall = 1;
499 +- } else
500 +- restart_syscall = 0;
501 ++ (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
502 ++ regs->u_regs[UREG_G6] = orig_i0;
503 +
504 + if (current_thread_info()->status & TS_RESTORE_SIGMASK)
505 + oldset = &current->saved_sigmask;
506 +@@ -548,22 +564,20 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
507 +
508 + #ifdef CONFIG_COMPAT
509 + if (test_thread_flag(TIF_32BIT)) {
510 +- extern void do_signal32(sigset_t *, struct pt_regs *,
511 +- int restart_syscall,
512 +- unsigned long orig_i0);
513 +- do_signal32(oldset, regs, restart_syscall, orig_i0);
514 ++ extern void do_signal32(sigset_t *, struct pt_regs *);
515 ++ do_signal32(oldset, regs);
516 + return;
517 + }
518 + #endif
519 +
520 + signr = get_signal_to_deliver(&info, &ka, regs, NULL);
521 +
522 +- /* If the debugger messes with the program counter, it clears
523 +- * the software "in syscall" bit, directing us to not perform
524 +- * a syscall restart.
525 +- */
526 +- if (restart_syscall && !pt_regs_is_syscall(regs))
527 +- restart_syscall = 0;
528 ++ restart_syscall = 0;
529 ++ if (pt_regs_is_syscall(regs) &&
530 ++ (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
531 ++ restart_syscall = 1;
532 ++ orig_i0 = regs->u_regs[UREG_G6];
533 ++ }
534 +
535 + if (signr > 0) {
536 + if (restart_syscall)
537 +diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
538 +index 3635771..9384a0c 100644
539 +--- a/arch/sparc/kernel/visemul.c
540 ++++ b/arch/sparc/kernel/visemul.c
541 +@@ -713,17 +713,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
542 + s16 b = (rs2 >> (i * 16)) & 0xffff;
543 +
544 + if (a > b)
545 +- rd_val |= 1 << i;
546 ++ rd_val |= 8 >> i;
547 + }
548 + break;
549 +
550 + case FCMPGT32_OPF:
551 + for (i = 0; i < 2; i++) {
552 +- s32 a = (rs1 >> (i * 32)) & 0xffff;
553 +- s32 b = (rs2 >> (i * 32)) & 0xffff;
554 ++ s32 a = (rs1 >> (i * 32)) & 0xffffffff;
555 ++ s32 b = (rs2 >> (i * 32)) & 0xffffffff;
556 +
557 + if (a > b)
558 +- rd_val |= 1 << i;
559 ++ rd_val |= 2 >> i;
560 + }
561 + break;
562 +
563 +@@ -733,17 +733,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
564 + s16 b = (rs2 >> (i * 16)) & 0xffff;
565 +
566 + if (a <= b)
567 +- rd_val |= 1 << i;
568 ++ rd_val |= 8 >> i;
569 + }
570 + break;
571 +
572 + case FCMPLE32_OPF:
573 + for (i = 0; i < 2; i++) {
574 +- s32 a = (rs1 >> (i * 32)) & 0xffff;
575 +- s32 b = (rs2 >> (i * 32)) & 0xffff;
576 ++ s32 a = (rs1 >> (i * 32)) & 0xffffffff;
577 ++ s32 b = (rs2 >> (i * 32)) & 0xffffffff;
578 +
579 + if (a <= b)
580 +- rd_val |= 1 << i;
581 ++ rd_val |= 2 >> i;
582 + }
583 + break;
584 +
585 +@@ -753,17 +753,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
586 + s16 b = (rs2 >> (i * 16)) & 0xffff;
587 +
588 + if (a != b)
589 +- rd_val |= 1 << i;
590 ++ rd_val |= 8 >> i;
591 + }
592 + break;
593 +
594 + case FCMPNE32_OPF:
595 + for (i = 0; i < 2; i++) {
596 +- s32 a = (rs1 >> (i * 32)) & 0xffff;
597 +- s32 b = (rs2 >> (i * 32)) & 0xffff;
598 ++ s32 a = (rs1 >> (i * 32)) & 0xffffffff;
599 ++ s32 b = (rs2 >> (i * 32)) & 0xffffffff;
600 +
601 + if (a != b)
602 +- rd_val |= 1 << i;
603 ++ rd_val |= 2 >> i;
604 + }
605 + break;
606 +
607 +@@ -773,17 +773,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
608 + s16 b = (rs2 >> (i * 16)) & 0xffff;
609 +
610 + if (a == b)
611 +- rd_val |= 1 << i;
612 ++ rd_val |= 8 >> i;
613 + }
614 + break;
615 +
616 + case FCMPEQ32_OPF:
617 + for (i = 0; i < 2; i++) {
618 +- s32 a = (rs1 >> (i * 32)) & 0xffff;
619 +- s32 b = (rs2 >> (i * 32)) & 0xffff;
620 ++ s32 a = (rs1 >> (i * 32)) & 0xffffffff;
621 ++ s32 b = (rs2 >> (i * 32)) & 0xffffffff;
622 +
623 + if (a == b)
624 +- rd_val |= 1 << i;
625 ++ rd_val |= 2 >> i;
626 + }
627 + break;
628 + }
629 +diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S
630 +index 34fe657..4d8c497 100644
631 +--- a/arch/sparc/lib/memcpy.S
632 ++++ b/arch/sparc/lib/memcpy.S
633 +@@ -7,40 +7,12 @@
634 + * Copyright (C) 1996 Jakub Jelinek (jj@××××××××××××××××.cz)
635 + */
636 +
637 +-#ifdef __KERNEL__
638 +-
639 +-#define FUNC(x) \
640 ++#define FUNC(x) \
641 + .globl x; \
642 + .type x,@function; \
643 +- .align 4; \
644 ++ .align 4; \
645 + x:
646 +
647 +-#undef FASTER_REVERSE
648 +-#undef FASTER_NONALIGNED
649 +-#define FASTER_ALIGNED
650 +-
651 +-/* In kernel these functions don't return a value.
652 +- * One should use macros in asm/string.h for that purpose.
653 +- * We return 0, so that bugs are more apparent.
654 +- */
655 +-#define SETUP_RETL
656 +-#define RETL_INSN clr %o0
657 +-
658 +-#else
659 +-
660 +-/* libc */
661 +-
662 +-#include "DEFS.h"
663 +-
664 +-#define FASTER_REVERSE
665 +-#define FASTER_NONALIGNED
666 +-#define FASTER_ALIGNED
667 +-
668 +-#define SETUP_RETL mov %o0, %g6
669 +-#define RETL_INSN mov %g6, %o0
670 +-
671 +-#endif
672 +-
673 + /* Both these macros have to start with exactly the same insn */
674 + #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
675 + ldd [%src + (offset) + 0x00], %t0; \
676 +@@ -164,30 +136,6 @@ x:
677 + .text
678 + .align 4
679 +
680 +-#ifdef FASTER_REVERSE
681 +-
682 +-70: /* rdword_align */
683 +-
684 +- andcc %o1, 1, %g0
685 +- be 4f
686 +- andcc %o1, 2, %g0
687 +-
688 +- ldub [%o1 - 1], %g2
689 +- sub %o1, 1, %o1
690 +- stb %g2, [%o0 - 1]
691 +- sub %o2, 1, %o2
692 +- be 3f
693 +- sub %o0, 1, %o0
694 +-4:
695 +- lduh [%o1 - 2], %g2
696 +- sub %o1, 2, %o1
697 +- sth %g2, [%o0 - 2]
698 +- sub %o2, 2, %o2
699 +- b 3f
700 +- sub %o0, 2, %o0
701 +-
702 +-#endif /* FASTER_REVERSE */
703 +-
704 + 0:
705 + retl
706 + nop ! Only bcopy returns here and it retuns void...
707 +@@ -198,7 +146,7 @@ FUNC(__memmove)
708 + #endif
709 + FUNC(memmove)
710 + cmp %o0, %o1
711 +- SETUP_RETL
712 ++ mov %o0, %g7
713 + bleu 9f
714 + sub %o0, %o1, %o4
715 +
716 +@@ -207,8 +155,6 @@ FUNC(memmove)
717 + bleu 0f
718 + andcc %o4, 3, %o5
719 +
720 +-#ifndef FASTER_REVERSE
721 +-
722 + add %o1, %o2, %o1
723 + add %o0, %o2, %o0
724 + sub %o1, 1, %o1
725 +@@ -224,295 +170,7 @@ FUNC(memmove)
726 + sub %o0, 1, %o0
727 +
728 + retl
729 +- RETL_INSN
730 +-
731 +-#else /* FASTER_REVERSE */
732 +-
733 +- add %o1, %o2, %o1
734 +- add %o0, %o2, %o0
735 +- bne 77f
736 +- cmp %o2, 15
737 +- bleu 91f
738 +- andcc %o1, 3, %g0
739 +- bne 70b
740 +-3:
741 +- andcc %o1, 4, %g0
742 +-
743 +- be 2f
744 +- mov %o2, %g1
745 +-
746 +- ld [%o1 - 4], %o4
747 +- sub %g1, 4, %g1
748 +- st %o4, [%o0 - 4]
749 +- sub %o1, 4, %o1
750 +- sub %o0, 4, %o0
751 +-2:
752 +- andcc %g1, 0xffffff80, %g7
753 +- be 3f
754 +- andcc %o0, 4, %g0
755 +-
756 +- be 74f + 4
757 +-5:
758 +- RMOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
759 +- RMOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
760 +- RMOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
761 +- RMOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
762 +- subcc %g7, 128, %g7
763 +- sub %o1, 128, %o1
764 +- bne 5b
765 +- sub %o0, 128, %o0
766 +-3:
767 +- andcc %g1, 0x70, %g7
768 +- be 72f
769 +- andcc %g1, 8, %g0
770 +-
771 +- sethi %hi(72f), %o5
772 +- srl %g7, 1, %o4
773 +- add %g7, %o4, %o4
774 +- sub %o1, %g7, %o1
775 +- sub %o5, %o4, %o5
776 +- jmpl %o5 + %lo(72f), %g0
777 +- sub %o0, %g7, %o0
778 +-
779 +-71: /* rmemcpy_table */
780 +- RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
781 +- RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
782 +- RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
783 +- RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
784 +- RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
785 +- RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
786 +- RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
787 +-
788 +-72: /* rmemcpy_table_end */
789 +-
790 +- be 73f
791 +- andcc %g1, 4, %g0
792 +-
793 +- ldd [%o1 - 0x08], %g2
794 +- sub %o0, 8, %o0
795 +- sub %o1, 8, %o1
796 +- st %g2, [%o0]
797 +- st %g3, [%o0 + 0x04]
798 +-
799 +-73: /* rmemcpy_last7 */
800 +-
801 +- be 1f
802 +- andcc %g1, 2, %g0
803 +-
804 +- ld [%o1 - 4], %g2
805 +- sub %o1, 4, %o1
806 +- st %g2, [%o0 - 4]
807 +- sub %o0, 4, %o0
808 +-1:
809 +- be 1f
810 +- andcc %g1, 1, %g0
811 +-
812 +- lduh [%o1 - 2], %g2
813 +- sub %o1, 2, %o1
814 +- sth %g2, [%o0 - 2]
815 +- sub %o0, 2, %o0
816 +-1:
817 +- be 1f
818 +- nop
819 +-
820 +- ldub [%o1 - 1], %g2
821 +- stb %g2, [%o0 - 1]
822 +-1:
823 +- retl
824 +- RETL_INSN
825 +-
826 +-74: /* rldd_std */
827 +- RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
828 +- RMOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
829 +- RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
830 +- RMOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
831 +- subcc %g7, 128, %g7
832 +- sub %o1, 128, %o1
833 +- bne 74b
834 +- sub %o0, 128, %o0
835 +-
836 +- andcc %g1, 0x70, %g7
837 +- be 72b
838 +- andcc %g1, 8, %g0
839 +-
840 +- sethi %hi(72b), %o5
841 +- srl %g7, 1, %o4
842 +- add %g7, %o4, %o4
843 +- sub %o1, %g7, %o1
844 +- sub %o5, %o4, %o5
845 +- jmpl %o5 + %lo(72b), %g0
846 +- sub %o0, %g7, %o0
847 +-
848 +-75: /* rshort_end */
849 +-
850 +- and %o2, 0xe, %o3
851 +-2:
852 +- sethi %hi(76f), %o5
853 +- sll %o3, 3, %o4
854 +- sub %o0, %o3, %o0
855 +- sub %o5, %o4, %o5
856 +- sub %o1, %o3, %o1
857 +- jmpl %o5 + %lo(76f), %g0
858 +- andcc %o2, 1, %g0
859 +-
860 +- RMOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
861 +- RMOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
862 +- RMOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
863 +- RMOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
864 +- RMOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
865 +- RMOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
866 +- RMOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
867 +-
868 +-76: /* rshort_table_end */
869 +-
870 +- be 1f
871 +- nop
872 +- ldub [%o1 - 1], %g2
873 +- stb %g2, [%o0 - 1]
874 +-1:
875 +- retl
876 +- RETL_INSN
877 +-
878 +-91: /* rshort_aligned_end */
879 +-
880 +- bne 75b
881 +- andcc %o2, 8, %g0
882 +-
883 +- be 1f
884 +- andcc %o2, 4, %g0
885 +-
886 +- ld [%o1 - 0x08], %g2
887 +- ld [%o1 - 0x04], %g3
888 +- sub %o1, 8, %o1
889 +- st %g2, [%o0 - 0x08]
890 +- st %g3, [%o0 - 0x04]
891 +- sub %o0, 8, %o0
892 +-1:
893 +- b 73b
894 +- mov %o2, %g1
895 +-
896 +-77: /* rnon_aligned */
897 +- cmp %o2, 15
898 +- bleu 75b
899 +- andcc %o0, 3, %g0
900 +- be 64f
901 +- andcc %o0, 1, %g0
902 +- be 63f
903 +- andcc %o0, 2, %g0
904 +- ldub [%o1 - 1], %g5
905 +- sub %o1, 1, %o1
906 +- stb %g5, [%o0 - 1]
907 +- sub %o0, 1, %o0
908 +- be 64f
909 +- sub %o2, 1, %o2
910 +-63:
911 +- ldub [%o1 - 1], %g5
912 +- sub %o1, 2, %o1
913 +- stb %g5, [%o0 - 1]
914 +- sub %o0, 2, %o0
915 +- ldub [%o1], %g5
916 +- sub %o2, 2, %o2
917 +- stb %g5, [%o0]
918 +-64:
919 +- and %o1, 3, %g2
920 +- and %o1, -4, %o1
921 +- and %o2, 0xc, %g3
922 +- add %o1, 4, %o1
923 +- cmp %g3, 4
924 +- sll %g2, 3, %g4
925 +- mov 32, %g2
926 +- be 4f
927 +- sub %g2, %g4, %g7
928 +-
929 +- blu 3f
930 +- cmp %g3, 8
931 +-
932 +- be 2f
933 +- srl %o2, 2, %g3
934 +-
935 +- ld [%o1 - 4], %o3
936 +- add %o0, -8, %o0
937 +- ld [%o1 - 8], %o4
938 +- add %o1, -16, %o1
939 +- b 7f
940 +- add %g3, 1, %g3
941 +-2:
942 +- ld [%o1 - 4], %o4
943 +- add %o0, -4, %o0
944 +- ld [%o1 - 8], %g1
945 +- add %o1, -12, %o1
946 +- b 8f
947 +- add %g3, 2, %g3
948 +-3:
949 +- ld [%o1 - 4], %o5
950 +- add %o0, -12, %o0
951 +- ld [%o1 - 8], %o3
952 +- add %o1, -20, %o1
953 +- b 6f
954 +- srl %o2, 2, %g3
955 +-4:
956 +- ld [%o1 - 4], %g1
957 +- srl %o2, 2, %g3
958 +- ld [%o1 - 8], %o5
959 +- add %o1, -24, %o1
960 +- add %o0, -16, %o0
961 +- add %g3, -1, %g3
962 +-
963 +- ld [%o1 + 12], %o3
964 +-5:
965 +- sll %o5, %g4, %g2
966 +- srl %g1, %g7, %g5
967 +- or %g2, %g5, %g2
968 +- st %g2, [%o0 + 12]
969 +-6:
970 +- ld [%o1 + 8], %o4
971 +- sll %o3, %g4, %g2
972 +- srl %o5, %g7, %g5
973 +- or %g2, %g5, %g2
974 +- st %g2, [%o0 + 8]
975 +-7:
976 +- ld [%o1 + 4], %g1
977 +- sll %o4, %g4, %g2
978 +- srl %o3, %g7, %g5
979 +- or %g2, %g5, %g2
980 +- st %g2, [%o0 + 4]
981 +-8:
982 +- ld [%o1], %o5
983 +- sll %g1, %g4, %g2
984 +- srl %o4, %g7, %g5
985 +- addcc %g3, -4, %g3
986 +- or %g2, %g5, %g2
987 +- add %o1, -16, %o1
988 +- st %g2, [%o0]
989 +- add %o0, -16, %o0
990 +- bne,a 5b
991 +- ld [%o1 + 12], %o3
992 +- sll %o5, %g4, %g2
993 +- srl %g1, %g7, %g5
994 +- srl %g4, 3, %g3
995 +- or %g2, %g5, %g2
996 +- add %o1, %g3, %o1
997 +- andcc %o2, 2, %g0
998 +- st %g2, [%o0 + 12]
999 +- be 1f
1000 +- andcc %o2, 1, %g0
1001 +-
1002 +- ldub [%o1 + 15], %g5
1003 +- add %o1, -2, %o1
1004 +- stb %g5, [%o0 + 11]
1005 +- add %o0, -2, %o0
1006 +- ldub [%o1 + 16], %g5
1007 +- stb %g5, [%o0 + 12]
1008 +-1:
1009 +- be 1f
1010 +- nop
1011 +- ldub [%o1 + 15], %g5
1012 +- stb %g5, [%o0 + 11]
1013 +-1:
1014 +- retl
1015 +- RETL_INSN
1016 +-
1017 +-#endif /* FASTER_REVERSE */
1018 ++ mov %g7, %o0
1019 +
1020 + /* NOTE: This code is executed just for the cases,
1021 + where %src (=%o1) & 3 is != 0.
1022 +@@ -546,7 +204,7 @@ FUNC(memmove)
1023 + FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
1024 +
1025 + sub %o0, %o1, %o4
1026 +- SETUP_RETL
1027 ++ mov %o0, %g7
1028 + 9:
1029 + andcc %o4, 3, %o5
1030 + 0:
1031 +@@ -569,7 +227,7 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
1032 + add %o1, 4, %o1
1033 + add %o0, 4, %o0
1034 + 2:
1035 +- andcc %g1, 0xffffff80, %g7
1036 ++ andcc %g1, 0xffffff80, %g0
1037 + be 3f
1038 + andcc %o0, 4, %g0
1039 +
1040 +@@ -579,22 +237,23 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
1041 + MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
1042 + MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
1043 + MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
1044 +- subcc %g7, 128, %g7
1045 ++ sub %g1, 128, %g1
1046 + add %o1, 128, %o1
1047 +- bne 5b
1048 ++ cmp %g1, 128
1049 ++ bge 5b
1050 + add %o0, 128, %o0
1051 + 3:
1052 +- andcc %g1, 0x70, %g7
1053 ++ andcc %g1, 0x70, %g4
1054 + be 80f
1055 + andcc %g1, 8, %g0
1056 +
1057 + sethi %hi(80f), %o5
1058 +- srl %g7, 1, %o4
1059 +- add %g7, %o4, %o4
1060 +- add %o1, %g7, %o1
1061 ++ srl %g4, 1, %o4
1062 ++ add %g4, %o4, %o4
1063 ++ add %o1, %g4, %o1
1064 + sub %o5, %o4, %o5
1065 + jmpl %o5 + %lo(80f), %g0
1066 +- add %o0, %g7, %o0
1067 ++ add %o0, %g4, %o0
1068 +
1069 + 79: /* memcpy_table */
1070 +
1071 +@@ -641,43 +300,28 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
1072 + stb %g2, [%o0]
1073 + 1:
1074 + retl
1075 +- RETL_INSN
1076 ++ mov %g7, %o0
1077 +
1078 + 82: /* ldd_std */
1079 + MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
1080 + MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
1081 + MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
1082 + MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
1083 +- subcc %g7, 128, %g7
1084 ++ subcc %g1, 128, %g1
1085 + add %o1, 128, %o1
1086 +- bne 82b
1087 ++ cmp %g1, 128
1088 ++ bge 82b
1089 + add %o0, 128, %o0
1090 +
1091 +-#ifndef FASTER_ALIGNED
1092 +-
1093 +- andcc %g1, 0x70, %g7
1094 +- be 80b
1095 +- andcc %g1, 8, %g0
1096 +-
1097 +- sethi %hi(80b), %o5
1098 +- srl %g7, 1, %o4
1099 +- add %g7, %o4, %o4
1100 +- add %o1, %g7, %o1
1101 +- sub %o5, %o4, %o5
1102 +- jmpl %o5 + %lo(80b), %g0
1103 +- add %o0, %g7, %o0
1104 +-
1105 +-#else /* FASTER_ALIGNED */
1106 +-
1107 +- andcc %g1, 0x70, %g7
1108 ++ andcc %g1, 0x70, %g4
1109 + be 84f
1110 + andcc %g1, 8, %g0
1111 +
1112 + sethi %hi(84f), %o5
1113 +- add %o1, %g7, %o1
1114 +- sub %o5, %g7, %o5
1115 ++ add %o1, %g4, %o1
1116 ++ sub %o5, %g4, %o5
1117 + jmpl %o5 + %lo(84f), %g0
1118 +- add %o0, %g7, %o0
1119 ++ add %o0, %g4, %o0
1120 +
1121 + 83: /* amemcpy_table */
1122 +
1123 +@@ -721,382 +365,132 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
1124 + stb %g2, [%o0]
1125 + 1:
1126 + retl
1127 +- RETL_INSN
1128 +-
1129 +-#endif /* FASTER_ALIGNED */
1130 ++ mov %g7, %o0
1131 +
1132 + 86: /* non_aligned */
1133 + cmp %o2, 6
1134 + bleu 88f
1135 ++ nop
1136 +
1137 +-#ifdef FASTER_NONALIGNED
1138 +-
1139 +- cmp %o2, 256
1140 +- bcc 87f
1141 +-
1142 +-#endif /* FASTER_NONALIGNED */
1143 +-
1144 +- andcc %o0, 3, %g0
1145 ++ save %sp, -96, %sp
1146 ++ andcc %i0, 3, %g0
1147 + be 61f
1148 +- andcc %o0, 1, %g0
1149 ++ andcc %i0, 1, %g0
1150 + be 60f
1151 +- andcc %o0, 2, %g0
1152 ++ andcc %i0, 2, %g0
1153 +
1154 +- ldub [%o1], %g5
1155 +- add %o1, 1, %o1
1156 +- stb %g5, [%o0]
1157 +- sub %o2, 1, %o2
1158 ++ ldub [%i1], %g5
1159 ++ add %i1, 1, %i1
1160 ++ stb %g5, [%i0]
1161 ++ sub %i2, 1, %i2
1162 + bne 61f
1163 +- add %o0, 1, %o0
1164 ++ add %i0, 1, %i0
1165 + 60:
1166 +- ldub [%o1], %g3
1167 +- add %o1, 2, %o1
1168 +- stb %g3, [%o0]
1169 +- sub %o2, 2, %o2
1170 +- ldub [%o1 - 1], %g3
1171 +- add %o0, 2, %o0
1172 +- stb %g3, [%o0 - 1]
1173 ++ ldub [%i1], %g3
1174 ++ add %i1, 2, %i1
1175 ++ stb %g3, [%i0]
1176 ++ sub %i2, 2, %i2
1177 ++ ldub [%i1 - 1], %g3
1178 ++ add %i0, 2, %i0
1179 ++ stb %g3, [%i0 - 1]
1180 + 61:
1181 +- and %o1, 3, %g2
1182 +- and %o2, 0xc, %g3
1183 +- and %o1, -4, %o1
1184 ++ and %i1, 3, %g2
1185 ++ and %i2, 0xc, %g3
1186 ++ and %i1, -4, %i1
1187 + cmp %g3, 4
1188 + sll %g2, 3, %g4
1189 + mov 32, %g2
1190 + be 4f
1191 +- sub %g2, %g4, %g7
1192 ++ sub %g2, %g4, %l0
1193 +
1194 + blu 3f
1195 + cmp %g3, 0x8
1196 +
1197 + be 2f
1198 +- srl %o2, 2, %g3
1199 ++ srl %i2, 2, %g3
1200 +
1201 +- ld [%o1], %o3
1202 +- add %o0, -8, %o0
1203 +- ld [%o1 + 4], %o4
1204 ++ ld [%i1], %i3
1205 ++ add %i0, -8, %i0
1206 ++ ld [%i1 + 4], %i4
1207 + b 8f
1208 + add %g3, 1, %g3
1209 + 2:
1210 +- ld [%o1], %o4
1211 +- add %o0, -12, %o0
1212 +- ld [%o1 + 4], %o5
1213 ++ ld [%i1], %i4
1214 ++ add %i0, -12, %i0
1215 ++ ld [%i1 + 4], %i5
1216 + add %g3, 2, %g3
1217 + b 9f
1218 +- add %o1, -4, %o1
1219 ++ add %i1, -4, %i1
1220 + 3:
1221 +- ld [%o1], %g1
1222 +- add %o0, -4, %o0
1223 +- ld [%o1 + 4], %o3
1224 +- srl %o2, 2, %g3
1225 ++ ld [%i1], %g1
1226 ++ add %i0, -4, %i0
1227 ++ ld [%i1 + 4], %i3
1228 ++ srl %i2, 2, %g3
1229 + b 7f
1230 +- add %o1, 4, %o1
1231 ++ add %i1, 4, %i1
1232 + 4:
1233 +- ld [%o1], %o5
1234 +- cmp %o2, 7
1235 +- ld [%o1 + 4], %g1
1236 +- srl %o2, 2, %g3
1237 ++ ld [%i1], %i5
1238 ++ cmp %i2, 7
1239 ++ ld [%i1 + 4], %g1
1240 ++ srl %i2, 2, %g3
1241 + bleu 10f
1242 +- add %o1, 8, %o1
1243 ++ add %i1, 8, %i1
1244 +
1245 +- ld [%o1], %o3
1246 ++ ld [%i1], %i3
1247 + add %g3, -1, %g3
1248 + 5:
1249 +- sll %o5, %g4, %g2
1250 +- srl %g1, %g7, %g5
1251 ++ sll %i5, %g4, %g2
1252 ++ srl %g1, %l0, %g5
1253 + or %g2, %g5, %g2
1254 +- st %g2, [%o0]
1255 ++ st %g2, [%i0]
1256 + 7:
1257 +- ld [%o1 + 4], %o4
1258 ++ ld [%i1 + 4], %i4
1259 + sll %g1, %g4, %g2
1260 +- srl %o3, %g7, %g5
1261 ++ srl %i3, %l0, %g5
1262 + or %g2, %g5, %g2
1263 +- st %g2, [%o0 + 4]
1264 ++ st %g2, [%i0 + 4]
1265 + 8:
1266 +- ld [%o1 + 8], %o5
1267 +- sll %o3, %g4, %g2
1268 +- srl %o4, %g7, %g5
1269 ++ ld [%i1 + 8], %i5
1270 ++ sll %i3, %g4, %g2
1271 ++ srl %i4, %l0, %g5
1272 + or %g2, %g5, %g2
1273 +- st %g2, [%o0 + 8]
1274 ++ st %g2, [%i0 + 8]
1275 + 9:
1276 +- ld [%o1 + 12], %g1
1277 +- sll %o4, %g4, %g2
1278 +- srl %o5, %g7, %g5
1279 ++ ld [%i1 + 12], %g1
1280 ++ sll %i4, %g4, %g2
1281 ++ srl %i5, %l0, %g5
1282 + addcc %g3, -4, %g3
1283 + or %g2, %g5, %g2
1284 +- add %o1, 16, %o1
1285 +- st %g2, [%o0 + 12]
1286 +- add %o0, 16, %o0
1287 ++ add %i1, 16, %i1
1288 ++ st %g2, [%i0 + 12]
1289 ++ add %i0, 16, %i0
1290 + bne,a 5b
1291 +- ld [%o1], %o3
1292 ++ ld [%i1], %i3
1293 + 10:
1294 +- sll %o5, %g4, %g2
1295 +- srl %g1, %g7, %g5
1296 +- srl %g7, 3, %g3
1297 ++ sll %i5, %g4, %g2
1298 ++ srl %g1, %l0, %g5
1299 ++ srl %l0, 3, %g3
1300 + or %g2, %g5, %g2
1301 +- sub %o1, %g3, %o1
1302 +- andcc %o2, 2, %g0
1303 +- st %g2, [%o0]
1304 ++ sub %i1, %g3, %i1
1305 ++ andcc %i2, 2, %g0
1306 ++ st %g2, [%i0]
1307 + be 1f
1308 +- andcc %o2, 1, %g0
1309 +-
1310 +- ldub [%o1], %g2
1311 +- add %o1, 2, %o1
1312 +- stb %g2, [%o0 + 4]
1313 +- add %o0, 2, %o0
1314 +- ldub [%o1 - 1], %g2
1315 +- stb %g2, [%o0 + 3]
1316 ++ andcc %i2, 1, %g0
1317 ++
1318 ++ ldub [%i1], %g2
1319 ++ add %i1, 2, %i1
1320 ++ stb %g2, [%i0 + 4]
1321 ++ add %i0, 2, %i0
1322 ++ ldub [%i1 - 1], %g2
1323 ++ stb %g2, [%i0 + 3]
1324 + 1:
1325 + be 1f
1326 + nop
1327 +- ldub [%o1], %g2
1328 +- stb %g2, [%o0 + 4]
1329 +-1:
1330 +- retl
1331 +- RETL_INSN
1332 +-
1333 +-#ifdef FASTER_NONALIGNED
1334 +-
1335 +-87: /* faster_nonaligned */
1336 +-
1337 +- andcc %o1, 3, %g0
1338 +- be 3f
1339 +- andcc %o1, 1, %g0
1340 +-
1341 +- be 4f
1342 +- andcc %o1, 2, %g0
1343 +-
1344 +- ldub [%o1], %g2
1345 +- add %o1, 1, %o1
1346 +- stb %g2, [%o0]
1347 +- sub %o2, 1, %o2
1348 +- bne 3f
1349 +- add %o0, 1, %o0
1350 +-4:
1351 +- lduh [%o1], %g2
1352 +- add %o1, 2, %o1
1353 +- srl %g2, 8, %g3
1354 +- sub %o2, 2, %o2
1355 +- stb %g3, [%o0]
1356 +- add %o0, 2, %o0
1357 +- stb %g2, [%o0 - 1]
1358 +-3:
1359 +- andcc %o1, 4, %g0
1360 +-
1361 +- bne 2f
1362 +- cmp %o5, 1
1363 +-
1364 +- ld [%o1], %o4
1365 +- srl %o4, 24, %g2
1366 +- stb %g2, [%o0]
1367 +- srl %o4, 16, %g3
1368 +- stb %g3, [%o0 + 1]
1369 +- srl %o4, 8, %g2
1370 +- stb %g2, [%o0 + 2]
1371 +- sub %o2, 4, %o2
1372 +- stb %o4, [%o0 + 3]
1373 +- add %o1, 4, %o1
1374 +- add %o0, 4, %o0
1375 +-2:
1376 +- be 33f
1377 +- cmp %o5, 2
1378 +- be 32f
1379 +- sub %o2, 4, %o2
1380 +-31:
1381 +- ld [%o1], %g2
1382 +- add %o1, 4, %o1
1383 +- srl %g2, 24, %g3
1384 +- and %o0, 7, %g5
1385 +- stb %g3, [%o0]
1386 +- cmp %g5, 7
1387 +- sll %g2, 8, %g1
1388 +- add %o0, 4, %o0
1389 +- be 41f
1390 +- and %o2, 0xffffffc0, %o3
1391 +- ld [%o0 - 7], %o4
1392 +-4:
1393 +- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1394 +- SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1395 +- SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1396 +- SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1397 +- subcc %o3, 64, %o3
1398 +- add %o1, 64, %o1
1399 +- bne 4b
1400 +- add %o0, 64, %o0
1401 +-
1402 +- andcc %o2, 0x30, %o3
1403 +- be,a 1f
1404 +- srl %g1, 16, %g2
1405 +-4:
1406 +- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1407 +- subcc %o3, 16, %o3
1408 +- add %o1, 16, %o1
1409 +- bne 4b
1410 +- add %o0, 16, %o0
1411 +-
1412 +- srl %g1, 16, %g2
1413 +-1:
1414 +- st %o4, [%o0 - 7]
1415 +- sth %g2, [%o0 - 3]
1416 +- srl %g1, 8, %g4
1417 +- b 88f
1418 +- stb %g4, [%o0 - 1]
1419 +-32:
1420 +- ld [%o1], %g2
1421 +- add %o1, 4, %o1
1422 +- srl %g2, 16, %g3
1423 +- and %o0, 7, %g5
1424 +- sth %g3, [%o0]
1425 +- cmp %g5, 6
1426 +- sll %g2, 16, %g1
1427 +- add %o0, 4, %o0
1428 +- be 42f
1429 +- and %o2, 0xffffffc0, %o3
1430 +- ld [%o0 - 6], %o4
1431 +-4:
1432 +- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1433 +- SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1434 +- SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1435 +- SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1436 +- subcc %o3, 64, %o3
1437 +- add %o1, 64, %o1
1438 +- bne 4b
1439 +- add %o0, 64, %o0
1440 +-
1441 +- andcc %o2, 0x30, %o3
1442 +- be,a 1f
1443 +- srl %g1, 16, %g2
1444 +-4:
1445 +- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1446 +- subcc %o3, 16, %o3
1447 +- add %o1, 16, %o1
1448 +- bne 4b
1449 +- add %o0, 16, %o0
1450 +-
1451 +- srl %g1, 16, %g2
1452 +-1:
1453 +- st %o4, [%o0 - 6]
1454 +- b 88f
1455 +- sth %g2, [%o0 - 2]
1456 +-33:
1457 +- ld [%o1], %g2
1458 +- sub %o2, 4, %o2
1459 +- srl %g2, 24, %g3
1460 +- and %o0, 7, %g5
1461 +- stb %g3, [%o0]
1462 +- cmp %g5, 5
1463 +- srl %g2, 8, %g4
1464 +- sll %g2, 24, %g1
1465 +- sth %g4, [%o0 + 1]
1466 +- add %o1, 4, %o1
1467 +- be 43f
1468 +- and %o2, 0xffffffc0, %o3
1469 +-
1470 +- ld [%o0 - 1], %o4
1471 +- add %o0, 4, %o0
1472 +-4:
1473 +- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1474 +- SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1475 +- SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1476 +- SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1477 +- subcc %o3, 64, %o3
1478 +- add %o1, 64, %o1
1479 +- bne 4b
1480 +- add %o0, 64, %o0
1481 +-
1482 +- andcc %o2, 0x30, %o3
1483 +- be,a 1f
1484 +- srl %g1, 24, %g2
1485 +-4:
1486 +- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1487 +- subcc %o3, 16, %o3
1488 +- add %o1, 16, %o1
1489 +- bne 4b
1490 +- add %o0, 16, %o0
1491 +-
1492 +- srl %g1, 24, %g2
1493 +-1:
1494 +- st %o4, [%o0 - 5]
1495 +- b 88f
1496 +- stb %g2, [%o0 - 1]
1497 +-41:
1498 +- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1499 +- SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1500 +- SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1501 +- SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1502 +- subcc %o3, 64, %o3
1503 +- add %o1, 64, %o1
1504 +- bne 41b
1505 +- add %o0, 64, %o0
1506 +-
1507 +- andcc %o2, 0x30, %o3
1508 +- be,a 1f
1509 +- srl %g1, 16, %g2
1510 +-4:
1511 +- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1512 +- subcc %o3, 16, %o3
1513 +- add %o1, 16, %o1
1514 +- bne 4b
1515 +- add %o0, 16, %o0
1516 +-
1517 +- srl %g1, 16, %g2
1518 ++ ldub [%i1], %g2
1519 ++ stb %g2, [%i0 + 4]
1520 + 1:
1521 +- sth %g2, [%o0 - 3]
1522 +- srl %g1, 8, %g4
1523 +- b 88f
1524 +- stb %g4, [%o0 - 1]
1525 +-43:
1526 +- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1527 +- SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1528 +- SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1529 +- SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1530 +- subcc %o3, 64, %o3
1531 +- add %o1, 64, %o1
1532 +- bne 43b
1533 +- add %o0, 64, %o0
1534 +-
1535 +- andcc %o2, 0x30, %o3
1536 +- be,a 1f
1537 +- srl %g1, 24, %g2
1538 +-4:
1539 +- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1540 +- subcc %o3, 16, %o3
1541 +- add %o1, 16, %o1
1542 +- bne 4b
1543 +- add %o0, 16, %o0
1544 +-
1545 +- srl %g1, 24, %g2
1546 +-1:
1547 +- stb %g2, [%o0 + 3]
1548 +- b 88f
1549 +- add %o0, 4, %o0
1550 +-42:
1551 +- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1552 +- SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1553 +- SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1554 +- SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1555 +- subcc %o3, 64, %o3
1556 +- add %o1, 64, %o1
1557 +- bne 42b
1558 +- add %o0, 64, %o0
1559 +-
1560 +- andcc %o2, 0x30, %o3
1561 +- be,a 1f
1562 +- srl %g1, 16, %g2
1563 +-4:
1564 +- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1565 +- subcc %o3, 16, %o3
1566 +- add %o1, 16, %o1
1567 +- bne 4b
1568 +- add %o0, 16, %o0
1569 +-
1570 +- srl %g1, 16, %g2
1571 +-1:
1572 +- sth %g2, [%o0 - 2]
1573 +-
1574 +- /* Fall through */
1575 +-
1576 +-#endif /* FASTER_NONALIGNED */
1577 ++ ret
1578 ++ restore %g7, %g0, %o0
1579 +
1580 + 88: /* short_end */
1581 +
1582 +@@ -1127,7 +521,7 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
1583 + stb %g2, [%o0]
1584 + 1:
1585 + retl
1586 +- RETL_INSN
1587 ++ mov %g7, %o0
1588 +
1589 + 90: /* short_aligned_end */
1590 + bne 88b
1591 +diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
1592 +index 79836a7..3b6e248 100644
1593 +--- a/arch/sparc/mm/Makefile
1594 ++++ b/arch/sparc/mm/Makefile
1595 +@@ -8,7 +8,6 @@ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
1596 + obj-y += fault_$(BITS).o
1597 + obj-y += init_$(BITS).o
1598 + obj-$(CONFIG_SPARC32) += loadmmu.o
1599 +-obj-y += generic_$(BITS).o
1600 + obj-$(CONFIG_SPARC32) += extable.o btfixup.o srmmu.o iommu.o io-unit.o
1601 + obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
1602 + obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
1603 +diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
1604 +index 5175ac2..8a7f817 100644
1605 +--- a/arch/sparc/mm/btfixup.c
1606 ++++ b/arch/sparc/mm/btfixup.c
1607 +@@ -302,8 +302,7 @@ void __init btfixup(void)
1608 + case 'i': /* INT */
1609 + if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
1610 + set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
1611 +- else if ((insn & 0x80002000) == 0x80002000 &&
1612 +- (insn & 0x01800000) != 0x01800000) /* %LO */
1613 ++ else if ((insn & 0x80002000) == 0x80002000) /* %LO */
1614 + set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
1615 + else {
1616 + prom_printf(insn_i, p, addr, insn);
1617 +diff --git a/arch/sparc/mm/generic_32.c b/arch/sparc/mm/generic_32.c
1618 +deleted file mode 100644
1619 +index e6067b7..0000000
1620 +--- a/arch/sparc/mm/generic_32.c
1621 ++++ /dev/null
1622 +@@ -1,98 +0,0 @@
1623 +-/*
1624 +- * generic.c: Generic Sparc mm routines that are not dependent upon
1625 +- * MMU type but are Sparc specific.
1626 +- *
1627 +- * Copyright (C) 1996 David S. Miller (davem@××××××××××××.edu)
1628 +- */
1629 +-
1630 +-#include <linux/kernel.h>
1631 +-#include <linux/mm.h>
1632 +-#include <linux/swap.h>
1633 +-#include <linux/pagemap.h>
1634 +-
1635 +-#include <asm/pgalloc.h>
1636 +-#include <asm/pgtable.h>
1637 +-#include <asm/page.h>
1638 +-#include <asm/cacheflush.h>
1639 +-#include <asm/tlbflush.h>
1640 +-
1641 +-/* Remap IO memory, the same way as remap_pfn_range(), but use
1642 +- * the obio memory space.
1643 +- *
1644 +- * They use a pgprot that sets PAGE_IO and does not check the
1645 +- * mem_map table as this is independent of normal memory.
1646 +- */
1647 +-static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
1648 +- unsigned long offset, pgprot_t prot, int space)
1649 +-{
1650 +- unsigned long end;
1651 +-
1652 +- address &= ~PMD_MASK;
1653 +- end = address + size;
1654 +- if (end > PMD_SIZE)
1655 +- end = PMD_SIZE;
1656 +- do {
1657 +- set_pte_at(mm, address, pte, mk_pte_io(offset, prot, space));
1658 +- address += PAGE_SIZE;
1659 +- offset += PAGE_SIZE;
1660 +- pte++;
1661 +- } while (address < end);
1662 +-}
1663 +-
1664 +-static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
1665 +- unsigned long offset, pgprot_t prot, int space)
1666 +-{
1667 +- unsigned long end;
1668 +-
1669 +- address &= ~PGDIR_MASK;
1670 +- end = address + size;
1671 +- if (end > PGDIR_SIZE)
1672 +- end = PGDIR_SIZE;
1673 +- offset -= address;
1674 +- do {
1675 +- pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
1676 +- if (!pte)
1677 +- return -ENOMEM;
1678 +- io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
1679 +- address = (address + PMD_SIZE) & PMD_MASK;
1680 +- pmd++;
1681 +- } while (address < end);
1682 +- return 0;
1683 +-}
1684 +-
1685 +-int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
1686 +- unsigned long pfn, unsigned long size, pgprot_t prot)
1687 +-{
1688 +- int error = 0;
1689 +- pgd_t * dir;
1690 +- unsigned long beg = from;
1691 +- unsigned long end = from + size;
1692 +- struct mm_struct *mm = vma->vm_mm;
1693 +- int space = GET_IOSPACE(pfn);
1694 +- unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
1695 +-
1696 +- /* See comment in mm/memory.c remap_pfn_range */
1697 +- vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
1698 +- vma->vm_pgoff = (offset >> PAGE_SHIFT) |
1699 +- ((unsigned long)space << 28UL);
1700 +-
1701 +- offset -= from;
1702 +- dir = pgd_offset(mm, from);
1703 +- flush_cache_range(vma, beg, end);
1704 +-
1705 +- while (from < end) {
1706 +- pmd_t *pmd = pmd_alloc(mm, dir, from);
1707 +- error = -ENOMEM;
1708 +- if (!pmd)
1709 +- break;
1710 +- error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
1711 +- if (error)
1712 +- break;
1713 +- from = (from + PGDIR_SIZE) & PGDIR_MASK;
1714 +- dir++;
1715 +- }
1716 +-
1717 +- flush_tlb_range(vma, beg, end);
1718 +- return error;
1719 +-}
1720 +-EXPORT_SYMBOL(io_remap_pfn_range);
1721 +diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c
1722 +deleted file mode 100644
1723 +index 3cb00df..0000000
1724 +--- a/arch/sparc/mm/generic_64.c
1725 ++++ /dev/null
1726 +@@ -1,164 +0,0 @@
1727 +-/*
1728 +- * generic.c: Generic Sparc mm routines that are not dependent upon
1729 +- * MMU type but are Sparc specific.
1730 +- *
1731 +- * Copyright (C) 1996 David S. Miller (davem@××××××××××××.edu)
1732 +- */
1733 +-
1734 +-#include <linux/kernel.h>
1735 +-#include <linux/mm.h>
1736 +-#include <linux/swap.h>
1737 +-#include <linux/pagemap.h>
1738 +-
1739 +-#include <asm/pgalloc.h>
1740 +-#include <asm/pgtable.h>
1741 +-#include <asm/page.h>
1742 +-#include <asm/tlbflush.h>
1743 +-
1744 +-/* Remap IO memory, the same way as remap_pfn_range(), but use
1745 +- * the obio memory space.
1746 +- *
1747 +- * They use a pgprot that sets PAGE_IO and does not check the
1748 +- * mem_map table as this is independent of normal memory.
1749 +- */
1750 +-static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
1751 +- unsigned long address,
1752 +- unsigned long size,
1753 +- unsigned long offset, pgprot_t prot,
1754 +- int space)
1755 +-{
1756 +- unsigned long end;
1757 +-
1758 +- /* clear hack bit that was used as a write_combine side-effect flag */
1759 +- offset &= ~0x1UL;
1760 +- address &= ~PMD_MASK;
1761 +- end = address + size;
1762 +- if (end > PMD_SIZE)
1763 +- end = PMD_SIZE;
1764 +- do {
1765 +- pte_t entry;
1766 +- unsigned long curend = address + PAGE_SIZE;
1767 +-
1768 +- entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
1769 +- if (!(address & 0xffff)) {
1770 +- if (PAGE_SIZE < (4 * 1024 * 1024) &&
1771 +- !(address & 0x3fffff) &&
1772 +- !(offset & 0x3ffffe) &&
1773 +- end >= address + 0x400000) {
1774 +- entry = mk_pte_io(offset, prot, space,
1775 +- 4 * 1024 * 1024);
1776 +- curend = address + 0x400000;
1777 +- offset += 0x400000;
1778 +- } else if (PAGE_SIZE < (512 * 1024) &&
1779 +- !(address & 0x7ffff) &&
1780 +- !(offset & 0x7fffe) &&
1781 +- end >= address + 0x80000) {
1782 +- entry = mk_pte_io(offset, prot, space,
1783 +- 512 * 1024 * 1024);
1784 +- curend = address + 0x80000;
1785 +- offset += 0x80000;
1786 +- } else if (PAGE_SIZE < (64 * 1024) &&
1787 +- !(offset & 0xfffe) &&
1788 +- end >= address + 0x10000) {
1789 +- entry = mk_pte_io(offset, prot, space,
1790 +- 64 * 1024);
1791 +- curend = address + 0x10000;
1792 +- offset += 0x10000;
1793 +- } else
1794 +- offset += PAGE_SIZE;
1795 +- } else
1796 +- offset += PAGE_SIZE;
1797 +-
1798 +- if (pte_write(entry))
1799 +- entry = pte_mkdirty(entry);
1800 +- do {
1801 +- BUG_ON(!pte_none(*pte));
1802 +- set_pte_at(mm, address, pte, entry);
1803 +- address += PAGE_SIZE;
1804 +- pte_val(entry) += PAGE_SIZE;
1805 +- pte++;
1806 +- } while (address < curend);
1807 +- } while (address < end);
1808 +-}
1809 +-
1810 +-static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
1811 +- unsigned long offset, pgprot_t prot, int space)
1812 +-{
1813 +- unsigned long end;
1814 +-
1815 +- address &= ~PGDIR_MASK;
1816 +- end = address + size;
1817 +- if (end > PGDIR_SIZE)
1818 +- end = PGDIR_SIZE;
1819 +- offset -= address;
1820 +- do {
1821 +- pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
1822 +- if (!pte)
1823 +- return -ENOMEM;
1824 +- io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
1825 +- pte_unmap(pte);
1826 +- address = (address + PMD_SIZE) & PMD_MASK;
1827 +- pmd++;
1828 +- } while (address < end);
1829 +- return 0;
1830 +-}
1831 +-
1832 +-static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
1833 +- unsigned long offset, pgprot_t prot, int space)
1834 +-{
1835 +- unsigned long end;
1836 +-
1837 +- address &= ~PUD_MASK;
1838 +- end = address + size;
1839 +- if (end > PUD_SIZE)
1840 +- end = PUD_SIZE;
1841 +- offset -= address;
1842 +- do {
1843 +- pmd_t *pmd = pmd_alloc(mm, pud, address);
1844 +- if (!pud)
1845 +- return -ENOMEM;
1846 +- io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
1847 +- address = (address + PUD_SIZE) & PUD_MASK;
1848 +- pud++;
1849 +- } while (address < end);
1850 +- return 0;
1851 +-}
1852 +-
1853 +-int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
1854 +- unsigned long pfn, unsigned long size, pgprot_t prot)
1855 +-{
1856 +- int error = 0;
1857 +- pgd_t * dir;
1858 +- unsigned long beg = from;
1859 +- unsigned long end = from + size;
1860 +- struct mm_struct *mm = vma->vm_mm;
1861 +- int space = GET_IOSPACE(pfn);
1862 +- unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
1863 +- unsigned long phys_base;
1864 +-
1865 +- phys_base = offset | (((unsigned long) space) << 32UL);
1866 +-
1867 +- /* See comment in mm/memory.c remap_pfn_range */
1868 +- vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
1869 +- vma->vm_pgoff = phys_base >> PAGE_SHIFT;
1870 +-
1871 +- offset -= from;
1872 +- dir = pgd_offset(mm, from);
1873 +- flush_cache_range(vma, beg, end);
1874 +-
1875 +- while (from < end) {
1876 +- pud_t *pud = pud_alloc(mm, dir, from);
1877 +- error = -ENOMEM;
1878 +- if (!pud)
1879 +- break;
1880 +- error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
1881 +- if (error)
1882 +- break;
1883 +- from = (from + PGDIR_SIZE) & PGDIR_MASK;
1884 +- dir++;
1885 +- }
1886 +-
1887 +- flush_tlb_range(vma, beg, end);
1888 +- return error;
1889 +-}
1890 +-EXPORT_SYMBOL(io_remap_pfn_range);
1891 +diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
1892 +index bfab3fa..7b65f75 100644
1893 +--- a/arch/x86/net/bpf_jit_comp.c
1894 ++++ b/arch/x86/net/bpf_jit_comp.c
1895 +@@ -568,8 +568,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
1896 + break;
1897 + }
1898 + if (filter[i].jt != 0) {
1899 +- if (filter[i].jf)
1900 +- t_offset += is_near(f_offset) ? 2 : 6;
1901 ++ if (filter[i].jf && f_offset)
1902 ++ t_offset += is_near(f_offset) ? 2 : 5;
1903 + EMIT_COND_JMP(t_op, t_offset);
1904 + if (filter[i].jf)
1905 + EMIT_JMP(f_offset);
1906 +diff --git a/block/blk-core.c b/block/blk-core.c
1907 +index 847d04e..35ae52d 100644
1908 +--- a/block/blk-core.c
1909 ++++ b/block/blk-core.c
1910 +@@ -418,6 +418,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
1911 + q->backing_dev_info.state = 0;
1912 + q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
1913 + q->backing_dev_info.name = "block";
1914 ++ q->node = node_id;
1915 +
1916 + err = bdi_init(&q->backing_dev_info);
1917 + if (err) {
1918 +@@ -502,7 +503,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
1919 + if (!uninit_q)
1920 + return NULL;
1921 +
1922 +- q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
1923 ++ q = blk_init_allocated_queue(uninit_q, rfn, lock);
1924 + if (!q)
1925 + blk_cleanup_queue(uninit_q);
1926 +
1927 +@@ -514,18 +515,9 @@ struct request_queue *
1928 + blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
1929 + spinlock_t *lock)
1930 + {
1931 +- return blk_init_allocated_queue_node(q, rfn, lock, -1);
1932 +-}
1933 +-EXPORT_SYMBOL(blk_init_allocated_queue);
1934 +-
1935 +-struct request_queue *
1936 +-blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
1937 +- spinlock_t *lock, int node_id)
1938 +-{
1939 + if (!q)
1940 + return NULL;
1941 +
1942 +- q->node = node_id;
1943 + if (blk_init_free_list(q))
1944 + return NULL;
1945 +
1946 +@@ -555,7 +547,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
1947 +
1948 + return NULL;
1949 + }
1950 +-EXPORT_SYMBOL(blk_init_allocated_queue_node);
1951 ++EXPORT_SYMBOL(blk_init_allocated_queue);
1952 +
1953 + int blk_get_queue(struct request_queue *q)
1954 + {
1955 +diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
1956 +index ae21919..23500ac 100644
1957 +--- a/block/cfq-iosched.c
1958 ++++ b/block/cfq-iosched.c
1959 +@@ -3169,7 +3169,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
1960 + }
1961 + }
1962 +
1963 +- if (ret)
1964 ++ if (ret && ret != -EEXIST)
1965 + printk(KERN_ERR "cfq: cic link failed!\n");
1966 +
1967 + return ret;
1968 +@@ -3185,6 +3185,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1969 + {
1970 + struct io_context *ioc = NULL;
1971 + struct cfq_io_context *cic;
1972 ++ int ret;
1973 +
1974 + might_sleep_if(gfp_mask & __GFP_WAIT);
1975 +
1976 +@@ -3192,6 +3193,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1977 + if (!ioc)
1978 + return NULL;
1979 +
1980 ++retry:
1981 + cic = cfq_cic_lookup(cfqd, ioc);
1982 + if (cic)
1983 + goto out;
1984 +@@ -3200,7 +3202,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1985 + if (cic == NULL)
1986 + goto err;
1987 +
1988 +- if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
1989 ++ ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
1990 ++ if (ret == -EEXIST) {
1991 ++ /* someone has linked cic to ioc already */
1992 ++ cfq_cic_free(cic);
1993 ++ goto retry;
1994 ++ } else if (ret)
1995 + goto err_free;
1996 +
1997 + out:
1998 +@@ -4015,6 +4022,11 @@ static void *cfq_init_queue(struct request_queue *q)
1999 +
2000 + if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
2001 + kfree(cfqg);
2002 ++
2003 ++ spin_lock(&cic_index_lock);
2004 ++ ida_remove(&cic_index_ida, cfqd->cic_index);
2005 ++ spin_unlock(&cic_index_lock);
2006 ++
2007 + kfree(cfqd);
2008 + return NULL;
2009 + }
2010 +diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
2011 +index 7eef6e1..ef16443 100644
2012 +--- a/drivers/gpu/drm/i915/i915_dma.c
2013 ++++ b/drivers/gpu/drm/i915/i915_dma.c
2014 +@@ -1451,6 +1451,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2015 +
2016 + diff1 = now - dev_priv->last_time1;
2017 +
2018 ++ /* Prevent division-by-zero if we are asking too fast.
2019 ++ * Also, we don't get interesting results if we are polling
2020 ++ * faster than once in 10ms, so just return the saved value
2021 ++ * in such cases.
2022 ++ */
2023 ++ if (diff1 <= 10)
2024 ++ return dev_priv->chipset_power;
2025 ++
2026 + count1 = I915_READ(DMIEC);
2027 + count2 = I915_READ(DDREC);
2028 + count3 = I915_READ(CSIEC);
2029 +@@ -1481,6 +1489,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2030 + dev_priv->last_count1 = total_count;
2031 + dev_priv->last_time1 = now;
2032 +
2033 ++ dev_priv->chipset_power = ret;
2034 ++
2035 + return ret;
2036 + }
2037 +
2038 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
2039 +index e0d0e27..335564e 100644
2040 +--- a/drivers/gpu/drm/i915/i915_drv.h
2041 ++++ b/drivers/gpu/drm/i915/i915_drv.h
2042 +@@ -702,6 +702,7 @@ typedef struct drm_i915_private {
2043 +
2044 + u64 last_count1;
2045 + unsigned long last_time1;
2046 ++ unsigned long chipset_power;
2047 + u64 last_count2;
2048 + struct timespec last_time2;
2049 + unsigned long gfx_power;
2050 +diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
2051 +index 21c5aa0..fe052c6 100644
2052 +--- a/drivers/gpu/drm/radeon/evergreen.c
2053 ++++ b/drivers/gpu/drm/radeon/evergreen.c
2054 +@@ -3257,6 +3257,18 @@ int evergreen_init(struct radeon_device *rdev)
2055 + rdev->accel_working = false;
2056 + }
2057 + }
2058 ++
2059 ++ /* Don't start up if the MC ucode is missing on BTC parts.
2060 ++ * The default clocks and voltages before the MC ucode
2061 ++ * is loaded are not suffient for advanced operations.
2062 ++ */
2063 ++ if (ASIC_IS_DCE5(rdev)) {
2064 ++ if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
2065 ++ DRM_ERROR("radeon: MC ucode required for NI+.\n");
2066 ++ return -EINVAL;
2067 ++ }
2068 ++ }
2069 ++
2070 + return 0;
2071 + }
2072 +
2073 +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
2074 +index 285acc4..a098edc 100644
2075 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c
2076 ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
2077 +@@ -2568,7 +2568,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
2078 +
2079 + rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
2080 + rdev->pm.current_clock_mode_index = 0;
2081 +- rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
2082 ++ if (rdev->pm.default_power_state_index >= 0)
2083 ++ rdev->pm.current_vddc =
2084 ++ rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
2085 ++ else
2086 ++ rdev->pm.current_vddc = 0;
2087 + }
2088 +
2089 + void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
2090 +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
2091 +index e06e045..6ad728f 100644
2092 +--- a/drivers/input/mouse/synaptics.c
2093 ++++ b/drivers/input/mouse/synaptics.c
2094 +@@ -24,6 +24,7 @@
2095 + */
2096 +
2097 + #include <linux/module.h>
2098 ++#include <linux/delay.h>
2099 + #include <linux/dmi.h>
2100 + #include <linux/input/mt.h>
2101 + #include <linux/serio.h>
2102 +@@ -760,6 +761,16 @@ static int synaptics_reconnect(struct psmouse *psmouse)
2103 +
2104 + do {
2105 + psmouse_reset(psmouse);
2106 ++ if (retry) {
2107 ++ /*
2108 ++ * On some boxes, right after resuming, the touchpad
2109 ++ * needs some time to finish initializing (I assume
2110 ++ * it needs time to calibrate) and start responding
2111 ++ * to Synaptics-specific queries, so let's wait a
2112 ++ * bit.
2113 ++ */
2114 ++ ssleep(1);
2115 ++ }
2116 + error = synaptics_detect(psmouse, 0);
2117 + } while (error && ++retry < 3);
2118 +
2119 +diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
2120 +index bdf19ad..e9babcb 100644
2121 +--- a/drivers/media/video/s5p-fimc/fimc-core.c
2122 ++++ b/drivers/media/video/s5p-fimc/fimc-core.c
2123 +@@ -36,7 +36,7 @@ static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
2124 + static struct fimc_fmt fimc_formats[] = {
2125 + {
2126 + .name = "RGB565",
2127 +- .fourcc = V4L2_PIX_FMT_RGB565X,
2128 ++ .fourcc = V4L2_PIX_FMT_RGB565,
2129 + .depth = { 16 },
2130 + .color = S5P_FIMC_RGB565,
2131 + .memplanes = 1,
2132 +diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
2133 +index b8f2a4e..f82413a 100644
2134 +--- a/drivers/mfd/twl-core.c
2135 ++++ b/drivers/mfd/twl-core.c
2136 +@@ -362,13 +362,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
2137 + pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
2138 + return -EPERM;
2139 + }
2140 +- sid = twl_map[mod_no].sid;
2141 +- twl = &twl_modules[sid];
2142 +-
2143 + if (unlikely(!inuse)) {
2144 +- pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
2145 ++ pr_err("%s: not initialized\n", DRIVER_NAME);
2146 + return -EPERM;
2147 + }
2148 ++ sid = twl_map[mod_no].sid;
2149 ++ twl = &twl_modules[sid];
2150 ++
2151 + mutex_lock(&twl->xfer_lock);
2152 + /*
2153 + * [MSG1]: fill the register address data
2154 +@@ -419,13 +419,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
2155 + pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
2156 + return -EPERM;
2157 + }
2158 +- sid = twl_map[mod_no].sid;
2159 +- twl = &twl_modules[sid];
2160 +-
2161 + if (unlikely(!inuse)) {
2162 +- pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
2163 ++ pr_err("%s: not initialized\n", DRIVER_NAME);
2164 + return -EPERM;
2165 + }
2166 ++ sid = twl_map[mod_no].sid;
2167 ++ twl = &twl_modules[sid];
2168 ++
2169 + mutex_lock(&twl->xfer_lock);
2170 + /* [MSG1] fill the register address data */
2171 + msg = &twl->xfer_msg[0];
2172 +diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
2173 +index 3941ddc..834f824 100644
2174 +--- a/drivers/mfd/twl4030-madc.c
2175 ++++ b/drivers/mfd/twl4030-madc.c
2176 +@@ -510,8 +510,9 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
2177 + u8 ch_msb, ch_lsb;
2178 + int ret;
2179 +
2180 +- if (!req)
2181 ++ if (!req || !twl4030_madc)
2182 + return -EINVAL;
2183 ++
2184 + mutex_lock(&twl4030_madc->lock);
2185 + if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) {
2186 + ret = -EINVAL;
2187 +@@ -530,13 +531,13 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
2188 + if (ret) {
2189 + dev_err(twl4030_madc->dev,
2190 + "unable to write sel register 0x%X\n", method->sel + 1);
2191 +- return ret;
2192 ++ goto out;
2193 + }
2194 + ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_lsb, method->sel);
2195 + if (ret) {
2196 + dev_err(twl4030_madc->dev,
2197 + "unable to write sel register 0x%X\n", method->sel + 1);
2198 +- return ret;
2199 ++ goto out;
2200 + }
2201 + /* Select averaging for all channels if do_avg is set */
2202 + if (req->do_avg) {
2203 +@@ -546,7 +547,7 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
2204 + dev_err(twl4030_madc->dev,
2205 + "unable to write avg register 0x%X\n",
2206 + method->avg + 1);
2207 +- return ret;
2208 ++ goto out;
2209 + }
2210 + ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
2211 + ch_lsb, method->avg);
2212 +@@ -554,7 +555,7 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
2213 + dev_err(twl4030_madc->dev,
2214 + "unable to write sel reg 0x%X\n",
2215 + method->sel + 1);
2216 +- return ret;
2217 ++ goto out;
2218 + }
2219 + }
2220 + if (req->type == TWL4030_MADC_IRQ_ONESHOT && req->func_cb != NULL) {
2221 +@@ -706,6 +707,8 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
2222 + if (!madc)
2223 + return -ENOMEM;
2224 +
2225 ++ madc->dev = &pdev->dev;
2226 ++
2227 + /*
2228 + * Phoenix provides 2 interrupt lines. The first one is connected to
2229 + * the OMAP. The other one can be connected to the other processor such
2230 +@@ -737,6 +740,28 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
2231 + TWL4030_BCI_BCICTL1);
2232 + goto err_i2c;
2233 + }
2234 ++
2235 ++ /* Check that MADC clock is on */
2236 ++ ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &regval, TWL4030_REG_GPBR1);
2237 ++ if (ret) {
2238 ++ dev_err(&pdev->dev, "unable to read reg GPBR1 0x%X\n",
2239 ++ TWL4030_REG_GPBR1);
2240 ++ goto err_i2c;
2241 ++ }
2242 ++
2243 ++ /* If MADC clk is not on, turn it on */
2244 ++ if (!(regval & TWL4030_GPBR1_MADC_HFCLK_EN)) {
2245 ++ dev_info(&pdev->dev, "clk disabled, enabling\n");
2246 ++ regval |= TWL4030_GPBR1_MADC_HFCLK_EN;
2247 ++ ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, regval,
2248 ++ TWL4030_REG_GPBR1);
2249 ++ if (ret) {
2250 ++ dev_err(&pdev->dev, "unable to write reg GPBR1 0x%X\n",
2251 ++ TWL4030_REG_GPBR1);
2252 ++ goto err_i2c;
2253 ++ }
2254 ++ }
2255 ++
2256 + platform_set_drvdata(pdev, madc);
2257 + mutex_init(&madc->lock);
2258 + ret = request_threaded_irq(platform_get_irq(pdev, 0), NULL,
2259 +diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
2260 +index fe14072..9394d0b 100644
2261 +--- a/drivers/mmc/host/mmci.c
2262 ++++ b/drivers/mmc/host/mmci.c
2263 +@@ -557,7 +557,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
2264 + unsigned int status)
2265 + {
2266 + /* First check for errors */
2267 +- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
2268 ++ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
2269 ++ MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
2270 + u32 remain, success;
2271 +
2272 + /* Terminate the DMA transfer */
2273 +@@ -636,8 +637,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
2274 + }
2275 +
2276 + if (!cmd->data || cmd->error) {
2277 +- if (host->data)
2278 ++ if (host->data) {
2279 ++ /* Terminate the DMA transfer */
2280 ++ if (dma_inprogress(host))
2281 ++ mmci_dma_data_error(host);
2282 + mmci_stop_data(host);
2283 ++ }
2284 + mmci_request_end(host, cmd->mrq);
2285 + } else if (!(cmd->data->flags & MMC_DATA_READ)) {
2286 + mmci_start_data(host, cmd->data);
2287 +@@ -837,8 +842,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
2288 + dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
2289 +
2290 + data = host->data;
2291 +- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
2292 +- MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
2293 ++ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
2294 ++ MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
2295 ++ MCI_DATABLOCKEND) && data)
2296 + mmci_data_irq(host, data, status);
2297 +
2298 + cmd = host->cmd;
2299 +diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
2300 +index d4455ff..52f4b64 100644
2301 +--- a/drivers/mmc/host/vub300.c
2302 ++++ b/drivers/mmc/host/vub300.c
2303 +@@ -259,7 +259,7 @@ static int firmware_rom_wait_states = 0x04;
2304 + static int firmware_rom_wait_states = 0x1C;
2305 + #endif
2306 +
2307 +-module_param(firmware_rom_wait_states, bool, 0644);
2308 ++module_param(firmware_rom_wait_states, int, 0644);
2309 + MODULE_PARM_DESC(firmware_rom_wait_states,
2310 + "ROM wait states byte=RRRIIEEE (Reserved Internal External)");
2311 +
2312 +diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
2313 +index 1286fe2..4b3a68b 100644
2314 +--- a/drivers/net/pptp.c
2315 ++++ b/drivers/net/pptp.c
2316 +@@ -418,10 +418,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
2317 + lock_sock(sk);
2318 +
2319 + opt->src_addr = sp->sa_addr.pptp;
2320 +- if (add_chan(po)) {
2321 +- release_sock(sk);
2322 ++ if (add_chan(po))
2323 + error = -EBUSY;
2324 +- }
2325 +
2326 + release_sock(sk);
2327 + return error;
2328 +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
2329 +index 5362306..a126a3e 100644
2330 +--- a/drivers/net/wireless/ath/ath9k/main.c
2331 ++++ b/drivers/net/wireless/ath/ath9k/main.c
2332 +@@ -1828,6 +1828,9 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
2333 + struct ath_softc *sc = hw->priv;
2334 + struct ath_node *an = (struct ath_node *) sta->drv_priv;
2335 +
2336 ++ if (!(sc->sc_flags & SC_OP_TXAGGR))
2337 ++ return;
2338 ++
2339 + switch (cmd) {
2340 + case STA_NOTIFY_SLEEP:
2341 + an->sleeping = true;
2342 +diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
2343 +index ba7f36a..ea35843 100644
2344 +--- a/drivers/net/wireless/ath/ath9k/rc.c
2345 ++++ b/drivers/net/wireless/ath/ath9k/rc.c
2346 +@@ -1252,7 +1252,9 @@ static void ath_rc_init(struct ath_softc *sc,
2347 +
2348 + ath_rc_priv->max_valid_rate = k;
2349 + ath_rc_sort_validrates(rate_table, ath_rc_priv);
2350 +- ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
2351 ++ ath_rc_priv->rate_max_phy = (k > 4) ?
2352 ++ ath_rc_priv->valid_rate_index[k-4] :
2353 ++ ath_rc_priv->valid_rate_index[k-1];
2354 + ath_rc_priv->rate_table = rate_table;
2355 +
2356 + ath_dbg(common, ATH_DBG_CONFIG,
2357 +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
2358 +index b849ad7..39a3c9c 100644
2359 +--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
2360 ++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
2361 +@@ -490,8 +490,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
2362 + if (ctx->ht.enabled) {
2363 + /* if HT40 is used, it should not change
2364 + * after associated except channel switch */
2365 +- if (iwl_is_associated_ctx(ctx) &&
2366 +- !ctx->ht.is_40mhz)
2367 ++ if (!ctx->ht.is_40mhz ||
2368 ++ !iwl_is_associated_ctx(ctx))
2369 + iwlagn_config_ht40(conf, ctx);
2370 + } else
2371 + ctx->ht.is_40mhz = false;
2372 +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
2373 +index 4974cd7..67cd2e3 100644
2374 +--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
2375 ++++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
2376 +@@ -385,7 +385,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
2377 + tx_cmd->tid_tspec = qc[0] & 0xf;
2378 + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2379 + } else {
2380 +- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2381 ++ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
2382 ++ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2383 ++ else
2384 ++ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2385 + }
2386 +
2387 + priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);
2388 +@@ -775,10 +778,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
2389 + iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
2390 + iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
2391 +
2392 +- /* Set up entry for this TFD in Tx byte-count array */
2393 +- if (info->flags & IEEE80211_TX_CTL_AMPDU)
2394 +- iwlagn_txq_update_byte_cnt_tbl(priv, txq,
2395 +- le16_to_cpu(tx_cmd->len));
2396 ++ iwlagn_txq_update_byte_cnt_tbl(priv, txq, le16_to_cpu(tx_cmd->len));
2397 +
2398 + pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
2399 + firstlen, PCI_DMA_BIDIRECTIONAL);
2400 +diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
2401 +index 89f6345..84a208d 100644
2402 +--- a/drivers/oprofile/oprofile_files.c
2403 ++++ b/drivers/oprofile/oprofile_files.c
2404 +@@ -45,7 +45,7 @@ static ssize_t timeout_write(struct file *file, char const __user *buf,
2405 + return -EINVAL;
2406 +
2407 + retval = oprofilefs_ulong_from_user(&val, buf, count);
2408 +- if (retval)
2409 ++ if (retval <= 0)
2410 + return retval;
2411 +
2412 + retval = oprofile_set_timeout(val);
2413 +@@ -84,7 +84,7 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou
2414 + return -EINVAL;
2415 +
2416 + retval = oprofilefs_ulong_from_user(&val, buf, count);
2417 +- if (retval)
2418 ++ if (retval <= 0)
2419 + return retval;
2420 +
2421 + retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
2422 +@@ -141,9 +141,10 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t co
2423 + return -EINVAL;
2424 +
2425 + retval = oprofilefs_ulong_from_user(&val, buf, count);
2426 +- if (retval)
2427 ++ if (retval <= 0)
2428 + return retval;
2429 +
2430 ++ retval = 0;
2431 + if (val)
2432 + retval = oprofile_start();
2433 + else
2434 +diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
2435 +index e9ff6f7..1c0b799 100644
2436 +--- a/drivers/oprofile/oprofilefs.c
2437 ++++ b/drivers/oprofile/oprofilefs.c
2438 +@@ -60,6 +60,13 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t cou
2439 + }
2440 +
2441 +
2442 ++/*
2443 ++ * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
2444 ++ * unchanged and might be uninitialized. This follows write syscall
2445 ++ * implementation when count is zero: "If count is zero ... [and if]
2446 ++ * no errors are detected, 0 will be returned without causing any
2447 ++ * other effect." (man 2 write)
2448 ++ */
2449 + int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
2450 + {
2451 + char tmpbuf[TMPBUFSIZE];
2452 +@@ -79,7 +86,7 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
2453 + spin_lock_irqsave(&oprofilefs_lock, flags);
2454 + *val = simple_strtoul(tmpbuf, NULL, 0);
2455 + spin_unlock_irqrestore(&oprofilefs_lock, flags);
2456 +- return 0;
2457 ++ return count;
2458 + }
2459 +
2460 +
2461 +@@ -99,7 +106,7 @@ static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_
2462 + return -EINVAL;
2463 +
2464 + retval = oprofilefs_ulong_from_user(&value, buf, count);
2465 +- if (retval)
2466 ++ if (retval <= 0)
2467 + return retval;
2468 +
2469 + retval = oprofile_set_ulong(file->private_data, value);
2470 +diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
2471 +index bbb6f85..eb4c883 100644
2472 +--- a/drivers/rtc/interface.c
2473 ++++ b/drivers/rtc/interface.c
2474 +@@ -318,20 +318,6 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
2475 + }
2476 + EXPORT_SYMBOL_GPL(rtc_read_alarm);
2477 +
2478 +-static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
2479 +-{
2480 +- int err;
2481 +-
2482 +- if (!rtc->ops)
2483 +- err = -ENODEV;
2484 +- else if (!rtc->ops->set_alarm)
2485 +- err = -EINVAL;
2486 +- else
2487 +- err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
2488 +-
2489 +- return err;
2490 +-}
2491 +-
2492 + static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
2493 + {
2494 + struct rtc_time tm;
2495 +@@ -355,7 +341,14 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
2496 + * over right here, before we set the alarm.
2497 + */
2498 +
2499 +- return ___rtc_set_alarm(rtc, alarm);
2500 ++ if (!rtc->ops)
2501 ++ err = -ENODEV;
2502 ++ else if (!rtc->ops->set_alarm)
2503 ++ err = -EINVAL;
2504 ++ else
2505 ++ err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
2506 ++
2507 ++ return err;
2508 + }
2509 +
2510 + int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
2511 +@@ -769,20 +762,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
2512 + return 0;
2513 + }
2514 +
2515 +-static void rtc_alarm_disable(struct rtc_device *rtc)
2516 +-{
2517 +- struct rtc_wkalrm alarm;
2518 +- struct rtc_time tm;
2519 +-
2520 +- __rtc_read_time(rtc, &tm);
2521 +-
2522 +- alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
2523 +- ktime_set(300, 0)));
2524 +- alarm.enabled = 0;
2525 +-
2526 +- ___rtc_set_alarm(rtc, &alarm);
2527 +-}
2528 +-
2529 + /**
2530 + * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
2531 + * @rtc rtc device
2532 +@@ -804,10 +783,8 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
2533 + struct rtc_wkalrm alarm;
2534 + int err;
2535 + next = timerqueue_getnext(&rtc->timerqueue);
2536 +- if (!next) {
2537 +- rtc_alarm_disable(rtc);
2538 ++ if (!next)
2539 + return;
2540 +- }
2541 + alarm.time = rtc_ktime_to_tm(next->expires);
2542 + alarm.enabled = 1;
2543 + err = __rtc_set_alarm(rtc, &alarm);
2544 +@@ -869,8 +846,7 @@ again:
2545 + err = __rtc_set_alarm(rtc, &alarm);
2546 + if (err == -ETIME)
2547 + goto again;
2548 +- } else
2549 +- rtc_alarm_disable(rtc);
2550 ++ }
2551 +
2552 + mutex_unlock(&rtc->ops_lock);
2553 + }
2554 +diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
2555 +index eda128f..64aedd8 100644
2556 +--- a/drivers/rtc/rtc-m41t80.c
2557 ++++ b/drivers/rtc/rtc-m41t80.c
2558 +@@ -357,10 +357,19 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
2559 + static struct rtc_class_ops m41t80_rtc_ops = {
2560 + .read_time = m41t80_rtc_read_time,
2561 + .set_time = m41t80_rtc_set_time,
2562 ++ /*
2563 ++ * XXX - m41t80 alarm functionality is reported broken.
2564 ++ * until it is fixed, don't register alarm functions.
2565 ++ *
2566 + .read_alarm = m41t80_rtc_read_alarm,
2567 + .set_alarm = m41t80_rtc_set_alarm,
2568 ++ */
2569 + .proc = m41t80_rtc_proc,
2570 ++ /*
2571 ++ * See above comment on broken alarm
2572 ++ *
2573 + .alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
2574 ++ */
2575 + };
2576 +
2577 + #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
2578 +diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
2579 +index 2a4991d..3a417df 100644
2580 +--- a/drivers/s390/scsi/zfcp_scsi.c
2581 ++++ b/drivers/s390/scsi/zfcp_scsi.c
2582 +@@ -57,6 +57,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
2583 + {
2584 + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2585 +
2586 ++ /* if previous slave_alloc returned early, there is nothing to do */
2587 ++ if (!zfcp_sdev->port)
2588 ++ return;
2589 ++
2590 + zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
2591 + put_device(&zfcp_sdev->port->dev);
2592 + }
2593 +diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
2594 +index 8885b3e..f829adc 100644
2595 +--- a/drivers/scsi/fcoe/fcoe.c
2596 ++++ b/drivers/scsi/fcoe/fcoe.c
2597 +@@ -1561,6 +1561,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
2598 + stats->InvalidCRCCount++;
2599 + if (stats->InvalidCRCCount < 5)
2600 + printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
2601 ++ put_cpu();
2602 + return -EINVAL;
2603 + }
2604 +
2605 +diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
2606 +index 83035bd..39e81cd 100644
2607 +--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
2608 ++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
2609 +@@ -1082,41 +1082,6 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
2610 + }
2611 +
2612 + /**
2613 +- * _base_save_msix_table - backup msix vector table
2614 +- * @ioc: per adapter object
2615 +- *
2616 +- * This address an errata where diag reset clears out the table
2617 +- */
2618 +-static void
2619 +-_base_save_msix_table(struct MPT2SAS_ADAPTER *ioc)
2620 +-{
2621 +- int i;
2622 +-
2623 +- if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
2624 +- return;
2625 +-
2626 +- for (i = 0; i < ioc->msix_vector_count; i++)
2627 +- ioc->msix_table_backup[i] = ioc->msix_table[i];
2628 +-}
2629 +-
2630 +-/**
2631 +- * _base_restore_msix_table - this restores the msix vector table
2632 +- * @ioc: per adapter object
2633 +- *
2634 +- */
2635 +-static void
2636 +-_base_restore_msix_table(struct MPT2SAS_ADAPTER *ioc)
2637 +-{
2638 +- int i;
2639 +-
2640 +- if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
2641 +- return;
2642 +-
2643 +- for (i = 0; i < ioc->msix_vector_count; i++)
2644 +- ioc->msix_table[i] = ioc->msix_table_backup[i];
2645 +-}
2646 +-
2647 +-/**
2648 + * _base_check_enable_msix - checks MSIX capabable.
2649 + * @ioc: per adapter object
2650 + *
2651 +@@ -1128,7 +1093,7 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
2652 + {
2653 + int base;
2654 + u16 message_control;
2655 +- u32 msix_table_offset;
2656 ++
2657 +
2658 + base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
2659 + if (!base) {
2660 +@@ -1141,14 +1106,8 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
2661 + pci_read_config_word(ioc->pdev, base + 2, &message_control);
2662 + ioc->msix_vector_count = (message_control & 0x3FF) + 1;
2663 +
2664 +- /* get msix table */
2665 +- pci_read_config_dword(ioc->pdev, base + 4, &msix_table_offset);
2666 +- msix_table_offset &= 0xFFFFFFF8;
2667 +- ioc->msix_table = (u32 *)((void *)ioc->chip + msix_table_offset);
2668 +-
2669 + dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
2670 +- "vector_count(%d), table_offset(0x%08x), table(%p)\n", ioc->name,
2671 +- ioc->msix_vector_count, msix_table_offset, ioc->msix_table));
2672 ++ "vector_count(%d)\n", ioc->name, ioc->msix_vector_count));
2673 + return 0;
2674 + }
2675 +
2676 +@@ -1162,8 +1121,6 @@ _base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
2677 + {
2678 + if (ioc->msix_enable) {
2679 + pci_disable_msix(ioc->pdev);
2680 +- kfree(ioc->msix_table_backup);
2681 +- ioc->msix_table_backup = NULL;
2682 + ioc->msix_enable = 0;
2683 + }
2684 + }
2685 +@@ -1189,14 +1146,6 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
2686 + if (_base_check_enable_msix(ioc) != 0)
2687 + goto try_ioapic;
2688 +
2689 +- ioc->msix_table_backup = kcalloc(ioc->msix_vector_count,
2690 +- sizeof(u32), GFP_KERNEL);
2691 +- if (!ioc->msix_table_backup) {
2692 +- dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
2693 +- "msix_table_backup failed!!!\n", ioc->name));
2694 +- goto try_ioapic;
2695 +- }
2696 +-
2697 + memset(&entries, 0, sizeof(struct msix_entry));
2698 + r = pci_enable_msix(ioc->pdev, &entries, 1);
2699 + if (r) {
2700 +@@ -3513,9 +3462,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2701 + u32 hcb_size;
2702 +
2703 + printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
2704 +-
2705 +- _base_save_msix_table(ioc);
2706 +-
2707 + drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n",
2708 + ioc->name));
2709 +
2710 +@@ -3611,7 +3557,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2711 + goto out;
2712 + }
2713 +
2714 +- _base_restore_msix_table(ioc);
2715 + printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
2716 + return 0;
2717 +
2718 +diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
2719 +index 41a57a7..e1735f9 100644
2720 +--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
2721 ++++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
2722 +@@ -626,8 +626,6 @@ struct mpt2sas_port_facts {
2723 + * @wait_for_port_enable_to_complete:
2724 + * @msix_enable: flag indicating msix is enabled
2725 + * @msix_vector_count: number msix vectors
2726 +- * @msix_table: virt address to the msix table
2727 +- * @msix_table_backup: backup msix table
2728 + * @scsi_io_cb_idx: shost generated commands
2729 + * @tm_cb_idx: task management commands
2730 + * @scsih_cb_idx: scsih internal commands
2731 +@@ -768,8 +766,6 @@ struct MPT2SAS_ADAPTER {
2732 +
2733 + u8 msix_enable;
2734 + u16 msix_vector_count;
2735 +- u32 *msix_table;
2736 +- u32 *msix_table_backup;
2737 + u32 ioc_reset_count;
2738 +
2739 + /* internal commands, callback index */
2740 +diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2741 +index 5690f09..f88e52a 100644
2742 +--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2743 ++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2744 +@@ -4145,7 +4145,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2745 + /* insert into event log */
2746 + sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
2747 + sizeof(Mpi2EventDataSasDeviceStatusChange_t);
2748 +- event_reply = kzalloc(sz, GFP_KERNEL);
2749 ++ event_reply = kzalloc(sz, GFP_ATOMIC);
2750 + if (!event_reply) {
2751 + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2752 + ioc->name, __FILE__, __LINE__, __func__);
2753 +diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
2754 +index d6620ad..c828151 100644
2755 +--- a/drivers/ssb/driver_pcicore.c
2756 ++++ b/drivers/ssb/driver_pcicore.c
2757 +@@ -516,10 +516,14 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
2758 +
2759 + static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
2760 + {
2761 +- ssb_pcicore_fix_sprom_core_index(pc);
2762 ++ struct ssb_device *pdev = pc->dev;
2763 ++ struct ssb_bus *bus = pdev->bus;
2764 ++
2765 ++ if (bus->bustype == SSB_BUSTYPE_PCI)
2766 ++ ssb_pcicore_fix_sprom_core_index(pc);
2767 +
2768 + /* Disable PCI interrupts. */
2769 +- ssb_write32(pc->dev, SSB_INTVEC, 0);
2770 ++ ssb_write32(pdev, SSB_INTVEC, 0);
2771 +
2772 + /* Additional PCIe always once-executed workarounds */
2773 + if (pc->dev->id.coreid == SSB_DEV_PCIE) {
2774 +diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
2775 +index 8cb2685..9cb60df 100644
2776 +--- a/drivers/watchdog/hpwdt.c
2777 ++++ b/drivers/watchdog/hpwdt.c
2778 +@@ -216,6 +216,7 @@ static int __devinit cru_detect(unsigned long map_entry,
2779 +
2780 + cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
2781 +
2782 ++ set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE));
2783 + asminline_call(&cmn_regs, bios32_entrypoint);
2784 +
2785 + if (cmn_regs.u1.ral != 0) {
2786 +@@ -233,8 +234,10 @@ static int __devinit cru_detect(unsigned long map_entry,
2787 + if ((physical_bios_base + physical_bios_offset)) {
2788 + cru_rom_addr =
2789 + ioremap(cru_physical_address, cru_length);
2790 +- if (cru_rom_addr)
2791 ++ if (cru_rom_addr) {
2792 ++ set_memory_x((unsigned long)cru_rom_addr, cru_length);
2793 + retval = 0;
2794 ++ }
2795 + }
2796 +
2797 + printk(KERN_DEBUG "hpwdt: CRU Base Address: 0x%lx\n",
2798 +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
2799 +index 84f317e..fd60dff 100644
2800 +--- a/drivers/xen/swiotlb-xen.c
2801 ++++ b/drivers/xen/swiotlb-xen.c
2802 +@@ -162,7 +162,7 @@ void __init xen_swiotlb_init(int verbose)
2803 + /*
2804 + * Get IO TLB memory from any location.
2805 + */
2806 +- xen_io_tlb_start = alloc_bootmem(bytes);
2807 ++ xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
2808 + if (!xen_io_tlb_start)
2809 + panic("Cannot allocate SWIOTLB buffer");
2810 +
2811 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
2812 +index e97dd21..87822a3 100644
2813 +--- a/fs/nfs/nfs4state.c
2814 ++++ b/fs/nfs/nfs4state.c
2815 +@@ -1519,16 +1519,16 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
2816 + {
2817 + if (!flags)
2818 + return;
2819 +- else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
2820 ++ if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
2821 + nfs41_handle_server_reboot(clp);
2822 +- else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
2823 ++ if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
2824 + SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
2825 + SEQ4_STATUS_ADMIN_STATE_REVOKED |
2826 + SEQ4_STATUS_LEASE_MOVED))
2827 + nfs41_handle_state_revoked(clp);
2828 +- else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
2829 ++ if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
2830 + nfs41_handle_recallable_state_revoked(clp);
2831 +- else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
2832 ++ if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
2833 + SEQ4_STATUS_BACKCHANNEL_FAULT |
2834 + SEQ4_STATUS_CB_PATH_DOWN_SESSION))
2835 + nfs41_handle_cb_path_down(clp);
2836 +diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
2837 +index 41d6743..3e65427 100644
2838 +--- a/fs/nilfs2/ioctl.c
2839 ++++ b/fs/nilfs2/ioctl.c
2840 +@@ -842,6 +842,19 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2841 + case FS_IOC32_GETVERSION:
2842 + cmd = FS_IOC_GETVERSION;
2843 + break;
2844 ++ case NILFS_IOCTL_CHANGE_CPMODE:
2845 ++ case NILFS_IOCTL_DELETE_CHECKPOINT:
2846 ++ case NILFS_IOCTL_GET_CPINFO:
2847 ++ case NILFS_IOCTL_GET_CPSTAT:
2848 ++ case NILFS_IOCTL_GET_SUINFO:
2849 ++ case NILFS_IOCTL_GET_SUSTAT:
2850 ++ case NILFS_IOCTL_GET_VINFO:
2851 ++ case NILFS_IOCTL_GET_BDESCS:
2852 ++ case NILFS_IOCTL_CLEAN_SEGMENTS:
2853 ++ case NILFS_IOCTL_SYNC:
2854 ++ case NILFS_IOCTL_RESIZE:
2855 ++ case NILFS_IOCTL_SET_ALLOC_RANGE:
2856 ++ break;
2857 + default:
2858 + return -ENOIOCTLCMD;
2859 + }
2860 +diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
2861 +index 28de70b..e6ac98c 100644
2862 +--- a/fs/xfs/linux-2.6/xfs_super.c
2863 ++++ b/fs/xfs/linux-2.6/xfs_super.c
2864 +@@ -871,27 +871,6 @@ xfs_fs_dirty_inode(
2865 + }
2866 +
2867 + STATIC int
2868 +-xfs_log_inode(
2869 +- struct xfs_inode *ip)
2870 +-{
2871 +- struct xfs_mount *mp = ip->i_mount;
2872 +- struct xfs_trans *tp;
2873 +- int error;
2874 +-
2875 +- tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
2876 +- error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
2877 +- if (error) {
2878 +- xfs_trans_cancel(tp, 0);
2879 +- return error;
2880 +- }
2881 +-
2882 +- xfs_ilock(ip, XFS_ILOCK_EXCL);
2883 +- xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
2884 +- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2885 +- return xfs_trans_commit(tp, 0);
2886 +-}
2887 +-
2888 +-STATIC int
2889 + xfs_fs_write_inode(
2890 + struct inode *inode,
2891 + struct writeback_control *wbc)
2892 +@@ -904,10 +883,8 @@ xfs_fs_write_inode(
2893 +
2894 + if (XFS_FORCED_SHUTDOWN(mp))
2895 + return -XFS_ERROR(EIO);
2896 +- if (!ip->i_update_core)
2897 +- return 0;
2898 +
2899 +- if (wbc->sync_mode == WB_SYNC_ALL) {
2900 ++ if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) {
2901 + /*
2902 + * Make sure the inode has made it it into the log. Instead
2903 + * of forcing it all the way to stable storage using a
2904 +@@ -916,11 +893,14 @@ xfs_fs_write_inode(
2905 + * of synchronous log foces dramatically.
2906 + */
2907 + xfs_ioend_wait(ip);
2908 +- error = xfs_log_inode(ip);
2909 ++ error = xfs_log_dirty_inode(ip, NULL, 0);
2910 + if (error)
2911 + goto out;
2912 + return 0;
2913 + } else {
2914 ++ if (!ip->i_update_core)
2915 ++ return 0;
2916 ++
2917 + /*
2918 + * We make this non-blocking if the inode is contended, return
2919 + * EAGAIN to indicate to the caller that they did not succeed.
2920 +diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
2921 +index b69688d..2f277a0 100644
2922 +--- a/fs/xfs/linux-2.6/xfs_sync.c
2923 ++++ b/fs/xfs/linux-2.6/xfs_sync.c
2924 +@@ -336,6 +336,32 @@ xfs_sync_fsdata(
2925 + return xfs_bwrite(mp, bp);
2926 + }
2927 +
2928 ++int
2929 ++xfs_log_dirty_inode(
2930 ++ struct xfs_inode *ip,
2931 ++ struct xfs_perag *pag,
2932 ++ int flags)
2933 ++{
2934 ++ struct xfs_mount *mp = ip->i_mount;
2935 ++ struct xfs_trans *tp;
2936 ++ int error;
2937 ++
2938 ++ if (!ip->i_update_core)
2939 ++ return 0;
2940 ++
2941 ++ tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
2942 ++ error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
2943 ++ if (error) {
2944 ++ xfs_trans_cancel(tp, 0);
2945 ++ return error;
2946 ++ }
2947 ++
2948 ++ xfs_ilock(ip, XFS_ILOCK_EXCL);
2949 ++ xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
2950 ++ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2951 ++ return xfs_trans_commit(tp, 0);
2952 ++}
2953 ++
2954 + /*
2955 + * When remounting a filesystem read-only or freezing the filesystem, we have
2956 + * two phases to execute. This first phase is syncing the data before we
2957 +@@ -365,6 +391,17 @@ xfs_quiesce_data(
2958 +
2959 + /* push and block till complete */
2960 + xfs_sync_data(mp, SYNC_WAIT);
2961 ++
2962 ++ /*
2963 ++ * Log all pending size and timestamp updates. The vfs writeback
2964 ++ * code is supposed to do this, but due to its overagressive
2965 ++ * livelock detection it will skip inodes where appending writes
2966 ++ * were written out in the first non-blocking sync phase if their
2967 ++ * completion took long enough that it happened after taking the
2968 ++ * timestamp for the cut-off in the blocking phase.
2969 ++ */
2970 ++ xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0);
2971 ++
2972 + xfs_qm_sync(mp, SYNC_WAIT);
2973 +
2974 + /* write superblock and hoover up shutdown errors */
2975 +diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
2976 +index e3a6ad2..ef5b2ce 100644
2977 +--- a/fs/xfs/linux-2.6/xfs_sync.h
2978 ++++ b/fs/xfs/linux-2.6/xfs_sync.h
2979 +@@ -42,6 +42,8 @@ void xfs_quiesce_attr(struct xfs_mount *mp);
2980 +
2981 + void xfs_flush_inodes(struct xfs_inode *ip);
2982 +
2983 ++int xfs_log_dirty_inode(struct xfs_inode *ip, struct xfs_perag *pag, int flags);
2984 ++
2985 + int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
2986 +
2987 + void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
2988 +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
2989 +index 1a23722..cd93f99 100644
2990 +--- a/include/linux/blkdev.h
2991 ++++ b/include/linux/blkdev.h
2992 +@@ -798,9 +798,6 @@ extern void blk_unprep_request(struct request *);
2993 + */
2994 + extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
2995 + spinlock_t *lock, int node_id);
2996 +-extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
2997 +- request_fn_proc *,
2998 +- spinlock_t *, int node_id);
2999 + extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
3000 + extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
3001 + request_fn_proc *, spinlock_t *);
3002 +diff --git a/include/linux/i2c/twl4030-madc.h b/include/linux/i2c/twl4030-madc.h
3003 +index 6427d29..530e11b 100644
3004 +--- a/include/linux/i2c/twl4030-madc.h
3005 ++++ b/include/linux/i2c/twl4030-madc.h
3006 +@@ -129,6 +129,10 @@ enum sample_type {
3007 + #define REG_BCICTL2 0x024
3008 + #define TWL4030_BCI_ITHSENS 0x007
3009 +
3010 ++/* Register and bits for GPBR1 register */
3011 ++#define TWL4030_REG_GPBR1 0x0c
3012 ++#define TWL4030_GPBR1_MADC_HFCLK_EN (1 << 7)
3013 ++
3014 + struct twl4030_madc_user_parms {
3015 + int channel;
3016 + int average;
3017 +diff --git a/include/linux/lglock.h b/include/linux/lglock.h
3018 +index f549056..87f402c 100644
3019 +--- a/include/linux/lglock.h
3020 ++++ b/include/linux/lglock.h
3021 +@@ -22,6 +22,7 @@
3022 + #include <linux/spinlock.h>
3023 + #include <linux/lockdep.h>
3024 + #include <linux/percpu.h>
3025 ++#include <linux/cpu.h>
3026 +
3027 + /* can make br locks by using local lock for read side, global lock for write */
3028 + #define br_lock_init(name) name##_lock_init()
3029 +@@ -72,9 +73,31 @@
3030 +
3031 + #define DEFINE_LGLOCK(name) \
3032 + \
3033 ++ DEFINE_SPINLOCK(name##_cpu_lock); \
3034 ++ cpumask_t name##_cpus __read_mostly; \
3035 + DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
3036 + DEFINE_LGLOCK_LOCKDEP(name); \
3037 + \
3038 ++ static int \
3039 ++ name##_lg_cpu_callback(struct notifier_block *nb, \
3040 ++ unsigned long action, void *hcpu) \
3041 ++ { \
3042 ++ switch (action & ~CPU_TASKS_FROZEN) { \
3043 ++ case CPU_UP_PREPARE: \
3044 ++ spin_lock(&name##_cpu_lock); \
3045 ++ cpu_set((unsigned long)hcpu, name##_cpus); \
3046 ++ spin_unlock(&name##_cpu_lock); \
3047 ++ break; \
3048 ++ case CPU_UP_CANCELED: case CPU_DEAD: \
3049 ++ spin_lock(&name##_cpu_lock); \
3050 ++ cpu_clear((unsigned long)hcpu, name##_cpus); \
3051 ++ spin_unlock(&name##_cpu_lock); \
3052 ++ } \
3053 ++ return NOTIFY_OK; \
3054 ++ } \
3055 ++ static struct notifier_block name##_lg_cpu_notifier = { \
3056 ++ .notifier_call = name##_lg_cpu_callback, \
3057 ++ }; \
3058 + void name##_lock_init(void) { \
3059 + int i; \
3060 + LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
3061 +@@ -83,6 +106,11 @@
3062 + lock = &per_cpu(name##_lock, i); \
3063 + *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
3064 + } \
3065 ++ register_hotcpu_notifier(&name##_lg_cpu_notifier); \
3066 ++ get_online_cpus(); \
3067 ++ for_each_online_cpu(i) \
3068 ++ cpu_set(i, name##_cpus); \
3069 ++ put_online_cpus(); \
3070 + } \
3071 + EXPORT_SYMBOL(name##_lock_init); \
3072 + \
3073 +@@ -124,9 +152,9 @@
3074 + \
3075 + void name##_global_lock_online(void) { \
3076 + int i; \
3077 +- preempt_disable(); \
3078 ++ spin_lock(&name##_cpu_lock); \
3079 + rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
3080 +- for_each_online_cpu(i) { \
3081 ++ for_each_cpu(i, &name##_cpus) { \
3082 + arch_spinlock_t *lock; \
3083 + lock = &per_cpu(name##_lock, i); \
3084 + arch_spin_lock(lock); \
3085 +@@ -137,12 +165,12 @@
3086 + void name##_global_unlock_online(void) { \
3087 + int i; \
3088 + rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
3089 +- for_each_online_cpu(i) { \
3090 ++ for_each_cpu(i, &name##_cpus) { \
3091 + arch_spinlock_t *lock; \
3092 + lock = &per_cpu(name##_lock, i); \
3093 + arch_spin_unlock(lock); \
3094 + } \
3095 +- preempt_enable(); \
3096 ++ spin_unlock(&name##_cpu_lock); \
3097 + } \
3098 + EXPORT_SYMBOL(name##_global_unlock_online); \
3099 + \
3100 +diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
3101 +index 7df327a..c388421 100644
3102 +--- a/include/net/sctp/structs.h
3103 ++++ b/include/net/sctp/structs.h
3104 +@@ -236,6 +236,9 @@ extern struct sctp_globals {
3105 + * bits is an indicator of when to send and window update SACK.
3106 + */
3107 + int rwnd_update_shift;
3108 ++
3109 ++ /* Threshold for autoclose timeout, in seconds. */
3110 ++ unsigned long max_autoclose;
3111 + } sctp_globals;
3112 +
3113 + #define sctp_rto_initial (sctp_globals.rto_initial)
3114 +@@ -271,6 +274,7 @@ extern struct sctp_globals {
3115 + #define sctp_auth_enable (sctp_globals.auth_enable)
3116 + #define sctp_checksum_disable (sctp_globals.checksum_disable)
3117 + #define sctp_rwnd_upd_shift (sctp_globals.rwnd_update_shift)
3118 ++#define sctp_max_autoclose (sctp_globals.max_autoclose)
3119 +
3120 + /* SCTP Socket type: UDP or TCP style. */
3121 + typedef enum {
3122 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
3123 +index 2731d11..575a5e7 100644
3124 +--- a/kernel/cgroup.c
3125 ++++ b/kernel/cgroup.c
3126 +@@ -2095,11 +2095,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
3127 + continue;
3128 + /* get old css_set pointer */
3129 + task_lock(tsk);
3130 +- if (tsk->flags & PF_EXITING) {
3131 +- /* ignore this task if it's going away */
3132 +- task_unlock(tsk);
3133 +- continue;
3134 +- }
3135 + oldcg = tsk->cgroups;
3136 + get_css_set(oldcg);
3137 + task_unlock(tsk);
3138 +diff --git a/kernel/exit.c b/kernel/exit.c
3139 +index f2b321b..303bed2 100644
3140 +--- a/kernel/exit.c
3141 ++++ b/kernel/exit.c
3142 +@@ -1553,8 +1553,15 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
3143 + }
3144 +
3145 + /* dead body doesn't have much to contribute */
3146 +- if (p->exit_state == EXIT_DEAD)
3147 ++ if (unlikely(p->exit_state == EXIT_DEAD)) {
3148 ++ /*
3149 ++ * But do not ignore this task until the tracer does
3150 ++ * wait_task_zombie()->do_notify_parent().
3151 ++ */
3152 ++ if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
3153 ++ wo->notask_error = 0;
3154 + return 0;
3155 ++ }
3156 +
3157 + /* slay zombie? */
3158 + if (p->exit_state == EXIT_ZOMBIE) {
3159 +diff --git a/kernel/futex.c b/kernel/futex.c
3160 +index 8b6da25..6487e4c 100644
3161 +--- a/kernel/futex.c
3162 ++++ b/kernel/futex.c
3163 +@@ -314,17 +314,29 @@ again:
3164 + #endif
3165 +
3166 + lock_page(page_head);
3167 ++
3168 ++ /*
3169 ++ * If page_head->mapping is NULL, then it cannot be a PageAnon
3170 ++ * page; but it might be the ZERO_PAGE or in the gate area or
3171 ++ * in a special mapping (all cases which we are happy to fail);
3172 ++ * or it may have been a good file page when get_user_pages_fast
3173 ++ * found it, but truncated or holepunched or subjected to
3174 ++ * invalidate_complete_page2 before we got the page lock (also
3175 ++ * cases which we are happy to fail). And we hold a reference,
3176 ++ * so refcount care in invalidate_complete_page's remove_mapping
3177 ++ * prevents drop_caches from setting mapping to NULL beneath us.
3178 ++ *
3179 ++ * The case we do have to guard against is when memory pressure made
3180 ++ * shmem_writepage move it from filecache to swapcache beneath us:
3181 ++ * an unlikely race, but we do need to retry for page_head->mapping.
3182 ++ */
3183 + if (!page_head->mapping) {
3184 ++ int shmem_swizzled = PageSwapCache(page_head);
3185 + unlock_page(page_head);
3186 + put_page(page_head);
3187 +- /*
3188 +- * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
3189 +- * trying to find one. RW mapping would have COW'd (and thus
3190 +- * have a mapping) so this page is RO and won't ever change.
3191 +- */
3192 +- if ((page_head == ZERO_PAGE(address)))
3193 +- return -EFAULT;
3194 +- goto again;
3195 ++ if (shmem_swizzled)
3196 ++ goto again;
3197 ++ return -EFAULT;
3198 + }
3199 +
3200 + /*
3201 +diff --git a/kernel/hung_task.c b/kernel/hung_task.c
3202 +index ea64012..e972276 100644
3203 +--- a/kernel/hung_task.c
3204 ++++ b/kernel/hung_task.c
3205 +@@ -74,11 +74,17 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
3206 +
3207 + /*
3208 + * Ensure the task is not frozen.
3209 +- * Also, when a freshly created task is scheduled once, changes
3210 +- * its state to TASK_UNINTERRUPTIBLE without having ever been
3211 +- * switched out once, it musn't be checked.
3212 ++ * Also, skip vfork and any other user process that freezer should skip.
3213 + */
3214 +- if (unlikely(t->flags & PF_FROZEN || !switch_count))
3215 ++ if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
3216 ++ return;
3217 ++
3218 ++ /*
3219 ++ * When a freshly created task is scheduled once, changes its state to
3220 ++ * TASK_UNINTERRUPTIBLE without having ever been switched out once, it
3221 ++ * musn't be checked.
3222 ++ */
3223 ++ if (unlikely(!switch_count))
3224 + return;
3225 +
3226 + if (switch_count != t->last_switch_count) {
3227 +diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
3228 +index 3b8e028..e055e8b 100644
3229 +--- a/kernel/sysctl_binary.c
3230 ++++ b/kernel/sysctl_binary.c
3231 +@@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
3232 +
3233 + fput(file);
3234 + out_putname:
3235 +- putname(pathname);
3236 ++ __putname(pathname);
3237 + out:
3238 + return result;
3239 + }
3240 +diff --git a/mm/filemap.c b/mm/filemap.c
3241 +index a8251a8..dd828ea 100644
3242 +--- a/mm/filemap.c
3243 ++++ b/mm/filemap.c
3244 +@@ -1807,7 +1807,7 @@ repeat:
3245 + page = __page_cache_alloc(gfp | __GFP_COLD);
3246 + if (!page)
3247 + return ERR_PTR(-ENOMEM);
3248 +- err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
3249 ++ err = add_to_page_cache_lru(page, mapping, index, gfp);
3250 + if (unlikely(err)) {
3251 + page_cache_release(page);
3252 + if (err == -EEXIST)
3253 +@@ -1904,10 +1904,7 @@ static struct page *wait_on_page_read(struct page *page)
3254 + * @gfp: the page allocator flags to use if allocating
3255 + *
3256 + * This is the same as "read_mapping_page(mapping, index, NULL)", but with
3257 +- * any new page allocations done using the specified allocation flags. Note
3258 +- * that the Radix tree operations will still use GFP_KERNEL, so you can't
3259 +- * expect to do this atomically or anything like that - but you can pass in
3260 +- * other page requirements.
3261 ++ * any new page allocations done using the specified allocation flags.
3262 + *
3263 + * If the page does not get brought uptodate, return -EIO.
3264 + */
3265 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3266 +index 80936a1..f9c5849 100644
3267 +--- a/mm/hugetlb.c
3268 ++++ b/mm/hugetlb.c
3269 +@@ -901,7 +901,6 @@ retry:
3270 + h->resv_huge_pages += delta;
3271 + ret = 0;
3272 +
3273 +- spin_unlock(&hugetlb_lock);
3274 + /* Free the needed pages to the hugetlb pool */
3275 + list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
3276 + if ((--needed) < 0)
3277 +@@ -915,6 +914,7 @@ retry:
3278 + VM_BUG_ON(page_count(page));
3279 + enqueue_huge_page(h, page);
3280 + }
3281 ++ spin_unlock(&hugetlb_lock);
3282 +
3283 + /* Free unnecessary surplus pages to the buddy allocator */
3284 + free:
3285 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
3286 +index 59ac5d6..d99217b 100644
3287 +--- a/mm/memcontrol.c
3288 ++++ b/mm/memcontrol.c
3289 +@@ -4963,9 +4963,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
3290 + int cpu;
3291 + enable_swap_cgroup();
3292 + parent = NULL;
3293 +- root_mem_cgroup = mem;
3294 + if (mem_cgroup_soft_limit_tree_init())
3295 + goto free_out;
3296 ++ root_mem_cgroup = mem;
3297 + for_each_possible_cpu(cpu) {
3298 + struct memcg_stock_pcp *stock =
3299 + &per_cpu(memcg_stock, cpu);
3300 +@@ -5004,7 +5004,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
3301 + return &mem->css;
3302 + free_out:
3303 + __mem_cgroup_free(mem);
3304 +- root_mem_cgroup = NULL;
3305 + return ERR_PTR(error);
3306 + }
3307 +
3308 +diff --git a/mm/oom_kill.c b/mm/oom_kill.c
3309 +index 8093fc7..7c72487 100644
3310 +--- a/mm/oom_kill.c
3311 ++++ b/mm/oom_kill.c
3312 +@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p,
3313 + unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
3314 + const nodemask_t *nodemask, unsigned long totalpages)
3315 + {
3316 +- int points;
3317 ++ long points;
3318 +
3319 + if (oom_unkillable_task(p, mem, nodemask))
3320 + return 0;
3321 +diff --git a/mm/percpu.c b/mm/percpu.c
3322 +index 93b5a7c..0ae7a09 100644
3323 +--- a/mm/percpu.c
3324 ++++ b/mm/percpu.c
3325 +@@ -1011,9 +1011,11 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
3326 + if (!is_vmalloc_addr(addr))
3327 + return __pa(addr);
3328 + else
3329 +- return page_to_phys(vmalloc_to_page(addr));
3330 ++ return page_to_phys(vmalloc_to_page(addr)) +
3331 ++ offset_in_page(addr);
3332 + } else
3333 +- return page_to_phys(pcpu_addr_to_page(addr));
3334 ++ return page_to_phys(pcpu_addr_to_page(addr)) +
3335 ++ offset_in_page(addr);
3336 + }
3337 +
3338 + /**
3339 +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
3340 +index 4155abc..7d7fb20 100644
3341 +--- a/net/ipv4/devinet.c
3342 ++++ b/net/ipv4/devinet.c
3343 +@@ -1490,7 +1490,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
3344 + void __user *buffer,
3345 + size_t *lenp, loff_t *ppos)
3346 + {
3347 ++ int old_value = *(int *)ctl->data;
3348 + int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3349 ++ int new_value = *(int *)ctl->data;
3350 +
3351 + if (write) {
3352 + struct ipv4_devconf *cnf = ctl->extra1;
3353 +@@ -1501,6 +1503,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
3354 +
3355 + if (cnf == net->ipv4.devconf_dflt)
3356 + devinet_copy_dflt_conf(net, i);
3357 ++ if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
3358 ++ if ((new_value == 0) && (old_value != 0))
3359 ++ rt_cache_flush(net, 0);
3360 + }
3361 +
3362 + return ret;
3363 +diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
3364 +index ab7e554..7fbcaba 100644
3365 +--- a/net/ipv4/ipconfig.c
3366 ++++ b/net/ipv4/ipconfig.c
3367 +@@ -252,6 +252,10 @@ static int __init ic_open_devs(void)
3368 + }
3369 + }
3370 +
3371 ++ /* no point in waiting if we could not bring up at least one device */
3372 ++ if (!ic_first_dev)
3373 ++ goto have_carrier;
3374 ++
3375 + /* wait for a carrier on at least one device */
3376 + start = jiffies;
3377 + while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
3378 +diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
3379 +index 378b20b..6f06f7f 100644
3380 +--- a/net/ipv4/ipip.c
3381 ++++ b/net/ipv4/ipip.c
3382 +@@ -285,6 +285,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
3383 + if (register_netdevice(dev) < 0)
3384 + goto failed_free;
3385 +
3386 ++ strcpy(nt->parms.name, dev->name);
3387 ++
3388 + dev_hold(dev);
3389 + ipip_tunnel_link(ipn, nt);
3390 + return nt;
3391 +@@ -759,7 +761,6 @@ static int ipip_tunnel_init(struct net_device *dev)
3392 + struct ip_tunnel *tunnel = netdev_priv(dev);
3393 +
3394 + tunnel->dev = dev;
3395 +- strcpy(tunnel->parms.name, dev->name);
3396 +
3397 + memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
3398 + memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
3399 +@@ -825,6 +826,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
3400 + static int __net_init ipip_init_net(struct net *net)
3401 + {
3402 + struct ipip_net *ipn = net_generic(net, ipip_net_id);
3403 ++ struct ip_tunnel *t;
3404 + int err;
3405 +
3406 + ipn->tunnels[0] = ipn->tunnels_wc;
3407 +@@ -848,6 +850,9 @@ static int __net_init ipip_init_net(struct net *net)
3408 + if ((err = register_netdev(ipn->fb_tunnel_dev)))
3409 + goto err_reg_dev;
3410 +
3411 ++ t = netdev_priv(ipn->fb_tunnel_dev);
3412 ++
3413 ++ strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
3414 + return 0;
3415 +
3416 + err_reg_dev:
3417 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3418 +index 75ef66f..4845bfe 100644
3419 +--- a/net/ipv4/route.c
3420 ++++ b/net/ipv4/route.c
3421 +@@ -91,6 +91,7 @@
3422 + #include <linux/rcupdate.h>
3423 + #include <linux/times.h>
3424 + #include <linux/slab.h>
3425 ++#include <linux/prefetch.h>
3426 + #include <net/dst.h>
3427 + #include <net/net_namespace.h>
3428 + #include <net/protocol.h>
3429 +@@ -132,6 +133,9 @@ static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
3430 + static int ip_rt_min_advmss __read_mostly = 256;
3431 + static int rt_chain_length_max __read_mostly = 20;
3432 +
3433 ++static struct delayed_work expires_work;
3434 ++static unsigned long expires_ljiffies;
3435 ++
3436 + /*
3437 + * Interface to generic destination cache.
3438 + */
3439 +@@ -821,6 +825,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
3440 + return ONE;
3441 + }
3442 +
3443 ++static void rt_check_expire(void)
3444 ++{
3445 ++ static unsigned int rover;
3446 ++ unsigned int i = rover, goal;
3447 ++ struct rtable *rth;
3448 ++ struct rtable __rcu **rthp;
3449 ++ unsigned long samples = 0;
3450 ++ unsigned long sum = 0, sum2 = 0;
3451 ++ unsigned long delta;
3452 ++ u64 mult;
3453 ++
3454 ++ delta = jiffies - expires_ljiffies;
3455 ++ expires_ljiffies = jiffies;
3456 ++ mult = ((u64)delta) << rt_hash_log;
3457 ++ if (ip_rt_gc_timeout > 1)
3458 ++ do_div(mult, ip_rt_gc_timeout);
3459 ++ goal = (unsigned int)mult;
3460 ++ if (goal > rt_hash_mask)
3461 ++ goal = rt_hash_mask + 1;
3462 ++ for (; goal > 0; goal--) {
3463 ++ unsigned long tmo = ip_rt_gc_timeout;
3464 ++ unsigned long length;
3465 ++
3466 ++ i = (i + 1) & rt_hash_mask;
3467 ++ rthp = &rt_hash_table[i].chain;
3468 ++
3469 ++ if (need_resched())
3470 ++ cond_resched();
3471 ++
3472 ++ samples++;
3473 ++
3474 ++ if (rcu_dereference_raw(*rthp) == NULL)
3475 ++ continue;
3476 ++ length = 0;
3477 ++ spin_lock_bh(rt_hash_lock_addr(i));
3478 ++ while ((rth = rcu_dereference_protected(*rthp,
3479 ++ lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
3480 ++ prefetch(rth->dst.rt_next);
3481 ++ if (rt_is_expired(rth)) {
3482 ++ *rthp = rth->dst.rt_next;
3483 ++ rt_free(rth);
3484 ++ continue;
3485 ++ }
3486 ++ if (rth->dst.expires) {
3487 ++ /* Entry is expired even if it is in use */
3488 ++ if (time_before_eq(jiffies, rth->dst.expires)) {
3489 ++nofree:
3490 ++ tmo >>= 1;
3491 ++ rthp = &rth->dst.rt_next;
3492 ++ /*
3493 ++ * We only count entries on
3494 ++ * a chain with equal hash inputs once
3495 ++ * so that entries for different QOS
3496 ++ * levels, and other non-hash input
3497 ++ * attributes don't unfairly skew
3498 ++ * the length computation
3499 ++ */
3500 ++ length += has_noalias(rt_hash_table[i].chain, rth);
3501 ++ continue;
3502 ++ }
3503 ++ } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
3504 ++ goto nofree;
3505 ++
3506 ++ /* Cleanup aged off entries. */
3507 ++ *rthp = rth->dst.rt_next;
3508 ++ rt_free(rth);
3509 ++ }
3510 ++ spin_unlock_bh(rt_hash_lock_addr(i));
3511 ++ sum += length;
3512 ++ sum2 += length*length;
3513 ++ }
3514 ++ if (samples) {
3515 ++ unsigned long avg = sum / samples;
3516 ++ unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
3517 ++ rt_chain_length_max = max_t(unsigned long,
3518 ++ ip_rt_gc_elasticity,
3519 ++ (avg + 4*sd) >> FRACT_BITS);
3520 ++ }
3521 ++ rover = i;
3522 ++}
3523 ++
3524 ++/*
3525 ++ * rt_worker_func() is run in process context.
3526 ++ * we call rt_check_expire() to scan part of the hash table
3527 ++ */
3528 ++static void rt_worker_func(struct work_struct *work)
3529 ++{
3530 ++ rt_check_expire();
3531 ++ schedule_delayed_work(&expires_work, ip_rt_gc_interval);
3532 ++}
3533 ++
3534 + /*
3535 + * Perturbation of rt_genid by a small quantity [1..256]
3536 + * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
3537 +@@ -3088,6 +3183,13 @@ static ctl_table ipv4_route_table[] = {
3538 + .proc_handler = proc_dointvec_jiffies,
3539 + },
3540 + {
3541 ++ .procname = "gc_interval",
3542 ++ .data = &ip_rt_gc_interval,
3543 ++ .maxlen = sizeof(int),
3544 ++ .mode = 0644,
3545 ++ .proc_handler = proc_dointvec_jiffies,
3546 ++ },
3547 ++ {
3548 + .procname = "redirect_load",
3549 + .data = &ip_rt_redirect_load,
3550 + .maxlen = sizeof(int),
3551 +@@ -3297,6 +3399,11 @@ int __init ip_rt_init(void)
3552 + devinet_init();
3553 + ip_fib_init();
3554 +
3555 ++ INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
3556 ++ expires_ljiffies = jiffies;
3557 ++ schedule_delayed_work(&expires_work,
3558 ++ net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
3559 ++
3560 + if (ip_rt_proc_init())
3561 + printk(KERN_ERR "Unable to create route proc files\n");
3562 + #ifdef CONFIG_XFRM
3563 +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
3564 +index 1cca576..38490d5 100644
3565 +--- a/net/ipv6/sit.c
3566 ++++ b/net/ipv6/sit.c
3567 +@@ -263,6 +263,8 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
3568 + if (register_netdevice(dev) < 0)
3569 + goto failed_free;
3570 +
3571 ++ strcpy(nt->parms.name, dev->name);
3572 ++
3573 + dev_hold(dev);
3574 +
3575 + ipip6_tunnel_link(sitn, nt);
3576 +@@ -1141,7 +1143,6 @@ static int ipip6_tunnel_init(struct net_device *dev)
3577 + struct ip_tunnel *tunnel = netdev_priv(dev);
3578 +
3579 + tunnel->dev = dev;
3580 +- strcpy(tunnel->parms.name, dev->name);
3581 +
3582 + memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
3583 + memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
3584 +@@ -1204,6 +1205,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
3585 + static int __net_init sit_init_net(struct net *net)
3586 + {
3587 + struct sit_net *sitn = net_generic(net, sit_net_id);
3588 ++ struct ip_tunnel *t;
3589 + int err;
3590 +
3591 + sitn->tunnels[0] = sitn->tunnels_wc;
3592 +@@ -1228,6 +1230,9 @@ static int __net_init sit_init_net(struct net *net)
3593 + if ((err = register_netdev(sitn->fb_tunnel_dev)))
3594 + goto err_reg_dev;
3595 +
3596 ++ t = netdev_priv(sitn->fb_tunnel_dev);
3597 ++
3598 ++ strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
3599 + return 0;
3600 +
3601 + err_reg_dev:
3602 +diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
3603 +index dfd3a64..a18e6c3 100644
3604 +--- a/net/llc/af_llc.c
3605 ++++ b/net/llc/af_llc.c
3606 +@@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
3607 + copied += used;
3608 + len -= used;
3609 +
3610 ++ /* For non stream protcols we get one packet per recvmsg call */
3611 ++ if (sk->sk_type != SOCK_STREAM)
3612 ++ goto copy_uaddr;
3613 ++
3614 + if (!(flags & MSG_PEEK)) {
3615 + sk_eat_skb(sk, skb, 0);
3616 + *seq = 0;
3617 + }
3618 +
3619 +- /* For non stream protcols we get one packet per recvmsg call */
3620 +- if (sk->sk_type != SOCK_STREAM)
3621 +- goto copy_uaddr;
3622 +-
3623 + /* Partial read */
3624 + if (used + offset < skb->len)
3625 + continue;
3626 +@@ -857,6 +857,12 @@ copy_uaddr:
3627 + }
3628 + if (llc_sk(sk)->cmsg_flags)
3629 + llc_cmsg_rcv(msg, skb);
3630 ++
3631 ++ if (!(flags & MSG_PEEK)) {
3632 ++ sk_eat_skb(sk, skb, 0);
3633 ++ *seq = 0;
3634 ++ }
3635 ++
3636 + goto out;
3637 + }
3638 +
3639 +diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
3640 +index db7db43..b7f4f5c 100644
3641 +--- a/net/mac80211/agg-tx.c
3642 ++++ b/net/mac80211/agg-tx.c
3643 +@@ -304,6 +304,38 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
3644 + __release(agg_queue);
3645 + }
3646 +
3647 ++/*
3648 ++ * splice packets from the STA's pending to the local pending,
3649 ++ * requires a call to ieee80211_agg_splice_finish later
3650 ++ */
3651 ++static void __acquires(agg_queue)
3652 ++ieee80211_agg_splice_packets(struct ieee80211_local *local,
3653 ++ struct tid_ampdu_tx *tid_tx, u16 tid)
3654 ++{
3655 ++ int queue = ieee80211_ac_from_tid(tid);
3656 ++ unsigned long flags;
3657 ++
3658 ++ ieee80211_stop_queue_agg(local, tid);
3659 ++
3660 ++ if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
3661 ++ " from the pending queue\n", tid))
3662 ++ return;
3663 ++
3664 ++ if (!skb_queue_empty(&tid_tx->pending)) {
3665 ++ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
3666 ++ /* copy over remaining packets */
3667 ++ skb_queue_splice_tail_init(&tid_tx->pending,
3668 ++ &local->pending[queue]);
3669 ++ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
3670 ++ }
3671 ++}
3672 ++
3673 ++static void __releases(agg_queue)
3674 ++ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
3675 ++{
3676 ++ ieee80211_wake_queue_agg(local, tid);
3677 ++}
3678 ++
3679 + void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
3680 + {
3681 + struct tid_ampdu_tx *tid_tx;
3682 +@@ -315,19 +347,17 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
3683 + tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
3684 +
3685 + /*
3686 +- * While we're asking the driver about the aggregation,
3687 +- * stop the AC queue so that we don't have to worry
3688 +- * about frames that came in while we were doing that,
3689 +- * which would require us to put them to the AC pending
3690 +- * afterwards which just makes the code more complex.
3691 ++ * Start queuing up packets for this aggregation session.
3692 ++ * We're going to release them once the driver is OK with
3693 ++ * that.
3694 + */
3695 +- ieee80211_stop_queue_agg(local, tid);
3696 +-
3697 + clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
3698 +
3699 + /*
3700 +- * make sure no packets are being processed to get
3701 +- * valid starting sequence number
3702 ++ * Make sure no packets are being processed. This ensures that
3703 ++ * we have a valid starting sequence number and that in-flight
3704 ++ * packets have been flushed out and no packets for this TID
3705 ++ * will go into the driver during the ampdu_action call.
3706 + */
3707 + synchronize_net();
3708 +
3709 +@@ -341,17 +371,15 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
3710 + " tid %d\n", tid);
3711 + #endif
3712 + spin_lock_bh(&sta->lock);
3713 ++ ieee80211_agg_splice_packets(local, tid_tx, tid);
3714 + ieee80211_assign_tid_tx(sta, tid, NULL);
3715 ++ ieee80211_agg_splice_finish(local, tid);
3716 + spin_unlock_bh(&sta->lock);
3717 +
3718 +- ieee80211_wake_queue_agg(local, tid);
3719 + kfree_rcu(tid_tx, rcu_head);
3720 + return;
3721 + }
3722 +
3723 +- /* we can take packets again now */
3724 +- ieee80211_wake_queue_agg(local, tid);
3725 +-
3726 + /* activate the timer for the recipient's addBA response */
3727 + mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
3728 + #ifdef CONFIG_MAC80211_HT_DEBUG
3729 +@@ -471,38 +499,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
3730 + }
3731 + EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
3732 +
3733 +-/*
3734 +- * splice packets from the STA's pending to the local pending,
3735 +- * requires a call to ieee80211_agg_splice_finish later
3736 +- */
3737 +-static void __acquires(agg_queue)
3738 +-ieee80211_agg_splice_packets(struct ieee80211_local *local,
3739 +- struct tid_ampdu_tx *tid_tx, u16 tid)
3740 +-{
3741 +- int queue = ieee80211_ac_from_tid(tid);
3742 +- unsigned long flags;
3743 +-
3744 +- ieee80211_stop_queue_agg(local, tid);
3745 +-
3746 +- if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
3747 +- " from the pending queue\n", tid))
3748 +- return;
3749 +-
3750 +- if (!skb_queue_empty(&tid_tx->pending)) {
3751 +- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
3752 +- /* copy over remaining packets */
3753 +- skb_queue_splice_tail_init(&tid_tx->pending,
3754 +- &local->pending[queue]);
3755 +- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
3756 +- }
3757 +-}
3758 +-
3759 +-static void __releases(agg_queue)
3760 +-ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
3761 +-{
3762 +- ieee80211_wake_queue_agg(local, tid);
3763 +-}
3764 +-
3765 + static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
3766 + struct sta_info *sta, u16 tid)
3767 + {
3768 +diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
3769 +index b9493a0..6cd8ddf 100644
3770 +--- a/net/sched/sch_gred.c
3771 ++++ b/net/sched/sch_gred.c
3772 +@@ -385,7 +385,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
3773 + struct gred_sched_data *q;
3774 +
3775 + if (table->tab[dp] == NULL) {
3776 +- table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
3777 ++ table->tab[dp] = kzalloc(sizeof(*q), GFP_ATOMIC);
3778 + if (table->tab[dp] == NULL)
3779 + return -ENOMEM;
3780 + }
3781 +diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
3782 +index ea17cbe..59b26b8 100644
3783 +--- a/net/sched/sch_mqprio.c
3784 ++++ b/net/sched/sch_mqprio.c
3785 +@@ -106,7 +106,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
3786 + if (!netif_is_multiqueue(dev))
3787 + return -EOPNOTSUPP;
3788 +
3789 +- if (nla_len(opt) < sizeof(*qopt))
3790 ++ if (!opt || nla_len(opt) < sizeof(*qopt))
3791 + return -EINVAL;
3792 +
3793 + qopt = nla_data(opt);
3794 +diff --git a/net/sctp/associola.c b/net/sctp/associola.c
3795 +index 4a62888..17a6e65 100644
3796 +--- a/net/sctp/associola.c
3797 ++++ b/net/sctp/associola.c
3798 +@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
3799 + asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
3800 + asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
3801 + asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
3802 +- (unsigned long)sp->autoclose * HZ;
3803 ++ min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
3804 +
3805 + /* Initializes the timers */
3806 + for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
3807 +diff --git a/net/sctp/output.c b/net/sctp/output.c
3808 +index 08b3cea..817174e 100644
3809 +--- a/net/sctp/output.c
3810 ++++ b/net/sctp/output.c
3811 +@@ -697,13 +697,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
3812 + /* Keep track of how many bytes are in flight to the receiver. */
3813 + asoc->outqueue.outstanding_bytes += datasize;
3814 +
3815 +- /* Update our view of the receiver's rwnd. Include sk_buff overhead
3816 +- * while updating peer.rwnd so that it reduces the chances of a
3817 +- * receiver running out of receive buffer space even when receive
3818 +- * window is still open. This can happen when a sender is sending
3819 +- * sending small messages.
3820 +- */
3821 +- datasize += sizeof(struct sk_buff);
3822 ++ /* Update our view of the receiver's rwnd. */
3823 + if (datasize < rwnd)
3824 + rwnd -= datasize;
3825 + else
3826 +diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
3827 +index d036821..1f2938f 100644
3828 +--- a/net/sctp/outqueue.c
3829 ++++ b/net/sctp/outqueue.c
3830 +@@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
3831 + chunk->transport->flight_size -=
3832 + sctp_data_size(chunk);
3833 + q->outstanding_bytes -= sctp_data_size(chunk);
3834 +- q->asoc->peer.rwnd += (sctp_data_size(chunk) +
3835 +- sizeof(struct sk_buff));
3836 ++ q->asoc->peer.rwnd += sctp_data_size(chunk);
3837 + }
3838 + continue;
3839 + }
3840 +@@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
3841 + * (Section 7.2.4)), add the data size of those
3842 + * chunks to the rwnd.
3843 + */
3844 +- q->asoc->peer.rwnd += (sctp_data_size(chunk) +
3845 +- sizeof(struct sk_buff));
3846 ++ q->asoc->peer.rwnd += sctp_data_size(chunk);
3847 + q->outstanding_bytes -= sctp_data_size(chunk);
3848 + if (chunk->transport)
3849 + transport->flight_size -= sctp_data_size(chunk);
3850 +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
3851 +index 207175b..946afd6 100644
3852 +--- a/net/sctp/protocol.c
3853 ++++ b/net/sctp/protocol.c
3854 +@@ -1144,6 +1144,9 @@ SCTP_STATIC __init int sctp_init(void)
3855 + sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
3856 + sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
3857 +
3858 ++ /* Initialize maximum autoclose timeout. */
3859 ++ sctp_max_autoclose = INT_MAX / HZ;
3860 ++
3861 + /* Initialize handle used for association ids. */
3862 + idr_init(&sctp_assocs_id);
3863 +
3864 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3865 +index d3ccf79..fa9b5c7 100644
3866 +--- a/net/sctp/socket.c
3867 ++++ b/net/sctp/socket.c
3868 +@@ -2129,8 +2129,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
3869 + return -EINVAL;
3870 + if (copy_from_user(&sp->autoclose, optval, optlen))
3871 + return -EFAULT;
3872 +- /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
3873 +- sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
3874 +
3875 + return 0;
3876 + }
3877 +diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
3878 +index 50cb57f..6752f48 100644
3879 +--- a/net/sctp/sysctl.c
3880 ++++ b/net/sctp/sysctl.c
3881 +@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
3882 + static int sack_timer_max = 500;
3883 + static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
3884 + static int rwnd_scale_max = 16;
3885 ++static unsigned long max_autoclose_min = 0;
3886 ++static unsigned long max_autoclose_max =
3887 ++ (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
3888 ++ ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
3889 +
3890 + extern long sysctl_sctp_mem[3];
3891 + extern int sysctl_sctp_rmem[3];
3892 +@@ -251,6 +255,15 @@ static ctl_table sctp_table[] = {
3893 + .extra1 = &one,
3894 + .extra2 = &rwnd_scale_max,
3895 + },
3896 ++ {
3897 ++ .procname = "max_autoclose",
3898 ++ .data = &sctp_max_autoclose,
3899 ++ .maxlen = sizeof(unsigned long),
3900 ++ .mode = 0644,
3901 ++ .proc_handler = &proc_doulongvec_minmax,
3902 ++ .extra1 = &max_autoclose_min,
3903 ++ .extra2 = &max_autoclose_max,
3904 ++ },
3905 +
3906 + { /* sentinel */ }
3907 + };
3908 +diff --git a/security/selinux/netport.c b/security/selinux/netport.c
3909 +index cfe2d72..e2b74eb 100644
3910 +--- a/security/selinux/netport.c
3911 ++++ b/security/selinux/netport.c
3912 +@@ -139,7 +139,9 @@ static void sel_netport_insert(struct sel_netport *port)
3913 + if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
3914 + struct sel_netport *tail;
3915 + tail = list_entry(
3916 +- rcu_dereference(sel_netport_hash[idx].list.prev),
3917 ++ rcu_dereference_protected(
3918 ++ sel_netport_hash[idx].list.prev,
3919 ++ lockdep_is_held(&sel_netport_lock)),
3920 + struct sel_netport, list);
3921 + list_del_rcu(&tail->list);
3922 + call_rcu(&tail->rcu, sel_netport_free);