Gentoo Archives: gentoo-commits

From: "Anthony G. Basile" <blueness@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/hardened-patchset:XT_PAX commit in: 3.1.1/
Date: Sun, 20 Nov 2011 21:55:43
Message-Id: b82cf9a7167a59ffa5dcfd2e8f6865340815f1bc.blueness@gentoo
1 commit: b82cf9a7167a59ffa5dcfd2e8f6865340815f1bc
2 Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
3 AuthorDate: Sun Nov 20 21:53:41 2011 +0000
4 Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
5 CommitDate: Sun Nov 20 21:53:41 2011 +0000
6 URL: http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=b82cf9a7
7
8 XT_PAX support for 3.1.1
9
10 ---
11 .../4420_grsecurity-2.2.2-3.1.1-201111181902.patch |78460 ++++++++++++++++++++
12 3.1.1/4430_remove-legacy-EI_PAX.patch | 207 +
13 3.1.1/4440_replace-PT_PAX-with-XT_PAX.patch | 322 +
14 3.1.1/4450_grsec-remove-localversion-grsec.patch | 9 +
15 3.1.1/4460_grsec-mute-warnings.patch | 43 +
16 3.1.1/4470_grsec-remove-protected-paths.patch | 19 +
17 3.1.1/4480_grsec-pax-without-grsec.patch | 88 +
18 3.1.1/4490_grsec-kconfig-default-gids.patch | 77 +
19 3.1.1/4500_grsec-kconfig-gentoo.patch | 311 +
20 3.1.1/4510-grsec-kconfig-proc-user.patch | 26 +
21 3.1.1/4520_selinux-avc_audit-log-curr_ip.patch | 73 +
22 3.1.1/4530_disable-compat_vdso.patch | 46 +
23 12 files changed, 79681 insertions(+), 0 deletions(-)
24
25 diff --git a/3.1.1/4420_grsecurity-2.2.2-3.1.1-201111181902.patch b/3.1.1/4420_grsecurity-2.2.2-3.1.1-201111181902.patch
26 new file mode 100644
27 index 0000000..2b025b8
28 --- /dev/null
29 +++ b/3.1.1/4420_grsecurity-2.2.2-3.1.1-201111181902.patch
30 @@ -0,0 +1,78460 @@
31 +diff -urNp linux-3.1.1/arch/alpha/include/asm/elf.h linux-3.1.1/arch/alpha/include/asm/elf.h
32 +--- linux-3.1.1/arch/alpha/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
33 ++++ linux-3.1.1/arch/alpha/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
34 +@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
35 +
36 + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
37 +
38 ++#ifdef CONFIG_PAX_ASLR
39 ++#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
40 ++
41 ++#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
42 ++#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
43 ++#endif
44 ++
45 + /* $0 is set by ld.so to a pointer to a function which might be
46 + registered using atexit. This provides a mean for the dynamic
47 + linker to call DT_FINI functions for shared libraries that have
48 +diff -urNp linux-3.1.1/arch/alpha/include/asm/pgtable.h linux-3.1.1/arch/alpha/include/asm/pgtable.h
49 +--- linux-3.1.1/arch/alpha/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
50 ++++ linux-3.1.1/arch/alpha/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
51 +@@ -101,6 +101,17 @@ struct vm_area_struct;
52 + #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
53 + #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
54 + #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
55 ++
56 ++#ifdef CONFIG_PAX_PAGEEXEC
57 ++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
58 ++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
59 ++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
60 ++#else
61 ++# define PAGE_SHARED_NOEXEC PAGE_SHARED
62 ++# define PAGE_COPY_NOEXEC PAGE_COPY
63 ++# define PAGE_READONLY_NOEXEC PAGE_READONLY
64 ++#endif
65 ++
66 + #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
67 +
68 + #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
69 +diff -urNp linux-3.1.1/arch/alpha/kernel/module.c linux-3.1.1/arch/alpha/kernel/module.c
70 +--- linux-3.1.1/arch/alpha/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
71 ++++ linux-3.1.1/arch/alpha/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
72 +@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
73 +
74 + /* The small sections were sorted to the end of the segment.
75 + The following should definitely cover them. */
76 +- gp = (u64)me->module_core + me->core_size - 0x8000;
77 ++ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
78 + got = sechdrs[me->arch.gotsecindex].sh_addr;
79 +
80 + for (i = 0; i < n; i++) {
81 +diff -urNp linux-3.1.1/arch/alpha/kernel/osf_sys.c linux-3.1.1/arch/alpha/kernel/osf_sys.c
82 +--- linux-3.1.1/arch/alpha/kernel/osf_sys.c 2011-11-11 15:19:27.000000000 -0500
83 ++++ linux-3.1.1/arch/alpha/kernel/osf_sys.c 2011-11-16 18:39:07.000000000 -0500
84 +@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long a
85 + /* At this point: (!vma || addr < vma->vm_end). */
86 + if (limit - len < addr)
87 + return -ENOMEM;
88 +- if (!vma || addr + len <= vma->vm_start)
89 ++ if (check_heap_stack_gap(vma, addr, len))
90 + return addr;
91 + addr = vma->vm_end;
92 + vma = vma->vm_next;
93 +@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp
94 + merely specific addresses, but regions of memory -- perhaps
95 + this feature should be incorporated into all ports? */
96 +
97 ++#ifdef CONFIG_PAX_RANDMMAP
98 ++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
99 ++#endif
100 ++
101 + if (addr) {
102 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
103 + if (addr != (unsigned long) -ENOMEM)
104 +@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp
105 + }
106 +
107 + /* Next, try allocating at TASK_UNMAPPED_BASE. */
108 +- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
109 +- len, limit);
110 ++ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
111 ++
112 + if (addr != (unsigned long) -ENOMEM)
113 + return addr;
114 +
115 +diff -urNp linux-3.1.1/arch/alpha/mm/fault.c linux-3.1.1/arch/alpha/mm/fault.c
116 +--- linux-3.1.1/arch/alpha/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
117 ++++ linux-3.1.1/arch/alpha/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
118 +@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
119 + __reload_thread(pcb);
120 + }
121 +
122 ++#ifdef CONFIG_PAX_PAGEEXEC
123 ++/*
124 ++ * PaX: decide what to do with offenders (regs->pc = fault address)
125 ++ *
126 ++ * returns 1 when task should be killed
127 ++ * 2 when patched PLT trampoline was detected
128 ++ * 3 when unpatched PLT trampoline was detected
129 ++ */
130 ++static int pax_handle_fetch_fault(struct pt_regs *regs)
131 ++{
132 ++
133 ++#ifdef CONFIG_PAX_EMUPLT
134 ++ int err;
135 ++
136 ++ do { /* PaX: patched PLT emulation #1 */
137 ++ unsigned int ldah, ldq, jmp;
138 ++
139 ++ err = get_user(ldah, (unsigned int *)regs->pc);
140 ++ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
141 ++ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
142 ++
143 ++ if (err)
144 ++ break;
145 ++
146 ++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
147 ++ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
148 ++ jmp == 0x6BFB0000U)
149 ++ {
150 ++ unsigned long r27, addr;
151 ++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
152 ++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
153 ++
154 ++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
155 ++ err = get_user(r27, (unsigned long *)addr);
156 ++ if (err)
157 ++ break;
158 ++
159 ++ regs->r27 = r27;
160 ++ regs->pc = r27;
161 ++ return 2;
162 ++ }
163 ++ } while (0);
164 ++
165 ++ do { /* PaX: patched PLT emulation #2 */
166 ++ unsigned int ldah, lda, br;
167 ++
168 ++ err = get_user(ldah, (unsigned int *)regs->pc);
169 ++ err |= get_user(lda, (unsigned int *)(regs->pc+4));
170 ++ err |= get_user(br, (unsigned int *)(regs->pc+8));
171 ++
172 ++ if (err)
173 ++ break;
174 ++
175 ++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
176 ++ (lda & 0xFFFF0000U) == 0xA77B0000U &&
177 ++ (br & 0xFFE00000U) == 0xC3E00000U)
178 ++ {
179 ++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
180 ++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
181 ++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
182 ++
183 ++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
184 ++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
185 ++ return 2;
186 ++ }
187 ++ } while (0);
188 ++
189 ++ do { /* PaX: unpatched PLT emulation */
190 ++ unsigned int br;
191 ++
192 ++ err = get_user(br, (unsigned int *)regs->pc);
193 ++
194 ++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
195 ++ unsigned int br2, ldq, nop, jmp;
196 ++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
197 ++
198 ++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
199 ++ err = get_user(br2, (unsigned int *)addr);
200 ++ err |= get_user(ldq, (unsigned int *)(addr+4));
201 ++ err |= get_user(nop, (unsigned int *)(addr+8));
202 ++ err |= get_user(jmp, (unsigned int *)(addr+12));
203 ++ err |= get_user(resolver, (unsigned long *)(addr+16));
204 ++
205 ++ if (err)
206 ++ break;
207 ++
208 ++ if (br2 == 0xC3600000U &&
209 ++ ldq == 0xA77B000CU &&
210 ++ nop == 0x47FF041FU &&
211 ++ jmp == 0x6B7B0000U)
212 ++ {
213 ++ regs->r28 = regs->pc+4;
214 ++ regs->r27 = addr+16;
215 ++ regs->pc = resolver;
216 ++ return 3;
217 ++ }
218 ++ }
219 ++ } while (0);
220 ++#endif
221 ++
222 ++ return 1;
223 ++}
224 ++
225 ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
226 ++{
227 ++ unsigned long i;
228 ++
229 ++ printk(KERN_ERR "PAX: bytes at PC: ");
230 ++ for (i = 0; i < 5; i++) {
231 ++ unsigned int c;
232 ++ if (get_user(c, (unsigned int *)pc+i))
233 ++ printk(KERN_CONT "???????? ");
234 ++ else
235 ++ printk(KERN_CONT "%08x ", c);
236 ++ }
237 ++ printk("\n");
238 ++}
239 ++#endif
240 +
241 + /*
242 + * This routine handles page faults. It determines the address,
243 +@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
244 + good_area:
245 + si_code = SEGV_ACCERR;
246 + if (cause < 0) {
247 +- if (!(vma->vm_flags & VM_EXEC))
248 ++ if (!(vma->vm_flags & VM_EXEC)) {
249 ++
250 ++#ifdef CONFIG_PAX_PAGEEXEC
251 ++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
252 ++ goto bad_area;
253 ++
254 ++ up_read(&mm->mmap_sem);
255 ++ switch (pax_handle_fetch_fault(regs)) {
256 ++
257 ++#ifdef CONFIG_PAX_EMUPLT
258 ++ case 2:
259 ++ case 3:
260 ++ return;
261 ++#endif
262 ++
263 ++ }
264 ++ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
265 ++ do_group_exit(SIGKILL);
266 ++#else
267 + goto bad_area;
268 ++#endif
269 ++
270 ++ }
271 + } else if (!cause) {
272 + /* Allow reads even for write-only mappings */
273 + if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
274 +diff -urNp linux-3.1.1/arch/arm/include/asm/elf.h linux-3.1.1/arch/arm/include/asm/elf.h
275 +--- linux-3.1.1/arch/arm/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
276 ++++ linux-3.1.1/arch/arm/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
277 +@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t
278 + the loader. We need to make sure that it is out of the way of the program
279 + that it will "exec", and that there is sufficient room for the brk. */
280 +
281 +-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
282 ++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
283 ++
284 ++#ifdef CONFIG_PAX_ASLR
285 ++#define PAX_ELF_ET_DYN_BASE 0x00008000UL
286 ++
287 ++#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
288 ++#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
289 ++#endif
290 +
291 + /* When the program starts, a1 contains a pointer to a function to be
292 + registered with atexit, as per the SVR4 ABI. A value of 0 means we
293 +@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t
294 + extern void elf_set_personality(const struct elf32_hdr *);
295 + #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
296 +
297 +-struct mm_struct;
298 +-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
299 +-#define arch_randomize_brk arch_randomize_brk
300 +-
301 + extern int vectors_user_mapping(void);
302 + #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
303 + #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
304 +diff -urNp linux-3.1.1/arch/arm/include/asm/kmap_types.h linux-3.1.1/arch/arm/include/asm/kmap_types.h
305 +--- linux-3.1.1/arch/arm/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
306 ++++ linux-3.1.1/arch/arm/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
307 +@@ -21,6 +21,7 @@ enum km_type {
308 + KM_L1_CACHE,
309 + KM_L2_CACHE,
310 + KM_KDB,
311 ++ KM_CLEARPAGE,
312 + KM_TYPE_NR
313 + };
314 +
315 +diff -urNp linux-3.1.1/arch/arm/include/asm/uaccess.h linux-3.1.1/arch/arm/include/asm/uaccess.h
316 +--- linux-3.1.1/arch/arm/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
317 ++++ linux-3.1.1/arch/arm/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
318 +@@ -22,6 +22,8 @@
319 + #define VERIFY_READ 0
320 + #define VERIFY_WRITE 1
321 +
322 ++extern void check_object_size(const void *ptr, unsigned long n, bool to);
323 ++
324 + /*
325 + * The exception table consists of pairs of addresses: the first is the
326 + * address of an instruction that is allowed to fault, and the second is
327 +@@ -387,8 +389,23 @@ do { \
328 +
329 +
330 + #ifdef CONFIG_MMU
331 +-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
332 +-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
333 ++extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
334 ++extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
335 ++
336 ++static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
337 ++{
338 ++ if (!__builtin_constant_p(n))
339 ++ check_object_size(to, n, false);
340 ++ return ___copy_from_user(to, from, n);
341 ++}
342 ++
343 ++static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
344 ++{
345 ++ if (!__builtin_constant_p(n))
346 ++ check_object_size(from, n, true);
347 ++ return ___copy_to_user(to, from, n);
348 ++}
349 ++
350 + extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
351 + extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
352 + extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
353 +@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
354 +
355 + static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
356 + {
357 ++ if ((long)n < 0)
358 ++ return n;
359 ++
360 + if (access_ok(VERIFY_READ, from, n))
361 + n = __copy_from_user(to, from, n);
362 + else /* security hole - plug it */
363 +@@ -412,6 +432,9 @@ static inline unsigned long __must_check
364 +
365 + static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
366 + {
367 ++ if ((long)n < 0)
368 ++ return n;
369 ++
370 + if (access_ok(VERIFY_WRITE, to, n))
371 + n = __copy_to_user(to, from, n);
372 + return n;
373 +diff -urNp linux-3.1.1/arch/arm/kernel/armksyms.c linux-3.1.1/arch/arm/kernel/armksyms.c
374 +--- linux-3.1.1/arch/arm/kernel/armksyms.c 2011-11-11 15:19:27.000000000 -0500
375 ++++ linux-3.1.1/arch/arm/kernel/armksyms.c 2011-11-16 18:39:07.000000000 -0500
376 +@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
377 + #ifdef CONFIG_MMU
378 + EXPORT_SYMBOL(copy_page);
379 +
380 +-EXPORT_SYMBOL(__copy_from_user);
381 +-EXPORT_SYMBOL(__copy_to_user);
382 ++EXPORT_SYMBOL(___copy_from_user);
383 ++EXPORT_SYMBOL(___copy_to_user);
384 + EXPORT_SYMBOL(__clear_user);
385 +
386 + EXPORT_SYMBOL(__get_user_1);
387 +diff -urNp linux-3.1.1/arch/arm/kernel/process.c linux-3.1.1/arch/arm/kernel/process.c
388 +--- linux-3.1.1/arch/arm/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
389 ++++ linux-3.1.1/arch/arm/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
390 +@@ -28,7 +28,6 @@
391 + #include <linux/tick.h>
392 + #include <linux/utsname.h>
393 + #include <linux/uaccess.h>
394 +-#include <linux/random.h>
395 + #include <linux/hw_breakpoint.h>
396 + #include <linux/cpuidle.h>
397 +
398 +@@ -481,12 +480,6 @@ unsigned long get_wchan(struct task_stru
399 + return 0;
400 + }
401 +
402 +-unsigned long arch_randomize_brk(struct mm_struct *mm)
403 +-{
404 +- unsigned long range_end = mm->brk + 0x02000000;
405 +- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
406 +-}
407 +-
408 + #ifdef CONFIG_MMU
409 + /*
410 + * The vectors page is always readable from user space for the
411 +diff -urNp linux-3.1.1/arch/arm/kernel/traps.c linux-3.1.1/arch/arm/kernel/traps.c
412 +--- linux-3.1.1/arch/arm/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
413 ++++ linux-3.1.1/arch/arm/kernel/traps.c 2011-11-16 18:40:08.000000000 -0500
414 +@@ -257,6 +257,8 @@ static int __die(const char *str, int er
415 +
416 + static DEFINE_SPINLOCK(die_lock);
417 +
418 ++extern void gr_handle_kernel_exploit(void);
419 ++
420 + /*
421 + * This function is protected against re-entrancy.
422 + */
423 +@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs
424 + panic("Fatal exception in interrupt");
425 + if (panic_on_oops)
426 + panic("Fatal exception");
427 ++
428 ++ gr_handle_kernel_exploit();
429 ++
430 + if (ret != NOTIFY_STOP)
431 + do_exit(SIGSEGV);
432 + }
433 +diff -urNp linux-3.1.1/arch/arm/lib/copy_from_user.S linux-3.1.1/arch/arm/lib/copy_from_user.S
434 +--- linux-3.1.1/arch/arm/lib/copy_from_user.S 2011-11-11 15:19:27.000000000 -0500
435 ++++ linux-3.1.1/arch/arm/lib/copy_from_user.S 2011-11-16 18:39:07.000000000 -0500
436 +@@ -16,7 +16,7 @@
437 + /*
438 + * Prototype:
439 + *
440 +- * size_t __copy_from_user(void *to, const void *from, size_t n)
441 ++ * size_t ___copy_from_user(void *to, const void *from, size_t n)
442 + *
443 + * Purpose:
444 + *
445 +@@ -84,11 +84,11 @@
446 +
447 + .text
448 +
449 +-ENTRY(__copy_from_user)
450 ++ENTRY(___copy_from_user)
451 +
452 + #include "copy_template.S"
453 +
454 +-ENDPROC(__copy_from_user)
455 ++ENDPROC(___copy_from_user)
456 +
457 + .pushsection .fixup,"ax"
458 + .align 0
459 +diff -urNp linux-3.1.1/arch/arm/lib/copy_to_user.S linux-3.1.1/arch/arm/lib/copy_to_user.S
460 +--- linux-3.1.1/arch/arm/lib/copy_to_user.S 2011-11-11 15:19:27.000000000 -0500
461 ++++ linux-3.1.1/arch/arm/lib/copy_to_user.S 2011-11-16 18:39:07.000000000 -0500
462 +@@ -16,7 +16,7 @@
463 + /*
464 + * Prototype:
465 + *
466 +- * size_t __copy_to_user(void *to, const void *from, size_t n)
467 ++ * size_t ___copy_to_user(void *to, const void *from, size_t n)
468 + *
469 + * Purpose:
470 + *
471 +@@ -88,11 +88,11 @@
472 + .text
473 +
474 + ENTRY(__copy_to_user_std)
475 +-WEAK(__copy_to_user)
476 ++WEAK(___copy_to_user)
477 +
478 + #include "copy_template.S"
479 +
480 +-ENDPROC(__copy_to_user)
481 ++ENDPROC(___copy_to_user)
482 + ENDPROC(__copy_to_user_std)
483 +
484 + .pushsection .fixup,"ax"
485 +diff -urNp linux-3.1.1/arch/arm/lib/uaccess.S linux-3.1.1/arch/arm/lib/uaccess.S
486 +--- linux-3.1.1/arch/arm/lib/uaccess.S 2011-11-11 15:19:27.000000000 -0500
487 ++++ linux-3.1.1/arch/arm/lib/uaccess.S 2011-11-16 18:39:07.000000000 -0500
488 +@@ -20,7 +20,7 @@
489 +
490 + #define PAGE_SHIFT 12
491 +
492 +-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
493 ++/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
494 + * Purpose : copy a block to user memory from kernel memory
495 + * Params : to - user memory
496 + * : from - kernel memory
497 +@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
498 + sub r2, r2, ip
499 + b .Lc2u_dest_aligned
500 +
501 +-ENTRY(__copy_to_user)
502 ++ENTRY(___copy_to_user)
503 + stmfd sp!, {r2, r4 - r7, lr}
504 + cmp r2, #4
505 + blt .Lc2u_not_enough
506 +@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
507 + ldrgtb r3, [r1], #0
508 + USER( T(strgtb) r3, [r0], #1) @ May fault
509 + b .Lc2u_finished
510 +-ENDPROC(__copy_to_user)
511 ++ENDPROC(___copy_to_user)
512 +
513 + .pushsection .fixup,"ax"
514 + .align 0
515 + 9001: ldmfd sp!, {r0, r4 - r7, pc}
516 + .popsection
517 +
518 +-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
519 ++/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
520 + * Purpose : copy a block from user memory to kernel memory
521 + * Params : to - kernel memory
522 + * : from - user memory
523 +@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
524 + sub r2, r2, ip
525 + b .Lcfu_dest_aligned
526 +
527 +-ENTRY(__copy_from_user)
528 ++ENTRY(___copy_from_user)
529 + stmfd sp!, {r0, r2, r4 - r7, lr}
530 + cmp r2, #4
531 + blt .Lcfu_not_enough
532 +@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
533 + USER( T(ldrgtb) r3, [r1], #1) @ May fault
534 + strgtb r3, [r0], #1
535 + b .Lcfu_finished
536 +-ENDPROC(__copy_from_user)
537 ++ENDPROC(___copy_from_user)
538 +
539 + .pushsection .fixup,"ax"
540 + .align 0
541 +diff -urNp linux-3.1.1/arch/arm/lib/uaccess_with_memcpy.c linux-3.1.1/arch/arm/lib/uaccess_with_memcpy.c
542 +--- linux-3.1.1/arch/arm/lib/uaccess_with_memcpy.c 2011-11-11 15:19:27.000000000 -0500
543 ++++ linux-3.1.1/arch/arm/lib/uaccess_with_memcpy.c 2011-11-16 18:39:07.000000000 -0500
544 +@@ -103,7 +103,7 @@ out:
545 + }
546 +
547 + unsigned long
548 +-__copy_to_user(void __user *to, const void *from, unsigned long n)
549 ++___copy_to_user(void __user *to, const void *from, unsigned long n)
550 + {
551 + /*
552 + * This test is stubbed out of the main function above to keep
553 +diff -urNp linux-3.1.1/arch/arm/mach-ux500/mbox-db5500.c linux-3.1.1/arch/arm/mach-ux500/mbox-db5500.c
554 +--- linux-3.1.1/arch/arm/mach-ux500/mbox-db5500.c 2011-11-11 15:19:27.000000000 -0500
555 ++++ linux-3.1.1/arch/arm/mach-ux500/mbox-db5500.c 2011-11-16 18:40:08.000000000 -0500
556 +@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
557 + return sprintf(buf, "0x%X\n", mbox_value);
558 + }
559 +
560 +-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
561 ++static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
562 +
563 + static int mbox_show(struct seq_file *s, void *data)
564 + {
565 +diff -urNp linux-3.1.1/arch/arm/mm/fault.c linux-3.1.1/arch/arm/mm/fault.c
566 +--- linux-3.1.1/arch/arm/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
567 ++++ linux-3.1.1/arch/arm/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
568 +@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
569 + }
570 + #endif
571 +
572 ++#ifdef CONFIG_PAX_PAGEEXEC
573 ++ if (fsr & FSR_LNX_PF) {
574 ++ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
575 ++ do_group_exit(SIGKILL);
576 ++ }
577 ++#endif
578 ++
579 + tsk->thread.address = addr;
580 + tsk->thread.error_code = fsr;
581 + tsk->thread.trap_no = 14;
582 +@@ -383,6 +390,33 @@ do_page_fault(unsigned long addr, unsign
583 + }
584 + #endif /* CONFIG_MMU */
585 +
586 ++#ifdef CONFIG_PAX_PAGEEXEC
587 ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
588 ++{
589 ++ long i;
590 ++
591 ++ printk(KERN_ERR "PAX: bytes at PC: ");
592 ++ for (i = 0; i < 20; i++) {
593 ++ unsigned char c;
594 ++ if (get_user(c, (__force unsigned char __user *)pc+i))
595 ++ printk(KERN_CONT "?? ");
596 ++ else
597 ++ printk(KERN_CONT "%02x ", c);
598 ++ }
599 ++ printk("\n");
600 ++
601 ++ printk(KERN_ERR "PAX: bytes at SP-4: ");
602 ++ for (i = -1; i < 20; i++) {
603 ++ unsigned long c;
604 ++ if (get_user(c, (__force unsigned long __user *)sp+i))
605 ++ printk(KERN_CONT "???????? ");
606 ++ else
607 ++ printk(KERN_CONT "%08lx ", c);
608 ++ }
609 ++ printk("\n");
610 ++}
611 ++#endif
612 ++
613 + /*
614 + * First Level Translation Fault Handler
615 + *
616 +diff -urNp linux-3.1.1/arch/arm/mm/mmap.c linux-3.1.1/arch/arm/mm/mmap.c
617 +--- linux-3.1.1/arch/arm/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
618 ++++ linux-3.1.1/arch/arm/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
619 +@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
620 + if (len > TASK_SIZE)
621 + return -ENOMEM;
622 +
623 ++#ifdef CONFIG_PAX_RANDMMAP
624 ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
625 ++#endif
626 ++
627 + if (addr) {
628 + if (do_align)
629 + addr = COLOUR_ALIGN(addr, pgoff);
630 +@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
631 + addr = PAGE_ALIGN(addr);
632 +
633 + vma = find_vma(mm, addr);
634 +- if (TASK_SIZE - len >= addr &&
635 +- (!vma || addr + len <= vma->vm_start))
636 ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
637 + return addr;
638 + }
639 + if (len > mm->cached_hole_size) {
640 +- start_addr = addr = mm->free_area_cache;
641 ++ start_addr = addr = mm->free_area_cache;
642 + } else {
643 +- start_addr = addr = TASK_UNMAPPED_BASE;
644 +- mm->cached_hole_size = 0;
645 ++ start_addr = addr = mm->mmap_base;
646 ++ mm->cached_hole_size = 0;
647 + }
648 + /* 8 bits of randomness in 20 address space bits */
649 + if ((current->flags & PF_RANDOMIZE) &&
650 +@@ -100,14 +103,14 @@ full_search:
651 + * Start a new search - just in case we missed
652 + * some holes.
653 + */
654 +- if (start_addr != TASK_UNMAPPED_BASE) {
655 +- start_addr = addr = TASK_UNMAPPED_BASE;
656 ++ if (start_addr != mm->mmap_base) {
657 ++ start_addr = addr = mm->mmap_base;
658 + mm->cached_hole_size = 0;
659 + goto full_search;
660 + }
661 + return -ENOMEM;
662 + }
663 +- if (!vma || addr + len <= vma->vm_start) {
664 ++ if (check_heap_stack_gap(vma, addr, len)) {
665 + /*
666 + * Remember the place where we stopped the search:
667 + */
668 +diff -urNp linux-3.1.1/arch/avr32/include/asm/elf.h linux-3.1.1/arch/avr32/include/asm/elf.h
669 +--- linux-3.1.1/arch/avr32/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
670 ++++ linux-3.1.1/arch/avr32/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
671 +@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
672 + the loader. We need to make sure that it is out of the way of the program
673 + that it will "exec", and that there is sufficient room for the brk. */
674 +
675 +-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
676 ++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
677 +
678 ++#ifdef CONFIG_PAX_ASLR
679 ++#define PAX_ELF_ET_DYN_BASE 0x00001000UL
680 ++
681 ++#define PAX_DELTA_MMAP_LEN 15
682 ++#define PAX_DELTA_STACK_LEN 15
683 ++#endif
684 +
685 + /* This yields a mask that user programs can use to figure out what
686 + instruction set this CPU supports. This could be done in user space,
687 +diff -urNp linux-3.1.1/arch/avr32/include/asm/kmap_types.h linux-3.1.1/arch/avr32/include/asm/kmap_types.h
688 +--- linux-3.1.1/arch/avr32/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
689 ++++ linux-3.1.1/arch/avr32/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
690 +@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
691 + D(11) KM_IRQ1,
692 + D(12) KM_SOFTIRQ0,
693 + D(13) KM_SOFTIRQ1,
694 +-D(14) KM_TYPE_NR
695 ++D(14) KM_CLEARPAGE,
696 ++D(15) KM_TYPE_NR
697 + };
698 +
699 + #undef D
700 +diff -urNp linux-3.1.1/arch/avr32/mm/fault.c linux-3.1.1/arch/avr32/mm/fault.c
701 +--- linux-3.1.1/arch/avr32/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
702 ++++ linux-3.1.1/arch/avr32/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
703 +@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
704 +
705 + int exception_trace = 1;
706 +
707 ++#ifdef CONFIG_PAX_PAGEEXEC
708 ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
709 ++{
710 ++ unsigned long i;
711 ++
712 ++ printk(KERN_ERR "PAX: bytes at PC: ");
713 ++ for (i = 0; i < 20; i++) {
714 ++ unsigned char c;
715 ++ if (get_user(c, (unsigned char *)pc+i))
716 ++ printk(KERN_CONT "???????? ");
717 ++ else
718 ++ printk(KERN_CONT "%02x ", c);
719 ++ }
720 ++ printk("\n");
721 ++}
722 ++#endif
723 ++
724 + /*
725 + * This routine handles page faults. It determines the address and the
726 + * problem, and then passes it off to one of the appropriate routines.
727 +@@ -156,6 +173,16 @@ bad_area:
728 + up_read(&mm->mmap_sem);
729 +
730 + if (user_mode(regs)) {
731 ++
732 ++#ifdef CONFIG_PAX_PAGEEXEC
733 ++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
734 ++ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
735 ++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
736 ++ do_group_exit(SIGKILL);
737 ++ }
738 ++ }
739 ++#endif
740 ++
741 + if (exception_trace && printk_ratelimit())
742 + printk("%s%s[%d]: segfault at %08lx pc %08lx "
743 + "sp %08lx ecr %lu\n",
744 +diff -urNp linux-3.1.1/arch/frv/include/asm/kmap_types.h linux-3.1.1/arch/frv/include/asm/kmap_types.h
745 +--- linux-3.1.1/arch/frv/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
746 ++++ linux-3.1.1/arch/frv/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
747 +@@ -23,6 +23,7 @@ enum km_type {
748 + KM_IRQ1,
749 + KM_SOFTIRQ0,
750 + KM_SOFTIRQ1,
751 ++ KM_CLEARPAGE,
752 + KM_TYPE_NR
753 + };
754 +
755 +diff -urNp linux-3.1.1/arch/frv/mm/elf-fdpic.c linux-3.1.1/arch/frv/mm/elf-fdpic.c
756 +--- linux-3.1.1/arch/frv/mm/elf-fdpic.c 2011-11-11 15:19:27.000000000 -0500
757 ++++ linux-3.1.1/arch/frv/mm/elf-fdpic.c 2011-11-16 18:39:07.000000000 -0500
758 +@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
759 + if (addr) {
760 + addr = PAGE_ALIGN(addr);
761 + vma = find_vma(current->mm, addr);
762 +- if (TASK_SIZE - len >= addr &&
763 +- (!vma || addr + len <= vma->vm_start))
764 ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
765 + goto success;
766 + }
767 +
768 +@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
769 + for (; vma; vma = vma->vm_next) {
770 + if (addr > limit)
771 + break;
772 +- if (addr + len <= vma->vm_start)
773 ++ if (check_heap_stack_gap(vma, addr, len))
774 + goto success;
775 + addr = vma->vm_end;
776 + }
777 +@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
778 + for (; vma; vma = vma->vm_next) {
779 + if (addr > limit)
780 + break;
781 +- if (addr + len <= vma->vm_start)
782 ++ if (check_heap_stack_gap(vma, addr, len))
783 + goto success;
784 + addr = vma->vm_end;
785 + }
786 +diff -urNp linux-3.1.1/arch/ia64/include/asm/elf.h linux-3.1.1/arch/ia64/include/asm/elf.h
787 +--- linux-3.1.1/arch/ia64/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
788 ++++ linux-3.1.1/arch/ia64/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
789 +@@ -42,6 +42,13 @@
790 + */
791 + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
792 +
793 ++#ifdef CONFIG_PAX_ASLR
794 ++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
795 ++
796 ++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
797 ++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
798 ++#endif
799 ++
800 + #define PT_IA_64_UNWIND 0x70000001
801 +
802 + /* IA-64 relocations: */
803 +diff -urNp linux-3.1.1/arch/ia64/include/asm/pgtable.h linux-3.1.1/arch/ia64/include/asm/pgtable.h
804 +--- linux-3.1.1/arch/ia64/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
805 ++++ linux-3.1.1/arch/ia64/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
806 +@@ -12,7 +12,7 @@
807 + * David Mosberger-Tang <davidm@××××××.com>
808 + */
809 +
810 +-
811 ++#include <linux/const.h>
812 + #include <asm/mman.h>
813 + #include <asm/page.h>
814 + #include <asm/processor.h>
815 +@@ -143,6 +143,17 @@
816 + #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
817 + #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
818 + #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
819 ++
820 ++#ifdef CONFIG_PAX_PAGEEXEC
821 ++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
822 ++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
823 ++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
824 ++#else
825 ++# define PAGE_SHARED_NOEXEC PAGE_SHARED
826 ++# define PAGE_READONLY_NOEXEC PAGE_READONLY
827 ++# define PAGE_COPY_NOEXEC PAGE_COPY
828 ++#endif
829 ++
830 + #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
831 + #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
832 + #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
833 +diff -urNp linux-3.1.1/arch/ia64/include/asm/spinlock.h linux-3.1.1/arch/ia64/include/asm/spinlock.h
834 +--- linux-3.1.1/arch/ia64/include/asm/spinlock.h 2011-11-11 15:19:27.000000000 -0500
835 ++++ linux-3.1.1/arch/ia64/include/asm/spinlock.h 2011-11-16 18:39:07.000000000 -0500
836 +@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
837 + unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
838 +
839 + asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
840 +- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
841 ++ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
842 + }
843 +
844 + static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
845 +diff -urNp linux-3.1.1/arch/ia64/include/asm/uaccess.h linux-3.1.1/arch/ia64/include/asm/uaccess.h
846 +--- linux-3.1.1/arch/ia64/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
847 ++++ linux-3.1.1/arch/ia64/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
848 +@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
849 + const void *__cu_from = (from); \
850 + long __cu_len = (n); \
851 + \
852 +- if (__access_ok(__cu_to, __cu_len, get_fs())) \
853 ++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
854 + __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
855 + __cu_len; \
856 + })
857 +@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
858 + long __cu_len = (n); \
859 + \
860 + __chk_user_ptr(__cu_from); \
861 +- if (__access_ok(__cu_from, __cu_len, get_fs())) \
862 ++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
863 + __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
864 + __cu_len; \
865 + })
866 +diff -urNp linux-3.1.1/arch/ia64/kernel/module.c linux-3.1.1/arch/ia64/kernel/module.c
867 +--- linux-3.1.1/arch/ia64/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
868 ++++ linux-3.1.1/arch/ia64/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
869 +@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
870 + void
871 + module_free (struct module *mod, void *module_region)
872 + {
873 +- if (mod && mod->arch.init_unw_table &&
874 +- module_region == mod->module_init) {
875 ++ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
876 + unw_remove_unwind_table(mod->arch.init_unw_table);
877 + mod->arch.init_unw_table = NULL;
878 + }
879 +@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
880 + }
881 +
882 + static inline int
883 ++in_init_rx (const struct module *mod, uint64_t addr)
884 ++{
885 ++ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
886 ++}
887 ++
888 ++static inline int
889 ++in_init_rw (const struct module *mod, uint64_t addr)
890 ++{
891 ++ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
892 ++}
893 ++
894 ++static inline int
895 + in_init (const struct module *mod, uint64_t addr)
896 + {
897 +- return addr - (uint64_t) mod->module_init < mod->init_size;
898 ++ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
899 ++}
900 ++
901 ++static inline int
902 ++in_core_rx (const struct module *mod, uint64_t addr)
903 ++{
904 ++ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
905 ++}
906 ++
907 ++static inline int
908 ++in_core_rw (const struct module *mod, uint64_t addr)
909 ++{
910 ++ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
911 + }
912 +
913 + static inline int
914 + in_core (const struct module *mod, uint64_t addr)
915 + {
916 +- return addr - (uint64_t) mod->module_core < mod->core_size;
917 ++ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
918 + }
919 +
920 + static inline int
921 +@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_
922 + break;
923 +
924 + case RV_BDREL:
925 +- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
926 ++ if (in_init_rx(mod, val))
927 ++ val -= (uint64_t) mod->module_init_rx;
928 ++ else if (in_init_rw(mod, val))
929 ++ val -= (uint64_t) mod->module_init_rw;
930 ++ else if (in_core_rx(mod, val))
931 ++ val -= (uint64_t) mod->module_core_rx;
932 ++ else if (in_core_rw(mod, val))
933 ++ val -= (uint64_t) mod->module_core_rw;
934 + break;
935 +
936 + case RV_LTV:
937 +@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
938 + * addresses have been selected...
939 + */
940 + uint64_t gp;
941 +- if (mod->core_size > MAX_LTOFF)
942 ++ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
943 + /*
944 + * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
945 + * at the end of the module.
946 + */
947 +- gp = mod->core_size - MAX_LTOFF / 2;
948 ++ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
949 + else
950 +- gp = mod->core_size / 2;
951 +- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
952 ++ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
953 ++ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
954 + mod->arch.gp = gp;
955 + DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
956 + }
957 +diff -urNp linux-3.1.1/arch/ia64/kernel/sys_ia64.c linux-3.1.1/arch/ia64/kernel/sys_ia64.c
958 +--- linux-3.1.1/arch/ia64/kernel/sys_ia64.c 2011-11-11 15:19:27.000000000 -0500
959 ++++ linux-3.1.1/arch/ia64/kernel/sys_ia64.c 2011-11-16 18:39:07.000000000 -0500
960 +@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
961 + if (REGION_NUMBER(addr) == RGN_HPAGE)
962 + addr = 0;
963 + #endif
964 ++
965 ++#ifdef CONFIG_PAX_RANDMMAP
966 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
967 ++ addr = mm->free_area_cache;
968 ++ else
969 ++#endif
970 ++
971 + if (!addr)
972 + addr = mm->free_area_cache;
973 +
974 +@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
975 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
976 + /* At this point: (!vma || addr < vma->vm_end). */
977 + if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
978 +- if (start_addr != TASK_UNMAPPED_BASE) {
979 ++ if (start_addr != mm->mmap_base) {
980 + /* Start a new search --- just in case we missed some holes. */
981 +- addr = TASK_UNMAPPED_BASE;
982 ++ addr = mm->mmap_base;
983 + goto full_search;
984 + }
985 + return -ENOMEM;
986 + }
987 +- if (!vma || addr + len <= vma->vm_start) {
988 ++ if (check_heap_stack_gap(vma, addr, len)) {
989 + /* Remember the address where we stopped this search: */
990 + mm->free_area_cache = addr + len;
991 + return addr;
992 +diff -urNp linux-3.1.1/arch/ia64/kernel/vmlinux.lds.S linux-3.1.1/arch/ia64/kernel/vmlinux.lds.S
993 +--- linux-3.1.1/arch/ia64/kernel/vmlinux.lds.S 2011-11-11 15:19:27.000000000 -0500
994 ++++ linux-3.1.1/arch/ia64/kernel/vmlinux.lds.S 2011-11-16 18:39:07.000000000 -0500
995 +@@ -199,7 +199,7 @@ SECTIONS {
996 + /* Per-cpu data: */
997 + . = ALIGN(PERCPU_PAGE_SIZE);
998 + PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
999 +- __phys_per_cpu_start = __per_cpu_load;
1000 ++ __phys_per_cpu_start = per_cpu_load;
1001 + /*
1002 + * ensure percpu data fits
1003 + * into percpu page size
1004 +diff -urNp linux-3.1.1/arch/ia64/mm/fault.c linux-3.1.1/arch/ia64/mm/fault.c
1005 +--- linux-3.1.1/arch/ia64/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
1006 ++++ linux-3.1.1/arch/ia64/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
1007 +@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned
1008 + return pte_present(pte);
1009 + }
1010 +
1011 ++#ifdef CONFIG_PAX_PAGEEXEC
1012 ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1013 ++{
1014 ++ unsigned long i;
1015 ++
1016 ++ printk(KERN_ERR "PAX: bytes at PC: ");
1017 ++ for (i = 0; i < 8; i++) {
1018 ++ unsigned int c;
1019 ++ if (get_user(c, (unsigned int *)pc+i))
1020 ++ printk(KERN_CONT "???????? ");
1021 ++ else
1022 ++ printk(KERN_CONT "%08x ", c);
1023 ++ }
1024 ++ printk("\n");
1025 ++}
1026 ++#endif
1027 ++
1028 + void __kprobes
1029 + ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1030 + {
1031 +@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
1032 + mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1033 + | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1034 +
1035 +- if ((vma->vm_flags & mask) != mask)
1036 ++ if ((vma->vm_flags & mask) != mask) {
1037 ++
1038 ++#ifdef CONFIG_PAX_PAGEEXEC
1039 ++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1040 ++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1041 ++ goto bad_area;
1042 ++
1043 ++ up_read(&mm->mmap_sem);
1044 ++ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1045 ++ do_group_exit(SIGKILL);
1046 ++ }
1047 ++#endif
1048 ++
1049 + goto bad_area;
1050 +
1051 ++ }
1052 ++
1053 + /*
1054 + * If for any reason at all we couldn't handle the fault, make
1055 + * sure we exit gracefully rather than endlessly redo the
1056 +diff -urNp linux-3.1.1/arch/ia64/mm/hugetlbpage.c linux-3.1.1/arch/ia64/mm/hugetlbpage.c
1057 +--- linux-3.1.1/arch/ia64/mm/hugetlbpage.c 2011-11-11 15:19:27.000000000 -0500
1058 ++++ linux-3.1.1/arch/ia64/mm/hugetlbpage.c 2011-11-16 18:39:07.000000000 -0500
1059 +@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1060 + /* At this point: (!vmm || addr < vmm->vm_end). */
1061 + if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1062 + return -ENOMEM;
1063 +- if (!vmm || (addr + len) <= vmm->vm_start)
1064 ++ if (check_heap_stack_gap(vmm, addr, len))
1065 + return addr;
1066 + addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1067 + }
1068 +diff -urNp linux-3.1.1/arch/ia64/mm/init.c linux-3.1.1/arch/ia64/mm/init.c
1069 +--- linux-3.1.1/arch/ia64/mm/init.c 2011-11-11 15:19:27.000000000 -0500
1070 ++++ linux-3.1.1/arch/ia64/mm/init.c 2011-11-16 18:39:07.000000000 -0500
1071 +@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1072 + vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1073 + vma->vm_end = vma->vm_start + PAGE_SIZE;
1074 + vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1075 ++
1076 ++#ifdef CONFIG_PAX_PAGEEXEC
1077 ++ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1078 ++ vma->vm_flags &= ~VM_EXEC;
1079 ++
1080 ++#ifdef CONFIG_PAX_MPROTECT
1081 ++ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1082 ++ vma->vm_flags &= ~VM_MAYEXEC;
1083 ++#endif
1084 ++
1085 ++ }
1086 ++#endif
1087 ++
1088 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1089 + down_write(&current->mm->mmap_sem);
1090 + if (insert_vm_struct(current->mm, vma)) {
1091 +diff -urNp linux-3.1.1/arch/m32r/lib/usercopy.c linux-3.1.1/arch/m32r/lib/usercopy.c
1092 +--- linux-3.1.1/arch/m32r/lib/usercopy.c 2011-11-11 15:19:27.000000000 -0500
1093 ++++ linux-3.1.1/arch/m32r/lib/usercopy.c 2011-11-16 18:39:07.000000000 -0500
1094 +@@ -14,6 +14,9 @@
1095 + unsigned long
1096 + __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1097 + {
1098 ++ if ((long)n < 0)
1099 ++ return n;
1100 ++
1101 + prefetch(from);
1102 + if (access_ok(VERIFY_WRITE, to, n))
1103 + __copy_user(to,from,n);
1104 +@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1105 + unsigned long
1106 + __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1107 + {
1108 ++ if ((long)n < 0)
1109 ++ return n;
1110 ++
1111 + prefetchw(to);
1112 + if (access_ok(VERIFY_READ, from, n))
1113 + __copy_user_zeroing(to,from,n);
1114 +diff -urNp linux-3.1.1/arch/mips/include/asm/elf.h linux-3.1.1/arch/mips/include/asm/elf.h
1115 +--- linux-3.1.1/arch/mips/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
1116 ++++ linux-3.1.1/arch/mips/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
1117 +@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1118 + #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1119 + #endif
1120 +
1121 ++#ifdef CONFIG_PAX_ASLR
1122 ++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1123 ++
1124 ++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1125 ++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1126 ++#endif
1127 ++
1128 + #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1129 + struct linux_binprm;
1130 + extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1131 + int uses_interp);
1132 +
1133 +-struct mm_struct;
1134 +-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1135 +-#define arch_randomize_brk arch_randomize_brk
1136 +-
1137 + #endif /* _ASM_ELF_H */
1138 +diff -urNp linux-3.1.1/arch/mips/include/asm/page.h linux-3.1.1/arch/mips/include/asm/page.h
1139 +--- linux-3.1.1/arch/mips/include/asm/page.h 2011-11-11 15:19:27.000000000 -0500
1140 ++++ linux-3.1.1/arch/mips/include/asm/page.h 2011-11-16 18:39:07.000000000 -0500
1141 +@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1142 + #ifdef CONFIG_CPU_MIPS32
1143 + typedef struct { unsigned long pte_low, pte_high; } pte_t;
1144 + #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1145 +- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1146 ++ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1147 + #else
1148 + typedef struct { unsigned long long pte; } pte_t;
1149 + #define pte_val(x) ((x).pte)
1150 +diff -urNp linux-3.1.1/arch/mips/include/asm/system.h linux-3.1.1/arch/mips/include/asm/system.h
1151 +--- linux-3.1.1/arch/mips/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
1152 ++++ linux-3.1.1/arch/mips/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
1153 +@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1154 + */
1155 + #define __ARCH_WANT_UNLOCKED_CTXSW
1156 +
1157 +-extern unsigned long arch_align_stack(unsigned long sp);
1158 ++#define arch_align_stack(x) ((x) & ~0xfUL)
1159 +
1160 + #endif /* _ASM_SYSTEM_H */
1161 +diff -urNp linux-3.1.1/arch/mips/kernel/binfmt_elfn32.c linux-3.1.1/arch/mips/kernel/binfmt_elfn32.c
1162 +--- linux-3.1.1/arch/mips/kernel/binfmt_elfn32.c 2011-11-11 15:19:27.000000000 -0500
1163 ++++ linux-3.1.1/arch/mips/kernel/binfmt_elfn32.c 2011-11-16 18:39:07.000000000 -0500
1164 +@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1165 + #undef ELF_ET_DYN_BASE
1166 + #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1167 +
1168 ++#ifdef CONFIG_PAX_ASLR
1169 ++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1170 ++
1171 ++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1172 ++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1173 ++#endif
1174 ++
1175 + #include <asm/processor.h>
1176 + #include <linux/module.h>
1177 + #include <linux/elfcore.h>
1178 +diff -urNp linux-3.1.1/arch/mips/kernel/binfmt_elfo32.c linux-3.1.1/arch/mips/kernel/binfmt_elfo32.c
1179 +--- linux-3.1.1/arch/mips/kernel/binfmt_elfo32.c 2011-11-11 15:19:27.000000000 -0500
1180 ++++ linux-3.1.1/arch/mips/kernel/binfmt_elfo32.c 2011-11-16 18:39:07.000000000 -0500
1181 +@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1182 + #undef ELF_ET_DYN_BASE
1183 + #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1184 +
1185 ++#ifdef CONFIG_PAX_ASLR
1186 ++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1187 ++
1188 ++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1189 ++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1190 ++#endif
1191 ++
1192 + #include <asm/processor.h>
1193 +
1194 + /*
1195 +diff -urNp linux-3.1.1/arch/mips/kernel/process.c linux-3.1.1/arch/mips/kernel/process.c
1196 +--- linux-3.1.1/arch/mips/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
1197 ++++ linux-3.1.1/arch/mips/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
1198 +@@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_stru
1199 + out:
1200 + return pc;
1201 + }
1202 +-
1203 +-/*
1204 +- * Don't forget that the stack pointer must be aligned on a 8 bytes
1205 +- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1206 +- */
1207 +-unsigned long arch_align_stack(unsigned long sp)
1208 +-{
1209 +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1210 +- sp -= get_random_int() & ~PAGE_MASK;
1211 +-
1212 +- return sp & ALMASK;
1213 +-}
1214 +diff -urNp linux-3.1.1/arch/mips/mm/fault.c linux-3.1.1/arch/mips/mm/fault.c
1215 +--- linux-3.1.1/arch/mips/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
1216 ++++ linux-3.1.1/arch/mips/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
1217 +@@ -28,6 +28,23 @@
1218 + #include <asm/highmem.h> /* For VMALLOC_END */
1219 + #include <linux/kdebug.h>
1220 +
1221 ++#ifdef CONFIG_PAX_PAGEEXEC
1222 ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1223 ++{
1224 ++ unsigned long i;
1225 ++
1226 ++ printk(KERN_ERR "PAX: bytes at PC: ");
1227 ++ for (i = 0; i < 5; i++) {
1228 ++ unsigned int c;
1229 ++ if (get_user(c, (unsigned int *)pc+i))
1230 ++ printk(KERN_CONT "???????? ");
1231 ++ else
1232 ++ printk(KERN_CONT "%08x ", c);
1233 ++ }
1234 ++ printk("\n");
1235 ++}
1236 ++#endif
1237 ++
1238 + /*
1239 + * This routine handles page faults. It determines the address,
1240 + * and the problem, and then passes it off to one of the appropriate
1241 +diff -urNp linux-3.1.1/arch/mips/mm/mmap.c linux-3.1.1/arch/mips/mm/mmap.c
1242 +--- linux-3.1.1/arch/mips/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
1243 ++++ linux-3.1.1/arch/mips/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
1244 +@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_a
1245 + do_color_align = 1;
1246 +
1247 + /* requesting a specific address */
1248 ++
1249 ++#ifdef CONFIG_PAX_RANDMMAP
1250 ++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1251 ++#endif
1252 ++
1253 + if (addr) {
1254 + if (do_color_align)
1255 + addr = COLOUR_ALIGN(addr, pgoff);
1256 +@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_a
1257 + addr = PAGE_ALIGN(addr);
1258 +
1259 + vma = find_vma(mm, addr);
1260 +- if (TASK_SIZE - len >= addr &&
1261 +- (!vma || addr + len <= vma->vm_start))
1262 ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1263 + return addr;
1264 + }
1265 +
1266 +@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_a
1267 + /* At this point: (!vma || addr < vma->vm_end). */
1268 + if (TASK_SIZE - len < addr)
1269 + return -ENOMEM;
1270 +- if (!vma || addr + len <= vma->vm_start)
1271 ++ if (check_heap_stack_gap(vma, addr, len))
1272 + return addr;
1273 + addr = vma->vm_end;
1274 + if (do_color_align)
1275 +@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_a
1276 + /* make sure it can fit in the remaining address space */
1277 + if (likely(addr > len)) {
1278 + vma = find_vma(mm, addr - len);
1279 +- if (!vma || addr <= vma->vm_start) {
1280 ++ if (check_heap_stack_gap(vma, addr - len, len))
1281 + /* cache the address as a hint for next time */
1282 + return mm->free_area_cache = addr - len;
1283 + }
1284 +@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_a
1285 + * return with success:
1286 + */
1287 + vma = find_vma(mm, addr);
1288 +- if (likely(!vma || addr + len <= vma->vm_start)) {
1289 ++ if (check_heap_stack_gap(vma, addr, len)) {
1290 + /* cache the address as a hint for next time */
1291 + return mm->free_area_cache = addr;
1292 + }
1293 +@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_str
1294 + mm->unmap_area = arch_unmap_area_topdown;
1295 + }
1296 + }
1297 +-
1298 +-static inline unsigned long brk_rnd(void)
1299 +-{
1300 +- unsigned long rnd = get_random_int();
1301 +-
1302 +- rnd = rnd << PAGE_SHIFT;
1303 +- /* 8MB for 32bit, 256MB for 64bit */
1304 +- if (TASK_IS_32BIT_ADDR)
1305 +- rnd = rnd & 0x7ffffful;
1306 +- else
1307 +- rnd = rnd & 0xffffffful;
1308 +-
1309 +- return rnd;
1310 +-}
1311 +-
1312 +-unsigned long arch_randomize_brk(struct mm_struct *mm)
1313 +-{
1314 +- unsigned long base = mm->brk;
1315 +- unsigned long ret;
1316 +-
1317 +- ret = PAGE_ALIGN(base + brk_rnd());
1318 +-
1319 +- if (ret < mm->brk)
1320 +- return mm->brk;
1321 +-
1322 +- return ret;
1323 +-}
1324 +diff -urNp linux-3.1.1/arch/parisc/include/asm/elf.h linux-3.1.1/arch/parisc/include/asm/elf.h
1325 +--- linux-3.1.1/arch/parisc/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
1326 ++++ linux-3.1.1/arch/parisc/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
1327 +@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1328 +
1329 + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1330 +
1331 ++#ifdef CONFIG_PAX_ASLR
1332 ++#define PAX_ELF_ET_DYN_BASE 0x10000UL
1333 ++
1334 ++#define PAX_DELTA_MMAP_LEN 16
1335 ++#define PAX_DELTA_STACK_LEN 16
1336 ++#endif
1337 ++
1338 + /* This yields a mask that user programs can use to figure out what
1339 + instruction set this CPU supports. This could be done in user space,
1340 + but it's not easy, and we've already done it here. */
1341 +diff -urNp linux-3.1.1/arch/parisc/include/asm/pgtable.h linux-3.1.1/arch/parisc/include/asm/pgtable.h
1342 +--- linux-3.1.1/arch/parisc/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
1343 ++++ linux-3.1.1/arch/parisc/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
1344 +@@ -210,6 +210,17 @@ struct vm_area_struct;
1345 + #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1346 + #define PAGE_COPY PAGE_EXECREAD
1347 + #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1348 ++
1349 ++#ifdef CONFIG_PAX_PAGEEXEC
1350 ++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1351 ++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1352 ++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1353 ++#else
1354 ++# define PAGE_SHARED_NOEXEC PAGE_SHARED
1355 ++# define PAGE_COPY_NOEXEC PAGE_COPY
1356 ++# define PAGE_READONLY_NOEXEC PAGE_READONLY
1357 ++#endif
1358 ++
1359 + #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1360 + #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1361 + #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1362 +diff -urNp linux-3.1.1/arch/parisc/kernel/module.c linux-3.1.1/arch/parisc/kernel/module.c
1363 +--- linux-3.1.1/arch/parisc/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
1364 ++++ linux-3.1.1/arch/parisc/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
1365 +@@ -98,16 +98,38 @@
1366 +
1367 + /* three functions to determine where in the module core
1368 + * or init pieces the location is */
1369 ++static inline int in_init_rx(struct module *me, void *loc)
1370 ++{
1371 ++ return (loc >= me->module_init_rx &&
1372 ++ loc < (me->module_init_rx + me->init_size_rx));
1373 ++}
1374 ++
1375 ++static inline int in_init_rw(struct module *me, void *loc)
1376 ++{
1377 ++ return (loc >= me->module_init_rw &&
1378 ++ loc < (me->module_init_rw + me->init_size_rw));
1379 ++}
1380 ++
1381 + static inline int in_init(struct module *me, void *loc)
1382 + {
1383 +- return (loc >= me->module_init &&
1384 +- loc <= (me->module_init + me->init_size));
1385 ++ return in_init_rx(me, loc) || in_init_rw(me, loc);
1386 ++}
1387 ++
1388 ++static inline int in_core_rx(struct module *me, void *loc)
1389 ++{
1390 ++ return (loc >= me->module_core_rx &&
1391 ++ loc < (me->module_core_rx + me->core_size_rx));
1392 ++}
1393 ++
1394 ++static inline int in_core_rw(struct module *me, void *loc)
1395 ++{
1396 ++ return (loc >= me->module_core_rw &&
1397 ++ loc < (me->module_core_rw + me->core_size_rw));
1398 + }
1399 +
1400 + static inline int in_core(struct module *me, void *loc)
1401 + {
1402 +- return (loc >= me->module_core &&
1403 +- loc <= (me->module_core + me->core_size));
1404 ++ return in_core_rx(me, loc) || in_core_rw(me, loc);
1405 + }
1406 +
1407 + static inline int in_local(struct module *me, void *loc)
1408 +@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_
1409 + }
1410 +
1411 + /* align things a bit */
1412 +- me->core_size = ALIGN(me->core_size, 16);
1413 +- me->arch.got_offset = me->core_size;
1414 +- me->core_size += gots * sizeof(struct got_entry);
1415 +-
1416 +- me->core_size = ALIGN(me->core_size, 16);
1417 +- me->arch.fdesc_offset = me->core_size;
1418 +- me->core_size += fdescs * sizeof(Elf_Fdesc);
1419 ++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1420 ++ me->arch.got_offset = me->core_size_rw;
1421 ++ me->core_size_rw += gots * sizeof(struct got_entry);
1422 ++
1423 ++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1424 ++ me->arch.fdesc_offset = me->core_size_rw;
1425 ++ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1426 +
1427 + me->arch.got_max = gots;
1428 + me->arch.fdesc_max = fdescs;
1429 +@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module
1430 +
1431 + BUG_ON(value == 0);
1432 +
1433 +- got = me->module_core + me->arch.got_offset;
1434 ++ got = me->module_core_rw + me->arch.got_offset;
1435 + for (i = 0; got[i].addr; i++)
1436 + if (got[i].addr == value)
1437 + goto out;
1438 +@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module
1439 + #ifdef CONFIG_64BIT
1440 + static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1441 + {
1442 +- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1443 ++ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1444 +
1445 + if (!value) {
1446 + printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1447 +@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module
1448 +
1449 + /* Create new one */
1450 + fdesc->addr = value;
1451 +- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1452 ++ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1453 + return (Elf_Addr)fdesc;
1454 + }
1455 + #endif /* CONFIG_64BIT */
1456 +@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1457 +
1458 + table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1459 + end = table + sechdrs[me->arch.unwind_section].sh_size;
1460 +- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1461 ++ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1462 +
1463 + DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1464 + me->arch.unwind_section, table, end, gp);
1465 +diff -urNp linux-3.1.1/arch/parisc/kernel/sys_parisc.c linux-3.1.1/arch/parisc/kernel/sys_parisc.c
1466 +--- linux-3.1.1/arch/parisc/kernel/sys_parisc.c 2011-11-11 15:19:27.000000000 -0500
1467 ++++ linux-3.1.1/arch/parisc/kernel/sys_parisc.c 2011-11-16 18:39:07.000000000 -0500
1468 +@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1469 + /* At this point: (!vma || addr < vma->vm_end). */
1470 + if (TASK_SIZE - len < addr)
1471 + return -ENOMEM;
1472 +- if (!vma || addr + len <= vma->vm_start)
1473 ++ if (check_heap_stack_gap(vma, addr, len))
1474 + return addr;
1475 + addr = vma->vm_end;
1476 + }
1477 +@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1478 + /* At this point: (!vma || addr < vma->vm_end). */
1479 + if (TASK_SIZE - len < addr)
1480 + return -ENOMEM;
1481 +- if (!vma || addr + len <= vma->vm_start)
1482 ++ if (check_heap_stack_gap(vma, addr, len))
1483 + return addr;
1484 + addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1485 + if (addr < vma->vm_end) /* handle wraparound */
1486 +@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1487 + if (flags & MAP_FIXED)
1488 + return addr;
1489 + if (!addr)
1490 +- addr = TASK_UNMAPPED_BASE;
1491 ++ addr = current->mm->mmap_base;
1492 +
1493 + if (filp) {
1494 + addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1495 +diff -urNp linux-3.1.1/arch/parisc/kernel/traps.c linux-3.1.1/arch/parisc/kernel/traps.c
1496 +--- linux-3.1.1/arch/parisc/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
1497 ++++ linux-3.1.1/arch/parisc/kernel/traps.c 2011-11-16 18:39:07.000000000 -0500
1498 +@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1499 +
1500 + down_read(&current->mm->mmap_sem);
1501 + vma = find_vma(current->mm,regs->iaoq[0]);
1502 +- if (vma && (regs->iaoq[0] >= vma->vm_start)
1503 +- && (vma->vm_flags & VM_EXEC)) {
1504 +-
1505 ++ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1506 + fault_address = regs->iaoq[0];
1507 + fault_space = regs->iasq[0];
1508 +
1509 +diff -urNp linux-3.1.1/arch/parisc/mm/fault.c linux-3.1.1/arch/parisc/mm/fault.c
1510 +--- linux-3.1.1/arch/parisc/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
1511 ++++ linux-3.1.1/arch/parisc/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
1512 +@@ -15,6 +15,7 @@
1513 + #include <linux/sched.h>
1514 + #include <linux/interrupt.h>
1515 + #include <linux/module.h>
1516 ++#include <linux/unistd.h>
1517 +
1518 + #include <asm/uaccess.h>
1519 + #include <asm/traps.h>
1520 +@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1521 + static unsigned long
1522 + parisc_acctyp(unsigned long code, unsigned int inst)
1523 + {
1524 +- if (code == 6 || code == 16)
1525 ++ if (code == 6 || code == 7 || code == 16)
1526 + return VM_EXEC;
1527 +
1528 + switch (inst & 0xf0000000) {
1529 +@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1530 + }
1531 + #endif
1532 +
1533 ++#ifdef CONFIG_PAX_PAGEEXEC
1534 ++/*
1535 ++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1536 ++ *
1537 ++ * returns 1 when task should be killed
1538 ++ * 2 when rt_sigreturn trampoline was detected
1539 ++ * 3 when unpatched PLT trampoline was detected
1540 ++ */
1541 ++static int pax_handle_fetch_fault(struct pt_regs *regs)
1542 ++{
1543 ++
1544 ++#ifdef CONFIG_PAX_EMUPLT
1545 ++ int err;
1546 ++
1547 ++ do { /* PaX: unpatched PLT emulation */
1548 ++ unsigned int bl, depwi;
1549 ++
1550 ++ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1551 ++ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1552 ++
1553 ++ if (err)
1554 ++ break;
1555 ++
1556 ++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1557 ++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1558 ++
1559 ++ err = get_user(ldw, (unsigned int *)addr);
1560 ++ err |= get_user(bv, (unsigned int *)(addr+4));
1561 ++ err |= get_user(ldw2, (unsigned int *)(addr+8));
1562 ++
1563 ++ if (err)
1564 ++ break;
1565 ++
1566 ++ if (ldw == 0x0E801096U &&
1567 ++ bv == 0xEAC0C000U &&
1568 ++ ldw2 == 0x0E881095U)
1569 ++ {
1570 ++ unsigned int resolver, map;
1571 ++
1572 ++ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1573 ++ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1574 ++ if (err)
1575 ++ break;
1576 ++
1577 ++ regs->gr[20] = instruction_pointer(regs)+8;
1578 ++ regs->gr[21] = map;
1579 ++ regs->gr[22] = resolver;
1580 ++ regs->iaoq[0] = resolver | 3UL;
1581 ++ regs->iaoq[1] = regs->iaoq[0] + 4;
1582 ++ return 3;
1583 ++ }
1584 ++ }
1585 ++ } while (0);
1586 ++#endif
1587 ++
1588 ++#ifdef CONFIG_PAX_EMUTRAMP
1589 ++
1590 ++#ifndef CONFIG_PAX_EMUSIGRT
1591 ++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1592 ++ return 1;
1593 ++#endif
1594 ++
1595 ++ do { /* PaX: rt_sigreturn emulation */
1596 ++ unsigned int ldi1, ldi2, bel, nop;
1597 ++
1598 ++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1599 ++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1600 ++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1601 ++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1602 ++
1603 ++ if (err)
1604 ++ break;
1605 ++
1606 ++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1607 ++ ldi2 == 0x3414015AU &&
1608 ++ bel == 0xE4008200U &&
1609 ++ nop == 0x08000240U)
1610 ++ {
1611 ++ regs->gr[25] = (ldi1 & 2) >> 1;
1612 ++ regs->gr[20] = __NR_rt_sigreturn;
1613 ++ regs->gr[31] = regs->iaoq[1] + 16;
1614 ++ regs->sr[0] = regs->iasq[1];
1615 ++ regs->iaoq[0] = 0x100UL;
1616 ++ regs->iaoq[1] = regs->iaoq[0] + 4;
1617 ++ regs->iasq[0] = regs->sr[2];
1618 ++ regs->iasq[1] = regs->sr[2];
1619 ++ return 2;
1620 ++ }
1621 ++ } while (0);
1622 ++#endif
1623 ++
1624 ++ return 1;
1625 ++}
1626 ++
1627 ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1628 ++{
1629 ++ unsigned long i;
1630 ++
1631 ++ printk(KERN_ERR "PAX: bytes at PC: ");
1632 ++ for (i = 0; i < 5; i++) {
1633 ++ unsigned int c;
1634 ++ if (get_user(c, (unsigned int *)pc+i))
1635 ++ printk(KERN_CONT "???????? ");
1636 ++ else
1637 ++ printk(KERN_CONT "%08x ", c);
1638 ++ }
1639 ++ printk("\n");
1640 ++}
1641 ++#endif
1642 ++
1643 + int fixup_exception(struct pt_regs *regs)
1644 + {
1645 + const struct exception_table_entry *fix;
1646 +@@ -192,8 +303,33 @@ good_area:
1647 +
1648 + acc_type = parisc_acctyp(code,regs->iir);
1649 +
1650 +- if ((vma->vm_flags & acc_type) != acc_type)
1651 ++ if ((vma->vm_flags & acc_type) != acc_type) {
1652 ++
1653 ++#ifdef CONFIG_PAX_PAGEEXEC
1654 ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1655 ++ (address & ~3UL) == instruction_pointer(regs))
1656 ++ {
1657 ++ up_read(&mm->mmap_sem);
1658 ++ switch (pax_handle_fetch_fault(regs)) {
1659 ++
1660 ++#ifdef CONFIG_PAX_EMUPLT
1661 ++ case 3:
1662 ++ return;
1663 ++#endif
1664 ++
1665 ++#ifdef CONFIG_PAX_EMUTRAMP
1666 ++ case 2:
1667 ++ return;
1668 ++#endif
1669 ++
1670 ++ }
1671 ++ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1672 ++ do_group_exit(SIGKILL);
1673 ++ }
1674 ++#endif
1675 ++
1676 + goto bad_area;
1677 ++ }
1678 +
1679 + /*
1680 + * If for any reason at all we couldn't handle the fault, make
1681 +diff -urNp linux-3.1.1/arch/powerpc/include/asm/elf.h linux-3.1.1/arch/powerpc/include/asm/elf.h
1682 +--- linux-3.1.1/arch/powerpc/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
1683 ++++ linux-3.1.1/arch/powerpc/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
1684 +@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1685 + the loader. We need to make sure that it is out of the way of the program
1686 + that it will "exec", and that there is sufficient room for the brk. */
1687 +
1688 +-extern unsigned long randomize_et_dyn(unsigned long base);
1689 +-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1690 ++#define ELF_ET_DYN_BASE (0x20000000)
1691 ++
1692 ++#ifdef CONFIG_PAX_ASLR
1693 ++#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1694 ++
1695 ++#ifdef __powerpc64__
1696 ++#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1697 ++#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1698 ++#else
1699 ++#define PAX_DELTA_MMAP_LEN 15
1700 ++#define PAX_DELTA_STACK_LEN 15
1701 ++#endif
1702 ++#endif
1703 +
1704 + /*
1705 + * Our registers are always unsigned longs, whether we're a 32 bit
1706 +@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1707 + (0x7ff >> (PAGE_SHIFT - 12)) : \
1708 + (0x3ffff >> (PAGE_SHIFT - 12)))
1709 +
1710 +-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1711 +-#define arch_randomize_brk arch_randomize_brk
1712 +-
1713 + #endif /* __KERNEL__ */
1714 +
1715 + /*
1716 +diff -urNp linux-3.1.1/arch/powerpc/include/asm/kmap_types.h linux-3.1.1/arch/powerpc/include/asm/kmap_types.h
1717 +--- linux-3.1.1/arch/powerpc/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
1718 ++++ linux-3.1.1/arch/powerpc/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
1719 +@@ -27,6 +27,7 @@ enum km_type {
1720 + KM_PPC_SYNC_PAGE,
1721 + KM_PPC_SYNC_ICACHE,
1722 + KM_KDB,
1723 ++ KM_CLEARPAGE,
1724 + KM_TYPE_NR
1725 + };
1726 +
1727 +diff -urNp linux-3.1.1/arch/powerpc/include/asm/mman.h linux-3.1.1/arch/powerpc/include/asm/mman.h
1728 +--- linux-3.1.1/arch/powerpc/include/asm/mman.h 2011-11-11 15:19:27.000000000 -0500
1729 ++++ linux-3.1.1/arch/powerpc/include/asm/mman.h 2011-11-16 18:39:07.000000000 -0500
1730 +@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm
1731 + }
1732 + #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
1733 +
1734 +-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
1735 ++static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
1736 + {
1737 + return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
1738 + }
1739 +diff -urNp linux-3.1.1/arch/powerpc/include/asm/page_64.h linux-3.1.1/arch/powerpc/include/asm/page_64.h
1740 +--- linux-3.1.1/arch/powerpc/include/asm/page_64.h 2011-11-11 15:19:27.000000000 -0500
1741 ++++ linux-3.1.1/arch/powerpc/include/asm/page_64.h 2011-11-16 18:39:07.000000000 -0500
1742 +@@ -155,15 +155,18 @@ do { \
1743 + * stack by default, so in the absence of a PT_GNU_STACK program header
1744 + * we turn execute permission off.
1745 + */
1746 +-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1747 +- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1748 ++#define VM_STACK_DEFAULT_FLAGS32 \
1749 ++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1750 ++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1751 +
1752 + #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1753 + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1754 +
1755 ++#ifndef CONFIG_PAX_PAGEEXEC
1756 + #define VM_STACK_DEFAULT_FLAGS \
1757 + (is_32bit_task() ? \
1758 + VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1759 ++#endif
1760 +
1761 + #include <asm-generic/getorder.h>
1762 +
1763 +diff -urNp linux-3.1.1/arch/powerpc/include/asm/page.h linux-3.1.1/arch/powerpc/include/asm/page.h
1764 +--- linux-3.1.1/arch/powerpc/include/asm/page.h 2011-11-11 15:19:27.000000000 -0500
1765 ++++ linux-3.1.1/arch/powerpc/include/asm/page.h 2011-11-16 18:39:07.000000000 -0500
1766 +@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1767 + * and needs to be executable. This means the whole heap ends
1768 + * up being executable.
1769 + */
1770 +-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1771 +- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1772 ++#define VM_DATA_DEFAULT_FLAGS32 \
1773 ++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1774 ++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1775 +
1776 + #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1777 + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1778 +@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1779 + #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1780 + #endif
1781 +
1782 ++#define ktla_ktva(addr) (addr)
1783 ++#define ktva_ktla(addr) (addr)
1784 ++
1785 + #ifndef __ASSEMBLY__
1786 +
1787 + #undef STRICT_MM_TYPECHECKS
1788 +diff -urNp linux-3.1.1/arch/powerpc/include/asm/pgtable.h linux-3.1.1/arch/powerpc/include/asm/pgtable.h
1789 +--- linux-3.1.1/arch/powerpc/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
1790 ++++ linux-3.1.1/arch/powerpc/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
1791 +@@ -2,6 +2,7 @@
1792 + #define _ASM_POWERPC_PGTABLE_H
1793 + #ifdef __KERNEL__
1794 +
1795 ++#include <linux/const.h>
1796 + #ifndef __ASSEMBLY__
1797 + #include <asm/processor.h> /* For TASK_SIZE */
1798 + #include <asm/mmu.h>
1799 +diff -urNp linux-3.1.1/arch/powerpc/include/asm/pte-hash32.h linux-3.1.1/arch/powerpc/include/asm/pte-hash32.h
1800 +--- linux-3.1.1/arch/powerpc/include/asm/pte-hash32.h 2011-11-11 15:19:27.000000000 -0500
1801 ++++ linux-3.1.1/arch/powerpc/include/asm/pte-hash32.h 2011-11-16 18:39:07.000000000 -0500
1802 +@@ -21,6 +21,7 @@
1803 + #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1804 + #define _PAGE_USER 0x004 /* usermode access allowed */
1805 + #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1806 ++#define _PAGE_EXEC _PAGE_GUARDED
1807 + #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1808 + #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1809 + #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1810 +diff -urNp linux-3.1.1/arch/powerpc/include/asm/reg.h linux-3.1.1/arch/powerpc/include/asm/reg.h
1811 +--- linux-3.1.1/arch/powerpc/include/asm/reg.h 2011-11-11 15:19:27.000000000 -0500
1812 ++++ linux-3.1.1/arch/powerpc/include/asm/reg.h 2011-11-16 18:39:07.000000000 -0500
1813 +@@ -212,6 +212,7 @@
1814 + #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1815 + #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1816 + #define DSISR_NOHPTE 0x40000000 /* no translation found */
1817 ++#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1818 + #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1819 + #define DSISR_ISSTORE 0x02000000 /* access was a store */
1820 + #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1821 +diff -urNp linux-3.1.1/arch/powerpc/include/asm/system.h linux-3.1.1/arch/powerpc/include/asm/system.h
1822 +--- linux-3.1.1/arch/powerpc/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
1823 ++++ linux-3.1.1/arch/powerpc/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
1824 +@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1825 + #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1826 + #endif
1827 +
1828 +-extern unsigned long arch_align_stack(unsigned long sp);
1829 ++#define arch_align_stack(x) ((x) & ~0xfUL)
1830 +
1831 + /* Used in very early kernel initialization. */
1832 + extern unsigned long reloc_offset(void);
1833 +diff -urNp linux-3.1.1/arch/powerpc/include/asm/uaccess.h linux-3.1.1/arch/powerpc/include/asm/uaccess.h
1834 +--- linux-3.1.1/arch/powerpc/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
1835 ++++ linux-3.1.1/arch/powerpc/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
1836 +@@ -13,6 +13,8 @@
1837 + #define VERIFY_READ 0
1838 + #define VERIFY_WRITE 1
1839 +
1840 ++extern void check_object_size(const void *ptr, unsigned long n, bool to);
1841 ++
1842 + /*
1843 + * The fs value determines whether argument validity checking should be
1844 + * performed or not. If get_fs() == USER_DS, checking is performed, with
1845 +@@ -327,52 +329,6 @@ do { \
1846 + extern unsigned long __copy_tofrom_user(void __user *to,
1847 + const void __user *from, unsigned long size);
1848 +
1849 +-#ifndef __powerpc64__
1850 +-
1851 +-static inline unsigned long copy_from_user(void *to,
1852 +- const void __user *from, unsigned long n)
1853 +-{
1854 +- unsigned long over;
1855 +-
1856 +- if (access_ok(VERIFY_READ, from, n))
1857 +- return __copy_tofrom_user((__force void __user *)to, from, n);
1858 +- if ((unsigned long)from < TASK_SIZE) {
1859 +- over = (unsigned long)from + n - TASK_SIZE;
1860 +- return __copy_tofrom_user((__force void __user *)to, from,
1861 +- n - over) + over;
1862 +- }
1863 +- return n;
1864 +-}
1865 +-
1866 +-static inline unsigned long copy_to_user(void __user *to,
1867 +- const void *from, unsigned long n)
1868 +-{
1869 +- unsigned long over;
1870 +-
1871 +- if (access_ok(VERIFY_WRITE, to, n))
1872 +- return __copy_tofrom_user(to, (__force void __user *)from, n);
1873 +- if ((unsigned long)to < TASK_SIZE) {
1874 +- over = (unsigned long)to + n - TASK_SIZE;
1875 +- return __copy_tofrom_user(to, (__force void __user *)from,
1876 +- n - over) + over;
1877 +- }
1878 +- return n;
1879 +-}
1880 +-
1881 +-#else /* __powerpc64__ */
1882 +-
1883 +-#define __copy_in_user(to, from, size) \
1884 +- __copy_tofrom_user((to), (from), (size))
1885 +-
1886 +-extern unsigned long copy_from_user(void *to, const void __user *from,
1887 +- unsigned long n);
1888 +-extern unsigned long copy_to_user(void __user *to, const void *from,
1889 +- unsigned long n);
1890 +-extern unsigned long copy_in_user(void __user *to, const void __user *from,
1891 +- unsigned long n);
1892 +-
1893 +-#endif /* __powerpc64__ */
1894 +-
1895 + static inline unsigned long __copy_from_user_inatomic(void *to,
1896 + const void __user *from, unsigned long n)
1897 + {
1898 +@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1899 + if (ret == 0)
1900 + return 0;
1901 + }
1902 ++
1903 ++ if (!__builtin_constant_p(n))
1904 ++ check_object_size(to, n, false);
1905 ++
1906 + return __copy_tofrom_user((__force void __user *)to, from, n);
1907 + }
1908 +
1909 +@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1910 + if (ret == 0)
1911 + return 0;
1912 + }
1913 ++
1914 ++ if (!__builtin_constant_p(n))
1915 ++ check_object_size(from, n, true);
1916 ++
1917 + return __copy_tofrom_user(to, (__force const void __user *)from, n);
1918 + }
1919 +
1920 +@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1921 + return __copy_to_user_inatomic(to, from, size);
1922 + }
1923 +
1924 ++#ifndef __powerpc64__
1925 ++
1926 ++static inline unsigned long __must_check copy_from_user(void *to,
1927 ++ const void __user *from, unsigned long n)
1928 ++{
1929 ++ unsigned long over;
1930 ++
1931 ++ if ((long)n < 0)
1932 ++ return n;
1933 ++
1934 ++ if (access_ok(VERIFY_READ, from, n)) {
1935 ++ if (!__builtin_constant_p(n))
1936 ++ check_object_size(to, n, false);
1937 ++ return __copy_tofrom_user((__force void __user *)to, from, n);
1938 ++ }
1939 ++ if ((unsigned long)from < TASK_SIZE) {
1940 ++ over = (unsigned long)from + n - TASK_SIZE;
1941 ++ if (!__builtin_constant_p(n - over))
1942 ++ check_object_size(to, n - over, false);
1943 ++ return __copy_tofrom_user((__force void __user *)to, from,
1944 ++ n - over) + over;
1945 ++ }
1946 ++ return n;
1947 ++}
1948 ++
1949 ++static inline unsigned long __must_check copy_to_user(void __user *to,
1950 ++ const void *from, unsigned long n)
1951 ++{
1952 ++ unsigned long over;
1953 ++
1954 ++ if ((long)n < 0)
1955 ++ return n;
1956 ++
1957 ++ if (access_ok(VERIFY_WRITE, to, n)) {
1958 ++ if (!__builtin_constant_p(n))
1959 ++ check_object_size(from, n, true);
1960 ++ return __copy_tofrom_user(to, (__force void __user *)from, n);
1961 ++ }
1962 ++ if ((unsigned long)to < TASK_SIZE) {
1963 ++ over = (unsigned long)to + n - TASK_SIZE;
1964 ++ if (!__builtin_constant_p(n))
1965 ++ check_object_size(from, n - over, true);
1966 ++ return __copy_tofrom_user(to, (__force void __user *)from,
1967 ++ n - over) + over;
1968 ++ }
1969 ++ return n;
1970 ++}
1971 ++
1972 ++#else /* __powerpc64__ */
1973 ++
1974 ++#define __copy_in_user(to, from, size) \
1975 ++ __copy_tofrom_user((to), (from), (size))
1976 ++
1977 ++static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1978 ++{
1979 ++ if ((long)n < 0 || n > INT_MAX)
1980 ++ return n;
1981 ++
1982 ++ if (!__builtin_constant_p(n))
1983 ++ check_object_size(to, n, false);
1984 ++
1985 ++ if (likely(access_ok(VERIFY_READ, from, n)))
1986 ++ n = __copy_from_user(to, from, n);
1987 ++ else
1988 ++ memset(to, 0, n);
1989 ++ return n;
1990 ++}
1991 ++
1992 ++static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1993 ++{
1994 ++ if ((long)n < 0 || n > INT_MAX)
1995 ++ return n;
1996 ++
1997 ++ if (likely(access_ok(VERIFY_WRITE, to, n))) {
1998 ++ if (!__builtin_constant_p(n))
1999 ++ check_object_size(from, n, true);
2000 ++ n = __copy_to_user(to, from, n);
2001 ++ }
2002 ++ return n;
2003 ++}
2004 ++
2005 ++extern unsigned long copy_in_user(void __user *to, const void __user *from,
2006 ++ unsigned long n);
2007 ++
2008 ++#endif /* __powerpc64__ */
2009 ++
2010 + extern unsigned long __clear_user(void __user *addr, unsigned long size);
2011 +
2012 + static inline unsigned long clear_user(void __user *addr, unsigned long size)
2013 +diff -urNp linux-3.1.1/arch/powerpc/kernel/exceptions-64e.S linux-3.1.1/arch/powerpc/kernel/exceptions-64e.S
2014 +--- linux-3.1.1/arch/powerpc/kernel/exceptions-64e.S 2011-11-11 15:19:27.000000000 -0500
2015 ++++ linux-3.1.1/arch/powerpc/kernel/exceptions-64e.S 2011-11-16 18:39:07.000000000 -0500
2016 +@@ -587,6 +587,7 @@ storage_fault_common:
2017 + std r14,_DAR(r1)
2018 + std r15,_DSISR(r1)
2019 + addi r3,r1,STACK_FRAME_OVERHEAD
2020 ++ bl .save_nvgprs
2021 + mr r4,r14
2022 + mr r5,r15
2023 + ld r14,PACA_EXGEN+EX_R14(r13)
2024 +@@ -596,8 +597,7 @@ storage_fault_common:
2025 + cmpdi r3,0
2026 + bne- 1f
2027 + b .ret_from_except_lite
2028 +-1: bl .save_nvgprs
2029 +- mr r5,r3
2030 ++1: mr r5,r3
2031 + addi r3,r1,STACK_FRAME_OVERHEAD
2032 + ld r4,_DAR(r1)
2033 + bl .bad_page_fault
2034 +diff -urNp linux-3.1.1/arch/powerpc/kernel/exceptions-64s.S linux-3.1.1/arch/powerpc/kernel/exceptions-64s.S
2035 +--- linux-3.1.1/arch/powerpc/kernel/exceptions-64s.S 2011-11-11 15:19:27.000000000 -0500
2036 ++++ linux-3.1.1/arch/powerpc/kernel/exceptions-64s.S 2011-11-16 18:39:07.000000000 -0500
2037 +@@ -1014,10 +1014,10 @@ handle_page_fault:
2038 + 11: ld r4,_DAR(r1)
2039 + ld r5,_DSISR(r1)
2040 + addi r3,r1,STACK_FRAME_OVERHEAD
2041 ++ bl .save_nvgprs
2042 + bl .do_page_fault
2043 + cmpdi r3,0
2044 + beq+ 13f
2045 +- bl .save_nvgprs
2046 + mr r5,r3
2047 + addi r3,r1,STACK_FRAME_OVERHEAD
2048 + lwz r4,_DAR(r1)
2049 +diff -urNp linux-3.1.1/arch/powerpc/kernel/module_32.c linux-3.1.1/arch/powerpc/kernel/module_32.c
2050 +--- linux-3.1.1/arch/powerpc/kernel/module_32.c 2011-11-11 15:19:27.000000000 -0500
2051 ++++ linux-3.1.1/arch/powerpc/kernel/module_32.c 2011-11-16 18:39:07.000000000 -0500
2052 +@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2053 + me->arch.core_plt_section = i;
2054 + }
2055 + if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2056 +- printk("Module doesn't contain .plt or .init.plt sections.\n");
2057 ++ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2058 + return -ENOEXEC;
2059 + }
2060 +
2061 +@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *locati
2062 +
2063 + DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2064 + /* Init, or core PLT? */
2065 +- if (location >= mod->module_core
2066 +- && location < mod->module_core + mod->core_size)
2067 ++ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2068 ++ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2069 + entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2070 +- else
2071 ++ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2072 ++ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2073 + entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2074 ++ else {
2075 ++ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2076 ++ return ~0UL;
2077 ++ }
2078 +
2079 + /* Find this entry, or if that fails, the next avail. entry */
2080 + while (entry->jump[0]) {
2081 +diff -urNp linux-3.1.1/arch/powerpc/kernel/process.c linux-3.1.1/arch/powerpc/kernel/process.c
2082 +--- linux-3.1.1/arch/powerpc/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
2083 ++++ linux-3.1.1/arch/powerpc/kernel/process.c 2011-11-16 18:40:08.000000000 -0500
2084 +@@ -682,8 +682,8 @@ void show_regs(struct pt_regs * regs)
2085 + * Lookup NIP late so we have the best change of getting the
2086 + * above info out without failing
2087 + */
2088 +- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2089 +- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2090 ++ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2091 ++ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2092 + #endif
2093 + show_stack(current, (unsigned long *) regs->gpr[1]);
2094 + if (!user_mode(regs))
2095 +@@ -1187,10 +1187,10 @@ void show_stack(struct task_struct *tsk,
2096 + newsp = stack[0];
2097 + ip = stack[STACK_FRAME_LR_SAVE];
2098 + if (!firstframe || ip != lr) {
2099 +- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2100 ++ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2101 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2102 + if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2103 +- printk(" (%pS)",
2104 ++ printk(" (%pA)",
2105 + (void *)current->ret_stack[curr_frame].ret);
2106 + curr_frame--;
2107 + }
2108 +@@ -1210,7 +1210,7 @@ void show_stack(struct task_struct *tsk,
2109 + struct pt_regs *regs = (struct pt_regs *)
2110 + (sp + STACK_FRAME_OVERHEAD);
2111 + lr = regs->link;
2112 +- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2113 ++ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2114 + regs->trap, (void *)regs->nip, (void *)lr);
2115 + firstframe = 1;
2116 + }
2117 +@@ -1285,58 +1285,3 @@ void thread_info_cache_init(void)
2118 + }
2119 +
2120 + #endif /* THREAD_SHIFT < PAGE_SHIFT */
2121 +-
2122 +-unsigned long arch_align_stack(unsigned long sp)
2123 +-{
2124 +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2125 +- sp -= get_random_int() & ~PAGE_MASK;
2126 +- return sp & ~0xf;
2127 +-}
2128 +-
2129 +-static inline unsigned long brk_rnd(void)
2130 +-{
2131 +- unsigned long rnd = 0;
2132 +-
2133 +- /* 8MB for 32bit, 1GB for 64bit */
2134 +- if (is_32bit_task())
2135 +- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2136 +- else
2137 +- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2138 +-
2139 +- return rnd << PAGE_SHIFT;
2140 +-}
2141 +-
2142 +-unsigned long arch_randomize_brk(struct mm_struct *mm)
2143 +-{
2144 +- unsigned long base = mm->brk;
2145 +- unsigned long ret;
2146 +-
2147 +-#ifdef CONFIG_PPC_STD_MMU_64
2148 +- /*
2149 +- * If we are using 1TB segments and we are allowed to randomise
2150 +- * the heap, we can put it above 1TB so it is backed by a 1TB
2151 +- * segment. Otherwise the heap will be in the bottom 1TB
2152 +- * which always uses 256MB segments and this may result in a
2153 +- * performance penalty.
2154 +- */
2155 +- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2156 +- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2157 +-#endif
2158 +-
2159 +- ret = PAGE_ALIGN(base + brk_rnd());
2160 +-
2161 +- if (ret < mm->brk)
2162 +- return mm->brk;
2163 +-
2164 +- return ret;
2165 +-}
2166 +-
2167 +-unsigned long randomize_et_dyn(unsigned long base)
2168 +-{
2169 +- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2170 +-
2171 +- if (ret < base)
2172 +- return base;
2173 +-
2174 +- return ret;
2175 +-}
2176 +diff -urNp linux-3.1.1/arch/powerpc/kernel/signal_32.c linux-3.1.1/arch/powerpc/kernel/signal_32.c
2177 +--- linux-3.1.1/arch/powerpc/kernel/signal_32.c 2011-11-11 15:19:27.000000000 -0500
2178 ++++ linux-3.1.1/arch/powerpc/kernel/signal_32.c 2011-11-16 18:39:07.000000000 -0500
2179 +@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig
2180 + /* Save user registers on the stack */
2181 + frame = &rt_sf->uc.uc_mcontext;
2182 + addr = frame;
2183 +- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2184 ++ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2185 + if (save_user_regs(regs, frame, 0, 1))
2186 + goto badframe;
2187 + regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2188 +diff -urNp linux-3.1.1/arch/powerpc/kernel/signal_64.c linux-3.1.1/arch/powerpc/kernel/signal_64.c
2189 +--- linux-3.1.1/arch/powerpc/kernel/signal_64.c 2011-11-11 15:19:27.000000000 -0500
2190 ++++ linux-3.1.1/arch/powerpc/kernel/signal_64.c 2011-11-16 18:39:07.000000000 -0500
2191 +@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct
2192 + current->thread.fpscr.val = 0;
2193 +
2194 + /* Set up to return from userspace. */
2195 +- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2196 ++ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2197 + regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2198 + } else {
2199 + err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2200 +diff -urNp linux-3.1.1/arch/powerpc/kernel/traps.c linux-3.1.1/arch/powerpc/kernel/traps.c
2201 +--- linux-3.1.1/arch/powerpc/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
2202 ++++ linux-3.1.1/arch/powerpc/kernel/traps.c 2011-11-16 18:40:08.000000000 -0500
2203 +@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2204 + static inline void pmac_backlight_unblank(void) { }
2205 + #endif
2206 +
2207 ++extern void gr_handle_kernel_exploit(void);
2208 ++
2209 + int die(const char *str, struct pt_regs *regs, long err)
2210 + {
2211 + static struct {
2212 +@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs
2213 + if (panic_on_oops)
2214 + panic("Fatal exception");
2215 +
2216 ++ gr_handle_kernel_exploit();
2217 ++
2218 + oops_exit();
2219 + do_exit(err);
2220 +
2221 +diff -urNp linux-3.1.1/arch/powerpc/kernel/vdso.c linux-3.1.1/arch/powerpc/kernel/vdso.c
2222 +--- linux-3.1.1/arch/powerpc/kernel/vdso.c 2011-11-11 15:19:27.000000000 -0500
2223 ++++ linux-3.1.1/arch/powerpc/kernel/vdso.c 2011-11-16 18:39:07.000000000 -0500
2224 +@@ -36,6 +36,7 @@
2225 + #include <asm/firmware.h>
2226 + #include <asm/vdso.h>
2227 + #include <asm/vdso_datapage.h>
2228 ++#include <asm/mman.h>
2229 +
2230 + #include "setup.h"
2231 +
2232 +@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2233 + vdso_base = VDSO32_MBASE;
2234 + #endif
2235 +
2236 +- current->mm->context.vdso_base = 0;
2237 ++ current->mm->context.vdso_base = ~0UL;
2238 +
2239 + /* vDSO has a problem and was disabled, just don't "enable" it for the
2240 + * process
2241 +@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2242 + vdso_base = get_unmapped_area(NULL, vdso_base,
2243 + (vdso_pages << PAGE_SHIFT) +
2244 + ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2245 +- 0, 0);
2246 ++ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2247 + if (IS_ERR_VALUE(vdso_base)) {
2248 + rc = vdso_base;
2249 + goto fail_mmapsem;
2250 +diff -urNp linux-3.1.1/arch/powerpc/lib/usercopy_64.c linux-3.1.1/arch/powerpc/lib/usercopy_64.c
2251 +--- linux-3.1.1/arch/powerpc/lib/usercopy_64.c 2011-11-11 15:19:27.000000000 -0500
2252 ++++ linux-3.1.1/arch/powerpc/lib/usercopy_64.c 2011-11-16 18:39:07.000000000 -0500
2253 +@@ -9,22 +9,6 @@
2254 + #include <linux/module.h>
2255 + #include <asm/uaccess.h>
2256 +
2257 +-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2258 +-{
2259 +- if (likely(access_ok(VERIFY_READ, from, n)))
2260 +- n = __copy_from_user(to, from, n);
2261 +- else
2262 +- memset(to, 0, n);
2263 +- return n;
2264 +-}
2265 +-
2266 +-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2267 +-{
2268 +- if (likely(access_ok(VERIFY_WRITE, to, n)))
2269 +- n = __copy_to_user(to, from, n);
2270 +- return n;
2271 +-}
2272 +-
2273 + unsigned long copy_in_user(void __user *to, const void __user *from,
2274 + unsigned long n)
2275 + {
2276 +@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2277 + return n;
2278 + }
2279 +
2280 +-EXPORT_SYMBOL(copy_from_user);
2281 +-EXPORT_SYMBOL(copy_to_user);
2282 + EXPORT_SYMBOL(copy_in_user);
2283 +
2284 +diff -urNp linux-3.1.1/arch/powerpc/mm/fault.c linux-3.1.1/arch/powerpc/mm/fault.c
2285 +--- linux-3.1.1/arch/powerpc/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
2286 ++++ linux-3.1.1/arch/powerpc/mm/fault.c 2011-11-16 18:39:07.000000000 -0500
2287 +@@ -32,6 +32,10 @@
2288 + #include <linux/perf_event.h>
2289 + #include <linux/magic.h>
2290 + #include <linux/ratelimit.h>
2291 ++#include <linux/slab.h>
2292 ++#include <linux/pagemap.h>
2293 ++#include <linux/compiler.h>
2294 ++#include <linux/unistd.h>
2295 +
2296 + #include <asm/firmware.h>
2297 + #include <asm/page.h>
2298 +@@ -43,6 +47,7 @@
2299 + #include <asm/tlbflush.h>
2300 + #include <asm/siginfo.h>
2301 + #include <mm/mmu_decl.h>
2302 ++#include <asm/ptrace.h>
2303 +
2304 + #ifdef CONFIG_KPROBES
2305 + static inline int notify_page_fault(struct pt_regs *regs)
2306 +@@ -66,6 +71,33 @@ static inline int notify_page_fault(stru
2307 + }
2308 + #endif
2309 +
2310 ++#ifdef CONFIG_PAX_PAGEEXEC
2311 ++/*
2312 ++ * PaX: decide what to do with offenders (regs->nip = fault address)
2313 ++ *
2314 ++ * returns 1 when task should be killed
2315 ++ */
2316 ++static int pax_handle_fetch_fault(struct pt_regs *regs)
2317 ++{
2318 ++ return 1;
2319 ++}
2320 ++
2321 ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2322 ++{
2323 ++ unsigned long i;
2324 ++
2325 ++ printk(KERN_ERR "PAX: bytes at PC: ");
2326 ++ for (i = 0; i < 5; i++) {
2327 ++ unsigned int c;
2328 ++ if (get_user(c, (unsigned int __user *)pc+i))
2329 ++ printk(KERN_CONT "???????? ");
2330 ++ else
2331 ++ printk(KERN_CONT "%08x ", c);
2332 ++ }
2333 ++ printk("\n");
2334 ++}
2335 ++#endif
2336 ++
2337 + /*
2338 + * Check whether the instruction at regs->nip is a store using
2339 + * an update addressing form which will update r1.
2340 +@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_re
2341 + * indicate errors in DSISR but can validly be set in SRR1.
2342 + */
2343 + if (trap == 0x400)
2344 +- error_code &= 0x48200000;
2345 ++ error_code &= 0x58200000;
2346 + else
2347 + is_write = error_code & DSISR_ISSTORE;
2348 + #else
2349 +@@ -259,7 +291,7 @@ good_area:
2350 + * "undefined". Of those that can be set, this is the only
2351 + * one which seems bad.
2352 + */
2353 +- if (error_code & 0x10000000)
2354 ++ if (error_code & DSISR_GUARDED)
2355 + /* Guarded storage error. */
2356 + goto bad_area;
2357 + #endif /* CONFIG_8xx */
2358 +@@ -274,7 +306,7 @@ good_area:
2359 + * processors use the same I/D cache coherency mechanism
2360 + * as embedded.
2361 + */
2362 +- if (error_code & DSISR_PROTFAULT)
2363 ++ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2364 + goto bad_area;
2365 + #endif /* CONFIG_PPC_STD_MMU */
2366 +
2367 +@@ -343,6 +375,23 @@ bad_area:
2368 + bad_area_nosemaphore:
2369 + /* User mode accesses cause a SIGSEGV */
2370 + if (user_mode(regs)) {
2371 ++
2372 ++#ifdef CONFIG_PAX_PAGEEXEC
2373 ++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2374 ++#ifdef CONFIG_PPC_STD_MMU
2375 ++ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2376 ++#else
2377 ++ if (is_exec && regs->nip == address) {
2378 ++#endif
2379 ++ switch (pax_handle_fetch_fault(regs)) {
2380 ++ }
2381 ++
2382 ++ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2383 ++ do_group_exit(SIGKILL);
2384 ++ }
2385 ++ }
2386 ++#endif
2387 ++
2388 + _exception(SIGSEGV, regs, code, address);
2389 + return 0;
2390 + }
2391 +diff -urNp linux-3.1.1/arch/powerpc/mm/mmap_64.c linux-3.1.1/arch/powerpc/mm/mmap_64.c
2392 +--- linux-3.1.1/arch/powerpc/mm/mmap_64.c 2011-11-11 15:19:27.000000000 -0500
2393 ++++ linux-3.1.1/arch/powerpc/mm/mmap_64.c 2011-11-16 18:39:07.000000000 -0500
2394 +@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2395 + */
2396 + if (mmap_is_legacy()) {
2397 + mm->mmap_base = TASK_UNMAPPED_BASE;
2398 ++
2399 ++#ifdef CONFIG_PAX_RANDMMAP
2400 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
2401 ++ mm->mmap_base += mm->delta_mmap;
2402 ++#endif
2403 ++
2404 + mm->get_unmapped_area = arch_get_unmapped_area;
2405 + mm->unmap_area = arch_unmap_area;
2406 + } else {
2407 + mm->mmap_base = mmap_base();
2408 ++
2409 ++#ifdef CONFIG_PAX_RANDMMAP
2410 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
2411 ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2412 ++#endif
2413 ++
2414 + mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2415 + mm->unmap_area = arch_unmap_area_topdown;
2416 + }
2417 +diff -urNp linux-3.1.1/arch/powerpc/mm/slice.c linux-3.1.1/arch/powerpc/mm/slice.c
2418 +--- linux-3.1.1/arch/powerpc/mm/slice.c 2011-11-11 15:19:27.000000000 -0500
2419 ++++ linux-3.1.1/arch/powerpc/mm/slice.c 2011-11-16 18:39:07.000000000 -0500
2420 +@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2421 + if ((mm->task_size - len) < addr)
2422 + return 0;
2423 + vma = find_vma(mm, addr);
2424 +- return (!vma || (addr + len) <= vma->vm_start);
2425 ++ return check_heap_stack_gap(vma, addr, len);
2426 + }
2427 +
2428 + static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2429 +@@ -256,7 +256,7 @@ full_search:
2430 + addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2431 + continue;
2432 + }
2433 +- if (!vma || addr + len <= vma->vm_start) {
2434 ++ if (check_heap_stack_gap(vma, addr, len)) {
2435 + /*
2436 + * Remember the place where we stopped the search:
2437 + */
2438 +@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2439 + }
2440 + }
2441 +
2442 +- addr = mm->mmap_base;
2443 +- while (addr > len) {
2444 ++ if (mm->mmap_base < len)
2445 ++ addr = -ENOMEM;
2446 ++ else
2447 ++ addr = mm->mmap_base - len;
2448 ++
2449 ++ while (!IS_ERR_VALUE(addr)) {
2450 + /* Go down by chunk size */
2451 +- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2452 ++ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2453 +
2454 + /* Check for hit with different page size */
2455 + mask = slice_range_to_mask(addr, len);
2456 +@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2457 + * return with success:
2458 + */
2459 + vma = find_vma(mm, addr);
2460 +- if (!vma || (addr + len) <= vma->vm_start) {
2461 ++ if (check_heap_stack_gap(vma, addr, len)) {
2462 + /* remember the address as a hint for next time */
2463 + if (use_cache)
2464 + mm->free_area_cache = addr;
2465 +@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2466 + mm->cached_hole_size = vma->vm_start - addr;
2467 +
2468 + /* try just below the current vma->vm_start */
2469 +- addr = vma->vm_start;
2470 ++ addr = skip_heap_stack_gap(vma, len);
2471 + }
2472 +
2473 + /*
2474 +@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2475 + if (fixed && addr > (mm->task_size - len))
2476 + return -EINVAL;
2477 +
2478 ++#ifdef CONFIG_PAX_RANDMMAP
2479 ++ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2480 ++ addr = 0;
2481 ++#endif
2482 ++
2483 + /* If hint, make sure it matches our alignment restrictions */
2484 + if (!fixed && addr) {
2485 + addr = _ALIGN_UP(addr, 1ul << pshift);
2486 +diff -urNp linux-3.1.1/arch/s390/include/asm/elf.h linux-3.1.1/arch/s390/include/asm/elf.h
2487 +--- linux-3.1.1/arch/s390/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
2488 ++++ linux-3.1.1/arch/s390/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
2489 +@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2490 + the loader. We need to make sure that it is out of the way of the program
2491 + that it will "exec", and that there is sufficient room for the brk. */
2492 +
2493 +-extern unsigned long randomize_et_dyn(unsigned long base);
2494 +-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2495 ++#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2496 ++
2497 ++#ifdef CONFIG_PAX_ASLR
2498 ++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2499 ++
2500 ++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2501 ++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2502 ++#endif
2503 +
2504 + /* This yields a mask that user programs can use to figure out what
2505 + instruction set this CPU supports. */
2506 +@@ -211,7 +217,4 @@ struct linux_binprm;
2507 + #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2508 + int arch_setup_additional_pages(struct linux_binprm *, int);
2509 +
2510 +-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2511 +-#define arch_randomize_brk arch_randomize_brk
2512 +-
2513 + #endif
2514 +diff -urNp linux-3.1.1/arch/s390/include/asm/system.h linux-3.1.1/arch/s390/include/asm/system.h
2515 +--- linux-3.1.1/arch/s390/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
2516 ++++ linux-3.1.1/arch/s390/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
2517 +@@ -256,7 +256,7 @@ extern void (*_machine_restart)(char *co
2518 + extern void (*_machine_halt)(void);
2519 + extern void (*_machine_power_off)(void);
2520 +
2521 +-extern unsigned long arch_align_stack(unsigned long sp);
2522 ++#define arch_align_stack(x) ((x) & ~0xfUL)
2523 +
2524 + static inline int tprot(unsigned long addr)
2525 + {
2526 +diff -urNp linux-3.1.1/arch/s390/include/asm/uaccess.h linux-3.1.1/arch/s390/include/asm/uaccess.h
2527 +--- linux-3.1.1/arch/s390/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
2528 ++++ linux-3.1.1/arch/s390/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
2529 +@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2530 + copy_to_user(void __user *to, const void *from, unsigned long n)
2531 + {
2532 + might_fault();
2533 ++
2534 ++ if ((long)n < 0)
2535 ++ return n;
2536 ++
2537 + if (access_ok(VERIFY_WRITE, to, n))
2538 + n = __copy_to_user(to, from, n);
2539 + return n;
2540 +@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void
2541 + static inline unsigned long __must_check
2542 + __copy_from_user(void *to, const void __user *from, unsigned long n)
2543 + {
2544 ++ if ((long)n < 0)
2545 ++ return n;
2546 ++
2547 + if (__builtin_constant_p(n) && (n <= 256))
2548 + return uaccess.copy_from_user_small(n, from, to);
2549 + else
2550 +@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __us
2551 + unsigned int sz = __compiletime_object_size(to);
2552 +
2553 + might_fault();
2554 ++
2555 ++ if ((long)n < 0)
2556 ++ return n;
2557 ++
2558 + if (unlikely(sz != -1 && sz < n)) {
2559 + copy_from_user_overflow();
2560 + return n;
2561 +diff -urNp linux-3.1.1/arch/s390/kernel/module.c linux-3.1.1/arch/s390/kernel/module.c
2562 +--- linux-3.1.1/arch/s390/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
2563 ++++ linux-3.1.1/arch/s390/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
2564 +@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2565 +
2566 + /* Increase core size by size of got & plt and set start
2567 + offsets for got and plt. */
2568 +- me->core_size = ALIGN(me->core_size, 4);
2569 +- me->arch.got_offset = me->core_size;
2570 +- me->core_size += me->arch.got_size;
2571 +- me->arch.plt_offset = me->core_size;
2572 +- me->core_size += me->arch.plt_size;
2573 ++ me->core_size_rw = ALIGN(me->core_size_rw, 4);
2574 ++ me->arch.got_offset = me->core_size_rw;
2575 ++ me->core_size_rw += me->arch.got_size;
2576 ++ me->arch.plt_offset = me->core_size_rx;
2577 ++ me->core_size_rx += me->arch.plt_size;
2578 + return 0;
2579 + }
2580 +
2581 +@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2582 + if (info->got_initialized == 0) {
2583 + Elf_Addr *gotent;
2584 +
2585 +- gotent = me->module_core + me->arch.got_offset +
2586 ++ gotent = me->module_core_rw + me->arch.got_offset +
2587 + info->got_offset;
2588 + *gotent = val;
2589 + info->got_initialized = 1;
2590 +@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2591 + else if (r_type == R_390_GOTENT ||
2592 + r_type == R_390_GOTPLTENT)
2593 + *(unsigned int *) loc =
2594 +- (val + (Elf_Addr) me->module_core - loc) >> 1;
2595 ++ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2596 + else if (r_type == R_390_GOT64 ||
2597 + r_type == R_390_GOTPLT64)
2598 + *(unsigned long *) loc = val;
2599 +@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2600 + case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2601 + if (info->plt_initialized == 0) {
2602 + unsigned int *ip;
2603 +- ip = me->module_core + me->arch.plt_offset +
2604 ++ ip = me->module_core_rx + me->arch.plt_offset +
2605 + info->plt_offset;
2606 + #ifndef CONFIG_64BIT
2607 + ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2608 +@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2609 + val - loc + 0xffffUL < 0x1ffffeUL) ||
2610 + (r_type == R_390_PLT32DBL &&
2611 + val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2612 +- val = (Elf_Addr) me->module_core +
2613 ++ val = (Elf_Addr) me->module_core_rx +
2614 + me->arch.plt_offset +
2615 + info->plt_offset;
2616 + val += rela->r_addend - loc;
2617 +@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2618 + case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2619 + case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2620 + val = val + rela->r_addend -
2621 +- ((Elf_Addr) me->module_core + me->arch.got_offset);
2622 ++ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2623 + if (r_type == R_390_GOTOFF16)
2624 + *(unsigned short *) loc = val;
2625 + else if (r_type == R_390_GOTOFF32)
2626 +@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2627 + break;
2628 + case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2629 + case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2630 +- val = (Elf_Addr) me->module_core + me->arch.got_offset +
2631 ++ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2632 + rela->r_addend - loc;
2633 + if (r_type == R_390_GOTPC)
2634 + *(unsigned int *) loc = val;
2635 +diff -urNp linux-3.1.1/arch/s390/kernel/process.c linux-3.1.1/arch/s390/kernel/process.c
2636 +--- linux-3.1.1/arch/s390/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
2637 ++++ linux-3.1.1/arch/s390/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
2638 +@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_stru
2639 + }
2640 + return 0;
2641 + }
2642 +-
2643 +-unsigned long arch_align_stack(unsigned long sp)
2644 +-{
2645 +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2646 +- sp -= get_random_int() & ~PAGE_MASK;
2647 +- return sp & ~0xf;
2648 +-}
2649 +-
2650 +-static inline unsigned long brk_rnd(void)
2651 +-{
2652 +- /* 8MB for 32bit, 1GB for 64bit */
2653 +- if (is_32bit_task())
2654 +- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2655 +- else
2656 +- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2657 +-}
2658 +-
2659 +-unsigned long arch_randomize_brk(struct mm_struct *mm)
2660 +-{
2661 +- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2662 +-
2663 +- if (ret < mm->brk)
2664 +- return mm->brk;
2665 +- return ret;
2666 +-}
2667 +-
2668 +-unsigned long randomize_et_dyn(unsigned long base)
2669 +-{
2670 +- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2671 +-
2672 +- if (!(current->flags & PF_RANDOMIZE))
2673 +- return base;
2674 +- if (ret < base)
2675 +- return base;
2676 +- return ret;
2677 +-}
2678 +diff -urNp linux-3.1.1/arch/s390/kernel/setup.c linux-3.1.1/arch/s390/kernel/setup.c
2679 +--- linux-3.1.1/arch/s390/kernel/setup.c 2011-11-11 15:19:27.000000000 -0500
2680 ++++ linux-3.1.1/arch/s390/kernel/setup.c 2011-11-16 18:39:07.000000000 -0500
2681 +@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2682 + }
2683 + early_param("mem", early_parse_mem);
2684 +
2685 +-unsigned int user_mode = HOME_SPACE_MODE;
2686 ++unsigned int user_mode = SECONDARY_SPACE_MODE;
2687 + EXPORT_SYMBOL_GPL(user_mode);
2688 +
2689 + static int set_amode_and_uaccess(unsigned long user_amode,
2690 +diff -urNp linux-3.1.1/arch/s390/mm/mmap.c linux-3.1.1/arch/s390/mm/mmap.c
2691 +--- linux-3.1.1/arch/s390/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
2692 ++++ linux-3.1.1/arch/s390/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
2693 +@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2694 + */
2695 + if (mmap_is_legacy()) {
2696 + mm->mmap_base = TASK_UNMAPPED_BASE;
2697 ++
2698 ++#ifdef CONFIG_PAX_RANDMMAP
2699 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
2700 ++ mm->mmap_base += mm->delta_mmap;
2701 ++#endif
2702 ++
2703 + mm->get_unmapped_area = arch_get_unmapped_area;
2704 + mm->unmap_area = arch_unmap_area;
2705 + } else {
2706 + mm->mmap_base = mmap_base();
2707 ++
2708 ++#ifdef CONFIG_PAX_RANDMMAP
2709 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
2710 ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2711 ++#endif
2712 ++
2713 + mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2714 + mm->unmap_area = arch_unmap_area_topdown;
2715 + }
2716 +@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2717 + */
2718 + if (mmap_is_legacy()) {
2719 + mm->mmap_base = TASK_UNMAPPED_BASE;
2720 ++
2721 ++#ifdef CONFIG_PAX_RANDMMAP
2722 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
2723 ++ mm->mmap_base += mm->delta_mmap;
2724 ++#endif
2725 ++
2726 + mm->get_unmapped_area = s390_get_unmapped_area;
2727 + mm->unmap_area = arch_unmap_area;
2728 + } else {
2729 + mm->mmap_base = mmap_base();
2730 ++
2731 ++#ifdef CONFIG_PAX_RANDMMAP
2732 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
2733 ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2734 ++#endif
2735 ++
2736 + mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2737 + mm->unmap_area = arch_unmap_area_topdown;
2738 + }
2739 +diff -urNp linux-3.1.1/arch/score/include/asm/system.h linux-3.1.1/arch/score/include/asm/system.h
2740 +--- linux-3.1.1/arch/score/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
2741 ++++ linux-3.1.1/arch/score/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
2742 +@@ -17,7 +17,7 @@ do { \
2743 + #define finish_arch_switch(prev) do {} while (0)
2744 +
2745 + typedef void (*vi_handler_t)(void);
2746 +-extern unsigned long arch_align_stack(unsigned long sp);
2747 ++#define arch_align_stack(x) (x)
2748 +
2749 + #define mb() barrier()
2750 + #define rmb() barrier()
2751 +diff -urNp linux-3.1.1/arch/score/kernel/process.c linux-3.1.1/arch/score/kernel/process.c
2752 +--- linux-3.1.1/arch/score/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
2753 ++++ linux-3.1.1/arch/score/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
2754 +@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2755 +
2756 + return task_pt_regs(task)->cp0_epc;
2757 + }
2758 +-
2759 +-unsigned long arch_align_stack(unsigned long sp)
2760 +-{
2761 +- return sp;
2762 +-}
2763 +diff -urNp linux-3.1.1/arch/sh/mm/mmap.c linux-3.1.1/arch/sh/mm/mmap.c
2764 +--- linux-3.1.1/arch/sh/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
2765 ++++ linux-3.1.1/arch/sh/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
2766 +@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2767 + addr = PAGE_ALIGN(addr);
2768 +
2769 + vma = find_vma(mm, addr);
2770 +- if (TASK_SIZE - len >= addr &&
2771 +- (!vma || addr + len <= vma->vm_start))
2772 ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2773 + return addr;
2774 + }
2775 +
2776 +@@ -106,7 +105,7 @@ full_search:
2777 + }
2778 + return -ENOMEM;
2779 + }
2780 +- if (likely(!vma || addr + len <= vma->vm_start)) {
2781 ++ if (likely(check_heap_stack_gap(vma, addr, len))) {
2782 + /*
2783 + * Remember the place where we stopped the search:
2784 + */
2785 +@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2786 + addr = PAGE_ALIGN(addr);
2787 +
2788 + vma = find_vma(mm, addr);
2789 +- if (TASK_SIZE - len >= addr &&
2790 +- (!vma || addr + len <= vma->vm_start))
2791 ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2792 + return addr;
2793 + }
2794 +
2795 +@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2796 + /* make sure it can fit in the remaining address space */
2797 + if (likely(addr > len)) {
2798 + vma = find_vma(mm, addr-len);
2799 +- if (!vma || addr <= vma->vm_start) {
2800 ++ if (check_heap_stack_gap(vma, addr - len, len)) {
2801 + /* remember the address as a hint for next time */
2802 + return (mm->free_area_cache = addr-len);
2803 + }
2804 +@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2805 + if (unlikely(mm->mmap_base < len))
2806 + goto bottomup;
2807 +
2808 +- addr = mm->mmap_base-len;
2809 +- if (do_colour_align)
2810 +- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2811 ++ addr = mm->mmap_base - len;
2812 +
2813 + do {
2814 ++ if (do_colour_align)
2815 ++ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2816 + /*
2817 + * Lookup failure means no vma is above this address,
2818 + * else if new region fits below vma->vm_start,
2819 + * return with success:
2820 + */
2821 + vma = find_vma(mm, addr);
2822 +- if (likely(!vma || addr+len <= vma->vm_start)) {
2823 ++ if (likely(check_heap_stack_gap(vma, addr, len))) {
2824 + /* remember the address as a hint for next time */
2825 + return (mm->free_area_cache = addr);
2826 + }
2827 +@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2828 + mm->cached_hole_size = vma->vm_start - addr;
2829 +
2830 + /* try just below the current vma->vm_start */
2831 +- addr = vma->vm_start-len;
2832 +- if (do_colour_align)
2833 +- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2834 +- } while (likely(len < vma->vm_start));
2835 ++ addr = skip_heap_stack_gap(vma, len);
2836 ++ } while (!IS_ERR_VALUE(addr));
2837 +
2838 + bottomup:
2839 + /*
2840 +diff -urNp linux-3.1.1/arch/sparc/include/asm/atomic_64.h linux-3.1.1/arch/sparc/include/asm/atomic_64.h
2841 +--- linux-3.1.1/arch/sparc/include/asm/atomic_64.h 2011-11-11 15:19:27.000000000 -0500
2842 ++++ linux-3.1.1/arch/sparc/include/asm/atomic_64.h 2011-11-16 18:39:07.000000000 -0500
2843 +@@ -14,18 +14,40 @@
2844 + #define ATOMIC64_INIT(i) { (i) }
2845 +
2846 + #define atomic_read(v) (*(volatile int *)&(v)->counter)
2847 ++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2848 ++{
2849 ++ return v->counter;
2850 ++}
2851 + #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2852 ++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2853 ++{
2854 ++ return v->counter;
2855 ++}
2856 +
2857 + #define atomic_set(v, i) (((v)->counter) = i)
2858 ++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2859 ++{
2860 ++ v->counter = i;
2861 ++}
2862 + #define atomic64_set(v, i) (((v)->counter) = i)
2863 ++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2864 ++{
2865 ++ v->counter = i;
2866 ++}
2867 +
2868 + extern void atomic_add(int, atomic_t *);
2869 ++extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2870 + extern void atomic64_add(long, atomic64_t *);
2871 ++extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2872 + extern void atomic_sub(int, atomic_t *);
2873 ++extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2874 + extern void atomic64_sub(long, atomic64_t *);
2875 ++extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2876 +
2877 + extern int atomic_add_ret(int, atomic_t *);
2878 ++extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2879 + extern long atomic64_add_ret(long, atomic64_t *);
2880 ++extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2881 + extern int atomic_sub_ret(int, atomic_t *);
2882 + extern long atomic64_sub_ret(long, atomic64_t *);
2883 +
2884 +@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2885 + #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2886 +
2887 + #define atomic_inc_return(v) atomic_add_ret(1, v)
2888 ++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2889 ++{
2890 ++ return atomic_add_ret_unchecked(1, v);
2891 ++}
2892 + #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2893 ++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2894 ++{
2895 ++ return atomic64_add_ret_unchecked(1, v);
2896 ++}
2897 +
2898 + #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2899 + #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2900 +
2901 + #define atomic_add_return(i, v) atomic_add_ret(i, v)
2902 ++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2903 ++{
2904 ++ return atomic_add_ret_unchecked(i, v);
2905 ++}
2906 + #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2907 ++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2908 ++{
2909 ++ return atomic64_add_ret_unchecked(i, v);
2910 ++}
2911 +
2912 + /*
2913 + * atomic_inc_and_test - increment and test
2914 +@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2915 + * other cases.
2916 + */
2917 + #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2918 ++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2919 ++{
2920 ++ return atomic_inc_return_unchecked(v) == 0;
2921 ++}
2922 + #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2923 +
2924 + #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2925 +@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomi
2926 + #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2927 +
2928 + #define atomic_inc(v) atomic_add(1, v)
2929 ++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2930 ++{
2931 ++ atomic_add_unchecked(1, v);
2932 ++}
2933 + #define atomic64_inc(v) atomic64_add(1, v)
2934 ++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2935 ++{
2936 ++ atomic64_add_unchecked(1, v);
2937 ++}
2938 +
2939 + #define atomic_dec(v) atomic_sub(1, v)
2940 ++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2941 ++{
2942 ++ atomic_sub_unchecked(1, v);
2943 ++}
2944 + #define atomic64_dec(v) atomic64_sub(1, v)
2945 ++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2946 ++{
2947 ++ atomic64_sub_unchecked(1, v);
2948 ++}
2949 +
2950 + #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2951 + #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2952 +
2953 + #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2954 ++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2955 ++{
2956 ++ return cmpxchg(&v->counter, old, new);
2957 ++}
2958 + #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2959 ++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
2960 ++{
2961 ++ return xchg(&v->counter, new);
2962 ++}
2963 +
2964 + static inline int __atomic_add_unless(atomic_t *v, int a, int u)
2965 + {
2966 +- int c, old;
2967 ++ int c, old, new;
2968 + c = atomic_read(v);
2969 + for (;;) {
2970 +- if (unlikely(c == (u)))
2971 ++ if (unlikely(c == u))
2972 + break;
2973 +- old = atomic_cmpxchg((v), c, c + (a));
2974 ++
2975 ++ asm volatile("addcc %2, %0, %0\n"
2976 ++
2977 ++#ifdef CONFIG_PAX_REFCOUNT
2978 ++ "tvs %%icc, 6\n"
2979 ++#endif
2980 ++
2981 ++ : "=r" (new)
2982 ++ : "0" (c), "ir" (a)
2983 ++ : "cc");
2984 ++
2985 ++ old = atomic_cmpxchg(v, c, new);
2986 + if (likely(old == c))
2987 + break;
2988 + c = old;
2989 +@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(at
2990 + #define atomic64_cmpxchg(v, o, n) \
2991 + ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
2992 + #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
2993 ++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
2994 ++{
2995 ++ return xchg(&v->counter, new);
2996 ++}
2997 +
2998 + static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
2999 + {
3000 +- long c, old;
3001 ++ long c, old, new;
3002 + c = atomic64_read(v);
3003 + for (;;) {
3004 +- if (unlikely(c == (u)))
3005 ++ if (unlikely(c == u))
3006 + break;
3007 +- old = atomic64_cmpxchg((v), c, c + (a));
3008 ++
3009 ++ asm volatile("addcc %2, %0, %0\n"
3010 ++
3011 ++#ifdef CONFIG_PAX_REFCOUNT
3012 ++ "tvs %%xcc, 6\n"
3013 ++#endif
3014 ++
3015 ++ : "=r" (new)
3016 ++ : "0" (c), "ir" (a)
3017 ++ : "cc");
3018 ++
3019 ++ old = atomic64_cmpxchg(v, c, new);
3020 + if (likely(old == c))
3021 + break;
3022 + c = old;
3023 + }
3024 +- return c != (u);
3025 ++ return c != u;
3026 + }
3027 +
3028 + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3029 +diff -urNp linux-3.1.1/arch/sparc/include/asm/cache.h linux-3.1.1/arch/sparc/include/asm/cache.h
3030 +--- linux-3.1.1/arch/sparc/include/asm/cache.h 2011-11-11 15:19:27.000000000 -0500
3031 ++++ linux-3.1.1/arch/sparc/include/asm/cache.h 2011-11-16 18:39:07.000000000 -0500
3032 +@@ -10,7 +10,7 @@
3033 + #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3034 +
3035 + #define L1_CACHE_SHIFT 5
3036 +-#define L1_CACHE_BYTES 32
3037 ++#define L1_CACHE_BYTES 32UL
3038 +
3039 + #ifdef CONFIG_SPARC32
3040 + #define SMP_CACHE_BYTES_SHIFT 5
3041 +diff -urNp linux-3.1.1/arch/sparc/include/asm/elf_32.h linux-3.1.1/arch/sparc/include/asm/elf_32.h
3042 +--- linux-3.1.1/arch/sparc/include/asm/elf_32.h 2011-11-11 15:19:27.000000000 -0500
3043 ++++ linux-3.1.1/arch/sparc/include/asm/elf_32.h 2011-11-16 18:39:07.000000000 -0500
3044 +@@ -114,6 +114,13 @@ typedef struct {
3045 +
3046 + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3047 +
3048 ++#ifdef CONFIG_PAX_ASLR
3049 ++#define PAX_ELF_ET_DYN_BASE 0x10000UL
3050 ++
3051 ++#define PAX_DELTA_MMAP_LEN 16
3052 ++#define PAX_DELTA_STACK_LEN 16
3053 ++#endif
3054 ++
3055 + /* This yields a mask that user programs can use to figure out what
3056 + instruction set this cpu supports. This can NOT be done in userspace
3057 + on Sparc. */
3058 +diff -urNp linux-3.1.1/arch/sparc/include/asm/elf_64.h linux-3.1.1/arch/sparc/include/asm/elf_64.h
3059 +--- linux-3.1.1/arch/sparc/include/asm/elf_64.h 2011-11-11 15:19:27.000000000 -0500
3060 ++++ linux-3.1.1/arch/sparc/include/asm/elf_64.h 2011-11-16 18:39:07.000000000 -0500
3061 +@@ -180,6 +180,13 @@ typedef struct {
3062 + #define ELF_ET_DYN_BASE 0x0000010000000000UL
3063 + #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3064 +
3065 ++#ifdef CONFIG_PAX_ASLR
3066 ++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3067 ++
3068 ++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3069 ++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3070 ++#endif
3071 ++
3072 + extern unsigned long sparc64_elf_hwcap;
3073 + #define ELF_HWCAP sparc64_elf_hwcap
3074 +
3075 +diff -urNp linux-3.1.1/arch/sparc/include/asm/pgtable_32.h linux-3.1.1/arch/sparc/include/asm/pgtable_32.h
3076 +--- linux-3.1.1/arch/sparc/include/asm/pgtable_32.h 2011-11-11 15:19:27.000000000 -0500
3077 ++++ linux-3.1.1/arch/sparc/include/asm/pgtable_32.h 2011-11-16 18:39:07.000000000 -0500
3078 +@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3079 + BTFIXUPDEF_INT(page_none)
3080 + BTFIXUPDEF_INT(page_copy)
3081 + BTFIXUPDEF_INT(page_readonly)
3082 ++
3083 ++#ifdef CONFIG_PAX_PAGEEXEC
3084 ++BTFIXUPDEF_INT(page_shared_noexec)
3085 ++BTFIXUPDEF_INT(page_copy_noexec)
3086 ++BTFIXUPDEF_INT(page_readonly_noexec)
3087 ++#endif
3088 ++
3089 + BTFIXUPDEF_INT(page_kernel)
3090 +
3091 + #define PMD_SHIFT SUN4C_PMD_SHIFT
3092 +@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3093 + #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3094 + #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3095 +
3096 ++#ifdef CONFIG_PAX_PAGEEXEC
3097 ++extern pgprot_t PAGE_SHARED_NOEXEC;
3098 ++# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3099 ++# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3100 ++#else
3101 ++# define PAGE_SHARED_NOEXEC PAGE_SHARED
3102 ++# define PAGE_COPY_NOEXEC PAGE_COPY
3103 ++# define PAGE_READONLY_NOEXEC PAGE_READONLY
3104 ++#endif
3105 ++
3106 + extern unsigned long page_kernel;
3107 +
3108 + #ifdef MODULE
3109 +diff -urNp linux-3.1.1/arch/sparc/include/asm/pgtsrmmu.h linux-3.1.1/arch/sparc/include/asm/pgtsrmmu.h
3110 +--- linux-3.1.1/arch/sparc/include/asm/pgtsrmmu.h 2011-11-11 15:19:27.000000000 -0500
3111 ++++ linux-3.1.1/arch/sparc/include/asm/pgtsrmmu.h 2011-11-16 18:39:07.000000000 -0500
3112 +@@ -115,6 +115,13 @@
3113 + SRMMU_EXEC | SRMMU_REF)
3114 + #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3115 + SRMMU_EXEC | SRMMU_REF)
3116 ++
3117 ++#ifdef CONFIG_PAX_PAGEEXEC
3118 ++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3119 ++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3120 ++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3121 ++#endif
3122 ++
3123 + #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3124 + SRMMU_DIRTY | SRMMU_REF)
3125 +
3126 +diff -urNp linux-3.1.1/arch/sparc/include/asm/spinlock_64.h linux-3.1.1/arch/sparc/include/asm/spinlock_64.h
3127 +--- linux-3.1.1/arch/sparc/include/asm/spinlock_64.h 2011-11-11 15:19:27.000000000 -0500
3128 ++++ linux-3.1.1/arch/sparc/include/asm/spinlock_64.h 2011-11-16 18:39:07.000000000 -0500
3129 +@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3130 +
3131 + /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3132 +
3133 +-static void inline arch_read_lock(arch_rwlock_t *lock)
3134 ++static inline void arch_read_lock(arch_rwlock_t *lock)
3135 + {
3136 + unsigned long tmp1, tmp2;
3137 +
3138 + __asm__ __volatile__ (
3139 + "1: ldsw [%2], %0\n"
3140 + " brlz,pn %0, 2f\n"
3141 +-"4: add %0, 1, %1\n"
3142 ++"4: addcc %0, 1, %1\n"
3143 ++
3144 ++#ifdef CONFIG_PAX_REFCOUNT
3145 ++" tvs %%icc, 6\n"
3146 ++#endif
3147 ++
3148 + " cas [%2], %0, %1\n"
3149 + " cmp %0, %1\n"
3150 + " bne,pn %%icc, 1b\n"
3151 +@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3152 + " .previous"
3153 + : "=&r" (tmp1), "=&r" (tmp2)
3154 + : "r" (lock)
3155 +- : "memory");
3156 ++ : "memory", "cc");
3157 + }
3158 +
3159 +-static int inline arch_read_trylock(arch_rwlock_t *lock)
3160 ++static inline int arch_read_trylock(arch_rwlock_t *lock)
3161 + {
3162 + int tmp1, tmp2;
3163 +
3164 +@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3165 + "1: ldsw [%2], %0\n"
3166 + " brlz,a,pn %0, 2f\n"
3167 + " mov 0, %0\n"
3168 +-" add %0, 1, %1\n"
3169 ++" addcc %0, 1, %1\n"
3170 ++
3171 ++#ifdef CONFIG_PAX_REFCOUNT
3172 ++" tvs %%icc, 6\n"
3173 ++#endif
3174 ++
3175 + " cas [%2], %0, %1\n"
3176 + " cmp %0, %1\n"
3177 + " bne,pn %%icc, 1b\n"
3178 +@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3179 + return tmp1;
3180 + }
3181 +
3182 +-static void inline arch_read_unlock(arch_rwlock_t *lock)
3183 ++static inline void arch_read_unlock(arch_rwlock_t *lock)
3184 + {
3185 + unsigned long tmp1, tmp2;
3186 +
3187 + __asm__ __volatile__(
3188 + "1: lduw [%2], %0\n"
3189 +-" sub %0, 1, %1\n"
3190 ++" subcc %0, 1, %1\n"
3191 ++
3192 ++#ifdef CONFIG_PAX_REFCOUNT
3193 ++" tvs %%icc, 6\n"
3194 ++#endif
3195 ++
3196 + " cas [%2], %0, %1\n"
3197 + " cmp %0, %1\n"
3198 + " bne,pn %%xcc, 1b\n"
3199 +@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3200 + : "memory");
3201 + }
3202 +
3203 +-static void inline arch_write_lock(arch_rwlock_t *lock)
3204 ++static inline void arch_write_lock(arch_rwlock_t *lock)
3205 + {
3206 + unsigned long mask, tmp1, tmp2;
3207 +
3208 +@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3209 + : "memory");
3210 + }
3211 +
3212 +-static void inline arch_write_unlock(arch_rwlock_t *lock)
3213 ++static inline void arch_write_unlock(arch_rwlock_t *lock)
3214 + {
3215 + __asm__ __volatile__(
3216 + " stw %%g0, [%0]"
3217 +@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3218 + : "memory");
3219 + }
3220 +
3221 +-static int inline arch_write_trylock(arch_rwlock_t *lock)
3222 ++static inline int arch_write_trylock(arch_rwlock_t *lock)
3223 + {
3224 + unsigned long mask, tmp1, tmp2, result;
3225 +
3226 +diff -urNp linux-3.1.1/arch/sparc/include/asm/thread_info_32.h linux-3.1.1/arch/sparc/include/asm/thread_info_32.h
3227 +--- linux-3.1.1/arch/sparc/include/asm/thread_info_32.h 2011-11-11 15:19:27.000000000 -0500
3228 ++++ linux-3.1.1/arch/sparc/include/asm/thread_info_32.h 2011-11-16 18:39:07.000000000 -0500
3229 +@@ -50,6 +50,8 @@ struct thread_info {
3230 + unsigned long w_saved;
3231 +
3232 + struct restart_block restart_block;
3233 ++
3234 ++ unsigned long lowest_stack;
3235 + };
3236 +
3237 + /*
3238 +diff -urNp linux-3.1.1/arch/sparc/include/asm/thread_info_64.h linux-3.1.1/arch/sparc/include/asm/thread_info_64.h
3239 +--- linux-3.1.1/arch/sparc/include/asm/thread_info_64.h 2011-11-11 15:19:27.000000000 -0500
3240 ++++ linux-3.1.1/arch/sparc/include/asm/thread_info_64.h 2011-11-16 18:39:07.000000000 -0500
3241 +@@ -63,6 +63,8 @@ struct thread_info {
3242 + struct pt_regs *kern_una_regs;
3243 + unsigned int kern_una_insn;
3244 +
3245 ++ unsigned long lowest_stack;
3246 ++
3247 + unsigned long fpregs[0] __attribute__ ((aligned(64)));
3248 + };
3249 +
3250 +diff -urNp linux-3.1.1/arch/sparc/include/asm/uaccess_32.h linux-3.1.1/arch/sparc/include/asm/uaccess_32.h
3251 +--- linux-3.1.1/arch/sparc/include/asm/uaccess_32.h 2011-11-11 15:19:27.000000000 -0500
3252 ++++ linux-3.1.1/arch/sparc/include/asm/uaccess_32.h 2011-11-16 18:39:07.000000000 -0500
3253 +@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3254 +
3255 + static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3256 + {
3257 +- if (n && __access_ok((unsigned long) to, n))
3258 ++ if ((long)n < 0)
3259 ++ return n;
3260 ++
3261 ++ if (n && __access_ok((unsigned long) to, n)) {
3262 ++ if (!__builtin_constant_p(n))
3263 ++ check_object_size(from, n, true);
3264 + return __copy_user(to, (__force void __user *) from, n);
3265 +- else
3266 ++ } else
3267 + return n;
3268 + }
3269 +
3270 + static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3271 + {
3272 ++ if ((long)n < 0)
3273 ++ return n;
3274 ++
3275 ++ if (!__builtin_constant_p(n))
3276 ++ check_object_size(from, n, true);
3277 ++
3278 + return __copy_user(to, (__force void __user *) from, n);
3279 + }
3280 +
3281 + static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3282 + {
3283 +- if (n && __access_ok((unsigned long) from, n))
3284 ++ if ((long)n < 0)
3285 ++ return n;
3286 ++
3287 ++ if (n && __access_ok((unsigned long) from, n)) {
3288 ++ if (!__builtin_constant_p(n))
3289 ++ check_object_size(to, n, false);
3290 + return __copy_user((__force void __user *) to, from, n);
3291 +- else
3292 ++ } else
3293 + return n;
3294 + }
3295 +
3296 + static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3297 + {
3298 ++ if ((long)n < 0)
3299 ++ return n;
3300 ++
3301 + return __copy_user((__force void __user *) to, from, n);
3302 + }
3303 +
3304 +diff -urNp linux-3.1.1/arch/sparc/include/asm/uaccess_64.h linux-3.1.1/arch/sparc/include/asm/uaccess_64.h
3305 +--- linux-3.1.1/arch/sparc/include/asm/uaccess_64.h 2011-11-11 15:19:27.000000000 -0500
3306 ++++ linux-3.1.1/arch/sparc/include/asm/uaccess_64.h 2011-11-16 18:39:07.000000000 -0500
3307 +@@ -10,6 +10,7 @@
3308 + #include <linux/compiler.h>
3309 + #include <linux/string.h>
3310 + #include <linux/thread_info.h>
3311 ++#include <linux/kernel.h>
3312 + #include <asm/asi.h>
3313 + #include <asm/system.h>
3314 + #include <asm/spitfire.h>
3315 +@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3316 + static inline unsigned long __must_check
3317 + copy_from_user(void *to, const void __user *from, unsigned long size)
3318 + {
3319 +- unsigned long ret = ___copy_from_user(to, from, size);
3320 ++ unsigned long ret;
3321 +
3322 ++ if ((long)size < 0 || size > INT_MAX)
3323 ++ return size;
3324 ++
3325 ++ if (!__builtin_constant_p(size))
3326 ++ check_object_size(to, size, false);
3327 ++
3328 ++ ret = ___copy_from_user(to, from, size);
3329 + if (unlikely(ret))
3330 + ret = copy_from_user_fixup(to, from, size);
3331 +
3332 +@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3333 + static inline unsigned long __must_check
3334 + copy_to_user(void __user *to, const void *from, unsigned long size)
3335 + {
3336 +- unsigned long ret = ___copy_to_user(to, from, size);
3337 ++ unsigned long ret;
3338 ++
3339 ++ if ((long)size < 0 || size > INT_MAX)
3340 ++ return size;
3341 ++
3342 ++ if (!__builtin_constant_p(size))
3343 ++ check_object_size(from, size, true);
3344 +
3345 ++ ret = ___copy_to_user(to, from, size);
3346 + if (unlikely(ret))
3347 + ret = copy_to_user_fixup(to, from, size);
3348 + return ret;
3349 +diff -urNp linux-3.1.1/arch/sparc/include/asm/uaccess.h linux-3.1.1/arch/sparc/include/asm/uaccess.h
3350 +--- linux-3.1.1/arch/sparc/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
3351 ++++ linux-3.1.1/arch/sparc/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
3352 +@@ -1,5 +1,13 @@
3353 + #ifndef ___ASM_SPARC_UACCESS_H
3354 + #define ___ASM_SPARC_UACCESS_H
3355 ++
3356 ++#ifdef __KERNEL__
3357 ++#ifndef __ASSEMBLY__
3358 ++#include <linux/types.h>
3359 ++extern void check_object_size(const void *ptr, unsigned long n, bool to);
3360 ++#endif
3361 ++#endif
3362 ++
3363 + #if defined(__sparc__) && defined(__arch64__)
3364 + #include <asm/uaccess_64.h>
3365 + #else
3366 +diff -urNp linux-3.1.1/arch/sparc/kernel/Makefile linux-3.1.1/arch/sparc/kernel/Makefile
3367 +--- linux-3.1.1/arch/sparc/kernel/Makefile 2011-11-11 15:19:27.000000000 -0500
3368 ++++ linux-3.1.1/arch/sparc/kernel/Makefile 2011-11-16 18:39:07.000000000 -0500
3369 +@@ -3,7 +3,7 @@
3370 + #
3371 +
3372 + asflags-y := -ansi
3373 +-ccflags-y := -Werror
3374 ++#ccflags-y := -Werror
3375 +
3376 + extra-y := head_$(BITS).o
3377 + extra-y += init_task.o
3378 +diff -urNp linux-3.1.1/arch/sparc/kernel/process_32.c linux-3.1.1/arch/sparc/kernel/process_32.c
3379 +--- linux-3.1.1/arch/sparc/kernel/process_32.c 2011-11-11 15:19:27.000000000 -0500
3380 ++++ linux-3.1.1/arch/sparc/kernel/process_32.c 2011-11-16 18:40:08.000000000 -0500
3381 +@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3382 + rw->ins[4], rw->ins[5],
3383 + rw->ins[6],
3384 + rw->ins[7]);
3385 +- printk("%pS\n", (void *) rw->ins[7]);
3386 ++ printk("%pA\n", (void *) rw->ins[7]);
3387 + rw = (struct reg_window32 *) rw->ins[6];
3388 + }
3389 + spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3390 +@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3391 +
3392 + printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3393 + r->psr, r->pc, r->npc, r->y, print_tainted());
3394 +- printk("PC: <%pS>\n", (void *) r->pc);
3395 ++ printk("PC: <%pA>\n", (void *) r->pc);
3396 + printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3397 + r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3398 + r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3399 + printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3400 + r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3401 + r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3402 +- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3403 ++ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3404 +
3405 + printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3406 + rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3407 +@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk,
3408 + rw = (struct reg_window32 *) fp;
3409 + pc = rw->ins[7];
3410 + printk("[%08lx : ", pc);
3411 +- printk("%pS ] ", (void *) pc);
3412 ++ printk("%pA ] ", (void *) pc);
3413 + fp = rw->ins[6];
3414 + } while (++count < 16);
3415 + printk("\n");
3416 +diff -urNp linux-3.1.1/arch/sparc/kernel/process_64.c linux-3.1.1/arch/sparc/kernel/process_64.c
3417 +--- linux-3.1.1/arch/sparc/kernel/process_64.c 2011-11-11 15:19:27.000000000 -0500
3418 ++++ linux-3.1.1/arch/sparc/kernel/process_64.c 2011-11-16 18:40:08.000000000 -0500
3419 +@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3420 + printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3421 + rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3422 + if (regs->tstate & TSTATE_PRIV)
3423 +- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3424 ++ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3425 + }
3426 +
3427 + void show_regs(struct pt_regs *regs)
3428 + {
3429 + printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3430 + regs->tpc, regs->tnpc, regs->y, print_tainted());
3431 +- printk("TPC: <%pS>\n", (void *) regs->tpc);
3432 ++ printk("TPC: <%pA>\n", (void *) regs->tpc);
3433 + printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3434 + regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3435 + regs->u_regs[3]);
3436 +@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3437 + printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3438 + regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3439 + regs->u_regs[15]);
3440 +- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3441 ++ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3442 + show_regwindow(regs);
3443 + show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3444 + }
3445 +@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3446 + ((tp && tp->task) ? tp->task->pid : -1));
3447 +
3448 + if (gp->tstate & TSTATE_PRIV) {
3449 +- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3450 ++ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3451 + (void *) gp->tpc,
3452 + (void *) gp->o7,
3453 + (void *) gp->i7,
3454 +diff -urNp linux-3.1.1/arch/sparc/kernel/sys_sparc_32.c linux-3.1.1/arch/sparc/kernel/sys_sparc_32.c
3455 +--- linux-3.1.1/arch/sparc/kernel/sys_sparc_32.c 2011-11-11 15:19:27.000000000 -0500
3456 ++++ linux-3.1.1/arch/sparc/kernel/sys_sparc_32.c 2011-11-16 18:39:07.000000000 -0500
3457 +@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3458 + if (ARCH_SUN4C && len > 0x20000000)
3459 + return -ENOMEM;
3460 + if (!addr)
3461 +- addr = TASK_UNMAPPED_BASE;
3462 ++ addr = current->mm->mmap_base;
3463 +
3464 + if (flags & MAP_SHARED)
3465 + addr = COLOUR_ALIGN(addr);
3466 +@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3467 + }
3468 + if (TASK_SIZE - PAGE_SIZE - len < addr)
3469 + return -ENOMEM;
3470 +- if (!vmm || addr + len <= vmm->vm_start)
3471 ++ if (check_heap_stack_gap(vmm, addr, len))
3472 + return addr;
3473 + addr = vmm->vm_end;
3474 + if (flags & MAP_SHARED)
3475 +diff -urNp linux-3.1.1/arch/sparc/kernel/sys_sparc_64.c linux-3.1.1/arch/sparc/kernel/sys_sparc_64.c
3476 +--- linux-3.1.1/arch/sparc/kernel/sys_sparc_64.c 2011-11-11 15:19:27.000000000 -0500
3477 ++++ linux-3.1.1/arch/sparc/kernel/sys_sparc_64.c 2011-11-16 18:39:07.000000000 -0500
3478 +@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3479 + /* We do not accept a shared mapping if it would violate
3480 + * cache aliasing constraints.
3481 + */
3482 +- if ((flags & MAP_SHARED) &&
3483 ++ if ((filp || (flags & MAP_SHARED)) &&
3484 + ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3485 + return -EINVAL;
3486 + return addr;
3487 +@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3488 + if (filp || (flags & MAP_SHARED))
3489 + do_color_align = 1;
3490 +
3491 ++#ifdef CONFIG_PAX_RANDMMAP
3492 ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3493 ++#endif
3494 ++
3495 + if (addr) {
3496 + if (do_color_align)
3497 + addr = COLOUR_ALIGN(addr, pgoff);
3498 +@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3499 + addr = PAGE_ALIGN(addr);
3500 +
3501 + vma = find_vma(mm, addr);
3502 +- if (task_size - len >= addr &&
3503 +- (!vma || addr + len <= vma->vm_start))
3504 ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3505 + return addr;
3506 + }
3507 +
3508 + if (len > mm->cached_hole_size) {
3509 +- start_addr = addr = mm->free_area_cache;
3510 ++ start_addr = addr = mm->free_area_cache;
3511 + } else {
3512 +- start_addr = addr = TASK_UNMAPPED_BASE;
3513 ++ start_addr = addr = mm->mmap_base;
3514 + mm->cached_hole_size = 0;
3515 + }
3516 +
3517 +@@ -174,14 +177,14 @@ full_search:
3518 + vma = find_vma(mm, VA_EXCLUDE_END);
3519 + }
3520 + if (unlikely(task_size < addr)) {
3521 +- if (start_addr != TASK_UNMAPPED_BASE) {
3522 +- start_addr = addr = TASK_UNMAPPED_BASE;
3523 ++ if (start_addr != mm->mmap_base) {
3524 ++ start_addr = addr = mm->mmap_base;
3525 + mm->cached_hole_size = 0;
3526 + goto full_search;
3527 + }
3528 + return -ENOMEM;
3529 + }
3530 +- if (likely(!vma || addr + len <= vma->vm_start)) {
3531 ++ if (likely(check_heap_stack_gap(vma, addr, len))) {
3532 + /*
3533 + * Remember the place where we stopped the search:
3534 + */
3535 +@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3536 + /* We do not accept a shared mapping if it would violate
3537 + * cache aliasing constraints.
3538 + */
3539 +- if ((flags & MAP_SHARED) &&
3540 ++ if ((filp || (flags & MAP_SHARED)) &&
3541 + ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3542 + return -EINVAL;
3543 + return addr;
3544 +@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3545 + addr = PAGE_ALIGN(addr);
3546 +
3547 + vma = find_vma(mm, addr);
3548 +- if (task_size - len >= addr &&
3549 +- (!vma || addr + len <= vma->vm_start))
3550 ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3551 + return addr;
3552 + }
3553 +
3554 +@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3555 + /* make sure it can fit in the remaining address space */
3556 + if (likely(addr > len)) {
3557 + vma = find_vma(mm, addr-len);
3558 +- if (!vma || addr <= vma->vm_start) {
3559 ++ if (check_heap_stack_gap(vma, addr - len, len)) {
3560 + /* remember the address as a hint for next time */
3561 + return (mm->free_area_cache = addr-len);
3562 + }
3563 +@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3564 + if (unlikely(mm->mmap_base < len))
3565 + goto bottomup;
3566 +
3567 +- addr = mm->mmap_base-len;
3568 +- if (do_color_align)
3569 +- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3570 ++ addr = mm->mmap_base - len;
3571 +
3572 + do {
3573 ++ if (do_color_align)
3574 ++ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3575 + /*
3576 + * Lookup failure means no vma is above this address,
3577 + * else if new region fits below vma->vm_start,
3578 + * return with success:
3579 + */
3580 + vma = find_vma(mm, addr);
3581 +- if (likely(!vma || addr+len <= vma->vm_start)) {
3582 ++ if (likely(check_heap_stack_gap(vma, addr, len))) {
3583 + /* remember the address as a hint for next time */
3584 + return (mm->free_area_cache = addr);
3585 + }
3586 +@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3587 + mm->cached_hole_size = vma->vm_start - addr;
3588 +
3589 + /* try just below the current vma->vm_start */
3590 +- addr = vma->vm_start-len;
3591 +- if (do_color_align)
3592 +- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3593 +- } while (likely(len < vma->vm_start));
3594 ++ addr = skip_heap_stack_gap(vma, len);
3595 ++ } while (!IS_ERR_VALUE(addr));
3596 +
3597 + bottomup:
3598 + /*
3599 +@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3600 + gap == RLIM_INFINITY ||
3601 + sysctl_legacy_va_layout) {
3602 + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3603 ++
3604 ++#ifdef CONFIG_PAX_RANDMMAP
3605 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
3606 ++ mm->mmap_base += mm->delta_mmap;
3607 ++#endif
3608 ++
3609 + mm->get_unmapped_area = arch_get_unmapped_area;
3610 + mm->unmap_area = arch_unmap_area;
3611 + } else {
3612 +@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3613 + gap = (task_size / 6 * 5);
3614 +
3615 + mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3616 ++
3617 ++#ifdef CONFIG_PAX_RANDMMAP
3618 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
3619 ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3620 ++#endif
3621 ++
3622 + mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3623 + mm->unmap_area = arch_unmap_area_topdown;
3624 + }
3625 +diff -urNp linux-3.1.1/arch/sparc/kernel/traps_32.c linux-3.1.1/arch/sparc/kernel/traps_32.c
3626 +--- linux-3.1.1/arch/sparc/kernel/traps_32.c 2011-11-11 15:19:27.000000000 -0500
3627 ++++ linux-3.1.1/arch/sparc/kernel/traps_32.c 2011-11-16 18:40:08.000000000 -0500
3628 +@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3629 + #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3630 + #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3631 +
3632 ++extern void gr_handle_kernel_exploit(void);
3633 ++
3634 + void die_if_kernel(char *str, struct pt_regs *regs)
3635 + {
3636 + static int die_counter;
3637 +@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3638 + count++ < 30 &&
3639 + (((unsigned long) rw) >= PAGE_OFFSET) &&
3640 + !(((unsigned long) rw) & 0x7)) {
3641 +- printk("Caller[%08lx]: %pS\n", rw->ins[7],
3642 ++ printk("Caller[%08lx]: %pA\n", rw->ins[7],
3643 + (void *) rw->ins[7]);
3644 + rw = (struct reg_window32 *)rw->ins[6];
3645 + }
3646 + }
3647 + printk("Instruction DUMP:");
3648 + instruction_dump ((unsigned long *) regs->pc);
3649 +- if(regs->psr & PSR_PS)
3650 ++ if(regs->psr & PSR_PS) {
3651 ++ gr_handle_kernel_exploit();
3652 + do_exit(SIGKILL);
3653 ++ }
3654 + do_exit(SIGSEGV);
3655 + }
3656 +
3657 +diff -urNp linux-3.1.1/arch/sparc/kernel/traps_64.c linux-3.1.1/arch/sparc/kernel/traps_64.c
3658 +--- linux-3.1.1/arch/sparc/kernel/traps_64.c 2011-11-11 15:19:27.000000000 -0500
3659 ++++ linux-3.1.1/arch/sparc/kernel/traps_64.c 2011-11-16 18:40:08.000000000 -0500
3660 +@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3661 + i + 1,
3662 + p->trapstack[i].tstate, p->trapstack[i].tpc,
3663 + p->trapstack[i].tnpc, p->trapstack[i].tt);
3664 +- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3665 ++ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3666 + }
3667 + }
3668 +
3669 +@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3670 +
3671 + lvl -= 0x100;
3672 + if (regs->tstate & TSTATE_PRIV) {
3673 ++
3674 ++#ifdef CONFIG_PAX_REFCOUNT
3675 ++ if (lvl == 6)
3676 ++ pax_report_refcount_overflow(regs);
3677 ++#endif
3678 ++
3679 + sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3680 + die_if_kernel(buffer, regs);
3681 + }
3682 +@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3683 + void bad_trap_tl1(struct pt_regs *regs, long lvl)
3684 + {
3685 + char buffer[32];
3686 +-
3687 ++
3688 + if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3689 + 0, lvl, SIGTRAP) == NOTIFY_STOP)
3690 + return;
3691 +
3692 ++#ifdef CONFIG_PAX_REFCOUNT
3693 ++ if (lvl == 6)
3694 ++ pax_report_refcount_overflow(regs);
3695 ++#endif
3696 ++
3697 + dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3698 +
3699 + sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3700 +@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3701 + regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3702 + printk("%s" "ERROR(%d): ",
3703 + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3704 +- printk("TPC<%pS>\n", (void *) regs->tpc);
3705 ++ printk("TPC<%pA>\n", (void *) regs->tpc);
3706 + printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3707 + (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3708 + (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3709 +@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3710 + smp_processor_id(),
3711 + (type & 0x1) ? 'I' : 'D',
3712 + regs->tpc);
3713 +- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3714 ++ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3715 + panic("Irrecoverable Cheetah+ parity error.");
3716 + }
3717 +
3718 +@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3719 + smp_processor_id(),
3720 + (type & 0x1) ? 'I' : 'D',
3721 + regs->tpc);
3722 +- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3723 ++ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3724 + }
3725 +
3726 + struct sun4v_error_entry {
3727 +@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3728 +
3729 + printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3730 + regs->tpc, tl);
3731 +- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3732 ++ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3733 + printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3734 +- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3735 ++ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3736 + (void *) regs->u_regs[UREG_I7]);
3737 + printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3738 + "pte[%lx] error[%lx]\n",
3739 +@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3740 +
3741 + printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3742 + regs->tpc, tl);
3743 +- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3744 ++ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3745 + printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3746 +- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3747 ++ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3748 + (void *) regs->u_regs[UREG_I7]);
3749 + printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3750 + "pte[%lx] error[%lx]\n",
3751 +@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3752 + fp = (unsigned long)sf->fp + STACK_BIAS;
3753 + }
3754 +
3755 +- printk(" [%016lx] %pS\n", pc, (void *) pc);
3756 ++ printk(" [%016lx] %pA\n", pc, (void *) pc);
3757 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3758 + if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3759 + int index = tsk->curr_ret_stack;
3760 + if (tsk->ret_stack && index >= graph) {
3761 + pc = tsk->ret_stack[index - graph].ret;
3762 +- printk(" [%016lx] %pS\n", pc, (void *) pc);
3763 ++ printk(" [%016lx] %pA\n", pc, (void *) pc);
3764 + graph++;
3765 + }
3766 + }
3767 +@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3768 + return (struct reg_window *) (fp + STACK_BIAS);
3769 + }
3770 +
3771 ++extern void gr_handle_kernel_exploit(void);
3772 ++
3773 + void die_if_kernel(char *str, struct pt_regs *regs)
3774 + {
3775 + static int die_counter;
3776 +@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3777 + while (rw &&
3778 + count++ < 30 &&
3779 + kstack_valid(tp, (unsigned long) rw)) {
3780 +- printk("Caller[%016lx]: %pS\n", rw->ins[7],
3781 ++ printk("Caller[%016lx]: %pA\n", rw->ins[7],
3782 + (void *) rw->ins[7]);
3783 +
3784 + rw = kernel_stack_up(rw);
3785 +@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3786 + }
3787 + user_instruction_dump ((unsigned int __user *) regs->tpc);
3788 + }
3789 +- if (regs->tstate & TSTATE_PRIV)
3790 ++ if (regs->tstate & TSTATE_PRIV) {
3791 ++ gr_handle_kernel_exploit();
3792 + do_exit(SIGKILL);
3793 ++ }
3794 + do_exit(SIGSEGV);
3795 + }
3796 + EXPORT_SYMBOL(die_if_kernel);
3797 +diff -urNp linux-3.1.1/arch/sparc/kernel/unaligned_64.c linux-3.1.1/arch/sparc/kernel/unaligned_64.c
3798 +--- linux-3.1.1/arch/sparc/kernel/unaligned_64.c 2011-11-11 15:19:27.000000000 -0500
3799 ++++ linux-3.1.1/arch/sparc/kernel/unaligned_64.c 2011-11-16 18:40:08.000000000 -0500
3800 +@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs
3801 + static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3802 +
3803 + if (__ratelimit(&ratelimit)) {
3804 +- printk("Kernel unaligned access at TPC[%lx] %pS\n",
3805 ++ printk("Kernel unaligned access at TPC[%lx] %pA\n",
3806 + regs->tpc, (void *) regs->tpc);
3807 + }
3808 + }
3809 +diff -urNp linux-3.1.1/arch/sparc/lib/atomic_64.S linux-3.1.1/arch/sparc/lib/atomic_64.S
3810 +--- linux-3.1.1/arch/sparc/lib/atomic_64.S 2011-11-11 15:19:27.000000000 -0500
3811 ++++ linux-3.1.1/arch/sparc/lib/atomic_64.S 2011-11-16 18:39:07.000000000 -0500
3812 +@@ -18,7 +18,12 @@
3813 + atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3814 + BACKOFF_SETUP(%o2)
3815 + 1: lduw [%o1], %g1
3816 +- add %g1, %o0, %g7
3817 ++ addcc %g1, %o0, %g7
3818 ++
3819 ++#ifdef CONFIG_PAX_REFCOUNT
3820 ++ tvs %icc, 6
3821 ++#endif
3822 ++
3823 + cas [%o1], %g1, %g7
3824 + cmp %g1, %g7
3825 + bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3826 +@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3827 + 2: BACKOFF_SPIN(%o2, %o3, 1b)
3828 + .size atomic_add, .-atomic_add
3829 +
3830 ++ .globl atomic_add_unchecked
3831 ++ .type atomic_add_unchecked,#function
3832 ++atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3833 ++ BACKOFF_SETUP(%o2)
3834 ++1: lduw [%o1], %g1
3835 ++ add %g1, %o0, %g7
3836 ++ cas [%o1], %g1, %g7
3837 ++ cmp %g1, %g7
3838 ++ bne,pn %icc, 2f
3839 ++ nop
3840 ++ retl
3841 ++ nop
3842 ++2: BACKOFF_SPIN(%o2, %o3, 1b)
3843 ++ .size atomic_add_unchecked, .-atomic_add_unchecked
3844 ++
3845 + .globl atomic_sub
3846 + .type atomic_sub,#function
3847 + atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3848 + BACKOFF_SETUP(%o2)
3849 + 1: lduw [%o1], %g1
3850 +- sub %g1, %o0, %g7
3851 ++ subcc %g1, %o0, %g7
3852 ++
3853 ++#ifdef CONFIG_PAX_REFCOUNT
3854 ++ tvs %icc, 6
3855 ++#endif
3856 ++
3857 + cas [%o1], %g1, %g7
3858 + cmp %g1, %g7
3859 + bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3860 +@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3861 + 2: BACKOFF_SPIN(%o2, %o3, 1b)
3862 + .size atomic_sub, .-atomic_sub
3863 +
3864 ++ .globl atomic_sub_unchecked
3865 ++ .type atomic_sub_unchecked,#function
3866 ++atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3867 ++ BACKOFF_SETUP(%o2)
3868 ++1: lduw [%o1], %g1
3869 ++ sub %g1, %o0, %g7
3870 ++ cas [%o1], %g1, %g7
3871 ++ cmp %g1, %g7
3872 ++ bne,pn %icc, 2f
3873 ++ nop
3874 ++ retl
3875 ++ nop
3876 ++2: BACKOFF_SPIN(%o2, %o3, 1b)
3877 ++ .size atomic_sub_unchecked, .-atomic_sub_unchecked
3878 ++
3879 + .globl atomic_add_ret
3880 + .type atomic_add_ret,#function
3881 + atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3882 + BACKOFF_SETUP(%o2)
3883 + 1: lduw [%o1], %g1
3884 +- add %g1, %o0, %g7
3885 ++ addcc %g1, %o0, %g7
3886 ++
3887 ++#ifdef CONFIG_PAX_REFCOUNT
3888 ++ tvs %icc, 6
3889 ++#endif
3890 ++
3891 + cas [%o1], %g1, %g7
3892 + cmp %g1, %g7
3893 + bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3894 +@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3895 + 2: BACKOFF_SPIN(%o2, %o3, 1b)
3896 + .size atomic_add_ret, .-atomic_add_ret
3897 +
3898 ++ .globl atomic_add_ret_unchecked
3899 ++ .type atomic_add_ret_unchecked,#function
3900 ++atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3901 ++ BACKOFF_SETUP(%o2)
3902 ++1: lduw [%o1], %g1
3903 ++ addcc %g1, %o0, %g7
3904 ++ cas [%o1], %g1, %g7
3905 ++ cmp %g1, %g7
3906 ++ bne,pn %icc, 2f
3907 ++ add %g7, %o0, %g7
3908 ++ sra %g7, 0, %o0
3909 ++ retl
3910 ++ nop
3911 ++2: BACKOFF_SPIN(%o2, %o3, 1b)
3912 ++ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3913 ++
3914 + .globl atomic_sub_ret
3915 + .type atomic_sub_ret,#function
3916 + atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3917 + BACKOFF_SETUP(%o2)
3918 + 1: lduw [%o1], %g1
3919 +- sub %g1, %o0, %g7
3920 ++ subcc %g1, %o0, %g7
3921 ++
3922 ++#ifdef CONFIG_PAX_REFCOUNT
3923 ++ tvs %icc, 6
3924 ++#endif
3925 ++
3926 + cas [%o1], %g1, %g7
3927 + cmp %g1, %g7
3928 + bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3929 +@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3930 + atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3931 + BACKOFF_SETUP(%o2)
3932 + 1: ldx [%o1], %g1
3933 +- add %g1, %o0, %g7
3934 ++ addcc %g1, %o0, %g7
3935 ++
3936 ++#ifdef CONFIG_PAX_REFCOUNT
3937 ++ tvs %xcc, 6
3938 ++#endif
3939 ++
3940 + casx [%o1], %g1, %g7
3941 + cmp %g1, %g7
3942 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3943 +@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3944 + 2: BACKOFF_SPIN(%o2, %o3, 1b)
3945 + .size atomic64_add, .-atomic64_add
3946 +
3947 ++ .globl atomic64_add_unchecked
3948 ++ .type atomic64_add_unchecked,#function
3949 ++atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3950 ++ BACKOFF_SETUP(%o2)
3951 ++1: ldx [%o1], %g1
3952 ++ addcc %g1, %o0, %g7
3953 ++ casx [%o1], %g1, %g7
3954 ++ cmp %g1, %g7
3955 ++ bne,pn %xcc, 2f
3956 ++ nop
3957 ++ retl
3958 ++ nop
3959 ++2: BACKOFF_SPIN(%o2, %o3, 1b)
3960 ++ .size atomic64_add_unchecked, .-atomic64_add_unchecked
3961 ++
3962 + .globl atomic64_sub
3963 + .type atomic64_sub,#function
3964 + atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3965 + BACKOFF_SETUP(%o2)
3966 + 1: ldx [%o1], %g1
3967 +- sub %g1, %o0, %g7
3968 ++ subcc %g1, %o0, %g7
3969 ++
3970 ++#ifdef CONFIG_PAX_REFCOUNT
3971 ++ tvs %xcc, 6
3972 ++#endif
3973 ++
3974 + casx [%o1], %g1, %g7
3975 + cmp %g1, %g7
3976 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3977 +@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
3978 + 2: BACKOFF_SPIN(%o2, %o3, 1b)
3979 + .size atomic64_sub, .-atomic64_sub
3980 +
3981 ++ .globl atomic64_sub_unchecked
3982 ++ .type atomic64_sub_unchecked,#function
3983 ++atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3984 ++ BACKOFF_SETUP(%o2)
3985 ++1: ldx [%o1], %g1
3986 ++ subcc %g1, %o0, %g7
3987 ++ casx [%o1], %g1, %g7
3988 ++ cmp %g1, %g7
3989 ++ bne,pn %xcc, 2f
3990 ++ nop
3991 ++ retl
3992 ++ nop
3993 ++2: BACKOFF_SPIN(%o2, %o3, 1b)
3994 ++ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
3995 ++
3996 + .globl atomic64_add_ret
3997 + .type atomic64_add_ret,#function
3998 + atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3999 + BACKOFF_SETUP(%o2)
4000 + 1: ldx [%o1], %g1
4001 +- add %g1, %o0, %g7
4002 ++ addcc %g1, %o0, %g7
4003 ++
4004 ++#ifdef CONFIG_PAX_REFCOUNT
4005 ++ tvs %xcc, 6
4006 ++#endif
4007 ++
4008 + casx [%o1], %g1, %g7
4009 + cmp %g1, %g7
4010 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4011 +@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4012 + 2: BACKOFF_SPIN(%o2, %o3, 1b)
4013 + .size atomic64_add_ret, .-atomic64_add_ret
4014 +
4015 ++ .globl atomic64_add_ret_unchecked
4016 ++ .type atomic64_add_ret_unchecked,#function
4017 ++atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4018 ++ BACKOFF_SETUP(%o2)
4019 ++1: ldx [%o1], %g1
4020 ++ addcc %g1, %o0, %g7
4021 ++ casx [%o1], %g1, %g7
4022 ++ cmp %g1, %g7
4023 ++ bne,pn %xcc, 2f
4024 ++ add %g7, %o0, %g7
4025 ++ mov %g7, %o0
4026 ++ retl
4027 ++ nop
4028 ++2: BACKOFF_SPIN(%o2, %o3, 1b)
4029 ++ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4030 ++
4031 + .globl atomic64_sub_ret
4032 + .type atomic64_sub_ret,#function
4033 + atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4034 + BACKOFF_SETUP(%o2)
4035 + 1: ldx [%o1], %g1
4036 +- sub %g1, %o0, %g7
4037 ++ subcc %g1, %o0, %g7
4038 ++
4039 ++#ifdef CONFIG_PAX_REFCOUNT
4040 ++ tvs %xcc, 6
4041 ++#endif
4042 ++
4043 + casx [%o1], %g1, %g7
4044 + cmp %g1, %g7
4045 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4046 +diff -urNp linux-3.1.1/arch/sparc/lib/ksyms.c linux-3.1.1/arch/sparc/lib/ksyms.c
4047 +--- linux-3.1.1/arch/sparc/lib/ksyms.c 2011-11-11 15:19:27.000000000 -0500
4048 ++++ linux-3.1.1/arch/sparc/lib/ksyms.c 2011-11-16 18:39:07.000000000 -0500
4049 +@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4050 +
4051 + /* Atomic counter implementation. */
4052 + EXPORT_SYMBOL(atomic_add);
4053 ++EXPORT_SYMBOL(atomic_add_unchecked);
4054 + EXPORT_SYMBOL(atomic_add_ret);
4055 ++EXPORT_SYMBOL(atomic_add_ret_unchecked);
4056 + EXPORT_SYMBOL(atomic_sub);
4057 ++EXPORT_SYMBOL(atomic_sub_unchecked);
4058 + EXPORT_SYMBOL(atomic_sub_ret);
4059 + EXPORT_SYMBOL(atomic64_add);
4060 ++EXPORT_SYMBOL(atomic64_add_unchecked);
4061 + EXPORT_SYMBOL(atomic64_add_ret);
4062 ++EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4063 + EXPORT_SYMBOL(atomic64_sub);
4064 ++EXPORT_SYMBOL(atomic64_sub_unchecked);
4065 + EXPORT_SYMBOL(atomic64_sub_ret);
4066 +
4067 + /* Atomic bit operations. */
4068 +diff -urNp linux-3.1.1/arch/sparc/lib/Makefile linux-3.1.1/arch/sparc/lib/Makefile
4069 +--- linux-3.1.1/arch/sparc/lib/Makefile 2011-11-11 15:19:27.000000000 -0500
4070 ++++ linux-3.1.1/arch/sparc/lib/Makefile 2011-11-16 18:39:07.000000000 -0500
4071 +@@ -2,7 +2,7 @@
4072 + #
4073 +
4074 + asflags-y := -ansi -DST_DIV0=0x02
4075 +-ccflags-y := -Werror
4076 ++#ccflags-y := -Werror
4077 +
4078 + lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4079 + lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4080 +diff -urNp linux-3.1.1/arch/sparc/Makefile linux-3.1.1/arch/sparc/Makefile
4081 +--- linux-3.1.1/arch/sparc/Makefile 2011-11-11 15:19:27.000000000 -0500
4082 ++++ linux-3.1.1/arch/sparc/Makefile 2011-11-16 18:40:08.000000000 -0500
4083 +@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4084 + # Export what is needed by arch/sparc/boot/Makefile
4085 + export VMLINUX_INIT VMLINUX_MAIN
4086 + VMLINUX_INIT := $(head-y) $(init-y)
4087 +-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4088 ++VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4089 + VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4090 + VMLINUX_MAIN += $(drivers-y) $(net-y)
4091 +
4092 +diff -urNp linux-3.1.1/arch/sparc/mm/fault_32.c linux-3.1.1/arch/sparc/mm/fault_32.c
4093 +--- linux-3.1.1/arch/sparc/mm/fault_32.c 2011-11-11 15:19:27.000000000 -0500
4094 ++++ linux-3.1.1/arch/sparc/mm/fault_32.c 2011-11-16 18:39:07.000000000 -0500
4095 +@@ -22,6 +22,9 @@
4096 + #include <linux/interrupt.h>
4097 + #include <linux/module.h>
4098 + #include <linux/kdebug.h>
4099 ++#include <linux/slab.h>
4100 ++#include <linux/pagemap.h>
4101 ++#include <linux/compiler.h>
4102 +
4103 + #include <asm/system.h>
4104 + #include <asm/page.h>
4105 +@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4106 + return safe_compute_effective_address(regs, insn);
4107 + }
4108 +
4109 ++#ifdef CONFIG_PAX_PAGEEXEC
4110 ++#ifdef CONFIG_PAX_DLRESOLVE
4111 ++static void pax_emuplt_close(struct vm_area_struct *vma)
4112 ++{
4113 ++ vma->vm_mm->call_dl_resolve = 0UL;
4114 ++}
4115 ++
4116 ++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4117 ++{
4118 ++ unsigned int *kaddr;
4119 ++
4120 ++ vmf->page = alloc_page(GFP_HIGHUSER);
4121 ++ if (!vmf->page)
4122 ++ return VM_FAULT_OOM;
4123 ++
4124 ++ kaddr = kmap(vmf->page);
4125 ++ memset(kaddr, 0, PAGE_SIZE);
4126 ++ kaddr[0] = 0x9DE3BFA8U; /* save */
4127 ++ flush_dcache_page(vmf->page);
4128 ++ kunmap(vmf->page);
4129 ++ return VM_FAULT_MAJOR;
4130 ++}
4131 ++
4132 ++static const struct vm_operations_struct pax_vm_ops = {
4133 ++ .close = pax_emuplt_close,
4134 ++ .fault = pax_emuplt_fault
4135 ++};
4136 ++
4137 ++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4138 ++{
4139 ++ int ret;
4140 ++
4141 ++ INIT_LIST_HEAD(&vma->anon_vma_chain);
4142 ++ vma->vm_mm = current->mm;
4143 ++ vma->vm_start = addr;
4144 ++ vma->vm_end = addr + PAGE_SIZE;
4145 ++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4146 ++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4147 ++ vma->vm_ops = &pax_vm_ops;
4148 ++
4149 ++ ret = insert_vm_struct(current->mm, vma);
4150 ++ if (ret)
4151 ++ return ret;
4152 ++
4153 ++ ++current->mm->total_vm;
4154 ++ return 0;
4155 ++}
4156 ++#endif
4157 ++
4158 ++/*
4159 ++ * PaX: decide what to do with offenders (regs->pc = fault address)
4160 ++ *
4161 ++ * returns 1 when task should be killed
4162 ++ * 2 when patched PLT trampoline was detected
4163 ++ * 3 when unpatched PLT trampoline was detected
4164 ++ */
4165 ++static int pax_handle_fetch_fault(struct pt_regs *regs)
4166 ++{
4167 ++
4168 ++#ifdef CONFIG_PAX_EMUPLT
4169 ++ int err;
4170 ++
4171 ++ do { /* PaX: patched PLT emulation #1 */
4172 ++ unsigned int sethi1, sethi2, jmpl;
4173 ++
4174 ++ err = get_user(sethi1, (unsigned int *)regs->pc);
4175 ++ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4176 ++ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4177 ++
4178 ++ if (err)
4179 ++ break;
4180 ++
4181 ++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4182 ++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4183 ++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4184 ++ {
4185 ++ unsigned int addr;
4186 ++
4187 ++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4188 ++ addr = regs->u_regs[UREG_G1];
4189 ++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4190 ++ regs->pc = addr;
4191 ++ regs->npc = addr+4;
4192 ++ return 2;
4193 ++ }
4194 ++ } while (0);
4195 ++
4196 ++ { /* PaX: patched PLT emulation #2 */
4197 ++ unsigned int ba;
4198 ++
4199 ++ err = get_user(ba, (unsigned int *)regs->pc);
4200 ++
4201 ++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4202 ++ unsigned int addr;
4203 ++
4204 ++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4205 ++ regs->pc = addr;
4206 ++ regs->npc = addr+4;
4207 ++ return 2;
4208 ++ }
4209 ++ }
4210 ++
4211 ++ do { /* PaX: patched PLT emulation #3 */
4212 ++ unsigned int sethi, jmpl, nop;
4213 ++
4214 ++ err = get_user(sethi, (unsigned int *)regs->pc);
4215 ++ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4216 ++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4217 ++
4218 ++ if (err)
4219 ++ break;
4220 ++
4221 ++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4222 ++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4223 ++ nop == 0x01000000U)
4224 ++ {
4225 ++ unsigned int addr;
4226 ++
4227 ++ addr = (sethi & 0x003FFFFFU) << 10;
4228 ++ regs->u_regs[UREG_G1] = addr;
4229 ++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4230 ++ regs->pc = addr;
4231 ++ regs->npc = addr+4;
4232 ++ return 2;
4233 ++ }
4234 ++ } while (0);
4235 ++
4236 ++ do { /* PaX: unpatched PLT emulation step 1 */
4237 ++ unsigned int sethi, ba, nop;
4238 ++
4239 ++ err = get_user(sethi, (unsigned int *)regs->pc);
4240 ++ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4241 ++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4242 ++
4243 ++ if (err)
4244 ++ break;
4245 ++
4246 ++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4247 ++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4248 ++ nop == 0x01000000U)
4249 ++ {
4250 ++ unsigned int addr, save, call;
4251 ++
4252 ++ if ((ba & 0xFFC00000U) == 0x30800000U)
4253 ++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4254 ++ else
4255 ++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4256 ++
4257 ++ err = get_user(save, (unsigned int *)addr);
4258 ++ err |= get_user(call, (unsigned int *)(addr+4));
4259 ++ err |= get_user(nop, (unsigned int *)(addr+8));
4260 ++ if (err)
4261 ++ break;
4262 ++
4263 ++#ifdef CONFIG_PAX_DLRESOLVE
4264 ++ if (save == 0x9DE3BFA8U &&
4265 ++ (call & 0xC0000000U) == 0x40000000U &&
4266 ++ nop == 0x01000000U)
4267 ++ {
4268 ++ struct vm_area_struct *vma;
4269 ++ unsigned long call_dl_resolve;
4270 ++
4271 ++ down_read(&current->mm->mmap_sem);
4272 ++ call_dl_resolve = current->mm->call_dl_resolve;
4273 ++ up_read(&current->mm->mmap_sem);
4274 ++ if (likely(call_dl_resolve))
4275 ++ goto emulate;
4276 ++
4277 ++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4278 ++
4279 ++ down_write(&current->mm->mmap_sem);
4280 ++ if (current->mm->call_dl_resolve) {
4281 ++ call_dl_resolve = current->mm->call_dl_resolve;
4282 ++ up_write(&current->mm->mmap_sem);
4283 ++ if (vma)
4284 ++ kmem_cache_free(vm_area_cachep, vma);
4285 ++ goto emulate;
4286 ++ }
4287 ++
4288 ++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4289 ++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4290 ++ up_write(&current->mm->mmap_sem);
4291 ++ if (vma)
4292 ++ kmem_cache_free(vm_area_cachep, vma);
4293 ++ return 1;
4294 ++ }
4295 ++
4296 ++ if (pax_insert_vma(vma, call_dl_resolve)) {
4297 ++ up_write(&current->mm->mmap_sem);
4298 ++ kmem_cache_free(vm_area_cachep, vma);
4299 ++ return 1;
4300 ++ }
4301 ++
4302 ++ current->mm->call_dl_resolve = call_dl_resolve;
4303 ++ up_write(&current->mm->mmap_sem);
4304 ++
4305 ++emulate:
4306 ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4307 ++ regs->pc = call_dl_resolve;
4308 ++ regs->npc = addr+4;
4309 ++ return 3;
4310 ++ }
4311 ++#endif
4312 ++
4313 ++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4314 ++ if ((save & 0xFFC00000U) == 0x05000000U &&
4315 ++ (call & 0xFFFFE000U) == 0x85C0A000U &&
4316 ++ nop == 0x01000000U)
4317 ++ {
4318 ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4319 ++ regs->u_regs[UREG_G2] = addr + 4;
4320 ++ addr = (save & 0x003FFFFFU) << 10;
4321 ++ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4322 ++ regs->pc = addr;
4323 ++ regs->npc = addr+4;
4324 ++ return 3;
4325 ++ }
4326 ++ }
4327 ++ } while (0);
4328 ++
4329 ++ do { /* PaX: unpatched PLT emulation step 2 */
4330 ++ unsigned int save, call, nop;
4331 ++
4332 ++ err = get_user(save, (unsigned int *)(regs->pc-4));
4333 ++ err |= get_user(call, (unsigned int *)regs->pc);
4334 ++ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4335 ++ if (err)
4336 ++ break;
4337 ++
4338 ++ if (save == 0x9DE3BFA8U &&
4339 ++ (call & 0xC0000000U) == 0x40000000U &&
4340 ++ nop == 0x01000000U)
4341 ++ {
4342 ++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4343 ++
4344 ++ regs->u_regs[UREG_RETPC] = regs->pc;
4345 ++ regs->pc = dl_resolve;
4346 ++ regs->npc = dl_resolve+4;
4347 ++ return 3;
4348 ++ }
4349 ++ } while (0);
4350 ++#endif
4351 ++
4352 ++ return 1;
4353 ++}
4354 ++
4355 ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4356 ++{
4357 ++ unsigned long i;
4358 ++
4359 ++ printk(KERN_ERR "PAX: bytes at PC: ");
4360 ++ for (i = 0; i < 8; i++) {
4361 ++ unsigned int c;
4362 ++ if (get_user(c, (unsigned int *)pc+i))
4363 ++ printk(KERN_CONT "???????? ");
4364 ++ else
4365 ++ printk(KERN_CONT "%08x ", c);
4366 ++ }
4367 ++ printk("\n");
4368 ++}
4369 ++#endif
4370 ++
4371 + static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4372 + int text_fault)
4373 + {
4374 +@@ -281,6 +546,24 @@ good_area:
4375 + if(!(vma->vm_flags & VM_WRITE))
4376 + goto bad_area;
4377 + } else {
4378 ++
4379 ++#ifdef CONFIG_PAX_PAGEEXEC
4380 ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4381 ++ up_read(&mm->mmap_sem);
4382 ++ switch (pax_handle_fetch_fault(regs)) {
4383 ++
4384 ++#ifdef CONFIG_PAX_EMUPLT
4385 ++ case 2:
4386 ++ case 3:
4387 ++ return;
4388 ++#endif
4389 ++
4390 ++ }
4391 ++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4392 ++ do_group_exit(SIGKILL);
4393 ++ }
4394 ++#endif
4395 ++
4396 + /* Allow reads even for write-only mappings */
4397 + if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4398 + goto bad_area;
4399 +diff -urNp linux-3.1.1/arch/sparc/mm/fault_64.c linux-3.1.1/arch/sparc/mm/fault_64.c
4400 +--- linux-3.1.1/arch/sparc/mm/fault_64.c 2011-11-11 15:19:27.000000000 -0500
4401 ++++ linux-3.1.1/arch/sparc/mm/fault_64.c 2011-11-16 18:40:08.000000000 -0500
4402 +@@ -21,6 +21,9 @@
4403 + #include <linux/kprobes.h>
4404 + #include <linux/kdebug.h>
4405 + #include <linux/percpu.h>
4406 ++#include <linux/slab.h>
4407 ++#include <linux/pagemap.h>
4408 ++#include <linux/compiler.h>
4409 +
4410 + #include <asm/page.h>
4411 + #include <asm/pgtable.h>
4412 +@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4413 + printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4414 + regs->tpc);
4415 + printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4416 +- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4417 ++ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4418 + printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4419 + dump_stack();
4420 + unhandled_fault(regs->tpc, current, regs);
4421 +@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4422 + show_regs(regs);
4423 + }
4424 +
4425 ++#ifdef CONFIG_PAX_PAGEEXEC
4426 ++#ifdef CONFIG_PAX_DLRESOLVE
4427 ++static void pax_emuplt_close(struct vm_area_struct *vma)
4428 ++{
4429 ++ vma->vm_mm->call_dl_resolve = 0UL;
4430 ++}
4431 ++
4432 ++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4433 ++{
4434 ++ unsigned int *kaddr;
4435 ++
4436 ++ vmf->page = alloc_page(GFP_HIGHUSER);
4437 ++ if (!vmf->page)
4438 ++ return VM_FAULT_OOM;
4439 ++
4440 ++ kaddr = kmap(vmf->page);
4441 ++ memset(kaddr, 0, PAGE_SIZE);
4442 ++ kaddr[0] = 0x9DE3BFA8U; /* save */
4443 ++ flush_dcache_page(vmf->page);
4444 ++ kunmap(vmf->page);
4445 ++ return VM_FAULT_MAJOR;
4446 ++}
4447 ++
4448 ++static const struct vm_operations_struct pax_vm_ops = {
4449 ++ .close = pax_emuplt_close,
4450 ++ .fault = pax_emuplt_fault
4451 ++};
4452 ++
4453 ++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4454 ++{
4455 ++ int ret;
4456 ++
4457 ++ INIT_LIST_HEAD(&vma->anon_vma_chain);
4458 ++ vma->vm_mm = current->mm;
4459 ++ vma->vm_start = addr;
4460 ++ vma->vm_end = addr + PAGE_SIZE;
4461 ++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4462 ++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4463 ++ vma->vm_ops = &pax_vm_ops;
4464 ++
4465 ++ ret = insert_vm_struct(current->mm, vma);
4466 ++ if (ret)
4467 ++ return ret;
4468 ++
4469 ++ ++current->mm->total_vm;
4470 ++ return 0;
4471 ++}
4472 ++#endif
4473 ++
4474 ++/*
4475 ++ * PaX: decide what to do with offenders (regs->tpc = fault address)
4476 ++ *
4477 ++ * returns 1 when task should be killed
4478 ++ * 2 when patched PLT trampoline was detected
4479 ++ * 3 when unpatched PLT trampoline was detected
4480 ++ */
4481 ++static int pax_handle_fetch_fault(struct pt_regs *regs)
4482 ++{
4483 ++
4484 ++#ifdef CONFIG_PAX_EMUPLT
4485 ++ int err;
4486 ++
4487 ++ do { /* PaX: patched PLT emulation #1 */
4488 ++ unsigned int sethi1, sethi2, jmpl;
4489 ++
4490 ++ err = get_user(sethi1, (unsigned int *)regs->tpc);
4491 ++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4492 ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4493 ++
4494 ++ if (err)
4495 ++ break;
4496 ++
4497 ++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4498 ++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4499 ++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4500 ++ {
4501 ++ unsigned long addr;
4502 ++
4503 ++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4504 ++ addr = regs->u_regs[UREG_G1];
4505 ++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4506 ++
4507 ++ if (test_thread_flag(TIF_32BIT))
4508 ++ addr &= 0xFFFFFFFFUL;
4509 ++
4510 ++ regs->tpc = addr;
4511 ++ regs->tnpc = addr+4;
4512 ++ return 2;
4513 ++ }
4514 ++ } while (0);
4515 ++
4516 ++ { /* PaX: patched PLT emulation #2 */
4517 ++ unsigned int ba;
4518 ++
4519 ++ err = get_user(ba, (unsigned int *)regs->tpc);
4520 ++
4521 ++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4522 ++ unsigned long addr;
4523 ++
4524 ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4525 ++
4526 ++ if (test_thread_flag(TIF_32BIT))
4527 ++ addr &= 0xFFFFFFFFUL;
4528 ++
4529 ++ regs->tpc = addr;
4530 ++ regs->tnpc = addr+4;
4531 ++ return 2;
4532 ++ }
4533 ++ }
4534 ++
4535 ++ do { /* PaX: patched PLT emulation #3 */
4536 ++ unsigned int sethi, jmpl, nop;
4537 ++
4538 ++ err = get_user(sethi, (unsigned int *)regs->tpc);
4539 ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4540 ++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4541 ++
4542 ++ if (err)
4543 ++ break;
4544 ++
4545 ++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4546 ++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4547 ++ nop == 0x01000000U)
4548 ++ {
4549 ++ unsigned long addr;
4550 ++
4551 ++ addr = (sethi & 0x003FFFFFU) << 10;
4552 ++ regs->u_regs[UREG_G1] = addr;
4553 ++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4554 ++
4555 ++ if (test_thread_flag(TIF_32BIT))
4556 ++ addr &= 0xFFFFFFFFUL;
4557 ++
4558 ++ regs->tpc = addr;
4559 ++ regs->tnpc = addr+4;
4560 ++ return 2;
4561 ++ }
4562 ++ } while (0);
4563 ++
4564 ++ do { /* PaX: patched PLT emulation #4 */
4565 ++ unsigned int sethi, mov1, call, mov2;
4566 ++
4567 ++ err = get_user(sethi, (unsigned int *)regs->tpc);
4568 ++ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4569 ++ err |= get_user(call, (unsigned int *)(regs->tpc+8));
4570 ++ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4571 ++
4572 ++ if (err)
4573 ++ break;
4574 ++
4575 ++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4576 ++ mov1 == 0x8210000FU &&
4577 ++ (call & 0xC0000000U) == 0x40000000U &&
4578 ++ mov2 == 0x9E100001U)
4579 ++ {
4580 ++ unsigned long addr;
4581 ++
4582 ++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4583 ++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4584 ++
4585 ++ if (test_thread_flag(TIF_32BIT))
4586 ++ addr &= 0xFFFFFFFFUL;
4587 ++
4588 ++ regs->tpc = addr;
4589 ++ regs->tnpc = addr+4;
4590 ++ return 2;
4591 ++ }
4592 ++ } while (0);
4593 ++
4594 ++ do { /* PaX: patched PLT emulation #5 */
4595 ++ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4596 ++
4597 ++ err = get_user(sethi, (unsigned int *)regs->tpc);
4598 ++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4599 ++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4600 ++ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4601 ++ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4602 ++ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4603 ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4604 ++ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4605 ++
4606 ++ if (err)
4607 ++ break;
4608 ++
4609 ++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4610 ++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4611 ++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4612 ++ (or1 & 0xFFFFE000U) == 0x82106000U &&
4613 ++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4614 ++ sllx == 0x83287020U &&
4615 ++ jmpl == 0x81C04005U &&
4616 ++ nop == 0x01000000U)
4617 ++ {
4618 ++ unsigned long addr;
4619 ++
4620 ++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4621 ++ regs->u_regs[UREG_G1] <<= 32;
4622 ++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4623 ++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4624 ++ regs->tpc = addr;
4625 ++ regs->tnpc = addr+4;
4626 ++ return 2;
4627 ++ }
4628 ++ } while (0);
4629 ++
4630 ++ do { /* PaX: patched PLT emulation #6 */
4631 ++ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4632 ++
4633 ++ err = get_user(sethi, (unsigned int *)regs->tpc);
4634 ++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4635 ++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4636 ++ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4637 ++ err |= get_user(or, (unsigned int *)(regs->tpc+16));
4638 ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4639 ++ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4640 ++
4641 ++ if (err)
4642 ++ break;
4643 ++
4644 ++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4645 ++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
4646 ++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4647 ++ sllx == 0x83287020U &&
4648 ++ (or & 0xFFFFE000U) == 0x8A116000U &&
4649 ++ jmpl == 0x81C04005U &&
4650 ++ nop == 0x01000000U)
4651 ++ {
4652 ++ unsigned long addr;
4653 ++
4654 ++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4655 ++ regs->u_regs[UREG_G1] <<= 32;
4656 ++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4657 ++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4658 ++ regs->tpc = addr;
4659 ++ regs->tnpc = addr+4;
4660 ++ return 2;
4661 ++ }
4662 ++ } while (0);
4663 ++
4664 ++ do { /* PaX: unpatched PLT emulation step 1 */
4665 ++ unsigned int sethi, ba, nop;
4666 ++
4667 ++ err = get_user(sethi, (unsigned int *)regs->tpc);
4668 ++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4669 ++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4670 ++
4671 ++ if (err)
4672 ++ break;
4673 ++
4674 ++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4675 ++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4676 ++ nop == 0x01000000U)
4677 ++ {
4678 ++ unsigned long addr;
4679 ++ unsigned int save, call;
4680 ++ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4681 ++
4682 ++ if ((ba & 0xFFC00000U) == 0x30800000U)
4683 ++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4684 ++ else
4685 ++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4686 ++
4687 ++ if (test_thread_flag(TIF_32BIT))
4688 ++ addr &= 0xFFFFFFFFUL;
4689 ++
4690 ++ err = get_user(save, (unsigned int *)addr);
4691 ++ err |= get_user(call, (unsigned int *)(addr+4));
4692 ++ err |= get_user(nop, (unsigned int *)(addr+8));
4693 ++ if (err)
4694 ++ break;
4695 ++
4696 ++#ifdef CONFIG_PAX_DLRESOLVE
4697 ++ if (save == 0x9DE3BFA8U &&
4698 ++ (call & 0xC0000000U) == 0x40000000U &&
4699 ++ nop == 0x01000000U)
4700 ++ {
4701 ++ struct vm_area_struct *vma;
4702 ++ unsigned long call_dl_resolve;
4703 ++
4704 ++ down_read(&current->mm->mmap_sem);
4705 ++ call_dl_resolve = current->mm->call_dl_resolve;
4706 ++ up_read(&current->mm->mmap_sem);
4707 ++ if (likely(call_dl_resolve))
4708 ++ goto emulate;
4709 ++
4710 ++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4711 ++
4712 ++ down_write(&current->mm->mmap_sem);
4713 ++ if (current->mm->call_dl_resolve) {
4714 ++ call_dl_resolve = current->mm->call_dl_resolve;
4715 ++ up_write(&current->mm->mmap_sem);
4716 ++ if (vma)
4717 ++ kmem_cache_free(vm_area_cachep, vma);
4718 ++ goto emulate;
4719 ++ }
4720 ++
4721 ++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4722 ++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4723 ++ up_write(&current->mm->mmap_sem);
4724 ++ if (vma)
4725 ++ kmem_cache_free(vm_area_cachep, vma);
4726 ++ return 1;
4727 ++ }
4728 ++
4729 ++ if (pax_insert_vma(vma, call_dl_resolve)) {
4730 ++ up_write(&current->mm->mmap_sem);
4731 ++ kmem_cache_free(vm_area_cachep, vma);
4732 ++ return 1;
4733 ++ }
4734 ++
4735 ++ current->mm->call_dl_resolve = call_dl_resolve;
4736 ++ up_write(&current->mm->mmap_sem);
4737 ++
4738 ++emulate:
4739 ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4740 ++ regs->tpc = call_dl_resolve;
4741 ++ regs->tnpc = addr+4;
4742 ++ return 3;
4743 ++ }
4744 ++#endif
4745 ++
4746 ++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4747 ++ if ((save & 0xFFC00000U) == 0x05000000U &&
4748 ++ (call & 0xFFFFE000U) == 0x85C0A000U &&
4749 ++ nop == 0x01000000U)
4750 ++ {
4751 ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4752 ++ regs->u_regs[UREG_G2] = addr + 4;
4753 ++ addr = (save & 0x003FFFFFU) << 10;
4754 ++ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4755 ++
4756 ++ if (test_thread_flag(TIF_32BIT))
4757 ++ addr &= 0xFFFFFFFFUL;
4758 ++
4759 ++ regs->tpc = addr;
4760 ++ regs->tnpc = addr+4;
4761 ++ return 3;
4762 ++ }
4763 ++
4764 ++ /* PaX: 64-bit PLT stub */
4765 ++ err = get_user(sethi1, (unsigned int *)addr);
4766 ++ err |= get_user(sethi2, (unsigned int *)(addr+4));
4767 ++ err |= get_user(or1, (unsigned int *)(addr+8));
4768 ++ err |= get_user(or2, (unsigned int *)(addr+12));
4769 ++ err |= get_user(sllx, (unsigned int *)(addr+16));
4770 ++ err |= get_user(add, (unsigned int *)(addr+20));
4771 ++ err |= get_user(jmpl, (unsigned int *)(addr+24));
4772 ++ err |= get_user(nop, (unsigned int *)(addr+28));
4773 ++ if (err)
4774 ++ break;
4775 ++
4776 ++ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4777 ++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4778 ++ (or1 & 0xFFFFE000U) == 0x88112000U &&
4779 ++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
4780 ++ sllx == 0x89293020U &&
4781 ++ add == 0x8A010005U &&
4782 ++ jmpl == 0x89C14000U &&
4783 ++ nop == 0x01000000U)
4784 ++ {
4785 ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4786 ++ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4787 ++ regs->u_regs[UREG_G4] <<= 32;
4788 ++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4789 ++ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4790 ++ regs->u_regs[UREG_G4] = addr + 24;
4791 ++ addr = regs->u_regs[UREG_G5];
4792 ++ regs->tpc = addr;
4793 ++ regs->tnpc = addr+4;
4794 ++ return 3;
4795 ++ }
4796 ++ }
4797 ++ } while (0);
4798 ++
4799 ++#ifdef CONFIG_PAX_DLRESOLVE
4800 ++ do { /* PaX: unpatched PLT emulation step 2 */
4801 ++ unsigned int save, call, nop;
4802 ++
4803 ++ err = get_user(save, (unsigned int *)(regs->tpc-4));
4804 ++ err |= get_user(call, (unsigned int *)regs->tpc);
4805 ++ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4806 ++ if (err)
4807 ++ break;
4808 ++
4809 ++ if (save == 0x9DE3BFA8U &&
4810 ++ (call & 0xC0000000U) == 0x40000000U &&
4811 ++ nop == 0x01000000U)
4812 ++ {
4813 ++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4814 ++
4815 ++ if (test_thread_flag(TIF_32BIT))
4816 ++ dl_resolve &= 0xFFFFFFFFUL;
4817 ++
4818 ++ regs->u_regs[UREG_RETPC] = regs->tpc;
4819 ++ regs->tpc = dl_resolve;
4820 ++ regs->tnpc = dl_resolve+4;
4821 ++ return 3;
4822 ++ }
4823 ++ } while (0);
4824 ++#endif
4825 ++
4826 ++ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4827 ++ unsigned int sethi, ba, nop;
4828 ++
4829 ++ err = get_user(sethi, (unsigned int *)regs->tpc);
4830 ++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4831 ++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4832 ++
4833 ++ if (err)
4834 ++ break;
4835 ++
4836 ++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4837 ++ (ba & 0xFFF00000U) == 0x30600000U &&
4838 ++ nop == 0x01000000U)
4839 ++ {
4840 ++ unsigned long addr;
4841 ++
4842 ++ addr = (sethi & 0x003FFFFFU) << 10;
4843 ++ regs->u_regs[UREG_G1] = addr;
4844 ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4845 ++
4846 ++ if (test_thread_flag(TIF_32BIT))
4847 ++ addr &= 0xFFFFFFFFUL;
4848 ++
4849 ++ regs->tpc = addr;
4850 ++ regs->tnpc = addr+4;
4851 ++ return 2;
4852 ++ }
4853 ++ } while (0);
4854 ++
4855 ++#endif
4856 ++
4857 ++ return 1;
4858 ++}
4859 ++
4860 ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4861 ++{
4862 ++ unsigned long i;
4863 ++
4864 ++ printk(KERN_ERR "PAX: bytes at PC: ");
4865 ++ for (i = 0; i < 8; i++) {
4866 ++ unsigned int c;
4867 ++ if (get_user(c, (unsigned int *)pc+i))
4868 ++ printk(KERN_CONT "???????? ");
4869 ++ else
4870 ++ printk(KERN_CONT "%08x ", c);
4871 ++ }
4872 ++ printk("\n");
4873 ++}
4874 ++#endif
4875 ++
4876 + asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4877 + {
4878 + struct mm_struct *mm = current->mm;
4879 +@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4880 + if (!vma)
4881 + goto bad_area;
4882 +
4883 ++#ifdef CONFIG_PAX_PAGEEXEC
4884 ++ /* PaX: detect ITLB misses on non-exec pages */
4885 ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4886 ++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4887 ++ {
4888 ++ if (address != regs->tpc)
4889 ++ goto good_area;
4890 ++
4891 ++ up_read(&mm->mmap_sem);
4892 ++ switch (pax_handle_fetch_fault(regs)) {
4893 ++
4894 ++#ifdef CONFIG_PAX_EMUPLT
4895 ++ case 2:
4896 ++ case 3:
4897 ++ return;
4898 ++#endif
4899 ++
4900 ++ }
4901 ++ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4902 ++ do_group_exit(SIGKILL);
4903 ++ }
4904 ++#endif
4905 ++
4906 + /* Pure DTLB misses do not tell us whether the fault causing
4907 + * load/store/atomic was a write or not, it only says that there
4908 + * was no match. So in such a case we (carefully) read the
4909 +diff -urNp linux-3.1.1/arch/sparc/mm/hugetlbpage.c linux-3.1.1/arch/sparc/mm/hugetlbpage.c
4910 +--- linux-3.1.1/arch/sparc/mm/hugetlbpage.c 2011-11-11 15:19:27.000000000 -0500
4911 ++++ linux-3.1.1/arch/sparc/mm/hugetlbpage.c 2011-11-16 18:39:07.000000000 -0500
4912 +@@ -68,7 +68,7 @@ full_search:
4913 + }
4914 + return -ENOMEM;
4915 + }
4916 +- if (likely(!vma || addr + len <= vma->vm_start)) {
4917 ++ if (likely(check_heap_stack_gap(vma, addr, len))) {
4918 + /*
4919 + * Remember the place where we stopped the search:
4920 + */
4921 +@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4922 + /* make sure it can fit in the remaining address space */
4923 + if (likely(addr > len)) {
4924 + vma = find_vma(mm, addr-len);
4925 +- if (!vma || addr <= vma->vm_start) {
4926 ++ if (check_heap_stack_gap(vma, addr - len, len)) {
4927 + /* remember the address as a hint for next time */
4928 + return (mm->free_area_cache = addr-len);
4929 + }
4930 +@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4931 + if (unlikely(mm->mmap_base < len))
4932 + goto bottomup;
4933 +
4934 +- addr = (mm->mmap_base-len) & HPAGE_MASK;
4935 ++ addr = mm->mmap_base - len;
4936 +
4937 + do {
4938 ++ addr &= HPAGE_MASK;
4939 + /*
4940 + * Lookup failure means no vma is above this address,
4941 + * else if new region fits below vma->vm_start,
4942 + * return with success:
4943 + */
4944 + vma = find_vma(mm, addr);
4945 +- if (likely(!vma || addr+len <= vma->vm_start)) {
4946 ++ if (likely(check_heap_stack_gap(vma, addr, len))) {
4947 + /* remember the address as a hint for next time */
4948 + return (mm->free_area_cache = addr);
4949 + }
4950 +@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4951 + mm->cached_hole_size = vma->vm_start - addr;
4952 +
4953 + /* try just below the current vma->vm_start */
4954 +- addr = (vma->vm_start-len) & HPAGE_MASK;
4955 +- } while (likely(len < vma->vm_start));
4956 ++ addr = skip_heap_stack_gap(vma, len);
4957 ++ } while (!IS_ERR_VALUE(addr));
4958 +
4959 + bottomup:
4960 + /*
4961 +@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4962 + if (addr) {
4963 + addr = ALIGN(addr, HPAGE_SIZE);
4964 + vma = find_vma(mm, addr);
4965 +- if (task_size - len >= addr &&
4966 +- (!vma || addr + len <= vma->vm_start))
4967 ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4968 + return addr;
4969 + }
4970 + if (mm->get_unmapped_area == arch_get_unmapped_area)
4971 +diff -urNp linux-3.1.1/arch/sparc/mm/init_32.c linux-3.1.1/arch/sparc/mm/init_32.c
4972 +--- linux-3.1.1/arch/sparc/mm/init_32.c 2011-11-11 15:19:27.000000000 -0500
4973 ++++ linux-3.1.1/arch/sparc/mm/init_32.c 2011-11-16 18:39:07.000000000 -0500
4974 +@@ -316,6 +316,9 @@ extern void device_scan(void);
4975 + pgprot_t PAGE_SHARED __read_mostly;
4976 + EXPORT_SYMBOL(PAGE_SHARED);
4977 +
4978 ++pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
4979 ++EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
4980 ++
4981 + void __init paging_init(void)
4982 + {
4983 + switch(sparc_cpu_model) {
4984 +@@ -344,17 +347,17 @@ void __init paging_init(void)
4985 +
4986 + /* Initialize the protection map with non-constant, MMU dependent values. */
4987 + protection_map[0] = PAGE_NONE;
4988 +- protection_map[1] = PAGE_READONLY;
4989 +- protection_map[2] = PAGE_COPY;
4990 +- protection_map[3] = PAGE_COPY;
4991 ++ protection_map[1] = PAGE_READONLY_NOEXEC;
4992 ++ protection_map[2] = PAGE_COPY_NOEXEC;
4993 ++ protection_map[3] = PAGE_COPY_NOEXEC;
4994 + protection_map[4] = PAGE_READONLY;
4995 + protection_map[5] = PAGE_READONLY;
4996 + protection_map[6] = PAGE_COPY;
4997 + protection_map[7] = PAGE_COPY;
4998 + protection_map[8] = PAGE_NONE;
4999 +- protection_map[9] = PAGE_READONLY;
5000 +- protection_map[10] = PAGE_SHARED;
5001 +- protection_map[11] = PAGE_SHARED;
5002 ++ protection_map[9] = PAGE_READONLY_NOEXEC;
5003 ++ protection_map[10] = PAGE_SHARED_NOEXEC;
5004 ++ protection_map[11] = PAGE_SHARED_NOEXEC;
5005 + protection_map[12] = PAGE_READONLY;
5006 + protection_map[13] = PAGE_READONLY;
5007 + protection_map[14] = PAGE_SHARED;
5008 +diff -urNp linux-3.1.1/arch/sparc/mm/Makefile linux-3.1.1/arch/sparc/mm/Makefile
5009 +--- linux-3.1.1/arch/sparc/mm/Makefile 2011-11-11 15:19:27.000000000 -0500
5010 ++++ linux-3.1.1/arch/sparc/mm/Makefile 2011-11-16 18:39:07.000000000 -0500
5011 +@@ -2,7 +2,7 @@
5012 + #
5013 +
5014 + asflags-y := -ansi
5015 +-ccflags-y := -Werror
5016 ++#ccflags-y := -Werror
5017 +
5018 + obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
5019 + obj-y += fault_$(BITS).o
5020 +diff -urNp linux-3.1.1/arch/sparc/mm/srmmu.c linux-3.1.1/arch/sparc/mm/srmmu.c
5021 +--- linux-3.1.1/arch/sparc/mm/srmmu.c 2011-11-11 15:19:27.000000000 -0500
5022 ++++ linux-3.1.1/arch/sparc/mm/srmmu.c 2011-11-16 18:39:07.000000000 -0500
5023 +@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5024 + PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5025 + BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5026 + BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5027 ++
5028 ++#ifdef CONFIG_PAX_PAGEEXEC
5029 ++ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5030 ++ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5031 ++ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5032 ++#endif
5033 ++
5034 + BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5035 + page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5036 +
5037 +diff -urNp linux-3.1.1/arch/um/include/asm/kmap_types.h linux-3.1.1/arch/um/include/asm/kmap_types.h
5038 +--- linux-3.1.1/arch/um/include/asm/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
5039 ++++ linux-3.1.1/arch/um/include/asm/kmap_types.h 2011-11-16 18:39:07.000000000 -0500
5040 +@@ -23,6 +23,7 @@ enum km_type {
5041 + KM_IRQ1,
5042 + KM_SOFTIRQ0,
5043 + KM_SOFTIRQ1,
5044 ++ KM_CLEARPAGE,
5045 + KM_TYPE_NR
5046 + };
5047 +
5048 +diff -urNp linux-3.1.1/arch/um/include/asm/page.h linux-3.1.1/arch/um/include/asm/page.h
5049 +--- linux-3.1.1/arch/um/include/asm/page.h 2011-11-11 15:19:27.000000000 -0500
5050 ++++ linux-3.1.1/arch/um/include/asm/page.h 2011-11-16 18:39:07.000000000 -0500
5051 +@@ -14,6 +14,9 @@
5052 + #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5053 + #define PAGE_MASK (~(PAGE_SIZE-1))
5054 +
5055 ++#define ktla_ktva(addr) (addr)
5056 ++#define ktva_ktla(addr) (addr)
5057 ++
5058 + #ifndef __ASSEMBLY__
5059 +
5060 + struct page;
5061 +diff -urNp linux-3.1.1/arch/um/kernel/process.c linux-3.1.1/arch/um/kernel/process.c
5062 +--- linux-3.1.1/arch/um/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
5063 ++++ linux-3.1.1/arch/um/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
5064 +@@ -404,22 +404,6 @@ int singlestepping(void * t)
5065 + return 2;
5066 + }
5067 +
5068 +-/*
5069 +- * Only x86 and x86_64 have an arch_align_stack().
5070 +- * All other arches have "#define arch_align_stack(x) (x)"
5071 +- * in their asm/system.h
5072 +- * As this is included in UML from asm-um/system-generic.h,
5073 +- * we can use it to behave as the subarch does.
5074 +- */
5075 +-#ifndef arch_align_stack
5076 +-unsigned long arch_align_stack(unsigned long sp)
5077 +-{
5078 +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5079 +- sp -= get_random_int() % 8192;
5080 +- return sp & ~0xf;
5081 +-}
5082 +-#endif
5083 +-
5084 + unsigned long get_wchan(struct task_struct *p)
5085 + {
5086 + unsigned long stack_page, sp, ip;
5087 +diff -urNp linux-3.1.1/arch/um/Makefile linux-3.1.1/arch/um/Makefile
5088 +--- linux-3.1.1/arch/um/Makefile 2011-11-11 15:19:27.000000000 -0500
5089 ++++ linux-3.1.1/arch/um/Makefile 2011-11-16 18:39:07.000000000 -0500
5090 +@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINE
5091 + $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5092 + $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
5093 +
5094 ++ifdef CONSTIFY_PLUGIN
5095 ++USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5096 ++endif
5097 ++
5098 + include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
5099 +
5100 + #This will adjust *FLAGS accordingly to the platform.
5101 +diff -urNp linux-3.1.1/arch/um/sys-i386/shared/sysdep/system.h linux-3.1.1/arch/um/sys-i386/shared/sysdep/system.h
5102 +--- linux-3.1.1/arch/um/sys-i386/shared/sysdep/system.h 2011-11-11 15:19:27.000000000 -0500
5103 ++++ linux-3.1.1/arch/um/sys-i386/shared/sysdep/system.h 2011-11-16 18:39:07.000000000 -0500
5104 +@@ -17,7 +17,7 @@
5105 + # define AT_VECTOR_SIZE_ARCH 1
5106 + #endif
5107 +
5108 +-extern unsigned long arch_align_stack(unsigned long sp);
5109 ++#define arch_align_stack(x) ((x) & ~0xfUL)
5110 +
5111 + void default_idle(void);
5112 +
5113 +diff -urNp linux-3.1.1/arch/um/sys-i386/syscalls.c linux-3.1.1/arch/um/sys-i386/syscalls.c
5114 +--- linux-3.1.1/arch/um/sys-i386/syscalls.c 2011-11-11 15:19:27.000000000 -0500
5115 ++++ linux-3.1.1/arch/um/sys-i386/syscalls.c 2011-11-16 18:39:07.000000000 -0500
5116 +@@ -11,6 +11,21 @@
5117 + #include "asm/uaccess.h"
5118 + #include "asm/unistd.h"
5119 +
5120 ++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5121 ++{
5122 ++ unsigned long pax_task_size = TASK_SIZE;
5123 ++
5124 ++#ifdef CONFIG_PAX_SEGMEXEC
5125 ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5126 ++ pax_task_size = SEGMEXEC_TASK_SIZE;
5127 ++#endif
5128 ++
5129 ++ if (len > pax_task_size || addr > pax_task_size - len)
5130 ++ return -EINVAL;
5131 ++
5132 ++ return 0;
5133 ++}
5134 ++
5135 + /*
5136 + * The prototype on i386 is:
5137 + *
5138 +diff -urNp linux-3.1.1/arch/um/sys-x86_64/shared/sysdep/system.h linux-3.1.1/arch/um/sys-x86_64/shared/sysdep/system.h
5139 +--- linux-3.1.1/arch/um/sys-x86_64/shared/sysdep/system.h 2011-11-11 15:19:27.000000000 -0500
5140 ++++ linux-3.1.1/arch/um/sys-x86_64/shared/sysdep/system.h 2011-11-16 18:39:07.000000000 -0500
5141 +@@ -17,7 +17,7 @@
5142 + # define AT_VECTOR_SIZE_ARCH 1
5143 + #endif
5144 +
5145 +-extern unsigned long arch_align_stack(unsigned long sp);
5146 ++#define arch_align_stack(x) ((x) & ~0xfUL)
5147 +
5148 + void default_idle(void);
5149 +
5150 +diff -urNp linux-3.1.1/arch/x86/boot/bitops.h linux-3.1.1/arch/x86/boot/bitops.h
5151 +--- linux-3.1.1/arch/x86/boot/bitops.h 2011-11-11 15:19:27.000000000 -0500
5152 ++++ linux-3.1.1/arch/x86/boot/bitops.h 2011-11-16 18:39:07.000000000 -0500
5153 +@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5154 + u8 v;
5155 + const u32 *p = (const u32 *)addr;
5156 +
5157 +- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5158 ++ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5159 + return v;
5160 + }
5161 +
5162 +@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5163 +
5164 + static inline void set_bit(int nr, void *addr)
5165 + {
5166 +- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5167 ++ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5168 + }
5169 +
5170 + #endif /* BOOT_BITOPS_H */
5171 +diff -urNp linux-3.1.1/arch/x86/boot/boot.h linux-3.1.1/arch/x86/boot/boot.h
5172 +--- linux-3.1.1/arch/x86/boot/boot.h 2011-11-11 15:19:27.000000000 -0500
5173 ++++ linux-3.1.1/arch/x86/boot/boot.h 2011-11-16 18:39:07.000000000 -0500
5174 +@@ -85,7 +85,7 @@ static inline void io_delay(void)
5175 + static inline u16 ds(void)
5176 + {
5177 + u16 seg;
5178 +- asm("movw %%ds,%0" : "=rm" (seg));
5179 ++ asm volatile("movw %%ds,%0" : "=rm" (seg));
5180 + return seg;
5181 + }
5182 +
5183 +@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5184 + static inline int memcmp(const void *s1, const void *s2, size_t len)
5185 + {
5186 + u8 diff;
5187 +- asm("repe; cmpsb; setnz %0"
5188 ++ asm volatile("repe; cmpsb; setnz %0"
5189 + : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5190 + return diff;
5191 + }
5192 +diff -urNp linux-3.1.1/arch/x86/boot/compressed/head_32.S linux-3.1.1/arch/x86/boot/compressed/head_32.S
5193 +--- linux-3.1.1/arch/x86/boot/compressed/head_32.S 2011-11-11 15:19:27.000000000 -0500
5194 ++++ linux-3.1.1/arch/x86/boot/compressed/head_32.S 2011-11-16 18:39:07.000000000 -0500
5195 +@@ -76,7 +76,7 @@ ENTRY(startup_32)
5196 + notl %eax
5197 + andl %eax, %ebx
5198 + #else
5199 +- movl $LOAD_PHYSICAL_ADDR, %ebx
5200 ++ movl $____LOAD_PHYSICAL_ADDR, %ebx
5201 + #endif
5202 +
5203 + /* Target address to relocate to for decompression */
5204 +@@ -162,7 +162,7 @@ relocated:
5205 + * and where it was actually loaded.
5206 + */
5207 + movl %ebp, %ebx
5208 +- subl $LOAD_PHYSICAL_ADDR, %ebx
5209 ++ subl $____LOAD_PHYSICAL_ADDR, %ebx
5210 + jz 2f /* Nothing to be done if loaded at compiled addr. */
5211 + /*
5212 + * Process relocations.
5213 +@@ -170,8 +170,7 @@ relocated:
5214 +
5215 + 1: subl $4, %edi
5216 + movl (%edi), %ecx
5217 +- testl %ecx, %ecx
5218 +- jz 2f
5219 ++ jecxz 2f
5220 + addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5221 + jmp 1b
5222 + 2:
5223 +diff -urNp linux-3.1.1/arch/x86/boot/compressed/head_64.S linux-3.1.1/arch/x86/boot/compressed/head_64.S
5224 +--- linux-3.1.1/arch/x86/boot/compressed/head_64.S 2011-11-11 15:19:27.000000000 -0500
5225 ++++ linux-3.1.1/arch/x86/boot/compressed/head_64.S 2011-11-16 18:39:07.000000000 -0500
5226 +@@ -91,7 +91,7 @@ ENTRY(startup_32)
5227 + notl %eax
5228 + andl %eax, %ebx
5229 + #else
5230 +- movl $LOAD_PHYSICAL_ADDR, %ebx
5231 ++ movl $____LOAD_PHYSICAL_ADDR, %ebx
5232 + #endif
5233 +
5234 + /* Target address to relocate to for decompression */
5235 +@@ -233,7 +233,7 @@ ENTRY(startup_64)
5236 + notq %rax
5237 + andq %rax, %rbp
5238 + #else
5239 +- movq $LOAD_PHYSICAL_ADDR, %rbp
5240 ++ movq $____LOAD_PHYSICAL_ADDR, %rbp
5241 + #endif
5242 +
5243 + /* Target address to relocate to for decompression */
5244 +diff -urNp linux-3.1.1/arch/x86/boot/compressed/Makefile linux-3.1.1/arch/x86/boot/compressed/Makefile
5245 +--- linux-3.1.1/arch/x86/boot/compressed/Makefile 2011-11-11 15:19:27.000000000 -0500
5246 ++++ linux-3.1.1/arch/x86/boot/compressed/Makefile 2011-11-16 18:39:07.000000000 -0500
5247 +@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5248 + KBUILD_CFLAGS += $(cflags-y)
5249 + KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5250 + KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5251 ++ifdef CONSTIFY_PLUGIN
5252 ++KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5253 ++endif
5254 +
5255 + KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5256 + GCOV_PROFILE := n
5257 +diff -urNp linux-3.1.1/arch/x86/boot/compressed/misc.c linux-3.1.1/arch/x86/boot/compressed/misc.c
5258 +--- linux-3.1.1/arch/x86/boot/compressed/misc.c 2011-11-11 15:19:27.000000000 -0500
5259 ++++ linux-3.1.1/arch/x86/boot/compressed/misc.c 2011-11-16 18:39:07.000000000 -0500
5260 +@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5261 + case PT_LOAD:
5262 + #ifdef CONFIG_RELOCATABLE
5263 + dest = output;
5264 +- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5265 ++ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5266 + #else
5267 + dest = (void *)(phdr->p_paddr);
5268 + #endif
5269 +@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5270 + error("Destination address too large");
5271 + #endif
5272 + #ifndef CONFIG_RELOCATABLE
5273 +- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5274 ++ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5275 + error("Wrong destination address");
5276 + #endif
5277 +
5278 +diff -urNp linux-3.1.1/arch/x86/boot/compressed/relocs.c linux-3.1.1/arch/x86/boot/compressed/relocs.c
5279 +--- linux-3.1.1/arch/x86/boot/compressed/relocs.c 2011-11-11 15:19:27.000000000 -0500
5280 ++++ linux-3.1.1/arch/x86/boot/compressed/relocs.c 2011-11-16 18:39:07.000000000 -0500
5281 +@@ -13,8 +13,11 @@
5282 +
5283 + static void die(char *fmt, ...);
5284 +
5285 ++#include "../../../../include/generated/autoconf.h"
5286 ++
5287 + #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5288 + static Elf32_Ehdr ehdr;
5289 ++static Elf32_Phdr *phdr;
5290 + static unsigned long reloc_count, reloc_idx;
5291 + static unsigned long *relocs;
5292 +
5293 +@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5294 + }
5295 + }
5296 +
5297 ++static void read_phdrs(FILE *fp)
5298 ++{
5299 ++ unsigned int i;
5300 ++
5301 ++ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5302 ++ if (!phdr) {
5303 ++ die("Unable to allocate %d program headers\n",
5304 ++ ehdr.e_phnum);
5305 ++ }
5306 ++ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5307 ++ die("Seek to %d failed: %s\n",
5308 ++ ehdr.e_phoff, strerror(errno));
5309 ++ }
5310 ++ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5311 ++ die("Cannot read ELF program headers: %s\n",
5312 ++ strerror(errno));
5313 ++ }
5314 ++ for(i = 0; i < ehdr.e_phnum; i++) {
5315 ++ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5316 ++ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5317 ++ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5318 ++ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5319 ++ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5320 ++ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5321 ++ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5322 ++ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5323 ++ }
5324 ++
5325 ++}
5326 ++
5327 + static void read_shdrs(FILE *fp)
5328 + {
5329 +- int i;
5330 ++ unsigned int i;
5331 + Elf32_Shdr shdr;
5332 +
5333 + secs = calloc(ehdr.e_shnum, sizeof(struct section));
5334 +@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5335 +
5336 + static void read_strtabs(FILE *fp)
5337 + {
5338 +- int i;
5339 ++ unsigned int i;
5340 + for (i = 0; i < ehdr.e_shnum; i++) {
5341 + struct section *sec = &secs[i];
5342 + if (sec->shdr.sh_type != SHT_STRTAB) {
5343 +@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5344 +
5345 + static void read_symtabs(FILE *fp)
5346 + {
5347 +- int i,j;
5348 ++ unsigned int i,j;
5349 + for (i = 0; i < ehdr.e_shnum; i++) {
5350 + struct section *sec = &secs[i];
5351 + if (sec->shdr.sh_type != SHT_SYMTAB) {
5352 +@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5353 +
5354 + static void read_relocs(FILE *fp)
5355 + {
5356 +- int i,j;
5357 ++ unsigned int i,j;
5358 ++ uint32_t base;
5359 ++
5360 + for (i = 0; i < ehdr.e_shnum; i++) {
5361 + struct section *sec = &secs[i];
5362 + if (sec->shdr.sh_type != SHT_REL) {
5363 +@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5364 + die("Cannot read symbol table: %s\n",
5365 + strerror(errno));
5366 + }
5367 ++ base = 0;
5368 ++ for (j = 0; j < ehdr.e_phnum; j++) {
5369 ++ if (phdr[j].p_type != PT_LOAD )
5370 ++ continue;
5371 ++ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5372 ++ continue;
5373 ++ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5374 ++ break;
5375 ++ }
5376 + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5377 + Elf32_Rel *rel = &sec->reltab[j];
5378 +- rel->r_offset = elf32_to_cpu(rel->r_offset);
5379 ++ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5380 + rel->r_info = elf32_to_cpu(rel->r_info);
5381 + }
5382 + }
5383 +@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5384 +
5385 + static void print_absolute_symbols(void)
5386 + {
5387 +- int i;
5388 ++ unsigned int i;
5389 + printf("Absolute symbols\n");
5390 + printf(" Num: Value Size Type Bind Visibility Name\n");
5391 + for (i = 0; i < ehdr.e_shnum; i++) {
5392 + struct section *sec = &secs[i];
5393 + char *sym_strtab;
5394 + Elf32_Sym *sh_symtab;
5395 +- int j;
5396 ++ unsigned int j;
5397 +
5398 + if (sec->shdr.sh_type != SHT_SYMTAB) {
5399 + continue;
5400 +@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5401 +
5402 + static void print_absolute_relocs(void)
5403 + {
5404 +- int i, printed = 0;
5405 ++ unsigned int i, printed = 0;
5406 +
5407 + for (i = 0; i < ehdr.e_shnum; i++) {
5408 + struct section *sec = &secs[i];
5409 + struct section *sec_applies, *sec_symtab;
5410 + char *sym_strtab;
5411 + Elf32_Sym *sh_symtab;
5412 +- int j;
5413 ++ unsigned int j;
5414 + if (sec->shdr.sh_type != SHT_REL) {
5415 + continue;
5416 + }
5417 +@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5418 +
5419 + static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5420 + {
5421 +- int i;
5422 ++ unsigned int i;
5423 + /* Walk through the relocations */
5424 + for (i = 0; i < ehdr.e_shnum; i++) {
5425 + char *sym_strtab;
5426 + Elf32_Sym *sh_symtab;
5427 + struct section *sec_applies, *sec_symtab;
5428 +- int j;
5429 ++ unsigned int j;
5430 + struct section *sec = &secs[i];
5431 +
5432 + if (sec->shdr.sh_type != SHT_REL) {
5433 +@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5434 + !is_rel_reloc(sym_name(sym_strtab, sym))) {
5435 + continue;
5436 + }
5437 ++ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5438 ++ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5439 ++ continue;
5440 ++
5441 ++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5442 ++ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5443 ++ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5444 ++ continue;
5445 ++ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5446 ++ continue;
5447 ++ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5448 ++ continue;
5449 ++ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5450 ++ continue;
5451 ++#endif
5452 ++
5453 + switch (r_type) {
5454 + case R_386_NONE:
5455 + case R_386_PC32:
5456 +@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5457 +
5458 + static void emit_relocs(int as_text)
5459 + {
5460 +- int i;
5461 ++ unsigned int i;
5462 + /* Count how many relocations I have and allocate space for them. */
5463 + reloc_count = 0;
5464 + walk_relocs(count_reloc);
5465 +@@ -665,6 +725,7 @@ int main(int argc, char **argv)
5466 + fname, strerror(errno));
5467 + }
5468 + read_ehdr(fp);
5469 ++ read_phdrs(fp);
5470 + read_shdrs(fp);
5471 + read_strtabs(fp);
5472 + read_symtabs(fp);
5473 +diff -urNp linux-3.1.1/arch/x86/boot/cpucheck.c linux-3.1.1/arch/x86/boot/cpucheck.c
5474 +--- linux-3.1.1/arch/x86/boot/cpucheck.c 2011-11-11 15:19:27.000000000 -0500
5475 ++++ linux-3.1.1/arch/x86/boot/cpucheck.c 2011-11-16 18:39:07.000000000 -0500
5476 +@@ -74,7 +74,7 @@ static int has_fpu(void)
5477 + u16 fcw = -1, fsw = -1;
5478 + u32 cr0;
5479 +
5480 +- asm("movl %%cr0,%0" : "=r" (cr0));
5481 ++ asm volatile("movl %%cr0,%0" : "=r" (cr0));
5482 + if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5483 + cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5484 + asm volatile("movl %0,%%cr0" : : "r" (cr0));
5485 +@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5486 + {
5487 + u32 f0, f1;
5488 +
5489 +- asm("pushfl ; "
5490 ++ asm volatile("pushfl ; "
5491 + "pushfl ; "
5492 + "popl %0 ; "
5493 + "movl %0,%1 ; "
5494 +@@ -115,7 +115,7 @@ static void get_flags(void)
5495 + set_bit(X86_FEATURE_FPU, cpu.flags);
5496 +
5497 + if (has_eflag(X86_EFLAGS_ID)) {
5498 +- asm("cpuid"
5499 ++ asm volatile("cpuid"
5500 + : "=a" (max_intel_level),
5501 + "=b" (cpu_vendor[0]),
5502 + "=d" (cpu_vendor[1]),
5503 +@@ -124,7 +124,7 @@ static void get_flags(void)
5504 +
5505 + if (max_intel_level >= 0x00000001 &&
5506 + max_intel_level <= 0x0000ffff) {
5507 +- asm("cpuid"
5508 ++ asm volatile("cpuid"
5509 + : "=a" (tfms),
5510 + "=c" (cpu.flags[4]),
5511 + "=d" (cpu.flags[0])
5512 +@@ -136,7 +136,7 @@ static void get_flags(void)
5513 + cpu.model += ((tfms >> 16) & 0xf) << 4;
5514 + }
5515 +
5516 +- asm("cpuid"
5517 ++ asm volatile("cpuid"
5518 + : "=a" (max_amd_level)
5519 + : "a" (0x80000000)
5520 + : "ebx", "ecx", "edx");
5521 +@@ -144,7 +144,7 @@ static void get_flags(void)
5522 + if (max_amd_level >= 0x80000001 &&
5523 + max_amd_level <= 0x8000ffff) {
5524 + u32 eax = 0x80000001;
5525 +- asm("cpuid"
5526 ++ asm volatile("cpuid"
5527 + : "+a" (eax),
5528 + "=c" (cpu.flags[6]),
5529 + "=d" (cpu.flags[1])
5530 +@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5531 + u32 ecx = MSR_K7_HWCR;
5532 + u32 eax, edx;
5533 +
5534 +- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5535 ++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5536 + eax &= ~(1 << 15);
5537 +- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5538 ++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5539 +
5540 + get_flags(); /* Make sure it really did something */
5541 + err = check_flags();
5542 +@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5543 + u32 ecx = MSR_VIA_FCR;
5544 + u32 eax, edx;
5545 +
5546 +- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5547 ++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5548 + eax |= (1<<1)|(1<<7);
5549 +- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5550 ++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5551 +
5552 + set_bit(X86_FEATURE_CX8, cpu.flags);
5553 + err = check_flags();
5554 +@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5555 + u32 eax, edx;
5556 + u32 level = 1;
5557 +
5558 +- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5559 +- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5560 +- asm("cpuid"
5561 ++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5562 ++ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5563 ++ asm volatile("cpuid"
5564 + : "+a" (level), "=d" (cpu.flags[0])
5565 + : : "ecx", "ebx");
5566 +- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5567 ++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5568 +
5569 + err = check_flags();
5570 + }
5571 +diff -urNp linux-3.1.1/arch/x86/boot/header.S linux-3.1.1/arch/x86/boot/header.S
5572 +--- linux-3.1.1/arch/x86/boot/header.S 2011-11-11 15:19:27.000000000 -0500
5573 ++++ linux-3.1.1/arch/x86/boot/header.S 2011-11-16 18:39:07.000000000 -0500
5574 +@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5575 + # single linked list of
5576 + # struct setup_data
5577 +
5578 +-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5579 ++pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5580 +
5581 + #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5582 + #define VO_INIT_SIZE (VO__end - VO__text)
5583 +diff -urNp linux-3.1.1/arch/x86/boot/Makefile linux-3.1.1/arch/x86/boot/Makefile
5584 +--- linux-3.1.1/arch/x86/boot/Makefile 2011-11-11 15:19:27.000000000 -0500
5585 ++++ linux-3.1.1/arch/x86/boot/Makefile 2011-11-16 18:39:07.000000000 -0500
5586 +@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5587 + $(call cc-option, -fno-stack-protector) \
5588 + $(call cc-option, -mpreferred-stack-boundary=2)
5589 + KBUILD_CFLAGS += $(call cc-option, -m32)
5590 ++ifdef CONSTIFY_PLUGIN
5591 ++KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5592 ++endif
5593 + KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5594 + GCOV_PROFILE := n
5595 +
5596 +diff -urNp linux-3.1.1/arch/x86/boot/memory.c linux-3.1.1/arch/x86/boot/memory.c
5597 +--- linux-3.1.1/arch/x86/boot/memory.c 2011-11-11 15:19:27.000000000 -0500
5598 ++++ linux-3.1.1/arch/x86/boot/memory.c 2011-11-16 18:39:07.000000000 -0500
5599 +@@ -19,7 +19,7 @@
5600 +
5601 + static int detect_memory_e820(void)
5602 + {
5603 +- int count = 0;
5604 ++ unsigned int count = 0;
5605 + struct biosregs ireg, oreg;
5606 + struct e820entry *desc = boot_params.e820_map;
5607 + static struct e820entry buf; /* static so it is zeroed */
5608 +diff -urNp linux-3.1.1/arch/x86/boot/video.c linux-3.1.1/arch/x86/boot/video.c
5609 +--- linux-3.1.1/arch/x86/boot/video.c 2011-11-11 15:19:27.000000000 -0500
5610 ++++ linux-3.1.1/arch/x86/boot/video.c 2011-11-16 18:39:07.000000000 -0500
5611 +@@ -96,7 +96,7 @@ static void store_mode_params(void)
5612 + static unsigned int get_entry(void)
5613 + {
5614 + char entry_buf[4];
5615 +- int i, len = 0;
5616 ++ unsigned int i, len = 0;
5617 + int key;
5618 + unsigned int v;
5619 +
5620 +diff -urNp linux-3.1.1/arch/x86/boot/video-vesa.c linux-3.1.1/arch/x86/boot/video-vesa.c
5621 +--- linux-3.1.1/arch/x86/boot/video-vesa.c 2011-11-11 15:19:27.000000000 -0500
5622 ++++ linux-3.1.1/arch/x86/boot/video-vesa.c 2011-11-16 18:39:07.000000000 -0500
5623 +@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5624 +
5625 + boot_params.screen_info.vesapm_seg = oreg.es;
5626 + boot_params.screen_info.vesapm_off = oreg.di;
5627 ++ boot_params.screen_info.vesapm_size = oreg.cx;
5628 + }
5629 +
5630 + /*
5631 +diff -urNp linux-3.1.1/arch/x86/crypto/aes-x86_64-asm_64.S linux-3.1.1/arch/x86/crypto/aes-x86_64-asm_64.S
5632 +--- linux-3.1.1/arch/x86/crypto/aes-x86_64-asm_64.S 2011-11-11 15:19:27.000000000 -0500
5633 ++++ linux-3.1.1/arch/x86/crypto/aes-x86_64-asm_64.S 2011-11-16 18:39:07.000000000 -0500
5634 +@@ -8,6 +8,8 @@
5635 + * including this sentence is retained in full.
5636 + */
5637 +
5638 ++#include <asm/alternative-asm.h>
5639 ++
5640 + .extern crypto_ft_tab
5641 + .extern crypto_it_tab
5642 + .extern crypto_fl_tab
5643 +@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
5644 + je B192; \
5645 + leaq 32(r9),r9;
5646 +
5647 ++#define ret pax_force_retaddr; ret
5648 ++
5649 + #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
5650 + movq r1,r2; \
5651 + movq r3,r4; \
5652 +diff -urNp linux-3.1.1/arch/x86/crypto/salsa20-x86_64-asm_64.S linux-3.1.1/arch/x86/crypto/salsa20-x86_64-asm_64.S
5653 +--- linux-3.1.1/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-11-11 15:19:27.000000000 -0500
5654 ++++ linux-3.1.1/arch/x86/crypto/salsa20-x86_64-asm_64.S 2011-11-16 18:39:07.000000000 -0500
5655 +@@ -1,3 +1,5 @@
5656 ++#include <asm/alternative-asm.h>
5657 ++
5658 + # enter ECRYPT_encrypt_bytes
5659 + .text
5660 + .p2align 5
5661 +@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
5662 + add %r11,%rsp
5663 + mov %rdi,%rax
5664 + mov %rsi,%rdx
5665 ++ pax_force_retaddr
5666 + ret
5667 + # bytesatleast65:
5668 + ._bytesatleast65:
5669 +@@ -891,6 +894,7 @@ ECRYPT_keysetup:
5670 + add %r11,%rsp
5671 + mov %rdi,%rax
5672 + mov %rsi,%rdx
5673 ++ pax_force_retaddr
5674 + ret
5675 + # enter ECRYPT_ivsetup
5676 + .text
5677 +@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
5678 + add %r11,%rsp
5679 + mov %rdi,%rax
5680 + mov %rsi,%rdx
5681 ++ pax_force_retaddr
5682 + ret
5683 +diff -urNp linux-3.1.1/arch/x86/crypto/twofish-x86_64-asm_64.S linux-3.1.1/arch/x86/crypto/twofish-x86_64-asm_64.S
5684 +--- linux-3.1.1/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-11-11 15:19:27.000000000 -0500
5685 ++++ linux-3.1.1/arch/x86/crypto/twofish-x86_64-asm_64.S 2011-11-16 18:39:07.000000000 -0500
5686 +@@ -21,6 +21,7 @@
5687 + .text
5688 +
5689 + #include <asm/asm-offsets.h>
5690 ++#include <asm/alternative-asm.h>
5691 +
5692 + #define a_offset 0
5693 + #define b_offset 4
5694 +@@ -269,6 +270,7 @@ twofish_enc_blk:
5695 +
5696 + popq R1
5697 + movq $1,%rax
5698 ++ pax_force_retaddr
5699 + ret
5700 +
5701 + twofish_dec_blk:
5702 +@@ -321,4 +323,5 @@ twofish_dec_blk:
5703 +
5704 + popq R1
5705 + movq $1,%rax
5706 ++ pax_force_retaddr
5707 + ret
5708 +diff -urNp linux-3.1.1/arch/x86/ia32/ia32_aout.c linux-3.1.1/arch/x86/ia32/ia32_aout.c
5709 +--- linux-3.1.1/arch/x86/ia32/ia32_aout.c 2011-11-11 15:19:27.000000000 -0500
5710 ++++ linux-3.1.1/arch/x86/ia32/ia32_aout.c 2011-11-16 18:40:08.000000000 -0500
5711 +@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5712 + unsigned long dump_start, dump_size;
5713 + struct user32 dump;
5714 +
5715 ++ memset(&dump, 0, sizeof(dump));
5716 ++
5717 + fs = get_fs();
5718 + set_fs(KERNEL_DS);
5719 + has_dumped = 1;
5720 +diff -urNp linux-3.1.1/arch/x86/ia32/ia32entry.S linux-3.1.1/arch/x86/ia32/ia32entry.S
5721 +--- linux-3.1.1/arch/x86/ia32/ia32entry.S 2011-11-11 15:19:27.000000000 -0500
5722 ++++ linux-3.1.1/arch/x86/ia32/ia32entry.S 2011-11-17 18:27:57.000000000 -0500
5723 +@@ -13,7 +13,9 @@
5724 + #include <asm/thread_info.h>
5725 + #include <asm/segment.h>
5726 + #include <asm/irqflags.h>
5727 ++#include <asm/pgtable.h>
5728 + #include <linux/linkage.h>
5729 ++#include <asm/alternative-asm.h>
5730 +
5731 + /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5732 + #include <linux/elf-em.h>
5733 +@@ -95,6 +97,29 @@ ENTRY(native_irq_enable_sysexit)
5734 + ENDPROC(native_irq_enable_sysexit)
5735 + #endif
5736 +
5737 ++ .macro pax_enter_kernel_user
5738 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
5739 ++ call pax_enter_kernel_user
5740 ++#endif
5741 ++ .endm
5742 ++
5743 ++ .macro pax_exit_kernel_user
5744 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
5745 ++ call pax_exit_kernel_user
5746 ++#endif
5747 ++#ifdef CONFIG_PAX_RANDKSTACK
5748 ++ pushq %rax
5749 ++ call pax_randomize_kstack
5750 ++ popq %rax
5751 ++#endif
5752 ++ .endm
5753 ++
5754 ++.macro pax_erase_kstack
5755 ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5756 ++ call pax_erase_kstack
5757 ++#endif
5758 ++.endm
5759 ++
5760 + /*
5761 + * 32bit SYSENTER instruction entry.
5762 + *
5763 +@@ -121,7 +146,7 @@ ENTRY(ia32_sysenter_target)
5764 + CFI_REGISTER rsp,rbp
5765 + SWAPGS_UNSAFE_STACK
5766 + movq PER_CPU_VAR(kernel_stack), %rsp
5767 +- addq $(KERNEL_STACK_OFFSET),%rsp
5768 ++ pax_enter_kernel_user
5769 + /*
5770 + * No need to follow this irqs on/off section: the syscall
5771 + * disabled irqs, here we enable it straight after entry:
5772 +@@ -134,7 +159,8 @@ ENTRY(ia32_sysenter_target)
5773 + CFI_REL_OFFSET rsp,0
5774 + pushfq_cfi
5775 + /*CFI_REL_OFFSET rflags,0*/
5776 +- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5777 ++ GET_THREAD_INFO(%r10)
5778 ++ movl TI_sysenter_return(%r10), %r10d
5779 + CFI_REGISTER rip,r10
5780 + pushq_cfi $__USER32_CS
5781 + /*CFI_REL_OFFSET cs,0*/
5782 +@@ -146,6 +172,12 @@ ENTRY(ia32_sysenter_target)
5783 + SAVE_ARGS 0,1,0
5784 + /* no need to do an access_ok check here because rbp has been
5785 + 32bit zero extended */
5786 ++
5787 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
5788 ++ mov $PAX_USER_SHADOW_BASE,%r10
5789 ++ add %r10,%rbp
5790 ++#endif
5791 ++
5792 + 1: movl (%rbp),%ebp
5793 + .section __ex_table,"a"
5794 + .quad 1b,ia32_badarg
5795 +@@ -168,6 +200,8 @@ sysenter_dispatch:
5796 + testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5797 + jnz sysexit_audit
5798 + sysexit_from_sys_call:
5799 ++ pax_exit_kernel_user
5800 ++ pax_erase_kstack
5801 + andl $~TS_COMPAT,TI_status(%r10)
5802 + /* clear IF, that popfq doesn't enable interrupts early */
5803 + andl $~0x200,EFLAGS-R11(%rsp)
5804 +@@ -194,6 +228,9 @@ sysexit_from_sys_call:
5805 + movl %eax,%esi /* 2nd arg: syscall number */
5806 + movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5807 + call audit_syscall_entry
5808 ++
5809 ++ pax_erase_kstack
5810 ++
5811 + movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5812 + cmpq $(IA32_NR_syscalls-1),%rax
5813 + ja ia32_badsys
5814 +@@ -246,6 +283,9 @@ sysenter_tracesys:
5815 + movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5816 + movq %rsp,%rdi /* &pt_regs -> arg1 */
5817 + call syscall_trace_enter
5818 ++
5819 ++ pax_erase_kstack
5820 ++
5821 + LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5822 + RESTORE_REST
5823 + cmpq $(IA32_NR_syscalls-1),%rax
5824 +@@ -277,19 +317,24 @@ ENDPROC(ia32_sysenter_target)
5825 + ENTRY(ia32_cstar_target)
5826 + CFI_STARTPROC32 simple
5827 + CFI_SIGNAL_FRAME
5828 +- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5829 ++ CFI_DEF_CFA rsp,0
5830 + CFI_REGISTER rip,rcx
5831 + /*CFI_REGISTER rflags,r11*/
5832 + SWAPGS_UNSAFE_STACK
5833 + movl %esp,%r8d
5834 + CFI_REGISTER rsp,r8
5835 + movq PER_CPU_VAR(kernel_stack),%rsp
5836 ++
5837 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
5838 ++ pax_enter_kernel_user
5839 ++#endif
5840 ++
5841 + /*
5842 + * No need to follow this irqs on/off section: the syscall
5843 + * disabled irqs and here we enable it straight after entry:
5844 + */
5845 + ENABLE_INTERRUPTS(CLBR_NONE)
5846 +- SAVE_ARGS 8,0,0
5847 ++ SAVE_ARGS 8*6,0,0
5848 + movl %eax,%eax /* zero extension */
5849 + movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5850 + movq %rcx,RIP-ARGOFFSET(%rsp)
5851 +@@ -305,6 +350,12 @@ ENTRY(ia32_cstar_target)
5852 + /* no need to do an access_ok check here because r8 has been
5853 + 32bit zero extended */
5854 + /* hardware stack frame is complete now */
5855 ++
5856 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
5857 ++ mov $PAX_USER_SHADOW_BASE,%r10
5858 ++ add %r10,%r8
5859 ++#endif
5860 ++
5861 + 1: movl (%r8),%r9d
5862 + .section __ex_table,"a"
5863 + .quad 1b,ia32_badarg
5864 +@@ -327,6 +378,8 @@ cstar_dispatch:
5865 + testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5866 + jnz sysretl_audit
5867 + sysretl_from_sys_call:
5868 ++ pax_exit_kernel_user
5869 ++ pax_erase_kstack
5870 + andl $~TS_COMPAT,TI_status(%r10)
5871 + RESTORE_ARGS 0,-ARG_SKIP,0,0,0
5872 + movl RIP-ARGOFFSET(%rsp),%ecx
5873 +@@ -364,6 +417,9 @@ cstar_tracesys:
5874 + movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5875 + movq %rsp,%rdi /* &pt_regs -> arg1 */
5876 + call syscall_trace_enter
5877 ++
5878 ++ pax_erase_kstack
5879 ++
5880 + LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5881 + RESTORE_REST
5882 + xchgl %ebp,%r9d
5883 +@@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
5884 + CFI_REL_OFFSET rip,RIP-RIP
5885 + PARAVIRT_ADJUST_EXCEPTION_FRAME
5886 + SWAPGS
5887 ++ pax_enter_kernel_user
5888 + /*
5889 + * No need to follow this irqs on/off section: the syscall
5890 + * disabled irqs and here we enable it straight after entry:
5891 +@@ -441,6 +498,9 @@ ia32_tracesys:
5892 + movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5893 + movq %rsp,%rdi /* &pt_regs -> arg1 */
5894 + call syscall_trace_enter
5895 ++
5896 ++ pax_erase_kstack
5897 ++
5898 + LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5899 + RESTORE_REST
5900 + cmpq $(IA32_NR_syscalls-1),%rax
5901 +@@ -455,6 +515,7 @@ ia32_badsys:
5902 +
5903 + quiet_ni_syscall:
5904 + movq $-ENOSYS,%rax
5905 ++ pax_force_retaddr
5906 + ret
5907 + CFI_ENDPROC
5908 +
5909 +diff -urNp linux-3.1.1/arch/x86/ia32/ia32_signal.c linux-3.1.1/arch/x86/ia32/ia32_signal.c
5910 +--- linux-3.1.1/arch/x86/ia32/ia32_signal.c 2011-11-11 15:19:27.000000000 -0500
5911 ++++ linux-3.1.1/arch/x86/ia32/ia32_signal.c 2011-11-16 18:39:07.000000000 -0500
5912 +@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const
5913 + }
5914 + seg = get_fs();
5915 + set_fs(KERNEL_DS);
5916 +- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
5917 ++ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
5918 + set_fs(seg);
5919 + if (ret >= 0 && uoss_ptr) {
5920 + if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
5921 +@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct
5922 + */
5923 + static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
5924 + size_t frame_size,
5925 +- void **fpstate)
5926 ++ void __user **fpstate)
5927 + {
5928 + unsigned long sp;
5929 +
5930 +@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct
5931 +
5932 + if (used_math()) {
5933 + sp = sp - sig_xstate_ia32_size;
5934 +- *fpstate = (struct _fpstate_ia32 *) sp;
5935 ++ *fpstate = (struct _fpstate_ia32 __user *) sp;
5936 + if (save_i387_xstate_ia32(*fpstate) < 0)
5937 + return (void __user *) -1L;
5938 + }
5939 +@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct
5940 + sp -= frame_size;
5941 + /* Align the stack pointer according to the i386 ABI,
5942 + * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5943 +- sp = ((sp + 4) & -16ul) - 4;
5944 ++ sp = ((sp - 12) & -16ul) - 4;
5945 + return (void __user *) sp;
5946 + }
5947 +
5948 +@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_s
5949 + * These are actually not used anymore, but left because some
5950 + * gdb versions depend on them as a marker.
5951 + */
5952 +- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5953 ++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5954 + } put_user_catch(err);
5955 +
5956 + if (err)
5957 +@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct
5958 + 0xb8,
5959 + __NR_ia32_rt_sigreturn,
5960 + 0x80cd,
5961 +- 0,
5962 ++ 0
5963 + };
5964 +
5965 + frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5966 +@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct
5967 +
5968 + if (ka->sa.sa_flags & SA_RESTORER)
5969 + restorer = ka->sa.sa_restorer;
5970 ++ else if (current->mm->context.vdso)
5971 ++ /* Return stub is in 32bit vsyscall page */
5972 ++ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5973 + else
5974 +- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5975 +- rt_sigreturn);
5976 ++ restorer = &frame->retcode;
5977 + put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5978 +
5979 + /*
5980 + * Not actually used anymore, but left because some gdb
5981 + * versions need it.
5982 + */
5983 +- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5984 ++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
5985 + } put_user_catch(err);
5986 +
5987 + if (err)
5988 +diff -urNp linux-3.1.1/arch/x86/ia32/sys_ia32.c linux-3.1.1/arch/x86/ia32/sys_ia32.c
5989 +--- linux-3.1.1/arch/x86/ia32/sys_ia32.c 2011-11-11 15:19:27.000000000 -0500
5990 ++++ linux-3.1.1/arch/x86/ia32/sys_ia32.c 2011-11-16 18:39:07.000000000 -0500
5991 +@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsign
5992 + */
5993 + static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
5994 + {
5995 +- typeof(ubuf->st_uid) uid = 0;
5996 +- typeof(ubuf->st_gid) gid = 0;
5997 ++ typeof(((struct stat64 *)0)->st_uid) uid = 0;
5998 ++ typeof(((struct stat64 *)0)->st_gid) gid = 0;
5999 + SET_UID(uid, stat->uid);
6000 + SET_GID(gid, stat->gid);
6001 + if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
6002 +@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int
6003 + }
6004 + set_fs(KERNEL_DS);
6005 + ret = sys_rt_sigprocmask(how,
6006 +- set ? (sigset_t __user *)&s : NULL,
6007 +- oset ? (sigset_t __user *)&s : NULL,
6008 ++ set ? (sigset_t __force_user *)&s : NULL,
6009 ++ oset ? (sigset_t __force_user *)&s : NULL,
6010 + sigsetsize);
6011 + set_fs(old_fs);
6012 + if (ret)
6013 +@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int
6014 + return alarm_setitimer(seconds);
6015 + }
6016 +
6017 +-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
6018 ++asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
6019 + int options)
6020 + {
6021 + return compat_sys_wait4(pid, stat_addr, options, NULL);
6022 +@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_inter
6023 + mm_segment_t old_fs = get_fs();
6024 +
6025 + set_fs(KERNEL_DS);
6026 +- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
6027 ++ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
6028 + set_fs(old_fs);
6029 + if (put_compat_timespec(&t, interval))
6030 + return -EFAULT;
6031 +@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(comp
6032 + mm_segment_t old_fs = get_fs();
6033 +
6034 + set_fs(KERNEL_DS);
6035 +- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
6036 ++ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
6037 + set_fs(old_fs);
6038 + if (!ret) {
6039 + switch (_NSIG_WORDS) {
6040 +@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(in
6041 + if (copy_siginfo_from_user32(&info, uinfo))
6042 + return -EFAULT;
6043 + set_fs(KERNEL_DS);
6044 +- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
6045 ++ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
6046 + set_fs(old_fs);
6047 + return ret;
6048 + }
6049 +@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_f
6050 + return -EFAULT;
6051 +
6052 + set_fs(KERNEL_DS);
6053 +- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
6054 ++ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
6055 + count);
6056 + set_fs(old_fs);
6057 +
6058 +diff -urNp linux-3.1.1/arch/x86/include/asm/alternative-asm.h linux-3.1.1/arch/x86/include/asm/alternative-asm.h
6059 +--- linux-3.1.1/arch/x86/include/asm/alternative-asm.h 2011-11-11 15:19:27.000000000 -0500
6060 ++++ linux-3.1.1/arch/x86/include/asm/alternative-asm.h 2011-11-16 18:39:07.000000000 -0500
6061 +@@ -15,6 +15,20 @@
6062 + .endm
6063 + #endif
6064 +
6065 ++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
6066 ++ .macro pax_force_retaddr rip=0
6067 ++ btsq $63,\rip(%rsp)
6068 ++ .endm
6069 ++ .macro pax_force_fptr ptr
6070 ++ btsq $63,\ptr
6071 ++ .endm
6072 ++#else
6073 ++ .macro pax_force_retaddr rip=0
6074 ++ .endm
6075 ++ .macro pax_force_fptr ptr
6076 ++ .endm
6077 ++#endif
6078 ++
6079 + .macro altinstruction_entry orig alt feature orig_len alt_len
6080 + .long \orig - .
6081 + .long \alt - .
6082 +diff -urNp linux-3.1.1/arch/x86/include/asm/alternative.h linux-3.1.1/arch/x86/include/asm/alternative.h
6083 +--- linux-3.1.1/arch/x86/include/asm/alternative.h 2011-11-11 15:19:27.000000000 -0500
6084 ++++ linux-3.1.1/arch/x86/include/asm/alternative.h 2011-11-16 18:39:07.000000000 -0500
6085 +@@ -89,7 +89,7 @@ static inline int alternatives_text_rese
6086 + ".section .discard,\"aw\",@progbits\n" \
6087 + " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
6088 + ".previous\n" \
6089 +- ".section .altinstr_replacement, \"ax\"\n" \
6090 ++ ".section .altinstr_replacement, \"a\"\n" \
6091 + "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6092 + ".previous"
6093 +
6094 +diff -urNp linux-3.1.1/arch/x86/include/asm/apic.h linux-3.1.1/arch/x86/include/asm/apic.h
6095 +--- linux-3.1.1/arch/x86/include/asm/apic.h 2011-11-11 15:19:27.000000000 -0500
6096 ++++ linux-3.1.1/arch/x86/include/asm/apic.h 2011-11-16 18:39:07.000000000 -0500
6097 +@@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
6098 +
6099 + #ifdef CONFIG_X86_LOCAL_APIC
6100 +
6101 +-extern unsigned int apic_verbosity;
6102 ++extern int apic_verbosity;
6103 + extern int local_apic_timer_c2_ok;
6104 +
6105 + extern int disable_apic;
6106 +diff -urNp linux-3.1.1/arch/x86/include/asm/apm.h linux-3.1.1/arch/x86/include/asm/apm.h
6107 +--- linux-3.1.1/arch/x86/include/asm/apm.h 2011-11-11 15:19:27.000000000 -0500
6108 ++++ linux-3.1.1/arch/x86/include/asm/apm.h 2011-11-16 18:39:07.000000000 -0500
6109 +@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6110 + __asm__ __volatile__(APM_DO_ZERO_SEGS
6111 + "pushl %%edi\n\t"
6112 + "pushl %%ebp\n\t"
6113 +- "lcall *%%cs:apm_bios_entry\n\t"
6114 ++ "lcall *%%ss:apm_bios_entry\n\t"
6115 + "setc %%al\n\t"
6116 + "popl %%ebp\n\t"
6117 + "popl %%edi\n\t"
6118 +@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6119 + __asm__ __volatile__(APM_DO_ZERO_SEGS
6120 + "pushl %%edi\n\t"
6121 + "pushl %%ebp\n\t"
6122 +- "lcall *%%cs:apm_bios_entry\n\t"
6123 ++ "lcall *%%ss:apm_bios_entry\n\t"
6124 + "setc %%bl\n\t"
6125 + "popl %%ebp\n\t"
6126 + "popl %%edi\n\t"
6127 +diff -urNp linux-3.1.1/arch/x86/include/asm/atomic64_32.h linux-3.1.1/arch/x86/include/asm/atomic64_32.h
6128 +--- linux-3.1.1/arch/x86/include/asm/atomic64_32.h 2011-11-11 15:19:27.000000000 -0500
6129 ++++ linux-3.1.1/arch/x86/include/asm/atomic64_32.h 2011-11-16 18:39:07.000000000 -0500
6130 +@@ -12,6 +12,14 @@ typedef struct {
6131 + u64 __aligned(8) counter;
6132 + } atomic64_t;
6133 +
6134 ++#ifdef CONFIG_PAX_REFCOUNT
6135 ++typedef struct {
6136 ++ u64 __aligned(8) counter;
6137 ++} atomic64_unchecked_t;
6138 ++#else
6139 ++typedef atomic64_t atomic64_unchecked_t;
6140 ++#endif
6141 ++
6142 + #define ATOMIC64_INIT(val) { (val) }
6143 +
6144 + #ifdef CONFIG_X86_CMPXCHG64
6145 +@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
6146 + }
6147 +
6148 + /**
6149 ++ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
6150 ++ * @p: pointer to type atomic64_unchecked_t
6151 ++ * @o: expected value
6152 ++ * @n: new value
6153 ++ *
6154 ++ * Atomically sets @v to @n if it was equal to @o and returns
6155 ++ * the old value.
6156 ++ */
6157 ++
6158 ++static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
6159 ++{
6160 ++ return cmpxchg64(&v->counter, o, n);
6161 ++}
6162 ++
6163 ++/**
6164 + * atomic64_xchg - xchg atomic64 variable
6165 + * @v: pointer to type atomic64_t
6166 + * @n: value to assign
6167 +@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
6168 + }
6169 +
6170 + /**
6171 ++ * atomic64_set_unchecked - set atomic64 variable
6172 ++ * @v: pointer to type atomic64_unchecked_t
6173 ++ * @n: value to assign
6174 ++ *
6175 ++ * Atomically sets the value of @v to @n.
6176 ++ */
6177 ++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
6178 ++{
6179 ++ unsigned high = (unsigned)(i >> 32);
6180 ++ unsigned low = (unsigned)i;
6181 ++ asm volatile(ATOMIC64_ALTERNATIVE(set)
6182 ++ : "+b" (low), "+c" (high)
6183 ++ : "S" (v)
6184 ++ : "eax", "edx", "memory"
6185 ++ );
6186 ++}
6187 ++
6188 ++/**
6189 + * atomic64_read - read atomic64 variable
6190 + * @v: pointer to type atomic64_t
6191 + *
6192 +@@ -93,6 +134,22 @@ static inline long long atomic64_read(at
6193 + }
6194 +
6195 + /**
6196 ++ * atomic64_read_unchecked - read atomic64 variable
6197 ++ * @v: pointer to type atomic64_unchecked_t
6198 ++ *
6199 ++ * Atomically reads the value of @v and returns it.
6200 ++ */
6201 ++static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
6202 ++{
6203 ++ long long r;
6204 ++ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
6205 ++ : "=A" (r), "+c" (v)
6206 ++ : : "memory"
6207 ++ );
6208 ++ return r;
6209 ++ }
6210 ++
6211 ++/**
6212 + * atomic64_add_return - add and return
6213 + * @i: integer value to add
6214 + * @v: pointer to type atomic64_t
6215 +@@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
6216 + return i;
6217 + }
6218 +
6219 ++/**
6220 ++ * atomic64_add_return_unchecked - add and return
6221 ++ * @i: integer value to add
6222 ++ * @v: pointer to type atomic64_unchecked_t
6223 ++ *
6224 ++ * Atomically adds @i to @v and returns @i + *@v
6225 ++ */
6226 ++static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
6227 ++{
6228 ++ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6229 ++ : "+A" (i), "+c" (v)
6230 ++ : : "memory"
6231 ++ );
6232 ++ return i;
6233 ++}
6234 ++
6235 + /*
6236 + * Other variants with different arithmetic operators:
6237 + */
6238 +@@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6239 + return a;
6240 + }
6241 +
6242 ++static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6243 ++{
6244 ++ long long a;
6245 ++ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6246 ++ : "=A" (a)
6247 ++ : "S" (v)
6248 ++ : "memory", "ecx"
6249 ++ );
6250 ++ return a;
6251 ++}
6252 ++
6253 + static inline long long atomic64_dec_return(atomic64_t *v)
6254 + {
6255 + long long a;
6256 +@@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6257 + }
6258 +
6259 + /**
6260 ++ * atomic64_add_unchecked - add integer to atomic64 variable
6261 ++ * @i: integer value to add
6262 ++ * @v: pointer to type atomic64_unchecked_t
6263 ++ *
6264 ++ * Atomically adds @i to @v.
6265 ++ */
6266 ++static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6267 ++{
6268 ++ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6269 ++ : "+A" (i), "+c" (v)
6270 ++ : : "memory"
6271 ++ );
6272 ++ return i;
6273 ++}
6274 ++
6275 ++/**
6276 + * atomic64_sub - subtract the atomic64 variable
6277 + * @i: integer value to subtract
6278 + * @v: pointer to type atomic64_t
6279 +diff -urNp linux-3.1.1/arch/x86/include/asm/atomic64_64.h linux-3.1.1/arch/x86/include/asm/atomic64_64.h
6280 +--- linux-3.1.1/arch/x86/include/asm/atomic64_64.h 2011-11-11 15:19:27.000000000 -0500
6281 ++++ linux-3.1.1/arch/x86/include/asm/atomic64_64.h 2011-11-16 18:39:07.000000000 -0500
6282 +@@ -18,7 +18,19 @@
6283 + */
6284 + static inline long atomic64_read(const atomic64_t *v)
6285 + {
6286 +- return (*(volatile long *)&(v)->counter);
6287 ++ return (*(volatile const long *)&(v)->counter);
6288 ++}
6289 ++
6290 ++/**
6291 ++ * atomic64_read_unchecked - read atomic64 variable
6292 ++ * @v: pointer of type atomic64_unchecked_t
6293 ++ *
6294 ++ * Atomically reads the value of @v.
6295 ++ * Doesn't imply a read memory barrier.
6296 ++ */
6297 ++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6298 ++{
6299 ++ return (*(volatile const long *)&(v)->counter);
6300 + }
6301 +
6302 + /**
6303 +@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6304 + }
6305 +
6306 + /**
6307 ++ * atomic64_set_unchecked - set atomic64 variable
6308 ++ * @v: pointer to type atomic64_unchecked_t
6309 ++ * @i: required value
6310 ++ *
6311 ++ * Atomically sets the value of @v to @i.
6312 ++ */
6313 ++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6314 ++{
6315 ++ v->counter = i;
6316 ++}
6317 ++
6318 ++/**
6319 + * atomic64_add - add integer to atomic64 variable
6320 + * @i: integer value to add
6321 + * @v: pointer to type atomic64_t
6322 +@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6323 + */
6324 + static inline void atomic64_add(long i, atomic64_t *v)
6325 + {
6326 ++ asm volatile(LOCK_PREFIX "addq %1,%0\n"
6327 ++
6328 ++#ifdef CONFIG_PAX_REFCOUNT
6329 ++ "jno 0f\n"
6330 ++ LOCK_PREFIX "subq %1,%0\n"
6331 ++ "int $4\n0:\n"
6332 ++ _ASM_EXTABLE(0b, 0b)
6333 ++#endif
6334 ++
6335 ++ : "=m" (v->counter)
6336 ++ : "er" (i), "m" (v->counter));
6337 ++}
6338 ++
6339 ++/**
6340 ++ * atomic64_add_unchecked - add integer to atomic64 variable
6341 ++ * @i: integer value to add
6342 ++ * @v: pointer to type atomic64_unchecked_t
6343 ++ *
6344 ++ * Atomically adds @i to @v.
6345 ++ */
6346 ++static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6347 ++{
6348 + asm volatile(LOCK_PREFIX "addq %1,%0"
6349 + : "=m" (v->counter)
6350 + : "er" (i), "m" (v->counter));
6351 +@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6352 + */
6353 + static inline void atomic64_sub(long i, atomic64_t *v)
6354 + {
6355 +- asm volatile(LOCK_PREFIX "subq %1,%0"
6356 ++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6357 ++
6358 ++#ifdef CONFIG_PAX_REFCOUNT
6359 ++ "jno 0f\n"
6360 ++ LOCK_PREFIX "addq %1,%0\n"
6361 ++ "int $4\n0:\n"
6362 ++ _ASM_EXTABLE(0b, 0b)
6363 ++#endif
6364 ++
6365 ++ : "=m" (v->counter)
6366 ++ : "er" (i), "m" (v->counter));
6367 ++}
6368 ++
6369 ++/**
6370 ++ * atomic64_sub_unchecked - subtract the atomic64 variable
6371 ++ * @i: integer value to subtract
6372 ++ * @v: pointer to type atomic64_unchecked_t
6373 ++ *
6374 ++ * Atomically subtracts @i from @v.
6375 ++ */
6376 ++static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6377 ++{
6378 ++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
6379 + : "=m" (v->counter)
6380 + : "er" (i), "m" (v->counter));
6381 + }
6382 +@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6383 + {
6384 + unsigned char c;
6385 +
6386 +- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6387 ++ asm volatile(LOCK_PREFIX "subq %2,%0\n"
6388 ++
6389 ++#ifdef CONFIG_PAX_REFCOUNT
6390 ++ "jno 0f\n"
6391 ++ LOCK_PREFIX "addq %2,%0\n"
6392 ++ "int $4\n0:\n"
6393 ++ _ASM_EXTABLE(0b, 0b)
6394 ++#endif
6395 ++
6396 ++ "sete %1\n"
6397 + : "=m" (v->counter), "=qm" (c)
6398 + : "er" (i), "m" (v->counter) : "memory");
6399 + return c;
6400 +@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6401 + */
6402 + static inline void atomic64_inc(atomic64_t *v)
6403 + {
6404 ++ asm volatile(LOCK_PREFIX "incq %0\n"
6405 ++
6406 ++#ifdef CONFIG_PAX_REFCOUNT
6407 ++ "jno 0f\n"
6408 ++ LOCK_PREFIX "decq %0\n"
6409 ++ "int $4\n0:\n"
6410 ++ _ASM_EXTABLE(0b, 0b)
6411 ++#endif
6412 ++
6413 ++ : "=m" (v->counter)
6414 ++ : "m" (v->counter));
6415 ++}
6416 ++
6417 ++/**
6418 ++ * atomic64_inc_unchecked - increment atomic64 variable
6419 ++ * @v: pointer to type atomic64_unchecked_t
6420 ++ *
6421 ++ * Atomically increments @v by 1.
6422 ++ */
6423 ++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6424 ++{
6425 + asm volatile(LOCK_PREFIX "incq %0"
6426 + : "=m" (v->counter)
6427 + : "m" (v->counter));
6428 +@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6429 + */
6430 + static inline void atomic64_dec(atomic64_t *v)
6431 + {
6432 +- asm volatile(LOCK_PREFIX "decq %0"
6433 ++ asm volatile(LOCK_PREFIX "decq %0\n"
6434 ++
6435 ++#ifdef CONFIG_PAX_REFCOUNT
6436 ++ "jno 0f\n"
6437 ++ LOCK_PREFIX "incq %0\n"
6438 ++ "int $4\n0:\n"
6439 ++ _ASM_EXTABLE(0b, 0b)
6440 ++#endif
6441 ++
6442 ++ : "=m" (v->counter)
6443 ++ : "m" (v->counter));
6444 ++}
6445 ++
6446 ++/**
6447 ++ * atomic64_dec_unchecked - decrement atomic64 variable
6448 ++ * @v: pointer to type atomic64_t
6449 ++ *
6450 ++ * Atomically decrements @v by 1.
6451 ++ */
6452 ++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6453 ++{
6454 ++ asm volatile(LOCK_PREFIX "decq %0\n"
6455 + : "=m" (v->counter)
6456 + : "m" (v->counter));
6457 + }
6458 +@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6459 + {
6460 + unsigned char c;
6461 +
6462 +- asm volatile(LOCK_PREFIX "decq %0; sete %1"
6463 ++ asm volatile(LOCK_PREFIX "decq %0\n"
6464 ++
6465 ++#ifdef CONFIG_PAX_REFCOUNT
6466 ++ "jno 0f\n"
6467 ++ LOCK_PREFIX "incq %0\n"
6468 ++ "int $4\n0:\n"
6469 ++ _ASM_EXTABLE(0b, 0b)
6470 ++#endif
6471 ++
6472 ++ "sete %1\n"
6473 + : "=m" (v->counter), "=qm" (c)
6474 + : "m" (v->counter) : "memory");
6475 + return c != 0;
6476 +@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6477 + {
6478 + unsigned char c;
6479 +
6480 +- asm volatile(LOCK_PREFIX "incq %0; sete %1"
6481 ++ asm volatile(LOCK_PREFIX "incq %0\n"
6482 ++
6483 ++#ifdef CONFIG_PAX_REFCOUNT
6484 ++ "jno 0f\n"
6485 ++ LOCK_PREFIX "decq %0\n"
6486 ++ "int $4\n0:\n"
6487 ++ _ASM_EXTABLE(0b, 0b)
6488 ++#endif
6489 ++
6490 ++ "sete %1\n"
6491 + : "=m" (v->counter), "=qm" (c)
6492 + : "m" (v->counter) : "memory");
6493 + return c != 0;
6494 +@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6495 + {
6496 + unsigned char c;
6497 +
6498 +- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6499 ++ asm volatile(LOCK_PREFIX "addq %2,%0\n"
6500 ++
6501 ++#ifdef CONFIG_PAX_REFCOUNT
6502 ++ "jno 0f\n"
6503 ++ LOCK_PREFIX "subq %2,%0\n"
6504 ++ "int $4\n0:\n"
6505 ++ _ASM_EXTABLE(0b, 0b)
6506 ++#endif
6507 ++
6508 ++ "sets %1\n"
6509 + : "=m" (v->counter), "=qm" (c)
6510 + : "er" (i), "m" (v->counter) : "memory");
6511 + return c;
6512 +@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6513 + static inline long atomic64_add_return(long i, atomic64_t *v)
6514 + {
6515 + long __i = i;
6516 +- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6517 ++ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6518 ++
6519 ++#ifdef CONFIG_PAX_REFCOUNT
6520 ++ "jno 0f\n"
6521 ++ "movq %0, %1\n"
6522 ++ "int $4\n0:\n"
6523 ++ _ASM_EXTABLE(0b, 0b)
6524 ++#endif
6525 ++
6526 ++ : "+r" (i), "+m" (v->counter)
6527 ++ : : "memory");
6528 ++ return i + __i;
6529 ++}
6530 ++
6531 ++/**
6532 ++ * atomic64_add_return_unchecked - add and return
6533 ++ * @i: integer value to add
6534 ++ * @v: pointer to type atomic64_unchecked_t
6535 ++ *
6536 ++ * Atomically adds @i to @v and returns @i + @v
6537 ++ */
6538 ++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6539 ++{
6540 ++ long __i = i;
6541 ++ asm volatile(LOCK_PREFIX "xaddq %0, %1"
6542 + : "+r" (i), "+m" (v->counter)
6543 + : : "memory");
6544 + return i + __i;
6545 +@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6546 + }
6547 +
6548 + #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6549 ++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6550 ++{
6551 ++ return atomic64_add_return_unchecked(1, v);
6552 ++}
6553 + #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6554 +
6555 + static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6556 +@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6557 + return cmpxchg(&v->counter, old, new);
6558 + }
6559 +
6560 ++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6561 ++{
6562 ++ return cmpxchg(&v->counter, old, new);
6563 ++}
6564 ++
6565 + static inline long atomic64_xchg(atomic64_t *v, long new)
6566 + {
6567 + return xchg(&v->counter, new);
6568 +@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6569 + */
6570 + static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6571 + {
6572 +- long c, old;
6573 ++ long c, old, new;
6574 + c = atomic64_read(v);
6575 + for (;;) {
6576 +- if (unlikely(c == (u)))
6577 ++ if (unlikely(c == u))
6578 + break;
6579 +- old = atomic64_cmpxchg((v), c, c + (a));
6580 ++
6581 ++ asm volatile("add %2,%0\n"
6582 ++
6583 ++#ifdef CONFIG_PAX_REFCOUNT
6584 ++ "jno 0f\n"
6585 ++ "sub %2,%0\n"
6586 ++ "int $4\n0:\n"
6587 ++ _ASM_EXTABLE(0b, 0b)
6588 ++#endif
6589 ++
6590 ++ : "=r" (new)
6591 ++ : "0" (c), "ir" (a));
6592 ++
6593 ++ old = atomic64_cmpxchg(v, c, new);
6594 + if (likely(old == c))
6595 + break;
6596 + c = old;
6597 + }
6598 +- return c != (u);
6599 ++ return c != u;
6600 + }
6601 +
6602 + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6603 +diff -urNp linux-3.1.1/arch/x86/include/asm/atomic.h linux-3.1.1/arch/x86/include/asm/atomic.h
6604 +--- linux-3.1.1/arch/x86/include/asm/atomic.h 2011-11-11 15:19:27.000000000 -0500
6605 ++++ linux-3.1.1/arch/x86/include/asm/atomic.h 2011-11-16 18:39:07.000000000 -0500
6606 +@@ -22,7 +22,18 @@
6607 + */
6608 + static inline int atomic_read(const atomic_t *v)
6609 + {
6610 +- return (*(volatile int *)&(v)->counter);
6611 ++ return (*(volatile const int *)&(v)->counter);
6612 ++}
6613 ++
6614 ++/**
6615 ++ * atomic_read_unchecked - read atomic variable
6616 ++ * @v: pointer of type atomic_unchecked_t
6617 ++ *
6618 ++ * Atomically reads the value of @v.
6619 ++ */
6620 ++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6621 ++{
6622 ++ return (*(volatile const int *)&(v)->counter);
6623 + }
6624 +
6625 + /**
6626 +@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6627 + }
6628 +
6629 + /**
6630 ++ * atomic_set_unchecked - set atomic variable
6631 ++ * @v: pointer of type atomic_unchecked_t
6632 ++ * @i: required value
6633 ++ *
6634 ++ * Atomically sets the value of @v to @i.
6635 ++ */
6636 ++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6637 ++{
6638 ++ v->counter = i;
6639 ++}
6640 ++
6641 ++/**
6642 + * atomic_add - add integer to atomic variable
6643 + * @i: integer value to add
6644 + * @v: pointer of type atomic_t
6645 +@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6646 + */
6647 + static inline void atomic_add(int i, atomic_t *v)
6648 + {
6649 +- asm volatile(LOCK_PREFIX "addl %1,%0"
6650 ++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6651 ++
6652 ++#ifdef CONFIG_PAX_REFCOUNT
6653 ++ "jno 0f\n"
6654 ++ LOCK_PREFIX "subl %1,%0\n"
6655 ++ "int $4\n0:\n"
6656 ++ _ASM_EXTABLE(0b, 0b)
6657 ++#endif
6658 ++
6659 ++ : "+m" (v->counter)
6660 ++ : "ir" (i));
6661 ++}
6662 ++
6663 ++/**
6664 ++ * atomic_add_unchecked - add integer to atomic variable
6665 ++ * @i: integer value to add
6666 ++ * @v: pointer of type atomic_unchecked_t
6667 ++ *
6668 ++ * Atomically adds @i to @v.
6669 ++ */
6670 ++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6671 ++{
6672 ++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6673 + : "+m" (v->counter)
6674 + : "ir" (i));
6675 + }
6676 +@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6677 + */
6678 + static inline void atomic_sub(int i, atomic_t *v)
6679 + {
6680 +- asm volatile(LOCK_PREFIX "subl %1,%0"
6681 ++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6682 ++
6683 ++#ifdef CONFIG_PAX_REFCOUNT
6684 ++ "jno 0f\n"
6685 ++ LOCK_PREFIX "addl %1,%0\n"
6686 ++ "int $4\n0:\n"
6687 ++ _ASM_EXTABLE(0b, 0b)
6688 ++#endif
6689 ++
6690 ++ : "+m" (v->counter)
6691 ++ : "ir" (i));
6692 ++}
6693 ++
6694 ++/**
6695 ++ * atomic_sub_unchecked - subtract integer from atomic variable
6696 ++ * @i: integer value to subtract
6697 ++ * @v: pointer of type atomic_unchecked_t
6698 ++ *
6699 ++ * Atomically subtracts @i from @v.
6700 ++ */
6701 ++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6702 ++{
6703 ++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6704 + : "+m" (v->counter)
6705 + : "ir" (i));
6706 + }
6707 +@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6708 + {
6709 + unsigned char c;
6710 +
6711 +- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6712 ++ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6713 ++
6714 ++#ifdef CONFIG_PAX_REFCOUNT
6715 ++ "jno 0f\n"
6716 ++ LOCK_PREFIX "addl %2,%0\n"
6717 ++ "int $4\n0:\n"
6718 ++ _ASM_EXTABLE(0b, 0b)
6719 ++#endif
6720 ++
6721 ++ "sete %1\n"
6722 + : "+m" (v->counter), "=qm" (c)
6723 + : "ir" (i) : "memory");
6724 + return c;
6725 +@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6726 + */
6727 + static inline void atomic_inc(atomic_t *v)
6728 + {
6729 +- asm volatile(LOCK_PREFIX "incl %0"
6730 ++ asm volatile(LOCK_PREFIX "incl %0\n"
6731 ++
6732 ++#ifdef CONFIG_PAX_REFCOUNT
6733 ++ "jno 0f\n"
6734 ++ LOCK_PREFIX "decl %0\n"
6735 ++ "int $4\n0:\n"
6736 ++ _ASM_EXTABLE(0b, 0b)
6737 ++#endif
6738 ++
6739 ++ : "+m" (v->counter));
6740 ++}
6741 ++
6742 ++/**
6743 ++ * atomic_inc_unchecked - increment atomic variable
6744 ++ * @v: pointer of type atomic_unchecked_t
6745 ++ *
6746 ++ * Atomically increments @v by 1.
6747 ++ */
6748 ++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6749 ++{
6750 ++ asm volatile(LOCK_PREFIX "incl %0\n"
6751 + : "+m" (v->counter));
6752 + }
6753 +
6754 +@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6755 + */
6756 + static inline void atomic_dec(atomic_t *v)
6757 + {
6758 +- asm volatile(LOCK_PREFIX "decl %0"
6759 ++ asm volatile(LOCK_PREFIX "decl %0\n"
6760 ++
6761 ++#ifdef CONFIG_PAX_REFCOUNT
6762 ++ "jno 0f\n"
6763 ++ LOCK_PREFIX "incl %0\n"
6764 ++ "int $4\n0:\n"
6765 ++ _ASM_EXTABLE(0b, 0b)
6766 ++#endif
6767 ++
6768 ++ : "+m" (v->counter));
6769 ++}
6770 ++
6771 ++/**
6772 ++ * atomic_dec_unchecked - decrement atomic variable
6773 ++ * @v: pointer of type atomic_unchecked_t
6774 ++ *
6775 ++ * Atomically decrements @v by 1.
6776 ++ */
6777 ++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6778 ++{
6779 ++ asm volatile(LOCK_PREFIX "decl %0\n"
6780 + : "+m" (v->counter));
6781 + }
6782 +
6783 +@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6784 + {
6785 + unsigned char c;
6786 +
6787 +- asm volatile(LOCK_PREFIX "decl %0; sete %1"
6788 ++ asm volatile(LOCK_PREFIX "decl %0\n"
6789 ++
6790 ++#ifdef CONFIG_PAX_REFCOUNT
6791 ++ "jno 0f\n"
6792 ++ LOCK_PREFIX "incl %0\n"
6793 ++ "int $4\n0:\n"
6794 ++ _ASM_EXTABLE(0b, 0b)
6795 ++#endif
6796 ++
6797 ++ "sete %1\n"
6798 + : "+m" (v->counter), "=qm" (c)
6799 + : : "memory");
6800 + return c != 0;
6801 +@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6802 + {
6803 + unsigned char c;
6804 +
6805 +- asm volatile(LOCK_PREFIX "incl %0; sete %1"
6806 ++ asm volatile(LOCK_PREFIX "incl %0\n"
6807 ++
6808 ++#ifdef CONFIG_PAX_REFCOUNT
6809 ++ "jno 0f\n"
6810 ++ LOCK_PREFIX "decl %0\n"
6811 ++ "int $4\n0:\n"
6812 ++ _ASM_EXTABLE(0b, 0b)
6813 ++#endif
6814 ++
6815 ++ "sete %1\n"
6816 ++ : "+m" (v->counter), "=qm" (c)
6817 ++ : : "memory");
6818 ++ return c != 0;
6819 ++}
6820 ++
6821 ++/**
6822 ++ * atomic_inc_and_test_unchecked - increment and test
6823 ++ * @v: pointer of type atomic_unchecked_t
6824 ++ *
6825 ++ * Atomically increments @v by 1
6826 ++ * and returns true if the result is zero, or false for all
6827 ++ * other cases.
6828 ++ */
6829 ++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6830 ++{
6831 ++ unsigned char c;
6832 ++
6833 ++ asm volatile(LOCK_PREFIX "incl %0\n"
6834 ++ "sete %1\n"
6835 + : "+m" (v->counter), "=qm" (c)
6836 + : : "memory");
6837 + return c != 0;
6838 +@@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6839 + {
6840 + unsigned char c;
6841 +
6842 +- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6843 ++ asm volatile(LOCK_PREFIX "addl %2,%0\n"
6844 ++
6845 ++#ifdef CONFIG_PAX_REFCOUNT
6846 ++ "jno 0f\n"
6847 ++ LOCK_PREFIX "subl %2,%0\n"
6848 ++ "int $4\n0:\n"
6849 ++ _ASM_EXTABLE(0b, 0b)
6850 ++#endif
6851 ++
6852 ++ "sets %1\n"
6853 + : "+m" (v->counter), "=qm" (c)
6854 + : "ir" (i) : "memory");
6855 + return c;
6856 +@@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6857 + #endif
6858 + /* Modern 486+ processor */
6859 + __i = i;
6860 ++ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6861 ++
6862 ++#ifdef CONFIG_PAX_REFCOUNT
6863 ++ "jno 0f\n"
6864 ++ "movl %0, %1\n"
6865 ++ "int $4\n0:\n"
6866 ++ _ASM_EXTABLE(0b, 0b)
6867 ++#endif
6868 ++
6869 ++ : "+r" (i), "+m" (v->counter)
6870 ++ : : "memory");
6871 ++ return i + __i;
6872 ++
6873 ++#ifdef CONFIG_M386
6874 ++no_xadd: /* Legacy 386 processor */
6875 ++ local_irq_save(flags);
6876 ++ __i = atomic_read(v);
6877 ++ atomic_set(v, i + __i);
6878 ++ local_irq_restore(flags);
6879 ++ return i + __i;
6880 ++#endif
6881 ++}
6882 ++
6883 ++/**
6884 ++ * atomic_add_return_unchecked - add integer and return
6885 ++ * @v: pointer of type atomic_unchecked_t
6886 ++ * @i: integer value to add
6887 ++ *
6888 ++ * Atomically adds @i to @v and returns @i + @v
6889 ++ */
6890 ++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6891 ++{
6892 ++ int __i;
6893 ++#ifdef CONFIG_M386
6894 ++ unsigned long flags;
6895 ++ if (unlikely(boot_cpu_data.x86 <= 3))
6896 ++ goto no_xadd;
6897 ++#endif
6898 ++ /* Modern 486+ processor */
6899 ++ __i = i;
6900 + asm volatile(LOCK_PREFIX "xaddl %0, %1"
6901 + : "+r" (i), "+m" (v->counter)
6902 + : : "memory");
6903 +@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6904 + }
6905 +
6906 + #define atomic_inc_return(v) (atomic_add_return(1, v))
6907 ++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6908 ++{
6909 ++ return atomic_add_return_unchecked(1, v);
6910 ++}
6911 + #define atomic_dec_return(v) (atomic_sub_return(1, v))
6912 +
6913 + static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6914 +@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6915 + return cmpxchg(&v->counter, old, new);
6916 + }
6917 +
6918 ++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6919 ++{
6920 ++ return cmpxchg(&v->counter, old, new);
6921 ++}
6922 ++
6923 + static inline int atomic_xchg(atomic_t *v, int new)
6924 + {
6925 + return xchg(&v->counter, new);
6926 + }
6927 +
6928 ++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6929 ++{
6930 ++ return xchg(&v->counter, new);
6931 ++}
6932 ++
6933 + /**
6934 + * __atomic_add_unless - add unless the number is already a given value
6935 + * @v: pointer of type atomic_t
6936 +@@ -231,12 +447,25 @@ static inline int atomic_xchg(atomic_t *
6937 + */
6938 + static inline int __atomic_add_unless(atomic_t *v, int a, int u)
6939 + {
6940 +- int c, old;
6941 ++ int c, old, new;
6942 + c = atomic_read(v);
6943 + for (;;) {
6944 +- if (unlikely(c == (u)))
6945 ++ if (unlikely(c == u))
6946 + break;
6947 +- old = atomic_cmpxchg((v), c, c + (a));
6948 ++
6949 ++ asm volatile("addl %2,%0\n"
6950 ++
6951 ++#ifdef CONFIG_PAX_REFCOUNT
6952 ++ "jno 0f\n"
6953 ++ "subl %2,%0\n"
6954 ++ "int $4\n0:\n"
6955 ++ _ASM_EXTABLE(0b, 0b)
6956 ++#endif
6957 ++
6958 ++ : "=r" (new)
6959 ++ : "0" (c), "ir" (a));
6960 ++
6961 ++ old = atomic_cmpxchg(v, c, new);
6962 + if (likely(old == c))
6963 + break;
6964 + c = old;
6965 +@@ -244,6 +473,48 @@ static inline int __atomic_add_unless(at
6966 + return c;
6967 + }
6968 +
6969 ++/**
6970 ++ * atomic_inc_not_zero_hint - increment if not null
6971 ++ * @v: pointer of type atomic_t
6972 ++ * @hint: probable value of the atomic before the increment
6973 ++ *
6974 ++ * This version of atomic_inc_not_zero() gives a hint of probable
6975 ++ * value of the atomic. This helps processor to not read the memory
6976 ++ * before doing the atomic read/modify/write cycle, lowering
6977 ++ * number of bus transactions on some arches.
6978 ++ *
6979 ++ * Returns: 0 if increment was not done, 1 otherwise.
6980 ++ */
6981 ++#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6982 ++static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6983 ++{
6984 ++ int val, c = hint, new;
6985 ++
6986 ++ /* sanity test, should be removed by compiler if hint is a constant */
6987 ++ if (!hint)
6988 ++ return __atomic_add_unless(v, 1, 0);
6989 ++
6990 ++ do {
6991 ++ asm volatile("incl %0\n"
6992 ++
6993 ++#ifdef CONFIG_PAX_REFCOUNT
6994 ++ "jno 0f\n"
6995 ++ "decl %0\n"
6996 ++ "int $4\n0:\n"
6997 ++ _ASM_EXTABLE(0b, 0b)
6998 ++#endif
6999 ++
7000 ++ : "=r" (new)
7001 ++ : "0" (c));
7002 ++
7003 ++ val = atomic_cmpxchg(v, c, new);
7004 ++ if (val == c)
7005 ++ return 1;
7006 ++ c = val;
7007 ++ } while (c);
7008 ++
7009 ++ return 0;
7010 ++}
7011 +
7012 + /*
7013 + * atomic_dec_if_positive - decrement by 1 if old value positive
7014 +diff -urNp linux-3.1.1/arch/x86/include/asm/bitops.h linux-3.1.1/arch/x86/include/asm/bitops.h
7015 +--- linux-3.1.1/arch/x86/include/asm/bitops.h 2011-11-11 15:19:27.000000000 -0500
7016 ++++ linux-3.1.1/arch/x86/include/asm/bitops.h 2011-11-16 18:39:07.000000000 -0500
7017 +@@ -38,7 +38,7 @@
7018 + * a mask operation on a byte.
7019 + */
7020 + #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
7021 +-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
7022 ++#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
7023 + #define CONST_MASK(nr) (1 << ((nr) & 7))
7024 +
7025 + /**
7026 +diff -urNp linux-3.1.1/arch/x86/include/asm/boot.h linux-3.1.1/arch/x86/include/asm/boot.h
7027 +--- linux-3.1.1/arch/x86/include/asm/boot.h 2011-11-11 15:19:27.000000000 -0500
7028 ++++ linux-3.1.1/arch/x86/include/asm/boot.h 2011-11-16 18:39:07.000000000 -0500
7029 +@@ -11,10 +11,15 @@
7030 + #include <asm/pgtable_types.h>
7031 +
7032 + /* Physical address where kernel should be loaded. */
7033 +-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7034 ++#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7035 + + (CONFIG_PHYSICAL_ALIGN - 1)) \
7036 + & ~(CONFIG_PHYSICAL_ALIGN - 1))
7037 +
7038 ++#ifndef __ASSEMBLY__
7039 ++extern unsigned char __LOAD_PHYSICAL_ADDR[];
7040 ++#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
7041 ++#endif
7042 ++
7043 + /* Minimum kernel alignment, as a power of two */
7044 + #ifdef CONFIG_X86_64
7045 + #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
7046 +diff -urNp linux-3.1.1/arch/x86/include/asm/cacheflush.h linux-3.1.1/arch/x86/include/asm/cacheflush.h
7047 +--- linux-3.1.1/arch/x86/include/asm/cacheflush.h 2011-11-11 15:19:27.000000000 -0500
7048 ++++ linux-3.1.1/arch/x86/include/asm/cacheflush.h 2011-11-16 18:39:07.000000000 -0500
7049 +@@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
7050 + unsigned long pg_flags = pg->flags & _PGMT_MASK;
7051 +
7052 + if (pg_flags == _PGMT_DEFAULT)
7053 +- return -1;
7054 ++ return ~0UL;
7055 + else if (pg_flags == _PGMT_WC)
7056 + return _PAGE_CACHE_WC;
7057 + else if (pg_flags == _PGMT_UC_MINUS)
7058 +diff -urNp linux-3.1.1/arch/x86/include/asm/cache.h linux-3.1.1/arch/x86/include/asm/cache.h
7059 +--- linux-3.1.1/arch/x86/include/asm/cache.h 2011-11-11 15:19:27.000000000 -0500
7060 ++++ linux-3.1.1/arch/x86/include/asm/cache.h 2011-11-16 18:39:07.000000000 -0500
7061 +@@ -5,12 +5,13 @@
7062 +
7063 + /* L1 cache line size */
7064 + #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7065 +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7066 ++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7067 +
7068 + #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7069 ++#define __read_only __attribute__((__section__(".data..read_only")))
7070 +
7071 + #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
7072 +-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
7073 ++#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
7074 +
7075 + #ifdef CONFIG_X86_VSMP
7076 + #ifdef CONFIG_SMP
7077 +diff -urNp linux-3.1.1/arch/x86/include/asm/checksum_32.h linux-3.1.1/arch/x86/include/asm/checksum_32.h
7078 +--- linux-3.1.1/arch/x86/include/asm/checksum_32.h 2011-11-11 15:19:27.000000000 -0500
7079 ++++ linux-3.1.1/arch/x86/include/asm/checksum_32.h 2011-11-16 18:39:07.000000000 -0500
7080 +@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7081 + int len, __wsum sum,
7082 + int *src_err_ptr, int *dst_err_ptr);
7083 +
7084 ++asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7085 ++ int len, __wsum sum,
7086 ++ int *src_err_ptr, int *dst_err_ptr);
7087 ++
7088 ++asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7089 ++ int len, __wsum sum,
7090 ++ int *src_err_ptr, int *dst_err_ptr);
7091 ++
7092 + /*
7093 + * Note: when you get a NULL pointer exception here this means someone
7094 + * passed in an incorrect kernel address to one of these functions.
7095 +@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7096 + int *err_ptr)
7097 + {
7098 + might_sleep();
7099 +- return csum_partial_copy_generic((__force void *)src, dst,
7100 ++ return csum_partial_copy_generic_from_user((__force void *)src, dst,
7101 + len, sum, err_ptr, NULL);
7102 + }
7103 +
7104 +@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7105 + {
7106 + might_sleep();
7107 + if (access_ok(VERIFY_WRITE, dst, len))
7108 +- return csum_partial_copy_generic(src, (__force void *)dst,
7109 ++ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7110 + len, sum, NULL, err_ptr);
7111 +
7112 + if (len)
7113 +diff -urNp linux-3.1.1/arch/x86/include/asm/cpufeature.h linux-3.1.1/arch/x86/include/asm/cpufeature.h
7114 +--- linux-3.1.1/arch/x86/include/asm/cpufeature.h 2011-11-11 15:19:27.000000000 -0500
7115 ++++ linux-3.1.1/arch/x86/include/asm/cpufeature.h 2011-11-16 18:39:07.000000000 -0500
7116 +@@ -358,7 +358,7 @@ static __always_inline __pure bool __sta
7117 + ".section .discard,\"aw\",@progbits\n"
7118 + " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
7119 + ".previous\n"
7120 +- ".section .altinstr_replacement,\"ax\"\n"
7121 ++ ".section .altinstr_replacement,\"a\"\n"
7122 + "3: movb $1,%0\n"
7123 + "4:\n"
7124 + ".previous\n"
7125 +diff -urNp linux-3.1.1/arch/x86/include/asm/desc_defs.h linux-3.1.1/arch/x86/include/asm/desc_defs.h
7126 +--- linux-3.1.1/arch/x86/include/asm/desc_defs.h 2011-11-11 15:19:27.000000000 -0500
7127 ++++ linux-3.1.1/arch/x86/include/asm/desc_defs.h 2011-11-16 18:39:07.000000000 -0500
7128 +@@ -31,6 +31,12 @@ struct desc_struct {
7129 + unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7130 + unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7131 + };
7132 ++ struct {
7133 ++ u16 offset_low;
7134 ++ u16 seg;
7135 ++ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7136 ++ unsigned offset_high: 16;
7137 ++ } gate;
7138 + };
7139 + } __attribute__((packed));
7140 +
7141 +diff -urNp linux-3.1.1/arch/x86/include/asm/desc.h linux-3.1.1/arch/x86/include/asm/desc.h
7142 +--- linux-3.1.1/arch/x86/include/asm/desc.h 2011-11-11 15:19:27.000000000 -0500
7143 ++++ linux-3.1.1/arch/x86/include/asm/desc.h 2011-11-16 18:39:07.000000000 -0500
7144 +@@ -4,6 +4,7 @@
7145 + #include <asm/desc_defs.h>
7146 + #include <asm/ldt.h>
7147 + #include <asm/mmu.h>
7148 ++#include <asm/pgtable.h>
7149 +
7150 + #include <linux/smp.h>
7151 +
7152 +@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_
7153 +
7154 + desc->type = (info->read_exec_only ^ 1) << 1;
7155 + desc->type |= info->contents << 2;
7156 ++ desc->type |= info->seg_not_present ^ 1;
7157 +
7158 + desc->s = 1;
7159 + desc->dpl = 0x3;
7160 +@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_
7161 + }
7162 +
7163 + extern struct desc_ptr idt_descr;
7164 +-extern gate_desc idt_table[];
7165 +-
7166 +-struct gdt_page {
7167 +- struct desc_struct gdt[GDT_ENTRIES];
7168 +-} __attribute__((aligned(PAGE_SIZE)));
7169 +-
7170 +-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
7171 ++extern gate_desc idt_table[256];
7172 +
7173 ++extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
7174 + static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7175 + {
7176 +- return per_cpu(gdt_page, cpu).gdt;
7177 ++ return cpu_gdt_table[cpu];
7178 + }
7179 +
7180 + #ifdef CONFIG_X86_64
7181 +@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *
7182 + unsigned long base, unsigned dpl, unsigned flags,
7183 + unsigned short seg)
7184 + {
7185 +- gate->a = (seg << 16) | (base & 0xffff);
7186 +- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7187 ++ gate->gate.offset_low = base;
7188 ++ gate->gate.seg = seg;
7189 ++ gate->gate.reserved = 0;
7190 ++ gate->gate.type = type;
7191 ++ gate->gate.s = 0;
7192 ++ gate->gate.dpl = dpl;
7193 ++ gate->gate.p = 1;
7194 ++ gate->gate.offset_high = base >> 16;
7195 + }
7196 +
7197 + #endif
7198 +@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(str
7199 +
7200 + static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
7201 + {
7202 ++ pax_open_kernel();
7203 + memcpy(&idt[entry], gate, sizeof(*gate));
7204 ++ pax_close_kernel();
7205 + }
7206 +
7207 + static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
7208 + {
7209 ++ pax_open_kernel();
7210 + memcpy(&ldt[entry], desc, 8);
7211 ++ pax_close_kernel();
7212 + }
7213 +
7214 + static inline void
7215 +@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struc
7216 + default: size = sizeof(*gdt); break;
7217 + }
7218 +
7219 ++ pax_open_kernel();
7220 + memcpy(&gdt[entry], desc, size);
7221 ++ pax_close_kernel();
7222 + }
7223 +
7224 + static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7225 +@@ -207,7 +216,9 @@ static inline void native_set_ldt(const
7226 +
7227 + static inline void native_load_tr_desc(void)
7228 + {
7229 ++ pax_open_kernel();
7230 + asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7231 ++ pax_close_kernel();
7232 + }
7233 +
7234 + static inline void native_load_gdt(const struct desc_ptr *dtr)
7235 +@@ -244,8 +255,10 @@ static inline void native_load_tls(struc
7236 + struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7237 + unsigned int i;
7238 +
7239 ++ pax_open_kernel();
7240 + for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7241 + gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7242 ++ pax_close_kernel();
7243 + }
7244 +
7245 + #define _LDT_empty(info) \
7246 +@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct
7247 + desc->limit = (limit >> 16) & 0xf;
7248 + }
7249 +
7250 +-static inline void _set_gate(int gate, unsigned type, void *addr,
7251 ++static inline void _set_gate(int gate, unsigned type, const void *addr,
7252 + unsigned dpl, unsigned ist, unsigned seg)
7253 + {
7254 + gate_desc s;
7255 +@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, u
7256 + * Pentium F0 0F bugfix can have resulted in the mapped
7257 + * IDT being write-protected.
7258 + */
7259 +-static inline void set_intr_gate(unsigned int n, void *addr)
7260 ++static inline void set_intr_gate(unsigned int n, const void *addr)
7261 + {
7262 + BUG_ON((unsigned)n > 0xFF);
7263 + _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7264 +@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsig
7265 + /*
7266 + * This routine sets up an interrupt gate at directory privilege level 3.
7267 + */
7268 +-static inline void set_system_intr_gate(unsigned int n, void *addr)
7269 ++static inline void set_system_intr_gate(unsigned int n, const void *addr)
7270 + {
7271 + BUG_ON((unsigned)n > 0xFF);
7272 + _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7273 + }
7274 +
7275 +-static inline void set_system_trap_gate(unsigned int n, void *addr)
7276 ++static inline void set_system_trap_gate(unsigned int n, const void *addr)
7277 + {
7278 + BUG_ON((unsigned)n > 0xFF);
7279 + _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7280 + }
7281 +
7282 +-static inline void set_trap_gate(unsigned int n, void *addr)
7283 ++static inline void set_trap_gate(unsigned int n, const void *addr)
7284 + {
7285 + BUG_ON((unsigned)n > 0xFF);
7286 + _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7287 +@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigne
7288 + static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7289 + {
7290 + BUG_ON((unsigned)n > 0xFF);
7291 +- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7292 ++ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7293 + }
7294 +
7295 +-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7296 ++static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7297 + {
7298 + BUG_ON((unsigned)n > 0xFF);
7299 + _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7300 + }
7301 +
7302 +-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7303 ++static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7304 + {
7305 + BUG_ON((unsigned)n > 0xFF);
7306 + _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7307 + }
7308 +
7309 ++#ifdef CONFIG_X86_32
7310 ++static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7311 ++{
7312 ++ struct desc_struct d;
7313 ++
7314 ++ if (likely(limit))
7315 ++ limit = (limit - 1UL) >> PAGE_SHIFT;
7316 ++ pack_descriptor(&d, base, limit, 0xFB, 0xC);
7317 ++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7318 ++}
7319 ++#endif
7320 ++
7321 + #endif /* _ASM_X86_DESC_H */
7322 +diff -urNp linux-3.1.1/arch/x86/include/asm/e820.h linux-3.1.1/arch/x86/include/asm/e820.h
7323 +--- linux-3.1.1/arch/x86/include/asm/e820.h 2011-11-11 15:19:27.000000000 -0500
7324 ++++ linux-3.1.1/arch/x86/include/asm/e820.h 2011-11-16 18:39:07.000000000 -0500
7325 +@@ -69,7 +69,7 @@ struct e820map {
7326 + #define ISA_START_ADDRESS 0xa0000
7327 + #define ISA_END_ADDRESS 0x100000
7328 +
7329 +-#define BIOS_BEGIN 0x000a0000
7330 ++#define BIOS_BEGIN 0x000c0000
7331 + #define BIOS_END 0x00100000
7332 +
7333 + #define BIOS_ROM_BASE 0xffe00000
7334 +diff -urNp linux-3.1.1/arch/x86/include/asm/elf.h linux-3.1.1/arch/x86/include/asm/elf.h
7335 +--- linux-3.1.1/arch/x86/include/asm/elf.h 2011-11-11 15:19:27.000000000 -0500
7336 ++++ linux-3.1.1/arch/x86/include/asm/elf.h 2011-11-16 18:39:07.000000000 -0500
7337 +@@ -237,7 +237,25 @@ extern int force_personality32;
7338 + the loader. We need to make sure that it is out of the way of the program
7339 + that it will "exec", and that there is sufficient room for the brk. */
7340 +
7341 ++#ifdef CONFIG_PAX_SEGMEXEC
7342 ++#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7343 ++#else
7344 + #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7345 ++#endif
7346 ++
7347 ++#ifdef CONFIG_PAX_ASLR
7348 ++#ifdef CONFIG_X86_32
7349 ++#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7350 ++
7351 ++#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7352 ++#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7353 ++#else
7354 ++#define PAX_ELF_ET_DYN_BASE 0x400000UL
7355 ++
7356 ++#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7357 ++#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7358 ++#endif
7359 ++#endif
7360 +
7361 + /* This yields a mask that user programs can use to figure out what
7362 + instruction set this CPU supports. This could be done in user space,
7363 +@@ -290,9 +308,7 @@ do { \
7364 +
7365 + #define ARCH_DLINFO \
7366 + do { \
7367 +- if (vdso_enabled) \
7368 +- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7369 +- (unsigned long)current->mm->context.vdso); \
7370 ++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
7371 + } while (0)
7372 +
7373 + #define AT_SYSINFO 32
7374 +@@ -303,7 +319,7 @@ do { \
7375 +
7376 + #endif /* !CONFIG_X86_32 */
7377 +
7378 +-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7379 ++#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7380 +
7381 + #define VDSO_ENTRY \
7382 + ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7383 +@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(s
7384 + extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7385 + #define compat_arch_setup_additional_pages syscall32_setup_pages
7386 +
7387 +-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7388 +-#define arch_randomize_brk arch_randomize_brk
7389 +-
7390 + #endif /* _ASM_X86_ELF_H */
7391 +diff -urNp linux-3.1.1/arch/x86/include/asm/emergency-restart.h linux-3.1.1/arch/x86/include/asm/emergency-restart.h
7392 +--- linux-3.1.1/arch/x86/include/asm/emergency-restart.h 2011-11-11 15:19:27.000000000 -0500
7393 ++++ linux-3.1.1/arch/x86/include/asm/emergency-restart.h 2011-11-16 18:39:07.000000000 -0500
7394 +@@ -15,6 +15,6 @@ enum reboot_type {
7395 +
7396 + extern enum reboot_type reboot_type;
7397 +
7398 +-extern void machine_emergency_restart(void);
7399 ++extern void machine_emergency_restart(void) __noreturn;
7400 +
7401 + #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7402 +diff -urNp linux-3.1.1/arch/x86/include/asm/futex.h linux-3.1.1/arch/x86/include/asm/futex.h
7403 +--- linux-3.1.1/arch/x86/include/asm/futex.h 2011-11-11 15:19:27.000000000 -0500
7404 ++++ linux-3.1.1/arch/x86/include/asm/futex.h 2011-11-16 18:39:07.000000000 -0500
7405 +@@ -12,16 +12,18 @@
7406 + #include <asm/system.h>
7407 +
7408 + #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7409 ++ typecheck(u32 __user *, uaddr); \
7410 + asm volatile("1:\t" insn "\n" \
7411 + "2:\t.section .fixup,\"ax\"\n" \
7412 + "3:\tmov\t%3, %1\n" \
7413 + "\tjmp\t2b\n" \
7414 + "\t.previous\n" \
7415 + _ASM_EXTABLE(1b, 3b) \
7416 +- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7417 ++ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
7418 + : "i" (-EFAULT), "0" (oparg), "1" (0))
7419 +
7420 + #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7421 ++ typecheck(u32 __user *, uaddr); \
7422 + asm volatile("1:\tmovl %2, %0\n" \
7423 + "\tmovl\t%0, %3\n" \
7424 + "\t" insn "\n" \
7425 +@@ -34,7 +36,7 @@
7426 + _ASM_EXTABLE(1b, 4b) \
7427 + _ASM_EXTABLE(2b, 4b) \
7428 + : "=&a" (oldval), "=&r" (ret), \
7429 +- "+m" (*uaddr), "=&r" (tem) \
7430 ++ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
7431 + : "r" (oparg), "i" (-EFAULT), "1" (0))
7432 +
7433 + static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7434 +@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7435 +
7436 + switch (op) {
7437 + case FUTEX_OP_SET:
7438 +- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7439 ++ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7440 + break;
7441 + case FUTEX_OP_ADD:
7442 +- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7443 ++ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7444 + uaddr, oparg);
7445 + break;
7446 + case FUTEX_OP_OR:
7447 +@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7448 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7449 + return -EFAULT;
7450 +
7451 +- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7452 ++ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7453 + "2:\t.section .fixup, \"ax\"\n"
7454 + "3:\tmov %3, %0\n"
7455 + "\tjmp 2b\n"
7456 + "\t.previous\n"
7457 + _ASM_EXTABLE(1b, 3b)
7458 +- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7459 ++ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
7460 + : "i" (-EFAULT), "r" (newval), "1" (oldval)
7461 + : "memory"
7462 + );
7463 +diff -urNp linux-3.1.1/arch/x86/include/asm/hw_irq.h linux-3.1.1/arch/x86/include/asm/hw_irq.h
7464 +--- linux-3.1.1/arch/x86/include/asm/hw_irq.h 2011-11-11 15:19:27.000000000 -0500
7465 ++++ linux-3.1.1/arch/x86/include/asm/hw_irq.h 2011-11-16 18:39:07.000000000 -0500
7466 +@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
7467 + extern void enable_IO_APIC(void);
7468 +
7469 + /* Statistics */
7470 +-extern atomic_t irq_err_count;
7471 +-extern atomic_t irq_mis_count;
7472 ++extern atomic_unchecked_t irq_err_count;
7473 ++extern atomic_unchecked_t irq_mis_count;
7474 +
7475 + /* EISA */
7476 + extern void eisa_set_level_irq(unsigned int irq);
7477 +diff -urNp linux-3.1.1/arch/x86/include/asm/i387.h linux-3.1.1/arch/x86/include/asm/i387.h
7478 +--- linux-3.1.1/arch/x86/include/asm/i387.h 2011-11-11 15:19:27.000000000 -0500
7479 ++++ linux-3.1.1/arch/x86/include/asm/i387.h 2011-11-16 18:39:07.000000000 -0500
7480 +@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7481 + {
7482 + int err;
7483 +
7484 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7485 ++ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7486 ++ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
7487 ++#endif
7488 ++
7489 + /* See comment in fxsave() below. */
7490 + #ifdef CONFIG_AS_FXSAVEQ
7491 + asm volatile("1: fxrstorq %[fx]\n\t"
7492 +@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7493 + {
7494 + int err;
7495 +
7496 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7497 ++ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7498 ++ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7499 ++#endif
7500 ++
7501 + /*
7502 + * Clear the bytes not touched by the fxsave and reserved
7503 + * for the SW usage.
7504 +@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7505 + #endif /* CONFIG_X86_64 */
7506 +
7507 + /* We need a safe address that is cheap to find and that is already
7508 +- in L1 during context switch. The best choices are unfortunately
7509 +- different for UP and SMP */
7510 +-#ifdef CONFIG_SMP
7511 +-#define safe_address (__per_cpu_offset[0])
7512 +-#else
7513 +-#define safe_address (kstat_cpu(0).cpustat.user)
7514 +-#endif
7515 ++ in L1 during context switch. */
7516 ++#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7517 +
7518 + /*
7519 + * These must be called with preempt disabled
7520 +@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7521 + struct thread_info *me = current_thread_info();
7522 + preempt_disable();
7523 + if (me->status & TS_USEDFPU)
7524 +- __save_init_fpu(me->task);
7525 ++ __save_init_fpu(current);
7526 + else
7527 + clts();
7528 + }
7529 +diff -urNp linux-3.1.1/arch/x86/include/asm/io.h linux-3.1.1/arch/x86/include/asm/io.h
7530 +--- linux-3.1.1/arch/x86/include/asm/io.h 2011-11-11 15:19:27.000000000 -0500
7531 ++++ linux-3.1.1/arch/x86/include/asm/io.h 2011-11-16 18:39:07.000000000 -0500
7532 +@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
7533 +
7534 + #include <linux/vmalloc.h>
7535 +
7536 ++#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7537 ++static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7538 ++{
7539 ++ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7540 ++}
7541 ++
7542 ++static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7543 ++{
7544 ++ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7545 ++}
7546 ++
7547 + /*
7548 + * Convert a virtual cached pointer to an uncached pointer
7549 + */
7550 +diff -urNp linux-3.1.1/arch/x86/include/asm/irqflags.h linux-3.1.1/arch/x86/include/asm/irqflags.h
7551 +--- linux-3.1.1/arch/x86/include/asm/irqflags.h 2011-11-11 15:19:27.000000000 -0500
7552 ++++ linux-3.1.1/arch/x86/include/asm/irqflags.h 2011-11-16 18:39:07.000000000 -0500
7553 +@@ -141,6 +141,11 @@ static inline notrace unsigned long arch
7554 + sti; \
7555 + sysexit
7556 +
7557 ++#define GET_CR0_INTO_RDI mov %cr0, %rdi
7558 ++#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7559 ++#define GET_CR3_INTO_RDI mov %cr3, %rdi
7560 ++#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7561 ++
7562 + #else
7563 + #define INTERRUPT_RETURN iret
7564 + #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7565 +diff -urNp linux-3.1.1/arch/x86/include/asm/kprobes.h linux-3.1.1/arch/x86/include/asm/kprobes.h
7566 +--- linux-3.1.1/arch/x86/include/asm/kprobes.h 2011-11-11 15:19:27.000000000 -0500
7567 ++++ linux-3.1.1/arch/x86/include/asm/kprobes.h 2011-11-16 18:39:07.000000000 -0500
7568 +@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7569 + #define RELATIVEJUMP_SIZE 5
7570 + #define RELATIVECALL_OPCODE 0xe8
7571 + #define RELATIVE_ADDR_SIZE 4
7572 +-#define MAX_STACK_SIZE 64
7573 +-#define MIN_STACK_SIZE(ADDR) \
7574 +- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7575 +- THREAD_SIZE - (unsigned long)(ADDR))) \
7576 +- ? (MAX_STACK_SIZE) \
7577 +- : (((unsigned long)current_thread_info()) + \
7578 +- THREAD_SIZE - (unsigned long)(ADDR)))
7579 ++#define MAX_STACK_SIZE 64UL
7580 ++#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7581 +
7582 + #define flush_insn_slot(p) do { } while (0)
7583 +
7584 +diff -urNp linux-3.1.1/arch/x86/include/asm/kvm_host.h linux-3.1.1/arch/x86/include/asm/kvm_host.h
7585 +--- linux-3.1.1/arch/x86/include/asm/kvm_host.h 2011-11-11 15:19:27.000000000 -0500
7586 ++++ linux-3.1.1/arch/x86/include/asm/kvm_host.h 2011-11-16 18:39:07.000000000 -0500
7587 +@@ -456,7 +456,7 @@ struct kvm_arch {
7588 + unsigned int n_requested_mmu_pages;
7589 + unsigned int n_max_mmu_pages;
7590 + unsigned int indirect_shadow_pages;
7591 +- atomic_t invlpg_counter;
7592 ++ atomic_unchecked_t invlpg_counter;
7593 + struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7594 + /*
7595 + * Hash table of struct kvm_mmu_page.
7596 +@@ -636,7 +636,7 @@ struct kvm_x86_ops {
7597 + enum x86_intercept_stage stage);
7598 +
7599 + const struct trace_print_flags *exit_reasons_str;
7600 +-};
7601 ++} __do_const;
7602 +
7603 + struct kvm_arch_async_pf {
7604 + u32 token;
7605 +diff -urNp linux-3.1.1/arch/x86/include/asm/local.h linux-3.1.1/arch/x86/include/asm/local.h
7606 +--- linux-3.1.1/arch/x86/include/asm/local.h 2011-11-11 15:19:27.000000000 -0500
7607 ++++ linux-3.1.1/arch/x86/include/asm/local.h 2011-11-16 18:39:07.000000000 -0500
7608 +@@ -18,26 +18,58 @@ typedef struct {
7609 +
7610 + static inline void local_inc(local_t *l)
7611 + {
7612 +- asm volatile(_ASM_INC "%0"
7613 ++ asm volatile(_ASM_INC "%0\n"
7614 ++
7615 ++#ifdef CONFIG_PAX_REFCOUNT
7616 ++ "jno 0f\n"
7617 ++ _ASM_DEC "%0\n"
7618 ++ "int $4\n0:\n"
7619 ++ _ASM_EXTABLE(0b, 0b)
7620 ++#endif
7621 ++
7622 + : "+m" (l->a.counter));
7623 + }
7624 +
7625 + static inline void local_dec(local_t *l)
7626 + {
7627 +- asm volatile(_ASM_DEC "%0"
7628 ++ asm volatile(_ASM_DEC "%0\n"
7629 ++
7630 ++#ifdef CONFIG_PAX_REFCOUNT
7631 ++ "jno 0f\n"
7632 ++ _ASM_INC "%0\n"
7633 ++ "int $4\n0:\n"
7634 ++ _ASM_EXTABLE(0b, 0b)
7635 ++#endif
7636 ++
7637 + : "+m" (l->a.counter));
7638 + }
7639 +
7640 + static inline void local_add(long i, local_t *l)
7641 + {
7642 +- asm volatile(_ASM_ADD "%1,%0"
7643 ++ asm volatile(_ASM_ADD "%1,%0\n"
7644 ++
7645 ++#ifdef CONFIG_PAX_REFCOUNT
7646 ++ "jno 0f\n"
7647 ++ _ASM_SUB "%1,%0\n"
7648 ++ "int $4\n0:\n"
7649 ++ _ASM_EXTABLE(0b, 0b)
7650 ++#endif
7651 ++
7652 + : "+m" (l->a.counter)
7653 + : "ir" (i));
7654 + }
7655 +
7656 + static inline void local_sub(long i, local_t *l)
7657 + {
7658 +- asm volatile(_ASM_SUB "%1,%0"
7659 ++ asm volatile(_ASM_SUB "%1,%0\n"
7660 ++
7661 ++#ifdef CONFIG_PAX_REFCOUNT
7662 ++ "jno 0f\n"
7663 ++ _ASM_ADD "%1,%0\n"
7664 ++ "int $4\n0:\n"
7665 ++ _ASM_EXTABLE(0b, 0b)
7666 ++#endif
7667 ++
7668 + : "+m" (l->a.counter)
7669 + : "ir" (i));
7670 + }
7671 +@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7672 + {
7673 + unsigned char c;
7674 +
7675 +- asm volatile(_ASM_SUB "%2,%0; sete %1"
7676 ++ asm volatile(_ASM_SUB "%2,%0\n"
7677 ++
7678 ++#ifdef CONFIG_PAX_REFCOUNT
7679 ++ "jno 0f\n"
7680 ++ _ASM_ADD "%2,%0\n"
7681 ++ "int $4\n0:\n"
7682 ++ _ASM_EXTABLE(0b, 0b)
7683 ++#endif
7684 ++
7685 ++ "sete %1\n"
7686 + : "+m" (l->a.counter), "=qm" (c)
7687 + : "ir" (i) : "memory");
7688 + return c;
7689 +@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7690 + {
7691 + unsigned char c;
7692 +
7693 +- asm volatile(_ASM_DEC "%0; sete %1"
7694 ++ asm volatile(_ASM_DEC "%0\n"
7695 ++
7696 ++#ifdef CONFIG_PAX_REFCOUNT
7697 ++ "jno 0f\n"
7698 ++ _ASM_INC "%0\n"
7699 ++ "int $4\n0:\n"
7700 ++ _ASM_EXTABLE(0b, 0b)
7701 ++#endif
7702 ++
7703 ++ "sete %1\n"
7704 + : "+m" (l->a.counter), "=qm" (c)
7705 + : : "memory");
7706 + return c != 0;
7707 +@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7708 + {
7709 + unsigned char c;
7710 +
7711 +- asm volatile(_ASM_INC "%0; sete %1"
7712 ++ asm volatile(_ASM_INC "%0\n"
7713 ++
7714 ++#ifdef CONFIG_PAX_REFCOUNT
7715 ++ "jno 0f\n"
7716 ++ _ASM_DEC "%0\n"
7717 ++ "int $4\n0:\n"
7718 ++ _ASM_EXTABLE(0b, 0b)
7719 ++#endif
7720 ++
7721 ++ "sete %1\n"
7722 + : "+m" (l->a.counter), "=qm" (c)
7723 + : : "memory");
7724 + return c != 0;
7725 +@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7726 + {
7727 + unsigned char c;
7728 +
7729 +- asm volatile(_ASM_ADD "%2,%0; sets %1"
7730 ++ asm volatile(_ASM_ADD "%2,%0\n"
7731 ++
7732 ++#ifdef CONFIG_PAX_REFCOUNT
7733 ++ "jno 0f\n"
7734 ++ _ASM_SUB "%2,%0\n"
7735 ++ "int $4\n0:\n"
7736 ++ _ASM_EXTABLE(0b, 0b)
7737 ++#endif
7738 ++
7739 ++ "sets %1\n"
7740 + : "+m" (l->a.counter), "=qm" (c)
7741 + : "ir" (i) : "memory");
7742 + return c;
7743 +@@ -133,7 +201,15 @@ static inline long local_add_return(long
7744 + #endif
7745 + /* Modern 486+ processor */
7746 + __i = i;
7747 +- asm volatile(_ASM_XADD "%0, %1;"
7748 ++ asm volatile(_ASM_XADD "%0, %1\n"
7749 ++
7750 ++#ifdef CONFIG_PAX_REFCOUNT
7751 ++ "jno 0f\n"
7752 ++ _ASM_MOV "%0,%1\n"
7753 ++ "int $4\n0:\n"
7754 ++ _ASM_EXTABLE(0b, 0b)
7755 ++#endif
7756 ++
7757 + : "+r" (i), "+m" (l->a.counter)
7758 + : : "memory");
7759 + return i + __i;
7760 +diff -urNp linux-3.1.1/arch/x86/include/asm/mman.h linux-3.1.1/arch/x86/include/asm/mman.h
7761 +--- linux-3.1.1/arch/x86/include/asm/mman.h 2011-11-11 15:19:27.000000000 -0500
7762 ++++ linux-3.1.1/arch/x86/include/asm/mman.h 2011-11-16 18:39:07.000000000 -0500
7763 +@@ -5,4 +5,14 @@
7764 +
7765 + #include <asm-generic/mman.h>
7766 +
7767 ++#ifdef __KERNEL__
7768 ++#ifndef __ASSEMBLY__
7769 ++#ifdef CONFIG_X86_32
7770 ++#define arch_mmap_check i386_mmap_check
7771 ++int i386_mmap_check(unsigned long addr, unsigned long len,
7772 ++ unsigned long flags);
7773 ++#endif
7774 ++#endif
7775 ++#endif
7776 ++
7777 + #endif /* _ASM_X86_MMAN_H */
7778 +diff -urNp linux-3.1.1/arch/x86/include/asm/mmu_context.h linux-3.1.1/arch/x86/include/asm/mmu_context.h
7779 +--- linux-3.1.1/arch/x86/include/asm/mmu_context.h 2011-11-11 15:19:27.000000000 -0500
7780 ++++ linux-3.1.1/arch/x86/include/asm/mmu_context.h 2011-11-16 18:39:07.000000000 -0500
7781 +@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
7782 +
7783 + static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7784 + {
7785 ++
7786 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7787 ++ unsigned int i;
7788 ++ pgd_t *pgd;
7789 ++
7790 ++ pax_open_kernel();
7791 ++ pgd = get_cpu_pgd(smp_processor_id());
7792 ++ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7793 ++ set_pgd_batched(pgd+i, native_make_pgd(0));
7794 ++ pax_close_kernel();
7795 ++#endif
7796 ++
7797 + #ifdef CONFIG_SMP
7798 + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7799 + percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7800 +@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
7801 + struct task_struct *tsk)
7802 + {
7803 + unsigned cpu = smp_processor_id();
7804 ++#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7805 ++ int tlbstate = TLBSTATE_OK;
7806 ++#endif
7807 +
7808 + if (likely(prev != next)) {
7809 + #ifdef CONFIG_SMP
7810 ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7811 ++ tlbstate = percpu_read(cpu_tlbstate.state);
7812 ++#endif
7813 + percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7814 + percpu_write(cpu_tlbstate.active_mm, next);
7815 + #endif
7816 + cpumask_set_cpu(cpu, mm_cpumask(next));
7817 +
7818 + /* Re-load page tables */
7819 ++#ifdef CONFIG_PAX_PER_CPU_PGD
7820 ++ pax_open_kernel();
7821 ++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7822 ++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7823 ++ pax_close_kernel();
7824 ++ load_cr3(get_cpu_pgd(cpu));
7825 ++#else
7826 + load_cr3(next->pgd);
7827 ++#endif
7828 +
7829 + /* stop flush ipis for the previous mm */
7830 + cpumask_clear_cpu(cpu, mm_cpumask(prev));
7831 +@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
7832 + */
7833 + if (unlikely(prev->context.ldt != next->context.ldt))
7834 + load_LDT_nolock(&next->context);
7835 +- }
7836 ++
7837 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7838 ++ if (!(__supported_pte_mask & _PAGE_NX)) {
7839 ++ smp_mb__before_clear_bit();
7840 ++ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7841 ++ smp_mb__after_clear_bit();
7842 ++ cpu_set(cpu, next->context.cpu_user_cs_mask);
7843 ++ }
7844 ++#endif
7845 ++
7846 ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7847 ++ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7848 ++ prev->context.user_cs_limit != next->context.user_cs_limit))
7849 ++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7850 + #ifdef CONFIG_SMP
7851 ++ else if (unlikely(tlbstate != TLBSTATE_OK))
7852 ++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7853 ++#endif
7854 ++#endif
7855 ++
7856 ++ }
7857 + else {
7858 ++
7859 ++#ifdef CONFIG_PAX_PER_CPU_PGD
7860 ++ pax_open_kernel();
7861 ++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7862 ++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7863 ++ pax_close_kernel();
7864 ++ load_cr3(get_cpu_pgd(cpu));
7865 ++#endif
7866 ++
7867 ++#ifdef CONFIG_SMP
7868 + percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7869 + BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7870 +
7871 +@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
7872 + * tlb flush IPI delivery. We must reload CR3
7873 + * to make sure to use no freed page tables.
7874 + */
7875 ++
7876 ++#ifndef CONFIG_PAX_PER_CPU_PGD
7877 + load_cr3(next->pgd);
7878 ++#endif
7879 ++
7880 + load_LDT_nolock(&next->context);
7881 ++
7882 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7883 ++ if (!(__supported_pte_mask & _PAGE_NX))
7884 ++ cpu_set(cpu, next->context.cpu_user_cs_mask);
7885 ++#endif
7886 ++
7887 ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7888 ++#ifdef CONFIG_PAX_PAGEEXEC
7889 ++ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7890 ++#endif
7891 ++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7892 ++#endif
7893 ++
7894 + }
7895 +- }
7896 + #endif
7897 ++ }
7898 + }
7899 +
7900 + #define activate_mm(prev, next) \
7901 +diff -urNp linux-3.1.1/arch/x86/include/asm/mmu.h linux-3.1.1/arch/x86/include/asm/mmu.h
7902 +--- linux-3.1.1/arch/x86/include/asm/mmu.h 2011-11-11 15:19:27.000000000 -0500
7903 ++++ linux-3.1.1/arch/x86/include/asm/mmu.h 2011-11-16 18:39:07.000000000 -0500
7904 +@@ -9,7 +9,7 @@
7905 + * we put the segment information here.
7906 + */
7907 + typedef struct {
7908 +- void *ldt;
7909 ++ struct desc_struct *ldt;
7910 + int size;
7911 +
7912 + #ifdef CONFIG_X86_64
7913 +@@ -18,7 +18,19 @@ typedef struct {
7914 + #endif
7915 +
7916 + struct mutex lock;
7917 +- void *vdso;
7918 ++ unsigned long vdso;
7919 ++
7920 ++#ifdef CONFIG_X86_32
7921 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7922 ++ unsigned long user_cs_base;
7923 ++ unsigned long user_cs_limit;
7924 ++
7925 ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7926 ++ cpumask_t cpu_user_cs_mask;
7927 ++#endif
7928 ++
7929 ++#endif
7930 ++#endif
7931 + } mm_context_t;
7932 +
7933 + #ifdef CONFIG_SMP
7934 +diff -urNp linux-3.1.1/arch/x86/include/asm/module.h linux-3.1.1/arch/x86/include/asm/module.h
7935 +--- linux-3.1.1/arch/x86/include/asm/module.h 2011-11-11 15:19:27.000000000 -0500
7936 ++++ linux-3.1.1/arch/x86/include/asm/module.h 2011-11-16 18:39:07.000000000 -0500
7937 +@@ -5,6 +5,7 @@
7938 +
7939 + #ifdef CONFIG_X86_64
7940 + /* X86_64 does not define MODULE_PROC_FAMILY */
7941 ++#define MODULE_PROC_FAMILY ""
7942 + #elif defined CONFIG_M386
7943 + #define MODULE_PROC_FAMILY "386 "
7944 + #elif defined CONFIG_M486
7945 +@@ -59,8 +60,18 @@
7946 + #error unknown processor family
7947 + #endif
7948 +
7949 +-#ifdef CONFIG_X86_32
7950 +-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7951 ++#ifdef CONFIG_PAX_KERNEXEC
7952 ++#define MODULE_PAX_KERNEXEC "KERNEXEC "
7953 ++#else
7954 ++#define MODULE_PAX_KERNEXEC ""
7955 + #endif
7956 +
7957 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
7958 ++#define MODULE_PAX_UDEREF "UDEREF "
7959 ++#else
7960 ++#define MODULE_PAX_UDEREF ""
7961 ++#endif
7962 ++
7963 ++#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
7964 ++
7965 + #endif /* _ASM_X86_MODULE_H */
7966 +diff -urNp linux-3.1.1/arch/x86/include/asm/page_64_types.h linux-3.1.1/arch/x86/include/asm/page_64_types.h
7967 +--- linux-3.1.1/arch/x86/include/asm/page_64_types.h 2011-11-11 15:19:27.000000000 -0500
7968 ++++ linux-3.1.1/arch/x86/include/asm/page_64_types.h 2011-11-16 18:39:07.000000000 -0500
7969 +@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7970 +
7971 + /* duplicated to the one in bootmem.h */
7972 + extern unsigned long max_pfn;
7973 +-extern unsigned long phys_base;
7974 ++extern const unsigned long phys_base;
7975 +
7976 + extern unsigned long __phys_addr(unsigned long);
7977 + #define __phys_reloc_hide(x) (x)
7978 +diff -urNp linux-3.1.1/arch/x86/include/asm/paravirt.h linux-3.1.1/arch/x86/include/asm/paravirt.h
7979 +--- linux-3.1.1/arch/x86/include/asm/paravirt.h 2011-11-11 15:19:27.000000000 -0500
7980 ++++ linux-3.1.1/arch/x86/include/asm/paravirt.h 2011-11-16 18:39:07.000000000 -0500
7981 +@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp,
7982 + val);
7983 + }
7984 +
7985 ++static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
7986 ++{
7987 ++ pgdval_t val = native_pgd_val(pgd);
7988 ++
7989 ++ if (sizeof(pgdval_t) > sizeof(long))
7990 ++ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
7991 ++ val, (u64)val >> 32);
7992 ++ else
7993 ++ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
7994 ++ val);
7995 ++}
7996 ++
7997 + static inline void pgd_clear(pgd_t *pgdp)
7998 + {
7999 + set_pgd(pgdp, __pgd(0));
8000 +@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned
8001 + pv_mmu_ops.set_fixmap(idx, phys, flags);
8002 + }
8003 +
8004 ++#ifdef CONFIG_PAX_KERNEXEC
8005 ++static inline unsigned long pax_open_kernel(void)
8006 ++{
8007 ++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
8008 ++}
8009 ++
8010 ++static inline unsigned long pax_close_kernel(void)
8011 ++{
8012 ++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
8013 ++}
8014 ++#else
8015 ++static inline unsigned long pax_open_kernel(void) { return 0; }
8016 ++static inline unsigned long pax_close_kernel(void) { return 0; }
8017 ++#endif
8018 ++
8019 + #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
8020 +
8021 + static inline int arch_spin_is_locked(struct arch_spinlock *lock)
8022 +@@ -964,7 +991,7 @@ extern void default_banner(void);
8023 +
8024 + #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
8025 + #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
8026 +-#define PARA_INDIRECT(addr) *%cs:addr
8027 ++#define PARA_INDIRECT(addr) *%ss:addr
8028 + #endif
8029 +
8030 + #define INTERRUPT_RETURN \
8031 +@@ -1041,6 +1068,21 @@ extern void default_banner(void);
8032 + PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
8033 + CLBR_NONE, \
8034 + jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
8035 ++
8036 ++#define GET_CR0_INTO_RDI \
8037 ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
8038 ++ mov %rax,%rdi
8039 ++
8040 ++#define SET_RDI_INTO_CR0 \
8041 ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
8042 ++
8043 ++#define GET_CR3_INTO_RDI \
8044 ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
8045 ++ mov %rax,%rdi
8046 ++
8047 ++#define SET_RDI_INTO_CR3 \
8048 ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
8049 ++
8050 + #endif /* CONFIG_X86_32 */
8051 +
8052 + #endif /* __ASSEMBLY__ */
8053 +diff -urNp linux-3.1.1/arch/x86/include/asm/paravirt_types.h linux-3.1.1/arch/x86/include/asm/paravirt_types.h
8054 +--- linux-3.1.1/arch/x86/include/asm/paravirt_types.h 2011-11-11 15:19:27.000000000 -0500
8055 ++++ linux-3.1.1/arch/x86/include/asm/paravirt_types.h 2011-11-16 18:39:07.000000000 -0500
8056 +@@ -84,20 +84,20 @@ struct pv_init_ops {
8057 + */
8058 + unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
8059 + unsigned long addr, unsigned len);
8060 +-};
8061 ++} __no_const;
8062 +
8063 +
8064 + struct pv_lazy_ops {
8065 + /* Set deferred update mode, used for batching operations. */
8066 + void (*enter)(void);
8067 + void (*leave)(void);
8068 +-};
8069 ++} __no_const;
8070 +
8071 + struct pv_time_ops {
8072 + unsigned long long (*sched_clock)(void);
8073 + unsigned long long (*steal_clock)(int cpu);
8074 + unsigned long (*get_tsc_khz)(void);
8075 +-};
8076 ++} __no_const;
8077 +
8078 + struct pv_cpu_ops {
8079 + /* hooks for various privileged instructions */
8080 +@@ -193,7 +193,7 @@ struct pv_cpu_ops {
8081 +
8082 + void (*start_context_switch)(struct task_struct *prev);
8083 + void (*end_context_switch)(struct task_struct *next);
8084 +-};
8085 ++} __no_const;
8086 +
8087 + struct pv_irq_ops {
8088 + /*
8089 +@@ -224,7 +224,7 @@ struct pv_apic_ops {
8090 + unsigned long start_eip,
8091 + unsigned long start_esp);
8092 + #endif
8093 +-};
8094 ++} __no_const;
8095 +
8096 + struct pv_mmu_ops {
8097 + unsigned long (*read_cr2)(void);
8098 +@@ -313,6 +313,7 @@ struct pv_mmu_ops {
8099 + struct paravirt_callee_save make_pud;
8100 +
8101 + void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
8102 ++ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
8103 + #endif /* PAGETABLE_LEVELS == 4 */
8104 + #endif /* PAGETABLE_LEVELS >= 3 */
8105 +
8106 +@@ -324,6 +325,12 @@ struct pv_mmu_ops {
8107 + an mfn. We can tell which is which from the index. */
8108 + void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
8109 + phys_addr_t phys, pgprot_t flags);
8110 ++
8111 ++#ifdef CONFIG_PAX_KERNEXEC
8112 ++ unsigned long (*pax_open_kernel)(void);
8113 ++ unsigned long (*pax_close_kernel)(void);
8114 ++#endif
8115 ++
8116 + };
8117 +
8118 + struct arch_spinlock;
8119 +@@ -334,7 +341,7 @@ struct pv_lock_ops {
8120 + void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
8121 + int (*spin_trylock)(struct arch_spinlock *lock);
8122 + void (*spin_unlock)(struct arch_spinlock *lock);
8123 +-};
8124 ++} __no_const;
8125 +
8126 + /* This contains all the paravirt structures: we get a convenient
8127 + * number for each function using the offset which we use to indicate
8128 +diff -urNp linux-3.1.1/arch/x86/include/asm/pgalloc.h linux-3.1.1/arch/x86/include/asm/pgalloc.h
8129 +--- linux-3.1.1/arch/x86/include/asm/pgalloc.h 2011-11-11 15:19:27.000000000 -0500
8130 ++++ linux-3.1.1/arch/x86/include/asm/pgalloc.h 2011-11-16 18:39:07.000000000 -0500
8131 +@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
8132 + pmd_t *pmd, pte_t *pte)
8133 + {
8134 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8135 ++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
8136 ++}
8137 ++
8138 ++static inline void pmd_populate_user(struct mm_struct *mm,
8139 ++ pmd_t *pmd, pte_t *pte)
8140 ++{
8141 ++ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8142 + set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
8143 + }
8144 +
8145 +diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable-2level.h linux-3.1.1/arch/x86/include/asm/pgtable-2level.h
8146 +--- linux-3.1.1/arch/x86/include/asm/pgtable-2level.h 2011-11-11 15:19:27.000000000 -0500
8147 ++++ linux-3.1.1/arch/x86/include/asm/pgtable-2level.h 2011-11-16 18:39:07.000000000 -0500
8148 +@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
8149 +
8150 + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8151 + {
8152 ++ pax_open_kernel();
8153 + *pmdp = pmd;
8154 ++ pax_close_kernel();
8155 + }
8156 +
8157 + static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
8158 +diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_32.h linux-3.1.1/arch/x86/include/asm/pgtable_32.h
8159 +--- linux-3.1.1/arch/x86/include/asm/pgtable_32.h 2011-11-11 15:19:27.000000000 -0500
8160 ++++ linux-3.1.1/arch/x86/include/asm/pgtable_32.h 2011-11-16 18:39:07.000000000 -0500
8161 +@@ -25,9 +25,6 @@
8162 + struct mm_struct;
8163 + struct vm_area_struct;
8164 +
8165 +-extern pgd_t swapper_pg_dir[1024];
8166 +-extern pgd_t initial_page_table[1024];
8167 +-
8168 + static inline void pgtable_cache_init(void) { }
8169 + static inline void check_pgt_cache(void) { }
8170 + void paging_init(void);
8171 +@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
8172 + # include <asm/pgtable-2level.h>
8173 + #endif
8174 +
8175 ++extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
8176 ++extern pgd_t initial_page_table[PTRS_PER_PGD];
8177 ++#ifdef CONFIG_X86_PAE
8178 ++extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
8179 ++#endif
8180 ++
8181 + #if defined(CONFIG_HIGHPTE)
8182 + #define pte_offset_map(dir, address) \
8183 + ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
8184 +@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
8185 + /* Clear a kernel PTE and flush it from the TLB */
8186 + #define kpte_clear_flush(ptep, vaddr) \
8187 + do { \
8188 ++ pax_open_kernel(); \
8189 + pte_clear(&init_mm, (vaddr), (ptep)); \
8190 ++ pax_close_kernel(); \
8191 + __flush_tlb_one((vaddr)); \
8192 + } while (0)
8193 +
8194 +@@ -74,6 +79,9 @@ do { \
8195 +
8196 + #endif /* !__ASSEMBLY__ */
8197 +
8198 ++#define HAVE_ARCH_UNMAPPED_AREA
8199 ++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
8200 ++
8201 + /*
8202 + * kern_addr_valid() is (1) for FLATMEM and (0) for
8203 + * SPARSEMEM and DISCONTIGMEM
8204 +diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_32_types.h linux-3.1.1/arch/x86/include/asm/pgtable_32_types.h
8205 +--- linux-3.1.1/arch/x86/include/asm/pgtable_32_types.h 2011-11-11 15:19:27.000000000 -0500
8206 ++++ linux-3.1.1/arch/x86/include/asm/pgtable_32_types.h 2011-11-16 18:39:07.000000000 -0500
8207 +@@ -8,7 +8,7 @@
8208 + */
8209 + #ifdef CONFIG_X86_PAE
8210 + # include <asm/pgtable-3level_types.h>
8211 +-# define PMD_SIZE (1UL << PMD_SHIFT)
8212 ++# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8213 + # define PMD_MASK (~(PMD_SIZE - 1))
8214 + #else
8215 + # include <asm/pgtable-2level_types.h>
8216 +@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8217 + # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8218 + #endif
8219 +
8220 ++#ifdef CONFIG_PAX_KERNEXEC
8221 ++#ifndef __ASSEMBLY__
8222 ++extern unsigned char MODULES_EXEC_VADDR[];
8223 ++extern unsigned char MODULES_EXEC_END[];
8224 ++#endif
8225 ++#include <asm/boot.h>
8226 ++#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8227 ++#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8228 ++#else
8229 ++#define ktla_ktva(addr) (addr)
8230 ++#define ktva_ktla(addr) (addr)
8231 ++#endif
8232 ++
8233 + #define MODULES_VADDR VMALLOC_START
8234 + #define MODULES_END VMALLOC_END
8235 + #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8236 +diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable-3level.h linux-3.1.1/arch/x86/include/asm/pgtable-3level.h
8237 +--- linux-3.1.1/arch/x86/include/asm/pgtable-3level.h 2011-11-11 15:19:27.000000000 -0500
8238 ++++ linux-3.1.1/arch/x86/include/asm/pgtable-3level.h 2011-11-16 18:39:07.000000000 -0500
8239 +@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8240 +
8241 + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8242 + {
8243 ++ pax_open_kernel();
8244 + set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8245 ++ pax_close_kernel();
8246 + }
8247 +
8248 + static inline void native_set_pud(pud_t *pudp, pud_t pud)
8249 + {
8250 ++ pax_open_kernel();
8251 + set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8252 ++ pax_close_kernel();
8253 + }
8254 +
8255 + /*
8256 +diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_64.h linux-3.1.1/arch/x86/include/asm/pgtable_64.h
8257 +--- linux-3.1.1/arch/x86/include/asm/pgtable_64.h 2011-11-11 15:19:27.000000000 -0500
8258 ++++ linux-3.1.1/arch/x86/include/asm/pgtable_64.h 2011-11-16 18:39:07.000000000 -0500
8259 +@@ -16,10 +16,13 @@
8260 +
8261 + extern pud_t level3_kernel_pgt[512];
8262 + extern pud_t level3_ident_pgt[512];
8263 ++extern pud_t level3_vmalloc_pgt[512];
8264 ++extern pud_t level3_vmemmap_pgt[512];
8265 ++extern pud_t level2_vmemmap_pgt[512];
8266 + extern pmd_t level2_kernel_pgt[512];
8267 + extern pmd_t level2_fixmap_pgt[512];
8268 +-extern pmd_t level2_ident_pgt[512];
8269 +-extern pgd_t init_level4_pgt[];
8270 ++extern pmd_t level2_ident_pgt[512*2];
8271 ++extern pgd_t init_level4_pgt[512];
8272 +
8273 + #define swapper_pg_dir init_level4_pgt
8274 +
8275 +@@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8276 +
8277 + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8278 + {
8279 ++ pax_open_kernel();
8280 + *pmdp = pmd;
8281 ++ pax_close_kernel();
8282 + }
8283 +
8284 + static inline void native_pmd_clear(pmd_t *pmd)
8285 +@@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_
8286 +
8287 + static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8288 + {
8289 ++ pax_open_kernel();
8290 ++ *pgdp = pgd;
8291 ++ pax_close_kernel();
8292 ++}
8293 ++
8294 ++static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8295 ++{
8296 + *pgdp = pgd;
8297 + }
8298 +
8299 +diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_64_types.h linux-3.1.1/arch/x86/include/asm/pgtable_64_types.h
8300 +--- linux-3.1.1/arch/x86/include/asm/pgtable_64_types.h 2011-11-11 15:19:27.000000000 -0500
8301 ++++ linux-3.1.1/arch/x86/include/asm/pgtable_64_types.h 2011-11-16 18:39:07.000000000 -0500
8302 +@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8303 + #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8304 + #define MODULES_END _AC(0xffffffffff000000, UL)
8305 + #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8306 ++#define MODULES_EXEC_VADDR MODULES_VADDR
8307 ++#define MODULES_EXEC_END MODULES_END
8308 ++
8309 ++#define ktla_ktva(addr) (addr)
8310 ++#define ktva_ktla(addr) (addr)
8311 +
8312 + #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8313 +diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable.h linux-3.1.1/arch/x86/include/asm/pgtable.h
8314 +--- linux-3.1.1/arch/x86/include/asm/pgtable.h 2011-11-11 15:19:27.000000000 -0500
8315 ++++ linux-3.1.1/arch/x86/include/asm/pgtable.h 2011-11-16 18:39:07.000000000 -0500
8316 +@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm
8317 +
8318 + #ifndef __PAGETABLE_PUD_FOLDED
8319 + #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
8320 ++#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
8321 + #define pgd_clear(pgd) native_pgd_clear(pgd)
8322 + #endif
8323 +
8324 +@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm
8325 +
8326 + #define arch_end_context_switch(prev) do {} while(0)
8327 +
8328 ++#define pax_open_kernel() native_pax_open_kernel()
8329 ++#define pax_close_kernel() native_pax_close_kernel()
8330 + #endif /* CONFIG_PARAVIRT */
8331 +
8332 ++#define __HAVE_ARCH_PAX_OPEN_KERNEL
8333 ++#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8334 ++
8335 ++#ifdef CONFIG_PAX_KERNEXEC
8336 ++static inline unsigned long native_pax_open_kernel(void)
8337 ++{
8338 ++ unsigned long cr0;
8339 ++
8340 ++ preempt_disable();
8341 ++ barrier();
8342 ++ cr0 = read_cr0() ^ X86_CR0_WP;
8343 ++ BUG_ON(unlikely(cr0 & X86_CR0_WP));
8344 ++ write_cr0(cr0);
8345 ++ return cr0 ^ X86_CR0_WP;
8346 ++}
8347 ++
8348 ++static inline unsigned long native_pax_close_kernel(void)
8349 ++{
8350 ++ unsigned long cr0;
8351 ++
8352 ++ cr0 = read_cr0() ^ X86_CR0_WP;
8353 ++ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8354 ++ write_cr0(cr0);
8355 ++ barrier();
8356 ++ preempt_enable_no_resched();
8357 ++ return cr0 ^ X86_CR0_WP;
8358 ++}
8359 ++#else
8360 ++static inline unsigned long native_pax_open_kernel(void) { return 0; }
8361 ++static inline unsigned long native_pax_close_kernel(void) { return 0; }
8362 ++#endif
8363 ++
8364 + /*
8365 + * The following only work if pte_present() is true.
8366 + * Undefined behaviour if not..
8367 + */
8368 ++static inline int pte_user(pte_t pte)
8369 ++{
8370 ++ return pte_val(pte) & _PAGE_USER;
8371 ++}
8372 ++
8373 + static inline int pte_dirty(pte_t pte)
8374 + {
8375 + return pte_flags(pte) & _PAGE_DIRTY;
8376 +@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t
8377 + return pte_clear_flags(pte, _PAGE_RW);
8378 + }
8379 +
8380 ++static inline pte_t pte_mkread(pte_t pte)
8381 ++{
8382 ++ return __pte(pte_val(pte) | _PAGE_USER);
8383 ++}
8384 ++
8385 + static inline pte_t pte_mkexec(pte_t pte)
8386 + {
8387 +- return pte_clear_flags(pte, _PAGE_NX);
8388 ++#ifdef CONFIG_X86_PAE
8389 ++ if (__supported_pte_mask & _PAGE_NX)
8390 ++ return pte_clear_flags(pte, _PAGE_NX);
8391 ++ else
8392 ++#endif
8393 ++ return pte_set_flags(pte, _PAGE_USER);
8394 ++}
8395 ++
8396 ++static inline pte_t pte_exprotect(pte_t pte)
8397 ++{
8398 ++#ifdef CONFIG_X86_PAE
8399 ++ if (__supported_pte_mask & _PAGE_NX)
8400 ++ return pte_set_flags(pte, _PAGE_NX);
8401 ++ else
8402 ++#endif
8403 ++ return pte_clear_flags(pte, _PAGE_USER);
8404 + }
8405 +
8406 + static inline pte_t pte_mkdirty(pte_t pte)
8407 +@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long
8408 + #endif
8409 +
8410 + #ifndef __ASSEMBLY__
8411 ++
8412 ++#ifdef CONFIG_PAX_PER_CPU_PGD
8413 ++extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8414 ++static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8415 ++{
8416 ++ return cpu_pgd[cpu];
8417 ++}
8418 ++#endif
8419 ++
8420 + #include <linux/mm_types.h>
8421 +
8422 + static inline int pte_none(pte_t pte)
8423 +@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *p
8424 +
8425 + static inline int pgd_bad(pgd_t pgd)
8426 + {
8427 +- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8428 ++ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8429 + }
8430 +
8431 + static inline int pgd_none(pgd_t pgd)
8432 +@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
8433 + * pgd_offset() returns a (pgd_t *)
8434 + * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8435 + */
8436 +-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8437 ++#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8438 ++
8439 ++#ifdef CONFIG_PAX_PER_CPU_PGD
8440 ++#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8441 ++#endif
8442 ++
8443 + /*
8444 + * a shortcut which implies the use of the kernel's pgd, instead
8445 + * of a process's
8446 +@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
8447 + #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8448 + #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8449 +
8450 ++#ifdef CONFIG_X86_32
8451 ++#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8452 ++#else
8453 ++#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8454 ++#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8455 ++
8456 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
8457 ++#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8458 ++#else
8459 ++#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8460 ++#endif
8461 ++
8462 ++#endif
8463 ++
8464 + #ifndef __ASSEMBLY__
8465 +
8466 + extern int direct_gbpages;
8467 +@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(st
8468 + * dst and src can be on the same page, but the range must not overlap,
8469 + * and must not cross a page boundary.
8470 + */
8471 +-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8472 ++static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8473 + {
8474 +- memcpy(dst, src, count * sizeof(pgd_t));
8475 ++ pax_open_kernel();
8476 ++ while (count--)
8477 ++ *dst++ = *src++;
8478 ++ pax_close_kernel();
8479 + }
8480 +
8481 ++#ifdef CONFIG_PAX_PER_CPU_PGD
8482 ++extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8483 ++#endif
8484 ++
8485 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8486 ++extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8487 ++#else
8488 ++static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8489 ++#endif
8490 +
8491 + #include <asm-generic/pgtable.h>
8492 + #endif /* __ASSEMBLY__ */
8493 +diff -urNp linux-3.1.1/arch/x86/include/asm/pgtable_types.h linux-3.1.1/arch/x86/include/asm/pgtable_types.h
8494 +--- linux-3.1.1/arch/x86/include/asm/pgtable_types.h 2011-11-11 15:19:27.000000000 -0500
8495 ++++ linux-3.1.1/arch/x86/include/asm/pgtable_types.h 2011-11-16 18:39:07.000000000 -0500
8496 +@@ -16,13 +16,12 @@
8497 + #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8498 + #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8499 + #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8500 +-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8501 ++#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8502 + #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8503 + #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8504 + #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8505 +-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8506 +-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8507 +-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8508 ++#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8509 ++#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8510 + #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8511 +
8512 + /* If _PAGE_BIT_PRESENT is clear, we use these: */
8513 +@@ -40,7 +39,6 @@
8514 + #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8515 + #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8516 + #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8517 +-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8518 + #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8519 + #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8520 + #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8521 +@@ -57,8 +55,10 @@
8522 +
8523 + #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8524 + #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8525 +-#else
8526 ++#elif defined(CONFIG_KMEMCHECK)
8527 + #define _PAGE_NX (_AT(pteval_t, 0))
8528 ++#else
8529 ++#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8530 + #endif
8531 +
8532 + #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8533 +@@ -96,6 +96,9 @@
8534 + #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8535 + _PAGE_ACCESSED)
8536 +
8537 ++#define PAGE_READONLY_NOEXEC PAGE_READONLY
8538 ++#define PAGE_SHARED_NOEXEC PAGE_SHARED
8539 ++
8540 + #define __PAGE_KERNEL_EXEC \
8541 + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8542 + #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8543 +@@ -106,7 +109,7 @@
8544 + #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8545 + #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8546 + #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8547 +-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8548 ++#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8549 + #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
8550 + #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
8551 + #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8552 +@@ -168,8 +171,8 @@
8553 + * bits are combined, this will alow user to access the high address mapped
8554 + * VDSO in the presence of CONFIG_COMPAT_VDSO
8555 + */
8556 +-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8557 +-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8558 ++#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8559 ++#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8560 + #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8561 + #endif
8562 +
8563 +@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8564 + {
8565 + return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8566 + }
8567 ++#endif
8568 +
8569 ++#if PAGETABLE_LEVELS == 3
8570 ++#include <asm-generic/pgtable-nopud.h>
8571 ++#endif
8572 ++
8573 ++#if PAGETABLE_LEVELS == 2
8574 ++#include <asm-generic/pgtable-nopmd.h>
8575 ++#endif
8576 ++
8577 ++#ifndef __ASSEMBLY__
8578 + #if PAGETABLE_LEVELS > 3
8579 + typedef struct { pudval_t pud; } pud_t;
8580 +
8581 +@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pu
8582 + return pud.pud;
8583 + }
8584 + #else
8585 +-#include <asm-generic/pgtable-nopud.h>
8586 +-
8587 + static inline pudval_t native_pud_val(pud_t pud)
8588 + {
8589 + return native_pgd_val(pud.pgd);
8590 +@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pm
8591 + return pmd.pmd;
8592 + }
8593 + #else
8594 +-#include <asm-generic/pgtable-nopmd.h>
8595 +-
8596 + static inline pmdval_t native_pmd_val(pmd_t pmd)
8597 + {
8598 + return native_pgd_val(pmd.pud.pgd);
8599 +@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
8600 +
8601 + extern pteval_t __supported_pte_mask;
8602 + extern void set_nx(void);
8603 +-extern int nx_enabled;
8604 +
8605 + #define pgprot_writecombine pgprot_writecombine
8606 + extern pgprot_t pgprot_writecombine(pgprot_t prot);
8607 +diff -urNp linux-3.1.1/arch/x86/include/asm/processor.h linux-3.1.1/arch/x86/include/asm/processor.h
8608 +--- linux-3.1.1/arch/x86/include/asm/processor.h 2011-11-11 15:19:27.000000000 -0500
8609 ++++ linux-3.1.1/arch/x86/include/asm/processor.h 2011-11-16 18:39:07.000000000 -0500
8610 +@@ -266,7 +266,7 @@ struct tss_struct {
8611 +
8612 + } ____cacheline_aligned;
8613 +
8614 +-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8615 ++extern struct tss_struct init_tss[NR_CPUS];
8616 +
8617 + /*
8618 + * Save the original ist values for checking stack pointers during debugging
8619 +@@ -858,11 +858,18 @@ static inline void spin_lock_prefetch(co
8620 + */
8621 + #define TASK_SIZE PAGE_OFFSET
8622 + #define TASK_SIZE_MAX TASK_SIZE
8623 ++
8624 ++#ifdef CONFIG_PAX_SEGMEXEC
8625 ++#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8626 ++#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8627 ++#else
8628 + #define STACK_TOP TASK_SIZE
8629 +-#define STACK_TOP_MAX STACK_TOP
8630 ++#endif
8631 ++
8632 ++#define STACK_TOP_MAX TASK_SIZE
8633 +
8634 + #define INIT_THREAD { \
8635 +- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8636 ++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8637 + .vm86_info = NULL, \
8638 + .sysenter_cs = __KERNEL_CS, \
8639 + .io_bitmap_ptr = NULL, \
8640 +@@ -876,7 +883,7 @@ static inline void spin_lock_prefetch(co
8641 + */
8642 + #define INIT_TSS { \
8643 + .x86_tss = { \
8644 +- .sp0 = sizeof(init_stack) + (long)&init_stack, \
8645 ++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8646 + .ss0 = __KERNEL_DS, \
8647 + .ss1 = __KERNEL_CS, \
8648 + .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8649 +@@ -887,11 +894,7 @@ static inline void spin_lock_prefetch(co
8650 + extern unsigned long thread_saved_pc(struct task_struct *tsk);
8651 +
8652 + #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8653 +-#define KSTK_TOP(info) \
8654 +-({ \
8655 +- unsigned long *__ptr = (unsigned long *)(info); \
8656 +- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8657 +-})
8658 ++#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8659 +
8660 + /*
8661 + * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8662 +@@ -906,7 +909,7 @@ extern unsigned long thread_saved_pc(str
8663 + #define task_pt_regs(task) \
8664 + ({ \
8665 + struct pt_regs *__regs__; \
8666 +- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8667 ++ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8668 + __regs__ - 1; \
8669 + })
8670 +
8671 +@@ -916,13 +919,13 @@ extern unsigned long thread_saved_pc(str
8672 + /*
8673 + * User space process size. 47bits minus one guard page.
8674 + */
8675 +-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8676 ++#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8677 +
8678 + /* This decides where the kernel will search for a free chunk of vm
8679 + * space during mmap's.
8680 + */
8681 + #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8682 +- 0xc0000000 : 0xFFFFe000)
8683 ++ 0xc0000000 : 0xFFFFf000)
8684 +
8685 + #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8686 + IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8687 +@@ -933,11 +936,11 @@ extern unsigned long thread_saved_pc(str
8688 + #define STACK_TOP_MAX TASK_SIZE_MAX
8689 +
8690 + #define INIT_THREAD { \
8691 +- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8692 ++ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8693 + }
8694 +
8695 + #define INIT_TSS { \
8696 +- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8697 ++ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8698 + }
8699 +
8700 + /*
8701 +@@ -959,6 +962,10 @@ extern void start_thread(struct pt_regs
8702 + */
8703 + #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8704 +
8705 ++#ifdef CONFIG_PAX_SEGMEXEC
8706 ++#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8707 ++#endif
8708 ++
8709 + #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8710 +
8711 + /* Get/set a process' ability to use the timestamp counter instruction */
8712 +diff -urNp linux-3.1.1/arch/x86/include/asm/ptrace.h linux-3.1.1/arch/x86/include/asm/ptrace.h
8713 +--- linux-3.1.1/arch/x86/include/asm/ptrace.h 2011-11-11 15:19:27.000000000 -0500
8714 ++++ linux-3.1.1/arch/x86/include/asm/ptrace.h 2011-11-16 18:39:07.000000000 -0500
8715 +@@ -156,28 +156,29 @@ static inline unsigned long regs_return_
8716 + }
8717 +
8718 + /*
8719 +- * user_mode_vm(regs) determines whether a register set came from user mode.
8720 ++ * user_mode(regs) determines whether a register set came from user mode.
8721 + * This is true if V8086 mode was enabled OR if the register set was from
8722 + * protected mode with RPL-3 CS value. This tricky test checks that with
8723 + * one comparison. Many places in the kernel can bypass this full check
8724 +- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8725 ++ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8726 ++ * be used.
8727 + */
8728 +-static inline int user_mode(struct pt_regs *regs)
8729 ++static inline int user_mode_novm(struct pt_regs *regs)
8730 + {
8731 + #ifdef CONFIG_X86_32
8732 + return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8733 + #else
8734 +- return !!(regs->cs & 3);
8735 ++ return !!(regs->cs & SEGMENT_RPL_MASK);
8736 + #endif
8737 + }
8738 +
8739 +-static inline int user_mode_vm(struct pt_regs *regs)
8740 ++static inline int user_mode(struct pt_regs *regs)
8741 + {
8742 + #ifdef CONFIG_X86_32
8743 + return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8744 + USER_RPL;
8745 + #else
8746 +- return user_mode(regs);
8747 ++ return user_mode_novm(regs);
8748 + #endif
8749 + }
8750 +
8751 +@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_r
8752 + #ifdef CONFIG_X86_64
8753 + static inline bool user_64bit_mode(struct pt_regs *regs)
8754 + {
8755 ++ unsigned long cs = regs->cs & 0xffff;
8756 + #ifndef CONFIG_PARAVIRT
8757 + /*
8758 + * On non-paravirt systems, this is the only long mode CPL 3
8759 + * selector. We do not allow long mode selectors in the LDT.
8760 + */
8761 +- return regs->cs == __USER_CS;
8762 ++ return cs == __USER_CS;
8763 + #else
8764 + /* Headers are too twisted for this to go in paravirt.h. */
8765 +- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
8766 ++ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
8767 + #endif
8768 + }
8769 + #endif
8770 +diff -urNp linux-3.1.1/arch/x86/include/asm/reboot.h linux-3.1.1/arch/x86/include/asm/reboot.h
8771 +--- linux-3.1.1/arch/x86/include/asm/reboot.h 2011-11-11 15:19:27.000000000 -0500
8772 ++++ linux-3.1.1/arch/x86/include/asm/reboot.h 2011-11-16 18:39:07.000000000 -0500
8773 +@@ -6,19 +6,19 @@
8774 + struct pt_regs;
8775 +
8776 + struct machine_ops {
8777 +- void (*restart)(char *cmd);
8778 +- void (*halt)(void);
8779 +- void (*power_off)(void);
8780 ++ void (* __noreturn restart)(char *cmd);
8781 ++ void (* __noreturn halt)(void);
8782 ++ void (* __noreturn power_off)(void);
8783 + void (*shutdown)(void);
8784 + void (*crash_shutdown)(struct pt_regs *);
8785 +- void (*emergency_restart)(void);
8786 +-};
8787 ++ void (* __noreturn emergency_restart)(void);
8788 ++} __no_const;
8789 +
8790 + extern struct machine_ops machine_ops;
8791 +
8792 + void native_machine_crash_shutdown(struct pt_regs *regs);
8793 + void native_machine_shutdown(void);
8794 +-void machine_real_restart(unsigned int type);
8795 ++void machine_real_restart(unsigned int type) __noreturn;
8796 + /* These must match dispatch_table in reboot_32.S */
8797 + #define MRR_BIOS 0
8798 + #define MRR_APM 1
8799 +diff -urNp linux-3.1.1/arch/x86/include/asm/rwsem.h linux-3.1.1/arch/x86/include/asm/rwsem.h
8800 +--- linux-3.1.1/arch/x86/include/asm/rwsem.h 2011-11-11 15:19:27.000000000 -0500
8801 ++++ linux-3.1.1/arch/x86/include/asm/rwsem.h 2011-11-16 18:39:07.000000000 -0500
8802 +@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8803 + {
8804 + asm volatile("# beginning down_read\n\t"
8805 + LOCK_PREFIX _ASM_INC "(%1)\n\t"
8806 ++
8807 ++#ifdef CONFIG_PAX_REFCOUNT
8808 ++ "jno 0f\n"
8809 ++ LOCK_PREFIX _ASM_DEC "(%1)\n"
8810 ++ "int $4\n0:\n"
8811 ++ _ASM_EXTABLE(0b, 0b)
8812 ++#endif
8813 ++
8814 + /* adds 0x00000001 */
8815 + " jns 1f\n"
8816 + " call call_rwsem_down_read_failed\n"
8817 +@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8818 + "1:\n\t"
8819 + " mov %1,%2\n\t"
8820 + " add %3,%2\n\t"
8821 ++
8822 ++#ifdef CONFIG_PAX_REFCOUNT
8823 ++ "jno 0f\n"
8824 ++ "sub %3,%2\n"
8825 ++ "int $4\n0:\n"
8826 ++ _ASM_EXTABLE(0b, 0b)
8827 ++#endif
8828 ++
8829 + " jle 2f\n\t"
8830 + LOCK_PREFIX " cmpxchg %2,%0\n\t"
8831 + " jnz 1b\n\t"
8832 +@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8833 + long tmp;
8834 + asm volatile("# beginning down_write\n\t"
8835 + LOCK_PREFIX " xadd %1,(%2)\n\t"
8836 ++
8837 ++#ifdef CONFIG_PAX_REFCOUNT
8838 ++ "jno 0f\n"
8839 ++ "mov %1,(%2)\n"
8840 ++ "int $4\n0:\n"
8841 ++ _ASM_EXTABLE(0b, 0b)
8842 ++#endif
8843 ++
8844 + /* adds 0xffff0001, returns the old value */
8845 + " test %1,%1\n\t"
8846 + /* was the count 0 before? */
8847 +@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8848 + long tmp;
8849 + asm volatile("# beginning __up_read\n\t"
8850 + LOCK_PREFIX " xadd %1,(%2)\n\t"
8851 ++
8852 ++#ifdef CONFIG_PAX_REFCOUNT
8853 ++ "jno 0f\n"
8854 ++ "mov %1,(%2)\n"
8855 ++ "int $4\n0:\n"
8856 ++ _ASM_EXTABLE(0b, 0b)
8857 ++#endif
8858 ++
8859 + /* subtracts 1, returns the old value */
8860 + " jns 1f\n\t"
8861 + " call call_rwsem_wake\n" /* expects old value in %edx */
8862 +@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8863 + long tmp;
8864 + asm volatile("# beginning __up_write\n\t"
8865 + LOCK_PREFIX " xadd %1,(%2)\n\t"
8866 ++
8867 ++#ifdef CONFIG_PAX_REFCOUNT
8868 ++ "jno 0f\n"
8869 ++ "mov %1,(%2)\n"
8870 ++ "int $4\n0:\n"
8871 ++ _ASM_EXTABLE(0b, 0b)
8872 ++#endif
8873 ++
8874 + /* subtracts 0xffff0001, returns the old value */
8875 + " jns 1f\n\t"
8876 + " call call_rwsem_wake\n" /* expects old value in %edx */
8877 +@@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8878 + {
8879 + asm volatile("# beginning __downgrade_write\n\t"
8880 + LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8881 ++
8882 ++#ifdef CONFIG_PAX_REFCOUNT
8883 ++ "jno 0f\n"
8884 ++ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8885 ++ "int $4\n0:\n"
8886 ++ _ASM_EXTABLE(0b, 0b)
8887 ++#endif
8888 ++
8889 + /*
8890 + * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8891 + * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8892 +@@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8893 + */
8894 + static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8895 + {
8896 +- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8897 ++ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8898 ++
8899 ++#ifdef CONFIG_PAX_REFCOUNT
8900 ++ "jno 0f\n"
8901 ++ LOCK_PREFIX _ASM_SUB "%1,%0\n"
8902 ++ "int $4\n0:\n"
8903 ++ _ASM_EXTABLE(0b, 0b)
8904 ++#endif
8905 ++
8906 + : "+m" (sem->count)
8907 + : "er" (delta));
8908 + }
8909 +@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8910 + {
8911 + long tmp = delta;
8912 +
8913 +- asm volatile(LOCK_PREFIX "xadd %0,%1"
8914 ++ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8915 ++
8916 ++#ifdef CONFIG_PAX_REFCOUNT
8917 ++ "jno 0f\n"
8918 ++ "mov %0,%1\n"
8919 ++ "int $4\n0:\n"
8920 ++ _ASM_EXTABLE(0b, 0b)
8921 ++#endif
8922 ++
8923 + : "+r" (tmp), "+m" (sem->count)
8924 + : : "memory");
8925 +
8926 +diff -urNp linux-3.1.1/arch/x86/include/asm/segment.h linux-3.1.1/arch/x86/include/asm/segment.h
8927 +--- linux-3.1.1/arch/x86/include/asm/segment.h 2011-11-11 15:19:27.000000000 -0500
8928 ++++ linux-3.1.1/arch/x86/include/asm/segment.h 2011-11-16 18:39:07.000000000 -0500
8929 +@@ -64,10 +64,15 @@
8930 + * 26 - ESPFIX small SS
8931 + * 27 - per-cpu [ offset to per-cpu data area ]
8932 + * 28 - stack_canary-20 [ for stack protector ]
8933 +- * 29 - unused
8934 +- * 30 - unused
8935 ++ * 29 - PCI BIOS CS
8936 ++ * 30 - PCI BIOS DS
8937 + * 31 - TSS for double fault handler
8938 + */
8939 ++#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
8940 ++#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
8941 ++#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
8942 ++#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
8943 ++
8944 + #define GDT_ENTRY_TLS_MIN 6
8945 + #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
8946 +
8947 +@@ -79,6 +84,8 @@
8948 +
8949 + #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8950 +
8951 ++#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8952 ++
8953 + #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8954 +
8955 + #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8956 +@@ -104,6 +111,12 @@
8957 + #define __KERNEL_STACK_CANARY 0
8958 + #endif
8959 +
8960 ++#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8961 ++#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8962 ++
8963 ++#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8964 ++#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8965 ++
8966 + #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8967 +
8968 + /*
8969 +@@ -141,7 +154,7 @@
8970 + */
8971 +
8972 + /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8973 +-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8974 ++#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8975 +
8976 +
8977 + #else
8978 +@@ -165,6 +178,8 @@
8979 + #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
8980 + #define __USER32_DS __USER_DS
8981 +
8982 ++#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8983 ++
8984 + #define GDT_ENTRY_TSS 8 /* needs two entries */
8985 + #define GDT_ENTRY_LDT 10 /* needs two entries */
8986 + #define GDT_ENTRY_TLS_MIN 12
8987 +@@ -185,6 +200,7 @@
8988 + #endif
8989 +
8990 + #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8991 ++#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8992 + #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8993 + #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8994 + #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8995 +diff -urNp linux-3.1.1/arch/x86/include/asm/smp.h linux-3.1.1/arch/x86/include/asm/smp.h
8996 +--- linux-3.1.1/arch/x86/include/asm/smp.h 2011-11-11 15:19:27.000000000 -0500
8997 ++++ linux-3.1.1/arch/x86/include/asm/smp.h 2011-11-16 18:39:07.000000000 -0500
8998 +@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8999 + /* cpus sharing the last level cache: */
9000 + DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
9001 + DECLARE_PER_CPU(u16, cpu_llc_id);
9002 +-DECLARE_PER_CPU(int, cpu_number);
9003 ++DECLARE_PER_CPU(unsigned int, cpu_number);
9004 +
9005 + static inline struct cpumask *cpu_sibling_mask(int cpu)
9006 + {
9007 +@@ -77,7 +77,7 @@ struct smp_ops {
9008 +
9009 + void (*send_call_func_ipi)(const struct cpumask *mask);
9010 + void (*send_call_func_single_ipi)(int cpu);
9011 +-};
9012 ++} __no_const;
9013 +
9014 + /* Globals due to paravirt */
9015 + extern void set_cpu_sibling_map(int cpu);
9016 +@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
9017 + extern int safe_smp_processor_id(void);
9018 +
9019 + #elif defined(CONFIG_X86_64_SMP)
9020 +-#define raw_smp_processor_id() (percpu_read(cpu_number))
9021 +-
9022 +-#define stack_smp_processor_id() \
9023 +-({ \
9024 +- struct thread_info *ti; \
9025 +- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
9026 +- ti->cpu; \
9027 +-})
9028 ++#define raw_smp_processor_id() (percpu_read(cpu_number))
9029 ++#define stack_smp_processor_id() raw_smp_processor_id()
9030 + #define safe_smp_processor_id() smp_processor_id()
9031 +
9032 + #endif
9033 +diff -urNp linux-3.1.1/arch/x86/include/asm/spinlock.h linux-3.1.1/arch/x86/include/asm/spinlock.h
9034 +--- linux-3.1.1/arch/x86/include/asm/spinlock.h 2011-11-11 15:19:27.000000000 -0500
9035 ++++ linux-3.1.1/arch/x86/include/asm/spinlock.h 2011-11-16 18:39:07.000000000 -0500
9036 +@@ -248,6 +248,14 @@ static inline int arch_write_can_lock(ar
9037 + static inline void arch_read_lock(arch_rwlock_t *rw)
9038 + {
9039 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
9040 ++
9041 ++#ifdef CONFIG_PAX_REFCOUNT
9042 ++ "jno 0f\n"
9043 ++ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
9044 ++ "int $4\n0:\n"
9045 ++ _ASM_EXTABLE(0b, 0b)
9046 ++#endif
9047 ++
9048 + "jns 1f\n"
9049 + "call __read_lock_failed\n\t"
9050 + "1:\n"
9051 +@@ -257,6 +265,14 @@ static inline void arch_read_lock(arch_r
9052 + static inline void arch_write_lock(arch_rwlock_t *rw)
9053 + {
9054 + asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
9055 ++
9056 ++#ifdef CONFIG_PAX_REFCOUNT
9057 ++ "jno 0f\n"
9058 ++ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
9059 ++ "int $4\n0:\n"
9060 ++ _ASM_EXTABLE(0b, 0b)
9061 ++#endif
9062 ++
9063 + "jz 1f\n"
9064 + "call __write_lock_failed\n\t"
9065 + "1:\n"
9066 +@@ -286,13 +302,29 @@ static inline int arch_write_trylock(arc
9067 +
9068 + static inline void arch_read_unlock(arch_rwlock_t *rw)
9069 + {
9070 +- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
9071 ++ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
9072 ++
9073 ++#ifdef CONFIG_PAX_REFCOUNT
9074 ++ "jno 0f\n"
9075 ++ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
9076 ++ "int $4\n0:\n"
9077 ++ _ASM_EXTABLE(0b, 0b)
9078 ++#endif
9079 ++
9080 + :"+m" (rw->lock) : : "memory");
9081 + }
9082 +
9083 + static inline void arch_write_unlock(arch_rwlock_t *rw)
9084 + {
9085 +- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
9086 ++ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
9087 ++
9088 ++#ifdef CONFIG_PAX_REFCOUNT
9089 ++ "jno 0f\n"
9090 ++ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
9091 ++ "int $4\n0:\n"
9092 ++ _ASM_EXTABLE(0b, 0b)
9093 ++#endif
9094 ++
9095 + : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
9096 + }
9097 +
9098 +diff -urNp linux-3.1.1/arch/x86/include/asm/stackprotector.h linux-3.1.1/arch/x86/include/asm/stackprotector.h
9099 +--- linux-3.1.1/arch/x86/include/asm/stackprotector.h 2011-11-11 15:19:27.000000000 -0500
9100 ++++ linux-3.1.1/arch/x86/include/asm/stackprotector.h 2011-11-16 18:39:07.000000000 -0500
9101 +@@ -48,7 +48,7 @@
9102 + * head_32 for boot CPU and setup_per_cpu_areas() for others.
9103 + */
9104 + #define GDT_STACK_CANARY_INIT \
9105 +- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
9106 ++ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
9107 +
9108 + /*
9109 + * Initialize the stackprotector canary value.
9110 +@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
9111 +
9112 + static inline void load_stack_canary_segment(void)
9113 + {
9114 +-#ifdef CONFIG_X86_32
9115 ++#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
9116 + asm volatile ("mov %0, %%gs" : : "r" (0));
9117 + #endif
9118 + }
9119 +diff -urNp linux-3.1.1/arch/x86/include/asm/stacktrace.h linux-3.1.1/arch/x86/include/asm/stacktrace.h
9120 +--- linux-3.1.1/arch/x86/include/asm/stacktrace.h 2011-11-11 15:19:27.000000000 -0500
9121 ++++ linux-3.1.1/arch/x86/include/asm/stacktrace.h 2011-11-16 18:39:07.000000000 -0500
9122 +@@ -11,28 +11,20 @@
9123 +
9124 + extern int kstack_depth_to_print;
9125 +
9126 +-struct thread_info;
9127 ++struct task_struct;
9128 + struct stacktrace_ops;
9129 +
9130 +-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
9131 +- unsigned long *stack,
9132 +- unsigned long bp,
9133 +- const struct stacktrace_ops *ops,
9134 +- void *data,
9135 +- unsigned long *end,
9136 +- int *graph);
9137 +-
9138 +-extern unsigned long
9139 +-print_context_stack(struct thread_info *tinfo,
9140 +- unsigned long *stack, unsigned long bp,
9141 +- const struct stacktrace_ops *ops, void *data,
9142 +- unsigned long *end, int *graph);
9143 +-
9144 +-extern unsigned long
9145 +-print_context_stack_bp(struct thread_info *tinfo,
9146 +- unsigned long *stack, unsigned long bp,
9147 +- const struct stacktrace_ops *ops, void *data,
9148 +- unsigned long *end, int *graph);
9149 ++typedef unsigned long walk_stack_t(struct task_struct *task,
9150 ++ void *stack_start,
9151 ++ unsigned long *stack,
9152 ++ unsigned long bp,
9153 ++ const struct stacktrace_ops *ops,
9154 ++ void *data,
9155 ++ unsigned long *end,
9156 ++ int *graph);
9157 ++
9158 ++extern walk_stack_t print_context_stack;
9159 ++extern walk_stack_t print_context_stack_bp;
9160 +
9161 + /* Generic stack tracer with callbacks */
9162 +
9163 +@@ -40,7 +32,7 @@ struct stacktrace_ops {
9164 + void (*address)(void *data, unsigned long address, int reliable);
9165 + /* On negative return stop dumping */
9166 + int (*stack)(void *data, char *name);
9167 +- walk_stack_t walk_stack;
9168 ++ walk_stack_t *walk_stack;
9169 + };
9170 +
9171 + void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
9172 +diff -urNp linux-3.1.1/arch/x86/include/asm/sys_ia32.h linux-3.1.1/arch/x86/include/asm/sys_ia32.h
9173 +--- linux-3.1.1/arch/x86/include/asm/sys_ia32.h 2011-11-11 15:19:27.000000000 -0500
9174 ++++ linux-3.1.1/arch/x86/include/asm/sys_ia32.h 2011-11-16 18:39:07.000000000 -0500
9175 +@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int
9176 + compat_sigset_t __user *, unsigned int);
9177 + asmlinkage long sys32_alarm(unsigned int);
9178 +
9179 +-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
9180 ++asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
9181 + asmlinkage long sys32_sysfs(int, u32, u32);
9182 +
9183 + asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
9184 +diff -urNp linux-3.1.1/arch/x86/include/asm/system.h linux-3.1.1/arch/x86/include/asm/system.h
9185 +--- linux-3.1.1/arch/x86/include/asm/system.h 2011-11-11 15:19:27.000000000 -0500
9186 ++++ linux-3.1.1/arch/x86/include/asm/system.h 2011-11-16 18:39:07.000000000 -0500
9187 +@@ -129,7 +129,7 @@ do { \
9188 + "call __switch_to\n\t" \
9189 + "movq "__percpu_arg([current_task])",%%rsi\n\t" \
9190 + __switch_canary \
9191 +- "movq %P[thread_info](%%rsi),%%r8\n\t" \
9192 ++ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9193 + "movq %%rax,%%rdi\n\t" \
9194 + "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9195 + "jnz ret_from_fork\n\t" \
9196 +@@ -140,7 +140,7 @@ do { \
9197 + [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9198 + [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9199 + [_tif_fork] "i" (_TIF_FORK), \
9200 +- [thread_info] "i" (offsetof(struct task_struct, stack)), \
9201 ++ [thread_info] "m" (current_tinfo), \
9202 + [current_task] "m" (current_task) \
9203 + __switch_canary_iparam \
9204 + : "memory", "cc" __EXTRA_CLOBBER)
9205 +@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9206 + {
9207 + unsigned long __limit;
9208 + asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9209 +- return __limit + 1;
9210 ++ return __limit;
9211 + }
9212 +
9213 + static inline void native_clts(void)
9214 +@@ -397,12 +397,12 @@ void enable_hlt(void);
9215 +
9216 + void cpu_idle_wait(void);
9217 +
9218 +-extern unsigned long arch_align_stack(unsigned long sp);
9219 ++#define arch_align_stack(x) ((x) & ~0xfUL)
9220 + extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9221 +
9222 + void default_idle(void);
9223 +
9224 +-void stop_this_cpu(void *dummy);
9225 ++void stop_this_cpu(void *dummy) __noreturn;
9226 +
9227 + /*
9228 + * Force strict CPU ordering.
9229 +diff -urNp linux-3.1.1/arch/x86/include/asm/thread_info.h linux-3.1.1/arch/x86/include/asm/thread_info.h
9230 +--- linux-3.1.1/arch/x86/include/asm/thread_info.h 2011-11-11 15:19:27.000000000 -0500
9231 ++++ linux-3.1.1/arch/x86/include/asm/thread_info.h 2011-11-16 18:39:07.000000000 -0500
9232 +@@ -10,6 +10,7 @@
9233 + #include <linux/compiler.h>
9234 + #include <asm/page.h>
9235 + #include <asm/types.h>
9236 ++#include <asm/percpu.h>
9237 +
9238 + /*
9239 + * low level task data that entry.S needs immediate access to
9240 +@@ -24,7 +25,6 @@ struct exec_domain;
9241 + #include <linux/atomic.h>
9242 +
9243 + struct thread_info {
9244 +- struct task_struct *task; /* main task structure */
9245 + struct exec_domain *exec_domain; /* execution domain */
9246 + __u32 flags; /* low level flags */
9247 + __u32 status; /* thread synchronous flags */
9248 +@@ -34,18 +34,12 @@ struct thread_info {
9249 + mm_segment_t addr_limit;
9250 + struct restart_block restart_block;
9251 + void __user *sysenter_return;
9252 +-#ifdef CONFIG_X86_32
9253 +- unsigned long previous_esp; /* ESP of the previous stack in
9254 +- case of nested (IRQ) stacks
9255 +- */
9256 +- __u8 supervisor_stack[0];
9257 +-#endif
9258 ++ unsigned long lowest_stack;
9259 + int uaccess_err;
9260 + };
9261 +
9262 +-#define INIT_THREAD_INFO(tsk) \
9263 ++#define INIT_THREAD_INFO \
9264 + { \
9265 +- .task = &tsk, \
9266 + .exec_domain = &default_exec_domain, \
9267 + .flags = 0, \
9268 + .cpu = 0, \
9269 +@@ -56,7 +50,7 @@ struct thread_info {
9270 + }, \
9271 + }
9272 +
9273 +-#define init_thread_info (init_thread_union.thread_info)
9274 ++#define init_thread_info (init_thread_union.stack)
9275 + #define init_stack (init_thread_union.stack)
9276 +
9277 + #else /* !__ASSEMBLY__ */
9278 +@@ -170,6 +164,23 @@ struct thread_info {
9279 + ret; \
9280 + })
9281 +
9282 ++#ifdef __ASSEMBLY__
9283 ++/* how to get the thread information struct from ASM */
9284 ++#define GET_THREAD_INFO(reg) \
9285 ++ mov PER_CPU_VAR(current_tinfo), reg
9286 ++
9287 ++/* use this one if reg already contains %esp */
9288 ++#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9289 ++#else
9290 ++/* how to get the thread information struct from C */
9291 ++DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9292 ++
9293 ++static __always_inline struct thread_info *current_thread_info(void)
9294 ++{
9295 ++ return percpu_read_stable(current_tinfo);
9296 ++}
9297 ++#endif
9298 ++
9299 + #ifdef CONFIG_X86_32
9300 +
9301 + #define STACK_WARN (THREAD_SIZE/8)
9302 +@@ -180,35 +191,13 @@ struct thread_info {
9303 + */
9304 + #ifndef __ASSEMBLY__
9305 +
9306 +-
9307 + /* how to get the current stack pointer from C */
9308 + register unsigned long current_stack_pointer asm("esp") __used;
9309 +
9310 +-/* how to get the thread information struct from C */
9311 +-static inline struct thread_info *current_thread_info(void)
9312 +-{
9313 +- return (struct thread_info *)
9314 +- (current_stack_pointer & ~(THREAD_SIZE - 1));
9315 +-}
9316 +-
9317 +-#else /* !__ASSEMBLY__ */
9318 +-
9319 +-/* how to get the thread information struct from ASM */
9320 +-#define GET_THREAD_INFO(reg) \
9321 +- movl $-THREAD_SIZE, reg; \
9322 +- andl %esp, reg
9323 +-
9324 +-/* use this one if reg already contains %esp */
9325 +-#define GET_THREAD_INFO_WITH_ESP(reg) \
9326 +- andl $-THREAD_SIZE, reg
9327 +-
9328 + #endif
9329 +
9330 + #else /* X86_32 */
9331 +
9332 +-#include <asm/percpu.h>
9333 +-#define KERNEL_STACK_OFFSET (5*8)
9334 +-
9335 + /*
9336 + * macros/functions for gaining access to the thread information structure
9337 + * preempt_count needs to be 1 initially, until the scheduler is functional.
9338 +@@ -216,21 +205,8 @@ static inline struct thread_info *curren
9339 + #ifndef __ASSEMBLY__
9340 + DECLARE_PER_CPU(unsigned long, kernel_stack);
9341 +
9342 +-static inline struct thread_info *current_thread_info(void)
9343 +-{
9344 +- struct thread_info *ti;
9345 +- ti = (void *)(percpu_read_stable(kernel_stack) +
9346 +- KERNEL_STACK_OFFSET - THREAD_SIZE);
9347 +- return ti;
9348 +-}
9349 +-
9350 +-#else /* !__ASSEMBLY__ */
9351 +-
9352 +-/* how to get the thread information struct from ASM */
9353 +-#define GET_THREAD_INFO(reg) \
9354 +- movq PER_CPU_VAR(kernel_stack),reg ; \
9355 +- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9356 +-
9357 ++/* how to get the current stack pointer from C */
9358 ++register unsigned long current_stack_pointer asm("rsp") __used;
9359 + #endif
9360 +
9361 + #endif /* !X86_32 */
9362 +@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9363 + extern void free_thread_info(struct thread_info *ti);
9364 + extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9365 + #define arch_task_cache_init arch_task_cache_init
9366 ++
9367 ++#define __HAVE_THREAD_FUNCTIONS
9368 ++#define task_thread_info(task) (&(task)->tinfo)
9369 ++#define task_stack_page(task) ((task)->stack)
9370 ++#define setup_thread_stack(p, org) do {} while (0)
9371 ++#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9372 ++
9373 ++#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9374 ++extern struct task_struct *alloc_task_struct_node(int node);
9375 ++extern void free_task_struct(struct task_struct *);
9376 ++
9377 + #endif
9378 + #endif /* _ASM_X86_THREAD_INFO_H */
9379 +diff -urNp linux-3.1.1/arch/x86/include/asm/uaccess_32.h linux-3.1.1/arch/x86/include/asm/uaccess_32.h
9380 +--- linux-3.1.1/arch/x86/include/asm/uaccess_32.h 2011-11-11 15:19:27.000000000 -0500
9381 ++++ linux-3.1.1/arch/x86/include/asm/uaccess_32.h 2011-11-16 18:40:08.000000000 -0500
9382 +@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
9383 + static __always_inline unsigned long __must_check
9384 + __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9385 + {
9386 ++ pax_track_stack();
9387 ++
9388 ++ if ((long)n < 0)
9389 ++ return n;
9390 ++
9391 + if (__builtin_constant_p(n)) {
9392 + unsigned long ret;
9393 +
9394 +@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to,
9395 + return ret;
9396 + }
9397 + }
9398 ++ if (!__builtin_constant_p(n))
9399 ++ check_object_size(from, n, true);
9400 + return __copy_to_user_ll(to, from, n);
9401 + }
9402 +
9403 +@@ -82,12 +89,16 @@ static __always_inline unsigned long __m
9404 + __copy_to_user(void __user *to, const void *from, unsigned long n)
9405 + {
9406 + might_fault();
9407 ++
9408 + return __copy_to_user_inatomic(to, from, n);
9409 + }
9410 +
9411 + static __always_inline unsigned long
9412 + __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9413 + {
9414 ++ if ((long)n < 0)
9415 ++ return n;
9416 ++
9417 + /* Avoid zeroing the tail if the copy fails..
9418 + * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9419 + * but as the zeroing behaviour is only significant when n is not
9420 +@@ -137,6 +148,12 @@ static __always_inline unsigned long
9421 + __copy_from_user(void *to, const void __user *from, unsigned long n)
9422 + {
9423 + might_fault();
9424 ++
9425 ++ pax_track_stack();
9426 ++
9427 ++ if ((long)n < 0)
9428 ++ return n;
9429 ++
9430 + if (__builtin_constant_p(n)) {
9431 + unsigned long ret;
9432 +
9433 +@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __
9434 + return ret;
9435 + }
9436 + }
9437 ++ if (!__builtin_constant_p(n))
9438 ++ check_object_size(to, n, false);
9439 + return __copy_from_user_ll(to, from, n);
9440 + }
9441 +
9442 +@@ -159,6 +178,10 @@ static __always_inline unsigned long __c
9443 + const void __user *from, unsigned long n)
9444 + {
9445 + might_fault();
9446 ++
9447 ++ if ((long)n < 0)
9448 ++ return n;
9449 ++
9450 + if (__builtin_constant_p(n)) {
9451 + unsigned long ret;
9452 +
9453 +@@ -181,15 +204,19 @@ static __always_inline unsigned long
9454 + __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9455 + unsigned long n)
9456 + {
9457 +- return __copy_from_user_ll_nocache_nozero(to, from, n);
9458 +-}
9459 ++ if ((long)n < 0)
9460 ++ return n;
9461 +
9462 +-unsigned long __must_check copy_to_user(void __user *to,
9463 +- const void *from, unsigned long n);
9464 +-unsigned long __must_check _copy_from_user(void *to,
9465 +- const void __user *from,
9466 +- unsigned long n);
9467 ++ return __copy_from_user_ll_nocache_nozero(to, from, n);
9468 ++}
9469 +
9470 ++extern void copy_to_user_overflow(void)
9471 ++#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9472 ++ __compiletime_error("copy_to_user() buffer size is not provably correct")
9473 ++#else
9474 ++ __compiletime_warning("copy_to_user() buffer size is not provably correct")
9475 ++#endif
9476 ++;
9477 +
9478 + extern void copy_from_user_overflow(void)
9479 + #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9480 +@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void
9481 + #endif
9482 + ;
9483 +
9484 +-static inline unsigned long __must_check copy_from_user(void *to,
9485 +- const void __user *from,
9486 +- unsigned long n)
9487 ++/**
9488 ++ * copy_to_user: - Copy a block of data into user space.
9489 ++ * @to: Destination address, in user space.
9490 ++ * @from: Source address, in kernel space.
9491 ++ * @n: Number of bytes to copy.
9492 ++ *
9493 ++ * Context: User context only. This function may sleep.
9494 ++ *
9495 ++ * Copy data from kernel space to user space.
9496 ++ *
9497 ++ * Returns number of bytes that could not be copied.
9498 ++ * On success, this will be zero.
9499 ++ */
9500 ++static inline unsigned long __must_check
9501 ++copy_to_user(void __user *to, const void *from, unsigned long n)
9502 ++{
9503 ++ int sz = __compiletime_object_size(from);
9504 ++
9505 ++ if (unlikely(sz != -1 && sz < n))
9506 ++ copy_to_user_overflow();
9507 ++ else if (access_ok(VERIFY_WRITE, to, n))
9508 ++ n = __copy_to_user(to, from, n);
9509 ++ return n;
9510 ++}
9511 ++
9512 ++/**
9513 ++ * copy_from_user: - Copy a block of data from user space.
9514 ++ * @to: Destination address, in kernel space.
9515 ++ * @from: Source address, in user space.
9516 ++ * @n: Number of bytes to copy.
9517 ++ *
9518 ++ * Context: User context only. This function may sleep.
9519 ++ *
9520 ++ * Copy data from user space to kernel space.
9521 ++ *
9522 ++ * Returns number of bytes that could not be copied.
9523 ++ * On success, this will be zero.
9524 ++ *
9525 ++ * If some data could not be copied, this function will pad the copied
9526 ++ * data to the requested size using zero bytes.
9527 ++ */
9528 ++static inline unsigned long __must_check
9529 ++copy_from_user(void *to, const void __user *from, unsigned long n)
9530 + {
9531 + int sz = __compiletime_object_size(to);
9532 +
9533 +- if (likely(sz == -1 || sz >= n))
9534 +- n = _copy_from_user(to, from, n);
9535 +- else
9536 ++ if (unlikely(sz != -1 && sz < n))
9537 + copy_from_user_overflow();
9538 +-
9539 ++ else if (access_ok(VERIFY_READ, from, n))
9540 ++ n = __copy_from_user(to, from, n);
9541 ++ else if ((long)n > 0) {
9542 ++ if (!__builtin_constant_p(n))
9543 ++ check_object_size(to, n, false);
9544 ++ memset(to, 0, n);
9545 ++ }
9546 + return n;
9547 + }
9548 +
9549 +diff -urNp linux-3.1.1/arch/x86/include/asm/uaccess_64.h linux-3.1.1/arch/x86/include/asm/uaccess_64.h
9550 +--- linux-3.1.1/arch/x86/include/asm/uaccess_64.h 2011-11-11 15:19:27.000000000 -0500
9551 ++++ linux-3.1.1/arch/x86/include/asm/uaccess_64.h 2011-11-16 18:40:08.000000000 -0500
9552 +@@ -10,6 +10,9 @@
9553 + #include <asm/alternative.h>
9554 + #include <asm/cpufeature.h>
9555 + #include <asm/page.h>
9556 ++#include <asm/pgtable.h>
9557 ++
9558 ++#define set_fs(x) (current_thread_info()->addr_limit = (x))
9559 +
9560 + /*
9561 + * Copy To/From Userspace
9562 +@@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *
9563 + return ret;
9564 + }
9565 +
9566 +-__must_check unsigned long
9567 +-_copy_to_user(void __user *to, const void *from, unsigned len);
9568 +-__must_check unsigned long
9569 +-_copy_from_user(void *to, const void __user *from, unsigned len);
9570 ++static __always_inline __must_check unsigned long
9571 ++__copy_to_user(void __user *to, const void *from, unsigned len);
9572 ++static __always_inline __must_check unsigned long
9573 ++__copy_from_user(void *to, const void __user *from, unsigned len);
9574 + __must_check unsigned long
9575 + copy_in_user(void __user *to, const void __user *from, unsigned len);
9576 +
9577 + static inline unsigned long __must_check copy_from_user(void *to,
9578 + const void __user *from,
9579 +- unsigned long n)
9580 ++ unsigned n)
9581 + {
9582 +- int sz = __compiletime_object_size(to);
9583 +-
9584 + might_fault();
9585 +- if (likely(sz == -1 || sz >= n))
9586 +- n = _copy_from_user(to, from, n);
9587 +-#ifdef CONFIG_DEBUG_VM
9588 +- else
9589 +- WARN(1, "Buffer overflow detected!\n");
9590 +-#endif
9591 ++
9592 ++ if (access_ok(VERIFY_READ, from, n))
9593 ++ n = __copy_from_user(to, from, n);
9594 ++ else if ((int)n > 0) {
9595 ++ if (!__builtin_constant_p(n))
9596 ++ check_object_size(to, n, false);
9597 ++ memset(to, 0, n);
9598 ++ }
9599 + return n;
9600 + }
9601 +
9602 +@@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const
9603 + {
9604 + might_fault();
9605 +
9606 +- return _copy_to_user(dst, src, size);
9607 ++ if (access_ok(VERIFY_WRITE, dst, size))
9608 ++ size = __copy_to_user(dst, src, size);
9609 ++ return size;
9610 + }
9611 +
9612 + static __always_inline __must_check
9613 +-int __copy_from_user(void *dst, const void __user *src, unsigned size)
9614 ++unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9615 + {
9616 +- int ret = 0;
9617 ++ int sz = __compiletime_object_size(dst);
9618 ++ unsigned ret = 0;
9619 +
9620 + might_fault();
9621 +- if (!__builtin_constant_p(size))
9622 +- return copy_user_generic(dst, (__force void *)src, size);
9623 ++
9624 ++ pax_track_stack();
9625 ++
9626 ++ if ((int)size < 0)
9627 ++ return size;
9628 ++
9629 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
9630 ++ if (!__access_ok(VERIFY_READ, src, size))
9631 ++ return size;
9632 ++#endif
9633 ++
9634 ++ if (unlikely(sz != -1 && sz < size)) {
9635 ++#ifdef CONFIG_DEBUG_VM
9636 ++ WARN(1, "Buffer overflow detected!\n");
9637 ++#endif
9638 ++ return size;
9639 ++ }
9640 ++
9641 ++ if (!__builtin_constant_p(size)) {
9642 ++ check_object_size(dst, size, false);
9643 ++
9644 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
9645 ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9646 ++ src += PAX_USER_SHADOW_BASE;
9647 ++#endif
9648 ++
9649 ++ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9650 ++ }
9651 + switch (size) {
9652 +- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9653 ++ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9654 + ret, "b", "b", "=q", 1);
9655 + return ret;
9656 +- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9657 ++ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9658 + ret, "w", "w", "=r", 2);
9659 + return ret;
9660 +- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9661 ++ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9662 + ret, "l", "k", "=r", 4);
9663 + return ret;
9664 +- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9665 ++ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9666 + ret, "q", "", "=r", 8);
9667 + return ret;
9668 + case 10:
9669 +- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9670 ++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9671 + ret, "q", "", "=r", 10);
9672 + if (unlikely(ret))
9673 + return ret;
9674 + __get_user_asm(*(u16 *)(8 + (char *)dst),
9675 +- (u16 __user *)(8 + (char __user *)src),
9676 ++ (const u16 __user *)(8 + (const char __user *)src),
9677 + ret, "w", "w", "=r", 2);
9678 + return ret;
9679 + case 16:
9680 +- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9681 ++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9682 + ret, "q", "", "=r", 16);
9683 + if (unlikely(ret))
9684 + return ret;
9685 + __get_user_asm(*(u64 *)(8 + (char *)dst),
9686 +- (u64 __user *)(8 + (char __user *)src),
9687 ++ (const u64 __user *)(8 + (const char __user *)src),
9688 + ret, "q", "", "=r", 8);
9689 + return ret;
9690 + default:
9691 +- return copy_user_generic(dst, (__force void *)src, size);
9692 ++
9693 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
9694 ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9695 ++ src += PAX_USER_SHADOW_BASE;
9696 ++#endif
9697 ++
9698 ++ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9699 + }
9700 + }
9701 +
9702 + static __always_inline __must_check
9703 +-int __copy_to_user(void __user *dst, const void *src, unsigned size)
9704 ++unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9705 + {
9706 +- int ret = 0;
9707 ++ int sz = __compiletime_object_size(src);
9708 ++ unsigned ret = 0;
9709 +
9710 + might_fault();
9711 +- if (!__builtin_constant_p(size))
9712 +- return copy_user_generic((__force void *)dst, src, size);
9713 ++
9714 ++ pax_track_stack();
9715 ++
9716 ++ if ((int)size < 0)
9717 ++ return size;
9718 ++
9719 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
9720 ++ if (!__access_ok(VERIFY_WRITE, dst, size))
9721 ++ return size;
9722 ++#endif
9723 ++
9724 ++ if (unlikely(sz != -1 && sz < size)) {
9725 ++#ifdef CONFIG_DEBUG_VM
9726 ++ WARN(1, "Buffer overflow detected!\n");
9727 ++#endif
9728 ++ return size;
9729 ++ }
9730 ++
9731 ++ if (!__builtin_constant_p(size)) {
9732 ++ check_object_size(src, size, true);
9733 ++
9734 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
9735 ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9736 ++ dst += PAX_USER_SHADOW_BASE;
9737 ++#endif
9738 ++
9739 ++ return copy_user_generic((__force_kernel void *)dst, src, size);
9740 ++ }
9741 + switch (size) {
9742 +- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9743 ++ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9744 + ret, "b", "b", "iq", 1);
9745 + return ret;
9746 +- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9747 ++ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9748 + ret, "w", "w", "ir", 2);
9749 + return ret;
9750 +- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9751 ++ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9752 + ret, "l", "k", "ir", 4);
9753 + return ret;
9754 +- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9755 ++ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9756 + ret, "q", "", "er", 8);
9757 + return ret;
9758 + case 10:
9759 +- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9760 ++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9761 + ret, "q", "", "er", 10);
9762 + if (unlikely(ret))
9763 + return ret;
9764 + asm("":::"memory");
9765 +- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9766 ++ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9767 + ret, "w", "w", "ir", 2);
9768 + return ret;
9769 + case 16:
9770 +- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9771 ++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9772 + ret, "q", "", "er", 16);
9773 + if (unlikely(ret))
9774 + return ret;
9775 + asm("":::"memory");
9776 +- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9777 ++ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9778 + ret, "q", "", "er", 8);
9779 + return ret;
9780 + default:
9781 +- return copy_user_generic((__force void *)dst, src, size);
9782 ++
9783 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
9784 ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9785 ++ dst += PAX_USER_SHADOW_BASE;
9786 ++#endif
9787 ++
9788 ++ return copy_user_generic((__force_kernel void *)dst, src, size);
9789 + }
9790 + }
9791 +
9792 + static __always_inline __must_check
9793 +-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9794 ++unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9795 + {
9796 +- int ret = 0;
9797 ++ unsigned ret = 0;
9798 +
9799 + might_fault();
9800 +- if (!__builtin_constant_p(size))
9801 +- return copy_user_generic((__force void *)dst,
9802 +- (__force void *)src, size);
9803 ++
9804 ++ if ((int)size < 0)
9805 ++ return size;
9806 ++
9807 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
9808 ++ if (!__access_ok(VERIFY_READ, src, size))
9809 ++ return size;
9810 ++ if (!__access_ok(VERIFY_WRITE, dst, size))
9811 ++ return size;
9812 ++#endif
9813 ++
9814 ++ if (!__builtin_constant_p(size)) {
9815 ++
9816 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
9817 ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9818 ++ src += PAX_USER_SHADOW_BASE;
9819 ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9820 ++ dst += PAX_USER_SHADOW_BASE;
9821 ++#endif
9822 ++
9823 ++ return copy_user_generic((__force_kernel void *)dst,
9824 ++ (__force_kernel const void *)src, size);
9825 ++ }
9826 + switch (size) {
9827 + case 1: {
9828 + u8 tmp;
9829 +- __get_user_asm(tmp, (u8 __user *)src,
9830 ++ __get_user_asm(tmp, (const u8 __user *)src,
9831 + ret, "b", "b", "=q", 1);
9832 + if (likely(!ret))
9833 + __put_user_asm(tmp, (u8 __user *)dst,
9834 +@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, con
9835 + }
9836 + case 2: {
9837 + u16 tmp;
9838 +- __get_user_asm(tmp, (u16 __user *)src,
9839 ++ __get_user_asm(tmp, (const u16 __user *)src,
9840 + ret, "w", "w", "=r", 2);
9841 + if (likely(!ret))
9842 + __put_user_asm(tmp, (u16 __user *)dst,
9843 +@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, con
9844 +
9845 + case 4: {
9846 + u32 tmp;
9847 +- __get_user_asm(tmp, (u32 __user *)src,
9848 ++ __get_user_asm(tmp, (const u32 __user *)src,
9849 + ret, "l", "k", "=r", 4);
9850 + if (likely(!ret))
9851 + __put_user_asm(tmp, (u32 __user *)dst,
9852 +@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, con
9853 + }
9854 + case 8: {
9855 + u64 tmp;
9856 +- __get_user_asm(tmp, (u64 __user *)src,
9857 ++ __get_user_asm(tmp, (const u64 __user *)src,
9858 + ret, "q", "", "=r", 8);
9859 + if (likely(!ret))
9860 + __put_user_asm(tmp, (u64 __user *)dst,
9861 +@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, con
9862 + return ret;
9863 + }
9864 + default:
9865 +- return copy_user_generic((__force void *)dst,
9866 +- (__force void *)src, size);
9867 ++
9868 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
9869 ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9870 ++ src += PAX_USER_SHADOW_BASE;
9871 ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9872 ++ dst += PAX_USER_SHADOW_BASE;
9873 ++#endif
9874 ++
9875 ++ return copy_user_generic((__force_kernel void *)dst,
9876 ++ (__force_kernel const void *)src, size);
9877 + }
9878 + }
9879 +
9880 +@@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(
9881 + static __must_check __always_inline int
9882 + __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9883 + {
9884 +- return copy_user_generic(dst, (__force const void *)src, size);
9885 ++ pax_track_stack();
9886 ++
9887 ++ if ((int)size < 0)
9888 ++ return size;
9889 ++
9890 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
9891 ++ if (!__access_ok(VERIFY_READ, src, size))
9892 ++ return size;
9893 ++
9894 ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9895 ++ src += PAX_USER_SHADOW_BASE;
9896 ++#endif
9897 ++
9898 ++ return copy_user_generic(dst, (__force_kernel const void *)src, size);
9899 + }
9900 +
9901 +-static __must_check __always_inline int
9902 ++static __must_check __always_inline unsigned long
9903 + __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9904 + {
9905 +- return copy_user_generic((__force void *)dst, src, size);
9906 ++ if ((int)size < 0)
9907 ++ return size;
9908 ++
9909 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
9910 ++ if (!__access_ok(VERIFY_WRITE, dst, size))
9911 ++ return size;
9912 ++
9913 ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9914 ++ dst += PAX_USER_SHADOW_BASE;
9915 ++#endif
9916 ++
9917 ++ return copy_user_generic((__force_kernel void *)dst, src, size);
9918 + }
9919 +
9920 +-extern long __copy_user_nocache(void *dst, const void __user *src,
9921 ++extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9922 + unsigned size, int zerorest);
9923 +
9924 +-static inline int
9925 +-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9926 ++static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9927 + {
9928 + might_sleep();
9929 ++
9930 ++ if ((int)size < 0)
9931 ++ return size;
9932 ++
9933 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
9934 ++ if (!__access_ok(VERIFY_READ, src, size))
9935 ++ return size;
9936 ++#endif
9937 ++
9938 + return __copy_user_nocache(dst, src, size, 1);
9939 + }
9940 +
9941 +-static inline int
9942 +-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9943 ++static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9944 + unsigned size)
9945 + {
9946 ++ if ((int)size < 0)
9947 ++ return size;
9948 ++
9949 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
9950 ++ if (!__access_ok(VERIFY_READ, src, size))
9951 ++ return size;
9952 ++#endif
9953 ++
9954 + return __copy_user_nocache(dst, src, size, 0);
9955 + }
9956 +
9957 +-unsigned long
9958 +-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9959 ++extern unsigned long
9960 ++copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest);
9961 +
9962 + #endif /* _ASM_X86_UACCESS_64_H */
9963 +diff -urNp linux-3.1.1/arch/x86/include/asm/uaccess.h linux-3.1.1/arch/x86/include/asm/uaccess.h
9964 +--- linux-3.1.1/arch/x86/include/asm/uaccess.h 2011-11-11 15:19:27.000000000 -0500
9965 ++++ linux-3.1.1/arch/x86/include/asm/uaccess.h 2011-11-16 18:39:07.000000000 -0500
9966 +@@ -7,12 +7,15 @@
9967 + #include <linux/compiler.h>
9968 + #include <linux/thread_info.h>
9969 + #include <linux/string.h>
9970 ++#include <linux/sched.h>
9971 + #include <asm/asm.h>
9972 + #include <asm/page.h>
9973 +
9974 + #define VERIFY_READ 0
9975 + #define VERIFY_WRITE 1
9976 +
9977 ++extern void check_object_size(const void *ptr, unsigned long n, bool to);
9978 ++
9979 + /*
9980 + * The fs value determines whether argument validity checking should be
9981 + * performed or not. If get_fs() == USER_DS, checking is performed, with
9982 +@@ -28,7 +31,12 @@
9983 +
9984 + #define get_ds() (KERNEL_DS)
9985 + #define get_fs() (current_thread_info()->addr_limit)
9986 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9987 ++void __set_fs(mm_segment_t x);
9988 ++void set_fs(mm_segment_t x);
9989 ++#else
9990 + #define set_fs(x) (current_thread_info()->addr_limit = (x))
9991 ++#endif
9992 +
9993 + #define segment_eq(a, b) ((a).seg == (b).seg)
9994 +
9995 +@@ -76,7 +84,33 @@
9996 + * checks that the pointer is in the user space range - after calling
9997 + * this function, memory access functions may still return -EFAULT.
9998 + */
9999 +-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10000 ++#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10001 ++#define access_ok(type, addr, size) \
10002 ++({ \
10003 ++ long __size = size; \
10004 ++ unsigned long __addr = (unsigned long)addr; \
10005 ++ unsigned long __addr_ao = __addr & PAGE_MASK; \
10006 ++ unsigned long __end_ao = __addr + __size - 1; \
10007 ++ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10008 ++ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10009 ++ while(__addr_ao <= __end_ao) { \
10010 ++ char __c_ao; \
10011 ++ __addr_ao += PAGE_SIZE; \
10012 ++ if (__size > PAGE_SIZE) \
10013 ++ cond_resched(); \
10014 ++ if (__get_user(__c_ao, (char __user *)__addr)) \
10015 ++ break; \
10016 ++ if (type != VERIFY_WRITE) { \
10017 ++ __addr = __addr_ao; \
10018 ++ continue; \
10019 ++ } \
10020 ++ if (__put_user(__c_ao, (char __user *)__addr)) \
10021 ++ break; \
10022 ++ __addr = __addr_ao; \
10023 ++ } \
10024 ++ } \
10025 ++ __ret_ao; \
10026 ++})
10027 +
10028 + /*
10029 + * The exception table consists of pairs of addresses: the first is the
10030 +@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10031 + asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10032 + : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10033 +
10034 +-
10035 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10036 ++#define __copyuser_seg "gs;"
10037 ++#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10038 ++#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10039 ++#else
10040 ++#define __copyuser_seg
10041 ++#define __COPYUSER_SET_ES
10042 ++#define __COPYUSER_RESTORE_ES
10043 ++#endif
10044 +
10045 + #ifdef CONFIG_X86_32
10046 + #define __put_user_asm_u64(x, addr, err, errret) \
10047 +- asm volatile("1: movl %%eax,0(%2)\n" \
10048 +- "2: movl %%edx,4(%2)\n" \
10049 ++ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10050 ++ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10051 + "3:\n" \
10052 + ".section .fixup,\"ax\"\n" \
10053 + "4: movl %3,%0\n" \
10054 +@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10055 + : "A" (x), "r" (addr), "i" (errret), "0" (err))
10056 +
10057 + #define __put_user_asm_ex_u64(x, addr) \
10058 +- asm volatile("1: movl %%eax,0(%1)\n" \
10059 +- "2: movl %%edx,4(%1)\n" \
10060 ++ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10061 ++ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10062 + "3:\n" \
10063 + _ASM_EXTABLE(1b, 2b - 1b) \
10064 + _ASM_EXTABLE(2b, 3b - 2b) \
10065 +@@ -252,7 +294,7 @@ extern void __put_user_8(void);
10066 + __typeof__(*(ptr)) __pu_val; \
10067 + __chk_user_ptr(ptr); \
10068 + might_fault(); \
10069 +- __pu_val = x; \
10070 ++ __pu_val = (x); \
10071 + switch (sizeof(*(ptr))) { \
10072 + case 1: \
10073 + __put_user_x(1, __pu_val, ptr, __ret_pu); \
10074 +@@ -373,7 +415,7 @@ do { \
10075 + } while (0)
10076 +
10077 + #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10078 +- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10079 ++ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10080 + "2:\n" \
10081 + ".section .fixup,\"ax\"\n" \
10082 + "3: mov %3,%0\n" \
10083 +@@ -381,7 +423,7 @@ do { \
10084 + " jmp 2b\n" \
10085 + ".previous\n" \
10086 + _ASM_EXTABLE(1b, 3b) \
10087 +- : "=r" (err), ltype(x) \
10088 ++ : "=r" (err), ltype (x) \
10089 + : "m" (__m(addr)), "i" (errret), "0" (err))
10090 +
10091 + #define __get_user_size_ex(x, ptr, size) \
10092 +@@ -406,7 +448,7 @@ do { \
10093 + } while (0)
10094 +
10095 + #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10096 +- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10097 ++ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10098 + "2:\n" \
10099 + _ASM_EXTABLE(1b, 2b - 1b) \
10100 + : ltype(x) : "m" (__m(addr)))
10101 +@@ -423,13 +465,24 @@ do { \
10102 + int __gu_err; \
10103 + unsigned long __gu_val; \
10104 + __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10105 +- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10106 ++ (x) = (__typeof__(*(ptr)))__gu_val; \
10107 + __gu_err; \
10108 + })
10109 +
10110 + /* FIXME: this hack is definitely wrong -AK */
10111 + struct __large_struct { unsigned long buf[100]; };
10112 +-#define __m(x) (*(struct __large_struct __user *)(x))
10113 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10114 ++#define ____m(x) \
10115 ++({ \
10116 ++ unsigned long ____x = (unsigned long)(x); \
10117 ++ if (____x < PAX_USER_SHADOW_BASE) \
10118 ++ ____x += PAX_USER_SHADOW_BASE; \
10119 ++ (void __user *)____x; \
10120 ++})
10121 ++#else
10122 ++#define ____m(x) (x)
10123 ++#endif
10124 ++#define __m(x) (*(struct __large_struct __user *)____m(x))
10125 +
10126 + /*
10127 + * Tell gcc we read from memory instead of writing: this is because
10128 +@@ -437,7 +490,7 @@ struct __large_struct { unsigned long bu
10129 + * aliasing issues.
10130 + */
10131 + #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10132 +- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10133 ++ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10134 + "2:\n" \
10135 + ".section .fixup,\"ax\"\n" \
10136 + "3: mov %3,%0\n" \
10137 +@@ -445,10 +498,10 @@ struct __large_struct { unsigned long bu
10138 + ".previous\n" \
10139 + _ASM_EXTABLE(1b, 3b) \
10140 + : "=r"(err) \
10141 +- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10142 ++ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10143 +
10144 + #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10145 +- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10146 ++ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10147 + "2:\n" \
10148 + _ASM_EXTABLE(1b, 2b - 1b) \
10149 + : : ltype(x), "m" (__m(addr)))
10150 +@@ -487,8 +540,12 @@ struct __large_struct { unsigned long bu
10151 + * On error, the variable @x is set to zero.
10152 + */
10153 +
10154 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10155 ++#define __get_user(x, ptr) get_user((x), (ptr))
10156 ++#else
10157 + #define __get_user(x, ptr) \
10158 + __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10159 ++#endif
10160 +
10161 + /**
10162 + * __put_user: - Write a simple value into user space, with less checking.
10163 +@@ -510,8 +567,12 @@ struct __large_struct { unsigned long bu
10164 + * Returns zero on success, or -EFAULT on error.
10165 + */
10166 +
10167 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10168 ++#define __put_user(x, ptr) put_user((x), (ptr))
10169 ++#else
10170 + #define __put_user(x, ptr) \
10171 + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10172 ++#endif
10173 +
10174 + #define __get_user_unaligned __get_user
10175 + #define __put_user_unaligned __put_user
10176 +@@ -529,7 +590,7 @@ struct __large_struct { unsigned long bu
10177 + #define get_user_ex(x, ptr) do { \
10178 + unsigned long __gue_val; \
10179 + __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10180 +- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10181 ++ (x) = (__typeof__(*(ptr)))__gue_val; \
10182 + } while (0)
10183 +
10184 + #ifdef CONFIG_X86_WP_WORKS_OK
10185 +diff -urNp linux-3.1.1/arch/x86/include/asm/vdso.h linux-3.1.1/arch/x86/include/asm/vdso.h
10186 +--- linux-3.1.1/arch/x86/include/asm/vdso.h 2011-11-11 15:19:27.000000000 -0500
10187 ++++ linux-3.1.1/arch/x86/include/asm/vdso.h 2011-11-16 18:39:07.000000000 -0500
10188 +@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
10189 + #define VDSO32_SYMBOL(base, name) \
10190 + ({ \
10191 + extern const char VDSO32_##name[]; \
10192 +- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10193 ++ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
10194 + })
10195 + #endif
10196 +
10197 +diff -urNp linux-3.1.1/arch/x86/include/asm/x86_init.h linux-3.1.1/arch/x86/include/asm/x86_init.h
10198 +--- linux-3.1.1/arch/x86/include/asm/x86_init.h 2011-11-11 15:19:27.000000000 -0500
10199 ++++ linux-3.1.1/arch/x86/include/asm/x86_init.h 2011-11-16 18:39:07.000000000 -0500
10200 +@@ -28,7 +28,7 @@ struct x86_init_mpparse {
10201 + void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
10202 + void (*find_smp_config)(void);
10203 + void (*get_smp_config)(unsigned int early);
10204 +-};
10205 ++} __no_const;
10206 +
10207 + /**
10208 + * struct x86_init_resources - platform specific resource related ops
10209 +@@ -42,7 +42,7 @@ struct x86_init_resources {
10210 + void (*probe_roms)(void);
10211 + void (*reserve_resources)(void);
10212 + char *(*memory_setup)(void);
10213 +-};
10214 ++} __no_const;
10215 +
10216 + /**
10217 + * struct x86_init_irqs - platform specific interrupt setup
10218 +@@ -55,7 +55,7 @@ struct x86_init_irqs {
10219 + void (*pre_vector_init)(void);
10220 + void (*intr_init)(void);
10221 + void (*trap_init)(void);
10222 +-};
10223 ++} __no_const;
10224 +
10225 + /**
10226 + * struct x86_init_oem - oem platform specific customizing functions
10227 +@@ -65,7 +65,7 @@ struct x86_init_irqs {
10228 + struct x86_init_oem {
10229 + void (*arch_setup)(void);
10230 + void (*banner)(void);
10231 +-};
10232 ++} __no_const;
10233 +
10234 + /**
10235 + * struct x86_init_mapping - platform specific initial kernel pagetable setup
10236 +@@ -76,7 +76,7 @@ struct x86_init_oem {
10237 + */
10238 + struct x86_init_mapping {
10239 + void (*pagetable_reserve)(u64 start, u64 end);
10240 +-};
10241 ++} __no_const;
10242 +
10243 + /**
10244 + * struct x86_init_paging - platform specific paging functions
10245 +@@ -86,7 +86,7 @@ struct x86_init_mapping {
10246 + struct x86_init_paging {
10247 + void (*pagetable_setup_start)(pgd_t *base);
10248 + void (*pagetable_setup_done)(pgd_t *base);
10249 +-};
10250 ++} __no_const;
10251 +
10252 + /**
10253 + * struct x86_init_timers - platform specific timer setup
10254 +@@ -101,7 +101,7 @@ struct x86_init_timers {
10255 + void (*tsc_pre_init)(void);
10256 + void (*timer_init)(void);
10257 + void (*wallclock_init)(void);
10258 +-};
10259 ++} __no_const;
10260 +
10261 + /**
10262 + * struct x86_init_iommu - platform specific iommu setup
10263 +@@ -109,7 +109,7 @@ struct x86_init_timers {
10264 + */
10265 + struct x86_init_iommu {
10266 + int (*iommu_init)(void);
10267 +-};
10268 ++} __no_const;
10269 +
10270 + /**
10271 + * struct x86_init_pci - platform specific pci init functions
10272 +@@ -123,7 +123,7 @@ struct x86_init_pci {
10273 + int (*init)(void);
10274 + void (*init_irq)(void);
10275 + void (*fixup_irqs)(void);
10276 +-};
10277 ++} __no_const;
10278 +
10279 + /**
10280 + * struct x86_init_ops - functions for platform specific setup
10281 +@@ -139,7 +139,7 @@ struct x86_init_ops {
10282 + struct x86_init_timers timers;
10283 + struct x86_init_iommu iommu;
10284 + struct x86_init_pci pci;
10285 +-};
10286 ++} __no_const;
10287 +
10288 + /**
10289 + * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10290 +@@ -147,7 +147,7 @@ struct x86_init_ops {
10291 + */
10292 + struct x86_cpuinit_ops {
10293 + void (*setup_percpu_clockev)(void);
10294 +-};
10295 ++} __no_const;
10296 +
10297 + /**
10298 + * struct x86_platform_ops - platform specific runtime functions
10299 +@@ -166,7 +166,7 @@ struct x86_platform_ops {
10300 + bool (*is_untracked_pat_range)(u64 start, u64 end);
10301 + void (*nmi_init)(void);
10302 + int (*i8042_detect)(void);
10303 +-};
10304 ++} __no_const;
10305 +
10306 + struct pci_dev;
10307 +
10308 +@@ -174,7 +174,7 @@ struct x86_msi_ops {
10309 + int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10310 + void (*teardown_msi_irq)(unsigned int irq);
10311 + void (*teardown_msi_irqs)(struct pci_dev *dev);
10312 +-};
10313 ++} __no_const;
10314 +
10315 + extern struct x86_init_ops x86_init;
10316 + extern struct x86_cpuinit_ops x86_cpuinit;
10317 +diff -urNp linux-3.1.1/arch/x86/include/asm/xsave.h linux-3.1.1/arch/x86/include/asm/xsave.h
10318 +--- linux-3.1.1/arch/x86/include/asm/xsave.h 2011-11-11 15:19:27.000000000 -0500
10319 ++++ linux-3.1.1/arch/x86/include/asm/xsave.h 2011-11-16 18:39:07.000000000 -0500
10320 +@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10321 + {
10322 + int err;
10323 +
10324 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10325 ++ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10326 ++ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10327 ++#endif
10328 ++
10329 + /*
10330 + * Clear the xsave header first, so that reserved fields are
10331 + * initialized to zero.
10332 +@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsav
10333 + static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
10334 + {
10335 + int err;
10336 +- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
10337 ++ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
10338 + u32 lmask = mask;
10339 + u32 hmask = mask >> 32;
10340 +
10341 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10342 ++ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10343 ++ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10344 ++#endif
10345 ++
10346 + __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10347 + "2:\n"
10348 + ".section .fixup,\"ax\"\n"
10349 +diff -urNp linux-3.1.1/arch/x86/Kconfig linux-3.1.1/arch/x86/Kconfig
10350 +--- linux-3.1.1/arch/x86/Kconfig 2011-11-11 15:19:27.000000000 -0500
10351 ++++ linux-3.1.1/arch/x86/Kconfig 2011-11-16 18:40:08.000000000 -0500
10352 +@@ -236,7 +236,7 @@ config X86_HT
10353 +
10354 + config X86_32_LAZY_GS
10355 + def_bool y
10356 +- depends on X86_32 && !CC_STACKPROTECTOR
10357 ++ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10358 +
10359 + config ARCH_HWEIGHT_CFLAGS
10360 + string
10361 +@@ -1019,7 +1019,7 @@ choice
10362 +
10363 + config NOHIGHMEM
10364 + bool "off"
10365 +- depends on !X86_NUMAQ
10366 ++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10367 + ---help---
10368 + Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10369 + However, the address space of 32-bit x86 processors is only 4
10370 +@@ -1056,7 +1056,7 @@ config NOHIGHMEM
10371 +
10372 + config HIGHMEM4G
10373 + bool "4GB"
10374 +- depends on !X86_NUMAQ
10375 ++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10376 + ---help---
10377 + Select this if you have a 32-bit processor and between 1 and 4
10378 + gigabytes of physical RAM.
10379 +@@ -1110,7 +1110,7 @@ config PAGE_OFFSET
10380 + hex
10381 + default 0xB0000000 if VMSPLIT_3G_OPT
10382 + default 0x80000000 if VMSPLIT_2G
10383 +- default 0x78000000 if VMSPLIT_2G_OPT
10384 ++ default 0x70000000 if VMSPLIT_2G_OPT
10385 + default 0x40000000 if VMSPLIT_1G
10386 + default 0xC0000000
10387 + depends on X86_32
10388 +@@ -1484,6 +1484,7 @@ config SECCOMP
10389 +
10390 + config CC_STACKPROTECTOR
10391 + bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10392 ++ depends on X86_64 || !PAX_MEMORY_UDEREF
10393 + ---help---
10394 + This option turns on the -fstack-protector GCC feature. This
10395 + feature puts, at the beginning of functions, a canary value on
10396 +@@ -1541,6 +1542,7 @@ config KEXEC_JUMP
10397 + config PHYSICAL_START
10398 + hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10399 + default "0x1000000"
10400 ++ range 0x400000 0x40000000
10401 + ---help---
10402 + This gives the physical address where the kernel is loaded.
10403 +
10404 +@@ -1604,6 +1606,7 @@ config X86_NEED_RELOCS
10405 + config PHYSICAL_ALIGN
10406 + hex "Alignment value to which kernel should be aligned" if X86_32
10407 + default "0x1000000"
10408 ++ range 0x400000 0x1000000 if PAX_KERNEXEC
10409 + range 0x2000 0x1000000
10410 + ---help---
10411 + This value puts the alignment restrictions on physical address
10412 +@@ -1635,9 +1638,10 @@ config HOTPLUG_CPU
10413 + Say N if you want to disable CPU hotplug.
10414 +
10415 + config COMPAT_VDSO
10416 +- def_bool y
10417 ++ def_bool n
10418 + prompt "Compat VDSO support"
10419 + depends on X86_32 || IA32_EMULATION
10420 ++ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10421 + ---help---
10422 + Map the 32-bit VDSO to the predictable old-style address too.
10423 +
10424 +diff -urNp linux-3.1.1/arch/x86/Kconfig.cpu linux-3.1.1/arch/x86/Kconfig.cpu
10425 +--- linux-3.1.1/arch/x86/Kconfig.cpu 2011-11-11 15:19:27.000000000 -0500
10426 ++++ linux-3.1.1/arch/x86/Kconfig.cpu 2011-11-16 18:39:07.000000000 -0500
10427 +@@ -341,7 +341,7 @@ config X86_PPRO_FENCE
10428 +
10429 + config X86_F00F_BUG
10430 + def_bool y
10431 +- depends on M586MMX || M586TSC || M586 || M486 || M386
10432 ++ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10433 +
10434 + config X86_INVD_BUG
10435 + def_bool y
10436 +@@ -365,7 +365,7 @@ config X86_POPAD_OK
10437 +
10438 + config X86_ALIGNMENT_16
10439 + def_bool y
10440 +- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10441 ++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10442 +
10443 + config X86_INTEL_USERCOPY
10444 + def_bool y
10445 +@@ -411,7 +411,7 @@ config X86_CMPXCHG64
10446 + # generates cmov.
10447 + config X86_CMOV
10448 + def_bool y
10449 +- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10450 ++ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10451 +
10452 + config X86_MINIMUM_CPU_FAMILY
10453 + int
10454 +diff -urNp linux-3.1.1/arch/x86/Kconfig.debug linux-3.1.1/arch/x86/Kconfig.debug
10455 +--- linux-3.1.1/arch/x86/Kconfig.debug 2011-11-11 15:19:27.000000000 -0500
10456 ++++ linux-3.1.1/arch/x86/Kconfig.debug 2011-11-16 18:39:07.000000000 -0500
10457 +@@ -81,7 +81,7 @@ config X86_PTDUMP
10458 + config DEBUG_RODATA
10459 + bool "Write protect kernel read-only data structures"
10460 + default y
10461 +- depends on DEBUG_KERNEL
10462 ++ depends on DEBUG_KERNEL && BROKEN
10463 + ---help---
10464 + Mark the kernel read-only data as write-protected in the pagetables,
10465 + in order to catch accidental (and incorrect) writes to such const
10466 +@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
10467 +
10468 + config DEBUG_SET_MODULE_RONX
10469 + bool "Set loadable kernel module data as NX and text as RO"
10470 +- depends on MODULES
10471 ++ depends on MODULES && BROKEN
10472 + ---help---
10473 + This option helps catch unintended modifications to loadable
10474 + kernel module's text and read-only data. It also prevents execution
10475 +diff -urNp linux-3.1.1/arch/x86/kernel/acpi/realmode/Makefile linux-3.1.1/arch/x86/kernel/acpi/realmode/Makefile
10476 +--- linux-3.1.1/arch/x86/kernel/acpi/realmode/Makefile 2011-11-11 15:19:27.000000000 -0500
10477 ++++ linux-3.1.1/arch/x86/kernel/acpi/realmode/Makefile 2011-11-16 18:39:07.000000000 -0500
10478 +@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10479 + $(call cc-option, -fno-stack-protector) \
10480 + $(call cc-option, -mpreferred-stack-boundary=2)
10481 + KBUILD_CFLAGS += $(call cc-option, -m32)
10482 ++ifdef CONSTIFY_PLUGIN
10483 ++KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10484 ++endif
10485 + KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10486 + GCOV_PROFILE := n
10487 +
10488 +diff -urNp linux-3.1.1/arch/x86/kernel/acpi/realmode/wakeup.S linux-3.1.1/arch/x86/kernel/acpi/realmode/wakeup.S
10489 +--- linux-3.1.1/arch/x86/kernel/acpi/realmode/wakeup.S 2011-11-11 15:19:27.000000000 -0500
10490 ++++ linux-3.1.1/arch/x86/kernel/acpi/realmode/wakeup.S 2011-11-16 18:40:08.000000000 -0500
10491 +@@ -108,6 +108,9 @@ wakeup_code:
10492 + /* Do any other stuff... */
10493 +
10494 + #ifndef CONFIG_64BIT
10495 ++ /* Recheck NX bit overrides (64bit path does this in trampoline */
10496 ++ call verify_cpu
10497 ++
10498 + /* This could also be done in C code... */
10499 + movl pmode_cr3, %eax
10500 + movl %eax, %cr3
10501 +@@ -131,6 +134,7 @@ wakeup_code:
10502 + movl pmode_cr0, %eax
10503 + movl %eax, %cr0
10504 + jmp pmode_return
10505 ++# include "../../verify_cpu.S"
10506 + #else
10507 + pushw $0
10508 + pushw trampoline_segment
10509 +diff -urNp linux-3.1.1/arch/x86/kernel/acpi/sleep.c linux-3.1.1/arch/x86/kernel/acpi/sleep.c
10510 +--- linux-3.1.1/arch/x86/kernel/acpi/sleep.c 2011-11-11 15:19:27.000000000 -0500
10511 ++++ linux-3.1.1/arch/x86/kernel/acpi/sleep.c 2011-11-16 18:39:07.000000000 -0500
10512 +@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10513 + header->trampoline_segment = trampoline_address() >> 4;
10514 + #ifdef CONFIG_SMP
10515 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10516 ++
10517 ++ pax_open_kernel();
10518 + early_gdt_descr.address =
10519 + (unsigned long)get_cpu_gdt_table(smp_processor_id());
10520 ++ pax_close_kernel();
10521 ++
10522 + initial_gs = per_cpu_offset(smp_processor_id());
10523 + #endif
10524 + initial_code = (unsigned long)wakeup_long64;
10525 +diff -urNp linux-3.1.1/arch/x86/kernel/acpi/wakeup_32.S linux-3.1.1/arch/x86/kernel/acpi/wakeup_32.S
10526 +--- linux-3.1.1/arch/x86/kernel/acpi/wakeup_32.S 2011-11-11 15:19:27.000000000 -0500
10527 ++++ linux-3.1.1/arch/x86/kernel/acpi/wakeup_32.S 2011-11-16 18:39:07.000000000 -0500
10528 +@@ -30,13 +30,11 @@ wakeup_pmode_return:
10529 + # and restore the stack ... but you need gdt for this to work
10530 + movl saved_context_esp, %esp
10531 +
10532 +- movl %cs:saved_magic, %eax
10533 +- cmpl $0x12345678, %eax
10534 ++ cmpl $0x12345678, saved_magic
10535 + jne bogus_magic
10536 +
10537 + # jump to place where we left off
10538 +- movl saved_eip, %eax
10539 +- jmp *%eax
10540 ++ jmp *(saved_eip)
10541 +
10542 + bogus_magic:
10543 + jmp bogus_magic
10544 +diff -urNp linux-3.1.1/arch/x86/kernel/alternative.c linux-3.1.1/arch/x86/kernel/alternative.c
10545 +--- linux-3.1.1/arch/x86/kernel/alternative.c 2011-11-11 15:19:27.000000000 -0500
10546 ++++ linux-3.1.1/arch/x86/kernel/alternative.c 2011-11-16 18:39:07.000000000 -0500
10547 +@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives
10548 + */
10549 + for (a = start; a < end; a++) {
10550 + instr = (u8 *)&a->instr_offset + a->instr_offset;
10551 ++
10552 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10553 ++ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10554 ++ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
10555 ++ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10556 ++#endif
10557 ++
10558 + replacement = (u8 *)&a->repl_offset + a->repl_offset;
10559 + BUG_ON(a->replacementlen > a->instrlen);
10560 + BUG_ON(a->instrlen > sizeof(insnbuf));
10561 +@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const
10562 + for (poff = start; poff < end; poff++) {
10563 + u8 *ptr = (u8 *)poff + *poff;
10564 +
10565 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10566 ++ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10567 ++ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
10568 ++ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10569 ++#endif
10570 ++
10571 + if (!*poff || ptr < text || ptr >= text_end)
10572 + continue;
10573 + /* turn DS segment override prefix into lock prefix */
10574 +- if (*ptr == 0x3e)
10575 ++ if (*ktla_ktva(ptr) == 0x3e)
10576 + text_poke(ptr, ((unsigned char []){0xf0}), 1);
10577 + };
10578 + mutex_unlock(&text_mutex);
10579 +@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(cons
10580 + for (poff = start; poff < end; poff++) {
10581 + u8 *ptr = (u8 *)poff + *poff;
10582 +
10583 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10584 ++ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10585 ++ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
10586 ++ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
10587 ++#endif
10588 ++
10589 + if (!*poff || ptr < text || ptr >= text_end)
10590 + continue;
10591 + /* turn lock prefix into DS segment override prefix */
10592 +- if (*ptr == 0xf0)
10593 ++ if (*ktla_ktva(ptr) == 0xf0)
10594 + text_poke(ptr, ((unsigned char []){0x3E}), 1);
10595 + };
10596 + mutex_unlock(&text_mutex);
10597 +@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(str
10598 +
10599 + BUG_ON(p->len > MAX_PATCH_LEN);
10600 + /* prep the buffer with the original instructions */
10601 +- memcpy(insnbuf, p->instr, p->len);
10602 ++ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10603 + used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10604 + (unsigned long)p->instr, p->len);
10605 +
10606 +@@ -568,7 +587,7 @@ void __init alternative_instructions(voi
10607 + if (smp_alt_once)
10608 + free_init_pages("SMP alternatives",
10609 + (unsigned long)__smp_locks,
10610 +- (unsigned long)__smp_locks_end);
10611 ++ PAGE_ALIGN((unsigned long)__smp_locks_end));
10612 +
10613 + restart_nmi();
10614 + }
10615 +@@ -585,13 +604,17 @@ void __init alternative_instructions(voi
10616 + * instructions. And on the local CPU you need to be protected again NMI or MCE
10617 + * handlers seeing an inconsistent instruction while you patch.
10618 + */
10619 +-void *__init_or_module text_poke_early(void *addr, const void *opcode,
10620 ++void *__kprobes text_poke_early(void *addr, const void *opcode,
10621 + size_t len)
10622 + {
10623 + unsigned long flags;
10624 + local_irq_save(flags);
10625 +- memcpy(addr, opcode, len);
10626 ++
10627 ++ pax_open_kernel();
10628 ++ memcpy(ktla_ktva(addr), opcode, len);
10629 + sync_core();
10630 ++ pax_close_kernel();
10631 ++
10632 + local_irq_restore(flags);
10633 + /* Could also do a CLFLUSH here to speed up CPU recovery; but
10634 + that causes hangs on some VIA CPUs. */
10635 +@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(v
10636 + */
10637 + void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10638 + {
10639 +- unsigned long flags;
10640 +- char *vaddr;
10641 ++ unsigned char *vaddr = ktla_ktva(addr);
10642 + struct page *pages[2];
10643 +- int i;
10644 ++ size_t i;
10645 +
10646 + if (!core_kernel_text((unsigned long)addr)) {
10647 +- pages[0] = vmalloc_to_page(addr);
10648 +- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10649 ++ pages[0] = vmalloc_to_page(vaddr);
10650 ++ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10651 + } else {
10652 +- pages[0] = virt_to_page(addr);
10653 ++ pages[0] = virt_to_page(vaddr);
10654 + WARN_ON(!PageReserved(pages[0]));
10655 +- pages[1] = virt_to_page(addr + PAGE_SIZE);
10656 ++ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10657 + }
10658 + BUG_ON(!pages[0]);
10659 +- local_irq_save(flags);
10660 +- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10661 +- if (pages[1])
10662 +- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10663 +- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10664 +- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10665 +- clear_fixmap(FIX_TEXT_POKE0);
10666 +- if (pages[1])
10667 +- clear_fixmap(FIX_TEXT_POKE1);
10668 +- local_flush_tlb();
10669 +- sync_core();
10670 +- /* Could also do a CLFLUSH here to speed up CPU recovery; but
10671 +- that causes hangs on some VIA CPUs. */
10672 ++ text_poke_early(addr, opcode, len);
10673 + for (i = 0; i < len; i++)
10674 +- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10675 +- local_irq_restore(flags);
10676 ++ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10677 + return addr;
10678 + }
10679 +
10680 +diff -urNp linux-3.1.1/arch/x86/kernel/apic/apic.c linux-3.1.1/arch/x86/kernel/apic/apic.c
10681 +--- linux-3.1.1/arch/x86/kernel/apic/apic.c 2011-11-11 15:19:27.000000000 -0500
10682 ++++ linux-3.1.1/arch/x86/kernel/apic/apic.c 2011-11-16 18:40:08.000000000 -0500
10683 +@@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
10684 + /*
10685 + * Debug level, exported for io_apic.c
10686 + */
10687 +-unsigned int apic_verbosity;
10688 ++int apic_verbosity;
10689 +
10690 + int pic_mode;
10691 +
10692 +@@ -1835,7 +1835,7 @@ void smp_error_interrupt(struct pt_regs
10693 + apic_write(APIC_ESR, 0);
10694 + v1 = apic_read(APIC_ESR);
10695 + ack_APIC_irq();
10696 +- atomic_inc(&irq_err_count);
10697 ++ atomic_inc_unchecked(&irq_err_count);
10698 +
10699 + apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
10700 + smp_processor_id(), v0 , v1);
10701 +@@ -2209,6 +2209,8 @@ static int __cpuinit apic_cluster_num(vo
10702 + u16 *bios_cpu_apicid;
10703 + DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10704 +
10705 ++ pax_track_stack();
10706 ++
10707 + bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10708 + bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10709 +
10710 +diff -urNp linux-3.1.1/arch/x86/kernel/apic/io_apic.c linux-3.1.1/arch/x86/kernel/apic/io_apic.c
10711 +--- linux-3.1.1/arch/x86/kernel/apic/io_apic.c 2011-11-11 15:19:27.000000000 -0500
10712 ++++ linux-3.1.1/arch/x86/kernel/apic/io_apic.c 2011-11-16 18:39:07.000000000 -0500
10713 +@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10714 + }
10715 + EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10716 +
10717 +-void lock_vector_lock(void)
10718 ++void lock_vector_lock(void) __acquires(vector_lock)
10719 + {
10720 + /* Used to the online set of cpus does not change
10721 + * during assign_irq_vector.
10722 +@@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
10723 + raw_spin_lock(&vector_lock);
10724 + }
10725 +
10726 +-void unlock_vector_lock(void)
10727 ++void unlock_vector_lock(void) __releases(vector_lock)
10728 + {
10729 + raw_spin_unlock(&vector_lock);
10730 + }
10731 +@@ -2405,7 +2405,7 @@ static void ack_apic_edge(struct irq_dat
10732 + ack_APIC_irq();
10733 + }
10734 +
10735 +-atomic_t irq_mis_count;
10736 ++atomic_unchecked_t irq_mis_count;
10737 +
10738 + /*
10739 + * IO-APIC versions below 0x20 don't support EOI register.
10740 +@@ -2513,7 +2513,7 @@ static void ack_apic_level(struct irq_da
10741 + * at the cpu.
10742 + */
10743 + if (!(v & (1 << (i & 0x1f)))) {
10744 +- atomic_inc(&irq_mis_count);
10745 ++ atomic_inc_unchecked(&irq_mis_count);
10746 +
10747 + eoi_ioapic_irq(irq, cfg);
10748 + }
10749 +diff -urNp linux-3.1.1/arch/x86/kernel/apm_32.c linux-3.1.1/arch/x86/kernel/apm_32.c
10750 +--- linux-3.1.1/arch/x86/kernel/apm_32.c 2011-11-11 15:19:27.000000000 -0500
10751 ++++ linux-3.1.1/arch/x86/kernel/apm_32.c 2011-11-16 18:39:07.000000000 -0500
10752 +@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
10753 + * This is for buggy BIOS's that refer to (real mode) segment 0x40
10754 + * even though they are called in protected mode.
10755 + */
10756 +-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10757 ++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10758 + (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10759 +
10760 + static const char driver_version[] = "1.16ac"; /* no spaces */
10761 +@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
10762 + BUG_ON(cpu != 0);
10763 + gdt = get_cpu_gdt_table(cpu);
10764 + save_desc_40 = gdt[0x40 / 8];
10765 ++
10766 ++ pax_open_kernel();
10767 + gdt[0x40 / 8] = bad_bios_desc;
10768 ++ pax_close_kernel();
10769 +
10770 + apm_irq_save(flags);
10771 + APM_DO_SAVE_SEGS;
10772 +@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
10773 + &call->esi);
10774 + APM_DO_RESTORE_SEGS;
10775 + apm_irq_restore(flags);
10776 ++
10777 ++ pax_open_kernel();
10778 + gdt[0x40 / 8] = save_desc_40;
10779 ++ pax_close_kernel();
10780 ++
10781 + put_cpu();
10782 +
10783 + return call->eax & 0xff;
10784 +@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void
10785 + BUG_ON(cpu != 0);
10786 + gdt = get_cpu_gdt_table(cpu);
10787 + save_desc_40 = gdt[0x40 / 8];
10788 ++
10789 ++ pax_open_kernel();
10790 + gdt[0x40 / 8] = bad_bios_desc;
10791 ++ pax_close_kernel();
10792 +
10793 + apm_irq_save(flags);
10794 + APM_DO_SAVE_SEGS;
10795 +@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void
10796 + &call->eax);
10797 + APM_DO_RESTORE_SEGS;
10798 + apm_irq_restore(flags);
10799 ++
10800 ++ pax_open_kernel();
10801 + gdt[0x40 / 8] = save_desc_40;
10802 ++ pax_close_kernel();
10803 ++
10804 + put_cpu();
10805 + return error;
10806 + }
10807 +@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
10808 + * code to that CPU.
10809 + */
10810 + gdt = get_cpu_gdt_table(0);
10811 ++
10812 ++ pax_open_kernel();
10813 + set_desc_base(&gdt[APM_CS >> 3],
10814 + (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10815 + set_desc_base(&gdt[APM_CS_16 >> 3],
10816 + (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10817 + set_desc_base(&gdt[APM_DS >> 3],
10818 + (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10819 ++ pax_close_kernel();
10820 +
10821 + proc_create("apm", 0, NULL, &apm_file_ops);
10822 +
10823 +diff -urNp linux-3.1.1/arch/x86/kernel/asm-offsets_64.c linux-3.1.1/arch/x86/kernel/asm-offsets_64.c
10824 +--- linux-3.1.1/arch/x86/kernel/asm-offsets_64.c 2011-11-11 15:19:27.000000000 -0500
10825 ++++ linux-3.1.1/arch/x86/kernel/asm-offsets_64.c 2011-11-16 18:39:07.000000000 -0500
10826 +@@ -69,6 +69,7 @@ int main(void)
10827 + BLANK();
10828 + #undef ENTRY
10829 +
10830 ++ DEFINE(TSS_size, sizeof(struct tss_struct));
10831 + OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10832 + BLANK();
10833 +
10834 +diff -urNp linux-3.1.1/arch/x86/kernel/asm-offsets.c linux-3.1.1/arch/x86/kernel/asm-offsets.c
10835 +--- linux-3.1.1/arch/x86/kernel/asm-offsets.c 2011-11-11 15:19:27.000000000 -0500
10836 ++++ linux-3.1.1/arch/x86/kernel/asm-offsets.c 2011-11-16 18:39:07.000000000 -0500
10837 +@@ -33,6 +33,8 @@ void common(void) {
10838 + OFFSET(TI_status, thread_info, status);
10839 + OFFSET(TI_addr_limit, thread_info, addr_limit);
10840 + OFFSET(TI_preempt_count, thread_info, preempt_count);
10841 ++ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10842 ++ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10843 +
10844 + BLANK();
10845 + OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10846 +@@ -53,8 +55,26 @@ void common(void) {
10847 + OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10848 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10849 + OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10850 ++
10851 ++#ifdef CONFIG_PAX_KERNEXEC
10852 ++ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10853 ++#endif
10854 ++
10855 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
10856 ++ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10857 ++ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10858 ++#ifdef CONFIG_X86_64
10859 ++ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
10860 ++#endif
10861 + #endif
10862 +
10863 ++#endif
10864 ++
10865 ++ BLANK();
10866 ++ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10867 ++ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10868 ++ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10869 ++
10870 + #ifdef CONFIG_XEN
10871 + BLANK();
10872 + OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10873 +diff -urNp linux-3.1.1/arch/x86/kernel/cpu/amd.c linux-3.1.1/arch/x86/kernel/cpu/amd.c
10874 +--- linux-3.1.1/arch/x86/kernel/cpu/amd.c 2011-11-11 15:19:27.000000000 -0500
10875 ++++ linux-3.1.1/arch/x86/kernel/cpu/amd.c 2011-11-16 18:39:07.000000000 -0500
10876 +@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10877 + unsigned int size)
10878 + {
10879 + /* AMD errata T13 (order #21922) */
10880 +- if ((c->x86 == 6)) {
10881 ++ if (c->x86 == 6) {
10882 + /* Duron Rev A0 */
10883 + if (c->x86_model == 3 && c->x86_mask == 0)
10884 + size = 64;
10885 +diff -urNp linux-3.1.1/arch/x86/kernel/cpu/common.c linux-3.1.1/arch/x86/kernel/cpu/common.c
10886 +--- linux-3.1.1/arch/x86/kernel/cpu/common.c 2011-11-11 15:19:27.000000000 -0500
10887 ++++ linux-3.1.1/arch/x86/kernel/cpu/common.c 2011-11-16 18:39:07.000000000 -0500
10888 +@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10889 +
10890 + static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10891 +
10892 +-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10893 +-#ifdef CONFIG_X86_64
10894 +- /*
10895 +- * We need valid kernel segments for data and code in long mode too
10896 +- * IRET will check the segment types kkeil 2000/10/28
10897 +- * Also sysret mandates a special GDT layout
10898 +- *
10899 +- * TLS descriptors are currently at a different place compared to i386.
10900 +- * Hopefully nobody expects them at a fixed place (Wine?)
10901 +- */
10902 +- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10903 +- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10904 +- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10905 +- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10906 +- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10907 +- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10908 +-#else
10909 +- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10910 +- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10911 +- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10912 +- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10913 +- /*
10914 +- * Segments used for calling PnP BIOS have byte granularity.
10915 +- * They code segments and data segments have fixed 64k limits,
10916 +- * the transfer segment sizes are set at run time.
10917 +- */
10918 +- /* 32-bit code */
10919 +- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10920 +- /* 16-bit code */
10921 +- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10922 +- /* 16-bit data */
10923 +- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10924 +- /* 16-bit data */
10925 +- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10926 +- /* 16-bit data */
10927 +- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10928 +- /*
10929 +- * The APM segments have byte granularity and their bases
10930 +- * are set at run time. All have 64k limits.
10931 +- */
10932 +- /* 32-bit code */
10933 +- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10934 +- /* 16-bit code */
10935 +- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10936 +- /* data */
10937 +- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10938 +-
10939 +- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10940 +- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10941 +- GDT_STACK_CANARY_INIT
10942 +-#endif
10943 +-} };
10944 +-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10945 +-
10946 + static int __init x86_xsave_setup(char *s)
10947 + {
10948 + setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10949 +@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
10950 + {
10951 + struct desc_ptr gdt_descr;
10952 +
10953 +- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10954 ++ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10955 + gdt_descr.size = GDT_SIZE - 1;
10956 + load_gdt(&gdt_descr);
10957 + /* Reload the per-cpu base */
10958 +@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struc
10959 + /* Filter out anything that depends on CPUID levels we don't have */
10960 + filter_cpuid_features(c, true);
10961 +
10962 ++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10963 ++ setup_clear_cpu_cap(X86_FEATURE_SEP);
10964 ++#endif
10965 ++
10966 + /* If the model name is still unset, do table lookup. */
10967 + if (!c->x86_model_id[0]) {
10968 + const char *p;
10969 +@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(cha
10970 + }
10971 + __setup("clearcpuid=", setup_disablecpuid);
10972 +
10973 ++DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10974 ++EXPORT_PER_CPU_SYMBOL(current_tinfo);
10975 ++
10976 + #ifdef CONFIG_X86_64
10977 + struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10978 +
10979 +@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10980 + EXPORT_PER_CPU_SYMBOL(current_task);
10981 +
10982 + DEFINE_PER_CPU(unsigned long, kernel_stack) =
10983 +- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10984 ++ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10985 + EXPORT_PER_CPU_SYMBOL(kernel_stack);
10986 +
10987 + DEFINE_PER_CPU(char *, irq_stack_ptr) =
10988 +@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(str
10989 + {
10990 + memset(regs, 0, sizeof(struct pt_regs));
10991 + regs->fs = __KERNEL_PERCPU;
10992 +- regs->gs = __KERNEL_STACK_CANARY;
10993 ++ savesegment(gs, regs->gs);
10994 +
10995 + return regs;
10996 + }
10997 +@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
10998 + int i;
10999 +
11000 + cpu = stack_smp_processor_id();
11001 +- t = &per_cpu(init_tss, cpu);
11002 ++ t = init_tss + cpu;
11003 + oist = &per_cpu(orig_ist, cpu);
11004 +
11005 + #ifdef CONFIG_NUMA
11006 +@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
11007 + switch_to_new_gdt(cpu);
11008 + loadsegment(fs, 0);
11009 +
11010 +- load_idt((const struct desc_ptr *)&idt_descr);
11011 ++ load_idt(&idt_descr);
11012 +
11013 + memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
11014 + syscall_init();
11015 +@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
11016 + wrmsrl(MSR_KERNEL_GS_BASE, 0);
11017 + barrier();
11018 +
11019 +- x86_configure_nx();
11020 + if (cpu != 0)
11021 + enable_x2apic();
11022 +
11023 +@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
11024 + {
11025 + int cpu = smp_processor_id();
11026 + struct task_struct *curr = current;
11027 +- struct tss_struct *t = &per_cpu(init_tss, cpu);
11028 ++ struct tss_struct *t = init_tss + cpu;
11029 + struct thread_struct *thread = &curr->thread;
11030 +
11031 + if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
11032 +diff -urNp linux-3.1.1/arch/x86/kernel/cpu/intel.c linux-3.1.1/arch/x86/kernel/cpu/intel.c
11033 +--- linux-3.1.1/arch/x86/kernel/cpu/intel.c 2011-11-11 15:19:27.000000000 -0500
11034 ++++ linux-3.1.1/arch/x86/kernel/cpu/intel.c 2011-11-16 18:39:07.000000000 -0500
11035 +@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug
11036 + * Update the IDT descriptor and reload the IDT so that
11037 + * it uses the read-only mapped virtual address.
11038 + */
11039 +- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
11040 ++ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
11041 + load_idt(&idt_descr);
11042 + }
11043 + #endif
11044 +diff -urNp linux-3.1.1/arch/x86/kernel/cpu/Makefile linux-3.1.1/arch/x86/kernel/cpu/Makefile
11045 +--- linux-3.1.1/arch/x86/kernel/cpu/Makefile 2011-11-11 15:19:27.000000000 -0500
11046 ++++ linux-3.1.1/arch/x86/kernel/cpu/Makefile 2011-11-16 18:39:07.000000000 -0500
11047 +@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11048 + CFLAGS_REMOVE_perf_event.o = -pg
11049 + endif
11050 +
11051 +-# Make sure load_percpu_segment has no stackprotector
11052 +-nostackp := $(call cc-option, -fno-stack-protector)
11053 +-CFLAGS_common.o := $(nostackp)
11054 +-
11055 + obj-y := intel_cacheinfo.o scattered.o topology.o
11056 + obj-y += proc.o capflags.o powerflags.o common.o
11057 + obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11058 +diff -urNp linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce.c linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce.c
11059 +--- linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce.c 2011-11-11 15:19:27.000000000 -0500
11060 ++++ linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce.c 2011-11-16 18:39:07.000000000 -0500
11061 +@@ -42,6 +42,7 @@
11062 + #include <asm/processor.h>
11063 + #include <asm/mce.h>
11064 + #include <asm/msr.h>
11065 ++#include <asm/local.h>
11066 +
11067 + #include "mce-internal.h"
11068 +
11069 +@@ -205,7 +206,7 @@ static void print_mce(struct mce *m)
11070 + !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
11071 + m->cs, m->ip);
11072 +
11073 +- if (m->cs == __KERNEL_CS)
11074 ++ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
11075 + print_symbol("{%s}", m->ip);
11076 + pr_cont("\n");
11077 + }
11078 +@@ -233,10 +234,10 @@ static void print_mce(struct mce *m)
11079 +
11080 + #define PANIC_TIMEOUT 5 /* 5 seconds */
11081 +
11082 +-static atomic_t mce_paniced;
11083 ++static atomic_unchecked_t mce_paniced;
11084 +
11085 + static int fake_panic;
11086 +-static atomic_t mce_fake_paniced;
11087 ++static atomic_unchecked_t mce_fake_paniced;
11088 +
11089 + /* Panic in progress. Enable interrupts and wait for final IPI */
11090 + static void wait_for_panic(void)
11091 +@@ -260,7 +261,7 @@ static void mce_panic(char *msg, struct
11092 + /*
11093 + * Make sure only one CPU runs in machine check panic
11094 + */
11095 +- if (atomic_inc_return(&mce_paniced) > 1)
11096 ++ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11097 + wait_for_panic();
11098 + barrier();
11099 +
11100 +@@ -268,7 +269,7 @@ static void mce_panic(char *msg, struct
11101 + console_verbose();
11102 + } else {
11103 + /* Don't log too much for fake panic */
11104 +- if (atomic_inc_return(&mce_fake_paniced) > 1)
11105 ++ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11106 + return;
11107 + }
11108 + /* First print corrected ones that are still unlogged */
11109 +@@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
11110 + * might have been modified by someone else.
11111 + */
11112 + rmb();
11113 +- if (atomic_read(&mce_paniced))
11114 ++ if (atomic_read_unchecked(&mce_paniced))
11115 + wait_for_panic();
11116 + if (!monarch_timeout)
11117 + goto out;
11118 +@@ -1429,7 +1430,7 @@ void __cpuinit mcheck_cpu_init(struct cp
11119 + */
11120 +
11121 + static DEFINE_SPINLOCK(mce_chrdev_state_lock);
11122 +-static int mce_chrdev_open_count; /* #times opened */
11123 ++static local_t mce_chrdev_open_count; /* #times opened */
11124 + static int mce_chrdev_open_exclu; /* already open exclusive? */
11125 +
11126 + static int mce_chrdev_open(struct inode *inode, struct file *file)
11127 +@@ -1437,7 +1438,7 @@ static int mce_chrdev_open(struct inode
11128 + spin_lock(&mce_chrdev_state_lock);
11129 +
11130 + if (mce_chrdev_open_exclu ||
11131 +- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
11132 ++ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
11133 + spin_unlock(&mce_chrdev_state_lock);
11134 +
11135 + return -EBUSY;
11136 +@@ -1445,7 +1446,7 @@ static int mce_chrdev_open(struct inode
11137 +
11138 + if (file->f_flags & O_EXCL)
11139 + mce_chrdev_open_exclu = 1;
11140 +- mce_chrdev_open_count++;
11141 ++ local_inc(&mce_chrdev_open_count);
11142 +
11143 + spin_unlock(&mce_chrdev_state_lock);
11144 +
11145 +@@ -1456,7 +1457,7 @@ static int mce_chrdev_release(struct ino
11146 + {
11147 + spin_lock(&mce_chrdev_state_lock);
11148 +
11149 +- mce_chrdev_open_count--;
11150 ++ local_dec(&mce_chrdev_open_count);
11151 + mce_chrdev_open_exclu = 0;
11152 +
11153 + spin_unlock(&mce_chrdev_state_lock);
11154 +@@ -2147,7 +2148,7 @@ struct dentry *mce_get_debugfs_dir(void)
11155 + static void mce_reset(void)
11156 + {
11157 + cpu_missing = 0;
11158 +- atomic_set(&mce_fake_paniced, 0);
11159 ++ atomic_set_unchecked(&mce_fake_paniced, 0);
11160 + atomic_set(&mce_executing, 0);
11161 + atomic_set(&mce_callin, 0);
11162 + atomic_set(&global_nwo, 0);
11163 +diff -urNp linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce-inject.c
11164 +--- linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-11-11 15:19:27.000000000 -0500
11165 ++++ linux-3.1.1/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-11-16 18:39:07.000000000 -0500
11166 +@@ -215,7 +215,9 @@ static int inject_init(void)
11167 + if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
11168 + return -ENOMEM;
11169 + printk(KERN_INFO "Machine check injector initialized\n");
11170 +- mce_chrdev_ops.write = mce_write;
11171 ++ pax_open_kernel();
11172 ++ *(void **)&mce_chrdev_ops.write = mce_write;
11173 ++ pax_close_kernel();
11174 + register_die_notifier(&mce_raise_nb);
11175 + return 0;
11176 + }
11177 +diff -urNp linux-3.1.1/arch/x86/kernel/cpu/mtrr/main.c linux-3.1.1/arch/x86/kernel/cpu/mtrr/main.c
11178 +--- linux-3.1.1/arch/x86/kernel/cpu/mtrr/main.c 2011-11-11 15:19:27.000000000 -0500
11179 ++++ linux-3.1.1/arch/x86/kernel/cpu/mtrr/main.c 2011-11-16 18:39:07.000000000 -0500
11180 +@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
11181 + u64 size_or_mask, size_and_mask;
11182 + static bool mtrr_aps_delayed_init;
11183 +
11184 +-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11185 ++static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11186 +
11187 + const struct mtrr_ops *mtrr_if;
11188 +
11189 +diff -urNp linux-3.1.1/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.1.1/arch/x86/kernel/cpu/mtrr/mtrr.h
11190 +--- linux-3.1.1/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-11-11 15:19:27.000000000 -0500
11191 ++++ linux-3.1.1/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-11-16 18:39:07.000000000 -0500
11192 +@@ -25,7 +25,7 @@ struct mtrr_ops {
11193 + int (*validate_add_page)(unsigned long base, unsigned long size,
11194 + unsigned int type);
11195 + int (*have_wrcomb)(void);
11196 +-};
11197 ++} __do_const;
11198 +
11199 + extern int generic_get_free_region(unsigned long base, unsigned long size,
11200 + int replace_reg);
11201 +diff -urNp linux-3.1.1/arch/x86/kernel/cpu/perf_event.c linux-3.1.1/arch/x86/kernel/cpu/perf_event.c
11202 +--- linux-3.1.1/arch/x86/kernel/cpu/perf_event.c 2011-11-11 15:19:27.000000000 -0500
11203 ++++ linux-3.1.1/arch/x86/kernel/cpu/perf_event.c 2011-11-16 18:40:08.000000000 -0500
11204 +@@ -795,6 +795,8 @@ static int x86_schedule_events(struct cp
11205 + int i, j, w, wmax, num = 0;
11206 + struct hw_perf_event *hwc;
11207 +
11208 ++ pax_track_stack();
11209 ++
11210 + bitmap_zero(used_mask, X86_PMC_IDX_MAX);
11211 +
11212 + for (i = 0; i < n; i++) {
11213 +@@ -1919,7 +1921,7 @@ perf_callchain_user(struct perf_callchai
11214 + break;
11215 +
11216 + perf_callchain_store(entry, frame.return_address);
11217 +- fp = frame.next_frame;
11218 ++ fp = (const void __force_user *)frame.next_frame;
11219 + }
11220 + }
11221 +
11222 +diff -urNp linux-3.1.1/arch/x86/kernel/crash.c linux-3.1.1/arch/x86/kernel/crash.c
11223 +--- linux-3.1.1/arch/x86/kernel/crash.c 2011-11-11 15:19:27.000000000 -0500
11224 ++++ linux-3.1.1/arch/x86/kernel/crash.c 2011-11-16 18:39:07.000000000 -0500
11225 +@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
11226 + regs = args->regs;
11227 +
11228 + #ifdef CONFIG_X86_32
11229 +- if (!user_mode_vm(regs)) {
11230 ++ if (!user_mode(regs)) {
11231 + crash_fixup_ss_esp(&fixed_regs, regs);
11232 + regs = &fixed_regs;
11233 + }
11234 +diff -urNp linux-3.1.1/arch/x86/kernel/doublefault_32.c linux-3.1.1/arch/x86/kernel/doublefault_32.c
11235 +--- linux-3.1.1/arch/x86/kernel/doublefault_32.c 2011-11-11 15:19:27.000000000 -0500
11236 ++++ linux-3.1.1/arch/x86/kernel/doublefault_32.c 2011-11-16 18:39:07.000000000 -0500
11237 +@@ -11,7 +11,7 @@
11238 +
11239 + #define DOUBLEFAULT_STACKSIZE (1024)
11240 + static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
11241 +-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
11242 ++#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
11243 +
11244 + #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
11245 +
11246 +@@ -21,7 +21,7 @@ static void doublefault_fn(void)
11247 + unsigned long gdt, tss;
11248 +
11249 + store_gdt(&gdt_desc);
11250 +- gdt = gdt_desc.address;
11251 ++ gdt = (unsigned long)gdt_desc.address;
11252 +
11253 + printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
11254 +
11255 +@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
11256 + /* 0x2 bit is always set */
11257 + .flags = X86_EFLAGS_SF | 0x2,
11258 + .sp = STACK_START,
11259 +- .es = __USER_DS,
11260 ++ .es = __KERNEL_DS,
11261 + .cs = __KERNEL_CS,
11262 + .ss = __KERNEL_DS,
11263 +- .ds = __USER_DS,
11264 ++ .ds = __KERNEL_DS,
11265 + .fs = __KERNEL_PERCPU,
11266 +
11267 + .__cr3 = __pa_nodebug(swapper_pg_dir),
11268 +diff -urNp linux-3.1.1/arch/x86/kernel/dumpstack_32.c linux-3.1.1/arch/x86/kernel/dumpstack_32.c
11269 +--- linux-3.1.1/arch/x86/kernel/dumpstack_32.c 2011-11-11 15:19:27.000000000 -0500
11270 ++++ linux-3.1.1/arch/x86/kernel/dumpstack_32.c 2011-11-16 18:39:07.000000000 -0500
11271 +@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11272 + bp = stack_frame(task, regs);
11273 +
11274 + for (;;) {
11275 +- struct thread_info *context;
11276 ++ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11277 +
11278 +- context = (struct thread_info *)
11279 +- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11280 +- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11281 ++ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11282 +
11283 +- stack = (unsigned long *)context->previous_esp;
11284 +- if (!stack)
11285 ++ if (stack_start == task_stack_page(task))
11286 + break;
11287 ++ stack = *(unsigned long **)stack_start;
11288 + if (ops->stack(data, "IRQ") < 0)
11289 + break;
11290 + touch_nmi_watchdog();
11291 +@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11292 + * When in-kernel, we also print out the stack and code at the
11293 + * time of the fault..
11294 + */
11295 +- if (!user_mode_vm(regs)) {
11296 ++ if (!user_mode(regs)) {
11297 + unsigned int code_prologue = code_bytes * 43 / 64;
11298 + unsigned int code_len = code_bytes;
11299 + unsigned char c;
11300 + u8 *ip;
11301 ++ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11302 +
11303 + printk(KERN_EMERG "Stack:\n");
11304 + show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11305 +
11306 + printk(KERN_EMERG "Code: ");
11307 +
11308 +- ip = (u8 *)regs->ip - code_prologue;
11309 ++ ip = (u8 *)regs->ip - code_prologue + cs_base;
11310 + if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11311 + /* try starting at IP */
11312 +- ip = (u8 *)regs->ip;
11313 ++ ip = (u8 *)regs->ip + cs_base;
11314 + code_len = code_len - code_prologue + 1;
11315 + }
11316 + for (i = 0; i < code_len; i++, ip++) {
11317 +@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11318 + printk(" Bad EIP value.");
11319 + break;
11320 + }
11321 +- if (ip == (u8 *)regs->ip)
11322 ++ if (ip == (u8 *)regs->ip + cs_base)
11323 + printk("<%02x> ", c);
11324 + else
11325 + printk("%02x ", c);
11326 +@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11327 + {
11328 + unsigned short ud2;
11329 +
11330 ++ ip = ktla_ktva(ip);
11331 + if (ip < PAGE_OFFSET)
11332 + return 0;
11333 + if (probe_kernel_address((unsigned short *)ip, ud2))
11334 +@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
11335 +
11336 + return ud2 == 0x0b0f;
11337 + }
11338 ++
11339 ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11340 ++void pax_check_alloca(unsigned long size)
11341 ++{
11342 ++ unsigned long sp = (unsigned long)&sp, stack_left;
11343 ++
11344 ++ /* all kernel stacks are of the same size */
11345 ++ stack_left = sp & (THREAD_SIZE - 1);
11346 ++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11347 ++}
11348 ++EXPORT_SYMBOL(pax_check_alloca);
11349 ++#endif
11350 +diff -urNp linux-3.1.1/arch/x86/kernel/dumpstack_64.c linux-3.1.1/arch/x86/kernel/dumpstack_64.c
11351 +--- linux-3.1.1/arch/x86/kernel/dumpstack_64.c 2011-11-11 15:19:27.000000000 -0500
11352 ++++ linux-3.1.1/arch/x86/kernel/dumpstack_64.c 2011-11-16 18:39:07.000000000 -0500
11353 +@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task
11354 + unsigned long *irq_stack_end =
11355 + (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11356 + unsigned used = 0;
11357 +- struct thread_info *tinfo;
11358 + int graph = 0;
11359 + unsigned long dummy;
11360 ++ void *stack_start;
11361 +
11362 + if (!task)
11363 + task = current;
11364 +@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task
11365 + * current stack address. If the stacks consist of nested
11366 + * exceptions
11367 + */
11368 +- tinfo = task_thread_info(task);
11369 + for (;;) {
11370 + char *id;
11371 + unsigned long *estack_end;
11372 ++
11373 + estack_end = in_exception_stack(cpu, (unsigned long)stack,
11374 + &used, &id);
11375 +
11376 +@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task
11377 + if (ops->stack(data, id) < 0)
11378 + break;
11379 +
11380 +- bp = ops->walk_stack(tinfo, stack, bp, ops,
11381 ++ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11382 + data, estack_end, &graph);
11383 + ops->stack(data, "<EOE>");
11384 + /*
11385 +@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task
11386 + if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11387 + if (ops->stack(data, "IRQ") < 0)
11388 + break;
11389 +- bp = ops->walk_stack(tinfo, stack, bp,
11390 ++ bp = ops->walk_stack(task, irq_stack, stack, bp,
11391 + ops, data, irq_stack_end, &graph);
11392 + /*
11393 + * We link to the next stack (which would be
11394 +@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task
11395 + /*
11396 + * This handles the process stack:
11397 + */
11398 +- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11399 ++ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11400 ++ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11401 + put_cpu();
11402 + }
11403 + EXPORT_SYMBOL(dump_trace);
11404 +@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
11405 +
11406 + return ud2 == 0x0b0f;
11407 + }
11408 ++
11409 ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11410 ++void pax_check_alloca(unsigned long size)
11411 ++{
11412 ++ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
11413 ++ unsigned cpu, used;
11414 ++ char *id;
11415 ++
11416 ++ /* check the process stack first */
11417 ++ stack_start = (unsigned long)task_stack_page(current);
11418 ++ stack_end = stack_start + THREAD_SIZE;
11419 ++ if (likely(stack_start <= sp && sp < stack_end)) {
11420 ++ unsigned long stack_left = sp & (THREAD_SIZE - 1);
11421 ++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11422 ++ return;
11423 ++ }
11424 ++
11425 ++ cpu = get_cpu();
11426 ++
11427 ++ /* check the irq stacks */
11428 ++ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
11429 ++ stack_start = stack_end - IRQ_STACK_SIZE;
11430 ++ if (stack_start <= sp && sp < stack_end) {
11431 ++ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
11432 ++ put_cpu();
11433 ++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11434 ++ return;
11435 ++ }
11436 ++
11437 ++ /* check the exception stacks */
11438 ++ used = 0;
11439 ++ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
11440 ++ stack_start = stack_end - EXCEPTION_STKSZ;
11441 ++ if (stack_end && stack_start <= sp && sp < stack_end) {
11442 ++ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
11443 ++ put_cpu();
11444 ++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
11445 ++ return;
11446 ++ }
11447 ++
11448 ++ put_cpu();
11449 ++
11450 ++ /* unknown stack */
11451 ++ BUG();
11452 ++}
11453 ++EXPORT_SYMBOL(pax_check_alloca);
11454 ++#endif
11455 +diff -urNp linux-3.1.1/arch/x86/kernel/dumpstack.c linux-3.1.1/arch/x86/kernel/dumpstack.c
11456 +--- linux-3.1.1/arch/x86/kernel/dumpstack.c 2011-11-11 15:19:27.000000000 -0500
11457 ++++ linux-3.1.1/arch/x86/kernel/dumpstack.c 2011-11-16 18:40:08.000000000 -0500
11458 +@@ -2,6 +2,9 @@
11459 + * Copyright (C) 1991, 1992 Linus Torvalds
11460 + * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11461 + */
11462 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
11463 ++#define __INCLUDED_BY_HIDESYM 1
11464 ++#endif
11465 + #include <linux/kallsyms.h>
11466 + #include <linux/kprobes.h>
11467 + #include <linux/uaccess.h>
11468 +@@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11469 + static void
11470 + print_ftrace_graph_addr(unsigned long addr, void *data,
11471 + const struct stacktrace_ops *ops,
11472 +- struct thread_info *tinfo, int *graph)
11473 ++ struct task_struct *task, int *graph)
11474 + {
11475 +- struct task_struct *task = tinfo->task;
11476 + unsigned long ret_addr;
11477 + int index = task->curr_ret_stack;
11478 +
11479 +@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11480 + static inline void
11481 + print_ftrace_graph_addr(unsigned long addr, void *data,
11482 + const struct stacktrace_ops *ops,
11483 +- struct thread_info *tinfo, int *graph)
11484 ++ struct task_struct *task, int *graph)
11485 + { }
11486 + #endif
11487 +
11488 +@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11489 + * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11490 + */
11491 +
11492 +-static inline int valid_stack_ptr(struct thread_info *tinfo,
11493 +- void *p, unsigned int size, void *end)
11494 ++static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11495 + {
11496 +- void *t = tinfo;
11497 + if (end) {
11498 + if (p < end && p >= (end-THREAD_SIZE))
11499 + return 1;
11500 +@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11501 + }
11502 +
11503 + unsigned long
11504 +-print_context_stack(struct thread_info *tinfo,
11505 ++print_context_stack(struct task_struct *task, void *stack_start,
11506 + unsigned long *stack, unsigned long bp,
11507 + const struct stacktrace_ops *ops, void *data,
11508 + unsigned long *end, int *graph)
11509 + {
11510 + struct stack_frame *frame = (struct stack_frame *)bp;
11511 +
11512 +- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11513 ++ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11514 + unsigned long addr;
11515 +
11516 + addr = *stack;
11517 +@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11518 + } else {
11519 + ops->address(data, addr, 0);
11520 + }
11521 +- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11522 ++ print_ftrace_graph_addr(addr, data, ops, task, graph);
11523 + }
11524 + stack++;
11525 + }
11526 +@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11527 + EXPORT_SYMBOL_GPL(print_context_stack);
11528 +
11529 + unsigned long
11530 +-print_context_stack_bp(struct thread_info *tinfo,
11531 ++print_context_stack_bp(struct task_struct *task, void *stack_start,
11532 + unsigned long *stack, unsigned long bp,
11533 + const struct stacktrace_ops *ops, void *data,
11534 + unsigned long *end, int *graph)
11535 +@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11536 + struct stack_frame *frame = (struct stack_frame *)bp;
11537 + unsigned long *ret_addr = &frame->return_address;
11538 +
11539 +- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11540 ++ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11541 + unsigned long addr = *ret_addr;
11542 +
11543 + if (!__kernel_text_address(addr))
11544 +@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11545 + ops->address(data, addr, 1);
11546 + frame = frame->next_frame;
11547 + ret_addr = &frame->return_address;
11548 +- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11549 ++ print_ftrace_graph_addr(addr, data, ops, task, graph);
11550 + }
11551 +
11552 + return (unsigned long)frame;
11553 +@@ -186,7 +186,7 @@ void dump_stack(void)
11554 +
11555 + bp = stack_frame(current, NULL);
11556 + printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11557 +- current->pid, current->comm, print_tainted(),
11558 ++ task_pid_nr(current), current->comm, print_tainted(),
11559 + init_utsname()->release,
11560 + (int)strcspn(init_utsname()->version, " "),
11561 + init_utsname()->version);
11562 +@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
11563 + }
11564 + EXPORT_SYMBOL_GPL(oops_begin);
11565 +
11566 ++extern void gr_handle_kernel_exploit(void);
11567 ++
11568 + void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11569 + {
11570 + if (regs && kexec_should_crash(current))
11571 +@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long fl
11572 + panic("Fatal exception in interrupt");
11573 + if (panic_on_oops)
11574 + panic("Fatal exception");
11575 +- do_exit(signr);
11576 ++
11577 ++ gr_handle_kernel_exploit();
11578 ++
11579 ++ do_group_exit(signr);
11580 + }
11581 +
11582 + int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11583 +@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, str
11584 +
11585 + show_registers(regs);
11586 + #ifdef CONFIG_X86_32
11587 +- if (user_mode_vm(regs)) {
11588 ++ if (user_mode(regs)) {
11589 + sp = regs->sp;
11590 + ss = regs->ss & 0xffff;
11591 + } else {
11592 +@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs
11593 + unsigned long flags = oops_begin();
11594 + int sig = SIGSEGV;
11595 +
11596 +- if (!user_mode_vm(regs))
11597 ++ if (!user_mode(regs))
11598 + report_bug(regs->ip, regs);
11599 +
11600 + if (__die(str, regs, err))
11601 +diff -urNp linux-3.1.1/arch/x86/kernel/early_printk.c linux-3.1.1/arch/x86/kernel/early_printk.c
11602 +--- linux-3.1.1/arch/x86/kernel/early_printk.c 2011-11-11 15:19:27.000000000 -0500
11603 ++++ linux-3.1.1/arch/x86/kernel/early_printk.c 2011-11-16 18:40:08.000000000 -0500
11604 +@@ -7,6 +7,7 @@
11605 + #include <linux/pci_regs.h>
11606 + #include <linux/pci_ids.h>
11607 + #include <linux/errno.h>
11608 ++#include <linux/sched.h>
11609 + #include <asm/io.h>
11610 + #include <asm/processor.h>
11611 + #include <asm/fcntl.h>
11612 +@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11613 + int n;
11614 + va_list ap;
11615 +
11616 ++ pax_track_stack();
11617 ++
11618 + va_start(ap, fmt);
11619 + n = vscnprintf(buf, sizeof(buf), fmt, ap);
11620 + early_console->write(early_console, buf, n);
11621 +diff -urNp linux-3.1.1/arch/x86/kernel/entry_32.S linux-3.1.1/arch/x86/kernel/entry_32.S
11622 +--- linux-3.1.1/arch/x86/kernel/entry_32.S 2011-11-11 15:19:27.000000000 -0500
11623 ++++ linux-3.1.1/arch/x86/kernel/entry_32.S 2011-11-16 18:40:08.000000000 -0500
11624 +@@ -186,13 +186,146 @@
11625 + /*CFI_REL_OFFSET gs, PT_GS*/
11626 + .endm
11627 + .macro SET_KERNEL_GS reg
11628 ++
11629 ++#ifdef CONFIG_CC_STACKPROTECTOR
11630 + movl $(__KERNEL_STACK_CANARY), \reg
11631 ++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11632 ++ movl $(__USER_DS), \reg
11633 ++#else
11634 ++ xorl \reg, \reg
11635 ++#endif
11636 ++
11637 + movl \reg, %gs
11638 + .endm
11639 +
11640 + #endif /* CONFIG_X86_32_LAZY_GS */
11641 +
11642 +-.macro SAVE_ALL
11643 ++.macro pax_enter_kernel
11644 ++#ifdef CONFIG_PAX_KERNEXEC
11645 ++ call pax_enter_kernel
11646 ++#endif
11647 ++.endm
11648 ++
11649 ++.macro pax_exit_kernel
11650 ++#ifdef CONFIG_PAX_KERNEXEC
11651 ++ call pax_exit_kernel
11652 ++#endif
11653 ++.endm
11654 ++
11655 ++#ifdef CONFIG_PAX_KERNEXEC
11656 ++ENTRY(pax_enter_kernel)
11657 ++#ifdef CONFIG_PARAVIRT
11658 ++ pushl %eax
11659 ++ pushl %ecx
11660 ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11661 ++ mov %eax, %esi
11662 ++#else
11663 ++ mov %cr0, %esi
11664 ++#endif
11665 ++ bts $16, %esi
11666 ++ jnc 1f
11667 ++ mov %cs, %esi
11668 ++ cmp $__KERNEL_CS, %esi
11669 ++ jz 3f
11670 ++ ljmp $__KERNEL_CS, $3f
11671 ++1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11672 ++2:
11673 ++#ifdef CONFIG_PARAVIRT
11674 ++ mov %esi, %eax
11675 ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11676 ++#else
11677 ++ mov %esi, %cr0
11678 ++#endif
11679 ++3:
11680 ++#ifdef CONFIG_PARAVIRT
11681 ++ popl %ecx
11682 ++ popl %eax
11683 ++#endif
11684 ++ ret
11685 ++ENDPROC(pax_enter_kernel)
11686 ++
11687 ++ENTRY(pax_exit_kernel)
11688 ++#ifdef CONFIG_PARAVIRT
11689 ++ pushl %eax
11690 ++ pushl %ecx
11691 ++#endif
11692 ++ mov %cs, %esi
11693 ++ cmp $__KERNEXEC_KERNEL_CS, %esi
11694 ++ jnz 2f
11695 ++#ifdef CONFIG_PARAVIRT
11696 ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11697 ++ mov %eax, %esi
11698 ++#else
11699 ++ mov %cr0, %esi
11700 ++#endif
11701 ++ btr $16, %esi
11702 ++ ljmp $__KERNEL_CS, $1f
11703 ++1:
11704 ++#ifdef CONFIG_PARAVIRT
11705 ++ mov %esi, %eax
11706 ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11707 ++#else
11708 ++ mov %esi, %cr0
11709 ++#endif
11710 ++2:
11711 ++#ifdef CONFIG_PARAVIRT
11712 ++ popl %ecx
11713 ++ popl %eax
11714 ++#endif
11715 ++ ret
11716 ++ENDPROC(pax_exit_kernel)
11717 ++#endif
11718 ++
11719 ++.macro pax_erase_kstack
11720 ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11721 ++ call pax_erase_kstack
11722 ++#endif
11723 ++.endm
11724 ++
11725 ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11726 ++/*
11727 ++ * ebp: thread_info
11728 ++ * ecx, edx: can be clobbered
11729 ++ */
11730 ++ENTRY(pax_erase_kstack)
11731 ++ pushl %edi
11732 ++ pushl %eax
11733 ++
11734 ++ mov TI_lowest_stack(%ebp), %edi
11735 ++ mov $-0xBEEF, %eax
11736 ++ std
11737 ++
11738 ++1: mov %edi, %ecx
11739 ++ and $THREAD_SIZE_asm - 1, %ecx
11740 ++ shr $2, %ecx
11741 ++ repne scasl
11742 ++ jecxz 2f
11743 ++
11744 ++ cmp $2*16, %ecx
11745 ++ jc 2f
11746 ++
11747 ++ mov $2*16, %ecx
11748 ++ repe scasl
11749 ++ jecxz 2f
11750 ++ jne 1b
11751 ++
11752 ++2: cld
11753 ++ mov %esp, %ecx
11754 ++ sub %edi, %ecx
11755 ++ shr $2, %ecx
11756 ++ rep stosl
11757 ++
11758 ++ mov TI_task_thread_sp0(%ebp), %edi
11759 ++ sub $128, %edi
11760 ++ mov %edi, TI_lowest_stack(%ebp)
11761 ++
11762 ++ popl %eax
11763 ++ popl %edi
11764 ++ ret
11765 ++ENDPROC(pax_erase_kstack)
11766 ++#endif
11767 ++
11768 ++.macro __SAVE_ALL _DS
11769 + cld
11770 + PUSH_GS
11771 + pushl_cfi %fs
11772 +@@ -215,7 +348,7 @@
11773 + CFI_REL_OFFSET ecx, 0
11774 + pushl_cfi %ebx
11775 + CFI_REL_OFFSET ebx, 0
11776 +- movl $(__USER_DS), %edx
11777 ++ movl $\_DS, %edx
11778 + movl %edx, %ds
11779 + movl %edx, %es
11780 + movl $(__KERNEL_PERCPU), %edx
11781 +@@ -223,6 +356,15 @@
11782 + SET_KERNEL_GS %edx
11783 + .endm
11784 +
11785 ++.macro SAVE_ALL
11786 ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11787 ++ __SAVE_ALL __KERNEL_DS
11788 ++ pax_enter_kernel
11789 ++#else
11790 ++ __SAVE_ALL __USER_DS
11791 ++#endif
11792 ++.endm
11793 ++
11794 + .macro RESTORE_INT_REGS
11795 + popl_cfi %ebx
11796 + CFI_RESTORE ebx
11797 +@@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
11798 + popfl_cfi
11799 + jmp syscall_exit
11800 + CFI_ENDPROC
11801 +-END(ret_from_fork)
11802 ++ENDPROC(ret_from_fork)
11803 +
11804 + /*
11805 + * Interrupt exit functions should be protected against kprobes
11806 +@@ -333,7 +475,15 @@ check_userspace:
11807 + movb PT_CS(%esp), %al
11808 + andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11809 + cmpl $USER_RPL, %eax
11810 ++
11811 ++#ifdef CONFIG_PAX_KERNEXEC
11812 ++ jae resume_userspace
11813 ++
11814 ++ PAX_EXIT_KERNEL
11815 ++ jmp resume_kernel
11816 ++#else
11817 + jb resume_kernel # not returning to v8086 or userspace
11818 ++#endif
11819 +
11820 + ENTRY(resume_userspace)
11821 + LOCKDEP_SYS_EXIT
11822 +@@ -345,8 +495,8 @@ ENTRY(resume_userspace)
11823 + andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11824 + # int/exception return?
11825 + jne work_pending
11826 +- jmp restore_all
11827 +-END(ret_from_exception)
11828 ++ jmp restore_all_pax
11829 ++ENDPROC(ret_from_exception)
11830 +
11831 + #ifdef CONFIG_PREEMPT
11832 + ENTRY(resume_kernel)
11833 +@@ -361,7 +511,7 @@ need_resched:
11834 + jz restore_all
11835 + call preempt_schedule_irq
11836 + jmp need_resched
11837 +-END(resume_kernel)
11838 ++ENDPROC(resume_kernel)
11839 + #endif
11840 + CFI_ENDPROC
11841 + /*
11842 +@@ -395,23 +545,34 @@ sysenter_past_esp:
11843 + /*CFI_REL_OFFSET cs, 0*/
11844 + /*
11845 + * Push current_thread_info()->sysenter_return to the stack.
11846 +- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11847 +- * pushed above; +8 corresponds to copy_thread's esp0 setting.
11848 + */
11849 +- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11850 ++ pushl_cfi $0
11851 + CFI_REL_OFFSET eip, 0
11852 +
11853 + pushl_cfi %eax
11854 + SAVE_ALL
11855 ++ GET_THREAD_INFO(%ebp)
11856 ++ movl TI_sysenter_return(%ebp),%ebp
11857 ++ movl %ebp,PT_EIP(%esp)
11858 + ENABLE_INTERRUPTS(CLBR_NONE)
11859 +
11860 + /*
11861 + * Load the potential sixth argument from user stack.
11862 + * Careful about security.
11863 + */
11864 ++ movl PT_OLDESP(%esp),%ebp
11865 ++
11866 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
11867 ++ mov PT_OLDSS(%esp),%ds
11868 ++1: movl %ds:(%ebp),%ebp
11869 ++ push %ss
11870 ++ pop %ds
11871 ++#else
11872 + cmpl $__PAGE_OFFSET-3,%ebp
11873 + jae syscall_fault
11874 + 1: movl (%ebp),%ebp
11875 ++#endif
11876 ++
11877 + movl %ebp,PT_EBP(%esp)
11878 + .section __ex_table,"a"
11879 + .align 4
11880 +@@ -434,12 +595,24 @@ sysenter_do_call:
11881 + testl $_TIF_ALLWORK_MASK, %ecx
11882 + jne sysexit_audit
11883 + sysenter_exit:
11884 ++
11885 ++#ifdef CONFIG_PAX_RANDKSTACK
11886 ++ pushl_cfi %eax
11887 ++ movl %esp, %eax
11888 ++ call pax_randomize_kstack
11889 ++ popl_cfi %eax
11890 ++#endif
11891 ++
11892 ++ pax_erase_kstack
11893 ++
11894 + /* if something modifies registers it must also disable sysexit */
11895 + movl PT_EIP(%esp), %edx
11896 + movl PT_OLDESP(%esp), %ecx
11897 + xorl %ebp,%ebp
11898 + TRACE_IRQS_ON
11899 + 1: mov PT_FS(%esp), %fs
11900 ++2: mov PT_DS(%esp), %ds
11901 ++3: mov PT_ES(%esp), %es
11902 + PTGS_TO_GS
11903 + ENABLE_INTERRUPTS_SYSEXIT
11904 +
11905 +@@ -456,6 +629,9 @@ sysenter_audit:
11906 + movl %eax,%edx /* 2nd arg: syscall number */
11907 + movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11908 + call audit_syscall_entry
11909 ++
11910 ++ pax_erase_kstack
11911 ++
11912 + pushl_cfi %ebx
11913 + movl PT_EAX(%esp),%eax /* reload syscall number */
11914 + jmp sysenter_do_call
11915 +@@ -482,11 +658,17 @@ sysexit_audit:
11916 +
11917 + CFI_ENDPROC
11918 + .pushsection .fixup,"ax"
11919 +-2: movl $0,PT_FS(%esp)
11920 ++4: movl $0,PT_FS(%esp)
11921 ++ jmp 1b
11922 ++5: movl $0,PT_DS(%esp)
11923 ++ jmp 1b
11924 ++6: movl $0,PT_ES(%esp)
11925 + jmp 1b
11926 + .section __ex_table,"a"
11927 + .align 4
11928 +- .long 1b,2b
11929 ++ .long 1b,4b
11930 ++ .long 2b,5b
11931 ++ .long 3b,6b
11932 + .popsection
11933 + PTGS_TO_GS_EX
11934 + ENDPROC(ia32_sysenter_target)
11935 +@@ -519,6 +701,15 @@ syscall_exit:
11936 + testl $_TIF_ALLWORK_MASK, %ecx # current->work
11937 + jne syscall_exit_work
11938 +
11939 ++restore_all_pax:
11940 ++
11941 ++#ifdef CONFIG_PAX_RANDKSTACK
11942 ++ movl %esp, %eax
11943 ++ call pax_randomize_kstack
11944 ++#endif
11945 ++
11946 ++ pax_erase_kstack
11947 ++
11948 + restore_all:
11949 + TRACE_IRQS_IRET
11950 + restore_all_notrace:
11951 +@@ -578,14 +769,34 @@ ldt_ss:
11952 + * compensating for the offset by changing to the ESPFIX segment with
11953 + * a base address that matches for the difference.
11954 + */
11955 +-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11956 ++#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11957 + mov %esp, %edx /* load kernel esp */
11958 + mov PT_OLDESP(%esp), %eax /* load userspace esp */
11959 + mov %dx, %ax /* eax: new kernel esp */
11960 + sub %eax, %edx /* offset (low word is 0) */
11961 ++#ifdef CONFIG_SMP
11962 ++ movl PER_CPU_VAR(cpu_number), %ebx
11963 ++ shll $PAGE_SHIFT_asm, %ebx
11964 ++ addl $cpu_gdt_table, %ebx
11965 ++#else
11966 ++ movl $cpu_gdt_table, %ebx
11967 ++#endif
11968 + shr $16, %edx
11969 +- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11970 +- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11971 ++
11972 ++#ifdef CONFIG_PAX_KERNEXEC
11973 ++ mov %cr0, %esi
11974 ++ btr $16, %esi
11975 ++ mov %esi, %cr0
11976 ++#endif
11977 ++
11978 ++ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11979 ++ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11980 ++
11981 ++#ifdef CONFIG_PAX_KERNEXEC
11982 ++ bts $16, %esi
11983 ++ mov %esi, %cr0
11984 ++#endif
11985 ++
11986 + pushl_cfi $__ESPFIX_SS
11987 + pushl_cfi %eax /* new kernel esp */
11988 + /* Disable interrupts, but do not irqtrace this section: we
11989 +@@ -614,34 +825,28 @@ work_resched:
11990 + movl TI_flags(%ebp), %ecx
11991 + andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11992 + # than syscall tracing?
11993 +- jz restore_all
11994 ++ jz restore_all_pax
11995 + testb $_TIF_NEED_RESCHED, %cl
11996 + jnz work_resched
11997 +
11998 + work_notifysig: # deal with pending signals and
11999 + # notify-resume requests
12000 ++ movl %esp, %eax
12001 + #ifdef CONFIG_VM86
12002 + testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
12003 +- movl %esp, %eax
12004 +- jne work_notifysig_v86 # returning to kernel-space or
12005 ++ jz 1f # returning to kernel-space or
12006 + # vm86-space
12007 +- xorl %edx, %edx
12008 +- call do_notify_resume
12009 +- jmp resume_userspace_sig
12010 +
12011 +- ALIGN
12012 +-work_notifysig_v86:
12013 + pushl_cfi %ecx # save ti_flags for do_notify_resume
12014 + call save_v86_state # %eax contains pt_regs pointer
12015 + popl_cfi %ecx
12016 + movl %eax, %esp
12017 +-#else
12018 +- movl %esp, %eax
12019 ++1:
12020 + #endif
12021 + xorl %edx, %edx
12022 + call do_notify_resume
12023 + jmp resume_userspace_sig
12024 +-END(work_pending)
12025 ++ENDPROC(work_pending)
12026 +
12027 + # perform syscall exit tracing
12028 + ALIGN
12029 +@@ -649,11 +854,14 @@ syscall_trace_entry:
12030 + movl $-ENOSYS,PT_EAX(%esp)
12031 + movl %esp, %eax
12032 + call syscall_trace_enter
12033 ++
12034 ++ pax_erase_kstack
12035 ++
12036 + /* What it returned is what we'll actually use. */
12037 + cmpl $(nr_syscalls), %eax
12038 + jnae syscall_call
12039 + jmp syscall_exit
12040 +-END(syscall_trace_entry)
12041 ++ENDPROC(syscall_trace_entry)
12042 +
12043 + # perform syscall exit tracing
12044 + ALIGN
12045 +@@ -666,20 +874,24 @@ syscall_exit_work:
12046 + movl %esp, %eax
12047 + call syscall_trace_leave
12048 + jmp resume_userspace
12049 +-END(syscall_exit_work)
12050 ++ENDPROC(syscall_exit_work)
12051 + CFI_ENDPROC
12052 +
12053 + RING0_INT_FRAME # can't unwind into user space anyway
12054 + syscall_fault:
12055 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
12056 ++ push %ss
12057 ++ pop %ds
12058 ++#endif
12059 + GET_THREAD_INFO(%ebp)
12060 + movl $-EFAULT,PT_EAX(%esp)
12061 + jmp resume_userspace
12062 +-END(syscall_fault)
12063 ++ENDPROC(syscall_fault)
12064 +
12065 + syscall_badsys:
12066 + movl $-ENOSYS,PT_EAX(%esp)
12067 + jmp resume_userspace
12068 +-END(syscall_badsys)
12069 ++ENDPROC(syscall_badsys)
12070 + CFI_ENDPROC
12071 + /*
12072 + * End of kprobes section
12073 +@@ -753,6 +965,36 @@ ptregs_clone:
12074 + CFI_ENDPROC
12075 + ENDPROC(ptregs_clone)
12076 +
12077 ++ ALIGN;
12078 ++ENTRY(kernel_execve)
12079 ++ CFI_STARTPROC
12080 ++ pushl_cfi %ebp
12081 ++ sub $PT_OLDSS+4,%esp
12082 ++ pushl_cfi %edi
12083 ++ pushl_cfi %ecx
12084 ++ pushl_cfi %eax
12085 ++ lea 3*4(%esp),%edi
12086 ++ mov $PT_OLDSS/4+1,%ecx
12087 ++ xorl %eax,%eax
12088 ++ rep stosl
12089 ++ popl_cfi %eax
12090 ++ popl_cfi %ecx
12091 ++ popl_cfi %edi
12092 ++ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
12093 ++ pushl_cfi %esp
12094 ++ call sys_execve
12095 ++ add $4,%esp
12096 ++ CFI_ADJUST_CFA_OFFSET -4
12097 ++ GET_THREAD_INFO(%ebp)
12098 ++ test %eax,%eax
12099 ++ jz syscall_exit
12100 ++ add $PT_OLDSS+4,%esp
12101 ++ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
12102 ++ popl_cfi %ebp
12103 ++ ret
12104 ++ CFI_ENDPROC
12105 ++ENDPROC(kernel_execve)
12106 ++
12107 + .macro FIXUP_ESPFIX_STACK
12108 + /*
12109 + * Switch back for ESPFIX stack to the normal zerobased stack
12110 +@@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
12111 + * normal stack and adjusts ESP with the matching offset.
12112 + */
12113 + /* fixup the stack */
12114 +- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
12115 +- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
12116 ++#ifdef CONFIG_SMP
12117 ++ movl PER_CPU_VAR(cpu_number), %ebx
12118 ++ shll $PAGE_SHIFT_asm, %ebx
12119 ++ addl $cpu_gdt_table, %ebx
12120 ++#else
12121 ++ movl $cpu_gdt_table, %ebx
12122 ++#endif
12123 ++ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
12124 ++ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
12125 + shl $16, %eax
12126 + addl %esp, %eax /* the adjusted stack pointer */
12127 + pushl_cfi $__KERNEL_DS
12128 +@@ -816,7 +1065,7 @@ vector=vector+1
12129 + .endr
12130 + 2: jmp common_interrupt
12131 + .endr
12132 +-END(irq_entries_start)
12133 ++ENDPROC(irq_entries_start)
12134 +
12135 + .previous
12136 + END(interrupt)
12137 +@@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
12138 + pushl_cfi $do_coprocessor_error
12139 + jmp error_code
12140 + CFI_ENDPROC
12141 +-END(coprocessor_error)
12142 ++ENDPROC(coprocessor_error)
12143 +
12144 + ENTRY(simd_coprocessor_error)
12145 + RING0_INT_FRAME
12146 +@@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
12147 + #endif
12148 + jmp error_code
12149 + CFI_ENDPROC
12150 +-END(simd_coprocessor_error)
12151 ++ENDPROC(simd_coprocessor_error)
12152 +
12153 + ENTRY(device_not_available)
12154 + RING0_INT_FRAME
12155 +@@ -893,7 +1142,7 @@ ENTRY(device_not_available)
12156 + pushl_cfi $do_device_not_available
12157 + jmp error_code
12158 + CFI_ENDPROC
12159 +-END(device_not_available)
12160 ++ENDPROC(device_not_available)
12161 +
12162 + #ifdef CONFIG_PARAVIRT
12163 + ENTRY(native_iret)
12164 +@@ -902,12 +1151,12 @@ ENTRY(native_iret)
12165 + .align 4
12166 + .long native_iret, iret_exc
12167 + .previous
12168 +-END(native_iret)
12169 ++ENDPROC(native_iret)
12170 +
12171 + ENTRY(native_irq_enable_sysexit)
12172 + sti
12173 + sysexit
12174 +-END(native_irq_enable_sysexit)
12175 ++ENDPROC(native_irq_enable_sysexit)
12176 + #endif
12177 +
12178 + ENTRY(overflow)
12179 +@@ -916,7 +1165,7 @@ ENTRY(overflow)
12180 + pushl_cfi $do_overflow
12181 + jmp error_code
12182 + CFI_ENDPROC
12183 +-END(overflow)
12184 ++ENDPROC(overflow)
12185 +
12186 + ENTRY(bounds)
12187 + RING0_INT_FRAME
12188 +@@ -924,7 +1173,7 @@ ENTRY(bounds)
12189 + pushl_cfi $do_bounds
12190 + jmp error_code
12191 + CFI_ENDPROC
12192 +-END(bounds)
12193 ++ENDPROC(bounds)
12194 +
12195 + ENTRY(invalid_op)
12196 + RING0_INT_FRAME
12197 +@@ -932,7 +1181,7 @@ ENTRY(invalid_op)
12198 + pushl_cfi $do_invalid_op
12199 + jmp error_code
12200 + CFI_ENDPROC
12201 +-END(invalid_op)
12202 ++ENDPROC(invalid_op)
12203 +
12204 + ENTRY(coprocessor_segment_overrun)
12205 + RING0_INT_FRAME
12206 +@@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
12207 + pushl_cfi $do_coprocessor_segment_overrun
12208 + jmp error_code
12209 + CFI_ENDPROC
12210 +-END(coprocessor_segment_overrun)
12211 ++ENDPROC(coprocessor_segment_overrun)
12212 +
12213 + ENTRY(invalid_TSS)
12214 + RING0_EC_FRAME
12215 + pushl_cfi $do_invalid_TSS
12216 + jmp error_code
12217 + CFI_ENDPROC
12218 +-END(invalid_TSS)
12219 ++ENDPROC(invalid_TSS)
12220 +
12221 + ENTRY(segment_not_present)
12222 + RING0_EC_FRAME
12223 + pushl_cfi $do_segment_not_present
12224 + jmp error_code
12225 + CFI_ENDPROC
12226 +-END(segment_not_present)
12227 ++ENDPROC(segment_not_present)
12228 +
12229 + ENTRY(stack_segment)
12230 + RING0_EC_FRAME
12231 + pushl_cfi $do_stack_segment
12232 + jmp error_code
12233 + CFI_ENDPROC
12234 +-END(stack_segment)
12235 ++ENDPROC(stack_segment)
12236 +
12237 + ENTRY(alignment_check)
12238 + RING0_EC_FRAME
12239 + pushl_cfi $do_alignment_check
12240 + jmp error_code
12241 + CFI_ENDPROC
12242 +-END(alignment_check)
12243 ++ENDPROC(alignment_check)
12244 +
12245 + ENTRY(divide_error)
12246 + RING0_INT_FRAME
12247 +@@ -976,7 +1225,7 @@ ENTRY(divide_error)
12248 + pushl_cfi $do_divide_error
12249 + jmp error_code
12250 + CFI_ENDPROC
12251 +-END(divide_error)
12252 ++ENDPROC(divide_error)
12253 +
12254 + #ifdef CONFIG_X86_MCE
12255 + ENTRY(machine_check)
12256 +@@ -985,7 +1234,7 @@ ENTRY(machine_check)
12257 + pushl_cfi machine_check_vector
12258 + jmp error_code
12259 + CFI_ENDPROC
12260 +-END(machine_check)
12261 ++ENDPROC(machine_check)
12262 + #endif
12263 +
12264 + ENTRY(spurious_interrupt_bug)
12265 +@@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
12266 + pushl_cfi $do_spurious_interrupt_bug
12267 + jmp error_code
12268 + CFI_ENDPROC
12269 +-END(spurious_interrupt_bug)
12270 ++ENDPROC(spurious_interrupt_bug)
12271 + /*
12272 + * End of kprobes section
12273 + */
12274 +@@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector
12275 +
12276 + ENTRY(mcount)
12277 + ret
12278 +-END(mcount)
12279 ++ENDPROC(mcount)
12280 +
12281 + ENTRY(ftrace_caller)
12282 + cmpl $0, function_trace_stop
12283 +@@ -1138,7 +1387,7 @@ ftrace_graph_call:
12284 + .globl ftrace_stub
12285 + ftrace_stub:
12286 + ret
12287 +-END(ftrace_caller)
12288 ++ENDPROC(ftrace_caller)
12289 +
12290 + #else /* ! CONFIG_DYNAMIC_FTRACE */
12291 +
12292 +@@ -1174,7 +1423,7 @@ trace:
12293 + popl %ecx
12294 + popl %eax
12295 + jmp ftrace_stub
12296 +-END(mcount)
12297 ++ENDPROC(mcount)
12298 + #endif /* CONFIG_DYNAMIC_FTRACE */
12299 + #endif /* CONFIG_FUNCTION_TRACER */
12300 +
12301 +@@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
12302 + popl %ecx
12303 + popl %eax
12304 + ret
12305 +-END(ftrace_graph_caller)
12306 ++ENDPROC(ftrace_graph_caller)
12307 +
12308 + .globl return_to_handler
12309 + return_to_handler:
12310 +@@ -1209,7 +1458,6 @@ return_to_handler:
12311 + jmp *%ecx
12312 + #endif
12313 +
12314 +-.section .rodata,"a"
12315 + #include "syscall_table_32.S"
12316 +
12317 + syscall_table_size=(.-sys_call_table)
12318 +@@ -1255,15 +1503,18 @@ error_code:
12319 + movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
12320 + REG_TO_PTGS %ecx
12321 + SET_KERNEL_GS %ecx
12322 +- movl $(__USER_DS), %ecx
12323 ++ movl $(__KERNEL_DS), %ecx
12324 + movl %ecx, %ds
12325 + movl %ecx, %es
12326 ++
12327 ++ pax_enter_kernel
12328 ++
12329 + TRACE_IRQS_OFF
12330 + movl %esp,%eax # pt_regs pointer
12331 + call *%edi
12332 + jmp ret_from_exception
12333 + CFI_ENDPROC
12334 +-END(page_fault)
12335 ++ENDPROC(page_fault)
12336 +
12337 + /*
12338 + * Debug traps and NMI can happen at the one SYSENTER instruction
12339 +@@ -1305,7 +1556,7 @@ debug_stack_correct:
12340 + call do_debug
12341 + jmp ret_from_exception
12342 + CFI_ENDPROC
12343 +-END(debug)
12344 ++ENDPROC(debug)
12345 +
12346 + /*
12347 + * NMI is doubly nasty. It can happen _while_ we're handling
12348 +@@ -1342,6 +1593,9 @@ nmi_stack_correct:
12349 + xorl %edx,%edx # zero error code
12350 + movl %esp,%eax # pt_regs pointer
12351 + call do_nmi
12352 ++
12353 ++ pax_exit_kernel
12354 ++
12355 + jmp restore_all_notrace
12356 + CFI_ENDPROC
12357 +
12358 +@@ -1378,12 +1632,15 @@ nmi_espfix_stack:
12359 + FIXUP_ESPFIX_STACK # %eax == %esp
12360 + xorl %edx,%edx # zero error code
12361 + call do_nmi
12362 ++
12363 ++ pax_exit_kernel
12364 ++
12365 + RESTORE_REGS
12366 + lss 12+4(%esp), %esp # back to espfix stack
12367 + CFI_ADJUST_CFA_OFFSET -24
12368 + jmp irq_return
12369 + CFI_ENDPROC
12370 +-END(nmi)
12371 ++ENDPROC(nmi)
12372 +
12373 + ENTRY(int3)
12374 + RING0_INT_FRAME
12375 +@@ -1395,14 +1652,14 @@ ENTRY(int3)
12376 + call do_int3
12377 + jmp ret_from_exception
12378 + CFI_ENDPROC
12379 +-END(int3)
12380 ++ENDPROC(int3)
12381 +
12382 + ENTRY(general_protection)
12383 + RING0_EC_FRAME
12384 + pushl_cfi $do_general_protection
12385 + jmp error_code
12386 + CFI_ENDPROC
12387 +-END(general_protection)
12388 ++ENDPROC(general_protection)
12389 +
12390 + #ifdef CONFIG_KVM_GUEST
12391 + ENTRY(async_page_fault)
12392 +@@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
12393 + pushl_cfi $do_async_page_fault
12394 + jmp error_code
12395 + CFI_ENDPROC
12396 +-END(async_page_fault)
12397 ++ENDPROC(async_page_fault)
12398 + #endif
12399 +
12400 + /*
12401 +diff -urNp linux-3.1.1/arch/x86/kernel/entry_64.S linux-3.1.1/arch/x86/kernel/entry_64.S
12402 +--- linux-3.1.1/arch/x86/kernel/entry_64.S 2011-11-11 15:19:27.000000000 -0500
12403 ++++ linux-3.1.1/arch/x86/kernel/entry_64.S 2011-11-17 18:28:56.000000000 -0500
12404 +@@ -55,6 +55,8 @@
12405 + #include <asm/paravirt.h>
12406 + #include <asm/ftrace.h>
12407 + #include <asm/percpu.h>
12408 ++#include <asm/pgtable.h>
12409 ++#include <asm/alternative-asm.h>
12410 +
12411 + /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
12412 + #include <linux/elf-em.h>
12413 +@@ -68,8 +70,9 @@
12414 + #ifdef CONFIG_FUNCTION_TRACER
12415 + #ifdef CONFIG_DYNAMIC_FTRACE
12416 + ENTRY(mcount)
12417 ++ pax_force_retaddr
12418 + retq
12419 +-END(mcount)
12420 ++ENDPROC(mcount)
12421 +
12422 + ENTRY(ftrace_caller)
12423 + cmpl $0, function_trace_stop
12424 +@@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
12425 + #endif
12426 +
12427 + GLOBAL(ftrace_stub)
12428 ++ pax_force_retaddr
12429 + retq
12430 +-END(ftrace_caller)
12431 ++ENDPROC(ftrace_caller)
12432 +
12433 + #else /* ! CONFIG_DYNAMIC_FTRACE */
12434 + ENTRY(mcount)
12435 +@@ -112,6 +116,7 @@ ENTRY(mcount)
12436 + #endif
12437 +
12438 + GLOBAL(ftrace_stub)
12439 ++ pax_force_retaddr
12440 + retq
12441 +
12442 + trace:
12443 +@@ -121,12 +126,13 @@ trace:
12444 + movq 8(%rbp), %rsi
12445 + subq $MCOUNT_INSN_SIZE, %rdi
12446 +
12447 ++ pax_force_fptr ftrace_trace_function
12448 + call *ftrace_trace_function
12449 +
12450 + MCOUNT_RESTORE_FRAME
12451 +
12452 + jmp ftrace_stub
12453 +-END(mcount)
12454 ++ENDPROC(mcount)
12455 + #endif /* CONFIG_DYNAMIC_FTRACE */
12456 + #endif /* CONFIG_FUNCTION_TRACER */
12457 +
12458 +@@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
12459 +
12460 + MCOUNT_RESTORE_FRAME
12461 +
12462 ++ pax_force_retaddr
12463 + retq
12464 +-END(ftrace_graph_caller)
12465 ++ENDPROC(ftrace_graph_caller)
12466 +
12467 + GLOBAL(return_to_handler)
12468 + subq $24, %rsp
12469 +@@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
12470 + movq 8(%rsp), %rdx
12471 + movq (%rsp), %rax
12472 + addq $24, %rsp
12473 ++ pax_force_fptr %rdi
12474 + jmp *%rdi
12475 + #endif
12476 +
12477 +@@ -178,6 +186,269 @@ ENTRY(native_usergs_sysret64)
12478 + ENDPROC(native_usergs_sysret64)
12479 + #endif /* CONFIG_PARAVIRT */
12480 +
12481 ++ .macro ljmpq sel, off
12482 ++#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
12483 ++ .byte 0x48; ljmp *1234f(%rip)
12484 ++ .pushsection .rodata
12485 ++ .align 16
12486 ++ 1234: .quad \off; .word \sel
12487 ++ .popsection
12488 ++#else
12489 ++ pushq $\sel
12490 ++ pushq $\off
12491 ++ lretq
12492 ++#endif
12493 ++ .endm
12494 ++
12495 ++ .macro pax_enter_kernel
12496 ++#ifdef CONFIG_PAX_KERNEXEC
12497 ++ call pax_enter_kernel
12498 ++#endif
12499 ++ .endm
12500 ++
12501 ++ .macro pax_exit_kernel
12502 ++#ifdef CONFIG_PAX_KERNEXEC
12503 ++ call pax_exit_kernel
12504 ++#endif
12505 ++ .endm
12506 ++
12507 ++#ifdef CONFIG_PAX_KERNEXEC
12508 ++ENTRY(pax_enter_kernel)
12509 ++ pushq %rdi
12510 ++
12511 ++#ifdef CONFIG_PARAVIRT
12512 ++ PV_SAVE_REGS(CLBR_RDI)
12513 ++#endif
12514 ++
12515 ++ GET_CR0_INTO_RDI
12516 ++ bts $16,%rdi
12517 ++ jnc 1f
12518 ++ mov %cs,%edi
12519 ++ cmp $__KERNEL_CS,%edi
12520 ++ jz 3f
12521 ++ ljmpq __KERNEL_CS,3f
12522 ++1: ljmpq __KERNEXEC_KERNEL_CS,2f
12523 ++2: SET_RDI_INTO_CR0
12524 ++3:
12525 ++
12526 ++#ifdef CONFIG_PARAVIRT
12527 ++ PV_RESTORE_REGS(CLBR_RDI)
12528 ++#endif
12529 ++
12530 ++ popq %rdi
12531 ++ pax_force_retaddr
12532 ++ retq
12533 ++ENDPROC(pax_enter_kernel)
12534 ++
12535 ++ENTRY(pax_exit_kernel)
12536 ++ pushq %rdi
12537 ++
12538 ++#ifdef CONFIG_PARAVIRT
12539 ++ PV_SAVE_REGS(CLBR_RDI)
12540 ++#endif
12541 ++
12542 ++ mov %cs,%rdi
12543 ++ cmp $__KERNEXEC_KERNEL_CS,%edi
12544 ++ jnz 2f
12545 ++ GET_CR0_INTO_RDI
12546 ++ btr $16,%rdi
12547 ++ ljmpq __KERNEL_CS,1f
12548 ++1: SET_RDI_INTO_CR0
12549 ++2:
12550 ++
12551 ++#ifdef CONFIG_PARAVIRT
12552 ++ PV_RESTORE_REGS(CLBR_RDI);
12553 ++#endif
12554 ++
12555 ++ popq %rdi
12556 ++ pax_force_retaddr
12557 ++ retq
12558 ++ENDPROC(pax_exit_kernel)
12559 ++#endif
12560 ++
12561 ++ .macro pax_enter_kernel_user
12562 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
12563 ++ call pax_enter_kernel_user
12564 ++#endif
12565 ++ .endm
12566 ++
12567 ++ .macro pax_exit_kernel_user
12568 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
12569 ++ call pax_exit_kernel_user
12570 ++#endif
12571 ++#ifdef CONFIG_PAX_RANDKSTACK
12572 ++ push %rax
12573 ++ call pax_randomize_kstack
12574 ++ pop %rax
12575 ++#endif
12576 ++ .endm
12577 ++
12578 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
12579 ++ENTRY(pax_enter_kernel_user)
12580 ++ pushq %rdi
12581 ++ pushq %rbx
12582 ++
12583 ++#ifdef CONFIG_PARAVIRT
12584 ++ PV_SAVE_REGS(CLBR_RDI)
12585 ++#endif
12586 ++
12587 ++ GET_CR3_INTO_RDI
12588 ++ mov %rdi,%rbx
12589 ++ add $__START_KERNEL_map,%rbx
12590 ++ sub phys_base(%rip),%rbx
12591 ++
12592 ++#ifdef CONFIG_PARAVIRT
12593 ++ pushq %rdi
12594 ++ cmpl $0, pv_info+PARAVIRT_enabled
12595 ++ jz 1f
12596 ++ i = 0
12597 ++ .rept USER_PGD_PTRS
12598 ++ mov i*8(%rbx),%rsi
12599 ++ mov $0,%sil
12600 ++ lea i*8(%rbx),%rdi
12601 ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12602 ++ i = i + 1
12603 ++ .endr
12604 ++ jmp 2f
12605 ++1:
12606 ++#endif
12607 ++
12608 ++ i = 0
12609 ++ .rept USER_PGD_PTRS
12610 ++ movb $0,i*8(%rbx)
12611 ++ i = i + 1
12612 ++ .endr
12613 ++
12614 ++#ifdef CONFIG_PARAVIRT
12615 ++2: popq %rdi
12616 ++#endif
12617 ++ SET_RDI_INTO_CR3
12618 ++
12619 ++#ifdef CONFIG_PAX_KERNEXEC
12620 ++ GET_CR0_INTO_RDI
12621 ++ bts $16,%rdi
12622 ++ SET_RDI_INTO_CR0
12623 ++#endif
12624 ++
12625 ++#ifdef CONFIG_PARAVIRT
12626 ++ PV_RESTORE_REGS(CLBR_RDI)
12627 ++#endif
12628 ++
12629 ++ popq %rbx
12630 ++ popq %rdi
12631 ++ pax_force_retaddr
12632 ++ retq
12633 ++ENDPROC(pax_enter_kernel_user)
12634 ++
12635 ++ENTRY(pax_exit_kernel_user)
12636 ++ push %rdi
12637 ++
12638 ++#ifdef CONFIG_PARAVIRT
12639 ++ pushq %rbx
12640 ++ PV_SAVE_REGS(CLBR_RDI)
12641 ++#endif
12642 ++
12643 ++#ifdef CONFIG_PAX_KERNEXEC
12644 ++ GET_CR0_INTO_RDI
12645 ++ btr $16,%rdi
12646 ++ SET_RDI_INTO_CR0
12647 ++#endif
12648 ++
12649 ++ GET_CR3_INTO_RDI
12650 ++ add $__START_KERNEL_map,%rdi
12651 ++ sub phys_base(%rip),%rdi
12652 ++
12653 ++#ifdef CONFIG_PARAVIRT
12654 ++ cmpl $0, pv_info+PARAVIRT_enabled
12655 ++ jz 1f
12656 ++ mov %rdi,%rbx
12657 ++ i = 0
12658 ++ .rept USER_PGD_PTRS
12659 ++ mov i*8(%rbx),%rsi
12660 ++ mov $0x67,%sil
12661 ++ lea i*8(%rbx),%rdi
12662 ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
12663 ++ i = i + 1
12664 ++ .endr
12665 ++ jmp 2f
12666 ++1:
12667 ++#endif
12668 ++
12669 ++ i = 0
12670 ++ .rept USER_PGD_PTRS
12671 ++ movb $0x67,i*8(%rdi)
12672 ++ i = i + 1
12673 ++ .endr
12674 ++
12675 ++#ifdef CONFIG_PARAVIRT
12676 ++2: PV_RESTORE_REGS(CLBR_RDI)
12677 ++ popq %rbx
12678 ++#endif
12679 ++
12680 ++ popq %rdi
12681 ++ pax_force_retaddr
12682 ++ retq
12683 ++ENDPROC(pax_exit_kernel_user)
12684 ++#endif
12685 ++
12686 ++.macro pax_erase_kstack
12687 ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12688 ++ call pax_erase_kstack
12689 ++#endif
12690 ++.endm
12691 ++
12692 ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12693 ++/*
12694 ++ * r10: thread_info
12695 ++ * rcx, rdx: can be clobbered
12696 ++ */
12697 ++ENTRY(pax_erase_kstack)
12698 ++ pushq %rdi
12699 ++ pushq %rax
12700 ++ pushq %r10
12701 ++
12702 ++ GET_THREAD_INFO(%r10)
12703 ++ mov TI_lowest_stack(%r10), %rdi
12704 ++ mov $-0xBEEF, %rax
12705 ++ std
12706 ++
12707 ++1: mov %edi, %ecx
12708 ++ and $THREAD_SIZE_asm - 1, %ecx
12709 ++ shr $3, %ecx
12710 ++ repne scasq
12711 ++ jecxz 2f
12712 ++
12713 ++ cmp $2*8, %ecx
12714 ++ jc 2f
12715 ++
12716 ++ mov $2*8, %ecx
12717 ++ repe scasq
12718 ++ jecxz 2f
12719 ++ jne 1b
12720 ++
12721 ++2: cld
12722 ++ mov %esp, %ecx
12723 ++ sub %edi, %ecx
12724 ++
12725 ++ cmp $THREAD_SIZE_asm, %rcx
12726 ++ jb 3f
12727 ++ ud2
12728 ++3:
12729 ++
12730 ++ shr $3, %ecx
12731 ++ rep stosq
12732 ++
12733 ++ mov TI_task_thread_sp0(%r10), %rdi
12734 ++ sub $256, %rdi
12735 ++ mov %rdi, TI_lowest_stack(%r10)
12736 ++
12737 ++ popq %r10
12738 ++ popq %rax
12739 ++ popq %rdi
12740 ++ pax_force_retaddr
12741 ++ ret
12742 ++ENDPROC(pax_erase_kstack)
12743 ++#endif
12744 +
12745 + .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12746 + #ifdef CONFIG_TRACE_IRQFLAGS
12747 +@@ -319,7 +590,7 @@ ENDPROC(native_usergs_sysret64)
12748 + movq %rsp, %rsi
12749 +
12750 + leaq -RBP(%rsp),%rdi /* arg1 for handler */
12751 +- testl $3, CS(%rdi)
12752 ++ testb $3, CS(%rdi)
12753 + je 1f
12754 + SWAPGS
12755 + /*
12756 +@@ -350,9 +621,10 @@ ENTRY(save_rest)
12757 + movq_cfi r15, R15+16
12758 + movq %r11, 8(%rsp) /* return address */
12759 + FIXUP_TOP_OF_STACK %r11, 16
12760 ++ pax_force_retaddr
12761 + ret
12762 + CFI_ENDPROC
12763 +-END(save_rest)
12764 ++ENDPROC(save_rest)
12765 +
12766 + /* save complete stack frame */
12767 + .pushsection .kprobes.text, "ax"
12768 +@@ -381,9 +653,10 @@ ENTRY(save_paranoid)
12769 + js 1f /* negative -> in kernel */
12770 + SWAPGS
12771 + xorl %ebx,%ebx
12772 +-1: ret
12773 ++1: pax_force_retaddr
12774 ++ ret
12775 + CFI_ENDPROC
12776 +-END(save_paranoid)
12777 ++ENDPROC(save_paranoid)
12778 + .popsection
12779 +
12780 + /*
12781 +@@ -405,7 +678,7 @@ ENTRY(ret_from_fork)
12782 +
12783 + RESTORE_REST
12784 +
12785 +- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12786 ++ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12787 + je int_ret_from_sys_call
12788 +
12789 + testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12790 +@@ -415,7 +688,7 @@ ENTRY(ret_from_fork)
12791 + jmp ret_from_sys_call # go to the SYSRET fastpath
12792 +
12793 + CFI_ENDPROC
12794 +-END(ret_from_fork)
12795 ++ENDPROC(ret_from_fork)
12796 +
12797 + /*
12798 + * System call entry. Up to 6 arguments in registers are supported.
12799 +@@ -451,7 +724,7 @@ END(ret_from_fork)
12800 + ENTRY(system_call)
12801 + CFI_STARTPROC simple
12802 + CFI_SIGNAL_FRAME
12803 +- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12804 ++ CFI_DEF_CFA rsp,0
12805 + CFI_REGISTER rip,rcx
12806 + /*CFI_REGISTER rflags,r11*/
12807 + SWAPGS_UNSAFE_STACK
12808 +@@ -464,12 +737,13 @@ ENTRY(system_call_after_swapgs)
12809 +
12810 + movq %rsp,PER_CPU_VAR(old_rsp)
12811 + movq PER_CPU_VAR(kernel_stack),%rsp
12812 ++ pax_enter_kernel_user
12813 + /*
12814 + * No need to follow this irqs off/on section - it's straight
12815 + * and short:
12816 + */
12817 + ENABLE_INTERRUPTS(CLBR_NONE)
12818 +- SAVE_ARGS 8,0
12819 ++ SAVE_ARGS 8*6,0
12820 + movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12821 + movq %rcx,RIP-ARGOFFSET(%rsp)
12822 + CFI_REL_OFFSET rip,RIP-ARGOFFSET
12823 +@@ -498,6 +772,8 @@ sysret_check:
12824 + andl %edi,%edx
12825 + jnz sysret_careful
12826 + CFI_REMEMBER_STATE
12827 ++ pax_exit_kernel_user
12828 ++ pax_erase_kstack
12829 + /*
12830 + * sysretq will re-enable interrupts:
12831 + */
12832 +@@ -556,6 +832,9 @@ auditsys:
12833 + movq %rax,%rsi /* 2nd arg: syscall number */
12834 + movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12835 + call audit_syscall_entry
12836 ++
12837 ++ pax_erase_kstack
12838 ++
12839 + LOAD_ARGS 0 /* reload call-clobbered registers */
12840 + jmp system_call_fastpath
12841 +
12842 +@@ -586,6 +865,9 @@ tracesys:
12843 + FIXUP_TOP_OF_STACK %rdi
12844 + movq %rsp,%rdi
12845 + call syscall_trace_enter
12846 ++
12847 ++ pax_erase_kstack
12848 ++
12849 + /*
12850 + * Reload arg registers from stack in case ptrace changed them.
12851 + * We don't reload %rax because syscall_trace_enter() returned
12852 +@@ -607,7 +889,7 @@ tracesys:
12853 + GLOBAL(int_ret_from_sys_call)
12854 + DISABLE_INTERRUPTS(CLBR_NONE)
12855 + TRACE_IRQS_OFF
12856 +- testl $3,CS-ARGOFFSET(%rsp)
12857 ++ testb $3,CS-ARGOFFSET(%rsp)
12858 + je retint_restore_args
12859 + movl $_TIF_ALLWORK_MASK,%edi
12860 + /* edi: mask to check */
12861 +@@ -664,7 +946,7 @@ int_restore_rest:
12862 + TRACE_IRQS_OFF
12863 + jmp int_with_check
12864 + CFI_ENDPROC
12865 +-END(system_call)
12866 ++ENDPROC(system_call)
12867 +
12868 + /*
12869 + * Certain special system calls that need to save a complete full stack frame.
12870 +@@ -680,7 +962,7 @@ ENTRY(\label)
12871 + call \func
12872 + jmp ptregscall_common
12873 + CFI_ENDPROC
12874 +-END(\label)
12875 ++ENDPROC(\label)
12876 + .endm
12877 +
12878 + PTREGSCALL stub_clone, sys_clone, %r8
12879 +@@ -698,9 +980,10 @@ ENTRY(ptregscall_common)
12880 + movq_cfi_restore R12+8, r12
12881 + movq_cfi_restore RBP+8, rbp
12882 + movq_cfi_restore RBX+8, rbx
12883 ++ pax_force_retaddr
12884 + ret $REST_SKIP /* pop extended registers */
12885 + CFI_ENDPROC
12886 +-END(ptregscall_common)
12887 ++ENDPROC(ptregscall_common)
12888 +
12889 + ENTRY(stub_execve)
12890 + CFI_STARTPROC
12891 +@@ -715,7 +998,7 @@ ENTRY(stub_execve)
12892 + RESTORE_REST
12893 + jmp int_ret_from_sys_call
12894 + CFI_ENDPROC
12895 +-END(stub_execve)
12896 ++ENDPROC(stub_execve)
12897 +
12898 + /*
12899 + * sigreturn is special because it needs to restore all registers on return.
12900 +@@ -733,7 +1016,7 @@ ENTRY(stub_rt_sigreturn)
12901 + RESTORE_REST
12902 + jmp int_ret_from_sys_call
12903 + CFI_ENDPROC
12904 +-END(stub_rt_sigreturn)
12905 ++ENDPROC(stub_rt_sigreturn)
12906 +
12907 + /*
12908 + * Build the entry stubs and pointer table with some assembler magic.
12909 +@@ -768,7 +1051,7 @@ vector=vector+1
12910 + 2: jmp common_interrupt
12911 + .endr
12912 + CFI_ENDPROC
12913 +-END(irq_entries_start)
12914 ++ENDPROC(irq_entries_start)
12915 +
12916 + .previous
12917 + END(interrupt)
12918 +@@ -789,6 +1072,16 @@ END(interrupt)
12919 + CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12920 + SAVE_ARGS_IRQ
12921 + PARTIAL_FRAME 0
12922 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
12923 ++ testb $3, CS(%rdi)
12924 ++ jnz 1f
12925 ++ pax_enter_kernel
12926 ++ jmp 2f
12927 ++1: pax_enter_kernel_user
12928 ++2:
12929 ++#else
12930 ++ pax_enter_kernel
12931 ++#endif
12932 + call \func
12933 + .endm
12934 +
12935 +@@ -820,7 +1113,7 @@ ret_from_intr:
12936 +
12937 + exit_intr:
12938 + GET_THREAD_INFO(%rcx)
12939 +- testl $3,CS-ARGOFFSET(%rsp)
12940 ++ testb $3,CS-ARGOFFSET(%rsp)
12941 + je retint_kernel
12942 +
12943 + /* Interrupt came from user space */
12944 +@@ -842,12 +1135,16 @@ retint_swapgs: /* return to user-space
12945 + * The iretq could re-enable interrupts:
12946 + */
12947 + DISABLE_INTERRUPTS(CLBR_ANY)
12948 ++ pax_exit_kernel_user
12949 ++ pax_erase_kstack
12950 + TRACE_IRQS_IRETQ
12951 + SWAPGS
12952 + jmp restore_args
12953 +
12954 + retint_restore_args: /* return to kernel space */
12955 + DISABLE_INTERRUPTS(CLBR_ANY)
12956 ++ pax_exit_kernel
12957 ++ pax_force_retaddr RIP-ARGOFFSET
12958 + /*
12959 + * The iretq could re-enable interrupts:
12960 + */
12961 +@@ -936,7 +1233,7 @@ ENTRY(retint_kernel)
12962 + #endif
12963 +
12964 + CFI_ENDPROC
12965 +-END(common_interrupt)
12966 ++ENDPROC(common_interrupt)
12967 + /*
12968 + * End of kprobes section
12969 + */
12970 +@@ -952,7 +1249,7 @@ ENTRY(\sym)
12971 + interrupt \do_sym
12972 + jmp ret_from_intr
12973 + CFI_ENDPROC
12974 +-END(\sym)
12975 ++ENDPROC(\sym)
12976 + .endm
12977 +
12978 + #ifdef CONFIG_SMP
12979 +@@ -1017,12 +1314,22 @@ ENTRY(\sym)
12980 + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12981 + call error_entry
12982 + DEFAULT_FRAME 0
12983 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
12984 ++ testb $3, CS(%rsp)
12985 ++ jnz 1f
12986 ++ pax_enter_kernel
12987 ++ jmp 2f
12988 ++1: pax_enter_kernel_user
12989 ++2:
12990 ++#else
12991 ++ pax_enter_kernel
12992 ++#endif
12993 + movq %rsp,%rdi /* pt_regs pointer */
12994 + xorl %esi,%esi /* no error code */
12995 + call \do_sym
12996 + jmp error_exit /* %ebx: no swapgs flag */
12997 + CFI_ENDPROC
12998 +-END(\sym)
12999 ++ENDPROC(\sym)
13000 + .endm
13001 +
13002 + .macro paranoidzeroentry sym do_sym
13003 +@@ -1034,15 +1341,25 @@ ENTRY(\sym)
13004 + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13005 + call save_paranoid
13006 + TRACE_IRQS_OFF
13007 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
13008 ++ testb $3, CS(%rsp)
13009 ++ jnz 1f
13010 ++ pax_enter_kernel
13011 ++ jmp 2f
13012 ++1: pax_enter_kernel_user
13013 ++2:
13014 ++#else
13015 ++ pax_enter_kernel
13016 ++#endif
13017 + movq %rsp,%rdi /* pt_regs pointer */
13018 + xorl %esi,%esi /* no error code */
13019 + call \do_sym
13020 + jmp paranoid_exit /* %ebx: no swapgs flag */
13021 + CFI_ENDPROC
13022 +-END(\sym)
13023 ++ENDPROC(\sym)
13024 + .endm
13025 +
13026 +-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
13027 ++#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
13028 + .macro paranoidzeroentry_ist sym do_sym ist
13029 + ENTRY(\sym)
13030 + INTR_FRAME
13031 +@@ -1052,14 +1369,30 @@ ENTRY(\sym)
13032 + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13033 + call save_paranoid
13034 + TRACE_IRQS_OFF
13035 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
13036 ++ testb $3, CS(%rsp)
13037 ++ jnz 1f
13038 ++ pax_enter_kernel
13039 ++ jmp 2f
13040 ++1: pax_enter_kernel_user
13041 ++2:
13042 ++#else
13043 ++ pax_enter_kernel
13044 ++#endif
13045 + movq %rsp,%rdi /* pt_regs pointer */
13046 + xorl %esi,%esi /* no error code */
13047 ++#ifdef CONFIG_SMP
13048 ++ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
13049 ++ lea init_tss(%r12), %r12
13050 ++#else
13051 ++ lea init_tss(%rip), %r12
13052 ++#endif
13053 + subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
13054 + call \do_sym
13055 + addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
13056 + jmp paranoid_exit /* %ebx: no swapgs flag */
13057 + CFI_ENDPROC
13058 +-END(\sym)
13059 ++ENDPROC(\sym)
13060 + .endm
13061 +
13062 + .macro errorentry sym do_sym
13063 +@@ -1070,13 +1403,23 @@ ENTRY(\sym)
13064 + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13065 + call error_entry
13066 + DEFAULT_FRAME 0
13067 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
13068 ++ testb $3, CS(%rsp)
13069 ++ jnz 1f
13070 ++ pax_enter_kernel
13071 ++ jmp 2f
13072 ++1: pax_enter_kernel_user
13073 ++2:
13074 ++#else
13075 ++ pax_enter_kernel
13076 ++#endif
13077 + movq %rsp,%rdi /* pt_regs pointer */
13078 + movq ORIG_RAX(%rsp),%rsi /* get error code */
13079 + movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13080 + call \do_sym
13081 + jmp error_exit /* %ebx: no swapgs flag */
13082 + CFI_ENDPROC
13083 +-END(\sym)
13084 ++ENDPROC(\sym)
13085 + .endm
13086 +
13087 + /* error code is on the stack already */
13088 +@@ -1089,13 +1432,23 @@ ENTRY(\sym)
13089 + call save_paranoid
13090 + DEFAULT_FRAME 0
13091 + TRACE_IRQS_OFF
13092 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
13093 ++ testb $3, CS(%rsp)
13094 ++ jnz 1f
13095 ++ pax_enter_kernel
13096 ++ jmp 2f
13097 ++1: pax_enter_kernel_user
13098 ++2:
13099 ++#else
13100 ++ pax_enter_kernel
13101 ++#endif
13102 + movq %rsp,%rdi /* pt_regs pointer */
13103 + movq ORIG_RAX(%rsp),%rsi /* get error code */
13104 + movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13105 + call \do_sym
13106 + jmp paranoid_exit /* %ebx: no swapgs flag */
13107 + CFI_ENDPROC
13108 +-END(\sym)
13109 ++ENDPROC(\sym)
13110 + .endm
13111 +
13112 + zeroentry divide_error do_divide_error
13113 +@@ -1125,9 +1478,10 @@ gs_change:
13114 + 2: mfence /* workaround */
13115 + SWAPGS
13116 + popfq_cfi
13117 ++ pax_force_retaddr
13118 + ret
13119 + CFI_ENDPROC
13120 +-END(native_load_gs_index)
13121 ++ENDPROC(native_load_gs_index)
13122 +
13123 + .section __ex_table,"a"
13124 + .align 8
13125 +@@ -1149,13 +1503,14 @@ ENTRY(kernel_thread_helper)
13126 + * Here we are in the child and the registers are set as they were
13127 + * at kernel_thread() invocation in the parent.
13128 + */
13129 ++ pax_force_fptr %rsi
13130 + call *%rsi
13131 + # exit
13132 + mov %eax, %edi
13133 + call do_exit
13134 + ud2 # padding for call trace
13135 + CFI_ENDPROC
13136 +-END(kernel_thread_helper)
13137 ++ENDPROC(kernel_thread_helper)
13138 +
13139 + /*
13140 + * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
13141 +@@ -1184,9 +1539,10 @@ ENTRY(kernel_execve)
13142 + je int_ret_from_sys_call
13143 + RESTORE_ARGS
13144 + UNFAKE_STACK_FRAME
13145 ++ pax_force_retaddr
13146 + ret
13147 + CFI_ENDPROC
13148 +-END(kernel_execve)
13149 ++ENDPROC(kernel_execve)
13150 +
13151 + /* Call softirq on interrupt stack. Interrupts are off. */
13152 + ENTRY(call_softirq)
13153 +@@ -1204,9 +1560,10 @@ ENTRY(call_softirq)
13154 + CFI_DEF_CFA_REGISTER rsp
13155 + CFI_ADJUST_CFA_OFFSET -8
13156 + decl PER_CPU_VAR(irq_count)
13157 ++ pax_force_retaddr
13158 + ret
13159 + CFI_ENDPROC
13160 +-END(call_softirq)
13161 ++ENDPROC(call_softirq)
13162 +
13163 + #ifdef CONFIG_XEN
13164 + zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
13165 +@@ -1244,7 +1601,7 @@ ENTRY(xen_do_hypervisor_callback) # do
13166 + decl PER_CPU_VAR(irq_count)
13167 + jmp error_exit
13168 + CFI_ENDPROC
13169 +-END(xen_do_hypervisor_callback)
13170 ++ENDPROC(xen_do_hypervisor_callback)
13171 +
13172 + /*
13173 + * Hypervisor uses this for application faults while it executes.
13174 +@@ -1303,7 +1660,7 @@ ENTRY(xen_failsafe_callback)
13175 + SAVE_ALL
13176 + jmp error_exit
13177 + CFI_ENDPROC
13178 +-END(xen_failsafe_callback)
13179 ++ENDPROC(xen_failsafe_callback)
13180 +
13181 + apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
13182 + xen_hvm_callback_vector xen_evtchn_do_upcall
13183 +@@ -1352,16 +1709,31 @@ ENTRY(paranoid_exit)
13184 + TRACE_IRQS_OFF
13185 + testl %ebx,%ebx /* swapgs needed? */
13186 + jnz paranoid_restore
13187 +- testl $3,CS(%rsp)
13188 ++ testb $3,CS(%rsp)
13189 + jnz paranoid_userspace
13190 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
13191 ++ pax_exit_kernel
13192 ++ TRACE_IRQS_IRETQ 0
13193 ++ SWAPGS_UNSAFE_STACK
13194 ++ RESTORE_ALL 8
13195 ++ pax_force_retaddr
13196 ++ jmp irq_return
13197 ++#endif
13198 + paranoid_swapgs:
13199 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
13200 ++ pax_exit_kernel_user
13201 ++#else
13202 ++ pax_exit_kernel
13203 ++#endif
13204 + TRACE_IRQS_IRETQ 0
13205 + SWAPGS_UNSAFE_STACK
13206 + RESTORE_ALL 8
13207 + jmp irq_return
13208 + paranoid_restore:
13209 ++ pax_exit_kernel
13210 + TRACE_IRQS_IRETQ 0
13211 + RESTORE_ALL 8
13212 ++ pax_force_retaddr
13213 + jmp irq_return
13214 + paranoid_userspace:
13215 + GET_THREAD_INFO(%rcx)
13216 +@@ -1390,7 +1762,7 @@ paranoid_schedule:
13217 + TRACE_IRQS_OFF
13218 + jmp paranoid_userspace
13219 + CFI_ENDPROC
13220 +-END(paranoid_exit)
13221 ++ENDPROC(paranoid_exit)
13222 +
13223 + /*
13224 + * Exception entry point. This expects an error code/orig_rax on the stack.
13225 +@@ -1417,12 +1789,13 @@ ENTRY(error_entry)
13226 + movq_cfi r14, R14+8
13227 + movq_cfi r15, R15+8
13228 + xorl %ebx,%ebx
13229 +- testl $3,CS+8(%rsp)
13230 ++ testb $3,CS+8(%rsp)
13231 + je error_kernelspace
13232 + error_swapgs:
13233 + SWAPGS
13234 + error_sti:
13235 + TRACE_IRQS_OFF
13236 ++ pax_force_retaddr
13237 + ret
13238 +
13239 + /*
13240 +@@ -1449,7 +1822,7 @@ bstep_iret:
13241 + movq %rcx,RIP+8(%rsp)
13242 + jmp error_swapgs
13243 + CFI_ENDPROC
13244 +-END(error_entry)
13245 ++ENDPROC(error_entry)
13246 +
13247 +
13248 + /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
13249 +@@ -1469,7 +1842,7 @@ ENTRY(error_exit)
13250 + jnz retint_careful
13251 + jmp retint_swapgs
13252 + CFI_ENDPROC
13253 +-END(error_exit)
13254 ++ENDPROC(error_exit)
13255 +
13256 +
13257 + /* runs on exception stack */
13258 +@@ -1481,6 +1854,16 @@ ENTRY(nmi)
13259 + CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13260 + call save_paranoid
13261 + DEFAULT_FRAME 0
13262 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
13263 ++ testb $3, CS(%rsp)
13264 ++ jnz 1f
13265 ++ pax_enter_kernel
13266 ++ jmp 2f
13267 ++1: pax_enter_kernel_user
13268 ++2:
13269 ++#else
13270 ++ pax_enter_kernel
13271 ++#endif
13272 + /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13273 + movq %rsp,%rdi
13274 + movq $-1,%rsi
13275 +@@ -1491,12 +1874,28 @@ ENTRY(nmi)
13276 + DISABLE_INTERRUPTS(CLBR_NONE)
13277 + testl %ebx,%ebx /* swapgs needed? */
13278 + jnz nmi_restore
13279 +- testl $3,CS(%rsp)
13280 ++ testb $3,CS(%rsp)
13281 + jnz nmi_userspace
13282 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
13283 ++ pax_exit_kernel
13284 ++ SWAPGS_UNSAFE_STACK
13285 ++ RESTORE_ALL 8
13286 ++ pax_force_retaddr
13287 ++ jmp irq_return
13288 ++#endif
13289 + nmi_swapgs:
13290 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
13291 ++ pax_exit_kernel_user
13292 ++#else
13293 ++ pax_exit_kernel
13294 ++#endif
13295 + SWAPGS_UNSAFE_STACK
13296 ++ RESTORE_ALL 8
13297 ++ jmp irq_return
13298 + nmi_restore:
13299 ++ pax_exit_kernel
13300 + RESTORE_ALL 8
13301 ++ pax_force_retaddr
13302 + jmp irq_return
13303 + nmi_userspace:
13304 + GET_THREAD_INFO(%rcx)
13305 +@@ -1525,14 +1924,14 @@ nmi_schedule:
13306 + jmp paranoid_exit
13307 + CFI_ENDPROC
13308 + #endif
13309 +-END(nmi)
13310 ++ENDPROC(nmi)
13311 +
13312 + ENTRY(ignore_sysret)
13313 + CFI_STARTPROC
13314 + mov $-ENOSYS,%eax
13315 + sysret
13316 + CFI_ENDPROC
13317 +-END(ignore_sysret)
13318 ++ENDPROC(ignore_sysret)
13319 +
13320 + /*
13321 + * End of kprobes section
13322 +diff -urNp linux-3.1.1/arch/x86/kernel/ftrace.c linux-3.1.1/arch/x86/kernel/ftrace.c
13323 +--- linux-3.1.1/arch/x86/kernel/ftrace.c 2011-11-11 15:19:27.000000000 -0500
13324 ++++ linux-3.1.1/arch/x86/kernel/ftrace.c 2011-11-16 18:39:07.000000000 -0500
13325 +@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
13326 + static const void *mod_code_newcode; /* holds the text to write to the IP */
13327 +
13328 + static unsigned nmi_wait_count;
13329 +-static atomic_t nmi_update_count = ATOMIC_INIT(0);
13330 ++static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13331 +
13332 + int ftrace_arch_read_dyn_info(char *buf, int size)
13333 + {
13334 +@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13335 +
13336 + r = snprintf(buf, size, "%u %u",
13337 + nmi_wait_count,
13338 +- atomic_read(&nmi_update_count));
13339 ++ atomic_read_unchecked(&nmi_update_count));
13340 + return r;
13341 + }
13342 +
13343 +@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
13344 +
13345 + if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13346 + smp_rmb();
13347 ++ pax_open_kernel();
13348 + ftrace_mod_code();
13349 +- atomic_inc(&nmi_update_count);
13350 ++ pax_close_kernel();
13351 ++ atomic_inc_unchecked(&nmi_update_count);
13352 + }
13353 + /* Must have previous changes seen before executions */
13354 + smp_mb();
13355 +@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
13356 + {
13357 + unsigned char replaced[MCOUNT_INSN_SIZE];
13358 +
13359 ++ ip = ktla_ktva(ip);
13360 ++
13361 + /*
13362 + * Note: Due to modules and __init, code can
13363 + * disappear and change, we need to protect against faulting
13364 +@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13365 + unsigned char old[MCOUNT_INSN_SIZE], *new;
13366 + int ret;
13367 +
13368 +- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13369 ++ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13370 + new = ftrace_call_replace(ip, (unsigned long)func);
13371 + ret = ftrace_modify_code(ip, old, new);
13372 +
13373 +@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
13374 + {
13375 + unsigned char code[MCOUNT_INSN_SIZE];
13376 +
13377 ++ ip = ktla_ktva(ip);
13378 ++
13379 + if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
13380 + return -EFAULT;
13381 +
13382 +diff -urNp linux-3.1.1/arch/x86/kernel/head32.c linux-3.1.1/arch/x86/kernel/head32.c
13383 +--- linux-3.1.1/arch/x86/kernel/head32.c 2011-11-11 15:19:27.000000000 -0500
13384 ++++ linux-3.1.1/arch/x86/kernel/head32.c 2011-11-16 18:39:07.000000000 -0500
13385 +@@ -19,6 +19,7 @@
13386 + #include <asm/io_apic.h>
13387 + #include <asm/bios_ebda.h>
13388 + #include <asm/tlbflush.h>
13389 ++#include <asm/boot.h>
13390 +
13391 + static void __init i386_default_early_setup(void)
13392 + {
13393 +@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
13394 + {
13395 + memblock_init();
13396 +
13397 +- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13398 ++ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13399 +
13400 + #ifdef CONFIG_BLK_DEV_INITRD
13401 + /* Reserve INITRD */
13402 +diff -urNp linux-3.1.1/arch/x86/kernel/head_32.S linux-3.1.1/arch/x86/kernel/head_32.S
13403 +--- linux-3.1.1/arch/x86/kernel/head_32.S 2011-11-11 15:19:27.000000000 -0500
13404 ++++ linux-3.1.1/arch/x86/kernel/head_32.S 2011-11-16 18:39:07.000000000 -0500
13405 +@@ -25,6 +25,12 @@
13406 + /* Physical address */
13407 + #define pa(X) ((X) - __PAGE_OFFSET)
13408 +
13409 ++#ifdef CONFIG_PAX_KERNEXEC
13410 ++#define ta(X) (X)
13411 ++#else
13412 ++#define ta(X) ((X) - __PAGE_OFFSET)
13413 ++#endif
13414 ++
13415 + /*
13416 + * References to members of the new_cpu_data structure.
13417 + */
13418 +@@ -54,11 +60,7 @@
13419 + * and small than max_low_pfn, otherwise will waste some page table entries
13420 + */
13421 +
13422 +-#if PTRS_PER_PMD > 1
13423 +-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
13424 +-#else
13425 +-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
13426 +-#endif
13427 ++#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
13428 +
13429 + /* Number of possible pages in the lowmem region */
13430 + LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
13431 +@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
13432 + RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13433 +
13434 + /*
13435 ++ * Real beginning of normal "text" segment
13436 ++ */
13437 ++ENTRY(stext)
13438 ++ENTRY(_stext)
13439 ++
13440 ++/*
13441 + * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
13442 + * %esi points to the real-mode code as a 32-bit pointer.
13443 + * CS and DS must be 4 GB flat segments, but we don't depend on
13444 +@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13445 + * can.
13446 + */
13447 + __HEAD
13448 ++
13449 ++#ifdef CONFIG_PAX_KERNEXEC
13450 ++ jmp startup_32
13451 ++/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
13452 ++.fill PAGE_SIZE-5,1,0xcc
13453 ++#endif
13454 ++
13455 + ENTRY(startup_32)
13456 + movl pa(stack_start),%ecx
13457 +
13458 +@@ -105,6 +120,57 @@ ENTRY(startup_32)
13459 + 2:
13460 + leal -__PAGE_OFFSET(%ecx),%esp
13461 +
13462 ++#ifdef CONFIG_SMP
13463 ++ movl $pa(cpu_gdt_table),%edi
13464 ++ movl $__per_cpu_load,%eax
13465 ++ movw %ax,__KERNEL_PERCPU + 2(%edi)
13466 ++ rorl $16,%eax
13467 ++ movb %al,__KERNEL_PERCPU + 4(%edi)
13468 ++ movb %ah,__KERNEL_PERCPU + 7(%edi)
13469 ++ movl $__per_cpu_end - 1,%eax
13470 ++ subl $__per_cpu_start,%eax
13471 ++ movw %ax,__KERNEL_PERCPU + 0(%edi)
13472 ++#endif
13473 ++
13474 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
13475 ++ movl $NR_CPUS,%ecx
13476 ++ movl $pa(cpu_gdt_table),%edi
13477 ++1:
13478 ++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
13479 ++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
13480 ++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
13481 ++ addl $PAGE_SIZE_asm,%edi
13482 ++ loop 1b
13483 ++#endif
13484 ++
13485 ++#ifdef CONFIG_PAX_KERNEXEC
13486 ++ movl $pa(boot_gdt),%edi
13487 ++ movl $__LOAD_PHYSICAL_ADDR,%eax
13488 ++ movw %ax,__BOOT_CS + 2(%edi)
13489 ++ rorl $16,%eax
13490 ++ movb %al,__BOOT_CS + 4(%edi)
13491 ++ movb %ah,__BOOT_CS + 7(%edi)
13492 ++ rorl $16,%eax
13493 ++
13494 ++ ljmp $(__BOOT_CS),$1f
13495 ++1:
13496 ++
13497 ++ movl $NR_CPUS,%ecx
13498 ++ movl $pa(cpu_gdt_table),%edi
13499 ++ addl $__PAGE_OFFSET,%eax
13500 ++1:
13501 ++ movw %ax,__KERNEL_CS + 2(%edi)
13502 ++ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
13503 ++ rorl $16,%eax
13504 ++ movb %al,__KERNEL_CS + 4(%edi)
13505 ++ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
13506 ++ movb %ah,__KERNEL_CS + 7(%edi)
13507 ++ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
13508 ++ rorl $16,%eax
13509 ++ addl $PAGE_SIZE_asm,%edi
13510 ++ loop 1b
13511 ++#endif
13512 ++
13513 + /*
13514 + * Clear BSS first so that there are no surprises...
13515 + */
13516 +@@ -195,8 +261,11 @@ ENTRY(startup_32)
13517 + movl %eax, pa(max_pfn_mapped)
13518 +
13519 + /* Do early initialization of the fixmap area */
13520 +- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
13521 +- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
13522 ++#ifdef CONFIG_COMPAT_VDSO
13523 ++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
13524 ++#else
13525 ++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
13526 ++#endif
13527 + #else /* Not PAE */
13528 +
13529 + page_pde_offset = (__PAGE_OFFSET >> 20);
13530 +@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13531 + movl %eax, pa(max_pfn_mapped)
13532 +
13533 + /* Do early initialization of the fixmap area */
13534 +- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
13535 +- movl %eax,pa(initial_page_table+0xffc)
13536 ++#ifdef CONFIG_COMPAT_VDSO
13537 ++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
13538 ++#else
13539 ++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
13540 ++#endif
13541 + #endif
13542 +
13543 + #ifdef CONFIG_PARAVIRT
13544 +@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13545 + cmpl $num_subarch_entries, %eax
13546 + jae bad_subarch
13547 +
13548 +- movl pa(subarch_entries)(,%eax,4), %eax
13549 +- subl $__PAGE_OFFSET, %eax
13550 +- jmp *%eax
13551 ++ jmp *pa(subarch_entries)(,%eax,4)
13552 +
13553 + bad_subarch:
13554 + WEAK(lguest_entry)
13555 +@@ -255,10 +325,10 @@ WEAK(xen_entry)
13556 + __INITDATA
13557 +
13558 + subarch_entries:
13559 +- .long default_entry /* normal x86/PC */
13560 +- .long lguest_entry /* lguest hypervisor */
13561 +- .long xen_entry /* Xen hypervisor */
13562 +- .long default_entry /* Moorestown MID */
13563 ++ .long ta(default_entry) /* normal x86/PC */
13564 ++ .long ta(lguest_entry) /* lguest hypervisor */
13565 ++ .long ta(xen_entry) /* Xen hypervisor */
13566 ++ .long ta(default_entry) /* Moorestown MID */
13567 + num_subarch_entries = (. - subarch_entries) / 4
13568 + .previous
13569 + #else
13570 +@@ -312,6 +382,7 @@ default_entry:
13571 + orl %edx,%eax
13572 + movl %eax,%cr4
13573 +
13574 ++#ifdef CONFIG_X86_PAE
13575 + testb $X86_CR4_PAE, %al # check if PAE is enabled
13576 + jz 6f
13577 +
13578 +@@ -340,6 +411,9 @@ default_entry:
13579 + /* Make changes effective */
13580 + wrmsr
13581 +
13582 ++ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
13583 ++#endif
13584 ++
13585 + 6:
13586 +
13587 + /*
13588 +@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
13589 + 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
13590 + movl %eax,%ss # after changing gdt.
13591 +
13592 +- movl $(__USER_DS),%eax # DS/ES contains default USER segment
13593 ++# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
13594 + movl %eax,%ds
13595 + movl %eax,%es
13596 +
13597 +@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
13598 + */
13599 + cmpb $0,ready
13600 + jne 1f
13601 +- movl $gdt_page,%eax
13602 ++ movl $cpu_gdt_table,%eax
13603 + movl $stack_canary,%ecx
13604 ++#ifdef CONFIG_SMP
13605 ++ addl $__per_cpu_load,%ecx
13606 ++#endif
13607 + movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
13608 + shrl $16, %ecx
13609 + movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
13610 + movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
13611 + 1:
13612 +-#endif
13613 + movl $(__KERNEL_STACK_CANARY),%eax
13614 ++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13615 ++ movl $(__USER_DS),%eax
13616 ++#else
13617 ++ xorl %eax,%eax
13618 ++#endif
13619 + movl %eax,%gs
13620 +
13621 + xorl %eax,%eax # Clear LDT
13622 +@@ -558,22 +639,22 @@ early_page_fault:
13623 + jmp early_fault
13624 +
13625 + early_fault:
13626 +- cld
13627 + #ifdef CONFIG_PRINTK
13628 ++ cmpl $1,%ss:early_recursion_flag
13629 ++ je hlt_loop
13630 ++ incl %ss:early_recursion_flag
13631 ++ cld
13632 + pusha
13633 + movl $(__KERNEL_DS),%eax
13634 + movl %eax,%ds
13635 + movl %eax,%es
13636 +- cmpl $2,early_recursion_flag
13637 +- je hlt_loop
13638 +- incl early_recursion_flag
13639 + movl %cr2,%eax
13640 + pushl %eax
13641 + pushl %edx /* trapno */
13642 + pushl $fault_msg
13643 + call printk
13644 ++; call dump_stack
13645 + #endif
13646 +- call dump_stack
13647 + hlt_loop:
13648 + hlt
13649 + jmp hlt_loop
13650 +@@ -581,8 +662,11 @@ hlt_loop:
13651 + /* This is the default interrupt "handler" :-) */
13652 + ALIGN
13653 + ignore_int:
13654 +- cld
13655 + #ifdef CONFIG_PRINTK
13656 ++ cmpl $2,%ss:early_recursion_flag
13657 ++ je hlt_loop
13658 ++ incl %ss:early_recursion_flag
13659 ++ cld
13660 + pushl %eax
13661 + pushl %ecx
13662 + pushl %edx
13663 +@@ -591,9 +675,6 @@ ignore_int:
13664 + movl $(__KERNEL_DS),%eax
13665 + movl %eax,%ds
13666 + movl %eax,%es
13667 +- cmpl $2,early_recursion_flag
13668 +- je hlt_loop
13669 +- incl early_recursion_flag
13670 + pushl 16(%esp)
13671 + pushl 24(%esp)
13672 + pushl 32(%esp)
13673 +@@ -622,29 +703,43 @@ ENTRY(initial_code)
13674 + /*
13675 + * BSS section
13676 + */
13677 +-__PAGE_ALIGNED_BSS
13678 +- .align PAGE_SIZE
13679 + #ifdef CONFIG_X86_PAE
13680 ++.section .initial_pg_pmd,"a",@progbits
13681 + initial_pg_pmd:
13682 + .fill 1024*KPMDS,4,0
13683 + #else
13684 ++.section .initial_page_table,"a",@progbits
13685 + ENTRY(initial_page_table)
13686 + .fill 1024,4,0
13687 + #endif
13688 ++.section .initial_pg_fixmap,"a",@progbits
13689 + initial_pg_fixmap:
13690 + .fill 1024,4,0
13691 ++.section .empty_zero_page,"a",@progbits
13692 + ENTRY(empty_zero_page)
13693 + .fill 4096,1,0
13694 ++.section .swapper_pg_dir,"a",@progbits
13695 + ENTRY(swapper_pg_dir)
13696 ++#ifdef CONFIG_X86_PAE
13697 ++ .fill 4,8,0
13698 ++#else
13699 + .fill 1024,4,0
13700 ++#endif
13701 ++
13702 ++/*
13703 ++ * The IDT has to be page-aligned to simplify the Pentium
13704 ++ * F0 0F bug workaround.. We have a special link segment
13705 ++ * for this.
13706 ++ */
13707 ++.section .idt,"a",@progbits
13708 ++ENTRY(idt_table)
13709 ++ .fill 256,8,0
13710 +
13711 + /*
13712 + * This starts the data section.
13713 + */
13714 + #ifdef CONFIG_X86_PAE
13715 +-__PAGE_ALIGNED_DATA
13716 +- /* Page-aligned for the benefit of paravirt? */
13717 +- .align PAGE_SIZE
13718 ++.section .initial_page_table,"a",@progbits
13719 + ENTRY(initial_page_table)
13720 + .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
13721 + # if KPMDS == 3
13722 +@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
13723 + # error "Kernel PMDs should be 1, 2 or 3"
13724 + # endif
13725 + .align PAGE_SIZE /* needs to be page-sized too */
13726 ++
13727 ++#ifdef CONFIG_PAX_PER_CPU_PGD
13728 ++ENTRY(cpu_pgd)
13729 ++ .rept NR_CPUS
13730 ++ .fill 4,8,0
13731 ++ .endr
13732 ++#endif
13733 ++
13734 + #endif
13735 +
13736 + .data
13737 + .balign 4
13738 + ENTRY(stack_start)
13739 +- .long init_thread_union+THREAD_SIZE
13740 ++ .long init_thread_union+THREAD_SIZE-8
13741 ++
13742 ++ready: .byte 0
13743 +
13744 ++.section .rodata,"a",@progbits
13745 + early_recursion_flag:
13746 + .long 0
13747 +
13748 +-ready: .byte 0
13749 +-
13750 + int_msg:
13751 + .asciz "Unknown interrupt or fault at: %p %p %p\n"
13752 +
13753 +@@ -707,7 +811,7 @@ fault_msg:
13754 + .word 0 # 32 bit align gdt_desc.address
13755 + boot_gdt_descr:
13756 + .word __BOOT_DS+7
13757 +- .long boot_gdt - __PAGE_OFFSET
13758 ++ .long pa(boot_gdt)
13759 +
13760 + .word 0 # 32-bit align idt_desc.address
13761 + idt_descr:
13762 +@@ -718,7 +822,7 @@ idt_descr:
13763 + .word 0 # 32 bit align gdt_desc.address
13764 + ENTRY(early_gdt_descr)
13765 + .word GDT_ENTRIES*8-1
13766 +- .long gdt_page /* Overwritten for secondary CPUs */
13767 ++ .long cpu_gdt_table /* Overwritten for secondary CPUs */
13768 +
13769 + /*
13770 + * The boot_gdt must mirror the equivalent in setup.S and is
13771 +@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
13772 + .align L1_CACHE_BYTES
13773 + ENTRY(boot_gdt)
13774 + .fill GDT_ENTRY_BOOT_CS,8,0
13775 +- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
13776 +- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
13777 ++ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
13778 ++ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
13779 ++
13780 ++ .align PAGE_SIZE_asm
13781 ++ENTRY(cpu_gdt_table)
13782 ++ .rept NR_CPUS
13783 ++ .quad 0x0000000000000000 /* NULL descriptor */
13784 ++ .quad 0x0000000000000000 /* 0x0b reserved */
13785 ++ .quad 0x0000000000000000 /* 0x13 reserved */
13786 ++ .quad 0x0000000000000000 /* 0x1b reserved */
13787 ++
13788 ++#ifdef CONFIG_PAX_KERNEXEC
13789 ++ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
13790 ++#else
13791 ++ .quad 0x0000000000000000 /* 0x20 unused */
13792 ++#endif
13793 ++
13794 ++ .quad 0x0000000000000000 /* 0x28 unused */
13795 ++ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
13796 ++ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
13797 ++ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
13798 ++ .quad 0x0000000000000000 /* 0x4b reserved */
13799 ++ .quad 0x0000000000000000 /* 0x53 reserved */
13800 ++ .quad 0x0000000000000000 /* 0x5b reserved */
13801 ++
13802 ++ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
13803 ++ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
13804 ++ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
13805 ++ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
13806 ++
13807 ++ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
13808 ++ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
13809 ++
13810 ++ /*
13811 ++ * Segments used for calling PnP BIOS have byte granularity.
13812 ++ * The code segments and data segments have fixed 64k limits,
13813 ++ * the transfer segment sizes are set at run time.
13814 ++ */
13815 ++ .quad 0x00409b000000ffff /* 0x90 32-bit code */
13816 ++ .quad 0x00009b000000ffff /* 0x98 16-bit code */
13817 ++ .quad 0x000093000000ffff /* 0xa0 16-bit data */
13818 ++ .quad 0x0000930000000000 /* 0xa8 16-bit data */
13819 ++ .quad 0x0000930000000000 /* 0xb0 16-bit data */
13820 ++
13821 ++ /*
13822 ++ * The APM segments have byte granularity and their bases
13823 ++ * are set at run time. All have 64k limits.
13824 ++ */
13825 ++ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
13826 ++ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
13827 ++ .quad 0x004093000000ffff /* 0xc8 APM DS data */
13828 ++
13829 ++ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
13830 ++ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
13831 ++ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
13832 ++ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
13833 ++ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
13834 ++ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
13835 ++
13836 ++ /* Be sure this is zeroed to avoid false validations in Xen */
13837 ++ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
13838 ++ .endr
13839 +diff -urNp linux-3.1.1/arch/x86/kernel/head_64.S linux-3.1.1/arch/x86/kernel/head_64.S
13840 +--- linux-3.1.1/arch/x86/kernel/head_64.S 2011-11-11 15:19:27.000000000 -0500
13841 ++++ linux-3.1.1/arch/x86/kernel/head_64.S 2011-11-16 18:39:07.000000000 -0500
13842 +@@ -19,6 +19,7 @@
13843 + #include <asm/cache.h>
13844 + #include <asm/processor-flags.h>
13845 + #include <asm/percpu.h>
13846 ++#include <asm/cpufeature.h>
13847 +
13848 + #ifdef CONFIG_PARAVIRT
13849 + #include <asm/asm-offsets.h>
13850 +@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
13851 + L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
13852 + L4_START_KERNEL = pgd_index(__START_KERNEL_map)
13853 + L3_START_KERNEL = pud_index(__START_KERNEL_map)
13854 ++L4_VMALLOC_START = pgd_index(VMALLOC_START)
13855 ++L3_VMALLOC_START = pud_index(VMALLOC_START)
13856 ++L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
13857 ++L3_VMEMMAP_START = pud_index(VMEMMAP_START)
13858 +
13859 + .text
13860 + __HEAD
13861 +@@ -85,35 +90,22 @@ startup_64:
13862 + */
13863 + addq %rbp, init_level4_pgt + 0(%rip)
13864 + addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
13865 ++ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
13866 ++ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
13867 + addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
13868 +
13869 + addq %rbp, level3_ident_pgt + 0(%rip)
13870 ++#ifndef CONFIG_XEN
13871 ++ addq %rbp, level3_ident_pgt + 8(%rip)
13872 ++#endif
13873 +
13874 +- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
13875 +- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
13876 ++ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
13877 +
13878 +- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13879 ++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
13880 ++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
13881 +
13882 +- /* Add an Identity mapping if I am above 1G */
13883 +- leaq _text(%rip), %rdi
13884 +- andq $PMD_PAGE_MASK, %rdi
13885 +-
13886 +- movq %rdi, %rax
13887 +- shrq $PUD_SHIFT, %rax
13888 +- andq $(PTRS_PER_PUD - 1), %rax
13889 +- jz ident_complete
13890 +-
13891 +- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
13892 +- leaq level3_ident_pgt(%rip), %rbx
13893 +- movq %rdx, 0(%rbx, %rax, 8)
13894 +-
13895 +- movq %rdi, %rax
13896 +- shrq $PMD_SHIFT, %rax
13897 +- andq $(PTRS_PER_PMD - 1), %rax
13898 +- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
13899 +- leaq level2_spare_pgt(%rip), %rbx
13900 +- movq %rdx, 0(%rbx, %rax, 8)
13901 +-ident_complete:
13902 ++ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
13903 ++ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
13904 +
13905 + /*
13906 + * Fixup the kernel text+data virtual addresses. Note that
13907 +@@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
13908 + * after the boot processor executes this code.
13909 + */
13910 +
13911 +- /* Enable PAE mode and PGE */
13912 +- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
13913 ++ /* Enable PAE mode and PSE/PGE */
13914 ++ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
13915 + movq %rax, %cr4
13916 +
13917 + /* Setup early boot stage 4 level pagetables. */
13918 +@@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
13919 + movl $MSR_EFER, %ecx
13920 + rdmsr
13921 + btsl $_EFER_SCE, %eax /* Enable System Call */
13922 +- btl $20,%edi /* No Execute supported? */
13923 ++ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
13924 + jnc 1f
13925 + btsl $_EFER_NX, %eax
13926 ++ leaq init_level4_pgt(%rip), %rdi
13927 ++ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
13928 ++ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
13929 ++ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
13930 ++ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
13931 + 1: wrmsr /* Make changes effective */
13932 +
13933 + /* Setup cr0 */
13934 +@@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
13935 + bad_address:
13936 + jmp bad_address
13937 +
13938 +- .section ".init.text","ax"
13939 ++ __INIT
13940 + #ifdef CONFIG_EARLY_PRINTK
13941 + .globl early_idt_handlers
13942 + early_idt_handlers:
13943 +@@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
13944 + #endif /* EARLY_PRINTK */
13945 + 1: hlt
13946 + jmp 1b
13947 ++ .previous
13948 +
13949 + #ifdef CONFIG_EARLY_PRINTK
13950 ++ __INITDATA
13951 + early_recursion_flag:
13952 + .long 0
13953 ++ .previous
13954 +
13955 ++ .section .rodata,"a",@progbits
13956 + early_idt_msg:
13957 + .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
13958 + early_idt_ripmsg:
13959 + .asciz "RIP %s\n"
13960 +-#endif /* CONFIG_EARLY_PRINTK */
13961 + .previous
13962 ++#endif /* CONFIG_EARLY_PRINTK */
13963 +
13964 ++ .section .rodata,"a",@progbits
13965 + #define NEXT_PAGE(name) \
13966 + .balign PAGE_SIZE; \
13967 + ENTRY(name)
13968 +@@ -338,7 +340,6 @@ ENTRY(name)
13969 + i = i + 1 ; \
13970 + .endr
13971 +
13972 +- .data
13973 + /*
13974 + * This default setting generates an ident mapping at address 0x100000
13975 + * and a mapping for the kernel that precisely maps virtual address
13976 +@@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
13977 + .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13978 + .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13979 + .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13980 ++ .org init_level4_pgt + L4_VMALLOC_START*8, 0
13981 ++ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13982 ++ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13983 ++ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13984 + .org init_level4_pgt + L4_START_KERNEL*8, 0
13985 + /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13986 + .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13987 +
13988 ++#ifdef CONFIG_PAX_PER_CPU_PGD
13989 ++NEXT_PAGE(cpu_pgd)
13990 ++ .rept NR_CPUS
13991 ++ .fill 512,8,0
13992 ++ .endr
13993 ++#endif
13994 ++
13995 + NEXT_PAGE(level3_ident_pgt)
13996 + .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13997 ++#ifdef CONFIG_XEN
13998 + .fill 511,8,0
13999 ++#else
14000 ++ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14001 ++ .fill 510,8,0
14002 ++#endif
14003 ++
14004 ++NEXT_PAGE(level3_vmalloc_pgt)
14005 ++ .fill 512,8,0
14006 ++
14007 ++NEXT_PAGE(level3_vmemmap_pgt)
14008 ++ .fill L3_VMEMMAP_START,8,0
14009 ++ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14010 +
14011 + NEXT_PAGE(level3_kernel_pgt)
14012 + .fill L3_START_KERNEL,8,0
14013 +@@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
14014 + .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14015 + .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14016 +
14017 ++NEXT_PAGE(level2_vmemmap_pgt)
14018 ++ .fill 512,8,0
14019 ++
14020 + NEXT_PAGE(level2_fixmap_pgt)
14021 +- .fill 506,8,0
14022 +- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14023 +- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14024 +- .fill 5,8,0
14025 ++ .fill 507,8,0
14026 ++ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14027 ++ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14028 ++ .fill 4,8,0
14029 +
14030 +-NEXT_PAGE(level1_fixmap_pgt)
14031 ++NEXT_PAGE(level1_vsyscall_pgt)
14032 + .fill 512,8,0
14033 +
14034 +-NEXT_PAGE(level2_ident_pgt)
14035 +- /* Since I easily can, map the first 1G.
14036 ++ /* Since I easily can, map the first 2G.
14037 + * Don't set NX because code runs from these pages.
14038 + */
14039 +- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14040 ++NEXT_PAGE(level2_ident_pgt)
14041 ++ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14042 +
14043 + NEXT_PAGE(level2_kernel_pgt)
14044 + /*
14045 +@@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
14046 + * If you want to increase this then increase MODULES_VADDR
14047 + * too.)
14048 + */
14049 +- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14050 +- KERNEL_IMAGE_SIZE/PMD_SIZE)
14051 +-
14052 +-NEXT_PAGE(level2_spare_pgt)
14053 +- .fill 512, 8, 0
14054 ++ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14055 +
14056 + #undef PMDS
14057 + #undef NEXT_PAGE
14058 +
14059 +- .data
14060 ++ .align PAGE_SIZE
14061 ++ENTRY(cpu_gdt_table)
14062 ++ .rept NR_CPUS
14063 ++ .quad 0x0000000000000000 /* NULL descriptor */
14064 ++ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14065 ++ .quad 0x00af9b000000ffff /* __KERNEL_CS */
14066 ++ .quad 0x00cf93000000ffff /* __KERNEL_DS */
14067 ++ .quad 0x00cffb000000ffff /* __USER32_CS */
14068 ++ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14069 ++ .quad 0x00affb000000ffff /* __USER_CS */
14070 ++
14071 ++#ifdef CONFIG_PAX_KERNEXEC
14072 ++ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14073 ++#else
14074 ++ .quad 0x0 /* unused */
14075 ++#endif
14076 ++
14077 ++ .quad 0,0 /* TSS */
14078 ++ .quad 0,0 /* LDT */
14079 ++ .quad 0,0,0 /* three TLS descriptors */
14080 ++ .quad 0x0000f40000000000 /* node/CPU stored in limit */
14081 ++ /* asm/segment.h:GDT_ENTRIES must match this */
14082 ++
14083 ++ /* zero the remaining page */
14084 ++ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14085 ++ .endr
14086 ++
14087 + .align 16
14088 + .globl early_gdt_descr
14089 + early_gdt_descr:
14090 + .word GDT_ENTRIES*8-1
14091 + early_gdt_descr_base:
14092 +- .quad INIT_PER_CPU_VAR(gdt_page)
14093 ++ .quad cpu_gdt_table
14094 +
14095 + ENTRY(phys_base)
14096 + /* This must match the first entry in level2_kernel_pgt */
14097 + .quad 0x0000000000000000
14098 +
14099 + #include "../../x86/xen/xen-head.S"
14100 +-
14101 +- .section .bss, "aw", @nobits
14102 ++
14103 ++ .section .rodata,"a",@progbits
14104 + .align L1_CACHE_BYTES
14105 + ENTRY(idt_table)
14106 +- .skip IDT_ENTRIES * 16
14107 ++ .fill 512,8,0
14108 +
14109 + __PAGE_ALIGNED_BSS
14110 + .align PAGE_SIZE
14111 +diff -urNp linux-3.1.1/arch/x86/kernel/i386_ksyms_32.c linux-3.1.1/arch/x86/kernel/i386_ksyms_32.c
14112 +--- linux-3.1.1/arch/x86/kernel/i386_ksyms_32.c 2011-11-11 15:19:27.000000000 -0500
14113 ++++ linux-3.1.1/arch/x86/kernel/i386_ksyms_32.c 2011-11-16 18:39:07.000000000 -0500
14114 +@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
14115 + EXPORT_SYMBOL(cmpxchg8b_emu);
14116 + #endif
14117 +
14118 ++EXPORT_SYMBOL_GPL(cpu_gdt_table);
14119 ++
14120 + /* Networking helper routines. */
14121 + EXPORT_SYMBOL(csum_partial_copy_generic);
14122 ++EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
14123 ++EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
14124 +
14125 + EXPORT_SYMBOL(__get_user_1);
14126 + EXPORT_SYMBOL(__get_user_2);
14127 +@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
14128 +
14129 + EXPORT_SYMBOL(csum_partial);
14130 + EXPORT_SYMBOL(empty_zero_page);
14131 ++
14132 ++#ifdef CONFIG_PAX_KERNEXEC
14133 ++EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
14134 ++#endif
14135 +diff -urNp linux-3.1.1/arch/x86/kernel/i8259.c linux-3.1.1/arch/x86/kernel/i8259.c
14136 +--- linux-3.1.1/arch/x86/kernel/i8259.c 2011-11-11 15:19:27.000000000 -0500
14137 ++++ linux-3.1.1/arch/x86/kernel/i8259.c 2011-11-16 18:39:07.000000000 -0500
14138 +@@ -210,7 +210,7 @@ spurious_8259A_irq:
14139 + "spurious 8259A interrupt: IRQ%d.\n", irq);
14140 + spurious_irq_mask |= irqmask;
14141 + }
14142 +- atomic_inc(&irq_err_count);
14143 ++ atomic_inc_unchecked(&irq_err_count);
14144 + /*
14145 + * Theoretically we do not have to handle this IRQ,
14146 + * but in Linux this does not cause problems and is
14147 +diff -urNp linux-3.1.1/arch/x86/kernel/init_task.c linux-3.1.1/arch/x86/kernel/init_task.c
14148 +--- linux-3.1.1/arch/x86/kernel/init_task.c 2011-11-11 15:19:27.000000000 -0500
14149 ++++ linux-3.1.1/arch/x86/kernel/init_task.c 2011-11-16 18:39:07.000000000 -0500
14150 +@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
14151 + * way process stacks are handled. This is done by having a special
14152 + * "init_task" linker map entry..
14153 + */
14154 +-union thread_union init_thread_union __init_task_data =
14155 +- { INIT_THREAD_INFO(init_task) };
14156 ++union thread_union init_thread_union __init_task_data;
14157 +
14158 + /*
14159 + * Initial task structure.
14160 +@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
14161 + * section. Since TSS's are completely CPU-local, we want them
14162 + * on exact cacheline boundaries, to eliminate cacheline ping-pong.
14163 + */
14164 +-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
14165 +-
14166 ++struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
14167 ++EXPORT_SYMBOL(init_tss);
14168 +diff -urNp linux-3.1.1/arch/x86/kernel/ioport.c linux-3.1.1/arch/x86/kernel/ioport.c
14169 +--- linux-3.1.1/arch/x86/kernel/ioport.c 2011-11-11 15:19:27.000000000 -0500
14170 ++++ linux-3.1.1/arch/x86/kernel/ioport.c 2011-11-16 18:40:08.000000000 -0500
14171 +@@ -6,6 +6,7 @@
14172 + #include <linux/sched.h>
14173 + #include <linux/kernel.h>
14174 + #include <linux/capability.h>
14175 ++#include <linux/security.h>
14176 + #include <linux/errno.h>
14177 + #include <linux/types.h>
14178 + #include <linux/ioport.h>
14179 +@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
14180 +
14181 + if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
14182 + return -EINVAL;
14183 ++#ifdef CONFIG_GRKERNSEC_IO
14184 ++ if (turn_on && grsec_disable_privio) {
14185 ++ gr_handle_ioperm();
14186 ++ return -EPERM;
14187 ++ }
14188 ++#endif
14189 + if (turn_on && !capable(CAP_SYS_RAWIO))
14190 + return -EPERM;
14191 +
14192 +@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
14193 + * because the ->io_bitmap_max value must match the bitmap
14194 + * contents:
14195 + */
14196 +- tss = &per_cpu(init_tss, get_cpu());
14197 ++ tss = init_tss + get_cpu();
14198 +
14199 + if (turn_on)
14200 + bitmap_clear(t->io_bitmap_ptr, from, num);
14201 +@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
14202 + return -EINVAL;
14203 + /* Trying to gain more privileges? */
14204 + if (level > old) {
14205 ++#ifdef CONFIG_GRKERNSEC_IO
14206 ++ if (grsec_disable_privio) {
14207 ++ gr_handle_iopl();
14208 ++ return -EPERM;
14209 ++ }
14210 ++#endif
14211 + if (!capable(CAP_SYS_RAWIO))
14212 + return -EPERM;
14213 + }
14214 +diff -urNp linux-3.1.1/arch/x86/kernel/irq_32.c linux-3.1.1/arch/x86/kernel/irq_32.c
14215 +--- linux-3.1.1/arch/x86/kernel/irq_32.c 2011-11-11 15:19:27.000000000 -0500
14216 ++++ linux-3.1.1/arch/x86/kernel/irq_32.c 2011-11-16 18:39:07.000000000 -0500
14217 +@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
14218 + __asm__ __volatile__("andl %%esp,%0" :
14219 + "=r" (sp) : "0" (THREAD_SIZE - 1));
14220 +
14221 +- return sp < (sizeof(struct thread_info) + STACK_WARN);
14222 ++ return sp < STACK_WARN;
14223 + }
14224 +
14225 + static void print_stack_overflow(void)
14226 +@@ -54,8 +54,8 @@ static inline void print_stack_overflow(
14227 + * per-CPU IRQ handling contexts (thread information and stack)
14228 + */
14229 + union irq_ctx {
14230 +- struct thread_info tinfo;
14231 +- u32 stack[THREAD_SIZE/sizeof(u32)];
14232 ++ unsigned long previous_esp;
14233 ++ u32 stack[THREAD_SIZE/sizeof(u32)];
14234 + } __attribute__((aligned(THREAD_SIZE)));
14235 +
14236 + static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
14237 +@@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
14238 + static inline int
14239 + execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
14240 + {
14241 +- union irq_ctx *curctx, *irqctx;
14242 ++ union irq_ctx *irqctx;
14243 + u32 *isp, arg1, arg2;
14244 +
14245 +- curctx = (union irq_ctx *) current_thread_info();
14246 + irqctx = __this_cpu_read(hardirq_ctx);
14247 +
14248 + /*
14249 +@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
14250 + * handler) we can't do that and just have to keep using the
14251 + * current stack (which is the irq stack already after all)
14252 + */
14253 +- if (unlikely(curctx == irqctx))
14254 ++ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14255 + return 0;
14256 +
14257 + /* build the stack frame on the IRQ stack */
14258 +- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14259 +- irqctx->tinfo.task = curctx->tinfo.task;
14260 +- irqctx->tinfo.previous_esp = current_stack_pointer;
14261 ++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14262 ++ irqctx->previous_esp = current_stack_pointer;
14263 +
14264 +- /*
14265 +- * Copy the softirq bits in preempt_count so that the
14266 +- * softirq checks work in the hardirq context.
14267 +- */
14268 +- irqctx->tinfo.preempt_count =
14269 +- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
14270 +- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
14271 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
14272 ++ __set_fs(MAKE_MM_SEG(0));
14273 ++#endif
14274 +
14275 + if (unlikely(overflow))
14276 + call_on_stack(print_stack_overflow, isp);
14277 +@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
14278 + : "0" (irq), "1" (desc), "2" (isp),
14279 + "D" (desc->handle_irq)
14280 + : "memory", "cc", "ecx");
14281 ++
14282 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
14283 ++ __set_fs(current_thread_info()->addr_limit);
14284 ++#endif
14285 ++
14286 + return 1;
14287 + }
14288 +
14289 +@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
14290 + */
14291 + void __cpuinit irq_ctx_init(int cpu)
14292 + {
14293 +- union irq_ctx *irqctx;
14294 +-
14295 + if (per_cpu(hardirq_ctx, cpu))
14296 + return;
14297 +
14298 +- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
14299 +- THREAD_FLAGS,
14300 +- THREAD_ORDER));
14301 +- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
14302 +- irqctx->tinfo.cpu = cpu;
14303 +- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
14304 +- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14305 +-
14306 +- per_cpu(hardirq_ctx, cpu) = irqctx;
14307 +-
14308 +- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
14309 +- THREAD_FLAGS,
14310 +- THREAD_ORDER));
14311 +- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
14312 +- irqctx->tinfo.cpu = cpu;
14313 +- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14314 +-
14315 +- per_cpu(softirq_ctx, cpu) = irqctx;
14316 ++ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
14317 ++ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
14318 +
14319 + printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
14320 + cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
14321 +@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
14322 + asmlinkage void do_softirq(void)
14323 + {
14324 + unsigned long flags;
14325 +- struct thread_info *curctx;
14326 + union irq_ctx *irqctx;
14327 + u32 *isp;
14328 +
14329 +@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
14330 + local_irq_save(flags);
14331 +
14332 + if (local_softirq_pending()) {
14333 +- curctx = current_thread_info();
14334 + irqctx = __this_cpu_read(softirq_ctx);
14335 +- irqctx->tinfo.task = curctx->task;
14336 +- irqctx->tinfo.previous_esp = current_stack_pointer;
14337 ++ irqctx->previous_esp = current_stack_pointer;
14338 +
14339 + /* build the stack frame on the softirq stack */
14340 +- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14341 ++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14342 ++
14343 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
14344 ++ __set_fs(MAKE_MM_SEG(0));
14345 ++#endif
14346 +
14347 + call_on_stack(__do_softirq, isp);
14348 ++
14349 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
14350 ++ __set_fs(current_thread_info()->addr_limit);
14351 ++#endif
14352 ++
14353 + /*
14354 + * Shouldn't happen, we returned above if in_interrupt():
14355 + */
14356 +diff -urNp linux-3.1.1/arch/x86/kernel/irq.c linux-3.1.1/arch/x86/kernel/irq.c
14357 +--- linux-3.1.1/arch/x86/kernel/irq.c 2011-11-11 15:19:27.000000000 -0500
14358 ++++ linux-3.1.1/arch/x86/kernel/irq.c 2011-11-16 18:39:07.000000000 -0500
14359 +@@ -17,7 +17,7 @@
14360 + #include <asm/mce.h>
14361 + #include <asm/hw_irq.h>
14362 +
14363 +-atomic_t irq_err_count;
14364 ++atomic_unchecked_t irq_err_count;
14365 +
14366 + /* Function pointer for generic interrupt vector handling */
14367 + void (*x86_platform_ipi_callback)(void) = NULL;
14368 +@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
14369 + seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
14370 + seq_printf(p, " Machine check polls\n");
14371 + #endif
14372 +- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
14373 ++ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
14374 + #if defined(CONFIG_X86_IO_APIC)
14375 +- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
14376 ++ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
14377 + #endif
14378 + return 0;
14379 + }
14380 +@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
14381 +
14382 + u64 arch_irq_stat(void)
14383 + {
14384 +- u64 sum = atomic_read(&irq_err_count);
14385 ++ u64 sum = atomic_read_unchecked(&irq_err_count);
14386 +
14387 + #ifdef CONFIG_X86_IO_APIC
14388 +- sum += atomic_read(&irq_mis_count);
14389 ++ sum += atomic_read_unchecked(&irq_mis_count);
14390 + #endif
14391 + return sum;
14392 + }
14393 +diff -urNp linux-3.1.1/arch/x86/kernel/kgdb.c linux-3.1.1/arch/x86/kernel/kgdb.c
14394 +--- linux-3.1.1/arch/x86/kernel/kgdb.c 2011-11-11 15:19:27.000000000 -0500
14395 ++++ linux-3.1.1/arch/x86/kernel/kgdb.c 2011-11-16 18:39:07.000000000 -0500
14396 +@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
14397 + #ifdef CONFIG_X86_32
14398 + switch (regno) {
14399 + case GDB_SS:
14400 +- if (!user_mode_vm(regs))
14401 ++ if (!user_mode(regs))
14402 + *(unsigned long *)mem = __KERNEL_DS;
14403 + break;
14404 + case GDB_SP:
14405 +- if (!user_mode_vm(regs))
14406 ++ if (!user_mode(regs))
14407 + *(unsigned long *)mem = kernel_stack_pointer(regs);
14408 + break;
14409 + case GDB_GS:
14410 +@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
14411 + case 'k':
14412 + /* clear the trace bit */
14413 + linux_regs->flags &= ~X86_EFLAGS_TF;
14414 +- atomic_set(&kgdb_cpu_doing_single_step, -1);
14415 ++ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
14416 +
14417 + /* set the trace bit if we're stepping */
14418 + if (remcomInBuffer[0] == 's') {
14419 + linux_regs->flags |= X86_EFLAGS_TF;
14420 +- atomic_set(&kgdb_cpu_doing_single_step,
14421 ++ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
14422 + raw_smp_processor_id());
14423 + }
14424 +
14425 +@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
14426 + return NOTIFY_DONE;
14427 +
14428 + case DIE_DEBUG:
14429 +- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
14430 ++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
14431 + if (user_mode(regs))
14432 + return single_step_cont(regs, args);
14433 + break;
14434 +diff -urNp linux-3.1.1/arch/x86/kernel/kprobes.c linux-3.1.1/arch/x86/kernel/kprobes.c
14435 +--- linux-3.1.1/arch/x86/kernel/kprobes.c 2011-11-11 15:19:27.000000000 -0500
14436 ++++ linux-3.1.1/arch/x86/kernel/kprobes.c 2011-11-16 18:39:07.000000000 -0500
14437 +@@ -117,8 +117,11 @@ static void __kprobes __synthesize_relat
14438 + } __attribute__((packed)) *insn;
14439 +
14440 + insn = (struct __arch_relative_insn *)from;
14441 ++
14442 ++ pax_open_kernel();
14443 + insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
14444 + insn->op = op;
14445 ++ pax_close_kernel();
14446 + }
14447 +
14448 + /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
14449 +@@ -155,7 +158,7 @@ static int __kprobes can_boost(kprobe_op
14450 + kprobe_opcode_t opcode;
14451 + kprobe_opcode_t *orig_opcodes = opcodes;
14452 +
14453 +- if (search_exception_tables((unsigned long)opcodes))
14454 ++ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
14455 + return 0; /* Page fault may occur on this address. */
14456 +
14457 + retry:
14458 +@@ -316,7 +319,9 @@ static int __kprobes __copy_instruction(
14459 + }
14460 + }
14461 + insn_get_length(&insn);
14462 ++ pax_open_kernel();
14463 + memcpy(dest, insn.kaddr, insn.length);
14464 ++ pax_close_kernel();
14465 +
14466 + #ifdef CONFIG_X86_64
14467 + if (insn_rip_relative(&insn)) {
14468 +@@ -340,7 +345,9 @@ static int __kprobes __copy_instruction(
14469 + (u8 *) dest;
14470 + BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
14471 + disp = (u8 *) dest + insn_offset_displacement(&insn);
14472 ++ pax_open_kernel();
14473 + *(s32 *) disp = (s32) newdisp;
14474 ++ pax_close_kernel();
14475 + }
14476 + #endif
14477 + return insn.length;
14478 +@@ -354,12 +361,12 @@ static void __kprobes arch_copy_kprobe(s
14479 + */
14480 + __copy_instruction(p->ainsn.insn, p->addr, 0);
14481 +
14482 +- if (can_boost(p->addr))
14483 ++ if (can_boost(ktla_ktva(p->addr)))
14484 + p->ainsn.boostable = 0;
14485 + else
14486 + p->ainsn.boostable = -1;
14487 +
14488 +- p->opcode = *p->addr;
14489 ++ p->opcode = *(ktla_ktva(p->addr));
14490 + }
14491 +
14492 + int __kprobes arch_prepare_kprobe(struct kprobe *p)
14493 +@@ -476,7 +483,7 @@ static void __kprobes setup_singlestep(s
14494 + * nor set current_kprobe, because it doesn't use single
14495 + * stepping.
14496 + */
14497 +- regs->ip = (unsigned long)p->ainsn.insn;
14498 ++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14499 + preempt_enable_no_resched();
14500 + return;
14501 + }
14502 +@@ -495,7 +502,7 @@ static void __kprobes setup_singlestep(s
14503 + if (p->opcode == BREAKPOINT_INSTRUCTION)
14504 + regs->ip = (unsigned long)p->addr;
14505 + else
14506 +- regs->ip = (unsigned long)p->ainsn.insn;
14507 ++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14508 + }
14509 +
14510 + /*
14511 +@@ -574,7 +581,7 @@ static int __kprobes kprobe_handler(stru
14512 + setup_singlestep(p, regs, kcb, 0);
14513 + return 1;
14514 + }
14515 +- } else if (*addr != BREAKPOINT_INSTRUCTION) {
14516 ++ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
14517 + /*
14518 + * The breakpoint instruction was removed right
14519 + * after we hit it. Another cpu has removed
14520 +@@ -682,6 +689,9 @@ static void __used __kprobes kretprobe_t
14521 + " movq %rax, 152(%rsp)\n"
14522 + RESTORE_REGS_STRING
14523 + " popfq\n"
14524 ++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
14525 ++ " btsq $63,(%rsp)\n"
14526 ++#endif
14527 + #else
14528 + " pushf\n"
14529 + SAVE_REGS_STRING
14530 +@@ -819,7 +829,7 @@ static void __kprobes resume_execution(s
14531 + struct pt_regs *regs, struct kprobe_ctlblk *kcb)
14532 + {
14533 + unsigned long *tos = stack_addr(regs);
14534 +- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
14535 ++ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
14536 + unsigned long orig_ip = (unsigned long)p->addr;
14537 + kprobe_opcode_t *insn = p->ainsn.insn;
14538 +
14539 +@@ -1001,7 +1011,7 @@ int __kprobes kprobe_exceptions_notify(s
14540 + struct die_args *args = data;
14541 + int ret = NOTIFY_DONE;
14542 +
14543 +- if (args->regs && user_mode_vm(args->regs))
14544 ++ if (args->regs && user_mode(args->regs))
14545 + return ret;
14546 +
14547 + switch (val) {
14548 +@@ -1383,7 +1393,7 @@ int __kprobes arch_prepare_optimized_kpr
14549 + * Verify if the address gap is in 2GB range, because this uses
14550 + * a relative jump.
14551 + */
14552 +- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
14553 ++ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
14554 + if (abs(rel) > 0x7fffffff)
14555 + return -ERANGE;
14556 +
14557 +@@ -1404,11 +1414,11 @@ int __kprobes arch_prepare_optimized_kpr
14558 + synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
14559 +
14560 + /* Set probe function call */
14561 +- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
14562 ++ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
14563 +
14564 + /* Set returning jmp instruction at the tail of out-of-line buffer */
14565 + synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
14566 +- (u8 *)op->kp.addr + op->optinsn.size);
14567 ++ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
14568 +
14569 + flush_icache_range((unsigned long) buf,
14570 + (unsigned long) buf + TMPL_END_IDX +
14571 +@@ -1430,7 +1440,7 @@ static void __kprobes setup_optimize_kpr
14572 + ((long)op->kp.addr + RELATIVEJUMP_SIZE));
14573 +
14574 + /* Backup instructions which will be replaced by jump address */
14575 +- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
14576 ++ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
14577 + RELATIVE_ADDR_SIZE);
14578 +
14579 + insn_buf[0] = RELATIVEJUMP_OPCODE;
14580 +diff -urNp linux-3.1.1/arch/x86/kernel/kvm.c linux-3.1.1/arch/x86/kernel/kvm.c
14581 +--- linux-3.1.1/arch/x86/kernel/kvm.c 2011-11-11 15:19:27.000000000 -0500
14582 ++++ linux-3.1.1/arch/x86/kernel/kvm.c 2011-11-16 18:39:07.000000000 -0500
14583 +@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(vo
14584 + pv_mmu_ops.set_pud = kvm_set_pud;
14585 + #if PAGETABLE_LEVELS == 4
14586 + pv_mmu_ops.set_pgd = kvm_set_pgd;
14587 ++ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
14588 + #endif
14589 + #endif
14590 + pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
14591 +diff -urNp linux-3.1.1/arch/x86/kernel/ldt.c linux-3.1.1/arch/x86/kernel/ldt.c
14592 +--- linux-3.1.1/arch/x86/kernel/ldt.c 2011-11-11 15:19:27.000000000 -0500
14593 ++++ linux-3.1.1/arch/x86/kernel/ldt.c 2011-11-16 18:39:07.000000000 -0500
14594 +@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
14595 + if (reload) {
14596 + #ifdef CONFIG_SMP
14597 + preempt_disable();
14598 +- load_LDT(pc);
14599 ++ load_LDT_nolock(pc);
14600 + if (!cpumask_equal(mm_cpumask(current->mm),
14601 + cpumask_of(smp_processor_id())))
14602 + smp_call_function(flush_ldt, current->mm, 1);
14603 + preempt_enable();
14604 + #else
14605 +- load_LDT(pc);
14606 ++ load_LDT_nolock(pc);
14607 + #endif
14608 + }
14609 + if (oldsize) {
14610 +@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
14611 + return err;
14612 +
14613 + for (i = 0; i < old->size; i++)
14614 +- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
14615 ++ write_ldt_entry(new->ldt, i, old->ldt + i);
14616 + return 0;
14617 + }
14618 +
14619 +@@ -116,6 +116,24 @@ int init_new_context(struct task_struct
14620 + retval = copy_ldt(&mm->context, &old_mm->context);
14621 + mutex_unlock(&old_mm->context.lock);
14622 + }
14623 ++
14624 ++ if (tsk == current) {
14625 ++ mm->context.vdso = 0;
14626 ++
14627 ++#ifdef CONFIG_X86_32
14628 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
14629 ++ mm->context.user_cs_base = 0UL;
14630 ++ mm->context.user_cs_limit = ~0UL;
14631 ++
14632 ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14633 ++ cpus_clear(mm->context.cpu_user_cs_mask);
14634 ++#endif
14635 ++
14636 ++#endif
14637 ++#endif
14638 ++
14639 ++ }
14640 ++
14641 + return retval;
14642 + }
14643 +
14644 +@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
14645 + }
14646 + }
14647 +
14648 ++#ifdef CONFIG_PAX_SEGMEXEC
14649 ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
14650 ++ error = -EINVAL;
14651 ++ goto out_unlock;
14652 ++ }
14653 ++#endif
14654 ++
14655 + fill_ldt(&ldt, &ldt_info);
14656 + if (oldmode)
14657 + ldt.avl = 0;
14658 +diff -urNp linux-3.1.1/arch/x86/kernel/machine_kexec_32.c linux-3.1.1/arch/x86/kernel/machine_kexec_32.c
14659 +--- linux-3.1.1/arch/x86/kernel/machine_kexec_32.c 2011-11-11 15:19:27.000000000 -0500
14660 ++++ linux-3.1.1/arch/x86/kernel/machine_kexec_32.c 2011-11-16 18:39:07.000000000 -0500
14661 +@@ -27,7 +27,7 @@
14662 + #include <asm/cacheflush.h>
14663 + #include <asm/debugreg.h>
14664 +
14665 +-static void set_idt(void *newidt, __u16 limit)
14666 ++static void set_idt(struct desc_struct *newidt, __u16 limit)
14667 + {
14668 + struct desc_ptr curidt;
14669 +
14670 +@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
14671 + }
14672 +
14673 +
14674 +-static void set_gdt(void *newgdt, __u16 limit)
14675 ++static void set_gdt(struct desc_struct *newgdt, __u16 limit)
14676 + {
14677 + struct desc_ptr curgdt;
14678 +
14679 +@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
14680 + }
14681 +
14682 + control_page = page_address(image->control_code_page);
14683 +- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
14684 ++ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
14685 +
14686 + relocate_kernel_ptr = control_page;
14687 + page_list[PA_CONTROL_PAGE] = __pa(control_page);
14688 +diff -urNp linux-3.1.1/arch/x86/kernel/microcode_intel.c linux-3.1.1/arch/x86/kernel/microcode_intel.c
14689 +--- linux-3.1.1/arch/x86/kernel/microcode_intel.c 2011-11-11 15:19:27.000000000 -0500
14690 ++++ linux-3.1.1/arch/x86/kernel/microcode_intel.c 2011-11-16 18:39:07.000000000 -0500
14691 +@@ -440,13 +440,13 @@ static enum ucode_state request_microcod
14692 +
14693 + static int get_ucode_user(void *to, const void *from, size_t n)
14694 + {
14695 +- return copy_from_user(to, from, n);
14696 ++ return copy_from_user(to, (const void __force_user *)from, n);
14697 + }
14698 +
14699 + static enum ucode_state
14700 + request_microcode_user(int cpu, const void __user *buf, size_t size)
14701 + {
14702 +- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
14703 ++ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
14704 + }
14705 +
14706 + static void microcode_fini_cpu(int cpu)
14707 +diff -urNp linux-3.1.1/arch/x86/kernel/module.c linux-3.1.1/arch/x86/kernel/module.c
14708 +--- linux-3.1.1/arch/x86/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
14709 ++++ linux-3.1.1/arch/x86/kernel/module.c 2011-11-16 18:39:07.000000000 -0500
14710 +@@ -36,15 +36,60 @@
14711 + #define DEBUGP(fmt...)
14712 + #endif
14713 +
14714 +-void *module_alloc(unsigned long size)
14715 ++static inline void *__module_alloc(unsigned long size, pgprot_t prot)
14716 + {
14717 + if (PAGE_ALIGN(size) > MODULES_LEN)
14718 + return NULL;
14719 + return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
14720 +- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
14721 ++ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
14722 + -1, __builtin_return_address(0));
14723 + }
14724 +
14725 ++void *module_alloc(unsigned long size)
14726 ++{
14727 ++
14728 ++#ifdef CONFIG_PAX_KERNEXEC
14729 ++ return __module_alloc(size, PAGE_KERNEL);
14730 ++#else
14731 ++ return __module_alloc(size, PAGE_KERNEL_EXEC);
14732 ++#endif
14733 ++
14734 ++}
14735 ++
14736 ++#ifdef CONFIG_PAX_KERNEXEC
14737 ++#ifdef CONFIG_X86_32
14738 ++void *module_alloc_exec(unsigned long size)
14739 ++{
14740 ++ struct vm_struct *area;
14741 ++
14742 ++ if (size == 0)
14743 ++ return NULL;
14744 ++
14745 ++ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
14746 ++ return area ? area->addr : NULL;
14747 ++}
14748 ++EXPORT_SYMBOL(module_alloc_exec);
14749 ++
14750 ++void module_free_exec(struct module *mod, void *module_region)
14751 ++{
14752 ++ vunmap(module_region);
14753 ++}
14754 ++EXPORT_SYMBOL(module_free_exec);
14755 ++#else
14756 ++void module_free_exec(struct module *mod, void *module_region)
14757 ++{
14758 ++ module_free(mod, module_region);
14759 ++}
14760 ++EXPORT_SYMBOL(module_free_exec);
14761 ++
14762 ++void *module_alloc_exec(unsigned long size)
14763 ++{
14764 ++ return __module_alloc(size, PAGE_KERNEL_RX);
14765 ++}
14766 ++EXPORT_SYMBOL(module_alloc_exec);
14767 ++#endif
14768 ++#endif
14769 ++
14770 + #ifdef CONFIG_X86_32
14771 + int apply_relocate(Elf32_Shdr *sechdrs,
14772 + const char *strtab,
14773 +@@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14774 + unsigned int i;
14775 + Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
14776 + Elf32_Sym *sym;
14777 +- uint32_t *location;
14778 ++ uint32_t *plocation, location;
14779 +
14780 + DEBUGP("Applying relocate section %u to %u\n", relsec,
14781 + sechdrs[relsec].sh_info);
14782 + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
14783 + /* This is where to make the change */
14784 +- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
14785 +- + rel[i].r_offset;
14786 ++ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
14787 ++ location = (uint32_t)plocation;
14788 ++ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
14789 ++ plocation = ktla_ktva((void *)plocation);
14790 + /* This is the symbol it is referring to. Note that all
14791 + undefined symbols have been resolved. */
14792 + sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
14793 +@@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
14794 + switch (ELF32_R_TYPE(rel[i].r_info)) {
14795 + case R_386_32:
14796 + /* We add the value into the location given */
14797 +- *location += sym->st_value;
14798 ++ pax_open_kernel();
14799 ++ *plocation += sym->st_value;
14800 ++ pax_close_kernel();
14801 + break;
14802 + case R_386_PC32:
14803 + /* Add the value, subtract its postition */
14804 +- *location += sym->st_value - (uint32_t)location;
14805 ++ pax_open_kernel();
14806 ++ *plocation += sym->st_value - location;
14807 ++ pax_close_kernel();
14808 + break;
14809 + default:
14810 + printk(KERN_ERR "module %s: Unknown relocation: %u\n",
14811 +@@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
14812 + case R_X86_64_NONE:
14813 + break;
14814 + case R_X86_64_64:
14815 ++ pax_open_kernel();
14816 + *(u64 *)loc = val;
14817 ++ pax_close_kernel();
14818 + break;
14819 + case R_X86_64_32:
14820 ++ pax_open_kernel();
14821 + *(u32 *)loc = val;
14822 ++ pax_close_kernel();
14823 + if (val != *(u32 *)loc)
14824 + goto overflow;
14825 + break;
14826 + case R_X86_64_32S:
14827 ++ pax_open_kernel();
14828 + *(s32 *)loc = val;
14829 ++ pax_close_kernel();
14830 + if ((s64)val != *(s32 *)loc)
14831 + goto overflow;
14832 + break;
14833 + case R_X86_64_PC32:
14834 + val -= (u64)loc;
14835 ++ pax_open_kernel();
14836 + *(u32 *)loc = val;
14837 ++ pax_close_kernel();
14838 ++
14839 + #if 0
14840 + if ((s64)val != *(s32 *)loc)
14841 + goto overflow;
14842 +diff -urNp linux-3.1.1/arch/x86/kernel/paravirt.c linux-3.1.1/arch/x86/kernel/paravirt.c
14843 +--- linux-3.1.1/arch/x86/kernel/paravirt.c 2011-11-11 15:19:27.000000000 -0500
14844 ++++ linux-3.1.1/arch/x86/kernel/paravirt.c 2011-11-17 18:29:42.000000000 -0500
14845 +@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
14846 + {
14847 + return x;
14848 + }
14849 ++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14850 ++PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
14851 ++#endif
14852 +
14853 + void __init default_banner(void)
14854 + {
14855 +@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
14856 + .pv_lock_ops = pv_lock_ops,
14857 + #endif
14858 + };
14859 ++
14860 ++ pax_track_stack();
14861 ++
14862 + return *((void **)&tmpl + type);
14863 + }
14864 +
14865 +@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
14866 + if (opfunc == NULL)
14867 + /* If there's no function, patch it with a ud2a (BUG) */
14868 + ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
14869 +- else if (opfunc == _paravirt_nop)
14870 ++ else if (opfunc == (void *)_paravirt_nop)
14871 + /* If the operation is a nop, then nop the callsite */
14872 + ret = paravirt_patch_nop();
14873 +
14874 + /* identity functions just return their single argument */
14875 +- else if (opfunc == _paravirt_ident_32)
14876 ++ else if (opfunc == (void *)_paravirt_ident_32)
14877 + ret = paravirt_patch_ident_32(insnbuf, len);
14878 +- else if (opfunc == _paravirt_ident_64)
14879 ++ else if (opfunc == (void *)_paravirt_ident_64)
14880 + ret = paravirt_patch_ident_64(insnbuf, len);
14881 ++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
14882 ++ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
14883 ++ ret = paravirt_patch_ident_64(insnbuf, len);
14884 ++#endif
14885 +
14886 + else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
14887 + type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
14888 +@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
14889 + if (insn_len > len || start == NULL)
14890 + insn_len = len;
14891 + else
14892 +- memcpy(insnbuf, start, insn_len);
14893 ++ memcpy(insnbuf, ktla_ktva(start), insn_len);
14894 +
14895 + return insn_len;
14896 + }
14897 +@@ -302,7 +312,7 @@ void arch_flush_lazy_mmu_mode(void)
14898 + preempt_enable();
14899 + }
14900 +
14901 +-struct pv_info pv_info = {
14902 ++struct pv_info pv_info __read_only = {
14903 + .name = "bare hardware",
14904 + .paravirt_enabled = 0,
14905 + .kernel_rpl = 0,
14906 +@@ -313,16 +323,16 @@ struct pv_info pv_info = {
14907 + #endif
14908 + };
14909 +
14910 +-struct pv_init_ops pv_init_ops = {
14911 ++struct pv_init_ops pv_init_ops __read_only = {
14912 + .patch = native_patch,
14913 + };
14914 +
14915 +-struct pv_time_ops pv_time_ops = {
14916 ++struct pv_time_ops pv_time_ops __read_only = {
14917 + .sched_clock = native_sched_clock,
14918 + .steal_clock = native_steal_clock,
14919 + };
14920 +
14921 +-struct pv_irq_ops pv_irq_ops = {
14922 ++struct pv_irq_ops pv_irq_ops __read_only = {
14923 + .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
14924 + .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
14925 + .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
14926 +@@ -334,7 +344,7 @@ struct pv_irq_ops pv_irq_ops = {
14927 + #endif
14928 + };
14929 +
14930 +-struct pv_cpu_ops pv_cpu_ops = {
14931 ++struct pv_cpu_ops pv_cpu_ops __read_only = {
14932 + .cpuid = native_cpuid,
14933 + .get_debugreg = native_get_debugreg,
14934 + .set_debugreg = native_set_debugreg,
14935 +@@ -395,21 +405,26 @@ struct pv_cpu_ops pv_cpu_ops = {
14936 + .end_context_switch = paravirt_nop,
14937 + };
14938 +
14939 +-struct pv_apic_ops pv_apic_ops = {
14940 ++struct pv_apic_ops pv_apic_ops __read_only = {
14941 + #ifdef CONFIG_X86_LOCAL_APIC
14942 + .startup_ipi_hook = paravirt_nop,
14943 + #endif
14944 + };
14945 +
14946 +-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
14947 ++#ifdef CONFIG_X86_32
14948 ++#ifdef CONFIG_X86_PAE
14949 ++/* 64-bit pagetable entries */
14950 ++#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
14951 ++#else
14952 + /* 32-bit pagetable entries */
14953 + #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
14954 ++#endif
14955 + #else
14956 + /* 64-bit pagetable entries */
14957 + #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
14958 + #endif
14959 +
14960 +-struct pv_mmu_ops pv_mmu_ops = {
14961 ++struct pv_mmu_ops pv_mmu_ops __read_only = {
14962 +
14963 + .read_cr2 = native_read_cr2,
14964 + .write_cr2 = native_write_cr2,
14965 +@@ -459,6 +474,7 @@ struct pv_mmu_ops pv_mmu_ops = {
14966 + .make_pud = PTE_IDENT,
14967 +
14968 + .set_pgd = native_set_pgd,
14969 ++ .set_pgd_batched = native_set_pgd_batched,
14970 + #endif
14971 + #endif /* PAGETABLE_LEVELS >= 3 */
14972 +
14973 +@@ -478,6 +494,12 @@ struct pv_mmu_ops pv_mmu_ops = {
14974 + },
14975 +
14976 + .set_fixmap = native_set_fixmap,
14977 ++
14978 ++#ifdef CONFIG_PAX_KERNEXEC
14979 ++ .pax_open_kernel = native_pax_open_kernel,
14980 ++ .pax_close_kernel = native_pax_close_kernel,
14981 ++#endif
14982 ++
14983 + };
14984 +
14985 + EXPORT_SYMBOL_GPL(pv_time_ops);
14986 +diff -urNp linux-3.1.1/arch/x86/kernel/paravirt-spinlocks.c linux-3.1.1/arch/x86/kernel/paravirt-spinlocks.c
14987 +--- linux-3.1.1/arch/x86/kernel/paravirt-spinlocks.c 2011-11-11 15:19:27.000000000 -0500
14988 ++++ linux-3.1.1/arch/x86/kernel/paravirt-spinlocks.c 2011-11-16 18:39:07.000000000 -0500
14989 +@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14990 + arch_spin_lock(lock);
14991 + }
14992 +
14993 +-struct pv_lock_ops pv_lock_ops = {
14994 ++struct pv_lock_ops pv_lock_ops __read_only = {
14995 + #ifdef CONFIG_SMP
14996 + .spin_is_locked = __ticket_spin_is_locked,
14997 + .spin_is_contended = __ticket_spin_is_contended,
14998 +diff -urNp linux-3.1.1/arch/x86/kernel/pci-iommu_table.c linux-3.1.1/arch/x86/kernel/pci-iommu_table.c
14999 +--- linux-3.1.1/arch/x86/kernel/pci-iommu_table.c 2011-11-11 15:19:27.000000000 -0500
15000 ++++ linux-3.1.1/arch/x86/kernel/pci-iommu_table.c 2011-11-16 18:40:08.000000000 -0500
15001 +@@ -2,7 +2,7 @@
15002 + #include <asm/iommu_table.h>
15003 + #include <linux/string.h>
15004 + #include <linux/kallsyms.h>
15005 +-
15006 ++#include <linux/sched.h>
15007 +
15008 + #define DEBUG 1
15009 +
15010 +@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct i
15011 + {
15012 + struct iommu_table_entry *p, *q, *x;
15013 +
15014 ++ pax_track_stack();
15015 ++
15016 + /* Simple cyclic dependency checker. */
15017 + for (p = start; p < finish; p++) {
15018 + q = find_dependents_of(start, finish, p);
15019 +diff -urNp linux-3.1.1/arch/x86/kernel/process_32.c linux-3.1.1/arch/x86/kernel/process_32.c
15020 +--- linux-3.1.1/arch/x86/kernel/process_32.c 2011-11-11 15:19:27.000000000 -0500
15021 ++++ linux-3.1.1/arch/x86/kernel/process_32.c 2011-11-16 18:39:07.000000000 -0500
15022 +@@ -66,6 +66,7 @@ asmlinkage void ret_from_fork(void) __as
15023 + unsigned long thread_saved_pc(struct task_struct *tsk)
15024 + {
15025 + return ((unsigned long *)tsk->thread.sp)[3];
15026 ++//XXX return tsk->thread.eip;
15027 + }
15028 +
15029 + #ifndef CONFIG_SMP
15030 +@@ -128,15 +129,14 @@ void __show_regs(struct pt_regs *regs, i
15031 + unsigned long sp;
15032 + unsigned short ss, gs;
15033 +
15034 +- if (user_mode_vm(regs)) {
15035 ++ if (user_mode(regs)) {
15036 + sp = regs->sp;
15037 + ss = regs->ss & 0xffff;
15038 +- gs = get_user_gs(regs);
15039 + } else {
15040 + sp = kernel_stack_pointer(regs);
15041 + savesegment(ss, ss);
15042 +- savesegment(gs, gs);
15043 + }
15044 ++ gs = get_user_gs(regs);
15045 +
15046 + show_regs_common();
15047 +
15048 +@@ -198,13 +198,14 @@ int copy_thread(unsigned long clone_flag
15049 + struct task_struct *tsk;
15050 + int err;
15051 +
15052 +- childregs = task_pt_regs(p);
15053 ++ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
15054 + *childregs = *regs;
15055 + childregs->ax = 0;
15056 + childregs->sp = sp;
15057 +
15058 + p->thread.sp = (unsigned long) childregs;
15059 + p->thread.sp0 = (unsigned long) (childregs+1);
15060 ++ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15061 +
15062 + p->thread.ip = (unsigned long) ret_from_fork;
15063 +
15064 +@@ -294,7 +295,7 @@ __switch_to(struct task_struct *prev_p,
15065 + struct thread_struct *prev = &prev_p->thread,
15066 + *next = &next_p->thread;
15067 + int cpu = smp_processor_id();
15068 +- struct tss_struct *tss = &per_cpu(init_tss, cpu);
15069 ++ struct tss_struct *tss = init_tss + cpu;
15070 + bool preload_fpu;
15071 +
15072 + /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
15073 +@@ -329,6 +330,10 @@ __switch_to(struct task_struct *prev_p,
15074 + */
15075 + lazy_save_gs(prev->gs);
15076 +
15077 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
15078 ++ __set_fs(task_thread_info(next_p)->addr_limit);
15079 ++#endif
15080 ++
15081 + /*
15082 + * Load the per-thread Thread-Local Storage descriptor.
15083 + */
15084 +@@ -364,6 +369,9 @@ __switch_to(struct task_struct *prev_p,
15085 + */
15086 + arch_end_context_switch(next_p);
15087 +
15088 ++ percpu_write(current_task, next_p);
15089 ++ percpu_write(current_tinfo, &next_p->tinfo);
15090 ++
15091 + if (preload_fpu)
15092 + __math_state_restore();
15093 +
15094 +@@ -373,8 +381,6 @@ __switch_to(struct task_struct *prev_p,
15095 + if (prev->gs | next->gs)
15096 + lazy_load_gs(next->gs);
15097 +
15098 +- percpu_write(current_task, next_p);
15099 +-
15100 + return prev_p;
15101 + }
15102 +
15103 +@@ -404,4 +410,3 @@ unsigned long get_wchan(struct task_stru
15104 + } while (count++ < 16);
15105 + return 0;
15106 + }
15107 +-
15108 +diff -urNp linux-3.1.1/arch/x86/kernel/process_64.c linux-3.1.1/arch/x86/kernel/process_64.c
15109 +--- linux-3.1.1/arch/x86/kernel/process_64.c 2011-11-11 15:19:27.000000000 -0500
15110 ++++ linux-3.1.1/arch/x86/kernel/process_64.c 2011-11-16 18:39:07.000000000 -0500
15111 +@@ -88,7 +88,7 @@ static void __exit_idle(void)
15112 + void exit_idle(void)
15113 + {
15114 + /* idle loop has pid 0 */
15115 +- if (current->pid)
15116 ++ if (task_pid_nr(current))
15117 + return;
15118 + __exit_idle();
15119 + }
15120 +@@ -262,8 +262,7 @@ int copy_thread(unsigned long clone_flag
15121 + struct pt_regs *childregs;
15122 + struct task_struct *me = current;
15123 +
15124 +- childregs = ((struct pt_regs *)
15125 +- (THREAD_SIZE + task_stack_page(p))) - 1;
15126 ++ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
15127 + *childregs = *regs;
15128 +
15129 + childregs->ax = 0;
15130 +@@ -275,6 +274,7 @@ int copy_thread(unsigned long clone_flag
15131 + p->thread.sp = (unsigned long) childregs;
15132 + p->thread.sp0 = (unsigned long) (childregs+1);
15133 + p->thread.usersp = me->thread.usersp;
15134 ++ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15135 +
15136 + set_tsk_thread_flag(p, TIF_FORK);
15137 +
15138 +@@ -377,7 +377,7 @@ __switch_to(struct task_struct *prev_p,
15139 + struct thread_struct *prev = &prev_p->thread;
15140 + struct thread_struct *next = &next_p->thread;
15141 + int cpu = smp_processor_id();
15142 +- struct tss_struct *tss = &per_cpu(init_tss, cpu);
15143 ++ struct tss_struct *tss = init_tss + cpu;
15144 + unsigned fsindex, gsindex;
15145 + bool preload_fpu;
15146 +
15147 +@@ -473,10 +473,9 @@ __switch_to(struct task_struct *prev_p,
15148 + prev->usersp = percpu_read(old_rsp);
15149 + percpu_write(old_rsp, next->usersp);
15150 + percpu_write(current_task, next_p);
15151 ++ percpu_write(current_tinfo, &next_p->tinfo);
15152 +
15153 +- percpu_write(kernel_stack,
15154 +- (unsigned long)task_stack_page(next_p) +
15155 +- THREAD_SIZE - KERNEL_STACK_OFFSET);
15156 ++ percpu_write(kernel_stack, next->sp0);
15157 +
15158 + /*
15159 + * Now maybe reload the debug registers and handle I/O bitmaps
15160 +@@ -538,12 +537,11 @@ unsigned long get_wchan(struct task_stru
15161 + if (!p || p == current || p->state == TASK_RUNNING)
15162 + return 0;
15163 + stack = (unsigned long)task_stack_page(p);
15164 +- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15165 ++ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15166 + return 0;
15167 + fp = *(u64 *)(p->thread.sp);
15168 + do {
15169 +- if (fp < (unsigned long)stack ||
15170 +- fp >= (unsigned long)stack+THREAD_SIZE)
15171 ++ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15172 + return 0;
15173 + ip = *(u64 *)(fp+8);
15174 + if (!in_sched_functions(ip))
15175 +diff -urNp linux-3.1.1/arch/x86/kernel/process.c linux-3.1.1/arch/x86/kernel/process.c
15176 +--- linux-3.1.1/arch/x86/kernel/process.c 2011-11-11 15:19:27.000000000 -0500
15177 ++++ linux-3.1.1/arch/x86/kernel/process.c 2011-11-16 18:39:07.000000000 -0500
15178 +@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
15179 +
15180 + void free_thread_info(struct thread_info *ti)
15181 + {
15182 +- free_thread_xstate(ti->task);
15183 + free_pages((unsigned long)ti, get_order(THREAD_SIZE));
15184 + }
15185 +
15186 ++static struct kmem_cache *task_struct_cachep;
15187 ++
15188 + void arch_task_cache_init(void)
15189 + {
15190 +- task_xstate_cachep =
15191 +- kmem_cache_create("task_xstate", xstate_size,
15192 ++ /* create a slab on which task_structs can be allocated */
15193 ++ task_struct_cachep =
15194 ++ kmem_cache_create("task_struct", sizeof(struct task_struct),
15195 ++ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
15196 ++
15197 ++ task_xstate_cachep =
15198 ++ kmem_cache_create("task_xstate", xstate_size,
15199 + __alignof__(union thread_xstate),
15200 +- SLAB_PANIC | SLAB_NOTRACK, NULL);
15201 ++ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
15202 ++}
15203 ++
15204 ++struct task_struct *alloc_task_struct_node(int node)
15205 ++{
15206 ++ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
15207 ++}
15208 ++
15209 ++void free_task_struct(struct task_struct *task)
15210 ++{
15211 ++ free_thread_xstate(task);
15212 ++ kmem_cache_free(task_struct_cachep, task);
15213 + }
15214 +
15215 + /*
15216 +@@ -70,7 +87,7 @@ void exit_thread(void)
15217 + unsigned long *bp = t->io_bitmap_ptr;
15218 +
15219 + if (bp) {
15220 +- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
15221 ++ struct tss_struct *tss = init_tss + get_cpu();
15222 +
15223 + t->io_bitmap_ptr = NULL;
15224 + clear_thread_flag(TIF_IO_BITMAP);
15225 +@@ -106,7 +123,7 @@ void show_regs_common(void)
15226 +
15227 + printk(KERN_CONT "\n");
15228 + printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
15229 +- current->pid, current->comm, print_tainted(),
15230 ++ task_pid_nr(current), current->comm, print_tainted(),
15231 + init_utsname()->release,
15232 + (int)strcspn(init_utsname()->version, " "),
15233 + init_utsname()->version);
15234 +@@ -120,6 +137,9 @@ void flush_thread(void)
15235 + {
15236 + struct task_struct *tsk = current;
15237 +
15238 ++#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15239 ++ loadsegment(gs, 0);
15240 ++#endif
15241 + flush_ptrace_hw_breakpoint(tsk);
15242 + memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
15243 + /*
15244 +@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
15245 + regs.di = (unsigned long) arg;
15246 +
15247 + #ifdef CONFIG_X86_32
15248 +- regs.ds = __USER_DS;
15249 +- regs.es = __USER_DS;
15250 ++ regs.ds = __KERNEL_DS;
15251 ++ regs.es = __KERNEL_DS;
15252 + regs.fs = __KERNEL_PERCPU;
15253 +- regs.gs = __KERNEL_STACK_CANARY;
15254 ++ savesegment(gs, regs.gs);
15255 + #else
15256 + regs.ss = __KERNEL_DS;
15257 + #endif
15258 +@@ -403,7 +423,7 @@ void default_idle(void)
15259 + EXPORT_SYMBOL(default_idle);
15260 + #endif
15261 +
15262 +-void stop_this_cpu(void *dummy)
15263 ++__noreturn void stop_this_cpu(void *dummy)
15264 + {
15265 + local_irq_disable();
15266 + /*
15267 +@@ -645,16 +665,37 @@ static int __init idle_setup(char *str)
15268 + }
15269 + early_param("idle", idle_setup);
15270 +
15271 +-unsigned long arch_align_stack(unsigned long sp)
15272 ++#ifdef CONFIG_PAX_RANDKSTACK
15273 ++void pax_randomize_kstack(struct pt_regs *regs)
15274 + {
15275 +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
15276 +- sp -= get_random_int() % 8192;
15277 +- return sp & ~0xf;
15278 +-}
15279 ++ struct thread_struct *thread = &current->thread;
15280 ++ unsigned long time;
15281 +
15282 +-unsigned long arch_randomize_brk(struct mm_struct *mm)
15283 +-{
15284 +- unsigned long range_end = mm->brk + 0x02000000;
15285 +- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
15286 +-}
15287 ++ if (!randomize_va_space)
15288 ++ return;
15289 ++
15290 ++ if (v8086_mode(regs))
15291 ++ return;
15292 +
15293 ++ rdtscl(time);
15294 ++
15295 ++ /* P4 seems to return a 0 LSB, ignore it */
15296 ++#ifdef CONFIG_MPENTIUM4
15297 ++ time &= 0x3EUL;
15298 ++ time <<= 2;
15299 ++#elif defined(CONFIG_X86_64)
15300 ++ time &= 0xFUL;
15301 ++ time <<= 4;
15302 ++#else
15303 ++ time &= 0x1FUL;
15304 ++ time <<= 3;
15305 ++#endif
15306 ++
15307 ++ thread->sp0 ^= time;
15308 ++ load_sp0(init_tss + smp_processor_id(), thread);
15309 ++
15310 ++#ifdef CONFIG_X86_64
15311 ++ percpu_write(kernel_stack, thread->sp0);
15312 ++#endif
15313 ++}
15314 ++#endif
15315 +diff -urNp linux-3.1.1/arch/x86/kernel/ptrace.c linux-3.1.1/arch/x86/kernel/ptrace.c
15316 +--- linux-3.1.1/arch/x86/kernel/ptrace.c 2011-11-11 15:19:27.000000000 -0500
15317 ++++ linux-3.1.1/arch/x86/kernel/ptrace.c 2011-11-16 18:39:07.000000000 -0500
15318 +@@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *chi
15319 + unsigned long addr, unsigned long data)
15320 + {
15321 + int ret;
15322 +- unsigned long __user *datap = (unsigned long __user *)data;
15323 ++ unsigned long __user *datap = (__force unsigned long __user *)data;
15324 +
15325 + switch (request) {
15326 + /* read the word at location addr in the USER area. */
15327 +@@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *chi
15328 + if ((int) addr < 0)
15329 + return -EIO;
15330 + ret = do_get_thread_area(child, addr,
15331 +- (struct user_desc __user *)data);
15332 ++ (__force struct user_desc __user *) data);
15333 + break;
15334 +
15335 + case PTRACE_SET_THREAD_AREA:
15336 + if ((int) addr < 0)
15337 + return -EIO;
15338 + ret = do_set_thread_area(child, addr,
15339 +- (struct user_desc __user *)data, 0);
15340 ++ (__force struct user_desc __user *) data, 0);
15341 + break;
15342 + #endif
15343 +
15344 +@@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct tas
15345 + memset(info, 0, sizeof(*info));
15346 + info->si_signo = SIGTRAP;
15347 + info->si_code = si_code;
15348 +- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
15349 ++ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
15350 + }
15351 +
15352 + void user_single_step_siginfo(struct task_struct *tsk,
15353 +diff -urNp linux-3.1.1/arch/x86/kernel/pvclock.c linux-3.1.1/arch/x86/kernel/pvclock.c
15354 +--- linux-3.1.1/arch/x86/kernel/pvclock.c 2011-11-11 15:19:27.000000000 -0500
15355 ++++ linux-3.1.1/arch/x86/kernel/pvclock.c 2011-11-16 18:39:07.000000000 -0500
15356 +@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
15357 + return pv_tsc_khz;
15358 + }
15359 +
15360 +-static atomic64_t last_value = ATOMIC64_INIT(0);
15361 ++static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
15362 +
15363 + void pvclock_resume(void)
15364 + {
15365 +- atomic64_set(&last_value, 0);
15366 ++ atomic64_set_unchecked(&last_value, 0);
15367 + }
15368 +
15369 + cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
15370 +@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
15371 + * updating at the same time, and one of them could be slightly behind,
15372 + * making the assumption that last_value always go forward fail to hold.
15373 + */
15374 +- last = atomic64_read(&last_value);
15375 ++ last = atomic64_read_unchecked(&last_value);
15376 + do {
15377 + if (ret < last)
15378 + return last;
15379 +- last = atomic64_cmpxchg(&last_value, last, ret);
15380 ++ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
15381 + } while (unlikely(last != ret));
15382 +
15383 + return ret;
15384 +diff -urNp linux-3.1.1/arch/x86/kernel/reboot.c linux-3.1.1/arch/x86/kernel/reboot.c
15385 +--- linux-3.1.1/arch/x86/kernel/reboot.c 2011-11-11 15:19:27.000000000 -0500
15386 ++++ linux-3.1.1/arch/x86/kernel/reboot.c 2011-11-16 18:39:07.000000000 -0500
15387 +@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
15388 + EXPORT_SYMBOL(pm_power_off);
15389 +
15390 + static const struct desc_ptr no_idt = {};
15391 +-static int reboot_mode;
15392 ++static unsigned short reboot_mode;
15393 + enum reboot_type reboot_type = BOOT_ACPI;
15394 + int reboot_force;
15395 +
15396 +@@ -315,13 +315,17 @@ core_initcall(reboot_init);
15397 + extern const unsigned char machine_real_restart_asm[];
15398 + extern const u64 machine_real_restart_gdt[3];
15399 +
15400 +-void machine_real_restart(unsigned int type)
15401 ++__noreturn void machine_real_restart(unsigned int type)
15402 + {
15403 + void *restart_va;
15404 + unsigned long restart_pa;
15405 +- void (*restart_lowmem)(unsigned int);
15406 ++ void (* __noreturn restart_lowmem)(unsigned int);
15407 + u64 *lowmem_gdt;
15408 +
15409 ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15410 ++ struct desc_struct *gdt;
15411 ++#endif
15412 ++
15413 + local_irq_disable();
15414 +
15415 + /* Write zero to CMOS register number 0x0f, which the BIOS POST
15416 +@@ -347,14 +351,14 @@ void machine_real_restart(unsigned int t
15417 + boot)". This seems like a fairly standard thing that gets set by
15418 + REBOOT.COM programs, and the previous reset routine did this
15419 + too. */
15420 +- *((unsigned short *)0x472) = reboot_mode;
15421 ++ *(unsigned short *)(__va(0x472)) = reboot_mode;
15422 +
15423 + /* Patch the GDT in the low memory trampoline */
15424 + lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
15425 +
15426 + restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
15427 + restart_pa = virt_to_phys(restart_va);
15428 +- restart_lowmem = (void (*)(unsigned int))restart_pa;
15429 ++ restart_lowmem = (void *)restart_pa;
15430 +
15431 + /* GDT[0]: GDT self-pointer */
15432 + lowmem_gdt[0] =
15433 +@@ -365,7 +369,33 @@ void machine_real_restart(unsigned int t
15434 + GDT_ENTRY(0x009b, restart_pa, 0xffff);
15435 +
15436 + /* Jump to the identity-mapped low memory code */
15437 ++
15438 ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15439 ++ gdt = get_cpu_gdt_table(smp_processor_id());
15440 ++ pax_open_kernel();
15441 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
15442 ++ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
15443 ++ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
15444 ++ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
15445 ++#endif
15446 ++#ifdef CONFIG_PAX_KERNEXEC
15447 ++ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
15448 ++ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
15449 ++ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
15450 ++ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
15451 ++ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
15452 ++ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
15453 ++#endif
15454 ++ pax_close_kernel();
15455 ++#endif
15456 ++
15457 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15458 ++ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
15459 ++ unreachable();
15460 ++#else
15461 + restart_lowmem(type);
15462 ++#endif
15463 ++
15464 + }
15465 + #ifdef CONFIG_APM_MODULE
15466 + EXPORT_SYMBOL(machine_real_restart);
15467 +@@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_f
15468 + * try to force a triple fault and then cycle between hitting the keyboard
15469 + * controller and doing that
15470 + */
15471 +-static void native_machine_emergency_restart(void)
15472 ++__noreturn static void native_machine_emergency_restart(void)
15473 + {
15474 + int i;
15475 + int attempt = 0;
15476 +@@ -647,13 +677,13 @@ void native_machine_shutdown(void)
15477 + #endif
15478 + }
15479 +
15480 +-static void __machine_emergency_restart(int emergency)
15481 ++static __noreturn void __machine_emergency_restart(int emergency)
15482 + {
15483 + reboot_emergency = emergency;
15484 + machine_ops.emergency_restart();
15485 + }
15486 +
15487 +-static void native_machine_restart(char *__unused)
15488 ++static __noreturn void native_machine_restart(char *__unused)
15489 + {
15490 + printk("machine restart\n");
15491 +
15492 +@@ -662,7 +692,7 @@ static void native_machine_restart(char
15493 + __machine_emergency_restart(0);
15494 + }
15495 +
15496 +-static void native_machine_halt(void)
15497 ++static __noreturn void native_machine_halt(void)
15498 + {
15499 + /* stop other cpus and apics */
15500 + machine_shutdown();
15501 +@@ -673,7 +703,7 @@ static void native_machine_halt(void)
15502 + stop_this_cpu(NULL);
15503 + }
15504 +
15505 +-static void native_machine_power_off(void)
15506 ++__noreturn static void native_machine_power_off(void)
15507 + {
15508 + if (pm_power_off) {
15509 + if (!reboot_force)
15510 +@@ -682,6 +712,7 @@ static void native_machine_power_off(voi
15511 + }
15512 + /* a fallback in case there is no PM info available */
15513 + tboot_shutdown(TB_SHUTDOWN_HALT);
15514 ++ unreachable();
15515 + }
15516 +
15517 + struct machine_ops machine_ops = {
15518 +diff -urNp linux-3.1.1/arch/x86/kernel/setup.c linux-3.1.1/arch/x86/kernel/setup.c
15519 +--- linux-3.1.1/arch/x86/kernel/setup.c 2011-11-11 15:19:27.000000000 -0500
15520 ++++ linux-3.1.1/arch/x86/kernel/setup.c 2011-11-16 18:39:07.000000000 -0500
15521 +@@ -447,7 +447,7 @@ static void __init parse_setup_data(void
15522 +
15523 + switch (data->type) {
15524 + case SETUP_E820_EXT:
15525 +- parse_e820_ext(data);
15526 ++ parse_e820_ext((struct setup_data __force_kernel *)data);
15527 + break;
15528 + case SETUP_DTB:
15529 + add_dtb(pa_data);
15530 +@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
15531 + * area (640->1Mb) as ram even though it is not.
15532 + * take them out.
15533 + */
15534 +- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
15535 ++ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
15536 + sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
15537 + }
15538 +
15539 +@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
15540 +
15541 + if (!boot_params.hdr.root_flags)
15542 + root_mountflags &= ~MS_RDONLY;
15543 +- init_mm.start_code = (unsigned long) _text;
15544 +- init_mm.end_code = (unsigned long) _etext;
15545 ++ init_mm.start_code = ktla_ktva((unsigned long) _text);
15546 ++ init_mm.end_code = ktla_ktva((unsigned long) _etext);
15547 + init_mm.end_data = (unsigned long) _edata;
15548 + init_mm.brk = _brk_end;
15549 +
15550 +- code_resource.start = virt_to_phys(_text);
15551 +- code_resource.end = virt_to_phys(_etext)-1;
15552 +- data_resource.start = virt_to_phys(_etext);
15553 ++ code_resource.start = virt_to_phys(ktla_ktva(_text));
15554 ++ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
15555 ++ data_resource.start = virt_to_phys(_sdata);
15556 + data_resource.end = virt_to_phys(_edata)-1;
15557 + bss_resource.start = virt_to_phys(&__bss_start);
15558 + bss_resource.end = virt_to_phys(&__bss_stop)-1;
15559 +diff -urNp linux-3.1.1/arch/x86/kernel/setup_percpu.c linux-3.1.1/arch/x86/kernel/setup_percpu.c
15560 +--- linux-3.1.1/arch/x86/kernel/setup_percpu.c 2011-11-11 15:19:27.000000000 -0500
15561 ++++ linux-3.1.1/arch/x86/kernel/setup_percpu.c 2011-11-16 18:39:07.000000000 -0500
15562 +@@ -21,19 +21,17 @@
15563 + #include <asm/cpu.h>
15564 + #include <asm/stackprotector.h>
15565 +
15566 +-DEFINE_PER_CPU(int, cpu_number);
15567 ++#ifdef CONFIG_SMP
15568 ++DEFINE_PER_CPU(unsigned int, cpu_number);
15569 + EXPORT_PER_CPU_SYMBOL(cpu_number);
15570 ++#endif
15571 +
15572 +-#ifdef CONFIG_X86_64
15573 + #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
15574 +-#else
15575 +-#define BOOT_PERCPU_OFFSET 0
15576 +-#endif
15577 +
15578 + DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
15579 + EXPORT_PER_CPU_SYMBOL(this_cpu_off);
15580 +
15581 +-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
15582 ++unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
15583 + [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
15584 + };
15585 + EXPORT_SYMBOL(__per_cpu_offset);
15586 +@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
15587 + {
15588 + #ifdef CONFIG_X86_32
15589 + struct desc_struct gdt;
15590 ++ unsigned long base = per_cpu_offset(cpu);
15591 +
15592 +- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
15593 +- 0x2 | DESCTYPE_S, 0x8);
15594 +- gdt.s = 1;
15595 ++ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
15596 ++ 0x83 | DESCTYPE_S, 0xC);
15597 + write_gdt_entry(get_cpu_gdt_table(cpu),
15598 + GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
15599 + #endif
15600 +@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
15601 + /* alrighty, percpu areas up and running */
15602 + delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
15603 + for_each_possible_cpu(cpu) {
15604 ++#ifdef CONFIG_CC_STACKPROTECTOR
15605 ++#ifdef CONFIG_X86_32
15606 ++ unsigned long canary = per_cpu(stack_canary.canary, cpu);
15607 ++#endif
15608 ++#endif
15609 + per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
15610 + per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
15611 + per_cpu(cpu_number, cpu) = cpu;
15612 +@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
15613 + */
15614 + set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
15615 + #endif
15616 ++#ifdef CONFIG_CC_STACKPROTECTOR
15617 ++#ifdef CONFIG_X86_32
15618 ++ if (!cpu)
15619 ++ per_cpu(stack_canary.canary, cpu) = canary;
15620 ++#endif
15621 ++#endif
15622 + /*
15623 + * Up to this point, the boot CPU has been using .init.data
15624 + * area. Reload any changed state for the boot CPU.
15625 +diff -urNp linux-3.1.1/arch/x86/kernel/signal.c linux-3.1.1/arch/x86/kernel/signal.c
15626 +--- linux-3.1.1/arch/x86/kernel/signal.c 2011-11-11 15:19:27.000000000 -0500
15627 ++++ linux-3.1.1/arch/x86/kernel/signal.c 2011-11-16 19:39:49.000000000 -0500
15628 +@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
15629 + * Align the stack pointer according to the i386 ABI,
15630 + * i.e. so that on function entry ((sp + 4) & 15) == 0.
15631 + */
15632 +- sp = ((sp + 4) & -16ul) - 4;
15633 ++ sp = ((sp - 12) & -16ul) - 4;
15634 + #else /* !CONFIG_X86_32 */
15635 + sp = round_down(sp, 16) - 8;
15636 + #endif
15637 +@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
15638 + * Return an always-bogus address instead so we will die with SIGSEGV.
15639 + */
15640 + if (onsigstack && !likely(on_sig_stack(sp)))
15641 +- return (void __user *)-1L;
15642 ++ return (__force void __user *)-1L;
15643 +
15644 + /* save i387 state */
15645 + if (used_math() && save_i387_xstate(*fpstate) < 0)
15646 +- return (void __user *)-1L;
15647 ++ return (__force void __user *)-1L;
15648 +
15649 + return (void __user *)sp;
15650 + }
15651 +@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
15652 + }
15653 +
15654 + if (current->mm->context.vdso)
15655 +- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15656 ++ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15657 + else
15658 +- restorer = &frame->retcode;
15659 ++ restorer = (void __user *)&frame->retcode;
15660 + if (ka->sa.sa_flags & SA_RESTORER)
15661 + restorer = ka->sa.sa_restorer;
15662 +
15663 +@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
15664 + * reasons and because gdb uses it as a signature to notice
15665 + * signal handler stack frames.
15666 + */
15667 +- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
15668 ++ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
15669 +
15670 + if (err)
15671 + return -EFAULT;
15672 +@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
15673 + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
15674 +
15675 + /* Set up to return from userspace. */
15676 +- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15677 ++ if (current->mm->context.vdso)
15678 ++ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15679 ++ else
15680 ++ restorer = (void __user *)&frame->retcode;
15681 + if (ka->sa.sa_flags & SA_RESTORER)
15682 + restorer = ka->sa.sa_restorer;
15683 + put_user_ex(restorer, &frame->pretcode);
15684 +@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
15685 + * reasons and because gdb uses it as a signature to notice
15686 + * signal handler stack frames.
15687 + */
15688 +- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
15689 ++ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
15690 + } put_user_catch(err);
15691 +
15692 + if (err)
15693 +@@ -762,6 +765,8 @@ static void do_signal(struct pt_regs *re
15694 + siginfo_t info;
15695 + int signr;
15696 +
15697 ++ pax_track_stack();
15698 ++
15699 + /*
15700 + * We want the common case to go fast, which is why we may in certain
15701 + * cases get here from kernel mode. Just return without doing anything
15702 +@@ -769,7 +774,7 @@ static void do_signal(struct pt_regs *re
15703 + * X86_32: vm86 regs switched out by assembly code before reaching
15704 + * here, so testing against kernel CS suffices.
15705 + */
15706 +- if (!user_mode(regs))
15707 ++ if (!user_mode_novm(regs))
15708 + return;
15709 +
15710 + signr = get_signal_to_deliver(&info, &ka, regs, NULL);
15711 +diff -urNp linux-3.1.1/arch/x86/kernel/smpboot.c linux-3.1.1/arch/x86/kernel/smpboot.c
15712 +--- linux-3.1.1/arch/x86/kernel/smpboot.c 2011-11-11 15:19:27.000000000 -0500
15713 ++++ linux-3.1.1/arch/x86/kernel/smpboot.c 2011-11-16 18:39:07.000000000 -0500
15714 +@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
15715 + set_idle_for_cpu(cpu, c_idle.idle);
15716 + do_rest:
15717 + per_cpu(current_task, cpu) = c_idle.idle;
15718 ++ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
15719 + #ifdef CONFIG_X86_32
15720 + /* Stack for startup_32 can be just as for start_secondary onwards */
15721 + irq_ctx_init(cpu);
15722 + #else
15723 + clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
15724 + initial_gs = per_cpu_offset(cpu);
15725 +- per_cpu(kernel_stack, cpu) =
15726 +- (unsigned long)task_stack_page(c_idle.idle) -
15727 +- KERNEL_STACK_OFFSET + THREAD_SIZE;
15728 ++ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
15729 + #endif
15730 ++
15731 ++ pax_open_kernel();
15732 + early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
15733 ++ pax_close_kernel();
15734 ++
15735 + initial_code = (unsigned long)start_secondary;
15736 + stack_start = c_idle.idle->thread.sp;
15737 +
15738 +@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
15739 +
15740 + per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
15741 +
15742 ++#ifdef CONFIG_PAX_PER_CPU_PGD
15743 ++ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
15744 ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15745 ++ KERNEL_PGD_PTRS);
15746 ++#endif
15747 ++
15748 + err = do_boot_cpu(apicid, cpu);
15749 + if (err) {
15750 + pr_debug("do_boot_cpu failed %d\n", err);
15751 +diff -urNp linux-3.1.1/arch/x86/kernel/step.c linux-3.1.1/arch/x86/kernel/step.c
15752 +--- linux-3.1.1/arch/x86/kernel/step.c 2011-11-11 15:19:27.000000000 -0500
15753 ++++ linux-3.1.1/arch/x86/kernel/step.c 2011-11-16 18:39:07.000000000 -0500
15754 +@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
15755 + struct desc_struct *desc;
15756 + unsigned long base;
15757 +
15758 +- seg &= ~7UL;
15759 ++ seg >>= 3;
15760 +
15761 + mutex_lock(&child->mm->context.lock);
15762 +- if (unlikely((seg >> 3) >= child->mm->context.size))
15763 ++ if (unlikely(seg >= child->mm->context.size))
15764 + addr = -1L; /* bogus selector, access would fault */
15765 + else {
15766 + desc = child->mm->context.ldt + seg;
15767 +@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
15768 + addr += base;
15769 + }
15770 + mutex_unlock(&child->mm->context.lock);
15771 +- }
15772 ++ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
15773 ++ addr = ktla_ktva(addr);
15774 +
15775 + return addr;
15776 + }
15777 +@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
15778 + unsigned char opcode[15];
15779 + unsigned long addr = convert_ip_to_linear(child, regs);
15780 +
15781 ++ if (addr == -EINVAL)
15782 ++ return 0;
15783 ++
15784 + copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
15785 + for (i = 0; i < copied; i++) {
15786 + switch (opcode[i]) {
15787 +diff -urNp linux-3.1.1/arch/x86/kernel/syscall_table_32.S linux-3.1.1/arch/x86/kernel/syscall_table_32.S
15788 +--- linux-3.1.1/arch/x86/kernel/syscall_table_32.S 2011-11-11 15:19:27.000000000 -0500
15789 ++++ linux-3.1.1/arch/x86/kernel/syscall_table_32.S 2011-11-16 18:39:07.000000000 -0500
15790 +@@ -1,3 +1,4 @@
15791 ++.section .rodata,"a",@progbits
15792 + ENTRY(sys_call_table)
15793 + .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
15794 + .long sys_exit
15795 +diff -urNp linux-3.1.1/arch/x86/kernel/sys_i386_32.c linux-3.1.1/arch/x86/kernel/sys_i386_32.c
15796 +--- linux-3.1.1/arch/x86/kernel/sys_i386_32.c 2011-11-11 15:19:27.000000000 -0500
15797 ++++ linux-3.1.1/arch/x86/kernel/sys_i386_32.c 2011-11-16 18:39:07.000000000 -0500
15798 +@@ -24,17 +24,224 @@
15799 +
15800 + #include <asm/syscalls.h>
15801 +
15802 +-/*
15803 +- * Do a system call from kernel instead of calling sys_execve so we
15804 +- * end up with proper pt_regs.
15805 +- */
15806 +-int kernel_execve(const char *filename,
15807 +- const char *const argv[],
15808 +- const char *const envp[])
15809 ++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
15810 + {
15811 +- long __res;
15812 +- asm volatile ("int $0x80"
15813 +- : "=a" (__res)
15814 +- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
15815 +- return __res;
15816 ++ unsigned long pax_task_size = TASK_SIZE;
15817 ++
15818 ++#ifdef CONFIG_PAX_SEGMEXEC
15819 ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
15820 ++ pax_task_size = SEGMEXEC_TASK_SIZE;
15821 ++#endif
15822 ++
15823 ++ if (len > pax_task_size || addr > pax_task_size - len)
15824 ++ return -EINVAL;
15825 ++
15826 ++ return 0;
15827 ++}
15828 ++
15829 ++unsigned long
15830 ++arch_get_unmapped_area(struct file *filp, unsigned long addr,
15831 ++ unsigned long len, unsigned long pgoff, unsigned long flags)
15832 ++{
15833 ++ struct mm_struct *mm = current->mm;
15834 ++ struct vm_area_struct *vma;
15835 ++ unsigned long start_addr, pax_task_size = TASK_SIZE;
15836 ++
15837 ++#ifdef CONFIG_PAX_SEGMEXEC
15838 ++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15839 ++ pax_task_size = SEGMEXEC_TASK_SIZE;
15840 ++#endif
15841 ++
15842 ++ pax_task_size -= PAGE_SIZE;
15843 ++
15844 ++ if (len > pax_task_size)
15845 ++ return -ENOMEM;
15846 ++
15847 ++ if (flags & MAP_FIXED)
15848 ++ return addr;
15849 ++
15850 ++#ifdef CONFIG_PAX_RANDMMAP
15851 ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15852 ++#endif
15853 ++
15854 ++ if (addr) {
15855 ++ addr = PAGE_ALIGN(addr);
15856 ++ if (pax_task_size - len >= addr) {
15857 ++ vma = find_vma(mm, addr);
15858 ++ if (check_heap_stack_gap(vma, addr, len))
15859 ++ return addr;
15860 ++ }
15861 ++ }
15862 ++ if (len > mm->cached_hole_size) {
15863 ++ start_addr = addr = mm->free_area_cache;
15864 ++ } else {
15865 ++ start_addr = addr = mm->mmap_base;
15866 ++ mm->cached_hole_size = 0;
15867 ++ }
15868 ++
15869 ++#ifdef CONFIG_PAX_PAGEEXEC
15870 ++ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
15871 ++ start_addr = 0x00110000UL;
15872 ++
15873 ++#ifdef CONFIG_PAX_RANDMMAP
15874 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
15875 ++ start_addr += mm->delta_mmap & 0x03FFF000UL;
15876 ++#endif
15877 ++
15878 ++ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
15879 ++ start_addr = addr = mm->mmap_base;
15880 ++ else
15881 ++ addr = start_addr;
15882 ++ }
15883 ++#endif
15884 ++
15885 ++full_search:
15886 ++ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
15887 ++ /* At this point: (!vma || addr < vma->vm_end). */
15888 ++ if (pax_task_size - len < addr) {
15889 ++ /*
15890 ++ * Start a new search - just in case we missed
15891 ++ * some holes.
15892 ++ */
15893 ++ if (start_addr != mm->mmap_base) {
15894 ++ start_addr = addr = mm->mmap_base;
15895 ++ mm->cached_hole_size = 0;
15896 ++ goto full_search;
15897 ++ }
15898 ++ return -ENOMEM;
15899 ++ }
15900 ++ if (check_heap_stack_gap(vma, addr, len))
15901 ++ break;
15902 ++ if (addr + mm->cached_hole_size < vma->vm_start)
15903 ++ mm->cached_hole_size = vma->vm_start - addr;
15904 ++ addr = vma->vm_end;
15905 ++ if (mm->start_brk <= addr && addr < mm->mmap_base) {
15906 ++ start_addr = addr = mm->mmap_base;
15907 ++ mm->cached_hole_size = 0;
15908 ++ goto full_search;
15909 ++ }
15910 ++ }
15911 ++
15912 ++ /*
15913 ++ * Remember the place where we stopped the search:
15914 ++ */
15915 ++ mm->free_area_cache = addr + len;
15916 ++ return addr;
15917 ++}
15918 ++
15919 ++unsigned long
15920 ++arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
15921 ++ const unsigned long len, const unsigned long pgoff,
15922 ++ const unsigned long flags)
15923 ++{
15924 ++ struct vm_area_struct *vma;
15925 ++ struct mm_struct *mm = current->mm;
15926 ++ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
15927 ++
15928 ++#ifdef CONFIG_PAX_SEGMEXEC
15929 ++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
15930 ++ pax_task_size = SEGMEXEC_TASK_SIZE;
15931 ++#endif
15932 ++
15933 ++ pax_task_size -= PAGE_SIZE;
15934 ++
15935 ++ /* requested length too big for entire address space */
15936 ++ if (len > pax_task_size)
15937 ++ return -ENOMEM;
15938 ++
15939 ++ if (flags & MAP_FIXED)
15940 ++ return addr;
15941 ++
15942 ++#ifdef CONFIG_PAX_PAGEEXEC
15943 ++ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
15944 ++ goto bottomup;
15945 ++#endif
15946 ++
15947 ++#ifdef CONFIG_PAX_RANDMMAP
15948 ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15949 ++#endif
15950 ++
15951 ++ /* requesting a specific address */
15952 ++ if (addr) {
15953 ++ addr = PAGE_ALIGN(addr);
15954 ++ if (pax_task_size - len >= addr) {
15955 ++ vma = find_vma(mm, addr);
15956 ++ if (check_heap_stack_gap(vma, addr, len))
15957 ++ return addr;
15958 ++ }
15959 ++ }
15960 ++
15961 ++ /* check if free_area_cache is useful for us */
15962 ++ if (len <= mm->cached_hole_size) {
15963 ++ mm->cached_hole_size = 0;
15964 ++ mm->free_area_cache = mm->mmap_base;
15965 ++ }
15966 ++
15967 ++ /* either no address requested or can't fit in requested address hole */
15968 ++ addr = mm->free_area_cache;
15969 ++
15970 ++ /* make sure it can fit in the remaining address space */
15971 ++ if (addr > len) {
15972 ++ vma = find_vma(mm, addr-len);
15973 ++ if (check_heap_stack_gap(vma, addr - len, len))
15974 ++ /* remember the address as a hint for next time */
15975 ++ return (mm->free_area_cache = addr-len);
15976 ++ }
15977 ++
15978 ++ if (mm->mmap_base < len)
15979 ++ goto bottomup;
15980 ++
15981 ++ addr = mm->mmap_base-len;
15982 ++
15983 ++ do {
15984 ++ /*
15985 ++ * Lookup failure means no vma is above this address,
15986 ++ * else if new region fits below vma->vm_start,
15987 ++ * return with success:
15988 ++ */
15989 ++ vma = find_vma(mm, addr);
15990 ++ if (check_heap_stack_gap(vma, addr, len))
15991 ++ /* remember the address as a hint for next time */
15992 ++ return (mm->free_area_cache = addr);
15993 ++
15994 ++ /* remember the largest hole we saw so far */
15995 ++ if (addr + mm->cached_hole_size < vma->vm_start)
15996 ++ mm->cached_hole_size = vma->vm_start - addr;
15997 ++
15998 ++ /* try just below the current vma->vm_start */
15999 ++ addr = skip_heap_stack_gap(vma, len);
16000 ++ } while (!IS_ERR_VALUE(addr));
16001 ++
16002 ++bottomup:
16003 ++ /*
16004 ++ * A failed mmap() very likely causes application failure,
16005 ++ * so fall back to the bottom-up function here. This scenario
16006 ++ * can happen with large stack limits and large mmap()
16007 ++ * allocations.
16008 ++ */
16009 ++
16010 ++#ifdef CONFIG_PAX_SEGMEXEC
16011 ++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16012 ++ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
16013 ++ else
16014 ++#endif
16015 ++
16016 ++ mm->mmap_base = TASK_UNMAPPED_BASE;
16017 ++
16018 ++#ifdef CONFIG_PAX_RANDMMAP
16019 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
16020 ++ mm->mmap_base += mm->delta_mmap;
16021 ++#endif
16022 ++
16023 ++ mm->free_area_cache = mm->mmap_base;
16024 ++ mm->cached_hole_size = ~0UL;
16025 ++ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16026 ++ /*
16027 ++ * Restore the topdown base:
16028 ++ */
16029 ++ mm->mmap_base = base;
16030 ++ mm->free_area_cache = base;
16031 ++ mm->cached_hole_size = ~0UL;
16032 ++
16033 ++ return addr;
16034 + }
16035 +diff -urNp linux-3.1.1/arch/x86/kernel/sys_x86_64.c linux-3.1.1/arch/x86/kernel/sys_x86_64.c
16036 +--- linux-3.1.1/arch/x86/kernel/sys_x86_64.c 2011-11-11 15:19:27.000000000 -0500
16037 ++++ linux-3.1.1/arch/x86/kernel/sys_x86_64.c 2011-11-16 18:39:07.000000000 -0500
16038 +@@ -32,8 +32,8 @@ out:
16039 + return error;
16040 + }
16041 +
16042 +-static void find_start_end(unsigned long flags, unsigned long *begin,
16043 +- unsigned long *end)
16044 ++static void find_start_end(struct mm_struct *mm, unsigned long flags,
16045 ++ unsigned long *begin, unsigned long *end)
16046 + {
16047 + if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
16048 + unsigned long new_begin;
16049 +@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
16050 + *begin = new_begin;
16051 + }
16052 + } else {
16053 +- *begin = TASK_UNMAPPED_BASE;
16054 ++ *begin = mm->mmap_base;
16055 + *end = TASK_SIZE;
16056 + }
16057 + }
16058 +@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
16059 + if (flags & MAP_FIXED)
16060 + return addr;
16061 +
16062 +- find_start_end(flags, &begin, &end);
16063 ++ find_start_end(mm, flags, &begin, &end);
16064 +
16065 + if (len > end)
16066 + return -ENOMEM;
16067 +
16068 ++#ifdef CONFIG_PAX_RANDMMAP
16069 ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16070 ++#endif
16071 ++
16072 + if (addr) {
16073 + addr = PAGE_ALIGN(addr);
16074 + vma = find_vma(mm, addr);
16075 +- if (end - len >= addr &&
16076 +- (!vma || addr + len <= vma->vm_start))
16077 ++ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
16078 + return addr;
16079 + }
16080 + if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
16081 +@@ -106,7 +109,7 @@ full_search:
16082 + }
16083 + return -ENOMEM;
16084 + }
16085 +- if (!vma || addr + len <= vma->vm_start) {
16086 ++ if (check_heap_stack_gap(vma, addr, len)) {
16087 + /*
16088 + * Remember the place where we stopped the search:
16089 + */
16090 +@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
16091 + {
16092 + struct vm_area_struct *vma;
16093 + struct mm_struct *mm = current->mm;
16094 +- unsigned long addr = addr0;
16095 ++ unsigned long base = mm->mmap_base, addr = addr0;
16096 +
16097 + /* requested length too big for entire address space */
16098 + if (len > TASK_SIZE)
16099 +@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
16100 + if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
16101 + goto bottomup;
16102 +
16103 ++#ifdef CONFIG_PAX_RANDMMAP
16104 ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16105 ++#endif
16106 ++
16107 + /* requesting a specific address */
16108 + if (addr) {
16109 + addr = PAGE_ALIGN(addr);
16110 +- vma = find_vma(mm, addr);
16111 +- if (TASK_SIZE - len >= addr &&
16112 +- (!vma || addr + len <= vma->vm_start))
16113 +- return addr;
16114 ++ if (TASK_SIZE - len >= addr) {
16115 ++ vma = find_vma(mm, addr);
16116 ++ if (check_heap_stack_gap(vma, addr, len))
16117 ++ return addr;
16118 ++ }
16119 + }
16120 +
16121 + /* check if free_area_cache is useful for us */
16122 +@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
16123 + /* make sure it can fit in the remaining address space */
16124 + if (addr > len) {
16125 + vma = find_vma(mm, addr-len);
16126 +- if (!vma || addr <= vma->vm_start)
16127 ++ if (check_heap_stack_gap(vma, addr - len, len))
16128 + /* remember the address as a hint for next time */
16129 + return mm->free_area_cache = addr-len;
16130 + }
16131 +@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
16132 + * return with success:
16133 + */
16134 + vma = find_vma(mm, addr);
16135 +- if (!vma || addr+len <= vma->vm_start)
16136 ++ if (check_heap_stack_gap(vma, addr, len))
16137 + /* remember the address as a hint for next time */
16138 + return mm->free_area_cache = addr;
16139 +
16140 +@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
16141 + mm->cached_hole_size = vma->vm_start - addr;
16142 +
16143 + /* try just below the current vma->vm_start */
16144 +- addr = vma->vm_start-len;
16145 +- } while (len < vma->vm_start);
16146 ++ addr = skip_heap_stack_gap(vma, len);
16147 ++ } while (!IS_ERR_VALUE(addr));
16148 +
16149 + bottomup:
16150 + /*
16151 +@@ -198,13 +206,21 @@ bottomup:
16152 + * can happen with large stack limits and large mmap()
16153 + * allocations.
16154 + */
16155 ++ mm->mmap_base = TASK_UNMAPPED_BASE;
16156 ++
16157 ++#ifdef CONFIG_PAX_RANDMMAP
16158 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
16159 ++ mm->mmap_base += mm->delta_mmap;
16160 ++#endif
16161 ++
16162 ++ mm->free_area_cache = mm->mmap_base;
16163 + mm->cached_hole_size = ~0UL;
16164 +- mm->free_area_cache = TASK_UNMAPPED_BASE;
16165 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16166 + /*
16167 + * Restore the topdown base:
16168 + */
16169 +- mm->free_area_cache = mm->mmap_base;
16170 ++ mm->mmap_base = base;
16171 ++ mm->free_area_cache = base;
16172 + mm->cached_hole_size = ~0UL;
16173 +
16174 + return addr;
16175 +diff -urNp linux-3.1.1/arch/x86/kernel/tboot.c linux-3.1.1/arch/x86/kernel/tboot.c
16176 +--- linux-3.1.1/arch/x86/kernel/tboot.c 2011-11-11 15:19:27.000000000 -0500
16177 ++++ linux-3.1.1/arch/x86/kernel/tboot.c 2011-11-16 18:39:07.000000000 -0500
16178 +@@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
16179 +
16180 + void tboot_shutdown(u32 shutdown_type)
16181 + {
16182 +- void (*shutdown)(void);
16183 ++ void (* __noreturn shutdown)(void);
16184 +
16185 + if (!tboot_enabled())
16186 + return;
16187 +@@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
16188 +
16189 + switch_to_tboot_pt();
16190 +
16191 +- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
16192 ++ shutdown = (void *)tboot->shutdown_entry;
16193 + shutdown();
16194 +
16195 + /* should not reach here */
16196 +@@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
16197 + tboot_shutdown(acpi_shutdown_map[sleep_state]);
16198 + }
16199 +
16200 +-static atomic_t ap_wfs_count;
16201 ++static atomic_unchecked_t ap_wfs_count;
16202 +
16203 + static int tboot_wait_for_aps(int num_aps)
16204 + {
16205 +@@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(
16206 + {
16207 + switch (action) {
16208 + case CPU_DYING:
16209 +- atomic_inc(&ap_wfs_count);
16210 ++ atomic_inc_unchecked(&ap_wfs_count);
16211 + if (num_online_cpus() == 1)
16212 +- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
16213 ++ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
16214 + return NOTIFY_BAD;
16215 + break;
16216 + }
16217 +@@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
16218 +
16219 + tboot_create_trampoline();
16220 +
16221 +- atomic_set(&ap_wfs_count, 0);
16222 ++ atomic_set_unchecked(&ap_wfs_count, 0);
16223 + register_hotcpu_notifier(&tboot_cpu_notifier);
16224 + return 0;
16225 + }
16226 +diff -urNp linux-3.1.1/arch/x86/kernel/time.c linux-3.1.1/arch/x86/kernel/time.c
16227 +--- linux-3.1.1/arch/x86/kernel/time.c 2011-11-11 15:19:27.000000000 -0500
16228 ++++ linux-3.1.1/arch/x86/kernel/time.c 2011-11-16 18:39:07.000000000 -0500
16229 +@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
16230 + {
16231 + unsigned long pc = instruction_pointer(regs);
16232 +
16233 +- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
16234 ++ if (!user_mode(regs) && in_lock_functions(pc)) {
16235 + #ifdef CONFIG_FRAME_POINTER
16236 +- return *(unsigned long *)(regs->bp + sizeof(long));
16237 ++ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
16238 + #else
16239 + unsigned long *sp =
16240 + (unsigned long *)kernel_stack_pointer(regs);
16241 +@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
16242 + * or above a saved flags. Eflags has bits 22-31 zero,
16243 + * kernel addresses don't.
16244 + */
16245 ++
16246 ++#ifdef CONFIG_PAX_KERNEXEC
16247 ++ return ktla_ktva(sp[0]);
16248 ++#else
16249 + if (sp[0] >> 22)
16250 + return sp[0];
16251 + if (sp[1] >> 22)
16252 + return sp[1];
16253 + #endif
16254 ++
16255 ++#endif
16256 + }
16257 + return pc;
16258 + }
16259 +diff -urNp linux-3.1.1/arch/x86/kernel/tls.c linux-3.1.1/arch/x86/kernel/tls.c
16260 +--- linux-3.1.1/arch/x86/kernel/tls.c 2011-11-11 15:19:27.000000000 -0500
16261 ++++ linux-3.1.1/arch/x86/kernel/tls.c 2011-11-16 18:39:07.000000000 -0500
16262 +@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
16263 + if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
16264 + return -EINVAL;
16265 +
16266 ++#ifdef CONFIG_PAX_SEGMEXEC
16267 ++ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
16268 ++ return -EINVAL;
16269 ++#endif
16270 ++
16271 + set_tls_desc(p, idx, &info, 1);
16272 +
16273 + return 0;
16274 +diff -urNp linux-3.1.1/arch/x86/kernel/trampoline_32.S linux-3.1.1/arch/x86/kernel/trampoline_32.S
16275 +--- linux-3.1.1/arch/x86/kernel/trampoline_32.S 2011-11-11 15:19:27.000000000 -0500
16276 ++++ linux-3.1.1/arch/x86/kernel/trampoline_32.S 2011-11-16 18:39:07.000000000 -0500
16277 +@@ -32,6 +32,12 @@
16278 + #include <asm/segment.h>
16279 + #include <asm/page_types.h>
16280 +
16281 ++#ifdef CONFIG_PAX_KERNEXEC
16282 ++#define ta(X) (X)
16283 ++#else
16284 ++#define ta(X) ((X) - __PAGE_OFFSET)
16285 ++#endif
16286 ++
16287 + #ifdef CONFIG_SMP
16288 +
16289 + .section ".x86_trampoline","a"
16290 +@@ -62,7 +68,7 @@ r_base = .
16291 + inc %ax # protected mode (PE) bit
16292 + lmsw %ax # into protected mode
16293 + # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
16294 +- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
16295 ++ ljmpl $__BOOT_CS, $ta(startup_32_smp)
16296 +
16297 + # These need to be in the same 64K segment as the above;
16298 + # hence we don't use the boot_gdt_descr defined in head.S
16299 +diff -urNp linux-3.1.1/arch/x86/kernel/trampoline_64.S linux-3.1.1/arch/x86/kernel/trampoline_64.S
16300 +--- linux-3.1.1/arch/x86/kernel/trampoline_64.S 2011-11-11 15:19:27.000000000 -0500
16301 ++++ linux-3.1.1/arch/x86/kernel/trampoline_64.S 2011-11-16 18:39:07.000000000 -0500
16302 +@@ -90,7 +90,7 @@ startup_32:
16303 + movl $__KERNEL_DS, %eax # Initialize the %ds segment register
16304 + movl %eax, %ds
16305 +
16306 +- movl $X86_CR4_PAE, %eax
16307 ++ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16308 + movl %eax, %cr4 # Enable PAE mode
16309 +
16310 + # Setup trampoline 4 level pagetables
16311 +@@ -138,7 +138,7 @@ tidt:
16312 + # so the kernel can live anywhere
16313 + .balign 4
16314 + tgdt:
16315 +- .short tgdt_end - tgdt # gdt limit
16316 ++ .short tgdt_end - tgdt - 1 # gdt limit
16317 + .long tgdt - r_base
16318 + .short 0
16319 + .quad 0x00cf9b000000ffff # __KERNEL32_CS
16320 +diff -urNp linux-3.1.1/arch/x86/kernel/traps.c linux-3.1.1/arch/x86/kernel/traps.c
16321 +--- linux-3.1.1/arch/x86/kernel/traps.c 2011-11-11 15:19:27.000000000 -0500
16322 ++++ linux-3.1.1/arch/x86/kernel/traps.c 2011-11-16 18:39:07.000000000 -0500
16323 +@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
16324 +
16325 + /* Do we ignore FPU interrupts ? */
16326 + char ignore_fpu_irq;
16327 +-
16328 +-/*
16329 +- * The IDT has to be page-aligned to simplify the Pentium
16330 +- * F0 0F bug workaround.
16331 +- */
16332 +-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
16333 + #endif
16334 +
16335 + DECLARE_BITMAP(used_vectors, NR_VECTORS);
16336 +@@ -117,13 +111,13 @@ static inline void preempt_conditional_c
16337 + }
16338 +
16339 + static void __kprobes
16340 +-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
16341 ++do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
16342 + long error_code, siginfo_t *info)
16343 + {
16344 + struct task_struct *tsk = current;
16345 +
16346 + #ifdef CONFIG_X86_32
16347 +- if (regs->flags & X86_VM_MASK) {
16348 ++ if (v8086_mode(regs)) {
16349 + /*
16350 + * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
16351 + * On nmi (interrupt 2), do_trap should not be called.
16352 +@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
16353 + }
16354 + #endif
16355 +
16356 +- if (!user_mode(regs))
16357 ++ if (!user_mode_novm(regs))
16358 + goto kernel_trap;
16359 +
16360 + #ifdef CONFIG_X86_32
16361 +@@ -157,7 +151,7 @@ trap_signal:
16362 + printk_ratelimit()) {
16363 + printk(KERN_INFO
16364 + "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
16365 +- tsk->comm, tsk->pid, str,
16366 ++ tsk->comm, task_pid_nr(tsk), str,
16367 + regs->ip, regs->sp, error_code);
16368 + print_vma_addr(" in ", regs->ip);
16369 + printk("\n");
16370 +@@ -174,8 +168,20 @@ kernel_trap:
16371 + if (!fixup_exception(regs)) {
16372 + tsk->thread.error_code = error_code;
16373 + tsk->thread.trap_no = trapnr;
16374 ++
16375 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16376 ++ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
16377 ++ str = "PAX: suspicious stack segment fault";
16378 ++#endif
16379 ++
16380 + die(str, regs, error_code);
16381 + }
16382 ++
16383 ++#ifdef CONFIG_PAX_REFCOUNT
16384 ++ if (trapnr == 4)
16385 ++ pax_report_refcount_overflow(regs);
16386 ++#endif
16387 ++
16388 + return;
16389 +
16390 + #ifdef CONFIG_X86_32
16391 +@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
16392 + conditional_sti(regs);
16393 +
16394 + #ifdef CONFIG_X86_32
16395 +- if (regs->flags & X86_VM_MASK)
16396 ++ if (v8086_mode(regs))
16397 + goto gp_in_vm86;
16398 + #endif
16399 +
16400 + tsk = current;
16401 +- if (!user_mode(regs))
16402 ++ if (!user_mode_novm(regs))
16403 + goto gp_in_kernel;
16404 +
16405 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
16406 ++ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
16407 ++ struct mm_struct *mm = tsk->mm;
16408 ++ unsigned long limit;
16409 ++
16410 ++ down_write(&mm->mmap_sem);
16411 ++ limit = mm->context.user_cs_limit;
16412 ++ if (limit < TASK_SIZE) {
16413 ++ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
16414 ++ up_write(&mm->mmap_sem);
16415 ++ return;
16416 ++ }
16417 ++ up_write(&mm->mmap_sem);
16418 ++ }
16419 ++#endif
16420 ++
16421 + tsk->thread.error_code = error_code;
16422 + tsk->thread.trap_no = 13;
16423 +
16424 +@@ -304,6 +326,13 @@ gp_in_kernel:
16425 + if (notify_die(DIE_GPF, "general protection fault", regs,
16426 + error_code, 13, SIGSEGV) == NOTIFY_STOP)
16427 + return;
16428 ++
16429 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16430 ++ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
16431 ++ die("PAX: suspicious general protection fault", regs, error_code);
16432 ++ else
16433 ++#endif
16434 ++
16435 + die("general protection fault", regs, error_code);
16436 + }
16437 +
16438 +@@ -433,6 +462,17 @@ static notrace __kprobes void default_do
16439 + dotraplinkage notrace __kprobes void
16440 + do_nmi(struct pt_regs *regs, long error_code)
16441 + {
16442 ++
16443 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16444 ++ if (!user_mode(regs)) {
16445 ++ unsigned long cs = regs->cs & 0xFFFF;
16446 ++ unsigned long ip = ktva_ktla(regs->ip);
16447 ++
16448 ++ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16449 ++ regs->ip = ip;
16450 ++ }
16451 ++#endif
16452 ++
16453 + nmi_enter();
16454 +
16455 + inc_irq_stat(__nmi_count);
16456 +@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
16457 + /* It's safe to allow irq's after DR6 has been saved */
16458 + preempt_conditional_sti(regs);
16459 +
16460 +- if (regs->flags & X86_VM_MASK) {
16461 ++ if (v8086_mode(regs)) {
16462 + handle_vm86_trap((struct kernel_vm86_regs *) regs,
16463 + error_code, 1);
16464 + preempt_conditional_cli(regs);
16465 +@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
16466 + * We already checked v86 mode above, so we can check for kernel mode
16467 + * by just checking the CPL of CS.
16468 + */
16469 +- if ((dr6 & DR_STEP) && !user_mode(regs)) {
16470 ++ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
16471 + tsk->thread.debugreg6 &= ~DR_STEP;
16472 + set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
16473 + regs->flags &= ~X86_EFLAGS_TF;
16474 +@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
16475 + return;
16476 + conditional_sti(regs);
16477 +
16478 +- if (!user_mode_vm(regs))
16479 ++ if (!user_mode(regs))
16480 + {
16481 + if (!fixup_exception(regs)) {
16482 + task->thread.error_code = error_code;
16483 +@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
16484 + void __math_state_restore(void)
16485 + {
16486 + struct thread_info *thread = current_thread_info();
16487 +- struct task_struct *tsk = thread->task;
16488 ++ struct task_struct *tsk = current;
16489 +
16490 + /*
16491 + * Paranoid restore. send a SIGSEGV if we fail to restore the state.
16492 +@@ -750,8 +790,7 @@ void __math_state_restore(void)
16493 + */
16494 + asmlinkage void math_state_restore(void)
16495 + {
16496 +- struct thread_info *thread = current_thread_info();
16497 +- struct task_struct *tsk = thread->task;
16498 ++ struct task_struct *tsk = current;
16499 +
16500 + if (!tsk_used_math(tsk)) {
16501 + local_irq_enable();
16502 +diff -urNp linux-3.1.1/arch/x86/kernel/verify_cpu.S linux-3.1.1/arch/x86/kernel/verify_cpu.S
16503 +--- linux-3.1.1/arch/x86/kernel/verify_cpu.S 2011-11-11 15:19:27.000000000 -0500
16504 ++++ linux-3.1.1/arch/x86/kernel/verify_cpu.S 2011-11-16 18:40:08.000000000 -0500
16505 +@@ -20,6 +20,7 @@
16506 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
16507 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
16508 + * arch/x86/kernel/head_32.S: processor startup
16509 ++ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
16510 + *
16511 + * verify_cpu, returns the status of longmode and SSE in register %eax.
16512 + * 0: Success 1: Failure
16513 +diff -urNp linux-3.1.1/arch/x86/kernel/vm86_32.c linux-3.1.1/arch/x86/kernel/vm86_32.c
16514 +--- linux-3.1.1/arch/x86/kernel/vm86_32.c 2011-11-11 15:19:27.000000000 -0500
16515 ++++ linux-3.1.1/arch/x86/kernel/vm86_32.c 2011-11-16 18:40:08.000000000 -0500
16516 +@@ -41,6 +41,7 @@
16517 + #include <linux/ptrace.h>
16518 + #include <linux/audit.h>
16519 + #include <linux/stddef.h>
16520 ++#include <linux/grsecurity.h>
16521 +
16522 + #include <asm/uaccess.h>
16523 + #include <asm/io.h>
16524 +@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
16525 + do_exit(SIGSEGV);
16526 + }
16527 +
16528 +- tss = &per_cpu(init_tss, get_cpu());
16529 ++ tss = init_tss + get_cpu();
16530 + current->thread.sp0 = current->thread.saved_sp0;
16531 + current->thread.sysenter_cs = __KERNEL_CS;
16532 + load_sp0(tss, &current->thread);
16533 +@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
16534 + struct task_struct *tsk;
16535 + int tmp, ret = -EPERM;
16536 +
16537 ++#ifdef CONFIG_GRKERNSEC_VM86
16538 ++ if (!capable(CAP_SYS_RAWIO)) {
16539 ++ gr_handle_vm86();
16540 ++ goto out;
16541 ++ }
16542 ++#endif
16543 ++
16544 + tsk = current;
16545 + if (tsk->thread.saved_sp0)
16546 + goto out;
16547 +@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
16548 + int tmp, ret;
16549 + struct vm86plus_struct __user *v86;
16550 +
16551 ++#ifdef CONFIG_GRKERNSEC_VM86
16552 ++ if (!capable(CAP_SYS_RAWIO)) {
16553 ++ gr_handle_vm86();
16554 ++ ret = -EPERM;
16555 ++ goto out;
16556 ++ }
16557 ++#endif
16558 ++
16559 + tsk = current;
16560 + switch (cmd) {
16561 + case VM86_REQUEST_IRQ:
16562 +@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
16563 + tsk->thread.saved_fs = info->regs32->fs;
16564 + tsk->thread.saved_gs = get_user_gs(info->regs32);
16565 +
16566 +- tss = &per_cpu(init_tss, get_cpu());
16567 ++ tss = init_tss + get_cpu();
16568 + tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
16569 + if (cpu_has_sep)
16570 + tsk->thread.sysenter_cs = 0;
16571 +@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
16572 + goto cannot_handle;
16573 + if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
16574 + goto cannot_handle;
16575 +- intr_ptr = (unsigned long __user *) (i << 2);
16576 ++ intr_ptr = (__force unsigned long __user *) (i << 2);
16577 + if (get_user(segoffs, intr_ptr))
16578 + goto cannot_handle;
16579 + if ((segoffs >> 16) == BIOSSEG)
16580 +diff -urNp linux-3.1.1/arch/x86/kernel/vmlinux.lds.S linux-3.1.1/arch/x86/kernel/vmlinux.lds.S
16581 +--- linux-3.1.1/arch/x86/kernel/vmlinux.lds.S 2011-11-11 15:19:27.000000000 -0500
16582 ++++ linux-3.1.1/arch/x86/kernel/vmlinux.lds.S 2011-11-16 18:39:07.000000000 -0500
16583 +@@ -26,6 +26,13 @@
16584 + #include <asm/page_types.h>
16585 + #include <asm/cache.h>
16586 + #include <asm/boot.h>
16587 ++#include <asm/segment.h>
16588 ++
16589 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16590 ++#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
16591 ++#else
16592 ++#define __KERNEL_TEXT_OFFSET 0
16593 ++#endif
16594 +
16595 + #undef i386 /* in case the preprocessor is a 32bit one */
16596 +
16597 +@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
16598 +
16599 + PHDRS {
16600 + text PT_LOAD FLAGS(5); /* R_E */
16601 ++#ifdef CONFIG_X86_32
16602 ++ module PT_LOAD FLAGS(5); /* R_E */
16603 ++#endif
16604 ++#ifdef CONFIG_XEN
16605 ++ rodata PT_LOAD FLAGS(5); /* R_E */
16606 ++#else
16607 ++ rodata PT_LOAD FLAGS(4); /* R__ */
16608 ++#endif
16609 + data PT_LOAD FLAGS(6); /* RW_ */
16610 +-#ifdef CONFIG_X86_64
16611 ++ init.begin PT_LOAD FLAGS(6); /* RW_ */
16612 + #ifdef CONFIG_SMP
16613 + percpu PT_LOAD FLAGS(6); /* RW_ */
16614 + #endif
16615 ++ text.init PT_LOAD FLAGS(5); /* R_E */
16616 ++ text.exit PT_LOAD FLAGS(5); /* R_E */
16617 + init PT_LOAD FLAGS(7); /* RWE */
16618 +-#endif
16619 + note PT_NOTE FLAGS(0); /* ___ */
16620 + }
16621 +
16622 + SECTIONS
16623 + {
16624 + #ifdef CONFIG_X86_32
16625 +- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
16626 +- phys_startup_32 = startup_32 - LOAD_OFFSET;
16627 ++ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
16628 + #else
16629 +- . = __START_KERNEL;
16630 +- phys_startup_64 = startup_64 - LOAD_OFFSET;
16631 ++ . = __START_KERNEL;
16632 + #endif
16633 +
16634 + /* Text and read-only data */
16635 +- .text : AT(ADDR(.text) - LOAD_OFFSET) {
16636 +- _text = .;
16637 ++ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16638 + /* bootstrapping code */
16639 ++#ifdef CONFIG_X86_32
16640 ++ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16641 ++#else
16642 ++ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16643 ++#endif
16644 ++ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
16645 ++ _text = .;
16646 + HEAD_TEXT
16647 + #ifdef CONFIG_X86_32
16648 + . = ALIGN(PAGE_SIZE);
16649 +@@ -108,13 +128,47 @@ SECTIONS
16650 + IRQENTRY_TEXT
16651 + *(.fixup)
16652 + *(.gnu.warning)
16653 +- /* End of text section */
16654 +- _etext = .;
16655 + } :text = 0x9090
16656 +
16657 +- NOTES :text :note
16658 ++ . += __KERNEL_TEXT_OFFSET;
16659 ++
16660 ++#ifdef CONFIG_X86_32
16661 ++ . = ALIGN(PAGE_SIZE);
16662 ++ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
16663 ++
16664 ++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
16665 ++ MODULES_EXEC_VADDR = .;
16666 ++ BYTE(0)
16667 ++ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
16668 ++ . = ALIGN(HPAGE_SIZE);
16669 ++ MODULES_EXEC_END = . - 1;
16670 ++#endif
16671 ++
16672 ++ } :module
16673 ++#endif
16674 ++
16675 ++ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
16676 ++ /* End of text section */
16677 ++ _etext = . - __KERNEL_TEXT_OFFSET;
16678 ++ }
16679 ++
16680 ++#ifdef CONFIG_X86_32
16681 ++ . = ALIGN(PAGE_SIZE);
16682 ++ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
16683 ++ *(.idt)
16684 ++ . = ALIGN(PAGE_SIZE);
16685 ++ *(.empty_zero_page)
16686 ++ *(.initial_pg_fixmap)
16687 ++ *(.initial_pg_pmd)
16688 ++ *(.initial_page_table)
16689 ++ *(.swapper_pg_dir)
16690 ++ } :rodata
16691 ++#endif
16692 ++
16693 ++ . = ALIGN(PAGE_SIZE);
16694 ++ NOTES :rodata :note
16695 +
16696 +- EXCEPTION_TABLE(16) :text = 0x9090
16697 ++ EXCEPTION_TABLE(16) :rodata
16698 +
16699 + #if defined(CONFIG_DEBUG_RODATA)
16700 + /* .text should occupy whole number of pages */
16701 +@@ -126,16 +180,20 @@ SECTIONS
16702 +
16703 + /* Data */
16704 + .data : AT(ADDR(.data) - LOAD_OFFSET) {
16705 ++
16706 ++#ifdef CONFIG_PAX_KERNEXEC
16707 ++ . = ALIGN(HPAGE_SIZE);
16708 ++#else
16709 ++ . = ALIGN(PAGE_SIZE);
16710 ++#endif
16711 ++
16712 + /* Start of data section */
16713 + _sdata = .;
16714 +
16715 + /* init_task */
16716 + INIT_TASK_DATA(THREAD_SIZE)
16717 +
16718 +-#ifdef CONFIG_X86_32
16719 +- /* 32 bit has nosave before _edata */
16720 + NOSAVE_DATA
16721 +-#endif
16722 +
16723 + PAGE_ALIGNED_DATA(PAGE_SIZE)
16724 +
16725 +@@ -176,12 +234,19 @@ SECTIONS
16726 + #endif /* CONFIG_X86_64 */
16727 +
16728 + /* Init code and data - will be freed after init */
16729 +- . = ALIGN(PAGE_SIZE);
16730 + .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
16731 ++ BYTE(0)
16732 ++
16733 ++#ifdef CONFIG_PAX_KERNEXEC
16734 ++ . = ALIGN(HPAGE_SIZE);
16735 ++#else
16736 ++ . = ALIGN(PAGE_SIZE);
16737 ++#endif
16738 ++
16739 + __init_begin = .; /* paired with __init_end */
16740 +- }
16741 ++ } :init.begin
16742 +
16743 +-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
16744 ++#ifdef CONFIG_SMP
16745 + /*
16746 + * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
16747 + * output PHDR, so the next output section - .init.text - should
16748 +@@ -190,12 +255,27 @@ SECTIONS
16749 + PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
16750 + #endif
16751 +
16752 +- INIT_TEXT_SECTION(PAGE_SIZE)
16753 +-#ifdef CONFIG_X86_64
16754 +- :init
16755 +-#endif
16756 ++ . = ALIGN(PAGE_SIZE);
16757 ++ init_begin = .;
16758 ++ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
16759 ++ VMLINUX_SYMBOL(_sinittext) = .;
16760 ++ INIT_TEXT
16761 ++ VMLINUX_SYMBOL(_einittext) = .;
16762 ++ . = ALIGN(PAGE_SIZE);
16763 ++ } :text.init
16764 +
16765 +- INIT_DATA_SECTION(16)
16766 ++ /*
16767 ++ * .exit.text is discard at runtime, not link time, to deal with
16768 ++ * references from .altinstructions and .eh_frame
16769 ++ */
16770 ++ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
16771 ++ EXIT_TEXT
16772 ++ . = ALIGN(16);
16773 ++ } :text.exit
16774 ++ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
16775 ++
16776 ++ . = ALIGN(PAGE_SIZE);
16777 ++ INIT_DATA_SECTION(16) :init
16778 +
16779 + /*
16780 + * Code and data for a variety of lowlevel trampolines, to be
16781 +@@ -269,19 +349,12 @@ SECTIONS
16782 + }
16783 +
16784 + . = ALIGN(8);
16785 +- /*
16786 +- * .exit.text is discard at runtime, not link time, to deal with
16787 +- * references from .altinstructions and .eh_frame
16788 +- */
16789 +- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
16790 +- EXIT_TEXT
16791 +- }
16792 +
16793 + .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
16794 + EXIT_DATA
16795 + }
16796 +
16797 +-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
16798 ++#ifndef CONFIG_SMP
16799 + PERCPU_SECTION(INTERNODE_CACHE_BYTES)
16800 + #endif
16801 +
16802 +@@ -300,16 +373,10 @@ SECTIONS
16803 + .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
16804 + __smp_locks = .;
16805 + *(.smp_locks)
16806 +- . = ALIGN(PAGE_SIZE);
16807 + __smp_locks_end = .;
16808 ++ . = ALIGN(PAGE_SIZE);
16809 + }
16810 +
16811 +-#ifdef CONFIG_X86_64
16812 +- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
16813 +- NOSAVE_DATA
16814 +- }
16815 +-#endif
16816 +-
16817 + /* BSS */
16818 + . = ALIGN(PAGE_SIZE);
16819 + .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
16820 +@@ -325,6 +392,7 @@ SECTIONS
16821 + __brk_base = .;
16822 + . += 64 * 1024; /* 64k alignment slop space */
16823 + *(.brk_reservation) /* areas brk users have reserved */
16824 ++ . = ALIGN(HPAGE_SIZE);
16825 + __brk_limit = .;
16826 + }
16827 +
16828 +@@ -351,13 +419,12 @@ SECTIONS
16829 + * for the boot processor.
16830 + */
16831 + #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
16832 +-INIT_PER_CPU(gdt_page);
16833 + INIT_PER_CPU(irq_stack_union);
16834 +
16835 + /*
16836 + * Build-time check on the image size:
16837 + */
16838 +-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
16839 ++. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
16840 + "kernel image bigger than KERNEL_IMAGE_SIZE");
16841 +
16842 + #ifdef CONFIG_SMP
16843 +diff -urNp linux-3.1.1/arch/x86/kernel/vsyscall_64.c linux-3.1.1/arch/x86/kernel/vsyscall_64.c
16844 +--- linux-3.1.1/arch/x86/kernel/vsyscall_64.c 2011-11-11 15:19:27.000000000 -0500
16845 ++++ linux-3.1.1/arch/x86/kernel/vsyscall_64.c 2011-11-16 18:39:07.000000000 -0500
16846 +@@ -56,15 +56,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, v
16847 + .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
16848 + };
16849 +
16850 +-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
16851 ++static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
16852 +
16853 + static int __init vsyscall_setup(char *str)
16854 + {
16855 + if (str) {
16856 + if (!strcmp("emulate", str))
16857 + vsyscall_mode = EMULATE;
16858 +- else if (!strcmp("native", str))
16859 +- vsyscall_mode = NATIVE;
16860 + else if (!strcmp("none", str))
16861 + vsyscall_mode = NONE;
16862 + else
16863 +@@ -177,7 +175,7 @@ bool emulate_vsyscall(struct pt_regs *re
16864 +
16865 + tsk = current;
16866 + if (seccomp_mode(&tsk->seccomp))
16867 +- do_exit(SIGKILL);
16868 ++ do_group_exit(SIGKILL);
16869 +
16870 + switch (vsyscall_nr) {
16871 + case 0:
16872 +@@ -219,8 +217,7 @@ bool emulate_vsyscall(struct pt_regs *re
16873 + return true;
16874 +
16875 + sigsegv:
16876 +- force_sig(SIGSEGV, current);
16877 +- return true;
16878 ++ do_group_exit(SIGKILL);
16879 + }
16880 +
16881 + /*
16882 +@@ -273,10 +270,7 @@ void __init map_vsyscall(void)
16883 + extern char __vvar_page;
16884 + unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
16885 +
16886 +- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
16887 +- vsyscall_mode == NATIVE
16888 +- ? PAGE_KERNEL_VSYSCALL
16889 +- : PAGE_KERNEL_VVAR);
16890 ++ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
16891 + BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
16892 + (unsigned long)VSYSCALL_START);
16893 +
16894 +diff -urNp linux-3.1.1/arch/x86/kernel/x8664_ksyms_64.c linux-3.1.1/arch/x86/kernel/x8664_ksyms_64.c
16895 +--- linux-3.1.1/arch/x86/kernel/x8664_ksyms_64.c 2011-11-11 15:19:27.000000000 -0500
16896 ++++ linux-3.1.1/arch/x86/kernel/x8664_ksyms_64.c 2011-11-16 18:39:07.000000000 -0500
16897 +@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
16898 + EXPORT_SYMBOL(copy_user_generic_string);
16899 + EXPORT_SYMBOL(copy_user_generic_unrolled);
16900 + EXPORT_SYMBOL(__copy_user_nocache);
16901 +-EXPORT_SYMBOL(_copy_from_user);
16902 +-EXPORT_SYMBOL(_copy_to_user);
16903 +
16904 + EXPORT_SYMBOL(copy_page);
16905 + EXPORT_SYMBOL(clear_page);
16906 +diff -urNp linux-3.1.1/arch/x86/kernel/xsave.c linux-3.1.1/arch/x86/kernel/xsave.c
16907 +--- linux-3.1.1/arch/x86/kernel/xsave.c 2011-11-11 15:19:27.000000000 -0500
16908 ++++ linux-3.1.1/arch/x86/kernel/xsave.c 2011-11-16 18:39:07.000000000 -0500
16909 +@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
16910 + fx_sw_user->xstate_size > fx_sw_user->extended_size)
16911 + return -EINVAL;
16912 +
16913 +- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
16914 ++ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
16915 + fx_sw_user->extended_size -
16916 + FP_XSTATE_MAGIC2_SIZE));
16917 + if (err)
16918 +@@ -267,7 +267,7 @@ fx_only:
16919 + * the other extended state.
16920 + */
16921 + xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
16922 +- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
16923 ++ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
16924 + }
16925 +
16926 + /*
16927 +@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
16928 + if (use_xsave())
16929 + err = restore_user_xstate(buf);
16930 + else
16931 +- err = fxrstor_checking((__force struct i387_fxsave_struct *)
16932 ++ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
16933 + buf);
16934 + if (unlikely(err)) {
16935 + /*
16936 +diff -urNp linux-3.1.1/arch/x86/kvm/emulate.c linux-3.1.1/arch/x86/kvm/emulate.c
16937 +--- linux-3.1.1/arch/x86/kvm/emulate.c 2011-11-11 15:19:27.000000000 -0500
16938 ++++ linux-3.1.1/arch/x86/kvm/emulate.c 2011-11-16 18:39:07.000000000 -0500
16939 +@@ -96,7 +96,7 @@
16940 + #define Src2ImmByte (2<<29)
16941 + #define Src2One (3<<29)
16942 + #define Src2Imm (4<<29)
16943 +-#define Src2Mask (7<<29)
16944 ++#define Src2Mask (7U<<29)
16945 +
16946 + #define X2(x...) x, x
16947 + #define X3(x...) X2(x), x
16948 +@@ -207,6 +207,7 @@ struct gprefix {
16949 +
16950 + #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
16951 + do { \
16952 ++ unsigned long _tmp; \
16953 + __asm__ __volatile__ ( \
16954 + _PRE_EFLAGS("0", "4", "2") \
16955 + _op _suffix " %"_x"3,%1; " \
16956 +@@ -220,8 +221,6 @@ struct gprefix {
16957 + /* Raw emulation: instruction has two explicit operands. */
16958 + #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
16959 + do { \
16960 +- unsigned long _tmp; \
16961 +- \
16962 + switch ((_dst).bytes) { \
16963 + case 2: \
16964 + ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
16965 +@@ -237,7 +236,6 @@ struct gprefix {
16966 +
16967 + #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16968 + do { \
16969 +- unsigned long _tmp; \
16970 + switch ((_dst).bytes) { \
16971 + case 1: \
16972 + ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16973 +diff -urNp linux-3.1.1/arch/x86/kvm/lapic.c linux-3.1.1/arch/x86/kvm/lapic.c
16974 +--- linux-3.1.1/arch/x86/kvm/lapic.c 2011-11-11 15:19:27.000000000 -0500
16975 ++++ linux-3.1.1/arch/x86/kvm/lapic.c 2011-11-16 18:39:07.000000000 -0500
16976 +@@ -53,7 +53,7 @@
16977 + #define APIC_BUS_CYCLE_NS 1
16978 +
16979 + /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16980 +-#define apic_debug(fmt, arg...)
16981 ++#define apic_debug(fmt, arg...) do {} while (0)
16982 +
16983 + #define APIC_LVT_NUM 6
16984 + /* 14 is the version for Xeon and Pentium 8.4.8*/
16985 +diff -urNp linux-3.1.1/arch/x86/kvm/mmu.c linux-3.1.1/arch/x86/kvm/mmu.c
16986 +--- linux-3.1.1/arch/x86/kvm/mmu.c 2011-11-11 15:19:27.000000000 -0500
16987 ++++ linux-3.1.1/arch/x86/kvm/mmu.c 2011-11-16 18:39:07.000000000 -0500
16988 +@@ -3552,7 +3552,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16989 +
16990 + pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16991 +
16992 +- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16993 ++ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16994 +
16995 + /*
16996 + * Assume that the pte write on a page table of the same type
16997 +@@ -3584,7 +3584,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16998 + }
16999 +
17000 + spin_lock(&vcpu->kvm->mmu_lock);
17001 +- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
17002 ++ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
17003 + gentry = 0;
17004 + kvm_mmu_free_some_pages(vcpu);
17005 + ++vcpu->kvm->stat.mmu_pte_write;
17006 +diff -urNp linux-3.1.1/arch/x86/kvm/paging_tmpl.h linux-3.1.1/arch/x86/kvm/paging_tmpl.h
17007 +--- linux-3.1.1/arch/x86/kvm/paging_tmpl.h 2011-11-11 15:19:27.000000000 -0500
17008 ++++ linux-3.1.1/arch/x86/kvm/paging_tmpl.h 2011-11-16 19:40:44.000000000 -0500
17009 +@@ -197,7 +197,7 @@ retry_walk:
17010 + if (unlikely(kvm_is_error_hva(host_addr)))
17011 + goto error;
17012 +
17013 +- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
17014 ++ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
17015 + if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
17016 + goto error;
17017 +
17018 +@@ -575,6 +575,8 @@ static int FNAME(page_fault)(struct kvm_
17019 + unsigned long mmu_seq;
17020 + bool map_writable;
17021 +
17022 ++ pax_track_stack();
17023 ++
17024 + pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
17025 +
17026 + if (unlikely(error_code & PFERR_RSVD_MASK))
17027 +@@ -701,7 +703,7 @@ static void FNAME(invlpg)(struct kvm_vcp
17028 + if (need_flush)
17029 + kvm_flush_remote_tlbs(vcpu->kvm);
17030 +
17031 +- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
17032 ++ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
17033 +
17034 + spin_unlock(&vcpu->kvm->mmu_lock);
17035 +
17036 +diff -urNp linux-3.1.1/arch/x86/kvm/svm.c linux-3.1.1/arch/x86/kvm/svm.c
17037 +--- linux-3.1.1/arch/x86/kvm/svm.c 2011-11-11 15:19:27.000000000 -0500
17038 ++++ linux-3.1.1/arch/x86/kvm/svm.c 2011-11-16 18:39:07.000000000 -0500
17039 +@@ -3381,7 +3381,11 @@ static void reload_tss(struct kvm_vcpu *
17040 + int cpu = raw_smp_processor_id();
17041 +
17042 + struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
17043 ++
17044 ++ pax_open_kernel();
17045 + sd->tss_desc->type = 9; /* available 32/64-bit TSS */
17046 ++ pax_close_kernel();
17047 ++
17048 + load_TR_desc();
17049 + }
17050 +
17051 +@@ -3759,6 +3763,10 @@ static void svm_vcpu_run(struct kvm_vcpu
17052 + #endif
17053 + #endif
17054 +
17055 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17056 ++ __set_fs(current_thread_info()->addr_limit);
17057 ++#endif
17058 ++
17059 + reload_tss(vcpu);
17060 +
17061 + local_irq_disable();
17062 +diff -urNp linux-3.1.1/arch/x86/kvm/vmx.c linux-3.1.1/arch/x86/kvm/vmx.c
17063 +--- linux-3.1.1/arch/x86/kvm/vmx.c 2011-11-11 15:19:27.000000000 -0500
17064 ++++ linux-3.1.1/arch/x86/kvm/vmx.c 2011-11-16 18:39:07.000000000 -0500
17065 +@@ -1251,7 +1251,11 @@ static void reload_tss(void)
17066 + struct desc_struct *descs;
17067 +
17068 + descs = (void *)gdt->address;
17069 ++
17070 ++ pax_open_kernel();
17071 + descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
17072 ++ pax_close_kernel();
17073 ++
17074 + load_TR_desc();
17075 + }
17076 +
17077 +@@ -2520,8 +2524,11 @@ static __init int hardware_setup(void)
17078 + if (!cpu_has_vmx_flexpriority())
17079 + flexpriority_enabled = 0;
17080 +
17081 +- if (!cpu_has_vmx_tpr_shadow())
17082 +- kvm_x86_ops->update_cr8_intercept = NULL;
17083 ++ if (!cpu_has_vmx_tpr_shadow()) {
17084 ++ pax_open_kernel();
17085 ++ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
17086 ++ pax_close_kernel();
17087 ++ }
17088 +
17089 + if (enable_ept && !cpu_has_vmx_ept_2m_page())
17090 + kvm_disable_largepages();
17091 +@@ -3535,7 +3542,7 @@ static void vmx_set_constant_host_state(
17092 + vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
17093 +
17094 + asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
17095 +- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
17096 ++ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
17097 +
17098 + rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
17099 + vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
17100 +@@ -6021,6 +6028,12 @@ static void __noclone vmx_vcpu_run(struc
17101 + "jmp .Lkvm_vmx_return \n\t"
17102 + ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
17103 + ".Lkvm_vmx_return: "
17104 ++
17105 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17106 ++ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
17107 ++ ".Lkvm_vmx_return2: "
17108 ++#endif
17109 ++
17110 + /* Save guest registers, load host registers, keep flags */
17111 + "mov %0, %c[wordsize](%%"R"sp) \n\t"
17112 + "pop %0 \n\t"
17113 +@@ -6069,6 +6082,11 @@ static void __noclone vmx_vcpu_run(struc
17114 + #endif
17115 + [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
17116 + [wordsize]"i"(sizeof(ulong))
17117 ++
17118 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17119 ++ ,[cs]"i"(__KERNEL_CS)
17120 ++#endif
17121 ++
17122 + : "cc", "memory"
17123 + , R"ax", R"bx", R"di", R"si"
17124 + #ifdef CONFIG_X86_64
17125 +@@ -6097,7 +6115,16 @@ static void __noclone vmx_vcpu_run(struc
17126 + }
17127 + }
17128 +
17129 +- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
17130 ++ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
17131 ++
17132 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17133 ++ loadsegment(fs, __KERNEL_PERCPU);
17134 ++#endif
17135 ++
17136 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17137 ++ __set_fs(current_thread_info()->addr_limit);
17138 ++#endif
17139 ++
17140 + vmx->loaded_vmcs->launched = 1;
17141 +
17142 + vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
17143 +diff -urNp linux-3.1.1/arch/x86/kvm/x86.c linux-3.1.1/arch/x86/kvm/x86.c
17144 +--- linux-3.1.1/arch/x86/kvm/x86.c 2011-11-11 15:19:27.000000000 -0500
17145 ++++ linux-3.1.1/arch/x86/kvm/x86.c 2011-11-16 18:39:07.000000000 -0500
17146 +@@ -1334,8 +1334,8 @@ static int xen_hvm_config(struct kvm_vcp
17147 + {
17148 + struct kvm *kvm = vcpu->kvm;
17149 + int lm = is_long_mode(vcpu);
17150 +- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
17151 +- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
17152 ++ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
17153 ++ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
17154 + u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
17155 + : kvm->arch.xen_hvm_config.blob_size_32;
17156 + u32 page_num = data & ~PAGE_MASK;
17157 +@@ -2137,6 +2137,8 @@ long kvm_arch_dev_ioctl(struct file *fil
17158 + if (n < msr_list.nmsrs)
17159 + goto out;
17160 + r = -EFAULT;
17161 ++ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
17162 ++ goto out;
17163 + if (copy_to_user(user_msr_list->indices, &msrs_to_save,
17164 + num_msrs_to_save * sizeof(u32)))
17165 + goto out;
17166 +@@ -2312,15 +2314,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
17167 + struct kvm_cpuid2 *cpuid,
17168 + struct kvm_cpuid_entry2 __user *entries)
17169 + {
17170 +- int r;
17171 ++ int r, i;
17172 +
17173 + r = -E2BIG;
17174 + if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
17175 + goto out;
17176 + r = -EFAULT;
17177 +- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
17178 +- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17179 ++ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17180 + goto out;
17181 ++ for (i = 0; i < cpuid->nent; ++i) {
17182 ++ struct kvm_cpuid_entry2 cpuid_entry;
17183 ++ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
17184 ++ goto out;
17185 ++ vcpu->arch.cpuid_entries[i] = cpuid_entry;
17186 ++ }
17187 + vcpu->arch.cpuid_nent = cpuid->nent;
17188 + kvm_apic_set_version(vcpu);
17189 + kvm_x86_ops->cpuid_update(vcpu);
17190 +@@ -2335,15 +2342,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
17191 + struct kvm_cpuid2 *cpuid,
17192 + struct kvm_cpuid_entry2 __user *entries)
17193 + {
17194 +- int r;
17195 ++ int r, i;
17196 +
17197 + r = -E2BIG;
17198 + if (cpuid->nent < vcpu->arch.cpuid_nent)
17199 + goto out;
17200 + r = -EFAULT;
17201 +- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
17202 +- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17203 ++ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17204 + goto out;
17205 ++ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
17206 ++ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
17207 ++ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
17208 ++ goto out;
17209 ++ }
17210 + return 0;
17211 +
17212 + out:
17213 +@@ -2718,7 +2729,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
17214 + static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
17215 + struct kvm_interrupt *irq)
17216 + {
17217 +- if (irq->irq < 0 || irq->irq >= 256)
17218 ++ if (irq->irq >= 256)
17219 + return -EINVAL;
17220 + if (irqchip_in_kernel(vcpu->kvm))
17221 + return -ENXIO;
17222 +@@ -5089,7 +5100,7 @@ static void kvm_set_mmio_spte_mask(void)
17223 + kvm_mmu_set_mmio_spte_mask(mask);
17224 + }
17225 +
17226 +-int kvm_arch_init(void *opaque)
17227 ++int kvm_arch_init(const void *opaque)
17228 + {
17229 + int r;
17230 + struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
17231 +diff -urNp linux-3.1.1/arch/x86/lguest/boot.c linux-3.1.1/arch/x86/lguest/boot.c
17232 +--- linux-3.1.1/arch/x86/lguest/boot.c 2011-11-11 15:19:27.000000000 -0500
17233 ++++ linux-3.1.1/arch/x86/lguest/boot.c 2011-11-16 18:39:07.000000000 -0500
17234 +@@ -1184,9 +1184,10 @@ static __init int early_put_chars(u32 vt
17235 + * Rebooting also tells the Host we're finished, but the RESTART flag tells the
17236 + * Launcher to reboot us.
17237 + */
17238 +-static void lguest_restart(char *reason)
17239 ++static __noreturn void lguest_restart(char *reason)
17240 + {
17241 + hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
17242 ++ BUG();
17243 + }
17244 +
17245 + /*G:050
17246 +diff -urNp linux-3.1.1/arch/x86/lib/atomic64_32.c linux-3.1.1/arch/x86/lib/atomic64_32.c
17247 +--- linux-3.1.1/arch/x86/lib/atomic64_32.c 2011-11-11 15:19:27.000000000 -0500
17248 ++++ linux-3.1.1/arch/x86/lib/atomic64_32.c 2011-11-16 18:39:07.000000000 -0500
17249 +@@ -8,18 +8,30 @@
17250 +
17251 + long long atomic64_read_cx8(long long, const atomic64_t *v);
17252 + EXPORT_SYMBOL(atomic64_read_cx8);
17253 ++long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
17254 ++EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
17255 + long long atomic64_set_cx8(long long, const atomic64_t *v);
17256 + EXPORT_SYMBOL(atomic64_set_cx8);
17257 ++long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
17258 ++EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
17259 + long long atomic64_xchg_cx8(long long, unsigned high);
17260 + EXPORT_SYMBOL(atomic64_xchg_cx8);
17261 + long long atomic64_add_return_cx8(long long a, atomic64_t *v);
17262 + EXPORT_SYMBOL(atomic64_add_return_cx8);
17263 ++long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17264 ++EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
17265 + long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
17266 + EXPORT_SYMBOL(atomic64_sub_return_cx8);
17267 ++long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17268 ++EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
17269 + long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
17270 + EXPORT_SYMBOL(atomic64_inc_return_cx8);
17271 ++long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17272 ++EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
17273 + long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
17274 + EXPORT_SYMBOL(atomic64_dec_return_cx8);
17275 ++long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
17276 ++EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
17277 + long long atomic64_dec_if_positive_cx8(atomic64_t *v);
17278 + EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
17279 + int atomic64_inc_not_zero_cx8(atomic64_t *v);
17280 +@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
17281 + #ifndef CONFIG_X86_CMPXCHG64
17282 + long long atomic64_read_386(long long, const atomic64_t *v);
17283 + EXPORT_SYMBOL(atomic64_read_386);
17284 ++long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
17285 ++EXPORT_SYMBOL(atomic64_read_unchecked_386);
17286 + long long atomic64_set_386(long long, const atomic64_t *v);
17287 + EXPORT_SYMBOL(atomic64_set_386);
17288 ++long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
17289 ++EXPORT_SYMBOL(atomic64_set_unchecked_386);
17290 + long long atomic64_xchg_386(long long, unsigned high);
17291 + EXPORT_SYMBOL(atomic64_xchg_386);
17292 + long long atomic64_add_return_386(long long a, atomic64_t *v);
17293 + EXPORT_SYMBOL(atomic64_add_return_386);
17294 ++long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17295 ++EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
17296 + long long atomic64_sub_return_386(long long a, atomic64_t *v);
17297 + EXPORT_SYMBOL(atomic64_sub_return_386);
17298 ++long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17299 ++EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
17300 + long long atomic64_inc_return_386(long long a, atomic64_t *v);
17301 + EXPORT_SYMBOL(atomic64_inc_return_386);
17302 ++long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17303 ++EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
17304 + long long atomic64_dec_return_386(long long a, atomic64_t *v);
17305 + EXPORT_SYMBOL(atomic64_dec_return_386);
17306 ++long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
17307 ++EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
17308 + long long atomic64_add_386(long long a, atomic64_t *v);
17309 + EXPORT_SYMBOL(atomic64_add_386);
17310 ++long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
17311 ++EXPORT_SYMBOL(atomic64_add_unchecked_386);
17312 + long long atomic64_sub_386(long long a, atomic64_t *v);
17313 + EXPORT_SYMBOL(atomic64_sub_386);
17314 ++long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
17315 ++EXPORT_SYMBOL(atomic64_sub_unchecked_386);
17316 + long long atomic64_inc_386(long long a, atomic64_t *v);
17317 + EXPORT_SYMBOL(atomic64_inc_386);
17318 ++long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
17319 ++EXPORT_SYMBOL(atomic64_inc_unchecked_386);
17320 + long long atomic64_dec_386(long long a, atomic64_t *v);
17321 + EXPORT_SYMBOL(atomic64_dec_386);
17322 ++long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
17323 ++EXPORT_SYMBOL(atomic64_dec_unchecked_386);
17324 + long long atomic64_dec_if_positive_386(atomic64_t *v);
17325 + EXPORT_SYMBOL(atomic64_dec_if_positive_386);
17326 + int atomic64_inc_not_zero_386(atomic64_t *v);
17327 +diff -urNp linux-3.1.1/arch/x86/lib/atomic64_386_32.S linux-3.1.1/arch/x86/lib/atomic64_386_32.S
17328 +--- linux-3.1.1/arch/x86/lib/atomic64_386_32.S 2011-11-11 15:19:27.000000000 -0500
17329 ++++ linux-3.1.1/arch/x86/lib/atomic64_386_32.S 2011-11-16 18:39:07.000000000 -0500
17330 +@@ -48,6 +48,10 @@ BEGIN(read)
17331 + movl (v), %eax
17332 + movl 4(v), %edx
17333 + RET_ENDP
17334 ++BEGIN(read_unchecked)
17335 ++ movl (v), %eax
17336 ++ movl 4(v), %edx
17337 ++RET_ENDP
17338 + #undef v
17339 +
17340 + #define v %esi
17341 +@@ -55,6 +59,10 @@ BEGIN(set)
17342 + movl %ebx, (v)
17343 + movl %ecx, 4(v)
17344 + RET_ENDP
17345 ++BEGIN(set_unchecked)
17346 ++ movl %ebx, (v)
17347 ++ movl %ecx, 4(v)
17348 ++RET_ENDP
17349 + #undef v
17350 +
17351 + #define v %esi
17352 +@@ -70,6 +78,20 @@ RET_ENDP
17353 + BEGIN(add)
17354 + addl %eax, (v)
17355 + adcl %edx, 4(v)
17356 ++
17357 ++#ifdef CONFIG_PAX_REFCOUNT
17358 ++ jno 0f
17359 ++ subl %eax, (v)
17360 ++ sbbl %edx, 4(v)
17361 ++ int $4
17362 ++0:
17363 ++ _ASM_EXTABLE(0b, 0b)
17364 ++#endif
17365 ++
17366 ++RET_ENDP
17367 ++BEGIN(add_unchecked)
17368 ++ addl %eax, (v)
17369 ++ adcl %edx, 4(v)
17370 + RET_ENDP
17371 + #undef v
17372 +
17373 +@@ -77,6 +99,24 @@ RET_ENDP
17374 + BEGIN(add_return)
17375 + addl (v), %eax
17376 + adcl 4(v), %edx
17377 ++
17378 ++#ifdef CONFIG_PAX_REFCOUNT
17379 ++ into
17380 ++1234:
17381 ++ _ASM_EXTABLE(1234b, 2f)
17382 ++#endif
17383 ++
17384 ++ movl %eax, (v)
17385 ++ movl %edx, 4(v)
17386 ++
17387 ++#ifdef CONFIG_PAX_REFCOUNT
17388 ++2:
17389 ++#endif
17390 ++
17391 ++RET_ENDP
17392 ++BEGIN(add_return_unchecked)
17393 ++ addl (v), %eax
17394 ++ adcl 4(v), %edx
17395 + movl %eax, (v)
17396 + movl %edx, 4(v)
17397 + RET_ENDP
17398 +@@ -86,6 +126,20 @@ RET_ENDP
17399 + BEGIN(sub)
17400 + subl %eax, (v)
17401 + sbbl %edx, 4(v)
17402 ++
17403 ++#ifdef CONFIG_PAX_REFCOUNT
17404 ++ jno 0f
17405 ++ addl %eax, (v)
17406 ++ adcl %edx, 4(v)
17407 ++ int $4
17408 ++0:
17409 ++ _ASM_EXTABLE(0b, 0b)
17410 ++#endif
17411 ++
17412 ++RET_ENDP
17413 ++BEGIN(sub_unchecked)
17414 ++ subl %eax, (v)
17415 ++ sbbl %edx, 4(v)
17416 + RET_ENDP
17417 + #undef v
17418 +
17419 +@@ -96,6 +150,27 @@ BEGIN(sub_return)
17420 + sbbl $0, %edx
17421 + addl (v), %eax
17422 + adcl 4(v), %edx
17423 ++
17424 ++#ifdef CONFIG_PAX_REFCOUNT
17425 ++ into
17426 ++1234:
17427 ++ _ASM_EXTABLE(1234b, 2f)
17428 ++#endif
17429 ++
17430 ++ movl %eax, (v)
17431 ++ movl %edx, 4(v)
17432 ++
17433 ++#ifdef CONFIG_PAX_REFCOUNT
17434 ++2:
17435 ++#endif
17436 ++
17437 ++RET_ENDP
17438 ++BEGIN(sub_return_unchecked)
17439 ++ negl %edx
17440 ++ negl %eax
17441 ++ sbbl $0, %edx
17442 ++ addl (v), %eax
17443 ++ adcl 4(v), %edx
17444 + movl %eax, (v)
17445 + movl %edx, 4(v)
17446 + RET_ENDP
17447 +@@ -105,6 +180,20 @@ RET_ENDP
17448 + BEGIN(inc)
17449 + addl $1, (v)
17450 + adcl $0, 4(v)
17451 ++
17452 ++#ifdef CONFIG_PAX_REFCOUNT
17453 ++ jno 0f
17454 ++ subl $1, (v)
17455 ++ sbbl $0, 4(v)
17456 ++ int $4
17457 ++0:
17458 ++ _ASM_EXTABLE(0b, 0b)
17459 ++#endif
17460 ++
17461 ++RET_ENDP
17462 ++BEGIN(inc_unchecked)
17463 ++ addl $1, (v)
17464 ++ adcl $0, 4(v)
17465 + RET_ENDP
17466 + #undef v
17467 +
17468 +@@ -114,6 +203,26 @@ BEGIN(inc_return)
17469 + movl 4(v), %edx
17470 + addl $1, %eax
17471 + adcl $0, %edx
17472 ++
17473 ++#ifdef CONFIG_PAX_REFCOUNT
17474 ++ into
17475 ++1234:
17476 ++ _ASM_EXTABLE(1234b, 2f)
17477 ++#endif
17478 ++
17479 ++ movl %eax, (v)
17480 ++ movl %edx, 4(v)
17481 ++
17482 ++#ifdef CONFIG_PAX_REFCOUNT
17483 ++2:
17484 ++#endif
17485 ++
17486 ++RET_ENDP
17487 ++BEGIN(inc_return_unchecked)
17488 ++ movl (v), %eax
17489 ++ movl 4(v), %edx
17490 ++ addl $1, %eax
17491 ++ adcl $0, %edx
17492 + movl %eax, (v)
17493 + movl %edx, 4(v)
17494 + RET_ENDP
17495 +@@ -123,6 +232,20 @@ RET_ENDP
17496 + BEGIN(dec)
17497 + subl $1, (v)
17498 + sbbl $0, 4(v)
17499 ++
17500 ++#ifdef CONFIG_PAX_REFCOUNT
17501 ++ jno 0f
17502 ++ addl $1, (v)
17503 ++ adcl $0, 4(v)
17504 ++ int $4
17505 ++0:
17506 ++ _ASM_EXTABLE(0b, 0b)
17507 ++#endif
17508 ++
17509 ++RET_ENDP
17510 ++BEGIN(dec_unchecked)
17511 ++ subl $1, (v)
17512 ++ sbbl $0, 4(v)
17513 + RET_ENDP
17514 + #undef v
17515 +
17516 +@@ -132,6 +255,26 @@ BEGIN(dec_return)
17517 + movl 4(v), %edx
17518 + subl $1, %eax
17519 + sbbl $0, %edx
17520 ++
17521 ++#ifdef CONFIG_PAX_REFCOUNT
17522 ++ into
17523 ++1234:
17524 ++ _ASM_EXTABLE(1234b, 2f)
17525 ++#endif
17526 ++
17527 ++ movl %eax, (v)
17528 ++ movl %edx, 4(v)
17529 ++
17530 ++#ifdef CONFIG_PAX_REFCOUNT
17531 ++2:
17532 ++#endif
17533 ++
17534 ++RET_ENDP
17535 ++BEGIN(dec_return_unchecked)
17536 ++ movl (v), %eax
17537 ++ movl 4(v), %edx
17538 ++ subl $1, %eax
17539 ++ sbbl $0, %edx
17540 + movl %eax, (v)
17541 + movl %edx, 4(v)
17542 + RET_ENDP
17543 +@@ -143,6 +286,13 @@ BEGIN(add_unless)
17544 + adcl %edx, %edi
17545 + addl (v), %eax
17546 + adcl 4(v), %edx
17547 ++
17548 ++#ifdef CONFIG_PAX_REFCOUNT
17549 ++ into
17550 ++1234:
17551 ++ _ASM_EXTABLE(1234b, 2f)
17552 ++#endif
17553 ++
17554 + cmpl %eax, %esi
17555 + je 3f
17556 + 1:
17557 +@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
17558 + 1:
17559 + addl $1, %eax
17560 + adcl $0, %edx
17561 ++
17562 ++#ifdef CONFIG_PAX_REFCOUNT
17563 ++ into
17564 ++1234:
17565 ++ _ASM_EXTABLE(1234b, 2f)
17566 ++#endif
17567 ++
17568 + movl %eax, (v)
17569 + movl %edx, 4(v)
17570 + movl $1, %eax
17571 +@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
17572 + movl 4(v), %edx
17573 + subl $1, %eax
17574 + sbbl $0, %edx
17575 ++
17576 ++#ifdef CONFIG_PAX_REFCOUNT
17577 ++ into
17578 ++1234:
17579 ++ _ASM_EXTABLE(1234b, 1f)
17580 ++#endif
17581 ++
17582 + js 1f
17583 + movl %eax, (v)
17584 + movl %edx, 4(v)
17585 +diff -urNp linux-3.1.1/arch/x86/lib/atomic64_cx8_32.S linux-3.1.1/arch/x86/lib/atomic64_cx8_32.S
17586 +--- linux-3.1.1/arch/x86/lib/atomic64_cx8_32.S 2011-11-11 15:19:27.000000000 -0500
17587 ++++ linux-3.1.1/arch/x86/lib/atomic64_cx8_32.S 2011-11-16 18:39:07.000000000 -0500
17588 +@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
17589 + CFI_STARTPROC
17590 +
17591 + read64 %ecx
17592 ++ pax_force_retaddr
17593 + ret
17594 + CFI_ENDPROC
17595 + ENDPROC(atomic64_read_cx8)
17596 +
17597 ++ENTRY(atomic64_read_unchecked_cx8)
17598 ++ CFI_STARTPROC
17599 ++
17600 ++ read64 %ecx
17601 ++ pax_force_retaddr
17602 ++ ret
17603 ++ CFI_ENDPROC
17604 ++ENDPROC(atomic64_read_unchecked_cx8)
17605 ++
17606 + ENTRY(atomic64_set_cx8)
17607 + CFI_STARTPROC
17608 +
17609 +@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
17610 + cmpxchg8b (%esi)
17611 + jne 1b
17612 +
17613 ++ pax_force_retaddr
17614 + ret
17615 + CFI_ENDPROC
17616 + ENDPROC(atomic64_set_cx8)
17617 +
17618 ++ENTRY(atomic64_set_unchecked_cx8)
17619 ++ CFI_STARTPROC
17620 ++
17621 ++1:
17622 ++/* we don't need LOCK_PREFIX since aligned 64-bit writes
17623 ++ * are atomic on 586 and newer */
17624 ++ cmpxchg8b (%esi)
17625 ++ jne 1b
17626 ++
17627 ++ pax_force_retaddr
17628 ++ ret
17629 ++ CFI_ENDPROC
17630 ++ENDPROC(atomic64_set_unchecked_cx8)
17631 ++
17632 + ENTRY(atomic64_xchg_cx8)
17633 + CFI_STARTPROC
17634 +
17635 +@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
17636 + cmpxchg8b (%esi)
17637 + jne 1b
17638 +
17639 ++ pax_force_retaddr
17640 + ret
17641 + CFI_ENDPROC
17642 + ENDPROC(atomic64_xchg_cx8)
17643 +
17644 +-.macro addsub_return func ins insc
17645 +-ENTRY(atomic64_\func\()_return_cx8)
17646 ++.macro addsub_return func ins insc unchecked=""
17647 ++ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17648 + CFI_STARTPROC
17649 + SAVE ebp
17650 + SAVE ebx
17651 +@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
17652 + movl %edx, %ecx
17653 + \ins\()l %esi, %ebx
17654 + \insc\()l %edi, %ecx
17655 ++
17656 ++.ifb \unchecked
17657 ++#ifdef CONFIG_PAX_REFCOUNT
17658 ++ into
17659 ++2:
17660 ++ _ASM_EXTABLE(2b, 3f)
17661 ++#endif
17662 ++.endif
17663 ++
17664 + LOCK_PREFIX
17665 + cmpxchg8b (%ebp)
17666 + jne 1b
17667 +-
17668 +-10:
17669 + movl %ebx, %eax
17670 + movl %ecx, %edx
17671 ++
17672 ++.ifb \unchecked
17673 ++#ifdef CONFIG_PAX_REFCOUNT
17674 ++3:
17675 ++#endif
17676 ++.endif
17677 ++
17678 + RESTORE edi
17679 + RESTORE esi
17680 + RESTORE ebx
17681 + RESTORE ebp
17682 ++ pax_force_retaddr
17683 + ret
17684 + CFI_ENDPROC
17685 +-ENDPROC(atomic64_\func\()_return_cx8)
17686 ++ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17687 + .endm
17688 +
17689 + addsub_return add add adc
17690 + addsub_return sub sub sbb
17691 ++addsub_return add add adc _unchecked
17692 ++addsub_return sub sub sbb _unchecked
17693 +
17694 +-.macro incdec_return func ins insc
17695 +-ENTRY(atomic64_\func\()_return_cx8)
17696 ++.macro incdec_return func ins insc unchecked
17697 ++ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
17698 + CFI_STARTPROC
17699 + SAVE ebx
17700 +
17701 +@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
17702 + movl %edx, %ecx
17703 + \ins\()l $1, %ebx
17704 + \insc\()l $0, %ecx
17705 ++
17706 ++.ifb \unchecked
17707 ++#ifdef CONFIG_PAX_REFCOUNT
17708 ++ into
17709 ++2:
17710 ++ _ASM_EXTABLE(2b, 3f)
17711 ++#endif
17712 ++.endif
17713 ++
17714 + LOCK_PREFIX
17715 + cmpxchg8b (%esi)
17716 + jne 1b
17717 +
17718 +-10:
17719 + movl %ebx, %eax
17720 + movl %ecx, %edx
17721 ++
17722 ++.ifb \unchecked
17723 ++#ifdef CONFIG_PAX_REFCOUNT
17724 ++3:
17725 ++#endif
17726 ++.endif
17727 ++
17728 + RESTORE ebx
17729 ++ pax_force_retaddr
17730 + ret
17731 + CFI_ENDPROC
17732 +-ENDPROC(atomic64_\func\()_return_cx8)
17733 ++ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
17734 + .endm
17735 +
17736 + incdec_return inc add adc
17737 + incdec_return dec sub sbb
17738 ++incdec_return inc add adc _unchecked
17739 ++incdec_return dec sub sbb _unchecked
17740 +
17741 + ENTRY(atomic64_dec_if_positive_cx8)
17742 + CFI_STARTPROC
17743 +@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
17744 + movl %edx, %ecx
17745 + subl $1, %ebx
17746 + sbb $0, %ecx
17747 ++
17748 ++#ifdef CONFIG_PAX_REFCOUNT
17749 ++ into
17750 ++1234:
17751 ++ _ASM_EXTABLE(1234b, 2f)
17752 ++#endif
17753 ++
17754 + js 2f
17755 + LOCK_PREFIX
17756 + cmpxchg8b (%esi)
17757 +@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
17758 + movl %ebx, %eax
17759 + movl %ecx, %edx
17760 + RESTORE ebx
17761 ++ pax_force_retaddr
17762 + ret
17763 + CFI_ENDPROC
17764 + ENDPROC(atomic64_dec_if_positive_cx8)
17765 +@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
17766 + movl %edx, %ecx
17767 + addl %esi, %ebx
17768 + adcl %edi, %ecx
17769 ++
17770 ++#ifdef CONFIG_PAX_REFCOUNT
17771 ++ into
17772 ++1234:
17773 ++ _ASM_EXTABLE(1234b, 3f)
17774 ++#endif
17775 ++
17776 + LOCK_PREFIX
17777 + cmpxchg8b (%ebp)
17778 + jne 1b
17779 +@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
17780 + CFI_ADJUST_CFA_OFFSET -8
17781 + RESTORE ebx
17782 + RESTORE ebp
17783 ++ pax_force_retaddr
17784 + ret
17785 + 4:
17786 + cmpl %edx, 4(%esp)
17787 +@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
17788 + movl %edx, %ecx
17789 + addl $1, %ebx
17790 + adcl $0, %ecx
17791 ++
17792 ++#ifdef CONFIG_PAX_REFCOUNT
17793 ++ into
17794 ++1234:
17795 ++ _ASM_EXTABLE(1234b, 3f)
17796 ++#endif
17797 ++
17798 + LOCK_PREFIX
17799 + cmpxchg8b (%esi)
17800 + jne 1b
17801 +@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
17802 + movl $1, %eax
17803 + 3:
17804 + RESTORE ebx
17805 ++ pax_force_retaddr
17806 + ret
17807 + 4:
17808 + testl %edx, %edx
17809 +diff -urNp linux-3.1.1/arch/x86/lib/checksum_32.S linux-3.1.1/arch/x86/lib/checksum_32.S
17810 +--- linux-3.1.1/arch/x86/lib/checksum_32.S 2011-11-11 15:19:27.000000000 -0500
17811 ++++ linux-3.1.1/arch/x86/lib/checksum_32.S 2011-11-16 18:39:07.000000000 -0500
17812 +@@ -28,7 +28,8 @@
17813 + #include <linux/linkage.h>
17814 + #include <asm/dwarf2.h>
17815 + #include <asm/errno.h>
17816 +-
17817 ++#include <asm/segment.h>
17818 ++
17819 + /*
17820 + * computes a partial checksum, e.g. for TCP/UDP fragments
17821 + */
17822 +@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
17823 +
17824 + #define ARGBASE 16
17825 + #define FP 12
17826 +-
17827 +-ENTRY(csum_partial_copy_generic)
17828 ++
17829 ++ENTRY(csum_partial_copy_generic_to_user)
17830 + CFI_STARTPROC
17831 ++
17832 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
17833 ++ pushl_cfi %gs
17834 ++ popl_cfi %es
17835 ++ jmp csum_partial_copy_generic
17836 ++#endif
17837 ++
17838 ++ENTRY(csum_partial_copy_generic_from_user)
17839 ++
17840 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
17841 ++ pushl_cfi %gs
17842 ++ popl_cfi %ds
17843 ++#endif
17844 ++
17845 ++ENTRY(csum_partial_copy_generic)
17846 + subl $4,%esp
17847 + CFI_ADJUST_CFA_OFFSET 4
17848 + pushl_cfi %edi
17849 +@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
17850 + jmp 4f
17851 + SRC(1: movw (%esi), %bx )
17852 + addl $2, %esi
17853 +-DST( movw %bx, (%edi) )
17854 ++DST( movw %bx, %es:(%edi) )
17855 + addl $2, %edi
17856 + addw %bx, %ax
17857 + adcl $0, %eax
17858 +@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
17859 + SRC(1: movl (%esi), %ebx )
17860 + SRC( movl 4(%esi), %edx )
17861 + adcl %ebx, %eax
17862 +-DST( movl %ebx, (%edi) )
17863 ++DST( movl %ebx, %es:(%edi) )
17864 + adcl %edx, %eax
17865 +-DST( movl %edx, 4(%edi) )
17866 ++DST( movl %edx, %es:4(%edi) )
17867 +
17868 + SRC( movl 8(%esi), %ebx )
17869 + SRC( movl 12(%esi), %edx )
17870 + adcl %ebx, %eax
17871 +-DST( movl %ebx, 8(%edi) )
17872 ++DST( movl %ebx, %es:8(%edi) )
17873 + adcl %edx, %eax
17874 +-DST( movl %edx, 12(%edi) )
17875 ++DST( movl %edx, %es:12(%edi) )
17876 +
17877 + SRC( movl 16(%esi), %ebx )
17878 + SRC( movl 20(%esi), %edx )
17879 + adcl %ebx, %eax
17880 +-DST( movl %ebx, 16(%edi) )
17881 ++DST( movl %ebx, %es:16(%edi) )
17882 + adcl %edx, %eax
17883 +-DST( movl %edx, 20(%edi) )
17884 ++DST( movl %edx, %es:20(%edi) )
17885 +
17886 + SRC( movl 24(%esi), %ebx )
17887 + SRC( movl 28(%esi), %edx )
17888 + adcl %ebx, %eax
17889 +-DST( movl %ebx, 24(%edi) )
17890 ++DST( movl %ebx, %es:24(%edi) )
17891 + adcl %edx, %eax
17892 +-DST( movl %edx, 28(%edi) )
17893 ++DST( movl %edx, %es:28(%edi) )
17894 +
17895 + lea 32(%esi), %esi
17896 + lea 32(%edi), %edi
17897 +@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
17898 + shrl $2, %edx # This clears CF
17899 + SRC(3: movl (%esi), %ebx )
17900 + adcl %ebx, %eax
17901 +-DST( movl %ebx, (%edi) )
17902 ++DST( movl %ebx, %es:(%edi) )
17903 + lea 4(%esi), %esi
17904 + lea 4(%edi), %edi
17905 + dec %edx
17906 +@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
17907 + jb 5f
17908 + SRC( movw (%esi), %cx )
17909 + leal 2(%esi), %esi
17910 +-DST( movw %cx, (%edi) )
17911 ++DST( movw %cx, %es:(%edi) )
17912 + leal 2(%edi), %edi
17913 + je 6f
17914 + shll $16,%ecx
17915 + SRC(5: movb (%esi), %cl )
17916 +-DST( movb %cl, (%edi) )
17917 ++DST( movb %cl, %es:(%edi) )
17918 + 6: addl %ecx, %eax
17919 + adcl $0, %eax
17920 + 7:
17921 +@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
17922 +
17923 + 6001:
17924 + movl ARGBASE+20(%esp), %ebx # src_err_ptr
17925 +- movl $-EFAULT, (%ebx)
17926 ++ movl $-EFAULT, %ss:(%ebx)
17927 +
17928 + # zero the complete destination - computing the rest
17929 + # is too much work
17930 +@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
17931 +
17932 + 6002:
17933 + movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17934 +- movl $-EFAULT,(%ebx)
17935 ++ movl $-EFAULT,%ss:(%ebx)
17936 + jmp 5000b
17937 +
17938 + .previous
17939 +
17940 ++ pushl_cfi %ss
17941 ++ popl_cfi %ds
17942 ++ pushl_cfi %ss
17943 ++ popl_cfi %es
17944 + popl_cfi %ebx
17945 + CFI_RESTORE ebx
17946 + popl_cfi %esi
17947 +@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
17948 + popl_cfi %ecx # equivalent to addl $4,%esp
17949 + ret
17950 + CFI_ENDPROC
17951 +-ENDPROC(csum_partial_copy_generic)
17952 ++ENDPROC(csum_partial_copy_generic_to_user)
17953 +
17954 + #else
17955 +
17956 + /* Version for PentiumII/PPro */
17957 +
17958 + #define ROUND1(x) \
17959 ++ nop; nop; nop; \
17960 + SRC(movl x(%esi), %ebx ) ; \
17961 + addl %ebx, %eax ; \
17962 +- DST(movl %ebx, x(%edi) ) ;
17963 ++ DST(movl %ebx, %es:x(%edi)) ;
17964 +
17965 + #define ROUND(x) \
17966 ++ nop; nop; nop; \
17967 + SRC(movl x(%esi), %ebx ) ; \
17968 + adcl %ebx, %eax ; \
17969 +- DST(movl %ebx, x(%edi) ) ;
17970 ++ DST(movl %ebx, %es:x(%edi)) ;
17971 +
17972 + #define ARGBASE 12
17973 +-
17974 +-ENTRY(csum_partial_copy_generic)
17975 ++
17976 ++ENTRY(csum_partial_copy_generic_to_user)
17977 + CFI_STARTPROC
17978 ++
17979 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
17980 ++ pushl_cfi %gs
17981 ++ popl_cfi %es
17982 ++ jmp csum_partial_copy_generic
17983 ++#endif
17984 ++
17985 ++ENTRY(csum_partial_copy_generic_from_user)
17986 ++
17987 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
17988 ++ pushl_cfi %gs
17989 ++ popl_cfi %ds
17990 ++#endif
17991 ++
17992 ++ENTRY(csum_partial_copy_generic)
17993 + pushl_cfi %ebx
17994 + CFI_REL_OFFSET ebx, 0
17995 + pushl_cfi %edi
17996 +@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
17997 + subl %ebx, %edi
17998 + lea -1(%esi),%edx
17999 + andl $-32,%edx
18000 +- lea 3f(%ebx,%ebx), %ebx
18001 ++ lea 3f(%ebx,%ebx,2), %ebx
18002 + testl %esi, %esi
18003 + jmp *%ebx
18004 + 1: addl $64,%esi
18005 +@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
18006 + jb 5f
18007 + SRC( movw (%esi), %dx )
18008 + leal 2(%esi), %esi
18009 +-DST( movw %dx, (%edi) )
18010 ++DST( movw %dx, %es:(%edi) )
18011 + leal 2(%edi), %edi
18012 + je 6f
18013 + shll $16,%edx
18014 + 5:
18015 + SRC( movb (%esi), %dl )
18016 +-DST( movb %dl, (%edi) )
18017 ++DST( movb %dl, %es:(%edi) )
18018 + 6: addl %edx, %eax
18019 + adcl $0, %eax
18020 + 7:
18021 + .section .fixup, "ax"
18022 + 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
18023 +- movl $-EFAULT, (%ebx)
18024 ++ movl $-EFAULT, %ss:(%ebx)
18025 + # zero the complete destination (computing the rest is too much work)
18026 + movl ARGBASE+8(%esp),%edi # dst
18027 + movl ARGBASE+12(%esp),%ecx # len
18028 +@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
18029 + rep; stosb
18030 + jmp 7b
18031 + 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18032 +- movl $-EFAULT, (%ebx)
18033 ++ movl $-EFAULT, %ss:(%ebx)
18034 + jmp 7b
18035 + .previous
18036 +
18037 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
18038 ++ pushl_cfi %ss
18039 ++ popl_cfi %ds
18040 ++ pushl_cfi %ss
18041 ++ popl_cfi %es
18042 ++#endif
18043 ++
18044 + popl_cfi %esi
18045 + CFI_RESTORE esi
18046 + popl_cfi %edi
18047 +@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
18048 + CFI_RESTORE ebx
18049 + ret
18050 + CFI_ENDPROC
18051 +-ENDPROC(csum_partial_copy_generic)
18052 ++ENDPROC(csum_partial_copy_generic_to_user)
18053 +
18054 + #undef ROUND
18055 + #undef ROUND1
18056 +diff -urNp linux-3.1.1/arch/x86/lib/clear_page_64.S linux-3.1.1/arch/x86/lib/clear_page_64.S
18057 +--- linux-3.1.1/arch/x86/lib/clear_page_64.S 2011-11-11 15:19:27.000000000 -0500
18058 ++++ linux-3.1.1/arch/x86/lib/clear_page_64.S 2011-11-16 18:39:07.000000000 -0500
18059 +@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
18060 + movl $4096/8,%ecx
18061 + xorl %eax,%eax
18062 + rep stosq
18063 ++ pax_force_retaddr
18064 + ret
18065 + CFI_ENDPROC
18066 + ENDPROC(clear_page_c)
18067 +@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
18068 + movl $4096,%ecx
18069 + xorl %eax,%eax
18070 + rep stosb
18071 ++ pax_force_retaddr
18072 + ret
18073 + CFI_ENDPROC
18074 + ENDPROC(clear_page_c_e)
18075 +@@ -43,6 +45,7 @@ ENTRY(clear_page)
18076 + leaq 64(%rdi),%rdi
18077 + jnz .Lloop
18078 + nop
18079 ++ pax_force_retaddr
18080 + ret
18081 + CFI_ENDPROC
18082 + .Lclear_page_end:
18083 +@@ -58,7 +61,7 @@ ENDPROC(clear_page)
18084 +
18085 + #include <asm/cpufeature.h>
18086 +
18087 +- .section .altinstr_replacement,"ax"
18088 ++ .section .altinstr_replacement,"a"
18089 + 1: .byte 0xeb /* jmp <disp8> */
18090 + .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
18091 + 2: .byte 0xeb /* jmp <disp8> */
18092 +diff -urNp linux-3.1.1/arch/x86/lib/cmpxchg16b_emu.S linux-3.1.1/arch/x86/lib/cmpxchg16b_emu.S
18093 +--- linux-3.1.1/arch/x86/lib/cmpxchg16b_emu.S 2011-11-11 15:19:27.000000000 -0500
18094 ++++ linux-3.1.1/arch/x86/lib/cmpxchg16b_emu.S 2011-11-16 18:39:07.000000000 -0500
18095 +@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
18096 +
18097 + popf
18098 + mov $1, %al
18099 ++ pax_force_retaddr
18100 + ret
18101 +
18102 + not_same:
18103 + popf
18104 + xor %al,%al
18105 ++ pax_force_retaddr
18106 + ret
18107 +
18108 + CFI_ENDPROC
18109 +diff -urNp linux-3.1.1/arch/x86/lib/copy_page_64.S linux-3.1.1/arch/x86/lib/copy_page_64.S
18110 +--- linux-3.1.1/arch/x86/lib/copy_page_64.S 2011-11-11 15:19:27.000000000 -0500
18111 ++++ linux-3.1.1/arch/x86/lib/copy_page_64.S 2011-11-16 18:39:07.000000000 -0500
18112 +@@ -9,6 +9,7 @@ copy_page_c:
18113 + CFI_STARTPROC
18114 + movl $4096/8,%ecx
18115 + rep movsq
18116 ++ pax_force_retaddr
18117 + ret
18118 + CFI_ENDPROC
18119 + ENDPROC(copy_page_c)
18120 +@@ -95,6 +96,7 @@ ENTRY(copy_page)
18121 + CFI_RESTORE r13
18122 + addq $3*8,%rsp
18123 + CFI_ADJUST_CFA_OFFSET -3*8
18124 ++ pax_force_retaddr
18125 + ret
18126 + .Lcopy_page_end:
18127 + CFI_ENDPROC
18128 +@@ -105,7 +107,7 @@ ENDPROC(copy_page)
18129 +
18130 + #include <asm/cpufeature.h>
18131 +
18132 +- .section .altinstr_replacement,"ax"
18133 ++ .section .altinstr_replacement,"a"
18134 + 1: .byte 0xeb /* jmp <disp8> */
18135 + .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
18136 + 2:
18137 +diff -urNp linux-3.1.1/arch/x86/lib/copy_user_64.S linux-3.1.1/arch/x86/lib/copy_user_64.S
18138 +--- linux-3.1.1/arch/x86/lib/copy_user_64.S 2011-11-11 15:19:27.000000000 -0500
18139 ++++ linux-3.1.1/arch/x86/lib/copy_user_64.S 2011-11-16 18:39:07.000000000 -0500
18140 +@@ -16,6 +16,7 @@
18141 + #include <asm/thread_info.h>
18142 + #include <asm/cpufeature.h>
18143 + #include <asm/alternative-asm.h>
18144 ++#include <asm/pgtable.h>
18145 +
18146 + /*
18147 + * By placing feature2 after feature1 in altinstructions section, we logically
18148 +@@ -29,7 +30,7 @@
18149 + .byte 0xe9 /* 32bit jump */
18150 + .long \orig-1f /* by default jump to orig */
18151 + 1:
18152 +- .section .altinstr_replacement,"ax"
18153 ++ .section .altinstr_replacement,"a"
18154 + 2: .byte 0xe9 /* near jump with 32bit immediate */
18155 + .long \alt1-1b /* offset */ /* or alternatively to alt1 */
18156 + 3: .byte 0xe9 /* near jump with 32bit immediate */
18157 +@@ -71,47 +72,20 @@
18158 + #endif
18159 + .endm
18160 +
18161 +-/* Standard copy_to_user with segment limit checking */
18162 +-ENTRY(_copy_to_user)
18163 +- CFI_STARTPROC
18164 +- GET_THREAD_INFO(%rax)
18165 +- movq %rdi,%rcx
18166 +- addq %rdx,%rcx
18167 +- jc bad_to_user
18168 +- cmpq TI_addr_limit(%rax),%rcx
18169 +- ja bad_to_user
18170 +- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
18171 +- copy_user_generic_unrolled,copy_user_generic_string, \
18172 +- copy_user_enhanced_fast_string
18173 +- CFI_ENDPROC
18174 +-ENDPROC(_copy_to_user)
18175 +-
18176 +-/* Standard copy_from_user with segment limit checking */
18177 +-ENTRY(_copy_from_user)
18178 +- CFI_STARTPROC
18179 +- GET_THREAD_INFO(%rax)
18180 +- movq %rsi,%rcx
18181 +- addq %rdx,%rcx
18182 +- jc bad_from_user
18183 +- cmpq TI_addr_limit(%rax),%rcx
18184 +- ja bad_from_user
18185 +- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
18186 +- copy_user_generic_unrolled,copy_user_generic_string, \
18187 +- copy_user_enhanced_fast_string
18188 +- CFI_ENDPROC
18189 +-ENDPROC(_copy_from_user)
18190 +-
18191 + .section .fixup,"ax"
18192 + /* must zero dest */
18193 + ENTRY(bad_from_user)
18194 + bad_from_user:
18195 + CFI_STARTPROC
18196 ++ testl %edx,%edx
18197 ++ js bad_to_user
18198 + movl %edx,%ecx
18199 + xorl %eax,%eax
18200 + rep
18201 + stosb
18202 + bad_to_user:
18203 + movl %edx,%eax
18204 ++ pax_force_retaddr
18205 + ret
18206 + CFI_ENDPROC
18207 + ENDPROC(bad_from_user)
18208 +@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
18209 + decl %ecx
18210 + jnz 21b
18211 + 23: xor %eax,%eax
18212 ++ pax_force_retaddr
18213 + ret
18214 +
18215 + .section .fixup,"ax"
18216 +@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
18217 + 3: rep
18218 + movsb
18219 + 4: xorl %eax,%eax
18220 ++ pax_force_retaddr
18221 + ret
18222 +
18223 + .section .fixup,"ax"
18224 +@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
18225 + 1: rep
18226 + movsb
18227 + 2: xorl %eax,%eax
18228 ++ pax_force_retaddr
18229 + ret
18230 +
18231 + .section .fixup,"ax"
18232 +diff -urNp linux-3.1.1/arch/x86/lib/copy_user_nocache_64.S linux-3.1.1/arch/x86/lib/copy_user_nocache_64.S
18233 +--- linux-3.1.1/arch/x86/lib/copy_user_nocache_64.S 2011-11-11 15:19:27.000000000 -0500
18234 ++++ linux-3.1.1/arch/x86/lib/copy_user_nocache_64.S 2011-11-16 18:39:07.000000000 -0500
18235 +@@ -8,12 +8,14 @@
18236 +
18237 + #include <linux/linkage.h>
18238 + #include <asm/dwarf2.h>
18239 ++#include <asm/alternative-asm.h>
18240 +
18241 + #define FIX_ALIGNMENT 1
18242 +
18243 + #include <asm/current.h>
18244 + #include <asm/asm-offsets.h>
18245 + #include <asm/thread_info.h>
18246 ++#include <asm/pgtable.h>
18247 +
18248 + .macro ALIGN_DESTINATION
18249 + #ifdef FIX_ALIGNMENT
18250 +@@ -50,6 +52,15 @@
18251 + */
18252 + ENTRY(__copy_user_nocache)
18253 + CFI_STARTPROC
18254 ++
18255 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
18256 ++ mov $PAX_USER_SHADOW_BASE,%rcx
18257 ++ cmp %rcx,%rsi
18258 ++ jae 1f
18259 ++ add %rcx,%rsi
18260 ++1:
18261 ++#endif
18262 ++
18263 + cmpl $8,%edx
18264 + jb 20f /* less then 8 bytes, go to byte copy loop */
18265 + ALIGN_DESTINATION
18266 +@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
18267 + jnz 21b
18268 + 23: xorl %eax,%eax
18269 + sfence
18270 ++ pax_force_retaddr
18271 + ret
18272 +
18273 + .section .fixup,"ax"
18274 +diff -urNp linux-3.1.1/arch/x86/lib/csum-copy_64.S linux-3.1.1/arch/x86/lib/csum-copy_64.S
18275 +--- linux-3.1.1/arch/x86/lib/csum-copy_64.S 2011-11-11 15:19:27.000000000 -0500
18276 ++++ linux-3.1.1/arch/x86/lib/csum-copy_64.S 2011-11-16 18:39:07.000000000 -0500
18277 +@@ -8,6 +8,7 @@
18278 + #include <linux/linkage.h>
18279 + #include <asm/dwarf2.h>
18280 + #include <asm/errno.h>
18281 ++#include <asm/alternative-asm.h>
18282 +
18283 + /*
18284 + * Checksum copy with exception handling.
18285 +@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
18286 + CFI_RESTORE rbp
18287 + addq $7*8, %rsp
18288 + CFI_ADJUST_CFA_OFFSET -7*8
18289 ++ pax_force_retaddr
18290 + ret
18291 + CFI_RESTORE_STATE
18292 +
18293 +diff -urNp linux-3.1.1/arch/x86/lib/csum-wrappers_64.c linux-3.1.1/arch/x86/lib/csum-wrappers_64.c
18294 +--- linux-3.1.1/arch/x86/lib/csum-wrappers_64.c 2011-11-11 15:19:27.000000000 -0500
18295 ++++ linux-3.1.1/arch/x86/lib/csum-wrappers_64.c 2011-11-16 18:39:07.000000000 -0500
18296 +@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void _
18297 + len -= 2;
18298 + }
18299 + }
18300 +- isum = csum_partial_copy_generic((__force const void *)src,
18301 ++
18302 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
18303 ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18304 ++ src += PAX_USER_SHADOW_BASE;
18305 ++#endif
18306 ++
18307 ++ isum = csum_partial_copy_generic((const void __force_kernel *)src,
18308 + dst, len, isum, errp, NULL);
18309 + if (unlikely(*errp))
18310 + goto out_err;
18311 +@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *sr
18312 + }
18313 +
18314 + *errp = 0;
18315 +- return csum_partial_copy_generic(src, (void __force *)dst,
18316 ++
18317 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
18318 ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
18319 ++ dst += PAX_USER_SHADOW_BASE;
18320 ++#endif
18321 ++
18322 ++ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
18323 + len, isum, NULL, errp);
18324 + }
18325 + EXPORT_SYMBOL(csum_partial_copy_to_user);
18326 +diff -urNp linux-3.1.1/arch/x86/lib/getuser.S linux-3.1.1/arch/x86/lib/getuser.S
18327 +--- linux-3.1.1/arch/x86/lib/getuser.S 2011-11-11 15:19:27.000000000 -0500
18328 ++++ linux-3.1.1/arch/x86/lib/getuser.S 2011-11-16 18:39:07.000000000 -0500
18329 +@@ -33,15 +33,38 @@
18330 + #include <asm/asm-offsets.h>
18331 + #include <asm/thread_info.h>
18332 + #include <asm/asm.h>
18333 ++#include <asm/segment.h>
18334 ++#include <asm/pgtable.h>
18335 ++#include <asm/alternative-asm.h>
18336 ++
18337 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18338 ++#define __copyuser_seg gs;
18339 ++#else
18340 ++#define __copyuser_seg
18341 ++#endif
18342 +
18343 + .text
18344 + ENTRY(__get_user_1)
18345 + CFI_STARTPROC
18346 ++
18347 ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18348 + GET_THREAD_INFO(%_ASM_DX)
18349 + cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18350 + jae bad_get_user
18351 +-1: movzb (%_ASM_AX),%edx
18352 ++
18353 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18354 ++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18355 ++ cmp %_ASM_DX,%_ASM_AX
18356 ++ jae 1234f
18357 ++ add %_ASM_DX,%_ASM_AX
18358 ++1234:
18359 ++#endif
18360 ++
18361 ++#endif
18362 ++
18363 ++1: __copyuser_seg movzb (%_ASM_AX),%edx
18364 + xor %eax,%eax
18365 ++ pax_force_retaddr
18366 + ret
18367 + CFI_ENDPROC
18368 + ENDPROC(__get_user_1)
18369 +@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
18370 + ENTRY(__get_user_2)
18371 + CFI_STARTPROC
18372 + add $1,%_ASM_AX
18373 ++
18374 ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18375 + jc bad_get_user
18376 + GET_THREAD_INFO(%_ASM_DX)
18377 + cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18378 + jae bad_get_user
18379 +-2: movzwl -1(%_ASM_AX),%edx
18380 ++
18381 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18382 ++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18383 ++ cmp %_ASM_DX,%_ASM_AX
18384 ++ jae 1234f
18385 ++ add %_ASM_DX,%_ASM_AX
18386 ++1234:
18387 ++#endif
18388 ++
18389 ++#endif
18390 ++
18391 ++2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
18392 + xor %eax,%eax
18393 ++ pax_force_retaddr
18394 + ret
18395 + CFI_ENDPROC
18396 + ENDPROC(__get_user_2)
18397 +@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
18398 + ENTRY(__get_user_4)
18399 + CFI_STARTPROC
18400 + add $3,%_ASM_AX
18401 ++
18402 ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18403 + jc bad_get_user
18404 + GET_THREAD_INFO(%_ASM_DX)
18405 + cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18406 + jae bad_get_user
18407 +-3: mov -3(%_ASM_AX),%edx
18408 ++
18409 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18410 ++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18411 ++ cmp %_ASM_DX,%_ASM_AX
18412 ++ jae 1234f
18413 ++ add %_ASM_DX,%_ASM_AX
18414 ++1234:
18415 ++#endif
18416 ++
18417 ++#endif
18418 ++
18419 ++3: __copyuser_seg mov -3(%_ASM_AX),%edx
18420 + xor %eax,%eax
18421 ++ pax_force_retaddr
18422 + ret
18423 + CFI_ENDPROC
18424 + ENDPROC(__get_user_4)
18425 +@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
18426 + GET_THREAD_INFO(%_ASM_DX)
18427 + cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18428 + jae bad_get_user
18429 ++
18430 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
18431 ++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18432 ++ cmp %_ASM_DX,%_ASM_AX
18433 ++ jae 1234f
18434 ++ add %_ASM_DX,%_ASM_AX
18435 ++1234:
18436 ++#endif
18437 ++
18438 + 4: movq -7(%_ASM_AX),%_ASM_DX
18439 + xor %eax,%eax
18440 ++ pax_force_retaddr
18441 + ret
18442 + CFI_ENDPROC
18443 + ENDPROC(__get_user_8)
18444 +@@ -91,6 +152,7 @@ bad_get_user:
18445 + CFI_STARTPROC
18446 + xor %edx,%edx
18447 + mov $(-EFAULT),%_ASM_AX
18448 ++ pax_force_retaddr
18449 + ret
18450 + CFI_ENDPROC
18451 + END(bad_get_user)
18452 +diff -urNp linux-3.1.1/arch/x86/lib/insn.c linux-3.1.1/arch/x86/lib/insn.c
18453 +--- linux-3.1.1/arch/x86/lib/insn.c 2011-11-11 15:19:27.000000000 -0500
18454 ++++ linux-3.1.1/arch/x86/lib/insn.c 2011-11-16 18:39:07.000000000 -0500
18455 +@@ -21,6 +21,11 @@
18456 + #include <linux/string.h>
18457 + #include <asm/inat.h>
18458 + #include <asm/insn.h>
18459 ++#ifdef __KERNEL__
18460 ++#include <asm/pgtable_types.h>
18461 ++#else
18462 ++#define ktla_ktva(addr) addr
18463 ++#endif
18464 +
18465 + #define get_next(t, insn) \
18466 + ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
18467 +@@ -40,8 +45,8 @@
18468 + void insn_init(struct insn *insn, const void *kaddr, int x86_64)
18469 + {
18470 + memset(insn, 0, sizeof(*insn));
18471 +- insn->kaddr = kaddr;
18472 +- insn->next_byte = kaddr;
18473 ++ insn->kaddr = ktla_ktva(kaddr);
18474 ++ insn->next_byte = ktla_ktva(kaddr);
18475 + insn->x86_64 = x86_64 ? 1 : 0;
18476 + insn->opnd_bytes = 4;
18477 + if (x86_64)
18478 +diff -urNp linux-3.1.1/arch/x86/lib/iomap_copy_64.S linux-3.1.1/arch/x86/lib/iomap_copy_64.S
18479 +--- linux-3.1.1/arch/x86/lib/iomap_copy_64.S 2011-11-11 15:19:27.000000000 -0500
18480 ++++ linux-3.1.1/arch/x86/lib/iomap_copy_64.S 2011-11-16 18:39:07.000000000 -0500
18481 +@@ -17,6 +17,7 @@
18482 +
18483 + #include <linux/linkage.h>
18484 + #include <asm/dwarf2.h>
18485 ++#include <asm/alternative-asm.h>
18486 +
18487 + /*
18488 + * override generic version in lib/iomap_copy.c
18489 +@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
18490 + CFI_STARTPROC
18491 + movl %edx,%ecx
18492 + rep movsd
18493 ++ pax_force_retaddr
18494 + ret
18495 + CFI_ENDPROC
18496 + ENDPROC(__iowrite32_copy)
18497 +diff -urNp linux-3.1.1/arch/x86/lib/memcpy_64.S linux-3.1.1/arch/x86/lib/memcpy_64.S
18498 +--- linux-3.1.1/arch/x86/lib/memcpy_64.S 2011-11-11 15:19:27.000000000 -0500
18499 ++++ linux-3.1.1/arch/x86/lib/memcpy_64.S 2011-11-16 18:39:07.000000000 -0500
18500 +@@ -34,6 +34,7 @@
18501 + rep movsq
18502 + movl %edx, %ecx
18503 + rep movsb
18504 ++ pax_force_retaddr
18505 + ret
18506 + .Lmemcpy_e:
18507 + .previous
18508 +@@ -51,6 +52,7 @@
18509 +
18510 + movl %edx, %ecx
18511 + rep movsb
18512 ++ pax_force_retaddr
18513 + ret
18514 + .Lmemcpy_e_e:
18515 + .previous
18516 +@@ -141,6 +143,7 @@ ENTRY(memcpy)
18517 + movq %r9, 1*8(%rdi)
18518 + movq %r10, -2*8(%rdi, %rdx)
18519 + movq %r11, -1*8(%rdi, %rdx)
18520 ++ pax_force_retaddr
18521 + retq
18522 + .p2align 4
18523 + .Lless_16bytes:
18524 +@@ -153,6 +156,7 @@ ENTRY(memcpy)
18525 + movq -1*8(%rsi, %rdx), %r9
18526 + movq %r8, 0*8(%rdi)
18527 + movq %r9, -1*8(%rdi, %rdx)
18528 ++ pax_force_retaddr
18529 + retq
18530 + .p2align 4
18531 + .Lless_8bytes:
18532 +@@ -166,6 +170,7 @@ ENTRY(memcpy)
18533 + movl -4(%rsi, %rdx), %r8d
18534 + movl %ecx, (%rdi)
18535 + movl %r8d, -4(%rdi, %rdx)
18536 ++ pax_force_retaddr
18537 + retq
18538 + .p2align 4
18539 + .Lless_3bytes:
18540 +@@ -183,6 +188,7 @@ ENTRY(memcpy)
18541 + jnz .Lloop_1
18542 +
18543 + .Lend:
18544 ++ pax_force_retaddr
18545 + retq
18546 + CFI_ENDPROC
18547 + ENDPROC(memcpy)
18548 +diff -urNp linux-3.1.1/arch/x86/lib/memmove_64.S linux-3.1.1/arch/x86/lib/memmove_64.S
18549 +--- linux-3.1.1/arch/x86/lib/memmove_64.S 2011-11-11 15:19:27.000000000 -0500
18550 ++++ linux-3.1.1/arch/x86/lib/memmove_64.S 2011-11-16 18:39:07.000000000 -0500
18551 +@@ -202,6 +202,7 @@ ENTRY(memmove)
18552 + movb (%rsi), %r11b
18553 + movb %r11b, (%rdi)
18554 + 13:
18555 ++ pax_force_retaddr
18556 + retq
18557 + CFI_ENDPROC
18558 +
18559 +@@ -210,6 +211,7 @@ ENTRY(memmove)
18560 + /* Forward moving data. */
18561 + movq %rdx, %rcx
18562 + rep movsb
18563 ++ pax_force_retaddr
18564 + retq
18565 + .Lmemmove_end_forward_efs:
18566 + .previous
18567 +diff -urNp linux-3.1.1/arch/x86/lib/memset_64.S linux-3.1.1/arch/x86/lib/memset_64.S
18568 +--- linux-3.1.1/arch/x86/lib/memset_64.S 2011-11-11 15:19:27.000000000 -0500
18569 ++++ linux-3.1.1/arch/x86/lib/memset_64.S 2011-11-16 18:39:07.000000000 -0500
18570 +@@ -31,6 +31,7 @@
18571 + movl %r8d,%ecx
18572 + rep stosb
18573 + movq %r9,%rax
18574 ++ pax_force_retaddr
18575 + ret
18576 + .Lmemset_e:
18577 + .previous
18578 +@@ -53,6 +54,7 @@
18579 + movl %edx,%ecx
18580 + rep stosb
18581 + movq %r9,%rax
18582 ++ pax_force_retaddr
18583 + ret
18584 + .Lmemset_e_e:
18585 + .previous
18586 +@@ -121,6 +123,7 @@ ENTRY(__memset)
18587 +
18588 + .Lende:
18589 + movq %r10,%rax
18590 ++ pax_force_retaddr
18591 + ret
18592 +
18593 + CFI_RESTORE_STATE
18594 +diff -urNp linux-3.1.1/arch/x86/lib/mmx_32.c linux-3.1.1/arch/x86/lib/mmx_32.c
18595 +--- linux-3.1.1/arch/x86/lib/mmx_32.c 2011-11-11 15:19:27.000000000 -0500
18596 ++++ linux-3.1.1/arch/x86/lib/mmx_32.c 2011-11-16 18:39:07.000000000 -0500
18597 +@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
18598 + {
18599 + void *p;
18600 + int i;
18601 ++ unsigned long cr0;
18602 +
18603 + if (unlikely(in_interrupt()))
18604 + return __memcpy(to, from, len);
18605 +@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
18606 + kernel_fpu_begin();
18607 +
18608 + __asm__ __volatile__ (
18609 +- "1: prefetch (%0)\n" /* This set is 28 bytes */
18610 +- " prefetch 64(%0)\n"
18611 +- " prefetch 128(%0)\n"
18612 +- " prefetch 192(%0)\n"
18613 +- " prefetch 256(%0)\n"
18614 ++ "1: prefetch (%1)\n" /* This set is 28 bytes */
18615 ++ " prefetch 64(%1)\n"
18616 ++ " prefetch 128(%1)\n"
18617 ++ " prefetch 192(%1)\n"
18618 ++ " prefetch 256(%1)\n"
18619 + "2: \n"
18620 + ".section .fixup, \"ax\"\n"
18621 +- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18622 ++ "3: \n"
18623 ++
18624 ++#ifdef CONFIG_PAX_KERNEXEC
18625 ++ " movl %%cr0, %0\n"
18626 ++ " movl %0, %%eax\n"
18627 ++ " andl $0xFFFEFFFF, %%eax\n"
18628 ++ " movl %%eax, %%cr0\n"
18629 ++#endif
18630 ++
18631 ++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18632 ++
18633 ++#ifdef CONFIG_PAX_KERNEXEC
18634 ++ " movl %0, %%cr0\n"
18635 ++#endif
18636 ++
18637 + " jmp 2b\n"
18638 + ".previous\n"
18639 + _ASM_EXTABLE(1b, 3b)
18640 +- : : "r" (from));
18641 ++ : "=&r" (cr0) : "r" (from) : "ax");
18642 +
18643 + for ( ; i > 5; i--) {
18644 + __asm__ __volatile__ (
18645 +- "1: prefetch 320(%0)\n"
18646 +- "2: movq (%0), %%mm0\n"
18647 +- " movq 8(%0), %%mm1\n"
18648 +- " movq 16(%0), %%mm2\n"
18649 +- " movq 24(%0), %%mm3\n"
18650 +- " movq %%mm0, (%1)\n"
18651 +- " movq %%mm1, 8(%1)\n"
18652 +- " movq %%mm2, 16(%1)\n"
18653 +- " movq %%mm3, 24(%1)\n"
18654 +- " movq 32(%0), %%mm0\n"
18655 +- " movq 40(%0), %%mm1\n"
18656 +- " movq 48(%0), %%mm2\n"
18657 +- " movq 56(%0), %%mm3\n"
18658 +- " movq %%mm0, 32(%1)\n"
18659 +- " movq %%mm1, 40(%1)\n"
18660 +- " movq %%mm2, 48(%1)\n"
18661 +- " movq %%mm3, 56(%1)\n"
18662 ++ "1: prefetch 320(%1)\n"
18663 ++ "2: movq (%1), %%mm0\n"
18664 ++ " movq 8(%1), %%mm1\n"
18665 ++ " movq 16(%1), %%mm2\n"
18666 ++ " movq 24(%1), %%mm3\n"
18667 ++ " movq %%mm0, (%2)\n"
18668 ++ " movq %%mm1, 8(%2)\n"
18669 ++ " movq %%mm2, 16(%2)\n"
18670 ++ " movq %%mm3, 24(%2)\n"
18671 ++ " movq 32(%1), %%mm0\n"
18672 ++ " movq 40(%1), %%mm1\n"
18673 ++ " movq 48(%1), %%mm2\n"
18674 ++ " movq 56(%1), %%mm3\n"
18675 ++ " movq %%mm0, 32(%2)\n"
18676 ++ " movq %%mm1, 40(%2)\n"
18677 ++ " movq %%mm2, 48(%2)\n"
18678 ++ " movq %%mm3, 56(%2)\n"
18679 + ".section .fixup, \"ax\"\n"
18680 +- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18681 ++ "3:\n"
18682 ++
18683 ++#ifdef CONFIG_PAX_KERNEXEC
18684 ++ " movl %%cr0, %0\n"
18685 ++ " movl %0, %%eax\n"
18686 ++ " andl $0xFFFEFFFF, %%eax\n"
18687 ++ " movl %%eax, %%cr0\n"
18688 ++#endif
18689 ++
18690 ++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18691 ++
18692 ++#ifdef CONFIG_PAX_KERNEXEC
18693 ++ " movl %0, %%cr0\n"
18694 ++#endif
18695 ++
18696 + " jmp 2b\n"
18697 + ".previous\n"
18698 + _ASM_EXTABLE(1b, 3b)
18699 +- : : "r" (from), "r" (to) : "memory");
18700 ++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18701 +
18702 + from += 64;
18703 + to += 64;
18704 +@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
18705 + static void fast_copy_page(void *to, void *from)
18706 + {
18707 + int i;
18708 ++ unsigned long cr0;
18709 +
18710 + kernel_fpu_begin();
18711 +
18712 +@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
18713 + * but that is for later. -AV
18714 + */
18715 + __asm__ __volatile__(
18716 +- "1: prefetch (%0)\n"
18717 +- " prefetch 64(%0)\n"
18718 +- " prefetch 128(%0)\n"
18719 +- " prefetch 192(%0)\n"
18720 +- " prefetch 256(%0)\n"
18721 ++ "1: prefetch (%1)\n"
18722 ++ " prefetch 64(%1)\n"
18723 ++ " prefetch 128(%1)\n"
18724 ++ " prefetch 192(%1)\n"
18725 ++ " prefetch 256(%1)\n"
18726 + "2: \n"
18727 + ".section .fixup, \"ax\"\n"
18728 +- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18729 ++ "3: \n"
18730 ++
18731 ++#ifdef CONFIG_PAX_KERNEXEC
18732 ++ " movl %%cr0, %0\n"
18733 ++ " movl %0, %%eax\n"
18734 ++ " andl $0xFFFEFFFF, %%eax\n"
18735 ++ " movl %%eax, %%cr0\n"
18736 ++#endif
18737 ++
18738 ++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18739 ++
18740 ++#ifdef CONFIG_PAX_KERNEXEC
18741 ++ " movl %0, %%cr0\n"
18742 ++#endif
18743 ++
18744 + " jmp 2b\n"
18745 + ".previous\n"
18746 +- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18747 ++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18748 +
18749 + for (i = 0; i < (4096-320)/64; i++) {
18750 + __asm__ __volatile__ (
18751 +- "1: prefetch 320(%0)\n"
18752 +- "2: movq (%0), %%mm0\n"
18753 +- " movntq %%mm0, (%1)\n"
18754 +- " movq 8(%0), %%mm1\n"
18755 +- " movntq %%mm1, 8(%1)\n"
18756 +- " movq 16(%0), %%mm2\n"
18757 +- " movntq %%mm2, 16(%1)\n"
18758 +- " movq 24(%0), %%mm3\n"
18759 +- " movntq %%mm3, 24(%1)\n"
18760 +- " movq 32(%0), %%mm4\n"
18761 +- " movntq %%mm4, 32(%1)\n"
18762 +- " movq 40(%0), %%mm5\n"
18763 +- " movntq %%mm5, 40(%1)\n"
18764 +- " movq 48(%0), %%mm6\n"
18765 +- " movntq %%mm6, 48(%1)\n"
18766 +- " movq 56(%0), %%mm7\n"
18767 +- " movntq %%mm7, 56(%1)\n"
18768 ++ "1: prefetch 320(%1)\n"
18769 ++ "2: movq (%1), %%mm0\n"
18770 ++ " movntq %%mm0, (%2)\n"
18771 ++ " movq 8(%1), %%mm1\n"
18772 ++ " movntq %%mm1, 8(%2)\n"
18773 ++ " movq 16(%1), %%mm2\n"
18774 ++ " movntq %%mm2, 16(%2)\n"
18775 ++ " movq 24(%1), %%mm3\n"
18776 ++ " movntq %%mm3, 24(%2)\n"
18777 ++ " movq 32(%1), %%mm4\n"
18778 ++ " movntq %%mm4, 32(%2)\n"
18779 ++ " movq 40(%1), %%mm5\n"
18780 ++ " movntq %%mm5, 40(%2)\n"
18781 ++ " movq 48(%1), %%mm6\n"
18782 ++ " movntq %%mm6, 48(%2)\n"
18783 ++ " movq 56(%1), %%mm7\n"
18784 ++ " movntq %%mm7, 56(%2)\n"
18785 + ".section .fixup, \"ax\"\n"
18786 +- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18787 ++ "3:\n"
18788 ++
18789 ++#ifdef CONFIG_PAX_KERNEXEC
18790 ++ " movl %%cr0, %0\n"
18791 ++ " movl %0, %%eax\n"
18792 ++ " andl $0xFFFEFFFF, %%eax\n"
18793 ++ " movl %%eax, %%cr0\n"
18794 ++#endif
18795 ++
18796 ++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18797 ++
18798 ++#ifdef CONFIG_PAX_KERNEXEC
18799 ++ " movl %0, %%cr0\n"
18800 ++#endif
18801 ++
18802 + " jmp 2b\n"
18803 + ".previous\n"
18804 +- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
18805 ++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18806 +
18807 + from += 64;
18808 + to += 64;
18809 +@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
18810 + static void fast_copy_page(void *to, void *from)
18811 + {
18812 + int i;
18813 ++ unsigned long cr0;
18814 +
18815 + kernel_fpu_begin();
18816 +
18817 + __asm__ __volatile__ (
18818 +- "1: prefetch (%0)\n"
18819 +- " prefetch 64(%0)\n"
18820 +- " prefetch 128(%0)\n"
18821 +- " prefetch 192(%0)\n"
18822 +- " prefetch 256(%0)\n"
18823 ++ "1: prefetch (%1)\n"
18824 ++ " prefetch 64(%1)\n"
18825 ++ " prefetch 128(%1)\n"
18826 ++ " prefetch 192(%1)\n"
18827 ++ " prefetch 256(%1)\n"
18828 + "2: \n"
18829 + ".section .fixup, \"ax\"\n"
18830 +- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18831 ++ "3: \n"
18832 ++
18833 ++#ifdef CONFIG_PAX_KERNEXEC
18834 ++ " movl %%cr0, %0\n"
18835 ++ " movl %0, %%eax\n"
18836 ++ " andl $0xFFFEFFFF, %%eax\n"
18837 ++ " movl %%eax, %%cr0\n"
18838 ++#endif
18839 ++
18840 ++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18841 ++
18842 ++#ifdef CONFIG_PAX_KERNEXEC
18843 ++ " movl %0, %%cr0\n"
18844 ++#endif
18845 ++
18846 + " jmp 2b\n"
18847 + ".previous\n"
18848 +- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18849 ++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18850 +
18851 + for (i = 0; i < 4096/64; i++) {
18852 + __asm__ __volatile__ (
18853 +- "1: prefetch 320(%0)\n"
18854 +- "2: movq (%0), %%mm0\n"
18855 +- " movq 8(%0), %%mm1\n"
18856 +- " movq 16(%0), %%mm2\n"
18857 +- " movq 24(%0), %%mm3\n"
18858 +- " movq %%mm0, (%1)\n"
18859 +- " movq %%mm1, 8(%1)\n"
18860 +- " movq %%mm2, 16(%1)\n"
18861 +- " movq %%mm3, 24(%1)\n"
18862 +- " movq 32(%0), %%mm0\n"
18863 +- " movq 40(%0), %%mm1\n"
18864 +- " movq 48(%0), %%mm2\n"
18865 +- " movq 56(%0), %%mm3\n"
18866 +- " movq %%mm0, 32(%1)\n"
18867 +- " movq %%mm1, 40(%1)\n"
18868 +- " movq %%mm2, 48(%1)\n"
18869 +- " movq %%mm3, 56(%1)\n"
18870 ++ "1: prefetch 320(%1)\n"
18871 ++ "2: movq (%1), %%mm0\n"
18872 ++ " movq 8(%1), %%mm1\n"
18873 ++ " movq 16(%1), %%mm2\n"
18874 ++ " movq 24(%1), %%mm3\n"
18875 ++ " movq %%mm0, (%2)\n"
18876 ++ " movq %%mm1, 8(%2)\n"
18877 ++ " movq %%mm2, 16(%2)\n"
18878 ++ " movq %%mm3, 24(%2)\n"
18879 ++ " movq 32(%1), %%mm0\n"
18880 ++ " movq 40(%1), %%mm1\n"
18881 ++ " movq 48(%1), %%mm2\n"
18882 ++ " movq 56(%1), %%mm3\n"
18883 ++ " movq %%mm0, 32(%2)\n"
18884 ++ " movq %%mm1, 40(%2)\n"
18885 ++ " movq %%mm2, 48(%2)\n"
18886 ++ " movq %%mm3, 56(%2)\n"
18887 + ".section .fixup, \"ax\"\n"
18888 +- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18889 ++ "3:\n"
18890 ++
18891 ++#ifdef CONFIG_PAX_KERNEXEC
18892 ++ " movl %%cr0, %0\n"
18893 ++ " movl %0, %%eax\n"
18894 ++ " andl $0xFFFEFFFF, %%eax\n"
18895 ++ " movl %%eax, %%cr0\n"
18896 ++#endif
18897 ++
18898 ++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18899 ++
18900 ++#ifdef CONFIG_PAX_KERNEXEC
18901 ++ " movl %0, %%cr0\n"
18902 ++#endif
18903 ++
18904 + " jmp 2b\n"
18905 + ".previous\n"
18906 + _ASM_EXTABLE(1b, 3b)
18907 +- : : "r" (from), "r" (to) : "memory");
18908 ++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18909 +
18910 + from += 64;
18911 + to += 64;
18912 +diff -urNp linux-3.1.1/arch/x86/lib/msr-reg.S linux-3.1.1/arch/x86/lib/msr-reg.S
18913 +--- linux-3.1.1/arch/x86/lib/msr-reg.S 2011-11-11 15:19:27.000000000 -0500
18914 ++++ linux-3.1.1/arch/x86/lib/msr-reg.S 2011-11-16 18:39:07.000000000 -0500
18915 +@@ -3,6 +3,7 @@
18916 + #include <asm/dwarf2.h>
18917 + #include <asm/asm.h>
18918 + #include <asm/msr.h>
18919 ++#include <asm/alternative-asm.h>
18920 +
18921 + #ifdef CONFIG_X86_64
18922 + /*
18923 +@@ -37,6 +38,7 @@ ENTRY(native_\op\()_safe_regs)
18924 + movl %edi, 28(%r10)
18925 + popq_cfi %rbp
18926 + popq_cfi %rbx
18927 ++ pax_force_retaddr
18928 + ret
18929 + 3:
18930 + CFI_RESTORE_STATE
18931 +diff -urNp linux-3.1.1/arch/x86/lib/putuser.S linux-3.1.1/arch/x86/lib/putuser.S
18932 +--- linux-3.1.1/arch/x86/lib/putuser.S 2011-11-11 15:19:27.000000000 -0500
18933 ++++ linux-3.1.1/arch/x86/lib/putuser.S 2011-11-16 18:39:07.000000000 -0500
18934 +@@ -15,7 +15,9 @@
18935 + #include <asm/thread_info.h>
18936 + #include <asm/errno.h>
18937 + #include <asm/asm.h>
18938 +-
18939 ++#include <asm/segment.h>
18940 ++#include <asm/pgtable.h>
18941 ++#include <asm/alternative-asm.h>
18942 +
18943 + /*
18944 + * __put_user_X
18945 +@@ -29,52 +31,119 @@
18946 + * as they get called from within inline assembly.
18947 + */
18948 +
18949 +-#define ENTER CFI_STARTPROC ; \
18950 +- GET_THREAD_INFO(%_ASM_BX)
18951 +-#define EXIT ret ; \
18952 ++#define ENTER CFI_STARTPROC
18953 ++#define EXIT pax_force_retaddr; ret ; \
18954 + CFI_ENDPROC
18955 +
18956 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18957 ++#define _DEST %_ASM_CX,%_ASM_BX
18958 ++#else
18959 ++#define _DEST %_ASM_CX
18960 ++#endif
18961 ++
18962 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18963 ++#define __copyuser_seg gs;
18964 ++#else
18965 ++#define __copyuser_seg
18966 ++#endif
18967 ++
18968 + .text
18969 + ENTRY(__put_user_1)
18970 + ENTER
18971 ++
18972 ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18973 ++ GET_THREAD_INFO(%_ASM_BX)
18974 + cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
18975 + jae bad_put_user
18976 +-1: movb %al,(%_ASM_CX)
18977 ++
18978 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18979 ++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18980 ++ cmp %_ASM_BX,%_ASM_CX
18981 ++ jb 1234f
18982 ++ xor %ebx,%ebx
18983 ++1234:
18984 ++#endif
18985 ++
18986 ++#endif
18987 ++
18988 ++1: __copyuser_seg movb %al,(_DEST)
18989 + xor %eax,%eax
18990 + EXIT
18991 + ENDPROC(__put_user_1)
18992 +
18993 + ENTRY(__put_user_2)
18994 + ENTER
18995 ++
18996 ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18997 ++ GET_THREAD_INFO(%_ASM_BX)
18998 + mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18999 + sub $1,%_ASM_BX
19000 + cmp %_ASM_BX,%_ASM_CX
19001 + jae bad_put_user
19002 +-2: movw %ax,(%_ASM_CX)
19003 ++
19004 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19005 ++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19006 ++ cmp %_ASM_BX,%_ASM_CX
19007 ++ jb 1234f
19008 ++ xor %ebx,%ebx
19009 ++1234:
19010 ++#endif
19011 ++
19012 ++#endif
19013 ++
19014 ++2: __copyuser_seg movw %ax,(_DEST)
19015 + xor %eax,%eax
19016 + EXIT
19017 + ENDPROC(__put_user_2)
19018 +
19019 + ENTRY(__put_user_4)
19020 + ENTER
19021 ++
19022 ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19023 ++ GET_THREAD_INFO(%_ASM_BX)
19024 + mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19025 + sub $3,%_ASM_BX
19026 + cmp %_ASM_BX,%_ASM_CX
19027 + jae bad_put_user
19028 +-3: movl %eax,(%_ASM_CX)
19029 ++
19030 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19031 ++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19032 ++ cmp %_ASM_BX,%_ASM_CX
19033 ++ jb 1234f
19034 ++ xor %ebx,%ebx
19035 ++1234:
19036 ++#endif
19037 ++
19038 ++#endif
19039 ++
19040 ++3: __copyuser_seg movl %eax,(_DEST)
19041 + xor %eax,%eax
19042 + EXIT
19043 + ENDPROC(__put_user_4)
19044 +
19045 + ENTRY(__put_user_8)
19046 + ENTER
19047 ++
19048 ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19049 ++ GET_THREAD_INFO(%_ASM_BX)
19050 + mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19051 + sub $7,%_ASM_BX
19052 + cmp %_ASM_BX,%_ASM_CX
19053 + jae bad_put_user
19054 +-4: mov %_ASM_AX,(%_ASM_CX)
19055 ++
19056 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19057 ++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19058 ++ cmp %_ASM_BX,%_ASM_CX
19059 ++ jb 1234f
19060 ++ xor %ebx,%ebx
19061 ++1234:
19062 ++#endif
19063 ++
19064 ++#endif
19065 ++
19066 ++4: __copyuser_seg mov %_ASM_AX,(_DEST)
19067 + #ifdef CONFIG_X86_32
19068 +-5: movl %edx,4(%_ASM_CX)
19069 ++5: __copyuser_seg movl %edx,4(_DEST)
19070 + #endif
19071 + xor %eax,%eax
19072 + EXIT
19073 +diff -urNp linux-3.1.1/arch/x86/lib/rwlock.S linux-3.1.1/arch/x86/lib/rwlock.S
19074 +--- linux-3.1.1/arch/x86/lib/rwlock.S 2011-11-11 15:19:27.000000000 -0500
19075 ++++ linux-3.1.1/arch/x86/lib/rwlock.S 2011-11-16 18:39:07.000000000 -0500
19076 +@@ -23,6 +23,7 @@ ENTRY(__write_lock_failed)
19077 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
19078 + jnz 0b
19079 + ENDFRAME
19080 ++ pax_force_retaddr
19081 + ret
19082 + CFI_ENDPROC
19083 + END(__write_lock_failed)
19084 +@@ -39,6 +40,7 @@ ENTRY(__read_lock_failed)
19085 + READ_LOCK_SIZE(dec) (%__lock_ptr)
19086 + js 0b
19087 + ENDFRAME
19088 ++ pax_force_retaddr
19089 + ret
19090 + CFI_ENDPROC
19091 + END(__read_lock_failed)
19092 +diff -urNp linux-3.1.1/arch/x86/lib/rwsem.S linux-3.1.1/arch/x86/lib/rwsem.S
19093 +--- linux-3.1.1/arch/x86/lib/rwsem.S 2011-11-11 15:19:27.000000000 -0500
19094 ++++ linux-3.1.1/arch/x86/lib/rwsem.S 2011-11-16 18:39:07.000000000 -0500
19095 +@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
19096 + __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
19097 + CFI_RESTORE __ASM_REG(dx)
19098 + restore_common_regs
19099 ++ pax_force_retaddr
19100 + ret
19101 + CFI_ENDPROC
19102 + ENDPROC(call_rwsem_down_read_failed)
19103 +@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
19104 + movq %rax,%rdi
19105 + call rwsem_down_write_failed
19106 + restore_common_regs
19107 ++ pax_force_retaddr
19108 + ret
19109 + CFI_ENDPROC
19110 + ENDPROC(call_rwsem_down_write_failed)
19111 +@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
19112 + movq %rax,%rdi
19113 + call rwsem_wake
19114 + restore_common_regs
19115 +-1: ret
19116 ++1: pax_force_retaddr
19117 ++ ret
19118 + CFI_ENDPROC
19119 + ENDPROC(call_rwsem_wake)
19120 +
19121 +@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
19122 + __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
19123 + CFI_RESTORE __ASM_REG(dx)
19124 + restore_common_regs
19125 ++ pax_force_retaddr
19126 + ret
19127 + CFI_ENDPROC
19128 + ENDPROC(call_rwsem_downgrade_wake)
19129 +diff -urNp linux-3.1.1/arch/x86/lib/thunk_64.S linux-3.1.1/arch/x86/lib/thunk_64.S
19130 +--- linux-3.1.1/arch/x86/lib/thunk_64.S 2011-11-11 15:19:27.000000000 -0500
19131 ++++ linux-3.1.1/arch/x86/lib/thunk_64.S 2011-11-16 18:39:07.000000000 -0500
19132 +@@ -8,6 +8,7 @@
19133 + #include <linux/linkage.h>
19134 + #include <asm/dwarf2.h>
19135 + #include <asm/calling.h>
19136 ++#include <asm/alternative-asm.h>
19137 +
19138 + /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
19139 + .macro THUNK name, func, put_ret_addr_in_rdi=0
19140 +@@ -41,5 +42,6 @@
19141 + SAVE_ARGS
19142 + restore:
19143 + RESTORE_ARGS
19144 ++ pax_force_retaddr
19145 + ret
19146 + CFI_ENDPROC
19147 +diff -urNp linux-3.1.1/arch/x86/lib/usercopy_32.c linux-3.1.1/arch/x86/lib/usercopy_32.c
19148 +--- linux-3.1.1/arch/x86/lib/usercopy_32.c 2011-11-11 15:19:27.000000000 -0500
19149 ++++ linux-3.1.1/arch/x86/lib/usercopy_32.c 2011-11-16 18:39:07.000000000 -0500
19150 +@@ -43,7 +43,7 @@ do { \
19151 + __asm__ __volatile__( \
19152 + " testl %1,%1\n" \
19153 + " jz 2f\n" \
19154 +- "0: lodsb\n" \
19155 ++ "0: "__copyuser_seg"lodsb\n" \
19156 + " stosb\n" \
19157 + " testb %%al,%%al\n" \
19158 + " jz 1f\n" \
19159 +@@ -128,10 +128,12 @@ do { \
19160 + int __d0; \
19161 + might_fault(); \
19162 + __asm__ __volatile__( \
19163 ++ __COPYUSER_SET_ES \
19164 + "0: rep; stosl\n" \
19165 + " movl %2,%0\n" \
19166 + "1: rep; stosb\n" \
19167 + "2:\n" \
19168 ++ __COPYUSER_RESTORE_ES \
19169 + ".section .fixup,\"ax\"\n" \
19170 + "3: lea 0(%2,%0,4),%0\n" \
19171 + " jmp 2b\n" \
19172 +@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19173 + might_fault();
19174 +
19175 + __asm__ __volatile__(
19176 ++ __COPYUSER_SET_ES
19177 + " testl %0, %0\n"
19178 + " jz 3f\n"
19179 + " andl %0,%%ecx\n"
19180 +@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19181 + " subl %%ecx,%0\n"
19182 + " addl %0,%%eax\n"
19183 + "1:\n"
19184 ++ __COPYUSER_RESTORE_ES
19185 + ".section .fixup,\"ax\"\n"
19186 + "2: xorl %%eax,%%eax\n"
19187 + " jmp 1b\n"
19188 +@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19189 +
19190 + #ifdef CONFIG_X86_INTEL_USERCOPY
19191 + static unsigned long
19192 +-__copy_user_intel(void __user *to, const void *from, unsigned long size)
19193 ++__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19194 + {
19195 + int d0, d1;
19196 + __asm__ __volatile__(
19197 +@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19198 + " .align 2,0x90\n"
19199 + "3: movl 0(%4), %%eax\n"
19200 + "4: movl 4(%4), %%edx\n"
19201 +- "5: movl %%eax, 0(%3)\n"
19202 +- "6: movl %%edx, 4(%3)\n"
19203 ++ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19204 ++ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19205 + "7: movl 8(%4), %%eax\n"
19206 + "8: movl 12(%4),%%edx\n"
19207 +- "9: movl %%eax, 8(%3)\n"
19208 +- "10: movl %%edx, 12(%3)\n"
19209 ++ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19210 ++ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19211 + "11: movl 16(%4), %%eax\n"
19212 + "12: movl 20(%4), %%edx\n"
19213 +- "13: movl %%eax, 16(%3)\n"
19214 +- "14: movl %%edx, 20(%3)\n"
19215 ++ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19216 ++ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19217 + "15: movl 24(%4), %%eax\n"
19218 + "16: movl 28(%4), %%edx\n"
19219 +- "17: movl %%eax, 24(%3)\n"
19220 +- "18: movl %%edx, 28(%3)\n"
19221 ++ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19222 ++ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19223 + "19: movl 32(%4), %%eax\n"
19224 + "20: movl 36(%4), %%edx\n"
19225 +- "21: movl %%eax, 32(%3)\n"
19226 +- "22: movl %%edx, 36(%3)\n"
19227 ++ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19228 ++ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19229 + "23: movl 40(%4), %%eax\n"
19230 + "24: movl 44(%4), %%edx\n"
19231 +- "25: movl %%eax, 40(%3)\n"
19232 +- "26: movl %%edx, 44(%3)\n"
19233 ++ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19234 ++ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19235 + "27: movl 48(%4), %%eax\n"
19236 + "28: movl 52(%4), %%edx\n"
19237 +- "29: movl %%eax, 48(%3)\n"
19238 +- "30: movl %%edx, 52(%3)\n"
19239 ++ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19240 ++ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19241 + "31: movl 56(%4), %%eax\n"
19242 + "32: movl 60(%4), %%edx\n"
19243 +- "33: movl %%eax, 56(%3)\n"
19244 +- "34: movl %%edx, 60(%3)\n"
19245 ++ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19246 ++ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19247 + " addl $-64, %0\n"
19248 + " addl $64, %4\n"
19249 + " addl $64, %3\n"
19250 +@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19251 + " shrl $2, %0\n"
19252 + " andl $3, %%eax\n"
19253 + " cld\n"
19254 ++ __COPYUSER_SET_ES
19255 + "99: rep; movsl\n"
19256 + "36: movl %%eax, %0\n"
19257 + "37: rep; movsb\n"
19258 + "100:\n"
19259 ++ __COPYUSER_RESTORE_ES
19260 ++ ".section .fixup,\"ax\"\n"
19261 ++ "101: lea 0(%%eax,%0,4),%0\n"
19262 ++ " jmp 100b\n"
19263 ++ ".previous\n"
19264 ++ ".section __ex_table,\"a\"\n"
19265 ++ " .align 4\n"
19266 ++ " .long 1b,100b\n"
19267 ++ " .long 2b,100b\n"
19268 ++ " .long 3b,100b\n"
19269 ++ " .long 4b,100b\n"
19270 ++ " .long 5b,100b\n"
19271 ++ " .long 6b,100b\n"
19272 ++ " .long 7b,100b\n"
19273 ++ " .long 8b,100b\n"
19274 ++ " .long 9b,100b\n"
19275 ++ " .long 10b,100b\n"
19276 ++ " .long 11b,100b\n"
19277 ++ " .long 12b,100b\n"
19278 ++ " .long 13b,100b\n"
19279 ++ " .long 14b,100b\n"
19280 ++ " .long 15b,100b\n"
19281 ++ " .long 16b,100b\n"
19282 ++ " .long 17b,100b\n"
19283 ++ " .long 18b,100b\n"
19284 ++ " .long 19b,100b\n"
19285 ++ " .long 20b,100b\n"
19286 ++ " .long 21b,100b\n"
19287 ++ " .long 22b,100b\n"
19288 ++ " .long 23b,100b\n"
19289 ++ " .long 24b,100b\n"
19290 ++ " .long 25b,100b\n"
19291 ++ " .long 26b,100b\n"
19292 ++ " .long 27b,100b\n"
19293 ++ " .long 28b,100b\n"
19294 ++ " .long 29b,100b\n"
19295 ++ " .long 30b,100b\n"
19296 ++ " .long 31b,100b\n"
19297 ++ " .long 32b,100b\n"
19298 ++ " .long 33b,100b\n"
19299 ++ " .long 34b,100b\n"
19300 ++ " .long 35b,100b\n"
19301 ++ " .long 36b,100b\n"
19302 ++ " .long 37b,100b\n"
19303 ++ " .long 99b,101b\n"
19304 ++ ".previous"
19305 ++ : "=&c"(size), "=&D" (d0), "=&S" (d1)
19306 ++ : "1"(to), "2"(from), "0"(size)
19307 ++ : "eax", "edx", "memory");
19308 ++ return size;
19309 ++}
19310 ++
19311 ++static unsigned long
19312 ++__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
19313 ++{
19314 ++ int d0, d1;
19315 ++ __asm__ __volatile__(
19316 ++ " .align 2,0x90\n"
19317 ++ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
19318 ++ " cmpl $67, %0\n"
19319 ++ " jbe 3f\n"
19320 ++ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
19321 ++ " .align 2,0x90\n"
19322 ++ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
19323 ++ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
19324 ++ "5: movl %%eax, 0(%3)\n"
19325 ++ "6: movl %%edx, 4(%3)\n"
19326 ++ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
19327 ++ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
19328 ++ "9: movl %%eax, 8(%3)\n"
19329 ++ "10: movl %%edx, 12(%3)\n"
19330 ++ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
19331 ++ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
19332 ++ "13: movl %%eax, 16(%3)\n"
19333 ++ "14: movl %%edx, 20(%3)\n"
19334 ++ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
19335 ++ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
19336 ++ "17: movl %%eax, 24(%3)\n"
19337 ++ "18: movl %%edx, 28(%3)\n"
19338 ++ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
19339 ++ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
19340 ++ "21: movl %%eax, 32(%3)\n"
19341 ++ "22: movl %%edx, 36(%3)\n"
19342 ++ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
19343 ++ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
19344 ++ "25: movl %%eax, 40(%3)\n"
19345 ++ "26: movl %%edx, 44(%3)\n"
19346 ++ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
19347 ++ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
19348 ++ "29: movl %%eax, 48(%3)\n"
19349 ++ "30: movl %%edx, 52(%3)\n"
19350 ++ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
19351 ++ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
19352 ++ "33: movl %%eax, 56(%3)\n"
19353 ++ "34: movl %%edx, 60(%3)\n"
19354 ++ " addl $-64, %0\n"
19355 ++ " addl $64, %4\n"
19356 ++ " addl $64, %3\n"
19357 ++ " cmpl $63, %0\n"
19358 ++ " ja 1b\n"
19359 ++ "35: movl %0, %%eax\n"
19360 ++ " shrl $2, %0\n"
19361 ++ " andl $3, %%eax\n"
19362 ++ " cld\n"
19363 ++ "99: rep; "__copyuser_seg" movsl\n"
19364 ++ "36: movl %%eax, %0\n"
19365 ++ "37: rep; "__copyuser_seg" movsb\n"
19366 ++ "100:\n"
19367 + ".section .fixup,\"ax\"\n"
19368 + "101: lea 0(%%eax,%0,4),%0\n"
19369 + " jmp 100b\n"
19370 +@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
19371 + int d0, d1;
19372 + __asm__ __volatile__(
19373 + " .align 2,0x90\n"
19374 +- "0: movl 32(%4), %%eax\n"
19375 ++ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19376 + " cmpl $67, %0\n"
19377 + " jbe 2f\n"
19378 +- "1: movl 64(%4), %%eax\n"
19379 ++ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19380 + " .align 2,0x90\n"
19381 +- "2: movl 0(%4), %%eax\n"
19382 +- "21: movl 4(%4), %%edx\n"
19383 ++ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19384 ++ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19385 + " movl %%eax, 0(%3)\n"
19386 + " movl %%edx, 4(%3)\n"
19387 +- "3: movl 8(%4), %%eax\n"
19388 +- "31: movl 12(%4),%%edx\n"
19389 ++ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19390 ++ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19391 + " movl %%eax, 8(%3)\n"
19392 + " movl %%edx, 12(%3)\n"
19393 +- "4: movl 16(%4), %%eax\n"
19394 +- "41: movl 20(%4), %%edx\n"
19395 ++ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19396 ++ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19397 + " movl %%eax, 16(%3)\n"
19398 + " movl %%edx, 20(%3)\n"
19399 +- "10: movl 24(%4), %%eax\n"
19400 +- "51: movl 28(%4), %%edx\n"
19401 ++ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19402 ++ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19403 + " movl %%eax, 24(%3)\n"
19404 + " movl %%edx, 28(%3)\n"
19405 +- "11: movl 32(%4), %%eax\n"
19406 +- "61: movl 36(%4), %%edx\n"
19407 ++ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19408 ++ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19409 + " movl %%eax, 32(%3)\n"
19410 + " movl %%edx, 36(%3)\n"
19411 +- "12: movl 40(%4), %%eax\n"
19412 +- "71: movl 44(%4), %%edx\n"
19413 ++ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19414 ++ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19415 + " movl %%eax, 40(%3)\n"
19416 + " movl %%edx, 44(%3)\n"
19417 +- "13: movl 48(%4), %%eax\n"
19418 +- "81: movl 52(%4), %%edx\n"
19419 ++ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19420 ++ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19421 + " movl %%eax, 48(%3)\n"
19422 + " movl %%edx, 52(%3)\n"
19423 +- "14: movl 56(%4), %%eax\n"
19424 +- "91: movl 60(%4), %%edx\n"
19425 ++ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19426 ++ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19427 + " movl %%eax, 56(%3)\n"
19428 + " movl %%edx, 60(%3)\n"
19429 + " addl $-64, %0\n"
19430 +@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
19431 + " shrl $2, %0\n"
19432 + " andl $3, %%eax\n"
19433 + " cld\n"
19434 +- "6: rep; movsl\n"
19435 ++ "6: rep; "__copyuser_seg" movsl\n"
19436 + " movl %%eax,%0\n"
19437 +- "7: rep; movsb\n"
19438 ++ "7: rep; "__copyuser_seg" movsb\n"
19439 + "8:\n"
19440 + ".section .fixup,\"ax\"\n"
19441 + "9: lea 0(%%eax,%0,4),%0\n"
19442 +@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
19443 +
19444 + __asm__ __volatile__(
19445 + " .align 2,0x90\n"
19446 +- "0: movl 32(%4), %%eax\n"
19447 ++ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19448 + " cmpl $67, %0\n"
19449 + " jbe 2f\n"
19450 +- "1: movl 64(%4), %%eax\n"
19451 ++ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19452 + " .align 2,0x90\n"
19453 +- "2: movl 0(%4), %%eax\n"
19454 +- "21: movl 4(%4), %%edx\n"
19455 ++ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19456 ++ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19457 + " movnti %%eax, 0(%3)\n"
19458 + " movnti %%edx, 4(%3)\n"
19459 +- "3: movl 8(%4), %%eax\n"
19460 +- "31: movl 12(%4),%%edx\n"
19461 ++ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19462 ++ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19463 + " movnti %%eax, 8(%3)\n"
19464 + " movnti %%edx, 12(%3)\n"
19465 +- "4: movl 16(%4), %%eax\n"
19466 +- "41: movl 20(%4), %%edx\n"
19467 ++ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19468 ++ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19469 + " movnti %%eax, 16(%3)\n"
19470 + " movnti %%edx, 20(%3)\n"
19471 +- "10: movl 24(%4), %%eax\n"
19472 +- "51: movl 28(%4), %%edx\n"
19473 ++ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19474 ++ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19475 + " movnti %%eax, 24(%3)\n"
19476 + " movnti %%edx, 28(%3)\n"
19477 +- "11: movl 32(%4), %%eax\n"
19478 +- "61: movl 36(%4), %%edx\n"
19479 ++ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19480 ++ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19481 + " movnti %%eax, 32(%3)\n"
19482 + " movnti %%edx, 36(%3)\n"
19483 +- "12: movl 40(%4), %%eax\n"
19484 +- "71: movl 44(%4), %%edx\n"
19485 ++ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19486 ++ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19487 + " movnti %%eax, 40(%3)\n"
19488 + " movnti %%edx, 44(%3)\n"
19489 +- "13: movl 48(%4), %%eax\n"
19490 +- "81: movl 52(%4), %%edx\n"
19491 ++ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19492 ++ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19493 + " movnti %%eax, 48(%3)\n"
19494 + " movnti %%edx, 52(%3)\n"
19495 +- "14: movl 56(%4), %%eax\n"
19496 +- "91: movl 60(%4), %%edx\n"
19497 ++ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19498 ++ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19499 + " movnti %%eax, 56(%3)\n"
19500 + " movnti %%edx, 60(%3)\n"
19501 + " addl $-64, %0\n"
19502 +@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
19503 + " shrl $2, %0\n"
19504 + " andl $3, %%eax\n"
19505 + " cld\n"
19506 +- "6: rep; movsl\n"
19507 ++ "6: rep; "__copyuser_seg" movsl\n"
19508 + " movl %%eax,%0\n"
19509 +- "7: rep; movsb\n"
19510 ++ "7: rep; "__copyuser_seg" movsb\n"
19511 + "8:\n"
19512 + ".section .fixup,\"ax\"\n"
19513 + "9: lea 0(%%eax,%0,4),%0\n"
19514 +@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
19515 +
19516 + __asm__ __volatile__(
19517 + " .align 2,0x90\n"
19518 +- "0: movl 32(%4), %%eax\n"
19519 ++ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19520 + " cmpl $67, %0\n"
19521 + " jbe 2f\n"
19522 +- "1: movl 64(%4), %%eax\n"
19523 ++ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19524 + " .align 2,0x90\n"
19525 +- "2: movl 0(%4), %%eax\n"
19526 +- "21: movl 4(%4), %%edx\n"
19527 ++ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19528 ++ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19529 + " movnti %%eax, 0(%3)\n"
19530 + " movnti %%edx, 4(%3)\n"
19531 +- "3: movl 8(%4), %%eax\n"
19532 +- "31: movl 12(%4),%%edx\n"
19533 ++ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19534 ++ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19535 + " movnti %%eax, 8(%3)\n"
19536 + " movnti %%edx, 12(%3)\n"
19537 +- "4: movl 16(%4), %%eax\n"
19538 +- "41: movl 20(%4), %%edx\n"
19539 ++ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19540 ++ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19541 + " movnti %%eax, 16(%3)\n"
19542 + " movnti %%edx, 20(%3)\n"
19543 +- "10: movl 24(%4), %%eax\n"
19544 +- "51: movl 28(%4), %%edx\n"
19545 ++ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19546 ++ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19547 + " movnti %%eax, 24(%3)\n"
19548 + " movnti %%edx, 28(%3)\n"
19549 +- "11: movl 32(%4), %%eax\n"
19550 +- "61: movl 36(%4), %%edx\n"
19551 ++ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19552 ++ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19553 + " movnti %%eax, 32(%3)\n"
19554 + " movnti %%edx, 36(%3)\n"
19555 +- "12: movl 40(%4), %%eax\n"
19556 +- "71: movl 44(%4), %%edx\n"
19557 ++ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19558 ++ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19559 + " movnti %%eax, 40(%3)\n"
19560 + " movnti %%edx, 44(%3)\n"
19561 +- "13: movl 48(%4), %%eax\n"
19562 +- "81: movl 52(%4), %%edx\n"
19563 ++ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19564 ++ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19565 + " movnti %%eax, 48(%3)\n"
19566 + " movnti %%edx, 52(%3)\n"
19567 +- "14: movl 56(%4), %%eax\n"
19568 +- "91: movl 60(%4), %%edx\n"
19569 ++ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19570 ++ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19571 + " movnti %%eax, 56(%3)\n"
19572 + " movnti %%edx, 60(%3)\n"
19573 + " addl $-64, %0\n"
19574 +@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
19575 + " shrl $2, %0\n"
19576 + " andl $3, %%eax\n"
19577 + " cld\n"
19578 +- "6: rep; movsl\n"
19579 ++ "6: rep; "__copyuser_seg" movsl\n"
19580 + " movl %%eax,%0\n"
19581 +- "7: rep; movsb\n"
19582 ++ "7: rep; "__copyuser_seg" movsb\n"
19583 + "8:\n"
19584 + ".section .fixup,\"ax\"\n"
19585 + "9: lea 0(%%eax,%0,4),%0\n"
19586 +@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
19587 + */
19588 + unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
19589 + unsigned long size);
19590 +-unsigned long __copy_user_intel(void __user *to, const void *from,
19591 ++unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
19592 ++ unsigned long size);
19593 ++unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
19594 + unsigned long size);
19595 + unsigned long __copy_user_zeroing_intel_nocache(void *to,
19596 + const void __user *from, unsigned long size);
19597 + #endif /* CONFIG_X86_INTEL_USERCOPY */
19598 +
19599 + /* Generic arbitrary sized copy. */
19600 +-#define __copy_user(to, from, size) \
19601 ++#define __copy_user(to, from, size, prefix, set, restore) \
19602 + do { \
19603 + int __d0, __d1, __d2; \
19604 + __asm__ __volatile__( \
19605 ++ set \
19606 + " cmp $7,%0\n" \
19607 + " jbe 1f\n" \
19608 + " movl %1,%0\n" \
19609 + " negl %0\n" \
19610 + " andl $7,%0\n" \
19611 + " subl %0,%3\n" \
19612 +- "4: rep; movsb\n" \
19613 ++ "4: rep; "prefix"movsb\n" \
19614 + " movl %3,%0\n" \
19615 + " shrl $2,%0\n" \
19616 + " andl $3,%3\n" \
19617 + " .align 2,0x90\n" \
19618 +- "0: rep; movsl\n" \
19619 ++ "0: rep; "prefix"movsl\n" \
19620 + " movl %3,%0\n" \
19621 +- "1: rep; movsb\n" \
19622 ++ "1: rep; "prefix"movsb\n" \
19623 + "2:\n" \
19624 ++ restore \
19625 + ".section .fixup,\"ax\"\n" \
19626 + "5: addl %3,%0\n" \
19627 + " jmp 2b\n" \
19628 +@@ -682,14 +799,14 @@ do { \
19629 + " negl %0\n" \
19630 + " andl $7,%0\n" \
19631 + " subl %0,%3\n" \
19632 +- "4: rep; movsb\n" \
19633 ++ "4: rep; "__copyuser_seg"movsb\n" \
19634 + " movl %3,%0\n" \
19635 + " shrl $2,%0\n" \
19636 + " andl $3,%3\n" \
19637 + " .align 2,0x90\n" \
19638 +- "0: rep; movsl\n" \
19639 ++ "0: rep; "__copyuser_seg"movsl\n" \
19640 + " movl %3,%0\n" \
19641 +- "1: rep; movsb\n" \
19642 ++ "1: rep; "__copyuser_seg"movsb\n" \
19643 + "2:\n" \
19644 + ".section .fixup,\"ax\"\n" \
19645 + "5: addl %3,%0\n" \
19646 +@@ -775,9 +892,9 @@ survive:
19647 + }
19648 + #endif
19649 + if (movsl_is_ok(to, from, n))
19650 +- __copy_user(to, from, n);
19651 ++ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
19652 + else
19653 +- n = __copy_user_intel(to, from, n);
19654 ++ n = __generic_copy_to_user_intel(to, from, n);
19655 + return n;
19656 + }
19657 + EXPORT_SYMBOL(__copy_to_user_ll);
19658 +@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
19659 + unsigned long n)
19660 + {
19661 + if (movsl_is_ok(to, from, n))
19662 +- __copy_user(to, from, n);
19663 ++ __copy_user(to, from, n, __copyuser_seg, "", "");
19664 + else
19665 +- n = __copy_user_intel((void __user *)to,
19666 +- (const void *)from, n);
19667 ++ n = __generic_copy_from_user_intel(to, from, n);
19668 + return n;
19669 + }
19670 + EXPORT_SYMBOL(__copy_from_user_ll_nozero);
19671 +@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
19672 + if (n > 64 && cpu_has_xmm2)
19673 + n = __copy_user_intel_nocache(to, from, n);
19674 + else
19675 +- __copy_user(to, from, n);
19676 ++ __copy_user(to, from, n, __copyuser_seg, "", "");
19677 + #else
19678 +- __copy_user(to, from, n);
19679 ++ __copy_user(to, from, n, __copyuser_seg, "", "");
19680 + #endif
19681 + return n;
19682 + }
19683 + EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
19684 +
19685 +-/**
19686 +- * copy_to_user: - Copy a block of data into user space.
19687 +- * @to: Destination address, in user space.
19688 +- * @from: Source address, in kernel space.
19689 +- * @n: Number of bytes to copy.
19690 +- *
19691 +- * Context: User context only. This function may sleep.
19692 +- *
19693 +- * Copy data from kernel space to user space.
19694 +- *
19695 +- * Returns number of bytes that could not be copied.
19696 +- * On success, this will be zero.
19697 +- */
19698 +-unsigned long
19699 +-copy_to_user(void __user *to, const void *from, unsigned long n)
19700 ++void copy_from_user_overflow(void)
19701 + {
19702 +- if (access_ok(VERIFY_WRITE, to, n))
19703 +- n = __copy_to_user(to, from, n);
19704 +- return n;
19705 ++ WARN(1, "Buffer overflow detected!\n");
19706 + }
19707 +-EXPORT_SYMBOL(copy_to_user);
19708 ++EXPORT_SYMBOL(copy_from_user_overflow);
19709 +
19710 +-/**
19711 +- * copy_from_user: - Copy a block of data from user space.
19712 +- * @to: Destination address, in kernel space.
19713 +- * @from: Source address, in user space.
19714 +- * @n: Number of bytes to copy.
19715 +- *
19716 +- * Context: User context only. This function may sleep.
19717 +- *
19718 +- * Copy data from user space to kernel space.
19719 +- *
19720 +- * Returns number of bytes that could not be copied.
19721 +- * On success, this will be zero.
19722 +- *
19723 +- * If some data could not be copied, this function will pad the copied
19724 +- * data to the requested size using zero bytes.
19725 +- */
19726 +-unsigned long
19727 +-_copy_from_user(void *to, const void __user *from, unsigned long n)
19728 ++void copy_to_user_overflow(void)
19729 + {
19730 +- if (access_ok(VERIFY_READ, from, n))
19731 +- n = __copy_from_user(to, from, n);
19732 +- else
19733 +- memset(to, 0, n);
19734 +- return n;
19735 ++ WARN(1, "Buffer overflow detected!\n");
19736 + }
19737 +-EXPORT_SYMBOL(_copy_from_user);
19738 ++EXPORT_SYMBOL(copy_to_user_overflow);
19739 +
19740 +-void copy_from_user_overflow(void)
19741 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
19742 ++void __set_fs(mm_segment_t x)
19743 + {
19744 +- WARN(1, "Buffer overflow detected!\n");
19745 ++ switch (x.seg) {
19746 ++ case 0:
19747 ++ loadsegment(gs, 0);
19748 ++ break;
19749 ++ case TASK_SIZE_MAX:
19750 ++ loadsegment(gs, __USER_DS);
19751 ++ break;
19752 ++ case -1UL:
19753 ++ loadsegment(gs, __KERNEL_DS);
19754 ++ break;
19755 ++ default:
19756 ++ BUG();
19757 ++ }
19758 ++ return;
19759 + }
19760 +-EXPORT_SYMBOL(copy_from_user_overflow);
19761 ++EXPORT_SYMBOL(__set_fs);
19762 ++
19763 ++void set_fs(mm_segment_t x)
19764 ++{
19765 ++ current_thread_info()->addr_limit = x;
19766 ++ __set_fs(x);
19767 ++}
19768 ++EXPORT_SYMBOL(set_fs);
19769 ++#endif
19770 +diff -urNp linux-3.1.1/arch/x86/lib/usercopy_64.c linux-3.1.1/arch/x86/lib/usercopy_64.c
19771 +--- linux-3.1.1/arch/x86/lib/usercopy_64.c 2011-11-11 15:19:27.000000000 -0500
19772 ++++ linux-3.1.1/arch/x86/lib/usercopy_64.c 2011-11-16 18:39:07.000000000 -0500
19773 +@@ -42,6 +42,12 @@ long
19774 + __strncpy_from_user(char *dst, const char __user *src, long count)
19775 + {
19776 + long res;
19777 ++
19778 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
19779 ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19780 ++ src += PAX_USER_SHADOW_BASE;
19781 ++#endif
19782 ++
19783 + __do_strncpy_from_user(dst, src, count, res);
19784 + return res;
19785 + }
19786 +@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
19787 + {
19788 + long __d0;
19789 + might_fault();
19790 ++
19791 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
19792 ++ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
19793 ++ addr += PAX_USER_SHADOW_BASE;
19794 ++#endif
19795 ++
19796 + /* no memory constraint because it doesn't change any memory gcc knows
19797 + about */
19798 + asm volatile(
19799 +@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
19800 +
19801 + unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
19802 + {
19803 +- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19804 +- return copy_user_generic((__force void *)to, (__force void *)from, len);
19805 +- }
19806 +- return len;
19807 ++ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19808 ++
19809 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
19810 ++ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
19811 ++ to += PAX_USER_SHADOW_BASE;
19812 ++ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
19813 ++ from += PAX_USER_SHADOW_BASE;
19814 ++#endif
19815 ++
19816 ++ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
19817 ++ }
19818 ++ return len;
19819 + }
19820 + EXPORT_SYMBOL(copy_in_user);
19821 +
19822 +@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
19823 + * it is not necessary to optimize tail handling.
19824 + */
19825 + unsigned long
19826 +-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
19827 ++copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest)
19828 + {
19829 + char c;
19830 + unsigned zero_len;
19831 +diff -urNp linux-3.1.1/arch/x86/Makefile linux-3.1.1/arch/x86/Makefile
19832 +--- linux-3.1.1/arch/x86/Makefile 2011-11-11 15:19:27.000000000 -0500
19833 ++++ linux-3.1.1/arch/x86/Makefile 2011-11-17 18:30:30.000000000 -0500
19834 +@@ -46,6 +46,7 @@ else
19835 + UTS_MACHINE := x86_64
19836 + CHECKFLAGS += -D__x86_64__ -m64
19837 +
19838 ++ biarch := $(call cc-option,-m64)
19839 + KBUILD_AFLAGS += -m64
19840 + KBUILD_CFLAGS += -m64
19841 +
19842 +@@ -195,3 +196,12 @@ define archhelp
19843 + echo ' FDARGS="..." arguments for the booted kernel'
19844 + echo ' FDINITRD=file initrd for the booted kernel'
19845 + endef
19846 ++
19847 ++define OLD_LD
19848 ++
19849 ++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
19850 ++*** Please upgrade your binutils to 2.18 or newer
19851 ++endef
19852 ++
19853 ++archprepare:
19854 ++ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
19855 +diff -urNp linux-3.1.1/arch/x86/mm/extable.c linux-3.1.1/arch/x86/mm/extable.c
19856 +--- linux-3.1.1/arch/x86/mm/extable.c 2011-11-11 15:19:27.000000000 -0500
19857 ++++ linux-3.1.1/arch/x86/mm/extable.c 2011-11-16 18:39:07.000000000 -0500
19858 +@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs
19859 + const struct exception_table_entry *fixup;
19860 +
19861 + #ifdef CONFIG_PNPBIOS
19862 +- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19863 ++ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19864 + extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19865 + extern u32 pnp_bios_is_utter_crap;
19866 + pnp_bios_is_utter_crap = 1;
19867 +diff -urNp linux-3.1.1/arch/x86/mm/fault.c linux-3.1.1/arch/x86/mm/fault.c
19868 +--- linux-3.1.1/arch/x86/mm/fault.c 2011-11-11 15:19:27.000000000 -0500
19869 ++++ linux-3.1.1/arch/x86/mm/fault.c 2011-11-16 20:43:50.000000000 -0500
19870 +@@ -13,11 +13,18 @@
19871 + #include <linux/perf_event.h> /* perf_sw_event */
19872 + #include <linux/hugetlb.h> /* hstate_index_to_shift */
19873 + #include <linux/prefetch.h> /* prefetchw */
19874 ++#include <linux/unistd.h>
19875 ++#include <linux/compiler.h>
19876 +
19877 + #include <asm/traps.h> /* dotraplinkage, ... */
19878 + #include <asm/pgalloc.h> /* pgd_*(), ... */
19879 + #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
19880 + #include <asm/vsyscall.h>
19881 ++#include <asm/tlbflush.h>
19882 ++
19883 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19884 ++#include <asm/stacktrace.h>
19885 ++#endif
19886 +
19887 + /*
19888 + * Page fault error code bits:
19889 +@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_
19890 + int ret = 0;
19891 +
19892 + /* kprobe_running() needs smp_processor_id() */
19893 +- if (kprobes_built_in() && !user_mode_vm(regs)) {
19894 ++ if (kprobes_built_in() && !user_mode(regs)) {
19895 + preempt_disable();
19896 + if (kprobe_running() && kprobe_fault_handler(regs, 14))
19897 + ret = 1;
19898 +@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *re
19899 + return !instr_lo || (instr_lo>>1) == 1;
19900 + case 0x00:
19901 + /* Prefetch instruction is 0x0F0D or 0x0F18 */
19902 +- if (probe_kernel_address(instr, opcode))
19903 ++ if (user_mode(regs)) {
19904 ++ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19905 ++ return 0;
19906 ++ } else if (probe_kernel_address(instr, opcode))
19907 + return 0;
19908 +
19909 + *prefetch = (instr_lo == 0xF) &&
19910 +@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsign
19911 + while (instr < max_instr) {
19912 + unsigned char opcode;
19913 +
19914 +- if (probe_kernel_address(instr, opcode))
19915 ++ if (user_mode(regs)) {
19916 ++ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
19917 ++ break;
19918 ++ } else if (probe_kernel_address(instr, opcode))
19919 + break;
19920 +
19921 + instr++;
19922 +@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int s
19923 + force_sig_info(si_signo, &info, tsk);
19924 + }
19925 +
19926 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19927 ++static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
19928 ++#endif
19929 ++
19930 ++#ifdef CONFIG_PAX_EMUTRAMP
19931 ++static int pax_handle_fetch_fault(struct pt_regs *regs);
19932 ++#endif
19933 ++
19934 ++#ifdef CONFIG_PAX_PAGEEXEC
19935 ++static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
19936 ++{
19937 ++ pgd_t *pgd;
19938 ++ pud_t *pud;
19939 ++ pmd_t *pmd;
19940 ++
19941 ++ pgd = pgd_offset(mm, address);
19942 ++ if (!pgd_present(*pgd))
19943 ++ return NULL;
19944 ++ pud = pud_offset(pgd, address);
19945 ++ if (!pud_present(*pud))
19946 ++ return NULL;
19947 ++ pmd = pmd_offset(pud, address);
19948 ++ if (!pmd_present(*pmd))
19949 ++ return NULL;
19950 ++ return pmd;
19951 ++}
19952 ++#endif
19953 ++
19954 + DEFINE_SPINLOCK(pgd_lock);
19955 + LIST_HEAD(pgd_list);
19956 +
19957 +@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
19958 + for (address = VMALLOC_START & PMD_MASK;
19959 + address >= TASK_SIZE && address < FIXADDR_TOP;
19960 + address += PMD_SIZE) {
19961 ++
19962 ++#ifdef CONFIG_PAX_PER_CPU_PGD
19963 ++ unsigned long cpu;
19964 ++#else
19965 + struct page *page;
19966 ++#endif
19967 +
19968 + spin_lock(&pgd_lock);
19969 ++
19970 ++#ifdef CONFIG_PAX_PER_CPU_PGD
19971 ++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19972 ++ pgd_t *pgd = get_cpu_pgd(cpu);
19973 ++ pmd_t *ret;
19974 ++#else
19975 + list_for_each_entry(page, &pgd_list, lru) {
19976 ++ pgd_t *pgd = page_address(page);
19977 + spinlock_t *pgt_lock;
19978 + pmd_t *ret;
19979 +
19980 +@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
19981 + pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19982 +
19983 + spin_lock(pgt_lock);
19984 +- ret = vmalloc_sync_one(page_address(page), address);
19985 ++#endif
19986 ++
19987 ++ ret = vmalloc_sync_one(pgd, address);
19988 ++
19989 ++#ifndef CONFIG_PAX_PER_CPU_PGD
19990 + spin_unlock(pgt_lock);
19991 ++#endif
19992 +
19993 + if (!ret)
19994 + break;
19995 +@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fa
19996 + * an interrupt in the middle of a task switch..
19997 + */
19998 + pgd_paddr = read_cr3();
19999 ++
20000 ++#ifdef CONFIG_PAX_PER_CPU_PGD
20001 ++ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
20002 ++#endif
20003 ++
20004 + pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
20005 + if (!pmd_k)
20006 + return -1;
20007 +@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fa
20008 + * happen within a race in page table update. In the later
20009 + * case just flush:
20010 + */
20011 ++
20012 ++#ifdef CONFIG_PAX_PER_CPU_PGD
20013 ++ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
20014 ++ pgd = pgd_offset_cpu(smp_processor_id(), address);
20015 ++#else
20016 + pgd = pgd_offset(current->active_mm, address);
20017 ++#endif
20018 ++
20019 + pgd_ref = pgd_offset_k(address);
20020 + if (pgd_none(*pgd_ref))
20021 + return -1;
20022 +@@ -534,7 +604,7 @@ static int is_errata93(struct pt_regs *r
20023 + static int is_errata100(struct pt_regs *regs, unsigned long address)
20024 + {
20025 + #ifdef CONFIG_X86_64
20026 +- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
20027 ++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
20028 + return 1;
20029 + #endif
20030 + return 0;
20031 +@@ -561,7 +631,7 @@ static int is_f00f_bug(struct pt_regs *r
20032 + }
20033 +
20034 + static const char nx_warning[] = KERN_CRIT
20035 +-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20036 ++"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20037 +
20038 + static void
20039 + show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20040 +@@ -570,14 +640,25 @@ show_fault_oops(struct pt_regs *regs, un
20041 + if (!oops_may_print())
20042 + return;
20043 +
20044 +- if (error_code & PF_INSTR) {
20045 ++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
20046 + unsigned int level;
20047 +
20048 + pte_t *pte = lookup_address(address, &level);
20049 +
20050 + if (pte && pte_present(*pte) && !pte_exec(*pte))
20051 +- printk(nx_warning, current_uid());
20052 ++ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20053 ++ }
20054 ++
20055 ++#ifdef CONFIG_PAX_KERNEXEC
20056 ++ if (init_mm.start_code <= address && address < init_mm.end_code) {
20057 ++ if (current->signal->curr_ip)
20058 ++ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20059 ++ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20060 ++ else
20061 ++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20062 ++ current->comm, task_pid_nr(current), current_uid(), current_euid());
20063 + }
20064 ++#endif
20065 +
20066 + printk(KERN_ALERT "BUG: unable to handle kernel ");
20067 + if (address < PAGE_SIZE)
20068 +@@ -733,6 +814,21 @@ __bad_area_nosemaphore(struct pt_regs *r
20069 + }
20070 + #endif
20071 +
20072 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20073 ++ if (pax_is_fetch_fault(regs, error_code, address)) {
20074 ++
20075 ++#ifdef CONFIG_PAX_EMUTRAMP
20076 ++ switch (pax_handle_fetch_fault(regs)) {
20077 ++ case 2:
20078 ++ return;
20079 ++ }
20080 ++#endif
20081 ++
20082 ++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20083 ++ do_group_exit(SIGKILL);
20084 ++ }
20085 ++#endif
20086 ++
20087 + if (unlikely(show_unhandled_signals))
20088 + show_signal_msg(regs, error_code, address, tsk);
20089 +
20090 +@@ -829,7 +925,7 @@ do_sigbus(struct pt_regs *regs, unsigned
20091 + if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
20092 + printk(KERN_ERR
20093 + "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
20094 +- tsk->comm, tsk->pid, address);
20095 ++ tsk->comm, task_pid_nr(tsk), address);
20096 + code = BUS_MCEERR_AR;
20097 + }
20098 + #endif
20099 +@@ -884,6 +980,99 @@ static int spurious_fault_check(unsigned
20100 + return 1;
20101 + }
20102 +
20103 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20104 ++static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20105 ++{
20106 ++ pte_t *pte;
20107 ++ pmd_t *pmd;
20108 ++ spinlock_t *ptl;
20109 ++ unsigned char pte_mask;
20110 ++
20111 ++ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20112 ++ !(mm->pax_flags & MF_PAX_PAGEEXEC))
20113 ++ return 0;
20114 ++
20115 ++ /* PaX: it's our fault, let's handle it if we can */
20116 ++
20117 ++ /* PaX: take a look at read faults before acquiring any locks */
20118 ++ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20119 ++ /* instruction fetch attempt from a protected page in user mode */
20120 ++ up_read(&mm->mmap_sem);
20121 ++
20122 ++#ifdef CONFIG_PAX_EMUTRAMP
20123 ++ switch (pax_handle_fetch_fault(regs)) {
20124 ++ case 2:
20125 ++ return 1;
20126 ++ }
20127 ++#endif
20128 ++
20129 ++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20130 ++ do_group_exit(SIGKILL);
20131 ++ }
20132 ++
20133 ++ pmd = pax_get_pmd(mm, address);
20134 ++ if (unlikely(!pmd))
20135 ++ return 0;
20136 ++
20137 ++ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20138 ++ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20139 ++ pte_unmap_unlock(pte, ptl);
20140 ++ return 0;
20141 ++ }
20142 ++
20143 ++ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20144 ++ /* write attempt to a protected page in user mode */
20145 ++ pte_unmap_unlock(pte, ptl);
20146 ++ return 0;
20147 ++ }
20148 ++
20149 ++#ifdef CONFIG_SMP
20150 ++ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20151 ++#else
20152 ++ if (likely(address > get_limit(regs->cs)))
20153 ++#endif
20154 ++ {
20155 ++ set_pte(pte, pte_mkread(*pte));
20156 ++ __flush_tlb_one(address);
20157 ++ pte_unmap_unlock(pte, ptl);
20158 ++ up_read(&mm->mmap_sem);
20159 ++ return 1;
20160 ++ }
20161 ++
20162 ++ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20163 ++
20164 ++ /*
20165 ++ * PaX: fill DTLB with user rights and retry
20166 ++ */
20167 ++ __asm__ __volatile__ (
20168 ++ "orb %2,(%1)\n"
20169 ++#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20170 ++/*
20171 ++ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20172 ++ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20173 ++ * page fault when examined during a TLB load attempt. this is true not only
20174 ++ * for PTEs holding a non-present entry but also present entries that will
20175 ++ * raise a page fault (such as those set up by PaX, or the copy-on-write
20176 ++ * mechanism). in effect it means that we do *not* need to flush the TLBs
20177 ++ * for our target pages since their PTEs are simply not in the TLBs at all.
20178 ++
20179 ++ * the best thing in omitting it is that we gain around 15-20% speed in the
20180 ++ * fast path of the page fault handler and can get rid of tracing since we
20181 ++ * can no longer flush unintended entries.
20182 ++ */
20183 ++ "invlpg (%0)\n"
20184 ++#endif
20185 ++ __copyuser_seg"testb $0,(%0)\n"
20186 ++ "xorb %3,(%1)\n"
20187 ++ :
20188 ++ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
20189 ++ : "memory", "cc");
20190 ++ pte_unmap_unlock(pte, ptl);
20191 ++ up_read(&mm->mmap_sem);
20192 ++ return 1;
20193 ++}
20194 ++#endif
20195 ++
20196 + /*
20197 + * Handle a spurious fault caused by a stale TLB entry.
20198 + *
20199 +@@ -956,6 +1145,9 @@ int show_unhandled_signals = 1;
20200 + static inline int
20201 + access_error(unsigned long error_code, struct vm_area_struct *vma)
20202 + {
20203 ++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
20204 ++ return 1;
20205 ++
20206 + if (error_code & PF_WRITE) {
20207 + /* write, present and write, not present: */
20208 + if (unlikely(!(vma->vm_flags & VM_WRITE)))
20209 +@@ -989,19 +1181,33 @@ do_page_fault(struct pt_regs *regs, unsi
20210 + {
20211 + struct vm_area_struct *vma;
20212 + struct task_struct *tsk;
20213 +- unsigned long address;
20214 + struct mm_struct *mm;
20215 + int fault;
20216 + int write = error_code & PF_WRITE;
20217 + unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
20218 + (write ? FAULT_FLAG_WRITE : 0);
20219 +
20220 ++ /* Get the faulting address: */
20221 ++ unsigned long address = read_cr2();
20222 ++
20223 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20224 ++ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
20225 ++ if (!search_exception_tables(regs->ip)) {
20226 ++ bad_area_nosemaphore(regs, error_code, address);
20227 ++ return;
20228 ++ }
20229 ++ if (address < PAX_USER_SHADOW_BASE) {
20230 ++ printk(KERN_ERR "PAX: please report this to pageexec@××××××××.hu\n");
20231 ++ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
20232 ++ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
20233 ++ } else
20234 ++ address -= PAX_USER_SHADOW_BASE;
20235 ++ }
20236 ++#endif
20237 ++
20238 + tsk = current;
20239 + mm = tsk->mm;
20240 +
20241 +- /* Get the faulting address: */
20242 +- address = read_cr2();
20243 +-
20244 + /*
20245 + * Detect and handle instructions that would cause a page fault for
20246 + * both a tracked kernel page and a userspace page.
20247 +@@ -1061,7 +1267,7 @@ do_page_fault(struct pt_regs *regs, unsi
20248 + * User-mode registers count as a user access even for any
20249 + * potential system fault or CPU buglet:
20250 + */
20251 +- if (user_mode_vm(regs)) {
20252 ++ if (user_mode(regs)) {
20253 + local_irq_enable();
20254 + error_code |= PF_USER;
20255 + } else {
20256 +@@ -1116,6 +1322,11 @@ retry:
20257 + might_sleep();
20258 + }
20259 +
20260 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20261 ++ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
20262 ++ return;
20263 ++#endif
20264 ++
20265 + vma = find_vma(mm, address);
20266 + if (unlikely(!vma)) {
20267 + bad_area(regs, error_code, address);
20268 +@@ -1127,18 +1338,24 @@ retry:
20269 + bad_area(regs, error_code, address);
20270 + return;
20271 + }
20272 +- if (error_code & PF_USER) {
20273 +- /*
20274 +- * Accessing the stack below %sp is always a bug.
20275 +- * The large cushion allows instructions like enter
20276 +- * and pusha to work. ("enter $65535, $31" pushes
20277 +- * 32 pointers and then decrements %sp by 65535.)
20278 +- */
20279 +- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20280 +- bad_area(regs, error_code, address);
20281 +- return;
20282 +- }
20283 ++ /*
20284 ++ * Accessing the stack below %sp is always a bug.
20285 ++ * The large cushion allows instructions like enter
20286 ++ * and pusha to work. ("enter $65535, $31" pushes
20287 ++ * 32 pointers and then decrements %sp by 65535.)
20288 ++ */
20289 ++ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20290 ++ bad_area(regs, error_code, address);
20291 ++ return;
20292 + }
20293 ++
20294 ++#ifdef CONFIG_PAX_SEGMEXEC
20295 ++ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
20296 ++ bad_area(regs, error_code, address);
20297 ++ return;
20298 ++ }
20299 ++#endif
20300 ++
20301 + if (unlikely(expand_stack(vma, address))) {
20302 + bad_area(regs, error_code, address);
20303 + return;
20304 +@@ -1193,3 +1410,240 @@ good_area:
20305 +
20306 + up_read(&mm->mmap_sem);
20307 + }
20308 ++
20309 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20310 ++static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
20311 ++{
20312 ++ struct mm_struct *mm = current->mm;
20313 ++ unsigned long ip = regs->ip;
20314 ++
20315 ++ if (v8086_mode(regs))
20316 ++ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20317 ++
20318 ++#ifdef CONFIG_PAX_PAGEEXEC
20319 ++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
20320 ++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
20321 ++ return true;
20322 ++ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
20323 ++ return true;
20324 ++ return false;
20325 ++ }
20326 ++#endif
20327 ++
20328 ++#ifdef CONFIG_PAX_SEGMEXEC
20329 ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
20330 ++ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
20331 ++ return true;
20332 ++ return false;
20333 ++ }
20334 ++#endif
20335 ++
20336 ++ return false;
20337 ++}
20338 ++#endif
20339 ++
20340 ++#ifdef CONFIG_PAX_EMUTRAMP
20341 ++static int pax_handle_fetch_fault_32(struct pt_regs *regs)
20342 ++{
20343 ++ int err;
20344 ++
20345 ++ do { /* PaX: gcc trampoline emulation #1 */
20346 ++ unsigned char mov1, mov2;
20347 ++ unsigned short jmp;
20348 ++ unsigned int addr1, addr2;
20349 ++
20350 ++#ifdef CONFIG_X86_64
20351 ++ if ((regs->ip + 11) >> 32)
20352 ++ break;
20353 ++#endif
20354 ++
20355 ++ err = get_user(mov1, (unsigned char __user *)regs->ip);
20356 ++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20357 ++ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
20358 ++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20359 ++ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
20360 ++
20361 ++ if (err)
20362 ++ break;
20363 ++
20364 ++ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
20365 ++ regs->cx = addr1;
20366 ++ regs->ax = addr2;
20367 ++ regs->ip = addr2;
20368 ++ return 2;
20369 ++ }
20370 ++ } while (0);
20371 ++
20372 ++ do { /* PaX: gcc trampoline emulation #2 */
20373 ++ unsigned char mov, jmp;
20374 ++ unsigned int addr1, addr2;
20375 ++
20376 ++#ifdef CONFIG_X86_64
20377 ++ if ((regs->ip + 9) >> 32)
20378 ++ break;
20379 ++#endif
20380 ++
20381 ++ err = get_user(mov, (unsigned char __user *)regs->ip);
20382 ++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20383 ++ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
20384 ++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20385 ++
20386 ++ if (err)
20387 ++ break;
20388 ++
20389 ++ if (mov == 0xB9 && jmp == 0xE9) {
20390 ++ regs->cx = addr1;
20391 ++ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
20392 ++ return 2;
20393 ++ }
20394 ++ } while (0);
20395 ++
20396 ++ return 1; /* PaX in action */
20397 ++}
20398 ++
20399 ++#ifdef CONFIG_X86_64
20400 ++static int pax_handle_fetch_fault_64(struct pt_regs *regs)
20401 ++{
20402 ++ int err;
20403 ++
20404 ++ do { /* PaX: gcc trampoline emulation #1 */
20405 ++ unsigned short mov1, mov2, jmp1;
20406 ++ unsigned char jmp2;
20407 ++ unsigned int addr1;
20408 ++ unsigned long addr2;
20409 ++
20410 ++ err = get_user(mov1, (unsigned short __user *)regs->ip);
20411 ++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
20412 ++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
20413 ++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
20414 ++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
20415 ++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
20416 ++
20417 ++ if (err)
20418 ++ break;
20419 ++
20420 ++ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20421 ++ regs->r11 = addr1;
20422 ++ regs->r10 = addr2;
20423 ++ regs->ip = addr1;
20424 ++ return 2;
20425 ++ }
20426 ++ } while (0);
20427 ++
20428 ++ do { /* PaX: gcc trampoline emulation #2 */
20429 ++ unsigned short mov1, mov2, jmp1;
20430 ++ unsigned char jmp2;
20431 ++ unsigned long addr1, addr2;
20432 ++
20433 ++ err = get_user(mov1, (unsigned short __user *)regs->ip);
20434 ++ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
20435 ++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
20436 ++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
20437 ++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
20438 ++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
20439 ++
20440 ++ if (err)
20441 ++ break;
20442 ++
20443 ++ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20444 ++ regs->r11 = addr1;
20445 ++ regs->r10 = addr2;
20446 ++ regs->ip = addr1;
20447 ++ return 2;
20448 ++ }
20449 ++ } while (0);
20450 ++
20451 ++ return 1; /* PaX in action */
20452 ++}
20453 ++#endif
20454 ++
20455 ++/*
20456 ++ * PaX: decide what to do with offenders (regs->ip = fault address)
20457 ++ *
20458 ++ * returns 1 when task should be killed
20459 ++ * 2 when gcc trampoline was detected
20460 ++ */
20461 ++static int pax_handle_fetch_fault(struct pt_regs *regs)
20462 ++{
20463 ++ if (v8086_mode(regs))
20464 ++ return 1;
20465 ++
20466 ++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
20467 ++ return 1;
20468 ++
20469 ++#ifdef CONFIG_X86_32
20470 ++ return pax_handle_fetch_fault_32(regs);
20471 ++#else
20472 ++ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
20473 ++ return pax_handle_fetch_fault_32(regs);
20474 ++ else
20475 ++ return pax_handle_fetch_fault_64(regs);
20476 ++#endif
20477 ++}
20478 ++#endif
20479 ++
20480 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20481 ++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
20482 ++{
20483 ++ long i;
20484 ++
20485 ++ printk(KERN_ERR "PAX: bytes at PC: ");
20486 ++ for (i = 0; i < 20; i++) {
20487 ++ unsigned char c;
20488 ++ if (get_user(c, (unsigned char __force_user *)pc+i))
20489 ++ printk(KERN_CONT "?? ");
20490 ++ else
20491 ++ printk(KERN_CONT "%02x ", c);
20492 ++ }
20493 ++ printk("\n");
20494 ++
20495 ++ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
20496 ++ for (i = -1; i < 80 / (long)sizeof(long); i++) {
20497 ++ unsigned long c;
20498 ++ if (get_user(c, (unsigned long __force_user *)sp+i)) {
20499 ++#ifdef CONFIG_X86_32
20500 ++ printk(KERN_CONT "???????? ");
20501 ++#else
20502 ++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
20503 ++ printk(KERN_CONT "???????? ???????? ");
20504 ++ else
20505 ++ printk(KERN_CONT "???????????????? ");
20506 ++#endif
20507 ++ } else {
20508 ++#ifdef CONFIG_X86_64
20509 ++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
20510 ++ printk(KERN_CONT "%08x ", (unsigned int)c);
20511 ++ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
20512 ++ } else
20513 ++#endif
20514 ++ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
20515 ++ }
20516 ++ }
20517 ++ printk("\n");
20518 ++}
20519 ++#endif
20520 ++
20521 ++/**
20522 ++ * probe_kernel_write(): safely attempt to write to a location
20523 ++ * @dst: address to write to
20524 ++ * @src: pointer to the data that shall be written
20525 ++ * @size: size of the data chunk
20526 ++ *
20527 ++ * Safely write to address @dst from the buffer at @src. If a kernel fault
20528 ++ * happens, handle that and return -EFAULT.
20529 ++ */
20530 ++long notrace probe_kernel_write(void *dst, const void *src, size_t size)
20531 ++{
20532 ++ long ret;
20533 ++ mm_segment_t old_fs = get_fs();
20534 ++
20535 ++ set_fs(KERNEL_DS);
20536 ++ pagefault_disable();
20537 ++ pax_open_kernel();
20538 ++ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
20539 ++ pax_close_kernel();
20540 ++ pagefault_enable();
20541 ++ set_fs(old_fs);
20542 ++
20543 ++ return ret ? -EFAULT : 0;
20544 ++}
20545 +diff -urNp linux-3.1.1/arch/x86/mm/gup.c linux-3.1.1/arch/x86/mm/gup.c
20546 +--- linux-3.1.1/arch/x86/mm/gup.c 2011-11-11 15:19:27.000000000 -0500
20547 ++++ linux-3.1.1/arch/x86/mm/gup.c 2011-11-16 18:39:07.000000000 -0500
20548 +@@ -253,7 +253,7 @@ int __get_user_pages_fast(unsigned long
20549 + addr = start;
20550 + len = (unsigned long) nr_pages << PAGE_SHIFT;
20551 + end = start + len;
20552 +- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20553 ++ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20554 + (void __user *)start, len)))
20555 + return 0;
20556 +
20557 +diff -urNp linux-3.1.1/arch/x86/mm/highmem_32.c linux-3.1.1/arch/x86/mm/highmem_32.c
20558 +--- linux-3.1.1/arch/x86/mm/highmem_32.c 2011-11-11 15:19:27.000000000 -0500
20559 ++++ linux-3.1.1/arch/x86/mm/highmem_32.c 2011-11-16 18:39:07.000000000 -0500
20560 +@@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
20561 + idx = type + KM_TYPE_NR*smp_processor_id();
20562 + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20563 + BUG_ON(!pte_none(*(kmap_pte-idx)));
20564 ++
20565 ++ pax_open_kernel();
20566 + set_pte(kmap_pte-idx, mk_pte(page, prot));
20567 ++ pax_close_kernel();
20568 +
20569 + return (void *)vaddr;
20570 + }
20571 +diff -urNp linux-3.1.1/arch/x86/mm/hugetlbpage.c linux-3.1.1/arch/x86/mm/hugetlbpage.c
20572 +--- linux-3.1.1/arch/x86/mm/hugetlbpage.c 2011-11-11 15:19:27.000000000 -0500
20573 ++++ linux-3.1.1/arch/x86/mm/hugetlbpage.c 2011-11-16 18:39:07.000000000 -0500
20574 +@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
20575 + struct hstate *h = hstate_file(file);
20576 + struct mm_struct *mm = current->mm;
20577 + struct vm_area_struct *vma;
20578 +- unsigned long start_addr;
20579 ++ unsigned long start_addr, pax_task_size = TASK_SIZE;
20580 ++
20581 ++#ifdef CONFIG_PAX_SEGMEXEC
20582 ++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20583 ++ pax_task_size = SEGMEXEC_TASK_SIZE;
20584 ++#endif
20585 ++
20586 ++ pax_task_size -= PAGE_SIZE;
20587 +
20588 + if (len > mm->cached_hole_size) {
20589 +- start_addr = mm->free_area_cache;
20590 ++ start_addr = mm->free_area_cache;
20591 + } else {
20592 +- start_addr = TASK_UNMAPPED_BASE;
20593 +- mm->cached_hole_size = 0;
20594 ++ start_addr = mm->mmap_base;
20595 ++ mm->cached_hole_size = 0;
20596 + }
20597 +
20598 + full_search:
20599 +@@ -280,26 +287,27 @@ full_search:
20600 +
20601 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20602 + /* At this point: (!vma || addr < vma->vm_end). */
20603 +- if (TASK_SIZE - len < addr) {
20604 ++ if (pax_task_size - len < addr) {
20605 + /*
20606 + * Start a new search - just in case we missed
20607 + * some holes.
20608 + */
20609 +- if (start_addr != TASK_UNMAPPED_BASE) {
20610 +- start_addr = TASK_UNMAPPED_BASE;
20611 ++ if (start_addr != mm->mmap_base) {
20612 ++ start_addr = mm->mmap_base;
20613 + mm->cached_hole_size = 0;
20614 + goto full_search;
20615 + }
20616 + return -ENOMEM;
20617 + }
20618 +- if (!vma || addr + len <= vma->vm_start) {
20619 +- mm->free_area_cache = addr + len;
20620 +- return addr;
20621 +- }
20622 ++ if (check_heap_stack_gap(vma, addr, len))
20623 ++ break;
20624 + if (addr + mm->cached_hole_size < vma->vm_start)
20625 + mm->cached_hole_size = vma->vm_start - addr;
20626 + addr = ALIGN(vma->vm_end, huge_page_size(h));
20627 + }
20628 ++
20629 ++ mm->free_area_cache = addr + len;
20630 ++ return addr;
20631 + }
20632 +
20633 + static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
20634 +@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
20635 + {
20636 + struct hstate *h = hstate_file(file);
20637 + struct mm_struct *mm = current->mm;
20638 +- struct vm_area_struct *vma, *prev_vma;
20639 +- unsigned long base = mm->mmap_base, addr = addr0;
20640 ++ struct vm_area_struct *vma;
20641 ++ unsigned long base = mm->mmap_base, addr;
20642 + unsigned long largest_hole = mm->cached_hole_size;
20643 +- int first_time = 1;
20644 +
20645 + /* don't allow allocations above current base */
20646 + if (mm->free_area_cache > base)
20647 +@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
20648 + largest_hole = 0;
20649 + mm->free_area_cache = base;
20650 + }
20651 +-try_again:
20652 ++
20653 + /* make sure it can fit in the remaining address space */
20654 + if (mm->free_area_cache < len)
20655 + goto fail;
20656 +
20657 + /* either no address requested or can't fit in requested address hole */
20658 +- addr = (mm->free_area_cache - len) & huge_page_mask(h);
20659 ++ addr = (mm->free_area_cache - len);
20660 + do {
20661 ++ addr &= huge_page_mask(h);
20662 ++ vma = find_vma(mm, addr);
20663 + /*
20664 + * Lookup failure means no vma is above this address,
20665 + * i.e. return with success:
20666 +- */
20667 +- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
20668 +- return addr;
20669 +-
20670 +- /*
20671 + * new region fits between prev_vma->vm_end and
20672 + * vma->vm_start, use it:
20673 + */
20674 +- if (addr + len <= vma->vm_start &&
20675 +- (!prev_vma || (addr >= prev_vma->vm_end))) {
20676 ++ if (check_heap_stack_gap(vma, addr, len)) {
20677 + /* remember the address as a hint for next time */
20678 +- mm->cached_hole_size = largest_hole;
20679 +- return (mm->free_area_cache = addr);
20680 +- } else {
20681 +- /* pull free_area_cache down to the first hole */
20682 +- if (mm->free_area_cache == vma->vm_end) {
20683 +- mm->free_area_cache = vma->vm_start;
20684 +- mm->cached_hole_size = largest_hole;
20685 +- }
20686 ++ mm->cached_hole_size = largest_hole;
20687 ++ return (mm->free_area_cache = addr);
20688 ++ }
20689 ++ /* pull free_area_cache down to the first hole */
20690 ++ if (mm->free_area_cache == vma->vm_end) {
20691 ++ mm->free_area_cache = vma->vm_start;
20692 ++ mm->cached_hole_size = largest_hole;
20693 + }
20694 +
20695 + /* remember the largest hole we saw so far */
20696 + if (addr + largest_hole < vma->vm_start)
20697 +- largest_hole = vma->vm_start - addr;
20698 ++ largest_hole = vma->vm_start - addr;
20699 +
20700 + /* try just below the current vma->vm_start */
20701 +- addr = (vma->vm_start - len) & huge_page_mask(h);
20702 +- } while (len <= vma->vm_start);
20703 ++ addr = skip_heap_stack_gap(vma, len);
20704 ++ } while (!IS_ERR_VALUE(addr));
20705 +
20706 + fail:
20707 + /*
20708 +- * if hint left us with no space for the requested
20709 +- * mapping then try again:
20710 +- */
20711 +- if (first_time) {
20712 +- mm->free_area_cache = base;
20713 +- largest_hole = 0;
20714 +- first_time = 0;
20715 +- goto try_again;
20716 +- }
20717 +- /*
20718 + * A failed mmap() very likely causes application failure,
20719 + * so fall back to the bottom-up function here. This scenario
20720 + * can happen with large stack limits and large mmap()
20721 + * allocations.
20722 + */
20723 +- mm->free_area_cache = TASK_UNMAPPED_BASE;
20724 ++
20725 ++#ifdef CONFIG_PAX_SEGMEXEC
20726 ++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20727 ++ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20728 ++ else
20729 ++#endif
20730 ++
20731 ++ mm->mmap_base = TASK_UNMAPPED_BASE;
20732 ++
20733 ++#ifdef CONFIG_PAX_RANDMMAP
20734 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
20735 ++ mm->mmap_base += mm->delta_mmap;
20736 ++#endif
20737 ++
20738 ++ mm->free_area_cache = mm->mmap_base;
20739 + mm->cached_hole_size = ~0UL;
20740 + addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
20741 + len, pgoff, flags);
20742 +@@ -386,6 +392,7 @@ fail:
20743 + /*
20744 + * Restore the topdown base:
20745 + */
20746 ++ mm->mmap_base = base;
20747 + mm->free_area_cache = base;
20748 + mm->cached_hole_size = ~0UL;
20749 +
20750 +@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
20751 + struct hstate *h = hstate_file(file);
20752 + struct mm_struct *mm = current->mm;
20753 + struct vm_area_struct *vma;
20754 ++ unsigned long pax_task_size = TASK_SIZE;
20755 +
20756 + if (len & ~huge_page_mask(h))
20757 + return -EINVAL;
20758 +- if (len > TASK_SIZE)
20759 ++
20760 ++#ifdef CONFIG_PAX_SEGMEXEC
20761 ++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20762 ++ pax_task_size = SEGMEXEC_TASK_SIZE;
20763 ++#endif
20764 ++
20765 ++ pax_task_size -= PAGE_SIZE;
20766 ++
20767 ++ if (len > pax_task_size)
20768 + return -ENOMEM;
20769 +
20770 + if (flags & MAP_FIXED) {
20771 +@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
20772 + if (addr) {
20773 + addr = ALIGN(addr, huge_page_size(h));
20774 + vma = find_vma(mm, addr);
20775 +- if (TASK_SIZE - len >= addr &&
20776 +- (!vma || addr + len <= vma->vm_start))
20777 ++ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
20778 + return addr;
20779 + }
20780 + if (mm->get_unmapped_area == arch_get_unmapped_area)
20781 +diff -urNp linux-3.1.1/arch/x86/mm/init_32.c linux-3.1.1/arch/x86/mm/init_32.c
20782 +--- linux-3.1.1/arch/x86/mm/init_32.c 2011-11-11 15:19:27.000000000 -0500
20783 ++++ linux-3.1.1/arch/x86/mm/init_32.c 2011-11-16 18:39:07.000000000 -0500
20784 +@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
20785 + }
20786 +
20787 + /*
20788 +- * Creates a middle page table and puts a pointer to it in the
20789 +- * given global directory entry. This only returns the gd entry
20790 +- * in non-PAE compilation mode, since the middle layer is folded.
20791 +- */
20792 +-static pmd_t * __init one_md_table_init(pgd_t *pgd)
20793 +-{
20794 +- pud_t *pud;
20795 +- pmd_t *pmd_table;
20796 +-
20797 +-#ifdef CONFIG_X86_PAE
20798 +- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
20799 +- if (after_bootmem)
20800 +- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
20801 +- else
20802 +- pmd_table = (pmd_t *)alloc_low_page();
20803 +- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
20804 +- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
20805 +- pud = pud_offset(pgd, 0);
20806 +- BUG_ON(pmd_table != pmd_offset(pud, 0));
20807 +-
20808 +- return pmd_table;
20809 +- }
20810 +-#endif
20811 +- pud = pud_offset(pgd, 0);
20812 +- pmd_table = pmd_offset(pud, 0);
20813 +-
20814 +- return pmd_table;
20815 +-}
20816 +-
20817 +-/*
20818 + * Create a page table and place a pointer to it in a middle page
20819 + * directory entry:
20820 + */
20821 +@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
20822 + page_table = (pte_t *)alloc_low_page();
20823 +
20824 + paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20825 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20826 ++ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20827 ++#else
20828 + set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20829 ++#endif
20830 + BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20831 + }
20832 +
20833 + return pte_offset_kernel(pmd, 0);
20834 + }
20835 +
20836 ++static pmd_t * __init one_md_table_init(pgd_t *pgd)
20837 ++{
20838 ++ pud_t *pud;
20839 ++ pmd_t *pmd_table;
20840 ++
20841 ++ pud = pud_offset(pgd, 0);
20842 ++ pmd_table = pmd_offset(pud, 0);
20843 ++
20844 ++ return pmd_table;
20845 ++}
20846 ++
20847 + pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20848 + {
20849 + int pgd_idx = pgd_index(vaddr);
20850 +@@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
20851 + int pgd_idx, pmd_idx;
20852 + unsigned long vaddr;
20853 + pgd_t *pgd;
20854 ++ pud_t *pud;
20855 + pmd_t *pmd;
20856 + pte_t *pte = NULL;
20857 +
20858 +@@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
20859 + pgd = pgd_base + pgd_idx;
20860 +
20861 + for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
20862 +- pmd = one_md_table_init(pgd);
20863 +- pmd = pmd + pmd_index(vaddr);
20864 ++ pud = pud_offset(pgd, vaddr);
20865 ++ pmd = pmd_offset(pud, vaddr);
20866 ++
20867 ++#ifdef CONFIG_X86_PAE
20868 ++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20869 ++#endif
20870 ++
20871 + for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
20872 + pmd++, pmd_idx++) {
20873 + pte = page_table_kmap_check(one_page_table_init(pmd),
20874 +@@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
20875 + }
20876 + }
20877 +
20878 +-static inline int is_kernel_text(unsigned long addr)
20879 ++static inline int is_kernel_text(unsigned long start, unsigned long end)
20880 + {
20881 +- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
20882 +- return 1;
20883 +- return 0;
20884 ++ if ((start > ktla_ktva((unsigned long)_etext) ||
20885 ++ end <= ktla_ktva((unsigned long)_stext)) &&
20886 ++ (start > ktla_ktva((unsigned long)_einittext) ||
20887 ++ end <= ktla_ktva((unsigned long)_sinittext)) &&
20888 ++
20889 ++#ifdef CONFIG_ACPI_SLEEP
20890 ++ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
20891 ++#endif
20892 ++
20893 ++ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
20894 ++ return 0;
20895 ++ return 1;
20896 + }
20897 +
20898 + /*
20899 +@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
20900 + unsigned long last_map_addr = end;
20901 + unsigned long start_pfn, end_pfn;
20902 + pgd_t *pgd_base = swapper_pg_dir;
20903 +- int pgd_idx, pmd_idx, pte_ofs;
20904 ++ unsigned int pgd_idx, pmd_idx, pte_ofs;
20905 + unsigned long pfn;
20906 + pgd_t *pgd;
20907 ++ pud_t *pud;
20908 + pmd_t *pmd;
20909 + pte_t *pte;
20910 + unsigned pages_2m, pages_4k;
20911 +@@ -281,8 +282,13 @@ repeat:
20912 + pfn = start_pfn;
20913 + pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20914 + pgd = pgd_base + pgd_idx;
20915 +- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
20916 +- pmd = one_md_table_init(pgd);
20917 ++ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
20918 ++ pud = pud_offset(pgd, 0);
20919 ++ pmd = pmd_offset(pud, 0);
20920 ++
20921 ++#ifdef CONFIG_X86_PAE
20922 ++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20923 ++#endif
20924 +
20925 + if (pfn >= end_pfn)
20926 + continue;
20927 +@@ -294,14 +300,13 @@ repeat:
20928 + #endif
20929 + for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
20930 + pmd++, pmd_idx++) {
20931 +- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
20932 ++ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
20933 +
20934 + /*
20935 + * Map with big pages if possible, otherwise
20936 + * create normal page tables:
20937 + */
20938 + if (use_pse) {
20939 +- unsigned int addr2;
20940 + pgprot_t prot = PAGE_KERNEL_LARGE;
20941 + /*
20942 + * first pass will use the same initial
20943 +@@ -311,11 +316,7 @@ repeat:
20944 + __pgprot(PTE_IDENT_ATTR |
20945 + _PAGE_PSE);
20946 +
20947 +- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
20948 +- PAGE_OFFSET + PAGE_SIZE-1;
20949 +-
20950 +- if (is_kernel_text(addr) ||
20951 +- is_kernel_text(addr2))
20952 ++ if (is_kernel_text(address, address + PMD_SIZE))
20953 + prot = PAGE_KERNEL_LARGE_EXEC;
20954 +
20955 + pages_2m++;
20956 +@@ -332,7 +333,7 @@ repeat:
20957 + pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20958 + pte += pte_ofs;
20959 + for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
20960 +- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
20961 ++ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
20962 + pgprot_t prot = PAGE_KERNEL;
20963 + /*
20964 + * first pass will use the same initial
20965 +@@ -340,7 +341,7 @@ repeat:
20966 + */
20967 + pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
20968 +
20969 +- if (is_kernel_text(addr))
20970 ++ if (is_kernel_text(address, address + PAGE_SIZE))
20971 + prot = PAGE_KERNEL_EXEC;
20972 +
20973 + pages_4k++;
20974 +@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
20975 +
20976 + pud = pud_offset(pgd, va);
20977 + pmd = pmd_offset(pud, va);
20978 +- if (!pmd_present(*pmd))
20979 ++ if (!pmd_present(*pmd) || pmd_huge(*pmd))
20980 + break;
20981 +
20982 + pte = pte_offset_kernel(pmd, va);
20983 +@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
20984 +
20985 + static void __init pagetable_init(void)
20986 + {
20987 +- pgd_t *pgd_base = swapper_pg_dir;
20988 +-
20989 +- permanent_kmaps_init(pgd_base);
20990 ++ permanent_kmaps_init(swapper_pg_dir);
20991 + }
20992 +
20993 +-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20994 ++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20995 + EXPORT_SYMBOL_GPL(__supported_pte_mask);
20996 +
20997 + /* user-defined highmem size */
20998 +@@ -757,6 +756,12 @@ void __init mem_init(void)
20999 +
21000 + pci_iommu_alloc();
21001 +
21002 ++#ifdef CONFIG_PAX_PER_CPU_PGD
21003 ++ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21004 ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21005 ++ KERNEL_PGD_PTRS);
21006 ++#endif
21007 ++
21008 + #ifdef CONFIG_FLATMEM
21009 + BUG_ON(!mem_map);
21010 + #endif
21011 +@@ -774,7 +779,7 @@ void __init mem_init(void)
21012 + set_highmem_pages_init();
21013 +
21014 + codesize = (unsigned long) &_etext - (unsigned long) &_text;
21015 +- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
21016 ++ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
21017 + initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
21018 +
21019 + printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
21020 +@@ -815,10 +820,10 @@ void __init mem_init(void)
21021 + ((unsigned long)&__init_end -
21022 + (unsigned long)&__init_begin) >> 10,
21023 +
21024 +- (unsigned long)&_etext, (unsigned long)&_edata,
21025 +- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
21026 ++ (unsigned long)&_sdata, (unsigned long)&_edata,
21027 ++ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
21028 +
21029 +- (unsigned long)&_text, (unsigned long)&_etext,
21030 ++ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21031 + ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21032 +
21033 + /*
21034 +@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
21035 + if (!kernel_set_to_readonly)
21036 + return;
21037 +
21038 ++ start = ktla_ktva(start);
21039 + pr_debug("Set kernel text: %lx - %lx for read write\n",
21040 + start, start+size);
21041 +
21042 +@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
21043 + if (!kernel_set_to_readonly)
21044 + return;
21045 +
21046 ++ start = ktla_ktva(start);
21047 + pr_debug("Set kernel text: %lx - %lx for read only\n",
21048 + start, start+size);
21049 +
21050 +@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
21051 + unsigned long start = PFN_ALIGN(_text);
21052 + unsigned long size = PFN_ALIGN(_etext) - start;
21053 +
21054 ++ start = ktla_ktva(start);
21055 + set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21056 + printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21057 + size >> 10);
21058 +diff -urNp linux-3.1.1/arch/x86/mm/init_64.c linux-3.1.1/arch/x86/mm/init_64.c
21059 +--- linux-3.1.1/arch/x86/mm/init_64.c 2011-11-11 15:19:27.000000000 -0500
21060 ++++ linux-3.1.1/arch/x86/mm/init_64.c 2011-11-16 18:39:07.000000000 -0500
21061 +@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpa
21062 + * around without checking the pgd every time.
21063 + */
21064 +
21065 +-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
21066 ++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
21067 + EXPORT_SYMBOL_GPL(__supported_pte_mask);
21068 +
21069 + int force_personality32;
21070 +@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long star
21071 +
21072 + for (address = start; address <= end; address += PGDIR_SIZE) {
21073 + const pgd_t *pgd_ref = pgd_offset_k(address);
21074 ++
21075 ++#ifdef CONFIG_PAX_PER_CPU_PGD
21076 ++ unsigned long cpu;
21077 ++#else
21078 + struct page *page;
21079 ++#endif
21080 +
21081 + if (pgd_none(*pgd_ref))
21082 + continue;
21083 +
21084 + spin_lock(&pgd_lock);
21085 ++
21086 ++#ifdef CONFIG_PAX_PER_CPU_PGD
21087 ++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21088 ++ pgd_t *pgd = pgd_offset_cpu(cpu, address);
21089 ++#else
21090 + list_for_each_entry(page, &pgd_list, lru) {
21091 + pgd_t *pgd;
21092 + spinlock_t *pgt_lock;
21093 +@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long star
21094 + /* the pgt_lock only for Xen */
21095 + pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21096 + spin_lock(pgt_lock);
21097 ++#endif
21098 +
21099 + if (pgd_none(*pgd))
21100 + set_pgd(pgd, *pgd_ref);
21101 +@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long star
21102 + BUG_ON(pgd_page_vaddr(*pgd)
21103 + != pgd_page_vaddr(*pgd_ref));
21104 +
21105 ++#ifndef CONFIG_PAX_PER_CPU_PGD
21106 + spin_unlock(pgt_lock);
21107 ++#endif
21108 ++
21109 + }
21110 + spin_unlock(&pgd_lock);
21111 + }
21112 +@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21113 + pmd = fill_pmd(pud, vaddr);
21114 + pte = fill_pte(pmd, vaddr);
21115 +
21116 ++ pax_open_kernel();
21117 + set_pte(pte, new_pte);
21118 ++ pax_close_kernel();
21119 +
21120 + /*
21121 + * It's enough to flush this one mapping.
21122 +@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(
21123 + pgd = pgd_offset_k((unsigned long)__va(phys));
21124 + if (pgd_none(*pgd)) {
21125 + pud = (pud_t *) spp_getpage();
21126 +- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21127 +- _PAGE_USER));
21128 ++ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21129 + }
21130 + pud = pud_offset(pgd, (unsigned long)__va(phys));
21131 + if (pud_none(*pud)) {
21132 + pmd = (pmd_t *) spp_getpage();
21133 +- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21134 +- _PAGE_USER));
21135 ++ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21136 + }
21137 + pmd = pmd_offset(pud, phys);
21138 + BUG_ON(!pmd_none(*pmd));
21139 +@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsign
21140 + if (pfn >= pgt_buf_top)
21141 + panic("alloc_low_page: ran out of memory");
21142 +
21143 +- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
21144 ++ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
21145 + clear_page(adr);
21146 + *phys = pfn * PAGE_SIZE;
21147 + return adr;
21148 +@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *vi
21149 +
21150 + phys = __pa(virt);
21151 + left = phys & (PAGE_SIZE - 1);
21152 +- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
21153 ++ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
21154 + adr = (void *)(((unsigned long)adr) | left);
21155 +
21156 + return adr;
21157 +@@ -693,6 +707,12 @@ void __init mem_init(void)
21158 +
21159 + pci_iommu_alloc();
21160 +
21161 ++#ifdef CONFIG_PAX_PER_CPU_PGD
21162 ++ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21163 ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21164 ++ KERNEL_PGD_PTRS);
21165 ++#endif
21166 ++
21167 + /* clear_bss() already clear the empty_zero_page */
21168 +
21169 + reservedpages = 0;
21170 +@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
21171 + static struct vm_area_struct gate_vma = {
21172 + .vm_start = VSYSCALL_START,
21173 + .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21174 +- .vm_page_prot = PAGE_READONLY_EXEC,
21175 +- .vm_flags = VM_READ | VM_EXEC
21176 ++ .vm_page_prot = PAGE_READONLY,
21177 ++ .vm_flags = VM_READ
21178 + };
21179 +
21180 + struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
21181 +@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long add
21182 +
21183 + const char *arch_vma_name(struct vm_area_struct *vma)
21184 + {
21185 +- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21186 ++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21187 + return "[vdso]";
21188 + if (vma == &gate_vma)
21189 + return "[vsyscall]";
21190 +diff -urNp linux-3.1.1/arch/x86/mm/init.c linux-3.1.1/arch/x86/mm/init.c
21191 +--- linux-3.1.1/arch/x86/mm/init.c 2011-11-11 15:19:27.000000000 -0500
21192 ++++ linux-3.1.1/arch/x86/mm/init.c 2011-11-17 18:31:28.000000000 -0500
21193 +@@ -31,7 +31,7 @@ int direct_gbpages
21194 + static void __init find_early_table_space(unsigned long end, int use_pse,
21195 + int use_gbpages)
21196 + {
21197 +- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
21198 ++ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
21199 + phys_addr_t base;
21200 +
21201 + puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
21202 +@@ -312,8 +312,29 @@ unsigned long __init_refok init_memory_m
21203 + */
21204 + int devmem_is_allowed(unsigned long pagenr)
21205 + {
21206 +- if (pagenr <= 256)
21207 ++#ifdef CONFIG_GRKERNSEC_KMEM
21208 ++ /* allow BDA */
21209 ++ if (!pagenr)
21210 ++ return 1;
21211 ++ /* allow EBDA */
21212 ++ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
21213 ++ return 1;
21214 ++#else
21215 ++ if (!pagenr)
21216 ++ return 1;
21217 ++#ifdef CONFIG_VM86
21218 ++ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
21219 ++ return 1;
21220 ++#endif
21221 ++#endif
21222 ++
21223 ++ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21224 + return 1;
21225 ++#ifdef CONFIG_GRKERNSEC_KMEM
21226 ++ /* throw out everything else below 1MB */
21227 ++ if (pagenr <= 256)
21228 ++ return 0;
21229 ++#endif
21230 + if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21231 + return 0;
21232 + if (!page_is_ram(pagenr))
21233 +@@ -372,6 +393,86 @@ void free_init_pages(char *what, unsigne
21234 +
21235 + void free_initmem(void)
21236 + {
21237 ++
21238 ++#ifdef CONFIG_PAX_KERNEXEC
21239 ++#ifdef CONFIG_X86_32
21240 ++ /* PaX: limit KERNEL_CS to actual size */
21241 ++ unsigned long addr, limit;
21242 ++ struct desc_struct d;
21243 ++ int cpu;
21244 ++
21245 ++ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21246 ++ limit = (limit - 1UL) >> PAGE_SHIFT;
21247 ++
21248 ++ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21249 ++ for (cpu = 0; cpu < NR_CPUS; cpu++) {
21250 ++ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21251 ++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
21252 ++ }
21253 ++
21254 ++ /* PaX: make KERNEL_CS read-only */
21255 ++ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
21256 ++ if (!paravirt_enabled())
21257 ++ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
21258 ++/*
21259 ++ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
21260 ++ pgd = pgd_offset_k(addr);
21261 ++ pud = pud_offset(pgd, addr);
21262 ++ pmd = pmd_offset(pud, addr);
21263 ++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21264 ++ }
21265 ++*/
21266 ++#ifdef CONFIG_X86_PAE
21267 ++ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
21268 ++/*
21269 ++ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
21270 ++ pgd = pgd_offset_k(addr);
21271 ++ pud = pud_offset(pgd, addr);
21272 ++ pmd = pmd_offset(pud, addr);
21273 ++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21274 ++ }
21275 ++*/
21276 ++#endif
21277 ++
21278 ++#ifdef CONFIG_MODULES
21279 ++ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
21280 ++#endif
21281 ++
21282 ++#else
21283 ++ pgd_t *pgd;
21284 ++ pud_t *pud;
21285 ++ pmd_t *pmd;
21286 ++ unsigned long addr, end;
21287 ++
21288 ++ /* PaX: make kernel code/rodata read-only, rest non-executable */
21289 ++ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
21290 ++ pgd = pgd_offset_k(addr);
21291 ++ pud = pud_offset(pgd, addr);
21292 ++ pmd = pmd_offset(pud, addr);
21293 ++ if (!pmd_present(*pmd))
21294 ++ continue;
21295 ++ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
21296 ++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21297 ++ else
21298 ++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21299 ++ }
21300 ++
21301 ++ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
21302 ++ end = addr + KERNEL_IMAGE_SIZE;
21303 ++ for (; addr < end; addr += PMD_SIZE) {
21304 ++ pgd = pgd_offset_k(addr);
21305 ++ pud = pud_offset(pgd, addr);
21306 ++ pmd = pmd_offset(pud, addr);
21307 ++ if (!pmd_present(*pmd))
21308 ++ continue;
21309 ++ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
21310 ++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21311 ++ }
21312 ++#endif
21313 ++
21314 ++ flush_tlb_all();
21315 ++#endif
21316 ++
21317 + free_init_pages("unused kernel memory",
21318 + (unsigned long)(&__init_begin),
21319 + (unsigned long)(&__init_end));
21320 +diff -urNp linux-3.1.1/arch/x86/mm/iomap_32.c linux-3.1.1/arch/x86/mm/iomap_32.c
21321 +--- linux-3.1.1/arch/x86/mm/iomap_32.c 2011-11-11 15:19:27.000000000 -0500
21322 ++++ linux-3.1.1/arch/x86/mm/iomap_32.c 2011-11-16 18:39:07.000000000 -0500
21323 +@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
21324 + type = kmap_atomic_idx_push();
21325 + idx = type + KM_TYPE_NR * smp_processor_id();
21326 + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21327 ++
21328 ++ pax_open_kernel();
21329 + set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
21330 ++ pax_close_kernel();
21331 ++
21332 + arch_flush_lazy_mmu_mode();
21333 +
21334 + return (void *)vaddr;
21335 +diff -urNp linux-3.1.1/arch/x86/mm/ioremap.c linux-3.1.1/arch/x86/mm/ioremap.c
21336 +--- linux-3.1.1/arch/x86/mm/ioremap.c 2011-11-11 15:19:27.000000000 -0500
21337 ++++ linux-3.1.1/arch/x86/mm/ioremap.c 2011-11-16 18:39:07.000000000 -0500
21338 +@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
21339 + for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
21340 + int is_ram = page_is_ram(pfn);
21341 +
21342 +- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
21343 ++ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
21344 + return NULL;
21345 + WARN_ON_ONCE(is_ram);
21346 + }
21347 +@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
21348 + early_param("early_ioremap_debug", early_ioremap_debug_setup);
21349 +
21350 + static __initdata int after_paging_init;
21351 +-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
21352 ++static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
21353 +
21354 + static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
21355 + {
21356 +@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
21357 + slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
21358 +
21359 + pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
21360 +- memset(bm_pte, 0, sizeof(bm_pte));
21361 +- pmd_populate_kernel(&init_mm, pmd, bm_pte);
21362 ++ pmd_populate_user(&init_mm, pmd, bm_pte);
21363 +
21364 + /*
21365 + * The boot-ioremap range spans multiple pmds, for which
21366 +diff -urNp linux-3.1.1/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.1.1/arch/x86/mm/kmemcheck/kmemcheck.c
21367 +--- linux-3.1.1/arch/x86/mm/kmemcheck/kmemcheck.c 2011-11-11 15:19:27.000000000 -0500
21368 ++++ linux-3.1.1/arch/x86/mm/kmemcheck/kmemcheck.c 2011-11-16 18:39:07.000000000 -0500
21369 +@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
21370 + * memory (e.g. tracked pages)? For now, we need this to avoid
21371 + * invoking kmemcheck for PnP BIOS calls.
21372 + */
21373 +- if (regs->flags & X86_VM_MASK)
21374 ++ if (v8086_mode(regs))
21375 + return false;
21376 +- if (regs->cs != __KERNEL_CS)
21377 ++ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
21378 + return false;
21379 +
21380 + pte = kmemcheck_pte_lookup(address);
21381 +diff -urNp linux-3.1.1/arch/x86/mm/mmap.c linux-3.1.1/arch/x86/mm/mmap.c
21382 +--- linux-3.1.1/arch/x86/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
21383 ++++ linux-3.1.1/arch/x86/mm/mmap.c 2011-11-16 18:39:07.000000000 -0500
21384 +@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
21385 + * Leave an at least ~128 MB hole with possible stack randomization.
21386 + */
21387 + #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
21388 +-#define MAX_GAP (TASK_SIZE/6*5)
21389 ++#define MAX_GAP (pax_task_size/6*5)
21390 +
21391 + /*
21392 + * True on X86_32 or when emulating IA32 on X86_64
21393 +@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
21394 + return rnd << PAGE_SHIFT;
21395 + }
21396 +
21397 +-static unsigned long mmap_base(void)
21398 ++static unsigned long mmap_base(struct mm_struct *mm)
21399 + {
21400 + unsigned long gap = rlimit(RLIMIT_STACK);
21401 ++ unsigned long pax_task_size = TASK_SIZE;
21402 ++
21403 ++#ifdef CONFIG_PAX_SEGMEXEC
21404 ++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21405 ++ pax_task_size = SEGMEXEC_TASK_SIZE;
21406 ++#endif
21407 +
21408 + if (gap < MIN_GAP)
21409 + gap = MIN_GAP;
21410 + else if (gap > MAX_GAP)
21411 + gap = MAX_GAP;
21412 +
21413 +- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
21414 ++ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
21415 + }
21416 +
21417 + /*
21418 + * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
21419 + * does, but not when emulating X86_32
21420 + */
21421 +-static unsigned long mmap_legacy_base(void)
21422 ++static unsigned long mmap_legacy_base(struct mm_struct *mm)
21423 + {
21424 +- if (mmap_is_ia32())
21425 ++ if (mmap_is_ia32()) {
21426 ++
21427 ++#ifdef CONFIG_PAX_SEGMEXEC
21428 ++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21429 ++ return SEGMEXEC_TASK_UNMAPPED_BASE;
21430 ++ else
21431 ++#endif
21432 ++
21433 + return TASK_UNMAPPED_BASE;
21434 +- else
21435 ++ } else
21436 + return TASK_UNMAPPED_BASE + mmap_rnd();
21437 + }
21438 +
21439 +@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
21440 + void arch_pick_mmap_layout(struct mm_struct *mm)
21441 + {
21442 + if (mmap_is_legacy()) {
21443 +- mm->mmap_base = mmap_legacy_base();
21444 ++ mm->mmap_base = mmap_legacy_base(mm);
21445 ++
21446 ++#ifdef CONFIG_PAX_RANDMMAP
21447 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
21448 ++ mm->mmap_base += mm->delta_mmap;
21449 ++#endif
21450 ++
21451 + mm->get_unmapped_area = arch_get_unmapped_area;
21452 + mm->unmap_area = arch_unmap_area;
21453 + } else {
21454 +- mm->mmap_base = mmap_base();
21455 ++ mm->mmap_base = mmap_base(mm);
21456 ++
21457 ++#ifdef CONFIG_PAX_RANDMMAP
21458 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
21459 ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
21460 ++#endif
21461 ++
21462 + mm->get_unmapped_area = arch_get_unmapped_area_topdown;
21463 + mm->unmap_area = arch_unmap_area_topdown;
21464 + }
21465 +diff -urNp linux-3.1.1/arch/x86/mm/mmio-mod.c linux-3.1.1/arch/x86/mm/mmio-mod.c
21466 +--- linux-3.1.1/arch/x86/mm/mmio-mod.c 2011-11-11 15:19:27.000000000 -0500
21467 ++++ linux-3.1.1/arch/x86/mm/mmio-mod.c 2011-11-16 18:39:07.000000000 -0500
21468 +@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
21469 + break;
21470 + default:
21471 + {
21472 +- unsigned char *ip = (unsigned char *)instptr;
21473 ++ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
21474 + my_trace->opcode = MMIO_UNKNOWN_OP;
21475 + my_trace->width = 0;
21476 + my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
21477 +@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
21478 + static void ioremap_trace_core(resource_size_t offset, unsigned long size,
21479 + void __iomem *addr)
21480 + {
21481 +- static atomic_t next_id;
21482 ++ static atomic_unchecked_t next_id;
21483 + struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
21484 + /* These are page-unaligned. */
21485 + struct mmiotrace_map map = {
21486 +@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
21487 + .private = trace
21488 + },
21489 + .phys = offset,
21490 +- .id = atomic_inc_return(&next_id)
21491 ++ .id = atomic_inc_return_unchecked(&next_id)
21492 + };
21493 + map.map_id = trace->id;
21494 +
21495 +diff -urNp linux-3.1.1/arch/x86/mm/pageattr.c linux-3.1.1/arch/x86/mm/pageattr.c
21496 +--- linux-3.1.1/arch/x86/mm/pageattr.c 2011-11-11 15:19:27.000000000 -0500
21497 ++++ linux-3.1.1/arch/x86/mm/pageattr.c 2011-11-16 18:39:07.000000000 -0500
21498 +@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
21499 + */
21500 + #ifdef CONFIG_PCI_BIOS
21501 + if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
21502 +- pgprot_val(forbidden) |= _PAGE_NX;
21503 ++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21504 + #endif
21505 +
21506 + /*
21507 +@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
21508 + * Does not cover __inittext since that is gone later on. On
21509 + * 64bit we do not enforce !NX on the low mapping
21510 + */
21511 +- if (within(address, (unsigned long)_text, (unsigned long)_etext))
21512 +- pgprot_val(forbidden) |= _PAGE_NX;
21513 ++ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
21514 ++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21515 +
21516 ++#ifdef CONFIG_DEBUG_RODATA
21517 + /*
21518 + * The .rodata section needs to be read-only. Using the pfn
21519 + * catches all aliases.
21520 +@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
21521 + if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
21522 + __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
21523 + pgprot_val(forbidden) |= _PAGE_RW;
21524 ++#endif
21525 +
21526 + #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
21527 + /*
21528 +@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
21529 + }
21530 + #endif
21531 +
21532 ++#ifdef CONFIG_PAX_KERNEXEC
21533 ++ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
21534 ++ pgprot_val(forbidden) |= _PAGE_RW;
21535 ++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21536 ++ }
21537 ++#endif
21538 ++
21539 + prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
21540 +
21541 + return prot;
21542 +@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
21543 + static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
21544 + {
21545 + /* change init_mm */
21546 ++ pax_open_kernel();
21547 + set_pte_atomic(kpte, pte);
21548 ++
21549 + #ifdef CONFIG_X86_32
21550 + if (!SHARED_KERNEL_PMD) {
21551 ++
21552 ++#ifdef CONFIG_PAX_PER_CPU_PGD
21553 ++ unsigned long cpu;
21554 ++#else
21555 + struct page *page;
21556 ++#endif
21557 +
21558 ++#ifdef CONFIG_PAX_PER_CPU_PGD
21559 ++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21560 ++ pgd_t *pgd = get_cpu_pgd(cpu);
21561 ++#else
21562 + list_for_each_entry(page, &pgd_list, lru) {
21563 +- pgd_t *pgd;
21564 ++ pgd_t *pgd = (pgd_t *)page_address(page);
21565 ++#endif
21566 ++
21567 + pud_t *pud;
21568 + pmd_t *pmd;
21569 +
21570 +- pgd = (pgd_t *)page_address(page) + pgd_index(address);
21571 ++ pgd += pgd_index(address);
21572 + pud = pud_offset(pgd, address);
21573 + pmd = pmd_offset(pud, address);
21574 + set_pte_atomic((pte_t *)pmd, pte);
21575 + }
21576 + }
21577 + #endif
21578 ++ pax_close_kernel();
21579 + }
21580 +
21581 + static int
21582 +diff -urNp linux-3.1.1/arch/x86/mm/pageattr-test.c linux-3.1.1/arch/x86/mm/pageattr-test.c
21583 +--- linux-3.1.1/arch/x86/mm/pageattr-test.c 2011-11-11 15:19:27.000000000 -0500
21584 ++++ linux-3.1.1/arch/x86/mm/pageattr-test.c 2011-11-16 18:39:07.000000000 -0500
21585 +@@ -36,7 +36,7 @@ enum {
21586 +
21587 + static int pte_testbit(pte_t pte)
21588 + {
21589 +- return pte_flags(pte) & _PAGE_UNUSED1;
21590 ++ return pte_flags(pte) & _PAGE_CPA_TEST;
21591 + }
21592 +
21593 + struct split_state {
21594 +diff -urNp linux-3.1.1/arch/x86/mm/pat.c linux-3.1.1/arch/x86/mm/pat.c
21595 +--- linux-3.1.1/arch/x86/mm/pat.c 2011-11-11 15:19:27.000000000 -0500
21596 ++++ linux-3.1.1/arch/x86/mm/pat.c 2011-11-16 18:39:07.000000000 -0500
21597 +@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
21598 +
21599 + if (!entry) {
21600 + printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
21601 +- current->comm, current->pid, start, end);
21602 ++ current->comm, task_pid_nr(current), start, end);
21603 + return -EINVAL;
21604 + }
21605 +
21606 +@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
21607 + while (cursor < to) {
21608 + if (!devmem_is_allowed(pfn)) {
21609 + printk(KERN_INFO
21610 +- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
21611 +- current->comm, from, to);
21612 ++ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
21613 ++ current->comm, from, to, cursor);
21614 + return 0;
21615 + }
21616 + cursor += PAGE_SIZE;
21617 +@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
21618 + printk(KERN_INFO
21619 + "%s:%d ioremap_change_attr failed %s "
21620 + "for %Lx-%Lx\n",
21621 +- current->comm, current->pid,
21622 ++ current->comm, task_pid_nr(current),
21623 + cattr_name(flags),
21624 + base, (unsigned long long)(base + size));
21625 + return -EINVAL;
21626 +@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
21627 + if (want_flags != flags) {
21628 + printk(KERN_WARNING
21629 + "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
21630 +- current->comm, current->pid,
21631 ++ current->comm, task_pid_nr(current),
21632 + cattr_name(want_flags),
21633 + (unsigned long long)paddr,
21634 + (unsigned long long)(paddr + size),
21635 +@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
21636 + free_memtype(paddr, paddr + size);
21637 + printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
21638 + " for %Lx-%Lx, got %s\n",
21639 +- current->comm, current->pid,
21640 ++ current->comm, task_pid_nr(current),
21641 + cattr_name(want_flags),
21642 + (unsigned long long)paddr,
21643 + (unsigned long long)(paddr + size),
21644 +diff -urNp linux-3.1.1/arch/x86/mm/pf_in.c linux-3.1.1/arch/x86/mm/pf_in.c
21645 +--- linux-3.1.1/arch/x86/mm/pf_in.c 2011-11-11 15:19:27.000000000 -0500
21646 ++++ linux-3.1.1/arch/x86/mm/pf_in.c 2011-11-16 18:39:07.000000000 -0500
21647 +@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
21648 + int i;
21649 + enum reason_type rv = OTHERS;
21650 +
21651 +- p = (unsigned char *)ins_addr;
21652 ++ p = (unsigned char *)ktla_ktva(ins_addr);
21653 + p += skip_prefix(p, &prf);
21654 + p += get_opcode(p, &opcode);
21655 +
21656 +@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
21657 + struct prefix_bits prf;
21658 + int i;
21659 +
21660 +- p = (unsigned char *)ins_addr;
21661 ++ p = (unsigned char *)ktla_ktva(ins_addr);
21662 + p += skip_prefix(p, &prf);
21663 + p += get_opcode(p, &opcode);
21664 +
21665 +@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
21666 + struct prefix_bits prf;
21667 + int i;
21668 +
21669 +- p = (unsigned char *)ins_addr;
21670 ++ p = (unsigned char *)ktla_ktva(ins_addr);
21671 + p += skip_prefix(p, &prf);
21672 + p += get_opcode(p, &opcode);
21673 +
21674 +@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
21675 + struct prefix_bits prf;
21676 + int i;
21677 +
21678 +- p = (unsigned char *)ins_addr;
21679 ++ p = (unsigned char *)ktla_ktva(ins_addr);
21680 + p += skip_prefix(p, &prf);
21681 + p += get_opcode(p, &opcode);
21682 + for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
21683 +@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
21684 + struct prefix_bits prf;
21685 + int i;
21686 +
21687 +- p = (unsigned char *)ins_addr;
21688 ++ p = (unsigned char *)ktla_ktva(ins_addr);
21689 + p += skip_prefix(p, &prf);
21690 + p += get_opcode(p, &opcode);
21691 + for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
21692 +diff -urNp linux-3.1.1/arch/x86/mm/pgtable_32.c linux-3.1.1/arch/x86/mm/pgtable_32.c
21693 +--- linux-3.1.1/arch/x86/mm/pgtable_32.c 2011-11-11 15:19:27.000000000 -0500
21694 ++++ linux-3.1.1/arch/x86/mm/pgtable_32.c 2011-11-16 18:39:07.000000000 -0500
21695 +@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
21696 + return;
21697 + }
21698 + pte = pte_offset_kernel(pmd, vaddr);
21699 ++
21700 ++ pax_open_kernel();
21701 + if (pte_val(pteval))
21702 + set_pte_at(&init_mm, vaddr, pte, pteval);
21703 + else
21704 + pte_clear(&init_mm, vaddr, pte);
21705 ++ pax_close_kernel();
21706 +
21707 + /*
21708 + * It's enough to flush this one mapping.
21709 +diff -urNp linux-3.1.1/arch/x86/mm/pgtable.c linux-3.1.1/arch/x86/mm/pgtable.c
21710 +--- linux-3.1.1/arch/x86/mm/pgtable.c 2011-11-11 15:19:27.000000000 -0500
21711 ++++ linux-3.1.1/arch/x86/mm/pgtable.c 2011-11-16 18:39:07.000000000 -0500
21712 +@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
21713 + list_del(&page->lru);
21714 + }
21715 +
21716 +-#define UNSHARED_PTRS_PER_PGD \
21717 +- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21718 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21719 ++pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
21720 +
21721 ++void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21722 ++{
21723 ++ while (count--)
21724 ++ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
21725 ++}
21726 ++#endif
21727 ++
21728 ++#ifdef CONFIG_PAX_PER_CPU_PGD
21729 ++void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21730 ++{
21731 ++ while (count--)
21732 ++
21733 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21734 ++ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
21735 ++#else
21736 ++ *dst++ = *src++;
21737 ++#endif
21738 +
21739 ++}
21740 ++#endif
21741 ++
21742 ++#ifdef CONFIG_X86_64
21743 ++#define pxd_t pud_t
21744 ++#define pyd_t pgd_t
21745 ++#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
21746 ++#define pxd_free(mm, pud) pud_free((mm), (pud))
21747 ++#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
21748 ++#define pyd_offset(mm ,address) pgd_offset((mm), (address))
21749 ++#define PYD_SIZE PGDIR_SIZE
21750 ++#else
21751 ++#define pxd_t pmd_t
21752 ++#define pyd_t pud_t
21753 ++#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
21754 ++#define pxd_free(mm, pud) pmd_free((mm), (pud))
21755 ++#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
21756 ++#define pyd_offset(mm ,address) pud_offset((mm), (address))
21757 ++#define PYD_SIZE PUD_SIZE
21758 ++#endif
21759 ++
21760 ++#ifdef CONFIG_PAX_PER_CPU_PGD
21761 ++static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
21762 ++static inline void pgd_dtor(pgd_t *pgd) {}
21763 ++#else
21764 + static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
21765 + {
21766 + BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
21767 +@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
21768 + pgd_list_del(pgd);
21769 + spin_unlock(&pgd_lock);
21770 + }
21771 ++#endif
21772 +
21773 + /*
21774 + * List of all pgd's needed for non-PAE so it can invalidate entries
21775 +@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
21776 + * -- wli
21777 + */
21778 +
21779 +-#ifdef CONFIG_X86_PAE
21780 ++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21781 + /*
21782 + * In PAE mode, we need to do a cr3 reload (=tlb flush) when
21783 + * updating the top-level pagetable entries to guarantee the
21784 +@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
21785 + * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
21786 + * and initialize the kernel pmds here.
21787 + */
21788 +-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
21789 ++#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21790 +
21791 + void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
21792 + {
21793 +@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
21794 + */
21795 + flush_tlb_mm(mm);
21796 + }
21797 ++#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
21798 ++#define PREALLOCATED_PXDS USER_PGD_PTRS
21799 + #else /* !CONFIG_X86_PAE */
21800 +
21801 + /* No need to prepopulate any pagetable entries in non-PAE modes. */
21802 +-#define PREALLOCATED_PMDS 0
21803 ++#define PREALLOCATED_PXDS 0
21804 +
21805 + #endif /* CONFIG_X86_PAE */
21806 +
21807 +-static void free_pmds(pmd_t *pmds[])
21808 ++static void free_pxds(pxd_t *pxds[])
21809 + {
21810 + int i;
21811 +
21812 +- for(i = 0; i < PREALLOCATED_PMDS; i++)
21813 +- if (pmds[i])
21814 +- free_page((unsigned long)pmds[i]);
21815 ++ for(i = 0; i < PREALLOCATED_PXDS; i++)
21816 ++ if (pxds[i])
21817 ++ free_page((unsigned long)pxds[i]);
21818 + }
21819 +
21820 +-static int preallocate_pmds(pmd_t *pmds[])
21821 ++static int preallocate_pxds(pxd_t *pxds[])
21822 + {
21823 + int i;
21824 + bool failed = false;
21825 +
21826 +- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21827 +- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21828 +- if (pmd == NULL)
21829 ++ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21830 ++ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21831 ++ if (pxd == NULL)
21832 + failed = true;
21833 +- pmds[i] = pmd;
21834 ++ pxds[i] = pxd;
21835 + }
21836 +
21837 + if (failed) {
21838 +- free_pmds(pmds);
21839 ++ free_pxds(pxds);
21840 + return -ENOMEM;
21841 + }
21842 +
21843 +@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
21844 + * preallocate which never got a corresponding vma will need to be
21845 + * freed manually.
21846 + */
21847 +-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21848 ++static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21849 + {
21850 + int i;
21851 +
21852 +- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21853 ++ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21854 + pgd_t pgd = pgdp[i];
21855 +
21856 + if (pgd_val(pgd) != 0) {
21857 +- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21858 ++ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21859 +
21860 +- pgdp[i] = native_make_pgd(0);
21861 ++ set_pgd(pgdp + i, native_make_pgd(0));
21862 +
21863 +- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21864 +- pmd_free(mm, pmd);
21865 ++ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21866 ++ pxd_free(mm, pxd);
21867 + }
21868 + }
21869 + }
21870 +
21871 +-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21872 ++static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21873 + {
21874 +- pud_t *pud;
21875 ++ pyd_t *pyd;
21876 + unsigned long addr;
21877 + int i;
21878 +
21879 +- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21880 ++ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21881 + return;
21882 +
21883 +- pud = pud_offset(pgd, 0);
21884 ++#ifdef CONFIG_X86_64
21885 ++ pyd = pyd_offset(mm, 0L);
21886 ++#else
21887 ++ pyd = pyd_offset(pgd, 0L);
21888 ++#endif
21889 +
21890 +- for (addr = i = 0; i < PREALLOCATED_PMDS;
21891 +- i++, pud++, addr += PUD_SIZE) {
21892 +- pmd_t *pmd = pmds[i];
21893 ++ for (addr = i = 0; i < PREALLOCATED_PXDS;
21894 ++ i++, pyd++, addr += PYD_SIZE) {
21895 ++ pxd_t *pxd = pxds[i];
21896 +
21897 + if (i >= KERNEL_PGD_BOUNDARY)
21898 +- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21899 +- sizeof(pmd_t) * PTRS_PER_PMD);
21900 ++ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21901 ++ sizeof(pxd_t) * PTRS_PER_PMD);
21902 +
21903 +- pud_populate(mm, pud, pmd);
21904 ++ pyd_populate(mm, pyd, pxd);
21905 + }
21906 + }
21907 +
21908 + pgd_t *pgd_alloc(struct mm_struct *mm)
21909 + {
21910 + pgd_t *pgd;
21911 +- pmd_t *pmds[PREALLOCATED_PMDS];
21912 ++ pxd_t *pxds[PREALLOCATED_PXDS];
21913 +
21914 + pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21915 +
21916 +@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21917 +
21918 + mm->pgd = pgd;
21919 +
21920 +- if (preallocate_pmds(pmds) != 0)
21921 ++ if (preallocate_pxds(pxds) != 0)
21922 + goto out_free_pgd;
21923 +
21924 + if (paravirt_pgd_alloc(mm) != 0)
21925 +- goto out_free_pmds;
21926 ++ goto out_free_pxds;
21927 +
21928 + /*
21929 + * Make sure that pre-populating the pmds is atomic with
21930 +@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21931 + spin_lock(&pgd_lock);
21932 +
21933 + pgd_ctor(mm, pgd);
21934 +- pgd_prepopulate_pmd(mm, pgd, pmds);
21935 ++ pgd_prepopulate_pxd(mm, pgd, pxds);
21936 +
21937 + spin_unlock(&pgd_lock);
21938 +
21939 + return pgd;
21940 +
21941 +-out_free_pmds:
21942 +- free_pmds(pmds);
21943 ++out_free_pxds:
21944 ++ free_pxds(pxds);
21945 + out_free_pgd:
21946 + free_page((unsigned long)pgd);
21947 + out:
21948 +@@ -295,7 +344,7 @@ out:
21949 +
21950 + void pgd_free(struct mm_struct *mm, pgd_t *pgd)
21951 + {
21952 +- pgd_mop_up_pmds(mm, pgd);
21953 ++ pgd_mop_up_pxds(mm, pgd);
21954 + pgd_dtor(pgd);
21955 + paravirt_pgd_free(mm, pgd);
21956 + free_page((unsigned long)pgd);
21957 +diff -urNp linux-3.1.1/arch/x86/mm/setup_nx.c linux-3.1.1/arch/x86/mm/setup_nx.c
21958 +--- linux-3.1.1/arch/x86/mm/setup_nx.c 2011-11-11 15:19:27.000000000 -0500
21959 ++++ linux-3.1.1/arch/x86/mm/setup_nx.c 2011-11-16 18:39:07.000000000 -0500
21960 +@@ -5,8 +5,10 @@
21961 + #include <asm/pgtable.h>
21962 + #include <asm/proto.h>
21963 +
21964 ++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21965 + static int disable_nx __cpuinitdata;
21966 +
21967 ++#ifndef CONFIG_PAX_PAGEEXEC
21968 + /*
21969 + * noexec = on|off
21970 + *
21971 +@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
21972 + return 0;
21973 + }
21974 + early_param("noexec", noexec_setup);
21975 ++#endif
21976 ++
21977 ++#endif
21978 +
21979 + void __cpuinit x86_configure_nx(void)
21980 + {
21981 ++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21982 + if (cpu_has_nx && !disable_nx)
21983 + __supported_pte_mask |= _PAGE_NX;
21984 + else
21985 ++#endif
21986 + __supported_pte_mask &= ~_PAGE_NX;
21987 + }
21988 +
21989 +diff -urNp linux-3.1.1/arch/x86/mm/tlb.c linux-3.1.1/arch/x86/mm/tlb.c
21990 +--- linux-3.1.1/arch/x86/mm/tlb.c 2011-11-11 15:19:27.000000000 -0500
21991 ++++ linux-3.1.1/arch/x86/mm/tlb.c 2011-11-16 18:39:07.000000000 -0500
21992 +@@ -65,7 +65,11 @@ void leave_mm(int cpu)
21993 + BUG();
21994 + cpumask_clear_cpu(cpu,
21995 + mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
21996 ++
21997 ++#ifndef CONFIG_PAX_PER_CPU_PGD
21998 + load_cr3(swapper_pg_dir);
21999 ++#endif
22000 ++
22001 + }
22002 + EXPORT_SYMBOL_GPL(leave_mm);
22003 +
22004 +diff -urNp linux-3.1.1/arch/x86/net/bpf_jit_comp.c linux-3.1.1/arch/x86/net/bpf_jit_comp.c
22005 +--- linux-3.1.1/arch/x86/net/bpf_jit_comp.c 2011-11-11 15:19:27.000000000 -0500
22006 ++++ linux-3.1.1/arch/x86/net/bpf_jit_comp.c 2011-11-18 17:57:33.000000000 -0500
22007 +@@ -586,10 +586,12 @@ cond_branch: f_offset = addrs[i + filt
22008 + if (unlikely(proglen + ilen > oldproglen)) {
22009 + pr_err("bpb_jit_compile fatal error\n");
22010 + kfree(addrs);
22011 +- module_free(NULL, image);
22012 ++ module_free_exec(NULL, image);
22013 + return;
22014 + }
22015 ++ pax_open_kernel();
22016 + memcpy(image + proglen, temp, ilen);
22017 ++ pax_close_kernel();
22018 + }
22019 + proglen += ilen;
22020 + addrs[i] = proglen;
22021 +@@ -609,7 +611,7 @@ cond_branch: f_offset = addrs[i + filt
22022 + break;
22023 + }
22024 + if (proglen == oldproglen) {
22025 +- image = module_alloc(max_t(unsigned int,
22026 ++ image = module_alloc_exec(max_t(unsigned int,
22027 + proglen,
22028 + sizeof(struct work_struct)));
22029 + if (!image)
22030 +@@ -637,11 +639,11 @@ out:
22031 +
22032 + static void jit_free_defer(struct work_struct *arg)
22033 + {
22034 +- module_free(NULL, arg);
22035 ++ module_free_exec(NULL, arg);
22036 + }
22037 +
22038 + /* run from softirq, we must use a work_struct to call
22039 +- * module_free() from process context
22040 ++ * module_free_exec() from process context
22041 + */
22042 + void bpf_jit_free(struct sk_filter *fp)
22043 + {
22044 +diff -urNp linux-3.1.1/arch/x86/net/bpf_jit.S linux-3.1.1/arch/x86/net/bpf_jit.S
22045 +--- linux-3.1.1/arch/x86/net/bpf_jit.S 2011-11-11 15:19:27.000000000 -0500
22046 ++++ linux-3.1.1/arch/x86/net/bpf_jit.S 2011-11-16 18:39:07.000000000 -0500
22047 +@@ -9,6 +9,7 @@
22048 + */
22049 + #include <linux/linkage.h>
22050 + #include <asm/dwarf2.h>
22051 ++#include <asm/alternative-asm.h>
22052 +
22053 + /*
22054 + * Calling convention :
22055 +@@ -35,6 +36,7 @@ sk_load_word:
22056 + jle bpf_slow_path_word
22057 + mov (SKBDATA,%rsi),%eax
22058 + bswap %eax /* ntohl() */
22059 ++ pax_force_retaddr
22060 + ret
22061 +
22062 +
22063 +@@ -53,6 +55,7 @@ sk_load_half:
22064 + jle bpf_slow_path_half
22065 + movzwl (SKBDATA,%rsi),%eax
22066 + rol $8,%ax # ntohs()
22067 ++ pax_force_retaddr
22068 + ret
22069 +
22070 + sk_load_byte_ind:
22071 +@@ -66,6 +69,7 @@ sk_load_byte:
22072 + cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
22073 + jle bpf_slow_path_byte
22074 + movzbl (SKBDATA,%rsi),%eax
22075 ++ pax_force_retaddr
22076 + ret
22077 +
22078 + /**
22079 +@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
22080 + movzbl (SKBDATA,%rsi),%ebx
22081 + and $15,%bl
22082 + shl $2,%bl
22083 ++ pax_force_retaddr
22084 + ret
22085 + CFI_ENDPROC
22086 + ENDPROC(sk_load_byte_msh)
22087 +@@ -91,6 +96,7 @@ bpf_error:
22088 + xor %eax,%eax
22089 + mov -8(%rbp),%rbx
22090 + leaveq
22091 ++ pax_force_retaddr
22092 + ret
22093 +
22094 + /* rsi contains offset and can be scratched */
22095 +@@ -113,6 +119,7 @@ bpf_slow_path_word:
22096 + js bpf_error
22097 + mov -12(%rbp),%eax
22098 + bswap %eax
22099 ++ pax_force_retaddr
22100 + ret
22101 +
22102 + bpf_slow_path_half:
22103 +@@ -121,12 +128,14 @@ bpf_slow_path_half:
22104 + mov -12(%rbp),%ax
22105 + rol $8,%ax
22106 + movzwl %ax,%eax
22107 ++ pax_force_retaddr
22108 + ret
22109 +
22110 + bpf_slow_path_byte:
22111 + bpf_slow_path_common(1)
22112 + js bpf_error
22113 + movzbl -12(%rbp),%eax
22114 ++ pax_force_retaddr
22115 + ret
22116 +
22117 + bpf_slow_path_byte_msh:
22118 +@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
22119 + and $15,%al
22120 + shl $2,%al
22121 + xchg %eax,%ebx
22122 ++ pax_force_retaddr
22123 + ret
22124 +diff -urNp linux-3.1.1/arch/x86/oprofile/backtrace.c linux-3.1.1/arch/x86/oprofile/backtrace.c
22125 +--- linux-3.1.1/arch/x86/oprofile/backtrace.c 2011-11-11 15:19:27.000000000 -0500
22126 ++++ linux-3.1.1/arch/x86/oprofile/backtrace.c 2011-11-16 18:39:07.000000000 -0500
22127 +@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_fram
22128 + struct stack_frame_ia32 *fp;
22129 + unsigned long bytes;
22130 +
22131 +- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
22132 ++ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
22133 + if (bytes != sizeof(bufhead))
22134 + return NULL;
22135 +
22136 +- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
22137 ++ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
22138 +
22139 + oprofile_add_trace(bufhead[0].return_address);
22140 +
22141 +@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_bac
22142 + struct stack_frame bufhead[2];
22143 + unsigned long bytes;
22144 +
22145 +- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
22146 ++ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
22147 + if (bytes != sizeof(bufhead))
22148 + return NULL;
22149 +
22150 +@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const reg
22151 + {
22152 + struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
22153 +
22154 +- if (!user_mode_vm(regs)) {
22155 ++ if (!user_mode(regs)) {
22156 + unsigned long stack = kernel_stack_pointer(regs);
22157 + if (depth)
22158 + dump_trace(NULL, regs, (unsigned long *)stack, 0,
22159 +diff -urNp linux-3.1.1/arch/x86/pci/mrst.c linux-3.1.1/arch/x86/pci/mrst.c
22160 +--- linux-3.1.1/arch/x86/pci/mrst.c 2011-11-11 15:19:27.000000000 -0500
22161 ++++ linux-3.1.1/arch/x86/pci/mrst.c 2011-11-16 18:39:07.000000000 -0500
22162 +@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
22163 + printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
22164 + pci_mmcfg_late_init();
22165 + pcibios_enable_irq = mrst_pci_irq_enable;
22166 +- pci_root_ops = pci_mrst_ops;
22167 ++ pax_open_kernel();
22168 ++ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
22169 ++ pax_close_kernel();
22170 + /* Continue with standard init */
22171 + return 1;
22172 + }
22173 +diff -urNp linux-3.1.1/arch/x86/pci/pcbios.c linux-3.1.1/arch/x86/pci/pcbios.c
22174 +--- linux-3.1.1/arch/x86/pci/pcbios.c 2011-11-11 15:19:27.000000000 -0500
22175 ++++ linux-3.1.1/arch/x86/pci/pcbios.c 2011-11-16 18:39:07.000000000 -0500
22176 +@@ -79,50 +79,93 @@ union bios32 {
22177 + static struct {
22178 + unsigned long address;
22179 + unsigned short segment;
22180 +-} bios32_indirect = { 0, __KERNEL_CS };
22181 ++} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22182 +
22183 + /*
22184 + * Returns the entry point for the given service, NULL on error
22185 + */
22186 +
22187 +-static unsigned long bios32_service(unsigned long service)
22188 ++static unsigned long __devinit bios32_service(unsigned long service)
22189 + {
22190 + unsigned char return_code; /* %al */
22191 + unsigned long address; /* %ebx */
22192 + unsigned long length; /* %ecx */
22193 + unsigned long entry; /* %edx */
22194 + unsigned long flags;
22195 ++ struct desc_struct d, *gdt;
22196 +
22197 + local_irq_save(flags);
22198 +- __asm__("lcall *(%%edi); cld"
22199 ++
22200 ++ gdt = get_cpu_gdt_table(smp_processor_id());
22201 ++
22202 ++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
22203 ++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22204 ++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
22205 ++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22206 ++
22207 ++ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
22208 + : "=a" (return_code),
22209 + "=b" (address),
22210 + "=c" (length),
22211 + "=d" (entry)
22212 + : "0" (service),
22213 + "1" (0),
22214 +- "D" (&bios32_indirect));
22215 ++ "D" (&bios32_indirect),
22216 ++ "r"(__PCIBIOS_DS)
22217 ++ : "memory");
22218 ++
22219 ++ pax_open_kernel();
22220 ++ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
22221 ++ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
22222 ++ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
22223 ++ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
22224 ++ pax_close_kernel();
22225 ++
22226 + local_irq_restore(flags);
22227 +
22228 + switch (return_code) {
22229 +- case 0:
22230 +- return address + entry;
22231 +- case 0x80: /* Not present */
22232 +- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22233 +- return 0;
22234 +- default: /* Shouldn't happen */
22235 +- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22236 +- service, return_code);
22237 ++ case 0: {
22238 ++ int cpu;
22239 ++ unsigned char flags;
22240 ++
22241 ++ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
22242 ++ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
22243 ++ printk(KERN_WARNING "bios32_service: not valid\n");
22244 + return 0;
22245 ++ }
22246 ++ address = address + PAGE_OFFSET;
22247 ++ length += 16UL; /* some BIOSs underreport this... */
22248 ++ flags = 4;
22249 ++ if (length >= 64*1024*1024) {
22250 ++ length >>= PAGE_SHIFT;
22251 ++ flags |= 8;
22252 ++ }
22253 ++
22254 ++ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22255 ++ gdt = get_cpu_gdt_table(cpu);
22256 ++ pack_descriptor(&d, address, length, 0x9b, flags);
22257 ++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22258 ++ pack_descriptor(&d, address, length, 0x93, flags);
22259 ++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22260 ++ }
22261 ++ return entry;
22262 ++ }
22263 ++ case 0x80: /* Not present */
22264 ++ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22265 ++ return 0;
22266 ++ default: /* Shouldn't happen */
22267 ++ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22268 ++ service, return_code);
22269 ++ return 0;
22270 + }
22271 + }
22272 +
22273 + static struct {
22274 + unsigned long address;
22275 + unsigned short segment;
22276 +-} pci_indirect = { 0, __KERNEL_CS };
22277 ++} pci_indirect __read_only = { 0, __PCIBIOS_CS };
22278 +
22279 +-static int pci_bios_present;
22280 ++static int pci_bios_present __read_only;
22281 +
22282 + static int __devinit check_pcibios(void)
22283 + {
22284 +@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
22285 + unsigned long flags, pcibios_entry;
22286 +
22287 + if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
22288 +- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
22289 ++ pci_indirect.address = pcibios_entry;
22290 +
22291 + local_irq_save(flags);
22292 +- __asm__(
22293 +- "lcall *(%%edi); cld\n\t"
22294 ++ __asm__("movw %w6, %%ds\n\t"
22295 ++ "lcall *%%ss:(%%edi); cld\n\t"
22296 ++ "push %%ss\n\t"
22297 ++ "pop %%ds\n\t"
22298 + "jc 1f\n\t"
22299 + "xor %%ah, %%ah\n"
22300 + "1:"
22301 +@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
22302 + "=b" (ebx),
22303 + "=c" (ecx)
22304 + : "1" (PCIBIOS_PCI_BIOS_PRESENT),
22305 +- "D" (&pci_indirect)
22306 ++ "D" (&pci_indirect),
22307 ++ "r" (__PCIBIOS_DS)
22308 + : "memory");
22309 + local_irq_restore(flags);
22310 +
22311 +@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int se
22312 +
22313 + switch (len) {
22314 + case 1:
22315 +- __asm__("lcall *(%%esi); cld\n\t"
22316 ++ __asm__("movw %w6, %%ds\n\t"
22317 ++ "lcall *%%ss:(%%esi); cld\n\t"
22318 ++ "push %%ss\n\t"
22319 ++ "pop %%ds\n\t"
22320 + "jc 1f\n\t"
22321 + "xor %%ah, %%ah\n"
22322 + "1:"
22323 +@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int se
22324 + : "1" (PCIBIOS_READ_CONFIG_BYTE),
22325 + "b" (bx),
22326 + "D" ((long)reg),
22327 +- "S" (&pci_indirect));
22328 ++ "S" (&pci_indirect),
22329 ++ "r" (__PCIBIOS_DS));
22330 + /*
22331 + * Zero-extend the result beyond 8 bits, do not trust the
22332 + * BIOS having done it:
22333 +@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int se
22334 + *value &= 0xff;
22335 + break;
22336 + case 2:
22337 +- __asm__("lcall *(%%esi); cld\n\t"
22338 ++ __asm__("movw %w6, %%ds\n\t"
22339 ++ "lcall *%%ss:(%%esi); cld\n\t"
22340 ++ "push %%ss\n\t"
22341 ++ "pop %%ds\n\t"
22342 + "jc 1f\n\t"
22343 + "xor %%ah, %%ah\n"
22344 + "1:"
22345 +@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int se
22346 + : "1" (PCIBIOS_READ_CONFIG_WORD),
22347 + "b" (bx),
22348 + "D" ((long)reg),
22349 +- "S" (&pci_indirect));
22350 ++ "S" (&pci_indirect),
22351 ++ "r" (__PCIBIOS_DS));
22352 + /*
22353 + * Zero-extend the result beyond 16 bits, do not trust the
22354 + * BIOS having done it:
22355 +@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int se
22356 + *value &= 0xffff;
22357 + break;
22358 + case 4:
22359 +- __asm__("lcall *(%%esi); cld\n\t"
22360 ++ __asm__("movw %w6, %%ds\n\t"
22361 ++ "lcall *%%ss:(%%esi); cld\n\t"
22362 ++ "push %%ss\n\t"
22363 ++ "pop %%ds\n\t"
22364 + "jc 1f\n\t"
22365 + "xor %%ah, %%ah\n"
22366 + "1:"
22367 +@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int se
22368 + : "1" (PCIBIOS_READ_CONFIG_DWORD),
22369 + "b" (bx),
22370 + "D" ((long)reg),
22371 +- "S" (&pci_indirect));
22372 ++ "S" (&pci_indirect),
22373 ++ "r" (__PCIBIOS_DS));
22374 + break;
22375 + }
22376 +
22377 +@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int s
22378 +
22379 + switch (len) {
22380 + case 1:
22381 +- __asm__("lcall *(%%esi); cld\n\t"
22382 ++ __asm__("movw %w6, %%ds\n\t"
22383 ++ "lcall *%%ss:(%%esi); cld\n\t"
22384 ++ "push %%ss\n\t"
22385 ++ "pop %%ds\n\t"
22386 + "jc 1f\n\t"
22387 + "xor %%ah, %%ah\n"
22388 + "1:"
22389 +@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int s
22390 + "c" (value),
22391 + "b" (bx),
22392 + "D" ((long)reg),
22393 +- "S" (&pci_indirect));
22394 ++ "S" (&pci_indirect),
22395 ++ "r" (__PCIBIOS_DS));
22396 + break;
22397 + case 2:
22398 +- __asm__("lcall *(%%esi); cld\n\t"
22399 ++ __asm__("movw %w6, %%ds\n\t"
22400 ++ "lcall *%%ss:(%%esi); cld\n\t"
22401 ++ "push %%ss\n\t"
22402 ++ "pop %%ds\n\t"
22403 + "jc 1f\n\t"
22404 + "xor %%ah, %%ah\n"
22405 + "1:"
22406 +@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int s
22407 + "c" (value),
22408 + "b" (bx),
22409 + "D" ((long)reg),
22410 +- "S" (&pci_indirect));
22411 ++ "S" (&pci_indirect),
22412 ++ "r" (__PCIBIOS_DS));
22413 + break;
22414 + case 4:
22415 +- __asm__("lcall *(%%esi); cld\n\t"
22416 ++ __asm__("movw %w6, %%ds\n\t"
22417 ++ "lcall *%%ss:(%%esi); cld\n\t"
22418 ++ "push %%ss\n\t"
22419 ++ "pop %%ds\n\t"
22420 + "jc 1f\n\t"
22421 + "xor %%ah, %%ah\n"
22422 + "1:"
22423 +@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int s
22424 + "c" (value),
22425 + "b" (bx),
22426 + "D" ((long)reg),
22427 +- "S" (&pci_indirect));
22428 ++ "S" (&pci_indirect),
22429 ++ "r" (__PCIBIOS_DS));
22430 + break;
22431 + }
22432 +
22433 +@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_i
22434 +
22435 + DBG("PCI: Fetching IRQ routing table... ");
22436 + __asm__("push %%es\n\t"
22437 ++ "movw %w8, %%ds\n\t"
22438 + "push %%ds\n\t"
22439 + "pop %%es\n\t"
22440 +- "lcall *(%%esi); cld\n\t"
22441 ++ "lcall *%%ss:(%%esi); cld\n\t"
22442 + "pop %%es\n\t"
22443 ++ "push %%ss\n\t"
22444 ++ "pop %%ds\n"
22445 + "jc 1f\n\t"
22446 + "xor %%ah, %%ah\n"
22447 + "1:"
22448 +@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_i
22449 + "1" (0),
22450 + "D" ((long) &opt),
22451 + "S" (&pci_indirect),
22452 +- "m" (opt)
22453 ++ "m" (opt),
22454 ++ "r" (__PCIBIOS_DS)
22455 + : "memory");
22456 + DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
22457 + if (ret & 0xff00)
22458 +@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_d
22459 + {
22460 + int ret;
22461 +
22462 +- __asm__("lcall *(%%esi); cld\n\t"
22463 ++ __asm__("movw %w5, %%ds\n\t"
22464 ++ "lcall *%%ss:(%%esi); cld\n\t"
22465 ++ "push %%ss\n\t"
22466 ++ "pop %%ds\n"
22467 + "jc 1f\n\t"
22468 + "xor %%ah, %%ah\n"
22469 + "1:"
22470 +@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_d
22471 + : "0" (PCIBIOS_SET_PCI_HW_INT),
22472 + "b" ((dev->bus->number << 8) | dev->devfn),
22473 + "c" ((irq << 8) | (pin + 10)),
22474 +- "S" (&pci_indirect));
22475 ++ "S" (&pci_indirect),
22476 ++ "r" (__PCIBIOS_DS));
22477 + return !(ret & 0xff00);
22478 + }
22479 + EXPORT_SYMBOL(pcibios_set_irq_routing);
22480 +diff -urNp linux-3.1.1/arch/x86/platform/efi/efi_32.c linux-3.1.1/arch/x86/platform/efi/efi_32.c
22481 +--- linux-3.1.1/arch/x86/platform/efi/efi_32.c 2011-11-11 15:19:27.000000000 -0500
22482 ++++ linux-3.1.1/arch/x86/platform/efi/efi_32.c 2011-11-16 18:39:07.000000000 -0500
22483 +@@ -38,70 +38,56 @@
22484 + */
22485 +
22486 + static unsigned long efi_rt_eflags;
22487 +-static pgd_t efi_bak_pg_dir_pointer[2];
22488 ++static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
22489 +
22490 +-void efi_call_phys_prelog(void)
22491 ++void __init efi_call_phys_prelog(void)
22492 + {
22493 +- unsigned long cr4;
22494 +- unsigned long temp;
22495 + struct desc_ptr gdt_descr;
22496 +
22497 +- local_irq_save(efi_rt_eflags);
22498 ++#ifdef CONFIG_PAX_KERNEXEC
22499 ++ struct desc_struct d;
22500 ++#endif
22501 +
22502 +- /*
22503 +- * If I don't have PAE, I should just duplicate two entries in page
22504 +- * directory. If I have PAE, I just need to duplicate one entry in
22505 +- * page directory.
22506 +- */
22507 +- cr4 = read_cr4_safe();
22508 ++ local_irq_save(efi_rt_eflags);
22509 +
22510 +- if (cr4 & X86_CR4_PAE) {
22511 +- efi_bak_pg_dir_pointer[0].pgd =
22512 +- swapper_pg_dir[pgd_index(0)].pgd;
22513 +- swapper_pg_dir[0].pgd =
22514 +- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
22515 +- } else {
22516 +- efi_bak_pg_dir_pointer[0].pgd =
22517 +- swapper_pg_dir[pgd_index(0)].pgd;
22518 +- efi_bak_pg_dir_pointer[1].pgd =
22519 +- swapper_pg_dir[pgd_index(0x400000)].pgd;
22520 +- swapper_pg_dir[pgd_index(0)].pgd =
22521 +- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
22522 +- temp = PAGE_OFFSET + 0x400000;
22523 +- swapper_pg_dir[pgd_index(0x400000)].pgd =
22524 +- swapper_pg_dir[pgd_index(temp)].pgd;
22525 +- }
22526 ++ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
22527 ++ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22528 ++ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
22529 +
22530 + /*
22531 + * After the lock is released, the original page table is restored.
22532 + */
22533 + __flush_tlb_all();
22534 +
22535 ++#ifdef CONFIG_PAX_KERNEXEC
22536 ++ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
22537 ++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
22538 ++ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
22539 ++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
22540 ++#endif
22541 ++
22542 + gdt_descr.address = __pa(get_cpu_gdt_table(0));
22543 + gdt_descr.size = GDT_SIZE - 1;
22544 + load_gdt(&gdt_descr);
22545 + }
22546 +
22547 +-void efi_call_phys_epilog(void)
22548 ++void __init efi_call_phys_epilog(void)
22549 + {
22550 +- unsigned long cr4;
22551 + struct desc_ptr gdt_descr;
22552 +
22553 ++#ifdef CONFIG_PAX_KERNEXEC
22554 ++ struct desc_struct d;
22555 ++
22556 ++ memset(&d, 0, sizeof d);
22557 ++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
22558 ++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
22559 ++#endif
22560 ++
22561 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
22562 + gdt_descr.size = GDT_SIZE - 1;
22563 + load_gdt(&gdt_descr);
22564 +
22565 +- cr4 = read_cr4_safe();
22566 +-
22567 +- if (cr4 & X86_CR4_PAE) {
22568 +- swapper_pg_dir[pgd_index(0)].pgd =
22569 +- efi_bak_pg_dir_pointer[0].pgd;
22570 +- } else {
22571 +- swapper_pg_dir[pgd_index(0)].pgd =
22572 +- efi_bak_pg_dir_pointer[0].pgd;
22573 +- swapper_pg_dir[pgd_index(0x400000)].pgd =
22574 +- efi_bak_pg_dir_pointer[1].pgd;
22575 +- }
22576 ++ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
22577 +
22578 + /*
22579 + * After the lock is released, the original page table is restored.
22580 +diff -urNp linux-3.1.1/arch/x86/platform/efi/efi_stub_32.S linux-3.1.1/arch/x86/platform/efi/efi_stub_32.S
22581 +--- linux-3.1.1/arch/x86/platform/efi/efi_stub_32.S 2011-11-11 15:19:27.000000000 -0500
22582 ++++ linux-3.1.1/arch/x86/platform/efi/efi_stub_32.S 2011-11-16 18:39:07.000000000 -0500
22583 +@@ -6,7 +6,9 @@
22584 + */
22585 +
22586 + #include <linux/linkage.h>
22587 ++#include <linux/init.h>
22588 + #include <asm/page_types.h>
22589 ++#include <asm/segment.h>
22590 +
22591 + /*
22592 + * efi_call_phys(void *, ...) is a function with variable parameters.
22593 +@@ -20,7 +22,7 @@
22594 + * service functions will comply with gcc calling convention, too.
22595 + */
22596 +
22597 +-.text
22598 ++__INIT
22599 + ENTRY(efi_call_phys)
22600 + /*
22601 + * 0. The function can only be called in Linux kernel. So CS has been
22602 +@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
22603 + * The mapping of lower virtual memory has been created in prelog and
22604 + * epilog.
22605 + */
22606 +- movl $1f, %edx
22607 +- subl $__PAGE_OFFSET, %edx
22608 +- jmp *%edx
22609 ++ movl $(__KERNEXEC_EFI_DS), %edx
22610 ++ mov %edx, %ds
22611 ++ mov %edx, %es
22612 ++ mov %edx, %ss
22613 ++ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
22614 + 1:
22615 +
22616 + /*
22617 +@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
22618 + * parameter 2, ..., param n. To make things easy, we save the return
22619 + * address of efi_call_phys in a global variable.
22620 + */
22621 +- popl %edx
22622 +- movl %edx, saved_return_addr
22623 +- /* get the function pointer into ECX*/
22624 +- popl %ecx
22625 +- movl %ecx, efi_rt_function_ptr
22626 +- movl $2f, %edx
22627 +- subl $__PAGE_OFFSET, %edx
22628 +- pushl %edx
22629 ++ popl (saved_return_addr)
22630 ++ popl (efi_rt_function_ptr)
22631 +
22632 + /*
22633 + * 3. Clear PG bit in %CR0.
22634 +@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
22635 + /*
22636 + * 5. Call the physical function.
22637 + */
22638 +- jmp *%ecx
22639 ++ call *(efi_rt_function_ptr-__PAGE_OFFSET)
22640 +
22641 +-2:
22642 + /*
22643 + * 6. After EFI runtime service returns, control will return to
22644 + * following instruction. We'd better readjust stack pointer first.
22645 +@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
22646 + movl %cr0, %edx
22647 + orl $0x80000000, %edx
22648 + movl %edx, %cr0
22649 +- jmp 1f
22650 +-1:
22651 ++
22652 + /*
22653 + * 8. Now restore the virtual mode from flat mode by
22654 + * adding EIP with PAGE_OFFSET.
22655 + */
22656 +- movl $1f, %edx
22657 +- jmp *%edx
22658 ++ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
22659 + 1:
22660 ++ movl $(__KERNEL_DS), %edx
22661 ++ mov %edx, %ds
22662 ++ mov %edx, %es
22663 ++ mov %edx, %ss
22664 +
22665 + /*
22666 + * 9. Balance the stack. And because EAX contain the return value,
22667 + * we'd better not clobber it.
22668 + */
22669 +- leal efi_rt_function_ptr, %edx
22670 +- movl (%edx), %ecx
22671 +- pushl %ecx
22672 ++ pushl (efi_rt_function_ptr)
22673 +
22674 + /*
22675 +- * 10. Push the saved return address onto the stack and return.
22676 ++ * 10. Return to the saved return address.
22677 + */
22678 +- leal saved_return_addr, %edx
22679 +- movl (%edx), %ecx
22680 +- pushl %ecx
22681 +- ret
22682 ++ jmpl *(saved_return_addr)
22683 + ENDPROC(efi_call_phys)
22684 + .previous
22685 +
22686 +-.data
22687 ++__INITDATA
22688 + saved_return_addr:
22689 + .long 0
22690 + efi_rt_function_ptr:
22691 +diff -urNp linux-3.1.1/arch/x86/platform/efi/efi_stub_64.S linux-3.1.1/arch/x86/platform/efi/efi_stub_64.S
22692 +--- linux-3.1.1/arch/x86/platform/efi/efi_stub_64.S 2011-11-11 15:19:27.000000000 -0500
22693 ++++ linux-3.1.1/arch/x86/platform/efi/efi_stub_64.S 2011-11-16 18:39:07.000000000 -0500
22694 +@@ -7,6 +7,7 @@
22695 + */
22696 +
22697 + #include <linux/linkage.h>
22698 ++#include <asm/alternative-asm.h>
22699 +
22700 + #define SAVE_XMM \
22701 + mov %rsp, %rax; \
22702 +@@ -40,6 +41,7 @@ ENTRY(efi_call0)
22703 + call *%rdi
22704 + addq $32, %rsp
22705 + RESTORE_XMM
22706 ++ pax_force_retaddr
22707 + ret
22708 + ENDPROC(efi_call0)
22709 +
22710 +@@ -50,6 +52,7 @@ ENTRY(efi_call1)
22711 + call *%rdi
22712 + addq $32, %rsp
22713 + RESTORE_XMM
22714 ++ pax_force_retaddr
22715 + ret
22716 + ENDPROC(efi_call1)
22717 +
22718 +@@ -60,6 +63,7 @@ ENTRY(efi_call2)
22719 + call *%rdi
22720 + addq $32, %rsp
22721 + RESTORE_XMM
22722 ++ pax_force_retaddr
22723 + ret
22724 + ENDPROC(efi_call2)
22725 +
22726 +@@ -71,6 +75,7 @@ ENTRY(efi_call3)
22727 + call *%rdi
22728 + addq $32, %rsp
22729 + RESTORE_XMM
22730 ++ pax_force_retaddr
22731 + ret
22732 + ENDPROC(efi_call3)
22733 +
22734 +@@ -83,6 +88,7 @@ ENTRY(efi_call4)
22735 + call *%rdi
22736 + addq $32, %rsp
22737 + RESTORE_XMM
22738 ++ pax_force_retaddr
22739 + ret
22740 + ENDPROC(efi_call4)
22741 +
22742 +@@ -96,6 +102,7 @@ ENTRY(efi_call5)
22743 + call *%rdi
22744 + addq $48, %rsp
22745 + RESTORE_XMM
22746 ++ pax_force_retaddr
22747 + ret
22748 + ENDPROC(efi_call5)
22749 +
22750 +@@ -112,5 +119,6 @@ ENTRY(efi_call6)
22751 + call *%rdi
22752 + addq $48, %rsp
22753 + RESTORE_XMM
22754 ++ pax_force_retaddr
22755 + ret
22756 + ENDPROC(efi_call6)
22757 +diff -urNp linux-3.1.1/arch/x86/platform/mrst/mrst.c linux-3.1.1/arch/x86/platform/mrst/mrst.c
22758 +--- linux-3.1.1/arch/x86/platform/mrst/mrst.c 2011-11-11 15:19:27.000000000 -0500
22759 ++++ linux-3.1.1/arch/x86/platform/mrst/mrst.c 2011-11-16 18:39:07.000000000 -0500
22760 +@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
22761 + }
22762 +
22763 + /* Reboot and power off are handled by the SCU on a MID device */
22764 +-static void mrst_power_off(void)
22765 ++static __noreturn void mrst_power_off(void)
22766 + {
22767 + intel_scu_ipc_simple_command(0xf1, 1);
22768 ++ BUG();
22769 + }
22770 +
22771 +-static void mrst_reboot(void)
22772 ++static __noreturn void mrst_reboot(void)
22773 + {
22774 + intel_scu_ipc_simple_command(0xf1, 0);
22775 ++ BUG();
22776 + }
22777 +
22778 + /*
22779 +diff -urNp linux-3.1.1/arch/x86/platform/uv/tlb_uv.c linux-3.1.1/arch/x86/platform/uv/tlb_uv.c
22780 +--- linux-3.1.1/arch/x86/platform/uv/tlb_uv.c 2011-11-11 15:19:27.000000000 -0500
22781 ++++ linux-3.1.1/arch/x86/platform/uv/tlb_uv.c 2011-11-16 19:39:11.000000000 -0500
22782 +@@ -377,6 +377,8 @@ static void reset_with_ipi(struct pnmask
22783 + struct bau_control *smaster = bcp->socket_master;
22784 + struct reset_args reset_args;
22785 +
22786 ++ pax_track_stack();
22787 ++
22788 + reset_args.sender = sender;
22789 + cpus_clear(*mask);
22790 + /* find a single cpu for each uvhub in this distribution mask */
22791 +diff -urNp linux-3.1.1/arch/x86/power/cpu.c linux-3.1.1/arch/x86/power/cpu.c
22792 +--- linux-3.1.1/arch/x86/power/cpu.c 2011-11-11 15:19:27.000000000 -0500
22793 ++++ linux-3.1.1/arch/x86/power/cpu.c 2011-11-16 18:39:07.000000000 -0500
22794 +@@ -130,7 +130,7 @@ static void do_fpu_end(void)
22795 + static void fix_processor_context(void)
22796 + {
22797 + int cpu = smp_processor_id();
22798 +- struct tss_struct *t = &per_cpu(init_tss, cpu);
22799 ++ struct tss_struct *t = init_tss + cpu;
22800 +
22801 + set_tss_desc(cpu, t); /*
22802 + * This just modifies memory; should not be
22803 +@@ -140,7 +140,9 @@ static void fix_processor_context(void)
22804 + */
22805 +
22806 + #ifdef CONFIG_X86_64
22807 ++ pax_open_kernel();
22808 + get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22809 ++ pax_close_kernel();
22810 +
22811 + syscall_init(); /* This sets MSR_*STAR and related */
22812 + #endif
22813 +diff -urNp linux-3.1.1/arch/x86/vdso/Makefile linux-3.1.1/arch/x86/vdso/Makefile
22814 +--- linux-3.1.1/arch/x86/vdso/Makefile 2011-11-11 15:19:27.000000000 -0500
22815 ++++ linux-3.1.1/arch/x86/vdso/Makefile 2011-11-16 18:39:07.000000000 -0500
22816 +@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
22817 + -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
22818 + sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
22819 +
22820 +-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22821 ++VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22822 + GCOV_PROFILE := n
22823 +
22824 + #
22825 +diff -urNp linux-3.1.1/arch/x86/vdso/vdso32-setup.c linux-3.1.1/arch/x86/vdso/vdso32-setup.c
22826 +--- linux-3.1.1/arch/x86/vdso/vdso32-setup.c 2011-11-11 15:19:27.000000000 -0500
22827 ++++ linux-3.1.1/arch/x86/vdso/vdso32-setup.c 2011-11-16 18:39:07.000000000 -0500
22828 +@@ -25,6 +25,7 @@
22829 + #include <asm/tlbflush.h>
22830 + #include <asm/vdso.h>
22831 + #include <asm/proto.h>
22832 ++#include <asm/mman.h>
22833 +
22834 + enum {
22835 + VDSO_DISABLED = 0,
22836 +@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
22837 + void enable_sep_cpu(void)
22838 + {
22839 + int cpu = get_cpu();
22840 +- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22841 ++ struct tss_struct *tss = init_tss + cpu;
22842 +
22843 + if (!boot_cpu_has(X86_FEATURE_SEP)) {
22844 + put_cpu();
22845 +@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
22846 + gate_vma.vm_start = FIXADDR_USER_START;
22847 + gate_vma.vm_end = FIXADDR_USER_END;
22848 + gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
22849 +- gate_vma.vm_page_prot = __P101;
22850 ++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
22851 + /*
22852 + * Make sure the vDSO gets into every core dump.
22853 + * Dumping its contents makes post-mortem fully interpretable later
22854 +@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
22855 + if (compat)
22856 + addr = VDSO_HIGH_BASE;
22857 + else {
22858 +- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
22859 ++ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
22860 + if (IS_ERR_VALUE(addr)) {
22861 + ret = addr;
22862 + goto up_fail;
22863 + }
22864 + }
22865 +
22866 +- current->mm->context.vdso = (void *)addr;
22867 ++ current->mm->context.vdso = addr;
22868 +
22869 + if (compat_uses_vma || !compat) {
22870 + /*
22871 +@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
22872 + }
22873 +
22874 + current_thread_info()->sysenter_return =
22875 +- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22876 ++ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22877 +
22878 + up_fail:
22879 + if (ret)
22880 +- current->mm->context.vdso = NULL;
22881 ++ current->mm->context.vdso = 0;
22882 +
22883 + up_write(&mm->mmap_sem);
22884 +
22885 +@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
22886 +
22887 + const char *arch_vma_name(struct vm_area_struct *vma)
22888 + {
22889 +- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22890 ++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22891 + return "[vdso]";
22892 ++
22893 ++#ifdef CONFIG_PAX_SEGMEXEC
22894 ++ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
22895 ++ return "[vdso]";
22896 ++#endif
22897 ++
22898 + return NULL;
22899 + }
22900 +
22901 +@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
22902 + * Check to see if the corresponding task was created in compat vdso
22903 + * mode.
22904 + */
22905 +- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
22906 ++ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
22907 + return &gate_vma;
22908 + return NULL;
22909 + }
22910 +diff -urNp linux-3.1.1/arch/x86/vdso/vma.c linux-3.1.1/arch/x86/vdso/vma.c
22911 +--- linux-3.1.1/arch/x86/vdso/vma.c 2011-11-11 15:19:27.000000000 -0500
22912 ++++ linux-3.1.1/arch/x86/vdso/vma.c 2011-11-16 18:39:07.000000000 -0500
22913 +@@ -16,8 +16,6 @@
22914 + #include <asm/vdso.h>
22915 + #include <asm/page.h>
22916 +
22917 +-unsigned int __read_mostly vdso_enabled = 1;
22918 +-
22919 + extern char vdso_start[], vdso_end[];
22920 + extern unsigned short vdso_sync_cpuid;
22921 +
22922 +@@ -97,13 +95,15 @@ static unsigned long vdso_addr(unsigned
22923 + int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
22924 + {
22925 + struct mm_struct *mm = current->mm;
22926 +- unsigned long addr;
22927 ++ unsigned long addr = 0;
22928 + int ret;
22929 +
22930 +- if (!vdso_enabled)
22931 +- return 0;
22932 +-
22933 + down_write(&mm->mmap_sem);
22934 ++
22935 ++#ifdef CONFIG_PAX_RANDMMAP
22936 ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22937 ++#endif
22938 ++
22939 + addr = vdso_addr(mm->start_stack, vdso_size);
22940 + addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
22941 + if (IS_ERR_VALUE(addr)) {
22942 +@@ -111,26 +111,18 @@ int arch_setup_additional_pages(struct l
22943 + goto up_fail;
22944 + }
22945 +
22946 +- current->mm->context.vdso = (void *)addr;
22947 ++ mm->context.vdso = addr;
22948 +
22949 + ret = install_special_mapping(mm, addr, vdso_size,
22950 + VM_READ|VM_EXEC|
22951 + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
22952 + VM_ALWAYSDUMP,
22953 + vdso_pages);
22954 +- if (ret) {
22955 +- current->mm->context.vdso = NULL;
22956 +- goto up_fail;
22957 +- }
22958 ++
22959 ++ if (ret)
22960 ++ mm->context.vdso = 0;
22961 +
22962 + up_fail:
22963 + up_write(&mm->mmap_sem);
22964 + return ret;
22965 + }
22966 +-
22967 +-static __init int vdso_setup(char *s)
22968 +-{
22969 +- vdso_enabled = simple_strtoul(s, NULL, 0);
22970 +- return 0;
22971 +-}
22972 +-__setup("vdso=", vdso_setup);
22973 +diff -urNp linux-3.1.1/arch/x86/xen/enlighten.c linux-3.1.1/arch/x86/xen/enlighten.c
22974 +--- linux-3.1.1/arch/x86/xen/enlighten.c 2011-11-11 15:19:27.000000000 -0500
22975 ++++ linux-3.1.1/arch/x86/xen/enlighten.c 2011-11-16 18:39:07.000000000 -0500
22976 +@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22977 +
22978 + struct shared_info xen_dummy_shared_info;
22979 +
22980 +-void *xen_initial_gdt;
22981 +-
22982 + RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
22983 + __read_mostly int xen_have_vector_callback;
22984 + EXPORT_SYMBOL_GPL(xen_have_vector_callback);
22985 +@@ -1028,7 +1026,7 @@ static const struct pv_apic_ops xen_apic
22986 + #endif
22987 + };
22988 +
22989 +-static void xen_reboot(int reason)
22990 ++static __noreturn void xen_reboot(int reason)
22991 + {
22992 + struct sched_shutdown r = { .reason = reason };
22993 +
22994 +@@ -1036,17 +1034,17 @@ static void xen_reboot(int reason)
22995 + BUG();
22996 + }
22997 +
22998 +-static void xen_restart(char *msg)
22999 ++static __noreturn void xen_restart(char *msg)
23000 + {
23001 + xen_reboot(SHUTDOWN_reboot);
23002 + }
23003 +
23004 +-static void xen_emergency_restart(void)
23005 ++static __noreturn void xen_emergency_restart(void)
23006 + {
23007 + xen_reboot(SHUTDOWN_reboot);
23008 + }
23009 +
23010 +-static void xen_machine_halt(void)
23011 ++static __noreturn void xen_machine_halt(void)
23012 + {
23013 + xen_reboot(SHUTDOWN_poweroff);
23014 + }
23015 +@@ -1152,7 +1150,17 @@ asmlinkage void __init xen_start_kernel(
23016 + __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23017 +
23018 + /* Work out if we support NX */
23019 +- x86_configure_nx();
23020 ++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23021 ++ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23022 ++ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23023 ++ unsigned l, h;
23024 ++
23025 ++ __supported_pte_mask |= _PAGE_NX;
23026 ++ rdmsr(MSR_EFER, l, h);
23027 ++ l |= EFER_NX;
23028 ++ wrmsr(MSR_EFER, l, h);
23029 ++ }
23030 ++#endif
23031 +
23032 + xen_setup_features();
23033 +
23034 +@@ -1183,13 +1191,6 @@ asmlinkage void __init xen_start_kernel(
23035 +
23036 + machine_ops = xen_machine_ops;
23037 +
23038 +- /*
23039 +- * The only reliable way to retain the initial address of the
23040 +- * percpu gdt_page is to remember it here, so we can go and
23041 +- * mark it RW later, when the initial percpu area is freed.
23042 +- */
23043 +- xen_initial_gdt = &per_cpu(gdt_page, 0);
23044 +-
23045 + xen_smp_init();
23046 +
23047 + #ifdef CONFIG_ACPI_NUMA
23048 +diff -urNp linux-3.1.1/arch/x86/xen/mmu.c linux-3.1.1/arch/x86/xen/mmu.c
23049 +--- linux-3.1.1/arch/x86/xen/mmu.c 2011-11-11 15:19:27.000000000 -0500
23050 ++++ linux-3.1.1/arch/x86/xen/mmu.c 2011-11-16 18:39:07.000000000 -0500
23051 +@@ -1768,6 +1768,8 @@ pgd_t * __init xen_setup_kernel_pagetabl
23052 + convert_pfn_mfn(init_level4_pgt);
23053 + convert_pfn_mfn(level3_ident_pgt);
23054 + convert_pfn_mfn(level3_kernel_pgt);
23055 ++ convert_pfn_mfn(level3_vmalloc_pgt);
23056 ++ convert_pfn_mfn(level3_vmemmap_pgt);
23057 +
23058 + l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23059 + l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23060 +@@ -1786,7 +1788,10 @@ pgd_t * __init xen_setup_kernel_pagetabl
23061 + set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23062 + set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23063 + set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23064 ++ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23065 ++ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23066 + set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23067 ++ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23068 + set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23069 + set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23070 +
23071 +@@ -2000,6 +2005,7 @@ static void __init xen_post_allocator_in
23072 + pv_mmu_ops.set_pud = xen_set_pud;
23073 + #if PAGETABLE_LEVELS == 4
23074 + pv_mmu_ops.set_pgd = xen_set_pgd;
23075 ++ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
23076 + #endif
23077 +
23078 + /* This will work as long as patching hasn't happened yet
23079 +@@ -2081,6 +2087,7 @@ static const struct pv_mmu_ops xen_mmu_o
23080 + .pud_val = PV_CALLEE_SAVE(xen_pud_val),
23081 + .make_pud = PV_CALLEE_SAVE(xen_make_pud),
23082 + .set_pgd = xen_set_pgd_hyper,
23083 ++ .set_pgd_batched = xen_set_pgd_hyper,
23084 +
23085 + .alloc_pud = xen_alloc_pmd_init,
23086 + .release_pud = xen_release_pmd_init,
23087 +diff -urNp linux-3.1.1/arch/x86/xen/smp.c linux-3.1.1/arch/x86/xen/smp.c
23088 +--- linux-3.1.1/arch/x86/xen/smp.c 2011-11-11 15:19:27.000000000 -0500
23089 ++++ linux-3.1.1/arch/x86/xen/smp.c 2011-11-16 18:39:07.000000000 -0500
23090 +@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
23091 + {
23092 + BUG_ON(smp_processor_id() != 0);
23093 + native_smp_prepare_boot_cpu();
23094 +-
23095 +- /* We've switched to the "real" per-cpu gdt, so make sure the
23096 +- old memory can be recycled */
23097 +- make_lowmem_page_readwrite(xen_initial_gdt);
23098 +-
23099 + xen_filter_cpu_maps();
23100 + xen_setup_vcpu_info_placement();
23101 + }
23102 +@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu,
23103 + gdt = get_cpu_gdt_table(cpu);
23104 +
23105 + ctxt->flags = VGCF_IN_KERNEL;
23106 +- ctxt->user_regs.ds = __USER_DS;
23107 +- ctxt->user_regs.es = __USER_DS;
23108 ++ ctxt->user_regs.ds = __KERNEL_DS;
23109 ++ ctxt->user_regs.es = __KERNEL_DS;
23110 + ctxt->user_regs.ss = __KERNEL_DS;
23111 + #ifdef CONFIG_X86_32
23112 + ctxt->user_regs.fs = __KERNEL_PERCPU;
23113 +- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23114 ++ savesegment(gs, ctxt->user_regs.gs);
23115 + #else
23116 + ctxt->gs_base_kernel = per_cpu_offset(cpu);
23117 + #endif
23118 +@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned
23119 + int rc;
23120 +
23121 + per_cpu(current_task, cpu) = idle;
23122 ++ per_cpu(current_tinfo, cpu) = &idle->tinfo;
23123 + #ifdef CONFIG_X86_32
23124 + irq_ctx_init(cpu);
23125 + #else
23126 + clear_tsk_thread_flag(idle, TIF_FORK);
23127 +- per_cpu(kernel_stack, cpu) =
23128 +- (unsigned long)task_stack_page(idle) -
23129 +- KERNEL_STACK_OFFSET + THREAD_SIZE;
23130 ++ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23131 + #endif
23132 + xen_setup_runstate_info(cpu);
23133 + xen_setup_timer(cpu);
23134 +diff -urNp linux-3.1.1/arch/x86/xen/xen-asm_32.S linux-3.1.1/arch/x86/xen/xen-asm_32.S
23135 +--- linux-3.1.1/arch/x86/xen/xen-asm_32.S 2011-11-11 15:19:27.000000000 -0500
23136 ++++ linux-3.1.1/arch/x86/xen/xen-asm_32.S 2011-11-16 18:39:07.000000000 -0500
23137 +@@ -83,14 +83,14 @@ ENTRY(xen_iret)
23138 + ESP_OFFSET=4 # bytes pushed onto stack
23139 +
23140 + /*
23141 +- * Store vcpu_info pointer for easy access. Do it this way to
23142 +- * avoid having to reload %fs
23143 ++ * Store vcpu_info pointer for easy access.
23144 + */
23145 + #ifdef CONFIG_SMP
23146 +- GET_THREAD_INFO(%eax)
23147 +- movl TI_cpu(%eax), %eax
23148 +- movl __per_cpu_offset(,%eax,4), %eax
23149 +- mov xen_vcpu(%eax), %eax
23150 ++ push %fs
23151 ++ mov $(__KERNEL_PERCPU), %eax
23152 ++ mov %eax, %fs
23153 ++ mov PER_CPU_VAR(xen_vcpu), %eax
23154 ++ pop %fs
23155 + #else
23156 + movl xen_vcpu, %eax
23157 + #endif
23158 +diff -urNp linux-3.1.1/arch/x86/xen/xen-head.S linux-3.1.1/arch/x86/xen/xen-head.S
23159 +--- linux-3.1.1/arch/x86/xen/xen-head.S 2011-11-11 15:19:27.000000000 -0500
23160 ++++ linux-3.1.1/arch/x86/xen/xen-head.S 2011-11-16 18:39:07.000000000 -0500
23161 +@@ -19,6 +19,17 @@ ENTRY(startup_xen)
23162 + #ifdef CONFIG_X86_32
23163 + mov %esi,xen_start_info
23164 + mov $init_thread_union+THREAD_SIZE,%esp
23165 ++#ifdef CONFIG_SMP
23166 ++ movl $cpu_gdt_table,%edi
23167 ++ movl $__per_cpu_load,%eax
23168 ++ movw %ax,__KERNEL_PERCPU + 2(%edi)
23169 ++ rorl $16,%eax
23170 ++ movb %al,__KERNEL_PERCPU + 4(%edi)
23171 ++ movb %ah,__KERNEL_PERCPU + 7(%edi)
23172 ++ movl $__per_cpu_end - 1,%eax
23173 ++ subl $__per_cpu_start,%eax
23174 ++ movw %ax,__KERNEL_PERCPU + 0(%edi)
23175 ++#endif
23176 + #else
23177 + mov %rsi,xen_start_info
23178 + mov $init_thread_union+THREAD_SIZE,%rsp
23179 +diff -urNp linux-3.1.1/arch/x86/xen/xen-ops.h linux-3.1.1/arch/x86/xen/xen-ops.h
23180 +--- linux-3.1.1/arch/x86/xen/xen-ops.h 2011-11-11 15:19:27.000000000 -0500
23181 ++++ linux-3.1.1/arch/x86/xen/xen-ops.h 2011-11-16 18:39:07.000000000 -0500
23182 +@@ -10,8 +10,6 @@
23183 + extern const char xen_hypervisor_callback[];
23184 + extern const char xen_failsafe_callback[];
23185 +
23186 +-extern void *xen_initial_gdt;
23187 +-
23188 + struct trap_info;
23189 + void xen_copy_trap_info(struct trap_info *traps);
23190 +
23191 +diff -urNp linux-3.1.1/block/blk-iopoll.c linux-3.1.1/block/blk-iopoll.c
23192 +--- linux-3.1.1/block/blk-iopoll.c 2011-11-11 15:19:27.000000000 -0500
23193 ++++ linux-3.1.1/block/blk-iopoll.c 2011-11-16 18:39:07.000000000 -0500
23194 +@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23195 + }
23196 + EXPORT_SYMBOL(blk_iopoll_complete);
23197 +
23198 +-static void blk_iopoll_softirq(struct softirq_action *h)
23199 ++static void blk_iopoll_softirq(void)
23200 + {
23201 + struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23202 + int rearm = 0, budget = blk_iopoll_budget;
23203 +diff -urNp linux-3.1.1/block/blk-map.c linux-3.1.1/block/blk-map.c
23204 +--- linux-3.1.1/block/blk-map.c 2011-11-11 15:19:27.000000000 -0500
23205 ++++ linux-3.1.1/block/blk-map.c 2011-11-16 18:39:07.000000000 -0500
23206 +@@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
23207 + if (!len || !kbuf)
23208 + return -EINVAL;
23209 +
23210 +- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
23211 ++ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
23212 + if (do_copy)
23213 + bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
23214 + else
23215 +diff -urNp linux-3.1.1/block/blk-softirq.c linux-3.1.1/block/blk-softirq.c
23216 +--- linux-3.1.1/block/blk-softirq.c 2011-11-11 15:19:27.000000000 -0500
23217 ++++ linux-3.1.1/block/blk-softirq.c 2011-11-16 18:39:07.000000000 -0500
23218 +@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
23219 + * Softirq action handler - move entries to local list and loop over them
23220 + * while passing them to the queue registered handler.
23221 + */
23222 +-static void blk_done_softirq(struct softirq_action *h)
23223 ++static void blk_done_softirq(void)
23224 + {
23225 + struct list_head *cpu_list, local_list;
23226 +
23227 +diff -urNp linux-3.1.1/block/bsg.c linux-3.1.1/block/bsg.c
23228 +--- linux-3.1.1/block/bsg.c 2011-11-11 15:19:27.000000000 -0500
23229 ++++ linux-3.1.1/block/bsg.c 2011-11-16 18:39:07.000000000 -0500
23230 +@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23231 + struct sg_io_v4 *hdr, struct bsg_device *bd,
23232 + fmode_t has_write_perm)
23233 + {
23234 ++ unsigned char tmpcmd[sizeof(rq->__cmd)];
23235 ++ unsigned char *cmdptr;
23236 ++
23237 + if (hdr->request_len > BLK_MAX_CDB) {
23238 + rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23239 + if (!rq->cmd)
23240 + return -ENOMEM;
23241 +- }
23242 ++ cmdptr = rq->cmd;
23243 ++ } else
23244 ++ cmdptr = tmpcmd;
23245 +
23246 +- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
23247 ++ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
23248 + hdr->request_len))
23249 + return -EFAULT;
23250 +
23251 ++ if (cmdptr != rq->cmd)
23252 ++ memcpy(rq->cmd, cmdptr, hdr->request_len);
23253 ++
23254 + if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23255 + if (blk_verify_command(rq->cmd, has_write_perm))
23256 + return -EPERM;
23257 +diff -urNp linux-3.1.1/block/compat_ioctl.c linux-3.1.1/block/compat_ioctl.c
23258 +--- linux-3.1.1/block/compat_ioctl.c 2011-11-11 15:19:27.000000000 -0500
23259 ++++ linux-3.1.1/block/compat_ioctl.c 2011-11-16 18:39:07.000000000 -0500
23260 +@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_
23261 + err |= __get_user(f->spec1, &uf->spec1);
23262 + err |= __get_user(f->fmt_gap, &uf->fmt_gap);
23263 + err |= __get_user(name, &uf->name);
23264 +- f->name = compat_ptr(name);
23265 ++ f->name = (void __force_kernel *)compat_ptr(name);
23266 + if (err) {
23267 + err = -EFAULT;
23268 + goto out;
23269 +diff -urNp linux-3.1.1/block/scsi_ioctl.c linux-3.1.1/block/scsi_ioctl.c
23270 +--- linux-3.1.1/block/scsi_ioctl.c 2011-11-11 15:19:27.000000000 -0500
23271 ++++ linux-3.1.1/block/scsi_ioctl.c 2011-11-16 18:39:07.000000000 -0500
23272 +@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
23273 + static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23274 + struct sg_io_hdr *hdr, fmode_t mode)
23275 + {
23276 +- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23277 ++ unsigned char tmpcmd[sizeof(rq->__cmd)];
23278 ++ unsigned char *cmdptr;
23279 ++
23280 ++ if (rq->cmd != rq->__cmd)
23281 ++ cmdptr = rq->cmd;
23282 ++ else
23283 ++ cmdptr = tmpcmd;
23284 ++
23285 ++ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23286 + return -EFAULT;
23287 ++
23288 ++ if (cmdptr != rq->cmd)
23289 ++ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23290 ++
23291 + if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23292 + return -EPERM;
23293 +
23294 +@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
23295 + int err;
23296 + unsigned int in_len, out_len, bytes, opcode, cmdlen;
23297 + char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23298 ++ unsigned char tmpcmd[sizeof(rq->__cmd)];
23299 ++ unsigned char *cmdptr;
23300 +
23301 + if (!sic)
23302 + return -EINVAL;
23303 +@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
23304 + */
23305 + err = -EFAULT;
23306 + rq->cmd_len = cmdlen;
23307 +- if (copy_from_user(rq->cmd, sic->data, cmdlen))
23308 ++
23309 ++ if (rq->cmd != rq->__cmd)
23310 ++ cmdptr = rq->cmd;
23311 ++ else
23312 ++ cmdptr = tmpcmd;
23313 ++
23314 ++ if (copy_from_user(cmdptr, sic->data, cmdlen))
23315 + goto error;
23316 +
23317 ++ if (rq->cmd != cmdptr)
23318 ++ memcpy(rq->cmd, cmdptr, cmdlen);
23319 ++
23320 + if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23321 + goto error;
23322 +
23323 +diff -urNp linux-3.1.1/crypto/cryptd.c linux-3.1.1/crypto/cryptd.c
23324 +--- linux-3.1.1/crypto/cryptd.c 2011-11-11 15:19:27.000000000 -0500
23325 ++++ linux-3.1.1/crypto/cryptd.c 2011-11-16 18:39:07.000000000 -0500
23326 +@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
23327 +
23328 + struct cryptd_blkcipher_request_ctx {
23329 + crypto_completion_t complete;
23330 +-};
23331 ++} __no_const;
23332 +
23333 + struct cryptd_hash_ctx {
23334 + struct crypto_shash *child;
23335 +@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
23336 +
23337 + struct cryptd_aead_request_ctx {
23338 + crypto_completion_t complete;
23339 +-};
23340 ++} __no_const;
23341 +
23342 + static void cryptd_queue_worker(struct work_struct *work);
23343 +
23344 +diff -urNp linux-3.1.1/crypto/serpent.c linux-3.1.1/crypto/serpent.c
23345 +--- linux-3.1.1/crypto/serpent.c 2011-11-11 15:19:27.000000000 -0500
23346 ++++ linux-3.1.1/crypto/serpent.c 2011-11-16 18:40:10.000000000 -0500
23347 +@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
23348 + u32 r0,r1,r2,r3,r4;
23349 + int i;
23350 +
23351 ++ pax_track_stack();
23352 ++
23353 + /* Copy key, add padding */
23354 +
23355 + for (i = 0; i < keylen; ++i)
23356 +diff -urNp linux-3.1.1/Documentation/dontdiff linux-3.1.1/Documentation/dontdiff
23357 +--- linux-3.1.1/Documentation/dontdiff 2011-11-11 15:19:27.000000000 -0500
23358 ++++ linux-3.1.1/Documentation/dontdiff 2011-11-16 18:39:07.000000000 -0500
23359 +@@ -5,6 +5,7 @@
23360 + *.cis
23361 + *.cpio
23362 + *.csp
23363 ++*.dbg
23364 + *.dsp
23365 + *.dvi
23366 + *.elf
23367 +@@ -48,9 +49,11 @@
23368 + *.tab.h
23369 + *.tex
23370 + *.ver
23371 ++*.vim
23372 + *.xml
23373 + *.xz
23374 + *_MODULES
23375 ++*_reg_safe.h
23376 + *_vga16.c
23377 + *~
23378 + \#*#
23379 +@@ -70,6 +73,7 @@ Kerntypes
23380 + Module.markers
23381 + Module.symvers
23382 + PENDING
23383 ++PERF*
23384 + SCCS
23385 + System.map*
23386 + TAGS
23387 +@@ -93,19 +97,24 @@ bounds.h
23388 + bsetup
23389 + btfixupprep
23390 + build
23391 ++builtin-policy.h
23392 + bvmlinux
23393 + bzImage*
23394 + capability_names.h
23395 + capflags.c
23396 + classlist.h*
23397 ++clut_vga16.c
23398 ++common-cmds.h
23399 + comp*.log
23400 + compile.h*
23401 + conf
23402 + config
23403 + config-*
23404 + config_data.h*
23405 ++config.c
23406 + config.mak
23407 + config.mak.autogen
23408 ++config.tmp
23409 + conmakehash
23410 + consolemap_deftbl.c*
23411 + cpustr.h
23412 +@@ -119,6 +128,7 @@ dslm
23413 + elf2ecoff
23414 + elfconfig.h*
23415 + evergreen_reg_safe.h
23416 ++exception_policy.conf
23417 + fixdep
23418 + flask.h
23419 + fore200e_mkfirm
23420 +@@ -126,12 +136,14 @@ fore200e_pca_fw.c*
23421 + gconf
23422 + gconf.glade.h
23423 + gen-devlist
23424 ++gen-kdb_cmds.c
23425 + gen_crc32table
23426 + gen_init_cpio
23427 + generated
23428 + genheaders
23429 + genksyms
23430 + *_gray256.c
23431 ++hash
23432 + hpet_example
23433 + hugepage-mmap
23434 + hugepage-shm
23435 +@@ -146,7 +158,7 @@ int32.c
23436 + int4.c
23437 + int8.c
23438 + kallsyms
23439 +-kconfig
23440 ++kern_constants.h
23441 + keywords.c
23442 + ksym.c*
23443 + ksym.h*
23444 +@@ -154,7 +166,6 @@ kxgettext
23445 + lkc_defs.h
23446 + lex.c
23447 + lex.*.c
23448 +-linux
23449 + logo_*.c
23450 + logo_*_clut224.c
23451 + logo_*_mono.c
23452 +@@ -166,7 +177,6 @@ machtypes.h
23453 + map
23454 + map_hugetlb
23455 + maui_boot.h
23456 +-media
23457 + mconf
23458 + miboot*
23459 + mk_elfconfig
23460 +@@ -174,6 +184,7 @@ mkboot
23461 + mkbugboot
23462 + mkcpustr
23463 + mkdep
23464 ++mkpiggy
23465 + mkprep
23466 + mkregtable
23467 + mktables
23468 +@@ -209,6 +220,7 @@ r300_reg_safe.h
23469 + r420_reg_safe.h
23470 + r600_reg_safe.h
23471 + recordmcount
23472 ++regdb.c
23473 + relocs
23474 + rlim_names.h
23475 + rn50_reg_safe.h
23476 +@@ -219,6 +231,7 @@ setup
23477 + setup.bin
23478 + setup.elf
23479 + sImage
23480 ++slabinfo
23481 + sm_tbl*
23482 + split-include
23483 + syscalltab.h
23484 +@@ -229,6 +242,7 @@ tftpboot.img
23485 + timeconst.h
23486 + times.h*
23487 + trix_boot.h
23488 ++user_constants.h
23489 + utsrelease.h*
23490 + vdso-syms.lds
23491 + vdso.lds
23492 +@@ -246,7 +260,9 @@ vmlinux
23493 + vmlinux-*
23494 + vmlinux.aout
23495 + vmlinux.bin.all
23496 ++vmlinux.bin.bz2
23497 + vmlinux.lds
23498 ++vmlinux.relocs
23499 + vmlinuz
23500 + voffset.h
23501 + vsyscall.lds
23502 +@@ -254,9 +270,11 @@ vsyscall_32.lds
23503 + wanxlfw.inc
23504 + uImage
23505 + unifdef
23506 ++utsrelease.h
23507 + wakeup.bin
23508 + wakeup.elf
23509 + wakeup.lds
23510 + zImage*
23511 + zconf.hash.c
23512 ++zconf.lex.c
23513 + zoffset.h
23514 +diff -urNp linux-3.1.1/Documentation/kernel-parameters.txt linux-3.1.1/Documentation/kernel-parameters.txt
23515 +--- linux-3.1.1/Documentation/kernel-parameters.txt 2011-11-11 15:19:27.000000000 -0500
23516 ++++ linux-3.1.1/Documentation/kernel-parameters.txt 2011-11-16 18:39:07.000000000 -0500
23517 +@@ -1898,6 +1898,13 @@ bytes respectively. Such letter suffixes
23518 + the specified number of seconds. This is to be used if
23519 + your oopses keep scrolling off the screen.
23520 +
23521 ++ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
23522 ++ virtualization environments that don't cope well with the
23523 ++ expand down segment used by UDEREF on X86-32 or the frequent
23524 ++ page table updates on X86-64.
23525 ++
23526 ++ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
23527 ++
23528 + pcbit= [HW,ISDN]
23529 +
23530 + pcd. [PARIDE]
23531 +diff -urNp linux-3.1.1/drivers/acpi/apei/cper.c linux-3.1.1/drivers/acpi/apei/cper.c
23532 +--- linux-3.1.1/drivers/acpi/apei/cper.c 2011-11-11 15:19:27.000000000 -0500
23533 ++++ linux-3.1.1/drivers/acpi/apei/cper.c 2011-11-16 18:39:07.000000000 -0500
23534 +@@ -38,12 +38,12 @@
23535 + */
23536 + u64 cper_next_record_id(void)
23537 + {
23538 +- static atomic64_t seq;
23539 ++ static atomic64_unchecked_t seq;
23540 +
23541 +- if (!atomic64_read(&seq))
23542 +- atomic64_set(&seq, ((u64)get_seconds()) << 32);
23543 ++ if (!atomic64_read_unchecked(&seq))
23544 ++ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
23545 +
23546 +- return atomic64_inc_return(&seq);
23547 ++ return atomic64_inc_return_unchecked(&seq);
23548 + }
23549 + EXPORT_SYMBOL_GPL(cper_next_record_id);
23550 +
23551 +diff -urNp linux-3.1.1/drivers/acpi/ec_sys.c linux-3.1.1/drivers/acpi/ec_sys.c
23552 +--- linux-3.1.1/drivers/acpi/ec_sys.c 2011-11-11 15:19:27.000000000 -0500
23553 ++++ linux-3.1.1/drivers/acpi/ec_sys.c 2011-11-16 18:39:07.000000000 -0500
23554 +@@ -11,6 +11,7 @@
23555 + #include <linux/kernel.h>
23556 + #include <linux/acpi.h>
23557 + #include <linux/debugfs.h>
23558 ++#include <asm/uaccess.h>
23559 + #include "internal.h"
23560 +
23561 + MODULE_AUTHOR("Thomas Renninger <trenn@××××.de>");
23562 +@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct fi
23563 + * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
23564 + */
23565 + unsigned int size = EC_SPACE_SIZE;
23566 +- u8 *data = (u8 *) buf;
23567 ++ u8 data;
23568 + loff_t init_off = *off;
23569 + int err = 0;
23570 +
23571 +@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct fi
23572 + size = count;
23573 +
23574 + while (size) {
23575 +- err = ec_read(*off, &data[*off - init_off]);
23576 ++ err = ec_read(*off, &data);
23577 + if (err)
23578 + return err;
23579 ++ if (put_user(data, &buf[*off - init_off]))
23580 ++ return -EFAULT;
23581 + *off += 1;
23582 + size--;
23583 + }
23584 +@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct f
23585 +
23586 + unsigned int size = count;
23587 + loff_t init_off = *off;
23588 +- u8 *data = (u8 *) buf;
23589 + int err = 0;
23590 +
23591 + if (*off >= EC_SPACE_SIZE)
23592 +@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct f
23593 + }
23594 +
23595 + while (size) {
23596 +- u8 byte_write = data[*off - init_off];
23597 ++ u8 byte_write;
23598 ++ if (get_user(byte_write, &buf[*off - init_off]))
23599 ++ return -EFAULT;
23600 + err = ec_write(*off, byte_write);
23601 + if (err)
23602 + return err;
23603 +diff -urNp linux-3.1.1/drivers/acpi/proc.c linux-3.1.1/drivers/acpi/proc.c
23604 +--- linux-3.1.1/drivers/acpi/proc.c 2011-11-11 15:19:27.000000000 -0500
23605 ++++ linux-3.1.1/drivers/acpi/proc.c 2011-11-16 18:39:07.000000000 -0500
23606 +@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
23607 + size_t count, loff_t * ppos)
23608 + {
23609 + struct list_head *node, *next;
23610 +- char strbuf[5];
23611 +- char str[5] = "";
23612 +- unsigned int len = count;
23613 +-
23614 +- if (len > 4)
23615 +- len = 4;
23616 +- if (len < 0)
23617 +- return -EFAULT;
23618 ++ char strbuf[5] = {0};
23619 +
23620 +- if (copy_from_user(strbuf, buffer, len))
23621 ++ if (count > 4)
23622 ++ count = 4;
23623 ++ if (copy_from_user(strbuf, buffer, count))
23624 + return -EFAULT;
23625 +- strbuf[len] = '\0';
23626 +- sscanf(strbuf, "%s", str);
23627 ++ strbuf[count] = '\0';
23628 +
23629 + mutex_lock(&acpi_device_lock);
23630 + list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23631 +@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
23632 + if (!dev->wakeup.flags.valid)
23633 + continue;
23634 +
23635 +- if (!strncmp(dev->pnp.bus_id, str, 4)) {
23636 ++ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23637 + if (device_can_wakeup(&dev->dev)) {
23638 + bool enable = !device_may_wakeup(&dev->dev);
23639 + device_set_wakeup_enable(&dev->dev, enable);
23640 +diff -urNp linux-3.1.1/drivers/acpi/processor_driver.c linux-3.1.1/drivers/acpi/processor_driver.c
23641 +--- linux-3.1.1/drivers/acpi/processor_driver.c 2011-11-11 15:19:27.000000000 -0500
23642 ++++ linux-3.1.1/drivers/acpi/processor_driver.c 2011-11-16 18:39:07.000000000 -0500
23643 +@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
23644 + return 0;
23645 + #endif
23646 +
23647 +- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23648 ++ BUG_ON(pr->id >= nr_cpu_ids);
23649 +
23650 + /*
23651 + * Buggy BIOS check
23652 +diff -urNp linux-3.1.1/drivers/ata/libata-core.c linux-3.1.1/drivers/ata/libata-core.c
23653 +--- linux-3.1.1/drivers/ata/libata-core.c 2011-11-11 15:19:27.000000000 -0500
23654 ++++ linux-3.1.1/drivers/ata/libata-core.c 2011-11-16 18:39:07.000000000 -0500
23655 +@@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *
23656 + struct ata_port *ap;
23657 + unsigned int tag;
23658 +
23659 +- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23660 ++ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23661 + ap = qc->ap;
23662 +
23663 + qc->flags = 0;
23664 +@@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued
23665 + struct ata_port *ap;
23666 + struct ata_link *link;
23667 +
23668 +- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23669 ++ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23670 + WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
23671 + ap = qc->ap;
23672 + link = qc->dev->link;
23673 +@@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct
23674 + return;
23675 +
23676 + spin_lock(&lock);
23677 ++ pax_open_kernel();
23678 +
23679 + for (cur = ops->inherits; cur; cur = cur->inherits) {
23680 + void **inherit = (void **)cur;
23681 +@@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct
23682 + if (IS_ERR(*pp))
23683 + *pp = NULL;
23684 +
23685 +- ops->inherits = NULL;
23686 ++ *(struct ata_port_operations **)&ops->inherits = NULL;
23687 +
23688 ++ pax_close_kernel();
23689 + spin_unlock(&lock);
23690 + }
23691 +
23692 +diff -urNp linux-3.1.1/drivers/ata/libata-eh.c linux-3.1.1/drivers/ata/libata-eh.c
23693 +--- linux-3.1.1/drivers/ata/libata-eh.c 2011-11-11 15:19:27.000000000 -0500
23694 ++++ linux-3.1.1/drivers/ata/libata-eh.c 2011-11-16 18:40:10.000000000 -0500
23695 +@@ -2515,6 +2515,8 @@ void ata_eh_report(struct ata_port *ap)
23696 + {
23697 + struct ata_link *link;
23698 +
23699 ++ pax_track_stack();
23700 ++
23701 + ata_for_each_link(link, ap, HOST_FIRST)
23702 + ata_eh_link_report(link);
23703 + }
23704 +diff -urNp linux-3.1.1/drivers/ata/pata_arasan_cf.c linux-3.1.1/drivers/ata/pata_arasan_cf.c
23705 +--- linux-3.1.1/drivers/ata/pata_arasan_cf.c 2011-11-11 15:19:27.000000000 -0500
23706 ++++ linux-3.1.1/drivers/ata/pata_arasan_cf.c 2011-11-16 18:39:07.000000000 -0500
23707 +@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
23708 + /* Handle platform specific quirks */
23709 + if (pdata->quirk) {
23710 + if (pdata->quirk & CF_BROKEN_PIO) {
23711 +- ap->ops->set_piomode = NULL;
23712 ++ pax_open_kernel();
23713 ++ *(void **)&ap->ops->set_piomode = NULL;
23714 ++ pax_close_kernel();
23715 + ap->pio_mask = 0;
23716 + }
23717 + if (pdata->quirk & CF_BROKEN_MWDMA)
23718 +diff -urNp linux-3.1.1/drivers/atm/adummy.c linux-3.1.1/drivers/atm/adummy.c
23719 +--- linux-3.1.1/drivers/atm/adummy.c 2011-11-11 15:19:27.000000000 -0500
23720 ++++ linux-3.1.1/drivers/atm/adummy.c 2011-11-16 18:39:07.000000000 -0500
23721 +@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
23722 + vcc->pop(vcc, skb);
23723 + else
23724 + dev_kfree_skb_any(skb);
23725 +- atomic_inc(&vcc->stats->tx);
23726 ++ atomic_inc_unchecked(&vcc->stats->tx);
23727 +
23728 + return 0;
23729 + }
23730 +diff -urNp linux-3.1.1/drivers/atm/ambassador.c linux-3.1.1/drivers/atm/ambassador.c
23731 +--- linux-3.1.1/drivers/atm/ambassador.c 2011-11-11 15:19:27.000000000 -0500
23732 ++++ linux-3.1.1/drivers/atm/ambassador.c 2011-11-16 18:39:07.000000000 -0500
23733 +@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
23734 + PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
23735 +
23736 + // VC layer stats
23737 +- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23738 ++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23739 +
23740 + // free the descriptor
23741 + kfree (tx_descr);
23742 +@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
23743 + dump_skb ("<<<", vc, skb);
23744 +
23745 + // VC layer stats
23746 +- atomic_inc(&atm_vcc->stats->rx);
23747 ++ atomic_inc_unchecked(&atm_vcc->stats->rx);
23748 + __net_timestamp(skb);
23749 + // end of our responsibility
23750 + atm_vcc->push (atm_vcc, skb);
23751 +@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
23752 + } else {
23753 + PRINTK (KERN_INFO, "dropped over-size frame");
23754 + // should we count this?
23755 +- atomic_inc(&atm_vcc->stats->rx_drop);
23756 ++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23757 + }
23758 +
23759 + } else {
23760 +@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * at
23761 + }
23762 +
23763 + if (check_area (skb->data, skb->len)) {
23764 +- atomic_inc(&atm_vcc->stats->tx_err);
23765 ++ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
23766 + return -ENOMEM; // ?
23767 + }
23768 +
23769 +diff -urNp linux-3.1.1/drivers/atm/atmtcp.c linux-3.1.1/drivers/atm/atmtcp.c
23770 +--- linux-3.1.1/drivers/atm/atmtcp.c 2011-11-11 15:19:27.000000000 -0500
23771 ++++ linux-3.1.1/drivers/atm/atmtcp.c 2011-11-16 18:39:07.000000000 -0500
23772 +@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
23773 + if (vcc->pop) vcc->pop(vcc,skb);
23774 + else dev_kfree_skb(skb);
23775 + if (dev_data) return 0;
23776 +- atomic_inc(&vcc->stats->tx_err);
23777 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
23778 + return -ENOLINK;
23779 + }
23780 + size = skb->len+sizeof(struct atmtcp_hdr);
23781 +@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
23782 + if (!new_skb) {
23783 + if (vcc->pop) vcc->pop(vcc,skb);
23784 + else dev_kfree_skb(skb);
23785 +- atomic_inc(&vcc->stats->tx_err);
23786 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
23787 + return -ENOBUFS;
23788 + }
23789 + hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
23790 +@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
23791 + if (vcc->pop) vcc->pop(vcc,skb);
23792 + else dev_kfree_skb(skb);
23793 + out_vcc->push(out_vcc,new_skb);
23794 +- atomic_inc(&vcc->stats->tx);
23795 +- atomic_inc(&out_vcc->stats->rx);
23796 ++ atomic_inc_unchecked(&vcc->stats->tx);
23797 ++ atomic_inc_unchecked(&out_vcc->stats->rx);
23798 + return 0;
23799 + }
23800 +
23801 +@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
23802 + out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
23803 + read_unlock(&vcc_sklist_lock);
23804 + if (!out_vcc) {
23805 +- atomic_inc(&vcc->stats->tx_err);
23806 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
23807 + goto done;
23808 + }
23809 + skb_pull(skb,sizeof(struct atmtcp_hdr));
23810 +@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
23811 + __net_timestamp(new_skb);
23812 + skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
23813 + out_vcc->push(out_vcc,new_skb);
23814 +- atomic_inc(&vcc->stats->tx);
23815 +- atomic_inc(&out_vcc->stats->rx);
23816 ++ atomic_inc_unchecked(&vcc->stats->tx);
23817 ++ atomic_inc_unchecked(&out_vcc->stats->rx);
23818 + done:
23819 + if (vcc->pop) vcc->pop(vcc,skb);
23820 + else dev_kfree_skb(skb);
23821 +diff -urNp linux-3.1.1/drivers/atm/eni.c linux-3.1.1/drivers/atm/eni.c
23822 +--- linux-3.1.1/drivers/atm/eni.c 2011-11-11 15:19:27.000000000 -0500
23823 ++++ linux-3.1.1/drivers/atm/eni.c 2011-11-16 18:39:07.000000000 -0500
23824 +@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
23825 + DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
23826 + vcc->dev->number);
23827 + length = 0;
23828 +- atomic_inc(&vcc->stats->rx_err);
23829 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
23830 + }
23831 + else {
23832 + length = ATM_CELL_SIZE-1; /* no HEC */
23833 +@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23834 + size);
23835 + }
23836 + eff = length = 0;
23837 +- atomic_inc(&vcc->stats->rx_err);
23838 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
23839 + }
23840 + else {
23841 + size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
23842 +@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
23843 + "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
23844 + vcc->dev->number,vcc->vci,length,size << 2,descr);
23845 + length = eff = 0;
23846 +- atomic_inc(&vcc->stats->rx_err);
23847 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
23848 + }
23849 + }
23850 + skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
23851 +@@ -771,7 +771,7 @@ rx_dequeued++;
23852 + vcc->push(vcc,skb);
23853 + pushed++;
23854 + }
23855 +- atomic_inc(&vcc->stats->rx);
23856 ++ atomic_inc_unchecked(&vcc->stats->rx);
23857 + }
23858 + wake_up(&eni_dev->rx_wait);
23859 + }
23860 +@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
23861 + PCI_DMA_TODEVICE);
23862 + if (vcc->pop) vcc->pop(vcc,skb);
23863 + else dev_kfree_skb_irq(skb);
23864 +- atomic_inc(&vcc->stats->tx);
23865 ++ atomic_inc_unchecked(&vcc->stats->tx);
23866 + wake_up(&eni_dev->tx_wait);
23867 + dma_complete++;
23868 + }
23869 +@@ -1568,7 +1568,7 @@ tx_complete++;
23870 + /*--------------------------------- entries ---------------------------------*/
23871 +
23872 +
23873 +-static const char *media_name[] __devinitdata = {
23874 ++static const char *media_name[] __devinitconst = {
23875 + "MMF", "SMF", "MMF", "03?", /* 0- 3 */
23876 + "UTP", "05?", "06?", "07?", /* 4- 7 */
23877 + "TAXI","09?", "10?", "11?", /* 8-11 */
23878 +diff -urNp linux-3.1.1/drivers/atm/firestream.c linux-3.1.1/drivers/atm/firestream.c
23879 +--- linux-3.1.1/drivers/atm/firestream.c 2011-11-11 15:19:27.000000000 -0500
23880 ++++ linux-3.1.1/drivers/atm/firestream.c 2011-11-16 18:39:07.000000000 -0500
23881 +@@ -750,7 +750,7 @@ static void process_txdone_queue (struct
23882 + }
23883 + }
23884 +
23885 +- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
23886 ++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
23887 +
23888 + fs_dprintk (FS_DEBUG_TXMEM, "i");
23889 + fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
23890 +@@ -817,7 +817,7 @@ static void process_incoming (struct fs_
23891 + #endif
23892 + skb_put (skb, qe->p1 & 0xffff);
23893 + ATM_SKB(skb)->vcc = atm_vcc;
23894 +- atomic_inc(&atm_vcc->stats->rx);
23895 ++ atomic_inc_unchecked(&atm_vcc->stats->rx);
23896 + __net_timestamp(skb);
23897 + fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
23898 + atm_vcc->push (atm_vcc, skb);
23899 +@@ -838,12 +838,12 @@ static void process_incoming (struct fs_
23900 + kfree (pe);
23901 + }
23902 + if (atm_vcc)
23903 +- atomic_inc(&atm_vcc->stats->rx_drop);
23904 ++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23905 + break;
23906 + case 0x1f: /* Reassembly abort: no buffers. */
23907 + /* Silently increment error counter. */
23908 + if (atm_vcc)
23909 +- atomic_inc(&atm_vcc->stats->rx_drop);
23910 ++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
23911 + break;
23912 + default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
23913 + printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
23914 +diff -urNp linux-3.1.1/drivers/atm/fore200e.c linux-3.1.1/drivers/atm/fore200e.c
23915 +--- linux-3.1.1/drivers/atm/fore200e.c 2011-11-11 15:19:27.000000000 -0500
23916 ++++ linux-3.1.1/drivers/atm/fore200e.c 2011-11-16 18:39:07.000000000 -0500
23917 +@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
23918 + #endif
23919 + /* check error condition */
23920 + if (*entry->status & STATUS_ERROR)
23921 +- atomic_inc(&vcc->stats->tx_err);
23922 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
23923 + else
23924 +- atomic_inc(&vcc->stats->tx);
23925 ++ atomic_inc_unchecked(&vcc->stats->tx);
23926 + }
23927 + }
23928 +
23929 +@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
23930 + if (skb == NULL) {
23931 + DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
23932 +
23933 +- atomic_inc(&vcc->stats->rx_drop);
23934 ++ atomic_inc_unchecked(&vcc->stats->rx_drop);
23935 + return -ENOMEM;
23936 + }
23937 +
23938 +@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
23939 +
23940 + dev_kfree_skb_any(skb);
23941 +
23942 +- atomic_inc(&vcc->stats->rx_drop);
23943 ++ atomic_inc_unchecked(&vcc->stats->rx_drop);
23944 + return -ENOMEM;
23945 + }
23946 +
23947 + ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23948 +
23949 + vcc->push(vcc, skb);
23950 +- atomic_inc(&vcc->stats->rx);
23951 ++ atomic_inc_unchecked(&vcc->stats->rx);
23952 +
23953 + ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
23954 +
23955 +@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
23956 + DPRINTK(2, "damaged PDU on %d.%d.%d\n",
23957 + fore200e->atm_dev->number,
23958 + entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
23959 +- atomic_inc(&vcc->stats->rx_err);
23960 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
23961 + }
23962 + }
23963 +
23964 +@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
23965 + goto retry_here;
23966 + }
23967 +
23968 +- atomic_inc(&vcc->stats->tx_err);
23969 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
23970 +
23971 + fore200e->tx_sat++;
23972 + DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
23973 +diff -urNp linux-3.1.1/drivers/atm/he.c linux-3.1.1/drivers/atm/he.c
23974 +--- linux-3.1.1/drivers/atm/he.c 2011-11-11 15:19:27.000000000 -0500
23975 ++++ linux-3.1.1/drivers/atm/he.c 2011-11-16 18:39:07.000000000 -0500
23976 +@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23977 +
23978 + if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
23979 + hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
23980 +- atomic_inc(&vcc->stats->rx_drop);
23981 ++ atomic_inc_unchecked(&vcc->stats->rx_drop);
23982 + goto return_host_buffers;
23983 + }
23984 +
23985 +@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23986 + RBRQ_LEN_ERR(he_dev->rbrq_head)
23987 + ? "LEN_ERR" : "",
23988 + vcc->vpi, vcc->vci);
23989 +- atomic_inc(&vcc->stats->rx_err);
23990 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
23991 + goto return_host_buffers;
23992 + }
23993 +
23994 +@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
23995 + vcc->push(vcc, skb);
23996 + spin_lock(&he_dev->global_lock);
23997 +
23998 +- atomic_inc(&vcc->stats->rx);
23999 ++ atomic_inc_unchecked(&vcc->stats->rx);
24000 +
24001 + return_host_buffers:
24002 + ++pdus_assembled;
24003 +@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
24004 + tpd->vcc->pop(tpd->vcc, tpd->skb);
24005 + else
24006 + dev_kfree_skb_any(tpd->skb);
24007 +- atomic_inc(&tpd->vcc->stats->tx_err);
24008 ++ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
24009 + }
24010 + pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
24011 + return;
24012 +@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24013 + vcc->pop(vcc, skb);
24014 + else
24015 + dev_kfree_skb_any(skb);
24016 +- atomic_inc(&vcc->stats->tx_err);
24017 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
24018 + return -EINVAL;
24019 + }
24020 +
24021 +@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24022 + vcc->pop(vcc, skb);
24023 + else
24024 + dev_kfree_skb_any(skb);
24025 +- atomic_inc(&vcc->stats->tx_err);
24026 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
24027 + return -EINVAL;
24028 + }
24029 + #endif
24030 +@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24031 + vcc->pop(vcc, skb);
24032 + else
24033 + dev_kfree_skb_any(skb);
24034 +- atomic_inc(&vcc->stats->tx_err);
24035 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
24036 + spin_unlock_irqrestore(&he_dev->global_lock, flags);
24037 + return -ENOMEM;
24038 + }
24039 +@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24040 + vcc->pop(vcc, skb);
24041 + else
24042 + dev_kfree_skb_any(skb);
24043 +- atomic_inc(&vcc->stats->tx_err);
24044 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
24045 + spin_unlock_irqrestore(&he_dev->global_lock, flags);
24046 + return -ENOMEM;
24047 + }
24048 +@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
24049 + __enqueue_tpd(he_dev, tpd, cid);
24050 + spin_unlock_irqrestore(&he_dev->global_lock, flags);
24051 +
24052 +- atomic_inc(&vcc->stats->tx);
24053 ++ atomic_inc_unchecked(&vcc->stats->tx);
24054 +
24055 + return 0;
24056 + }
24057 +diff -urNp linux-3.1.1/drivers/atm/horizon.c linux-3.1.1/drivers/atm/horizon.c
24058 +--- linux-3.1.1/drivers/atm/horizon.c 2011-11-11 15:19:27.000000000 -0500
24059 ++++ linux-3.1.1/drivers/atm/horizon.c 2011-11-16 18:39:07.000000000 -0500
24060 +@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev,
24061 + {
24062 + struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
24063 + // VC layer stats
24064 +- atomic_inc(&vcc->stats->rx);
24065 ++ atomic_inc_unchecked(&vcc->stats->rx);
24066 + __net_timestamp(skb);
24067 + // end of our responsibility
24068 + vcc->push (vcc, skb);
24069 +@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const
24070 + dev->tx_iovec = NULL;
24071 +
24072 + // VC layer stats
24073 +- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
24074 ++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
24075 +
24076 + // free the skb
24077 + hrz_kfree_skb (skb);
24078 +diff -urNp linux-3.1.1/drivers/atm/idt77252.c linux-3.1.1/drivers/atm/idt77252.c
24079 +--- linux-3.1.1/drivers/atm/idt77252.c 2011-11-11 15:19:27.000000000 -0500
24080 ++++ linux-3.1.1/drivers/atm/idt77252.c 2011-11-16 18:39:07.000000000 -0500
24081 +@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, str
24082 + else
24083 + dev_kfree_skb(skb);
24084 +
24085 +- atomic_inc(&vcc->stats->tx);
24086 ++ atomic_inc_unchecked(&vcc->stats->tx);
24087 + }
24088 +
24089 + atomic_dec(&scq->used);
24090 +@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, st
24091 + if ((sb = dev_alloc_skb(64)) == NULL) {
24092 + printk("%s: Can't allocate buffers for aal0.\n",
24093 + card->name);
24094 +- atomic_add(i, &vcc->stats->rx_drop);
24095 ++ atomic_add_unchecked(i, &vcc->stats->rx_drop);
24096 + break;
24097 + }
24098 + if (!atm_charge(vcc, sb->truesize)) {
24099 + RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
24100 + card->name);
24101 +- atomic_add(i - 1, &vcc->stats->rx_drop);
24102 ++ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
24103 + dev_kfree_skb(sb);
24104 + break;
24105 + }
24106 +@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, st
24107 + ATM_SKB(sb)->vcc = vcc;
24108 + __net_timestamp(sb);
24109 + vcc->push(vcc, sb);
24110 +- atomic_inc(&vcc->stats->rx);
24111 ++ atomic_inc_unchecked(&vcc->stats->rx);
24112 +
24113 + cell += ATM_CELL_PAYLOAD;
24114 + }
24115 +@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, st
24116 + "(CDC: %08x)\n",
24117 + card->name, len, rpp->len, readl(SAR_REG_CDC));
24118 + recycle_rx_pool_skb(card, rpp);
24119 +- atomic_inc(&vcc->stats->rx_err);
24120 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
24121 + return;
24122 + }
24123 + if (stat & SAR_RSQE_CRC) {
24124 + RXPRINTK("%s: AAL5 CRC error.\n", card->name);
24125 + recycle_rx_pool_skb(card, rpp);
24126 +- atomic_inc(&vcc->stats->rx_err);
24127 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
24128 + return;
24129 + }
24130 + if (skb_queue_len(&rpp->queue) > 1) {
24131 +@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, st
24132 + RXPRINTK("%s: Can't alloc RX skb.\n",
24133 + card->name);
24134 + recycle_rx_pool_skb(card, rpp);
24135 +- atomic_inc(&vcc->stats->rx_err);
24136 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
24137 + return;
24138 + }
24139 + if (!atm_charge(vcc, skb->truesize)) {
24140 +@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, st
24141 + __net_timestamp(skb);
24142 +
24143 + vcc->push(vcc, skb);
24144 +- atomic_inc(&vcc->stats->rx);
24145 ++ atomic_inc_unchecked(&vcc->stats->rx);
24146 +
24147 + return;
24148 + }
24149 +@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, st
24150 + __net_timestamp(skb);
24151 +
24152 + vcc->push(vcc, skb);
24153 +- atomic_inc(&vcc->stats->rx);
24154 ++ atomic_inc_unchecked(&vcc->stats->rx);
24155 +
24156 + if (skb->truesize > SAR_FB_SIZE_3)
24157 + add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
24158 +@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car
24159 + if (vcc->qos.aal != ATM_AAL0) {
24160 + RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
24161 + card->name, vpi, vci);
24162 +- atomic_inc(&vcc->stats->rx_drop);
24163 ++ atomic_inc_unchecked(&vcc->stats->rx_drop);
24164 + goto drop;
24165 + }
24166 +
24167 + if ((sb = dev_alloc_skb(64)) == NULL) {
24168 + printk("%s: Can't allocate buffers for AAL0.\n",
24169 + card->name);
24170 +- atomic_inc(&vcc->stats->rx_err);
24171 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
24172 + goto drop;
24173 + }
24174 +
24175 +@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car
24176 + ATM_SKB(sb)->vcc = vcc;
24177 + __net_timestamp(sb);
24178 + vcc->push(vcc, sb);
24179 +- atomic_inc(&vcc->stats->rx);
24180 ++ atomic_inc_unchecked(&vcc->stats->rx);
24181 +
24182 + drop:
24183 + skb_pull(queue, 64);
24184 +@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24185 +
24186 + if (vc == NULL) {
24187 + printk("%s: NULL connection in send().\n", card->name);
24188 +- atomic_inc(&vcc->stats->tx_err);
24189 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
24190 + dev_kfree_skb(skb);
24191 + return -EINVAL;
24192 + }
24193 + if (!test_bit(VCF_TX, &vc->flags)) {
24194 + printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
24195 +- atomic_inc(&vcc->stats->tx_err);
24196 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
24197 + dev_kfree_skb(skb);
24198 + return -EINVAL;
24199 + }
24200 +@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24201 + break;
24202 + default:
24203 + printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
24204 +- atomic_inc(&vcc->stats->tx_err);
24205 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
24206 + dev_kfree_skb(skb);
24207 + return -EINVAL;
24208 + }
24209 +
24210 + if (skb_shinfo(skb)->nr_frags != 0) {
24211 + printk("%s: No scatter-gather yet.\n", card->name);
24212 +- atomic_inc(&vcc->stats->tx_err);
24213 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
24214 + dev_kfree_skb(skb);
24215 + return -EINVAL;
24216 + }
24217 +@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
24218 +
24219 + err = queue_skb(card, vc, skb, oam);
24220 + if (err) {
24221 +- atomic_inc(&vcc->stats->tx_err);
24222 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
24223 + dev_kfree_skb(skb);
24224 + return err;
24225 + }
24226 +@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
24227 + skb = dev_alloc_skb(64);
24228 + if (!skb) {
24229 + printk("%s: Out of memory in send_oam().\n", card->name);
24230 +- atomic_inc(&vcc->stats->tx_err);
24231 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
24232 + return -ENOMEM;
24233 + }
24234 + atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
24235 +diff -urNp linux-3.1.1/drivers/atm/iphase.c linux-3.1.1/drivers/atm/iphase.c
24236 +--- linux-3.1.1/drivers/atm/iphase.c 2011-11-11 15:19:27.000000000 -0500
24237 ++++ linux-3.1.1/drivers/atm/iphase.c 2011-11-16 18:39:07.000000000 -0500
24238 +@@ -1121,7 +1121,7 @@ static int rx_pkt(struct atm_dev *dev)
24239 + status = (u_short) (buf_desc_ptr->desc_mode);
24240 + if (status & (RX_CER | RX_PTE | RX_OFL))
24241 + {
24242 +- atomic_inc(&vcc->stats->rx_err);
24243 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
24244 + IF_ERR(printk("IA: bad packet, dropping it");)
24245 + if (status & RX_CER) {
24246 + IF_ERR(printk(" cause: packet CRC error\n");)
24247 +@@ -1144,7 +1144,7 @@ static int rx_pkt(struct atm_dev *dev)
24248 + len = dma_addr - buf_addr;
24249 + if (len > iadev->rx_buf_sz) {
24250 + printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
24251 +- atomic_inc(&vcc->stats->rx_err);
24252 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
24253 + goto out_free_desc;
24254 + }
24255 +
24256 +@@ -1294,7 +1294,7 @@ static void rx_dle_intr(struct atm_dev *
24257 + ia_vcc = INPH_IA_VCC(vcc);
24258 + if (ia_vcc == NULL)
24259 + {
24260 +- atomic_inc(&vcc->stats->rx_err);
24261 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
24262 + dev_kfree_skb_any(skb);
24263 + atm_return(vcc, atm_guess_pdu2truesize(len));
24264 + goto INCR_DLE;
24265 +@@ -1306,7 +1306,7 @@ static void rx_dle_intr(struct atm_dev *
24266 + if ((length > iadev->rx_buf_sz) || (length >
24267 + (skb->len - sizeof(struct cpcs_trailer))))
24268 + {
24269 +- atomic_inc(&vcc->stats->rx_err);
24270 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
24271 + IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
24272 + length, skb->len);)
24273 + dev_kfree_skb_any(skb);
24274 +@@ -1322,7 +1322,7 @@ static void rx_dle_intr(struct atm_dev *
24275 +
24276 + IF_RX(printk("rx_dle_intr: skb push");)
24277 + vcc->push(vcc,skb);
24278 +- atomic_inc(&vcc->stats->rx);
24279 ++ atomic_inc_unchecked(&vcc->stats->rx);
24280 + iadev->rx_pkt_cnt++;
24281 + }
24282 + INCR_DLE:
24283 +@@ -2802,15 +2802,15 @@ static int ia_ioctl(struct atm_dev *dev,
24284 + {
24285 + struct k_sonet_stats *stats;
24286 + stats = &PRIV(_ia_dev[board])->sonet_stats;
24287 +- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
24288 +- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
24289 +- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
24290 +- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
24291 +- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
24292 +- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
24293 +- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
24294 +- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
24295 +- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
24296 ++ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
24297 ++ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
24298 ++ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
24299 ++ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
24300 ++ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
24301 ++ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
24302 ++ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
24303 ++ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
24304 ++ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
24305 + }
24306 + ia_cmds.status = 0;
24307 + break;
24308 +@@ -2915,7 +2915,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
24309 + if ((desc == 0) || (desc > iadev->num_tx_desc))
24310 + {
24311 + IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
24312 +- atomic_inc(&vcc->stats->tx);
24313 ++ atomic_inc_unchecked(&vcc->stats->tx);
24314 + if (vcc->pop)
24315 + vcc->pop(vcc, skb);
24316 + else
24317 +@@ -3020,14 +3020,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
24318 + ATM_DESC(skb) = vcc->vci;
24319 + skb_queue_tail(&iadev->tx_dma_q, skb);
24320 +
24321 +- atomic_inc(&vcc->stats->tx);
24322 ++ atomic_inc_unchecked(&vcc->stats->tx);
24323 + iadev->tx_pkt_cnt++;
24324 + /* Increment transaction counter */
24325 + writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
24326 +
24327 + #if 0
24328 + /* add flow control logic */
24329 +- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
24330 ++ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
24331 + if (iavcc->vc_desc_cnt > 10) {
24332 + vcc->tx_quota = vcc->tx_quota * 3 / 4;
24333 + printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
24334 +diff -urNp linux-3.1.1/drivers/atm/lanai.c linux-3.1.1/drivers/atm/lanai.c
24335 +--- linux-3.1.1/drivers/atm/lanai.c 2011-11-11 15:19:27.000000000 -0500
24336 ++++ linux-3.1.1/drivers/atm/lanai.c 2011-11-16 18:39:07.000000000 -0500
24337 +@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
24338 + vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
24339 + lanai_endtx(lanai, lvcc);
24340 + lanai_free_skb(lvcc->tx.atmvcc, skb);
24341 +- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
24342 ++ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
24343 + }
24344 +
24345 + /* Try to fill the buffer - don't call unless there is backlog */
24346 +@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
24347 + ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
24348 + __net_timestamp(skb);
24349 + lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
24350 +- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
24351 ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
24352 + out:
24353 + lvcc->rx.buf.ptr = end;
24354 + cardvcc_write(lvcc, endptr, vcc_rxreadptr);
24355 +@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
24356 + DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
24357 + "vcc %d\n", lanai->number, (unsigned int) s, vci);
24358 + lanai->stats.service_rxnotaal5++;
24359 +- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24360 ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24361 + return 0;
24362 + }
24363 + if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
24364 +@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
24365 + int bytes;
24366 + read_unlock(&vcc_sklist_lock);
24367 + DPRINTK("got trashed rx pdu on vci %d\n", vci);
24368 +- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24369 ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24370 + lvcc->stats.x.aal5.service_trash++;
24371 + bytes = (SERVICE_GET_END(s) * 16) -
24372 + (((unsigned long) lvcc->rx.buf.ptr) -
24373 +@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
24374 + }
24375 + if (s & SERVICE_STREAM) {
24376 + read_unlock(&vcc_sklist_lock);
24377 +- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24378 ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24379 + lvcc->stats.x.aal5.service_stream++;
24380 + printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
24381 + "PDU on VCI %d!\n", lanai->number, vci);
24382 +@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
24383 + return 0;
24384 + }
24385 + DPRINTK("got rx crc error on vci %d\n", vci);
24386 +- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
24387 ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
24388 + lvcc->stats.x.aal5.service_rxcrc++;
24389 + lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
24390 + cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
24391 +diff -urNp linux-3.1.1/drivers/atm/nicstar.c linux-3.1.1/drivers/atm/nicstar.c
24392 +--- linux-3.1.1/drivers/atm/nicstar.c 2011-11-11 15:19:27.000000000 -0500
24393 ++++ linux-3.1.1/drivers/atm/nicstar.c 2011-11-16 18:39:07.000000000 -0500
24394 +@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
24395 + if ((vc = (vc_map *) vcc->dev_data) == NULL) {
24396 + printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
24397 + card->index);
24398 +- atomic_inc(&vcc->stats->tx_err);
24399 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
24400 + dev_kfree_skb_any(skb);
24401 + return -EINVAL;
24402 + }
24403 +@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
24404 + if (!vc->tx) {
24405 + printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
24406 + card->index);
24407 +- atomic_inc(&vcc->stats->tx_err);
24408 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
24409 + dev_kfree_skb_any(skb);
24410 + return -EINVAL;
24411 + }
24412 +@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
24413 + if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
24414 + printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
24415 + card->index);
24416 +- atomic_inc(&vcc->stats->tx_err);
24417 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
24418 + dev_kfree_skb_any(skb);
24419 + return -EINVAL;
24420 + }
24421 +
24422 + if (skb_shinfo(skb)->nr_frags != 0) {
24423 + printk("nicstar%d: No scatter-gather yet.\n", card->index);
24424 +- atomic_inc(&vcc->stats->tx_err);
24425 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
24426 + dev_kfree_skb_any(skb);
24427 + return -EINVAL;
24428 + }
24429 +@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
24430 + }
24431 +
24432 + if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
24433 +- atomic_inc(&vcc->stats->tx_err);
24434 ++ atomic_inc_unchecked(&vcc->stats->tx_err);
24435 + dev_kfree_skb_any(skb);
24436 + return -EIO;
24437 + }
24438 +- atomic_inc(&vcc->stats->tx);
24439 ++ atomic_inc_unchecked(&vcc->stats->tx);
24440 +
24441 + return 0;
24442 + }
24443 +@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
24444 + printk
24445 + ("nicstar%d: Can't allocate buffers for aal0.\n",
24446 + card->index);
24447 +- atomic_add(i, &vcc->stats->rx_drop);
24448 ++ atomic_add_unchecked(i, &vcc->stats->rx_drop);
24449 + break;
24450 + }
24451 + if (!atm_charge(vcc, sb->truesize)) {
24452 + RXPRINTK
24453 + ("nicstar%d: atm_charge() dropped aal0 packets.\n",
24454 + card->index);
24455 +- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
24456 ++ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
24457 + dev_kfree_skb_any(sb);
24458 + break;
24459 + }
24460 +@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
24461 + ATM_SKB(sb)->vcc = vcc;
24462 + __net_timestamp(sb);
24463 + vcc->push(vcc, sb);
24464 +- atomic_inc(&vcc->stats->rx);
24465 ++ atomic_inc_unchecked(&vcc->stats->rx);
24466 + cell += ATM_CELL_PAYLOAD;
24467 + }
24468 +
24469 +@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
24470 + if (iovb == NULL) {
24471 + printk("nicstar%d: Out of iovec buffers.\n",
24472 + card->index);
24473 +- atomic_inc(&vcc->stats->rx_drop);
24474 ++ atomic_inc_unchecked(&vcc->stats->rx_drop);
24475 + recycle_rx_buf(card, skb);
24476 + return;
24477 + }
24478 +@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
24479 + small or large buffer itself. */
24480 + } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
24481 + printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
24482 +- atomic_inc(&vcc->stats->rx_err);
24483 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
24484 + recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24485 + NS_MAX_IOVECS);
24486 + NS_PRV_IOVCNT(iovb) = 0;
24487 +@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
24488 + ("nicstar%d: Expected a small buffer, and this is not one.\n",
24489 + card->index);
24490 + which_list(card, skb);
24491 +- atomic_inc(&vcc->stats->rx_err);
24492 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
24493 + recycle_rx_buf(card, skb);
24494 + vc->rx_iov = NULL;
24495 + recycle_iov_buf(card, iovb);
24496 +@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
24497 + ("nicstar%d: Expected a large buffer, and this is not one.\n",
24498 + card->index);
24499 + which_list(card, skb);
24500 +- atomic_inc(&vcc->stats->rx_err);
24501 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
24502 + recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24503 + NS_PRV_IOVCNT(iovb));
24504 + vc->rx_iov = NULL;
24505 +@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
24506 + printk(" - PDU size mismatch.\n");
24507 + else
24508 + printk(".\n");
24509 +- atomic_inc(&vcc->stats->rx_err);
24510 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
24511 + recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
24512 + NS_PRV_IOVCNT(iovb));
24513 + vc->rx_iov = NULL;
24514 +@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
24515 + /* skb points to a small buffer */
24516 + if (!atm_charge(vcc, skb->truesize)) {
24517 + push_rxbufs(card, skb);
24518 +- atomic_inc(&vcc->stats->rx_drop);
24519 ++ atomic_inc_unchecked(&vcc->stats->rx_drop);
24520 + } else {
24521 + skb_put(skb, len);
24522 + dequeue_sm_buf(card, skb);
24523 +@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
24524 + ATM_SKB(skb)->vcc = vcc;
24525 + __net_timestamp(skb);
24526 + vcc->push(vcc, skb);
24527 +- atomic_inc(&vcc->stats->rx);
24528 ++ atomic_inc_unchecked(&vcc->stats->rx);
24529 + }
24530 + } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
24531 + struct sk_buff *sb;
24532 +@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
24533 + if (len <= NS_SMBUFSIZE) {
24534 + if (!atm_charge(vcc, sb->truesize)) {
24535 + push_rxbufs(card, sb);
24536 +- atomic_inc(&vcc->stats->rx_drop);
24537 ++ atomic_inc_unchecked(&vcc->stats->rx_drop);
24538 + } else {
24539 + skb_put(sb, len);
24540 + dequeue_sm_buf(card, sb);
24541 +@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
24542 + ATM_SKB(sb)->vcc = vcc;
24543 + __net_timestamp(sb);
24544 + vcc->push(vcc, sb);
24545 +- atomic_inc(&vcc->stats->rx);
24546 ++ atomic_inc_unchecked(&vcc->stats->rx);
24547 + }
24548 +
24549 + push_rxbufs(card, skb);
24550 +@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
24551 +
24552 + if (!atm_charge(vcc, skb->truesize)) {
24553 + push_rxbufs(card, skb);
24554 +- atomic_inc(&vcc->stats->rx_drop);
24555 ++ atomic_inc_unchecked(&vcc->stats->rx_drop);
24556 + } else {
24557 + dequeue_lg_buf(card, skb);
24558 + #ifdef NS_USE_DESTRUCTORS
24559 +@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
24560 + ATM_SKB(skb)->vcc = vcc;
24561 + __net_timestamp(skb);
24562 + vcc->push(vcc, skb);
24563 +- atomic_inc(&vcc->stats->rx);
24564 ++ atomic_inc_unchecked(&vcc->stats->rx);
24565 + }
24566 +
24567 + push_rxbufs(card, sb);
24568 +@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
24569 + printk
24570 + ("nicstar%d: Out of huge buffers.\n",
24571 + card->index);
24572 +- atomic_inc(&vcc->stats->rx_drop);
24573 ++ atomic_inc_unchecked(&vcc->stats->rx_drop);
24574 + recycle_iovec_rx_bufs(card,
24575 + (struct iovec *)
24576 + iovb->data,
24577 +@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
24578 + card->hbpool.count++;
24579 + } else
24580 + dev_kfree_skb_any(hb);
24581 +- atomic_inc(&vcc->stats->rx_drop);
24582 ++ atomic_inc_unchecked(&vcc->stats->rx_drop);
24583 + } else {
24584 + /* Copy the small buffer to the huge buffer */
24585 + sb = (struct sk_buff *)iov->iov_base;
24586 +@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
24587 + #endif /* NS_USE_DESTRUCTORS */
24588 + __net_timestamp(hb);
24589 + vcc->push(vcc, hb);
24590 +- atomic_inc(&vcc->stats->rx);
24591 ++ atomic_inc_unchecked(&vcc->stats->rx);
24592 + }
24593 + }
24594 +
24595 +diff -urNp linux-3.1.1/drivers/atm/solos-pci.c linux-3.1.1/drivers/atm/solos-pci.c
24596 +--- linux-3.1.1/drivers/atm/solos-pci.c 2011-11-11 15:19:27.000000000 -0500
24597 ++++ linux-3.1.1/drivers/atm/solos-pci.c 2011-11-16 18:40:10.000000000 -0500
24598 +@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
24599 + }
24600 + atm_charge(vcc, skb->truesize);
24601 + vcc->push(vcc, skb);
24602 +- atomic_inc(&vcc->stats->rx);
24603 ++ atomic_inc_unchecked(&vcc->stats->rx);
24604 + break;
24605 +
24606 + case PKT_STATUS:
24607 +@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *
24608 + char msg[500];
24609 + char item[10];
24610 +
24611 ++ pax_track_stack();
24612 ++
24613 + len = buf->len;
24614 + for (i = 0; i < len; i++){
24615 + if(i % 8 == 0)
24616 +@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_car
24617 + vcc = SKB_CB(oldskb)->vcc;
24618 +
24619 + if (vcc) {
24620 +- atomic_inc(&vcc->stats->tx);
24621 ++ atomic_inc_unchecked(&vcc->stats->tx);
24622 + solos_pop(vcc, oldskb);
24623 + } else
24624 + dev_kfree_skb_irq(oldskb);
24625 +diff -urNp linux-3.1.1/drivers/atm/suni.c linux-3.1.1/drivers/atm/suni.c
24626 +--- linux-3.1.1/drivers/atm/suni.c 2011-11-11 15:19:27.000000000 -0500
24627 ++++ linux-3.1.1/drivers/atm/suni.c 2011-11-16 18:39:07.000000000 -0500
24628 +@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
24629 +
24630 +
24631 + #define ADD_LIMITED(s,v) \
24632 +- atomic_add((v),&stats->s); \
24633 +- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
24634 ++ atomic_add_unchecked((v),&stats->s); \
24635 ++ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
24636 +
24637 +
24638 + static void suni_hz(unsigned long from_timer)
24639 +diff -urNp linux-3.1.1/drivers/atm/uPD98402.c linux-3.1.1/drivers/atm/uPD98402.c
24640 +--- linux-3.1.1/drivers/atm/uPD98402.c 2011-11-11 15:19:27.000000000 -0500
24641 ++++ linux-3.1.1/drivers/atm/uPD98402.c 2011-11-16 18:39:07.000000000 -0500
24642 +@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
24643 + struct sonet_stats tmp;
24644 + int error = 0;
24645 +
24646 +- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24647 ++ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
24648 + sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
24649 + if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
24650 + if (zero && !error) {
24651 +@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
24652 +
24653 +
24654 + #define ADD_LIMITED(s,v) \
24655 +- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
24656 +- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
24657 +- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24658 ++ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
24659 ++ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
24660 ++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
24661 +
24662 +
24663 + static void stat_event(struct atm_dev *dev)
24664 +@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
24665 + if (reason & uPD98402_INT_PFM) stat_event(dev);
24666 + if (reason & uPD98402_INT_PCO) {
24667 + (void) GET(PCOCR); /* clear interrupt cause */
24668 +- atomic_add(GET(HECCT),
24669 ++ atomic_add_unchecked(GET(HECCT),
24670 + &PRIV(dev)->sonet_stats.uncorr_hcs);
24671 + }
24672 + if ((reason & uPD98402_INT_RFO) &&
24673 +@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
24674 + PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
24675 + uPD98402_INT_LOS),PIMR); /* enable them */
24676 + (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
24677 +- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24678 +- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
24679 +- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
24680 ++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
24681 ++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
24682 ++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
24683 + return 0;
24684 + }
24685 +
24686 +diff -urNp linux-3.1.1/drivers/atm/zatm.c linux-3.1.1/drivers/atm/zatm.c
24687 +--- linux-3.1.1/drivers/atm/zatm.c 2011-11-11 15:19:27.000000000 -0500
24688 ++++ linux-3.1.1/drivers/atm/zatm.c 2011-11-16 18:39:07.000000000 -0500
24689 +@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24690 + }
24691 + if (!size) {
24692 + dev_kfree_skb_irq(skb);
24693 +- if (vcc) atomic_inc(&vcc->stats->rx_err);
24694 ++ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
24695 + continue;
24696 + }
24697 + if (!atm_charge(vcc,skb->truesize)) {
24698 +@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
24699 + skb->len = size;
24700 + ATM_SKB(skb)->vcc = vcc;
24701 + vcc->push(vcc,skb);
24702 +- atomic_inc(&vcc->stats->rx);
24703 ++ atomic_inc_unchecked(&vcc->stats->rx);
24704 + }
24705 + zout(pos & 0xffff,MTA(mbx));
24706 + #if 0 /* probably a stupid idea */
24707 +@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
24708 + skb_queue_head(&zatm_vcc->backlog,skb);
24709 + break;
24710 + }
24711 +- atomic_inc(&vcc->stats->tx);
24712 ++ atomic_inc_unchecked(&vcc->stats->tx);
24713 + wake_up(&zatm_vcc->tx_wait);
24714 + }
24715 +
24716 +diff -urNp linux-3.1.1/drivers/base/devtmpfs.c linux-3.1.1/drivers/base/devtmpfs.c
24717 +--- linux-3.1.1/drivers/base/devtmpfs.c 2011-11-11 15:19:27.000000000 -0500
24718 ++++ linux-3.1.1/drivers/base/devtmpfs.c 2011-11-16 18:39:07.000000000 -0500
24719 +@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
24720 + if (!thread)
24721 + return 0;
24722 +
24723 +- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
24724 ++ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
24725 + if (err)
24726 + printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
24727 + else
24728 +diff -urNp linux-3.1.1/drivers/base/power/wakeup.c linux-3.1.1/drivers/base/power/wakeup.c
24729 +--- linux-3.1.1/drivers/base/power/wakeup.c 2011-11-11 15:19:27.000000000 -0500
24730 ++++ linux-3.1.1/drivers/base/power/wakeup.c 2011-11-16 18:39:07.000000000 -0500
24731 +@@ -29,14 +29,14 @@ bool events_check_enabled;
24732 + * They need to be modified together atomically, so it's better to use one
24733 + * atomic variable to hold them both.
24734 + */
24735 +-static atomic_t combined_event_count = ATOMIC_INIT(0);
24736 ++static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
24737 +
24738 + #define IN_PROGRESS_BITS (sizeof(int) * 4)
24739 + #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
24740 +
24741 + static void split_counters(unsigned int *cnt, unsigned int *inpr)
24742 + {
24743 +- unsigned int comb = atomic_read(&combined_event_count);
24744 ++ unsigned int comb = atomic_read_unchecked(&combined_event_count);
24745 +
24746 + *cnt = (comb >> IN_PROGRESS_BITS);
24747 + *inpr = comb & MAX_IN_PROGRESS;
24748 +@@ -350,7 +350,7 @@ static void wakeup_source_activate(struc
24749 + ws->last_time = ktime_get();
24750 +
24751 + /* Increment the counter of events in progress. */
24752 +- atomic_inc(&combined_event_count);
24753 ++ atomic_inc_unchecked(&combined_event_count);
24754 + }
24755 +
24756 + /**
24757 +@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(str
24758 + * Increment the counter of registered wakeup events and decrement the
24759 + * couter of wakeup events in progress simultaneously.
24760 + */
24761 +- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
24762 ++ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
24763 + }
24764 +
24765 + /**
24766 +diff -urNp linux-3.1.1/drivers/block/cciss.c linux-3.1.1/drivers/block/cciss.c
24767 +--- linux-3.1.1/drivers/block/cciss.c 2011-11-11 15:19:27.000000000 -0500
24768 ++++ linux-3.1.1/drivers/block/cciss.c 2011-11-16 18:40:10.000000000 -0500
24769 +@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct
24770 + int err;
24771 + u32 cp;
24772 +
24773 ++ memset(&arg64, 0, sizeof(arg64));
24774 ++
24775 + err = 0;
24776 + err |=
24777 + copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
24778 +@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
24779 + while (!list_empty(&h->reqQ)) {
24780 + c = list_entry(h->reqQ.next, CommandList_struct, list);
24781 + /* can't do anything if fifo is full */
24782 +- if ((h->access.fifo_full(h))) {
24783 ++ if ((h->access->fifo_full(h))) {
24784 + dev_warn(&h->pdev->dev, "fifo full\n");
24785 + break;
24786 + }
24787 +@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
24788 + h->Qdepth--;
24789 +
24790 + /* Tell the controller execute command */
24791 +- h->access.submit_command(h, c);
24792 ++ h->access->submit_command(h, c);
24793 +
24794 + /* Put job onto the completed Q */
24795 + addQ(&h->cmpQ, c);
24796 +@@ -3422,17 +3424,17 @@ startio:
24797 +
24798 + static inline unsigned long get_next_completion(ctlr_info_t *h)
24799 + {
24800 +- return h->access.command_completed(h);
24801 ++ return h->access->command_completed(h);
24802 + }
24803 +
24804 + static inline int interrupt_pending(ctlr_info_t *h)
24805 + {
24806 +- return h->access.intr_pending(h);
24807 ++ return h->access->intr_pending(h);
24808 + }
24809 +
24810 + static inline long interrupt_not_for_us(ctlr_info_t *h)
24811 + {
24812 +- return ((h->access.intr_pending(h) == 0) ||
24813 ++ return ((h->access->intr_pending(h) == 0) ||
24814 + (h->interrupts_enabled == 0));
24815 + }
24816 +
24817 +@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info
24818 + u32 a;
24819 +
24820 + if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
24821 +- return h->access.command_completed(h);
24822 ++ return h->access->command_completed(h);
24823 +
24824 + if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
24825 + a = *(h->reply_pool_head); /* Next cmd in ring buffer */
24826 +@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controll
24827 + trans_support & CFGTBL_Trans_use_short_tags);
24828 +
24829 + /* Change the access methods to the performant access methods */
24830 +- h->access = SA5_performant_access;
24831 ++ h->access = &SA5_performant_access;
24832 + h->transMethod = CFGTBL_Trans_Performant;
24833 +
24834 + return;
24835 +@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr
24836 + if (prod_index < 0)
24837 + return -ENODEV;
24838 + h->product_name = products[prod_index].product_name;
24839 +- h->access = *(products[prod_index].access);
24840 ++ h->access = products[prod_index].access;
24841 +
24842 + if (cciss_board_disabled(h)) {
24843 + dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
24844 +@@ -5009,7 +5011,7 @@ reinit_after_soft_reset:
24845 + }
24846 +
24847 + /* make sure the board interrupts are off */
24848 +- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24849 ++ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24850 + rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
24851 + if (rc)
24852 + goto clean2;
24853 +@@ -5061,7 +5063,7 @@ reinit_after_soft_reset:
24854 + * fake ones to scoop up any residual completions.
24855 + */
24856 + spin_lock_irqsave(&h->lock, flags);
24857 +- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24858 ++ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24859 + spin_unlock_irqrestore(&h->lock, flags);
24860 + free_irq(h->intr[PERF_MODE_INT], h);
24861 + rc = cciss_request_irq(h, cciss_msix_discard_completions,
24862 +@@ -5081,9 +5083,9 @@ reinit_after_soft_reset:
24863 + dev_info(&h->pdev->dev, "Board READY.\n");
24864 + dev_info(&h->pdev->dev,
24865 + "Waiting for stale completions to drain.\n");
24866 +- h->access.set_intr_mask(h, CCISS_INTR_ON);
24867 ++ h->access->set_intr_mask(h, CCISS_INTR_ON);
24868 + msleep(10000);
24869 +- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24870 ++ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24871 +
24872 + rc = controller_reset_failed(h->cfgtable);
24873 + if (rc)
24874 +@@ -5106,7 +5108,7 @@ reinit_after_soft_reset:
24875 + cciss_scsi_setup(h);
24876 +
24877 + /* Turn the interrupts on so we can service requests */
24878 +- h->access.set_intr_mask(h, CCISS_INTR_ON);
24879 ++ h->access->set_intr_mask(h, CCISS_INTR_ON);
24880 +
24881 + /* Get the firmware version */
24882 + inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
24883 +@@ -5178,7 +5180,7 @@ static void cciss_shutdown(struct pci_de
24884 + kfree(flush_buf);
24885 + if (return_code != IO_OK)
24886 + dev_warn(&h->pdev->dev, "Error flushing cache\n");
24887 +- h->access.set_intr_mask(h, CCISS_INTR_OFF);
24888 ++ h->access->set_intr_mask(h, CCISS_INTR_OFF);
24889 + free_irq(h->intr[PERF_MODE_INT], h);
24890 + }
24891 +
24892 +diff -urNp linux-3.1.1/drivers/block/cciss.h linux-3.1.1/drivers/block/cciss.h
24893 +--- linux-3.1.1/drivers/block/cciss.h 2011-11-11 15:19:27.000000000 -0500
24894 ++++ linux-3.1.1/drivers/block/cciss.h 2011-11-16 18:39:07.000000000 -0500
24895 +@@ -100,7 +100,7 @@ struct ctlr_info
24896 + /* information about each logical volume */
24897 + drive_info_struct *drv[CISS_MAX_LUN];
24898 +
24899 +- struct access_method access;
24900 ++ struct access_method *access;
24901 +
24902 + /* queue and queue Info */
24903 + struct list_head reqQ;
24904 +diff -urNp linux-3.1.1/drivers/block/cpqarray.c linux-3.1.1/drivers/block/cpqarray.c
24905 +--- linux-3.1.1/drivers/block/cpqarray.c 2011-11-11 15:19:27.000000000 -0500
24906 ++++ linux-3.1.1/drivers/block/cpqarray.c 2011-11-16 18:40:10.000000000 -0500
24907 +@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
24908 + if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
24909 + goto Enomem4;
24910 + }
24911 +- hba[i]->access.set_intr_mask(hba[i], 0);
24912 ++ hba[i]->access->set_intr_mask(hba[i], 0);
24913 + if (request_irq(hba[i]->intr, do_ida_intr,
24914 + IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
24915 + {
24916 +@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
24917 + add_timer(&hba[i]->timer);
24918 +
24919 + /* Enable IRQ now that spinlock and rate limit timer are set up */
24920 +- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24921 ++ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
24922 +
24923 + for(j=0; j<NWD; j++) {
24924 + struct gendisk *disk = ida_gendisk[i][j];
24925 +@@ -694,7 +694,7 @@ DBGINFO(
24926 + for(i=0; i<NR_PRODUCTS; i++) {
24927 + if (board_id == products[i].board_id) {
24928 + c->product_name = products[i].product_name;
24929 +- c->access = *(products[i].access);
24930 ++ c->access = products[i].access;
24931 + break;
24932 + }
24933 + }
24934 +@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
24935 + hba[ctlr]->intr = intr;
24936 + sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
24937 + hba[ctlr]->product_name = products[j].product_name;
24938 +- hba[ctlr]->access = *(products[j].access);
24939 ++ hba[ctlr]->access = products[j].access;
24940 + hba[ctlr]->ctlr = ctlr;
24941 + hba[ctlr]->board_id = board_id;
24942 + hba[ctlr]->pci_dev = NULL; /* not PCI */
24943 +@@ -911,6 +911,8 @@ static void do_ida_request(struct reques
24944 + struct scatterlist tmp_sg[SG_MAX];
24945 + int i, dir, seg;
24946 +
24947 ++ pax_track_stack();
24948 ++
24949 + queue_next:
24950 + creq = blk_peek_request(q);
24951 + if (!creq)
24952 +@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
24953 +
24954 + while((c = h->reqQ) != NULL) {
24955 + /* Can't do anything if we're busy */
24956 +- if (h->access.fifo_full(h) == 0)
24957 ++ if (h->access->fifo_full(h) == 0)
24958 + return;
24959 +
24960 + /* Get the first entry from the request Q */
24961 +@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
24962 + h->Qdepth--;
24963 +
24964 + /* Tell the controller to do our bidding */
24965 +- h->access.submit_command(h, c);
24966 ++ h->access->submit_command(h, c);
24967 +
24968 + /* Get onto the completion Q */
24969 + addQ(&h->cmpQ, c);
24970 +@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
24971 + unsigned long flags;
24972 + __u32 a,a1;
24973 +
24974 +- istat = h->access.intr_pending(h);
24975 ++ istat = h->access->intr_pending(h);
24976 + /* Is this interrupt for us? */
24977 + if (istat == 0)
24978 + return IRQ_NONE;
24979 +@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
24980 + */
24981 + spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
24982 + if (istat & FIFO_NOT_EMPTY) {
24983 +- while((a = h->access.command_completed(h))) {
24984 ++ while((a = h->access->command_completed(h))) {
24985 + a1 = a; a &= ~3;
24986 + if ((c = h->cmpQ) == NULL)
24987 + {
24988 +@@ -1449,11 +1451,11 @@ static int sendcmd(
24989 + /*
24990 + * Disable interrupt
24991 + */
24992 +- info_p->access.set_intr_mask(info_p, 0);
24993 ++ info_p->access->set_intr_mask(info_p, 0);
24994 + /* Make sure there is room in the command FIFO */
24995 + /* Actually it should be completely empty at this time. */
24996 + for (i = 200000; i > 0; i--) {
24997 +- temp = info_p->access.fifo_full(info_p);
24998 ++ temp = info_p->access->fifo_full(info_p);
24999 + if (temp != 0) {
25000 + break;
25001 + }
25002 +@@ -1466,7 +1468,7 @@ DBG(
25003 + /*
25004 + * Send the cmd
25005 + */
25006 +- info_p->access.submit_command(info_p, c);
25007 ++ info_p->access->submit_command(info_p, c);
25008 + complete = pollcomplete(ctlr);
25009 +
25010 + pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
25011 +@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
25012 + * we check the new geometry. Then turn interrupts back on when
25013 + * we're done.
25014 + */
25015 +- host->access.set_intr_mask(host, 0);
25016 ++ host->access->set_intr_mask(host, 0);
25017 + getgeometry(ctlr);
25018 +- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
25019 ++ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
25020 +
25021 + for(i=0; i<NWD; i++) {
25022 + struct gendisk *disk = ida_gendisk[ctlr][i];
25023 +@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
25024 + /* Wait (up to 2 seconds) for a command to complete */
25025 +
25026 + for (i = 200000; i > 0; i--) {
25027 +- done = hba[ctlr]->access.command_completed(hba[ctlr]);
25028 ++ done = hba[ctlr]->access->command_completed(hba[ctlr]);
25029 + if (done == 0) {
25030 + udelay(10); /* a short fixed delay */
25031 + } else
25032 +diff -urNp linux-3.1.1/drivers/block/cpqarray.h linux-3.1.1/drivers/block/cpqarray.h
25033 +--- linux-3.1.1/drivers/block/cpqarray.h 2011-11-11 15:19:27.000000000 -0500
25034 ++++ linux-3.1.1/drivers/block/cpqarray.h 2011-11-16 18:39:07.000000000 -0500
25035 +@@ -99,7 +99,7 @@ struct ctlr_info {
25036 + drv_info_t drv[NWD];
25037 + struct proc_dir_entry *proc;
25038 +
25039 +- struct access_method access;
25040 ++ struct access_method *access;
25041 +
25042 + cmdlist_t *reqQ;
25043 + cmdlist_t *cmpQ;
25044 +diff -urNp linux-3.1.1/drivers/block/DAC960.c linux-3.1.1/drivers/block/DAC960.c
25045 +--- linux-3.1.1/drivers/block/DAC960.c 2011-11-11 15:19:27.000000000 -0500
25046 ++++ linux-3.1.1/drivers/block/DAC960.c 2011-11-16 18:40:10.000000000 -0500
25047 +@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
25048 + unsigned long flags;
25049 + int Channel, TargetID;
25050 +
25051 ++ pax_track_stack();
25052 ++
25053 + if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
25054 + DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
25055 + sizeof(DAC960_SCSI_Inquiry_T) +
25056 +diff -urNp linux-3.1.1/drivers/block/drbd/drbd_int.h linux-3.1.1/drivers/block/drbd/drbd_int.h
25057 +--- linux-3.1.1/drivers/block/drbd/drbd_int.h 2011-11-11 15:19:27.000000000 -0500
25058 ++++ linux-3.1.1/drivers/block/drbd/drbd_int.h 2011-11-16 18:39:07.000000000 -0500
25059 +@@ -737,7 +737,7 @@ struct drbd_request;
25060 + struct drbd_epoch {
25061 + struct list_head list;
25062 + unsigned int barrier_nr;
25063 +- atomic_t epoch_size; /* increased on every request added. */
25064 ++ atomic_unchecked_t epoch_size; /* increased on every request added. */
25065 + atomic_t active; /* increased on every req. added, and dec on every finished. */
25066 + unsigned long flags;
25067 + };
25068 +@@ -1109,7 +1109,7 @@ struct drbd_conf {
25069 + void *int_dig_in;
25070 + void *int_dig_vv;
25071 + wait_queue_head_t seq_wait;
25072 +- atomic_t packet_seq;
25073 ++ atomic_unchecked_t packet_seq;
25074 + unsigned int peer_seq;
25075 + spinlock_t peer_seq_lock;
25076 + unsigned int minor;
25077 +@@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct
25078 +
25079 + static inline void drbd_tcp_cork(struct socket *sock)
25080 + {
25081 +- int __user val = 1;
25082 ++ int val = 1;
25083 + (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
25084 +- (char __user *)&val, sizeof(val));
25085 ++ (char __force_user *)&val, sizeof(val));
25086 + }
25087 +
25088 + static inline void drbd_tcp_uncork(struct socket *sock)
25089 + {
25090 +- int __user val = 0;
25091 ++ int val = 0;
25092 + (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
25093 +- (char __user *)&val, sizeof(val));
25094 ++ (char __force_user *)&val, sizeof(val));
25095 + }
25096 +
25097 + static inline void drbd_tcp_nodelay(struct socket *sock)
25098 + {
25099 +- int __user val = 1;
25100 ++ int val = 1;
25101 + (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
25102 +- (char __user *)&val, sizeof(val));
25103 ++ (char __force_user *)&val, sizeof(val));
25104 + }
25105 +
25106 + static inline void drbd_tcp_quickack(struct socket *sock)
25107 + {
25108 +- int __user val = 2;
25109 ++ int val = 2;
25110 + (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
25111 +- (char __user *)&val, sizeof(val));
25112 ++ (char __force_user *)&val, sizeof(val));
25113 + }
25114 +
25115 + void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
25116 +diff -urNp linux-3.1.1/drivers/block/drbd/drbd_main.c linux-3.1.1/drivers/block/drbd/drbd_main.c
25117 +--- linux-3.1.1/drivers/block/drbd/drbd_main.c 2011-11-11 15:19:27.000000000 -0500
25118 ++++ linux-3.1.1/drivers/block/drbd/drbd_main.c 2011-11-16 18:39:07.000000000 -0500
25119 +@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_co
25120 + p.sector = sector;
25121 + p.block_id = block_id;
25122 + p.blksize = blksize;
25123 +- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
25124 ++ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
25125 +
25126 + if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
25127 + return false;
25128 +@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *m
25129 + p.sector = cpu_to_be64(req->sector);
25130 + p.block_id = (unsigned long)req;
25131 + p.seq_num = cpu_to_be32(req->seq_num =
25132 +- atomic_add_return(1, &mdev->packet_seq));
25133 ++ atomic_add_return_unchecked(1, &mdev->packet_seq));
25134 +
25135 + dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
25136 +
25137 +@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_
25138 + atomic_set(&mdev->unacked_cnt, 0);
25139 + atomic_set(&mdev->local_cnt, 0);
25140 + atomic_set(&mdev->net_cnt, 0);
25141 +- atomic_set(&mdev->packet_seq, 0);
25142 ++ atomic_set_unchecked(&mdev->packet_seq, 0);
25143 + atomic_set(&mdev->pp_in_use, 0);
25144 + atomic_set(&mdev->pp_in_use_by_net, 0);
25145 + atomic_set(&mdev->rs_sect_in, 0);
25146 +@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf
25147 + mdev->receiver.t_state);
25148 +
25149 + /* no need to lock it, I'm the only thread alive */
25150 +- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
25151 +- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
25152 ++ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
25153 ++ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
25154 + mdev->al_writ_cnt =
25155 + mdev->bm_writ_cnt =
25156 + mdev->read_cnt =
25157 +diff -urNp linux-3.1.1/drivers/block/drbd/drbd_nl.c linux-3.1.1/drivers/block/drbd/drbd_nl.c
25158 +--- linux-3.1.1/drivers/block/drbd/drbd_nl.c 2011-11-11 15:19:27.000000000 -0500
25159 ++++ linux-3.1.1/drivers/block/drbd/drbd_nl.c 2011-11-16 18:39:07.000000000 -0500
25160 +@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(stru
25161 + module_put(THIS_MODULE);
25162 + }
25163 +
25164 +-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
25165 ++static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
25166 +
25167 + static unsigned short *
25168 + __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
25169 +@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *
25170 + cn_reply->id.idx = CN_IDX_DRBD;
25171 + cn_reply->id.val = CN_VAL_DRBD;
25172 +
25173 +- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25174 ++ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25175 + cn_reply->ack = 0; /* not used here. */
25176 + cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25177 + (int)((char *)tl - (char *)reply->tag_list);
25178 +@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_co
25179 + cn_reply->id.idx = CN_IDX_DRBD;
25180 + cn_reply->id.val = CN_VAL_DRBD;
25181 +
25182 +- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25183 ++ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25184 + cn_reply->ack = 0; /* not used here. */
25185 + cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25186 + (int)((char *)tl - (char *)reply->tag_list);
25187 +@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
25188 + cn_reply->id.idx = CN_IDX_DRBD;
25189 + cn_reply->id.val = CN_VAL_DRBD;
25190 +
25191 +- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
25192 ++ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
25193 + cn_reply->ack = 0; // not used here.
25194 + cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25195 + (int)((char*)tl - (char*)reply->tag_list);
25196 +@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drb
25197 + cn_reply->id.idx = CN_IDX_DRBD;
25198 + cn_reply->id.val = CN_VAL_DRBD;
25199 +
25200 +- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
25201 ++ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
25202 + cn_reply->ack = 0; /* not used here. */
25203 + cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
25204 + (int)((char *)tl - (char *)reply->tag_list);
25205 +diff -urNp linux-3.1.1/drivers/block/drbd/drbd_receiver.c linux-3.1.1/drivers/block/drbd/drbd_receiver.c
25206 +--- linux-3.1.1/drivers/block/drbd/drbd_receiver.c 2011-11-11 15:19:27.000000000 -0500
25207 ++++ linux-3.1.1/drivers/block/drbd/drbd_receiver.c 2011-11-16 18:39:07.000000000 -0500
25208 +@@ -894,7 +894,7 @@ retry:
25209 + sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
25210 + sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
25211 +
25212 +- atomic_set(&mdev->packet_seq, 0);
25213 ++ atomic_set_unchecked(&mdev->packet_seq, 0);
25214 + mdev->peer_seq = 0;
25215 +
25216 + drbd_thread_start(&mdev->asender);
25217 +@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish
25218 + do {
25219 + next_epoch = NULL;
25220 +
25221 +- epoch_size = atomic_read(&epoch->epoch_size);
25222 ++ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
25223 +
25224 + switch (ev & ~EV_CLEANUP) {
25225 + case EV_PUT:
25226 +@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish
25227 + rv = FE_DESTROYED;
25228 + } else {
25229 + epoch->flags = 0;
25230 +- atomic_set(&epoch->epoch_size, 0);
25231 ++ atomic_set_unchecked(&epoch->epoch_size, 0);
25232 + /* atomic_set(&epoch->active, 0); is already zero */
25233 + if (rv == FE_STILL_LIVE)
25234 + rv = FE_RECYCLED;
25235 +@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_c
25236 + drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
25237 + drbd_flush(mdev);
25238 +
25239 +- if (atomic_read(&mdev->current_epoch->epoch_size)) {
25240 ++ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
25241 + epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
25242 + if (epoch)
25243 + break;
25244 + }
25245 +
25246 + epoch = mdev->current_epoch;
25247 +- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
25248 ++ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
25249 +
25250 + D_ASSERT(atomic_read(&epoch->active) == 0);
25251 + D_ASSERT(epoch->flags == 0);
25252 +@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_c
25253 + }
25254 +
25255 + epoch->flags = 0;
25256 +- atomic_set(&epoch->epoch_size, 0);
25257 ++ atomic_set_unchecked(&epoch->epoch_size, 0);
25258 + atomic_set(&epoch->active, 0);
25259 +
25260 + spin_lock(&mdev->epoch_lock);
25261 +- if (atomic_read(&mdev->current_epoch->epoch_size)) {
25262 ++ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
25263 + list_add(&epoch->list, &mdev->current_epoch->list);
25264 + mdev->current_epoch = epoch;
25265 + mdev->epochs++;
25266 +@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf
25267 + spin_unlock(&mdev->peer_seq_lock);
25268 +
25269 + drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
25270 +- atomic_inc(&mdev->current_epoch->epoch_size);
25271 ++ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
25272 + return drbd_drain_block(mdev, data_size);
25273 + }
25274 +
25275 +@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf
25276 +
25277 + spin_lock(&mdev->epoch_lock);
25278 + e->epoch = mdev->current_epoch;
25279 +- atomic_inc(&e->epoch->epoch_size);
25280 ++ atomic_inc_unchecked(&e->epoch->epoch_size);
25281 + atomic_inc(&e->epoch->active);
25282 + spin_unlock(&mdev->epoch_lock);
25283 +
25284 +@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_
25285 + D_ASSERT(list_empty(&mdev->done_ee));
25286 +
25287 + /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
25288 +- atomic_set(&mdev->current_epoch->epoch_size, 0);
25289 ++ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
25290 + D_ASSERT(list_empty(&mdev->current_epoch->list));
25291 + }
25292 +
25293 +diff -urNp linux-3.1.1/drivers/block/loop.c linux-3.1.1/drivers/block/loop.c
25294 +--- linux-3.1.1/drivers/block/loop.c 2011-11-11 15:19:27.000000000 -0500
25295 ++++ linux-3.1.1/drivers/block/loop.c 2011-11-16 18:39:07.000000000 -0500
25296 +@@ -283,7 +283,7 @@ static int __do_lo_send_write(struct fil
25297 + mm_segment_t old_fs = get_fs();
25298 +
25299 + set_fs(get_ds());
25300 +- bw = file->f_op->write(file, buf, len, &pos);
25301 ++ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
25302 + set_fs(old_fs);
25303 + if (likely(bw == len))
25304 + return 0;
25305 +diff -urNp linux-3.1.1/drivers/block/nbd.c linux-3.1.1/drivers/block/nbd.c
25306 +--- linux-3.1.1/drivers/block/nbd.c 2011-11-11 15:19:27.000000000 -0500
25307 ++++ linux-3.1.1/drivers/block/nbd.c 2011-11-16 18:40:10.000000000 -0500
25308 +@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
25309 + struct kvec iov;
25310 + sigset_t blocked, oldset;
25311 +
25312 ++ pax_track_stack();
25313 ++
25314 + if (unlikely(!sock)) {
25315 + printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
25316 + lo->disk->disk_name, (send ? "send" : "recv"));
25317 +@@ -572,6 +574,8 @@ static void do_nbd_request(struct reques
25318 + static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
25319 + unsigned int cmd, unsigned long arg)
25320 + {
25321 ++ pax_track_stack();
25322 ++
25323 + switch (cmd) {
25324 + case NBD_DISCONNECT: {
25325 + struct request sreq;
25326 +diff -urNp linux-3.1.1/drivers/char/agp/frontend.c linux-3.1.1/drivers/char/agp/frontend.c
25327 +--- linux-3.1.1/drivers/char/agp/frontend.c 2011-11-11 15:19:27.000000000 -0500
25328 ++++ linux-3.1.1/drivers/char/agp/frontend.c 2011-11-16 18:39:07.000000000 -0500
25329 +@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
25330 + if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
25331 + return -EFAULT;
25332 +
25333 +- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
25334 ++ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
25335 + return -EFAULT;
25336 +
25337 + client = agp_find_client_by_pid(reserve.pid);
25338 +diff -urNp linux-3.1.1/drivers/char/briq_panel.c linux-3.1.1/drivers/char/briq_panel.c
25339 +--- linux-3.1.1/drivers/char/briq_panel.c 2011-11-11 15:19:27.000000000 -0500
25340 ++++ linux-3.1.1/drivers/char/briq_panel.c 2011-11-16 18:40:10.000000000 -0500
25341 +@@ -9,6 +9,7 @@
25342 + #include <linux/types.h>
25343 + #include <linux/errno.h>
25344 + #include <linux/tty.h>
25345 ++#include <linux/mutex.h>
25346 + #include <linux/timer.h>
25347 + #include <linux/kernel.h>
25348 + #include <linux/wait.h>
25349 +@@ -34,6 +35,7 @@ static int vfd_is_open;
25350 + static unsigned char vfd[40];
25351 + static int vfd_cursor;
25352 + static unsigned char ledpb, led;
25353 ++static DEFINE_MUTEX(vfd_mutex);
25354 +
25355 + static void update_vfd(void)
25356 + {
25357 +@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
25358 + if (!vfd_is_open)
25359 + return -EBUSY;
25360 +
25361 ++ mutex_lock(&vfd_mutex);
25362 + for (;;) {
25363 + char c;
25364 + if (!indx)
25365 + break;
25366 +- if (get_user(c, buf))
25367 ++ if (get_user(c, buf)) {
25368 ++ mutex_unlock(&vfd_mutex);
25369 + return -EFAULT;
25370 ++ }
25371 + if (esc) {
25372 + set_led(c);
25373 + esc = 0;
25374 +@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
25375 + buf++;
25376 + }
25377 + update_vfd();
25378 ++ mutex_unlock(&vfd_mutex);
25379 +
25380 + return len;
25381 + }
25382 +diff -urNp linux-3.1.1/drivers/char/genrtc.c linux-3.1.1/drivers/char/genrtc.c
25383 +--- linux-3.1.1/drivers/char/genrtc.c 2011-11-11 15:19:27.000000000 -0500
25384 ++++ linux-3.1.1/drivers/char/genrtc.c 2011-11-16 18:40:10.000000000 -0500
25385 +@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
25386 + switch (cmd) {
25387 +
25388 + case RTC_PLL_GET:
25389 ++ memset(&pll, 0, sizeof(pll));
25390 + if (get_rtc_pll(&pll))
25391 + return -EINVAL;
25392 + else
25393 +diff -urNp linux-3.1.1/drivers/char/hpet.c linux-3.1.1/drivers/char/hpet.c
25394 +--- linux-3.1.1/drivers/char/hpet.c 2011-11-11 15:19:27.000000000 -0500
25395 ++++ linux-3.1.1/drivers/char/hpet.c 2011-11-16 18:39:07.000000000 -0500
25396 +@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_di
25397 + }
25398 +
25399 + static int
25400 +-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
25401 ++hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
25402 + struct hpet_info *info)
25403 + {
25404 + struct hpet_timer __iomem *timer;
25405 +diff -urNp linux-3.1.1/drivers/char/ipmi/ipmi_msghandler.c linux-3.1.1/drivers/char/ipmi/ipmi_msghandler.c
25406 +--- linux-3.1.1/drivers/char/ipmi/ipmi_msghandler.c 2011-11-11 15:19:27.000000000 -0500
25407 ++++ linux-3.1.1/drivers/char/ipmi/ipmi_msghandler.c 2011-11-16 18:40:10.000000000 -0500
25408 +@@ -415,7 +415,7 @@ struct ipmi_smi {
25409 + struct proc_dir_entry *proc_dir;
25410 + char proc_dir_name[10];
25411 +
25412 +- atomic_t stats[IPMI_NUM_STATS];
25413 ++ atomic_unchecked_t stats[IPMI_NUM_STATS];
25414 +
25415 + /*
25416 + * run_to_completion duplicate of smb_info, smi_info
25417 +@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
25418 +
25419 +
25420 + #define ipmi_inc_stat(intf, stat) \
25421 +- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
25422 ++ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
25423 + #define ipmi_get_stat(intf, stat) \
25424 +- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
25425 ++ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
25426 +
25427 + static int is_lan_addr(struct ipmi_addr *addr)
25428 + {
25429 +@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
25430 + INIT_LIST_HEAD(&intf->cmd_rcvrs);
25431 + init_waitqueue_head(&intf->waitq);
25432 + for (i = 0; i < IPMI_NUM_STATS; i++)
25433 +- atomic_set(&intf->stats[i], 0);
25434 ++ atomic_set_unchecked(&intf->stats[i], 0);
25435 +
25436 + intf->proc_dir = NULL;
25437 +
25438 +@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
25439 + struct ipmi_smi_msg smi_msg;
25440 + struct ipmi_recv_msg recv_msg;
25441 +
25442 ++ pax_track_stack();
25443 ++
25444 + si = (struct ipmi_system_interface_addr *) &addr;
25445 + si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
25446 + si->channel = IPMI_BMC_CHANNEL;
25447 +diff -urNp linux-3.1.1/drivers/char/ipmi/ipmi_si_intf.c linux-3.1.1/drivers/char/ipmi/ipmi_si_intf.c
25448 +--- linux-3.1.1/drivers/char/ipmi/ipmi_si_intf.c 2011-11-11 15:19:27.000000000 -0500
25449 ++++ linux-3.1.1/drivers/char/ipmi/ipmi_si_intf.c 2011-11-16 18:39:07.000000000 -0500
25450 +@@ -277,7 +277,7 @@ struct smi_info {
25451 + unsigned char slave_addr;
25452 +
25453 + /* Counters and things for the proc filesystem. */
25454 +- atomic_t stats[SI_NUM_STATS];
25455 ++ atomic_unchecked_t stats[SI_NUM_STATS];
25456 +
25457 + struct task_struct *thread;
25458 +
25459 +@@ -286,9 +286,9 @@ struct smi_info {
25460 + };
25461 +
25462 + #define smi_inc_stat(smi, stat) \
25463 +- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
25464 ++ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
25465 + #define smi_get_stat(smi, stat) \
25466 +- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
25467 ++ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
25468 +
25469 + #define SI_MAX_PARMS 4
25470 +
25471 +@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info
25472 + atomic_set(&new_smi->req_events, 0);
25473 + new_smi->run_to_completion = 0;
25474 + for (i = 0; i < SI_NUM_STATS; i++)
25475 +- atomic_set(&new_smi->stats[i], 0);
25476 ++ atomic_set_unchecked(&new_smi->stats[i], 0);
25477 +
25478 + new_smi->interrupt_disabled = 1;
25479 + atomic_set(&new_smi->stop_operation, 0);
25480 +diff -urNp linux-3.1.1/drivers/char/Kconfig linux-3.1.1/drivers/char/Kconfig
25481 +--- linux-3.1.1/drivers/char/Kconfig 2011-11-11 15:19:27.000000000 -0500
25482 ++++ linux-3.1.1/drivers/char/Kconfig 2011-11-16 18:40:10.000000000 -0500
25483 +@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
25484 +
25485 + config DEVKMEM
25486 + bool "/dev/kmem virtual device support"
25487 +- default y
25488 ++ default n
25489 ++ depends on !GRKERNSEC_KMEM
25490 + help
25491 + Say Y here if you want to support the /dev/kmem device. The
25492 + /dev/kmem device is rarely used, but can be used for certain
25493 +@@ -596,6 +597,7 @@ config DEVPORT
25494 + bool
25495 + depends on !M68K
25496 + depends on ISA || PCI
25497 ++ depends on !GRKERNSEC_KMEM
25498 + default y
25499 +
25500 + source "drivers/s390/char/Kconfig"
25501 +diff -urNp linux-3.1.1/drivers/char/mbcs.c linux-3.1.1/drivers/char/mbcs.c
25502 +--- linux-3.1.1/drivers/char/mbcs.c 2011-11-11 15:19:27.000000000 -0500
25503 ++++ linux-3.1.1/drivers/char/mbcs.c 2011-11-16 18:39:07.000000000 -0500
25504 +@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *de
25505 + return 0;
25506 + }
25507 +
25508 +-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
25509 ++static const struct cx_device_id __devinitconst mbcs_id_table[] = {
25510 + {
25511 + .part_num = MBCS_PART_NUM,
25512 + .mfg_num = MBCS_MFG_NUM,
25513 +diff -urNp linux-3.1.1/drivers/char/mem.c linux-3.1.1/drivers/char/mem.c
25514 +--- linux-3.1.1/drivers/char/mem.c 2011-11-11 15:19:27.000000000 -0500
25515 ++++ linux-3.1.1/drivers/char/mem.c 2011-11-17 18:31:56.000000000 -0500
25516 +@@ -18,6 +18,7 @@
25517 + #include <linux/raw.h>
25518 + #include <linux/tty.h>
25519 + #include <linux/capability.h>
25520 ++#include <linux/security.h>
25521 + #include <linux/ptrace.h>
25522 + #include <linux/device.h>
25523 + #include <linux/highmem.h>
25524 +@@ -34,6 +35,10 @@
25525 + # include <linux/efi.h>
25526 + #endif
25527 +
25528 ++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
25529 ++extern const struct file_operations grsec_fops;
25530 ++#endif
25531 ++
25532 + static inline unsigned long size_inside_page(unsigned long start,
25533 + unsigned long size)
25534 + {
25535 +@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
25536 +
25537 + while (cursor < to) {
25538 + if (!devmem_is_allowed(pfn)) {
25539 ++#ifdef CONFIG_GRKERNSEC_KMEM
25540 ++ gr_handle_mem_readwrite(from, to);
25541 ++#else
25542 + printk(KERN_INFO
25543 + "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25544 + current->comm, from, to);
25545 ++#endif
25546 + return 0;
25547 + }
25548 + cursor += PAGE_SIZE;
25549 +@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
25550 + }
25551 + return 1;
25552 + }
25553 ++#elif defined(CONFIG_GRKERNSEC_KMEM)
25554 ++static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25555 ++{
25556 ++ return 0;
25557 ++}
25558 + #else
25559 + static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25560 + {
25561 +@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
25562 +
25563 + while (count > 0) {
25564 + unsigned long remaining;
25565 ++ char *temp;
25566 +
25567 + sz = size_inside_page(p, count);
25568 +
25569 +@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
25570 + if (!ptr)
25571 + return -EFAULT;
25572 +
25573 +- remaining = copy_to_user(buf, ptr, sz);
25574 ++#ifdef CONFIG_PAX_USERCOPY
25575 ++ temp = kmalloc(sz, GFP_KERNEL);
25576 ++ if (!temp) {
25577 ++ unxlate_dev_mem_ptr(p, ptr);
25578 ++ return -ENOMEM;
25579 ++ }
25580 ++ memcpy(temp, ptr, sz);
25581 ++#else
25582 ++ temp = ptr;
25583 ++#endif
25584 ++
25585 ++ remaining = copy_to_user(buf, temp, sz);
25586 ++
25587 ++#ifdef CONFIG_PAX_USERCOPY
25588 ++ kfree(temp);
25589 ++#endif
25590 ++
25591 + unxlate_dev_mem_ptr(p, ptr);
25592 + if (remaining)
25593 + return -EFAULT;
25594 +@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
25595 + size_t count, loff_t *ppos)
25596 + {
25597 + unsigned long p = *ppos;
25598 +- ssize_t low_count, read, sz;
25599 ++ ssize_t low_count, read, sz, err = 0;
25600 + char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
25601 +- int err = 0;
25602 +
25603 + read = 0;
25604 + if (p < (unsigned long) high_memory) {
25605 +@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
25606 + }
25607 + #endif
25608 + while (low_count > 0) {
25609 ++ char *temp;
25610 ++
25611 + sz = size_inside_page(p, low_count);
25612 +
25613 + /*
25614 +@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
25615 + */
25616 + kbuf = xlate_dev_kmem_ptr((char *)p);
25617 +
25618 +- if (copy_to_user(buf, kbuf, sz))
25619 ++#ifdef CONFIG_PAX_USERCOPY
25620 ++ temp = kmalloc(sz, GFP_KERNEL);
25621 ++ if (!temp)
25622 ++ return -ENOMEM;
25623 ++ memcpy(temp, kbuf, sz);
25624 ++#else
25625 ++ temp = kbuf;
25626 ++#endif
25627 ++
25628 ++ err = copy_to_user(buf, temp, sz);
25629 ++
25630 ++#ifdef CONFIG_PAX_USERCOPY
25631 ++ kfree(temp);
25632 ++#endif
25633 ++
25634 ++ if (err)
25635 + return -EFAULT;
25636 + buf += sz;
25637 + p += sz;
25638 +@@ -866,6 +913,9 @@ static const struct memdev {
25639 + #ifdef CONFIG_CRASH_DUMP
25640 + [12] = { "oldmem", 0, &oldmem_fops, NULL },
25641 + #endif
25642 ++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
25643 ++ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
25644 ++#endif
25645 + };
25646 +
25647 + static int memory_open(struct inode *inode, struct file *filp)
25648 +diff -urNp linux-3.1.1/drivers/char/nvram.c linux-3.1.1/drivers/char/nvram.c
25649 +--- linux-3.1.1/drivers/char/nvram.c 2011-11-11 15:19:27.000000000 -0500
25650 ++++ linux-3.1.1/drivers/char/nvram.c 2011-11-16 18:39:07.000000000 -0500
25651 +@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *f
25652 +
25653 + spin_unlock_irq(&rtc_lock);
25654 +
25655 +- if (copy_to_user(buf, contents, tmp - contents))
25656 ++ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
25657 + return -EFAULT;
25658 +
25659 + *ppos = i;
25660 +diff -urNp linux-3.1.1/drivers/char/random.c linux-3.1.1/drivers/char/random.c
25661 +--- linux-3.1.1/drivers/char/random.c 2011-11-11 15:19:27.000000000 -0500
25662 ++++ linux-3.1.1/drivers/char/random.c 2011-11-16 18:40:10.000000000 -0500
25663 +@@ -261,8 +261,13 @@
25664 + /*
25665 + * Configuration information
25666 + */
25667 ++#ifdef CONFIG_GRKERNSEC_RANDNET
25668 ++#define INPUT_POOL_WORDS 512
25669 ++#define OUTPUT_POOL_WORDS 128
25670 ++#else
25671 + #define INPUT_POOL_WORDS 128
25672 + #define OUTPUT_POOL_WORDS 32
25673 ++#endif
25674 + #define SEC_XFER_SIZE 512
25675 + #define EXTRACT_SIZE 10
25676 +
25677 +@@ -300,10 +305,17 @@ static struct poolinfo {
25678 + int poolwords;
25679 + int tap1, tap2, tap3, tap4, tap5;
25680 + } poolinfo_table[] = {
25681 ++#ifdef CONFIG_GRKERNSEC_RANDNET
25682 ++ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
25683 ++ { 512, 411, 308, 208, 104, 1 },
25684 ++ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
25685 ++ { 128, 103, 76, 51, 25, 1 },
25686 ++#else
25687 + /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
25688 + { 128, 103, 76, 51, 25, 1 },
25689 + /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
25690 + { 32, 26, 20, 14, 7, 1 },
25691 ++#endif
25692 + #if 0
25693 + /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
25694 + { 2048, 1638, 1231, 819, 411, 1 },
25695 +@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
25696 +
25697 + extract_buf(r, tmp);
25698 + i = min_t(int, nbytes, EXTRACT_SIZE);
25699 +- if (copy_to_user(buf, tmp, i)) {
25700 ++ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
25701 + ret = -EFAULT;
25702 + break;
25703 + }
25704 +@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
25705 + #include <linux/sysctl.h>
25706 +
25707 + static int min_read_thresh = 8, min_write_thresh;
25708 +-static int max_read_thresh = INPUT_POOL_WORDS * 32;
25709 ++static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
25710 + static int max_write_thresh = INPUT_POOL_WORDS * 32;
25711 + static char sysctl_bootid[16];
25712 +
25713 +diff -urNp linux-3.1.1/drivers/char/sonypi.c linux-3.1.1/drivers/char/sonypi.c
25714 +--- linux-3.1.1/drivers/char/sonypi.c 2011-11-11 15:19:27.000000000 -0500
25715 ++++ linux-3.1.1/drivers/char/sonypi.c 2011-11-16 18:39:07.000000000 -0500
25716 +@@ -55,6 +55,7 @@
25717 + #include <asm/uaccess.h>
25718 + #include <asm/io.h>
25719 + #include <asm/system.h>
25720 ++#include <asm/local.h>
25721 +
25722 + #include <linux/sonypi.h>
25723 +
25724 +@@ -491,7 +492,7 @@ static struct sonypi_device {
25725 + spinlock_t fifo_lock;
25726 + wait_queue_head_t fifo_proc_list;
25727 + struct fasync_struct *fifo_async;
25728 +- int open_count;
25729 ++ local_t open_count;
25730 + int model;
25731 + struct input_dev *input_jog_dev;
25732 + struct input_dev *input_key_dev;
25733 +@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
25734 + static int sonypi_misc_release(struct inode *inode, struct file *file)
25735 + {
25736 + mutex_lock(&sonypi_device.lock);
25737 +- sonypi_device.open_count--;
25738 ++ local_dec(&sonypi_device.open_count);
25739 + mutex_unlock(&sonypi_device.lock);
25740 + return 0;
25741 + }
25742 +@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
25743 + {
25744 + mutex_lock(&sonypi_device.lock);
25745 + /* Flush input queue on first open */
25746 +- if (!sonypi_device.open_count)
25747 ++ if (!local_read(&sonypi_device.open_count))
25748 + kfifo_reset(&sonypi_device.fifo);
25749 +- sonypi_device.open_count++;
25750 ++ local_inc(&sonypi_device.open_count);
25751 + mutex_unlock(&sonypi_device.lock);
25752 +
25753 + return 0;
25754 +diff -urNp linux-3.1.1/drivers/char/tpm/tpm_bios.c linux-3.1.1/drivers/char/tpm/tpm_bios.c
25755 +--- linux-3.1.1/drivers/char/tpm/tpm_bios.c 2011-11-11 15:19:27.000000000 -0500
25756 ++++ linux-3.1.1/drivers/char/tpm/tpm_bios.c 2011-11-16 18:39:07.000000000 -0500
25757 +@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
25758 + event = addr;
25759 +
25760 + if ((event->event_type == 0 && event->event_size == 0) ||
25761 +- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
25762 ++ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
25763 + return NULL;
25764 +
25765 + return addr;
25766 +@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
25767 + return NULL;
25768 +
25769 + if ((event->event_type == 0 && event->event_size == 0) ||
25770 +- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
25771 ++ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
25772 + return NULL;
25773 +
25774 + (*pos)++;
25775 +@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
25776 + int i;
25777 +
25778 + for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
25779 +- seq_putc(m, data[i]);
25780 ++ if (!seq_putc(m, data[i]))
25781 ++ return -EFAULT;
25782 +
25783 + return 0;
25784 + }
25785 +@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log
25786 + log->bios_event_log_end = log->bios_event_log + len;
25787 +
25788 + virt = acpi_os_map_memory(start, len);
25789 ++ if (!virt) {
25790 ++ kfree(log->bios_event_log);
25791 ++ log->bios_event_log = NULL;
25792 ++ return -EFAULT;
25793 ++ }
25794 +
25795 +- memcpy(log->bios_event_log, virt, len);
25796 ++ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
25797 +
25798 + acpi_os_unmap_memory(virt, len);
25799 + return 0;
25800 +diff -urNp linux-3.1.1/drivers/char/tpm/tpm.c linux-3.1.1/drivers/char/tpm/tpm.c
25801 +--- linux-3.1.1/drivers/char/tpm/tpm.c 2011-11-11 15:19:27.000000000 -0500
25802 ++++ linux-3.1.1/drivers/char/tpm/tpm.c 2011-11-16 18:40:10.000000000 -0500
25803 +@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_c
25804 + chip->vendor.req_complete_val)
25805 + goto out_recv;
25806 +
25807 +- if ((status == chip->vendor.req_canceled)) {
25808 ++ if (status == chip->vendor.req_canceled) {
25809 + dev_err(chip->dev, "Operation Canceled\n");
25810 + rc = -ECANCELED;
25811 + goto out;
25812 +@@ -862,6 +862,8 @@ ssize_t tpm_show_pubek(struct device *de
25813 +
25814 + struct tpm_chip *chip = dev_get_drvdata(dev);
25815 +
25816 ++ pax_track_stack();
25817 ++
25818 + tpm_cmd.header.in = tpm_readpubek_header;
25819 + err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
25820 + "attempting to read the PUBEK");
25821 +diff -urNp linux-3.1.1/drivers/char/virtio_console.c linux-3.1.1/drivers/char/virtio_console.c
25822 +--- linux-3.1.1/drivers/char/virtio_console.c 2011-11-11 15:19:27.000000000 -0500
25823 ++++ linux-3.1.1/drivers/char/virtio_console.c 2011-11-16 18:39:07.000000000 -0500
25824 +@@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port
25825 + if (to_user) {
25826 + ssize_t ret;
25827 +
25828 +- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
25829 ++ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
25830 + if (ret)
25831 + return -EFAULT;
25832 + } else {
25833 +@@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct fil
25834 + if (!port_has_data(port) && !port->host_connected)
25835 + return 0;
25836 +
25837 +- return fill_readbuf(port, ubuf, count, true);
25838 ++ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
25839 + }
25840 +
25841 + static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
25842 +diff -urNp linux-3.1.1/drivers/crypto/hifn_795x.c linux-3.1.1/drivers/crypto/hifn_795x.c
25843 +--- linux-3.1.1/drivers/crypto/hifn_795x.c 2011-11-11 15:19:27.000000000 -0500
25844 ++++ linux-3.1.1/drivers/crypto/hifn_795x.c 2011-11-16 18:40:10.000000000 -0500
25845 +@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
25846 + 0xCA, 0x34, 0x2B, 0x2E};
25847 + struct scatterlist sg;
25848 +
25849 ++ pax_track_stack();
25850 ++
25851 + memset(src, 0, sizeof(src));
25852 + memset(ctx.key, 0, sizeof(ctx.key));
25853 +
25854 +diff -urNp linux-3.1.1/drivers/crypto/padlock-aes.c linux-3.1.1/drivers/crypto/padlock-aes.c
25855 +--- linux-3.1.1/drivers/crypto/padlock-aes.c 2011-11-11 15:19:27.000000000 -0500
25856 ++++ linux-3.1.1/drivers/crypto/padlock-aes.c 2011-11-16 18:40:10.000000000 -0500
25857 +@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
25858 + struct crypto_aes_ctx gen_aes;
25859 + int cpu;
25860 +
25861 ++ pax_track_stack();
25862 ++
25863 + if (key_len % 8) {
25864 + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
25865 + return -EINVAL;
25866 +diff -urNp linux-3.1.1/drivers/edac/amd64_edac.c linux-3.1.1/drivers/edac/amd64_edac.c
25867 +--- linux-3.1.1/drivers/edac/amd64_edac.c 2011-11-11 15:19:27.000000000 -0500
25868 ++++ linux-3.1.1/drivers/edac/amd64_edac.c 2011-11-16 18:39:07.000000000 -0500
25869 +@@ -2670,7 +2670,7 @@ static void __devexit amd64_remove_one_i
25870 + * PCI core identifies what devices are on a system during boot, and then
25871 + * inquiry this table to see if this driver is for a given device found.
25872 + */
25873 +-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
25874 ++static const struct pci_device_id amd64_pci_table[] __devinitconst = {
25875 + {
25876 + .vendor = PCI_VENDOR_ID_AMD,
25877 + .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
25878 +diff -urNp linux-3.1.1/drivers/edac/amd76x_edac.c linux-3.1.1/drivers/edac/amd76x_edac.c
25879 +--- linux-3.1.1/drivers/edac/amd76x_edac.c 2011-11-11 15:19:27.000000000 -0500
25880 ++++ linux-3.1.1/drivers/edac/amd76x_edac.c 2011-11-16 18:39:07.000000000 -0500
25881 +@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(
25882 + edac_mc_free(mci);
25883 + }
25884 +
25885 +-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
25886 ++static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
25887 + {
25888 + PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25889 + AMD762},
25890 +diff -urNp linux-3.1.1/drivers/edac/e752x_edac.c linux-3.1.1/drivers/edac/e752x_edac.c
25891 +--- linux-3.1.1/drivers/edac/e752x_edac.c 2011-11-11 15:19:27.000000000 -0500
25892 ++++ linux-3.1.1/drivers/edac/e752x_edac.c 2011-11-16 18:39:07.000000000 -0500
25893 +@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(s
25894 + edac_mc_free(mci);
25895 + }
25896 +
25897 +-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
25898 ++static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
25899 + {
25900 + PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25901 + E7520},
25902 +diff -urNp linux-3.1.1/drivers/edac/e7xxx_edac.c linux-3.1.1/drivers/edac/e7xxx_edac.c
25903 +--- linux-3.1.1/drivers/edac/e7xxx_edac.c 2011-11-11 15:19:27.000000000 -0500
25904 ++++ linux-3.1.1/drivers/edac/e7xxx_edac.c 2011-11-16 18:39:07.000000000 -0500
25905 +@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(s
25906 + edac_mc_free(mci);
25907 + }
25908 +
25909 +-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
25910 ++static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
25911 + {
25912 + PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
25913 + E7205},
25914 +diff -urNp linux-3.1.1/drivers/edac/edac_pci_sysfs.c linux-3.1.1/drivers/edac/edac_pci_sysfs.c
25915 +--- linux-3.1.1/drivers/edac/edac_pci_sysfs.c 2011-11-11 15:19:27.000000000 -0500
25916 ++++ linux-3.1.1/drivers/edac/edac_pci_sysfs.c 2011-11-16 18:39:07.000000000 -0500
25917 +@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
25918 + static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
25919 + static int edac_pci_poll_msec = 1000; /* one second workq period */
25920 +
25921 +-static atomic_t pci_parity_count = ATOMIC_INIT(0);
25922 +-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
25923 ++static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
25924 ++static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
25925 +
25926 + static struct kobject *edac_pci_top_main_kobj;
25927 + static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
25928 +@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
25929 + edac_printk(KERN_CRIT, EDAC_PCI,
25930 + "Signaled System Error on %s\n",
25931 + pci_name(dev));
25932 +- atomic_inc(&pci_nonparity_count);
25933 ++ atomic_inc_unchecked(&pci_nonparity_count);
25934 + }
25935 +
25936 + if (status & (PCI_STATUS_PARITY)) {
25937 +@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
25938 + "Master Data Parity Error on %s\n",
25939 + pci_name(dev));
25940 +
25941 +- atomic_inc(&pci_parity_count);
25942 ++ atomic_inc_unchecked(&pci_parity_count);
25943 + }
25944 +
25945 + if (status & (PCI_STATUS_DETECTED_PARITY)) {
25946 +@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
25947 + "Detected Parity Error on %s\n",
25948 + pci_name(dev));
25949 +
25950 +- atomic_inc(&pci_parity_count);
25951 ++ atomic_inc_unchecked(&pci_parity_count);
25952 + }
25953 + }
25954 +
25955 +@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
25956 + edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
25957 + "Signaled System Error on %s\n",
25958 + pci_name(dev));
25959 +- atomic_inc(&pci_nonparity_count);
25960 ++ atomic_inc_unchecked(&pci_nonparity_count);
25961 + }
25962 +
25963 + if (status & (PCI_STATUS_PARITY)) {
25964 +@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
25965 + "Master Data Parity Error on "
25966 + "%s\n", pci_name(dev));
25967 +
25968 +- atomic_inc(&pci_parity_count);
25969 ++ atomic_inc_unchecked(&pci_parity_count);
25970 + }
25971 +
25972 + if (status & (PCI_STATUS_DETECTED_PARITY)) {
25973 +@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
25974 + "Detected Parity Error on %s\n",
25975 + pci_name(dev));
25976 +
25977 +- atomic_inc(&pci_parity_count);
25978 ++ atomic_inc_unchecked(&pci_parity_count);
25979 + }
25980 + }
25981 + }
25982 +@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
25983 + if (!check_pci_errors)
25984 + return;
25985 +
25986 +- before_count = atomic_read(&pci_parity_count);
25987 ++ before_count = atomic_read_unchecked(&pci_parity_count);
25988 +
25989 + /* scan all PCI devices looking for a Parity Error on devices and
25990 + * bridges.
25991 +@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
25992 + /* Only if operator has selected panic on PCI Error */
25993 + if (edac_pci_get_panic_on_pe()) {
25994 + /* If the count is different 'after' from 'before' */
25995 +- if (before_count != atomic_read(&pci_parity_count))
25996 ++ if (before_count != atomic_read_unchecked(&pci_parity_count))
25997 + panic("EDAC: PCI Parity Error");
25998 + }
25999 + }
26000 +diff -urNp linux-3.1.1/drivers/edac/i3000_edac.c linux-3.1.1/drivers/edac/i3000_edac.c
26001 +--- linux-3.1.1/drivers/edac/i3000_edac.c 2011-11-11 15:19:27.000000000 -0500
26002 ++++ linux-3.1.1/drivers/edac/i3000_edac.c 2011-11-16 18:39:07.000000000 -0500
26003 +@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(s
26004 + edac_mc_free(mci);
26005 + }
26006 +
26007 +-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
26008 ++static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
26009 + {
26010 + PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26011 + I3000},
26012 +diff -urNp linux-3.1.1/drivers/edac/i3200_edac.c linux-3.1.1/drivers/edac/i3200_edac.c
26013 +--- linux-3.1.1/drivers/edac/i3200_edac.c 2011-11-11 15:19:27.000000000 -0500
26014 ++++ linux-3.1.1/drivers/edac/i3200_edac.c 2011-11-16 18:39:07.000000000 -0500
26015 +@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(s
26016 + edac_mc_free(mci);
26017 + }
26018 +
26019 +-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
26020 ++static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
26021 + {
26022 + PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26023 + I3200},
26024 +diff -urNp linux-3.1.1/drivers/edac/i5000_edac.c linux-3.1.1/drivers/edac/i5000_edac.c
26025 +--- linux-3.1.1/drivers/edac/i5000_edac.c 2011-11-11 15:19:27.000000000 -0500
26026 ++++ linux-3.1.1/drivers/edac/i5000_edac.c 2011-11-16 18:39:07.000000000 -0500
26027 +@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(s
26028 + *
26029 + * The "E500P" device is the first device supported.
26030 + */
26031 +-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
26032 ++static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
26033 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
26034 + .driver_data = I5000P},
26035 +
26036 +diff -urNp linux-3.1.1/drivers/edac/i5100_edac.c linux-3.1.1/drivers/edac/i5100_edac.c
26037 +--- linux-3.1.1/drivers/edac/i5100_edac.c 2011-11-11 15:19:27.000000000 -0500
26038 ++++ linux-3.1.1/drivers/edac/i5100_edac.c 2011-11-16 18:39:07.000000000 -0500
26039 +@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(s
26040 + edac_mc_free(mci);
26041 + }
26042 +
26043 +-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
26044 ++static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
26045 + /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
26046 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
26047 + { 0, }
26048 +diff -urNp linux-3.1.1/drivers/edac/i5400_edac.c linux-3.1.1/drivers/edac/i5400_edac.c
26049 +--- linux-3.1.1/drivers/edac/i5400_edac.c 2011-11-11 15:19:27.000000000 -0500
26050 ++++ linux-3.1.1/drivers/edac/i5400_edac.c 2011-11-16 18:39:07.000000000 -0500
26051 +@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(s
26052 + *
26053 + * The "E500P" device is the first device supported.
26054 + */
26055 +-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
26056 ++static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
26057 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
26058 + {0,} /* 0 terminated list. */
26059 + };
26060 +diff -urNp linux-3.1.1/drivers/edac/i7300_edac.c linux-3.1.1/drivers/edac/i7300_edac.c
26061 +--- linux-3.1.1/drivers/edac/i7300_edac.c 2011-11-11 15:19:27.000000000 -0500
26062 ++++ linux-3.1.1/drivers/edac/i7300_edac.c 2011-11-16 18:39:07.000000000 -0500
26063 +@@ -1191,7 +1191,7 @@ static void __devexit i7300_remove_one(s
26064 + *
26065 + * Has only 8086:360c PCI ID
26066 + */
26067 +-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
26068 ++static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
26069 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
26070 + {0,} /* 0 terminated list. */
26071 + };
26072 +diff -urNp linux-3.1.1/drivers/edac/i7core_edac.c linux-3.1.1/drivers/edac/i7core_edac.c
26073 +--- linux-3.1.1/drivers/edac/i7core_edac.c 2011-11-11 15:19:27.000000000 -0500
26074 ++++ linux-3.1.1/drivers/edac/i7core_edac.c 2011-11-16 18:39:07.000000000 -0500
26075 +@@ -359,7 +359,7 @@ static const struct pci_id_table pci_dev
26076 + /*
26077 + * pci_device_id table for which devices we are looking for
26078 + */
26079 +-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
26080 ++static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
26081 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
26082 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
26083 + {0,} /* 0 terminated list. */
26084 +diff -urNp linux-3.1.1/drivers/edac/i82443bxgx_edac.c linux-3.1.1/drivers/edac/i82443bxgx_edac.c
26085 +--- linux-3.1.1/drivers/edac/i82443bxgx_edac.c 2011-11-11 15:19:27.000000000 -0500
26086 ++++ linux-3.1.1/drivers/edac/i82443bxgx_edac.c 2011-11-16 18:39:07.000000000 -0500
26087 +@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_
26088 +
26089 + EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
26090 +
26091 +-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
26092 ++static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
26093 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
26094 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
26095 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
26096 +diff -urNp linux-3.1.1/drivers/edac/i82860_edac.c linux-3.1.1/drivers/edac/i82860_edac.c
26097 +--- linux-3.1.1/drivers/edac/i82860_edac.c 2011-11-11 15:19:27.000000000 -0500
26098 ++++ linux-3.1.1/drivers/edac/i82860_edac.c 2011-11-16 18:39:07.000000000 -0500
26099 +@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(
26100 + edac_mc_free(mci);
26101 + }
26102 +
26103 +-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
26104 ++static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
26105 + {
26106 + PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26107 + I82860},
26108 +diff -urNp linux-3.1.1/drivers/edac/i82875p_edac.c linux-3.1.1/drivers/edac/i82875p_edac.c
26109 +--- linux-3.1.1/drivers/edac/i82875p_edac.c 2011-11-11 15:19:27.000000000 -0500
26110 ++++ linux-3.1.1/drivers/edac/i82875p_edac.c 2011-11-16 18:39:07.000000000 -0500
26111 +@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one
26112 + edac_mc_free(mci);
26113 + }
26114 +
26115 +-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
26116 ++static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
26117 + {
26118 + PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26119 + I82875P},
26120 +diff -urNp linux-3.1.1/drivers/edac/i82975x_edac.c linux-3.1.1/drivers/edac/i82975x_edac.c
26121 +--- linux-3.1.1/drivers/edac/i82975x_edac.c 2011-11-11 15:19:27.000000000 -0500
26122 ++++ linux-3.1.1/drivers/edac/i82975x_edac.c 2011-11-16 18:39:07.000000000 -0500
26123 +@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one
26124 + edac_mc_free(mci);
26125 + }
26126 +
26127 +-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
26128 ++static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
26129 + {
26130 + PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26131 + I82975X
26132 +diff -urNp linux-3.1.1/drivers/edac/mce_amd.h linux-3.1.1/drivers/edac/mce_amd.h
26133 +--- linux-3.1.1/drivers/edac/mce_amd.h 2011-11-11 15:19:27.000000000 -0500
26134 ++++ linux-3.1.1/drivers/edac/mce_amd.h 2011-11-16 18:39:07.000000000 -0500
26135 +@@ -83,7 +83,7 @@ struct amd_decoder_ops {
26136 + bool (*dc_mce)(u16, u8);
26137 + bool (*ic_mce)(u16, u8);
26138 + bool (*nb_mce)(u16, u8);
26139 +-};
26140 ++} __no_const;
26141 +
26142 + void amd_report_gart_errors(bool);
26143 + void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
26144 +diff -urNp linux-3.1.1/drivers/edac/r82600_edac.c linux-3.1.1/drivers/edac/r82600_edac.c
26145 +--- linux-3.1.1/drivers/edac/r82600_edac.c 2011-11-11 15:19:27.000000000 -0500
26146 ++++ linux-3.1.1/drivers/edac/r82600_edac.c 2011-11-16 18:39:07.000000000 -0500
26147 +@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(
26148 + edac_mc_free(mci);
26149 + }
26150 +
26151 +-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
26152 ++static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
26153 + {
26154 + PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
26155 + },
26156 +diff -urNp linux-3.1.1/drivers/edac/x38_edac.c linux-3.1.1/drivers/edac/x38_edac.c
26157 +--- linux-3.1.1/drivers/edac/x38_edac.c 2011-11-11 15:19:27.000000000 -0500
26158 ++++ linux-3.1.1/drivers/edac/x38_edac.c 2011-11-16 18:39:07.000000000 -0500
26159 +@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(str
26160 + edac_mc_free(mci);
26161 + }
26162 +
26163 +-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
26164 ++static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
26165 + {
26166 + PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
26167 + X38},
26168 +diff -urNp linux-3.1.1/drivers/firewire/core-card.c linux-3.1.1/drivers/firewire/core-card.c
26169 +--- linux-3.1.1/drivers/firewire/core-card.c 2011-11-11 15:19:27.000000000 -0500
26170 ++++ linux-3.1.1/drivers/firewire/core-card.c 2011-11-16 18:39:07.000000000 -0500
26171 +@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
26172 +
26173 + void fw_core_remove_card(struct fw_card *card)
26174 + {
26175 +- struct fw_card_driver dummy_driver = dummy_driver_template;
26176 ++ fw_card_driver_no_const dummy_driver = dummy_driver_template;
26177 +
26178 + card->driver->update_phy_reg(card, 4,
26179 + PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
26180 +diff -urNp linux-3.1.1/drivers/firewire/core-cdev.c linux-3.1.1/drivers/firewire/core-cdev.c
26181 +--- linux-3.1.1/drivers/firewire/core-cdev.c 2011-11-11 15:19:27.000000000 -0500
26182 ++++ linux-3.1.1/drivers/firewire/core-cdev.c 2011-11-16 18:39:07.000000000 -0500
26183 +@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct clie
26184 + int ret;
26185 +
26186 + if ((request->channels == 0 && request->bandwidth == 0) ||
26187 +- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
26188 +- request->bandwidth < 0)
26189 ++ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
26190 + return -EINVAL;
26191 +
26192 + r = kmalloc(sizeof(*r), GFP_KERNEL);
26193 +diff -urNp linux-3.1.1/drivers/firewire/core.h linux-3.1.1/drivers/firewire/core.h
26194 +--- linux-3.1.1/drivers/firewire/core.h 2011-11-11 15:19:27.000000000 -0500
26195 ++++ linux-3.1.1/drivers/firewire/core.h 2011-11-16 18:39:07.000000000 -0500
26196 +@@ -101,6 +101,7 @@ struct fw_card_driver {
26197 +
26198 + int (*stop_iso)(struct fw_iso_context *ctx);
26199 + };
26200 ++typedef struct fw_card_driver __no_const fw_card_driver_no_const;
26201 +
26202 + void fw_card_initialize(struct fw_card *card,
26203 + const struct fw_card_driver *driver, struct device *device);
26204 +diff -urNp linux-3.1.1/drivers/firewire/core-transaction.c linux-3.1.1/drivers/firewire/core-transaction.c
26205 +--- linux-3.1.1/drivers/firewire/core-transaction.c 2011-11-11 15:19:27.000000000 -0500
26206 ++++ linux-3.1.1/drivers/firewire/core-transaction.c 2011-11-16 18:40:10.000000000 -0500
26207 +@@ -37,6 +37,7 @@
26208 + #include <linux/timer.h>
26209 + #include <linux/types.h>
26210 + #include <linux/workqueue.h>
26211 ++#include <linux/sched.h>
26212 +
26213 + #include <asm/byteorder.h>
26214 +
26215 +@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *c
26216 + struct transaction_callback_data d;
26217 + struct fw_transaction t;
26218 +
26219 ++ pax_track_stack();
26220 ++
26221 + init_timer_on_stack(&t.split_timeout_timer);
26222 + init_completion(&d.done);
26223 + d.payload = payload;
26224 +diff -urNp linux-3.1.1/drivers/firmware/dmi_scan.c linux-3.1.1/drivers/firmware/dmi_scan.c
26225 +--- linux-3.1.1/drivers/firmware/dmi_scan.c 2011-11-11 15:19:27.000000000 -0500
26226 ++++ linux-3.1.1/drivers/firmware/dmi_scan.c 2011-11-16 18:39:07.000000000 -0500
26227 +@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
26228 + }
26229 + }
26230 + else {
26231 +- /*
26232 +- * no iounmap() for that ioremap(); it would be a no-op, but
26233 +- * it's so early in setup that sucker gets confused into doing
26234 +- * what it shouldn't if we actually call it.
26235 +- */
26236 + p = dmi_ioremap(0xF0000, 0x10000);
26237 + if (p == NULL)
26238 + goto error;
26239 +@@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct
26240 + if (buf == NULL)
26241 + return -1;
26242 +
26243 +- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
26244 ++ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
26245 +
26246 + iounmap(buf);
26247 + return 0;
26248 +diff -urNp linux-3.1.1/drivers/gpio/gpio-vr41xx.c linux-3.1.1/drivers/gpio/gpio-vr41xx.c
26249 +--- linux-3.1.1/drivers/gpio/gpio-vr41xx.c 2011-11-11 15:19:27.000000000 -0500
26250 ++++ linux-3.1.1/drivers/gpio/gpio-vr41xx.c 2011-11-16 18:39:07.000000000 -0500
26251 +@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
26252 + printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
26253 + maskl, pendl, maskh, pendh);
26254 +
26255 +- atomic_inc(&irq_err_count);
26256 ++ atomic_inc_unchecked(&irq_err_count);
26257 +
26258 + return -EINVAL;
26259 + }
26260 +diff -urNp linux-3.1.1/drivers/gpu/drm/drm_crtc.c linux-3.1.1/drivers/gpu/drm/drm_crtc.c
26261 +--- linux-3.1.1/drivers/gpu/drm/drm_crtc.c 2011-11-11 15:19:27.000000000 -0500
26262 ++++ linux-3.1.1/drivers/gpu/drm/drm_crtc.c 2011-11-16 18:39:07.000000000 -0500
26263 +@@ -1374,7 +1374,7 @@ int drm_mode_getconnector(struct drm_dev
26264 + */
26265 + if ((out_resp->count_modes >= mode_count) && mode_count) {
26266 + copied = 0;
26267 +- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
26268 ++ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
26269 + list_for_each_entry(mode, &connector->modes, head) {
26270 + drm_crtc_convert_to_umode(&u_mode, mode);
26271 + if (copy_to_user(mode_ptr + copied,
26272 +@@ -1389,8 +1389,8 @@ int drm_mode_getconnector(struct drm_dev
26273 +
26274 + if ((out_resp->count_props >= props_count) && props_count) {
26275 + copied = 0;
26276 +- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
26277 +- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
26278 ++ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
26279 ++ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
26280 + for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
26281 + if (connector->property_ids[i] != 0) {
26282 + if (put_user(connector->property_ids[i],
26283 +@@ -1412,7 +1412,7 @@ int drm_mode_getconnector(struct drm_dev
26284 +
26285 + if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
26286 + copied = 0;
26287 +- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
26288 ++ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
26289 + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
26290 + if (connector->encoder_ids[i] != 0) {
26291 + if (put_user(connector->encoder_ids[i],
26292 +@@ -1571,7 +1571,7 @@ int drm_mode_setcrtc(struct drm_device *
26293 + }
26294 +
26295 + for (i = 0; i < crtc_req->count_connectors; i++) {
26296 +- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
26297 ++ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
26298 + if (get_user(out_id, &set_connectors_ptr[i])) {
26299 + ret = -EFAULT;
26300 + goto out;
26301 +@@ -1852,7 +1852,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_de
26302 + fb = obj_to_fb(obj);
26303 +
26304 + num_clips = r->num_clips;
26305 +- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
26306 ++ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
26307 +
26308 + if (!num_clips != !clips_ptr) {
26309 + ret = -EINVAL;
26310 +@@ -2272,7 +2272,7 @@ int drm_mode_getproperty_ioctl(struct dr
26311 + out_resp->flags = property->flags;
26312 +
26313 + if ((out_resp->count_values >= value_count) && value_count) {
26314 +- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
26315 ++ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
26316 + for (i = 0; i < value_count; i++) {
26317 + if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
26318 + ret = -EFAULT;
26319 +@@ -2285,7 +2285,7 @@ int drm_mode_getproperty_ioctl(struct dr
26320 + if (property->flags & DRM_MODE_PROP_ENUM) {
26321 + if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
26322 + copied = 0;
26323 +- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
26324 ++ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
26325 + list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
26326 +
26327 + if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
26328 +@@ -2308,7 +2308,7 @@ int drm_mode_getproperty_ioctl(struct dr
26329 + if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
26330 + copied = 0;
26331 + blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
26332 +- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
26333 ++ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
26334 +
26335 + list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
26336 + if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
26337 +@@ -2369,7 +2369,7 @@ int drm_mode_getblob_ioctl(struct drm_de
26338 + struct drm_mode_get_blob *out_resp = data;
26339 + struct drm_property_blob *blob;
26340 + int ret = 0;
26341 +- void *blob_ptr;
26342 ++ void __user *blob_ptr;
26343 +
26344 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
26345 + return -EINVAL;
26346 +@@ -2383,7 +2383,7 @@ int drm_mode_getblob_ioctl(struct drm_de
26347 + blob = obj_to_blob(obj);
26348 +
26349 + if (out_resp->length == blob->length) {
26350 +- blob_ptr = (void *)(unsigned long)out_resp->data;
26351 ++ blob_ptr = (void __user *)(unsigned long)out_resp->data;
26352 + if (copy_to_user(blob_ptr, blob->data, blob->length)){
26353 + ret = -EFAULT;
26354 + goto done;
26355 +diff -urNp linux-3.1.1/drivers/gpu/drm/drm_crtc_helper.c linux-3.1.1/drivers/gpu/drm/drm_crtc_helper.c
26356 +--- linux-3.1.1/drivers/gpu/drm/drm_crtc_helper.c 2011-11-11 15:19:27.000000000 -0500
26357 ++++ linux-3.1.1/drivers/gpu/drm/drm_crtc_helper.c 2011-11-16 18:40:10.000000000 -0500
26358 +@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
26359 + struct drm_crtc *tmp;
26360 + int crtc_mask = 1;
26361 +
26362 +- WARN(!crtc, "checking null crtc?\n");
26363 ++ BUG_ON(!crtc);
26364 +
26365 + dev = crtc->dev;
26366 +
26367 +@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
26368 + struct drm_encoder *encoder;
26369 + bool ret = true;
26370 +
26371 ++ pax_track_stack();
26372 ++
26373 + crtc->enabled = drm_helper_crtc_in_use(crtc);
26374 + if (!crtc->enabled)
26375 + return true;
26376 +diff -urNp linux-3.1.1/drivers/gpu/drm/drm_drv.c linux-3.1.1/drivers/gpu/drm/drm_drv.c
26377 +--- linux-3.1.1/drivers/gpu/drm/drm_drv.c 2011-11-11 15:19:27.000000000 -0500
26378 ++++ linux-3.1.1/drivers/gpu/drm/drm_drv.c 2011-11-16 18:39:07.000000000 -0500
26379 +@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
26380 + /**
26381 + * Copy and IOCTL return string to user space
26382 + */
26383 +-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
26384 ++static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
26385 + {
26386 + int len;
26387 +
26388 +@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
26389 +
26390 + dev = file_priv->minor->dev;
26391 + atomic_inc(&dev->ioctl_count);
26392 +- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
26393 ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
26394 + ++file_priv->ioctl_count;
26395 +
26396 + DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
26397 +diff -urNp linux-3.1.1/drivers/gpu/drm/drm_fops.c linux-3.1.1/drivers/gpu/drm/drm_fops.c
26398 +--- linux-3.1.1/drivers/gpu/drm/drm_fops.c 2011-11-11 15:19:27.000000000 -0500
26399 ++++ linux-3.1.1/drivers/gpu/drm/drm_fops.c 2011-11-16 18:39:07.000000000 -0500
26400 +@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
26401 + }
26402 +
26403 + for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
26404 +- atomic_set(&dev->counts[i], 0);
26405 ++ atomic_set_unchecked(&dev->counts[i], 0);
26406 +
26407 + dev->sigdata.lock = NULL;
26408 +
26409 +@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
26410 +
26411 + retcode = drm_open_helper(inode, filp, dev);
26412 + if (!retcode) {
26413 +- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
26414 +- if (!dev->open_count++)
26415 ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
26416 ++ if (local_inc_return(&dev->open_count) == 1)
26417 + retcode = drm_setup(dev);
26418 + }
26419 + if (!retcode) {
26420 +@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
26421 +
26422 + mutex_lock(&drm_global_mutex);
26423 +
26424 +- DRM_DEBUG("open_count = %d\n", dev->open_count);
26425 ++ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
26426 +
26427 + if (dev->driver->preclose)
26428 + dev->driver->preclose(dev, file_priv);
26429 +@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
26430 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
26431 + task_pid_nr(current),
26432 + (long)old_encode_dev(file_priv->minor->device),
26433 +- dev->open_count);
26434 ++ local_read(&dev->open_count));
26435 +
26436 + /* if the master has gone away we can't do anything with the lock */
26437 + if (file_priv->minor->master)
26438 +@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
26439 + * End inline drm_release
26440 + */
26441 +
26442 +- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
26443 +- if (!--dev->open_count) {
26444 ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
26445 ++ if (local_dec_and_test(&dev->open_count)) {
26446 + if (atomic_read(&dev->ioctl_count)) {
26447 + DRM_ERROR("Device busy: %d\n",
26448 + atomic_read(&dev->ioctl_count));
26449 +diff -urNp linux-3.1.1/drivers/gpu/drm/drm_global.c linux-3.1.1/drivers/gpu/drm/drm_global.c
26450 +--- linux-3.1.1/drivers/gpu/drm/drm_global.c 2011-11-11 15:19:27.000000000 -0500
26451 ++++ linux-3.1.1/drivers/gpu/drm/drm_global.c 2011-11-16 18:39:07.000000000 -0500
26452 +@@ -36,7 +36,7 @@
26453 + struct drm_global_item {
26454 + struct mutex mutex;
26455 + void *object;
26456 +- int refcount;
26457 ++ atomic_t refcount;
26458 + };
26459 +
26460 + static struct drm_global_item glob[DRM_GLOBAL_NUM];
26461 +@@ -49,7 +49,7 @@ void drm_global_init(void)
26462 + struct drm_global_item *item = &glob[i];
26463 + mutex_init(&item->mutex);
26464 + item->object = NULL;
26465 +- item->refcount = 0;
26466 ++ atomic_set(&item->refcount, 0);
26467 + }
26468 + }
26469 +
26470 +@@ -59,7 +59,7 @@ void drm_global_release(void)
26471 + for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
26472 + struct drm_global_item *item = &glob[i];
26473 + BUG_ON(item->object != NULL);
26474 +- BUG_ON(item->refcount != 0);
26475 ++ BUG_ON(atomic_read(&item->refcount) != 0);
26476 + }
26477 + }
26478 +
26479 +@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
26480 + void *object;
26481 +
26482 + mutex_lock(&item->mutex);
26483 +- if (item->refcount == 0) {
26484 ++ if (atomic_read(&item->refcount) == 0) {
26485 + item->object = kzalloc(ref->size, GFP_KERNEL);
26486 + if (unlikely(item->object == NULL)) {
26487 + ret = -ENOMEM;
26488 +@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
26489 + goto out_err;
26490 +
26491 + }
26492 +- ++item->refcount;
26493 ++ atomic_inc(&item->refcount);
26494 + ref->object = item->object;
26495 + object = item->object;
26496 + mutex_unlock(&item->mutex);
26497 +@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
26498 + struct drm_global_item *item = &glob[ref->global_type];
26499 +
26500 + mutex_lock(&item->mutex);
26501 +- BUG_ON(item->refcount == 0);
26502 ++ BUG_ON(atomic_read(&item->refcount) == 0);
26503 + BUG_ON(ref->object != item->object);
26504 +- if (--item->refcount == 0) {
26505 ++ if (atomic_dec_and_test(&item->refcount)) {
26506 + ref->release(ref);
26507 + item->object = NULL;
26508 + }
26509 +diff -urNp linux-3.1.1/drivers/gpu/drm/drm_info.c linux-3.1.1/drivers/gpu/drm/drm_info.c
26510 +--- linux-3.1.1/drivers/gpu/drm/drm_info.c 2011-11-11 15:19:27.000000000 -0500
26511 ++++ linux-3.1.1/drivers/gpu/drm/drm_info.c 2011-11-16 18:40:10.000000000 -0500
26512 +@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
26513 + struct drm_local_map *map;
26514 + struct drm_map_list *r_list;
26515 +
26516 +- /* Hardcoded from _DRM_FRAME_BUFFER,
26517 +- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
26518 +- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
26519 +- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
26520 ++ static const char * const types[] = {
26521 ++ [_DRM_FRAME_BUFFER] = "FB",
26522 ++ [_DRM_REGISTERS] = "REG",
26523 ++ [_DRM_SHM] = "SHM",
26524 ++ [_DRM_AGP] = "AGP",
26525 ++ [_DRM_SCATTER_GATHER] = "SG",
26526 ++ [_DRM_CONSISTENT] = "PCI",
26527 ++ [_DRM_GEM] = "GEM" };
26528 + const char *type;
26529 + int i;
26530 +
26531 +@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
26532 + map = r_list->map;
26533 + if (!map)
26534 + continue;
26535 +- if (map->type < 0 || map->type > 5)
26536 ++ if (map->type >= ARRAY_SIZE(types))
26537 + type = "??";
26538 + else
26539 + type = types[map->type];
26540 +@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
26541 + vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
26542 + vma->vm_flags & VM_LOCKED ? 'l' : '-',
26543 + vma->vm_flags & VM_IO ? 'i' : '-',
26544 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
26545 ++ 0);
26546 ++#else
26547 + vma->vm_pgoff);
26548 ++#endif
26549 +
26550 + #if defined(__i386__)
26551 + pgprot = pgprot_val(vma->vm_page_prot);
26552 +diff -urNp linux-3.1.1/drivers/gpu/drm/drm_ioc32.c linux-3.1.1/drivers/gpu/drm/drm_ioc32.c
26553 +--- linux-3.1.1/drivers/gpu/drm/drm_ioc32.c 2011-11-11 15:19:27.000000000 -0500
26554 ++++ linux-3.1.1/drivers/gpu/drm/drm_ioc32.c 2011-11-16 18:39:07.000000000 -0500
26555 +@@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct fi
26556 + request = compat_alloc_user_space(nbytes);
26557 + if (!access_ok(VERIFY_WRITE, request, nbytes))
26558 + return -EFAULT;
26559 +- list = (struct drm_buf_desc *) (request + 1);
26560 ++ list = (struct drm_buf_desc __user *) (request + 1);
26561 +
26562 + if (__put_user(count, &request->count)
26563 + || __put_user(list, &request->list))
26564 +@@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct fil
26565 + request = compat_alloc_user_space(nbytes);
26566 + if (!access_ok(VERIFY_WRITE, request, nbytes))
26567 + return -EFAULT;
26568 +- list = (struct drm_buf_pub *) (request + 1);
26569 ++ list = (struct drm_buf_pub __user *) (request + 1);
26570 +
26571 + if (__put_user(count, &request->count)
26572 + || __put_user(list, &request->list))
26573 +diff -urNp linux-3.1.1/drivers/gpu/drm/drm_ioctl.c linux-3.1.1/drivers/gpu/drm/drm_ioctl.c
26574 +--- linux-3.1.1/drivers/gpu/drm/drm_ioctl.c 2011-11-11 15:19:27.000000000 -0500
26575 ++++ linux-3.1.1/drivers/gpu/drm/drm_ioctl.c 2011-11-16 18:39:07.000000000 -0500
26576 +@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
26577 + stats->data[i].value =
26578 + (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
26579 + else
26580 +- stats->data[i].value = atomic_read(&dev->counts[i]);
26581 ++ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
26582 + stats->data[i].type = dev->types[i];
26583 + }
26584 +
26585 +diff -urNp linux-3.1.1/drivers/gpu/drm/drm_lock.c linux-3.1.1/drivers/gpu/drm/drm_lock.c
26586 +--- linux-3.1.1/drivers/gpu/drm/drm_lock.c 2011-11-11 15:19:27.000000000 -0500
26587 ++++ linux-3.1.1/drivers/gpu/drm/drm_lock.c 2011-11-16 18:39:07.000000000 -0500
26588 +@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
26589 + if (drm_lock_take(&master->lock, lock->context)) {
26590 + master->lock.file_priv = file_priv;
26591 + master->lock.lock_time = jiffies;
26592 +- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
26593 ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
26594 + break; /* Got lock */
26595 + }
26596 +
26597 +@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
26598 + return -EINVAL;
26599 + }
26600 +
26601 +- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
26602 ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
26603 +
26604 + if (drm_lock_free(&master->lock, lock->context)) {
26605 + /* FIXME: Should really bail out here. */
26606 +diff -urNp linux-3.1.1/drivers/gpu/drm/i810/i810_dma.c linux-3.1.1/drivers/gpu/drm/i810/i810_dma.c
26607 +--- linux-3.1.1/drivers/gpu/drm/i810/i810_dma.c 2011-11-11 15:19:27.000000000 -0500
26608 ++++ linux-3.1.1/drivers/gpu/drm/i810/i810_dma.c 2011-11-16 18:39:07.000000000 -0500
26609 +@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
26610 + dma->buflist[vertex->idx],
26611 + vertex->discard, vertex->used);
26612 +
26613 +- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26614 +- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26615 ++ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
26616 ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26617 + sarea_priv->last_enqueue = dev_priv->counter - 1;
26618 + sarea_priv->last_dispatch = (int)hw_status[5];
26619 +
26620 +@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
26621 + i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
26622 + mc->last_render);
26623 +
26624 +- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26625 +- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
26626 ++ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
26627 ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
26628 + sarea_priv->last_enqueue = dev_priv->counter - 1;
26629 + sarea_priv->last_dispatch = (int)hw_status[5];
26630 +
26631 +diff -urNp linux-3.1.1/drivers/gpu/drm/i810/i810_drv.h linux-3.1.1/drivers/gpu/drm/i810/i810_drv.h
26632 +--- linux-3.1.1/drivers/gpu/drm/i810/i810_drv.h 2011-11-11 15:19:27.000000000 -0500
26633 ++++ linux-3.1.1/drivers/gpu/drm/i810/i810_drv.h 2011-11-16 18:39:07.000000000 -0500
26634 +@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
26635 + int page_flipping;
26636 +
26637 + wait_queue_head_t irq_queue;
26638 +- atomic_t irq_received;
26639 +- atomic_t irq_emitted;
26640 ++ atomic_unchecked_t irq_received;
26641 ++ atomic_unchecked_t irq_emitted;
26642 +
26643 + int front_offset;
26644 + } drm_i810_private_t;
26645 +diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_debugfs.c linux-3.1.1/drivers/gpu/drm/i915/i915_debugfs.c
26646 +--- linux-3.1.1/drivers/gpu/drm/i915/i915_debugfs.c 2011-11-11 15:19:27.000000000 -0500
26647 ++++ linux-3.1.1/drivers/gpu/drm/i915/i915_debugfs.c 2011-11-16 18:39:07.000000000 -0500
26648 +@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct se
26649 + I915_READ(GTIMR));
26650 + }
26651 + seq_printf(m, "Interrupts received: %d\n",
26652 +- atomic_read(&dev_priv->irq_received));
26653 ++ atomic_read_unchecked(&dev_priv->irq_received));
26654 + for (i = 0; i < I915_NUM_RINGS; i++) {
26655 + if (IS_GEN6(dev) || IS_GEN7(dev)) {
26656 + seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
26657 +@@ -1185,7 +1185,7 @@ static int i915_opregion(struct seq_file
26658 + return ret;
26659 +
26660 + if (opregion->header)
26661 +- seq_write(m, opregion->header, OPREGION_SIZE);
26662 ++ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
26663 +
26664 + mutex_unlock(&dev->struct_mutex);
26665 +
26666 +diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_dma.c linux-3.1.1/drivers/gpu/drm/i915/i915_dma.c
26667 +--- linux-3.1.1/drivers/gpu/drm/i915/i915_dma.c 2011-11-11 15:19:27.000000000 -0500
26668 ++++ linux-3.1.1/drivers/gpu/drm/i915/i915_dma.c 2011-11-16 18:39:07.000000000 -0500
26669 +@@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(s
26670 + bool can_switch;
26671 +
26672 + spin_lock(&dev->count_lock);
26673 +- can_switch = (dev->open_count == 0);
26674 ++ can_switch = (local_read(&dev->open_count) == 0);
26675 + spin_unlock(&dev->count_lock);
26676 + return can_switch;
26677 + }
26678 +diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_drv.h linux-3.1.1/drivers/gpu/drm/i915/i915_drv.h
26679 +--- linux-3.1.1/drivers/gpu/drm/i915/i915_drv.h 2011-11-11 15:19:27.000000000 -0500
26680 ++++ linux-3.1.1/drivers/gpu/drm/i915/i915_drv.h 2011-11-16 18:39:07.000000000 -0500
26681 +@@ -222,7 +222,7 @@ struct drm_i915_display_funcs {
26682 + /* render clock increase/decrease */
26683 + /* display clock increase/decrease */
26684 + /* pll clock increase/decrease */
26685 +-};
26686 ++} __no_const;
26687 +
26688 + struct intel_device_info {
26689 + u8 gen;
26690 +@@ -305,7 +305,7 @@ typedef struct drm_i915_private {
26691 + int current_page;
26692 + int page_flipping;
26693 +
26694 +- atomic_t irq_received;
26695 ++ atomic_unchecked_t irq_received;
26696 +
26697 + /* protects the irq masks */
26698 + spinlock_t irq_lock;
26699 +@@ -882,7 +882,7 @@ struct drm_i915_gem_object {
26700 + * will be page flipped away on the next vblank. When it
26701 + * reaches 0, dev_priv->pending_flip_queue will be woken up.
26702 + */
26703 +- atomic_t pending_flip;
26704 ++ atomic_unchecked_t pending_flip;
26705 + };
26706 +
26707 + #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
26708 +@@ -1262,7 +1262,7 @@ extern int intel_setup_gmbus(struct drm_
26709 + extern void intel_teardown_gmbus(struct drm_device *dev);
26710 + extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
26711 + extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
26712 +-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26713 ++static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
26714 + {
26715 + return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
26716 + }
26717 +diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.1.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c
26718 +--- linux-3.1.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-11-11 15:19:27.000000000 -0500
26719 ++++ linux-3.1.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-11-16 18:39:07.000000000 -0500
26720 +@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct
26721 + i915_gem_clflush_object(obj);
26722 +
26723 + if (obj->base.pending_write_domain)
26724 +- cd->flips |= atomic_read(&obj->pending_flip);
26725 ++ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
26726 +
26727 + /* The actual obj->write_domain will be updated with
26728 + * pending_write_domain after we emit the accumulated flush for all
26729 +diff -urNp linux-3.1.1/drivers/gpu/drm/i915/i915_irq.c linux-3.1.1/drivers/gpu/drm/i915/i915_irq.c
26730 +--- linux-3.1.1/drivers/gpu/drm/i915/i915_irq.c 2011-11-11 15:19:27.000000000 -0500
26731 ++++ linux-3.1.1/drivers/gpu/drm/i915/i915_irq.c 2011-11-16 18:39:07.000000000 -0500
26732 +@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler
26733 + u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
26734 + struct drm_i915_master_private *master_priv;
26735 +
26736 +- atomic_inc(&dev_priv->irq_received);
26737 ++ atomic_inc_unchecked(&dev_priv->irq_received);
26738 +
26739 + /* disable master interrupt before clearing iir */
26740 + de_ier = I915_READ(DEIER);
26741 +@@ -565,7 +565,7 @@ static irqreturn_t ironlake_irq_handler(
26742 + struct drm_i915_master_private *master_priv;
26743 + u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
26744 +
26745 +- atomic_inc(&dev_priv->irq_received);
26746 ++ atomic_inc_unchecked(&dev_priv->irq_received);
26747 +
26748 + if (IS_GEN6(dev))
26749 + bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
26750 +@@ -1228,7 +1228,7 @@ static irqreturn_t i915_driver_irq_handl
26751 + int ret = IRQ_NONE, pipe;
26752 + bool blc_event = false;
26753 +
26754 +- atomic_inc(&dev_priv->irq_received);
26755 ++ atomic_inc_unchecked(&dev_priv->irq_received);
26756 +
26757 + iir = I915_READ(IIR);
26758 +
26759 +@@ -1740,7 +1740,7 @@ static void ironlake_irq_preinstall(stru
26760 + {
26761 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26762 +
26763 +- atomic_set(&dev_priv->irq_received, 0);
26764 ++ atomic_set_unchecked(&dev_priv->irq_received, 0);
26765 +
26766 + INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26767 + INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26768 +@@ -1904,7 +1904,7 @@ static void i915_driver_irq_preinstall(s
26769 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
26770 + int pipe;
26771 +
26772 +- atomic_set(&dev_priv->irq_received, 0);
26773 ++ atomic_set_unchecked(&dev_priv->irq_received, 0);
26774 +
26775 + INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
26776 + INIT_WORK(&dev_priv->error_work, i915_error_work_func);
26777 +diff -urNp linux-3.1.1/drivers/gpu/drm/i915/intel_display.c linux-3.1.1/drivers/gpu/drm/i915/intel_display.c
26778 +--- linux-3.1.1/drivers/gpu/drm/i915/intel_display.c 2011-11-11 15:19:27.000000000 -0500
26779 ++++ linux-3.1.1/drivers/gpu/drm/i915/intel_display.c 2011-11-16 18:39:07.000000000 -0500
26780 +@@ -2205,7 +2205,7 @@ intel_pipe_set_base(struct drm_crtc *crt
26781 +
26782 + wait_event(dev_priv->pending_flip_queue,
26783 + atomic_read(&dev_priv->mm.wedged) ||
26784 +- atomic_read(&obj->pending_flip) == 0);
26785 ++ atomic_read_unchecked(&obj->pending_flip) == 0);
26786 +
26787 + /* Big Hammer, we also need to ensure that any pending
26788 + * MI_WAIT_FOR_EVENT inside a user batch buffer on the
26789 +@@ -2824,7 +2824,7 @@ static void intel_crtc_wait_for_pending_
26790 + obj = to_intel_framebuffer(crtc->fb)->obj;
26791 + dev_priv = crtc->dev->dev_private;
26792 + wait_event(dev_priv->pending_flip_queue,
26793 +- atomic_read(&obj->pending_flip) == 0);
26794 ++ atomic_read_unchecked(&obj->pending_flip) == 0);
26795 + }
26796 +
26797 + static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
26798 +@@ -6644,7 +6644,7 @@ static void do_intel_finish_page_flip(st
26799 +
26800 + atomic_clear_mask(1 << intel_crtc->plane,
26801 + &obj->pending_flip.counter);
26802 +- if (atomic_read(&obj->pending_flip) == 0)
26803 ++ if (atomic_read_unchecked(&obj->pending_flip) == 0)
26804 + wake_up(&dev_priv->pending_flip_queue);
26805 +
26806 + schedule_work(&work->work);
26807 +@@ -6933,7 +6933,7 @@ static int intel_crtc_page_flip(struct d
26808 + /* Block clients from rendering to the new back buffer until
26809 + * the flip occurs and the object is no longer visible.
26810 + */
26811 +- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26812 ++ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26813 +
26814 + ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
26815 + if (ret)
26816 +@@ -6947,7 +6947,7 @@ static int intel_crtc_page_flip(struct d
26817 + return 0;
26818 +
26819 + cleanup_pending:
26820 +- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26821 ++ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
26822 + cleanup_objs:
26823 + drm_gem_object_unreference(&work->old_fb_obj->base);
26824 + drm_gem_object_unreference(&obj->base);
26825 +diff -urNp linux-3.1.1/drivers/gpu/drm/mga/mga_drv.h linux-3.1.1/drivers/gpu/drm/mga/mga_drv.h
26826 +--- linux-3.1.1/drivers/gpu/drm/mga/mga_drv.h 2011-11-11 15:19:27.000000000 -0500
26827 ++++ linux-3.1.1/drivers/gpu/drm/mga/mga_drv.h 2011-11-16 18:39:07.000000000 -0500
26828 +@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
26829 + u32 clear_cmd;
26830 + u32 maccess;
26831 +
26832 +- atomic_t vbl_received; /**< Number of vblanks received. */
26833 ++ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
26834 + wait_queue_head_t fence_queue;
26835 +- atomic_t last_fence_retired;
26836 ++ atomic_unchecked_t last_fence_retired;
26837 + u32 next_fence_to_post;
26838 +
26839 + unsigned int fb_cpp;
26840 +diff -urNp linux-3.1.1/drivers/gpu/drm/mga/mga_irq.c linux-3.1.1/drivers/gpu/drm/mga/mga_irq.c
26841 +--- linux-3.1.1/drivers/gpu/drm/mga/mga_irq.c 2011-11-11 15:19:27.000000000 -0500
26842 ++++ linux-3.1.1/drivers/gpu/drm/mga/mga_irq.c 2011-11-16 18:39:07.000000000 -0500
26843 +@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
26844 + if (crtc != 0)
26845 + return 0;
26846 +
26847 +- return atomic_read(&dev_priv->vbl_received);
26848 ++ return atomic_read_unchecked(&dev_priv->vbl_received);
26849 + }
26850 +
26851 +
26852 +@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26853 + /* VBLANK interrupt */
26854 + if (status & MGA_VLINEPEN) {
26855 + MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
26856 +- atomic_inc(&dev_priv->vbl_received);
26857 ++ atomic_inc_unchecked(&dev_priv->vbl_received);
26858 + drm_handle_vblank(dev, 0);
26859 + handled = 1;
26860 + }
26861 +@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
26862 + if ((prim_start & ~0x03) != (prim_end & ~0x03))
26863 + MGA_WRITE(MGA_PRIMEND, prim_end);
26864 +
26865 +- atomic_inc(&dev_priv->last_fence_retired);
26866 ++ atomic_inc_unchecked(&dev_priv->last_fence_retired);
26867 + DRM_WAKEUP(&dev_priv->fence_queue);
26868 + handled = 1;
26869 + }
26870 +@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
26871 + * using fences.
26872 + */
26873 + DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
26874 +- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
26875 ++ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
26876 + - *sequence) <= (1 << 23)));
26877 +
26878 + *sequence = cur_fence;
26879 +diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_bios.c
26880 +--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-11-11 15:19:27.000000000 -0500
26881 ++++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_bios.c 2011-11-16 18:39:07.000000000 -0500
26882 +@@ -201,7 +201,7 @@ struct methods {
26883 + const char desc[8];
26884 + void (*loadbios)(struct drm_device *, uint8_t *);
26885 + const bool rw;
26886 +-};
26887 ++} __do_const;
26888 +
26889 + static struct methods shadow_methods[] = {
26890 + { "PRAMIN", load_vbios_pramin, true },
26891 +@@ -5489,7 +5489,7 @@ parse_bit_displayport_tbl_entry(struct d
26892 + struct bit_table {
26893 + const char id;
26894 + int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
26895 +-};
26896 ++} __no_const;
26897 +
26898 + #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
26899 +
26900 +diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_drv.h linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_drv.h
26901 +--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-11-11 15:19:27.000000000 -0500
26902 ++++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-11-16 18:39:07.000000000 -0500
26903 +@@ -238,7 +238,7 @@ struct nouveau_channel {
26904 + struct list_head pending;
26905 + uint32_t sequence;
26906 + uint32_t sequence_ack;
26907 +- atomic_t last_sequence_irq;
26908 ++ atomic_unchecked_t last_sequence_irq;
26909 + struct nouveau_vma vma;
26910 + } fence;
26911 +
26912 +@@ -319,7 +319,7 @@ struct nouveau_exec_engine {
26913 + u32 handle, u16 class);
26914 + void (*set_tile_region)(struct drm_device *dev, int i);
26915 + void (*tlb_flush)(struct drm_device *, int engine);
26916 +-};
26917 ++} __no_const;
26918 +
26919 + struct nouveau_instmem_engine {
26920 + void *priv;
26921 +@@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
26922 + struct nouveau_mc_engine {
26923 + int (*init)(struct drm_device *dev);
26924 + void (*takedown)(struct drm_device *dev);
26925 +-};
26926 ++} __no_const;
26927 +
26928 + struct nouveau_timer_engine {
26929 + int (*init)(struct drm_device *dev);
26930 + void (*takedown)(struct drm_device *dev);
26931 + uint64_t (*read)(struct drm_device *dev);
26932 +-};
26933 ++} __no_const;
26934 +
26935 + struct nouveau_fb_engine {
26936 + int num_tiles;
26937 +@@ -513,7 +513,7 @@ struct nouveau_vram_engine {
26938 + void (*put)(struct drm_device *, struct nouveau_mem **);
26939 +
26940 + bool (*flags_valid)(struct drm_device *, u32 tile_flags);
26941 +-};
26942 ++} __no_const;
26943 +
26944 + struct nouveau_engine {
26945 + struct nouveau_instmem_engine instmem;
26946 +@@ -660,7 +660,7 @@ struct drm_nouveau_private {
26947 + struct drm_global_reference mem_global_ref;
26948 + struct ttm_bo_global_ref bo_global_ref;
26949 + struct ttm_bo_device bdev;
26950 +- atomic_t validate_sequence;
26951 ++ atomic_unchecked_t validate_sequence;
26952 + } ttm;
26953 +
26954 + struct {
26955 +diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_fence.c linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_fence.c
26956 +--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-11-11 15:19:27.000000000 -0500
26957 ++++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-11-16 18:39:07.000000000 -0500
26958 +@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
26959 + if (USE_REFCNT(dev))
26960 + sequence = nvchan_rd32(chan, 0x48);
26961 + else
26962 +- sequence = atomic_read(&chan->fence.last_sequence_irq);
26963 ++ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
26964 +
26965 + if (chan->fence.sequence_ack == sequence)
26966 + goto out;
26967 +@@ -541,7 +541,7 @@ nouveau_fence_channel_init(struct nouvea
26968 +
26969 + INIT_LIST_HEAD(&chan->fence.pending);
26970 + spin_lock_init(&chan->fence.lock);
26971 +- atomic_set(&chan->fence.last_sequence_irq, 0);
26972 ++ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
26973 + return 0;
26974 + }
26975 +
26976 +diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_gem.c linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_gem.c
26977 +--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-11-11 15:19:27.000000000 -0500
26978 ++++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-11-16 18:39:07.000000000 -0500
26979 +@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *ch
26980 + int trycnt = 0;
26981 + int ret, i;
26982 +
26983 +- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
26984 ++ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
26985 + retry:
26986 + if (++trycnt > 100000) {
26987 + NV_ERROR(dev, "%s failed and gave up.\n", __func__);
26988 +diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_state.c linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_state.c
26989 +--- linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_state.c 2011-11-11 15:19:27.000000000 -0500
26990 ++++ linux-3.1.1/drivers/gpu/drm/nouveau/nouveau_state.c 2011-11-16 18:39:07.000000000 -0500
26991 +@@ -496,7 +496,7 @@ static bool nouveau_switcheroo_can_switc
26992 + bool can_switch;
26993 +
26994 + spin_lock(&dev->count_lock);
26995 +- can_switch = (dev->open_count == 0);
26996 ++ can_switch = (local_read(&dev->open_count) == 0);
26997 + spin_unlock(&dev->count_lock);
26998 + return can_switch;
26999 + }
27000 +diff -urNp linux-3.1.1/drivers/gpu/drm/nouveau/nv04_graph.c linux-3.1.1/drivers/gpu/drm/nouveau/nv04_graph.c
27001 +--- linux-3.1.1/drivers/gpu/drm/nouveau/nv04_graph.c 2011-11-11 15:19:27.000000000 -0500
27002 ++++ linux-3.1.1/drivers/gpu/drm/nouveau/nv04_graph.c 2011-11-16 18:39:07.000000000 -0500
27003 +@@ -554,7 +554,7 @@ static int
27004 + nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
27005 + u32 class, u32 mthd, u32 data)
27006 + {
27007 +- atomic_set(&chan->fence.last_sequence_irq, data);
27008 ++ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
27009 + return 0;
27010 + }
27011 +
27012 +diff -urNp linux-3.1.1/drivers/gpu/drm/r128/r128_cce.c linux-3.1.1/drivers/gpu/drm/r128/r128_cce.c
27013 +--- linux-3.1.1/drivers/gpu/drm/r128/r128_cce.c 2011-11-11 15:19:27.000000000 -0500
27014 ++++ linux-3.1.1/drivers/gpu/drm/r128/r128_cce.c 2011-11-16 18:39:07.000000000 -0500
27015 +@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
27016 +
27017 + /* GH: Simple idle check.
27018 + */
27019 +- atomic_set(&dev_priv->idle_count, 0);
27020 ++ atomic_set_unchecked(&dev_priv->idle_count, 0);
27021 +
27022 + /* We don't support anything other than bus-mastering ring mode,
27023 + * but the ring can be in either AGP or PCI space for the ring
27024 +diff -urNp linux-3.1.1/drivers/gpu/drm/r128/r128_drv.h linux-3.1.1/drivers/gpu/drm/r128/r128_drv.h
27025 +--- linux-3.1.1/drivers/gpu/drm/r128/r128_drv.h 2011-11-11 15:19:27.000000000 -0500
27026 ++++ linux-3.1.1/drivers/gpu/drm/r128/r128_drv.h 2011-11-16 18:39:07.000000000 -0500
27027 +@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
27028 + int is_pci;
27029 + unsigned long cce_buffers_offset;
27030 +
27031 +- atomic_t idle_count;
27032 ++ atomic_unchecked_t idle_count;
27033 +
27034 + int page_flipping;
27035 + int current_page;
27036 + u32 crtc_offset;
27037 + u32 crtc_offset_cntl;
27038 +
27039 +- atomic_t vbl_received;
27040 ++ atomic_unchecked_t vbl_received;
27041 +
27042 + u32 color_fmt;
27043 + unsigned int front_offset;
27044 +diff -urNp linux-3.1.1/drivers/gpu/drm/r128/r128_irq.c linux-3.1.1/drivers/gpu/drm/r128/r128_irq.c
27045 +--- linux-3.1.1/drivers/gpu/drm/r128/r128_irq.c 2011-11-11 15:19:27.000000000 -0500
27046 ++++ linux-3.1.1/drivers/gpu/drm/r128/r128_irq.c 2011-11-16 18:39:07.000000000 -0500
27047 +@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
27048 + if (crtc != 0)
27049 + return 0;
27050 +
27051 +- return atomic_read(&dev_priv->vbl_received);
27052 ++ return atomic_read_unchecked(&dev_priv->vbl_received);
27053 + }
27054 +
27055 + irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
27056 +@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
27057 + /* VBLANK interrupt */
27058 + if (status & R128_CRTC_VBLANK_INT) {
27059 + R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
27060 +- atomic_inc(&dev_priv->vbl_received);
27061 ++ atomic_inc_unchecked(&dev_priv->vbl_received);
27062 + drm_handle_vblank(dev, 0);
27063 + return IRQ_HANDLED;
27064 + }
27065 +diff -urNp linux-3.1.1/drivers/gpu/drm/r128/r128_state.c linux-3.1.1/drivers/gpu/drm/r128/r128_state.c
27066 +--- linux-3.1.1/drivers/gpu/drm/r128/r128_state.c 2011-11-11 15:19:27.000000000 -0500
27067 ++++ linux-3.1.1/drivers/gpu/drm/r128/r128_state.c 2011-11-16 18:39:07.000000000 -0500
27068 +@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
27069 +
27070 + static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
27071 + {
27072 +- if (atomic_read(&dev_priv->idle_count) == 0)
27073 ++ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
27074 + r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
27075 + else
27076 +- atomic_set(&dev_priv->idle_count, 0);
27077 ++ atomic_set_unchecked(&dev_priv->idle_count, 0);
27078 + }
27079 +
27080 + #endif
27081 +diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/atom.c linux-3.1.1/drivers/gpu/drm/radeon/atom.c
27082 +--- linux-3.1.1/drivers/gpu/drm/radeon/atom.c 2011-11-11 15:19:27.000000000 -0500
27083 ++++ linux-3.1.1/drivers/gpu/drm/radeon/atom.c 2011-11-16 19:09:42.000000000 -0500
27084 +@@ -1254,6 +1254,8 @@ struct atom_context *atom_parse(struct c
27085 + char name[512];
27086 + int i;
27087 +
27088 ++ pax_track_stack();
27089 ++
27090 + if (!ctx)
27091 + return NULL;
27092 +
27093 +diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/mkregtable.c linux-3.1.1/drivers/gpu/drm/radeon/mkregtable.c
27094 +--- linux-3.1.1/drivers/gpu/drm/radeon/mkregtable.c 2011-11-11 15:19:27.000000000 -0500
27095 ++++ linux-3.1.1/drivers/gpu/drm/radeon/mkregtable.c 2011-11-16 18:39:07.000000000 -0500
27096 +@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
27097 + regex_t mask_rex;
27098 + regmatch_t match[4];
27099 + char buf[1024];
27100 +- size_t end;
27101 ++ long end;
27102 + int len;
27103 + int done = 0;
27104 + int r;
27105 + unsigned o;
27106 + struct offset *offset;
27107 + char last_reg_s[10];
27108 +- int last_reg;
27109 ++ unsigned long last_reg;
27110 +
27111 + if (regcomp
27112 + (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
27113 +diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_atombios.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_atombios.c
27114 +--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_atombios.c 2011-11-11 15:19:27.000000000 -0500
27115 ++++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_atombios.c 2011-11-16 18:40:10.000000000 -0500
27116 +@@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
27117 + struct radeon_gpio_rec gpio;
27118 + struct radeon_hpd hpd;
27119 +
27120 ++ pax_track_stack();
27121 ++
27122 + if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
27123 + return false;
27124 +
27125 +diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_device.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_device.c
27126 +--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_device.c 2011-11-11 15:19:27.000000000 -0500
27127 ++++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_device.c 2011-11-16 18:39:07.000000000 -0500
27128 +@@ -684,7 +684,7 @@ static bool radeon_switcheroo_can_switch
27129 + bool can_switch;
27130 +
27131 + spin_lock(&dev->count_lock);
27132 +- can_switch = (dev->open_count == 0);
27133 ++ can_switch = (local_read(&dev->open_count) == 0);
27134 + spin_unlock(&dev->count_lock);
27135 + return can_switch;
27136 + }
27137 +diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_display.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_display.c
27138 +--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_display.c 2011-11-11 15:19:27.000000000 -0500
27139 ++++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_display.c 2011-11-16 18:40:10.000000000 -0500
27140 +@@ -925,6 +925,8 @@ void radeon_compute_pll_legacy(struct ra
27141 + uint32_t post_div;
27142 + u32 pll_out_min, pll_out_max;
27143 +
27144 ++ pax_track_stack();
27145 ++
27146 + DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
27147 + freq = freq * 1000;
27148 +
27149 +diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_drv.h linux-3.1.1/drivers/gpu/drm/radeon/radeon_drv.h
27150 +--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_drv.h 2011-11-11 15:19:27.000000000 -0500
27151 ++++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_drv.h 2011-11-16 18:39:07.000000000 -0500
27152 +@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
27153 +
27154 + /* SW interrupt */
27155 + wait_queue_head_t swi_queue;
27156 +- atomic_t swi_emitted;
27157 ++ atomic_unchecked_t swi_emitted;
27158 + int vblank_crtc;
27159 + uint32_t irq_enable_reg;
27160 + uint32_t r500_disp_irq_reg;
27161 +diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_fence.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_fence.c
27162 +--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_fence.c 2011-11-11 15:19:27.000000000 -0500
27163 ++++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_fence.c 2011-11-16 18:39:07.000000000 -0500
27164 +@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_devi
27165 + write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
27166 + return 0;
27167 + }
27168 +- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
27169 ++ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
27170 + if (!rdev->cp.ready)
27171 + /* FIXME: cp is not running assume everythings is done right
27172 + * away
27173 +@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct rade
27174 + return r;
27175 + }
27176 + radeon_fence_write(rdev, 0);
27177 +- atomic_set(&rdev->fence_drv.seq, 0);
27178 ++ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
27179 + INIT_LIST_HEAD(&rdev->fence_drv.created);
27180 + INIT_LIST_HEAD(&rdev->fence_drv.emited);
27181 + INIT_LIST_HEAD(&rdev->fence_drv.signaled);
27182 +diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon.h linux-3.1.1/drivers/gpu/drm/radeon/radeon.h
27183 +--- linux-3.1.1/drivers/gpu/drm/radeon/radeon.h 2011-11-11 15:19:27.000000000 -0500
27184 ++++ linux-3.1.1/drivers/gpu/drm/radeon/radeon.h 2011-11-16 18:39:07.000000000 -0500
27185 +@@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_d
27186 + */
27187 + struct radeon_fence_driver {
27188 + uint32_t scratch_reg;
27189 +- atomic_t seq;
27190 ++ atomic_unchecked_t seq;
27191 + uint32_t last_seq;
27192 + unsigned long last_jiffies;
27193 + unsigned long last_timeout;
27194 +@@ -962,7 +962,7 @@ struct radeon_asic {
27195 + void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
27196 + u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
27197 + void (*post_page_flip)(struct radeon_device *rdev, int crtc);
27198 +-};
27199 ++} __no_const;
27200 +
27201 + /*
27202 + * Asic structures
27203 +diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_ioc32.c
27204 +--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-11-11 15:19:27.000000000 -0500
27205 ++++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-11-16 18:39:07.000000000 -0500
27206 +@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
27207 + request = compat_alloc_user_space(sizeof(*request));
27208 + if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
27209 + || __put_user(req32.param, &request->param)
27210 +- || __put_user((void __user *)(unsigned long)req32.value,
27211 ++ || __put_user((unsigned long)req32.value,
27212 + &request->value))
27213 + return -EFAULT;
27214 +
27215 +diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_irq.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_irq.c
27216 +--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_irq.c 2011-11-11 15:19:27.000000000 -0500
27217 ++++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_irq.c 2011-11-16 18:39:07.000000000 -0500
27218 +@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
27219 + unsigned int ret;
27220 + RING_LOCALS;
27221 +
27222 +- atomic_inc(&dev_priv->swi_emitted);
27223 +- ret = atomic_read(&dev_priv->swi_emitted);
27224 ++ atomic_inc_unchecked(&dev_priv->swi_emitted);
27225 ++ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
27226 +
27227 + BEGIN_RING(4);
27228 + OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
27229 +@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
27230 + drm_radeon_private_t *dev_priv =
27231 + (drm_radeon_private_t *) dev->dev_private;
27232 +
27233 +- atomic_set(&dev_priv->swi_emitted, 0);
27234 ++ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
27235 + DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
27236 +
27237 + dev->max_vblank_count = 0x001fffff;
27238 +diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_state.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_state.c
27239 +--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_state.c 2011-11-11 15:19:27.000000000 -0500
27240 ++++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_state.c 2011-11-16 18:39:07.000000000 -0500
27241 +@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
27242 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
27243 + sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
27244 +
27245 +- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
27246 ++ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
27247 + sarea_priv->nbox * sizeof(depth_boxes[0])))
27248 + return -EFAULT;
27249 +
27250 +@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
27251 + {
27252 + drm_radeon_private_t *dev_priv = dev->dev_private;
27253 + drm_radeon_getparam_t *param = data;
27254 +- int value;
27255 ++ int value = 0;
27256 +
27257 + DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
27258 +
27259 +diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.1.1/drivers/gpu/drm/radeon/radeon_ttm.c
27260 +--- linux-3.1.1/drivers/gpu/drm/radeon/radeon_ttm.c 2011-11-11 15:19:27.000000000 -0500
27261 ++++ linux-3.1.1/drivers/gpu/drm/radeon/radeon_ttm.c 2011-11-16 18:39:07.000000000 -0500
27262 +@@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struc
27263 + }
27264 + if (unlikely(ttm_vm_ops == NULL)) {
27265 + ttm_vm_ops = vma->vm_ops;
27266 +- radeon_ttm_vm_ops = *ttm_vm_ops;
27267 +- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
27268 ++ pax_open_kernel();
27269 ++ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
27270 ++ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
27271 ++ pax_close_kernel();
27272 + }
27273 + vma->vm_ops = &radeon_ttm_vm_ops;
27274 + return 0;
27275 +diff -urNp linux-3.1.1/drivers/gpu/drm/radeon/rs690.c linux-3.1.1/drivers/gpu/drm/radeon/rs690.c
27276 +--- linux-3.1.1/drivers/gpu/drm/radeon/rs690.c 2011-11-11 15:19:27.000000000 -0500
27277 ++++ linux-3.1.1/drivers/gpu/drm/radeon/rs690.c 2011-11-16 18:39:07.000000000 -0500
27278 +@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
27279 + if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
27280 + rdev->pm.sideport_bandwidth.full)
27281 + rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
27282 +- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
27283 ++ read_delay_latency.full = dfixed_const(800 * 1000);
27284 + read_delay_latency.full = dfixed_div(read_delay_latency,
27285 + rdev->pm.igp_sideport_mclk);
27286 ++ a.full = dfixed_const(370);
27287 ++ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
27288 + } else {
27289 + if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
27290 + rdev->pm.k8_bandwidth.full)
27291 +diff -urNp linux-3.1.1/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.1.1/drivers/gpu/drm/ttm/ttm_page_alloc.c
27292 +--- linux-3.1.1/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-11-11 15:19:27.000000000 -0500
27293 ++++ linux-3.1.1/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-11-16 18:39:07.000000000 -0500
27294 +@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages
27295 + static int ttm_pool_mm_shrink(struct shrinker *shrink,
27296 + struct shrink_control *sc)
27297 + {
27298 +- static atomic_t start_pool = ATOMIC_INIT(0);
27299 ++ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
27300 + unsigned i;
27301 +- unsigned pool_offset = atomic_add_return(1, &start_pool);
27302 ++ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
27303 + struct ttm_page_pool *pool;
27304 + int shrink_pages = sc->nr_to_scan;
27305 +
27306 +diff -urNp linux-3.1.1/drivers/gpu/drm/via/via_drv.h linux-3.1.1/drivers/gpu/drm/via/via_drv.h
27307 +--- linux-3.1.1/drivers/gpu/drm/via/via_drv.h 2011-11-11 15:19:27.000000000 -0500
27308 ++++ linux-3.1.1/drivers/gpu/drm/via/via_drv.h 2011-11-16 18:39:07.000000000 -0500
27309 +@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
27310 + typedef uint32_t maskarray_t[5];
27311 +
27312 + typedef struct drm_via_irq {
27313 +- atomic_t irq_received;
27314 ++ atomic_unchecked_t irq_received;
27315 + uint32_t pending_mask;
27316 + uint32_t enable_mask;
27317 + wait_queue_head_t irq_queue;
27318 +@@ -75,7 +75,7 @@ typedef struct drm_via_private {
27319 + struct timeval last_vblank;
27320 + int last_vblank_valid;
27321 + unsigned usec_per_vblank;
27322 +- atomic_t vbl_received;
27323 ++ atomic_unchecked_t vbl_received;
27324 + drm_via_state_t hc_state;
27325 + char pci_buf[VIA_PCI_BUF_SIZE];
27326 + const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
27327 +diff -urNp linux-3.1.1/drivers/gpu/drm/via/via_irq.c linux-3.1.1/drivers/gpu/drm/via/via_irq.c
27328 +--- linux-3.1.1/drivers/gpu/drm/via/via_irq.c 2011-11-11 15:19:27.000000000 -0500
27329 ++++ linux-3.1.1/drivers/gpu/drm/via/via_irq.c 2011-11-16 18:39:07.000000000 -0500
27330 +@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
27331 + if (crtc != 0)
27332 + return 0;
27333 +
27334 +- return atomic_read(&dev_priv->vbl_received);
27335 ++ return atomic_read_unchecked(&dev_priv->vbl_received);
27336 + }
27337 +
27338 + irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
27339 +@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
27340 +
27341 + status = VIA_READ(VIA_REG_INTERRUPT);
27342 + if (status & VIA_IRQ_VBLANK_PENDING) {
27343 +- atomic_inc(&dev_priv->vbl_received);
27344 +- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
27345 ++ atomic_inc_unchecked(&dev_priv->vbl_received);
27346 ++ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
27347 + do_gettimeofday(&cur_vblank);
27348 + if (dev_priv->last_vblank_valid) {
27349 + dev_priv->usec_per_vblank =
27350 +@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
27351 + dev_priv->last_vblank = cur_vblank;
27352 + dev_priv->last_vblank_valid = 1;
27353 + }
27354 +- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
27355 ++ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
27356 + DRM_DEBUG("US per vblank is: %u\n",
27357 + dev_priv->usec_per_vblank);
27358 + }
27359 +@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
27360 +
27361 + for (i = 0; i < dev_priv->num_irqs; ++i) {
27362 + if (status & cur_irq->pending_mask) {
27363 +- atomic_inc(&cur_irq->irq_received);
27364 ++ atomic_inc_unchecked(&cur_irq->irq_received);
27365 + DRM_WAKEUP(&cur_irq->irq_queue);
27366 + handled = 1;
27367 + if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
27368 +@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
27369 + DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
27370 + ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
27371 + masks[irq][4]));
27372 +- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
27373 ++ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
27374 + } else {
27375 + DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
27376 + (((cur_irq_sequence =
27377 +- atomic_read(&cur_irq->irq_received)) -
27378 ++ atomic_read_unchecked(&cur_irq->irq_received)) -
27379 + *sequence) <= (1 << 23)));
27380 + }
27381 + *sequence = cur_irq_sequence;
27382 +@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
27383 + }
27384 +
27385 + for (i = 0; i < dev_priv->num_irqs; ++i) {
27386 +- atomic_set(&cur_irq->irq_received, 0);
27387 ++ atomic_set_unchecked(&cur_irq->irq_received, 0);
27388 + cur_irq->enable_mask = dev_priv->irq_masks[i][0];
27389 + cur_irq->pending_mask = dev_priv->irq_masks[i][1];
27390 + DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
27391 +@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
27392 + switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
27393 + case VIA_IRQ_RELATIVE:
27394 + irqwait->request.sequence +=
27395 +- atomic_read(&cur_irq->irq_received);
27396 ++ atomic_read_unchecked(&cur_irq->irq_received);
27397 + irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
27398 + case VIA_IRQ_ABSOLUTE:
27399 + break;
27400 +diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
27401 +--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-11-11 15:19:27.000000000 -0500
27402 ++++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-11-16 18:39:07.000000000 -0500
27403 +@@ -240,7 +240,7 @@ struct vmw_private {
27404 + * Fencing and IRQs.
27405 + */
27406 +
27407 +- atomic_t fence_seq;
27408 ++ atomic_unchecked_t fence_seq;
27409 + wait_queue_head_t fence_queue;
27410 + wait_queue_head_t fifo_queue;
27411 + atomic_t fence_queue_waiters;
27412 +diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
27413 +--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-11-11 15:19:27.000000000 -0500
27414 ++++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 2011-11-16 18:39:07.000000000 -0500
27415 +@@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device
27416 + struct drm_vmw_fence_rep fence_rep;
27417 + struct drm_vmw_fence_rep __user *user_fence_rep;
27418 + int ret;
27419 +- void *user_cmd;
27420 ++ void __user *user_cmd;
27421 + void *cmd;
27422 + uint32_t sequence;
27423 + struct vmw_sw_context *sw_context = &dev_priv->ctx;
27424 +diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
27425 +--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-11-11 15:19:27.000000000 -0500
27426 ++++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-11-16 18:39:07.000000000 -0500
27427 +@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
27428 + while (!vmw_lag_lt(queue, us)) {
27429 + spin_lock(&queue->lock);
27430 + if (list_empty(&queue->head))
27431 +- sequence = atomic_read(&dev_priv->fence_seq);
27432 ++ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
27433 + else {
27434 + fence = list_first_entry(&queue->head,
27435 + struct vmw_fence, head);
27436 +diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
27437 +--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-11-11 15:19:27.000000000 -0500
27438 ++++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-11-16 18:39:07.000000000 -0500
27439 +@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
27440 + (unsigned int) min,
27441 + (unsigned int) fifo->capabilities);
27442 +
27443 +- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
27444 ++ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
27445 + iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
27446 + vmw_fence_queue_init(&fifo->fence_queue);
27447 + return vmw_fifo_send_fence(dev_priv, &dummy);
27448 +@@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_privat
27449 + if (reserveable)
27450 + iowrite32(bytes, fifo_mem +
27451 + SVGA_FIFO_RESERVED);
27452 +- return fifo_mem + (next_cmd >> 2);
27453 ++ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
27454 + } else {
27455 + need_bounce = true;
27456 + }
27457 +@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
27458 +
27459 + fm = vmw_fifo_reserve(dev_priv, bytes);
27460 + if (unlikely(fm == NULL)) {
27461 +- *sequence = atomic_read(&dev_priv->fence_seq);
27462 ++ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
27463 + ret = -ENOMEM;
27464 + (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
27465 + false, 3*HZ);
27466 +@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
27467 + }
27468 +
27469 + do {
27470 +- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
27471 ++ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
27472 + } while (*sequence == 0);
27473 +
27474 + if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
27475 +diff -urNp linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
27476 +--- linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-11-11 15:19:27.000000000 -0500
27477 ++++ linux-3.1.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-11-16 18:39:07.000000000 -0500
27478 +@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
27479 + * emitted. Then the fence is stale and signaled.
27480 + */
27481 +
27482 +- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
27483 ++ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
27484 + > VMW_FENCE_WRAP);
27485 +
27486 + return ret;
27487 +@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
27488 +
27489 + if (fifo_idle)
27490 + down_read(&fifo_state->rwsem);
27491 +- signal_seq = atomic_read(&dev_priv->fence_seq);
27492 ++ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
27493 + ret = 0;
27494 +
27495 + for (;;) {
27496 +diff -urNp linux-3.1.1/drivers/hid/hid-core.c linux-3.1.1/drivers/hid/hid-core.c
27497 +--- linux-3.1.1/drivers/hid/hid-core.c 2011-11-11 15:19:27.000000000 -0500
27498 ++++ linux-3.1.1/drivers/hid/hid-core.c 2011-11-16 18:39:07.000000000 -0500
27499 +@@ -1951,7 +1951,7 @@ static bool hid_ignore(struct hid_device
27500 +
27501 + int hid_add_device(struct hid_device *hdev)
27502 + {
27503 +- static atomic_t id = ATOMIC_INIT(0);
27504 ++ static atomic_unchecked_t id = ATOMIC_INIT(0);
27505 + int ret;
27506 +
27507 + if (WARN_ON(hdev->status & HID_STAT_ADDED))
27508 +@@ -1966,7 +1966,7 @@ int hid_add_device(struct hid_device *hd
27509 + /* XXX hack, any other cleaner solution after the driver core
27510 + * is converted to allow more than 20 bytes as the device name? */
27511 + dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
27512 +- hdev->vendor, hdev->product, atomic_inc_return(&id));
27513 ++ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
27514 +
27515 + hid_debug_register(hdev, dev_name(&hdev->dev));
27516 + ret = device_add(&hdev->dev);
27517 +diff -urNp linux-3.1.1/drivers/hid/usbhid/hiddev.c linux-3.1.1/drivers/hid/usbhid/hiddev.c
27518 +--- linux-3.1.1/drivers/hid/usbhid/hiddev.c 2011-11-11 15:19:27.000000000 -0500
27519 ++++ linux-3.1.1/drivers/hid/usbhid/hiddev.c 2011-11-16 18:39:07.000000000 -0500
27520 +@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *fi
27521 + break;
27522 +
27523 + case HIDIOCAPPLICATION:
27524 +- if (arg < 0 || arg >= hid->maxapplication)
27525 ++ if (arg >= hid->maxapplication)
27526 + break;
27527 +
27528 + for (i = 0; i < hid->maxcollection; i++)
27529 +diff -urNp linux-3.1.1/drivers/hwmon/acpi_power_meter.c linux-3.1.1/drivers/hwmon/acpi_power_meter.c
27530 +--- linux-3.1.1/drivers/hwmon/acpi_power_meter.c 2011-11-11 15:19:27.000000000 -0500
27531 ++++ linux-3.1.1/drivers/hwmon/acpi_power_meter.c 2011-11-16 18:39:07.000000000 -0500
27532 +@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
27533 + return res;
27534 +
27535 + temp /= 1000;
27536 +- if (temp < 0)
27537 +- return -EINVAL;
27538 +
27539 + mutex_lock(&resource->lock);
27540 + resource->trip[attr->index - 7] = temp;
27541 +diff -urNp linux-3.1.1/drivers/hwmon/sht15.c linux-3.1.1/drivers/hwmon/sht15.c
27542 +--- linux-3.1.1/drivers/hwmon/sht15.c 2011-11-11 15:19:27.000000000 -0500
27543 ++++ linux-3.1.1/drivers/hwmon/sht15.c 2011-11-16 18:39:07.000000000 -0500
27544 +@@ -166,7 +166,7 @@ struct sht15_data {
27545 + int supply_uV;
27546 + bool supply_uV_valid;
27547 + struct work_struct update_supply_work;
27548 +- atomic_t interrupt_handled;
27549 ++ atomic_unchecked_t interrupt_handled;
27550 + };
27551 +
27552 + /**
27553 +@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht1
27554 + return ret;
27555 +
27556 + gpio_direction_input(data->pdata->gpio_data);
27557 +- atomic_set(&data->interrupt_handled, 0);
27558 ++ atomic_set_unchecked(&data->interrupt_handled, 0);
27559 +
27560 + enable_irq(gpio_to_irq(data->pdata->gpio_data));
27561 + if (gpio_get_value(data->pdata->gpio_data) == 0) {
27562 + disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
27563 + /* Only relevant if the interrupt hasn't occurred. */
27564 +- if (!atomic_read(&data->interrupt_handled))
27565 ++ if (!atomic_read_unchecked(&data->interrupt_handled))
27566 + schedule_work(&data->read_work);
27567 + }
27568 + ret = wait_event_timeout(data->wait_queue,
27569 +@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired
27570 +
27571 + /* First disable the interrupt */
27572 + disable_irq_nosync(irq);
27573 +- atomic_inc(&data->interrupt_handled);
27574 ++ atomic_inc_unchecked(&data->interrupt_handled);
27575 + /* Then schedule a reading work struct */
27576 + if (data->state != SHT15_READING_NOTHING)
27577 + schedule_work(&data->read_work);
27578 +@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct wo
27579 + * If not, then start the interrupt again - care here as could
27580 + * have gone low in meantime so verify it hasn't!
27581 + */
27582 +- atomic_set(&data->interrupt_handled, 0);
27583 ++ atomic_set_unchecked(&data->interrupt_handled, 0);
27584 + enable_irq(gpio_to_irq(data->pdata->gpio_data));
27585 + /* If still not occurred or another handler has been scheduled */
27586 + if (gpio_get_value(data->pdata->gpio_data)
27587 +- || atomic_read(&data->interrupt_handled))
27588 ++ || atomic_read_unchecked(&data->interrupt_handled))
27589 + return;
27590 + }
27591 +
27592 +diff -urNp linux-3.1.1/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.1.1/drivers/i2c/busses/i2c-amd756-s4882.c
27593 +--- linux-3.1.1/drivers/i2c/busses/i2c-amd756-s4882.c 2011-11-11 15:19:27.000000000 -0500
27594 ++++ linux-3.1.1/drivers/i2c/busses/i2c-amd756-s4882.c 2011-11-16 18:39:07.000000000 -0500
27595 +@@ -43,7 +43,7 @@
27596 + extern struct i2c_adapter amd756_smbus;
27597 +
27598 + static struct i2c_adapter *s4882_adapter;
27599 +-static struct i2c_algorithm *s4882_algo;
27600 ++static i2c_algorithm_no_const *s4882_algo;
27601 +
27602 + /* Wrapper access functions for multiplexed SMBus */
27603 + static DEFINE_MUTEX(amd756_lock);
27604 +diff -urNp linux-3.1.1/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.1.1/drivers/i2c/busses/i2c-nforce2-s4985.c
27605 +--- linux-3.1.1/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-11-11 15:19:27.000000000 -0500
27606 ++++ linux-3.1.1/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-11-16 18:39:07.000000000 -0500
27607 +@@ -41,7 +41,7 @@
27608 + extern struct i2c_adapter *nforce2_smbus;
27609 +
27610 + static struct i2c_adapter *s4985_adapter;
27611 +-static struct i2c_algorithm *s4985_algo;
27612 ++static i2c_algorithm_no_const *s4985_algo;
27613 +
27614 + /* Wrapper access functions for multiplexed SMBus */
27615 + static DEFINE_MUTEX(nforce2_lock);
27616 +diff -urNp linux-3.1.1/drivers/i2c/i2c-mux.c linux-3.1.1/drivers/i2c/i2c-mux.c
27617 +--- linux-3.1.1/drivers/i2c/i2c-mux.c 2011-11-11 15:19:27.000000000 -0500
27618 ++++ linux-3.1.1/drivers/i2c/i2c-mux.c 2011-11-16 18:39:07.000000000 -0500
27619 +@@ -28,7 +28,7 @@
27620 + /* multiplexer per channel data */
27621 + struct i2c_mux_priv {
27622 + struct i2c_adapter adap;
27623 +- struct i2c_algorithm algo;
27624 ++ i2c_algorithm_no_const algo;
27625 +
27626 + struct i2c_adapter *parent;
27627 + void *mux_dev; /* the mux chip/device */
27628 +diff -urNp linux-3.1.1/drivers/ide/aec62xx.c linux-3.1.1/drivers/ide/aec62xx.c
27629 +--- linux-3.1.1/drivers/ide/aec62xx.c 2011-11-11 15:19:27.000000000 -0500
27630 ++++ linux-3.1.1/drivers/ide/aec62xx.c 2011-11-16 18:39:07.000000000 -0500
27631 +@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_
27632 + .cable_detect = atp86x_cable_detect,
27633 + };
27634 +
27635 +-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
27636 ++static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
27637 + { /* 0: AEC6210 */
27638 + .name = DRV_NAME,
27639 + .init_chipset = init_chipset_aec62xx,
27640 +diff -urNp linux-3.1.1/drivers/ide/alim15x3.c linux-3.1.1/drivers/ide/alim15x3.c
27641 +--- linux-3.1.1/drivers/ide/alim15x3.c 2011-11-11 15:19:27.000000000 -0500
27642 ++++ linux-3.1.1/drivers/ide/alim15x3.c 2011-11-16 18:39:07.000000000 -0500
27643 +@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_
27644 + .dma_sff_read_status = ide_dma_sff_read_status,
27645 + };
27646 +
27647 +-static const struct ide_port_info ali15x3_chipset __devinitdata = {
27648 ++static const struct ide_port_info ali15x3_chipset __devinitconst = {
27649 + .name = DRV_NAME,
27650 + .init_chipset = init_chipset_ali15x3,
27651 + .init_hwif = init_hwif_ali15x3,
27652 +diff -urNp linux-3.1.1/drivers/ide/amd74xx.c linux-3.1.1/drivers/ide/amd74xx.c
27653 +--- linux-3.1.1/drivers/ide/amd74xx.c 2011-11-11 15:19:27.000000000 -0500
27654 ++++ linux-3.1.1/drivers/ide/amd74xx.c 2011-11-16 18:39:07.000000000 -0500
27655 +@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_por
27656 + .udma_mask = udma, \
27657 + }
27658 +
27659 +-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
27660 ++static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
27661 + /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
27662 + /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
27663 + /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
27664 +diff -urNp linux-3.1.1/drivers/ide/atiixp.c linux-3.1.1/drivers/ide/atiixp.c
27665 +--- linux-3.1.1/drivers/ide/atiixp.c 2011-11-11 15:19:27.000000000 -0500
27666 ++++ linux-3.1.1/drivers/ide/atiixp.c 2011-11-16 18:39:07.000000000 -0500
27667 +@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_
27668 + .cable_detect = atiixp_cable_detect,
27669 + };
27670 +
27671 +-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
27672 ++static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
27673 + { /* 0: IXP200/300/400/700 */
27674 + .name = DRV_NAME,
27675 + .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
27676 +diff -urNp linux-3.1.1/drivers/ide/cmd64x.c linux-3.1.1/drivers/ide/cmd64x.c
27677 +--- linux-3.1.1/drivers/ide/cmd64x.c 2011-11-11 15:19:27.000000000 -0500
27678 ++++ linux-3.1.1/drivers/ide/cmd64x.c 2011-11-16 18:39:07.000000000 -0500
27679 +@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_r
27680 + .dma_sff_read_status = ide_dma_sff_read_status,
27681 + };
27682 +
27683 +-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
27684 ++static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
27685 + { /* 0: CMD643 */
27686 + .name = DRV_NAME,
27687 + .init_chipset = init_chipset_cmd64x,
27688 +diff -urNp linux-3.1.1/drivers/ide/cs5520.c linux-3.1.1/drivers/ide/cs5520.c
27689 +--- linux-3.1.1/drivers/ide/cs5520.c 2011-11-11 15:19:27.000000000 -0500
27690 ++++ linux-3.1.1/drivers/ide/cs5520.c 2011-11-16 18:39:07.000000000 -0500
27691 +@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_
27692 + .set_dma_mode = cs5520_set_dma_mode,
27693 + };
27694 +
27695 +-static const struct ide_port_info cyrix_chipset __devinitdata = {
27696 ++static const struct ide_port_info cyrix_chipset __devinitconst = {
27697 + .name = DRV_NAME,
27698 + .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
27699 + .port_ops = &cs5520_port_ops,
27700 +diff -urNp linux-3.1.1/drivers/ide/cs5530.c linux-3.1.1/drivers/ide/cs5530.c
27701 +--- linux-3.1.1/drivers/ide/cs5530.c 2011-11-11 15:19:27.000000000 -0500
27702 ++++ linux-3.1.1/drivers/ide/cs5530.c 2011-11-16 18:39:07.000000000 -0500
27703 +@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_
27704 + .udma_filter = cs5530_udma_filter,
27705 + };
27706 +
27707 +-static const struct ide_port_info cs5530_chipset __devinitdata = {
27708 ++static const struct ide_port_info cs5530_chipset __devinitconst = {
27709 + .name = DRV_NAME,
27710 + .init_chipset = init_chipset_cs5530,
27711 + .init_hwif = init_hwif_cs5530,
27712 +diff -urNp linux-3.1.1/drivers/ide/cs5535.c linux-3.1.1/drivers/ide/cs5535.c
27713 +--- linux-3.1.1/drivers/ide/cs5535.c 2011-11-11 15:19:27.000000000 -0500
27714 ++++ linux-3.1.1/drivers/ide/cs5535.c 2011-11-16 18:39:07.000000000 -0500
27715 +@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_
27716 + .cable_detect = cs5535_cable_detect,
27717 + };
27718 +
27719 +-static const struct ide_port_info cs5535_chipset __devinitdata = {
27720 ++static const struct ide_port_info cs5535_chipset __devinitconst = {
27721 + .name = DRV_NAME,
27722 + .port_ops = &cs5535_port_ops,
27723 + .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
27724 +diff -urNp linux-3.1.1/drivers/ide/cy82c693.c linux-3.1.1/drivers/ide/cy82c693.c
27725 +--- linux-3.1.1/drivers/ide/cy82c693.c 2011-11-11 15:19:27.000000000 -0500
27726 ++++ linux-3.1.1/drivers/ide/cy82c693.c 2011-11-16 18:39:07.000000000 -0500
27727 +@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c69
27728 + .set_dma_mode = cy82c693_set_dma_mode,
27729 + };
27730 +
27731 +-static const struct ide_port_info cy82c693_chipset __devinitdata = {
27732 ++static const struct ide_port_info cy82c693_chipset __devinitconst = {
27733 + .name = DRV_NAME,
27734 + .init_iops = init_iops_cy82c693,
27735 + .port_ops = &cy82c693_port_ops,
27736 +diff -urNp linux-3.1.1/drivers/ide/hpt366.c linux-3.1.1/drivers/ide/hpt366.c
27737 +--- linux-3.1.1/drivers/ide/hpt366.c 2011-11-11 15:19:27.000000000 -0500
27738 ++++ linux-3.1.1/drivers/ide/hpt366.c 2011-11-16 18:39:07.000000000 -0500
27739 +@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings
27740 + }
27741 + };
27742 +
27743 +-static const struct hpt_info hpt36x __devinitdata = {
27744 ++static const struct hpt_info hpt36x __devinitconst = {
27745 + .chip_name = "HPT36x",
27746 + .chip_type = HPT36x,
27747 + .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
27748 +@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __de
27749 + .timings = &hpt36x_timings
27750 + };
27751 +
27752 +-static const struct hpt_info hpt370 __devinitdata = {
27753 ++static const struct hpt_info hpt370 __devinitconst = {
27754 + .chip_name = "HPT370",
27755 + .chip_type = HPT370,
27756 + .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27757 +@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __de
27758 + .timings = &hpt37x_timings
27759 + };
27760 +
27761 +-static const struct hpt_info hpt370a __devinitdata = {
27762 ++static const struct hpt_info hpt370a __devinitconst = {
27763 + .chip_name = "HPT370A",
27764 + .chip_type = HPT370A,
27765 + .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
27766 +@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __d
27767 + .timings = &hpt37x_timings
27768 + };
27769 +
27770 +-static const struct hpt_info hpt374 __devinitdata = {
27771 ++static const struct hpt_info hpt374 __devinitconst = {
27772 + .chip_name = "HPT374",
27773 + .chip_type = HPT374,
27774 + .udma_mask = ATA_UDMA5,
27775 +@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __de
27776 + .timings = &hpt37x_timings
27777 + };
27778 +
27779 +-static const struct hpt_info hpt372 __devinitdata = {
27780 ++static const struct hpt_info hpt372 __devinitconst = {
27781 + .chip_name = "HPT372",
27782 + .chip_type = HPT372,
27783 + .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27784 +@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __de
27785 + .timings = &hpt37x_timings
27786 + };
27787 +
27788 +-static const struct hpt_info hpt372a __devinitdata = {
27789 ++static const struct hpt_info hpt372a __devinitconst = {
27790 + .chip_name = "HPT372A",
27791 + .chip_type = HPT372A,
27792 + .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27793 +@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __d
27794 + .timings = &hpt37x_timings
27795 + };
27796 +
27797 +-static const struct hpt_info hpt302 __devinitdata = {
27798 ++static const struct hpt_info hpt302 __devinitconst = {
27799 + .chip_name = "HPT302",
27800 + .chip_type = HPT302,
27801 + .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27802 +@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __de
27803 + .timings = &hpt37x_timings
27804 + };
27805 +
27806 +-static const struct hpt_info hpt371 __devinitdata = {
27807 ++static const struct hpt_info hpt371 __devinitconst = {
27808 + .chip_name = "HPT371",
27809 + .chip_type = HPT371,
27810 + .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27811 +@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __de
27812 + .timings = &hpt37x_timings
27813 + };
27814 +
27815 +-static const struct hpt_info hpt372n __devinitdata = {
27816 ++static const struct hpt_info hpt372n __devinitconst = {
27817 + .chip_name = "HPT372N",
27818 + .chip_type = HPT372N,
27819 + .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27820 +@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __d
27821 + .timings = &hpt37x_timings
27822 + };
27823 +
27824 +-static const struct hpt_info hpt302n __devinitdata = {
27825 ++static const struct hpt_info hpt302n __devinitconst = {
27826 + .chip_name = "HPT302N",
27827 + .chip_type = HPT302N,
27828 + .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27829 +@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __d
27830 + .timings = &hpt37x_timings
27831 + };
27832 +
27833 +-static const struct hpt_info hpt371n __devinitdata = {
27834 ++static const struct hpt_info hpt371n __devinitconst = {
27835 + .chip_name = "HPT371N",
27836 + .chip_type = HPT371N,
27837 + .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
27838 +@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_d
27839 + .dma_sff_read_status = ide_dma_sff_read_status,
27840 + };
27841 +
27842 +-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
27843 ++static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
27844 + { /* 0: HPT36x */
27845 + .name = DRV_NAME,
27846 + .init_chipset = init_chipset_hpt366,
27847 +diff -urNp linux-3.1.1/drivers/ide/ide-cd.c linux-3.1.1/drivers/ide/ide-cd.c
27848 +--- linux-3.1.1/drivers/ide/ide-cd.c 2011-11-11 15:19:27.000000000 -0500
27849 ++++ linux-3.1.1/drivers/ide/ide-cd.c 2011-11-16 18:39:07.000000000 -0500
27850 +@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
27851 + alignment = queue_dma_alignment(q) | q->dma_pad_mask;
27852 + if ((unsigned long)buf & alignment
27853 + || blk_rq_bytes(rq) & q->dma_pad_mask
27854 +- || object_is_on_stack(buf))
27855 ++ || object_starts_on_stack(buf))
27856 + drive->dma = 0;
27857 + }
27858 + }
27859 +diff -urNp linux-3.1.1/drivers/ide/ide-floppy.c linux-3.1.1/drivers/ide/ide-floppy.c
27860 +--- linux-3.1.1/drivers/ide/ide-floppy.c 2011-11-11 15:19:27.000000000 -0500
27861 ++++ linux-3.1.1/drivers/ide/ide-floppy.c 2011-11-16 18:40:10.000000000 -0500
27862 +@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
27863 + u8 pc_buf[256], header_len, desc_cnt;
27864 + int i, rc = 1, blocks, length;
27865 +
27866 ++ pax_track_stack();
27867 ++
27868 + ide_debug_log(IDE_DBG_FUNC, "enter");
27869 +
27870 + drive->bios_cyl = 0;
27871 +diff -urNp linux-3.1.1/drivers/ide/ide-pci-generic.c linux-3.1.1/drivers/ide/ide-pci-generic.c
27872 +--- linux-3.1.1/drivers/ide/ide-pci-generic.c 2011-11-11 15:19:27.000000000 -0500
27873 ++++ linux-3.1.1/drivers/ide/ide-pci-generic.c 2011-11-16 18:39:07.000000000 -0500
27874 +@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell
27875 + .udma_mask = ATA_UDMA6, \
27876 + }
27877 +
27878 +-static const struct ide_port_info generic_chipsets[] __devinitdata = {
27879 ++static const struct ide_port_info generic_chipsets[] __devinitconst = {
27880 + /* 0: Unknown */
27881 + DECLARE_GENERIC_PCI_DEV(0),
27882 +
27883 +diff -urNp linux-3.1.1/drivers/ide/it8172.c linux-3.1.1/drivers/ide/it8172.c
27884 +--- linux-3.1.1/drivers/ide/it8172.c 2011-11-11 15:19:27.000000000 -0500
27885 ++++ linux-3.1.1/drivers/ide/it8172.c 2011-11-16 18:39:07.000000000 -0500
27886 +@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_
27887 + .set_dma_mode = it8172_set_dma_mode,
27888 + };
27889 +
27890 +-static const struct ide_port_info it8172_port_info __devinitdata = {
27891 ++static const struct ide_port_info it8172_port_info __devinitconst = {
27892 + .name = DRV_NAME,
27893 + .port_ops = &it8172_port_ops,
27894 + .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
27895 +diff -urNp linux-3.1.1/drivers/ide/it8213.c linux-3.1.1/drivers/ide/it8213.c
27896 +--- linux-3.1.1/drivers/ide/it8213.c 2011-11-11 15:19:27.000000000 -0500
27897 ++++ linux-3.1.1/drivers/ide/it8213.c 2011-11-16 18:39:07.000000000 -0500
27898 +@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_
27899 + .cable_detect = it8213_cable_detect,
27900 + };
27901 +
27902 +-static const struct ide_port_info it8213_chipset __devinitdata = {
27903 ++static const struct ide_port_info it8213_chipset __devinitconst = {
27904 + .name = DRV_NAME,
27905 + .enablebits = { {0x41, 0x80, 0x80} },
27906 + .port_ops = &it8213_port_ops,
27907 +diff -urNp linux-3.1.1/drivers/ide/it821x.c linux-3.1.1/drivers/ide/it821x.c
27908 +--- linux-3.1.1/drivers/ide/it821x.c 2011-11-11 15:19:27.000000000 -0500
27909 ++++ linux-3.1.1/drivers/ide/it821x.c 2011-11-16 18:39:07.000000000 -0500
27910 +@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_
27911 + .cable_detect = it821x_cable_detect,
27912 + };
27913 +
27914 +-static const struct ide_port_info it821x_chipset __devinitdata = {
27915 ++static const struct ide_port_info it821x_chipset __devinitconst = {
27916 + .name = DRV_NAME,
27917 + .init_chipset = init_chipset_it821x,
27918 + .init_hwif = init_hwif_it821x,
27919 +diff -urNp linux-3.1.1/drivers/ide/jmicron.c linux-3.1.1/drivers/ide/jmicron.c
27920 +--- linux-3.1.1/drivers/ide/jmicron.c 2011-11-11 15:19:27.000000000 -0500
27921 ++++ linux-3.1.1/drivers/ide/jmicron.c 2011-11-16 18:39:07.000000000 -0500
27922 +@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron
27923 + .cable_detect = jmicron_cable_detect,
27924 + };
27925 +
27926 +-static const struct ide_port_info jmicron_chipset __devinitdata = {
27927 ++static const struct ide_port_info jmicron_chipset __devinitconst = {
27928 + .name = DRV_NAME,
27929 + .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
27930 + .port_ops = &jmicron_port_ops,
27931 +diff -urNp linux-3.1.1/drivers/ide/ns87415.c linux-3.1.1/drivers/ide/ns87415.c
27932 +--- linux-3.1.1/drivers/ide/ns87415.c 2011-11-11 15:19:27.000000000 -0500
27933 ++++ linux-3.1.1/drivers/ide/ns87415.c 2011-11-16 18:39:07.000000000 -0500
27934 +@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_
27935 + .dma_sff_read_status = superio_dma_sff_read_status,
27936 + };
27937 +
27938 +-static const struct ide_port_info ns87415_chipset __devinitdata = {
27939 ++static const struct ide_port_info ns87415_chipset __devinitconst = {
27940 + .name = DRV_NAME,
27941 + .init_hwif = init_hwif_ns87415,
27942 + .tp_ops = &ns87415_tp_ops,
27943 +diff -urNp linux-3.1.1/drivers/ide/opti621.c linux-3.1.1/drivers/ide/opti621.c
27944 +--- linux-3.1.1/drivers/ide/opti621.c 2011-11-11 15:19:27.000000000 -0500
27945 ++++ linux-3.1.1/drivers/ide/opti621.c 2011-11-16 18:39:07.000000000 -0500
27946 +@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621
27947 + .set_pio_mode = opti621_set_pio_mode,
27948 + };
27949 +
27950 +-static const struct ide_port_info opti621_chipset __devinitdata = {
27951 ++static const struct ide_port_info opti621_chipset __devinitconst = {
27952 + .name = DRV_NAME,
27953 + .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
27954 + .port_ops = &opti621_port_ops,
27955 +diff -urNp linux-3.1.1/drivers/ide/pdc202xx_new.c linux-3.1.1/drivers/ide/pdc202xx_new.c
27956 +--- linux-3.1.1/drivers/ide/pdc202xx_new.c 2011-11-11 15:19:27.000000000 -0500
27957 ++++ linux-3.1.1/drivers/ide/pdc202xx_new.c 2011-11-16 18:39:07.000000000 -0500
27958 +@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_
27959 + .udma_mask = udma, \
27960 + }
27961 +
27962 +-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
27963 ++static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
27964 + /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
27965 + /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
27966 + };
27967 +diff -urNp linux-3.1.1/drivers/ide/pdc202xx_old.c linux-3.1.1/drivers/ide/pdc202xx_old.c
27968 +--- linux-3.1.1/drivers/ide/pdc202xx_old.c 2011-11-11 15:19:27.000000000 -0500
27969 ++++ linux-3.1.1/drivers/ide/pdc202xx_old.c 2011-11-16 18:39:07.000000000 -0500
27970 +@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x
27971 + .max_sectors = sectors, \
27972 + }
27973 +
27974 +-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
27975 ++static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
27976 + { /* 0: PDC20246 */
27977 + .name = DRV_NAME,
27978 + .init_chipset = init_chipset_pdc202xx,
27979 +diff -urNp linux-3.1.1/drivers/ide/piix.c linux-3.1.1/drivers/ide/piix.c
27980 +--- linux-3.1.1/drivers/ide/piix.c 2011-11-11 15:19:27.000000000 -0500
27981 ++++ linux-3.1.1/drivers/ide/piix.c 2011-11-16 18:39:07.000000000 -0500
27982 +@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_por
27983 + .udma_mask = udma, \
27984 + }
27985 +
27986 +-static const struct ide_port_info piix_pci_info[] __devinitdata = {
27987 ++static const struct ide_port_info piix_pci_info[] __devinitconst = {
27988 + /* 0: MPIIX */
27989 + { /*
27990 + * MPIIX actually has only a single IDE channel mapped to
27991 +diff -urNp linux-3.1.1/drivers/ide/rz1000.c linux-3.1.1/drivers/ide/rz1000.c
27992 +--- linux-3.1.1/drivers/ide/rz1000.c 2011-11-11 15:19:27.000000000 -0500
27993 ++++ linux-3.1.1/drivers/ide/rz1000.c 2011-11-16 18:39:07.000000000 -0500
27994 +@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_read
27995 + }
27996 + }
27997 +
27998 +-static const struct ide_port_info rz1000_chipset __devinitdata = {
27999 ++static const struct ide_port_info rz1000_chipset __devinitconst = {
28000 + .name = DRV_NAME,
28001 + .host_flags = IDE_HFLAG_NO_DMA,
28002 + };
28003 +diff -urNp linux-3.1.1/drivers/ide/sc1200.c linux-3.1.1/drivers/ide/sc1200.c
28004 +--- linux-3.1.1/drivers/ide/sc1200.c 2011-11-11 15:19:27.000000000 -0500
28005 ++++ linux-3.1.1/drivers/ide/sc1200.c 2011-11-16 18:39:07.000000000 -0500
28006 +@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_d
28007 + .dma_sff_read_status = ide_dma_sff_read_status,
28008 + };
28009 +
28010 +-static const struct ide_port_info sc1200_chipset __devinitdata = {
28011 ++static const struct ide_port_info sc1200_chipset __devinitconst = {
28012 + .name = DRV_NAME,
28013 + .port_ops = &sc1200_port_ops,
28014 + .dma_ops = &sc1200_dma_ops,
28015 +diff -urNp linux-3.1.1/drivers/ide/scc_pata.c linux-3.1.1/drivers/ide/scc_pata.c
28016 +--- linux-3.1.1/drivers/ide/scc_pata.c 2011-11-11 15:19:27.000000000 -0500
28017 ++++ linux-3.1.1/drivers/ide/scc_pata.c 2011-11-16 18:39:07.000000000 -0500
28018 +@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_
28019 + .dma_sff_read_status = scc_dma_sff_read_status,
28020 + };
28021 +
28022 +-static const struct ide_port_info scc_chipset __devinitdata = {
28023 ++static const struct ide_port_info scc_chipset __devinitconst = {
28024 + .name = "sccIDE",
28025 + .init_iops = init_iops_scc,
28026 + .init_dma = scc_init_dma,
28027 +diff -urNp linux-3.1.1/drivers/ide/serverworks.c linux-3.1.1/drivers/ide/serverworks.c
28028 +--- linux-3.1.1/drivers/ide/serverworks.c 2011-11-11 15:19:27.000000000 -0500
28029 ++++ linux-3.1.1/drivers/ide/serverworks.c 2011-11-16 18:39:07.000000000 -0500
28030 +@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_p
28031 + .cable_detect = svwks_cable_detect,
28032 + };
28033 +
28034 +-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
28035 ++static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
28036 + { /* 0: OSB4 */
28037 + .name = DRV_NAME,
28038 + .init_chipset = init_chipset_svwks,
28039 +diff -urNp linux-3.1.1/drivers/ide/setup-pci.c linux-3.1.1/drivers/ide/setup-pci.c
28040 +--- linux-3.1.1/drivers/ide/setup-pci.c 2011-11-11 15:19:27.000000000 -0500
28041 ++++ linux-3.1.1/drivers/ide/setup-pci.c 2011-11-16 18:40:10.000000000 -0500
28042 +@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
28043 + int ret, i, n_ports = dev2 ? 4 : 2;
28044 + struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
28045 +
28046 ++ pax_track_stack();
28047 ++
28048 + for (i = 0; i < n_ports / 2; i++) {
28049 + ret = ide_setup_pci_controller(pdev[i], d, !i);
28050 + if (ret < 0)
28051 +diff -urNp linux-3.1.1/drivers/ide/siimage.c linux-3.1.1/drivers/ide/siimage.c
28052 +--- linux-3.1.1/drivers/ide/siimage.c 2011-11-11 15:19:27.000000000 -0500
28053 ++++ linux-3.1.1/drivers/ide/siimage.c 2011-11-16 18:39:07.000000000 -0500
28054 +@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_
28055 + .udma_mask = ATA_UDMA6, \
28056 + }
28057 +
28058 +-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
28059 ++static const struct ide_port_info siimage_chipsets[] __devinitconst = {
28060 + /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
28061 + /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
28062 + };
28063 +diff -urNp linux-3.1.1/drivers/ide/sis5513.c linux-3.1.1/drivers/ide/sis5513.c
28064 +--- linux-3.1.1/drivers/ide/sis5513.c 2011-11-11 15:19:27.000000000 -0500
28065 ++++ linux-3.1.1/drivers/ide/sis5513.c 2011-11-16 18:39:07.000000000 -0500
28066 +@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata
28067 + .cable_detect = sis_cable_detect,
28068 + };
28069 +
28070 +-static const struct ide_port_info sis5513_chipset __devinitdata = {
28071 ++static const struct ide_port_info sis5513_chipset __devinitconst = {
28072 + .name = DRV_NAME,
28073 + .init_chipset = init_chipset_sis5513,
28074 + .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
28075 +diff -urNp linux-3.1.1/drivers/ide/sl82c105.c linux-3.1.1/drivers/ide/sl82c105.c
28076 +--- linux-3.1.1/drivers/ide/sl82c105.c 2011-11-11 15:19:27.000000000 -0500
28077 ++++ linux-3.1.1/drivers/ide/sl82c105.c 2011-11-16 18:39:07.000000000 -0500
28078 +@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105
28079 + .dma_sff_read_status = ide_dma_sff_read_status,
28080 + };
28081 +
28082 +-static const struct ide_port_info sl82c105_chipset __devinitdata = {
28083 ++static const struct ide_port_info sl82c105_chipset __devinitconst = {
28084 + .name = DRV_NAME,
28085 + .init_chipset = init_chipset_sl82c105,
28086 + .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
28087 +diff -urNp linux-3.1.1/drivers/ide/slc90e66.c linux-3.1.1/drivers/ide/slc90e66.c
28088 +--- linux-3.1.1/drivers/ide/slc90e66.c 2011-11-11 15:19:27.000000000 -0500
28089 ++++ linux-3.1.1/drivers/ide/slc90e66.c 2011-11-16 18:39:07.000000000 -0500
28090 +@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e6
28091 + .cable_detect = slc90e66_cable_detect,
28092 + };
28093 +
28094 +-static const struct ide_port_info slc90e66_chipset __devinitdata = {
28095 ++static const struct ide_port_info slc90e66_chipset __devinitconst = {
28096 + .name = DRV_NAME,
28097 + .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
28098 + .port_ops = &slc90e66_port_ops,
28099 +diff -urNp linux-3.1.1/drivers/ide/tc86c001.c linux-3.1.1/drivers/ide/tc86c001.c
28100 +--- linux-3.1.1/drivers/ide/tc86c001.c 2011-11-11 15:19:27.000000000 -0500
28101 ++++ linux-3.1.1/drivers/ide/tc86c001.c 2011-11-16 18:39:07.000000000 -0500
28102 +@@ -191,7 +191,7 @@ static const struct ide_dma_ops tc86c001
28103 + .dma_sff_read_status = ide_dma_sff_read_status,
28104 + };
28105 +
28106 +-static const struct ide_port_info tc86c001_chipset __devinitdata = {
28107 ++static const struct ide_port_info tc86c001_chipset __devinitconst = {
28108 + .name = DRV_NAME,
28109 + .init_hwif = init_hwif_tc86c001,
28110 + .port_ops = &tc86c001_port_ops,
28111 +diff -urNp linux-3.1.1/drivers/ide/triflex.c linux-3.1.1/drivers/ide/triflex.c
28112 +--- linux-3.1.1/drivers/ide/triflex.c 2011-11-11 15:19:27.000000000 -0500
28113 ++++ linux-3.1.1/drivers/ide/triflex.c 2011-11-16 18:39:07.000000000 -0500
28114 +@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex
28115 + .set_dma_mode = triflex_set_mode,
28116 + };
28117 +
28118 +-static const struct ide_port_info triflex_device __devinitdata = {
28119 ++static const struct ide_port_info triflex_device __devinitconst = {
28120 + .name = DRV_NAME,
28121 + .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
28122 + .port_ops = &triflex_port_ops,
28123 +diff -urNp linux-3.1.1/drivers/ide/trm290.c linux-3.1.1/drivers/ide/trm290.c
28124 +--- linux-3.1.1/drivers/ide/trm290.c 2011-11-11 15:19:27.000000000 -0500
28125 ++++ linux-3.1.1/drivers/ide/trm290.c 2011-11-16 18:39:07.000000000 -0500
28126 +@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops
28127 + .dma_check = trm290_dma_check,
28128 + };
28129 +
28130 +-static const struct ide_port_info trm290_chipset __devinitdata = {
28131 ++static const struct ide_port_info trm290_chipset __devinitconst = {
28132 + .name = DRV_NAME,
28133 + .init_hwif = init_hwif_trm290,
28134 + .tp_ops = &trm290_tp_ops,
28135 +diff -urNp linux-3.1.1/drivers/ide/via82cxxx.c linux-3.1.1/drivers/ide/via82cxxx.c
28136 +--- linux-3.1.1/drivers/ide/via82cxxx.c 2011-11-11 15:19:27.000000000 -0500
28137 ++++ linux-3.1.1/drivers/ide/via82cxxx.c 2011-11-16 18:39:07.000000000 -0500
28138 +@@ -403,7 +403,7 @@ static const struct ide_port_ops via_por
28139 + .cable_detect = via82cxxx_cable_detect,
28140 + };
28141 +
28142 +-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
28143 ++static const struct ide_port_info via82cxxx_chipset __devinitconst = {
28144 + .name = DRV_NAME,
28145 + .init_chipset = init_chipset_via82cxxx,
28146 + .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
28147 +diff -urNp linux-3.1.1/drivers/infiniband/core/cm.c linux-3.1.1/drivers/infiniband/core/cm.c
28148 +--- linux-3.1.1/drivers/infiniband/core/cm.c 2011-11-11 15:19:27.000000000 -0500
28149 ++++ linux-3.1.1/drivers/infiniband/core/cm.c 2011-11-16 18:39:07.000000000 -0500
28150 +@@ -113,7 +113,7 @@ static char const counter_group_names[CM
28151 +
28152 + struct cm_counter_group {
28153 + struct kobject obj;
28154 +- atomic_long_t counter[CM_ATTR_COUNT];
28155 ++ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
28156 + };
28157 +
28158 + struct cm_counter_attribute {
28159 +@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
28160 + struct ib_mad_send_buf *msg = NULL;
28161 + int ret;
28162 +
28163 +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28164 ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28165 + counter[CM_REQ_COUNTER]);
28166 +
28167 + /* Quick state check to discard duplicate REQs. */
28168 +@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
28169 + if (!cm_id_priv)
28170 + return;
28171 +
28172 +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28173 ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28174 + counter[CM_REP_COUNTER]);
28175 + ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
28176 + if (ret)
28177 +@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
28178 + if (cm_id_priv->id.state != IB_CM_REP_SENT &&
28179 + cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
28180 + spin_unlock_irq(&cm_id_priv->lock);
28181 +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28182 ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28183 + counter[CM_RTU_COUNTER]);
28184 + goto out;
28185 + }
28186 +@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
28187 + cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
28188 + dreq_msg->local_comm_id);
28189 + if (!cm_id_priv) {
28190 +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28191 ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28192 + counter[CM_DREQ_COUNTER]);
28193 + cm_issue_drep(work->port, work->mad_recv_wc);
28194 + return -EINVAL;
28195 +@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
28196 + case IB_CM_MRA_REP_RCVD:
28197 + break;
28198 + case IB_CM_TIMEWAIT:
28199 +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28200 ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28201 + counter[CM_DREQ_COUNTER]);
28202 + if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
28203 + goto unlock;
28204 +@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
28205 + cm_free_msg(msg);
28206 + goto deref;
28207 + case IB_CM_DREQ_RCVD:
28208 +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28209 ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28210 + counter[CM_DREQ_COUNTER]);
28211 + goto unlock;
28212 + default:
28213 +@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
28214 + ib_modify_mad(cm_id_priv->av.port->mad_agent,
28215 + cm_id_priv->msg, timeout)) {
28216 + if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
28217 +- atomic_long_inc(&work->port->
28218 ++ atomic_long_inc_unchecked(&work->port->
28219 + counter_group[CM_RECV_DUPLICATES].
28220 + counter[CM_MRA_COUNTER]);
28221 + goto out;
28222 +@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
28223 + break;
28224 + case IB_CM_MRA_REQ_RCVD:
28225 + case IB_CM_MRA_REP_RCVD:
28226 +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28227 ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28228 + counter[CM_MRA_COUNTER]);
28229 + /* fall through */
28230 + default:
28231 +@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
28232 + case IB_CM_LAP_IDLE:
28233 + break;
28234 + case IB_CM_MRA_LAP_SENT:
28235 +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28236 ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28237 + counter[CM_LAP_COUNTER]);
28238 + if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
28239 + goto unlock;
28240 +@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
28241 + cm_free_msg(msg);
28242 + goto deref;
28243 + case IB_CM_LAP_RCVD:
28244 +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28245 ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28246 + counter[CM_LAP_COUNTER]);
28247 + goto unlock;
28248 + default:
28249 +@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
28250 + cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
28251 + if (cur_cm_id_priv) {
28252 + spin_unlock_irq(&cm.lock);
28253 +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
28254 ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
28255 + counter[CM_SIDR_REQ_COUNTER]);
28256 + goto out; /* Duplicate message. */
28257 + }
28258 +@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
28259 + if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
28260 + msg->retries = 1;
28261 +
28262 +- atomic_long_add(1 + msg->retries,
28263 ++ atomic_long_add_unchecked(1 + msg->retries,
28264 + &port->counter_group[CM_XMIT].counter[attr_index]);
28265 + if (msg->retries)
28266 +- atomic_long_add(msg->retries,
28267 ++ atomic_long_add_unchecked(msg->retries,
28268 + &port->counter_group[CM_XMIT_RETRIES].
28269 + counter[attr_index]);
28270 +
28271 +@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
28272 + }
28273 +
28274 + attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
28275 +- atomic_long_inc(&port->counter_group[CM_RECV].
28276 ++ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
28277 + counter[attr_id - CM_ATTR_ID_OFFSET]);
28278 +
28279 + work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
28280 +@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
28281 + cm_attr = container_of(attr, struct cm_counter_attribute, attr);
28282 +
28283 + return sprintf(buf, "%ld\n",
28284 +- atomic_long_read(&group->counter[cm_attr->index]));
28285 ++ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
28286 + }
28287 +
28288 + static const struct sysfs_ops cm_counter_ops = {
28289 +diff -urNp linux-3.1.1/drivers/infiniband/core/fmr_pool.c linux-3.1.1/drivers/infiniband/core/fmr_pool.c
28290 +--- linux-3.1.1/drivers/infiniband/core/fmr_pool.c 2011-11-11 15:19:27.000000000 -0500
28291 ++++ linux-3.1.1/drivers/infiniband/core/fmr_pool.c 2011-11-16 18:39:07.000000000 -0500
28292 +@@ -97,8 +97,8 @@ struct ib_fmr_pool {
28293 +
28294 + struct task_struct *thread;
28295 +
28296 +- atomic_t req_ser;
28297 +- atomic_t flush_ser;
28298 ++ atomic_unchecked_t req_ser;
28299 ++ atomic_unchecked_t flush_ser;
28300 +
28301 + wait_queue_head_t force_wait;
28302 + };
28303 +@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
28304 + struct ib_fmr_pool *pool = pool_ptr;
28305 +
28306 + do {
28307 +- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
28308 ++ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
28309 + ib_fmr_batch_release(pool);
28310 +
28311 +- atomic_inc(&pool->flush_ser);
28312 ++ atomic_inc_unchecked(&pool->flush_ser);
28313 + wake_up_interruptible(&pool->force_wait);
28314 +
28315 + if (pool->flush_function)
28316 +@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
28317 + }
28318 +
28319 + set_current_state(TASK_INTERRUPTIBLE);
28320 +- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
28321 ++ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
28322 + !kthread_should_stop())
28323 + schedule();
28324 + __set_current_state(TASK_RUNNING);
28325 +@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
28326 + pool->dirty_watermark = params->dirty_watermark;
28327 + pool->dirty_len = 0;
28328 + spin_lock_init(&pool->pool_lock);
28329 +- atomic_set(&pool->req_ser, 0);
28330 +- atomic_set(&pool->flush_ser, 0);
28331 ++ atomic_set_unchecked(&pool->req_ser, 0);
28332 ++ atomic_set_unchecked(&pool->flush_ser, 0);
28333 + init_waitqueue_head(&pool->force_wait);
28334 +
28335 + pool->thread = kthread_run(ib_fmr_cleanup_thread,
28336 +@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
28337 + }
28338 + spin_unlock_irq(&pool->pool_lock);
28339 +
28340 +- serial = atomic_inc_return(&pool->req_ser);
28341 ++ serial = atomic_inc_return_unchecked(&pool->req_ser);
28342 + wake_up_process(pool->thread);
28343 +
28344 + if (wait_event_interruptible(pool->force_wait,
28345 +- atomic_read(&pool->flush_ser) - serial >= 0))
28346 ++ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
28347 + return -EINTR;
28348 +
28349 + return 0;
28350 +@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
28351 + } else {
28352 + list_add_tail(&fmr->list, &pool->dirty_list);
28353 + if (++pool->dirty_len >= pool->dirty_watermark) {
28354 +- atomic_inc(&pool->req_ser);
28355 ++ atomic_inc_unchecked(&pool->req_ser);
28356 + wake_up_process(pool->thread);
28357 + }
28358 + }
28359 +diff -urNp linux-3.1.1/drivers/infiniband/hw/cxgb4/mem.c linux-3.1.1/drivers/infiniband/hw/cxgb4/mem.c
28360 +--- linux-3.1.1/drivers/infiniband/hw/cxgb4/mem.c 2011-11-11 15:19:27.000000000 -0500
28361 ++++ linux-3.1.1/drivers/infiniband/hw/cxgb4/mem.c 2011-11-16 18:39:07.000000000 -0500
28362 +@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
28363 + int err;
28364 + struct fw_ri_tpte tpt;
28365 + u32 stag_idx;
28366 +- static atomic_t key;
28367 ++ static atomic_unchecked_t key;
28368 +
28369 + if (c4iw_fatal_error(rdev))
28370 + return -EIO;
28371 +@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
28372 + &rdev->resource.tpt_fifo_lock);
28373 + if (!stag_idx)
28374 + return -ENOMEM;
28375 +- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
28376 ++ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
28377 + }
28378 + PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
28379 + __func__, stag_state, type, pdid, stag_idx);
28380 +diff -urNp linux-3.1.1/drivers/infiniband/hw/ipath/ipath_fs.c linux-3.1.1/drivers/infiniband/hw/ipath/ipath_fs.c
28381 +--- linux-3.1.1/drivers/infiniband/hw/ipath/ipath_fs.c 2011-11-11 15:19:27.000000000 -0500
28382 ++++ linux-3.1.1/drivers/infiniband/hw/ipath/ipath_fs.c 2011-11-16 18:40:10.000000000 -0500
28383 +@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
28384 + struct infinipath_counters counters;
28385 + struct ipath_devdata *dd;
28386 +
28387 ++ pax_track_stack();
28388 ++
28389 + dd = file->f_path.dentry->d_inode->i_private;
28390 + dd->ipath_f_read_counters(dd, &counters);
28391 +
28392 +diff -urNp linux-3.1.1/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.1.1/drivers/infiniband/hw/ipath/ipath_rc.c
28393 +--- linux-3.1.1/drivers/infiniband/hw/ipath/ipath_rc.c 2011-11-11 15:19:27.000000000 -0500
28394 ++++ linux-3.1.1/drivers/infiniband/hw/ipath/ipath_rc.c 2011-11-16 18:39:07.000000000 -0500
28395 +@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
28396 + struct ib_atomic_eth *ateth;
28397 + struct ipath_ack_entry *e;
28398 + u64 vaddr;
28399 +- atomic64_t *maddr;
28400 ++ atomic64_unchecked_t *maddr;
28401 + u64 sdata;
28402 + u32 rkey;
28403 + u8 next;
28404 +@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
28405 + IB_ACCESS_REMOTE_ATOMIC)))
28406 + goto nack_acc_unlck;
28407 + /* Perform atomic OP and save result. */
28408 +- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
28409 ++ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
28410 + sdata = be64_to_cpu(ateth->swap_data);
28411 + e = &qp->s_ack_queue[qp->r_head_ack_queue];
28412 + e->atomic_data = (opcode == OP(FETCH_ADD)) ?
28413 +- (u64) atomic64_add_return(sdata, maddr) - sdata :
28414 ++ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
28415 + (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
28416 + be64_to_cpu(ateth->compare_data),
28417 + sdata);
28418 +diff -urNp linux-3.1.1/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.1.1/drivers/infiniband/hw/ipath/ipath_ruc.c
28419 +--- linux-3.1.1/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-11-11 15:19:27.000000000 -0500
28420 ++++ linux-3.1.1/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-11-16 18:39:07.000000000 -0500
28421 +@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
28422 + unsigned long flags;
28423 + struct ib_wc wc;
28424 + u64 sdata;
28425 +- atomic64_t *maddr;
28426 ++ atomic64_unchecked_t *maddr;
28427 + enum ib_wc_status send_status;
28428 +
28429 + /*
28430 +@@ -382,11 +382,11 @@ again:
28431 + IB_ACCESS_REMOTE_ATOMIC)))
28432 + goto acc_err;
28433 + /* Perform atomic OP and save result. */
28434 +- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
28435 ++ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
28436 + sdata = wqe->wr.wr.atomic.compare_add;
28437 + *(u64 *) sqp->s_sge.sge.vaddr =
28438 + (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
28439 +- (u64) atomic64_add_return(sdata, maddr) - sdata :
28440 ++ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
28441 + (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
28442 + sdata, wqe->wr.wr.atomic.swap);
28443 + goto send_comp;
28444 +diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes.c linux-3.1.1/drivers/infiniband/hw/nes/nes.c
28445 +--- linux-3.1.1/drivers/infiniband/hw/nes/nes.c 2011-11-11 15:19:27.000000000 -0500
28446 ++++ linux-3.1.1/drivers/infiniband/hw/nes/nes.c 2011-11-16 18:39:07.000000000 -0500
28447 +@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
28448 + LIST_HEAD(nes_adapter_list);
28449 + static LIST_HEAD(nes_dev_list);
28450 +
28451 +-atomic_t qps_destroyed;
28452 ++atomic_unchecked_t qps_destroyed;
28453 +
28454 + static unsigned int ee_flsh_adapter;
28455 + static unsigned int sysfs_nonidx_addr;
28456 +@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
28457 + struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
28458 + struct nes_adapter *nesadapter = nesdev->nesadapter;
28459 +
28460 +- atomic_inc(&qps_destroyed);
28461 ++ atomic_inc_unchecked(&qps_destroyed);
28462 +
28463 + /* Free the control structures */
28464 +
28465 +diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes_cm.c linux-3.1.1/drivers/infiniband/hw/nes/nes_cm.c
28466 +--- linux-3.1.1/drivers/infiniband/hw/nes/nes_cm.c 2011-11-11 15:19:27.000000000 -0500
28467 ++++ linux-3.1.1/drivers/infiniband/hw/nes/nes_cm.c 2011-11-16 18:39:07.000000000 -0500
28468 +@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
28469 + u32 cm_packets_retrans;
28470 + u32 cm_packets_created;
28471 + u32 cm_packets_received;
28472 +-atomic_t cm_listens_created;
28473 +-atomic_t cm_listens_destroyed;
28474 ++atomic_unchecked_t cm_listens_created;
28475 ++atomic_unchecked_t cm_listens_destroyed;
28476 + u32 cm_backlog_drops;
28477 +-atomic_t cm_loopbacks;
28478 +-atomic_t cm_nodes_created;
28479 +-atomic_t cm_nodes_destroyed;
28480 +-atomic_t cm_accel_dropped_pkts;
28481 +-atomic_t cm_resets_recvd;
28482 ++atomic_unchecked_t cm_loopbacks;
28483 ++atomic_unchecked_t cm_nodes_created;
28484 ++atomic_unchecked_t cm_nodes_destroyed;
28485 ++atomic_unchecked_t cm_accel_dropped_pkts;
28486 ++atomic_unchecked_t cm_resets_recvd;
28487 +
28488 + static inline int mini_cm_accelerated(struct nes_cm_core *,
28489 + struct nes_cm_node *);
28490 +@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
28491 +
28492 + static struct nes_cm_core *g_cm_core;
28493 +
28494 +-atomic_t cm_connects;
28495 +-atomic_t cm_accepts;
28496 +-atomic_t cm_disconnects;
28497 +-atomic_t cm_closes;
28498 +-atomic_t cm_connecteds;
28499 +-atomic_t cm_connect_reqs;
28500 +-atomic_t cm_rejects;
28501 ++atomic_unchecked_t cm_connects;
28502 ++atomic_unchecked_t cm_accepts;
28503 ++atomic_unchecked_t cm_disconnects;
28504 ++atomic_unchecked_t cm_closes;
28505 ++atomic_unchecked_t cm_connecteds;
28506 ++atomic_unchecked_t cm_connect_reqs;
28507 ++atomic_unchecked_t cm_rejects;
28508 +
28509 +
28510 + /**
28511 +@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
28512 + kfree(listener);
28513 + listener = NULL;
28514 + ret = 0;
28515 +- atomic_inc(&cm_listens_destroyed);
28516 ++ atomic_inc_unchecked(&cm_listens_destroyed);
28517 + } else {
28518 + spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
28519 + }
28520 +@@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
28521 + cm_node->rem_mac);
28522 +
28523 + add_hte_node(cm_core, cm_node);
28524 +- atomic_inc(&cm_nodes_created);
28525 ++ atomic_inc_unchecked(&cm_nodes_created);
28526 +
28527 + return cm_node;
28528 + }
28529 +@@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
28530 + }
28531 +
28532 + atomic_dec(&cm_core->node_cnt);
28533 +- atomic_inc(&cm_nodes_destroyed);
28534 ++ atomic_inc_unchecked(&cm_nodes_destroyed);
28535 + nesqp = cm_node->nesqp;
28536 + if (nesqp) {
28537 + nesqp->cm_node = NULL;
28538 +@@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
28539 +
28540 + static void drop_packet(struct sk_buff *skb)
28541 + {
28542 +- atomic_inc(&cm_accel_dropped_pkts);
28543 ++ atomic_inc_unchecked(&cm_accel_dropped_pkts);
28544 + dev_kfree_skb_any(skb);
28545 + }
28546 +
28547 +@@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
28548 + {
28549 +
28550 + int reset = 0; /* whether to send reset in case of err.. */
28551 +- atomic_inc(&cm_resets_recvd);
28552 ++ atomic_inc_unchecked(&cm_resets_recvd);
28553 + nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
28554 + " refcnt=%d\n", cm_node, cm_node->state,
28555 + atomic_read(&cm_node->ref_count));
28556 +@@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
28557 + rem_ref_cm_node(cm_node->cm_core, cm_node);
28558 + return NULL;
28559 + }
28560 +- atomic_inc(&cm_loopbacks);
28561 ++ atomic_inc_unchecked(&cm_loopbacks);
28562 + loopbackremotenode->loopbackpartner = cm_node;
28563 + loopbackremotenode->tcp_cntxt.rcv_wscale =
28564 + NES_CM_DEFAULT_RCV_WND_SCALE;
28565 +@@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
28566 + add_ref_cm_node(cm_node);
28567 + } else if (cm_node->state == NES_CM_STATE_TSA) {
28568 + rem_ref_cm_node(cm_core, cm_node);
28569 +- atomic_inc(&cm_accel_dropped_pkts);
28570 ++ atomic_inc_unchecked(&cm_accel_dropped_pkts);
28571 + dev_kfree_skb_any(skb);
28572 + break;
28573 + }
28574 +@@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
28575 +
28576 + if ((cm_id) && (cm_id->event_handler)) {
28577 + if (issue_disconn) {
28578 +- atomic_inc(&cm_disconnects);
28579 ++ atomic_inc_unchecked(&cm_disconnects);
28580 + cm_event.event = IW_CM_EVENT_DISCONNECT;
28581 + cm_event.status = disconn_status;
28582 + cm_event.local_addr = cm_id->local_addr;
28583 +@@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
28584 + }
28585 +
28586 + if (issue_close) {
28587 +- atomic_inc(&cm_closes);
28588 ++ atomic_inc_unchecked(&cm_closes);
28589 + nes_disconnect(nesqp, 1);
28590 +
28591 + cm_id->provider_data = nesqp;
28592 +@@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
28593 +
28594 + nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
28595 + nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
28596 +- atomic_inc(&cm_accepts);
28597 ++ atomic_inc_unchecked(&cm_accepts);
28598 +
28599 + nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
28600 + netdev_refcnt_read(nesvnic->netdev));
28601 +@@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
28602 +
28603 + struct nes_cm_core *cm_core;
28604 +
28605 +- atomic_inc(&cm_rejects);
28606 ++ atomic_inc_unchecked(&cm_rejects);
28607 + cm_node = (struct nes_cm_node *) cm_id->provider_data;
28608 + loopback = cm_node->loopbackpartner;
28609 + cm_core = cm_node->cm_core;
28610 +@@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
28611 + ntohl(cm_id->local_addr.sin_addr.s_addr),
28612 + ntohs(cm_id->local_addr.sin_port));
28613 +
28614 +- atomic_inc(&cm_connects);
28615 ++ atomic_inc_unchecked(&cm_connects);
28616 + nesqp->active_conn = 1;
28617 +
28618 + /* cache the cm_id in the qp */
28619 +@@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
28620 + g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
28621 + return err;
28622 + }
28623 +- atomic_inc(&cm_listens_created);
28624 ++ atomic_inc_unchecked(&cm_listens_created);
28625 + }
28626 +
28627 + cm_id->add_ref(cm_id);
28628 +@@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
28629 + if (nesqp->destroyed) {
28630 + return;
28631 + }
28632 +- atomic_inc(&cm_connecteds);
28633 ++ atomic_inc_unchecked(&cm_connecteds);
28634 + nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
28635 + " local port 0x%04X. jiffies = %lu.\n",
28636 + nesqp->hwqp.qp_id,
28637 +@@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
28638 +
28639 + cm_id->add_ref(cm_id);
28640 + ret = cm_id->event_handler(cm_id, &cm_event);
28641 +- atomic_inc(&cm_closes);
28642 ++ atomic_inc_unchecked(&cm_closes);
28643 + cm_event.event = IW_CM_EVENT_CLOSE;
28644 + cm_event.status = 0;
28645 + cm_event.provider_data = cm_id->provider_data;
28646 +@@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
28647 + return;
28648 + cm_id = cm_node->cm_id;
28649 +
28650 +- atomic_inc(&cm_connect_reqs);
28651 ++ atomic_inc_unchecked(&cm_connect_reqs);
28652 + nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28653 + cm_node, cm_id, jiffies);
28654 +
28655 +@@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
28656 + return;
28657 + cm_id = cm_node->cm_id;
28658 +
28659 +- atomic_inc(&cm_connect_reqs);
28660 ++ atomic_inc_unchecked(&cm_connect_reqs);
28661 + nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
28662 + cm_node, cm_id, jiffies);
28663 +
28664 +diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes.h linux-3.1.1/drivers/infiniband/hw/nes/nes.h
28665 +--- linux-3.1.1/drivers/infiniband/hw/nes/nes.h 2011-11-11 15:19:27.000000000 -0500
28666 ++++ linux-3.1.1/drivers/infiniband/hw/nes/nes.h 2011-11-16 18:39:07.000000000 -0500
28667 +@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
28668 + extern unsigned int wqm_quanta;
28669 + extern struct list_head nes_adapter_list;
28670 +
28671 +-extern atomic_t cm_connects;
28672 +-extern atomic_t cm_accepts;
28673 +-extern atomic_t cm_disconnects;
28674 +-extern atomic_t cm_closes;
28675 +-extern atomic_t cm_connecteds;
28676 +-extern atomic_t cm_connect_reqs;
28677 +-extern atomic_t cm_rejects;
28678 +-extern atomic_t mod_qp_timouts;
28679 +-extern atomic_t qps_created;
28680 +-extern atomic_t qps_destroyed;
28681 +-extern atomic_t sw_qps_destroyed;
28682 ++extern atomic_unchecked_t cm_connects;
28683 ++extern atomic_unchecked_t cm_accepts;
28684 ++extern atomic_unchecked_t cm_disconnects;
28685 ++extern atomic_unchecked_t cm_closes;
28686 ++extern atomic_unchecked_t cm_connecteds;
28687 ++extern atomic_unchecked_t cm_connect_reqs;
28688 ++extern atomic_unchecked_t cm_rejects;
28689 ++extern atomic_unchecked_t mod_qp_timouts;
28690 ++extern atomic_unchecked_t qps_created;
28691 ++extern atomic_unchecked_t qps_destroyed;
28692 ++extern atomic_unchecked_t sw_qps_destroyed;
28693 + extern u32 mh_detected;
28694 + extern u32 mh_pauses_sent;
28695 + extern u32 cm_packets_sent;
28696 +@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
28697 + extern u32 cm_packets_received;
28698 + extern u32 cm_packets_dropped;
28699 + extern u32 cm_packets_retrans;
28700 +-extern atomic_t cm_listens_created;
28701 +-extern atomic_t cm_listens_destroyed;
28702 ++extern atomic_unchecked_t cm_listens_created;
28703 ++extern atomic_unchecked_t cm_listens_destroyed;
28704 + extern u32 cm_backlog_drops;
28705 +-extern atomic_t cm_loopbacks;
28706 +-extern atomic_t cm_nodes_created;
28707 +-extern atomic_t cm_nodes_destroyed;
28708 +-extern atomic_t cm_accel_dropped_pkts;
28709 +-extern atomic_t cm_resets_recvd;
28710 ++extern atomic_unchecked_t cm_loopbacks;
28711 ++extern atomic_unchecked_t cm_nodes_created;
28712 ++extern atomic_unchecked_t cm_nodes_destroyed;
28713 ++extern atomic_unchecked_t cm_accel_dropped_pkts;
28714 ++extern atomic_unchecked_t cm_resets_recvd;
28715 +
28716 + extern u32 int_mod_timer_init;
28717 + extern u32 int_mod_cq_depth_256;
28718 +diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes_nic.c linux-3.1.1/drivers/infiniband/hw/nes/nes_nic.c
28719 +--- linux-3.1.1/drivers/infiniband/hw/nes/nes_nic.c 2011-11-11 15:19:27.000000000 -0500
28720 ++++ linux-3.1.1/drivers/infiniband/hw/nes/nes_nic.c 2011-11-16 18:39:07.000000000 -0500
28721 +@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats
28722 + target_stat_values[++index] = mh_detected;
28723 + target_stat_values[++index] = mh_pauses_sent;
28724 + target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
28725 +- target_stat_values[++index] = atomic_read(&cm_connects);
28726 +- target_stat_values[++index] = atomic_read(&cm_accepts);
28727 +- target_stat_values[++index] = atomic_read(&cm_disconnects);
28728 +- target_stat_values[++index] = atomic_read(&cm_connecteds);
28729 +- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
28730 +- target_stat_values[++index] = atomic_read(&cm_rejects);
28731 +- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
28732 +- target_stat_values[++index] = atomic_read(&qps_created);
28733 +- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
28734 +- target_stat_values[++index] = atomic_read(&qps_destroyed);
28735 +- target_stat_values[++index] = atomic_read(&cm_closes);
28736 ++ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
28737 ++ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
28738 ++ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
28739 ++ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
28740 ++ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
28741 ++ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
28742 ++ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
28743 ++ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
28744 ++ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
28745 ++ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
28746 ++ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
28747 + target_stat_values[++index] = cm_packets_sent;
28748 + target_stat_values[++index] = cm_packets_bounced;
28749 + target_stat_values[++index] = cm_packets_created;
28750 + target_stat_values[++index] = cm_packets_received;
28751 + target_stat_values[++index] = cm_packets_dropped;
28752 + target_stat_values[++index] = cm_packets_retrans;
28753 +- target_stat_values[++index] = atomic_read(&cm_listens_created);
28754 +- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
28755 ++ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
28756 ++ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
28757 + target_stat_values[++index] = cm_backlog_drops;
28758 +- target_stat_values[++index] = atomic_read(&cm_loopbacks);
28759 +- target_stat_values[++index] = atomic_read(&cm_nodes_created);
28760 +- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
28761 +- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
28762 +- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
28763 ++ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
28764 ++ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
28765 ++ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
28766 ++ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
28767 ++ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
28768 + target_stat_values[++index] = nesadapter->free_4kpbl;
28769 + target_stat_values[++index] = nesadapter->free_256pbl;
28770 + target_stat_values[++index] = int_mod_timer_init;
28771 +diff -urNp linux-3.1.1/drivers/infiniband/hw/nes/nes_verbs.c linux-3.1.1/drivers/infiniband/hw/nes/nes_verbs.c
28772 +--- linux-3.1.1/drivers/infiniband/hw/nes/nes_verbs.c 2011-11-11 15:19:27.000000000 -0500
28773 ++++ linux-3.1.1/drivers/infiniband/hw/nes/nes_verbs.c 2011-11-16 18:39:07.000000000 -0500
28774 +@@ -46,9 +46,9 @@
28775 +
28776 + #include <rdma/ib_umem.h>
28777 +
28778 +-atomic_t mod_qp_timouts;
28779 +-atomic_t qps_created;
28780 +-atomic_t sw_qps_destroyed;
28781 ++atomic_unchecked_t mod_qp_timouts;
28782 ++atomic_unchecked_t qps_created;
28783 ++atomic_unchecked_t sw_qps_destroyed;
28784 +
28785 + static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
28786 +
28787 +@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struc
28788 + if (init_attr->create_flags)
28789 + return ERR_PTR(-EINVAL);
28790 +
28791 +- atomic_inc(&qps_created);
28792 ++ atomic_inc_unchecked(&qps_created);
28793 + switch (init_attr->qp_type) {
28794 + case IB_QPT_RC:
28795 + if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
28796 +@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *
28797 + struct iw_cm_event cm_event;
28798 + int ret;
28799 +
28800 +- atomic_inc(&sw_qps_destroyed);
28801 ++ atomic_inc_unchecked(&sw_qps_destroyed);
28802 + nesqp->destroyed = 1;
28803 +
28804 + /* Blow away the connection if it exists. */
28805 +diff -urNp linux-3.1.1/drivers/infiniband/hw/qib/qib.h linux-3.1.1/drivers/infiniband/hw/qib/qib.h
28806 +--- linux-3.1.1/drivers/infiniband/hw/qib/qib.h 2011-11-11 15:19:27.000000000 -0500
28807 ++++ linux-3.1.1/drivers/infiniband/hw/qib/qib.h 2011-11-16 18:39:07.000000000 -0500
28808 +@@ -51,6 +51,7 @@
28809 + #include <linux/completion.h>
28810 + #include <linux/kref.h>
28811 + #include <linux/sched.h>
28812 ++#include <linux/slab.h>
28813 +
28814 + #include "qib_common.h"
28815 + #include "qib_verbs.h"
28816 +diff -urNp linux-3.1.1/drivers/input/gameport/gameport.c linux-3.1.1/drivers/input/gameport/gameport.c
28817 +--- linux-3.1.1/drivers/input/gameport/gameport.c 2011-11-11 15:19:27.000000000 -0500
28818 ++++ linux-3.1.1/drivers/input/gameport/gameport.c 2011-11-16 18:39:07.000000000 -0500
28819 +@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
28820 + */
28821 + static void gameport_init_port(struct gameport *gameport)
28822 + {
28823 +- static atomic_t gameport_no = ATOMIC_INIT(0);
28824 ++ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
28825 +
28826 + __module_get(THIS_MODULE);
28827 +
28828 + mutex_init(&gameport->drv_mutex);
28829 + device_initialize(&gameport->dev);
28830 + dev_set_name(&gameport->dev, "gameport%lu",
28831 +- (unsigned long)atomic_inc_return(&gameport_no) - 1);
28832 ++ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
28833 + gameport->dev.bus = &gameport_bus;
28834 + gameport->dev.release = gameport_release_port;
28835 + if (gameport->parent)
28836 +diff -urNp linux-3.1.1/drivers/input/input.c linux-3.1.1/drivers/input/input.c
28837 +--- linux-3.1.1/drivers/input/input.c 2011-11-11 15:19:27.000000000 -0500
28838 ++++ linux-3.1.1/drivers/input/input.c 2011-11-16 18:39:07.000000000 -0500
28839 +@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struc
28840 + */
28841 + int input_register_device(struct input_dev *dev)
28842 + {
28843 +- static atomic_t input_no = ATOMIC_INIT(0);
28844 ++ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
28845 + struct input_handler *handler;
28846 + const char *path;
28847 + int error;
28848 +@@ -1851,7 +1851,7 @@ int input_register_device(struct input_d
28849 + dev->setkeycode = input_default_setkeycode;
28850 +
28851 + dev_set_name(&dev->dev, "input%ld",
28852 +- (unsigned long) atomic_inc_return(&input_no) - 1);
28853 ++ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
28854 +
28855 + error = device_add(&dev->dev);
28856 + if (error)
28857 +diff -urNp linux-3.1.1/drivers/input/joystick/sidewinder.c linux-3.1.1/drivers/input/joystick/sidewinder.c
28858 +--- linux-3.1.1/drivers/input/joystick/sidewinder.c 2011-11-11 15:19:27.000000000 -0500
28859 ++++ linux-3.1.1/drivers/input/joystick/sidewinder.c 2011-11-16 18:40:10.000000000 -0500
28860 +@@ -30,6 +30,7 @@
28861 + #include <linux/kernel.h>
28862 + #include <linux/module.h>
28863 + #include <linux/slab.h>
28864 ++#include <linux/sched.h>
28865 + #include <linux/init.h>
28866 + #include <linux/input.h>
28867 + #include <linux/gameport.h>
28868 +@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
28869 + unsigned char buf[SW_LENGTH];
28870 + int i;
28871 +
28872 ++ pax_track_stack();
28873 ++
28874 + i = sw_read_packet(sw->gameport, buf, sw->length, 0);
28875 +
28876 + if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
28877 +diff -urNp linux-3.1.1/drivers/input/joystick/xpad.c linux-3.1.1/drivers/input/joystick/xpad.c
28878 +--- linux-3.1.1/drivers/input/joystick/xpad.c 2011-11-11 15:19:27.000000000 -0500
28879 ++++ linux-3.1.1/drivers/input/joystick/xpad.c 2011-11-16 18:39:07.000000000 -0500
28880 +@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_clas
28881 +
28882 + static int xpad_led_probe(struct usb_xpad *xpad)
28883 + {
28884 +- static atomic_t led_seq = ATOMIC_INIT(0);
28885 ++ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
28886 + long led_no;
28887 + struct xpad_led *led;
28888 + struct led_classdev *led_cdev;
28889 +@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpa
28890 + if (!led)
28891 + return -ENOMEM;
28892 +
28893 +- led_no = (long)atomic_inc_return(&led_seq) - 1;
28894 ++ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
28895 +
28896 + snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
28897 + led->xpad = xpad;
28898 +diff -urNp linux-3.1.1/drivers/input/mousedev.c linux-3.1.1/drivers/input/mousedev.c
28899 +--- linux-3.1.1/drivers/input/mousedev.c 2011-11-11 15:19:27.000000000 -0500
28900 ++++ linux-3.1.1/drivers/input/mousedev.c 2011-11-16 18:39:07.000000000 -0500
28901 +@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file
28902 +
28903 + spin_unlock_irq(&client->packet_lock);
28904 +
28905 +- if (copy_to_user(buffer, data, count))
28906 ++ if (count > sizeof(data) || copy_to_user(buffer, data, count))
28907 + return -EFAULT;
28908 +
28909 + return count;
28910 +diff -urNp linux-3.1.1/drivers/input/serio/serio.c linux-3.1.1/drivers/input/serio/serio.c
28911 +--- linux-3.1.1/drivers/input/serio/serio.c 2011-11-11 15:19:27.000000000 -0500
28912 ++++ linux-3.1.1/drivers/input/serio/serio.c 2011-11-16 18:39:07.000000000 -0500
28913 +@@ -497,7 +497,7 @@ static void serio_release_port(struct de
28914 + */
28915 + static void serio_init_port(struct serio *serio)
28916 + {
28917 +- static atomic_t serio_no = ATOMIC_INIT(0);
28918 ++ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
28919 +
28920 + __module_get(THIS_MODULE);
28921 +
28922 +@@ -508,7 +508,7 @@ static void serio_init_port(struct serio
28923 + mutex_init(&serio->drv_mutex);
28924 + device_initialize(&serio->dev);
28925 + dev_set_name(&serio->dev, "serio%ld",
28926 +- (long)atomic_inc_return(&serio_no) - 1);
28927 ++ (long)atomic_inc_return_unchecked(&serio_no) - 1);
28928 + serio->dev.bus = &serio_bus;
28929 + serio->dev.release = serio_release_port;
28930 + serio->dev.groups = serio_device_attr_groups;
28931 +diff -urNp linux-3.1.1/drivers/isdn/capi/capi.c linux-3.1.1/drivers/isdn/capi/capi.c
28932 +--- linux-3.1.1/drivers/isdn/capi/capi.c 2011-11-11 15:19:27.000000000 -0500
28933 ++++ linux-3.1.1/drivers/isdn/capi/capi.c 2011-11-16 18:39:07.000000000 -0500
28934 +@@ -83,8 +83,8 @@ struct capiminor {
28935 +
28936 + struct capi20_appl *ap;
28937 + u32 ncci;
28938 +- atomic_t datahandle;
28939 +- atomic_t msgid;
28940 ++ atomic_unchecked_t datahandle;
28941 ++ atomic_unchecked_t msgid;
28942 +
28943 + struct tty_port port;
28944 + int ttyinstop;
28945 +@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *m
28946 + capimsg_setu16(s, 2, mp->ap->applid);
28947 + capimsg_setu8 (s, 4, CAPI_DATA_B3);
28948 + capimsg_setu8 (s, 5, CAPI_RESP);
28949 +- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
28950 ++ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
28951 + capimsg_setu32(s, 8, mp->ncci);
28952 + capimsg_setu16(s, 12, datahandle);
28953 + }
28954 +@@ -518,14 +518,14 @@ static void handle_minor_send(struct cap
28955 + mp->outbytes -= len;
28956 + spin_unlock_bh(&mp->outlock);
28957 +
28958 +- datahandle = atomic_inc_return(&mp->datahandle);
28959 ++ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
28960 + skb_push(skb, CAPI_DATA_B3_REQ_LEN);
28961 + memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28962 + capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
28963 + capimsg_setu16(skb->data, 2, mp->ap->applid);
28964 + capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
28965 + capimsg_setu8 (skb->data, 5, CAPI_REQ);
28966 +- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
28967 ++ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
28968 + capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
28969 + capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
28970 + capimsg_setu16(skb->data, 16, len); /* Data length */
28971 +diff -urNp linux-3.1.1/drivers/isdn/gigaset/common.c linux-3.1.1/drivers/isdn/gigaset/common.c
28972 +--- linux-3.1.1/drivers/isdn/gigaset/common.c 2011-11-11 15:19:27.000000000 -0500
28973 ++++ linux-3.1.1/drivers/isdn/gigaset/common.c 2011-11-16 18:39:07.000000000 -0500
28974 +@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
28975 + cs->commands_pending = 0;
28976 + cs->cur_at_seq = 0;
28977 + cs->gotfwver = -1;
28978 +- cs->open_count = 0;
28979 ++ local_set(&cs->open_count, 0);
28980 + cs->dev = NULL;
28981 + cs->tty = NULL;
28982 + cs->tty_dev = NULL;
28983 +diff -urNp linux-3.1.1/drivers/isdn/gigaset/gigaset.h linux-3.1.1/drivers/isdn/gigaset/gigaset.h
28984 +--- linux-3.1.1/drivers/isdn/gigaset/gigaset.h 2011-11-11 15:19:27.000000000 -0500
28985 ++++ linux-3.1.1/drivers/isdn/gigaset/gigaset.h 2011-11-16 18:39:07.000000000 -0500
28986 +@@ -35,6 +35,7 @@
28987 + #include <linux/tty_driver.h>
28988 + #include <linux/list.h>
28989 + #include <linux/atomic.h>
28990 ++#include <asm/local.h>
28991 +
28992 + #define GIG_VERSION {0, 5, 0, 0}
28993 + #define GIG_COMPAT {0, 4, 0, 0}
28994 +@@ -433,7 +434,7 @@ struct cardstate {
28995 + spinlock_t cmdlock;
28996 + unsigned curlen, cmdbytes;
28997 +
28998 +- unsigned open_count;
28999 ++ local_t open_count;
29000 + struct tty_struct *tty;
29001 + struct tasklet_struct if_wake_tasklet;
29002 + unsigned control_state;
29003 +diff -urNp linux-3.1.1/drivers/isdn/gigaset/interface.c linux-3.1.1/drivers/isdn/gigaset/interface.c
29004 +--- linux-3.1.1/drivers/isdn/gigaset/interface.c 2011-11-11 15:19:27.000000000 -0500
29005 ++++ linux-3.1.1/drivers/isdn/gigaset/interface.c 2011-11-16 18:39:07.000000000 -0500
29006 +@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tt
29007 + }
29008 + tty->driver_data = cs;
29009 +
29010 +- ++cs->open_count;
29011 +-
29012 +- if (cs->open_count == 1) {
29013 ++ if (local_inc_return(&cs->open_count) == 1) {
29014 + spin_lock_irqsave(&cs->lock, flags);
29015 + cs->tty = tty;
29016 + spin_unlock_irqrestore(&cs->lock, flags);
29017 +@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *
29018 +
29019 + if (!cs->connected)
29020 + gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29021 +- else if (!cs->open_count)
29022 ++ else if (!local_read(&cs->open_count))
29023 + dev_warn(cs->dev, "%s: device not opened\n", __func__);
29024 + else {
29025 +- if (!--cs->open_count) {
29026 ++ if (!local_dec_return(&cs->open_count)) {
29027 + spin_lock_irqsave(&cs->lock, flags);
29028 + cs->tty = NULL;
29029 + spin_unlock_irqrestore(&cs->lock, flags);
29030 +@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *t
29031 + if (!cs->connected) {
29032 + gig_dbg(DEBUG_IF, "not connected");
29033 + retval = -ENODEV;
29034 +- } else if (!cs->open_count)
29035 ++ } else if (!local_read(&cs->open_count))
29036 + dev_warn(cs->dev, "%s: device not opened\n", __func__);
29037 + else {
29038 + retval = 0;
29039 +@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *t
29040 + retval = -ENODEV;
29041 + goto done;
29042 + }
29043 +- if (!cs->open_count) {
29044 ++ if (!local_read(&cs->open_count)) {
29045 + dev_warn(cs->dev, "%s: device not opened\n", __func__);
29046 + retval = -ENODEV;
29047 + goto done;
29048 +@@ -413,7 +411,7 @@ static int if_write_room(struct tty_stru
29049 + if (!cs->connected) {
29050 + gig_dbg(DEBUG_IF, "not connected");
29051 + retval = -ENODEV;
29052 +- } else if (!cs->open_count)
29053 ++ } else if (!local_read(&cs->open_count))
29054 + dev_warn(cs->dev, "%s: device not opened\n", __func__);
29055 + else if (cs->mstate != MS_LOCKED) {
29056 + dev_warn(cs->dev, "can't write to unlocked device\n");
29057 +@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty
29058 +
29059 + if (!cs->connected)
29060 + gig_dbg(DEBUG_IF, "not connected");
29061 +- else if (!cs->open_count)
29062 ++ else if (!local_read(&cs->open_count))
29063 + dev_warn(cs->dev, "%s: device not opened\n", __func__);
29064 + else if (cs->mstate != MS_LOCKED)
29065 + dev_warn(cs->dev, "can't write to unlocked device\n");
29066 +@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struc
29067 +
29068 + if (!cs->connected)
29069 + gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29070 +- else if (!cs->open_count)
29071 ++ else if (!local_read(&cs->open_count))
29072 + dev_warn(cs->dev, "%s: device not opened\n", __func__);
29073 + else
29074 + gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
29075 +@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_str
29076 +
29077 + if (!cs->connected)
29078 + gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29079 +- else if (!cs->open_count)
29080 ++ else if (!local_read(&cs->open_count))
29081 + dev_warn(cs->dev, "%s: device not opened\n", __func__);
29082 + else
29083 + gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
29084 +@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_st
29085 + goto out;
29086 + }
29087 +
29088 +- if (!cs->open_count) {
29089 ++ if (!local_read(&cs->open_count)) {
29090 + dev_warn(cs->dev, "%s: device not opened\n", __func__);
29091 + goto out;
29092 + }
29093 +diff -urNp linux-3.1.1/drivers/isdn/hardware/avm/b1.c linux-3.1.1/drivers/isdn/hardware/avm/b1.c
29094 +--- linux-3.1.1/drivers/isdn/hardware/avm/b1.c 2011-11-11 15:19:27.000000000 -0500
29095 ++++ linux-3.1.1/drivers/isdn/hardware/avm/b1.c 2011-11-16 18:39:07.000000000 -0500
29096 +@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
29097 + }
29098 + if (left) {
29099 + if (t4file->user) {
29100 +- if (copy_from_user(buf, dp, left))
29101 ++ if (left > sizeof buf || copy_from_user(buf, dp, left))
29102 + return -EFAULT;
29103 + } else {
29104 + memcpy(buf, dp, left);
29105 +@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
29106 + }
29107 + if (left) {
29108 + if (config->user) {
29109 +- if (copy_from_user(buf, dp, left))
29110 ++ if (left > sizeof buf || copy_from_user(buf, dp, left))
29111 + return -EFAULT;
29112 + } else {
29113 + memcpy(buf, dp, left);
29114 +diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/capidtmf.c linux-3.1.1/drivers/isdn/hardware/eicon/capidtmf.c
29115 +--- linux-3.1.1/drivers/isdn/hardware/eicon/capidtmf.c 2011-11-11 15:19:27.000000000 -0500
29116 ++++ linux-3.1.1/drivers/isdn/hardware/eicon/capidtmf.c 2011-11-16 18:40:10.000000000 -0500
29117 +@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
29118 + byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
29119 + short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
29120 +
29121 ++ pax_track_stack();
29122 +
29123 + if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
29124 + {
29125 +diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/capifunc.c linux-3.1.1/drivers/isdn/hardware/eicon/capifunc.c
29126 +--- linux-3.1.1/drivers/isdn/hardware/eicon/capifunc.c 2011-11-11 15:19:27.000000000 -0500
29127 ++++ linux-3.1.1/drivers/isdn/hardware/eicon/capifunc.c 2011-11-16 18:40:10.000000000 -0500
29128 +@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
29129 + IDI_SYNC_REQ req;
29130 + DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29131 +
29132 ++ pax_track_stack();
29133 ++
29134 + DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29135 +
29136 + for (x = 0; x < MAX_DESCRIPTORS; x++) {
29137 +diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/diddfunc.c linux-3.1.1/drivers/isdn/hardware/eicon/diddfunc.c
29138 +--- linux-3.1.1/drivers/isdn/hardware/eicon/diddfunc.c 2011-11-11 15:19:27.000000000 -0500
29139 ++++ linux-3.1.1/drivers/isdn/hardware/eicon/diddfunc.c 2011-11-16 18:40:10.000000000 -0500
29140 +@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
29141 + IDI_SYNC_REQ req;
29142 + DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29143 +
29144 ++ pax_track_stack();
29145 ++
29146 + DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29147 +
29148 + for (x = 0; x < MAX_DESCRIPTORS; x++) {
29149 +diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/divasfunc.c linux-3.1.1/drivers/isdn/hardware/eicon/divasfunc.c
29150 +--- linux-3.1.1/drivers/isdn/hardware/eicon/divasfunc.c 2011-11-11 15:19:27.000000000 -0500
29151 ++++ linux-3.1.1/drivers/isdn/hardware/eicon/divasfunc.c 2011-11-16 18:40:10.000000000 -0500
29152 +@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_di
29153 + IDI_SYNC_REQ req;
29154 + DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29155 +
29156 ++ pax_track_stack();
29157 ++
29158 + DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29159 +
29160 + for (x = 0; x < MAX_DESCRIPTORS; x++) {
29161 +diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/divasync.h linux-3.1.1/drivers/isdn/hardware/eicon/divasync.h
29162 +--- linux-3.1.1/drivers/isdn/hardware/eicon/divasync.h 2011-11-11 15:19:27.000000000 -0500
29163 ++++ linux-3.1.1/drivers/isdn/hardware/eicon/divasync.h 2011-11-16 18:39:07.000000000 -0500
29164 +@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
29165 + } diva_didd_add_adapter_t;
29166 + typedef struct _diva_didd_remove_adapter {
29167 + IDI_CALL p_request;
29168 +-} diva_didd_remove_adapter_t;
29169 ++} __no_const diva_didd_remove_adapter_t;
29170 + typedef struct _diva_didd_read_adapter_array {
29171 + void * buffer;
29172 + dword length;
29173 +diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/idifunc.c linux-3.1.1/drivers/isdn/hardware/eicon/idifunc.c
29174 +--- linux-3.1.1/drivers/isdn/hardware/eicon/idifunc.c 2011-11-11 15:19:27.000000000 -0500
29175 ++++ linux-3.1.1/drivers/isdn/hardware/eicon/idifunc.c 2011-11-16 18:40:10.000000000 -0500
29176 +@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
29177 + IDI_SYNC_REQ req;
29178 + DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29179 +
29180 ++ pax_track_stack();
29181 ++
29182 + DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29183 +
29184 + for (x = 0; x < MAX_DESCRIPTORS; x++) {
29185 +diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/message.c linux-3.1.1/drivers/isdn/hardware/eicon/message.c
29186 +--- linux-3.1.1/drivers/isdn/hardware/eicon/message.c 2011-11-11 15:19:27.000000000 -0500
29187 ++++ linux-3.1.1/drivers/isdn/hardware/eicon/message.c 2011-11-16 18:40:10.000000000 -0500
29188 +@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
29189 + dword d;
29190 + word w;
29191 +
29192 ++ pax_track_stack();
29193 ++
29194 + a = plci->adapter;
29195 + Id = ((word)plci->Id<<8)|a->Id;
29196 + PUT_WORD(&SS_Ind[4],0x0000);
29197 +@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE
29198 + word j, n, w;
29199 + dword d;
29200 +
29201 ++ pax_track_stack();
29202 ++
29203 +
29204 + for(i=0;i<8;i++) bp_parms[i].length = 0;
29205 + for(i=0;i<2;i++) global_config[i].length = 0;
29206 +@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARS
29207 + const byte llc3[] = {4,3,2,2,6,6,0};
29208 + const byte header[] = {0,2,3,3,0,0,0};
29209 +
29210 ++ pax_track_stack();
29211 ++
29212 + for(i=0;i<8;i++) bp_parms[i].length = 0;
29213 + for(i=0;i<6;i++) b2_config_parms[i].length = 0;
29214 + for(i=0;i<5;i++) b3_config_parms[i].length = 0;
29215 +@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI
29216 + word appl_number_group_type[MAX_APPL];
29217 + PLCI *auxplci;
29218 +
29219 ++ pax_track_stack();
29220 ++
29221 + set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
29222 +
29223 + if(!a->group_optimization_enabled)
29224 +diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/mntfunc.c linux-3.1.1/drivers/isdn/hardware/eicon/mntfunc.c
29225 +--- linux-3.1.1/drivers/isdn/hardware/eicon/mntfunc.c 2011-11-11 15:19:27.000000000 -0500
29226 ++++ linux-3.1.1/drivers/isdn/hardware/eicon/mntfunc.c 2011-11-16 18:40:10.000000000 -0500
29227 +@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
29228 + IDI_SYNC_REQ req;
29229 + DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29230 +
29231 ++ pax_track_stack();
29232 ++
29233 + DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29234 +
29235 + for (x = 0; x < MAX_DESCRIPTORS; x++) {
29236 +diff -urNp linux-3.1.1/drivers/isdn/hardware/eicon/xdi_adapter.h linux-3.1.1/drivers/isdn/hardware/eicon/xdi_adapter.h
29237 +--- linux-3.1.1/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-11-11 15:19:27.000000000 -0500
29238 ++++ linux-3.1.1/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-11-16 18:39:07.000000000 -0500
29239 +@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
29240 + typedef struct _diva_os_idi_adapter_interface {
29241 + diva_init_card_proc_t cleanup_adapter_proc;
29242 + diva_cmd_card_proc_t cmd_proc;
29243 +-} diva_os_idi_adapter_interface_t;
29244 ++} __no_const diva_os_idi_adapter_interface_t;
29245 +
29246 + typedef struct _diva_os_xdi_adapter {
29247 + struct list_head link;
29248 +diff -urNp linux-3.1.1/drivers/isdn/i4l/isdn_common.c linux-3.1.1/drivers/isdn/i4l/isdn_common.c
29249 +--- linux-3.1.1/drivers/isdn/i4l/isdn_common.c 2011-11-11 15:19:27.000000000 -0500
29250 ++++ linux-3.1.1/drivers/isdn/i4l/isdn_common.c 2011-11-16 18:40:10.000000000 -0500
29251 +@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd,
29252 + } iocpar;
29253 + void __user *argp = (void __user *)arg;
29254 +
29255 ++ pax_track_stack();
29256 ++
29257 + #define name iocpar.name
29258 + #define bname iocpar.bname
29259 + #define iocts iocpar.iocts
29260 +diff -urNp linux-3.1.1/drivers/isdn/icn/icn.c linux-3.1.1/drivers/isdn/icn/icn.c
29261 +--- linux-3.1.1/drivers/isdn/icn/icn.c 2011-11-11 15:19:27.000000000 -0500
29262 ++++ linux-3.1.1/drivers/isdn/icn/icn.c 2011-11-16 18:39:07.000000000 -0500
29263 +@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
29264 + if (count > len)
29265 + count = len;
29266 + if (user) {
29267 +- if (copy_from_user(msg, buf, count))
29268 ++ if (count > sizeof msg || copy_from_user(msg, buf, count))
29269 + return -EFAULT;
29270 + } else
29271 + memcpy(msg, buf, count);
29272 +diff -urNp linux-3.1.1/drivers/lguest/core.c linux-3.1.1/drivers/lguest/core.c
29273 +--- linux-3.1.1/drivers/lguest/core.c 2011-11-11 15:19:27.000000000 -0500
29274 ++++ linux-3.1.1/drivers/lguest/core.c 2011-11-16 18:39:07.000000000 -0500
29275 +@@ -92,9 +92,17 @@ static __init int map_switcher(void)
29276 + * it's worked so far. The end address needs +1 because __get_vm_area
29277 + * allocates an extra guard page, so we need space for that.
29278 + */
29279 ++
29280 ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29281 ++ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
29282 ++ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
29283 ++ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
29284 ++#else
29285 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
29286 + VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
29287 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
29288 ++#endif
29289 ++
29290 + if (!switcher_vma) {
29291 + err = -ENOMEM;
29292 + printk("lguest: could not map switcher pages high\n");
29293 +@@ -119,7 +127,7 @@ static __init int map_switcher(void)
29294 + * Now the Switcher is mapped at the right address, we can't fail!
29295 + * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
29296 + */
29297 +- memcpy(switcher_vma->addr, start_switcher_text,
29298 ++ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
29299 + end_switcher_text - start_switcher_text);
29300 +
29301 + printk(KERN_INFO "lguest: mapped switcher at %p\n",
29302 +diff -urNp linux-3.1.1/drivers/lguest/x86/core.c linux-3.1.1/drivers/lguest/x86/core.c
29303 +--- linux-3.1.1/drivers/lguest/x86/core.c 2011-11-11 15:19:27.000000000 -0500
29304 ++++ linux-3.1.1/drivers/lguest/x86/core.c 2011-11-16 18:39:07.000000000 -0500
29305 +@@ -59,7 +59,7 @@ static struct {
29306 + /* Offset from where switcher.S was compiled to where we've copied it */
29307 + static unsigned long switcher_offset(void)
29308 + {
29309 +- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
29310 ++ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
29311 + }
29312 +
29313 + /* This cpu's struct lguest_pages. */
29314 +@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
29315 + * These copies are pretty cheap, so we do them unconditionally: */
29316 + /* Save the current Host top-level page directory.
29317 + */
29318 ++
29319 ++#ifdef CONFIG_PAX_PER_CPU_PGD
29320 ++ pages->state.host_cr3 = read_cr3();
29321 ++#else
29322 + pages->state.host_cr3 = __pa(current->mm->pgd);
29323 ++#endif
29324 ++
29325 + /*
29326 + * Set up the Guest's page tables to see this CPU's pages (and no
29327 + * other CPU's pages).
29328 +@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
29329 + * compiled-in switcher code and the high-mapped copy we just made.
29330 + */
29331 + for (i = 0; i < IDT_ENTRIES; i++)
29332 +- default_idt_entries[i] += switcher_offset();
29333 ++ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
29334 +
29335 + /*
29336 + * Set up the Switcher's per-cpu areas.
29337 +@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
29338 + * it will be undisturbed when we switch. To change %cs and jump we
29339 + * need this structure to feed to Intel's "lcall" instruction.
29340 + */
29341 +- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
29342 ++ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
29343 + lguest_entry.segment = LGUEST_CS;
29344 +
29345 + /*
29346 +diff -urNp linux-3.1.1/drivers/lguest/x86/switcher_32.S linux-3.1.1/drivers/lguest/x86/switcher_32.S
29347 +--- linux-3.1.1/drivers/lguest/x86/switcher_32.S 2011-11-11 15:19:27.000000000 -0500
29348 ++++ linux-3.1.1/drivers/lguest/x86/switcher_32.S 2011-11-16 18:39:07.000000000 -0500
29349 +@@ -87,6 +87,7 @@
29350 + #include <asm/page.h>
29351 + #include <asm/segment.h>
29352 + #include <asm/lguest.h>
29353 ++#include <asm/processor-flags.h>
29354 +
29355 + // We mark the start of the code to copy
29356 + // It's placed in .text tho it's never run here
29357 +@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
29358 + // Changes type when we load it: damn Intel!
29359 + // For after we switch over our page tables
29360 + // That entry will be read-only: we'd crash.
29361 ++
29362 ++#ifdef CONFIG_PAX_KERNEXEC
29363 ++ mov %cr0, %edx
29364 ++ xor $X86_CR0_WP, %edx
29365 ++ mov %edx, %cr0
29366 ++#endif
29367 ++
29368 + movl $(GDT_ENTRY_TSS*8), %edx
29369 + ltr %dx
29370 +
29371 +@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
29372 + // Let's clear it again for our return.
29373 + // The GDT descriptor of the Host
29374 + // Points to the table after two "size" bytes
29375 +- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
29376 ++ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
29377 + // Clear "used" from type field (byte 5, bit 2)
29378 +- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
29379 ++ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
29380 ++
29381 ++#ifdef CONFIG_PAX_KERNEXEC
29382 ++ mov %cr0, %eax
29383 ++ xor $X86_CR0_WP, %eax
29384 ++ mov %eax, %cr0
29385 ++#endif
29386 +
29387 + // Once our page table's switched, the Guest is live!
29388 + // The Host fades as we run this final step.
29389 +@@ -295,13 +309,12 @@ deliver_to_host:
29390 + // I consulted gcc, and it gave
29391 + // These instructions, which I gladly credit:
29392 + leal (%edx,%ebx,8), %eax
29393 +- movzwl (%eax),%edx
29394 +- movl 4(%eax), %eax
29395 +- xorw %ax, %ax
29396 +- orl %eax, %edx
29397 ++ movl 4(%eax), %edx
29398 ++ movw (%eax), %dx
29399 + // Now the address of the handler's in %edx
29400 + // We call it now: its "iret" drops us home.
29401 +- jmp *%edx
29402 ++ ljmp $__KERNEL_CS, $1f
29403 ++1: jmp *%edx
29404 +
29405 + // Every interrupt can come to us here
29406 + // But we must truly tell each apart.
29407 +diff -urNp linux-3.1.1/drivers/macintosh/macio_asic.c linux-3.1.1/drivers/macintosh/macio_asic.c
29408 +--- linux-3.1.1/drivers/macintosh/macio_asic.c 2011-11-11 15:19:27.000000000 -0500
29409 ++++ linux-3.1.1/drivers/macintosh/macio_asic.c 2011-11-16 18:39:07.000000000 -0500
29410 +@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(s
29411 + * MacIO is matched against any Apple ID, it's probe() function
29412 + * will then decide wether it applies or not
29413 + */
29414 +-static const struct pci_device_id __devinitdata pci_ids [] = { {
29415 ++static const struct pci_device_id __devinitconst pci_ids [] = { {
29416 + .vendor = PCI_VENDOR_ID_APPLE,
29417 + .device = PCI_ANY_ID,
29418 + .subvendor = PCI_ANY_ID,
29419 +diff -urNp linux-3.1.1/drivers/md/dm.c linux-3.1.1/drivers/md/dm.c
29420 +--- linux-3.1.1/drivers/md/dm.c 2011-11-11 15:19:27.000000000 -0500
29421 ++++ linux-3.1.1/drivers/md/dm.c 2011-11-16 18:39:07.000000000 -0500
29422 +@@ -165,9 +165,9 @@ struct mapped_device {
29423 + /*
29424 + * Event handling.
29425 + */
29426 +- atomic_t event_nr;
29427 ++ atomic_unchecked_t event_nr;
29428 + wait_queue_head_t eventq;
29429 +- atomic_t uevent_seq;
29430 ++ atomic_unchecked_t uevent_seq;
29431 + struct list_head uevent_list;
29432 + spinlock_t uevent_lock; /* Protect access to uevent_list */
29433 +
29434 +@@ -1843,8 +1843,8 @@ static struct mapped_device *alloc_dev(i
29435 + rwlock_init(&md->map_lock);
29436 + atomic_set(&md->holders, 1);
29437 + atomic_set(&md->open_count, 0);
29438 +- atomic_set(&md->event_nr, 0);
29439 +- atomic_set(&md->uevent_seq, 0);
29440 ++ atomic_set_unchecked(&md->event_nr, 0);
29441 ++ atomic_set_unchecked(&md->uevent_seq, 0);
29442 + INIT_LIST_HEAD(&md->uevent_list);
29443 + spin_lock_init(&md->uevent_lock);
29444 +
29445 +@@ -1978,7 +1978,7 @@ static void event_callback(void *context
29446 +
29447 + dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
29448 +
29449 +- atomic_inc(&md->event_nr);
29450 ++ atomic_inc_unchecked(&md->event_nr);
29451 + wake_up(&md->eventq);
29452 + }
29453 +
29454 +@@ -2614,18 +2614,18 @@ int dm_kobject_uevent(struct mapped_devi
29455 +
29456 + uint32_t dm_next_uevent_seq(struct mapped_device *md)
29457 + {
29458 +- return atomic_add_return(1, &md->uevent_seq);
29459 ++ return atomic_add_return_unchecked(1, &md->uevent_seq);
29460 + }
29461 +
29462 + uint32_t dm_get_event_nr(struct mapped_device *md)
29463 + {
29464 +- return atomic_read(&md->event_nr);
29465 ++ return atomic_read_unchecked(&md->event_nr);
29466 + }
29467 +
29468 + int dm_wait_event(struct mapped_device *md, int event_nr)
29469 + {
29470 + return wait_event_interruptible(md->eventq,
29471 +- (event_nr != atomic_read(&md->event_nr)));
29472 ++ (event_nr != atomic_read_unchecked(&md->event_nr)));
29473 + }
29474 +
29475 + void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
29476 +diff -urNp linux-3.1.1/drivers/md/dm-ioctl.c linux-3.1.1/drivers/md/dm-ioctl.c
29477 +--- linux-3.1.1/drivers/md/dm-ioctl.c 2011-11-11 15:19:27.000000000 -0500
29478 ++++ linux-3.1.1/drivers/md/dm-ioctl.c 2011-11-16 18:39:07.000000000 -0500
29479 +@@ -1578,7 +1578,7 @@ static int validate_params(uint cmd, str
29480 + cmd == DM_LIST_VERSIONS_CMD)
29481 + return 0;
29482 +
29483 +- if ((cmd == DM_DEV_CREATE_CMD)) {
29484 ++ if (cmd == DM_DEV_CREATE_CMD) {
29485 + if (!*param->name) {
29486 + DMWARN("name not supplied when creating device");
29487 + return -EINVAL;
29488 +diff -urNp linux-3.1.1/drivers/md/dm-raid1.c linux-3.1.1/drivers/md/dm-raid1.c
29489 +--- linux-3.1.1/drivers/md/dm-raid1.c 2011-11-11 15:19:27.000000000 -0500
29490 ++++ linux-3.1.1/drivers/md/dm-raid1.c 2011-11-16 18:39:07.000000000 -0500
29491 +@@ -40,7 +40,7 @@ enum dm_raid1_error {
29492 +
29493 + struct mirror {
29494 + struct mirror_set *ms;
29495 +- atomic_t error_count;
29496 ++ atomic_unchecked_t error_count;
29497 + unsigned long error_type;
29498 + struct dm_dev *dev;
29499 + sector_t offset;
29500 +@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(s
29501 + struct mirror *m;
29502 +
29503 + for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
29504 +- if (!atomic_read(&m->error_count))
29505 ++ if (!atomic_read_unchecked(&m->error_count))
29506 + return m;
29507 +
29508 + return NULL;
29509 +@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m
29510 + * simple way to tell if a device has encountered
29511 + * errors.
29512 + */
29513 +- atomic_inc(&m->error_count);
29514 ++ atomic_inc_unchecked(&m->error_count);
29515 +
29516 + if (test_and_set_bit(error_type, &m->error_type))
29517 + return;
29518 +@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(stru
29519 + struct mirror *m = get_default_mirror(ms);
29520 +
29521 + do {
29522 +- if (likely(!atomic_read(&m->error_count)))
29523 ++ if (likely(!atomic_read_unchecked(&m->error_count)))
29524 + return m;
29525 +
29526 + if (m-- == ms->mirror)
29527 +@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
29528 + {
29529 + struct mirror *default_mirror = get_default_mirror(m->ms);
29530 +
29531 +- return !atomic_read(&default_mirror->error_count);
29532 ++ return !atomic_read_unchecked(&default_mirror->error_count);
29533 + }
29534 +
29535 + static int mirror_available(struct mirror_set *ms, struct bio *bio)
29536 +@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *
29537 + */
29538 + if (likely(region_in_sync(ms, region, 1)))
29539 + m = choose_mirror(ms, bio->bi_sector);
29540 +- else if (m && atomic_read(&m->error_count))
29541 ++ else if (m && atomic_read_unchecked(&m->error_count))
29542 + m = NULL;
29543 +
29544 + if (likely(m))
29545 +@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set
29546 + }
29547 +
29548 + ms->mirror[mirror].ms = ms;
29549 +- atomic_set(&(ms->mirror[mirror].error_count), 0);
29550 ++ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
29551 + ms->mirror[mirror].error_type = 0;
29552 + ms->mirror[mirror].offset = offset;
29553 +
29554 +@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
29555 + */
29556 + static char device_status_char(struct mirror *m)
29557 + {
29558 +- if (!atomic_read(&(m->error_count)))
29559 ++ if (!atomic_read_unchecked(&(m->error_count)))
29560 + return 'A';
29561 +
29562 + return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
29563 +diff -urNp linux-3.1.1/drivers/md/dm-stripe.c linux-3.1.1/drivers/md/dm-stripe.c
29564 +--- linux-3.1.1/drivers/md/dm-stripe.c 2011-11-11 15:19:27.000000000 -0500
29565 ++++ linux-3.1.1/drivers/md/dm-stripe.c 2011-11-16 18:39:07.000000000 -0500
29566 +@@ -20,7 +20,7 @@ struct stripe {
29567 + struct dm_dev *dev;
29568 + sector_t physical_start;
29569 +
29570 +- atomic_t error_count;
29571 ++ atomic_unchecked_t error_count;
29572 + };
29573 +
29574 + struct stripe_c {
29575 +@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
29576 + kfree(sc);
29577 + return r;
29578 + }
29579 +- atomic_set(&(sc->stripe[i].error_count), 0);
29580 ++ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
29581 + }
29582 +
29583 + ti->private = sc;
29584 +@@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
29585 + DMEMIT("%d ", sc->stripes);
29586 + for (i = 0; i < sc->stripes; i++) {
29587 + DMEMIT("%s ", sc->stripe[i].dev->name);
29588 +- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
29589 ++ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
29590 + 'D' : 'A';
29591 + }
29592 + buffer[i] = '\0';
29593 +@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
29594 + */
29595 + for (i = 0; i < sc->stripes; i++)
29596 + if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
29597 +- atomic_inc(&(sc->stripe[i].error_count));
29598 +- if (atomic_read(&(sc->stripe[i].error_count)) <
29599 ++ atomic_inc_unchecked(&(sc->stripe[i].error_count));
29600 ++ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
29601 + DM_IO_ERROR_THRESHOLD)
29602 + schedule_work(&sc->trigger_event);
29603 + }
29604 +diff -urNp linux-3.1.1/drivers/md/dm-table.c linux-3.1.1/drivers/md/dm-table.c
29605 +--- linux-3.1.1/drivers/md/dm-table.c 2011-11-11 15:19:27.000000000 -0500
29606 ++++ linux-3.1.1/drivers/md/dm-table.c 2011-11-16 18:39:07.000000000 -0500
29607 +@@ -389,7 +389,7 @@ static int device_area_is_invalid(struct
29608 + if (!dev_size)
29609 + return 0;
29610 +
29611 +- if ((start >= dev_size) || (start + len > dev_size)) {
29612 ++ if ((start >= dev_size) || (len > dev_size - start)) {
29613 + DMWARN("%s: %s too small for target: "
29614 + "start=%llu, len=%llu, dev_size=%llu",
29615 + dm_device_name(ti->table->md), bdevname(bdev, b),
29616 +diff -urNp linux-3.1.1/drivers/md/md.c linux-3.1.1/drivers/md/md.c
29617 +--- linux-3.1.1/drivers/md/md.c 2011-11-11 15:19:27.000000000 -0500
29618 ++++ linux-3.1.1/drivers/md/md.c 2011-11-16 18:39:07.000000000 -0500
29619 +@@ -280,10 +280,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
29620 + * start build, activate spare
29621 + */
29622 + static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
29623 +-static atomic_t md_event_count;
29624 ++static atomic_unchecked_t md_event_count;
29625 + void md_new_event(mddev_t *mddev)
29626 + {
29627 +- atomic_inc(&md_event_count);
29628 ++ atomic_inc_unchecked(&md_event_count);
29629 + wake_up(&md_event_waiters);
29630 + }
29631 + EXPORT_SYMBOL_GPL(md_new_event);
29632 +@@ -293,7 +293,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
29633 + */
29634 + static void md_new_event_inintr(mddev_t *mddev)
29635 + {
29636 +- atomic_inc(&md_event_count);
29637 ++ atomic_inc_unchecked(&md_event_count);
29638 + wake_up(&md_event_waiters);
29639 + }
29640 +
29641 +@@ -1531,7 +1531,7 @@ static int super_1_load(mdk_rdev_t *rdev
29642 +
29643 + rdev->preferred_minor = 0xffff;
29644 + rdev->data_offset = le64_to_cpu(sb->data_offset);
29645 +- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29646 ++ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
29647 +
29648 + rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
29649 + bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
29650 +@@ -1748,7 +1748,7 @@ static void super_1_sync(mddev_t *mddev,
29651 + else
29652 + sb->resync_offset = cpu_to_le64(0);
29653 +
29654 +- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
29655 ++ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
29656 +
29657 + sb->raid_disks = cpu_to_le32(mddev->raid_disks);
29658 + sb->size = cpu_to_le64(mddev->dev_sectors);
29659 +@@ -2643,7 +2643,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
29660 + static ssize_t
29661 + errors_show(mdk_rdev_t *rdev, char *page)
29662 + {
29663 +- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
29664 ++ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
29665 + }
29666 +
29667 + static ssize_t
29668 +@@ -2652,7 +2652,7 @@ errors_store(mdk_rdev_t *rdev, const cha
29669 + char *e;
29670 + unsigned long n = simple_strtoul(buf, &e, 10);
29671 + if (*buf && (*e == 0 || *e == '\n')) {
29672 +- atomic_set(&rdev->corrected_errors, n);
29673 ++ atomic_set_unchecked(&rdev->corrected_errors, n);
29674 + return len;
29675 + }
29676 + return -EINVAL;
29677 +@@ -3042,8 +3042,8 @@ int md_rdev_init(mdk_rdev_t *rdev)
29678 + rdev->sb_loaded = 0;
29679 + rdev->bb_page = NULL;
29680 + atomic_set(&rdev->nr_pending, 0);
29681 +- atomic_set(&rdev->read_errors, 0);
29682 +- atomic_set(&rdev->corrected_errors, 0);
29683 ++ atomic_set_unchecked(&rdev->read_errors, 0);
29684 ++ atomic_set_unchecked(&rdev->corrected_errors, 0);
29685 +
29686 + INIT_LIST_HEAD(&rdev->same_set);
29687 + init_waitqueue_head(&rdev->blocked_wait);
29688 +@@ -6667,7 +6667,7 @@ static int md_seq_show(struct seq_file *
29689 +
29690 + spin_unlock(&pers_lock);
29691 + seq_printf(seq, "\n");
29692 +- seq->poll_event = atomic_read(&md_event_count);
29693 ++ seq->poll_event = atomic_read_unchecked(&md_event_count);
29694 + return 0;
29695 + }
29696 + if (v == (void*)2) {
29697 +@@ -6756,7 +6756,7 @@ static int md_seq_show(struct seq_file *
29698 + chunk_kb ? "KB" : "B");
29699 + if (bitmap->file) {
29700 + seq_printf(seq, ", file: ");
29701 +- seq_path(seq, &bitmap->file->f_path, " \t\n");
29702 ++ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
29703 + }
29704 +
29705 + seq_printf(seq, "\n");
29706 +@@ -6787,7 +6787,7 @@ static int md_seq_open(struct inode *ino
29707 + return error;
29708 +
29709 + seq = file->private_data;
29710 +- seq->poll_event = atomic_read(&md_event_count);
29711 ++ seq->poll_event = atomic_read_unchecked(&md_event_count);
29712 + return error;
29713 + }
29714 +
29715 +@@ -6801,7 +6801,7 @@ static unsigned int mdstat_poll(struct f
29716 + /* always allow read */
29717 + mask = POLLIN | POLLRDNORM;
29718 +
29719 +- if (seq->poll_event != atomic_read(&md_event_count))
29720 ++ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
29721 + mask |= POLLERR | POLLPRI;
29722 + return mask;
29723 + }
29724 +@@ -6845,7 +6845,7 @@ static int is_mddev_idle(mddev_t *mddev,
29725 + struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
29726 + curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
29727 + (int)part_stat_read(&disk->part0, sectors[1]) -
29728 +- atomic_read(&disk->sync_io);
29729 ++ atomic_read_unchecked(&disk->sync_io);
29730 + /* sync IO will cause sync_io to increase before the disk_stats
29731 + * as sync_io is counted when a request starts, and
29732 + * disk_stats is counted when it completes.
29733 +diff -urNp linux-3.1.1/drivers/md/md.h linux-3.1.1/drivers/md/md.h
29734 +--- linux-3.1.1/drivers/md/md.h 2011-11-11 15:19:27.000000000 -0500
29735 ++++ linux-3.1.1/drivers/md/md.h 2011-11-16 18:39:07.000000000 -0500
29736 +@@ -124,13 +124,13 @@ struct mdk_rdev_s
29737 + * only maintained for arrays that
29738 + * support hot removal
29739 + */
29740 +- atomic_t read_errors; /* number of consecutive read errors that
29741 ++ atomic_unchecked_t read_errors; /* number of consecutive read errors that
29742 + * we have tried to ignore.
29743 + */
29744 + struct timespec last_read_error; /* monotonic time since our
29745 + * last read error
29746 + */
29747 +- atomic_t corrected_errors; /* number of corrected read errors,
29748 ++ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
29749 + * for reporting to userspace and storing
29750 + * in superblock.
29751 + */
29752 +@@ -415,7 +415,7 @@ static inline void rdev_dec_pending(mdk_
29753 +
29754 + static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
29755 + {
29756 +- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29757 ++ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
29758 + }
29759 +
29760 + struct mdk_personality
29761 +diff -urNp linux-3.1.1/drivers/md/raid10.c linux-3.1.1/drivers/md/raid10.c
29762 +--- linux-3.1.1/drivers/md/raid10.c 2011-11-11 15:19:27.000000000 -0500
29763 ++++ linux-3.1.1/drivers/md/raid10.c 2011-11-16 18:39:07.000000000 -0500
29764 +@@ -1423,7 +1423,7 @@ static void end_sync_read(struct bio *bi
29765 + /* The write handler will notice the lack of
29766 + * R10BIO_Uptodate and record any errors etc
29767 + */
29768 +- atomic_add(r10_bio->sectors,
29769 ++ atomic_add_unchecked(r10_bio->sectors,
29770 + &conf->mirrors[d].rdev->corrected_errors);
29771 +
29772 + /* for reconstruct, we always reschedule after a read.
29773 +@@ -1723,7 +1723,7 @@ static void check_decay_read_errors(mdde
29774 + {
29775 + struct timespec cur_time_mon;
29776 + unsigned long hours_since_last;
29777 +- unsigned int read_errors = atomic_read(&rdev->read_errors);
29778 ++ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
29779 +
29780 + ktime_get_ts(&cur_time_mon);
29781 +
29782 +@@ -1745,9 +1745,9 @@ static void check_decay_read_errors(mdde
29783 + * overflowing the shift of read_errors by hours_since_last.
29784 + */
29785 + if (hours_since_last >= 8 * sizeof(read_errors))
29786 +- atomic_set(&rdev->read_errors, 0);
29787 ++ atomic_set_unchecked(&rdev->read_errors, 0);
29788 + else
29789 +- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
29790 ++ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
29791 + }
29792 +
29793 + static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
29794 +@@ -1797,8 +1797,8 @@ static void fix_read_error(conf_t *conf,
29795 + return;
29796 +
29797 + check_decay_read_errors(mddev, rdev);
29798 +- atomic_inc(&rdev->read_errors);
29799 +- if (atomic_read(&rdev->read_errors) > max_read_errors) {
29800 ++ atomic_inc_unchecked(&rdev->read_errors);
29801 ++ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
29802 + char b[BDEVNAME_SIZE];
29803 + bdevname(rdev->bdev, b);
29804 +
29805 +@@ -1806,7 +1806,7 @@ static void fix_read_error(conf_t *conf,
29806 + "md/raid10:%s: %s: Raid device exceeded "
29807 + "read_error threshold [cur %d:max %d]\n",
29808 + mdname(mddev), b,
29809 +- atomic_read(&rdev->read_errors), max_read_errors);
29810 ++ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
29811 + printk(KERN_NOTICE
29812 + "md/raid10:%s: %s: Failing raid device\n",
29813 + mdname(mddev), b);
29814 +@@ -1951,7 +1951,7 @@ static void fix_read_error(conf_t *conf,
29815 + (unsigned long long)(
29816 + sect + rdev->data_offset),
29817 + bdevname(rdev->bdev, b));
29818 +- atomic_add(s, &rdev->corrected_errors);
29819 ++ atomic_add_unchecked(s, &rdev->corrected_errors);
29820 + }
29821 +
29822 + rdev_dec_pending(rdev, mddev);
29823 +diff -urNp linux-3.1.1/drivers/md/raid1.c linux-3.1.1/drivers/md/raid1.c
29824 +--- linux-3.1.1/drivers/md/raid1.c 2011-11-11 15:19:27.000000000 -0500
29825 ++++ linux-3.1.1/drivers/md/raid1.c 2011-11-16 18:39:07.000000000 -0500
29826 +@@ -1541,7 +1541,7 @@ static int fix_sync_read_error(r1bio_t *
29827 + if (r1_sync_page_io(rdev, sect, s,
29828 + bio->bi_io_vec[idx].bv_page,
29829 + READ) != 0)
29830 +- atomic_add(s, &rdev->corrected_errors);
29831 ++ atomic_add_unchecked(s, &rdev->corrected_errors);
29832 + }
29833 + sectors -= s;
29834 + sect += s;
29835 +@@ -1754,7 +1754,7 @@ static void fix_read_error(conf_t *conf,
29836 + test_bit(In_sync, &rdev->flags)) {
29837 + if (r1_sync_page_io(rdev, sect, s,
29838 + conf->tmppage, READ)) {
29839 +- atomic_add(s, &rdev->corrected_errors);
29840 ++ atomic_add_unchecked(s, &rdev->corrected_errors);
29841 + printk(KERN_INFO
29842 + "md/raid1:%s: read error corrected "
29843 + "(%d sectors at %llu on %s)\n",
29844 +diff -urNp linux-3.1.1/drivers/md/raid5.c linux-3.1.1/drivers/md/raid5.c
29845 +--- linux-3.1.1/drivers/md/raid5.c 2011-11-11 15:19:27.000000000 -0500
29846 ++++ linux-3.1.1/drivers/md/raid5.c 2011-11-16 18:40:10.000000000 -0500
29847 +@@ -1616,19 +1616,19 @@ static void raid5_end_read_request(struc
29848 + (unsigned long long)(sh->sector
29849 + + rdev->data_offset),
29850 + bdevname(rdev->bdev, b));
29851 +- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
29852 ++ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
29853 + clear_bit(R5_ReadError, &sh->dev[i].flags);
29854 + clear_bit(R5_ReWrite, &sh->dev[i].flags);
29855 + }
29856 +- if (atomic_read(&conf->disks[i].rdev->read_errors))
29857 +- atomic_set(&conf->disks[i].rdev->read_errors, 0);
29858 ++ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
29859 ++ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
29860 + } else {
29861 + const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
29862 + int retry = 0;
29863 + rdev = conf->disks[i].rdev;
29864 +
29865 + clear_bit(R5_UPTODATE, &sh->dev[i].flags);
29866 +- atomic_inc(&rdev->read_errors);
29867 ++ atomic_inc_unchecked(&rdev->read_errors);
29868 + if (conf->mddev->degraded >= conf->max_degraded)
29869 + printk_ratelimited(
29870 + KERN_WARNING
29871 +@@ -1648,7 +1648,7 @@ static void raid5_end_read_request(struc
29872 + (unsigned long long)(sh->sector
29873 + + rdev->data_offset),
29874 + bdn);
29875 +- else if (atomic_read(&rdev->read_errors)
29876 ++ else if (atomic_read_unchecked(&rdev->read_errors)
29877 + > conf->max_nr_stripes)
29878 + printk(KERN_WARNING
29879 + "md/raid:%s: Too many read errors, failing device %s.\n",
29880 +@@ -1978,6 +1978,7 @@ static sector_t compute_blocknr(struct s
29881 + sector_t r_sector;
29882 + struct stripe_head sh2;
29883 +
29884 ++ pax_track_stack();
29885 +
29886 + chunk_offset = sector_div(new_sector, sectors_per_chunk);
29887 + stripe = new_sector;
29888 +diff -urNp linux-3.1.1/drivers/media/common/saa7146_hlp.c linux-3.1.1/drivers/media/common/saa7146_hlp.c
29889 +--- linux-3.1.1/drivers/media/common/saa7146_hlp.c 2011-11-11 15:19:27.000000000 -0500
29890 ++++ linux-3.1.1/drivers/media/common/saa7146_hlp.c 2011-11-16 18:40:10.000000000 -0500
29891 +@@ -353,6 +353,8 @@ static void calculate_clipping_registers
29892 +
29893 + int x[32], y[32], w[32], h[32];
29894 +
29895 ++ pax_track_stack();
29896 ++
29897 + /* clear out memory */
29898 + memset(&line_list[0], 0x00, sizeof(u32)*32);
29899 + memset(&pixel_list[0], 0x00, sizeof(u32)*32);
29900 +diff -urNp linux-3.1.1/drivers/media/dvb/ddbridge/ddbridge-core.c linux-3.1.1/drivers/media/dvb/ddbridge/ddbridge-core.c
29901 +--- linux-3.1.1/drivers/media/dvb/ddbridge/ddbridge-core.c 2011-11-11 15:19:27.000000000 -0500
29902 ++++ linux-3.1.1/drivers/media/dvb/ddbridge/ddbridge-core.c 2011-11-16 18:39:07.000000000 -0500
29903 +@@ -1675,7 +1675,7 @@ static struct ddb_info ddb_v6 = {
29904 + .subvendor = _subvend, .subdevice = _subdev, \
29905 + .driver_data = (unsigned long)&_driverdata }
29906 +
29907 +-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
29908 ++static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
29909 + DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
29910 + DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
29911 + DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
29912 +diff -urNp linux-3.1.1/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-3.1.1/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
29913 +--- linux-3.1.1/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-11-11 15:19:27.000000000 -0500
29914 ++++ linux-3.1.1/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-11-16 18:40:10.000000000 -0500
29915 +@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
29916 + u8 buf[HOST_LINK_BUF_SIZE];
29917 + int i;
29918 +
29919 ++ pax_track_stack();
29920 ++
29921 + dprintk("%s\n", __func__);
29922 +
29923 + /* check if we have space for a link buf in the rx_buffer */
29924 +@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
29925 + unsigned long timeout;
29926 + int written;
29927 +
29928 ++ pax_track_stack();
29929 ++
29930 + dprintk("%s\n", __func__);
29931 +
29932 + /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
29933 +diff -urNp linux-3.1.1/drivers/media/dvb/dvb-core/dvb_demux.h linux-3.1.1/drivers/media/dvb/dvb-core/dvb_demux.h
29934 +--- linux-3.1.1/drivers/media/dvb/dvb-core/dvb_demux.h 2011-11-11 15:19:27.000000000 -0500
29935 ++++ linux-3.1.1/drivers/media/dvb/dvb-core/dvb_demux.h 2011-11-17 18:34:32.000000000 -0500
29936 +@@ -73,7 +73,7 @@ struct dvb_demux_feed {
29937 + union {
29938 + dmx_ts_cb ts;
29939 + dmx_section_cb sec;
29940 +- } cb;
29941 ++ } __no_const cb;
29942 +
29943 + struct dvb_demux *demux;
29944 + void *priv;
29945 +diff -urNp linux-3.1.1/drivers/media/dvb/dvb-core/dvbdev.c linux-3.1.1/drivers/media/dvb/dvb-core/dvbdev.c
29946 +--- linux-3.1.1/drivers/media/dvb/dvb-core/dvbdev.c 2011-11-11 15:19:27.000000000 -0500
29947 ++++ linux-3.1.1/drivers/media/dvb/dvb-core/dvbdev.c 2011-11-16 18:39:07.000000000 -0500
29948 +@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
29949 + const struct dvb_device *template, void *priv, int type)
29950 + {
29951 + struct dvb_device *dvbdev;
29952 +- struct file_operations *dvbdevfops;
29953 ++ file_operations_no_const *dvbdevfops;
29954 + struct device *clsdev;
29955 + int minor;
29956 + int id;
29957 +diff -urNp linux-3.1.1/drivers/media/dvb/dvb-usb/cxusb.c linux-3.1.1/drivers/media/dvb/dvb-usb/cxusb.c
29958 +--- linux-3.1.1/drivers/media/dvb/dvb-usb/cxusb.c 2011-11-11 15:19:27.000000000 -0500
29959 ++++ linux-3.1.1/drivers/media/dvb/dvb-usb/cxusb.c 2011-11-16 18:39:07.000000000 -0500
29960 +@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
29961 + struct dib0700_adapter_state {
29962 + int (*set_param_save) (struct dvb_frontend *,
29963 + struct dvb_frontend_parameters *);
29964 +-};
29965 ++} __no_const;
29966 +
29967 + static int dib7070_set_param_override(struct dvb_frontend *fe,
29968 + struct dvb_frontend_parameters *fep)
29969 +diff -urNp linux-3.1.1/drivers/media/dvb/dvb-usb/dib0700_core.c linux-3.1.1/drivers/media/dvb/dvb-usb/dib0700_core.c
29970 +--- linux-3.1.1/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-11-11 15:19:27.000000000 -0500
29971 ++++ linux-3.1.1/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-11-16 18:40:10.000000000 -0500
29972 +@@ -478,6 +478,8 @@ int dib0700_download_firmware(struct usb
29973 + if (!buf)
29974 + return -ENOMEM;
29975 +
29976 ++ pax_track_stack();
29977 ++
29978 + while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
29979 + deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
29980 + hx.addr, hx.len, hx.chk);
29981 +diff -urNp linux-3.1.1/drivers/media/dvb/dvb-usb/dw2102.c linux-3.1.1/drivers/media/dvb/dvb-usb/dw2102.c
29982 +--- linux-3.1.1/drivers/media/dvb/dvb-usb/dw2102.c 2011-11-11 15:19:27.000000000 -0500
29983 ++++ linux-3.1.1/drivers/media/dvb/dvb-usb/dw2102.c 2011-11-16 18:39:07.000000000 -0500
29984 +@@ -95,7 +95,7 @@ struct su3000_state {
29985 +
29986 + struct s6x0_state {
29987 + int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
29988 +-};
29989 ++} __no_const;
29990 +
29991 + /* debug */
29992 + static int dvb_usb_dw2102_debug;
29993 +diff -urNp linux-3.1.1/drivers/media/dvb/dvb-usb/lmedm04.c linux-3.1.1/drivers/media/dvb/dvb-usb/lmedm04.c
29994 +--- linux-3.1.1/drivers/media/dvb/dvb-usb/lmedm04.c 2011-11-11 15:19:27.000000000 -0500
29995 ++++ linux-3.1.1/drivers/media/dvb/dvb-usb/lmedm04.c 2011-11-16 18:40:10.000000000 -0500
29996 +@@ -742,6 +742,7 @@ static int lme2510_download_firmware(str
29997 + usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
29998 + 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
29999 +
30000 ++ pax_track_stack();
30001 +
30002 + data[0] = 0x8a;
30003 + len_in = 1;
30004 +@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_dev
30005 + int ret = 0, len_in;
30006 + u8 data[512] = {0};
30007 +
30008 ++ pax_track_stack();
30009 ++
30010 + data[0] = 0x0a;
30011 + len_in = 1;
30012 + info("FRM Firmware Cold Reset");
30013 +diff -urNp linux-3.1.1/drivers/media/dvb/frontends/dib3000.h linux-3.1.1/drivers/media/dvb/frontends/dib3000.h
30014 +--- linux-3.1.1/drivers/media/dvb/frontends/dib3000.h 2011-11-11 15:19:27.000000000 -0500
30015 ++++ linux-3.1.1/drivers/media/dvb/frontends/dib3000.h 2011-11-17 18:38:05.000000000 -0500
30016 +@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
30017 + int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
30018 + int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
30019 + int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
30020 +-};
30021 ++} __no_const;
30022 +
30023 + #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
30024 + extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
30025 +diff -urNp linux-3.1.1/drivers/media/dvb/frontends/mb86a16.c linux-3.1.1/drivers/media/dvb/frontends/mb86a16.c
30026 +--- linux-3.1.1/drivers/media/dvb/frontends/mb86a16.c 2011-11-11 15:19:27.000000000 -0500
30027 ++++ linux-3.1.1/drivers/media/dvb/frontends/mb86a16.c 2011-11-16 18:40:10.000000000 -0500
30028 +@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
30029 + int ret = -1;
30030 + int sync;
30031 +
30032 ++ pax_track_stack();
30033 ++
30034 + dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
30035 +
30036 + fcp = 3000;
30037 +diff -urNp linux-3.1.1/drivers/media/dvb/frontends/or51211.c linux-3.1.1/drivers/media/dvb/frontends/or51211.c
30038 +--- linux-3.1.1/drivers/media/dvb/frontends/or51211.c 2011-11-11 15:19:27.000000000 -0500
30039 ++++ linux-3.1.1/drivers/media/dvb/frontends/or51211.c 2011-11-16 18:40:10.000000000 -0500
30040 +@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
30041 + u8 tudata[585];
30042 + int i;
30043 +
30044 ++ pax_track_stack();
30045 ++
30046 + dprintk("Firmware is %zd bytes\n",fw->size);
30047 +
30048 + /* Get eprom data */
30049 +diff -urNp linux-3.1.1/drivers/media/dvb/ngene/ngene-cards.c linux-3.1.1/drivers/media/dvb/ngene/ngene-cards.c
30050 +--- linux-3.1.1/drivers/media/dvb/ngene/ngene-cards.c 2011-11-11 15:19:27.000000000 -0500
30051 ++++ linux-3.1.1/drivers/media/dvb/ngene/ngene-cards.c 2011-11-16 18:39:07.000000000 -0500
30052 +@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780
30053 +
30054 + /****************************************************************************/
30055 +
30056 +-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
30057 ++static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
30058 + NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
30059 + NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
30060 + NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
30061 +diff -urNp linux-3.1.1/drivers/media/radio/radio-cadet.c linux-3.1.1/drivers/media/radio/radio-cadet.c
30062 +--- linux-3.1.1/drivers/media/radio/radio-cadet.c 2011-11-11 15:19:27.000000000 -0500
30063 ++++ linux-3.1.1/drivers/media/radio/radio-cadet.c 2011-11-16 18:39:07.000000000 -0500
30064 +@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *f
30065 + unsigned char readbuf[RDS_BUFFER];
30066 + int i = 0;
30067 +
30068 ++ if (count > RDS_BUFFER)
30069 ++ return -EFAULT;
30070 + mutex_lock(&dev->lock);
30071 + if (dev->rdsstat == 0) {
30072 + dev->rdsstat = 1;
30073 +diff -urNp linux-3.1.1/drivers/media/video/au0828/au0828.h linux-3.1.1/drivers/media/video/au0828/au0828.h
30074 +--- linux-3.1.1/drivers/media/video/au0828/au0828.h 2011-11-11 15:19:27.000000000 -0500
30075 ++++ linux-3.1.1/drivers/media/video/au0828/au0828.h 2011-11-16 18:39:07.000000000 -0500
30076 +@@ -191,7 +191,7 @@ struct au0828_dev {
30077 +
30078 + /* I2C */
30079 + struct i2c_adapter i2c_adap;
30080 +- struct i2c_algorithm i2c_algo;
30081 ++ i2c_algorithm_no_const i2c_algo;
30082 + struct i2c_client i2c_client;
30083 + u32 i2c_rc;
30084 +
30085 +diff -urNp linux-3.1.1/drivers/media/video/cx18/cx18-driver.c linux-3.1.1/drivers/media/video/cx18/cx18-driver.c
30086 +--- linux-3.1.1/drivers/media/video/cx18/cx18-driver.c 2011-11-11 15:19:27.000000000 -0500
30087 ++++ linux-3.1.1/drivers/media/video/cx18/cx18-driver.c 2011-11-16 18:40:10.000000000 -0500
30088 +@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
30089 + struct i2c_client c;
30090 + u8 eedata[256];
30091 +
30092 ++ pax_track_stack();
30093 ++
30094 + memset(&c, 0, sizeof(c));
30095 + strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
30096 + c.adapter = &cx->i2c_adap[0];
30097 +diff -urNp linux-3.1.1/drivers/media/video/cx23885/cx23885-input.c linux-3.1.1/drivers/media/video/cx23885/cx23885-input.c
30098 +--- linux-3.1.1/drivers/media/video/cx23885/cx23885-input.c 2011-11-11 15:19:27.000000000 -0500
30099 ++++ linux-3.1.1/drivers/media/video/cx23885/cx23885-input.c 2011-11-16 18:40:10.000000000 -0500
30100 +@@ -53,6 +53,8 @@ static void cx23885_input_process_measur
30101 + bool handle = false;
30102 + struct ir_raw_event ir_core_event[64];
30103 +
30104 ++ pax_track_stack();
30105 ++
30106 + do {
30107 + num = 0;
30108 + v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
30109 +diff -urNp linux-3.1.1/drivers/media/video/cx88/cx88-alsa.c linux-3.1.1/drivers/media/video/cx88/cx88-alsa.c
30110 +--- linux-3.1.1/drivers/media/video/cx88/cx88-alsa.c 2011-11-11 15:19:27.000000000 -0500
30111 ++++ linux-3.1.1/drivers/media/video/cx88/cx88-alsa.c 2011-11-16 18:39:07.000000000 -0500
30112 +@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_
30113 + * Only boards with eeprom and byte 1 at eeprom=1 have it
30114 + */
30115 +
30116 +-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
30117 ++static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
30118 + {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
30119 + {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
30120 + {0, }
30121 +diff -urNp linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
30122 +--- linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-11-11 15:19:27.000000000 -0500
30123 ++++ linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-11-16 18:40:10.000000000 -0500
30124 +@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
30125 + u8 *eeprom;
30126 + struct tveeprom tvdata;
30127 +
30128 ++ pax_track_stack();
30129 ++
30130 + memset(&tvdata,0,sizeof(tvdata));
30131 +
30132 + eeprom = pvr2_eeprom_fetch(hdw);
30133 +diff -urNp linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
30134 +--- linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-11-11 15:19:27.000000000 -0500
30135 ++++ linux-3.1.1/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-11-16 18:39:07.000000000 -0500
30136 +@@ -196,7 +196,7 @@ struct pvr2_hdw {
30137 +
30138 + /* I2C stuff */
30139 + struct i2c_adapter i2c_adap;
30140 +- struct i2c_algorithm i2c_algo;
30141 ++ i2c_algorithm_no_const i2c_algo;
30142 + pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
30143 + int i2c_cx25840_hack_state;
30144 + int i2c_linked;
30145 +diff -urNp linux-3.1.1/drivers/media/video/saa7134/saa6752hs.c linux-3.1.1/drivers/media/video/saa7134/saa6752hs.c
30146 +--- linux-3.1.1/drivers/media/video/saa7134/saa6752hs.c 2011-11-11 15:19:27.000000000 -0500
30147 ++++ linux-3.1.1/drivers/media/video/saa7134/saa6752hs.c 2011-11-16 18:40:10.000000000 -0500
30148 +@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
30149 + unsigned char localPAT[256];
30150 + unsigned char localPMT[256];
30151 +
30152 ++ pax_track_stack();
30153 ++
30154 + /* Set video format - must be done first as it resets other settings */
30155 + set_reg8(client, 0x41, h->video_format);
30156 +
30157 +diff -urNp linux-3.1.1/drivers/media/video/saa7164/saa7164-cmd.c linux-3.1.1/drivers/media/video/saa7164/saa7164-cmd.c
30158 +--- linux-3.1.1/drivers/media/video/saa7164/saa7164-cmd.c 2011-11-11 15:19:27.000000000 -0500
30159 ++++ linux-3.1.1/drivers/media/video/saa7164/saa7164-cmd.c 2011-11-16 18:40:10.000000000 -0500
30160 +@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
30161 + u8 tmp[512];
30162 + dprintk(DBGLVL_CMD, "%s()\n", __func__);
30163 +
30164 ++ pax_track_stack();
30165 ++
30166 + /* While any outstand message on the bus exists... */
30167 + do {
30168 +
30169 +@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
30170 + u8 tmp[512];
30171 + dprintk(DBGLVL_CMD, "%s()\n", __func__);
30172 +
30173 ++ pax_track_stack();
30174 ++
30175 + while (loop) {
30176 +
30177 + struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
30178 +diff -urNp linux-3.1.1/drivers/media/video/timblogiw.c linux-3.1.1/drivers/media/video/timblogiw.c
30179 +--- linux-3.1.1/drivers/media/video/timblogiw.c 2011-11-11 15:19:27.000000000 -0500
30180 ++++ linux-3.1.1/drivers/media/video/timblogiw.c 2011-11-17 18:36:32.000000000 -0500
30181 +@@ -744,7 +744,7 @@ static int timblogiw_mmap(struct file *f
30182 +
30183 + /* Platform device functions */
30184 +
30185 +-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
30186 ++static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
30187 + .vidioc_querycap = timblogiw_querycap,
30188 + .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
30189 + .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
30190 +@@ -766,7 +766,7 @@ static __devinitconst struct v4l2_ioctl_
30191 + .vidioc_enum_framesizes = timblogiw_enum_framesizes,
30192 + };
30193 +
30194 +-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
30195 ++static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
30196 + .owner = THIS_MODULE,
30197 + .open = timblogiw_open,
30198 + .release = timblogiw_close,
30199 +diff -urNp linux-3.1.1/drivers/media/video/usbvision/usbvision-core.c linux-3.1.1/drivers/media/video/usbvision/usbvision-core.c
30200 +--- linux-3.1.1/drivers/media/video/usbvision/usbvision-core.c 2011-11-11 15:19:27.000000000 -0500
30201 ++++ linux-3.1.1/drivers/media/video/usbvision/usbvision-core.c 2011-11-16 18:40:10.000000000 -0500
30202 +@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_
30203 + unsigned char rv, gv, bv;
30204 + static unsigned char *Y, *U, *V;
30205 +
30206 ++ pax_track_stack();
30207 ++
30208 + frame = usbvision->cur_frame;
30209 + image_size = frame->frmwidth * frame->frmheight;
30210 + if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
30211 +diff -urNp linux-3.1.1/drivers/media/video/videobuf-dma-sg.c linux-3.1.1/drivers/media/video/videobuf-dma-sg.c
30212 +--- linux-3.1.1/drivers/media/video/videobuf-dma-sg.c 2011-11-11 15:19:27.000000000 -0500
30213 ++++ linux-3.1.1/drivers/media/video/videobuf-dma-sg.c 2011-11-16 18:40:10.000000000 -0500
30214 +@@ -607,6 +607,8 @@ void *videobuf_sg_alloc(size_t size)
30215 + {
30216 + struct videobuf_queue q;
30217 +
30218 ++ pax_track_stack();
30219 ++
30220 + /* Required to make generic handler to call __videobuf_alloc */
30221 + q.int_ops = &sg_ops;
30222 +
30223 +diff -urNp linux-3.1.1/drivers/message/fusion/mptbase.c linux-3.1.1/drivers/message/fusion/mptbase.c
30224 +--- linux-3.1.1/drivers/message/fusion/mptbase.c 2011-11-11 15:19:27.000000000 -0500
30225 ++++ linux-3.1.1/drivers/message/fusion/mptbase.c 2011-11-16 18:40:10.000000000 -0500
30226 +@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct
30227 + seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
30228 + seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
30229 +
30230 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
30231 ++ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
30232 ++#else
30233 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
30234 + (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
30235 ++#endif
30236 ++
30237 + /*
30238 + * Rounding UP to nearest 4-kB boundary here...
30239 + */
30240 +diff -urNp linux-3.1.1/drivers/message/fusion/mptsas.c linux-3.1.1/drivers/message/fusion/mptsas.c
30241 +--- linux-3.1.1/drivers/message/fusion/mptsas.c 2011-11-11 15:19:27.000000000 -0500
30242 ++++ linux-3.1.1/drivers/message/fusion/mptsas.c 2011-11-16 18:39:07.000000000 -0500
30243 +@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
30244 + return 0;
30245 + }
30246 +
30247 ++static inline void
30248 ++mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
30249 ++{
30250 ++ if (phy_info->port_details) {
30251 ++ phy_info->port_details->rphy = rphy;
30252 ++ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
30253 ++ ioc->name, rphy));
30254 ++ }
30255 ++
30256 ++ if (rphy) {
30257 ++ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
30258 ++ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
30259 ++ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
30260 ++ ioc->name, rphy, rphy->dev.release));
30261 ++ }
30262 ++}
30263 ++
30264 + /* no mutex */
30265 + static void
30266 + mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
30267 +@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
30268 + return NULL;
30269 + }
30270 +
30271 +-static inline void
30272 +-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
30273 +-{
30274 +- if (phy_info->port_details) {
30275 +- phy_info->port_details->rphy = rphy;
30276 +- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
30277 +- ioc->name, rphy));
30278 +- }
30279 +-
30280 +- if (rphy) {
30281 +- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
30282 +- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
30283 +- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
30284 +- ioc->name, rphy, rphy->dev.release));
30285 +- }
30286 +-}
30287 +-
30288 + static inline struct sas_port *
30289 + mptsas_get_port(struct mptsas_phyinfo *phy_info)
30290 + {
30291 +diff -urNp linux-3.1.1/drivers/message/fusion/mptscsih.c linux-3.1.1/drivers/message/fusion/mptscsih.c
30292 +--- linux-3.1.1/drivers/message/fusion/mptscsih.c 2011-11-11 15:19:27.000000000 -0500
30293 ++++ linux-3.1.1/drivers/message/fusion/mptscsih.c 2011-11-16 18:39:07.000000000 -0500
30294 +@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
30295 +
30296 + h = shost_priv(SChost);
30297 +
30298 +- if (h) {
30299 +- if (h->info_kbuf == NULL)
30300 +- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
30301 +- return h->info_kbuf;
30302 +- h->info_kbuf[0] = '\0';
30303 ++ if (!h)
30304 ++ return NULL;
30305 +
30306 +- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
30307 +- h->info_kbuf[size-1] = '\0';
30308 +- }
30309 ++ if (h->info_kbuf == NULL)
30310 ++ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
30311 ++ return h->info_kbuf;
30312 ++ h->info_kbuf[0] = '\0';
30313 ++
30314 ++ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
30315 ++ h->info_kbuf[size-1] = '\0';
30316 +
30317 + return h->info_kbuf;
30318 + }
30319 +diff -urNp linux-3.1.1/drivers/message/i2o/i2o_config.c linux-3.1.1/drivers/message/i2o/i2o_config.c
30320 +--- linux-3.1.1/drivers/message/i2o/i2o_config.c 2011-11-11 15:19:27.000000000 -0500
30321 ++++ linux-3.1.1/drivers/message/i2o/i2o_config.c 2011-11-16 18:40:10.000000000 -0500
30322 +@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
30323 + struct i2o_message *msg;
30324 + unsigned int iop;
30325 +
30326 ++ pax_track_stack();
30327 ++
30328 + if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
30329 + return -EFAULT;
30330 +
30331 +diff -urNp linux-3.1.1/drivers/message/i2o/i2o_proc.c linux-3.1.1/drivers/message/i2o/i2o_proc.c
30332 +--- linux-3.1.1/drivers/message/i2o/i2o_proc.c 2011-11-11 15:19:27.000000000 -0500
30333 ++++ linux-3.1.1/drivers/message/i2o/i2o_proc.c 2011-11-16 18:39:07.000000000 -0500
30334 +@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
30335 + "Array Controller Device"
30336 + };
30337 +
30338 +-static char *chtostr(u8 * chars, int n)
30339 +-{
30340 +- char tmp[256];
30341 +- tmp[0] = 0;
30342 +- return strncat(tmp, (char *)chars, n);
30343 +-}
30344 +-
30345 + static int i2o_report_query_status(struct seq_file *seq, int block_status,
30346 + char *group)
30347 + {
30348 +@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
30349 +
30350 + seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
30351 + seq_printf(seq, "%-#8x", ddm_table.module_id);
30352 +- seq_printf(seq, "%-29s",
30353 +- chtostr(ddm_table.module_name_version, 28));
30354 ++ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
30355 + seq_printf(seq, "%9d ", ddm_table.data_size);
30356 + seq_printf(seq, "%8d", ddm_table.code_size);
30357 +
30358 +@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
30359 +
30360 + seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
30361 + seq_printf(seq, "%-#8x", dst->module_id);
30362 +- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
30363 +- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
30364 ++ seq_printf(seq, "%-.28s", dst->module_name_version);
30365 ++ seq_printf(seq, "%-.8s", dst->date);
30366 + seq_printf(seq, "%8d ", dst->module_size);
30367 + seq_printf(seq, "%8d ", dst->mpb_size);
30368 + seq_printf(seq, "0x%04x", dst->module_flags);
30369 +@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
30370 + seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
30371 + seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
30372 + seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
30373 +- seq_printf(seq, "Vendor info : %s\n",
30374 +- chtostr((u8 *) (work32 + 2), 16));
30375 +- seq_printf(seq, "Product info : %s\n",
30376 +- chtostr((u8 *) (work32 + 6), 16));
30377 +- seq_printf(seq, "Description : %s\n",
30378 +- chtostr((u8 *) (work32 + 10), 16));
30379 +- seq_printf(seq, "Product rev. : %s\n",
30380 +- chtostr((u8 *) (work32 + 14), 8));
30381 ++ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
30382 ++ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
30383 ++ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
30384 ++ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
30385 +
30386 + seq_printf(seq, "Serial number : ");
30387 + print_serial_number(seq, (u8 *) (work32 + 16),
30388 +@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
30389 + }
30390 +
30391 + seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
30392 +- seq_printf(seq, "Module name : %s\n",
30393 +- chtostr(result.module_name, 24));
30394 +- seq_printf(seq, "Module revision : %s\n",
30395 +- chtostr(result.module_rev, 8));
30396 ++ seq_printf(seq, "Module name : %.24s\n", result.module_name);
30397 ++ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
30398 +
30399 + seq_printf(seq, "Serial number : ");
30400 + print_serial_number(seq, result.serial_number, sizeof(result) - 36);
30401 +@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
30402 + return 0;
30403 + }
30404 +
30405 +- seq_printf(seq, "Device name : %s\n",
30406 +- chtostr(result.device_name, 64));
30407 +- seq_printf(seq, "Service name : %s\n",
30408 +- chtostr(result.service_name, 64));
30409 +- seq_printf(seq, "Physical name : %s\n",
30410 +- chtostr(result.physical_location, 64));
30411 +- seq_printf(seq, "Instance number : %s\n",
30412 +- chtostr(result.instance_number, 4));
30413 ++ seq_printf(seq, "Device name : %.64s\n", result.device_name);
30414 ++ seq_printf(seq, "Service name : %.64s\n", result.service_name);
30415 ++ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
30416 ++ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
30417 +
30418 + return 0;
30419 + }
30420 +diff -urNp linux-3.1.1/drivers/message/i2o/iop.c linux-3.1.1/drivers/message/i2o/iop.c
30421 +--- linux-3.1.1/drivers/message/i2o/iop.c 2011-11-11 15:19:27.000000000 -0500
30422 ++++ linux-3.1.1/drivers/message/i2o/iop.c 2011-11-16 18:39:07.000000000 -0500
30423 +@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
30424 +
30425 + spin_lock_irqsave(&c->context_list_lock, flags);
30426 +
30427 +- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
30428 +- atomic_inc(&c->context_list_counter);
30429 ++ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
30430 ++ atomic_inc_unchecked(&c->context_list_counter);
30431 +
30432 +- entry->context = atomic_read(&c->context_list_counter);
30433 ++ entry->context = atomic_read_unchecked(&c->context_list_counter);
30434 +
30435 + list_add(&entry->list, &c->context_list);
30436 +
30437 +@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
30438 +
30439 + #if BITS_PER_LONG == 64
30440 + spin_lock_init(&c->context_list_lock);
30441 +- atomic_set(&c->context_list_counter, 0);
30442 ++ atomic_set_unchecked(&c->context_list_counter, 0);
30443 + INIT_LIST_HEAD(&c->context_list);
30444 + #endif
30445 +
30446 +diff -urNp linux-3.1.1/drivers/mfd/ab3100-core.c linux-3.1.1/drivers/mfd/ab3100-core.c
30447 +--- linux-3.1.1/drivers/mfd/ab3100-core.c 2011-11-11 15:19:27.000000000 -0500
30448 ++++ linux-3.1.1/drivers/mfd/ab3100-core.c 2011-11-16 18:39:07.000000000 -0500
30449 +@@ -809,7 +809,7 @@ struct ab_family_id {
30450 + char *name;
30451 + };
30452 +
30453 +-static const struct ab_family_id ids[] __devinitdata = {
30454 ++static const struct ab_family_id ids[] __devinitconst = {
30455 + /* AB3100 */
30456 + {
30457 + .id = 0xc0,
30458 +diff -urNp linux-3.1.1/drivers/mfd/abx500-core.c linux-3.1.1/drivers/mfd/abx500-core.c
30459 +--- linux-3.1.1/drivers/mfd/abx500-core.c 2011-11-11 15:19:27.000000000 -0500
30460 ++++ linux-3.1.1/drivers/mfd/abx500-core.c 2011-11-16 18:39:07.000000000 -0500
30461 +@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
30462 +
30463 + struct abx500_device_entry {
30464 + struct list_head list;
30465 +- struct abx500_ops ops;
30466 ++ abx500_ops_no_const ops;
30467 + struct device *dev;
30468 + };
30469 +
30470 +diff -urNp linux-3.1.1/drivers/mfd/janz-cmodio.c linux-3.1.1/drivers/mfd/janz-cmodio.c
30471 +--- linux-3.1.1/drivers/mfd/janz-cmodio.c 2011-11-11 15:19:27.000000000 -0500
30472 ++++ linux-3.1.1/drivers/mfd/janz-cmodio.c 2011-11-16 18:39:07.000000000 -0500
30473 +@@ -13,6 +13,7 @@
30474 +
30475 + #include <linux/kernel.h>
30476 + #include <linux/module.h>
30477 ++#include <linux/slab.h>
30478 + #include <linux/init.h>
30479 + #include <linux/pci.h>
30480 + #include <linux/interrupt.h>
30481 +diff -urNp linux-3.1.1/drivers/mfd/wm8350-i2c.c linux-3.1.1/drivers/mfd/wm8350-i2c.c
30482 +--- linux-3.1.1/drivers/mfd/wm8350-i2c.c 2011-11-11 15:19:27.000000000 -0500
30483 ++++ linux-3.1.1/drivers/mfd/wm8350-i2c.c 2011-11-16 18:40:10.000000000 -0500
30484 +@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
30485 + u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
30486 + int ret;
30487 +
30488 ++ pax_track_stack();
30489 ++
30490 + if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
30491 + return -EINVAL;
30492 +
30493 +diff -urNp linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.c
30494 +--- linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.c 2011-11-11 15:19:27.000000000 -0500
30495 ++++ linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.c 2011-11-16 18:39:07.000000000 -0500
30496 +@@ -437,7 +437,7 @@ static irqreturn_t lis302dl_interrupt(in
30497 + * the lid is closed. This leads to interrupts as soon as a little move
30498 + * is done.
30499 + */
30500 +- atomic_inc(&lis3_dev.count);
30501 ++ atomic_inc_unchecked(&lis3_dev.count);
30502 +
30503 + wake_up_interruptible(&lis3_dev.misc_wait);
30504 + kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30505 +@@ -520,7 +520,7 @@ static int lis3lv02d_misc_open(struct in
30506 + if (lis3_dev.pm_dev)
30507 + pm_runtime_get_sync(lis3_dev.pm_dev);
30508 +
30509 +- atomic_set(&lis3_dev.count, 0);
30510 ++ atomic_set_unchecked(&lis3_dev.count, 0);
30511 + return 0;
30512 + }
30513 +
30514 +@@ -547,7 +547,7 @@ static ssize_t lis3lv02d_misc_read(struc
30515 + add_wait_queue(&lis3_dev.misc_wait, &wait);
30516 + while (true) {
30517 + set_current_state(TASK_INTERRUPTIBLE);
30518 +- data = atomic_xchg(&lis3_dev.count, 0);
30519 ++ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30520 + if (data)
30521 + break;
30522 +
30523 +@@ -585,7 +585,7 @@ out:
30524 + static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30525 + {
30526 + poll_wait(file, &lis3_dev.misc_wait, wait);
30527 +- if (atomic_read(&lis3_dev.count))
30528 ++ if (atomic_read_unchecked(&lis3_dev.count))
30529 + return POLLIN | POLLRDNORM;
30530 + return 0;
30531 + }
30532 +diff -urNp linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.h
30533 +--- linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.h 2011-11-11 15:19:27.000000000 -0500
30534 ++++ linux-3.1.1/drivers/misc/lis3lv02d/lis3lv02d.h 2011-11-16 18:39:07.000000000 -0500
30535 +@@ -265,7 +265,7 @@ struct lis3lv02d {
30536 + struct input_polled_dev *idev; /* input device */
30537 + struct platform_device *pdev; /* platform device */
30538 + struct regulator_bulk_data regulators[2];
30539 +- atomic_t count; /* interrupt count after last read */
30540 ++ atomic_unchecked_t count; /* interrupt count after last read */
30541 + union axis_conversion ac; /* hw -> logical axis */
30542 + int mapped_btns[3];
30543 +
30544 +diff -urNp linux-3.1.1/drivers/misc/sgi-gru/gruhandles.c linux-3.1.1/drivers/misc/sgi-gru/gruhandles.c
30545 +--- linux-3.1.1/drivers/misc/sgi-gru/gruhandles.c 2011-11-11 15:19:27.000000000 -0500
30546 ++++ linux-3.1.1/drivers/misc/sgi-gru/gruhandles.c 2011-11-16 18:39:07.000000000 -0500
30547 +@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
30548 + unsigned long nsec;
30549 +
30550 + nsec = CLKS2NSEC(clks);
30551 +- atomic_long_inc(&mcs_op_statistics[op].count);
30552 +- atomic_long_add(nsec, &mcs_op_statistics[op].total);
30553 ++ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
30554 ++ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
30555 + if (mcs_op_statistics[op].max < nsec)
30556 + mcs_op_statistics[op].max = nsec;
30557 + }
30558 +diff -urNp linux-3.1.1/drivers/misc/sgi-gru/gruprocfs.c linux-3.1.1/drivers/misc/sgi-gru/gruprocfs.c
30559 +--- linux-3.1.1/drivers/misc/sgi-gru/gruprocfs.c 2011-11-11 15:19:27.000000000 -0500
30560 ++++ linux-3.1.1/drivers/misc/sgi-gru/gruprocfs.c 2011-11-16 18:39:07.000000000 -0500
30561 +@@ -32,9 +32,9 @@
30562 +
30563 + #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
30564 +
30565 +-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
30566 ++static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
30567 + {
30568 +- unsigned long val = atomic_long_read(v);
30569 ++ unsigned long val = atomic_long_read_unchecked(v);
30570 +
30571 + seq_printf(s, "%16lu %s\n", val, id);
30572 + }
30573 +@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
30574 +
30575 + seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
30576 + for (op = 0; op < mcsop_last; op++) {
30577 +- count = atomic_long_read(&mcs_op_statistics[op].count);
30578 +- total = atomic_long_read(&mcs_op_statistics[op].total);
30579 ++ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
30580 ++ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
30581 + max = mcs_op_statistics[op].max;
30582 + seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
30583 + count ? total / count : 0, max);
30584 +diff -urNp linux-3.1.1/drivers/misc/sgi-gru/grutables.h linux-3.1.1/drivers/misc/sgi-gru/grutables.h
30585 +--- linux-3.1.1/drivers/misc/sgi-gru/grutables.h 2011-11-11 15:19:27.000000000 -0500
30586 ++++ linux-3.1.1/drivers/misc/sgi-gru/grutables.h 2011-11-16 18:39:07.000000000 -0500
30587 +@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
30588 + * GRU statistics.
30589 + */
30590 + struct gru_stats_s {
30591 +- atomic_long_t vdata_alloc;
30592 +- atomic_long_t vdata_free;
30593 +- atomic_long_t gts_alloc;
30594 +- atomic_long_t gts_free;
30595 +- atomic_long_t gms_alloc;
30596 +- atomic_long_t gms_free;
30597 +- atomic_long_t gts_double_allocate;
30598 +- atomic_long_t assign_context;
30599 +- atomic_long_t assign_context_failed;
30600 +- atomic_long_t free_context;
30601 +- atomic_long_t load_user_context;
30602 +- atomic_long_t load_kernel_context;
30603 +- atomic_long_t lock_kernel_context;
30604 +- atomic_long_t unlock_kernel_context;
30605 +- atomic_long_t steal_user_context;
30606 +- atomic_long_t steal_kernel_context;
30607 +- atomic_long_t steal_context_failed;
30608 +- atomic_long_t nopfn;
30609 +- atomic_long_t asid_new;
30610 +- atomic_long_t asid_next;
30611 +- atomic_long_t asid_wrap;
30612 +- atomic_long_t asid_reuse;
30613 +- atomic_long_t intr;
30614 +- atomic_long_t intr_cbr;
30615 +- atomic_long_t intr_tfh;
30616 +- atomic_long_t intr_spurious;
30617 +- atomic_long_t intr_mm_lock_failed;
30618 +- atomic_long_t call_os;
30619 +- atomic_long_t call_os_wait_queue;
30620 +- atomic_long_t user_flush_tlb;
30621 +- atomic_long_t user_unload_context;
30622 +- atomic_long_t user_exception;
30623 +- atomic_long_t set_context_option;
30624 +- atomic_long_t check_context_retarget_intr;
30625 +- atomic_long_t check_context_unload;
30626 +- atomic_long_t tlb_dropin;
30627 +- atomic_long_t tlb_preload_page;
30628 +- atomic_long_t tlb_dropin_fail_no_asid;
30629 +- atomic_long_t tlb_dropin_fail_upm;
30630 +- atomic_long_t tlb_dropin_fail_invalid;
30631 +- atomic_long_t tlb_dropin_fail_range_active;
30632 +- atomic_long_t tlb_dropin_fail_idle;
30633 +- atomic_long_t tlb_dropin_fail_fmm;
30634 +- atomic_long_t tlb_dropin_fail_no_exception;
30635 +- atomic_long_t tfh_stale_on_fault;
30636 +- atomic_long_t mmu_invalidate_range;
30637 +- atomic_long_t mmu_invalidate_page;
30638 +- atomic_long_t flush_tlb;
30639 +- atomic_long_t flush_tlb_gru;
30640 +- atomic_long_t flush_tlb_gru_tgh;
30641 +- atomic_long_t flush_tlb_gru_zero_asid;
30642 +-
30643 +- atomic_long_t copy_gpa;
30644 +- atomic_long_t read_gpa;
30645 +-
30646 +- atomic_long_t mesq_receive;
30647 +- atomic_long_t mesq_receive_none;
30648 +- atomic_long_t mesq_send;
30649 +- atomic_long_t mesq_send_failed;
30650 +- atomic_long_t mesq_noop;
30651 +- atomic_long_t mesq_send_unexpected_error;
30652 +- atomic_long_t mesq_send_lb_overflow;
30653 +- atomic_long_t mesq_send_qlimit_reached;
30654 +- atomic_long_t mesq_send_amo_nacked;
30655 +- atomic_long_t mesq_send_put_nacked;
30656 +- atomic_long_t mesq_page_overflow;
30657 +- atomic_long_t mesq_qf_locked;
30658 +- atomic_long_t mesq_qf_noop_not_full;
30659 +- atomic_long_t mesq_qf_switch_head_failed;
30660 +- atomic_long_t mesq_qf_unexpected_error;
30661 +- atomic_long_t mesq_noop_unexpected_error;
30662 +- atomic_long_t mesq_noop_lb_overflow;
30663 +- atomic_long_t mesq_noop_qlimit_reached;
30664 +- atomic_long_t mesq_noop_amo_nacked;
30665 +- atomic_long_t mesq_noop_put_nacked;
30666 +- atomic_long_t mesq_noop_page_overflow;
30667 ++ atomic_long_unchecked_t vdata_alloc;
30668 ++ atomic_long_unchecked_t vdata_free;
30669 ++ atomic_long_unchecked_t gts_alloc;
30670 ++ atomic_long_unchecked_t gts_free;
30671 ++ atomic_long_unchecked_t gms_alloc;
30672 ++ atomic_long_unchecked_t gms_free;
30673 ++ atomic_long_unchecked_t gts_double_allocate;
30674 ++ atomic_long_unchecked_t assign_context;
30675 ++ atomic_long_unchecked_t assign_context_failed;
30676 ++ atomic_long_unchecked_t free_context;
30677 ++ atomic_long_unchecked_t load_user_context;
30678 ++ atomic_long_unchecked_t load_kernel_context;
30679 ++ atomic_long_unchecked_t lock_kernel_context;
30680 ++ atomic_long_unchecked_t unlock_kernel_context;
30681 ++ atomic_long_unchecked_t steal_user_context;
30682 ++ atomic_long_unchecked_t steal_kernel_context;
30683 ++ atomic_long_unchecked_t steal_context_failed;
30684 ++ atomic_long_unchecked_t nopfn;
30685 ++ atomic_long_unchecked_t asid_new;
30686 ++ atomic_long_unchecked_t asid_next;
30687 ++ atomic_long_unchecked_t asid_wrap;
30688 ++ atomic_long_unchecked_t asid_reuse;
30689 ++ atomic_long_unchecked_t intr;
30690 ++ atomic_long_unchecked_t intr_cbr;
30691 ++ atomic_long_unchecked_t intr_tfh;
30692 ++ atomic_long_unchecked_t intr_spurious;
30693 ++ atomic_long_unchecked_t intr_mm_lock_failed;
30694 ++ atomic_long_unchecked_t call_os;
30695 ++ atomic_long_unchecked_t call_os_wait_queue;
30696 ++ atomic_long_unchecked_t user_flush_tlb;
30697 ++ atomic_long_unchecked_t user_unload_context;
30698 ++ atomic_long_unchecked_t user_exception;
30699 ++ atomic_long_unchecked_t set_context_option;
30700 ++ atomic_long_unchecked_t check_context_retarget_intr;
30701 ++ atomic_long_unchecked_t check_context_unload;
30702 ++ atomic_long_unchecked_t tlb_dropin;
30703 ++ atomic_long_unchecked_t tlb_preload_page;
30704 ++ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
30705 ++ atomic_long_unchecked_t tlb_dropin_fail_upm;
30706 ++ atomic_long_unchecked_t tlb_dropin_fail_invalid;
30707 ++ atomic_long_unchecked_t tlb_dropin_fail_range_active;
30708 ++ atomic_long_unchecked_t tlb_dropin_fail_idle;
30709 ++ atomic_long_unchecked_t tlb_dropin_fail_fmm;
30710 ++ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
30711 ++ atomic_long_unchecked_t tfh_stale_on_fault;
30712 ++ atomic_long_unchecked_t mmu_invalidate_range;
30713 ++ atomic_long_unchecked_t mmu_invalidate_page;
30714 ++ atomic_long_unchecked_t flush_tlb;
30715 ++ atomic_long_unchecked_t flush_tlb_gru;
30716 ++ atomic_long_unchecked_t flush_tlb_gru_tgh;
30717 ++ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
30718 ++
30719 ++ atomic_long_unchecked_t copy_gpa;
30720 ++ atomic_long_unchecked_t read_gpa;
30721 ++
30722 ++ atomic_long_unchecked_t mesq_receive;
30723 ++ atomic_long_unchecked_t mesq_receive_none;
30724 ++ atomic_long_unchecked_t mesq_send;
30725 ++ atomic_long_unchecked_t mesq_send_failed;
30726 ++ atomic_long_unchecked_t mesq_noop;
30727 ++ atomic_long_unchecked_t mesq_send_unexpected_error;
30728 ++ atomic_long_unchecked_t mesq_send_lb_overflow;
30729 ++ atomic_long_unchecked_t mesq_send_qlimit_reached;
30730 ++ atomic_long_unchecked_t mesq_send_amo_nacked;
30731 ++ atomic_long_unchecked_t mesq_send_put_nacked;
30732 ++ atomic_long_unchecked_t mesq_page_overflow;
30733 ++ atomic_long_unchecked_t mesq_qf_locked;
30734 ++ atomic_long_unchecked_t mesq_qf_noop_not_full;
30735 ++ atomic_long_unchecked_t mesq_qf_switch_head_failed;
30736 ++ atomic_long_unchecked_t mesq_qf_unexpected_error;
30737 ++ atomic_long_unchecked_t mesq_noop_unexpected_error;
30738 ++ atomic_long_unchecked_t mesq_noop_lb_overflow;
30739 ++ atomic_long_unchecked_t mesq_noop_qlimit_reached;
30740 ++ atomic_long_unchecked_t mesq_noop_amo_nacked;
30741 ++ atomic_long_unchecked_t mesq_noop_put_nacked;
30742 ++ atomic_long_unchecked_t mesq_noop_page_overflow;
30743 +
30744 + };
30745 +
30746 +@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
30747 + tghop_invalidate, mcsop_last};
30748 +
30749 + struct mcs_op_statistic {
30750 +- atomic_long_t count;
30751 +- atomic_long_t total;
30752 ++ atomic_long_unchecked_t count;
30753 ++ atomic_long_unchecked_t total;
30754 + unsigned long max;
30755 + };
30756 +
30757 +@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
30758 +
30759 + #define STAT(id) do { \
30760 + if (gru_options & OPT_STATS) \
30761 +- atomic_long_inc(&gru_stats.id); \
30762 ++ atomic_long_inc_unchecked(&gru_stats.id); \
30763 + } while (0)
30764 +
30765 + #ifdef CONFIG_SGI_GRU_DEBUG
30766 +diff -urNp linux-3.1.1/drivers/misc/sgi-xp/xpc.h linux-3.1.1/drivers/misc/sgi-xp/xpc.h
30767 +--- linux-3.1.1/drivers/misc/sgi-xp/xpc.h 2011-11-11 15:19:27.000000000 -0500
30768 ++++ linux-3.1.1/drivers/misc/sgi-xp/xpc.h 2011-11-16 18:39:07.000000000 -0500
30769 +@@ -835,6 +835,7 @@ struct xpc_arch_operations {
30770 + void (*received_payload) (struct xpc_channel *, void *);
30771 + void (*notify_senders_of_disconnect) (struct xpc_channel *);
30772 + };
30773 ++typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
30774 +
30775 + /* struct xpc_partition act_state values (for XPC HB) */
30776 +
30777 +@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_regis
30778 + /* found in xpc_main.c */
30779 + extern struct device *xpc_part;
30780 + extern struct device *xpc_chan;
30781 +-extern struct xpc_arch_operations xpc_arch_ops;
30782 ++extern xpc_arch_operations_no_const xpc_arch_ops;
30783 + extern int xpc_disengage_timelimit;
30784 + extern int xpc_disengage_timedout;
30785 + extern int xpc_activate_IRQ_rcvd;
30786 +diff -urNp linux-3.1.1/drivers/misc/sgi-xp/xpc_main.c linux-3.1.1/drivers/misc/sgi-xp/xpc_main.c
30787 +--- linux-3.1.1/drivers/misc/sgi-xp/xpc_main.c 2011-11-11 15:19:27.000000000 -0500
30788 ++++ linux-3.1.1/drivers/misc/sgi-xp/xpc_main.c 2011-11-16 18:39:07.000000000 -0500
30789 +@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_not
30790 + .notifier_call = xpc_system_die,
30791 + };
30792 +
30793 +-struct xpc_arch_operations xpc_arch_ops;
30794 ++xpc_arch_operations_no_const xpc_arch_ops;
30795 +
30796 + /*
30797 + * Timer function to enforce the timelimit on the partition disengage.
30798 +diff -urNp linux-3.1.1/drivers/misc/sgi-xp/xp.h linux-3.1.1/drivers/misc/sgi-xp/xp.h
30799 +--- linux-3.1.1/drivers/misc/sgi-xp/xp.h 2011-11-11 15:19:27.000000000 -0500
30800 ++++ linux-3.1.1/drivers/misc/sgi-xp/xp.h 2011-11-16 18:39:07.000000000 -0500
30801 +@@ -289,7 +289,7 @@ struct xpc_interface {
30802 + xpc_notify_func, void *);
30803 + void (*received) (short, int, void *);
30804 + enum xp_retval (*partid_to_nasids) (short, void *);
30805 +-};
30806 ++} __no_const;
30807 +
30808 + extern struct xpc_interface xpc_interface;
30809 +
30810 +diff -urNp linux-3.1.1/drivers/mmc/host/sdhci-pci.c linux-3.1.1/drivers/mmc/host/sdhci-pci.c
30811 +--- linux-3.1.1/drivers/mmc/host/sdhci-pci.c 2011-11-11 15:19:27.000000000 -0500
30812 ++++ linux-3.1.1/drivers/mmc/host/sdhci-pci.c 2011-11-16 18:39:07.000000000 -0500
30813 +@@ -542,7 +542,7 @@ static const struct sdhci_pci_fixes sdhc
30814 + .probe = via_probe,
30815 + };
30816 +
30817 +-static const struct pci_device_id pci_ids[] __devinitdata = {
30818 ++static const struct pci_device_id pci_ids[] __devinitconst = {
30819 + {
30820 + .vendor = PCI_VENDOR_ID_RICOH,
30821 + .device = PCI_DEVICE_ID_RICOH_R5C822,
30822 +diff -urNp linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0001.c linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0001.c
30823 +--- linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0001.c 2011-11-11 15:19:27.000000000 -0500
30824 ++++ linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0001.c 2011-11-16 18:40:10.000000000 -0500
30825 +@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
30826 + struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
30827 + unsigned long timeo = jiffies + HZ;
30828 +
30829 ++ pax_track_stack();
30830 ++
30831 + /* Prevent setting state FL_SYNCING for chip in suspended state. */
30832 + if (mode == FL_SYNCING && chip->oldstate != FL_READY)
30833 + goto sleep;
30834 +@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(stru
30835 + unsigned long initial_adr;
30836 + int initial_len = len;
30837 +
30838 ++ pax_track_stack();
30839 ++
30840 + wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
30841 + adr += chip->start;
30842 + initial_adr = adr;
30843 +@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(st
30844 + int retries = 3;
30845 + int ret;
30846 +
30847 ++ pax_track_stack();
30848 ++
30849 + adr += chip->start;
30850 +
30851 + retry:
30852 +diff -urNp linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0020.c
30853 +--- linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0020.c 2011-11-11 15:19:27.000000000 -0500
30854 ++++ linux-3.1.1/drivers/mtd/chips/cfi_cmdset_0020.c 2011-11-16 18:40:10.000000000 -0500
30855 +@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
30856 + unsigned long cmd_addr;
30857 + struct cfi_private *cfi = map->fldrv_priv;
30858 +
30859 ++ pax_track_stack();
30860 ++
30861 + adr += chip->start;
30862 +
30863 + /* Ensure cmd read/writes are aligned. */
30864 +@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct
30865 + DECLARE_WAITQUEUE(wait, current);
30866 + int wbufsize, z;
30867 +
30868 ++ pax_track_stack();
30869 ++
30870 + /* M58LW064A requires bus alignment for buffer wriets -- saw */
30871 + if (adr & (map_bankwidth(map)-1))
30872 + return -EINVAL;
30873 +@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(stru
30874 + DECLARE_WAITQUEUE(wait, current);
30875 + int ret = 0;
30876 +
30877 ++ pax_track_stack();
30878 ++
30879 + adr += chip->start;
30880 +
30881 + /* Let's determine this according to the interleave only once */
30882 +@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struc
30883 + unsigned long timeo = jiffies + HZ;
30884 + DECLARE_WAITQUEUE(wait, current);
30885 +
30886 ++ pax_track_stack();
30887 ++
30888 + adr += chip->start;
30889 +
30890 + /* Let's determine this according to the interleave only once */
30891 +@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(str
30892 + unsigned long timeo = jiffies + HZ;
30893 + DECLARE_WAITQUEUE(wait, current);
30894 +
30895 ++ pax_track_stack();
30896 ++
30897 + adr += chip->start;
30898 +
30899 + /* Let's determine this according to the interleave only once */
30900 +diff -urNp linux-3.1.1/drivers/mtd/devices/doc2000.c linux-3.1.1/drivers/mtd/devices/doc2000.c
30901 +--- linux-3.1.1/drivers/mtd/devices/doc2000.c 2011-11-11 15:19:27.000000000 -0500
30902 ++++ linux-3.1.1/drivers/mtd/devices/doc2000.c 2011-11-16 18:39:07.000000000 -0500
30903 +@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
30904 +
30905 + /* The ECC will not be calculated correctly if less than 512 is written */
30906 + /* DBB-
30907 +- if (len != 0x200 && eccbuf)
30908 ++ if (len != 0x200)
30909 + printk(KERN_WARNING
30910 + "ECC needs a full sector write (adr: %lx size %lx)\n",
30911 + (long) to, (long) len);
30912 +diff -urNp linux-3.1.1/drivers/mtd/devices/doc2001.c linux-3.1.1/drivers/mtd/devices/doc2001.c
30913 +--- linux-3.1.1/drivers/mtd/devices/doc2001.c 2011-11-11 15:19:27.000000000 -0500
30914 ++++ linux-3.1.1/drivers/mtd/devices/doc2001.c 2011-11-16 18:39:07.000000000 -0500
30915 +@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
30916 + struct Nand *mychip = &this->chips[from >> (this->chipshift)];
30917 +
30918 + /* Don't allow read past end of device */
30919 +- if (from >= this->totlen)
30920 ++ if (from >= this->totlen || !len)
30921 + return -EINVAL;
30922 +
30923 + /* Don't allow a single read to cross a 512-byte block boundary */
30924 +diff -urNp linux-3.1.1/drivers/mtd/ftl.c linux-3.1.1/drivers/mtd/ftl.c
30925 +--- linux-3.1.1/drivers/mtd/ftl.c 2011-11-11 15:19:27.000000000 -0500
30926 ++++ linux-3.1.1/drivers/mtd/ftl.c 2011-11-16 18:40:10.000000000 -0500
30927 +@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
30928 + loff_t offset;
30929 + uint16_t srcunitswap = cpu_to_le16(srcunit);
30930 +
30931 ++ pax_track_stack();
30932 ++
30933 + eun = &part->EUNInfo[srcunit];
30934 + xfer = &part->XferInfo[xferunit];
30935 + DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
30936 +diff -urNp linux-3.1.1/drivers/mtd/inftlcore.c linux-3.1.1/drivers/mtd/inftlcore.c
30937 +--- linux-3.1.1/drivers/mtd/inftlcore.c 2011-11-11 15:19:27.000000000 -0500
30938 ++++ linux-3.1.1/drivers/mtd/inftlcore.c 2011-11-16 18:40:10.000000000 -0500
30939 +@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
30940 + struct inftl_oob oob;
30941 + size_t retlen;
30942 +
30943 ++ pax_track_stack();
30944 ++
30945 + DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
30946 + "pending=%d)\n", inftl, thisVUC, pendingblock);
30947 +
30948 +diff -urNp linux-3.1.1/drivers/mtd/inftlmount.c linux-3.1.1/drivers/mtd/inftlmount.c
30949 +--- linux-3.1.1/drivers/mtd/inftlmount.c 2011-11-11 15:19:27.000000000 -0500
30950 ++++ linux-3.1.1/drivers/mtd/inftlmount.c 2011-11-16 18:40:10.000000000 -0500
30951 +@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
30952 + struct INFTLPartition *ip;
30953 + size_t retlen;
30954 +
30955 ++ pax_track_stack();
30956 ++
30957 + DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
30958 +
30959 + /*
30960 +diff -urNp linux-3.1.1/drivers/mtd/lpddr/qinfo_probe.c linux-3.1.1/drivers/mtd/lpddr/qinfo_probe.c
30961 +--- linux-3.1.1/drivers/mtd/lpddr/qinfo_probe.c 2011-11-11 15:19:27.000000000 -0500
30962 ++++ linux-3.1.1/drivers/mtd/lpddr/qinfo_probe.c 2011-11-16 18:40:10.000000000 -0500
30963 +@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
30964 + {
30965 + map_word pfow_val[4];
30966 +
30967 ++ pax_track_stack();
30968 ++
30969 + /* Check identification string */
30970 + pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
30971 + pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
30972 +diff -urNp linux-3.1.1/drivers/mtd/mtdchar.c linux-3.1.1/drivers/mtd/mtdchar.c
30973 +--- linux-3.1.1/drivers/mtd/mtdchar.c 2011-11-11 15:19:27.000000000 -0500
30974 ++++ linux-3.1.1/drivers/mtd/mtdchar.c 2011-11-16 18:40:10.000000000 -0500
30975 +@@ -554,6 +554,8 @@ static int mtd_ioctl(struct file *file,
30976 + u_long size;
30977 + struct mtd_info_user info;
30978 +
30979 ++ pax_track_stack();
30980 ++
30981 + DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
30982 +
30983 + size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
30984 +diff -urNp linux-3.1.1/drivers/mtd/nand/denali.c linux-3.1.1/drivers/mtd/nand/denali.c
30985 +--- linux-3.1.1/drivers/mtd/nand/denali.c 2011-11-11 15:19:27.000000000 -0500
30986 ++++ linux-3.1.1/drivers/mtd/nand/denali.c 2011-11-16 18:39:07.000000000 -0500
30987 +@@ -26,6 +26,7 @@
30988 + #include <linux/pci.h>
30989 + #include <linux/mtd/mtd.h>
30990 + #include <linux/module.h>
30991 ++#include <linux/slab.h>
30992 +
30993 + #include "denali.h"
30994 +
30995 +diff -urNp linux-3.1.1/drivers/mtd/nftlcore.c linux-3.1.1/drivers/mtd/nftlcore.c
30996 +--- linux-3.1.1/drivers/mtd/nftlcore.c 2011-11-11 15:19:27.000000000 -0500
30997 ++++ linux-3.1.1/drivers/mtd/nftlcore.c 2011-11-16 18:40:10.000000000 -0500
30998 +@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
30999 + int inplace = 1;
31000 + size_t retlen;
31001 +
31002 ++ pax_track_stack();
31003 ++
31004 + memset(BlockMap, 0xff, sizeof(BlockMap));
31005 + memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
31006 +
31007 +diff -urNp linux-3.1.1/drivers/mtd/nftlmount.c linux-3.1.1/drivers/mtd/nftlmount.c
31008 +--- linux-3.1.1/drivers/mtd/nftlmount.c 2011-11-11 15:19:27.000000000 -0500
31009 ++++ linux-3.1.1/drivers/mtd/nftlmount.c 2011-11-16 18:40:10.000000000 -0500
31010 +@@ -24,6 +24,7 @@
31011 + #include <asm/errno.h>
31012 + #include <linux/delay.h>
31013 + #include <linux/slab.h>
31014 ++#include <linux/sched.h>
31015 + #include <linux/mtd/mtd.h>
31016 + #include <linux/mtd/nand.h>
31017 + #include <linux/mtd/nftl.h>
31018 +@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
31019 + struct mtd_info *mtd = nftl->mbd.mtd;
31020 + unsigned int i;
31021 +
31022 ++ pax_track_stack();
31023 ++
31024 + /* Assume logical EraseSize == physical erasesize for starting the scan.
31025 + We'll sort it out later if we find a MediaHeader which says otherwise */
31026 + /* Actually, we won't. The new DiskOnChip driver has already scanned
31027 +diff -urNp linux-3.1.1/drivers/mtd/ubi/build.c linux-3.1.1/drivers/mtd/ubi/build.c
31028 +--- linux-3.1.1/drivers/mtd/ubi/build.c 2011-11-11 15:19:27.000000000 -0500
31029 ++++ linux-3.1.1/drivers/mtd/ubi/build.c 2011-11-16 18:39:07.000000000 -0500
31030 +@@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
31031 + static int __init bytes_str_to_int(const char *str)
31032 + {
31033 + char *endp;
31034 +- unsigned long result;
31035 ++ unsigned long result, scale = 1;
31036 +
31037 + result = simple_strtoul(str, &endp, 0);
31038 + if (str == endp || result >= INT_MAX) {
31039 +@@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const
31040 +
31041 + switch (*endp) {
31042 + case 'G':
31043 +- result *= 1024;
31044 ++ scale *= 1024;
31045 + case 'M':
31046 +- result *= 1024;
31047 ++ scale *= 1024;
31048 + case 'K':
31049 +- result *= 1024;
31050 ++ scale *= 1024;
31051 + if (endp[1] == 'i' && endp[2] == 'B')
31052 + endp += 2;
31053 + case '\0':
31054 +@@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const
31055 + return -EINVAL;
31056 + }
31057 +
31058 +- return result;
31059 ++ if ((intoverflow_t)result*scale >= INT_MAX) {
31060 ++ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
31061 ++ str);
31062 ++ return -EINVAL;
31063 ++ }
31064 ++
31065 ++ return result*scale;
31066 + }
31067 +
31068 + /**
31069 +diff -urNp linux-3.1.1/drivers/net/atlx/atl2.c linux-3.1.1/drivers/net/atlx/atl2.c
31070 +--- linux-3.1.1/drivers/net/atlx/atl2.c 2011-11-11 15:19:27.000000000 -0500
31071 ++++ linux-3.1.1/drivers/net/atlx/atl2.c 2011-11-16 18:39:07.000000000 -0500
31072 +@@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw
31073 + */
31074 +
31075 + #define ATL2_PARAM(X, desc) \
31076 +- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
31077 ++ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
31078 + MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
31079 + MODULE_PARM_DESC(X, desc);
31080 + #else
31081 +diff -urNp linux-3.1.1/drivers/net/bna/bfa_ioc_ct.c linux-3.1.1/drivers/net/bna/bfa_ioc_ct.c
31082 +--- linux-3.1.1/drivers/net/bna/bfa_ioc_ct.c 2011-11-11 15:19:27.000000000 -0500
31083 ++++ linux-3.1.1/drivers/net/bna/bfa_ioc_ct.c 2011-11-16 18:39:07.000000000 -0500
31084 +@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
31085 + static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
31086 + static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
31087 +
31088 +-static struct bfa_ioc_hwif nw_hwif_ct;
31089 ++static struct bfa_ioc_hwif nw_hwif_ct = {
31090 ++ .ioc_pll_init = bfa_ioc_ct_pll_init,
31091 ++ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
31092 ++ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
31093 ++ .ioc_reg_init = bfa_ioc_ct_reg_init,
31094 ++ .ioc_map_port = bfa_ioc_ct_map_port,
31095 ++ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
31096 ++ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
31097 ++ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
31098 ++ .ioc_sync_start = bfa_ioc_ct_sync_start,
31099 ++ .ioc_sync_join = bfa_ioc_ct_sync_join,
31100 ++ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
31101 ++ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
31102 ++ .ioc_sync_complete = bfa_ioc_ct_sync_complete
31103 ++};
31104 +
31105 + /**
31106 + * Called from bfa_ioc_attach() to map asic specific calls.
31107 +@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
31108 + void
31109 + bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
31110 + {
31111 +- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
31112 +- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
31113 +- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
31114 +- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
31115 +- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
31116 +- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
31117 +- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
31118 +- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
31119 +- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
31120 +- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
31121 +- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
31122 +- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
31123 +- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
31124 +-
31125 + ioc->ioc_hwif = &nw_hwif_ct;
31126 + }
31127 +
31128 +diff -urNp linux-3.1.1/drivers/net/bna/bnad.c linux-3.1.1/drivers/net/bna/bnad.c
31129 +--- linux-3.1.1/drivers/net/bna/bnad.c 2011-11-11 15:19:27.000000000 -0500
31130 ++++ linux-3.1.1/drivers/net/bna/bnad.c 2011-11-16 18:39:07.000000000 -0500
31131 +@@ -1673,7 +1673,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
31132 + struct bna_intr_info *intr_info =
31133 + &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
31134 + struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
31135 +- struct bna_tx_event_cbfn tx_cbfn;
31136 ++ static struct bna_tx_event_cbfn tx_cbfn = {
31137 ++ /* Initialize the tx event handlers */
31138 ++ .tcb_setup_cbfn = bnad_cb_tcb_setup,
31139 ++ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
31140 ++ .tx_stall_cbfn = bnad_cb_tx_stall,
31141 ++ .tx_resume_cbfn = bnad_cb_tx_resume,
31142 ++ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
31143 ++ };
31144 + struct bna_tx *tx;
31145 + unsigned long flags;
31146 +
31147 +@@ -1682,13 +1689,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
31148 + tx_config->txq_depth = bnad->txq_depth;
31149 + tx_config->tx_type = BNA_TX_T_REGULAR;
31150 +
31151 +- /* Initialize the tx event handlers */
31152 +- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
31153 +- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
31154 +- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
31155 +- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
31156 +- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
31157 +-
31158 + /* Get BNA's resource requirement for one tx object */
31159 + spin_lock_irqsave(&bnad->bna_lock, flags);
31160 + bna_tx_res_req(bnad->num_txq_per_tx,
31161 +@@ -1819,21 +1819,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
31162 + struct bna_intr_info *intr_info =
31163 + &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
31164 + struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
31165 +- struct bna_rx_event_cbfn rx_cbfn;
31166 ++ static struct bna_rx_event_cbfn rx_cbfn = {
31167 ++ /* Initialize the Rx event handlers */
31168 ++ .rcb_setup_cbfn = bnad_cb_rcb_setup,
31169 ++ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
31170 ++ .ccb_setup_cbfn = bnad_cb_ccb_setup,
31171 ++ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
31172 ++ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
31173 ++ .rx_post_cbfn = bnad_cb_rx_post
31174 ++ };
31175 + struct bna_rx *rx;
31176 + unsigned long flags;
31177 +
31178 + /* Initialize the Rx object configuration */
31179 + bnad_init_rx_config(bnad, rx_config);
31180 +
31181 +- /* Initialize the Rx event handlers */
31182 +- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
31183 +- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
31184 +- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
31185 +- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
31186 +- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
31187 +- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
31188 +-
31189 + /* Get BNA's resource requirement for one Rx object */
31190 + spin_lock_irqsave(&bnad->bna_lock, flags);
31191 + bna_rx_res_req(rx_config, res_info);
31192 +diff -urNp linux-3.1.1/drivers/net/bnx2.c linux-3.1.1/drivers/net/bnx2.c
31193 +--- linux-3.1.1/drivers/net/bnx2.c 2011-11-11 15:19:27.000000000 -0500
31194 ++++ linux-3.1.1/drivers/net/bnx2.c 2011-11-16 18:40:11.000000000 -0500
31195 +@@ -5877,6 +5877,8 @@ bnx2_test_nvram(struct bnx2 *bp)
31196 + int rc = 0;
31197 + u32 magic, csum;
31198 +
31199 ++ pax_track_stack();
31200 ++
31201 + if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
31202 + goto test_nvram_done;
31203 +
31204 +diff -urNp linux-3.1.1/drivers/net/bnx2x/bnx2x_ethtool.c linux-3.1.1/drivers/net/bnx2x/bnx2x_ethtool.c
31205 +--- linux-3.1.1/drivers/net/bnx2x/bnx2x_ethtool.c 2011-11-11 15:19:27.000000000 -0500
31206 ++++ linux-3.1.1/drivers/net/bnx2x/bnx2x_ethtool.c 2011-11-16 18:40:11.000000000 -0500
31207 +@@ -1943,6 +1943,8 @@ static int bnx2x_test_nvram(struct bnx2x
31208 + int i, rc;
31209 + u32 magic, crc;
31210 +
31211 ++ pax_track_stack();
31212 ++
31213 + if (BP_NOMCP(bp))
31214 + return 0;
31215 +
31216 +diff -urNp linux-3.1.1/drivers/net/bnx2x/bnx2x_sp.h linux-3.1.1/drivers/net/bnx2x/bnx2x_sp.h
31217 +--- linux-3.1.1/drivers/net/bnx2x/bnx2x_sp.h 2011-11-11 15:19:27.000000000 -0500
31218 ++++ linux-3.1.1/drivers/net/bnx2x/bnx2x_sp.h 2011-11-16 18:39:07.000000000 -0500
31219 +@@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
31220 +
31221 + int (*wait_comp)(struct bnx2x *bp,
31222 + struct bnx2x_rx_mode_ramrod_params *p);
31223 +-};
31224 ++} __no_const;
31225 +
31226 + /********************** Set multicast group ***********************************/
31227 +
31228 +diff -urNp linux-3.1.1/drivers/net/cxgb3/l2t.h linux-3.1.1/drivers/net/cxgb3/l2t.h
31229 +--- linux-3.1.1/drivers/net/cxgb3/l2t.h 2011-11-11 15:19:27.000000000 -0500
31230 ++++ linux-3.1.1/drivers/net/cxgb3/l2t.h 2011-11-16 18:39:07.000000000 -0500
31231 +@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)
31232 + */
31233 + struct l2t_skb_cb {
31234 + arp_failure_handler_func arp_failure_handler;
31235 +-};
31236 ++} __no_const;
31237 +
31238 + #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
31239 +
31240 +diff -urNp linux-3.1.1/drivers/net/cxgb4/cxgb4_main.c linux-3.1.1/drivers/net/cxgb4/cxgb4_main.c
31241 +--- linux-3.1.1/drivers/net/cxgb4/cxgb4_main.c 2011-11-11 15:19:27.000000000 -0500
31242 ++++ linux-3.1.1/drivers/net/cxgb4/cxgb4_main.c 2011-11-16 18:40:22.000000000 -0500
31243 +@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct
31244 + unsigned int nchan = adap->params.nports;
31245 + struct msix_entry entries[MAX_INGQ + 1];
31246 +
31247 ++ pax_track_stack();
31248 ++
31249 + for (i = 0; i < ARRAY_SIZE(entries); ++i)
31250 + entries[i].entry = i;
31251 +
31252 +diff -urNp linux-3.1.1/drivers/net/cxgb4/t4_hw.c linux-3.1.1/drivers/net/cxgb4/t4_hw.c
31253 +--- linux-3.1.1/drivers/net/cxgb4/t4_hw.c 2011-11-11 15:19:27.000000000 -0500
31254 ++++ linux-3.1.1/drivers/net/cxgb4/t4_hw.c 2011-11-16 18:40:22.000000000 -0500
31255 +@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
31256 + u8 vpd[VPD_LEN], csum;
31257 + unsigned int vpdr_len, kw_offset, id_len;
31258 +
31259 ++ pax_track_stack();
31260 ++
31261 + ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
31262 + if (ret < 0)
31263 + return ret;
31264 +diff -urNp linux-3.1.1/drivers/net/e1000e/82571.c linux-3.1.1/drivers/net/e1000e/82571.c
31265 +--- linux-3.1.1/drivers/net/e1000e/82571.c 2011-11-11 15:19:27.000000000 -0500
31266 ++++ linux-3.1.1/drivers/net/e1000e/82571.c 2011-11-16 18:39:07.000000000 -0500
31267 +@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
31268 + {
31269 + struct e1000_hw *hw = &adapter->hw;
31270 + struct e1000_mac_info *mac = &hw->mac;
31271 +- struct e1000_mac_operations *func = &mac->ops;
31272 ++ e1000_mac_operations_no_const *func = &mac->ops;
31273 + u32 swsm = 0;
31274 + u32 swsm2 = 0;
31275 + bool force_clear_smbi = false;
31276 +diff -urNp linux-3.1.1/drivers/net/e1000e/es2lan.c linux-3.1.1/drivers/net/e1000e/es2lan.c
31277 +--- linux-3.1.1/drivers/net/e1000e/es2lan.c 2011-11-11 15:19:27.000000000 -0500
31278 ++++ linux-3.1.1/drivers/net/e1000e/es2lan.c 2011-11-16 18:39:07.000000000 -0500
31279 +@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
31280 + {
31281 + struct e1000_hw *hw = &adapter->hw;
31282 + struct e1000_mac_info *mac = &hw->mac;
31283 +- struct e1000_mac_operations *func = &mac->ops;
31284 ++ e1000_mac_operations_no_const *func = &mac->ops;
31285 +
31286 + /* Set media type */
31287 + switch (adapter->pdev->device) {
31288 +diff -urNp linux-3.1.1/drivers/net/e1000e/hw.h linux-3.1.1/drivers/net/e1000e/hw.h
31289 +--- linux-3.1.1/drivers/net/e1000e/hw.h 2011-11-11 15:19:27.000000000 -0500
31290 ++++ linux-3.1.1/drivers/net/e1000e/hw.h 2011-11-16 18:39:07.000000000 -0500
31291 +@@ -778,6 +778,7 @@ struct e1000_mac_operations {
31292 + void (*write_vfta)(struct e1000_hw *, u32, u32);
31293 + s32 (*read_mac_addr)(struct e1000_hw *);
31294 + };
31295 ++typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
31296 +
31297 + /*
31298 + * When to use various PHY register access functions:
31299 +@@ -818,6 +819,7 @@ struct e1000_phy_operations {
31300 + void (*power_up)(struct e1000_hw *);
31301 + void (*power_down)(struct e1000_hw *);
31302 + };
31303 ++typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
31304 +
31305 + /* Function pointers for the NVM. */
31306 + struct e1000_nvm_operations {
31307 +@@ -829,9 +831,10 @@ struct e1000_nvm_operations {
31308 + s32 (*validate)(struct e1000_hw *);
31309 + s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
31310 + };
31311 ++typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
31312 +
31313 + struct e1000_mac_info {
31314 +- struct e1000_mac_operations ops;
31315 ++ e1000_mac_operations_no_const ops;
31316 + u8 addr[ETH_ALEN];
31317 + u8 perm_addr[ETH_ALEN];
31318 +
31319 +@@ -872,7 +875,7 @@ struct e1000_mac_info {
31320 + };
31321 +
31322 + struct e1000_phy_info {
31323 +- struct e1000_phy_operations ops;
31324 ++ e1000_phy_operations_no_const ops;
31325 +
31326 + enum e1000_phy_type type;
31327 +
31328 +@@ -906,7 +909,7 @@ struct e1000_phy_info {
31329 + };
31330 +
31331 + struct e1000_nvm_info {
31332 +- struct e1000_nvm_operations ops;
31333 ++ e1000_nvm_operations_no_const ops;
31334 +
31335 + enum e1000_nvm_type type;
31336 + enum e1000_nvm_override override;
31337 +diff -urNp linux-3.1.1/drivers/net/fealnx.c linux-3.1.1/drivers/net/fealnx.c
31338 +--- linux-3.1.1/drivers/net/fealnx.c 2011-11-11 15:19:27.000000000 -0500
31339 ++++ linux-3.1.1/drivers/net/fealnx.c 2011-11-16 18:39:07.000000000 -0500
31340 +@@ -150,7 +150,7 @@ struct chip_info {
31341 + int flags;
31342 + };
31343 +
31344 +-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
31345 ++static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
31346 + { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
31347 + { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
31348 + { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
31349 +diff -urNp linux-3.1.1/drivers/net/hamradio/6pack.c linux-3.1.1/drivers/net/hamradio/6pack.c
31350 +--- linux-3.1.1/drivers/net/hamradio/6pack.c 2011-11-11 15:19:27.000000000 -0500
31351 ++++ linux-3.1.1/drivers/net/hamradio/6pack.c 2011-11-16 18:40:22.000000000 -0500
31352 +@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
31353 + unsigned char buf[512];
31354 + int count1;
31355 +
31356 ++ pax_track_stack();
31357 ++
31358 + if (!count)
31359 + return;
31360 +
31361 +diff -urNp linux-3.1.1/drivers/net/igb/e1000_hw.h linux-3.1.1/drivers/net/igb/e1000_hw.h
31362 +--- linux-3.1.1/drivers/net/igb/e1000_hw.h 2011-11-11 15:19:27.000000000 -0500
31363 ++++ linux-3.1.1/drivers/net/igb/e1000_hw.h 2011-11-16 18:39:07.000000000 -0500
31364 +@@ -314,6 +314,7 @@ struct e1000_mac_operations {
31365 + s32 (*read_mac_addr)(struct e1000_hw *);
31366 + s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
31367 + };
31368 ++typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
31369 +
31370 + struct e1000_phy_operations {
31371 + s32 (*acquire)(struct e1000_hw *);
31372 +@@ -330,6 +331,7 @@ struct e1000_phy_operations {
31373 + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
31374 + s32 (*write_reg)(struct e1000_hw *, u32, u16);
31375 + };
31376 ++typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
31377 +
31378 + struct e1000_nvm_operations {
31379 + s32 (*acquire)(struct e1000_hw *);
31380 +@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
31381 + s32 (*update)(struct e1000_hw *);
31382 + s32 (*validate)(struct e1000_hw *);
31383 + };
31384 ++typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
31385 +
31386 + struct e1000_info {
31387 + s32 (*get_invariants)(struct e1000_hw *);
31388 +@@ -350,7 +353,7 @@ struct e1000_info {
31389 + extern const struct e1000_info e1000_82575_info;
31390 +
31391 + struct e1000_mac_info {
31392 +- struct e1000_mac_operations ops;
31393 ++ e1000_mac_operations_no_const ops;
31394 +
31395 + u8 addr[6];
31396 + u8 perm_addr[6];
31397 +@@ -388,7 +391,7 @@ struct e1000_mac_info {
31398 + };
31399 +
31400 + struct e1000_phy_info {
31401 +- struct e1000_phy_operations ops;
31402 ++ e1000_phy_operations_no_const ops;
31403 +
31404 + enum e1000_phy_type type;
31405 +
31406 +@@ -423,7 +426,7 @@ struct e1000_phy_info {
31407 + };
31408 +
31409 + struct e1000_nvm_info {
31410 +- struct e1000_nvm_operations ops;
31411 ++ e1000_nvm_operations_no_const ops;
31412 + enum e1000_nvm_type type;
31413 + enum e1000_nvm_override override;
31414 +
31415 +@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
31416 + s32 (*check_for_ack)(struct e1000_hw *, u16);
31417 + s32 (*check_for_rst)(struct e1000_hw *, u16);
31418 + };
31419 ++typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
31420 +
31421 + struct e1000_mbx_stats {
31422 + u32 msgs_tx;
31423 +@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
31424 + };
31425 +
31426 + struct e1000_mbx_info {
31427 +- struct e1000_mbx_operations ops;
31428 ++ e1000_mbx_operations_no_const ops;
31429 + struct e1000_mbx_stats stats;
31430 + u32 timeout;
31431 + u32 usec_delay;
31432 +diff -urNp linux-3.1.1/drivers/net/igbvf/vf.h linux-3.1.1/drivers/net/igbvf/vf.h
31433 +--- linux-3.1.1/drivers/net/igbvf/vf.h 2011-11-11 15:19:27.000000000 -0500
31434 ++++ linux-3.1.1/drivers/net/igbvf/vf.h 2011-11-16 18:39:07.000000000 -0500
31435 +@@ -189,9 +189,10 @@ struct e1000_mac_operations {
31436 + s32 (*read_mac_addr)(struct e1000_hw *);
31437 + s32 (*set_vfta)(struct e1000_hw *, u16, bool);
31438 + };
31439 ++typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
31440 +
31441 + struct e1000_mac_info {
31442 +- struct e1000_mac_operations ops;
31443 ++ e1000_mac_operations_no_const ops;
31444 + u8 addr[6];
31445 + u8 perm_addr[6];
31446 +
31447 +@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
31448 + s32 (*check_for_ack)(struct e1000_hw *);
31449 + s32 (*check_for_rst)(struct e1000_hw *);
31450 + };
31451 ++typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
31452 +
31453 + struct e1000_mbx_stats {
31454 + u32 msgs_tx;
31455 +@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
31456 + };
31457 +
31458 + struct e1000_mbx_info {
31459 +- struct e1000_mbx_operations ops;
31460 ++ e1000_mbx_operations_no_const ops;
31461 + struct e1000_mbx_stats stats;
31462 + u32 timeout;
31463 + u32 usec_delay;
31464 +diff -urNp linux-3.1.1/drivers/net/ixgb/ixgb_main.c linux-3.1.1/drivers/net/ixgb/ixgb_main.c
31465 +--- linux-3.1.1/drivers/net/ixgb/ixgb_main.c 2011-11-11 15:19:27.000000000 -0500
31466 ++++ linux-3.1.1/drivers/net/ixgb/ixgb_main.c 2011-11-16 18:40:22.000000000 -0500
31467 +@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev
31468 + u32 rctl;
31469 + int i;
31470 +
31471 ++ pax_track_stack();
31472 ++
31473 + /* Check for Promiscuous and All Multicast modes */
31474 +
31475 + rctl = IXGB_READ_REG(hw, RCTL);
31476 +diff -urNp linux-3.1.1/drivers/net/ixgb/ixgb_param.c linux-3.1.1/drivers/net/ixgb/ixgb_param.c
31477 +--- linux-3.1.1/drivers/net/ixgb/ixgb_param.c 2011-11-11 15:19:27.000000000 -0500
31478 ++++ linux-3.1.1/drivers/net/ixgb/ixgb_param.c 2011-11-16 18:40:22.000000000 -0500
31479 +@@ -261,6 +261,9 @@ void __devinit
31480 + ixgb_check_options(struct ixgb_adapter *adapter)
31481 + {
31482 + int bd = adapter->bd_number;
31483 ++
31484 ++ pax_track_stack();
31485 ++
31486 + if (bd >= IXGB_MAX_NIC) {
31487 + pr_notice("Warning: no configuration for board #%i\n", bd);
31488 + pr_notice("Using defaults for all values\n");
31489 +diff -urNp linux-3.1.1/drivers/net/ixgbe/ixgbe_type.h linux-3.1.1/drivers/net/ixgbe/ixgbe_type.h
31490 +--- linux-3.1.1/drivers/net/ixgbe/ixgbe_type.h 2011-11-11 15:19:27.000000000 -0500
31491 ++++ linux-3.1.1/drivers/net/ixgbe/ixgbe_type.h 2011-11-16 18:39:07.000000000 -0500
31492 +@@ -2642,6 +2642,7 @@ struct ixgbe_eeprom_operations {
31493 + s32 (*update_checksum)(struct ixgbe_hw *);
31494 + u16 (*calc_checksum)(struct ixgbe_hw *);
31495 + };
31496 ++typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
31497 +
31498 + struct ixgbe_mac_operations {
31499 + s32 (*init_hw)(struct ixgbe_hw *);
31500 +@@ -2703,6 +2704,7 @@ struct ixgbe_mac_operations {
31501 + /* Manageability interface */
31502 + s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
31503 + };
31504 ++typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
31505 +
31506 + struct ixgbe_phy_operations {
31507 + s32 (*identify)(struct ixgbe_hw *);
31508 +@@ -2722,9 +2724,10 @@ struct ixgbe_phy_operations {
31509 + s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
31510 + s32 (*check_overtemp)(struct ixgbe_hw *);
31511 + };
31512 ++typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
31513 +
31514 + struct ixgbe_eeprom_info {
31515 +- struct ixgbe_eeprom_operations ops;
31516 ++ ixgbe_eeprom_operations_no_const ops;
31517 + enum ixgbe_eeprom_type type;
31518 + u32 semaphore_delay;
31519 + u16 word_size;
31520 +@@ -2734,7 +2737,7 @@ struct ixgbe_eeprom_info {
31521 +
31522 + #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
31523 + struct ixgbe_mac_info {
31524 +- struct ixgbe_mac_operations ops;
31525 ++ ixgbe_mac_operations_no_const ops;
31526 + enum ixgbe_mac_type type;
31527 + u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
31528 + u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
31529 +@@ -2762,7 +2765,7 @@ struct ixgbe_mac_info {
31530 + };
31531 +
31532 + struct ixgbe_phy_info {
31533 +- struct ixgbe_phy_operations ops;
31534 ++ ixgbe_phy_operations_no_const ops;
31535 + struct mdio_if_info mdio;
31536 + enum ixgbe_phy_type type;
31537 + u32 id;
31538 +@@ -2790,6 +2793,7 @@ struct ixgbe_mbx_operations {
31539 + s32 (*check_for_ack)(struct ixgbe_hw *, u16);
31540 + s32 (*check_for_rst)(struct ixgbe_hw *, u16);
31541 + };
31542 ++typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31543 +
31544 + struct ixgbe_mbx_stats {
31545 + u32 msgs_tx;
31546 +@@ -2801,7 +2805,7 @@ struct ixgbe_mbx_stats {
31547 + };
31548 +
31549 + struct ixgbe_mbx_info {
31550 +- struct ixgbe_mbx_operations ops;
31551 ++ ixgbe_mbx_operations_no_const ops;
31552 + struct ixgbe_mbx_stats stats;
31553 + u32 timeout;
31554 + u32 usec_delay;
31555 +diff -urNp linux-3.1.1/drivers/net/ixgbevf/vf.h linux-3.1.1/drivers/net/ixgbevf/vf.h
31556 +--- linux-3.1.1/drivers/net/ixgbevf/vf.h 2011-11-11 15:19:27.000000000 -0500
31557 ++++ linux-3.1.1/drivers/net/ixgbevf/vf.h 2011-11-16 18:39:07.000000000 -0500
31558 +@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
31559 + s32 (*clear_vfta)(struct ixgbe_hw *);
31560 + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
31561 + };
31562 ++typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
31563 +
31564 + enum ixgbe_mac_type {
31565 + ixgbe_mac_unknown = 0,
31566 +@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
31567 + };
31568 +
31569 + struct ixgbe_mac_info {
31570 +- struct ixgbe_mac_operations ops;
31571 ++ ixgbe_mac_operations_no_const ops;
31572 + u8 addr[6];
31573 + u8 perm_addr[6];
31574 +
31575 +@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
31576 + s32 (*check_for_ack)(struct ixgbe_hw *);
31577 + s32 (*check_for_rst)(struct ixgbe_hw *);
31578 + };
31579 ++typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
31580 +
31581 + struct ixgbe_mbx_stats {
31582 + u32 msgs_tx;
31583 +@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
31584 + };
31585 +
31586 + struct ixgbe_mbx_info {
31587 +- struct ixgbe_mbx_operations ops;
31588 ++ ixgbe_mbx_operations_no_const ops;
31589 + struct ixgbe_mbx_stats stats;
31590 + u32 timeout;
31591 + u32 udelay;
31592 +diff -urNp linux-3.1.1/drivers/net/ksz884x.c linux-3.1.1/drivers/net/ksz884x.c
31593 +--- linux-3.1.1/drivers/net/ksz884x.c 2011-11-11 15:19:27.000000000 -0500
31594 ++++ linux-3.1.1/drivers/net/ksz884x.c 2011-11-16 18:40:22.000000000 -0500
31595 +@@ -6533,6 +6533,8 @@ static void netdev_get_ethtool_stats(str
31596 + int rc;
31597 + u64 counter[TOTAL_PORT_COUNTER_NUM];
31598 +
31599 ++ pax_track_stack();
31600 ++
31601 + mutex_lock(&hw_priv->lock);
31602 + n = SWITCH_PORT_NUM;
31603 + for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
31604 +diff -urNp linux-3.1.1/drivers/net/mlx4/main.c linux-3.1.1/drivers/net/mlx4/main.c
31605 +--- linux-3.1.1/drivers/net/mlx4/main.c 2011-11-11 15:19:27.000000000 -0500
31606 ++++ linux-3.1.1/drivers/net/mlx4/main.c 2011-11-16 18:40:22.000000000 -0500
31607 +@@ -40,6 +40,7 @@
31608 + #include <linux/dma-mapping.h>
31609 + #include <linux/slab.h>
31610 + #include <linux/io-mapping.h>
31611 ++#include <linux/sched.h>
31612 +
31613 + #include <linux/mlx4/device.h>
31614 + #include <linux/mlx4/doorbell.h>
31615 +@@ -762,6 +763,8 @@ static int mlx4_init_hca(struct mlx4_dev
31616 + u64 icm_size;
31617 + int err;
31618 +
31619 ++ pax_track_stack();
31620 ++
31621 + err = mlx4_QUERY_FW(dev);
31622 + if (err) {
31623 + if (err == -EACCES)
31624 +diff -urNp linux-3.1.1/drivers/net/niu.c linux-3.1.1/drivers/net/niu.c
31625 +--- linux-3.1.1/drivers/net/niu.c 2011-11-11 15:19:27.000000000 -0500
31626 ++++ linux-3.1.1/drivers/net/niu.c 2011-11-16 18:40:22.000000000 -0500
31627 +@@ -9061,6 +9061,8 @@ static void __devinit niu_try_msix(struc
31628 + int i, num_irqs, err;
31629 + u8 first_ldg;
31630 +
31631 ++ pax_track_stack();
31632 ++
31633 + first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
31634 + for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
31635 + ldg_num_map[i] = first_ldg + i;
31636 +diff -urNp linux-3.1.1/drivers/net/pcnet32.c linux-3.1.1/drivers/net/pcnet32.c
31637 +--- linux-3.1.1/drivers/net/pcnet32.c 2011-11-11 15:19:27.000000000 -0500
31638 ++++ linux-3.1.1/drivers/net/pcnet32.c 2011-11-16 18:39:07.000000000 -0500
31639 +@@ -270,7 +270,7 @@ struct pcnet32_private {
31640 + struct sk_buff **rx_skbuff;
31641 + dma_addr_t *tx_dma_addr;
31642 + dma_addr_t *rx_dma_addr;
31643 +- struct pcnet32_access a;
31644 ++ struct pcnet32_access *a;
31645 + spinlock_t lock; /* Guard lock */
31646 + unsigned int cur_rx, cur_tx; /* The next free ring entry */
31647 + unsigned int rx_ring_size; /* current rx ring size */
31648 +@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
31649 + u16 val;
31650 +
31651 + netif_wake_queue(dev);
31652 +- val = lp->a.read_csr(ioaddr, CSR3);
31653 ++ val = lp->a->read_csr(ioaddr, CSR3);
31654 + val &= 0x00ff;
31655 +- lp->a.write_csr(ioaddr, CSR3, val);
31656 ++ lp->a->write_csr(ioaddr, CSR3, val);
31657 + napi_enable(&lp->napi);
31658 + }
31659 +
31660 +@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
31661 + r = mii_link_ok(&lp->mii_if);
31662 + } else if (lp->chip_version >= PCNET32_79C970A) {
31663 + ulong ioaddr = dev->base_addr; /* card base I/O address */
31664 +- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
31665 ++ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
31666 + } else { /* can not detect link on really old chips */
31667 + r = 1;
31668 + }
31669 +@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
31670 + pcnet32_netif_stop(dev);
31671 +
31672 + spin_lock_irqsave(&lp->lock, flags);
31673 +- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31674 ++ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31675 +
31676 + size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
31677 +
31678 +@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
31679 + static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
31680 + {
31681 + struct pcnet32_private *lp = netdev_priv(dev);
31682 +- struct pcnet32_access *a = &lp->a; /* access to registers */
31683 ++ struct pcnet32_access *a = lp->a; /* access to registers */
31684 + ulong ioaddr = dev->base_addr; /* card base I/O address */
31685 + struct sk_buff *skb; /* sk buff */
31686 + int x, i; /* counters */
31687 +@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
31688 + pcnet32_netif_stop(dev);
31689 +
31690 + spin_lock_irqsave(&lp->lock, flags);
31691 +- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31692 ++ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
31693 +
31694 + numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
31695 +
31696 + /* Reset the PCNET32 */
31697 +- lp->a.reset(ioaddr);
31698 +- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31699 ++ lp->a->reset(ioaddr);
31700 ++ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31701 +
31702 + /* switch pcnet32 to 32bit mode */
31703 +- lp->a.write_bcr(ioaddr, 20, 2);
31704 ++ lp->a->write_bcr(ioaddr, 20, 2);
31705 +
31706 + /* purge & init rings but don't actually restart */
31707 + pcnet32_restart(dev, 0x0000);
31708 +
31709 +- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31710 ++ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31711 +
31712 + /* Initialize Transmit buffers. */
31713 + size = data_len + 15;
31714 +@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
31715 +
31716 + /* set int loopback in CSR15 */
31717 + x = a->read_csr(ioaddr, CSR15) & 0xfffc;
31718 +- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
31719 ++ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
31720 +
31721 + teststatus = cpu_to_le16(0x8000);
31722 +- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31723 ++ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
31724 +
31725 + /* Check status of descriptors */
31726 + for (x = 0; x < numbuffs; x++) {
31727 +@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
31728 + }
31729 + }
31730 +
31731 +- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31732 ++ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
31733 + wmb();
31734 + if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
31735 + netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
31736 +@@ -1015,7 +1015,7 @@ clean_up:
31737 + pcnet32_restart(dev, CSR0_NORMAL);
31738 + } else {
31739 + pcnet32_purge_rx_ring(dev);
31740 +- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31741 ++ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
31742 + }
31743 + spin_unlock_irqrestore(&lp->lock, flags);
31744 +
31745 +@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct ne
31746 + enum ethtool_phys_id_state state)
31747 + {
31748 + struct pcnet32_private *lp = netdev_priv(dev);
31749 +- struct pcnet32_access *a = &lp->a;
31750 ++ struct pcnet32_access *a = lp->a;
31751 + ulong ioaddr = dev->base_addr;
31752 + unsigned long flags;
31753 + int i;
31754 +@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_de
31755 + {
31756 + int csr5;
31757 + struct pcnet32_private *lp = netdev_priv(dev);
31758 +- struct pcnet32_access *a = &lp->a;
31759 ++ struct pcnet32_access *a = lp->a;
31760 + ulong ioaddr = dev->base_addr;
31761 + int ticks;
31762 +
31763 +@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_stru
31764 + spin_lock_irqsave(&lp->lock, flags);
31765 + if (pcnet32_tx(dev)) {
31766 + /* reset the chip to clear the error condition, then restart */
31767 +- lp->a.reset(ioaddr);
31768 +- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31769 ++ lp->a->reset(ioaddr);
31770 ++ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31771 + pcnet32_restart(dev, CSR0_START);
31772 + netif_wake_queue(dev);
31773 + }
31774 +@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_stru
31775 + __napi_complete(napi);
31776 +
31777 + /* clear interrupt masks */
31778 +- val = lp->a.read_csr(ioaddr, CSR3);
31779 ++ val = lp->a->read_csr(ioaddr, CSR3);
31780 + val &= 0x00ff;
31781 +- lp->a.write_csr(ioaddr, CSR3, val);
31782 ++ lp->a->write_csr(ioaddr, CSR3, val);
31783 +
31784 + /* Set interrupt enable. */
31785 +- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
31786 ++ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
31787 +
31788 + spin_unlock_irqrestore(&lp->lock, flags);
31789 + }
31790 +@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_
31791 + int i, csr0;
31792 + u16 *buff = ptr;
31793 + struct pcnet32_private *lp = netdev_priv(dev);
31794 +- struct pcnet32_access *a = &lp->a;
31795 ++ struct pcnet32_access *a = lp->a;
31796 + ulong ioaddr = dev->base_addr;
31797 + unsigned long flags;
31798 +
31799 +@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_
31800 + for (j = 0; j < PCNET32_MAX_PHYS; j++) {
31801 + if (lp->phymask & (1 << j)) {
31802 + for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
31803 +- lp->a.write_bcr(ioaddr, 33,
31804 ++ lp->a->write_bcr(ioaddr, 33,
31805 + (j << 5) | i);
31806 +- *buff++ = lp->a.read_bcr(ioaddr, 34);
31807 ++ *buff++ = lp->a->read_bcr(ioaddr, 34);
31808 + }
31809 + }
31810 + }
31811 +@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31812 + ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
31813 + lp->options |= PCNET32_PORT_FD;
31814 +
31815 +- lp->a = *a;
31816 ++ lp->a = a;
31817 +
31818 + /* prior to register_netdev, dev->name is not yet correct */
31819 + if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
31820 +@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31821 + if (lp->mii) {
31822 + /* lp->phycount and lp->phymask are set to 0 by memset above */
31823 +
31824 +- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31825 ++ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
31826 + /* scan for PHYs */
31827 + for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31828 + unsigned short id1, id2;
31829 +@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int
31830 + pr_info("Found PHY %04x:%04x at address %d\n",
31831 + id1, id2, i);
31832 + }
31833 +- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31834 ++ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
31835 + if (lp->phycount > 1)
31836 + lp->options |= PCNET32_PORT_MII;
31837 + }
31838 +@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_devic
31839 + }
31840 +
31841 + /* Reset the PCNET32 */
31842 +- lp->a.reset(ioaddr);
31843 ++ lp->a->reset(ioaddr);
31844 +
31845 + /* switch pcnet32 to 32bit mode */
31846 +- lp->a.write_bcr(ioaddr, 20, 2);
31847 ++ lp->a->write_bcr(ioaddr, 20, 2);
31848 +
31849 + netif_printk(lp, ifup, KERN_DEBUG, dev,
31850 + "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
31851 +@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_devic
31852 + (u32) (lp->init_dma_addr));
31853 +
31854 + /* set/reset autoselect bit */
31855 +- val = lp->a.read_bcr(ioaddr, 2) & ~2;
31856 ++ val = lp->a->read_bcr(ioaddr, 2) & ~2;
31857 + if (lp->options & PCNET32_PORT_ASEL)
31858 + val |= 2;
31859 +- lp->a.write_bcr(ioaddr, 2, val);
31860 ++ lp->a->write_bcr(ioaddr, 2, val);
31861 +
31862 + /* handle full duplex setting */
31863 + if (lp->mii_if.full_duplex) {
31864 +- val = lp->a.read_bcr(ioaddr, 9) & ~3;
31865 ++ val = lp->a->read_bcr(ioaddr, 9) & ~3;
31866 + if (lp->options & PCNET32_PORT_FD) {
31867 + val |= 1;
31868 + if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
31869 +@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_devic
31870 + if (lp->chip_version == 0x2627)
31871 + val |= 3;
31872 + }
31873 +- lp->a.write_bcr(ioaddr, 9, val);
31874 ++ lp->a->write_bcr(ioaddr, 9, val);
31875 + }
31876 +
31877 + /* set/reset GPSI bit in test register */
31878 +- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
31879 ++ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
31880 + if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
31881 + val |= 0x10;
31882 +- lp->a.write_csr(ioaddr, 124, val);
31883 ++ lp->a->write_csr(ioaddr, 124, val);
31884 +
31885 + /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
31886 + if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
31887 +@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_devic
31888 + * duplex, and/or enable auto negotiation, and clear DANAS
31889 + */
31890 + if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
31891 +- lp->a.write_bcr(ioaddr, 32,
31892 +- lp->a.read_bcr(ioaddr, 32) | 0x0080);
31893 ++ lp->a->write_bcr(ioaddr, 32,
31894 ++ lp->a->read_bcr(ioaddr, 32) | 0x0080);
31895 + /* disable Auto Negotiation, set 10Mpbs, HD */
31896 +- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
31897 ++ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
31898 + if (lp->options & PCNET32_PORT_FD)
31899 + val |= 0x10;
31900 + if (lp->options & PCNET32_PORT_100)
31901 + val |= 0x08;
31902 +- lp->a.write_bcr(ioaddr, 32, val);
31903 ++ lp->a->write_bcr(ioaddr, 32, val);
31904 + } else {
31905 + if (lp->options & PCNET32_PORT_ASEL) {
31906 +- lp->a.write_bcr(ioaddr, 32,
31907 +- lp->a.read_bcr(ioaddr,
31908 ++ lp->a->write_bcr(ioaddr, 32,
31909 ++ lp->a->read_bcr(ioaddr,
31910 + 32) | 0x0080);
31911 + /* enable auto negotiate, setup, disable fd */
31912 +- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
31913 ++ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
31914 + val |= 0x20;
31915 +- lp->a.write_bcr(ioaddr, 32, val);
31916 ++ lp->a->write_bcr(ioaddr, 32, val);
31917 + }
31918 + }
31919 + } else {
31920 +@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_devic
31921 + * There is really no good other way to handle multiple PHYs
31922 + * other than turning off all automatics
31923 + */
31924 +- val = lp->a.read_bcr(ioaddr, 2);
31925 +- lp->a.write_bcr(ioaddr, 2, val & ~2);
31926 +- val = lp->a.read_bcr(ioaddr, 32);
31927 +- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31928 ++ val = lp->a->read_bcr(ioaddr, 2);
31929 ++ lp->a->write_bcr(ioaddr, 2, val & ~2);
31930 ++ val = lp->a->read_bcr(ioaddr, 32);
31931 ++ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
31932 +
31933 + if (!(lp->options & PCNET32_PORT_ASEL)) {
31934 + /* setup ecmd */
31935 +@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_devic
31936 + ethtool_cmd_speed_set(&ecmd,
31937 + (lp->options & PCNET32_PORT_100) ?
31938 + SPEED_100 : SPEED_10);
31939 +- bcr9 = lp->a.read_bcr(ioaddr, 9);
31940 ++ bcr9 = lp->a->read_bcr(ioaddr, 9);
31941 +
31942 + if (lp->options & PCNET32_PORT_FD) {
31943 + ecmd.duplex = DUPLEX_FULL;
31944 +@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_devic
31945 + ecmd.duplex = DUPLEX_HALF;
31946 + bcr9 |= ~(1 << 0);
31947 + }
31948 +- lp->a.write_bcr(ioaddr, 9, bcr9);
31949 ++ lp->a->write_bcr(ioaddr, 9, bcr9);
31950 + }
31951 +
31952 + for (i = 0; i < PCNET32_MAX_PHYS; i++) {
31953 +@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_devic
31954 +
31955 + #ifdef DO_DXSUFLO
31956 + if (lp->dxsuflo) { /* Disable transmit stop on underflow */
31957 +- val = lp->a.read_csr(ioaddr, CSR3);
31958 ++ val = lp->a->read_csr(ioaddr, CSR3);
31959 + val |= 0x40;
31960 +- lp->a.write_csr(ioaddr, CSR3, val);
31961 ++ lp->a->write_csr(ioaddr, CSR3, val);
31962 + }
31963 + #endif
31964 +
31965 +@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_devic
31966 + napi_enable(&lp->napi);
31967 +
31968 + /* Re-initialize the PCNET32, and start it when done. */
31969 +- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
31970 +- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
31971 ++ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
31972 ++ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
31973 +
31974 +- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31975 +- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
31976 ++ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
31977 ++ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
31978 +
31979 + netif_start_queue(dev);
31980 +
31981 +@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_devic
31982 +
31983 + i = 0;
31984 + while (i++ < 100)
31985 +- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
31986 ++ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
31987 + break;
31988 + /*
31989 + * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
31990 + * reports that doing so triggers a bug in the '974.
31991 + */
31992 +- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
31993 ++ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
31994 +
31995 + netif_printk(lp, ifup, KERN_DEBUG, dev,
31996 + "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
31997 + i,
31998 + (u32) (lp->init_dma_addr),
31999 +- lp->a.read_csr(ioaddr, CSR0));
32000 ++ lp->a->read_csr(ioaddr, CSR0));
32001 +
32002 + spin_unlock_irqrestore(&lp->lock, flags);
32003 +
32004 +@@ -2218,7 +2218,7 @@ err_free_ring:
32005 + * Switch back to 16bit mode to avoid problems with dumb
32006 + * DOS packet driver after a warm reboot
32007 + */
32008 +- lp->a.write_bcr(ioaddr, 20, 4);
32009 ++ lp->a->write_bcr(ioaddr, 20, 4);
32010 +
32011 + err_free_irq:
32012 + spin_unlock_irqrestore(&lp->lock, flags);
32013 +@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_d
32014 +
32015 + /* wait for stop */
32016 + for (i = 0; i < 100; i++)
32017 +- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
32018 ++ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
32019 + break;
32020 +
32021 + if (i >= 100)
32022 +@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_d
32023 + return;
32024 +
32025 + /* ReInit Ring */
32026 +- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
32027 ++ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
32028 + i = 0;
32029 + while (i++ < 1000)
32030 +- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
32031 ++ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
32032 + break;
32033 +
32034 +- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
32035 ++ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
32036 + }
32037 +
32038 + static void pcnet32_tx_timeout(struct net_device *dev)
32039 +@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct ne
32040 + /* Transmitter timeout, serious problems. */
32041 + if (pcnet32_debug & NETIF_MSG_DRV)
32042 + pr_err("%s: transmit timed out, status %4.4x, resetting\n",
32043 +- dev->name, lp->a.read_csr(ioaddr, CSR0));
32044 +- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
32045 ++ dev->name, lp->a->read_csr(ioaddr, CSR0));
32046 ++ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
32047 + dev->stats.tx_errors++;
32048 + if (netif_msg_tx_err(lp)) {
32049 + int i;
32050 +@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(st
32051 +
32052 + netif_printk(lp, tx_queued, KERN_DEBUG, dev,
32053 + "%s() called, csr0 %4.4x\n",
32054 +- __func__, lp->a.read_csr(ioaddr, CSR0));
32055 ++ __func__, lp->a->read_csr(ioaddr, CSR0));
32056 +
32057 + /* Default status -- will not enable Successful-TxDone
32058 + * interrupt when that option is available to us.
32059 +@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(st
32060 + dev->stats.tx_bytes += skb->len;
32061 +
32062 + /* Trigger an immediate send poll. */
32063 +- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
32064 ++ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
32065 +
32066 + if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
32067 + lp->tx_full = 1;
32068 +@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
32069 +
32070 + spin_lock(&lp->lock);
32071 +
32072 +- csr0 = lp->a.read_csr(ioaddr, CSR0);
32073 ++ csr0 = lp->a->read_csr(ioaddr, CSR0);
32074 + while ((csr0 & 0x8f00) && --boguscnt >= 0) {
32075 + if (csr0 == 0xffff)
32076 + break; /* PCMCIA remove happened */
32077 + /* Acknowledge all of the current interrupt sources ASAP. */
32078 +- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
32079 ++ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
32080 +
32081 + netif_printk(lp, intr, KERN_DEBUG, dev,
32082 + "interrupt csr0=%#2.2x new csr=%#2.2x\n",
32083 +- csr0, lp->a.read_csr(ioaddr, CSR0));
32084 ++ csr0, lp->a->read_csr(ioaddr, CSR0));
32085 +
32086 + /* Log misc errors. */
32087 + if (csr0 & 0x4000)
32088 +@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
32089 + if (napi_schedule_prep(&lp->napi)) {
32090 + u16 val;
32091 + /* set interrupt masks */
32092 +- val = lp->a.read_csr(ioaddr, CSR3);
32093 ++ val = lp->a->read_csr(ioaddr, CSR3);
32094 + val |= 0x5f00;
32095 +- lp->a.write_csr(ioaddr, CSR3, val);
32096 ++ lp->a->write_csr(ioaddr, CSR3, val);
32097 +
32098 + __napi_schedule(&lp->napi);
32099 + break;
32100 + }
32101 +- csr0 = lp->a.read_csr(ioaddr, CSR0);
32102 ++ csr0 = lp->a->read_csr(ioaddr, CSR0);
32103 + }
32104 +
32105 + netif_printk(lp, intr, KERN_DEBUG, dev,
32106 + "exiting interrupt, csr0=%#4.4x\n",
32107 +- lp->a.read_csr(ioaddr, CSR0));
32108 ++ lp->a->read_csr(ioaddr, CSR0));
32109 +
32110 + spin_unlock(&lp->lock);
32111 +
32112 +@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_devi
32113 +
32114 + spin_lock_irqsave(&lp->lock, flags);
32115 +
32116 +- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
32117 ++ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
32118 +
32119 + netif_printk(lp, ifdown, KERN_DEBUG, dev,
32120 + "Shutting down ethercard, status was %2.2x\n",
32121 +- lp->a.read_csr(ioaddr, CSR0));
32122 ++ lp->a->read_csr(ioaddr, CSR0));
32123 +
32124 + /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
32125 +- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
32126 ++ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
32127 +
32128 + /*
32129 + * Switch back to 16bit mode to avoid problems with dumb
32130 + * DOS packet driver after a warm reboot
32131 + */
32132 +- lp->a.write_bcr(ioaddr, 20, 4);
32133 ++ lp->a->write_bcr(ioaddr, 20, 4);
32134 +
32135 + spin_unlock_irqrestore(&lp->lock, flags);
32136 +
32137 +@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_
32138 + unsigned long flags;
32139 +
32140 + spin_lock_irqsave(&lp->lock, flags);
32141 +- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
32142 ++ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
32143 + spin_unlock_irqrestore(&lp->lock, flags);
32144 +
32145 + return &dev->stats;
32146 +@@ -2577,10 +2577,10 @@ static void pcnet32_load_multicast(struc
32147 + if (dev->flags & IFF_ALLMULTI) {
32148 + ib->filter[0] = cpu_to_le32(~0U);
32149 + ib->filter[1] = cpu_to_le32(~0U);
32150 +- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
32151 +- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
32152 +- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
32153 +- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
32154 ++ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
32155 ++ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
32156 ++ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
32157 ++ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
32158 + return;
32159 + }
32160 + /* clear the multicast filter */
32161 +@@ -2594,7 +2594,7 @@ static void pcnet32_load_multicast(struc
32162 + mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
32163 + }
32164 + for (i = 0; i < 4; i++)
32165 +- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
32166 ++ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
32167 + le16_to_cpu(mcast_table[i]));
32168 + }
32169 +
32170 +@@ -2609,28 +2609,28 @@ static void pcnet32_set_multicast_list(s
32171 +
32172 + spin_lock_irqsave(&lp->lock, flags);
32173 + suspended = pcnet32_suspend(dev, &flags, 0);
32174 +- csr15 = lp->a.read_csr(ioaddr, CSR15);
32175 ++ csr15 = lp->a->read_csr(ioaddr, CSR15);
32176 + if (dev->flags & IFF_PROMISC) {
32177 + /* Log any net taps. */
32178 + netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
32179 + lp->init_block->mode =
32180 + cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
32181 + 7);
32182 +- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
32183 ++ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
32184 + } else {
32185 + lp->init_block->mode =
32186 + cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
32187 +- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
32188 ++ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
32189 + pcnet32_load_multicast(dev);
32190 + }
32191 +
32192 + if (suspended) {
32193 + int csr5;
32194 + /* clear SUSPEND (SPND) - CSR5 bit 0 */
32195 +- csr5 = lp->a.read_csr(ioaddr, CSR5);
32196 +- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
32197 ++ csr5 = lp->a->read_csr(ioaddr, CSR5);
32198 ++ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
32199 + } else {
32200 +- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
32201 ++ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
32202 + pcnet32_restart(dev, CSR0_NORMAL);
32203 + netif_wake_queue(dev);
32204 + }
32205 +@@ -2648,8 +2648,8 @@ static int mdio_read(struct net_device *
32206 + if (!lp->mii)
32207 + return 0;
32208 +
32209 +- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32210 +- val_out = lp->a.read_bcr(ioaddr, 34);
32211 ++ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32212 ++ val_out = lp->a->read_bcr(ioaddr, 34);
32213 +
32214 + return val_out;
32215 + }
32216 +@@ -2663,8 +2663,8 @@ static void mdio_write(struct net_device
32217 + if (!lp->mii)
32218 + return;
32219 +
32220 +- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32221 +- lp->a.write_bcr(ioaddr, 34, val);
32222 ++ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
32223 ++ lp->a->write_bcr(ioaddr, 34, val);
32224 + }
32225 +
32226 + static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32227 +@@ -2741,7 +2741,7 @@ static void pcnet32_check_media(struct n
32228 + curr_link = mii_link_ok(&lp->mii_if);
32229 + } else {
32230 + ulong ioaddr = dev->base_addr; /* card base I/O address */
32231 +- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
32232 ++ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
32233 + }
32234 + if (!curr_link) {
32235 + if (prev_link || verbose) {
32236 +@@ -2764,13 +2764,13 @@ static void pcnet32_check_media(struct n
32237 + (ecmd.duplex == DUPLEX_FULL)
32238 + ? "full" : "half");
32239 + }
32240 +- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
32241 ++ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
32242 + if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
32243 + if (lp->mii_if.full_duplex)
32244 + bcr9 |= (1 << 0);
32245 + else
32246 + bcr9 &= ~(1 << 0);
32247 +- lp->a.write_bcr(dev->base_addr, 9, bcr9);
32248 ++ lp->a->write_bcr(dev->base_addr, 9, bcr9);
32249 + }
32250 + } else {
32251 + netif_info(lp, link, dev, "link up\n");
32252 +diff -urNp linux-3.1.1/drivers/net/ppp_generic.c linux-3.1.1/drivers/net/ppp_generic.c
32253 +--- linux-3.1.1/drivers/net/ppp_generic.c 2011-11-11 15:19:27.000000000 -0500
32254 ++++ linux-3.1.1/drivers/net/ppp_generic.c 2011-11-16 18:39:07.000000000 -0500
32255 +@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
32256 + void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
32257 + struct ppp_stats stats;
32258 + struct ppp_comp_stats cstats;
32259 +- char *vers;
32260 +
32261 + switch (cmd) {
32262 + case SIOCGPPPSTATS:
32263 +@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
32264 + break;
32265 +
32266 + case SIOCGPPPVER:
32267 +- vers = PPP_VERSION;
32268 +- if (copy_to_user(addr, vers, strlen(vers) + 1))
32269 ++ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
32270 + break;
32271 + err = 0;
32272 + break;
32273 +diff -urNp linux-3.1.1/drivers/net/r8169.c linux-3.1.1/drivers/net/r8169.c
32274 +--- linux-3.1.1/drivers/net/r8169.c 2011-11-11 15:19:27.000000000 -0500
32275 ++++ linux-3.1.1/drivers/net/r8169.c 2011-11-16 18:39:07.000000000 -0500
32276 +@@ -663,12 +663,12 @@ struct rtl8169_private {
32277 + struct mdio_ops {
32278 + void (*write)(void __iomem *, int, int);
32279 + int (*read)(void __iomem *, int);
32280 +- } mdio_ops;
32281 ++ } __no_const mdio_ops;
32282 +
32283 + struct pll_power_ops {
32284 + void (*down)(struct rtl8169_private *);
32285 + void (*up)(struct rtl8169_private *);
32286 +- } pll_power_ops;
32287 ++ } __no_const pll_power_ops;
32288 +
32289 + int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
32290 + int (*get_settings)(struct net_device *, struct ethtool_cmd *);
32291 +diff -urNp linux-3.1.1/drivers/net/sis190.c linux-3.1.1/drivers/net/sis190.c
32292 +--- linux-3.1.1/drivers/net/sis190.c 2011-11-11 15:19:27.000000000 -0500
32293 ++++ linux-3.1.1/drivers/net/sis190.c 2011-11-16 18:39:07.000000000 -0500
32294 +@@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr
32295 + static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
32296 + struct net_device *dev)
32297 + {
32298 +- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
32299 ++ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
32300 + struct sis190_private *tp = netdev_priv(dev);
32301 + struct pci_dev *isa_bridge;
32302 + u8 reg, tmp8;
32303 +diff -urNp linux-3.1.1/drivers/net/sundance.c linux-3.1.1/drivers/net/sundance.c
32304 +--- linux-3.1.1/drivers/net/sundance.c 2011-11-11 15:19:27.000000000 -0500
32305 ++++ linux-3.1.1/drivers/net/sundance.c 2011-11-16 18:39:07.000000000 -0500
32306 +@@ -218,7 +218,7 @@ enum {
32307 + struct pci_id_info {
32308 + const char *name;
32309 + };
32310 +-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32311 ++static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32312 + {"D-Link DFE-550TX FAST Ethernet Adapter"},
32313 + {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32314 + {"D-Link DFE-580TX 4 port Server Adapter"},
32315 +diff -urNp linux-3.1.1/drivers/net/tg3.h linux-3.1.1/drivers/net/tg3.h
32316 +--- linux-3.1.1/drivers/net/tg3.h 2011-11-11 15:19:27.000000000 -0500
32317 ++++ linux-3.1.1/drivers/net/tg3.h 2011-11-16 18:39:07.000000000 -0500
32318 +@@ -134,6 +134,7 @@
32319 + #define CHIPREV_ID_5750_A0 0x4000
32320 + #define CHIPREV_ID_5750_A1 0x4001
32321 + #define CHIPREV_ID_5750_A3 0x4003
32322 ++#define CHIPREV_ID_5750_C1 0x4201
32323 + #define CHIPREV_ID_5750_C2 0x4202
32324 + #define CHIPREV_ID_5752_A0_HW 0x5000
32325 + #define CHIPREV_ID_5752_A0 0x6000
32326 +diff -urNp linux-3.1.1/drivers/net/tokenring/abyss.c linux-3.1.1/drivers/net/tokenring/abyss.c
32327 +--- linux-3.1.1/drivers/net/tokenring/abyss.c 2011-11-11 15:19:27.000000000 -0500
32328 ++++ linux-3.1.1/drivers/net/tokenring/abyss.c 2011-11-16 18:39:07.000000000 -0500
32329 +@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
32330 +
32331 + static int __init abyss_init (void)
32332 + {
32333 +- abyss_netdev_ops = tms380tr_netdev_ops;
32334 ++ pax_open_kernel();
32335 ++ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32336 +
32337 +- abyss_netdev_ops.ndo_open = abyss_open;
32338 +- abyss_netdev_ops.ndo_stop = abyss_close;
32339 ++ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
32340 ++ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
32341 ++ pax_close_kernel();
32342 +
32343 + return pci_register_driver(&abyss_driver);
32344 + }
32345 +diff -urNp linux-3.1.1/drivers/net/tokenring/madgemc.c linux-3.1.1/drivers/net/tokenring/madgemc.c
32346 +--- linux-3.1.1/drivers/net/tokenring/madgemc.c 2011-11-11 15:19:27.000000000 -0500
32347 ++++ linux-3.1.1/drivers/net/tokenring/madgemc.c 2011-11-16 18:39:07.000000000 -0500
32348 +@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
32349 +
32350 + static int __init madgemc_init (void)
32351 + {
32352 +- madgemc_netdev_ops = tms380tr_netdev_ops;
32353 +- madgemc_netdev_ops.ndo_open = madgemc_open;
32354 +- madgemc_netdev_ops.ndo_stop = madgemc_close;
32355 ++ pax_open_kernel();
32356 ++ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32357 ++ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
32358 ++ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
32359 ++ pax_close_kernel();
32360 +
32361 + return mca_register_driver (&madgemc_driver);
32362 + }
32363 +diff -urNp linux-3.1.1/drivers/net/tokenring/proteon.c linux-3.1.1/drivers/net/tokenring/proteon.c
32364 +--- linux-3.1.1/drivers/net/tokenring/proteon.c 2011-11-11 15:19:27.000000000 -0500
32365 ++++ linux-3.1.1/drivers/net/tokenring/proteon.c 2011-11-16 18:39:07.000000000 -0500
32366 +@@ -353,9 +353,11 @@ static int __init proteon_init(void)
32367 + struct platform_device *pdev;
32368 + int i, num = 0, err = 0;
32369 +
32370 +- proteon_netdev_ops = tms380tr_netdev_ops;
32371 +- proteon_netdev_ops.ndo_open = proteon_open;
32372 +- proteon_netdev_ops.ndo_stop = tms380tr_close;
32373 ++ pax_open_kernel();
32374 ++ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32375 ++ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
32376 ++ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
32377 ++ pax_close_kernel();
32378 +
32379 + err = platform_driver_register(&proteon_driver);
32380 + if (err)
32381 +diff -urNp linux-3.1.1/drivers/net/tokenring/skisa.c linux-3.1.1/drivers/net/tokenring/skisa.c
32382 +--- linux-3.1.1/drivers/net/tokenring/skisa.c 2011-11-11 15:19:27.000000000 -0500
32383 ++++ linux-3.1.1/drivers/net/tokenring/skisa.c 2011-11-16 18:39:07.000000000 -0500
32384 +@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
32385 + struct platform_device *pdev;
32386 + int i, num = 0, err = 0;
32387 +
32388 +- sk_isa_netdev_ops = tms380tr_netdev_ops;
32389 +- sk_isa_netdev_ops.ndo_open = sk_isa_open;
32390 +- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32391 ++ pax_open_kernel();
32392 ++ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32393 ++ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
32394 ++ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32395 ++ pax_close_kernel();
32396 +
32397 + err = platform_driver_register(&sk_isa_driver);
32398 + if (err)
32399 +diff -urNp linux-3.1.1/drivers/net/tulip/de2104x.c linux-3.1.1/drivers/net/tulip/de2104x.c
32400 +--- linux-3.1.1/drivers/net/tulip/de2104x.c 2011-11-11 15:19:27.000000000 -0500
32401 ++++ linux-3.1.1/drivers/net/tulip/de2104x.c 2011-11-16 18:40:22.000000000 -0500
32402 +@@ -1795,6 +1795,8 @@ static void __devinit de21041_get_srom_i
32403 + struct de_srom_info_leaf *il;
32404 + void *bufp;
32405 +
32406 ++ pax_track_stack();
32407 ++
32408 + /* download entire eeprom */
32409 + for (i = 0; i < DE_EEPROM_WORDS; i++)
32410 + ((__le16 *)ee_data)[i] =
32411 +diff -urNp linux-3.1.1/drivers/net/tulip/de4x5.c linux-3.1.1/drivers/net/tulip/de4x5.c
32412 +--- linux-3.1.1/drivers/net/tulip/de4x5.c 2011-11-11 15:19:27.000000000 -0500
32413 ++++ linux-3.1.1/drivers/net/tulip/de4x5.c 2011-11-16 18:39:07.000000000 -0500
32414 +@@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, stru
32415 + for (i=0; i<ETH_ALEN; i++) {
32416 + tmp.addr[i] = dev->dev_addr[i];
32417 + }
32418 +- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32419 ++ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32420 + break;
32421 +
32422 + case DE4X5_SET_HWADDR: /* Set the hardware address */
32423 +@@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, stru
32424 + spin_lock_irqsave(&lp->lock, flags);
32425 + memcpy(&statbuf, &lp->pktStats, ioc->len);
32426 + spin_unlock_irqrestore(&lp->lock, flags);
32427 +- if (copy_to_user(ioc->data, &statbuf, ioc->len))
32428 ++ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32429 + return -EFAULT;
32430 + break;
32431 + }
32432 +diff -urNp linux-3.1.1/drivers/net/tulip/eeprom.c linux-3.1.1/drivers/net/tulip/eeprom.c
32433 +--- linux-3.1.1/drivers/net/tulip/eeprom.c 2011-11-11 15:19:27.000000000 -0500
32434 ++++ linux-3.1.1/drivers/net/tulip/eeprom.c 2011-11-16 18:39:07.000000000 -0500
32435 +@@ -81,7 +81,7 @@ static struct eeprom_fixup eeprom_fixups
32436 + {NULL}};
32437 +
32438 +
32439 +-static const char *block_name[] __devinitdata = {
32440 ++static const char *block_name[] __devinitconst = {
32441 + "21140 non-MII",
32442 + "21140 MII PHY",
32443 + "21142 Serial PHY",
32444 +diff -urNp linux-3.1.1/drivers/net/tulip/winbond-840.c linux-3.1.1/drivers/net/tulip/winbond-840.c
32445 +--- linux-3.1.1/drivers/net/tulip/winbond-840.c 2011-11-11 15:19:27.000000000 -0500
32446 ++++ linux-3.1.1/drivers/net/tulip/winbond-840.c 2011-11-16 18:39:07.000000000 -0500
32447 +@@ -236,7 +236,7 @@ struct pci_id_info {
32448 + int drv_flags; /* Driver use, intended as capability flags. */
32449 + };
32450 +
32451 +-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32452 ++static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32453 + { /* Sometime a Level-One switch card. */
32454 + "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32455 + { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32456 +diff -urNp linux-3.1.1/drivers/net/usb/hso.c linux-3.1.1/drivers/net/usb/hso.c
32457 +--- linux-3.1.1/drivers/net/usb/hso.c 2011-11-11 15:19:27.000000000 -0500
32458 ++++ linux-3.1.1/drivers/net/usb/hso.c 2011-11-16 18:39:07.000000000 -0500
32459 +@@ -71,7 +71,7 @@
32460 + #include <asm/byteorder.h>
32461 + #include <linux/serial_core.h>
32462 + #include <linux/serial.h>
32463 +-
32464 ++#include <asm/local.h>
32465 +
32466 + #define MOD_AUTHOR "Option Wireless"
32467 + #define MOD_DESCRIPTION "USB High Speed Option driver"
32468 +@@ -257,7 +257,7 @@ struct hso_serial {
32469 +
32470 + /* from usb_serial_port */
32471 + struct tty_struct *tty;
32472 +- int open_count;
32473 ++ local_t open_count;
32474 + spinlock_t serial_lock;
32475 +
32476 + int (*write_data) (struct hso_serial *serial);
32477 +@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
32478 + struct urb *urb;
32479 +
32480 + urb = serial->rx_urb[0];
32481 +- if (serial->open_count > 0) {
32482 ++ if (local_read(&serial->open_count) > 0) {
32483 + count = put_rxbuf_data(urb, serial);
32484 + if (count == -1)
32485 + return;
32486 +@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
32487 + DUMP1(urb->transfer_buffer, urb->actual_length);
32488 +
32489 + /* Anyone listening? */
32490 +- if (serial->open_count == 0)
32491 ++ if (local_read(&serial->open_count) == 0)
32492 + return;
32493 +
32494 + if (status == 0) {
32495 +@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
32496 + spin_unlock_irq(&serial->serial_lock);
32497 +
32498 + /* check for port already opened, if not set the termios */
32499 +- serial->open_count++;
32500 +- if (serial->open_count == 1) {
32501 ++ if (local_inc_return(&serial->open_count) == 1) {
32502 + serial->rx_state = RX_IDLE;
32503 + /* Force default termio settings */
32504 + _hso_serial_set_termios(tty, NULL);
32505 +@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
32506 + result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32507 + if (result) {
32508 + hso_stop_serial_device(serial->parent);
32509 +- serial->open_count--;
32510 ++ local_dec(&serial->open_count);
32511 + kref_put(&serial->parent->ref, hso_serial_ref_free);
32512 + }
32513 + } else {
32514 +@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
32515 +
32516 + /* reset the rts and dtr */
32517 + /* do the actual close */
32518 +- serial->open_count--;
32519 ++ local_dec(&serial->open_count);
32520 +
32521 +- if (serial->open_count <= 0) {
32522 +- serial->open_count = 0;
32523 ++ if (local_read(&serial->open_count) <= 0) {
32524 ++ local_set(&serial->open_count, 0);
32525 + spin_lock_irq(&serial->serial_lock);
32526 + if (serial->tty == tty) {
32527 + serial->tty->driver_data = NULL;
32528 +@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
32529 +
32530 + /* the actual setup */
32531 + spin_lock_irqsave(&serial->serial_lock, flags);
32532 +- if (serial->open_count)
32533 ++ if (local_read(&serial->open_count))
32534 + _hso_serial_set_termios(tty, old);
32535 + else
32536 + tty->termios = old;
32537 +@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
32538 + D1("Pending read interrupt on port %d\n", i);
32539 + spin_lock(&serial->serial_lock);
32540 + if (serial->rx_state == RX_IDLE &&
32541 +- serial->open_count > 0) {
32542 ++ local_read(&serial->open_count) > 0) {
32543 + /* Setup and send a ctrl req read on
32544 + * port i */
32545 + if (!serial->rx_urb_filled[0]) {
32546 +@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interfa
32547 + /* Start all serial ports */
32548 + for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32549 + if (serial_table[i] && (serial_table[i]->interface == iface)) {
32550 +- if (dev2ser(serial_table[i])->open_count) {
32551 ++ if (local_read(&dev2ser(serial_table[i])->open_count)) {
32552 + result =
32553 + hso_start_serial_device(serial_table[i], GFP_NOIO);
32554 + hso_kick_transmit(dev2ser(serial_table[i]));
32555 +diff -urNp linux-3.1.1/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-3.1.1/drivers/net/vmxnet3/vmxnet3_ethtool.c
32556 +--- linux-3.1.1/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-11-11 15:19:27.000000000 -0500
32557 ++++ linux-3.1.1/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-11-16 18:39:07.000000000 -0500
32558 +@@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device
32559 + * Return with error code if any of the queue indices
32560 + * is out of range
32561 + */
32562 +- if (p->ring_index[i] < 0 ||
32563 +- p->ring_index[i] >= adapter->num_rx_queues)
32564 ++ if (p->ring_index[i] >= adapter->num_rx_queues)
32565 + return -EINVAL;
32566 + }
32567 +
32568 +diff -urNp linux-3.1.1/drivers/net/vxge/vxge-config.h linux-3.1.1/drivers/net/vxge/vxge-config.h
32569 +--- linux-3.1.1/drivers/net/vxge/vxge-config.h 2011-11-11 15:19:27.000000000 -0500
32570 ++++ linux-3.1.1/drivers/net/vxge/vxge-config.h 2011-11-16 18:39:07.000000000 -0500
32571 +@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
32572 + void (*link_down)(struct __vxge_hw_device *devh);
32573 + void (*crit_err)(struct __vxge_hw_device *devh,
32574 + enum vxge_hw_event type, u64 ext_data);
32575 +-};
32576 ++} __no_const;
32577 +
32578 + /*
32579 + * struct __vxge_hw_blockpool_entry - Block private data structure
32580 +diff -urNp linux-3.1.1/drivers/net/vxge/vxge-main.c linux-3.1.1/drivers/net/vxge/vxge-main.c
32581 +--- linux-3.1.1/drivers/net/vxge/vxge-main.c 2011-11-11 15:19:27.000000000 -0500
32582 ++++ linux-3.1.1/drivers/net/vxge/vxge-main.c 2011-11-16 18:40:22.000000000 -0500
32583 +@@ -100,6 +100,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32584 + struct sk_buff *completed[NR_SKB_COMPLETED];
32585 + int more;
32586 +
32587 ++ pax_track_stack();
32588 ++
32589 + do {
32590 + more = 0;
32591 + skb_ptr = completed;
32592 +@@ -1915,6 +1917,8 @@ static enum vxge_hw_status vxge_rth_conf
32593 + u8 mtable[256] = {0}; /* CPU to vpath mapping */
32594 + int index;
32595 +
32596 ++ pax_track_stack();
32597 ++
32598 + /*
32599 + * Filling
32600 + * - itable with bucket numbers
32601 +diff -urNp linux-3.1.1/drivers/net/vxge/vxge-traffic.h linux-3.1.1/drivers/net/vxge/vxge-traffic.h
32602 +--- linux-3.1.1/drivers/net/vxge/vxge-traffic.h 2011-11-11 15:19:27.000000000 -0500
32603 ++++ linux-3.1.1/drivers/net/vxge/vxge-traffic.h 2011-11-16 18:39:07.000000000 -0500
32604 +@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
32605 + struct vxge_hw_mempool_dma *dma_object,
32606 + u32 index,
32607 + u32 is_last);
32608 +-};
32609 ++} __no_const;
32610 +
32611 + #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
32612 + ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
32613 +diff -urNp linux-3.1.1/drivers/net/wan/hdlc_x25.c linux-3.1.1/drivers/net/wan/hdlc_x25.c
32614 +--- linux-3.1.1/drivers/net/wan/hdlc_x25.c 2011-11-11 15:19:27.000000000 -0500
32615 ++++ linux-3.1.1/drivers/net/wan/hdlc_x25.c 2011-11-16 18:39:07.000000000 -0500
32616 +@@ -134,16 +134,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
32617 +
32618 + static int x25_open(struct net_device *dev)
32619 + {
32620 +- struct lapb_register_struct cb;
32621 ++ static struct lapb_register_struct cb = {
32622 ++ .connect_confirmation = x25_connected,
32623 ++ .connect_indication = x25_connected,
32624 ++ .disconnect_confirmation = x25_disconnected,
32625 ++ .disconnect_indication = x25_disconnected,
32626 ++ .data_indication = x25_data_indication,
32627 ++ .data_transmit = x25_data_transmit
32628 ++ };
32629 + int result;
32630 +
32631 +- cb.connect_confirmation = x25_connected;
32632 +- cb.connect_indication = x25_connected;
32633 +- cb.disconnect_confirmation = x25_disconnected;
32634 +- cb.disconnect_indication = x25_disconnected;
32635 +- cb.data_indication = x25_data_indication;
32636 +- cb.data_transmit = x25_data_transmit;
32637 +-
32638 + result = lapb_register(dev, &cb);
32639 + if (result != LAPB_OK)
32640 + return result;
32641 +diff -urNp linux-3.1.1/drivers/net/wimax/i2400m/usb-fw.c linux-3.1.1/drivers/net/wimax/i2400m/usb-fw.c
32642 +--- linux-3.1.1/drivers/net/wimax/i2400m/usb-fw.c 2011-11-11 15:19:27.000000000 -0500
32643 ++++ linux-3.1.1/drivers/net/wimax/i2400m/usb-fw.c 2011-11-16 18:40:22.000000000 -0500
32644 +@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
32645 + int do_autopm = 1;
32646 + DECLARE_COMPLETION_ONSTACK(notif_completion);
32647 +
32648 ++ pax_track_stack();
32649 ++
32650 + d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
32651 + i2400m, ack, ack_size);
32652 + BUG_ON(_ack == i2400m->bm_ack_buf);
32653 +diff -urNp linux-3.1.1/drivers/net/wireless/airo.c linux-3.1.1/drivers/net/wireless/airo.c
32654 +--- linux-3.1.1/drivers/net/wireless/airo.c 2011-11-11 15:19:27.000000000 -0500
32655 ++++ linux-3.1.1/drivers/net/wireless/airo.c 2011-11-16 18:40:22.000000000 -0500
32656 +@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
32657 + BSSListElement * loop_net;
32658 + BSSListElement * tmp_net;
32659 +
32660 ++ pax_track_stack();
32661 ++
32662 + /* Blow away current list of scan results */
32663 + list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
32664 + list_move_tail (&loop_net->list, &ai->network_free_list);
32665 +@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *
32666 + WepKeyRid wkr;
32667 + int rc;
32668 +
32669 ++ pax_track_stack();
32670 ++
32671 + memset( &mySsid, 0, sizeof( mySsid ) );
32672 + kfree (ai->flash);
32673 + ai->flash = NULL;
32674 +@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct i
32675 + __le32 *vals = stats.vals;
32676 + int len;
32677 +
32678 ++ pax_track_stack();
32679 ++
32680 + if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32681 + return -ENOMEM;
32682 + data = file->private_data;
32683 +@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct ino
32684 + /* If doLoseSync is not 1, we won't do a Lose Sync */
32685 + int doLoseSync = -1;
32686 +
32687 ++ pax_track_stack();
32688 ++
32689 + if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32690 + return -ENOMEM;
32691 + data = file->private_data;
32692 +@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_de
32693 + int i;
32694 + int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
32695 +
32696 ++ pax_track_stack();
32697 ++
32698 + qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
32699 + if (!qual)
32700 + return -ENOMEM;
32701 +@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(str
32702 + CapabilityRid cap_rid;
32703 + __le32 *vals = stats_rid.vals;
32704 +
32705 ++ pax_track_stack();
32706 ++
32707 + /* Get stats out of the card */
32708 + clear_bit(JOB_WSTATS, &local->jobs);
32709 + if (local->power.event) {
32710 +diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath5k/debug.c linux-3.1.1/drivers/net/wireless/ath/ath5k/debug.c
32711 +--- linux-3.1.1/drivers/net/wireless/ath/ath5k/debug.c 2011-11-11 15:19:27.000000000 -0500
32712 ++++ linux-3.1.1/drivers/net/wireless/ath/ath5k/debug.c 2011-11-16 19:08:21.000000000 -0500
32713 +@@ -203,6 +203,8 @@ static ssize_t read_file_beacon(struct f
32714 + unsigned int v;
32715 + u64 tsf;
32716 +
32717 ++ pax_track_stack();
32718 ++
32719 + v = ath5k_hw_reg_read(ah, AR5K_BEACON);
32720 + len += snprintf(buf + len, sizeof(buf) - len,
32721 + "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
32722 +@@ -321,6 +323,8 @@ static ssize_t read_file_debug(struct fi
32723 + unsigned int len = 0;
32724 + unsigned int i;
32725 +
32726 ++ pax_track_stack();
32727 ++
32728 + len += snprintf(buf + len, sizeof(buf) - len,
32729 + "DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);
32730 +
32731 +@@ -492,6 +496,8 @@ static ssize_t read_file_misc(struct fil
32732 + unsigned int len = 0;
32733 + u32 filt = ath5k_hw_get_rx_filter(ah);
32734 +
32735 ++ pax_track_stack();
32736 ++
32737 + len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
32738 + ah->bssidmask);
32739 + len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
32740 +@@ -548,6 +554,8 @@ static ssize_t read_file_frameerrors(str
32741 + unsigned int len = 0;
32742 + int i;
32743 +
32744 ++ pax_track_stack();
32745 ++
32746 + len += snprintf(buf + len, sizeof(buf) - len,
32747 + "RX\n---------------------\n");
32748 + len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
32749 +@@ -665,6 +673,8 @@ static ssize_t read_file_ani(struct file
32750 + char buf[700];
32751 + unsigned int len = 0;
32752 +
32753 ++ pax_track_stack();
32754 ++
32755 + len += snprintf(buf + len, sizeof(buf) - len,
32756 + "HW has PHY error counters:\t%s\n",
32757 + ah->ah_capabilities.cap_has_phyerr_counters ?
32758 +@@ -829,6 +839,8 @@ static ssize_t read_file_queue(struct fi
32759 + struct ath5k_buf *bf, *bf0;
32760 + int i, n;
32761 +
32762 ++ pax_track_stack();
32763 ++
32764 + len += snprintf(buf + len, sizeof(buf) - len,
32765 + "available txbuffers: %d\n", ah->txbuf_len);
32766 +
32767 +diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_calib.c
32768 +--- linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-11-11 15:19:27.000000000 -0500
32769 ++++ linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-11-16 18:40:22.000000000 -0500
32770 +@@ -758,6 +758,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
32771 + int i, im, j;
32772 + int nmeasurement;
32773 +
32774 ++ pax_track_stack();
32775 ++
32776 + for (i = 0; i < AR9300_MAX_CHAINS; i++) {
32777 + if (ah->txchainmask & (1 << i))
32778 + num_chains++;
32779 +diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
32780 +--- linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-11-11 15:19:27.000000000 -0500
32781 ++++ linux-3.1.1/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-11-16 18:40:22.000000000 -0500
32782 +@@ -406,6 +406,8 @@ static bool create_pa_curve(u32 *data_L,
32783 + int theta_low_bin = 0;
32784 + int i;
32785 +
32786 ++ pax_track_stack();
32787 ++
32788 + /* disregard any bin that contains <= 16 samples */
32789 + thresh_accum_cnt = 16;
32790 + scale_factor = 5;
32791 +diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/debug.c linux-3.1.1/drivers/net/wireless/ath/ath9k/debug.c
32792 +--- linux-3.1.1/drivers/net/wireless/ath/ath9k/debug.c 2011-11-11 15:19:27.000000000 -0500
32793 ++++ linux-3.1.1/drivers/net/wireless/ath/ath9k/debug.c 2011-11-16 18:40:22.000000000 -0500
32794 +@@ -387,6 +387,8 @@ static ssize_t read_file_interrupt(struc
32795 + char buf[512];
32796 + unsigned int len = 0;
32797 +
32798 ++ pax_track_stack();
32799 ++
32800 + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
32801 + len += snprintf(buf + len, sizeof(buf) - len,
32802 + "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
32803 +@@ -477,6 +479,8 @@ static ssize_t read_file_wiphy(struct fi
32804 + u8 addr[ETH_ALEN];
32805 + u32 tmp;
32806 +
32807 ++ pax_track_stack();
32808 ++
32809 + len += snprintf(buf + len, sizeof(buf) - len,
32810 + "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
32811 + wiphy_name(sc->hw->wiphy),
32812 +diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/htc_drv_debug.c linux-3.1.1/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
32813 +--- linux-3.1.1/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-11-11 15:19:27.000000000 -0500
32814 ++++ linux-3.1.1/drivers/net/wireless/ath/ath9k/htc_drv_debug.c 2011-11-16 18:40:22.000000000 -0500
32815 +@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(s
32816 + unsigned int len = 0;
32817 + int ret = 0;
32818 +
32819 ++ pax_track_stack();
32820 ++
32821 + memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32822 +
32823 + ath9k_htc_ps_wakeup(priv);
32824 +@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(st
32825 + unsigned int len = 0;
32826 + int ret = 0;
32827 +
32828 ++ pax_track_stack();
32829 ++
32830 + memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32831 +
32832 + ath9k_htc_ps_wakeup(priv);
32833 +@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(st
32834 + unsigned int len = 0;
32835 + int ret = 0;
32836 +
32837 ++ pax_track_stack();
32838 ++
32839 + memset(&cmd_rsp, 0, sizeof(cmd_rsp));
32840 +
32841 + ath9k_htc_ps_wakeup(priv);
32842 +@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct fil
32843 + char buf[512];
32844 + unsigned int len = 0;
32845 +
32846 ++ pax_track_stack();
32847 ++
32848 + len += snprintf(buf + len, sizeof(buf) - len,
32849 + "%20s : %10u\n", "Buffers queued",
32850 + priv->debug.tx_stats.buf_queued);
32851 +@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct fil
32852 + char buf[512];
32853 + unsigned int len = 0;
32854 +
32855 ++ pax_track_stack();
32856 ++
32857 + spin_lock_bh(&priv->tx.tx_lock);
32858 +
32859 + len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
32860 +@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct fi
32861 + char buf[512];
32862 + unsigned int len = 0;
32863 +
32864 ++ pax_track_stack();
32865 ++
32866 + len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
32867 + "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
32868 +
32869 +diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath9k/hw.h linux-3.1.1/drivers/net/wireless/ath/ath9k/hw.h
32870 +--- linux-3.1.1/drivers/net/wireless/ath/ath9k/hw.h 2011-11-11 15:19:27.000000000 -0500
32871 ++++ linux-3.1.1/drivers/net/wireless/ath/ath9k/hw.h 2011-11-16 18:39:07.000000000 -0500
32872 +@@ -588,7 +588,7 @@ struct ath_hw_private_ops {
32873 +
32874 + /* ANI */
32875 + void (*ani_cache_ini_regs)(struct ath_hw *ah);
32876 +-};
32877 ++} __no_const;
32878 +
32879 + /**
32880 + * struct ath_hw_ops - callbacks used by hardware code and driver code
32881 +@@ -639,7 +639,7 @@ struct ath_hw_ops {
32882 + void (*antdiv_comb_conf_set)(struct ath_hw *ah,
32883 + struct ath_hw_antcomb_conf *antconf);
32884 +
32885 +-};
32886 ++} __no_const;
32887 +
32888 + struct ath_nf_limits {
32889 + s16 max;
32890 +@@ -652,7 +652,7 @@ struct ath_nf_limits {
32891 + #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
32892 +
32893 + struct ath_hw {
32894 +- struct ath_ops reg_ops;
32895 ++ ath_ops_no_const reg_ops;
32896 +
32897 + struct ieee80211_hw *hw;
32898 + struct ath_common common;
32899 +diff -urNp linux-3.1.1/drivers/net/wireless/ath/ath.h linux-3.1.1/drivers/net/wireless/ath/ath.h
32900 +--- linux-3.1.1/drivers/net/wireless/ath/ath.h 2011-11-11 15:19:27.000000000 -0500
32901 ++++ linux-3.1.1/drivers/net/wireless/ath/ath.h 2011-11-16 18:39:07.000000000 -0500
32902 +@@ -121,6 +121,7 @@ struct ath_ops {
32903 + void (*write_flush) (void *);
32904 + u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
32905 + };
32906 ++typedef struct ath_ops __no_const ath_ops_no_const;
32907 +
32908 + struct ath_common;
32909 + struct ath_bus_ops;
32910 +diff -urNp linux-3.1.1/drivers/net/wireless/ipw2x00/ipw2100.c linux-3.1.1/drivers/net/wireless/ipw2x00/ipw2100.c
32911 +--- linux-3.1.1/drivers/net/wireless/ipw2x00/ipw2100.c 2011-11-11 15:19:27.000000000 -0500
32912 ++++ linux-3.1.1/drivers/net/wireless/ipw2x00/ipw2100.c 2011-11-16 18:40:22.000000000 -0500
32913 +@@ -2102,6 +2102,8 @@ static int ipw2100_set_essid(struct ipw2
32914 + int err;
32915 + DECLARE_SSID_BUF(ssid);
32916 +
32917 ++ pax_track_stack();
32918 ++
32919 + IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
32920 +
32921 + if (ssid_len)
32922 +@@ -5451,6 +5453,8 @@ static int ipw2100_set_key(struct ipw210
32923 + struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
32924 + int err;
32925 +
32926 ++ pax_track_stack();
32927 ++
32928 + IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
32929 + idx, keylen, len);
32930 +
32931 +diff -urNp linux-3.1.1/drivers/net/wireless/ipw2x00/libipw_rx.c linux-3.1.1/drivers/net/wireless/ipw2x00/libipw_rx.c
32932 +--- linux-3.1.1/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-11-11 15:19:27.000000000 -0500
32933 ++++ linux-3.1.1/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-11-16 18:40:22.000000000 -0500
32934 +@@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
32935 + unsigned long flags;
32936 + DECLARE_SSID_BUF(ssid);
32937 +
32938 ++ pax_track_stack();
32939 ++
32940 + LIBIPW_DEBUG_SCAN("'%s' (%pM"
32941 + "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
32942 + print_ssid(ssid, info_element->data, info_element->len),
32943 +diff -urNp linux-3.1.1/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-3.1.1/drivers/net/wireless/iwlegacy/iwl3945-base.c
32944 +--- linux-3.1.1/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-11-11 15:19:27.000000000 -0500
32945 ++++ linux-3.1.1/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-11-16 18:39:07.000000000 -0500
32946 +@@ -3687,7 +3687,9 @@ static int iwl3945_pci_probe(struct pci_
32947 + */
32948 + if (iwl3945_mod_params.disable_hw_scan) {
32949 + IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
32950 +- iwl3945_hw_ops.hw_scan = NULL;
32951 ++ pax_open_kernel();
32952 ++ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
32953 ++ pax_close_kernel();
32954 + }
32955 +
32956 + IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
32957 +diff -urNp linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
32958 +--- linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-11-11 15:19:27.000000000 -0500
32959 ++++ linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-11-16 18:40:22.000000000 -0500
32960 +@@ -920,6 +920,8 @@ static void rs_tx_status(void *priv_r, s
32961 + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
32962 + struct iwl_rxon_context *ctx = sta_priv->common.ctx;
32963 +
32964 ++ pax_track_stack();
32965 ++
32966 + IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
32967 +
32968 + /* Treat uninitialized rate scaling data same as non-existing. */
32969 +@@ -2931,6 +2933,8 @@ static void rs_fill_link_cmd(struct iwl_
32970 + container_of(lq_sta, struct iwl_station_priv, lq_sta);
32971 + struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
32972 +
32973 ++ pax_track_stack();
32974 ++
32975 + /* Override starting rate (index 0) if needed for debug purposes */
32976 + rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
32977 +
32978 +diff -urNp linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debugfs.c
32979 +--- linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-11-11 15:19:27.000000000 -0500
32980 ++++ linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-11-16 18:40:22.000000000 -0500
32981 +@@ -561,6 +561,8 @@ static ssize_t iwl_dbgfs_status_read(str
32982 + int pos = 0;
32983 + const size_t bufsz = sizeof(buf);
32984 +
32985 ++ pax_track_stack();
32986 ++
32987 + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
32988 + test_bit(STATUS_HCMD_ACTIVE, &priv->status));
32989 + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
32990 +@@ -693,6 +695,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
32991 + char buf[256 * NUM_IWL_RXON_CTX];
32992 + const size_t bufsz = sizeof(buf);
32993 +
32994 ++ pax_track_stack();
32995 ++
32996 + for_each_context(priv, ctx) {
32997 + pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
32998 + ctx->ctxid);
32999 +diff -urNp linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debug.h linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debug.h
33000 +--- linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-11-11 15:19:27.000000000 -0500
33001 ++++ linux-3.1.1/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-11-16 18:39:07.000000000 -0500
33002 +@@ -68,8 +68,8 @@ do {
33003 + } while (0)
33004 +
33005 + #else
33006 +-#define IWL_DEBUG(__priv, level, fmt, args...)
33007 +-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
33008 ++#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
33009 ++#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
33010 + static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
33011 + const void *p, u32 len)
33012 + {}
33013 +diff -urNp linux-3.1.1/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-3.1.1/drivers/net/wireless/iwmc3200wifi/debugfs.c
33014 +--- linux-3.1.1/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-11-11 15:19:27.000000000 -0500
33015 ++++ linux-3.1.1/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-11-16 18:40:22.000000000 -0500
33016 +@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
33017 + int buf_len = 512;
33018 + size_t len = 0;
33019 +
33020 ++ pax_track_stack();
33021 ++
33022 + if (*ppos != 0)
33023 + return 0;
33024 + if (count < sizeof(buf))
33025 +diff -urNp linux-3.1.1/drivers/net/wireless/mac80211_hwsim.c linux-3.1.1/drivers/net/wireless/mac80211_hwsim.c
33026 +--- linux-3.1.1/drivers/net/wireless/mac80211_hwsim.c 2011-11-11 15:19:27.000000000 -0500
33027 ++++ linux-3.1.1/drivers/net/wireless/mac80211_hwsim.c 2011-11-16 18:39:07.000000000 -0500
33028 +@@ -1670,9 +1670,11 @@ static int __init init_mac80211_hwsim(vo
33029 + return -EINVAL;
33030 +
33031 + if (fake_hw_scan) {
33032 +- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33033 +- mac80211_hwsim_ops.sw_scan_start = NULL;
33034 +- mac80211_hwsim_ops.sw_scan_complete = NULL;
33035 ++ pax_open_kernel();
33036 ++ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33037 ++ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33038 ++ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33039 ++ pax_close_kernel();
33040 + }
33041 +
33042 + spin_lock_init(&hwsim_radio_lock);
33043 +diff -urNp linux-3.1.1/drivers/net/wireless/mwifiex/main.h linux-3.1.1/drivers/net/wireless/mwifiex/main.h
33044 +--- linux-3.1.1/drivers/net/wireless/mwifiex/main.h 2011-11-11 15:19:27.000000000 -0500
33045 ++++ linux-3.1.1/drivers/net/wireless/mwifiex/main.h 2011-11-16 18:39:07.000000000 -0500
33046 +@@ -560,7 +560,7 @@ struct mwifiex_if_ops {
33047 +
33048 + void (*update_mp_end_port) (struct mwifiex_adapter *, u16);
33049 + void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33050 +-};
33051 ++} __no_const;
33052 +
33053 + struct mwifiex_adapter {
33054 + struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
33055 +diff -urNp linux-3.1.1/drivers/net/wireless/rndis_wlan.c linux-3.1.1/drivers/net/wireless/rndis_wlan.c
33056 +--- linux-3.1.1/drivers/net/wireless/rndis_wlan.c 2011-11-11 15:19:27.000000000 -0500
33057 ++++ linux-3.1.1/drivers/net/wireless/rndis_wlan.c 2011-11-16 18:39:07.000000000 -0500
33058 +@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
33059 +
33060 + netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33061 +
33062 +- if (rts_threshold < 0 || rts_threshold > 2347)
33063 ++ if (rts_threshold > 2347)
33064 + rts_threshold = 2347;
33065 +
33066 + tmp = cpu_to_le32(rts_threshold);
33067 +diff -urNp linux-3.1.1/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-3.1.1/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
33068 +--- linux-3.1.1/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-11-11 15:19:27.000000000 -0500
33069 ++++ linux-3.1.1/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-11-16 18:40:22.000000000 -0500
33070 +@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(st
33071 + u8 rfpath;
33072 + u8 num_total_rfpath = rtlphy->num_total_rfpath;
33073 +
33074 ++ pax_track_stack();
33075 ++
33076 + precommoncmdcnt = 0;
33077 + _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
33078 + MAX_PRECMD_CNT,
33079 +diff -urNp linux-3.1.1/drivers/net/wireless/wl1251/wl1251.h linux-3.1.1/drivers/net/wireless/wl1251/wl1251.h
33080 +--- linux-3.1.1/drivers/net/wireless/wl1251/wl1251.h 2011-11-11 15:19:27.000000000 -0500
33081 ++++ linux-3.1.1/drivers/net/wireless/wl1251/wl1251.h 2011-11-16 18:39:07.000000000 -0500
33082 +@@ -266,7 +266,7 @@ struct wl1251_if_operations {
33083 + void (*reset)(struct wl1251 *wl);
33084 + void (*enable_irq)(struct wl1251 *wl);
33085 + void (*disable_irq)(struct wl1251 *wl);
33086 +-};
33087 ++} __no_const;
33088 +
33089 + struct wl1251 {
33090 + struct ieee80211_hw *hw;
33091 +diff -urNp linux-3.1.1/drivers/net/wireless/wl12xx/spi.c linux-3.1.1/drivers/net/wireless/wl12xx/spi.c
33092 +--- linux-3.1.1/drivers/net/wireless/wl12xx/spi.c 2011-11-11 15:19:27.000000000 -0500
33093 ++++ linux-3.1.1/drivers/net/wireless/wl12xx/spi.c 2011-11-16 18:40:22.000000000 -0500
33094 +@@ -281,6 +281,8 @@ static void wl1271_spi_raw_write(struct
33095 + u32 chunk_len;
33096 + int i;
33097 +
33098 ++ pax_track_stack();
33099 ++
33100 + WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
33101 +
33102 + spi_message_init(&m);
33103 +diff -urNp linux-3.1.1/drivers/oprofile/buffer_sync.c linux-3.1.1/drivers/oprofile/buffer_sync.c
33104 +--- linux-3.1.1/drivers/oprofile/buffer_sync.c 2011-11-11 15:19:27.000000000 -0500
33105 ++++ linux-3.1.1/drivers/oprofile/buffer_sync.c 2011-11-16 18:39:07.000000000 -0500
33106 +@@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
33107 + if (cookie == NO_COOKIE)
33108 + offset = pc;
33109 + if (cookie == INVALID_COOKIE) {
33110 +- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33111 ++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33112 + offset = pc;
33113 + }
33114 + if (cookie != last_cookie) {
33115 +@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
33116 + /* add userspace sample */
33117 +
33118 + if (!mm) {
33119 +- atomic_inc(&oprofile_stats.sample_lost_no_mm);
33120 ++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33121 + return 0;
33122 + }
33123 +
33124 + cookie = lookup_dcookie(mm, s->eip, &offset);
33125 +
33126 + if (cookie == INVALID_COOKIE) {
33127 +- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33128 ++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33129 + return 0;
33130 + }
33131 +
33132 +@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33133 + /* ignore backtraces if failed to add a sample */
33134 + if (state == sb_bt_start) {
33135 + state = sb_bt_ignore;
33136 +- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33137 ++ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33138 + }
33139 + }
33140 + release_mm(mm);
33141 +diff -urNp linux-3.1.1/drivers/oprofile/event_buffer.c linux-3.1.1/drivers/oprofile/event_buffer.c
33142 +--- linux-3.1.1/drivers/oprofile/event_buffer.c 2011-11-11 15:19:27.000000000 -0500
33143 ++++ linux-3.1.1/drivers/oprofile/event_buffer.c 2011-11-16 18:39:07.000000000 -0500
33144 +@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
33145 + }
33146 +
33147 + if (buffer_pos == buffer_size) {
33148 +- atomic_inc(&oprofile_stats.event_lost_overflow);
33149 ++ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33150 + return;
33151 + }
33152 +
33153 +diff -urNp linux-3.1.1/drivers/oprofile/oprof.c linux-3.1.1/drivers/oprofile/oprof.c
33154 +--- linux-3.1.1/drivers/oprofile/oprof.c 2011-11-11 15:19:27.000000000 -0500
33155 ++++ linux-3.1.1/drivers/oprofile/oprof.c 2011-11-16 18:39:07.000000000 -0500
33156 +@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
33157 + if (oprofile_ops.switch_events())
33158 + return;
33159 +
33160 +- atomic_inc(&oprofile_stats.multiplex_counter);
33161 ++ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33162 + start_switch_worker();
33163 + }
33164 +
33165 +diff -urNp linux-3.1.1/drivers/oprofile/oprofilefs.c linux-3.1.1/drivers/oprofile/oprofilefs.c
33166 +--- linux-3.1.1/drivers/oprofile/oprofilefs.c 2011-11-11 15:19:27.000000000 -0500
33167 ++++ linux-3.1.1/drivers/oprofile/oprofilefs.c 2011-11-16 18:39:07.000000000 -0500
33168 +@@ -186,7 +186,7 @@ static const struct file_operations atom
33169 +
33170 +
33171 + int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33172 +- char const *name, atomic_t *val)
33173 ++ char const *name, atomic_unchecked_t *val)
33174 + {
33175 + return __oprofilefs_create_file(sb, root, name,
33176 + &atomic_ro_fops, 0444, val);
33177 +diff -urNp linux-3.1.1/drivers/oprofile/oprofile_stats.c linux-3.1.1/drivers/oprofile/oprofile_stats.c
33178 +--- linux-3.1.1/drivers/oprofile/oprofile_stats.c 2011-11-11 15:19:27.000000000 -0500
33179 ++++ linux-3.1.1/drivers/oprofile/oprofile_stats.c 2011-11-16 18:39:07.000000000 -0500
33180 +@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33181 + cpu_buf->sample_invalid_eip = 0;
33182 + }
33183 +
33184 +- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33185 +- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33186 +- atomic_set(&oprofile_stats.event_lost_overflow, 0);
33187 +- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33188 +- atomic_set(&oprofile_stats.multiplex_counter, 0);
33189 ++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33190 ++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33191 ++ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33192 ++ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33193 ++ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33194 + }
33195 +
33196 +
33197 +diff -urNp linux-3.1.1/drivers/oprofile/oprofile_stats.h linux-3.1.1/drivers/oprofile/oprofile_stats.h
33198 +--- linux-3.1.1/drivers/oprofile/oprofile_stats.h 2011-11-11 15:19:27.000000000 -0500
33199 ++++ linux-3.1.1/drivers/oprofile/oprofile_stats.h 2011-11-16 18:39:07.000000000 -0500
33200 +@@ -13,11 +13,11 @@
33201 + #include <linux/atomic.h>
33202 +
33203 + struct oprofile_stat_struct {
33204 +- atomic_t sample_lost_no_mm;
33205 +- atomic_t sample_lost_no_mapping;
33206 +- atomic_t bt_lost_no_mapping;
33207 +- atomic_t event_lost_overflow;
33208 +- atomic_t multiplex_counter;
33209 ++ atomic_unchecked_t sample_lost_no_mm;
33210 ++ atomic_unchecked_t sample_lost_no_mapping;
33211 ++ atomic_unchecked_t bt_lost_no_mapping;
33212 ++ atomic_unchecked_t event_lost_overflow;
33213 ++ atomic_unchecked_t multiplex_counter;
33214 + };
33215 +
33216 + extern struct oprofile_stat_struct oprofile_stats;
33217 +diff -urNp linux-3.1.1/drivers/parport/procfs.c linux-3.1.1/drivers/parport/procfs.c
33218 +--- linux-3.1.1/drivers/parport/procfs.c 2011-11-11 15:19:27.000000000 -0500
33219 ++++ linux-3.1.1/drivers/parport/procfs.c 2011-11-16 18:39:07.000000000 -0500
33220 +@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
33221 +
33222 + *ppos += len;
33223 +
33224 +- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33225 ++ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33226 + }
33227 +
33228 + #ifdef CONFIG_PARPORT_1284
33229 +@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
33230 +
33231 + *ppos += len;
33232 +
33233 +- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33234 ++ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33235 + }
33236 + #endif /* IEEE1284.3 support. */
33237 +
33238 +diff -urNp linux-3.1.1/drivers/pci/hotplug/cpci_hotplug.h linux-3.1.1/drivers/pci/hotplug/cpci_hotplug.h
33239 +--- linux-3.1.1/drivers/pci/hotplug/cpci_hotplug.h 2011-11-11 15:19:27.000000000 -0500
33240 ++++ linux-3.1.1/drivers/pci/hotplug/cpci_hotplug.h 2011-11-16 18:39:07.000000000 -0500
33241 +@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33242 + int (*hardware_test) (struct slot* slot, u32 value);
33243 + u8 (*get_power) (struct slot* slot);
33244 + int (*set_power) (struct slot* slot, int value);
33245 +-};
33246 ++} __no_const;
33247 +
33248 + struct cpci_hp_controller {
33249 + unsigned int irq;
33250 +diff -urNp linux-3.1.1/drivers/pci/hotplug/cpqphp_nvram.c linux-3.1.1/drivers/pci/hotplug/cpqphp_nvram.c
33251 +--- linux-3.1.1/drivers/pci/hotplug/cpqphp_nvram.c 2011-11-11 15:19:27.000000000 -0500
33252 ++++ linux-3.1.1/drivers/pci/hotplug/cpqphp_nvram.c 2011-11-16 18:39:07.000000000 -0500
33253 +@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
33254 +
33255 + void compaq_nvram_init (void __iomem *rom_start)
33256 + {
33257 ++
33258 ++#ifndef CONFIG_PAX_KERNEXEC
33259 + if (rom_start) {
33260 + compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33261 + }
33262 ++#endif
33263 ++
33264 + dbg("int15 entry = %p\n", compaq_int15_entry_point);
33265 +
33266 + /* initialize our int15 lock */
33267 +diff -urNp linux-3.1.1/drivers/pci/pcie/aspm.c linux-3.1.1/drivers/pci/pcie/aspm.c
33268 +--- linux-3.1.1/drivers/pci/pcie/aspm.c 2011-11-11 15:19:27.000000000 -0500
33269 ++++ linux-3.1.1/drivers/pci/pcie/aspm.c 2011-11-16 18:39:07.000000000 -0500
33270 +@@ -27,9 +27,9 @@
33271 + #define MODULE_PARAM_PREFIX "pcie_aspm."
33272 +
33273 + /* Note: those are not register definitions */
33274 +-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33275 +-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33276 +-#define ASPM_STATE_L1 (4) /* L1 state */
33277 ++#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33278 ++#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33279 ++#define ASPM_STATE_L1 (4U) /* L1 state */
33280 + #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33281 + #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33282 +
33283 +diff -urNp linux-3.1.1/drivers/pci/probe.c linux-3.1.1/drivers/pci/probe.c
33284 +--- linux-3.1.1/drivers/pci/probe.c 2011-11-11 15:19:27.000000000 -0500
33285 ++++ linux-3.1.1/drivers/pci/probe.c 2011-11-16 18:39:07.000000000 -0500
33286 +@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev,
33287 + u32 l, sz, mask;
33288 + u16 orig_cmd;
33289 +
33290 +- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33291 ++ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33292 +
33293 + if (!dev->mmio_always_on) {
33294 + pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33295 +diff -urNp linux-3.1.1/drivers/pci/proc.c linux-3.1.1/drivers/pci/proc.c
33296 +--- linux-3.1.1/drivers/pci/proc.c 2011-11-11 15:19:27.000000000 -0500
33297 ++++ linux-3.1.1/drivers/pci/proc.c 2011-11-16 18:40:22.000000000 -0500
33298 +@@ -476,7 +476,16 @@ static const struct file_operations proc
33299 + static int __init pci_proc_init(void)
33300 + {
33301 + struct pci_dev *dev = NULL;
33302 ++
33303 ++#ifdef CONFIG_GRKERNSEC_PROC_ADD
33304 ++#ifdef CONFIG_GRKERNSEC_PROC_USER
33305 ++ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33306 ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33307 ++ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33308 ++#endif
33309 ++#else
33310 + proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33311 ++#endif
33312 + proc_create("devices", 0, proc_bus_pci_dir,
33313 + &proc_bus_pci_dev_operations);
33314 + proc_initialized = 1;
33315 +diff -urNp linux-3.1.1/drivers/pci/xen-pcifront.c linux-3.1.1/drivers/pci/xen-pcifront.c
33316 +--- linux-3.1.1/drivers/pci/xen-pcifront.c 2011-11-11 15:19:27.000000000 -0500
33317 ++++ linux-3.1.1/drivers/pci/xen-pcifront.c 2011-11-16 18:40:22.000000000 -0500
33318 +@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
33319 + struct pcifront_sd *sd = bus->sysdata;
33320 + struct pcifront_device *pdev = pcifront_get_pdev(sd);
33321 +
33322 ++ pax_track_stack();
33323 ++
33324 + if (verbose_request)
33325 + dev_info(&pdev->xdev->dev,
33326 + "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
33327 +@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
33328 + struct pcifront_sd *sd = bus->sysdata;
33329 + struct pcifront_device *pdev = pcifront_get_pdev(sd);
33330 +
33331 ++ pax_track_stack();
33332 ++
33333 + if (verbose_request)
33334 + dev_info(&pdev->xdev->dev,
33335 + "write dev=%04x:%02x:%02x.%01x - "
33336 +@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
33337 + struct pcifront_device *pdev = pcifront_get_pdev(sd);
33338 + struct msi_desc *entry;
33339 +
33340 ++ pax_track_stack();
33341 ++
33342 + if (nvec > SH_INFO_MAX_VEC) {
33343 + dev_err(&dev->dev, "too much vector for pci frontend: %x."
33344 + " Increase SH_INFO_MAX_VEC.\n", nvec);
33345 +@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
33346 + struct pcifront_sd *sd = dev->bus->sysdata;
33347 + struct pcifront_device *pdev = pcifront_get_pdev(sd);
33348 +
33349 ++ pax_track_stack();
33350 ++
33351 + err = do_pci_op(pdev, &op);
33352 +
33353 + /* What should do for error ? */
33354 +@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
33355 + struct pcifront_sd *sd = dev->bus->sysdata;
33356 + struct pcifront_device *pdev = pcifront_get_pdev(sd);
33357 +
33358 ++ pax_track_stack();
33359 ++
33360 + err = do_pci_op(pdev, &op);
33361 + if (likely(!err)) {
33362 + vector[0] = op.value;
33363 +diff -urNp linux-3.1.1/drivers/platform/x86/thinkpad_acpi.c linux-3.1.1/drivers/platform/x86/thinkpad_acpi.c
33364 +--- linux-3.1.1/drivers/platform/x86/thinkpad_acpi.c 2011-11-11 15:19:27.000000000 -0500
33365 ++++ linux-3.1.1/drivers/platform/x86/thinkpad_acpi.c 2011-11-16 18:39:07.000000000 -0500
33366 +@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33367 + return 0;
33368 + }
33369 +
33370 +-void static hotkey_mask_warn_incomplete_mask(void)
33371 ++static void hotkey_mask_warn_incomplete_mask(void)
33372 + {
33373 + /* log only what the user can fix... */
33374 + const u32 wantedmask = hotkey_driver_mask &
33375 +diff -urNp linux-3.1.1/drivers/pnp/pnpbios/bioscalls.c linux-3.1.1/drivers/pnp/pnpbios/bioscalls.c
33376 +--- linux-3.1.1/drivers/pnp/pnpbios/bioscalls.c 2011-11-11 15:19:27.000000000 -0500
33377 ++++ linux-3.1.1/drivers/pnp/pnpbios/bioscalls.c 2011-11-16 18:39:07.000000000 -0500
33378 +@@ -59,7 +59,7 @@ do { \
33379 + set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33380 + } while(0)
33381 +
33382 +-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33383 ++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33384 + (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33385 +
33386 + /*
33387 +@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
33388 +
33389 + cpu = get_cpu();
33390 + save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33391 ++
33392 ++ pax_open_kernel();
33393 + get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33394 ++ pax_close_kernel();
33395 +
33396 + /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33397 + spin_lock_irqsave(&pnp_bios_lock, flags);
33398 +@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
33399 + :"memory");
33400 + spin_unlock_irqrestore(&pnp_bios_lock, flags);
33401 +
33402 ++ pax_open_kernel();
33403 + get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33404 ++ pax_close_kernel();
33405 ++
33406 + put_cpu();
33407 +
33408 + /* If we get here and this is set then the PnP BIOS faulted on us. */
33409 +@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
33410 + return status;
33411 + }
33412 +
33413 +-void pnpbios_calls_init(union pnp_bios_install_struct *header)
33414 ++void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33415 + {
33416 + int i;
33417 +
33418 +@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
33419 + pnp_bios_callpoint.offset = header->fields.pm16offset;
33420 + pnp_bios_callpoint.segment = PNP_CS16;
33421 +
33422 ++ pax_open_kernel();
33423 ++
33424 + for_each_possible_cpu(i) {
33425 + struct desc_struct *gdt = get_cpu_gdt_table(i);
33426 + if (!gdt)
33427 +@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
33428 + set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33429 + (unsigned long)__va(header->fields.pm16dseg));
33430 + }
33431 ++
33432 ++ pax_close_kernel();
33433 + }
33434 +diff -urNp linux-3.1.1/drivers/pnp/resource.c linux-3.1.1/drivers/pnp/resource.c
33435 +--- linux-3.1.1/drivers/pnp/resource.c 2011-11-11 15:19:27.000000000 -0500
33436 ++++ linux-3.1.1/drivers/pnp/resource.c 2011-11-16 18:39:07.000000000 -0500
33437 +@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
33438 + return 1;
33439 +
33440 + /* check if the resource is valid */
33441 +- if (*irq < 0 || *irq > 15)
33442 ++ if (*irq > 15)
33443 + return 0;
33444 +
33445 + /* check if the resource is reserved */
33446 +@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
33447 + return 1;
33448 +
33449 + /* check if the resource is valid */
33450 +- if (*dma < 0 || *dma == 4 || *dma > 7)
33451 ++ if (*dma == 4 || *dma > 7)
33452 + return 0;
33453 +
33454 + /* check if the resource is reserved */
33455 +diff -urNp linux-3.1.1/drivers/power/bq27x00_battery.c linux-3.1.1/drivers/power/bq27x00_battery.c
33456 +--- linux-3.1.1/drivers/power/bq27x00_battery.c 2011-11-11 15:19:27.000000000 -0500
33457 ++++ linux-3.1.1/drivers/power/bq27x00_battery.c 2011-11-16 18:39:07.000000000 -0500
33458 +@@ -67,7 +67,7 @@
33459 + struct bq27x00_device_info;
33460 + struct bq27x00_access_methods {
33461 + int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
33462 +-};
33463 ++} __no_const;
33464 +
33465 + enum bq27x00_chip { BQ27000, BQ27500 };
33466 +
33467 +diff -urNp linux-3.1.1/drivers/regulator/max8660.c linux-3.1.1/drivers/regulator/max8660.c
33468 +--- linux-3.1.1/drivers/regulator/max8660.c 2011-11-11 15:19:27.000000000 -0500
33469 ++++ linux-3.1.1/drivers/regulator/max8660.c 2011-11-16 18:39:07.000000000 -0500
33470 +@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
33471 + max8660->shadow_regs[MAX8660_OVER1] = 5;
33472 + } else {
33473 + /* Otherwise devices can be toggled via software */
33474 +- max8660_dcdc_ops.enable = max8660_dcdc_enable;
33475 +- max8660_dcdc_ops.disable = max8660_dcdc_disable;
33476 ++ pax_open_kernel();
33477 ++ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
33478 ++ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
33479 ++ pax_close_kernel();
33480 + }
33481 +
33482 + /*
33483 +diff -urNp linux-3.1.1/drivers/regulator/mc13892-regulator.c linux-3.1.1/drivers/regulator/mc13892-regulator.c
33484 +--- linux-3.1.1/drivers/regulator/mc13892-regulator.c 2011-11-11 15:19:27.000000000 -0500
33485 ++++ linux-3.1.1/drivers/regulator/mc13892-regulator.c 2011-11-16 18:39:07.000000000 -0500
33486 +@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_p
33487 + }
33488 + mc13xxx_unlock(mc13892);
33489 +
33490 +- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33491 ++ pax_open_kernel();
33492 ++ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33493 + = mc13892_vcam_set_mode;
33494 +- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33495 ++ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33496 + = mc13892_vcam_get_mode;
33497 ++ pax_close_kernel();
33498 + for (i = 0; i < pdata->num_regulators; i++) {
33499 + init_data = &pdata->regulators[i];
33500 + priv->regulators[i] = regulator_register(
33501 +diff -urNp linux-3.1.1/drivers/rtc/rtc-dev.c linux-3.1.1/drivers/rtc/rtc-dev.c
33502 +--- linux-3.1.1/drivers/rtc/rtc-dev.c 2011-11-11 15:19:27.000000000 -0500
33503 ++++ linux-3.1.1/drivers/rtc/rtc-dev.c 2011-11-16 18:40:22.000000000 -0500
33504 +@@ -14,6 +14,7 @@
33505 + #include <linux/module.h>
33506 + #include <linux/rtc.h>
33507 + #include <linux/sched.h>
33508 ++#include <linux/grsecurity.h>
33509 + #include "rtc-core.h"
33510 +
33511 + static dev_t rtc_devt;
33512 +@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
33513 + if (copy_from_user(&tm, uarg, sizeof(tm)))
33514 + return -EFAULT;
33515 +
33516 ++ gr_log_timechange();
33517 ++
33518 + return rtc_set_time(rtc, &tm);
33519 +
33520 + case RTC_PIE_ON:
33521 +diff -urNp linux-3.1.1/drivers/scsi/aacraid/aacraid.h linux-3.1.1/drivers/scsi/aacraid/aacraid.h
33522 +--- linux-3.1.1/drivers/scsi/aacraid/aacraid.h 2011-11-11 15:19:27.000000000 -0500
33523 ++++ linux-3.1.1/drivers/scsi/aacraid/aacraid.h 2011-11-16 18:39:07.000000000 -0500
33524 +@@ -492,7 +492,7 @@ struct adapter_ops
33525 + int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
33526 + /* Administrative operations */
33527 + int (*adapter_comm)(struct aac_dev * dev, int comm);
33528 +-};
33529 ++} __no_const;
33530 +
33531 + /*
33532 + * Define which interrupt handler needs to be installed
33533 +diff -urNp linux-3.1.1/drivers/scsi/aacraid/commctrl.c linux-3.1.1/drivers/scsi/aacraid/commctrl.c
33534 +--- linux-3.1.1/drivers/scsi/aacraid/commctrl.c 2011-11-11 15:19:27.000000000 -0500
33535 ++++ linux-3.1.1/drivers/scsi/aacraid/commctrl.c 2011-11-16 18:40:22.000000000 -0500
33536 +@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
33537 + u32 actual_fibsize64, actual_fibsize = 0;
33538 + int i;
33539 +
33540 ++ pax_track_stack();
33541 +
33542 + if (dev->in_reset) {
33543 + dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
33544 +diff -urNp linux-3.1.1/drivers/scsi/aacraid/linit.c linux-3.1.1/drivers/scsi/aacraid/linit.c
33545 +--- linux-3.1.1/drivers/scsi/aacraid/linit.c 2011-11-11 15:19:27.000000000 -0500
33546 ++++ linux-3.1.1/drivers/scsi/aacraid/linit.c 2011-11-16 18:39:07.000000000 -0500
33547 +@@ -92,7 +92,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_
33548 + #elif defined(__devinitconst)
33549 + static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33550 + #else
33551 +-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
33552 ++static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33553 + #endif
33554 + { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
33555 + { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
33556 +diff -urNp linux-3.1.1/drivers/scsi/aic94xx/aic94xx_init.c linux-3.1.1/drivers/scsi/aic94xx/aic94xx_init.c
33557 +--- linux-3.1.1/drivers/scsi/aic94xx/aic94xx_init.c 2011-11-11 15:19:27.000000000 -0500
33558 ++++ linux-3.1.1/drivers/scsi/aic94xx/aic94xx_init.c 2011-11-16 18:39:07.000000000 -0500
33559 +@@ -1012,7 +1012,7 @@ static struct sas_domain_function_templa
33560 + .lldd_control_phy = asd_control_phy,
33561 + };
33562 +
33563 +-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
33564 ++static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
33565 + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
33566 + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
33567 + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
33568 +diff -urNp linux-3.1.1/drivers/scsi/bfa/bfad.c linux-3.1.1/drivers/scsi/bfa/bfad.c
33569 +--- linux-3.1.1/drivers/scsi/bfa/bfad.c 2011-11-11 15:19:27.000000000 -0500
33570 ++++ linux-3.1.1/drivers/scsi/bfa/bfad.c 2011-11-16 19:01:15.000000000 -0500
33571 +@@ -1019,6 +1019,8 @@ bfad_start_ops(struct bfad_s *bfad) {
33572 + struct bfad_vport_s *vport, *vport_new;
33573 + struct bfa_fcs_driver_info_s driver_info;
33574 +
33575 ++ pax_track_stack();
33576 ++
33577 + /* Limit min/max. xfer size to [64k-32MB] */
33578 + if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
33579 + max_xfer_size = BFAD_MIN_SECTORS >> 1;
33580 +diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.c linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.c
33581 +--- linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.c 2011-11-11 15:19:27.000000000 -0500
33582 ++++ linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.c 2011-11-16 18:39:07.000000000 -0500
33583 +@@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct
33584 + void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
33585 + {
33586 + struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
33587 +- struct bfa_itn_s *itn;
33588 ++ bfa_itn_s_no_const *itn;
33589 +
33590 + itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
33591 + itn->isr = isr;
33592 +diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.h linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.h
33593 +--- linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.h 2011-11-11 15:19:27.000000000 -0500
33594 ++++ linux-3.1.1/drivers/scsi/bfa/bfa_fcpim.h 2011-11-16 18:39:07.000000000 -0500
33595 +@@ -37,6 +37,7 @@ struct bfa_iotag_s {
33596 + struct bfa_itn_s {
33597 + bfa_isr_func_t isr;
33598 + };
33599 ++typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
33600 +
33601 + void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
33602 + void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
33603 +@@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
33604 + struct list_head iotag_tio_free_q; /* free IO resources */
33605 + struct list_head iotag_unused_q; /* unused IO resources*/
33606 + struct bfa_iotag_s *iotag_arr;
33607 +- struct bfa_itn_s *itn_arr;
33608 ++ bfa_itn_s_no_const *itn_arr;
33609 + int num_ioim_reqs;
33610 + int num_fwtio_reqs;
33611 + int num_itns;
33612 +diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.1.1/drivers/scsi/bfa/bfa_fcs_lport.c
33613 +--- linux-3.1.1/drivers/scsi/bfa/bfa_fcs_lport.c 2011-11-11 15:19:27.000000000 -0500
33614 ++++ linux-3.1.1/drivers/scsi/bfa/bfa_fcs_lport.c 2011-11-16 18:40:22.000000000 -0500
33615 +@@ -1700,6 +1700,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
33616 + u16 len, count;
33617 + u16 templen;
33618 +
33619 ++ pax_track_stack();
33620 ++
33621 + /*
33622 + * get hba attributes
33623 + */
33624 +@@ -1977,6 +1979,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
33625 + u8 count = 0;
33626 + u16 templen;
33627 +
33628 ++ pax_track_stack();
33629 ++
33630 + /*
33631 + * get port attributes
33632 + */
33633 +diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_fcs_rport.c linux-3.1.1/drivers/scsi/bfa/bfa_fcs_rport.c
33634 +--- linux-3.1.1/drivers/scsi/bfa/bfa_fcs_rport.c 2011-11-11 15:19:27.000000000 -0500
33635 ++++ linux-3.1.1/drivers/scsi/bfa/bfa_fcs_rport.c 2011-11-16 18:40:22.000000000 -0500
33636 +@@ -1871,6 +1871,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
33637 + struct fc_rpsc_speed_info_s speeds;
33638 + struct bfa_port_attr_s pport_attr;
33639 +
33640 ++ pax_track_stack();
33641 ++
33642 + bfa_trc(port->fcs, rx_fchs->s_id);
33643 + bfa_trc(port->fcs, rx_fchs->d_id);
33644 +
33645 +diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa.h linux-3.1.1/drivers/scsi/bfa/bfa.h
33646 +--- linux-3.1.1/drivers/scsi/bfa/bfa.h 2011-11-11 15:19:27.000000000 -0500
33647 ++++ linux-3.1.1/drivers/scsi/bfa/bfa.h 2011-11-16 18:39:07.000000000 -0500
33648 +@@ -196,7 +196,7 @@ struct bfa_hwif_s {
33649 + u32 *end);
33650 + int cpe_vec_q0;
33651 + int rme_vec_q0;
33652 +-};
33653 ++} __no_const;
33654 + typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
33655 +
33656 + struct bfa_faa_cbfn_s {
33657 +diff -urNp linux-3.1.1/drivers/scsi/bfa/bfa_ioc.h linux-3.1.1/drivers/scsi/bfa/bfa_ioc.h
33658 +--- linux-3.1.1/drivers/scsi/bfa/bfa_ioc.h 2011-11-11 15:19:27.000000000 -0500
33659 ++++ linux-3.1.1/drivers/scsi/bfa/bfa_ioc.h 2011-11-16 18:39:07.000000000 -0500
33660 +@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
33661 + bfa_ioc_disable_cbfn_t disable_cbfn;
33662 + bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
33663 + bfa_ioc_reset_cbfn_t reset_cbfn;
33664 +-};
33665 ++} __no_const;
33666 +
33667 + /*
33668 + * IOC event notification mechanism.
33669 +@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
33670 + void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
33671 + bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
33672 + bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
33673 +-};
33674 ++} __no_const;
33675 +
33676 + /*
33677 + * Queue element to wait for room in request queue. FIFO order is
33678 +diff -urNp linux-3.1.1/drivers/scsi/BusLogic.c linux-3.1.1/drivers/scsi/BusLogic.c
33679 +--- linux-3.1.1/drivers/scsi/BusLogic.c 2011-11-11 15:19:27.000000000 -0500
33680 ++++ linux-3.1.1/drivers/scsi/BusLogic.c 2011-11-16 18:40:22.000000000 -0500
33681 +@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
33682 + static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
33683 + *PrototypeHostAdapter)
33684 + {
33685 ++ pax_track_stack();
33686 ++
33687 + /*
33688 + If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
33689 + Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
33690 +diff -urNp linux-3.1.1/drivers/scsi/dpt_i2o.c linux-3.1.1/drivers/scsi/dpt_i2o.c
33691 +--- linux-3.1.1/drivers/scsi/dpt_i2o.c 2011-11-11 15:19:27.000000000 -0500
33692 ++++ linux-3.1.1/drivers/scsi/dpt_i2o.c 2011-11-16 18:40:22.000000000 -0500
33693 +@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
33694 + dma_addr_t addr;
33695 + ulong flags = 0;
33696 +
33697 ++ pax_track_stack();
33698 ++
33699 + memset(&msg, 0, MAX_MESSAGE_SIZE*4);
33700 + // get user msg size in u32s
33701 + if(get_user(size, &user_msg[0])){
33702 +@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
33703 + s32 rcode;
33704 + dma_addr_t addr;
33705 +
33706 ++ pax_track_stack();
33707 ++
33708 + memset(msg, 0 , sizeof(msg));
33709 + len = scsi_bufflen(cmd);
33710 + direction = 0x00000000;
33711 +diff -urNp linux-3.1.1/drivers/scsi/eata.c linux-3.1.1/drivers/scsi/eata.c
33712 +--- linux-3.1.1/drivers/scsi/eata.c 2011-11-11 15:19:27.000000000 -0500
33713 ++++ linux-3.1.1/drivers/scsi/eata.c 2011-11-16 18:40:22.000000000 -0500
33714 +@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
33715 + struct hostdata *ha;
33716 + char name[16];
33717 +
33718 ++ pax_track_stack();
33719 ++
33720 + sprintf(name, "%s%d", driver_name, j);
33721 +
33722 + if (!request_region(port_base, REGION_SIZE, driver_name)) {
33723 +diff -urNp linux-3.1.1/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.1.1/drivers/scsi/fcoe/fcoe_ctlr.c
33724 +--- linux-3.1.1/drivers/scsi/fcoe/fcoe_ctlr.c 2011-11-11 15:19:27.000000000 -0500
33725 ++++ linux-3.1.1/drivers/scsi/fcoe/fcoe_ctlr.c 2011-11-16 18:40:22.000000000 -0500
33726 +@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
33727 + } buf;
33728 + int rc;
33729 +
33730 ++ pax_track_stack();
33731 ++
33732 + fiph = (struct fip_header *)skb->data;
33733 + sub = fiph->fip_subcode;
33734 +
33735 +diff -urNp linux-3.1.1/drivers/scsi/gdth.c linux-3.1.1/drivers/scsi/gdth.c
33736 +--- linux-3.1.1/drivers/scsi/gdth.c 2011-11-11 15:19:27.000000000 -0500
33737 ++++ linux-3.1.1/drivers/scsi/gdth.c 2011-11-16 18:40:22.000000000 -0500
33738 +@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
33739 + unsigned long flags;
33740 + gdth_ha_str *ha;
33741 +
33742 ++ pax_track_stack();
33743 ++
33744 + if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
33745 + return -EFAULT;
33746 + ha = gdth_find_ha(ldrv.ionode);
33747 +@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
33748 + gdth_ha_str *ha;
33749 + int rval;
33750 +
33751 ++ pax_track_stack();
33752 ++
33753 + if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
33754 + res.number >= MAX_HDRIVES)
33755 + return -EFAULT;
33756 +@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
33757 + gdth_ha_str *ha;
33758 + int rval;
33759 +
33760 ++ pax_track_stack();
33761 ++
33762 + if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
33763 + return -EFAULT;
33764 + ha = gdth_find_ha(gen.ionode);
33765 +@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
33766 + int i;
33767 + gdth_cmd_str gdtcmd;
33768 + char cmnd[MAX_COMMAND_SIZE];
33769 ++
33770 ++ pax_track_stack();
33771 ++
33772 + memset(cmnd, 0xff, MAX_COMMAND_SIZE);
33773 +
33774 + TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
33775 +diff -urNp linux-3.1.1/drivers/scsi/gdth_proc.c linux-3.1.1/drivers/scsi/gdth_proc.c
33776 +--- linux-3.1.1/drivers/scsi/gdth_proc.c 2011-11-11 15:19:27.000000000 -0500
33777 ++++ linux-3.1.1/drivers/scsi/gdth_proc.c 2011-11-16 18:40:22.000000000 -0500
33778 +@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
33779 + u64 paddr;
33780 +
33781 + char cmnd[MAX_COMMAND_SIZE];
33782 ++
33783 ++ pax_track_stack();
33784 ++
33785 + memset(cmnd, 0xff, 12);
33786 + memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
33787 +
33788 +@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
33789 + gdth_hget_str *phg;
33790 + char cmnd[MAX_COMMAND_SIZE];
33791 +
33792 ++ pax_track_stack();
33793 ++
33794 + gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
33795 + estr = kmalloc(sizeof(*estr), GFP_KERNEL);
33796 + if (!gdtcmd || !estr)
33797 +diff -urNp linux-3.1.1/drivers/scsi/hosts.c linux-3.1.1/drivers/scsi/hosts.c
33798 +--- linux-3.1.1/drivers/scsi/hosts.c 2011-11-11 15:19:27.000000000 -0500
33799 ++++ linux-3.1.1/drivers/scsi/hosts.c 2011-11-16 18:39:07.000000000 -0500
33800 +@@ -42,7 +42,7 @@
33801 + #include "scsi_logging.h"
33802 +
33803 +
33804 +-static atomic_t scsi_host_next_hn; /* host_no for next new host */
33805 ++static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
33806 +
33807 +
33808 + static void scsi_host_cls_release(struct device *dev)
33809 +@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct
33810 + * subtract one because we increment first then return, but we need to
33811 + * know what the next host number was before increment
33812 + */
33813 +- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
33814 ++ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
33815 + shost->dma_channel = 0xff;
33816 +
33817 + /* These three are default values which can be overridden */
33818 +diff -urNp linux-3.1.1/drivers/scsi/hpsa.c linux-3.1.1/drivers/scsi/hpsa.c
33819 +--- linux-3.1.1/drivers/scsi/hpsa.c 2011-11-11 15:19:27.000000000 -0500
33820 ++++ linux-3.1.1/drivers/scsi/hpsa.c 2011-11-16 18:39:07.000000000 -0500
33821 +@@ -498,7 +498,7 @@ static inline u32 next_command(struct ct
33822 + u32 a;
33823 +
33824 + if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
33825 +- return h->access.command_completed(h);
33826 ++ return h->access->command_completed(h);
33827 +
33828 + if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
33829 + a = *(h->reply_pool_head); /* Next cmd in ring buffer */
33830 +@@ -2955,7 +2955,7 @@ static void start_io(struct ctlr_info *h
33831 + while (!list_empty(&h->reqQ)) {
33832 + c = list_entry(h->reqQ.next, struct CommandList, list);
33833 + /* can't do anything if fifo is full */
33834 +- if ((h->access.fifo_full(h))) {
33835 ++ if ((h->access->fifo_full(h))) {
33836 + dev_warn(&h->pdev->dev, "fifo full\n");
33837 + break;
33838 + }
33839 +@@ -2965,7 +2965,7 @@ static void start_io(struct ctlr_info *h
33840 + h->Qdepth--;
33841 +
33842 + /* Tell the controller execute command */
33843 +- h->access.submit_command(h, c);
33844 ++ h->access->submit_command(h, c);
33845 +
33846 + /* Put job onto the completed Q */
33847 + addQ(&h->cmpQ, c);
33848 +@@ -2974,17 +2974,17 @@ static void start_io(struct ctlr_info *h
33849 +
33850 + static inline unsigned long get_next_completion(struct ctlr_info *h)
33851 + {
33852 +- return h->access.command_completed(h);
33853 ++ return h->access->command_completed(h);
33854 + }
33855 +
33856 + static inline bool interrupt_pending(struct ctlr_info *h)
33857 + {
33858 +- return h->access.intr_pending(h);
33859 ++ return h->access->intr_pending(h);
33860 + }
33861 +
33862 + static inline long interrupt_not_for_us(struct ctlr_info *h)
33863 + {
33864 +- return (h->access.intr_pending(h) == 0) ||
33865 ++ return (h->access->intr_pending(h) == 0) ||
33866 + (h->interrupts_enabled == 0);
33867 + }
33868 +
33869 +@@ -3881,7 +3881,7 @@ static int __devinit hpsa_pci_init(struc
33870 + if (prod_index < 0)
33871 + return -ENODEV;
33872 + h->product_name = products[prod_index].product_name;
33873 +- h->access = *(products[prod_index].access);
33874 ++ h->access = products[prod_index].access;
33875 +
33876 + if (hpsa_board_disabled(h->pdev)) {
33877 + dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
33878 +@@ -4158,7 +4158,7 @@ reinit_after_soft_reset:
33879 + }
33880 +
33881 + /* make sure the board interrupts are off */
33882 +- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33883 ++ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33884 +
33885 + if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
33886 + goto clean2;
33887 +@@ -4192,7 +4192,7 @@ reinit_after_soft_reset:
33888 + * fake ones to scoop up any residual completions.
33889 + */
33890 + spin_lock_irqsave(&h->lock, flags);
33891 +- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33892 ++ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33893 + spin_unlock_irqrestore(&h->lock, flags);
33894 + free_irq(h->intr[h->intr_mode], h);
33895 + rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
33896 +@@ -4211,9 +4211,9 @@ reinit_after_soft_reset:
33897 + dev_info(&h->pdev->dev, "Board READY.\n");
33898 + dev_info(&h->pdev->dev,
33899 + "Waiting for stale completions to drain.\n");
33900 +- h->access.set_intr_mask(h, HPSA_INTR_ON);
33901 ++ h->access->set_intr_mask(h, HPSA_INTR_ON);
33902 + msleep(10000);
33903 +- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33904 ++ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33905 +
33906 + rc = controller_reset_failed(h->cfgtable);
33907 + if (rc)
33908 +@@ -4234,7 +4234,7 @@ reinit_after_soft_reset:
33909 + }
33910 +
33911 + /* Turn the interrupts on so we can service requests */
33912 +- h->access.set_intr_mask(h, HPSA_INTR_ON);
33913 ++ h->access->set_intr_mask(h, HPSA_INTR_ON);
33914 +
33915 + hpsa_hba_inquiry(h);
33916 + hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
33917 +@@ -4287,7 +4287,7 @@ static void hpsa_shutdown(struct pci_dev
33918 + * To write all data in the battery backed cache to disks
33919 + */
33920 + hpsa_flush_cache(h);
33921 +- h->access.set_intr_mask(h, HPSA_INTR_OFF);
33922 ++ h->access->set_intr_mask(h, HPSA_INTR_OFF);
33923 + free_irq(h->intr[h->intr_mode], h);
33924 + #ifdef CONFIG_PCI_MSI
33925 + if (h->msix_vector)
33926 +@@ -4450,7 +4450,7 @@ static __devinit void hpsa_enter_perform
33927 + return;
33928 + }
33929 + /* Change the access methods to the performant access methods */
33930 +- h->access = SA5_performant_access;
33931 ++ h->access = &SA5_performant_access;
33932 + h->transMethod = CFGTBL_Trans_Performant;
33933 + }
33934 +
33935 +diff -urNp linux-3.1.1/drivers/scsi/hpsa.h linux-3.1.1/drivers/scsi/hpsa.h
33936 +--- linux-3.1.1/drivers/scsi/hpsa.h 2011-11-11 15:19:27.000000000 -0500
33937 ++++ linux-3.1.1/drivers/scsi/hpsa.h 2011-11-16 18:39:07.000000000 -0500
33938 +@@ -73,7 +73,7 @@ struct ctlr_info {
33939 + unsigned int msix_vector;
33940 + unsigned int msi_vector;
33941 + int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
33942 +- struct access_method access;
33943 ++ struct access_method *access;
33944 +
33945 + /* queue and queue Info */
33946 + struct list_head reqQ;
33947 +diff -urNp linux-3.1.1/drivers/scsi/ips.h linux-3.1.1/drivers/scsi/ips.h
33948 +--- linux-3.1.1/drivers/scsi/ips.h 2011-11-11 15:19:27.000000000 -0500
33949 ++++ linux-3.1.1/drivers/scsi/ips.h 2011-11-16 18:39:07.000000000 -0500
33950 +@@ -1027,7 +1027,7 @@ typedef struct {
33951 + int (*intr)(struct ips_ha *);
33952 + void (*enableint)(struct ips_ha *);
33953 + uint32_t (*statupd)(struct ips_ha *);
33954 +-} ips_hw_func_t;
33955 ++} __no_const ips_hw_func_t;
33956 +
33957 + typedef struct ips_ha {
33958 + uint8_t ha_id[IPS_MAX_CHANNELS+1];
33959 +diff -urNp linux-3.1.1/drivers/scsi/libfc/fc_exch.c linux-3.1.1/drivers/scsi/libfc/fc_exch.c
33960 +--- linux-3.1.1/drivers/scsi/libfc/fc_exch.c 2011-11-11 15:19:27.000000000 -0500
33961 ++++ linux-3.1.1/drivers/scsi/libfc/fc_exch.c 2011-11-16 18:39:07.000000000 -0500
33962 +@@ -105,12 +105,12 @@ struct fc_exch_mgr {
33963 + * all together if not used XXX
33964 + */
33965 + struct {
33966 +- atomic_t no_free_exch;
33967 +- atomic_t no_free_exch_xid;
33968 +- atomic_t xid_not_found;
33969 +- atomic_t xid_busy;
33970 +- atomic_t seq_not_found;
33971 +- atomic_t non_bls_resp;
33972 ++ atomic_unchecked_t no_free_exch;
33973 ++ atomic_unchecked_t no_free_exch_xid;
33974 ++ atomic_unchecked_t xid_not_found;
33975 ++ atomic_unchecked_t xid_busy;
33976 ++ atomic_unchecked_t seq_not_found;
33977 ++ atomic_unchecked_t non_bls_resp;
33978 + } stats;
33979 + };
33980 +
33981 +@@ -718,7 +718,7 @@ static struct fc_exch *fc_exch_em_alloc(
33982 + /* allocate memory for exchange */
33983 + ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
33984 + if (!ep) {
33985 +- atomic_inc(&mp->stats.no_free_exch);
33986 ++ atomic_inc_unchecked(&mp->stats.no_free_exch);
33987 + goto out;
33988 + }
33989 + memset(ep, 0, sizeof(*ep));
33990 +@@ -779,7 +779,7 @@ out:
33991 + return ep;
33992 + err:
33993 + spin_unlock_bh(&pool->lock);
33994 +- atomic_inc(&mp->stats.no_free_exch_xid);
33995 ++ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
33996 + mempool_free(ep, mp->ep_pool);
33997 + return NULL;
33998 + }
33999 +@@ -922,7 +922,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34000 + xid = ntohs(fh->fh_ox_id); /* we originated exch */
34001 + ep = fc_exch_find(mp, xid);
34002 + if (!ep) {
34003 +- atomic_inc(&mp->stats.xid_not_found);
34004 ++ atomic_inc_unchecked(&mp->stats.xid_not_found);
34005 + reject = FC_RJT_OX_ID;
34006 + goto out;
34007 + }
34008 +@@ -952,7 +952,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34009 + ep = fc_exch_find(mp, xid);
34010 + if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34011 + if (ep) {
34012 +- atomic_inc(&mp->stats.xid_busy);
34013 ++ atomic_inc_unchecked(&mp->stats.xid_busy);
34014 + reject = FC_RJT_RX_ID;
34015 + goto rel;
34016 + }
34017 +@@ -963,7 +963,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34018 + }
34019 + xid = ep->xid; /* get our XID */
34020 + } else if (!ep) {
34021 +- atomic_inc(&mp->stats.xid_not_found);
34022 ++ atomic_inc_unchecked(&mp->stats.xid_not_found);
34023 + reject = FC_RJT_RX_ID; /* XID not found */
34024 + goto out;
34025 + }
34026 +@@ -980,7 +980,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34027 + } else {
34028 + sp = &ep->seq;
34029 + if (sp->id != fh->fh_seq_id) {
34030 +- atomic_inc(&mp->stats.seq_not_found);
34031 ++ atomic_inc_unchecked(&mp->stats.seq_not_found);
34032 + if (f_ctl & FC_FC_END_SEQ) {
34033 + /*
34034 + * Update sequence_id based on incoming last
34035 +@@ -1430,22 +1430,22 @@ static void fc_exch_recv_seq_resp(struct
34036 +
34037 + ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34038 + if (!ep) {
34039 +- atomic_inc(&mp->stats.xid_not_found);
34040 ++ atomic_inc_unchecked(&mp->stats.xid_not_found);
34041 + goto out;
34042 + }
34043 + if (ep->esb_stat & ESB_ST_COMPLETE) {
34044 +- atomic_inc(&mp->stats.xid_not_found);
34045 ++ atomic_inc_unchecked(&mp->stats.xid_not_found);
34046 + goto rel;
34047 + }
34048 + if (ep->rxid == FC_XID_UNKNOWN)
34049 + ep->rxid = ntohs(fh->fh_rx_id);
34050 + if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34051 +- atomic_inc(&mp->stats.xid_not_found);
34052 ++ atomic_inc_unchecked(&mp->stats.xid_not_found);
34053 + goto rel;
34054 + }
34055 + if (ep->did != ntoh24(fh->fh_s_id) &&
34056 + ep->did != FC_FID_FLOGI) {
34057 +- atomic_inc(&mp->stats.xid_not_found);
34058 ++ atomic_inc_unchecked(&mp->stats.xid_not_found);
34059 + goto rel;
34060 + }
34061 + sof = fr_sof(fp);
34062 +@@ -1454,7 +1454,7 @@ static void fc_exch_recv_seq_resp(struct
34063 + sp->ssb_stat |= SSB_ST_RESP;
34064 + sp->id = fh->fh_seq_id;
34065 + } else if (sp->id != fh->fh_seq_id) {
34066 +- atomic_inc(&mp->stats.seq_not_found);
34067 ++ atomic_inc_unchecked(&mp->stats.seq_not_found);
34068 + goto rel;
34069 + }
34070 +
34071 +@@ -1518,9 +1518,9 @@ static void fc_exch_recv_resp(struct fc_
34072 + sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34073 +
34074 + if (!sp)
34075 +- atomic_inc(&mp->stats.xid_not_found);
34076 ++ atomic_inc_unchecked(&mp->stats.xid_not_found);
34077 + else
34078 +- atomic_inc(&mp->stats.non_bls_resp);
34079 ++ atomic_inc_unchecked(&mp->stats.non_bls_resp);
34080 +
34081 + fc_frame_free(fp);
34082 + }
34083 +diff -urNp linux-3.1.1/drivers/scsi/libsas/sas_ata.c linux-3.1.1/drivers/scsi/libsas/sas_ata.c
34084 +--- linux-3.1.1/drivers/scsi/libsas/sas_ata.c 2011-11-11 15:19:27.000000000 -0500
34085 ++++ linux-3.1.1/drivers/scsi/libsas/sas_ata.c 2011-11-16 18:39:07.000000000 -0500
34086 +@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sa
34087 + .postreset = ata_std_postreset,
34088 + .error_handler = ata_std_error_handler,
34089 + .post_internal_cmd = sas_ata_post_internal,
34090 +- .qc_defer = ata_std_qc_defer,
34091 ++ .qc_defer = ata_std_qc_defer,
34092 + .qc_prep = ata_noop_qc_prep,
34093 + .qc_issue = sas_ata_qc_issue,
34094 + .qc_fill_rtf = sas_ata_qc_fill_rtf,
34095 +diff -urNp linux-3.1.1/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.1.1/drivers/scsi/lpfc/lpfc_debugfs.c
34096 +--- linux-3.1.1/drivers/scsi/lpfc/lpfc_debugfs.c 2011-11-11 15:19:27.000000000 -0500
34097 ++++ linux-3.1.1/drivers/scsi/lpfc/lpfc_debugfs.c 2011-11-16 18:40:22.000000000 -0500
34098 +@@ -105,7 +105,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
34099 +
34100 + #include <linux/debugfs.h>
34101 +
34102 +-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34103 ++static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34104 + static unsigned long lpfc_debugfs_start_time = 0L;
34105 +
34106 + /* iDiag */
34107 +@@ -146,7 +146,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
34108 + lpfc_debugfs_enable = 0;
34109 +
34110 + len = 0;
34111 +- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34112 ++ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34113 + (lpfc_debugfs_max_disc_trc - 1);
34114 + for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34115 + dtp = vport->disc_trc + i;
34116 +@@ -212,7 +212,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
34117 + lpfc_debugfs_enable = 0;
34118 +
34119 + len = 0;
34120 +- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34121 ++ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34122 + (lpfc_debugfs_max_slow_ring_trc - 1);
34123 + for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34124 + dtp = phba->slow_ring_trc + i;
34125 +@@ -635,14 +635,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
34126 + !vport || !vport->disc_trc)
34127 + return;
34128 +
34129 +- index = atomic_inc_return(&vport->disc_trc_cnt) &
34130 ++ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34131 + (lpfc_debugfs_max_disc_trc - 1);
34132 + dtp = vport->disc_trc + index;
34133 + dtp->fmt = fmt;
34134 + dtp->data1 = data1;
34135 + dtp->data2 = data2;
34136 + dtp->data3 = data3;
34137 +- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34138 ++ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34139 + dtp->jif = jiffies;
34140 + #endif
34141 + return;
34142 +@@ -673,14 +673,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
34143 + !phba || !phba->slow_ring_trc)
34144 + return;
34145 +
34146 +- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34147 ++ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34148 + (lpfc_debugfs_max_slow_ring_trc - 1);
34149 + dtp = phba->slow_ring_trc + index;
34150 + dtp->fmt = fmt;
34151 + dtp->data1 = data1;
34152 + dtp->data2 = data2;
34153 + dtp->data3 = data3;
34154 +- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34155 ++ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34156 + dtp->jif = jiffies;
34157 + #endif
34158 + return;
34159 +@@ -3828,7 +3828,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34160 + "slow_ring buffer\n");
34161 + goto debug_failed;
34162 + }
34163 +- atomic_set(&phba->slow_ring_trc_cnt, 0);
34164 ++ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34165 + memset(phba->slow_ring_trc, 0,
34166 + (sizeof(struct lpfc_debugfs_trc) *
34167 + lpfc_debugfs_max_slow_ring_trc));
34168 +@@ -3874,7 +3874,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34169 + "buffer\n");
34170 + goto debug_failed;
34171 + }
34172 +- atomic_set(&vport->disc_trc_cnt, 0);
34173 ++ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34174 +
34175 + snprintf(name, sizeof(name), "discovery_trace");
34176 + vport->debug_disc_trc =
34177 +diff -urNp linux-3.1.1/drivers/scsi/lpfc/lpfc.h linux-3.1.1/drivers/scsi/lpfc/lpfc.h
34178 +--- linux-3.1.1/drivers/scsi/lpfc/lpfc.h 2011-11-11 15:19:27.000000000 -0500
34179 ++++ linux-3.1.1/drivers/scsi/lpfc/lpfc.h 2011-11-16 18:39:07.000000000 -0500
34180 +@@ -425,7 +425,7 @@ struct lpfc_vport {
34181 + struct dentry *debug_nodelist;
34182 + struct dentry *vport_debugfs_root;
34183 + struct lpfc_debugfs_trc *disc_trc;
34184 +- atomic_t disc_trc_cnt;
34185 ++ atomic_unchecked_t disc_trc_cnt;
34186 + #endif
34187 + uint8_t stat_data_enabled;
34188 + uint8_t stat_data_blocked;
34189 +@@ -835,8 +835,8 @@ struct lpfc_hba {
34190 + struct timer_list fabric_block_timer;
34191 + unsigned long bit_flags;
34192 + #define FABRIC_COMANDS_BLOCKED 0
34193 +- atomic_t num_rsrc_err;
34194 +- atomic_t num_cmd_success;
34195 ++ atomic_unchecked_t num_rsrc_err;
34196 ++ atomic_unchecked_t num_cmd_success;
34197 + unsigned long last_rsrc_error_time;
34198 + unsigned long last_ramp_down_time;
34199 + unsigned long last_ramp_up_time;
34200 +@@ -850,7 +850,7 @@ struct lpfc_hba {
34201 + struct dentry *debug_dumpDif; /* BlockGuard BPL*/
34202 + struct dentry *debug_slow_ring_trc;
34203 + struct lpfc_debugfs_trc *slow_ring_trc;
34204 +- atomic_t slow_ring_trc_cnt;
34205 ++ atomic_unchecked_t slow_ring_trc_cnt;
34206 + /* iDiag debugfs sub-directory */
34207 + struct dentry *idiag_root;
34208 + struct dentry *idiag_pci_cfg;
34209 +diff -urNp linux-3.1.1/drivers/scsi/lpfc/lpfc_init.c linux-3.1.1/drivers/scsi/lpfc/lpfc_init.c
34210 +--- linux-3.1.1/drivers/scsi/lpfc/lpfc_init.c 2011-11-11 15:19:27.000000000 -0500
34211 ++++ linux-3.1.1/drivers/scsi/lpfc/lpfc_init.c 2011-11-16 18:39:07.000000000 -0500
34212 +@@ -9969,8 +9969,10 @@ lpfc_init(void)
34213 + printk(LPFC_COPYRIGHT "\n");
34214 +
34215 + if (lpfc_enable_npiv) {
34216 +- lpfc_transport_functions.vport_create = lpfc_vport_create;
34217 +- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34218 ++ pax_open_kernel();
34219 ++ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34220 ++ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34221 ++ pax_close_kernel();
34222 + }
34223 + lpfc_transport_template =
34224 + fc_attach_transport(&lpfc_transport_functions);
34225 +diff -urNp linux-3.1.1/drivers/scsi/lpfc/lpfc_scsi.c linux-3.1.1/drivers/scsi/lpfc/lpfc_scsi.c
34226 +--- linux-3.1.1/drivers/scsi/lpfc/lpfc_scsi.c 2011-11-11 15:19:27.000000000 -0500
34227 ++++ linux-3.1.1/drivers/scsi/lpfc/lpfc_scsi.c 2011-11-16 18:39:07.000000000 -0500
34228 +@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
34229 + uint32_t evt_posted;
34230 +
34231 + spin_lock_irqsave(&phba->hbalock, flags);
34232 +- atomic_inc(&phba->num_rsrc_err);
34233 ++ atomic_inc_unchecked(&phba->num_rsrc_err);
34234 + phba->last_rsrc_error_time = jiffies;
34235 +
34236 + if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34237 +@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
34238 + unsigned long flags;
34239 + struct lpfc_hba *phba = vport->phba;
34240 + uint32_t evt_posted;
34241 +- atomic_inc(&phba->num_cmd_success);
34242 ++ atomic_inc_unchecked(&phba->num_cmd_success);
34243 +
34244 + if (vport->cfg_lun_queue_depth <= queue_depth)
34245 + return;
34246 +@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34247 + unsigned long num_rsrc_err, num_cmd_success;
34248 + int i;
34249 +
34250 +- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34251 +- num_cmd_success = atomic_read(&phba->num_cmd_success);
34252 ++ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34253 ++ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34254 +
34255 + vports = lpfc_create_vport_work_array(phba);
34256 + if (vports != NULL)
34257 +@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34258 + }
34259 + }
34260 + lpfc_destroy_vport_work_array(phba, vports);
34261 +- atomic_set(&phba->num_rsrc_err, 0);
34262 +- atomic_set(&phba->num_cmd_success, 0);
34263 ++ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34264 ++ atomic_set_unchecked(&phba->num_cmd_success, 0);
34265 + }
34266 +
34267 + /**
34268 +@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
34269 + }
34270 + }
34271 + lpfc_destroy_vport_work_array(phba, vports);
34272 +- atomic_set(&phba->num_rsrc_err, 0);
34273 +- atomic_set(&phba->num_cmd_success, 0);
34274 ++ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34275 ++ atomic_set_unchecked(&phba->num_cmd_success, 0);
34276 + }
34277 +
34278 + /**
34279 +diff -urNp linux-3.1.1/drivers/scsi/megaraid/megaraid_mbox.c linux-3.1.1/drivers/scsi/megaraid/megaraid_mbox.c
34280 +--- linux-3.1.1/drivers/scsi/megaraid/megaraid_mbox.c 2011-11-11 15:19:27.000000000 -0500
34281 ++++ linux-3.1.1/drivers/scsi/megaraid/megaraid_mbox.c 2011-11-16 18:40:22.000000000 -0500
34282 +@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
34283 + int rval;
34284 + int i;
34285 +
34286 ++ pax_track_stack();
34287 ++
34288 + // Allocate memory for the base list of scb for management module.
34289 + adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
34290 +
34291 +diff -urNp linux-3.1.1/drivers/scsi/osd/osd_initiator.c linux-3.1.1/drivers/scsi/osd/osd_initiator.c
34292 +--- linux-3.1.1/drivers/scsi/osd/osd_initiator.c 2011-11-11 15:19:27.000000000 -0500
34293 ++++ linux-3.1.1/drivers/scsi/osd/osd_initiator.c 2011-11-16 18:40:22.000000000 -0500
34294 +@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
34295 + int nelem = ARRAY_SIZE(get_attrs), a = 0;
34296 + int ret;
34297 +
34298 ++ pax_track_stack();
34299 ++
34300 + or = osd_start_request(od, GFP_KERNEL);
34301 + if (!or)
34302 + return -ENOMEM;
34303 +diff -urNp linux-3.1.1/drivers/scsi/pmcraid.c linux-3.1.1/drivers/scsi/pmcraid.c
34304 +--- linux-3.1.1/drivers/scsi/pmcraid.c 2011-11-11 15:19:27.000000000 -0500
34305 ++++ linux-3.1.1/drivers/scsi/pmcraid.c 2011-11-16 18:39:07.000000000 -0500
34306 +@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
34307 + res->scsi_dev = scsi_dev;
34308 + scsi_dev->hostdata = res;
34309 + res->change_detected = 0;
34310 +- atomic_set(&res->read_failures, 0);
34311 +- atomic_set(&res->write_failures, 0);
34312 ++ atomic_set_unchecked(&res->read_failures, 0);
34313 ++ atomic_set_unchecked(&res->write_failures, 0);
34314 + rc = 0;
34315 + }
34316 + spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34317 +@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
34318 +
34319 + /* If this was a SCSI read/write command keep count of errors */
34320 + if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34321 +- atomic_inc(&res->read_failures);
34322 ++ atomic_inc_unchecked(&res->read_failures);
34323 + else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34324 +- atomic_inc(&res->write_failures);
34325 ++ atomic_inc_unchecked(&res->write_failures);
34326 +
34327 + if (!RES_IS_GSCSI(res->cfg_entry) &&
34328 + masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34329 +@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
34330 + * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34331 + * hrrq_id assigned here in queuecommand
34332 + */
34333 +- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34334 ++ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34335 + pinstance->num_hrrq;
34336 + cmd->cmd_done = pmcraid_io_done;
34337 +
34338 +@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
34339 + * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34340 + * hrrq_id assigned here in queuecommand
34341 + */
34342 +- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34343 ++ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34344 + pinstance->num_hrrq;
34345 +
34346 + if (request_size) {
34347 +@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(stru
34348 +
34349 + pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34350 + /* add resources only after host is added into system */
34351 +- if (!atomic_read(&pinstance->expose_resources))
34352 ++ if (!atomic_read_unchecked(&pinstance->expose_resources))
34353 + return;
34354 +
34355 + fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34356 +@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instan
34357 + init_waitqueue_head(&pinstance->reset_wait_q);
34358 +
34359 + atomic_set(&pinstance->outstanding_cmds, 0);
34360 +- atomic_set(&pinstance->last_message_id, 0);
34361 +- atomic_set(&pinstance->expose_resources, 0);
34362 ++ atomic_set_unchecked(&pinstance->last_message_id, 0);
34363 ++ atomic_set_unchecked(&pinstance->expose_resources, 0);
34364 +
34365 + INIT_LIST_HEAD(&pinstance->free_res_q);
34366 + INIT_LIST_HEAD(&pinstance->used_res_q);
34367 +@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
34368 + /* Schedule worker thread to handle CCN and take care of adding and
34369 + * removing devices to OS
34370 + */
34371 +- atomic_set(&pinstance->expose_resources, 1);
34372 ++ atomic_set_unchecked(&pinstance->expose_resources, 1);
34373 + schedule_work(&pinstance->worker_q);
34374 + return rc;
34375 +
34376 +diff -urNp linux-3.1.1/drivers/scsi/pmcraid.h linux-3.1.1/drivers/scsi/pmcraid.h
34377 +--- linux-3.1.1/drivers/scsi/pmcraid.h 2011-11-11 15:19:27.000000000 -0500
34378 ++++ linux-3.1.1/drivers/scsi/pmcraid.h 2011-11-16 18:39:07.000000000 -0500
34379 +@@ -749,7 +749,7 @@ struct pmcraid_instance {
34380 + struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34381 +
34382 + /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34383 +- atomic_t last_message_id;
34384 ++ atomic_unchecked_t last_message_id;
34385 +
34386 + /* configuration table */
34387 + struct pmcraid_config_table *cfg_table;
34388 +@@ -778,7 +778,7 @@ struct pmcraid_instance {
34389 + atomic_t outstanding_cmds;
34390 +
34391 + /* should add/delete resources to mid-layer now ?*/
34392 +- atomic_t expose_resources;
34393 ++ atomic_unchecked_t expose_resources;
34394 +
34395 +
34396 +
34397 +@@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
34398 + struct pmcraid_config_table_entry_ext cfg_entry_ext;
34399 + };
34400 + struct scsi_device *scsi_dev; /* Link scsi_device structure */
34401 +- atomic_t read_failures; /* count of failed READ commands */
34402 +- atomic_t write_failures; /* count of failed WRITE commands */
34403 ++ atomic_unchecked_t read_failures; /* count of failed READ commands */
34404 ++ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34405 +
34406 + /* To indicate add/delete/modify during CCN */
34407 + u8 change_detected;
34408 +diff -urNp linux-3.1.1/drivers/scsi/qla2xxx/qla_def.h linux-3.1.1/drivers/scsi/qla2xxx/qla_def.h
34409 +--- linux-3.1.1/drivers/scsi/qla2xxx/qla_def.h 2011-11-11 15:19:27.000000000 -0500
34410 ++++ linux-3.1.1/drivers/scsi/qla2xxx/qla_def.h 2011-11-16 18:39:07.000000000 -0500
34411 +@@ -2244,7 +2244,7 @@ struct isp_operations {
34412 + int (*get_flash_version) (struct scsi_qla_host *, void *);
34413 + int (*start_scsi) (srb_t *);
34414 + int (*abort_isp) (struct scsi_qla_host *);
34415 +-};
34416 ++} __no_const;
34417 +
34418 + /* MSI-X Support *************************************************************/
34419 +
34420 +diff -urNp linux-3.1.1/drivers/scsi/qla4xxx/ql4_def.h linux-3.1.1/drivers/scsi/qla4xxx/ql4_def.h
34421 +--- linux-3.1.1/drivers/scsi/qla4xxx/ql4_def.h 2011-11-11 15:19:27.000000000 -0500
34422 ++++ linux-3.1.1/drivers/scsi/qla4xxx/ql4_def.h 2011-11-16 18:39:07.000000000 -0500
34423 +@@ -256,7 +256,7 @@ struct ddb_entry {
34424 + atomic_t retry_relogin_timer; /* Min Time between relogins
34425 + * (4000 only) */
34426 + atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
34427 +- atomic_t relogin_retry_count; /* Num of times relogin has been
34428 ++ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34429 + * retried */
34430 +
34431 + uint16_t port;
34432 +diff -urNp linux-3.1.1/drivers/scsi/qla4xxx/ql4_init.c linux-3.1.1/drivers/scsi/qla4xxx/ql4_init.c
34433 +--- linux-3.1.1/drivers/scsi/qla4xxx/ql4_init.c 2011-11-11 15:19:27.000000000 -0500
34434 ++++ linux-3.1.1/drivers/scsi/qla4xxx/ql4_init.c 2011-11-16 18:39:07.000000000 -0500
34435 +@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
34436 + ddb_entry->fw_ddb_index = fw_ddb_index;
34437 + atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34438 + atomic_set(&ddb_entry->relogin_timer, 0);
34439 +- atomic_set(&ddb_entry->relogin_retry_count, 0);
34440 ++ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34441 + atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34442 + list_add_tail(&ddb_entry->list, &ha->ddb_list);
34443 + ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
34444 +@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
34445 + if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
34446 + (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
34447 + atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34448 +- atomic_set(&ddb_entry->relogin_retry_count, 0);
34449 ++ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34450 + atomic_set(&ddb_entry->relogin_timer, 0);
34451 + clear_bit(DF_RELOGIN, &ddb_entry->flags);
34452 + iscsi_unblock_session(ddb_entry->sess);
34453 +diff -urNp linux-3.1.1/drivers/scsi/qla4xxx/ql4_os.c linux-3.1.1/drivers/scsi/qla4xxx/ql4_os.c
34454 +--- linux-3.1.1/drivers/scsi/qla4xxx/ql4_os.c 2011-11-11 15:19:27.000000000 -0500
34455 ++++ linux-3.1.1/drivers/scsi/qla4xxx/ql4_os.c 2011-11-16 18:39:07.000000000 -0500
34456 +@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_ql
34457 + ddb_entry->fw_ddb_device_state ==
34458 + DDB_DS_SESSION_FAILED) {
34459 + /* Reset retry relogin timer */
34460 +- atomic_inc(&ddb_entry->relogin_retry_count);
34461 ++ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34462 + DEBUG2(printk("scsi%ld: ddb [%d] relogin"
34463 + " timed out-retrying"
34464 + " relogin (%d)\n",
34465 + ha->host_no,
34466 + ddb_entry->fw_ddb_index,
34467 +- atomic_read(&ddb_entry->
34468 ++ atomic_read_unchecked(&ddb_entry->
34469 + relogin_retry_count))
34470 + );
34471 + start_dpc++;
34472 +diff -urNp linux-3.1.1/drivers/scsi/scsi.c linux-3.1.1/drivers/scsi/scsi.c
34473 +--- linux-3.1.1/drivers/scsi/scsi.c 2011-11-11 15:19:27.000000000 -0500
34474 ++++ linux-3.1.1/drivers/scsi/scsi.c 2011-11-16 18:39:07.000000000 -0500
34475 +@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
34476 + unsigned long timeout;
34477 + int rtn = 0;
34478 +
34479 +- atomic_inc(&cmd->device->iorequest_cnt);
34480 ++ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34481 +
34482 + /* check if the device is still usable */
34483 + if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34484 +diff -urNp linux-3.1.1/drivers/scsi/scsi_debug.c linux-3.1.1/drivers/scsi/scsi_debug.c
34485 +--- linux-3.1.1/drivers/scsi/scsi_debug.c 2011-11-11 15:19:27.000000000 -0500
34486 ++++ linux-3.1.1/drivers/scsi/scsi_debug.c 2011-11-16 18:40:22.000000000 -0500
34487 +@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
34488 + unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
34489 + unsigned char *cmd = (unsigned char *)scp->cmnd;
34490 +
34491 ++ pax_track_stack();
34492 ++
34493 + if ((errsts = check_readiness(scp, 1, devip)))
34494 + return errsts;
34495 + memset(arr, 0, sizeof(arr));
34496 +@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
34497 + unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
34498 + unsigned char *cmd = (unsigned char *)scp->cmnd;
34499 +
34500 ++ pax_track_stack();
34501 ++
34502 + if ((errsts = check_readiness(scp, 1, devip)))
34503 + return errsts;
34504 + memset(arr, 0, sizeof(arr));
34505 +diff -urNp linux-3.1.1/drivers/scsi/scsi_lib.c linux-3.1.1/drivers/scsi/scsi_lib.c
34506 +--- linux-3.1.1/drivers/scsi/scsi_lib.c 2011-11-11 15:19:27.000000000 -0500
34507 ++++ linux-3.1.1/drivers/scsi/scsi_lib.c 2011-11-16 18:39:07.000000000 -0500
34508 +@@ -1413,7 +1413,7 @@ static void scsi_kill_request(struct req
34509 + shost = sdev->host;
34510 + scsi_init_cmd_errh(cmd);
34511 + cmd->result = DID_NO_CONNECT << 16;
34512 +- atomic_inc(&cmd->device->iorequest_cnt);
34513 ++ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34514 +
34515 + /*
34516 + * SCSI request completion path will do scsi_device_unbusy(),
34517 +@@ -1439,9 +1439,9 @@ static void scsi_softirq_done(struct req
34518 +
34519 + INIT_LIST_HEAD(&cmd->eh_entry);
34520 +
34521 +- atomic_inc(&cmd->device->iodone_cnt);
34522 ++ atomic_inc_unchecked(&cmd->device->iodone_cnt);
34523 + if (cmd->result)
34524 +- atomic_inc(&cmd->device->ioerr_cnt);
34525 ++ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34526 +
34527 + disposition = scsi_decide_disposition(cmd);
34528 + if (disposition != SUCCESS &&
34529 +diff -urNp linux-3.1.1/drivers/scsi/scsi_sysfs.c linux-3.1.1/drivers/scsi/scsi_sysfs.c
34530 +--- linux-3.1.1/drivers/scsi/scsi_sysfs.c 2011-11-11 15:19:27.000000000 -0500
34531 ++++ linux-3.1.1/drivers/scsi/scsi_sysfs.c 2011-11-16 18:39:07.000000000 -0500
34532 +@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
34533 + char *buf) \
34534 + { \
34535 + struct scsi_device *sdev = to_scsi_device(dev); \
34536 +- unsigned long long count = atomic_read(&sdev->field); \
34537 ++ unsigned long long count = atomic_read_unchecked(&sdev->field); \
34538 + return snprintf(buf, 20, "0x%llx\n", count); \
34539 + } \
34540 + static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34541 +diff -urNp linux-3.1.1/drivers/scsi/scsi_tgt_lib.c linux-3.1.1/drivers/scsi/scsi_tgt_lib.c
34542 +--- linux-3.1.1/drivers/scsi/scsi_tgt_lib.c 2011-11-11 15:19:27.000000000 -0500
34543 ++++ linux-3.1.1/drivers/scsi/scsi_tgt_lib.c 2011-11-16 18:39:07.000000000 -0500
34544 +@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct sc
34545 + int err;
34546 +
34547 + dprintk("%lx %u\n", uaddr, len);
34548 +- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
34549 ++ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
34550 + if (err) {
34551 + /*
34552 + * TODO: need to fixup sg_tablesize, max_segment_size,
34553 +diff -urNp linux-3.1.1/drivers/scsi/scsi_transport_fc.c linux-3.1.1/drivers/scsi/scsi_transport_fc.c
34554 +--- linux-3.1.1/drivers/scsi/scsi_transport_fc.c 2011-11-11 15:19:27.000000000 -0500
34555 ++++ linux-3.1.1/drivers/scsi/scsi_transport_fc.c 2011-11-16 18:39:07.000000000 -0500
34556 +@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
34557 + * Netlink Infrastructure
34558 + */
34559 +
34560 +-static atomic_t fc_event_seq;
34561 ++static atomic_unchecked_t fc_event_seq;
34562 +
34563 + /**
34564 + * fc_get_event_number - Obtain the next sequential FC event number
34565 +@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
34566 + u32
34567 + fc_get_event_number(void)
34568 + {
34569 +- return atomic_add_return(1, &fc_event_seq);
34570 ++ return atomic_add_return_unchecked(1, &fc_event_seq);
34571 + }
34572 + EXPORT_SYMBOL(fc_get_event_number);
34573 +
34574 +@@ -645,7 +645,7 @@ static __init int fc_transport_init(void
34575 + {
34576 + int error;
34577 +
34578 +- atomic_set(&fc_event_seq, 0);
34579 ++ atomic_set_unchecked(&fc_event_seq, 0);
34580 +
34581 + error = transport_class_register(&fc_host_class);
34582 + if (error)
34583 +@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char
34584 + char *cp;
34585 +
34586 + *val = simple_strtoul(buf, &cp, 0);
34587 +- if ((*cp && (*cp != '\n')) || (*val < 0))
34588 ++ if (*cp && (*cp != '\n'))
34589 + return -EINVAL;
34590 + /*
34591 + * Check for overflow; dev_loss_tmo is u32
34592 +diff -urNp linux-3.1.1/drivers/scsi/scsi_transport_iscsi.c linux-3.1.1/drivers/scsi/scsi_transport_iscsi.c
34593 +--- linux-3.1.1/drivers/scsi/scsi_transport_iscsi.c 2011-11-11 15:19:27.000000000 -0500
34594 ++++ linux-3.1.1/drivers/scsi/scsi_transport_iscsi.c 2011-11-16 18:39:07.000000000 -0500
34595 +@@ -83,7 +83,7 @@ struct iscsi_internal {
34596 + struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34597 + };
34598 +
34599 +-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34600 ++static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34601 + static struct workqueue_struct *iscsi_eh_timer_workq;
34602 +
34603 + /*
34604 +@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
34605 + int err;
34606 +
34607 + ihost = shost->shost_data;
34608 +- session->sid = atomic_add_return(1, &iscsi_session_nr);
34609 ++ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34610 +
34611 + if (id == ISCSI_MAX_TARGET) {
34612 + for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34613 +@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
34614 + printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34615 + ISCSI_TRANSPORT_VERSION);
34616 +
34617 +- atomic_set(&iscsi_session_nr, 0);
34618 ++ atomic_set_unchecked(&iscsi_session_nr, 0);
34619 +
34620 + err = class_register(&iscsi_transport_class);
34621 + if (err)
34622 +diff -urNp linux-3.1.1/drivers/scsi/scsi_transport_srp.c linux-3.1.1/drivers/scsi/scsi_transport_srp.c
34623 +--- linux-3.1.1/drivers/scsi/scsi_transport_srp.c 2011-11-11 15:19:27.000000000 -0500
34624 ++++ linux-3.1.1/drivers/scsi/scsi_transport_srp.c 2011-11-16 18:39:07.000000000 -0500
34625 +@@ -33,7 +33,7 @@
34626 + #include "scsi_transport_srp_internal.h"
34627 +
34628 + struct srp_host_attrs {
34629 +- atomic_t next_port_id;
34630 ++ atomic_unchecked_t next_port_id;
34631 + };
34632 + #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34633 +
34634 +@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34635 + struct Scsi_Host *shost = dev_to_shost(dev);
34636 + struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34637 +
34638 +- atomic_set(&srp_host->next_port_id, 0);
34639 ++ atomic_set_unchecked(&srp_host->next_port_id, 0);
34640 + return 0;
34641 + }
34642 +
34643 +@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34644 + memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34645 + rport->roles = ids->roles;
34646 +
34647 +- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34648 ++ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34649 + dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34650 +
34651 + transport_setup_device(&rport->dev);
34652 +diff -urNp linux-3.1.1/drivers/scsi/sg.c linux-3.1.1/drivers/scsi/sg.c
34653 +--- linux-3.1.1/drivers/scsi/sg.c 2011-11-11 15:19:27.000000000 -0500
34654 ++++ linux-3.1.1/drivers/scsi/sg.c 2011-11-16 18:39:07.000000000 -0500
34655 +@@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int
34656 + sdp->disk->disk_name,
34657 + MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
34658 + NULL,
34659 +- (char *)arg);
34660 ++ (char __user *)arg);
34661 + case BLKTRACESTART:
34662 + return blk_trace_startstop(sdp->device->request_queue, 1);
34663 + case BLKTRACESTOP:
34664 +@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
34665 + const struct file_operations * fops;
34666 + };
34667 +
34668 +-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34669 ++static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34670 + {"allow_dio", &adio_fops},
34671 + {"debug", &debug_fops},
34672 + {"def_reserved_size", &dressz_fops},
34673 +@@ -2325,7 +2325,7 @@ sg_proc_init(void)
34674 + {
34675 + int k, mask;
34676 + int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34677 +- struct sg_proc_leaf * leaf;
34678 ++ const struct sg_proc_leaf * leaf;
34679 +
34680 + sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34681 + if (!sg_proc_sgp)
34682 +diff -urNp linux-3.1.1/drivers/scsi/sym53c8xx_2/sym_glue.c linux-3.1.1/drivers/scsi/sym53c8xx_2/sym_glue.c
34683 +--- linux-3.1.1/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-11-11 15:19:27.000000000 -0500
34684 ++++ linux-3.1.1/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-11-16 18:40:22.000000000 -0500
34685 +@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
34686 + int do_iounmap = 0;
34687 + int do_disable_device = 1;
34688 +
34689 ++ pax_track_stack();
34690 ++
34691 + memset(&sym_dev, 0, sizeof(sym_dev));
34692 + memset(&nvram, 0, sizeof(nvram));
34693 + sym_dev.pdev = pdev;
34694 +diff -urNp linux-3.1.1/drivers/scsi/vmw_pvscsi.c linux-3.1.1/drivers/scsi/vmw_pvscsi.c
34695 +--- linux-3.1.1/drivers/scsi/vmw_pvscsi.c 2011-11-11 15:19:27.000000000 -0500
34696 ++++ linux-3.1.1/drivers/scsi/vmw_pvscsi.c 2011-11-16 18:40:22.000000000 -0500
34697 +@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
34698 + dma_addr_t base;
34699 + unsigned i;
34700 +
34701 ++ pax_track_stack();
34702 ++
34703 + cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
34704 + cmd.reqRingNumPages = adapter->req_pages;
34705 + cmd.cmpRingNumPages = adapter->cmp_pages;
34706 +diff -urNp linux-3.1.1/drivers/spi/spi.c linux-3.1.1/drivers/spi/spi.c
34707 +--- linux-3.1.1/drivers/spi/spi.c 2011-11-11 15:19:27.000000000 -0500
34708 ++++ linux-3.1.1/drivers/spi/spi.c 2011-11-16 18:39:07.000000000 -0500
34709 +@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
34710 + EXPORT_SYMBOL_GPL(spi_bus_unlock);
34711 +
34712 + /* portable code must never pass more than 32 bytes */
34713 +-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34714 ++#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
34715 +
34716 + static u8 *buf;
34717 +
34718 +diff -urNp linux-3.1.1/drivers/spi/spi-dw-pci.c linux-3.1.1/drivers/spi/spi-dw-pci.c
34719 +--- linux-3.1.1/drivers/spi/spi-dw-pci.c 2011-11-11 15:19:27.000000000 -0500
34720 ++++ linux-3.1.1/drivers/spi/spi-dw-pci.c 2011-11-16 18:39:07.000000000 -0500
34721 +@@ -148,7 +148,7 @@ static int spi_resume(struct pci_dev *pd
34722 + #define spi_resume NULL
34723 + #endif
34724 +
34725 +-static const struct pci_device_id pci_ids[] __devinitdata = {
34726 ++static const struct pci_device_id pci_ids[] __devinitconst = {
34727 + /* Intel MID platform SPI controller 0 */
34728 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
34729 + {},
34730 +diff -urNp linux-3.1.1/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-3.1.1/drivers/staging/ath6kl/os/linux/ar6000_drv.c
34731 +--- linux-3.1.1/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-11-11 15:19:27.000000000 -0500
34732 ++++ linux-3.1.1/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-11-16 18:39:07.000000000 -0500
34733 +@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[
34734 + (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
34735 +
34736 +
34737 +-static struct net_device_ops ar6000_netdev_ops = {
34738 ++static net_device_ops_no_const ar6000_netdev_ops = {
34739 + .ndo_init = NULL,
34740 + .ndo_open = ar6000_open,
34741 + .ndo_stop = ar6000_close,
34742 +diff -urNp linux-3.1.1/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-3.1.1/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
34743 +--- linux-3.1.1/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-11-11 15:19:27.000000000 -0500
34744 ++++ linux-3.1.1/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-11-16 18:39:07.000000000 -0500
34745 +@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
34746 + typedef struct ar6k_pal_config_s
34747 + {
34748 + ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
34749 +-}ar6k_pal_config_t;
34750 ++} __no_const ar6k_pal_config_t;
34751 +
34752 + void register_pal_cb(ar6k_pal_config_t *palConfig_p);
34753 + #endif /* _AR6K_PAL_H_ */
34754 +diff -urNp linux-3.1.1/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-3.1.1/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
34755 +--- linux-3.1.1/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-11-11 15:19:27.000000000 -0500
34756 ++++ linux-3.1.1/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-11-16 18:39:07.000000000 -0500
34757 +@@ -451,14 +451,14 @@ static void brcmf_op_if(struct brcmf_if
34758 + free_netdev(ifp->net);
34759 + }
34760 + /* Allocate etherdev, including space for private structure */
34761 +- ifp->net = alloc_etherdev(sizeof(drvr_priv));
34762 ++ ifp->net = alloc_etherdev(sizeof(*drvr_priv));
34763 + if (!ifp->net) {
34764 + BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34765 + ret = -ENOMEM;
34766 + }
34767 + if (ret == 0) {
34768 + strcpy(ifp->net->name, ifp->name);
34769 +- memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(drvr_priv));
34770 ++ memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(*drvr_priv));
34771 + err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
34772 + if (err != 0) {
34773 + BRCMF_ERROR(("%s: brcmf_net_attach failed, "
34774 +@@ -1279,7 +1279,7 @@ struct brcmf_pub *brcmf_attach(struct br
34775 + BRCMF_TRACE(("%s: Enter\n", __func__));
34776 +
34777 + /* Allocate etherdev, including space for private structure */
34778 +- net = alloc_etherdev(sizeof(drvr_priv));
34779 ++ net = alloc_etherdev(sizeof(*drvr_priv));
34780 + if (!net) {
34781 + BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
34782 + goto fail;
34783 +@@ -1295,7 +1295,7 @@ struct brcmf_pub *brcmf_attach(struct br
34784 + /*
34785 + * Save the brcmf_info into the priv
34786 + */
34787 +- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
34788 ++ memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
34789 +
34790 + /* Set network interface name if it was provided as module parameter */
34791 + if (iface_name[0]) {
34792 +@@ -1352,7 +1352,7 @@ struct brcmf_pub *brcmf_attach(struct br
34793 + /*
34794 + * Save the brcmf_info into the priv
34795 + */
34796 +- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
34797 ++ memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
34798 +
34799 + #if defined(CONFIG_PM_SLEEP)
34800 + atomic_set(&brcmf_mmc_suspend, false);
34801 +diff -urNp linux-3.1.1/drivers/staging/brcm80211/brcmfmac/sdio_host.h linux-3.1.1/drivers/staging/brcm80211/brcmfmac/sdio_host.h
34802 +--- linux-3.1.1/drivers/staging/brcm80211/brcmfmac/sdio_host.h 2011-11-11 15:19:27.000000000 -0500
34803 ++++ linux-3.1.1/drivers/staging/brcm80211/brcmfmac/sdio_host.h 2011-11-16 18:39:07.000000000 -0500
34804 +@@ -263,7 +263,7 @@ struct brcmf_sdioh_driver {
34805 + u16 func, uint bustype, u32 regsva, void *param);
34806 + /* detach from device */
34807 + void (*detach) (void *ch);
34808 +-};
34809 ++} __no_const;
34810 +
34811 + struct sdioh_info;
34812 +
34813 +diff -urNp linux-3.1.1/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h linux-3.1.1/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
34814 +--- linux-3.1.1/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h 2011-11-11 15:19:27.000000000 -0500
34815 ++++ linux-3.1.1/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h 2011-11-16 18:39:07.000000000 -0500
34816 +@@ -591,7 +591,7 @@ struct phy_func_ptr {
34817 + initfn_t carrsuppr;
34818 + rxsigpwrfn_t rxsigpwr;
34819 + detachfn_t detach;
34820 +-};
34821 ++} __no_const;
34822 +
34823 + struct brcms_phy {
34824 + struct brcms_phy_pub pubpi_ro;
34825 +diff -urNp linux-3.1.1/drivers/staging/et131x/et1310_tx.c linux-3.1.1/drivers/staging/et131x/et1310_tx.c
34826 +--- linux-3.1.1/drivers/staging/et131x/et1310_tx.c 2011-11-11 15:19:27.000000000 -0500
34827 ++++ linux-3.1.1/drivers/staging/et131x/et1310_tx.c 2011-11-16 18:39:07.000000000 -0500
34828 +@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
34829 + struct net_device_stats *stats = &etdev->net_stats;
34830 +
34831 + if (tcb->flags & fMP_DEST_BROAD)
34832 +- atomic_inc(&etdev->stats.brdcstxmt);
34833 ++ atomic_inc_unchecked(&etdev->stats.brdcstxmt);
34834 + else if (tcb->flags & fMP_DEST_MULTI)
34835 +- atomic_inc(&etdev->stats.multixmt);
34836 ++ atomic_inc_unchecked(&etdev->stats.multixmt);
34837 + else
34838 +- atomic_inc(&etdev->stats.unixmt);
34839 ++ atomic_inc_unchecked(&etdev->stats.unixmt);
34840 +
34841 + if (tcb->skb) {
34842 + stats->tx_bytes += tcb->skb->len;
34843 +diff -urNp linux-3.1.1/drivers/staging/et131x/et131x_adapter.h linux-3.1.1/drivers/staging/et131x/et131x_adapter.h
34844 +--- linux-3.1.1/drivers/staging/et131x/et131x_adapter.h 2011-11-11 15:19:27.000000000 -0500
34845 ++++ linux-3.1.1/drivers/staging/et131x/et131x_adapter.h 2011-11-16 18:39:07.000000000 -0500
34846 +@@ -106,11 +106,11 @@ struct ce_stats {
34847 + * operations
34848 + */
34849 + u32 unircv; /* # multicast packets received */
34850 +- atomic_t unixmt; /* # multicast packets for Tx */
34851 ++ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
34852 + u32 multircv; /* # multicast packets received */
34853 +- atomic_t multixmt; /* # multicast packets for Tx */
34854 ++ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
34855 + u32 brdcstrcv; /* # broadcast packets received */
34856 +- atomic_t brdcstxmt; /* # broadcast packets for Tx */
34857 ++ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
34858 + u32 norcvbuf; /* # Rx packets discarded */
34859 + u32 noxmtbuf; /* # Tx packets discarded */
34860 +
34861 +diff -urNp linux-3.1.1/drivers/staging/hv/channel.c linux-3.1.1/drivers/staging/hv/channel.c
34862 +--- linux-3.1.1/drivers/staging/hv/channel.c 2011-11-11 15:19:27.000000000 -0500
34863 ++++ linux-3.1.1/drivers/staging/hv/channel.c 2011-11-16 18:39:07.000000000 -0500
34864 +@@ -447,8 +447,8 @@ int vmbus_establish_gpadl(struct vmbus_c
34865 + int ret = 0;
34866 + int t;
34867 +
34868 +- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
34869 +- atomic_inc(&vmbus_connection.next_gpadl_handle);
34870 ++ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
34871 ++ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
34872 +
34873 + ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
34874 + if (ret)
34875 +diff -urNp linux-3.1.1/drivers/staging/hv/hv.c linux-3.1.1/drivers/staging/hv/hv.c
34876 +--- linux-3.1.1/drivers/staging/hv/hv.c 2011-11-11 15:19:27.000000000 -0500
34877 ++++ linux-3.1.1/drivers/staging/hv/hv.c 2011-11-16 18:39:07.000000000 -0500
34878 +@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, voi
34879 + u64 output_address = (output) ? virt_to_phys(output) : 0;
34880 + u32 output_address_hi = output_address >> 32;
34881 + u32 output_address_lo = output_address & 0xFFFFFFFF;
34882 +- volatile void *hypercall_page = hv_context.hypercall_page;
34883 ++ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
34884 +
34885 + __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
34886 + "=a"(hv_status_lo) : "d" (control_hi),
34887 +diff -urNp linux-3.1.1/drivers/staging/hv/hv_mouse.c linux-3.1.1/drivers/staging/hv/hv_mouse.c
34888 +--- linux-3.1.1/drivers/staging/hv/hv_mouse.c 2011-11-11 15:19:27.000000000 -0500
34889 ++++ linux-3.1.1/drivers/staging/hv/hv_mouse.c 2011-11-16 18:39:07.000000000 -0500
34890 +@@ -878,8 +878,10 @@ static void reportdesc_callback(struct h
34891 + if (hid_dev) {
34892 + DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
34893 +
34894 +- hid_dev->ll_driver->open = mousevsc_hid_open;
34895 +- hid_dev->ll_driver->close = mousevsc_hid_close;
34896 ++ pax_open_kernel();
34897 ++ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
34898 ++ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
34899 ++ pax_close_kernel();
34900 +
34901 + hid_dev->bus = BUS_VIRTUAL;
34902 + hid_dev->vendor = input_device_ctx->device_info.vendor;
34903 +diff -urNp linux-3.1.1/drivers/staging/hv/hyperv_vmbus.h linux-3.1.1/drivers/staging/hv/hyperv_vmbus.h
34904 +--- linux-3.1.1/drivers/staging/hv/hyperv_vmbus.h 2011-11-11 15:19:27.000000000 -0500
34905 ++++ linux-3.1.1/drivers/staging/hv/hyperv_vmbus.h 2011-11-16 18:39:07.000000000 -0500
34906 +@@ -559,7 +559,7 @@ enum vmbus_connect_state {
34907 + struct vmbus_connection {
34908 + enum vmbus_connect_state conn_state;
34909 +
34910 +- atomic_t next_gpadl_handle;
34911 ++ atomic_unchecked_t next_gpadl_handle;
34912 +
34913 + /*
34914 + * Represents channel interrupts. Each bit position represents a
34915 +diff -urNp linux-3.1.1/drivers/staging/hv/rndis_filter.c linux-3.1.1/drivers/staging/hv/rndis_filter.c
34916 +--- linux-3.1.1/drivers/staging/hv/rndis_filter.c 2011-11-11 15:19:27.000000000 -0500
34917 ++++ linux-3.1.1/drivers/staging/hv/rndis_filter.c 2011-11-16 18:39:07.000000000 -0500
34918 +@@ -43,7 +43,7 @@ struct rndis_device {
34919 +
34920 + enum rndis_device_state state;
34921 + u32 link_stat;
34922 +- atomic_t new_req_id;
34923 ++ atomic_unchecked_t new_req_id;
34924 +
34925 + spinlock_t request_lock;
34926 + struct list_head req_list;
34927 +@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_r
34928 + * template
34929 + */
34930 + set = &rndis_msg->msg.set_req;
34931 +- set->req_id = atomic_inc_return(&dev->new_req_id);
34932 ++ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34933 +
34934 + /* Add to the request list */
34935 + spin_lock_irqsave(&dev->request_lock, flags);
34936 +@@ -622,7 +622,7 @@ static void rndis_filter_halt_device(str
34937 +
34938 + /* Setup the rndis set */
34939 + halt = &request->request_msg.msg.halt_req;
34940 +- halt->req_id = atomic_inc_return(&dev->new_req_id);
34941 ++ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34942 +
34943 + /* Ignore return since this msg is optional. */
34944 + rndis_filter_send_request(dev, request);
34945 +diff -urNp linux-3.1.1/drivers/staging/hv/vmbus_drv.c linux-3.1.1/drivers/staging/hv/vmbus_drv.c
34946 +--- linux-3.1.1/drivers/staging/hv/vmbus_drv.c 2011-11-11 15:19:27.000000000 -0500
34947 ++++ linux-3.1.1/drivers/staging/hv/vmbus_drv.c 2011-11-16 18:39:07.000000000 -0500
34948 +@@ -660,11 +660,11 @@ int vmbus_child_device_register(struct h
34949 + {
34950 + int ret = 0;
34951 +
34952 +- static atomic_t device_num = ATOMIC_INIT(0);
34953 ++ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
34954 +
34955 + /* Set the device name. Otherwise, device_register() will fail. */
34956 + dev_set_name(&child_device_obj->device, "vmbus_0_%d",
34957 +- atomic_inc_return(&device_num));
34958 ++ atomic_inc_return_unchecked(&device_num));
34959 +
34960 + /* The new device belongs to this bus */
34961 + child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
34962 +diff -urNp linux-3.1.1/drivers/staging/iio/ring_generic.h linux-3.1.1/drivers/staging/iio/ring_generic.h
34963 +--- linux-3.1.1/drivers/staging/iio/ring_generic.h 2011-11-11 15:19:27.000000000 -0500
34964 ++++ linux-3.1.1/drivers/staging/iio/ring_generic.h 2011-11-16 18:39:07.000000000 -0500
34965 +@@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
34966 +
34967 + int (*is_enabled)(struct iio_ring_buffer *ring);
34968 + int (*enable)(struct iio_ring_buffer *ring);
34969 +-};
34970 ++} __no_const;
34971 +
34972 + struct iio_ring_setup_ops {
34973 + int (*preenable)(struct iio_dev *);
34974 +diff -urNp linux-3.1.1/drivers/staging/mei/interface.c linux-3.1.1/drivers/staging/mei/interface.c
34975 +--- linux-3.1.1/drivers/staging/mei/interface.c 2011-11-11 15:19:27.000000000 -0500
34976 ++++ linux-3.1.1/drivers/staging/mei/interface.c 2011-11-17 18:39:18.000000000 -0500
34977 +@@ -332,7 +332,7 @@ int mei_send_flow_control(struct mei_dev
34978 + mei_hdr->reserved = 0;
34979 +
34980 + mei_flow_control = (struct hbm_flow_control *) &dev->wr_msg_buf[1];
34981 +- memset(mei_flow_control, 0, sizeof(mei_flow_control));
34982 ++ memset(mei_flow_control, 0, sizeof(*mei_flow_control));
34983 + mei_flow_control->host_addr = cl->host_client_id;
34984 + mei_flow_control->me_addr = cl->me_client_id;
34985 + mei_flow_control->cmd.cmd = MEI_FLOW_CONTROL_CMD;
34986 +@@ -396,7 +396,7 @@ int mei_disconnect(struct mei_device *de
34987 +
34988 + mei_cli_disconnect =
34989 + (struct hbm_client_disconnect_request *) &dev->wr_msg_buf[1];
34990 +- memset(mei_cli_disconnect, 0, sizeof(mei_cli_disconnect));
34991 ++ memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect));
34992 + mei_cli_disconnect->host_addr = cl->host_client_id;
34993 + mei_cli_disconnect->me_addr = cl->me_client_id;
34994 + mei_cli_disconnect->cmd.cmd = CLIENT_DISCONNECT_REQ_CMD;
34995 +diff -urNp linux-3.1.1/drivers/staging/octeon/ethernet.c linux-3.1.1/drivers/staging/octeon/ethernet.c
34996 +--- linux-3.1.1/drivers/staging/octeon/ethernet.c 2011-11-11 15:19:27.000000000 -0500
34997 ++++ linux-3.1.1/drivers/staging/octeon/ethernet.c 2011-11-16 18:39:07.000000000 -0500
34998 +@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
34999 + * since the RX tasklet also increments it.
35000 + */
35001 + #ifdef CONFIG_64BIT
35002 +- atomic64_add(rx_status.dropped_packets,
35003 +- (atomic64_t *)&priv->stats.rx_dropped);
35004 ++ atomic64_add_unchecked(rx_status.dropped_packets,
35005 ++ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35006 + #else
35007 +- atomic_add(rx_status.dropped_packets,
35008 +- (atomic_t *)&priv->stats.rx_dropped);
35009 ++ atomic_add_unchecked(rx_status.dropped_packets,
35010 ++ (atomic_unchecked_t *)&priv->stats.rx_dropped);
35011 + #endif
35012 + }
35013 +
35014 +diff -urNp linux-3.1.1/drivers/staging/octeon/ethernet-rx.c linux-3.1.1/drivers/staging/octeon/ethernet-rx.c
35015 +--- linux-3.1.1/drivers/staging/octeon/ethernet-rx.c 2011-11-11 15:19:27.000000000 -0500
35016 ++++ linux-3.1.1/drivers/staging/octeon/ethernet-rx.c 2011-11-16 18:39:07.000000000 -0500
35017 +@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi
35018 + /* Increment RX stats for virtual ports */
35019 + if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35020 + #ifdef CONFIG_64BIT
35021 +- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35022 +- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35023 ++ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35024 ++ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35025 + #else
35026 +- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35027 +- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35028 ++ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35029 ++ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35030 + #endif
35031 + }
35032 + netif_receive_skb(skb);
35033 +@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi
35034 + dev->name);
35035 + */
35036 + #ifdef CONFIG_64BIT
35037 +- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35038 ++ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35039 + #else
35040 +- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35041 ++ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
35042 + #endif
35043 + dev_kfree_skb_irq(skb);
35044 + }
35045 +diff -urNp linux-3.1.1/drivers/staging/pohmelfs/inode.c linux-3.1.1/drivers/staging/pohmelfs/inode.c
35046 +--- linux-3.1.1/drivers/staging/pohmelfs/inode.c 2011-11-11 15:19:27.000000000 -0500
35047 ++++ linux-3.1.1/drivers/staging/pohmelfs/inode.c 2011-11-16 18:39:07.000000000 -0500
35048 +@@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct su
35049 + mutex_init(&psb->mcache_lock);
35050 + psb->mcache_root = RB_ROOT;
35051 + psb->mcache_timeout = msecs_to_jiffies(5000);
35052 +- atomic_long_set(&psb->mcache_gen, 0);
35053 ++ atomic_long_set_unchecked(&psb->mcache_gen, 0);
35054 +
35055 + psb->trans_max_pages = 100;
35056 +
35057 +@@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct su
35058 + INIT_LIST_HEAD(&psb->crypto_ready_list);
35059 + INIT_LIST_HEAD(&psb->crypto_active_list);
35060 +
35061 +- atomic_set(&psb->trans_gen, 1);
35062 ++ atomic_set_unchecked(&psb->trans_gen, 1);
35063 + atomic_long_set(&psb->total_inodes, 0);
35064 +
35065 + mutex_init(&psb->state_lock);
35066 +diff -urNp linux-3.1.1/drivers/staging/pohmelfs/mcache.c linux-3.1.1/drivers/staging/pohmelfs/mcache.c
35067 +--- linux-3.1.1/drivers/staging/pohmelfs/mcache.c 2011-11-11 15:19:27.000000000 -0500
35068 ++++ linux-3.1.1/drivers/staging/pohmelfs/mcache.c 2011-11-16 18:39:07.000000000 -0500
35069 +@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
35070 + m->data = data;
35071 + m->start = start;
35072 + m->size = size;
35073 +- m->gen = atomic_long_inc_return(&psb->mcache_gen);
35074 ++ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35075 +
35076 + mutex_lock(&psb->mcache_lock);
35077 + err = pohmelfs_mcache_insert(psb, m);
35078 +diff -urNp linux-3.1.1/drivers/staging/pohmelfs/netfs.h linux-3.1.1/drivers/staging/pohmelfs/netfs.h
35079 +--- linux-3.1.1/drivers/staging/pohmelfs/netfs.h 2011-11-11 15:19:27.000000000 -0500
35080 ++++ linux-3.1.1/drivers/staging/pohmelfs/netfs.h 2011-11-16 18:39:07.000000000 -0500
35081 +@@ -571,14 +571,14 @@ struct pohmelfs_config;
35082 + struct pohmelfs_sb {
35083 + struct rb_root mcache_root;
35084 + struct mutex mcache_lock;
35085 +- atomic_long_t mcache_gen;
35086 ++ atomic_long_unchecked_t mcache_gen;
35087 + unsigned long mcache_timeout;
35088 +
35089 + unsigned int idx;
35090 +
35091 + unsigned int trans_retries;
35092 +
35093 +- atomic_t trans_gen;
35094 ++ atomic_unchecked_t trans_gen;
35095 +
35096 + unsigned int crypto_attached_size;
35097 + unsigned int crypto_align_size;
35098 +diff -urNp linux-3.1.1/drivers/staging/pohmelfs/trans.c linux-3.1.1/drivers/staging/pohmelfs/trans.c
35099 +--- linux-3.1.1/drivers/staging/pohmelfs/trans.c 2011-11-11 15:19:27.000000000 -0500
35100 ++++ linux-3.1.1/drivers/staging/pohmelfs/trans.c 2011-11-16 18:39:07.000000000 -0500
35101 +@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
35102 + int err;
35103 + struct netfs_cmd *cmd = t->iovec.iov_base;
35104 +
35105 +- t->gen = atomic_inc_return(&psb->trans_gen);
35106 ++ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35107 +
35108 + cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35109 + t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35110 +diff -urNp linux-3.1.1/drivers/staging/rtl8712/rtl871x_io.h linux-3.1.1/drivers/staging/rtl8712/rtl871x_io.h
35111 +--- linux-3.1.1/drivers/staging/rtl8712/rtl871x_io.h 2011-11-11 15:19:27.000000000 -0500
35112 ++++ linux-3.1.1/drivers/staging/rtl8712/rtl871x_io.h 2011-11-16 18:39:07.000000000 -0500
35113 +@@ -83,7 +83,7 @@ struct _io_ops {
35114 + u8 *pmem);
35115 + u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35116 + u8 *pmem);
35117 +-};
35118 ++} __no_const;
35119 +
35120 + struct io_req {
35121 + struct list_head list;
35122 +diff -urNp linux-3.1.1/drivers/staging/sbe-2t3e3/netdev.c linux-3.1.1/drivers/staging/sbe-2t3e3/netdev.c
35123 +--- linux-3.1.1/drivers/staging/sbe-2t3e3/netdev.c 2011-11-11 15:19:27.000000000 -0500
35124 ++++ linux-3.1.1/drivers/staging/sbe-2t3e3/netdev.c 2011-11-16 18:39:08.000000000 -0500
35125 +@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
35126 + t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35127 +
35128 + if (rlen)
35129 +- if (copy_to_user(data, &resp, rlen))
35130 ++ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35131 + return -EFAULT;
35132 +
35133 + return 0;
35134 +diff -urNp linux-3.1.1/drivers/staging/usbip/usbip_common.h linux-3.1.1/drivers/staging/usbip/usbip_common.h
35135 +--- linux-3.1.1/drivers/staging/usbip/usbip_common.h 2011-11-11 15:19:27.000000000 -0500
35136 ++++ linux-3.1.1/drivers/staging/usbip/usbip_common.h 2011-11-16 18:39:08.000000000 -0500
35137 +@@ -289,7 +289,7 @@ struct usbip_device {
35138 + void (*shutdown)(struct usbip_device *);
35139 + void (*reset)(struct usbip_device *);
35140 + void (*unusable)(struct usbip_device *);
35141 +- } eh_ops;
35142 ++ } __no_const eh_ops;
35143 + };
35144 +
35145 + #if 0
35146 +diff -urNp linux-3.1.1/drivers/staging/usbip/vhci.h linux-3.1.1/drivers/staging/usbip/vhci.h
35147 +--- linux-3.1.1/drivers/staging/usbip/vhci.h 2011-11-11 15:19:27.000000000 -0500
35148 ++++ linux-3.1.1/drivers/staging/usbip/vhci.h 2011-11-16 18:39:08.000000000 -0500
35149 +@@ -85,7 +85,7 @@ struct vhci_hcd {
35150 + unsigned resuming:1;
35151 + unsigned long re_timeout;
35152 +
35153 +- atomic_t seqnum;
35154 ++ atomic_unchecked_t seqnum;
35155 +
35156 + /*
35157 + * NOTE:
35158 +diff -urNp linux-3.1.1/drivers/staging/usbip/vhci_hcd.c linux-3.1.1/drivers/staging/usbip/vhci_hcd.c
35159 +--- linux-3.1.1/drivers/staging/usbip/vhci_hcd.c 2011-11-11 15:19:27.000000000 -0500
35160 ++++ linux-3.1.1/drivers/staging/usbip/vhci_hcd.c 2011-11-16 18:39:08.000000000 -0500
35161 +@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
35162 + return;
35163 + }
35164 +
35165 +- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35166 ++ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35167 + if (priv->seqnum == 0xffff)
35168 + dev_info(&urb->dev->dev, "seqnum max\n");
35169 +
35170 +@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_h
35171 + return -ENOMEM;
35172 + }
35173 +
35174 +- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35175 ++ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35176 + if (unlink->seqnum == 0xffff)
35177 + pr_info("seqnum max\n");
35178 +
35179 +@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hc
35180 + vdev->rhport = rhport;
35181 + }
35182 +
35183 +- atomic_set(&vhci->seqnum, 0);
35184 ++ atomic_set_unchecked(&vhci->seqnum, 0);
35185 + spin_lock_init(&vhci->lock);
35186 +
35187 + hcd->power_budget = 0; /* no limit */
35188 +diff -urNp linux-3.1.1/drivers/staging/usbip/vhci_rx.c linux-3.1.1/drivers/staging/usbip/vhci_rx.c
35189 +--- linux-3.1.1/drivers/staging/usbip/vhci_rx.c 2011-11-11 15:19:27.000000000 -0500
35190 ++++ linux-3.1.1/drivers/staging/usbip/vhci_rx.c 2011-11-16 18:39:08.000000000 -0500
35191 +@@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct
35192 + if (!urb) {
35193 + pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35194 + pr_info("max seqnum %d\n",
35195 +- atomic_read(&the_controller->seqnum));
35196 ++ atomic_read_unchecked(&the_controller->seqnum));
35197 + usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35198 + return;
35199 + }
35200 +diff -urNp linux-3.1.1/drivers/staging/vt6655/hostap.c linux-3.1.1/drivers/staging/vt6655/hostap.c
35201 +--- linux-3.1.1/drivers/staging/vt6655/hostap.c 2011-11-11 15:19:27.000000000 -0500
35202 ++++ linux-3.1.1/drivers/staging/vt6655/hostap.c 2011-11-16 18:39:08.000000000 -0500
35203 +@@ -79,14 +79,13 @@ static int msglevel
35204 + *
35205 + */
35206 +
35207 ++static net_device_ops_no_const apdev_netdev_ops;
35208 ++
35209 + static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35210 + {
35211 + PSDevice apdev_priv;
35212 + struct net_device *dev = pDevice->dev;
35213 + int ret;
35214 +- const struct net_device_ops apdev_netdev_ops = {
35215 +- .ndo_start_xmit = pDevice->tx_80211,
35216 +- };
35217 +
35218 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35219 +
35220 +@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevic
35221 + *apdev_priv = *pDevice;
35222 + memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35223 +
35224 ++ /* only half broken now */
35225 ++ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35226 + pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35227 +
35228 + pDevice->apdev->type = ARPHRD_IEEE80211;
35229 +diff -urNp linux-3.1.1/drivers/staging/vt6656/hostap.c linux-3.1.1/drivers/staging/vt6656/hostap.c
35230 +--- linux-3.1.1/drivers/staging/vt6656/hostap.c 2011-11-11 15:19:27.000000000 -0500
35231 ++++ linux-3.1.1/drivers/staging/vt6656/hostap.c 2011-11-16 18:39:08.000000000 -0500
35232 +@@ -80,14 +80,13 @@ static int msglevel
35233 + *
35234 + */
35235 +
35236 ++static net_device_ops_no_const apdev_netdev_ops;
35237 ++
35238 + static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35239 + {
35240 + PSDevice apdev_priv;
35241 + struct net_device *dev = pDevice->dev;
35242 + int ret;
35243 +- const struct net_device_ops apdev_netdev_ops = {
35244 +- .ndo_start_xmit = pDevice->tx_80211,
35245 +- };
35246 +
35247 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35248 +
35249 +@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevic
35250 + *apdev_priv = *pDevice;
35251 + memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35252 +
35253 ++ /* only half broken now */
35254 ++ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35255 + pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35256 +
35257 + pDevice->apdev->type = ARPHRD_IEEE80211;
35258 +diff -urNp linux-3.1.1/drivers/staging/wlan-ng/hfa384x_usb.c linux-3.1.1/drivers/staging/wlan-ng/hfa384x_usb.c
35259 +--- linux-3.1.1/drivers/staging/wlan-ng/hfa384x_usb.c 2011-11-11 15:19:27.000000000 -0500
35260 ++++ linux-3.1.1/drivers/staging/wlan-ng/hfa384x_usb.c 2011-11-16 18:39:08.000000000 -0500
35261 +@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
35262 +
35263 + struct usbctlx_completor {
35264 + int (*complete) (struct usbctlx_completor *);
35265 +-};
35266 ++} __no_const;
35267 +
35268 + static int
35269 + hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35270 +diff -urNp linux-3.1.1/drivers/staging/zcache/tmem.c linux-3.1.1/drivers/staging/zcache/tmem.c
35271 +--- linux-3.1.1/drivers/staging/zcache/tmem.c 2011-11-11 15:19:27.000000000 -0500
35272 ++++ linux-3.1.1/drivers/staging/zcache/tmem.c 2011-11-16 18:39:08.000000000 -0500
35273 +@@ -39,7 +39,7 @@
35274 + * A tmem host implementation must use this function to register callbacks
35275 + * for memory allocation.
35276 + */
35277 +-static struct tmem_hostops tmem_hostops;
35278 ++static tmem_hostops_no_const tmem_hostops;
35279 +
35280 + static void tmem_objnode_tree_init(void);
35281 +
35282 +@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_h
35283 + * A tmem host implementation must use this function to register
35284 + * callbacks for a page-accessible memory (PAM) implementation
35285 + */
35286 +-static struct tmem_pamops tmem_pamops;
35287 ++static tmem_pamops_no_const tmem_pamops;
35288 +
35289 + void tmem_register_pamops(struct tmem_pamops *m)
35290 + {
35291 +diff -urNp linux-3.1.1/drivers/staging/zcache/tmem.h linux-3.1.1/drivers/staging/zcache/tmem.h
35292 +--- linux-3.1.1/drivers/staging/zcache/tmem.h 2011-11-11 15:19:27.000000000 -0500
35293 ++++ linux-3.1.1/drivers/staging/zcache/tmem.h 2011-11-16 18:39:08.000000000 -0500
35294 +@@ -180,6 +180,7 @@ struct tmem_pamops {
35295 + void (*new_obj)(struct tmem_obj *);
35296 + int (*replace_in_obj)(void *, struct tmem_obj *);
35297 + };
35298 ++typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35299 + extern void tmem_register_pamops(struct tmem_pamops *m);
35300 +
35301 + /* memory allocation methods provided by the host implementation */
35302 +@@ -189,6 +190,7 @@ struct tmem_hostops {
35303 + struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35304 + void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35305 + };
35306 ++typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35307 + extern void tmem_register_hostops(struct tmem_hostops *m);
35308 +
35309 + /* core tmem accessor functions */
35310 +diff -urNp linux-3.1.1/drivers/target/iscsi/iscsi_target.c linux-3.1.1/drivers/target/iscsi/iscsi_target.c
35311 +--- linux-3.1.1/drivers/target/iscsi/iscsi_target.c 2011-11-11 15:19:27.000000000 -0500
35312 ++++ linux-3.1.1/drivers/target/iscsi/iscsi_target.c 2011-11-16 18:39:08.000000000 -0500
35313 +@@ -1368,7 +1368,7 @@ static int iscsit_handle_data_out(struct
35314 + * outstanding_r2ts reaches zero, go ahead and send the delayed
35315 + * TASK_ABORTED status.
35316 + */
35317 +- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
35318 ++ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
35319 + if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
35320 + if (--cmd->outstanding_r2ts < 1) {
35321 + iscsit_stop_dataout_timer(cmd);
35322 +diff -urNp linux-3.1.1/drivers/target/target_core_alua.c linux-3.1.1/drivers/target/target_core_alua.c
35323 +--- linux-3.1.1/drivers/target/target_core_alua.c 2011-11-11 15:19:27.000000000 -0500
35324 ++++ linux-3.1.1/drivers/target/target_core_alua.c 2011-11-16 18:40:29.000000000 -0500
35325 +@@ -723,6 +723,8 @@ static int core_alua_update_tpg_primary_
35326 + char path[ALUA_METADATA_PATH_LEN];
35327 + int len;
35328 +
35329 ++ pax_track_stack();
35330 ++
35331 + memset(path, 0, ALUA_METADATA_PATH_LEN);
35332 +
35333 + len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
35334 +@@ -986,6 +988,8 @@ static int core_alua_update_tpg_secondar
35335 + char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
35336 + int len;
35337 +
35338 ++ pax_track_stack();
35339 ++
35340 + memset(path, 0, ALUA_METADATA_PATH_LEN);
35341 + memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
35342 +
35343 +diff -urNp linux-3.1.1/drivers/target/target_core_cdb.c linux-3.1.1/drivers/target/target_core_cdb.c
35344 +--- linux-3.1.1/drivers/target/target_core_cdb.c 2011-11-11 15:19:27.000000000 -0500
35345 ++++ linux-3.1.1/drivers/target/target_core_cdb.c 2011-11-16 18:40:29.000000000 -0500
35346 +@@ -933,6 +933,8 @@ target_emulate_modesense(struct se_cmd *
35347 + int length = 0;
35348 + unsigned char buf[SE_MODE_PAGE_BUF];
35349 +
35350 ++ pax_track_stack();
35351 ++
35352 + memset(buf, 0, SE_MODE_PAGE_BUF);
35353 +
35354 + switch (cdb[2] & 0x3f) {
35355 +diff -urNp linux-3.1.1/drivers/target/target_core_configfs.c linux-3.1.1/drivers/target/target_core_configfs.c
35356 +--- linux-3.1.1/drivers/target/target_core_configfs.c 2011-11-11 15:19:27.000000000 -0500
35357 ++++ linux-3.1.1/drivers/target/target_core_configfs.c 2011-11-16 19:04:37.000000000 -0500
35358 +@@ -1267,6 +1267,8 @@ static ssize_t target_core_dev_pr_show_a
35359 + ssize_t len = 0;
35360 + int reg_count = 0, prf_isid;
35361 +
35362 ++ pax_track_stack();
35363 ++
35364 + if (!su_dev->se_dev_ptr)
35365 + return -ENODEV;
35366 +
35367 +diff -urNp linux-3.1.1/drivers/target/target_core_pr.c linux-3.1.1/drivers/target/target_core_pr.c
35368 +--- linux-3.1.1/drivers/target/target_core_pr.c 2011-11-11 15:19:27.000000000 -0500
35369 ++++ linux-3.1.1/drivers/target/target_core_pr.c 2011-11-16 18:40:29.000000000 -0500
35370 +@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
35371 + unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
35372 + u16 tpgt;
35373 +
35374 ++ pax_track_stack();
35375 ++
35376 + memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
35377 + memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
35378 + /*
35379 +@@ -1867,6 +1869,8 @@ static int __core_scsi3_update_aptpl_buf
35380 + ssize_t len = 0;
35381 + int reg_count = 0;
35382 +
35383 ++ pax_track_stack();
35384 ++
35385 + memset(buf, 0, pr_aptpl_buf_len);
35386 + /*
35387 + * Called to clear metadata once APTPL has been deactivated.
35388 +@@ -1989,6 +1993,8 @@ static int __core_scsi3_write_aptpl_to_f
35389 + char path[512];
35390 + int ret;
35391 +
35392 ++ pax_track_stack();
35393 ++
35394 + memset(iov, 0, sizeof(struct iovec));
35395 + memset(path, 0, 512);
35396 +
35397 +diff -urNp linux-3.1.1/drivers/target/target_core_tmr.c linux-3.1.1/drivers/target/target_core_tmr.c
35398 +--- linux-3.1.1/drivers/target/target_core_tmr.c 2011-11-11 15:19:27.000000000 -0500
35399 ++++ linux-3.1.1/drivers/target/target_core_tmr.c 2011-11-16 18:39:08.000000000 -0500
35400 +@@ -255,7 +255,7 @@ static void core_tmr_drain_task_list(
35401 + cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
35402 + cmd->t_task_list_num,
35403 + atomic_read(&cmd->t_task_cdbs_left),
35404 +- atomic_read(&cmd->t_task_cdbs_sent),
35405 ++ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35406 + atomic_read(&cmd->t_transport_active),
35407 + atomic_read(&cmd->t_transport_stop),
35408 + atomic_read(&cmd->t_transport_sent));
35409 +@@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
35410 + pr_debug("LUN_RESET: got t_transport_active = 1 for"
35411 + " task: %p, t_fe_count: %d dev: %p\n", task,
35412 + fe_count, dev);
35413 +- atomic_set(&cmd->t_transport_aborted, 1);
35414 ++ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35415 + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35416 +
35417 + core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35418 +@@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
35419 + }
35420 + pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35421 + " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35422 +- atomic_set(&cmd->t_transport_aborted, 1);
35423 ++ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35424 + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35425 +
35426 + core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35427 +diff -urNp linux-3.1.1/drivers/target/target_core_transport.c linux-3.1.1/drivers/target/target_core_transport.c
35428 +--- linux-3.1.1/drivers/target/target_core_transport.c 2011-11-11 15:19:27.000000000 -0500
35429 ++++ linux-3.1.1/drivers/target/target_core_transport.c 2011-11-16 18:39:08.000000000 -0500
35430 +@@ -1445,7 +1445,7 @@ struct se_device *transport_add_device_t
35431 +
35432 + dev->queue_depth = dev_limits->queue_depth;
35433 + atomic_set(&dev->depth_left, dev->queue_depth);
35434 +- atomic_set(&dev->dev_ordered_id, 0);
35435 ++ atomic_set_unchecked(&dev->dev_ordered_id, 0);
35436 +
35437 + se_dev_set_default_attribs(dev, dev_limits);
35438 +
35439 +@@ -1633,7 +1633,7 @@ static int transport_check_alloc_task_at
35440 + * Used to determine when ORDERED commands should go from
35441 + * Dormant to Active status.
35442 + */
35443 +- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
35444 ++ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
35445 + smp_mb__after_atomic_inc();
35446 + pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35447 + cmd->se_ordered_id, cmd->sam_task_attr,
35448 +@@ -1960,7 +1960,7 @@ static void transport_generic_request_fa
35449 + " t_transport_active: %d t_transport_stop: %d"
35450 + " t_transport_sent: %d\n", cmd->t_task_list_num,
35451 + atomic_read(&cmd->t_task_cdbs_left),
35452 +- atomic_read(&cmd->t_task_cdbs_sent),
35453 ++ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35454 + atomic_read(&cmd->t_task_cdbs_ex_left),
35455 + atomic_read(&cmd->t_transport_active),
35456 + atomic_read(&cmd->t_transport_stop),
35457 +@@ -2460,9 +2460,9 @@ check_depth:
35458 + spin_lock_irqsave(&cmd->t_state_lock, flags);
35459 + atomic_set(&task->task_active, 1);
35460 + atomic_set(&task->task_sent, 1);
35461 +- atomic_inc(&cmd->t_task_cdbs_sent);
35462 ++ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
35463 +
35464 +- if (atomic_read(&cmd->t_task_cdbs_sent) ==
35465 ++ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
35466 + cmd->t_task_list_num)
35467 + atomic_set(&cmd->transport_sent, 1);
35468 +
35469 +@@ -4665,7 +4665,7 @@ static void transport_generic_wait_for_t
35470 + atomic_set(&cmd->transport_lun_stop, 0);
35471 + }
35472 + if (!atomic_read(&cmd->t_transport_active) ||
35473 +- atomic_read(&cmd->t_transport_aborted))
35474 ++ atomic_read_unchecked(&cmd->t_transport_aborted))
35475 + goto remove;
35476 +
35477 + atomic_set(&cmd->t_transport_stop, 1);
35478 +@@ -4900,7 +4900,7 @@ int transport_check_aborted_status(struc
35479 + {
35480 + int ret = 0;
35481 +
35482 +- if (atomic_read(&cmd->t_transport_aborted) != 0) {
35483 ++ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
35484 + if (!send_status ||
35485 + (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35486 + return 1;
35487 +@@ -4937,7 +4937,7 @@ void transport_send_task_abort(struct se
35488 + */
35489 + if (cmd->data_direction == DMA_TO_DEVICE) {
35490 + if (cmd->se_tfo->write_pending_status(cmd) != 0) {
35491 +- atomic_inc(&cmd->t_transport_aborted);
35492 ++ atomic_inc_unchecked(&cmd->t_transport_aborted);
35493 + smp_mb__after_atomic_inc();
35494 + cmd->scsi_status = SAM_STAT_TASK_ABORTED;
35495 + transport_new_cmd_failure(cmd);
35496 +@@ -5051,7 +5051,7 @@ static void transport_processing_shutdow
35497 + cmd->se_tfo->get_task_tag(cmd),
35498 + cmd->t_task_list_num,
35499 + atomic_read(&cmd->t_task_cdbs_left),
35500 +- atomic_read(&cmd->t_task_cdbs_sent),
35501 ++ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35502 + atomic_read(&cmd->t_transport_active),
35503 + atomic_read(&cmd->t_transport_stop),
35504 + atomic_read(&cmd->t_transport_sent));
35505 +diff -urNp linux-3.1.1/drivers/telephony/ixj.c linux-3.1.1/drivers/telephony/ixj.c
35506 +--- linux-3.1.1/drivers/telephony/ixj.c 2011-11-11 15:19:27.000000000 -0500
35507 ++++ linux-3.1.1/drivers/telephony/ixj.c 2011-11-16 18:40:29.000000000 -0500
35508 +@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
35509 + bool mContinue;
35510 + char *pIn, *pOut;
35511 +
35512 ++ pax_track_stack();
35513 ++
35514 + if (!SCI_Prepare(j))
35515 + return 0;
35516 +
35517 +diff -urNp linux-3.1.1/drivers/tty/hvc/hvcs.c linux-3.1.1/drivers/tty/hvc/hvcs.c
35518 +--- linux-3.1.1/drivers/tty/hvc/hvcs.c 2011-11-11 15:19:27.000000000 -0500
35519 ++++ linux-3.1.1/drivers/tty/hvc/hvcs.c 2011-11-16 18:39:08.000000000 -0500
35520 +@@ -83,6 +83,7 @@
35521 + #include <asm/hvcserver.h>
35522 + #include <asm/uaccess.h>
35523 + #include <asm/vio.h>
35524 ++#include <asm/local.h>
35525 +
35526 + /*
35527 + * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35528 +@@ -270,7 +271,7 @@ struct hvcs_struct {
35529 + unsigned int index;
35530 +
35531 + struct tty_struct *tty;
35532 +- int open_count;
35533 ++ local_t open_count;
35534 +
35535 + /*
35536 + * Used to tell the driver kernel_thread what operations need to take
35537 +@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
35538 +
35539 + spin_lock_irqsave(&hvcsd->lock, flags);
35540 +
35541 +- if (hvcsd->open_count > 0) {
35542 ++ if (local_read(&hvcsd->open_count) > 0) {
35543 + spin_unlock_irqrestore(&hvcsd->lock, flags);
35544 + printk(KERN_INFO "HVCS: vterm state unchanged. "
35545 + "The hvcs device node is still in use.\n");
35546 +@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
35547 + if ((retval = hvcs_partner_connect(hvcsd)))
35548 + goto error_release;
35549 +
35550 +- hvcsd->open_count = 1;
35551 ++ local_set(&hvcsd->open_count, 1);
35552 + hvcsd->tty = tty;
35553 + tty->driver_data = hvcsd;
35554 +
35555 +@@ -1179,7 +1180,7 @@ fast_open:
35556 +
35557 + spin_lock_irqsave(&hvcsd->lock, flags);
35558 + kref_get(&hvcsd->kref);
35559 +- hvcsd->open_count++;
35560 ++ local_inc(&hvcsd->open_count);
35561 + hvcsd->todo_mask |= HVCS_SCHED_READ;
35562 + spin_unlock_irqrestore(&hvcsd->lock, flags);
35563 +
35564 +@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
35565 + hvcsd = tty->driver_data;
35566 +
35567 + spin_lock_irqsave(&hvcsd->lock, flags);
35568 +- if (--hvcsd->open_count == 0) {
35569 ++ if (local_dec_and_test(&hvcsd->open_count)) {
35570 +
35571 + vio_disable_interrupts(hvcsd->vdev);
35572 +
35573 +@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
35574 + free_irq(irq, hvcsd);
35575 + kref_put(&hvcsd->kref, destroy_hvcs_struct);
35576 + return;
35577 +- } else if (hvcsd->open_count < 0) {
35578 ++ } else if (local_read(&hvcsd->open_count) < 0) {
35579 + printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35580 + " is missmanaged.\n",
35581 +- hvcsd->vdev->unit_address, hvcsd->open_count);
35582 ++ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35583 + }
35584 +
35585 + spin_unlock_irqrestore(&hvcsd->lock, flags);
35586 +@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
35587 +
35588 + spin_lock_irqsave(&hvcsd->lock, flags);
35589 + /* Preserve this so that we know how many kref refs to put */
35590 +- temp_open_count = hvcsd->open_count;
35591 ++ temp_open_count = local_read(&hvcsd->open_count);
35592 +
35593 + /*
35594 + * Don't kref put inside the spinlock because the destruction
35595 +@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
35596 + hvcsd->tty->driver_data = NULL;
35597 + hvcsd->tty = NULL;
35598 +
35599 +- hvcsd->open_count = 0;
35600 ++ local_set(&hvcsd->open_count, 0);
35601 +
35602 + /* This will drop any buffered data on the floor which is OK in a hangup
35603 + * scenario. */
35604 +@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
35605 + * the middle of a write operation? This is a crummy place to do this
35606 + * but we want to keep it all in the spinlock.
35607 + */
35608 +- if (hvcsd->open_count <= 0) {
35609 ++ if (local_read(&hvcsd->open_count) <= 0) {
35610 + spin_unlock_irqrestore(&hvcsd->lock, flags);
35611 + return -ENODEV;
35612 + }
35613 +@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
35614 + {
35615 + struct hvcs_struct *hvcsd = tty->driver_data;
35616 +
35617 +- if (!hvcsd || hvcsd->open_count <= 0)
35618 ++ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35619 + return 0;
35620 +
35621 + return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35622 +diff -urNp linux-3.1.1/drivers/tty/ipwireless/tty.c linux-3.1.1/drivers/tty/ipwireless/tty.c
35623 +--- linux-3.1.1/drivers/tty/ipwireless/tty.c 2011-11-11 15:19:27.000000000 -0500
35624 ++++ linux-3.1.1/drivers/tty/ipwireless/tty.c 2011-11-16 18:39:08.000000000 -0500
35625 +@@ -29,6 +29,7 @@
35626 + #include <linux/tty_driver.h>
35627 + #include <linux/tty_flip.h>
35628 + #include <linux/uaccess.h>
35629 ++#include <asm/local.h>
35630 +
35631 + #include "tty.h"
35632 + #include "network.h"
35633 +@@ -51,7 +52,7 @@ struct ipw_tty {
35634 + int tty_type;
35635 + struct ipw_network *network;
35636 + struct tty_struct *linux_tty;
35637 +- int open_count;
35638 ++ local_t open_count;
35639 + unsigned int control_lines;
35640 + struct mutex ipw_tty_mutex;
35641 + int tx_bytes_queued;
35642 +@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
35643 + mutex_unlock(&tty->ipw_tty_mutex);
35644 + return -ENODEV;
35645 + }
35646 +- if (tty->open_count == 0)
35647 ++ if (local_read(&tty->open_count) == 0)
35648 + tty->tx_bytes_queued = 0;
35649 +
35650 +- tty->open_count++;
35651 ++ local_inc(&tty->open_count);
35652 +
35653 + tty->linux_tty = linux_tty;
35654 + linux_tty->driver_data = tty;
35655 +@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
35656 +
35657 + static void do_ipw_close(struct ipw_tty *tty)
35658 + {
35659 +- tty->open_count--;
35660 +-
35661 +- if (tty->open_count == 0) {
35662 ++ if (local_dec_return(&tty->open_count) == 0) {
35663 + struct tty_struct *linux_tty = tty->linux_tty;
35664 +
35665 + if (linux_tty != NULL) {
35666 +@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
35667 + return;
35668 +
35669 + mutex_lock(&tty->ipw_tty_mutex);
35670 +- if (tty->open_count == 0) {
35671 ++ if (local_read(&tty->open_count) == 0) {
35672 + mutex_unlock(&tty->ipw_tty_mutex);
35673 + return;
35674 + }
35675 +@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
35676 + return;
35677 + }
35678 +
35679 +- if (!tty->open_count) {
35680 ++ if (!local_read(&tty->open_count)) {
35681 + mutex_unlock(&tty->ipw_tty_mutex);
35682 + return;
35683 + }
35684 +@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
35685 + return -ENODEV;
35686 +
35687 + mutex_lock(&tty->ipw_tty_mutex);
35688 +- if (!tty->open_count) {
35689 ++ if (!local_read(&tty->open_count)) {
35690 + mutex_unlock(&tty->ipw_tty_mutex);
35691 + return -EINVAL;
35692 + }
35693 +@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
35694 + if (!tty)
35695 + return -ENODEV;
35696 +
35697 +- if (!tty->open_count)
35698 ++ if (!local_read(&tty->open_count))
35699 + return -EINVAL;
35700 +
35701 + room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35702 +@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
35703 + if (!tty)
35704 + return 0;
35705 +
35706 +- if (!tty->open_count)
35707 ++ if (!local_read(&tty->open_count))
35708 + return 0;
35709 +
35710 + return tty->tx_bytes_queued;
35711 +@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
35712 + if (!tty)
35713 + return -ENODEV;
35714 +
35715 +- if (!tty->open_count)
35716 ++ if (!local_read(&tty->open_count))
35717 + return -EINVAL;
35718 +
35719 + return get_control_lines(tty);
35720 +@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
35721 + if (!tty)
35722 + return -ENODEV;
35723 +
35724 +- if (!tty->open_count)
35725 ++ if (!local_read(&tty->open_count))
35726 + return -EINVAL;
35727 +
35728 + return set_control_lines(tty, set, clear);
35729 +@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
35730 + if (!tty)
35731 + return -ENODEV;
35732 +
35733 +- if (!tty->open_count)
35734 ++ if (!local_read(&tty->open_count))
35735 + return -EINVAL;
35736 +
35737 + /* FIXME: Exactly how is the tty object locked here .. */
35738 +@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
35739 + against a parallel ioctl etc */
35740 + mutex_lock(&ttyj->ipw_tty_mutex);
35741 + }
35742 +- while (ttyj->open_count)
35743 ++ while (local_read(&ttyj->open_count))
35744 + do_ipw_close(ttyj);
35745 + ipwireless_disassociate_network_ttys(network,
35746 + ttyj->channel_idx);
35747 +diff -urNp linux-3.1.1/drivers/tty/n_gsm.c linux-3.1.1/drivers/tty/n_gsm.c
35748 +--- linux-3.1.1/drivers/tty/n_gsm.c 2011-11-11 15:19:27.000000000 -0500
35749 ++++ linux-3.1.1/drivers/tty/n_gsm.c 2011-11-16 18:39:08.000000000 -0500
35750 +@@ -1625,7 +1625,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
35751 + kref_init(&dlci->ref);
35752 + mutex_init(&dlci->mutex);
35753 + dlci->fifo = &dlci->_fifo;
35754 +- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35755 ++ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35756 + kfree(dlci);
35757 + return NULL;
35758 + }
35759 +diff -urNp linux-3.1.1/drivers/tty/n_tty.c linux-3.1.1/drivers/tty/n_tty.c
35760 +--- linux-3.1.1/drivers/tty/n_tty.c 2011-11-11 15:19:27.000000000 -0500
35761 ++++ linux-3.1.1/drivers/tty/n_tty.c 2011-11-16 18:39:08.000000000 -0500
35762 +@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
35763 + {
35764 + *ops = tty_ldisc_N_TTY;
35765 + ops->owner = NULL;
35766 +- ops->refcount = ops->flags = 0;
35767 ++ atomic_set(&ops->refcount, 0);
35768 ++ ops->flags = 0;
35769 + }
35770 + EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35771 +diff -urNp linux-3.1.1/drivers/tty/pty.c linux-3.1.1/drivers/tty/pty.c
35772 +--- linux-3.1.1/drivers/tty/pty.c 2011-11-11 15:19:27.000000000 -0500
35773 ++++ linux-3.1.1/drivers/tty/pty.c 2011-11-16 18:39:08.000000000 -0500
35774 +@@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
35775 + register_sysctl_table(pty_root_table);
35776 +
35777 + /* Now create the /dev/ptmx special device */
35778 ++ pax_open_kernel();
35779 + tty_default_fops(&ptmx_fops);
35780 +- ptmx_fops.open = ptmx_open;
35781 ++ *(void **)&ptmx_fops.open = ptmx_open;
35782 ++ pax_close_kernel();
35783 +
35784 + cdev_init(&ptmx_cdev, &ptmx_fops);
35785 + if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35786 +diff -urNp linux-3.1.1/drivers/tty/rocket.c linux-3.1.1/drivers/tty/rocket.c
35787 +--- linux-3.1.1/drivers/tty/rocket.c 2011-11-11 15:19:27.000000000 -0500
35788 ++++ linux-3.1.1/drivers/tty/rocket.c 2011-11-16 18:40:29.000000000 -0500
35789 +@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
35790 + struct rocket_ports tmp;
35791 + int board;
35792 +
35793 ++ pax_track_stack();
35794 ++
35795 + if (!retports)
35796 + return -EFAULT;
35797 + memset(&tmp, 0, sizeof (tmp));
35798 +diff -urNp linux-3.1.1/drivers/tty/serial/kgdboc.c linux-3.1.1/drivers/tty/serial/kgdboc.c
35799 +--- linux-3.1.1/drivers/tty/serial/kgdboc.c 2011-11-11 15:19:27.000000000 -0500
35800 ++++ linux-3.1.1/drivers/tty/serial/kgdboc.c 2011-11-16 18:39:08.000000000 -0500
35801 +@@ -23,8 +23,9 @@
35802 + #define MAX_CONFIG_LEN 40
35803 +
35804 + static struct kgdb_io kgdboc_io_ops;
35805 ++static struct kgdb_io kgdboc_io_ops_console;
35806 +
35807 +-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35808 ++/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35809 + static int configured = -1;
35810 +
35811 + static char config[MAX_CONFIG_LEN];
35812 +@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
35813 + kgdboc_unregister_kbd();
35814 + if (configured == 1)
35815 + kgdb_unregister_io_module(&kgdboc_io_ops);
35816 ++ else if (configured == 2)
35817 ++ kgdb_unregister_io_module(&kgdboc_io_ops_console);
35818 + }
35819 +
35820 + static int configure_kgdboc(void)
35821 +@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
35822 + int err;
35823 + char *cptr = config;
35824 + struct console *cons;
35825 ++ int is_console = 0;
35826 +
35827 + err = kgdboc_option_setup(config);
35828 + if (err || !strlen(config) || isspace(config[0]))
35829 + goto noconfig;
35830 +
35831 + err = -ENODEV;
35832 +- kgdboc_io_ops.is_console = 0;
35833 + kgdb_tty_driver = NULL;
35834 +
35835 + kgdboc_use_kms = 0;
35836 +@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
35837 + int idx;
35838 + if (cons->device && cons->device(cons, &idx) == p &&
35839 + idx == tty_line) {
35840 +- kgdboc_io_ops.is_console = 1;
35841 ++ is_console = 1;
35842 + break;
35843 + }
35844 + cons = cons->next;
35845 +@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
35846 + kgdb_tty_line = tty_line;
35847 +
35848 + do_register:
35849 +- err = kgdb_register_io_module(&kgdboc_io_ops);
35850 ++ if (is_console) {
35851 ++ err = kgdb_register_io_module(&kgdboc_io_ops_console);
35852 ++ configured = 2;
35853 ++ } else {
35854 ++ err = kgdb_register_io_module(&kgdboc_io_ops);
35855 ++ configured = 1;
35856 ++ }
35857 + if (err)
35858 + goto noconfig;
35859 +
35860 +- configured = 1;
35861 +-
35862 + return 0;
35863 +
35864 + noconfig:
35865 +@@ -212,7 +219,7 @@ noconfig:
35866 + static int __init init_kgdboc(void)
35867 + {
35868 + /* Already configured? */
35869 +- if (configured == 1)
35870 ++ if (configured >= 1)
35871 + return 0;
35872 +
35873 + return configure_kgdboc();
35874 +@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
35875 + if (config[len - 1] == '\n')
35876 + config[len - 1] = '\0';
35877 +
35878 +- if (configured == 1)
35879 ++ if (configured >= 1)
35880 + cleanup_kgdboc();
35881 +
35882 + /* Go and configure with the new params. */
35883 +@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
35884 + .post_exception = kgdboc_post_exp_handler,
35885 + };
35886 +
35887 ++static struct kgdb_io kgdboc_io_ops_console = {
35888 ++ .name = "kgdboc",
35889 ++ .read_char = kgdboc_get_char,
35890 ++ .write_char = kgdboc_put_char,
35891 ++ .pre_exception = kgdboc_pre_exp_handler,
35892 ++ .post_exception = kgdboc_post_exp_handler,
35893 ++ .is_console = 1
35894 ++};
35895 ++
35896 + #ifdef CONFIG_KGDB_SERIAL_CONSOLE
35897 + /* This is only available if kgdboc is a built in for early debugging */
35898 + static int __init kgdboc_early_init(char *opt)
35899 +diff -urNp linux-3.1.1/drivers/tty/serial/mfd.c linux-3.1.1/drivers/tty/serial/mfd.c
35900 +--- linux-3.1.1/drivers/tty/serial/mfd.c 2011-11-11 15:19:27.000000000 -0500
35901 ++++ linux-3.1.1/drivers/tty/serial/mfd.c 2011-11-16 18:39:08.000000000 -0500
35902 +@@ -1423,7 +1423,7 @@ static void serial_hsu_remove(struct pci
35903 + }
35904 +
35905 + /* First 3 are UART ports, and the 4th is the DMA */
35906 +-static const struct pci_device_id pci_ids[] __devinitdata = {
35907 ++static const struct pci_device_id pci_ids[] __devinitconst = {
35908 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
35909 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
35910 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
35911 +diff -urNp linux-3.1.1/drivers/tty/serial/mrst_max3110.c linux-3.1.1/drivers/tty/serial/mrst_max3110.c
35912 +--- linux-3.1.1/drivers/tty/serial/mrst_max3110.c 2011-11-11 15:19:27.000000000 -0500
35913 ++++ linux-3.1.1/drivers/tty/serial/mrst_max3110.c 2011-11-16 18:40:29.000000000 -0500
35914 +@@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
35915 + int loop = 1, num, total = 0;
35916 + u8 recv_buf[512], *pbuf;
35917 +
35918 ++ pax_track_stack();
35919 ++
35920 + pbuf = recv_buf;
35921 + do {
35922 + num = max3110_read_multi(max, pbuf);
35923 +diff -urNp linux-3.1.1/drivers/tty/tty_io.c linux-3.1.1/drivers/tty/tty_io.c
35924 +--- linux-3.1.1/drivers/tty/tty_io.c 2011-11-11 15:19:27.000000000 -0500
35925 ++++ linux-3.1.1/drivers/tty/tty_io.c 2011-11-16 18:39:08.000000000 -0500
35926 +@@ -3238,7 +3238,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
35927 +
35928 + void tty_default_fops(struct file_operations *fops)
35929 + {
35930 +- *fops = tty_fops;
35931 ++ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
35932 + }
35933 +
35934 + /*
35935 +diff -urNp linux-3.1.1/drivers/tty/tty_ldisc.c linux-3.1.1/drivers/tty/tty_ldisc.c
35936 +--- linux-3.1.1/drivers/tty/tty_ldisc.c 2011-11-11 15:19:27.000000000 -0500
35937 ++++ linux-3.1.1/drivers/tty/tty_ldisc.c 2011-11-16 18:39:08.000000000 -0500
35938 +@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
35939 + if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
35940 + struct tty_ldisc_ops *ldo = ld->ops;
35941 +
35942 +- ldo->refcount--;
35943 ++ atomic_dec(&ldo->refcount);
35944 + module_put(ldo->owner);
35945 + spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35946 +
35947 +@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
35948 + spin_lock_irqsave(&tty_ldisc_lock, flags);
35949 + tty_ldiscs[disc] = new_ldisc;
35950 + new_ldisc->num = disc;
35951 +- new_ldisc->refcount = 0;
35952 ++ atomic_set(&new_ldisc->refcount, 0);
35953 + spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35954 +
35955 + return ret;
35956 +@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
35957 + return -EINVAL;
35958 +
35959 + spin_lock_irqsave(&tty_ldisc_lock, flags);
35960 +- if (tty_ldiscs[disc]->refcount)
35961 ++ if (atomic_read(&tty_ldiscs[disc]->refcount))
35962 + ret = -EBUSY;
35963 + else
35964 + tty_ldiscs[disc] = NULL;
35965 +@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
35966 + if (ldops) {
35967 + ret = ERR_PTR(-EAGAIN);
35968 + if (try_module_get(ldops->owner)) {
35969 +- ldops->refcount++;
35970 ++ atomic_inc(&ldops->refcount);
35971 + ret = ldops;
35972 + }
35973 + }
35974 +@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
35975 + unsigned long flags;
35976 +
35977 + spin_lock_irqsave(&tty_ldisc_lock, flags);
35978 +- ldops->refcount--;
35979 ++ atomic_dec(&ldops->refcount);
35980 + module_put(ldops->owner);
35981 + spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35982 + }
35983 +diff -urNp linux-3.1.1/drivers/tty/vt/keyboard.c linux-3.1.1/drivers/tty/vt/keyboard.c
35984 +--- linux-3.1.1/drivers/tty/vt/keyboard.c 2011-11-11 15:19:27.000000000 -0500
35985 ++++ linux-3.1.1/drivers/tty/vt/keyboard.c 2011-11-16 18:40:29.000000000 -0500
35986 +@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, u
35987 + kbd->kbdmode == VC_OFF) &&
35988 + value != KVAL(K_SAK))
35989 + return; /* SAK is allowed even in raw mode */
35990 ++
35991 ++#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
35992 ++ {
35993 ++ void *func = fn_handler[value];
35994 ++ if (func == fn_show_state || func == fn_show_ptregs ||
35995 ++ func == fn_show_mem)
35996 ++ return;
35997 ++ }
35998 ++#endif
35999 ++
36000 + fn_handler[value](vc);
36001 + }
36002 +
36003 +diff -urNp linux-3.1.1/drivers/tty/vt/vt.c linux-3.1.1/drivers/tty/vt/vt.c
36004 +--- linux-3.1.1/drivers/tty/vt/vt.c 2011-11-11 15:19:27.000000000 -0500
36005 ++++ linux-3.1.1/drivers/tty/vt/vt.c 2011-11-16 18:39:08.000000000 -0500
36006 +@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
36007 +
36008 + static void notify_write(struct vc_data *vc, unsigned int unicode)
36009 + {
36010 +- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
36011 ++ struct vt_notifier_param param = { .vc = vc, .c = unicode };
36012 + atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
36013 + }
36014 +
36015 +diff -urNp linux-3.1.1/drivers/tty/vt/vt_ioctl.c linux-3.1.1/drivers/tty/vt/vt_ioctl.c
36016 +--- linux-3.1.1/drivers/tty/vt/vt_ioctl.c 2011-11-11 15:19:27.000000000 -0500
36017 ++++ linux-3.1.1/drivers/tty/vt/vt_ioctl.c 2011-11-16 18:40:29.000000000 -0500
36018 +@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
36019 + if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
36020 + return -EFAULT;
36021 +
36022 +- if (!capable(CAP_SYS_TTY_CONFIG))
36023 +- perm = 0;
36024 +-
36025 + switch (cmd) {
36026 + case KDGKBENT:
36027 + key_map = key_maps[s];
36028 +@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
36029 + val = (i ? K_HOLE : K_NOSUCHMAP);
36030 + return put_user(val, &user_kbe->kb_value);
36031 + case KDSKBENT:
36032 ++ if (!capable(CAP_SYS_TTY_CONFIG))
36033 ++ perm = 0;
36034 ++
36035 + if (!perm)
36036 + return -EPERM;
36037 + if (!i && v == K_NOSUCHMAP) {
36038 +@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
36039 + int i, j, k;
36040 + int ret;
36041 +
36042 +- if (!capable(CAP_SYS_TTY_CONFIG))
36043 +- perm = 0;
36044 +-
36045 + kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
36046 + if (!kbs) {
36047 + ret = -ENOMEM;
36048 +@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
36049 + kfree(kbs);
36050 + return ((p && *p) ? -EOVERFLOW : 0);
36051 + case KDSKBSENT:
36052 ++ if (!capable(CAP_SYS_TTY_CONFIG))
36053 ++ perm = 0;
36054 ++
36055 + if (!perm) {
36056 + ret = -EPERM;
36057 + goto reterr;
36058 +diff -urNp linux-3.1.1/drivers/uio/uio.c linux-3.1.1/drivers/uio/uio.c
36059 +--- linux-3.1.1/drivers/uio/uio.c 2011-11-11 15:19:27.000000000 -0500
36060 ++++ linux-3.1.1/drivers/uio/uio.c 2011-11-16 18:39:08.000000000 -0500
36061 +@@ -25,6 +25,7 @@
36062 + #include <linux/kobject.h>
36063 + #include <linux/cdev.h>
36064 + #include <linux/uio_driver.h>
36065 ++#include <asm/local.h>
36066 +
36067 + #define UIO_MAX_DEVICES (1U << MINORBITS)
36068 +
36069 +@@ -32,10 +33,10 @@ struct uio_device {
36070 + struct module *owner;
36071 + struct device *dev;
36072 + int minor;
36073 +- atomic_t event;
36074 ++ atomic_unchecked_t event;
36075 + struct fasync_struct *async_queue;
36076 + wait_queue_head_t wait;
36077 +- int vma_count;
36078 ++ local_t vma_count;
36079 + struct uio_info *info;
36080 + struct kobject *map_dir;
36081 + struct kobject *portio_dir;
36082 +@@ -242,7 +243,7 @@ static ssize_t show_event(struct device
36083 + struct device_attribute *attr, char *buf)
36084 + {
36085 + struct uio_device *idev = dev_get_drvdata(dev);
36086 +- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
36087 ++ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
36088 + }
36089 +
36090 + static struct device_attribute uio_class_attributes[] = {
36091 +@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *i
36092 + {
36093 + struct uio_device *idev = info->uio_dev;
36094 +
36095 +- atomic_inc(&idev->event);
36096 ++ atomic_inc_unchecked(&idev->event);
36097 + wake_up_interruptible(&idev->wait);
36098 + kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
36099 + }
36100 +@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode,
36101 + }
36102 +
36103 + listener->dev = idev;
36104 +- listener->event_count = atomic_read(&idev->event);
36105 ++ listener->event_count = atomic_read_unchecked(&idev->event);
36106 + filep->private_data = listener;
36107 +
36108 + if (idev->info->open) {
36109 +@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file
36110 + return -EIO;
36111 +
36112 + poll_wait(filep, &idev->wait, wait);
36113 +- if (listener->event_count != atomic_read(&idev->event))
36114 ++ if (listener->event_count != atomic_read_unchecked(&idev->event))
36115 + return POLLIN | POLLRDNORM;
36116 + return 0;
36117 + }
36118 +@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *fil
36119 + do {
36120 + set_current_state(TASK_INTERRUPTIBLE);
36121 +
36122 +- event_count = atomic_read(&idev->event);
36123 ++ event_count = atomic_read_unchecked(&idev->event);
36124 + if (event_count != listener->event_count) {
36125 + if (copy_to_user(buf, &event_count, count))
36126 + retval = -EFAULT;
36127 +@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_
36128 + static void uio_vma_open(struct vm_area_struct *vma)
36129 + {
36130 + struct uio_device *idev = vma->vm_private_data;
36131 +- idev->vma_count++;
36132 ++ local_inc(&idev->vma_count);
36133 + }
36134 +
36135 + static void uio_vma_close(struct vm_area_struct *vma)
36136 + {
36137 + struct uio_device *idev = vma->vm_private_data;
36138 +- idev->vma_count--;
36139 ++ local_dec(&idev->vma_count);
36140 + }
36141 +
36142 + static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36143 +@@ -823,7 +824,7 @@ int __uio_register_device(struct module
36144 + idev->owner = owner;
36145 + idev->info = info;
36146 + init_waitqueue_head(&idev->wait);
36147 +- atomic_set(&idev->event, 0);
36148 ++ atomic_set_unchecked(&idev->event, 0);
36149 +
36150 + ret = uio_get_minor(idev);
36151 + if (ret)
36152 +diff -urNp linux-3.1.1/drivers/usb/atm/cxacru.c linux-3.1.1/drivers/usb/atm/cxacru.c
36153 +--- linux-3.1.1/drivers/usb/atm/cxacru.c 2011-11-11 15:19:27.000000000 -0500
36154 ++++ linux-3.1.1/drivers/usb/atm/cxacru.c 2011-11-16 18:39:08.000000000 -0500
36155 +@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
36156 + ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
36157 + if (ret < 2)
36158 + return -EINVAL;
36159 +- if (index < 0 || index > 0x7f)
36160 ++ if (index > 0x7f)
36161 + return -EINVAL;
36162 + pos += tmp;
36163 +
36164 +diff -urNp linux-3.1.1/drivers/usb/atm/usbatm.c linux-3.1.1/drivers/usb/atm/usbatm.c
36165 +--- linux-3.1.1/drivers/usb/atm/usbatm.c 2011-11-11 15:19:27.000000000 -0500
36166 ++++ linux-3.1.1/drivers/usb/atm/usbatm.c 2011-11-16 18:39:08.000000000 -0500
36167 +@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
36168 + if (printk_ratelimit())
36169 + atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
36170 + __func__, vpi, vci);
36171 +- atomic_inc(&vcc->stats->rx_err);
36172 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
36173 + return;
36174 + }
36175 +
36176 +@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
36177 + if (length > ATM_MAX_AAL5_PDU) {
36178 + atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
36179 + __func__, length, vcc);
36180 +- atomic_inc(&vcc->stats->rx_err);
36181 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
36182 + goto out;
36183 + }
36184 +
36185 +@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
36186 + if (sarb->len < pdu_length) {
36187 + atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
36188 + __func__, pdu_length, sarb->len, vcc);
36189 +- atomic_inc(&vcc->stats->rx_err);
36190 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
36191 + goto out;
36192 + }
36193 +
36194 + if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36195 + atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36196 + __func__, vcc);
36197 +- atomic_inc(&vcc->stats->rx_err);
36198 ++ atomic_inc_unchecked(&vcc->stats->rx_err);
36199 + goto out;
36200 + }
36201 +
36202 +@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
36203 + if (printk_ratelimit())
36204 + atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36205 + __func__, length);
36206 +- atomic_inc(&vcc->stats->rx_drop);
36207 ++ atomic_inc_unchecked(&vcc->stats->rx_drop);
36208 + goto out;
36209 + }
36210 +
36211 +@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
36212 +
36213 + vcc->push(vcc, skb);
36214 +
36215 +- atomic_inc(&vcc->stats->rx);
36216 ++ atomic_inc_unchecked(&vcc->stats->rx);
36217 + out:
36218 + skb_trim(sarb, 0);
36219 + }
36220 +@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned l
36221 + struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36222 +
36223 + usbatm_pop(vcc, skb);
36224 +- atomic_inc(&vcc->stats->tx);
36225 ++ atomic_inc_unchecked(&vcc->stats->tx);
36226 +
36227 + skb = skb_dequeue(&instance->sndqueue);
36228 + }
36229 +@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
36230 + if (!left--)
36231 + return sprintf(page,
36232 + "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36233 +- atomic_read(&atm_dev->stats.aal5.tx),
36234 +- atomic_read(&atm_dev->stats.aal5.tx_err),
36235 +- atomic_read(&atm_dev->stats.aal5.rx),
36236 +- atomic_read(&atm_dev->stats.aal5.rx_err),
36237 +- atomic_read(&atm_dev->stats.aal5.rx_drop));
36238 ++ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36239 ++ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36240 ++ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36241 ++ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36242 ++ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36243 +
36244 + if (!left--) {
36245 + if (instance->disconnected)
36246 +diff -urNp linux-3.1.1/drivers/usb/core/devices.c linux-3.1.1/drivers/usb/core/devices.c
36247 +--- linux-3.1.1/drivers/usb/core/devices.c 2011-11-11 15:19:27.000000000 -0500
36248 ++++ linux-3.1.1/drivers/usb/core/devices.c 2011-11-16 18:39:08.000000000 -0500
36249 +@@ -126,7 +126,7 @@ static const char format_endpt[] =
36250 + * time it gets called.
36251 + */
36252 + static struct device_connect_event {
36253 +- atomic_t count;
36254 ++ atomic_unchecked_t count;
36255 + wait_queue_head_t wait;
36256 + } device_event = {
36257 + .count = ATOMIC_INIT(1),
36258 +@@ -164,7 +164,7 @@ static const struct class_info clas_info
36259 +
36260 + void usbfs_conn_disc_event(void)
36261 + {
36262 +- atomic_add(2, &device_event.count);
36263 ++ atomic_add_unchecked(2, &device_event.count);
36264 + wake_up(&device_event.wait);
36265 + }
36266 +
36267 +@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
36268 +
36269 + poll_wait(file, &device_event.wait, wait);
36270 +
36271 +- event_count = atomic_read(&device_event.count);
36272 ++ event_count = atomic_read_unchecked(&device_event.count);
36273 + if (file->f_version != event_count) {
36274 + file->f_version = event_count;
36275 + return POLLIN | POLLRDNORM;
36276 +diff -urNp linux-3.1.1/drivers/usb/core/message.c linux-3.1.1/drivers/usb/core/message.c
36277 +--- linux-3.1.1/drivers/usb/core/message.c 2011-11-11 15:19:27.000000000 -0500
36278 ++++ linux-3.1.1/drivers/usb/core/message.c 2011-11-16 18:39:08.000000000 -0500
36279 +@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
36280 + buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36281 + if (buf) {
36282 + len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36283 +- if (len > 0) {
36284 +- smallbuf = kmalloc(++len, GFP_NOIO);
36285 ++ if (len++ > 0) {
36286 ++ smallbuf = kmalloc(len, GFP_NOIO);
36287 + if (!smallbuf)
36288 + return buf;
36289 + memcpy(smallbuf, buf, len);
36290 +diff -urNp linux-3.1.1/drivers/usb/early/ehci-dbgp.c linux-3.1.1/drivers/usb/early/ehci-dbgp.c
36291 +--- linux-3.1.1/drivers/usb/early/ehci-dbgp.c 2011-11-11 15:19:27.000000000 -0500
36292 ++++ linux-3.1.1/drivers/usb/early/ehci-dbgp.c 2011-11-16 18:39:08.000000000 -0500
36293 +@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
36294 +
36295 + #ifdef CONFIG_KGDB
36296 + static struct kgdb_io kgdbdbgp_io_ops;
36297 +-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36298 ++static struct kgdb_io kgdbdbgp_io_ops_console;
36299 ++#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36300 + #else
36301 + #define dbgp_kgdb_mode (0)
36302 + #endif
36303 +@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
36304 + .write_char = kgdbdbgp_write_char,
36305 + };
36306 +
36307 ++static struct kgdb_io kgdbdbgp_io_ops_console = {
36308 ++ .name = "kgdbdbgp",
36309 ++ .read_char = kgdbdbgp_read_char,
36310 ++ .write_char = kgdbdbgp_write_char,
36311 ++ .is_console = 1
36312 ++};
36313 ++
36314 + static int kgdbdbgp_wait_time;
36315 +
36316 + static int __init kgdbdbgp_parse_config(char *str)
36317 +@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(
36318 + ptr++;
36319 + kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36320 + }
36321 +- kgdb_register_io_module(&kgdbdbgp_io_ops);
36322 +- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36323 ++ if (early_dbgp_console.index != -1)
36324 ++ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36325 ++ else
36326 ++ kgdb_register_io_module(&kgdbdbgp_io_ops);
36327 +
36328 + return 0;
36329 + }
36330 +diff -urNp linux-3.1.1/drivers/usb/host/xhci-mem.c linux-3.1.1/drivers/usb/host/xhci-mem.c
36331 +--- linux-3.1.1/drivers/usb/host/xhci-mem.c 2011-11-11 15:19:27.000000000 -0500
36332 ++++ linux-3.1.1/drivers/usb/host/xhci-mem.c 2011-11-16 18:40:29.000000000 -0500
36333 +@@ -1690,6 +1690,8 @@ static int xhci_check_trb_in_td_math(str
36334 + unsigned int num_tests;
36335 + int i, ret;
36336 +
36337 ++ pax_track_stack();
36338 ++
36339 + num_tests = ARRAY_SIZE(simple_test_vector);
36340 + for (i = 0; i < num_tests; i++) {
36341 + ret = xhci_test_trb_in_td(xhci,
36342 +diff -urNp linux-3.1.1/drivers/usb/wusbcore/wa-hc.h linux-3.1.1/drivers/usb/wusbcore/wa-hc.h
36343 +--- linux-3.1.1/drivers/usb/wusbcore/wa-hc.h 2011-11-11 15:19:27.000000000 -0500
36344 ++++ linux-3.1.1/drivers/usb/wusbcore/wa-hc.h 2011-11-16 18:39:08.000000000 -0500
36345 +@@ -192,7 +192,7 @@ struct wahc {
36346 + struct list_head xfer_delayed_list;
36347 + spinlock_t xfer_list_lock;
36348 + struct work_struct xfer_work;
36349 +- atomic_t xfer_id_count;
36350 ++ atomic_unchecked_t xfer_id_count;
36351 + };
36352 +
36353 +
36354 +@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
36355 + INIT_LIST_HEAD(&wa->xfer_delayed_list);
36356 + spin_lock_init(&wa->xfer_list_lock);
36357 + INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36358 +- atomic_set(&wa->xfer_id_count, 1);
36359 ++ atomic_set_unchecked(&wa->xfer_id_count, 1);
36360 + }
36361 +
36362 + /**
36363 +diff -urNp linux-3.1.1/drivers/usb/wusbcore/wa-xfer.c linux-3.1.1/drivers/usb/wusbcore/wa-xfer.c
36364 +--- linux-3.1.1/drivers/usb/wusbcore/wa-xfer.c 2011-11-11 15:19:27.000000000 -0500
36365 ++++ linux-3.1.1/drivers/usb/wusbcore/wa-xfer.c 2011-11-16 18:39:08.000000000 -0500
36366 +@@ -295,7 +295,7 @@ out:
36367 + */
36368 + static void wa_xfer_id_init(struct wa_xfer *xfer)
36369 + {
36370 +- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36371 ++ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36372 + }
36373 +
36374 + /*
36375 +diff -urNp linux-3.1.1/drivers/vhost/vhost.c linux-3.1.1/drivers/vhost/vhost.c
36376 +--- linux-3.1.1/drivers/vhost/vhost.c 2011-11-11 15:19:27.000000000 -0500
36377 ++++ linux-3.1.1/drivers/vhost/vhost.c 2011-11-16 18:39:08.000000000 -0500
36378 +@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhos
36379 + return 0;
36380 + }
36381 +
36382 +-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36383 ++static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36384 + {
36385 + struct file *eventfp, *filep = NULL,
36386 + *pollstart = NULL, *pollstop = NULL;
36387 +diff -urNp linux-3.1.1/drivers/video/aty/aty128fb.c linux-3.1.1/drivers/video/aty/aty128fb.c
36388 +--- linux-3.1.1/drivers/video/aty/aty128fb.c 2011-11-11 15:19:27.000000000 -0500
36389 ++++ linux-3.1.1/drivers/video/aty/aty128fb.c 2011-11-16 18:39:08.000000000 -0500
36390 +@@ -148,7 +148,7 @@ enum {
36391 + };
36392 +
36393 + /* Must match above enum */
36394 +-static const char *r128_family[] __devinitdata = {
36395 ++static const char *r128_family[] __devinitconst = {
36396 + "AGP",
36397 + "PCI",
36398 + "PRO AGP",
36399 +diff -urNp linux-3.1.1/drivers/video/fbcmap.c linux-3.1.1/drivers/video/fbcmap.c
36400 +--- linux-3.1.1/drivers/video/fbcmap.c 2011-11-11 15:19:27.000000000 -0500
36401 ++++ linux-3.1.1/drivers/video/fbcmap.c 2011-11-16 18:39:08.000000000 -0500
36402 +@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
36403 + rc = -ENODEV;
36404 + goto out;
36405 + }
36406 +- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36407 +- !info->fbops->fb_setcmap)) {
36408 ++ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36409 + rc = -EINVAL;
36410 + goto out1;
36411 + }
36412 +diff -urNp linux-3.1.1/drivers/video/fbmem.c linux-3.1.1/drivers/video/fbmem.c
36413 +--- linux-3.1.1/drivers/video/fbmem.c 2011-11-11 15:19:27.000000000 -0500
36414 ++++ linux-3.1.1/drivers/video/fbmem.c 2011-11-16 18:40:29.000000000 -0500
36415 +@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
36416 + image->dx += image->width + 8;
36417 + }
36418 + } else if (rotate == FB_ROTATE_UD) {
36419 +- for (x = 0; x < num && image->dx >= 0; x++) {
36420 ++ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36421 + info->fbops->fb_imageblit(info, image);
36422 + image->dx -= image->width + 8;
36423 + }
36424 +@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
36425 + image->dy += image->height + 8;
36426 + }
36427 + } else if (rotate == FB_ROTATE_CCW) {
36428 +- for (x = 0; x < num && image->dy >= 0; x++) {
36429 ++ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36430 + info->fbops->fb_imageblit(info, image);
36431 + image->dy -= image->height + 8;
36432 + }
36433 +@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
36434 + int flags = info->flags;
36435 + int ret = 0;
36436 +
36437 ++ pax_track_stack();
36438 ++
36439 + if (var->activate & FB_ACTIVATE_INV_MODE) {
36440 + struct fb_videomode mode1, mode2;
36441 +
36442 +@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
36443 + void __user *argp = (void __user *)arg;
36444 + long ret = 0;
36445 +
36446 ++ pax_track_stack();
36447 ++
36448 + switch (cmd) {
36449 + case FBIOGET_VSCREENINFO:
36450 + if (!lock_fb_info(info))
36451 +@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
36452 + return -EFAULT;
36453 + if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36454 + return -EINVAL;
36455 +- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36456 ++ if (con2fb.framebuffer >= FB_MAX)
36457 + return -EINVAL;
36458 + if (!registered_fb[con2fb.framebuffer])
36459 + request_module("fb%d", con2fb.framebuffer);
36460 +diff -urNp linux-3.1.1/drivers/video/geode/gx1fb_core.c linux-3.1.1/drivers/video/geode/gx1fb_core.c
36461 +--- linux-3.1.1/drivers/video/geode/gx1fb_core.c 2011-11-11 15:19:27.000000000 -0500
36462 ++++ linux-3.1.1/drivers/video/geode/gx1fb_core.c 2011-11-16 18:39:08.000000000 -0500
36463 +@@ -29,7 +29,7 @@ static int crt_option = 1;
36464 + static char panel_option[32] = "";
36465 +
36466 + /* Modes relevant to the GX1 (taken from modedb.c) */
36467 +-static const struct fb_videomode __devinitdata gx1_modedb[] = {
36468 ++static const struct fb_videomode __devinitconst gx1_modedb[] = {
36469 + /* 640x480-60 VESA */
36470 + { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36471 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36472 +diff -urNp linux-3.1.1/drivers/video/gxt4500.c linux-3.1.1/drivers/video/gxt4500.c
36473 +--- linux-3.1.1/drivers/video/gxt4500.c 2011-11-11 15:19:27.000000000 -0500
36474 ++++ linux-3.1.1/drivers/video/gxt4500.c 2011-11-16 18:39:08.000000000 -0500
36475 +@@ -156,7 +156,7 @@ struct gxt4500_par {
36476 + static char *mode_option;
36477 +
36478 + /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36479 +-static const struct fb_videomode defaultmode __devinitdata = {
36480 ++static const struct fb_videomode defaultmode __devinitconst = {
36481 + .refresh = 60,
36482 + .xres = 1280,
36483 + .yres = 1024,
36484 +@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, stru
36485 + return 0;
36486 + }
36487 +
36488 +-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36489 ++static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36490 + .id = "IBM GXT4500P",
36491 + .type = FB_TYPE_PACKED_PIXELS,
36492 + .visual = FB_VISUAL_PSEUDOCOLOR,
36493 +diff -urNp linux-3.1.1/drivers/video/i810/i810_accel.c linux-3.1.1/drivers/video/i810/i810_accel.c
36494 +--- linux-3.1.1/drivers/video/i810/i810_accel.c 2011-11-11 15:19:27.000000000 -0500
36495 ++++ linux-3.1.1/drivers/video/i810/i810_accel.c 2011-11-16 18:39:08.000000000 -0500
36496 +@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
36497 + }
36498 + }
36499 + printk("ringbuffer lockup!!!\n");
36500 ++ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36501 + i810_report_error(mmio);
36502 + par->dev_flags |= LOCKUP;
36503 + info->pixmap.scan_align = 1;
36504 +diff -urNp linux-3.1.1/drivers/video/i810/i810_main.c linux-3.1.1/drivers/video/i810/i810_main.c
36505 +--- linux-3.1.1/drivers/video/i810/i810_main.c 2011-11-11 15:19:27.000000000 -0500
36506 ++++ linux-3.1.1/drivers/video/i810/i810_main.c 2011-11-16 18:39:08.000000000 -0500
36507 +@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_
36508 + static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36509 +
36510 + /* PCI */
36511 +-static const char *i810_pci_list[] __devinitdata = {
36512 ++static const char *i810_pci_list[] __devinitconst = {
36513 + "Intel(R) 810 Framebuffer Device" ,
36514 + "Intel(R) 810-DC100 Framebuffer Device" ,
36515 + "Intel(R) 810E Framebuffer Device" ,
36516 +diff -urNp linux-3.1.1/drivers/video/jz4740_fb.c linux-3.1.1/drivers/video/jz4740_fb.c
36517 +--- linux-3.1.1/drivers/video/jz4740_fb.c 2011-11-11 15:19:27.000000000 -0500
36518 ++++ linux-3.1.1/drivers/video/jz4740_fb.c 2011-11-16 18:39:08.000000000 -0500
36519 +@@ -136,7 +136,7 @@ struct jzfb {
36520 + uint32_t pseudo_palette[16];
36521 + };
36522 +
36523 +-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36524 ++static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36525 + .id = "JZ4740 FB",
36526 + .type = FB_TYPE_PACKED_PIXELS,
36527 + .visual = FB_VISUAL_TRUECOLOR,
36528 +diff -urNp linux-3.1.1/drivers/video/logo/logo_linux_clut224.ppm linux-3.1.1/drivers/video/logo/logo_linux_clut224.ppm
36529 +--- linux-3.1.1/drivers/video/logo/logo_linux_clut224.ppm 2011-11-11 15:19:27.000000000 -0500
36530 ++++ linux-3.1.1/drivers/video/logo/logo_linux_clut224.ppm 2011-11-16 18:40:29.000000000 -0500
36531 +@@ -1,1604 +1,1123 @@
36532 + P3
36533 +-# Standard 224-color Linux logo
36534 + 80 80
36535 + 255
36536 +- 0 0 0 0 0 0 0 0 0 0 0 0
36537 +- 0 0 0 0 0 0 0 0 0 0 0 0
36538 +- 0 0 0 0 0 0 0 0 0 0 0 0
36539 +- 0 0 0 0 0 0 0 0 0 0 0 0
36540 +- 0 0 0 0 0 0 0 0 0 0 0 0
36541 +- 0 0 0 0 0 0 0 0 0 0 0 0
36542 +- 0 0 0 0 0 0 0 0 0 0 0 0
36543 +- 0 0 0 0 0 0 0 0 0 0 0 0
36544 +- 0 0 0 0 0 0 0 0 0 0 0 0
36545 +- 6 6 6 6 6 6 10 10 10 10 10 10
36546 +- 10 10 10 6 6 6 6 6 6 6 6 6
36547 +- 0 0 0 0 0 0 0 0 0 0 0 0
36548 +- 0 0 0 0 0 0 0 0 0 0 0 0
36549 +- 0 0 0 0 0 0 0 0 0 0 0 0
36550 +- 0 0 0 0 0 0 0 0 0 0 0 0
36551 +- 0 0 0 0 0 0 0 0 0 0 0 0
36552 +- 0 0 0 0 0 0 0 0 0 0 0 0
36553 +- 0 0 0 0 0 0 0 0 0 0 0 0
36554 +- 0 0 0 0 0 0 0 0 0 0 0 0
36555 +- 0 0 0 0 0 0 0 0 0 0 0 0
36556 +- 0 0 0 0 0 0 0 0 0 0 0 0
36557 +- 0 0 0 0 0 0 0 0 0 0 0 0
36558 +- 0 0 0 0 0 0 0 0 0 0 0 0
36559 +- 0 0 0 0 0 0 0 0 0 0 0 0
36560 +- 0 0 0 0 0 0 0 0 0 0 0 0
36561 +- 0 0 0 0 0 0 0 0 0 0 0 0
36562 +- 0 0 0 0 0 0 0 0 0 0 0 0
36563 +- 0 0 0 0 0 0 0 0 0 0 0 0
36564 +- 0 0 0 6 6 6 10 10 10 14 14 14
36565 +- 22 22 22 26 26 26 30 30 30 34 34 34
36566 +- 30 30 30 30 30 30 26 26 26 18 18 18
36567 +- 14 14 14 10 10 10 6 6 6 0 0 0
36568 +- 0 0 0 0 0 0 0 0 0 0 0 0
36569 +- 0 0 0 0 0 0 0 0 0 0 0 0
36570 +- 0 0 0 0 0 0 0 0 0 0 0 0
36571 +- 0 0 0 0 0 0 0 0 0 0 0 0
36572 +- 0 0 0 0 0 0 0 0 0 0 0 0
36573 +- 0 0 0 0 0 0 0 0 0 0 0 0
36574 +- 0 0 0 0 0 0 0 0 0 0 0 0
36575 +- 0 0 0 0 0 0 0 0 0 0 0 0
36576 +- 0 0 0 0 0 0 0 0 0 0 0 0
36577 +- 0 0 0 0 0 1 0 0 1 0 0 0
36578 +- 0 0 0 0 0 0 0 0 0 0 0 0
36579 +- 0 0 0 0 0 0 0 0 0 0 0 0
36580 +- 0 0 0 0 0 0 0 0 0 0 0 0
36581 +- 0 0 0 0 0 0 0 0 0 0 0 0
36582 +- 0 0 0 0 0 0 0 0 0 0 0 0
36583 +- 0 0 0 0 0 0 0 0 0 0 0 0
36584 +- 6 6 6 14 14 14 26 26 26 42 42 42
36585 +- 54 54 54 66 66 66 78 78 78 78 78 78
36586 +- 78 78 78 74 74 74 66 66 66 54 54 54
36587 +- 42 42 42 26 26 26 18 18 18 10 10 10
36588 +- 6 6 6 0 0 0 0 0 0 0 0 0
36589 +- 0 0 0 0 0 0 0 0 0 0 0 0
36590 +- 0 0 0 0 0 0 0 0 0 0 0 0
36591 +- 0 0 0 0 0 0 0 0 0 0 0 0
36592 +- 0 0 0 0 0 0 0 0 0 0 0 0
36593 +- 0 0 0 0 0 0 0 0 0 0 0 0
36594 +- 0 0 0 0 0 0 0 0 0 0 0 0
36595 +- 0 0 0 0 0 0 0 0 0 0 0 0
36596 +- 0 0 0 0 0 0 0 0 0 0 0 0
36597 +- 0 0 1 0 0 0 0 0 0 0 0 0
36598 +- 0 0 0 0 0 0 0 0 0 0 0 0
36599 +- 0 0 0 0 0 0 0 0 0 0 0 0
36600 +- 0 0 0 0 0 0 0 0 0 0 0 0
36601 +- 0 0 0 0 0 0 0 0 0 0 0 0
36602 +- 0 0 0 0 0 0 0 0 0 0 0 0
36603 +- 0 0 0 0 0 0 0 0 0 10 10 10
36604 +- 22 22 22 42 42 42 66 66 66 86 86 86
36605 +- 66 66 66 38 38 38 38 38 38 22 22 22
36606 +- 26 26 26 34 34 34 54 54 54 66 66 66
36607 +- 86 86 86 70 70 70 46 46 46 26 26 26
36608 +- 14 14 14 6 6 6 0 0 0 0 0 0
36609 +- 0 0 0 0 0 0 0 0 0 0 0 0
36610 +- 0 0 0 0 0 0 0 0 0 0 0 0
36611 +- 0 0 0 0 0 0 0 0 0 0 0 0
36612 +- 0 0 0 0 0 0 0 0 0 0 0 0
36613 +- 0 0 0 0 0 0 0 0 0 0 0 0
36614 +- 0 0 0 0 0 0 0 0 0 0 0 0
36615 +- 0 0 0 0 0 0 0 0 0 0 0 0
36616 +- 0 0 0 0 0 0 0 0 0 0 0 0
36617 +- 0 0 1 0 0 1 0 0 1 0 0 0
36618 +- 0 0 0 0 0 0 0 0 0 0 0 0
36619 +- 0 0 0 0 0 0 0 0 0 0 0 0
36620 +- 0 0 0 0 0 0 0 0 0 0 0 0
36621 +- 0 0 0 0 0 0 0 0 0 0 0 0
36622 +- 0 0 0 0 0 0 0 0 0 0 0 0
36623 +- 0 0 0 0 0 0 10 10 10 26 26 26
36624 +- 50 50 50 82 82 82 58 58 58 6 6 6
36625 +- 2 2 6 2 2 6 2 2 6 2 2 6
36626 +- 2 2 6 2 2 6 2 2 6 2 2 6
36627 +- 6 6 6 54 54 54 86 86 86 66 66 66
36628 +- 38 38 38 18 18 18 6 6 6 0 0 0
36629 +- 0 0 0 0 0 0 0 0 0 0 0 0
36630 +- 0 0 0 0 0 0 0 0 0 0 0 0
36631 +- 0 0 0 0 0 0 0 0 0 0 0 0
36632 +- 0 0 0 0 0 0 0 0 0 0 0 0
36633 +- 0 0 0 0 0 0 0 0 0 0 0 0
36634 +- 0 0 0 0 0 0 0 0 0 0 0 0
36635 +- 0 0 0 0 0 0 0 0 0 0 0 0
36636 +- 0 0 0 0 0 0 0 0 0 0 0 0
36637 +- 0 0 0 0 0 0 0 0 0 0 0 0
36638 +- 0 0 0 0 0 0 0 0 0 0 0 0
36639 +- 0 0 0 0 0 0 0 0 0 0 0 0
36640 +- 0 0 0 0 0 0 0 0 0 0 0 0
36641 +- 0 0 0 0 0 0 0 0 0 0 0 0
36642 +- 0 0 0 0 0 0 0 0 0 0 0 0
36643 +- 0 0 0 6 6 6 22 22 22 50 50 50
36644 +- 78 78 78 34 34 34 2 2 6 2 2 6
36645 +- 2 2 6 2 2 6 2 2 6 2 2 6
36646 +- 2 2 6 2 2 6 2 2 6 2 2 6
36647 +- 2 2 6 2 2 6 6 6 6 70 70 70
36648 +- 78 78 78 46 46 46 22 22 22 6 6 6
36649 +- 0 0 0 0 0 0 0 0 0 0 0 0
36650 +- 0 0 0 0 0 0 0 0 0 0 0 0
36651 +- 0 0 0 0 0 0 0 0 0 0 0 0
36652 +- 0 0 0 0 0 0 0 0 0 0 0 0
36653 +- 0 0 0 0 0 0 0 0 0 0 0 0
36654 +- 0 0 0 0 0 0 0 0 0 0 0 0
36655 +- 0 0 0 0 0 0 0 0 0 0 0 0
36656 +- 0 0 0 0 0 0 0 0 0 0 0 0
36657 +- 0 0 1 0 0 1 0 0 1 0 0 0
36658 +- 0 0 0 0 0 0 0 0 0 0 0 0
36659 +- 0 0 0 0 0 0 0 0 0 0 0 0
36660 +- 0 0 0 0 0 0 0 0 0 0 0 0
36661 +- 0 0 0 0 0 0 0 0 0 0 0 0
36662 +- 0 0 0 0 0 0 0 0 0 0 0 0
36663 +- 6 6 6 18 18 18 42 42 42 82 82 82
36664 +- 26 26 26 2 2 6 2 2 6 2 2 6
36665 +- 2 2 6 2 2 6 2 2 6 2 2 6
36666 +- 2 2 6 2 2 6 2 2 6 14 14 14
36667 +- 46 46 46 34 34 34 6 6 6 2 2 6
36668 +- 42 42 42 78 78 78 42 42 42 18 18 18
36669 +- 6 6 6 0 0 0 0 0 0 0 0 0
36670 +- 0 0 0 0 0 0 0 0 0 0 0 0
36671 +- 0 0 0 0 0 0 0 0 0 0 0 0
36672 +- 0 0 0 0 0 0 0 0 0 0 0 0
36673 +- 0 0 0 0 0 0 0 0 0 0 0 0
36674 +- 0 0 0 0 0 0 0 0 0 0 0 0
36675 +- 0 0 0 0 0 0 0 0 0 0 0 0
36676 +- 0 0 0 0 0 0 0 0 0 0 0 0
36677 +- 0 0 1 0 0 0 0 0 1 0 0 0
36678 +- 0 0 0 0 0 0 0 0 0 0 0 0
36679 +- 0 0 0 0 0 0 0 0 0 0 0 0
36680 +- 0 0 0 0 0 0 0 0 0 0 0 0
36681 +- 0 0 0 0 0 0 0 0 0 0 0 0
36682 +- 0 0 0 0 0 0 0 0 0 0 0 0
36683 +- 10 10 10 30 30 30 66 66 66 58 58 58
36684 +- 2 2 6 2 2 6 2 2 6 2 2 6
36685 +- 2 2 6 2 2 6 2 2 6 2 2 6
36686 +- 2 2 6 2 2 6 2 2 6 26 26 26
36687 +- 86 86 86 101 101 101 46 46 46 10 10 10
36688 +- 2 2 6 58 58 58 70 70 70 34 34 34
36689 +- 10 10 10 0 0 0 0 0 0 0 0 0
36690 +- 0 0 0 0 0 0 0 0 0 0 0 0
36691 +- 0 0 0 0 0 0 0 0 0 0 0 0
36692 +- 0 0 0 0 0 0 0 0 0 0 0 0
36693 +- 0 0 0 0 0 0 0 0 0 0 0 0
36694 +- 0 0 0 0 0 0 0 0 0 0 0 0
36695 +- 0 0 0 0 0 0 0 0 0 0 0 0
36696 +- 0 0 0 0 0 0 0 0 0 0 0 0
36697 +- 0 0 1 0 0 1 0 0 1 0 0 0
36698 +- 0 0 0 0 0 0 0 0 0 0 0 0
36699 +- 0 0 0 0 0 0 0 0 0 0 0 0
36700 +- 0 0 0 0 0 0 0 0 0 0 0 0
36701 +- 0 0 0 0 0 0 0 0 0 0 0 0
36702 +- 0 0 0 0 0 0 0 0 0 0 0 0
36703 +- 14 14 14 42 42 42 86 86 86 10 10 10
36704 +- 2 2 6 2 2 6 2 2 6 2 2 6
36705 +- 2 2 6 2 2 6 2 2 6 2 2 6
36706 +- 2 2 6 2 2 6 2 2 6 30 30 30
36707 +- 94 94 94 94 94 94 58 58 58 26 26 26
36708 +- 2 2 6 6 6 6 78 78 78 54 54 54
36709 +- 22 22 22 6 6 6 0 0 0 0 0 0
36710 +- 0 0 0 0 0 0 0 0 0 0 0 0
36711 +- 0 0 0 0 0 0 0 0 0 0 0 0
36712 +- 0 0 0 0 0 0 0 0 0 0 0 0
36713 +- 0 0 0 0 0 0 0 0 0 0 0 0
36714 +- 0 0 0 0 0 0 0 0 0 0 0 0
36715 +- 0 0 0 0 0 0 0 0 0 0 0 0
36716 +- 0 0 0 0 0 0 0 0 0 0 0 0
36717 +- 0 0 0 0 0 0 0 0 0 0 0 0
36718 +- 0 0 0 0 0 0 0 0 0 0 0 0
36719 +- 0 0 0 0 0 0 0 0 0 0 0 0
36720 +- 0 0 0 0 0 0 0 0 0 0 0 0
36721 +- 0 0 0 0 0 0 0 0 0 0 0 0
36722 +- 0 0 0 0 0 0 0 0 0 6 6 6
36723 +- 22 22 22 62 62 62 62 62 62 2 2 6
36724 +- 2 2 6 2 2 6 2 2 6 2 2 6
36725 +- 2 2 6 2 2 6 2 2 6 2 2 6
36726 +- 2 2 6 2 2 6 2 2 6 26 26 26
36727 +- 54 54 54 38 38 38 18 18 18 10 10 10
36728 +- 2 2 6 2 2 6 34 34 34 82 82 82
36729 +- 38 38 38 14 14 14 0 0 0 0 0 0
36730 +- 0 0 0 0 0 0 0 0 0 0 0 0
36731 +- 0 0 0 0 0 0 0 0 0 0 0 0
36732 +- 0 0 0 0 0 0 0 0 0 0 0 0
36733 +- 0 0 0 0 0 0 0 0 0 0 0 0
36734 +- 0 0 0 0 0 0 0 0 0 0 0 0
36735 +- 0 0 0 0 0 0 0 0 0 0 0 0
36736 +- 0 0 0 0 0 0 0 0 0 0 0 0
36737 +- 0 0 0 0 0 1 0 0 1 0 0 0
36738 +- 0 0 0 0 0 0 0 0 0 0 0 0
36739 +- 0 0 0 0 0 0 0 0 0 0 0 0
36740 +- 0 0 0 0 0 0 0 0 0 0 0 0
36741 +- 0 0 0 0 0 0 0 0 0 0 0 0
36742 +- 0 0 0 0 0 0 0 0 0 6 6 6
36743 +- 30 30 30 78 78 78 30 30 30 2 2 6
36744 +- 2 2 6 2 2 6 2 2 6 2 2 6
36745 +- 2 2 6 2 2 6 2 2 6 2 2 6
36746 +- 2 2 6 2 2 6 2 2 6 10 10 10
36747 +- 10 10 10 2 2 6 2 2 6 2 2 6
36748 +- 2 2 6 2 2 6 2 2 6 78 78 78
36749 +- 50 50 50 18 18 18 6 6 6 0 0 0
36750 +- 0 0 0 0 0 0 0 0 0 0 0 0
36751 +- 0 0 0 0 0 0 0 0 0 0 0 0
36752 +- 0 0 0 0 0 0 0 0 0 0 0 0
36753 +- 0 0 0 0 0 0 0 0 0 0 0 0
36754 +- 0 0 0 0 0 0 0 0 0 0 0 0
36755 +- 0 0 0 0 0 0 0 0 0 0 0 0
36756 +- 0 0 0 0 0 0 0 0 0 0 0 0
36757 +- 0 0 1 0 0 0 0 0 0 0 0 0
36758 +- 0 0 0 0 0 0 0 0 0 0 0 0
36759 +- 0 0 0 0 0 0 0 0 0 0 0 0
36760 +- 0 0 0 0 0 0 0 0 0 0 0 0
36761 +- 0 0 0 0 0 0 0 0 0 0 0 0
36762 +- 0 0 0 0 0 0 0 0 0 10 10 10
36763 +- 38 38 38 86 86 86 14 14 14 2 2 6
36764 +- 2 2 6 2 2 6 2 2 6 2 2 6
36765 +- 2 2 6 2 2 6 2 2 6 2 2 6
36766 +- 2 2 6 2 2 6 2 2 6 2 2 6
36767 +- 2 2 6 2 2 6 2 2 6 2 2 6
36768 +- 2 2 6 2 2 6 2 2 6 54 54 54
36769 +- 66 66 66 26 26 26 6 6 6 0 0 0
36770 +- 0 0 0 0 0 0 0 0 0 0 0 0
36771 +- 0 0 0 0 0 0 0 0 0 0 0 0
36772 +- 0 0 0 0 0 0 0 0 0 0 0 0
36773 +- 0 0 0 0 0 0 0 0 0 0 0 0
36774 +- 0 0 0 0 0 0 0 0 0 0 0 0
36775 +- 0 0 0 0 0 0 0 0 0 0 0 0
36776 +- 0 0 0 0 0 0 0 0 0 0 0 0
36777 +- 0 0 0 0 0 1 0 0 1 0 0 0
36778 +- 0 0 0 0 0 0 0 0 0 0 0 0
36779 +- 0 0 0 0 0 0 0 0 0 0 0 0
36780 +- 0 0 0 0 0 0 0 0 0 0 0 0
36781 +- 0 0 0 0 0 0 0 0 0 0 0 0
36782 +- 0 0 0 0 0 0 0 0 0 14 14 14
36783 +- 42 42 42 82 82 82 2 2 6 2 2 6
36784 +- 2 2 6 6 6 6 10 10 10 2 2 6
36785 +- 2 2 6 2 2 6 2 2 6 2 2 6
36786 +- 2 2 6 2 2 6 2 2 6 6 6 6
36787 +- 14 14 14 10 10 10 2 2 6 2 2 6
36788 +- 2 2 6 2 2 6 2 2 6 18 18 18
36789 +- 82 82 82 34 34 34 10 10 10 0 0 0
36790 +- 0 0 0 0 0 0 0 0 0 0 0 0
36791 +- 0 0 0 0 0 0 0 0 0 0 0 0
36792 +- 0 0 0 0 0 0 0 0 0 0 0 0
36793 +- 0 0 0 0 0 0 0 0 0 0 0 0
36794 +- 0 0 0 0 0 0 0 0 0 0 0 0
36795 +- 0 0 0 0 0 0 0 0 0 0 0 0
36796 +- 0 0 0 0 0 0 0 0 0 0 0 0
36797 +- 0 0 1 0 0 0 0 0 0 0 0 0
36798 +- 0 0 0 0 0 0 0 0 0 0 0 0
36799 +- 0 0 0 0 0 0 0 0 0 0 0 0
36800 +- 0 0 0 0 0 0 0 0 0 0 0 0
36801 +- 0 0 0 0 0 0 0 0 0 0 0 0
36802 +- 0 0 0 0 0 0 0 0 0 14 14 14
36803 +- 46 46 46 86 86 86 2 2 6 2 2 6
36804 +- 6 6 6 6 6 6 22 22 22 34 34 34
36805 +- 6 6 6 2 2 6 2 2 6 2 2 6
36806 +- 2 2 6 2 2 6 18 18 18 34 34 34
36807 +- 10 10 10 50 50 50 22 22 22 2 2 6
36808 +- 2 2 6 2 2 6 2 2 6 10 10 10
36809 +- 86 86 86 42 42 42 14 14 14 0 0 0
36810 +- 0 0 0 0 0 0 0 0 0 0 0 0
36811 +- 0 0 0 0 0 0 0 0 0 0 0 0
36812 +- 0 0 0 0 0 0 0 0 0 0 0 0
36813 +- 0 0 0 0 0 0 0 0 0 0 0 0
36814 +- 0 0 0 0 0 0 0 0 0 0 0 0
36815 +- 0 0 0 0 0 0 0 0 0 0 0 0
36816 +- 0 0 0 0 0 0 0 0 0 0 0 0
36817 +- 0 0 1 0 0 1 0 0 1 0 0 0
36818 +- 0 0 0 0 0 0 0 0 0 0 0 0
36819 +- 0 0 0 0 0 0 0 0 0 0 0 0
36820 +- 0 0 0 0 0 0 0 0 0 0 0 0
36821 +- 0 0 0 0 0 0 0 0 0 0 0 0
36822 +- 0 0 0 0 0 0 0 0 0 14 14 14
36823 +- 46 46 46 86 86 86 2 2 6 2 2 6
36824 +- 38 38 38 116 116 116 94 94 94 22 22 22
36825 +- 22 22 22 2 2 6 2 2 6 2 2 6
36826 +- 14 14 14 86 86 86 138 138 138 162 162 162
36827 +-154 154 154 38 38 38 26 26 26 6 6 6
36828 +- 2 2 6 2 2 6 2 2 6 2 2 6
36829 +- 86 86 86 46 46 46 14 14 14 0 0 0
36830 +- 0 0 0 0 0 0 0 0 0 0 0 0
36831 +- 0 0 0 0 0 0 0 0 0 0 0 0
36832 +- 0 0 0 0 0 0 0 0 0 0 0 0
36833 +- 0 0 0 0 0 0 0 0 0 0 0 0
36834 +- 0 0 0 0 0 0 0 0 0 0 0 0
36835 +- 0 0 0 0 0 0 0 0 0 0 0 0
36836 +- 0 0 0 0 0 0 0 0 0 0 0 0
36837 +- 0 0 0 0 0 0 0 0 0 0 0 0
36838 +- 0 0 0 0 0 0 0 0 0 0 0 0
36839 +- 0 0 0 0 0 0 0 0 0 0 0 0
36840 +- 0 0 0 0 0 0 0 0 0 0 0 0
36841 +- 0 0 0 0 0 0 0 0 0 0 0 0
36842 +- 0 0 0 0 0 0 0 0 0 14 14 14
36843 +- 46 46 46 86 86 86 2 2 6 14 14 14
36844 +-134 134 134 198 198 198 195 195 195 116 116 116
36845 +- 10 10 10 2 2 6 2 2 6 6 6 6
36846 +-101 98 89 187 187 187 210 210 210 218 218 218
36847 +-214 214 214 134 134 134 14 14 14 6 6 6
36848 +- 2 2 6 2 2 6 2 2 6 2 2 6
36849 +- 86 86 86 50 50 50 18 18 18 6 6 6
36850 +- 0 0 0 0 0 0 0 0 0 0 0 0
36851 +- 0 0 0 0 0 0 0 0 0 0 0 0
36852 +- 0 0 0 0 0 0 0 0 0 0 0 0
36853 +- 0 0 0 0 0 0 0 0 0 0 0 0
36854 +- 0 0 0 0 0 0 0 0 0 0 0 0
36855 +- 0 0 0 0 0 0 0 0 0 0 0 0
36856 +- 0 0 0 0 0 0 0 0 1 0 0 0
36857 +- 0 0 1 0 0 1 0 0 1 0 0 0
36858 +- 0 0 0 0 0 0 0 0 0 0 0 0
36859 +- 0 0 0 0 0 0 0 0 0 0 0 0
36860 +- 0 0 0 0 0 0 0 0 0 0 0 0
36861 +- 0 0 0 0 0 0 0 0 0 0 0 0
36862 +- 0 0 0 0 0 0 0 0 0 14 14 14
36863 +- 46 46 46 86 86 86 2 2 6 54 54 54
36864 +-218 218 218 195 195 195 226 226 226 246 246 246
36865 +- 58 58 58 2 2 6 2 2 6 30 30 30
36866 +-210 210 210 253 253 253 174 174 174 123 123 123
36867 +-221 221 221 234 234 234 74 74 74 2 2 6
36868 +- 2 2 6 2 2 6 2 2 6 2 2 6
36869 +- 70 70 70 58 58 58 22 22 22 6 6 6
36870 +- 0 0 0 0 0 0 0 0 0 0 0 0
36871 +- 0 0 0 0 0 0 0 0 0 0 0 0
36872 +- 0 0 0 0 0 0 0 0 0 0 0 0
36873 +- 0 0 0 0 0 0 0 0 0 0 0 0
36874 +- 0 0 0 0 0 0 0 0 0 0 0 0
36875 +- 0 0 0 0 0 0 0 0 0 0 0 0
36876 +- 0 0 0 0 0 0 0 0 0 0 0 0
36877 +- 0 0 0 0 0 0 0 0 0 0 0 0
36878 +- 0 0 0 0 0 0 0 0 0 0 0 0
36879 +- 0 0 0 0 0 0 0 0 0 0 0 0
36880 +- 0 0 0 0 0 0 0 0 0 0 0 0
36881 +- 0 0 0 0 0 0 0 0 0 0 0 0
36882 +- 0 0 0 0 0 0 0 0 0 14 14 14
36883 +- 46 46 46 82 82 82 2 2 6 106 106 106
36884 +-170 170 170 26 26 26 86 86 86 226 226 226
36885 +-123 123 123 10 10 10 14 14 14 46 46 46
36886 +-231 231 231 190 190 190 6 6 6 70 70 70
36887 +- 90 90 90 238 238 238 158 158 158 2 2 6
36888 +- 2 2 6 2 2 6 2 2 6 2 2 6
36889 +- 70 70 70 58 58 58 22 22 22 6 6 6
36890 +- 0 0 0 0 0 0 0 0 0 0 0 0
36891 +- 0 0 0 0 0 0 0 0 0 0 0 0
36892 +- 0 0 0 0 0 0 0 0 0 0 0 0
36893 +- 0 0 0 0 0 0 0 0 0 0 0 0
36894 +- 0 0 0 0 0 0 0 0 0 0 0 0
36895 +- 0 0 0 0 0 0 0 0 0 0 0 0
36896 +- 0 0 0 0 0 0 0 0 1 0 0 0
36897 +- 0 0 1 0 0 1 0 0 1 0 0 0
36898 +- 0 0 0 0 0 0 0 0 0 0 0 0
36899 +- 0 0 0 0 0 0 0 0 0 0 0 0
36900 +- 0 0 0 0 0 0 0 0 0 0 0 0
36901 +- 0 0 0 0 0 0 0 0 0 0 0 0
36902 +- 0 0 0 0 0 0 0 0 0 14 14 14
36903 +- 42 42 42 86 86 86 6 6 6 116 116 116
36904 +-106 106 106 6 6 6 70 70 70 149 149 149
36905 +-128 128 128 18 18 18 38 38 38 54 54 54
36906 +-221 221 221 106 106 106 2 2 6 14 14 14
36907 +- 46 46 46 190 190 190 198 198 198 2 2 6
36908 +- 2 2 6 2 2 6 2 2 6 2 2 6
36909 +- 74 74 74 62 62 62 22 22 22 6 6 6
36910 +- 0 0 0 0 0 0 0 0 0 0 0 0
36911 +- 0 0 0 0 0 0 0 0 0 0 0 0
36912 +- 0 0 0 0 0 0 0 0 0 0 0 0
36913 +- 0 0 0 0 0 0 0 0 0 0 0 0
36914 +- 0 0 0 0 0 0 0 0 0 0 0 0
36915 +- 0 0 0 0 0 0 0 0 0 0 0 0
36916 +- 0 0 0 0 0 0 0 0 1 0 0 0
36917 +- 0 0 1 0 0 0 0 0 1 0 0 0
36918 +- 0 0 0 0 0 0 0 0 0 0 0 0
36919 +- 0 0 0 0 0 0 0 0 0 0 0 0
36920 +- 0 0 0 0 0 0 0 0 0 0 0 0
36921 +- 0 0 0 0 0 0 0 0 0 0 0 0
36922 +- 0 0 0 0 0 0 0 0 0 14 14 14
36923 +- 42 42 42 94 94 94 14 14 14 101 101 101
36924 +-128 128 128 2 2 6 18 18 18 116 116 116
36925 +-118 98 46 121 92 8 121 92 8 98 78 10
36926 +-162 162 162 106 106 106 2 2 6 2 2 6
36927 +- 2 2 6 195 195 195 195 195 195 6 6 6
36928 +- 2 2 6 2 2 6 2 2 6 2 2 6
36929 +- 74 74 74 62 62 62 22 22 22 6 6 6
36930 +- 0 0 0 0 0 0 0 0 0 0 0 0
36931 +- 0 0 0 0 0 0 0 0 0 0 0 0
36932 +- 0 0 0 0 0 0 0 0 0 0 0 0
36933 +- 0 0 0 0 0 0 0 0 0 0 0 0
36934 +- 0 0 0 0 0 0 0 0 0 0 0 0
36935 +- 0 0 0 0 0 0 0 0 0 0 0 0
36936 +- 0 0 0 0 0 0 0 0 1 0 0 1
36937 +- 0 0 1 0 0 0 0 0 1 0 0 0
36938 +- 0 0 0 0 0 0 0 0 0 0 0 0
36939 +- 0 0 0 0 0 0 0 0 0 0 0 0
36940 +- 0 0 0 0 0 0 0 0 0 0 0 0
36941 +- 0 0 0 0 0 0 0 0 0 0 0 0
36942 +- 0 0 0 0 0 0 0 0 0 10 10 10
36943 +- 38 38 38 90 90 90 14 14 14 58 58 58
36944 +-210 210 210 26 26 26 54 38 6 154 114 10
36945 +-226 170 11 236 186 11 225 175 15 184 144 12
36946 +-215 174 15 175 146 61 37 26 9 2 2 6
36947 +- 70 70 70 246 246 246 138 138 138 2 2 6
36948 +- 2 2 6 2 2 6 2 2 6 2 2 6
36949 +- 70 70 70 66 66 66 26 26 26 6 6 6
36950 +- 0 0 0 0 0 0 0 0 0 0 0 0
36951 +- 0 0 0 0 0 0 0 0 0 0 0 0
36952 +- 0 0 0 0 0 0 0 0 0 0 0 0
36953 +- 0 0 0 0 0 0 0 0 0 0 0 0
36954 +- 0 0 0 0 0 0 0 0 0 0 0 0
36955 +- 0 0 0 0 0 0 0 0 0 0 0 0
36956 +- 0 0 0 0 0 0 0 0 0 0 0 0
36957 +- 0 0 0 0 0 0 0 0 0 0 0 0
36958 +- 0 0 0 0 0 0 0 0 0 0 0 0
36959 +- 0 0 0 0 0 0 0 0 0 0 0 0
36960 +- 0 0 0 0 0 0 0 0 0 0 0 0
36961 +- 0 0 0 0 0 0 0 0 0 0 0 0
36962 +- 0 0 0 0 0 0 0 0 0 10 10 10
36963 +- 38 38 38 86 86 86 14 14 14 10 10 10
36964 +-195 195 195 188 164 115 192 133 9 225 175 15
36965 +-239 182 13 234 190 10 232 195 16 232 200 30
36966 +-245 207 45 241 208 19 232 195 16 184 144 12
36967 +-218 194 134 211 206 186 42 42 42 2 2 6
36968 +- 2 2 6 2 2 6 2 2 6 2 2 6
36969 +- 50 50 50 74 74 74 30 30 30 6 6 6
36970 +- 0 0 0 0 0 0 0 0 0 0 0 0
36971 +- 0 0 0 0 0 0 0 0 0 0 0 0
36972 +- 0 0 0 0 0 0 0 0 0 0 0 0
36973 +- 0 0 0 0 0 0 0 0 0 0 0 0
36974 +- 0 0 0 0 0 0 0 0 0 0 0 0
36975 +- 0 0 0 0 0 0 0 0 0 0 0 0
36976 +- 0 0 0 0 0 0 0 0 0 0 0 0
36977 +- 0 0 0 0 0 0 0 0 0 0 0 0
36978 +- 0 0 0 0 0 0 0 0 0 0 0 0
36979 +- 0 0 0 0 0 0 0 0 0 0 0 0
36980 +- 0 0 0 0 0 0 0 0 0 0 0 0
36981 +- 0 0 0 0 0 0 0 0 0 0 0 0
36982 +- 0 0 0 0 0 0 0 0 0 10 10 10
36983 +- 34 34 34 86 86 86 14 14 14 2 2 6
36984 +-121 87 25 192 133 9 219 162 10 239 182 13
36985 +-236 186 11 232 195 16 241 208 19 244 214 54
36986 +-246 218 60 246 218 38 246 215 20 241 208 19
36987 +-241 208 19 226 184 13 121 87 25 2 2 6
36988 +- 2 2 6 2 2 6 2 2 6 2 2 6
36989 +- 50 50 50 82 82 82 34 34 34 10 10 10
36990 +- 0 0 0 0 0 0 0 0 0 0 0 0
36991 +- 0 0 0 0 0 0 0 0 0 0 0 0
36992 +- 0 0 0 0 0 0 0 0 0 0 0 0
36993 +- 0 0 0 0 0 0 0 0 0 0 0 0
36994 +- 0 0 0 0 0 0 0 0 0 0 0 0
36995 +- 0 0 0 0 0 0 0 0 0 0 0 0
36996 +- 0 0 0 0 0 0 0 0 0 0 0 0
36997 +- 0 0 0 0 0 0 0 0 0 0 0 0
36998 +- 0 0 0 0 0 0 0 0 0 0 0 0
36999 +- 0 0 0 0 0 0 0 0 0 0 0 0
37000 +- 0 0 0 0 0 0 0 0 0 0 0 0
37001 +- 0 0 0 0 0 0 0 0 0 0 0 0
37002 +- 0 0 0 0 0 0 0 0 0 10 10 10
37003 +- 34 34 34 82 82 82 30 30 30 61 42 6
37004 +-180 123 7 206 145 10 230 174 11 239 182 13
37005 +-234 190 10 238 202 15 241 208 19 246 218 74
37006 +-246 218 38 246 215 20 246 215 20 246 215 20
37007 +-226 184 13 215 174 15 184 144 12 6 6 6
37008 +- 2 2 6 2 2 6 2 2 6 2 2 6
37009 +- 26 26 26 94 94 94 42 42 42 14 14 14
37010 +- 0 0 0 0 0 0 0 0 0 0 0 0
37011 +- 0 0 0 0 0 0 0 0 0 0 0 0
37012 +- 0 0 0 0 0 0 0 0 0 0 0 0
37013 +- 0 0 0 0 0 0 0 0 0 0 0 0
37014 +- 0 0 0 0 0 0 0 0 0 0 0 0
37015 +- 0 0 0 0 0 0 0 0 0 0 0 0
37016 +- 0 0 0 0 0 0 0 0 0 0 0 0
37017 +- 0 0 0 0 0 0 0 0 0 0 0 0
37018 +- 0 0 0 0 0 0 0 0 0 0 0 0
37019 +- 0 0 0 0 0 0 0 0 0 0 0 0
37020 +- 0 0 0 0 0 0 0 0 0 0 0 0
37021 +- 0 0 0 0 0 0 0 0 0 0 0 0
37022 +- 0 0 0 0 0 0 0 0 0 10 10 10
37023 +- 30 30 30 78 78 78 50 50 50 104 69 6
37024 +-192 133 9 216 158 10 236 178 12 236 186 11
37025 +-232 195 16 241 208 19 244 214 54 245 215 43
37026 +-246 215 20 246 215 20 241 208 19 198 155 10
37027 +-200 144 11 216 158 10 156 118 10 2 2 6
37028 +- 2 2 6 2 2 6 2 2 6 2 2 6
37029 +- 6 6 6 90 90 90 54 54 54 18 18 18
37030 +- 6 6 6 0 0 0 0 0 0 0 0 0
37031 +- 0 0 0 0 0 0 0 0 0 0 0 0
37032 +- 0 0 0 0 0 0 0 0 0 0 0 0
37033 +- 0 0 0 0 0 0 0 0 0 0 0 0
37034 +- 0 0 0 0 0 0 0 0 0 0 0 0
37035 +- 0 0 0 0 0 0 0 0 0 0 0 0
37036 +- 0 0 0 0 0 0 0 0 0 0 0 0
37037 +- 0 0 0 0 0 0 0 0 0 0 0 0
37038 +- 0 0 0 0 0 0 0 0 0 0 0 0
37039 +- 0 0 0 0 0 0 0 0 0 0 0 0
37040 +- 0 0 0 0 0 0 0 0 0 0 0 0
37041 +- 0 0 0 0 0 0 0 0 0 0 0 0
37042 +- 0 0 0 0 0 0 0 0 0 10 10 10
37043 +- 30 30 30 78 78 78 46 46 46 22 22 22
37044 +-137 92 6 210 162 10 239 182 13 238 190 10
37045 +-238 202 15 241 208 19 246 215 20 246 215 20
37046 +-241 208 19 203 166 17 185 133 11 210 150 10
37047 +-216 158 10 210 150 10 102 78 10 2 2 6
37048 +- 6 6 6 54 54 54 14 14 14 2 2 6
37049 +- 2 2 6 62 62 62 74 74 74 30 30 30
37050 +- 10 10 10 0 0 0 0 0 0 0 0 0
37051 +- 0 0 0 0 0 0 0 0 0 0 0 0
37052 +- 0 0 0 0 0 0 0 0 0 0 0 0
37053 +- 0 0 0 0 0 0 0 0 0 0 0 0
37054 +- 0 0 0 0 0 0 0 0 0 0 0 0
37055 +- 0 0 0 0 0 0 0 0 0 0 0 0
37056 +- 0 0 0 0 0 0 0 0 0 0 0 0
37057 +- 0 0 0 0 0 0 0 0 0 0 0 0
37058 +- 0 0 0 0 0 0 0 0 0 0 0 0
37059 +- 0 0 0 0 0 0 0 0 0 0 0 0
37060 +- 0 0 0 0 0 0 0 0 0 0 0 0
37061 +- 0 0 0 0 0 0 0 0 0 0 0 0
37062 +- 0 0 0 0 0 0 0 0 0 10 10 10
37063 +- 34 34 34 78 78 78 50 50 50 6 6 6
37064 +- 94 70 30 139 102 15 190 146 13 226 184 13
37065 +-232 200 30 232 195 16 215 174 15 190 146 13
37066 +-168 122 10 192 133 9 210 150 10 213 154 11
37067 +-202 150 34 182 157 106 101 98 89 2 2 6
37068 +- 2 2 6 78 78 78 116 116 116 58 58 58
37069 +- 2 2 6 22 22 22 90 90 90 46 46 46
37070 +- 18 18 18 6 6 6 0 0 0 0 0 0
37071 +- 0 0 0 0 0 0 0 0 0 0 0 0
37072 +- 0 0 0 0 0 0 0 0 0 0 0 0
37073 +- 0 0 0 0 0 0 0 0 0 0 0 0
37074 +- 0 0 0 0 0 0 0 0 0 0 0 0
37075 +- 0 0 0 0 0 0 0 0 0 0 0 0
37076 +- 0 0 0 0 0 0 0 0 0 0 0 0
37077 +- 0 0 0 0 0 0 0 0 0 0 0 0
37078 +- 0 0 0 0 0 0 0 0 0 0 0 0
37079 +- 0 0 0 0 0 0 0 0 0 0 0 0
37080 +- 0 0 0 0 0 0 0 0 0 0 0 0
37081 +- 0 0 0 0 0 0 0 0 0 0 0 0
37082 +- 0 0 0 0 0 0 0 0 0 10 10 10
37083 +- 38 38 38 86 86 86 50 50 50 6 6 6
37084 +-128 128 128 174 154 114 156 107 11 168 122 10
37085 +-198 155 10 184 144 12 197 138 11 200 144 11
37086 +-206 145 10 206 145 10 197 138 11 188 164 115
37087 +-195 195 195 198 198 198 174 174 174 14 14 14
37088 +- 2 2 6 22 22 22 116 116 116 116 116 116
37089 +- 22 22 22 2 2 6 74 74 74 70 70 70
37090 +- 30 30 30 10 10 10 0 0 0 0 0 0
37091 +- 0 0 0 0 0 0 0 0 0 0 0 0
37092 +- 0 0 0 0 0 0 0 0 0 0 0 0
37093 +- 0 0 0 0 0 0 0 0 0 0 0 0
37094 +- 0 0 0 0 0 0 0 0 0 0 0 0
37095 +- 0 0 0 0 0 0 0 0 0 0 0 0
37096 +- 0 0 0 0 0 0 0 0 0 0 0 0
37097 +- 0 0 0 0 0 0 0 0 0 0 0 0
37098 +- 0 0 0 0 0 0 0 0 0 0 0 0
37099 +- 0 0 0 0 0 0 0 0 0 0 0 0
37100 +- 0 0 0 0 0 0 0 0 0 0 0 0
37101 +- 0 0 0 0 0 0 0 0 0 0 0 0
37102 +- 0 0 0 0 0 0 6 6 6 18 18 18
37103 +- 50 50 50 101 101 101 26 26 26 10 10 10
37104 +-138 138 138 190 190 190 174 154 114 156 107 11
37105 +-197 138 11 200 144 11 197 138 11 192 133 9
37106 +-180 123 7 190 142 34 190 178 144 187 187 187
37107 +-202 202 202 221 221 221 214 214 214 66 66 66
37108 +- 2 2 6 2 2 6 50 50 50 62 62 62
37109 +- 6 6 6 2 2 6 10 10 10 90 90 90
37110 +- 50 50 50 18 18 18 6 6 6 0 0 0
37111 +- 0 0 0 0 0 0 0 0 0 0 0 0
37112 +- 0 0 0 0 0 0 0 0 0 0 0 0
37113 +- 0 0 0 0 0 0 0 0 0 0 0 0
37114 +- 0 0 0 0 0 0 0 0 0 0 0 0
37115 +- 0 0 0 0 0 0 0 0 0 0 0 0
37116 +- 0 0 0 0 0 0 0 0 0 0 0 0
37117 +- 0 0 0 0 0 0 0 0 0 0 0 0
37118 +- 0 0 0 0 0 0 0 0 0 0 0 0
37119 +- 0 0 0 0 0 0 0 0 0 0 0 0
37120 +- 0 0 0 0 0 0 0 0 0 0 0 0
37121 +- 0 0 0 0 0 0 0 0 0 0 0 0
37122 +- 0 0 0 0 0 0 10 10 10 34 34 34
37123 +- 74 74 74 74 74 74 2 2 6 6 6 6
37124 +-144 144 144 198 198 198 190 190 190 178 166 146
37125 +-154 121 60 156 107 11 156 107 11 168 124 44
37126 +-174 154 114 187 187 187 190 190 190 210 210 210
37127 +-246 246 246 253 253 253 253 253 253 182 182 182
37128 +- 6 6 6 2 2 6 2 2 6 2 2 6
37129 +- 2 2 6 2 2 6 2 2 6 62 62 62
37130 +- 74 74 74 34 34 34 14 14 14 0 0 0
37131 +- 0 0 0 0 0 0 0 0 0 0 0 0
37132 +- 0 0 0 0 0 0 0 0 0 0 0 0
37133 +- 0 0 0 0 0 0 0 0 0 0 0 0
37134 +- 0 0 0 0 0 0 0 0 0 0 0 0
37135 +- 0 0 0 0 0 0 0 0 0 0 0 0
37136 +- 0 0 0 0 0 0 0 0 0 0 0 0
37137 +- 0 0 0 0 0 0 0 0 0 0 0 0
37138 +- 0 0 0 0 0 0 0 0 0 0 0 0
37139 +- 0 0 0 0 0 0 0 0 0 0 0 0
37140 +- 0 0 0 0 0 0 0 0 0 0 0 0
37141 +- 0 0 0 0 0 0 0 0 0 0 0 0
37142 +- 0 0 0 10 10 10 22 22 22 54 54 54
37143 +- 94 94 94 18 18 18 2 2 6 46 46 46
37144 +-234 234 234 221 221 221 190 190 190 190 190 190
37145 +-190 190 190 187 187 187 187 187 187 190 190 190
37146 +-190 190 190 195 195 195 214 214 214 242 242 242
37147 +-253 253 253 253 253 253 253 253 253 253 253 253
37148 +- 82 82 82 2 2 6 2 2 6 2 2 6
37149 +- 2 2 6 2 2 6 2 2 6 14 14 14
37150 +- 86 86 86 54 54 54 22 22 22 6 6 6
37151 +- 0 0 0 0 0 0 0 0 0 0 0 0
37152 +- 0 0 0 0 0 0 0 0 0 0 0 0
37153 +- 0 0 0 0 0 0 0 0 0 0 0 0
37154 +- 0 0 0 0 0 0 0 0 0 0 0 0
37155 +- 0 0 0 0 0 0 0 0 0 0 0 0
37156 +- 0 0 0 0 0 0 0 0 0 0 0 0
37157 +- 0 0 0 0 0 0 0 0 0 0 0 0
37158 +- 0 0 0 0 0 0 0 0 0 0 0 0
37159 +- 0 0 0 0 0 0 0 0 0 0 0 0
37160 +- 0 0 0 0 0 0 0 0 0 0 0 0
37161 +- 0 0 0 0 0 0 0 0 0 0 0 0
37162 +- 6 6 6 18 18 18 46 46 46 90 90 90
37163 +- 46 46 46 18 18 18 6 6 6 182 182 182
37164 +-253 253 253 246 246 246 206 206 206 190 190 190
37165 +-190 190 190 190 190 190 190 190 190 190 190 190
37166 +-206 206 206 231 231 231 250 250 250 253 253 253
37167 +-253 253 253 253 253 253 253 253 253 253 253 253
37168 +-202 202 202 14 14 14 2 2 6 2 2 6
37169 +- 2 2 6 2 2 6 2 2 6 2 2 6
37170 +- 42 42 42 86 86 86 42 42 42 18 18 18
37171 +- 6 6 6 0 0 0 0 0 0 0 0 0
37172 +- 0 0 0 0 0 0 0 0 0 0 0 0
37173 +- 0 0 0 0 0 0 0 0 0 0 0 0
37174 +- 0 0 0 0 0 0 0 0 0 0 0 0
37175 +- 0 0 0 0 0 0 0 0 0 0 0 0
37176 +- 0 0 0 0 0 0 0 0 0 0 0 0
37177 +- 0 0 0 0 0 0 0 0 0 0 0 0
37178 +- 0 0 0 0 0 0 0 0 0 0 0 0
37179 +- 0 0 0 0 0 0 0 0 0 0 0 0
37180 +- 0 0 0 0 0 0 0 0 0 0 0 0
37181 +- 0 0 0 0 0 0 0 0 0 6 6 6
37182 +- 14 14 14 38 38 38 74 74 74 66 66 66
37183 +- 2 2 6 6 6 6 90 90 90 250 250 250
37184 +-253 253 253 253 253 253 238 238 238 198 198 198
37185 +-190 190 190 190 190 190 195 195 195 221 221 221
37186 +-246 246 246 253 253 253 253 253 253 253 253 253
37187 +-253 253 253 253 253 253 253 253 253 253 253 253
37188 +-253 253 253 82 82 82 2 2 6 2 2 6
37189 +- 2 2 6 2 2 6 2 2 6 2 2 6
37190 +- 2 2 6 78 78 78 70 70 70 34 34 34
37191 +- 14 14 14 6 6 6 0 0 0 0 0 0
37192 +- 0 0 0 0 0 0 0 0 0 0 0 0
37193 +- 0 0 0 0 0 0 0 0 0 0 0 0
37194 +- 0 0 0 0 0 0 0 0 0 0 0 0
37195 +- 0 0 0 0 0 0 0 0 0 0 0 0
37196 +- 0 0 0 0 0 0 0 0 0 0 0 0
37197 +- 0 0 0 0 0 0 0 0 0 0 0 0
37198 +- 0 0 0 0 0 0 0 0 0 0 0 0
37199 +- 0 0 0 0 0 0 0 0 0 0 0 0
37200 +- 0 0 0 0 0 0 0 0 0 0 0 0
37201 +- 0 0 0 0 0 0 0 0 0 14 14 14
37202 +- 34 34 34 66 66 66 78 78 78 6 6 6
37203 +- 2 2 6 18 18 18 218 218 218 253 253 253
37204 +-253 253 253 253 253 253 253 253 253 246 246 246
37205 +-226 226 226 231 231 231 246 246 246 253 253 253
37206 +-253 253 253 253 253 253 253 253 253 253 253 253
37207 +-253 253 253 253 253 253 253 253 253 253 253 253
37208 +-253 253 253 178 178 178 2 2 6 2 2 6
37209 +- 2 2 6 2 2 6 2 2 6 2 2 6
37210 +- 2 2 6 18 18 18 90 90 90 62 62 62
37211 +- 30 30 30 10 10 10 0 0 0 0 0 0
37212 +- 0 0 0 0 0 0 0 0 0 0 0 0
37213 +- 0 0 0 0 0 0 0 0 0 0 0 0
37214 +- 0 0 0 0 0 0 0 0 0 0 0 0
37215 +- 0 0 0 0 0 0 0 0 0 0 0 0
37216 +- 0 0 0 0 0 0 0 0 0 0 0 0
37217 +- 0 0 0 0 0 0 0 0 0 0 0 0
37218 +- 0 0 0 0 0 0 0 0 0 0 0 0
37219 +- 0 0 0 0 0 0 0 0 0 0 0 0
37220 +- 0 0 0 0 0 0 0 0 0 0 0 0
37221 +- 0 0 0 0 0 0 10 10 10 26 26 26
37222 +- 58 58 58 90 90 90 18 18 18 2 2 6
37223 +- 2 2 6 110 110 110 253 253 253 253 253 253
37224 +-253 253 253 253 253 253 253 253 253 253 253 253
37225 +-250 250 250 253 253 253 253 253 253 253 253 253
37226 +-253 253 253 253 253 253 253 253 253 253 253 253
37227 +-253 253 253 253 253 253 253 253 253 253 253 253
37228 +-253 253 253 231 231 231 18 18 18 2 2 6
37229 +- 2 2 6 2 2 6 2 2 6 2 2 6
37230 +- 2 2 6 2 2 6 18 18 18 94 94 94
37231 +- 54 54 54 26 26 26 10 10 10 0 0 0
37232 +- 0 0 0 0 0 0 0 0 0 0 0 0
37233 +- 0 0 0 0 0 0 0 0 0 0 0 0
37234 +- 0 0 0 0 0 0 0 0 0 0 0 0
37235 +- 0 0 0 0 0 0 0 0 0 0 0 0
37236 +- 0 0 0 0 0 0 0 0 0 0 0 0
37237 +- 0 0 0 0 0 0 0 0 0 0 0 0
37238 +- 0 0 0 0 0 0 0 0 0 0 0 0
37239 +- 0 0 0 0 0 0 0 0 0 0 0 0
37240 +- 0 0 0 0 0 0 0 0 0 0 0 0
37241 +- 0 0 0 6 6 6 22 22 22 50 50 50
37242 +- 90 90 90 26 26 26 2 2 6 2 2 6
37243 +- 14 14 14 195 195 195 250 250 250 253 253 253
37244 +-253 253 253 253 253 253 253 253 253 253 253 253
37245 +-253 253 253 253 253 253 253 253 253 253 253 253
37246 +-253 253 253 253 253 253 253 253 253 253 253 253
37247 +-253 253 253 253 253 253 253 253 253 253 253 253
37248 +-250 250 250 242 242 242 54 54 54 2 2 6
37249 +- 2 2 6 2 2 6 2 2 6 2 2 6
37250 +- 2 2 6 2 2 6 2 2 6 38 38 38
37251 +- 86 86 86 50 50 50 22 22 22 6 6 6
37252 +- 0 0 0 0 0 0 0 0 0 0 0 0
37253 +- 0 0 0 0 0 0 0 0 0 0 0 0
37254 +- 0 0 0 0 0 0 0 0 0 0 0 0
37255 +- 0 0 0 0 0 0 0 0 0 0 0 0
37256 +- 0 0 0 0 0 0 0 0 0 0 0 0
37257 +- 0 0 0 0 0 0 0 0 0 0 0 0
37258 +- 0 0 0 0 0 0 0 0 0 0 0 0
37259 +- 0 0 0 0 0 0 0 0 0 0 0 0
37260 +- 0 0 0 0 0 0 0 0 0 0 0 0
37261 +- 6 6 6 14 14 14 38 38 38 82 82 82
37262 +- 34 34 34 2 2 6 2 2 6 2 2 6
37263 +- 42 42 42 195 195 195 246 246 246 253 253 253
37264 +-253 253 253 253 253 253 253 253 253 250 250 250
37265 +-242 242 242 242 242 242 250 250 250 253 253 253
37266 +-253 253 253 253 253 253 253 253 253 253 253 253
37267 +-253 253 253 250 250 250 246 246 246 238 238 238
37268 +-226 226 226 231 231 231 101 101 101 6 6 6
37269 +- 2 2 6 2 2 6 2 2 6 2 2 6
37270 +- 2 2 6 2 2 6 2 2 6 2 2 6
37271 +- 38 38 38 82 82 82 42 42 42 14 14 14
37272 +- 6 6 6 0 0 0 0 0 0 0 0 0
37273 +- 0 0 0 0 0 0 0 0 0 0 0 0
37274 +- 0 0 0 0 0 0 0 0 0 0 0 0
37275 +- 0 0 0 0 0 0 0 0 0 0 0 0
37276 +- 0 0 0 0 0 0 0 0 0 0 0 0
37277 +- 0 0 0 0 0 0 0 0 0 0 0 0
37278 +- 0 0 0 0 0 0 0 0 0 0 0 0
37279 +- 0 0 0 0 0 0 0 0 0 0 0 0
37280 +- 0 0 0 0 0 0 0 0 0 0 0 0
37281 +- 10 10 10 26 26 26 62 62 62 66 66 66
37282 +- 2 2 6 2 2 6 2 2 6 6 6 6
37283 +- 70 70 70 170 170 170 206 206 206 234 234 234
37284 +-246 246 246 250 250 250 250 250 250 238 238 238
37285 +-226 226 226 231 231 231 238 238 238 250 250 250
37286 +-250 250 250 250 250 250 246 246 246 231 231 231
37287 +-214 214 214 206 206 206 202 202 202 202 202 202
37288 +-198 198 198 202 202 202 182 182 182 18 18 18
37289 +- 2 2 6 2 2 6 2 2 6 2 2 6
37290 +- 2 2 6 2 2 6 2 2 6 2 2 6
37291 +- 2 2 6 62 62 62 66 66 66 30 30 30
37292 +- 10 10 10 0 0 0 0 0 0 0 0 0
37293 +- 0 0 0 0 0 0 0 0 0 0 0 0
37294 +- 0 0 0 0 0 0 0 0 0 0 0 0
37295 +- 0 0 0 0 0 0 0 0 0 0 0 0
37296 +- 0 0 0 0 0 0 0 0 0 0 0 0
37297 +- 0 0 0 0 0 0 0 0 0 0 0 0
37298 +- 0 0 0 0 0 0 0 0 0 0 0 0
37299 +- 0 0 0 0 0 0 0 0 0 0 0 0
37300 +- 0 0 0 0 0 0 0 0 0 0 0 0
37301 +- 14 14 14 42 42 42 82 82 82 18 18 18
37302 +- 2 2 6 2 2 6 2 2 6 10 10 10
37303 +- 94 94 94 182 182 182 218 218 218 242 242 242
37304 +-250 250 250 253 253 253 253 253 253 250 250 250
37305 +-234 234 234 253 253 253 253 253 253 253 253 253
37306 +-253 253 253 253 253 253 253 253 253 246 246 246
37307 +-238 238 238 226 226 226 210 210 210 202 202 202
37308 +-195 195 195 195 195 195 210 210 210 158 158 158
37309 +- 6 6 6 14 14 14 50 50 50 14 14 14
37310 +- 2 2 6 2 2 6 2 2 6 2 2 6
37311 +- 2 2 6 6 6 6 86 86 86 46 46 46
37312 +- 18 18 18 6 6 6 0 0 0 0 0 0
37313 +- 0 0 0 0 0 0 0 0 0 0 0 0
37314 +- 0 0 0 0 0 0 0 0 0 0 0 0
37315 +- 0 0 0 0 0 0 0 0 0 0 0 0
37316 +- 0 0 0 0 0 0 0 0 0 0 0 0
37317 +- 0 0 0 0 0 0 0 0 0 0 0 0
37318 +- 0 0 0 0 0 0 0 0 0 0 0 0
37319 +- 0 0 0 0 0 0 0 0 0 0 0 0
37320 +- 0 0 0 0 0 0 0 0 0 6 6 6
37321 +- 22 22 22 54 54 54 70 70 70 2 2 6
37322 +- 2 2 6 10 10 10 2 2 6 22 22 22
37323 +-166 166 166 231 231 231 250 250 250 253 253 253
37324 +-253 253 253 253 253 253 253 253 253 250 250 250
37325 +-242 242 242 253 253 253 253 253 253 253 253 253
37326 +-253 253 253 253 253 253 253 253 253 253 253 253
37327 +-253 253 253 253 253 253 253 253 253 246 246 246
37328 +-231 231 231 206 206 206 198 198 198 226 226 226
37329 +- 94 94 94 2 2 6 6 6 6 38 38 38
37330 +- 30 30 30 2 2 6 2 2 6 2 2 6
37331 +- 2 2 6 2 2 6 62 62 62 66 66 66
37332 +- 26 26 26 10 10 10 0 0 0 0 0 0
37333 +- 0 0 0 0 0 0 0 0 0 0 0 0
37334 +- 0 0 0 0 0 0 0 0 0 0 0 0
37335 +- 0 0 0 0 0 0 0 0 0 0 0 0
37336 +- 0 0 0 0 0 0 0 0 0 0 0 0
37337 +- 0 0 0 0 0 0 0 0 0 0 0 0
37338 +- 0 0 0 0 0 0 0 0 0 0 0 0
37339 +- 0 0 0 0 0 0 0 0 0 0 0 0
37340 +- 0 0 0 0 0 0 0 0 0 10 10 10
37341 +- 30 30 30 74 74 74 50 50 50 2 2 6
37342 +- 26 26 26 26 26 26 2 2 6 106 106 106
37343 +-238 238 238 253 253 253 253 253 253 253 253 253
37344 +-253 253 253 253 253 253 253 253 253 253 253 253
37345 +-253 253 253 253 253 253 253 253 253 253 253 253
37346 +-253 253 253 253 253 253 253 253 253 253 253 253
37347 +-253 253 253 253 253 253 253 253 253 253 253 253
37348 +-253 253 253 246 246 246 218 218 218 202 202 202
37349 +-210 210 210 14 14 14 2 2 6 2 2 6
37350 +- 30 30 30 22 22 22 2 2 6 2 2 6
37351 +- 2 2 6 2 2 6 18 18 18 86 86 86
37352 +- 42 42 42 14 14 14 0 0 0 0 0 0
37353 +- 0 0 0 0 0 0 0 0 0 0 0 0
37354 +- 0 0 0 0 0 0 0 0 0 0 0 0
37355 +- 0 0 0 0 0 0 0 0 0 0 0 0
37356 +- 0 0 0 0 0 0 0 0 0 0 0 0
37357 +- 0 0 0 0 0 0 0 0 0 0 0 0
37358 +- 0 0 0 0 0 0 0 0 0 0 0 0
37359 +- 0 0 0 0 0 0 0 0 0 0 0 0
37360 +- 0 0 0 0 0 0 0 0 0 14 14 14
37361 +- 42 42 42 90 90 90 22 22 22 2 2 6
37362 +- 42 42 42 2 2 6 18 18 18 218 218 218
37363 +-253 253 253 253 253 253 253 253 253 253 253 253
37364 +-253 253 253 253 253 253 253 253 253 253 253 253
37365 +-253 253 253 253 253 253 253 253 253 253 253 253
37366 +-253 253 253 253 253 253 253 253 253 253 253 253
37367 +-253 253 253 253 253 253 253 253 253 253 253 253
37368 +-253 253 253 253 253 253 250 250 250 221 221 221
37369 +-218 218 218 101 101 101 2 2 6 14 14 14
37370 +- 18 18 18 38 38 38 10 10 10 2 2 6
37371 +- 2 2 6 2 2 6 2 2 6 78 78 78
37372 +- 58 58 58 22 22 22 6 6 6 0 0 0
37373 +- 0 0 0 0 0 0 0 0 0 0 0 0
37374 +- 0 0 0 0 0 0 0 0 0 0 0 0
37375 +- 0 0 0 0 0 0 0 0 0 0 0 0
37376 +- 0 0 0 0 0 0 0 0 0 0 0 0
37377 +- 0 0 0 0 0 0 0 0 0 0 0 0
37378 +- 0 0 0 0 0 0 0 0 0 0 0 0
37379 +- 0 0 0 0 0 0 0 0 0 0 0 0
37380 +- 0 0 0 0 0 0 6 6 6 18 18 18
37381 +- 54 54 54 82 82 82 2 2 6 26 26 26
37382 +- 22 22 22 2 2 6 123 123 123 253 253 253
37383 +-253 253 253 253 253 253 253 253 253 253 253 253
37384 +-253 253 253 253 253 253 253 253 253 253 253 253
37385 +-253 253 253 253 253 253 253 253 253 253 253 253
37386 +-253 253 253 253 253 253 253 253 253 253 253 253
37387 +-253 253 253 253 253 253 253 253 253 253 253 253
37388 +-253 253 253 253 253 253 253 253 253 250 250 250
37389 +-238 238 238 198 198 198 6 6 6 38 38 38
37390 +- 58 58 58 26 26 26 38 38 38 2 2 6
37391 +- 2 2 6 2 2 6 2 2 6 46 46 46
37392 +- 78 78 78 30 30 30 10 10 10 0 0 0
37393 +- 0 0 0 0 0 0 0 0 0 0 0 0
37394 +- 0 0 0 0 0 0 0 0 0 0 0 0
37395 +- 0 0 0 0 0 0 0 0 0 0 0 0
37396 +- 0 0 0 0 0 0 0 0 0 0 0 0
37397 +- 0 0 0 0 0 0 0 0 0 0 0 0
37398 +- 0 0 0 0 0 0 0 0 0 0 0 0
37399 +- 0 0 0 0 0 0 0 0 0 0 0 0
37400 +- 0 0 0 0 0 0 10 10 10 30 30 30
37401 +- 74 74 74 58 58 58 2 2 6 42 42 42
37402 +- 2 2 6 22 22 22 231 231 231 253 253 253
37403 +-253 253 253 253 253 253 253 253 253 253 253 253
37404 +-253 253 253 253 253 253 253 253 253 250 250 250
37405 +-253 253 253 253 253 253 253 253 253 253 253 253
37406 +-253 253 253 253 253 253 253 253 253 253 253 253
37407 +-253 253 253 253 253 253 253 253 253 253 253 253
37408 +-253 253 253 253 253 253 253 253 253 253 253 253
37409 +-253 253 253 246 246 246 46 46 46 38 38 38
37410 +- 42 42 42 14 14 14 38 38 38 14 14 14
37411 +- 2 2 6 2 2 6 2 2 6 6 6 6
37412 +- 86 86 86 46 46 46 14 14 14 0 0 0
37413 +- 0 0 0 0 0 0 0 0 0 0 0 0
37414 +- 0 0 0 0 0 0 0 0 0 0 0 0
37415 +- 0 0 0 0 0 0 0 0 0 0 0 0
37416 +- 0 0 0 0 0 0 0 0 0 0 0 0
37417 +- 0 0 0 0 0 0 0 0 0 0 0 0
37418 +- 0 0 0 0 0 0 0 0 0 0 0 0
37419 +- 0 0 0 0 0 0 0 0 0 0 0 0
37420 +- 0 0 0 6 6 6 14 14 14 42 42 42
37421 +- 90 90 90 18 18 18 18 18 18 26 26 26
37422 +- 2 2 6 116 116 116 253 253 253 253 253 253
37423 +-253 253 253 253 253 253 253 253 253 253 253 253
37424 +-253 253 253 253 253 253 250 250 250 238 238 238
37425 +-253 253 253 253 253 253 253 253 253 253 253 253
37426 +-253 253 253 253 253 253 253 253 253 253 253 253
37427 +-253 253 253 253 253 253 253 253 253 253 253 253
37428 +-253 253 253 253 253 253 253 253 253 253 253 253
37429 +-253 253 253 253 253 253 94 94 94 6 6 6
37430 +- 2 2 6 2 2 6 10 10 10 34 34 34
37431 +- 2 2 6 2 2 6 2 2 6 2 2 6
37432 +- 74 74 74 58 58 58 22 22 22 6 6 6
37433 +- 0 0 0 0 0 0 0 0 0 0 0 0
37434 +- 0 0 0 0 0 0 0 0 0 0 0 0
37435 +- 0 0 0 0 0 0 0 0 0 0 0 0
37436 +- 0 0 0 0 0 0 0 0 0 0 0 0
37437 +- 0 0 0 0 0 0 0 0 0 0 0 0
37438 +- 0 0 0 0 0 0 0 0 0 0 0 0
37439 +- 0 0 0 0 0 0 0 0 0 0 0 0
37440 +- 0 0 0 10 10 10 26 26 26 66 66 66
37441 +- 82 82 82 2 2 6 38 38 38 6 6 6
37442 +- 14 14 14 210 210 210 253 253 253 253 253 253
37443 +-253 253 253 253 253 253 253 253 253 253 253 253
37444 +-253 253 253 253 253 253 246 246 246 242 242 242
37445 +-253 253 253 253 253 253 253 253 253 253 253 253
37446 +-253 253 253 253 253 253 253 253 253 253 253 253
37447 +-253 253 253 253 253 253 253 253 253 253 253 253
37448 +-253 253 253 253 253 253 253 253 253 253 253 253
37449 +-253 253 253 253 253 253 144 144 144 2 2 6
37450 +- 2 2 6 2 2 6 2 2 6 46 46 46
37451 +- 2 2 6 2 2 6 2 2 6 2 2 6
37452 +- 42 42 42 74 74 74 30 30 30 10 10 10
37453 +- 0 0 0 0 0 0 0 0 0 0 0 0
37454 +- 0 0 0 0 0 0 0 0 0 0 0 0
37455 +- 0 0 0 0 0 0 0 0 0 0 0 0
37456 +- 0 0 0 0 0 0 0 0 0 0 0 0
37457 +- 0 0 0 0 0 0 0 0 0 0 0 0
37458 +- 0 0 0 0 0 0 0 0 0 0 0 0
37459 +- 0 0 0 0 0 0 0 0 0 0 0 0
37460 +- 6 6 6 14 14 14 42 42 42 90 90 90
37461 +- 26 26 26 6 6 6 42 42 42 2 2 6
37462 +- 74 74 74 250 250 250 253 253 253 253 253 253
37463 +-253 253 253 253 253 253 253 253 253 253 253 253
37464 +-253 253 253 253 253 253 242 242 242 242 242 242
37465 +-253 253 253 253 253 253 253 253 253 253 253 253
37466 +-253 253 253 253 253 253 253 253 253 253 253 253
37467 +-253 253 253 253 253 253 253 253 253 253 253 253
37468 +-253 253 253 253 253 253 253 253 253 253 253 253
37469 +-253 253 253 253 253 253 182 182 182 2 2 6
37470 +- 2 2 6 2 2 6 2 2 6 46 46 46
37471 +- 2 2 6 2 2 6 2 2 6 2 2 6
37472 +- 10 10 10 86 86 86 38 38 38 10 10 10
37473 +- 0 0 0 0 0 0 0 0 0 0 0 0
37474 +- 0 0 0 0 0 0 0 0 0 0 0 0
37475 +- 0 0 0 0 0 0 0 0 0 0 0 0
37476 +- 0 0 0 0 0 0 0 0 0 0 0 0
37477 +- 0 0 0 0 0 0 0 0 0 0 0 0
37478 +- 0 0 0 0 0 0 0 0 0 0 0 0
37479 +- 0 0 0 0 0 0 0 0 0 0 0 0
37480 +- 10 10 10 26 26 26 66 66 66 82 82 82
37481 +- 2 2 6 22 22 22 18 18 18 2 2 6
37482 +-149 149 149 253 253 253 253 253 253 253 253 253
37483 +-253 253 253 253 253 253 253 253 253 253 253 253
37484 +-253 253 253 253 253 253 234 234 234 242 242 242
37485 +-253 253 253 253 253 253 253 253 253 253 253 253
37486 +-253 253 253 253 253 253 253 253 253 253 253 253
37487 +-253 253 253 253 253 253 253 253 253 253 253 253
37488 +-253 253 253 253 253 253 253 253 253 253 253 253
37489 +-253 253 253 253 253 253 206 206 206 2 2 6
37490 +- 2 2 6 2 2 6 2 2 6 38 38 38
37491 +- 2 2 6 2 2 6 2 2 6 2 2 6
37492 +- 6 6 6 86 86 86 46 46 46 14 14 14
37493 +- 0 0 0 0 0 0 0 0 0 0 0 0
37494 +- 0 0 0 0 0 0 0 0 0 0 0 0
37495 +- 0 0 0 0 0 0 0 0 0 0 0 0
37496 +- 0 0 0 0 0 0 0 0 0 0 0 0
37497 +- 0 0 0 0 0 0 0 0 0 0 0 0
37498 +- 0 0 0 0 0 0 0 0 0 0 0 0
37499 +- 0 0 0 0 0 0 0 0 0 6 6 6
37500 +- 18 18 18 46 46 46 86 86 86 18 18 18
37501 +- 2 2 6 34 34 34 10 10 10 6 6 6
37502 +-210 210 210 253 253 253 253 253 253 253 253 253
37503 +-253 253 253 253 253 253 253 253 253 253 253 253
37504 +-253 253 253 253 253 253 234 234 234 242 242 242
37505 +-253 253 253 253 253 253 253 253 253 253 253 253
37506 +-253 253 253 253 253 253 253 253 253 253 253 253
37507 +-253 253 253 253 253 253 253 253 253 253 253 253
37508 +-253 253 253 253 253 253 253 253 253 253 253 253
37509 +-253 253 253 253 253 253 221 221 221 6 6 6
37510 +- 2 2 6 2 2 6 6 6 6 30 30 30
37511 +- 2 2 6 2 2 6 2 2 6 2 2 6
37512 +- 2 2 6 82 82 82 54 54 54 18 18 18
37513 +- 6 6 6 0 0 0 0 0 0 0 0 0
37514 +- 0 0 0 0 0 0 0 0 0 0 0 0
37515 +- 0 0 0 0 0 0 0 0 0 0 0 0
37516 +- 0 0 0 0 0 0 0 0 0 0 0 0
37517 +- 0 0 0 0 0 0 0 0 0 0 0 0
37518 +- 0 0 0 0 0 0 0 0 0 0 0 0
37519 +- 0 0 0 0 0 0 0 0 0 10 10 10
37520 +- 26 26 26 66 66 66 62 62 62 2 2 6
37521 +- 2 2 6 38 38 38 10 10 10 26 26 26
37522 +-238 238 238 253 253 253 253 253 253 253 253 253
37523 +-253 253 253 253 253 253 253 253 253 253 253 253
37524 +-253 253 253 253 253 253 231 231 231 238 238 238
37525 +-253 253 253 253 253 253 253 253 253 253 253 253
37526 +-253 253 253 253 253 253 253 253 253 253 253 253
37527 +-253 253 253 253 253 253 253 253 253 253 253 253
37528 +-253 253 253 253 253 253 253 253 253 253 253 253
37529 +-253 253 253 253 253 253 231 231 231 6 6 6
37530 +- 2 2 6 2 2 6 10 10 10 30 30 30
37531 +- 2 2 6 2 2 6 2 2 6 2 2 6
37532 +- 2 2 6 66 66 66 58 58 58 22 22 22
37533 +- 6 6 6 0 0 0 0 0 0 0 0 0
37534 +- 0 0 0 0 0 0 0 0 0 0 0 0
37535 +- 0 0 0 0 0 0 0 0 0 0 0 0
37536 +- 0 0 0 0 0 0 0 0 0 0 0 0
37537 +- 0 0 0 0 0 0 0 0 0 0 0 0
37538 +- 0 0 0 0 0 0 0 0 0 0 0 0
37539 +- 0 0 0 0 0 0 0 0 0 10 10 10
37540 +- 38 38 38 78 78 78 6 6 6 2 2 6
37541 +- 2 2 6 46 46 46 14 14 14 42 42 42
37542 +-246 246 246 253 253 253 253 253 253 253 253 253
37543 +-253 253 253 253 253 253 253 253 253 253 253 253
37544 +-253 253 253 253 253 253 231 231 231 242 242 242
37545 +-253 253 253 253 253 253 253 253 253 253 253 253
37546 +-253 253 253 253 253 253 253 253 253 253 253 253
37547 +-253 253 253 253 253 253 253 253 253 253 253 253
37548 +-253 253 253 253 253 253 253 253 253 253 253 253
37549 +-253 253 253 253 253 253 234 234 234 10 10 10
37550 +- 2 2 6 2 2 6 22 22 22 14 14 14
37551 +- 2 2 6 2 2 6 2 2 6 2 2 6
37552 +- 2 2 6 66 66 66 62 62 62 22 22 22
37553 +- 6 6 6 0 0 0 0 0 0 0 0 0
37554 +- 0 0 0 0 0 0 0 0 0 0 0 0
37555 +- 0 0 0 0 0 0 0 0 0 0 0 0
37556 +- 0 0 0 0 0 0 0 0 0 0 0 0
37557 +- 0 0 0 0 0 0 0 0 0 0 0 0
37558 +- 0 0 0 0 0 0 0 0 0 0 0 0
37559 +- 0 0 0 0 0 0 6 6 6 18 18 18
37560 +- 50 50 50 74 74 74 2 2 6 2 2 6
37561 +- 14 14 14 70 70 70 34 34 34 62 62 62
37562 +-250 250 250 253 253 253 253 253 253 253 253 253
37563 +-253 253 253 253 253 253 253 253 253 253 253 253
37564 +-253 253 253 253 253 253 231 231 231 246 246 246
37565 +-253 253 253 253 253 253 253 253 253 253 253 253
37566 +-253 253 253 253 253 253 253 253 253 253 253 253
37567 +-253 253 253 253 253 253 253 253 253 253 253 253
37568 +-253 253 253 253 253 253 253 253 253 253 253 253
37569 +-253 253 253 253 253 253 234 234 234 14 14 14
37570 +- 2 2 6 2 2 6 30 30 30 2 2 6
37571 +- 2 2 6 2 2 6 2 2 6 2 2 6
37572 +- 2 2 6 66 66 66 62 62 62 22 22 22
37573 +- 6 6 6 0 0 0 0 0 0 0 0 0
37574 +- 0 0 0 0 0 0 0 0 0 0 0 0
37575 +- 0 0 0 0 0 0 0 0 0 0 0 0
37576 +- 0 0 0 0 0 0 0 0 0 0 0 0
37577 +- 0 0 0 0 0 0 0 0 0 0 0 0
37578 +- 0 0 0 0 0 0 0 0 0 0 0 0
37579 +- 0 0 0 0 0 0 6 6 6 18 18 18
37580 +- 54 54 54 62 62 62 2 2 6 2 2 6
37581 +- 2 2 6 30 30 30 46 46 46 70 70 70
37582 +-250 250 250 253 253 253 253 253 253 253 253 253
37583 +-253 253 253 253 253 253 253 253 253 253 253 253
37584 +-253 253 253 253 253 253 231 231 231 246 246 246
37585 +-253 253 253 253 253 253 253 253 253 253 253 253
37586 +-253 253 253 253 253 253 253 253 253 253 253 253
37587 +-253 253 253 253 253 253 253 253 253 253 253 253
37588 +-253 253 253 253 253 253 253 253 253 253 253 253
37589 +-253 253 253 253 253 253 226 226 226 10 10 10
37590 +- 2 2 6 6 6 6 30 30 30 2 2 6
37591 +- 2 2 6 2 2 6 2 2 6 2 2 6
37592 +- 2 2 6 66 66 66 58 58 58 22 22 22
37593 +- 6 6 6 0 0 0 0 0 0 0 0 0
37594 +- 0 0 0 0 0 0 0 0 0 0 0 0
37595 +- 0 0 0 0 0 0 0 0 0 0 0 0
37596 +- 0 0 0 0 0 0 0 0 0 0 0 0
37597 +- 0 0 0 0 0 0 0 0 0 0 0 0
37598 +- 0 0 0 0 0 0 0 0 0 0 0 0
37599 +- 0 0 0 0 0 0 6 6 6 22 22 22
37600 +- 58 58 58 62 62 62 2 2 6 2 2 6
37601 +- 2 2 6 2 2 6 30 30 30 78 78 78
37602 +-250 250 250 253 253 253 253 253 253 253 253 253
37603 +-253 253 253 253 253 253 253 253 253 253 253 253
37604 +-253 253 253 253 253 253 231 231 231 246 246 246
37605 +-253 253 253 253 253 253 253 253 253 253 253 253
37606 +-253 253 253 253 253 253 253 253 253 253 253 253
37607 +-253 253 253 253 253 253 253 253 253 253 253 253
37608 +-253 253 253 253 253 253 253 253 253 253 253 253
37609 +-253 253 253 253 253 253 206 206 206 2 2 6
37610 +- 22 22 22 34 34 34 18 14 6 22 22 22
37611 +- 26 26 26 18 18 18 6 6 6 2 2 6
37612 +- 2 2 6 82 82 82 54 54 54 18 18 18
37613 +- 6 6 6 0 0 0 0 0 0 0 0 0
37614 +- 0 0 0 0 0 0 0 0 0 0 0 0
37615 +- 0 0 0 0 0 0 0 0 0 0 0 0
37616 +- 0 0 0 0 0 0 0 0 0 0 0 0
37617 +- 0 0 0 0 0 0 0 0 0 0 0 0
37618 +- 0 0 0 0 0 0 0 0 0 0 0 0
37619 +- 0 0 0 0 0 0 6 6 6 26 26 26
37620 +- 62 62 62 106 106 106 74 54 14 185 133 11
37621 +-210 162 10 121 92 8 6 6 6 62 62 62
37622 +-238 238 238 253 253 253 253 253 253 253 253 253
37623 +-253 253 253 253 253 253 253 253 253 253 253 253
37624 +-253 253 253 253 253 253 231 231 231 246 246 246
37625 +-253 253 253 253 253 253 253 253 253 253 253 253
37626 +-253 253 253 253 253 253 253 253 253 253 253 253
37627 +-253 253 253 253 253 253 253 253 253 253 253 253
37628 +-253 253 253 253 253 253 253 253 253 253 253 253
37629 +-253 253 253 253 253 253 158 158 158 18 18 18
37630 +- 14 14 14 2 2 6 2 2 6 2 2 6
37631 +- 6 6 6 18 18 18 66 66 66 38 38 38
37632 +- 6 6 6 94 94 94 50 50 50 18 18 18
37633 +- 6 6 6 0 0 0 0 0 0 0 0 0
37634 +- 0 0 0 0 0 0 0 0 0 0 0 0
37635 +- 0 0 0 0 0 0 0 0 0 0 0 0
37636 +- 0 0 0 0 0 0 0 0 0 0 0 0
37637 +- 0 0 0 0 0 0 0 0 0 0 0 0
37638 +- 0 0 0 0 0 0 0 0 0 6 6 6
37639 +- 10 10 10 10 10 10 18 18 18 38 38 38
37640 +- 78 78 78 142 134 106 216 158 10 242 186 14
37641 +-246 190 14 246 190 14 156 118 10 10 10 10
37642 +- 90 90 90 238 238 238 253 253 253 253 253 253
37643 +-253 253 253 253 253 253 253 253 253 253 253 253
37644 +-253 253 253 253 253 253 231 231 231 250 250 250
37645 +-253 253 253 253 253 253 253 253 253 253 253 253
37646 +-253 253 253 253 253 253 253 253 253 253 253 253
37647 +-253 253 253 253 253 253 253 253 253 253 253 253
37648 +-253 253 253 253 253 253 253 253 253 246 230 190
37649 +-238 204 91 238 204 91 181 142 44 37 26 9
37650 +- 2 2 6 2 2 6 2 2 6 2 2 6
37651 +- 2 2 6 2 2 6 38 38 38 46 46 46
37652 +- 26 26 26 106 106 106 54 54 54 18 18 18
37653 +- 6 6 6 0 0 0 0 0 0 0 0 0
37654 +- 0 0 0 0 0 0 0 0 0 0 0 0
37655 +- 0 0 0 0 0 0 0 0 0 0 0 0
37656 +- 0 0 0 0 0 0 0 0 0 0 0 0
37657 +- 0 0 0 0 0 0 0 0 0 0 0 0
37658 +- 0 0 0 6 6 6 14 14 14 22 22 22
37659 +- 30 30 30 38 38 38 50 50 50 70 70 70
37660 +-106 106 106 190 142 34 226 170 11 242 186 14
37661 +-246 190 14 246 190 14 246 190 14 154 114 10
37662 +- 6 6 6 74 74 74 226 226 226 253 253 253
37663 +-253 253 253 253 253 253 253 253 253 253 253 253
37664 +-253 253 253 253 253 253 231 231 231 250 250 250
37665 +-253 253 253 253 253 253 253 253 253 253 253 253
37666 +-253 253 253 253 253 253 253 253 253 253 253 253
37667 +-253 253 253 253 253 253 253 253 253 253 253 253
37668 +-253 253 253 253 253 253 253 253 253 228 184 62
37669 +-241 196 14 241 208 19 232 195 16 38 30 10
37670 +- 2 2 6 2 2 6 2 2 6 2 2 6
37671 +- 2 2 6 6 6 6 30 30 30 26 26 26
37672 +-203 166 17 154 142 90 66 66 66 26 26 26
37673 +- 6 6 6 0 0 0 0 0 0 0 0 0
37674 +- 0 0 0 0 0 0 0 0 0 0 0 0
37675 +- 0 0 0 0 0 0 0 0 0 0 0 0
37676 +- 0 0 0 0 0 0 0 0 0 0 0 0
37677 +- 0 0 0 0 0 0 0 0 0 0 0 0
37678 +- 6 6 6 18 18 18 38 38 38 58 58 58
37679 +- 78 78 78 86 86 86 101 101 101 123 123 123
37680 +-175 146 61 210 150 10 234 174 13 246 186 14
37681 +-246 190 14 246 190 14 246 190 14 238 190 10
37682 +-102 78 10 2 2 6 46 46 46 198 198 198
37683 +-253 253 253 253 253 253 253 253 253 253 253 253
37684 +-253 253 253 253 253 253 234 234 234 242 242 242
37685 +-253 253 253 253 253 253 253 253 253 253 253 253
37686 +-253 253 253 253 253 253 253 253 253 253 253 253
37687 +-253 253 253 253 253 253 253 253 253 253 253 253
37688 +-253 253 253 253 253 253 253 253 253 224 178 62
37689 +-242 186 14 241 196 14 210 166 10 22 18 6
37690 +- 2 2 6 2 2 6 2 2 6 2 2 6
37691 +- 2 2 6 2 2 6 6 6 6 121 92 8
37692 +-238 202 15 232 195 16 82 82 82 34 34 34
37693 +- 10 10 10 0 0 0 0 0 0 0 0 0
37694 +- 0 0 0 0 0 0 0 0 0 0 0 0
37695 +- 0 0 0 0 0 0 0 0 0 0 0 0
37696 +- 0 0 0 0 0 0 0 0 0 0 0 0
37697 +- 0 0 0 0 0 0 0 0 0 0 0 0
37698 +- 14 14 14 38 38 38 70 70 70 154 122 46
37699 +-190 142 34 200 144 11 197 138 11 197 138 11
37700 +-213 154 11 226 170 11 242 186 14 246 190 14
37701 +-246 190 14 246 190 14 246 190 14 246 190 14
37702 +-225 175 15 46 32 6 2 2 6 22 22 22
37703 +-158 158 158 250 250 250 253 253 253 253 253 253
37704 +-253 253 253 253 253 253 253 253 253 253 253 253
37705 +-253 253 253 253 253 253 253 253 253 253 253 253
37706 +-253 253 253 253 253 253 253 253 253 253 253 253
37707 +-253 253 253 253 253 253 253 253 253 253 253 253
37708 +-253 253 253 250 250 250 242 242 242 224 178 62
37709 +-239 182 13 236 186 11 213 154 11 46 32 6
37710 +- 2 2 6 2 2 6 2 2 6 2 2 6
37711 +- 2 2 6 2 2 6 61 42 6 225 175 15
37712 +-238 190 10 236 186 11 112 100 78 42 42 42
37713 +- 14 14 14 0 0 0 0 0 0 0 0 0
37714 +- 0 0 0 0 0 0 0 0 0 0 0 0
37715 +- 0 0 0 0 0 0 0 0 0 0 0 0
37716 +- 0 0 0 0 0 0 0 0 0 0 0 0
37717 +- 0 0 0 0 0 0 0 0 0 6 6 6
37718 +- 22 22 22 54 54 54 154 122 46 213 154 11
37719 +-226 170 11 230 174 11 226 170 11 226 170 11
37720 +-236 178 12 242 186 14 246 190 14 246 190 14
37721 +-246 190 14 246 190 14 246 190 14 246 190 14
37722 +-241 196 14 184 144 12 10 10 10 2 2 6
37723 +- 6 6 6 116 116 116 242 242 242 253 253 253
37724 +-253 253 253 253 253 253 253 253 253 253 253 253
37725 +-253 253 253 253 253 253 253 253 253 253 253 253
37726 +-253 253 253 253 253 253 253 253 253 253 253 253
37727 +-253 253 253 253 253 253 253 253 253 253 253 253
37728 +-253 253 253 231 231 231 198 198 198 214 170 54
37729 +-236 178 12 236 178 12 210 150 10 137 92 6
37730 +- 18 14 6 2 2 6 2 2 6 2 2 6
37731 +- 6 6 6 70 47 6 200 144 11 236 178 12
37732 +-239 182 13 239 182 13 124 112 88 58 58 58
37733 +- 22 22 22 6 6 6 0 0 0 0 0 0
37734 +- 0 0 0 0 0 0 0 0 0 0 0 0
37735 +- 0 0 0 0 0 0 0 0 0 0 0 0
37736 +- 0 0 0 0 0 0 0 0 0 0 0 0
37737 +- 0 0 0 0 0 0 0 0 0 10 10 10
37738 +- 30 30 30 70 70 70 180 133 36 226 170 11
37739 +-239 182 13 242 186 14 242 186 14 246 186 14
37740 +-246 190 14 246 190 14 246 190 14 246 190 14
37741 +-246 190 14 246 190 14 246 190 14 246 190 14
37742 +-246 190 14 232 195 16 98 70 6 2 2 6
37743 +- 2 2 6 2 2 6 66 66 66 221 221 221
37744 +-253 253 253 253 253 253 253 253 253 253 253 253
37745 +-253 253 253 253 253 253 253 253 253 253 253 253
37746 +-253 253 253 253 253 253 253 253 253 253 253 253
37747 +-253 253 253 253 253 253 253 253 253 253 253 253
37748 +-253 253 253 206 206 206 198 198 198 214 166 58
37749 +-230 174 11 230 174 11 216 158 10 192 133 9
37750 +-163 110 8 116 81 8 102 78 10 116 81 8
37751 +-167 114 7 197 138 11 226 170 11 239 182 13
37752 +-242 186 14 242 186 14 162 146 94 78 78 78
37753 +- 34 34 34 14 14 14 6 6 6 0 0 0
37754 +- 0 0 0 0 0 0 0 0 0 0 0 0
37755 +- 0 0 0 0 0 0 0 0 0 0 0 0
37756 +- 0 0 0 0 0 0 0 0 0 0 0 0
37757 +- 0 0 0 0 0 0 0 0 0 6 6 6
37758 +- 30 30 30 78 78 78 190 142 34 226 170 11
37759 +-239 182 13 246 190 14 246 190 14 246 190 14
37760 +-246 190 14 246 190 14 246 190 14 246 190 14
37761 +-246 190 14 246 190 14 246 190 14 246 190 14
37762 +-246 190 14 241 196 14 203 166 17 22 18 6
37763 +- 2 2 6 2 2 6 2 2 6 38 38 38
37764 +-218 218 218 253 253 253 253 253 253 253 253 253
37765 +-253 253 253 253 253 253 253 253 253 253 253 253
37766 +-253 253 253 253 253 253 253 253 253 253 253 253
37767 +-253 253 253 253 253 253 253 253 253 253 253 253
37768 +-250 250 250 206 206 206 198 198 198 202 162 69
37769 +-226 170 11 236 178 12 224 166 10 210 150 10
37770 +-200 144 11 197 138 11 192 133 9 197 138 11
37771 +-210 150 10 226 170 11 242 186 14 246 190 14
37772 +-246 190 14 246 186 14 225 175 15 124 112 88
37773 +- 62 62 62 30 30 30 14 14 14 6 6 6
37774 +- 0 0 0 0 0 0 0 0 0 0 0 0
37775 +- 0 0 0 0 0 0 0 0 0 0 0 0
37776 +- 0 0 0 0 0 0 0 0 0 0 0 0
37777 +- 0 0 0 0 0 0 0 0 0 10 10 10
37778 +- 30 30 30 78 78 78 174 135 50 224 166 10
37779 +-239 182 13 246 190 14 246 190 14 246 190 14
37780 +-246 190 14 246 190 14 246 190 14 246 190 14
37781 +-246 190 14 246 190 14 246 190 14 246 190 14
37782 +-246 190 14 246 190 14 241 196 14 139 102 15
37783 +- 2 2 6 2 2 6 2 2 6 2 2 6
37784 +- 78 78 78 250 250 250 253 253 253 253 253 253
37785 +-253 253 253 253 253 253 253 253 253 253 253 253
37786 +-253 253 253 253 253 253 253 253 253 253 253 253
37787 +-253 253 253 253 253 253 253 253 253 253 253 253
37788 +-250 250 250 214 214 214 198 198 198 190 150 46
37789 +-219 162 10 236 178 12 234 174 13 224 166 10
37790 +-216 158 10 213 154 11 213 154 11 216 158 10
37791 +-226 170 11 239 182 13 246 190 14 246 190 14
37792 +-246 190 14 246 190 14 242 186 14 206 162 42
37793 +-101 101 101 58 58 58 30 30 30 14 14 14
37794 +- 6 6 6 0 0 0 0 0 0 0 0 0
37795 +- 0 0 0 0 0 0 0 0 0 0 0 0
37796 +- 0 0 0 0 0 0 0 0 0 0 0 0
37797 +- 0 0 0 0 0 0 0 0 0 10 10 10
37798 +- 30 30 30 74 74 74 174 135 50 216 158 10
37799 +-236 178 12 246 190 14 246 190 14 246 190 14
37800 +-246 190 14 246 190 14 246 190 14 246 190 14
37801 +-246 190 14 246 190 14 246 190 14 246 190 14
37802 +-246 190 14 246 190 14 241 196 14 226 184 13
37803 +- 61 42 6 2 2 6 2 2 6 2 2 6
37804 +- 22 22 22 238 238 238 253 253 253 253 253 253
37805 +-253 253 253 253 253 253 253 253 253 253 253 253
37806 +-253 253 253 253 253 253 253 253 253 253 253 253
37807 +-253 253 253 253 253 253 253 253 253 253 253 253
37808 +-253 253 253 226 226 226 187 187 187 180 133 36
37809 +-216 158 10 236 178 12 239 182 13 236 178 12
37810 +-230 174 11 226 170 11 226 170 11 230 174 11
37811 +-236 178 12 242 186 14 246 190 14 246 190 14
37812 +-246 190 14 246 190 14 246 186 14 239 182 13
37813 +-206 162 42 106 106 106 66 66 66 34 34 34
37814 +- 14 14 14 6 6 6 0 0 0 0 0 0
37815 +- 0 0 0 0 0 0 0 0 0 0 0 0
37816 +- 0 0 0 0 0 0 0 0 0 0 0 0
37817 +- 0 0 0 0 0 0 0 0 0 6 6 6
37818 +- 26 26 26 70 70 70 163 133 67 213 154 11
37819 +-236 178 12 246 190 14 246 190 14 246 190 14
37820 +-246 190 14 246 190 14 246 190 14 246 190 14
37821 +-246 190 14 246 190 14 246 190 14 246 190 14
37822 +-246 190 14 246 190 14 246 190 14 241 196 14
37823 +-190 146 13 18 14 6 2 2 6 2 2 6
37824 +- 46 46 46 246 246 246 253 253 253 253 253 253
37825 +-253 253 253 253 253 253 253 253 253 253 253 253
37826 +-253 253 253 253 253 253 253 253 253 253 253 253
37827 +-253 253 253 253 253 253 253 253 253 253 253 253
37828 +-253 253 253 221 221 221 86 86 86 156 107 11
37829 +-216 158 10 236 178 12 242 186 14 246 186 14
37830 +-242 186 14 239 182 13 239 182 13 242 186 14
37831 +-242 186 14 246 186 14 246 190 14 246 190 14
37832 +-246 190 14 246 190 14 246 190 14 246 190 14
37833 +-242 186 14 225 175 15 142 122 72 66 66 66
37834 +- 30 30 30 10 10 10 0 0 0 0 0 0
37835 +- 0 0 0 0 0 0 0 0 0 0 0 0
37836 +- 0 0 0 0 0 0 0 0 0 0 0 0
37837 +- 0 0 0 0 0 0 0 0 0 6 6 6
37838 +- 26 26 26 70 70 70 163 133 67 210 150 10
37839 +-236 178 12 246 190 14 246 190 14 246 190 14
37840 +-246 190 14 246 190 14 246 190 14 246 190 14
37841 +-246 190 14 246 190 14 246 190 14 246 190 14
37842 +-246 190 14 246 190 14 246 190 14 246 190 14
37843 +-232 195 16 121 92 8 34 34 34 106 106 106
37844 +-221 221 221 253 253 253 253 253 253 253 253 253
37845 +-253 253 253 253 253 253 253 253 253 253 253 253
37846 +-253 253 253 253 253 253 253 253 253 253 253 253
37847 +-253 253 253 253 253 253 253 253 253 253 253 253
37848 +-242 242 242 82 82 82 18 14 6 163 110 8
37849 +-216 158 10 236 178 12 242 186 14 246 190 14
37850 +-246 190 14 246 190 14 246 190 14 246 190 14
37851 +-246 190 14 246 190 14 246 190 14 246 190 14
37852 +-246 190 14 246 190 14 246 190 14 246 190 14
37853 +-246 190 14 246 190 14 242 186 14 163 133 67
37854 +- 46 46 46 18 18 18 6 6 6 0 0 0
37855 +- 0 0 0 0 0 0 0 0 0 0 0 0
37856 +- 0 0 0 0 0 0 0 0 0 0 0 0
37857 +- 0 0 0 0 0 0 0 0 0 10 10 10
37858 +- 30 30 30 78 78 78 163 133 67 210 150 10
37859 +-236 178 12 246 186 14 246 190 14 246 190 14
37860 +-246 190 14 246 190 14 246 190 14 246 190 14
37861 +-246 190 14 246 190 14 246 190 14 246 190 14
37862 +-246 190 14 246 190 14 246 190 14 246 190 14
37863 +-241 196 14 215 174 15 190 178 144 253 253 253
37864 +-253 253 253 253 253 253 253 253 253 253 253 253
37865 +-253 253 253 253 253 253 253 253 253 253 253 253
37866 +-253 253 253 253 253 253 253 253 253 253 253 253
37867 +-253 253 253 253 253 253 253 253 253 218 218 218
37868 +- 58 58 58 2 2 6 22 18 6 167 114 7
37869 +-216 158 10 236 178 12 246 186 14 246 190 14
37870 +-246 190 14 246 190 14 246 190 14 246 190 14
37871 +-246 190 14 246 190 14 246 190 14 246 190 14
37872 +-246 190 14 246 190 14 246 190 14 246 190 14
37873 +-246 190 14 246 186 14 242 186 14 190 150 46
37874 +- 54 54 54 22 22 22 6 6 6 0 0 0
37875 +- 0 0 0 0 0 0 0 0 0 0 0 0
37876 +- 0 0 0 0 0 0 0 0 0 0 0 0
37877 +- 0 0 0 0 0 0 0 0 0 14 14 14
37878 +- 38 38 38 86 86 86 180 133 36 213 154 11
37879 +-236 178 12 246 186 14 246 190 14 246 190 14
37880 +-246 190 14 246 190 14 246 190 14 246 190 14
37881 +-246 190 14 246 190 14 246 190 14 246 190 14
37882 +-246 190 14 246 190 14 246 190 14 246 190 14
37883 +-246 190 14 232 195 16 190 146 13 214 214 214
37884 +-253 253 253 253 253 253 253 253 253 253 253 253
37885 +-253 253 253 253 253 253 253 253 253 253 253 253
37886 +-253 253 253 253 253 253 253 253 253 253 253 253
37887 +-253 253 253 250 250 250 170 170 170 26 26 26
37888 +- 2 2 6 2 2 6 37 26 9 163 110 8
37889 +-219 162 10 239 182 13 246 186 14 246 190 14
37890 +-246 190 14 246 190 14 246 190 14 246 190 14
37891 +-246 190 14 246 190 14 246 190 14 246 190 14
37892 +-246 190 14 246 190 14 246 190 14 246 190 14
37893 +-246 186 14 236 178 12 224 166 10 142 122 72
37894 +- 46 46 46 18 18 18 6 6 6 0 0 0
37895 +- 0 0 0 0 0 0 0 0 0 0 0 0
37896 +- 0 0 0 0 0 0 0 0 0 0 0 0
37897 +- 0 0 0 0 0 0 6 6 6 18 18 18
37898 +- 50 50 50 109 106 95 192 133 9 224 166 10
37899 +-242 186 14 246 190 14 246 190 14 246 190 14
37900 +-246 190 14 246 190 14 246 190 14 246 190 14
37901 +-246 190 14 246 190 14 246 190 14 246 190 14
37902 +-246 190 14 246 190 14 246 190 14 246 190 14
37903 +-242 186 14 226 184 13 210 162 10 142 110 46
37904 +-226 226 226 253 253 253 253 253 253 253 253 253
37905 +-253 253 253 253 253 253 253 253 253 253 253 253
37906 +-253 253 253 253 253 253 253 253 253 253 253 253
37907 +-198 198 198 66 66 66 2 2 6 2 2 6
37908 +- 2 2 6 2 2 6 50 34 6 156 107 11
37909 +-219 162 10 239 182 13 246 186 14 246 190 14
37910 +-246 190 14 246 190 14 246 190 14 246 190 14
37911 +-246 190 14 246 190 14 246 190 14 246 190 14
37912 +-246 190 14 246 190 14 246 190 14 242 186 14
37913 +-234 174 13 213 154 11 154 122 46 66 66 66
37914 +- 30 30 30 10 10 10 0 0 0 0 0 0
37915 +- 0 0 0 0 0 0 0 0 0 0 0 0
37916 +- 0 0 0 0 0 0 0 0 0 0 0 0
37917 +- 0 0 0 0 0 0 6 6 6 22 22 22
37918 +- 58 58 58 154 121 60 206 145 10 234 174 13
37919 +-242 186 14 246 186 14 246 190 14 246 190 14
37920 +-246 190 14 246 190 14 246 190 14 246 190 14
37921 +-246 190 14 246 190 14 246 190 14 246 190 14
37922 +-246 190 14 246 190 14 246 190 14 246 190 14
37923 +-246 186 14 236 178 12 210 162 10 163 110 8
37924 +- 61 42 6 138 138 138 218 218 218 250 250 250
37925 +-253 253 253 253 253 253 253 253 253 250 250 250
37926 +-242 242 242 210 210 210 144 144 144 66 66 66
37927 +- 6 6 6 2 2 6 2 2 6 2 2 6
37928 +- 2 2 6 2 2 6 61 42 6 163 110 8
37929 +-216 158 10 236 178 12 246 190 14 246 190 14
37930 +-246 190 14 246 190 14 246 190 14 246 190 14
37931 +-246 190 14 246 190 14 246 190 14 246 190 14
37932 +-246 190 14 239 182 13 230 174 11 216 158 10
37933 +-190 142 34 124 112 88 70 70 70 38 38 38
37934 +- 18 18 18 6 6 6 0 0 0 0 0 0
37935 +- 0 0 0 0 0 0 0 0 0 0 0 0
37936 +- 0 0 0 0 0 0 0 0 0 0 0 0
37937 +- 0 0 0 0 0 0 6 6 6 22 22 22
37938 +- 62 62 62 168 124 44 206 145 10 224 166 10
37939 +-236 178 12 239 182 13 242 186 14 242 186 14
37940 +-246 186 14 246 190 14 246 190 14 246 190 14
37941 +-246 190 14 246 190 14 246 190 14 246 190 14
37942 +-246 190 14 246 190 14 246 190 14 246 190 14
37943 +-246 190 14 236 178 12 216 158 10 175 118 6
37944 +- 80 54 7 2 2 6 6 6 6 30 30 30
37945 +- 54 54 54 62 62 62 50 50 50 38 38 38
37946 +- 14 14 14 2 2 6 2 2 6 2 2 6
37947 +- 2 2 6 2 2 6 2 2 6 2 2 6
37948 +- 2 2 6 6 6 6 80 54 7 167 114 7
37949 +-213 154 11 236 178 12 246 190 14 246 190 14
37950 +-246 190 14 246 190 14 246 190 14 246 190 14
37951 +-246 190 14 242 186 14 239 182 13 239 182 13
37952 +-230 174 11 210 150 10 174 135 50 124 112 88
37953 +- 82 82 82 54 54 54 34 34 34 18 18 18
37954 +- 6 6 6 0 0 0 0 0 0 0 0 0
37955 +- 0 0 0 0 0 0 0 0 0 0 0 0
37956 +- 0 0 0 0 0 0 0 0 0 0 0 0
37957 +- 0 0 0 0 0 0 6 6 6 18 18 18
37958 +- 50 50 50 158 118 36 192 133 9 200 144 11
37959 +-216 158 10 219 162 10 224 166 10 226 170 11
37960 +-230 174 11 236 178 12 239 182 13 239 182 13
37961 +-242 186 14 246 186 14 246 190 14 246 190 14
37962 +-246 190 14 246 190 14 246 190 14 246 190 14
37963 +-246 186 14 230 174 11 210 150 10 163 110 8
37964 +-104 69 6 10 10 10 2 2 6 2 2 6
37965 +- 2 2 6 2 2 6 2 2 6 2 2 6
37966 +- 2 2 6 2 2 6 2 2 6 2 2 6
37967 +- 2 2 6 2 2 6 2 2 6 2 2 6
37968 +- 2 2 6 6 6 6 91 60 6 167 114 7
37969 +-206 145 10 230 174 11 242 186 14 246 190 14
37970 +-246 190 14 246 190 14 246 186 14 242 186 14
37971 +-239 182 13 230 174 11 224 166 10 213 154 11
37972 +-180 133 36 124 112 88 86 86 86 58 58 58
37973 +- 38 38 38 22 22 22 10 10 10 6 6 6
37974 +- 0 0 0 0 0 0 0 0 0 0 0 0
37975 +- 0 0 0 0 0 0 0 0 0 0 0 0
37976 +- 0 0 0 0 0 0 0 0 0 0 0 0
37977 +- 0 0 0 0 0 0 0 0 0 14 14 14
37978 +- 34 34 34 70 70 70 138 110 50 158 118 36
37979 +-167 114 7 180 123 7 192 133 9 197 138 11
37980 +-200 144 11 206 145 10 213 154 11 219 162 10
37981 +-224 166 10 230 174 11 239 182 13 242 186 14
37982 +-246 186 14 246 186 14 246 186 14 246 186 14
37983 +-239 182 13 216 158 10 185 133 11 152 99 6
37984 +-104 69 6 18 14 6 2 2 6 2 2 6
37985 +- 2 2 6 2 2 6 2 2 6 2 2 6
37986 +- 2 2 6 2 2 6 2 2 6 2 2 6
37987 +- 2 2 6 2 2 6 2 2 6 2 2 6
37988 +- 2 2 6 6 6 6 80 54 7 152 99 6
37989 +-192 133 9 219 162 10 236 178 12 239 182 13
37990 +-246 186 14 242 186 14 239 182 13 236 178 12
37991 +-224 166 10 206 145 10 192 133 9 154 121 60
37992 +- 94 94 94 62 62 62 42 42 42 22 22 22
37993 +- 14 14 14 6 6 6 0 0 0 0 0 0
37994 +- 0 0 0 0 0 0 0 0 0 0 0 0
37995 +- 0 0 0 0 0 0 0 0 0 0 0 0
37996 +- 0 0 0 0 0 0 0 0 0 0 0 0
37997 +- 0 0 0 0 0 0 0 0 0 6 6 6
37998 +- 18 18 18 34 34 34 58 58 58 78 78 78
37999 +-101 98 89 124 112 88 142 110 46 156 107 11
38000 +-163 110 8 167 114 7 175 118 6 180 123 7
38001 +-185 133 11 197 138 11 210 150 10 219 162 10
38002 +-226 170 11 236 178 12 236 178 12 234 174 13
38003 +-219 162 10 197 138 11 163 110 8 130 83 6
38004 +- 91 60 6 10 10 10 2 2 6 2 2 6
38005 +- 18 18 18 38 38 38 38 38 38 38 38 38
38006 +- 38 38 38 38 38 38 38 38 38 38 38 38
38007 +- 38 38 38 38 38 38 26 26 26 2 2 6
38008 +- 2 2 6 6 6 6 70 47 6 137 92 6
38009 +-175 118 6 200 144 11 219 162 10 230 174 11
38010 +-234 174 13 230 174 11 219 162 10 210 150 10
38011 +-192 133 9 163 110 8 124 112 88 82 82 82
38012 +- 50 50 50 30 30 30 14 14 14 6 6 6
38013 +- 0 0 0 0 0 0 0 0 0 0 0 0
38014 +- 0 0 0 0 0 0 0 0 0 0 0 0
38015 +- 0 0 0 0 0 0 0 0 0 0 0 0
38016 +- 0 0 0 0 0 0 0 0 0 0 0 0
38017 +- 0 0 0 0 0 0 0 0 0 0 0 0
38018 +- 6 6 6 14 14 14 22 22 22 34 34 34
38019 +- 42 42 42 58 58 58 74 74 74 86 86 86
38020 +-101 98 89 122 102 70 130 98 46 121 87 25
38021 +-137 92 6 152 99 6 163 110 8 180 123 7
38022 +-185 133 11 197 138 11 206 145 10 200 144 11
38023 +-180 123 7 156 107 11 130 83 6 104 69 6
38024 +- 50 34 6 54 54 54 110 110 110 101 98 89
38025 +- 86 86 86 82 82 82 78 78 78 78 78 78
38026 +- 78 78 78 78 78 78 78 78 78 78 78 78
38027 +- 78 78 78 82 82 82 86 86 86 94 94 94
38028 +-106 106 106 101 101 101 86 66 34 124 80 6
38029 +-156 107 11 180 123 7 192 133 9 200 144 11
38030 +-206 145 10 200 144 11 192 133 9 175 118 6
38031 +-139 102 15 109 106 95 70 70 70 42 42 42
38032 +- 22 22 22 10 10 10 0 0 0 0 0 0
38033 +- 0 0 0 0 0 0 0 0 0 0 0 0
38034 +- 0 0 0 0 0 0 0 0 0 0 0 0
38035 +- 0 0 0 0 0 0 0 0 0 0 0 0
38036 +- 0 0 0 0 0 0 0 0 0 0 0 0
38037 +- 0 0 0 0 0 0 0 0 0 0 0 0
38038 +- 0 0 0 0 0 0 6 6 6 10 10 10
38039 +- 14 14 14 22 22 22 30 30 30 38 38 38
38040 +- 50 50 50 62 62 62 74 74 74 90 90 90
38041 +-101 98 89 112 100 78 121 87 25 124 80 6
38042 +-137 92 6 152 99 6 152 99 6 152 99 6
38043 +-138 86 6 124 80 6 98 70 6 86 66 30
38044 +-101 98 89 82 82 82 58 58 58 46 46 46
38045 +- 38 38 38 34 34 34 34 34 34 34 34 34
38046 +- 34 34 34 34 34 34 34 34 34 34 34 34
38047 +- 34 34 34 34 34 34 38 38 38 42 42 42
38048 +- 54 54 54 82 82 82 94 86 76 91 60 6
38049 +-134 86 6 156 107 11 167 114 7 175 118 6
38050 +-175 118 6 167 114 7 152 99 6 121 87 25
38051 +-101 98 89 62 62 62 34 34 34 18 18 18
38052 +- 6 6 6 0 0 0 0 0 0 0 0 0
38053 +- 0 0 0 0 0 0 0 0 0 0 0 0
38054 +- 0 0 0 0 0 0 0 0 0 0 0 0
38055 +- 0 0 0 0 0 0 0 0 0 0 0 0
38056 +- 0 0 0 0 0 0 0 0 0 0 0 0
38057 +- 0 0 0 0 0 0 0 0 0 0 0 0
38058 +- 0 0 0 0 0 0 0 0 0 0 0 0
38059 +- 0 0 0 6 6 6 6 6 6 10 10 10
38060 +- 18 18 18 22 22 22 30 30 30 42 42 42
38061 +- 50 50 50 66 66 66 86 86 86 101 98 89
38062 +-106 86 58 98 70 6 104 69 6 104 69 6
38063 +-104 69 6 91 60 6 82 62 34 90 90 90
38064 +- 62 62 62 38 38 38 22 22 22 14 14 14
38065 +- 10 10 10 10 10 10 10 10 10 10 10 10
38066 +- 10 10 10 10 10 10 6 6 6 10 10 10
38067 +- 10 10 10 10 10 10 10 10 10 14 14 14
38068 +- 22 22 22 42 42 42 70 70 70 89 81 66
38069 +- 80 54 7 104 69 6 124 80 6 137 92 6
38070 +-134 86 6 116 81 8 100 82 52 86 86 86
38071 +- 58 58 58 30 30 30 14 14 14 6 6 6
38072 +- 0 0 0 0 0 0 0 0 0 0 0 0
38073 +- 0 0 0 0 0 0 0 0 0 0 0 0
38074 +- 0 0 0 0 0 0 0 0 0 0 0 0
38075 +- 0 0 0 0 0 0 0 0 0 0 0 0
38076 +- 0 0 0 0 0 0 0 0 0 0 0 0
38077 +- 0 0 0 0 0 0 0 0 0 0 0 0
38078 +- 0 0 0 0 0 0 0 0 0 0 0 0
38079 +- 0 0 0 0 0 0 0 0 0 0 0 0
38080 +- 0 0 0 6 6 6 10 10 10 14 14 14
38081 +- 18 18 18 26 26 26 38 38 38 54 54 54
38082 +- 70 70 70 86 86 86 94 86 76 89 81 66
38083 +- 89 81 66 86 86 86 74 74 74 50 50 50
38084 +- 30 30 30 14 14 14 6 6 6 0 0 0
38085 +- 0 0 0 0 0 0 0 0 0 0 0 0
38086 +- 0 0 0 0 0 0 0 0 0 0 0 0
38087 +- 0 0 0 0 0 0 0 0 0 0 0 0
38088 +- 6 6 6 18 18 18 34 34 34 58 58 58
38089 +- 82 82 82 89 81 66 89 81 66 89 81 66
38090 +- 94 86 66 94 86 76 74 74 74 50 50 50
38091 +- 26 26 26 14 14 14 6 6 6 0 0 0
38092 +- 0 0 0 0 0 0 0 0 0 0 0 0
38093 +- 0 0 0 0 0 0 0 0 0 0 0 0
38094 +- 0 0 0 0 0 0 0 0 0 0 0 0
38095 +- 0 0 0 0 0 0 0 0 0 0 0 0
38096 +- 0 0 0 0 0 0 0 0 0 0 0 0
38097 +- 0 0 0 0 0 0 0 0 0 0 0 0
38098 +- 0 0 0 0 0 0 0 0 0 0 0 0
38099 +- 0 0 0 0 0 0 0 0 0 0 0 0
38100 +- 0 0 0 0 0 0 0 0 0 0 0 0
38101 +- 6 6 6 6 6 6 14 14 14 18 18 18
38102 +- 30 30 30 38 38 38 46 46 46 54 54 54
38103 +- 50 50 50 42 42 42 30 30 30 18 18 18
38104 +- 10 10 10 0 0 0 0 0 0 0 0 0
38105 +- 0 0 0 0 0 0 0 0 0 0 0 0
38106 +- 0 0 0 0 0 0 0 0 0 0 0 0
38107 +- 0 0 0 0 0 0 0 0 0 0 0 0
38108 +- 0 0 0 6 6 6 14 14 14 26 26 26
38109 +- 38 38 38 50 50 50 58 58 58 58 58 58
38110 +- 54 54 54 42 42 42 30 30 30 18 18 18
38111 +- 10 10 10 0 0 0 0 0 0 0 0 0
38112 +- 0 0 0 0 0 0 0 0 0 0 0 0
38113 +- 0 0 0 0 0 0 0 0 0 0 0 0
38114 +- 0 0 0 0 0 0 0 0 0 0 0 0
38115 +- 0 0 0 0 0 0 0 0 0 0 0 0
38116 +- 0 0 0 0 0 0 0 0 0 0 0 0
38117 +- 0 0 0 0 0 0 0 0 0 0 0 0
38118 +- 0 0 0 0 0 0 0 0 0 0 0 0
38119 +- 0 0 0 0 0 0 0 0 0 0 0 0
38120 +- 0 0 0 0 0 0 0 0 0 0 0 0
38121 +- 0 0 0 0 0 0 0 0 0 6 6 6
38122 +- 6 6 6 10 10 10 14 14 14 18 18 18
38123 +- 18 18 18 14 14 14 10 10 10 6 6 6
38124 +- 0 0 0 0 0 0 0 0 0 0 0 0
38125 +- 0 0 0 0 0 0 0 0 0 0 0 0
38126 +- 0 0 0 0 0 0 0 0 0 0 0 0
38127 +- 0 0 0 0 0 0 0 0 0 0 0 0
38128 +- 0 0 0 0 0 0 0 0 0 6 6 6
38129 +- 14 14 14 18 18 18 22 22 22 22 22 22
38130 +- 18 18 18 14 14 14 10 10 10 6 6 6
38131 +- 0 0 0 0 0 0 0 0 0 0 0 0
38132 +- 0 0 0 0 0 0 0 0 0 0 0 0
38133 +- 0 0 0 0 0 0 0 0 0 0 0 0
38134 +- 0 0 0 0 0 0 0 0 0 0 0 0
38135 +- 0 0 0 0 0 0 0 0 0 0 0 0
38136 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38137 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38138 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38139 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38140 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38141 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38142 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38143 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38144 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38145 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38146 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38147 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38148 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38149 ++4 4 4 4 4 4
38150 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38151 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38152 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38153 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38154 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38155 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38156 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38157 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38158 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38159 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38160 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38161 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38162 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38163 ++4 4 4 4 4 4
38164 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38165 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38166 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38167 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38168 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38169 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38170 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38171 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38172 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38173 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38174 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38175 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38176 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38177 ++4 4 4 4 4 4
38178 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38179 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38180 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38181 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38182 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38183 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38184 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38185 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38186 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38187 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38188 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38189 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38190 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38191 ++4 4 4 4 4 4
38192 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38193 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38194 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38195 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38196 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38197 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38198 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38199 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38200 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38201 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38202 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38203 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38204 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38205 ++4 4 4 4 4 4
38206 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38207 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38208 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38209 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38210 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38211 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38212 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38213 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38214 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38215 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38216 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38217 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38218 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38219 ++4 4 4 4 4 4
38220 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38221 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38222 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38223 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38224 ++4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38225 ++0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38226 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38227 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38228 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38229 ++4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38230 ++0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38231 ++4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38232 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38233 ++4 4 4 4 4 4
38234 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38235 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38236 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38237 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38238 ++4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38239 ++37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38240 ++5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38241 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38242 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38243 ++4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38244 ++2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38245 ++4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38246 ++1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38247 ++4 4 4 4 4 4
38248 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38249 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38250 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38251 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38252 ++2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38253 ++153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38254 ++0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38255 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38256 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38257 ++4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38258 ++60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38259 ++4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38260 ++2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38261 ++4 4 4 4 4 4
38262 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38263 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38264 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38265 ++4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38266 ++4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38267 ++165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38268 ++1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38269 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38270 ++4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38271 ++3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38272 ++163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38273 ++0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38274 ++37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38275 ++4 4 4 4 4 4
38276 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38277 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38278 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38279 ++4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38280 ++37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38281 ++156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38282 ++125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38283 ++5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38284 ++4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38285 ++0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38286 ++174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38287 ++0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38288 ++64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38289 ++4 4 4 4 4 4
38290 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38291 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38292 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38293 ++5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38294 ++156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38295 ++156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38296 ++174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
38297 ++1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38298 ++4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
38299 ++13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
38300 ++174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
38301 ++22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
38302 ++90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
38303 ++4 4 4 4 4 4
38304 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38305 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38306 ++4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
38307 ++0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
38308 ++174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
38309 ++156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
38310 ++163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
38311 ++4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
38312 ++5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
38313 ++131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
38314 ++190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
38315 ++90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
38316 ++31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
38317 ++4 4 4 4 4 4
38318 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38319 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38320 ++4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
38321 ++4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
38322 ++155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
38323 ++167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
38324 ++153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
38325 ++41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
38326 ++1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
38327 ++177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
38328 ++125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
38329 ++136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
38330 ++7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
38331 ++4 4 4 4 4 4
38332 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38333 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38334 ++4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
38335 ++125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
38336 ++156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
38337 ++137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
38338 ++156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
38339 ++167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
38340 ++0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
38341 ++166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
38342 ++6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
38343 ++90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
38344 ++1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38345 ++4 4 4 4 4 4
38346 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38347 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38348 ++1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
38349 ++167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
38350 ++157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
38351 ++26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
38352 ++158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
38353 ++165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
38354 ++60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
38355 ++137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
38356 ++52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
38357 ++13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
38358 ++4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
38359 ++4 4 4 4 4 4
38360 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38361 ++4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
38362 ++0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
38363 ++158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
38364 ++167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
38365 ++4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
38366 ++174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
38367 ++155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
38368 ++137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
38369 ++16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
38370 ++136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
38371 ++2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
38372 ++4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
38373 ++4 4 4 4 4 4
38374 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38375 ++4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
38376 ++37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
38377 ++157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
38378 ++153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38379 ++4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
38380 ++125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38381 ++156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38382 ++174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38383 ++4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38384 ++136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38385 ++1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38386 ++2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38387 ++0 0 0 4 4 4
38388 ++4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38389 ++4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38390 ++158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38391 ++153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38392 ++37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38393 ++4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38394 ++4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38395 ++154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38396 ++174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38397 ++32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38398 ++28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38399 ++50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38400 ++0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38401 ++2 0 0 0 0 0
38402 ++4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38403 ++0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38404 ++174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38405 ++165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38406 ++4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38407 ++4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38408 ++4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38409 ++174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38410 ++60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38411 ++136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38412 ++22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38413 ++136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38414 ++26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38415 ++37 38 37 0 0 0
38416 ++4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38417 ++13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38418 ++153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38419 ++177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38420 ++4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38421 ++5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38422 ++6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38423 ++166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38424 ++4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38425 ++146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38426 ++71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38427 ++90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38428 ++125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38429 ++85 115 134 4 0 0
38430 ++4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38431 ++125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38432 ++155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38433 ++125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38434 ++5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38435 ++0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38436 ++5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38437 ++37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38438 ++4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38439 ++90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38440 ++2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38441 ++13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38442 ++166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38443 ++60 73 81 4 0 0
38444 ++4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38445 ++174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38446 ++156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38447 ++4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38448 ++4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38449 ++10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38450 ++4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38451 ++4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38452 ++80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38453 ++28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38454 ++50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38455 ++1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38456 ++167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38457 ++16 19 21 4 0 0
38458 ++4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38459 ++158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38460 ++167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38461 ++4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38462 ++4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38463 ++80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38464 ++4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38465 ++3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38466 ++146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38467 ++68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38468 ++136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38469 ++24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38470 ++163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38471 ++4 0 0 4 3 3
38472 ++3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38473 ++156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38474 ++155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38475 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38476 ++2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38477 ++136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38478 ++0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38479 ++0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38480 ++136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38481 ++28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38482 ++22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38483 ++137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38484 ++60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38485 ++3 2 2 4 4 4
38486 ++3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38487 ++157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38488 ++37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38489 ++4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38490 ++0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38491 ++101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38492 ++14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38493 ++22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38494 ++136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38495 ++17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38496 ++2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38497 ++166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38498 ++13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38499 ++4 4 4 4 4 4
38500 ++1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38501 ++163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38502 ++4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38503 ++4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38504 ++40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38505 ++101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38506 ++101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38507 ++136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38508 ++136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38509 ++136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38510 ++3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38511 ++174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38512 ++4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38513 ++4 4 4 4 4 4
38514 ++4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38515 ++155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38516 ++4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38517 ++4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38518 ++101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38519 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38520 ++136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38521 ++136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38522 ++136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38523 ++90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38524 ++85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38525 ++167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38526 ++6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38527 ++5 5 5 5 5 5
38528 ++1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38529 ++131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38530 ++6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38531 ++0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38532 ++101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38533 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38534 ++101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38535 ++136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38536 ++101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38537 ++7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38538 ++174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38539 ++24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38540 ++4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38541 ++5 5 5 4 4 4
38542 ++4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38543 ++131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38544 ++6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38545 ++13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38546 ++90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38547 ++101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38548 ++101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38549 ++136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38550 ++136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38551 ++2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38552 ++174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38553 ++4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38554 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38555 ++4 4 4 4 4 4
38556 ++1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38557 ++137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38558 ++4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38559 ++64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38560 ++90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38561 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38562 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38563 ++136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38564 ++101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38565 ++37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38566 ++167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38567 ++3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38568 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38569 ++4 4 4 4 4 4
38570 ++4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38571 ++153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38572 ++4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38573 ++90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38574 ++90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38575 ++90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38576 ++101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38577 ++101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38578 ++35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38579 ++154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38580 ++60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38581 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38582 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38583 ++4 4 4 4 4 4
38584 ++1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38585 ++153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38586 ++4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38587 ++64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38588 ++64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38589 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38590 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38591 ++136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38592 ++13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38593 ++174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38594 ++6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38595 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38596 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38597 ++4 4 4 4 4 4
38598 ++4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38599 ++156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38600 ++4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38601 ++90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38602 ++90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38603 ++90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38604 ++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38605 ++101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38606 ++2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38607 ++174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38608 ++4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38609 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38610 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38611 ++4 4 4 4 4 4
38612 ++3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38613 ++158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38614 ++4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38615 ++37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38616 ++90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38617 ++90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38618 ++101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38619 ++90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38620 ++5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38621 ++167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38622 ++6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38623 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38624 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38625 ++4 4 4 4 4 4
38626 ++4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38627 ++163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38628 ++4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38629 ++18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38630 ++64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38631 ++90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38632 ++101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38633 ++13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38634 ++3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38635 ++174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38636 ++4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38637 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38638 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38639 ++4 4 4 4 4 4
38640 ++1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38641 ++167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38642 ++4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38643 ++18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38644 ++26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38645 ++90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38646 ++101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38647 ++7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38648 ++4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38649 ++174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38650 ++4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38651 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38652 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38653 ++4 4 4 4 4 4
38654 ++4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38655 ++174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38656 ++5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38657 ++18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38658 ++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38659 ++90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38660 ++101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38661 ++2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38662 ++3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38663 ++153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38664 ++4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38665 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38666 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38667 ++4 4 4 4 4 4
38668 ++1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38669 ++174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38670 ++5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38671 ++18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38672 ++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38673 ++26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38674 ++35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38675 ++2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38676 ++3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38677 ++131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38678 ++4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38679 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38680 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38681 ++4 4 4 4 4 4
38682 ++3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38683 ++174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38684 ++5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38685 ++18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38686 ++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38687 ++26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38688 ++7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38689 ++4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38690 ++4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38691 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38692 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38693 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38694 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38695 ++4 4 4 4 4 4
38696 ++1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38697 ++174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38698 ++5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38699 ++18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38700 ++18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38701 ++26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38702 ++28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38703 ++3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38704 ++4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38705 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38706 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38707 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38708 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38709 ++4 4 4 4 4 4
38710 ++4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38711 ++174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38712 ++5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38713 ++10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38714 ++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38715 ++18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38716 ++90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38717 ++3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38718 ++4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38719 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38720 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38721 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38722 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38723 ++4 4 4 4 4 4
38724 ++1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38725 ++177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38726 ++5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38727 ++10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38728 ++26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38729 ++6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38730 ++10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38731 ++2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38732 ++4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38733 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38734 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38735 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38736 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38737 ++4 4 4 4 4 4
38738 ++4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38739 ++177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38740 ++5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38741 ++10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38742 ++26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38743 ++7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38744 ++3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38745 ++21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38746 ++4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38747 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38748 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38749 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38750 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38751 ++4 4 4 4 4 4
38752 ++3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38753 ++190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38754 ++5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38755 ++10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38756 ++24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38757 ++18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38758 ++28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38759 ++26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38760 ++4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38761 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38762 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38763 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38764 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38765 ++4 4 4 4 4 4
38766 ++4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38767 ++190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38768 ++5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38769 ++10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38770 ++0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38771 ++26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38772 ++37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38773 ++90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38774 ++4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38775 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38776 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38777 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38778 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38779 ++4 4 4 4 4 4
38780 ++4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38781 ++193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38782 ++5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38783 ++10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38784 ++1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38785 ++26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38786 ++22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38787 ++26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38788 ++4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38789 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38790 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38791 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38792 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38793 ++4 4 4 4 4 4
38794 ++4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38795 ++190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38796 ++5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38797 ++10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38798 ++2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38799 ++26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38800 ++10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38801 ++26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38802 ++4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38803 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38804 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38805 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38806 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38807 ++4 4 4 4 4 4
38808 ++4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38809 ++193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38810 ++5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38811 ++10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38812 ++13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38813 ++10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38814 ++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38815 ++26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38816 ++4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38817 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38818 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38819 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38820 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38821 ++4 4 4 4 4 4
38822 ++4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38823 ++190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38824 ++5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38825 ++28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38826 ++10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38827 ++28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38828 ++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38829 ++26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38830 ++4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38831 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38832 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38833 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38834 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38835 ++4 4 4 4 4 4
38836 ++4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38837 ++193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38838 ++5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38839 ++4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38840 ++10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38841 ++10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38842 ++18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38843 ++22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38844 ++4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38845 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38846 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38847 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38848 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38849 ++4 4 4 4 4 4
38850 ++4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38851 ++190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38852 ++6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38853 ++1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38854 ++18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38855 ++10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38856 ++26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38857 ++1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38858 ++5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38859 ++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38860 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38861 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38862 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38863 ++4 4 4 4 4 4
38864 ++4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38865 ++193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38866 ++2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38867 ++4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38868 ++10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38869 ++10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38870 ++26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38871 ++2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38872 ++3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38873 ++131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38874 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38875 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38876 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38877 ++4 4 4 4 4 4
38878 ++4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38879 ++193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38880 ++0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38881 ++4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38882 ++13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38883 ++10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38884 ++28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38885 ++4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38886 ++0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
38887 ++125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38888 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38889 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38890 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38891 ++4 4 4 4 4 4
38892 ++4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
38893 ++193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
38894 ++120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
38895 ++4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
38896 ++4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
38897 ++10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
38898 ++4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
38899 ++4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
38900 ++24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
38901 ++125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
38902 ++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38903 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38904 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38905 ++4 4 4 4 4 4
38906 ++4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
38907 ++174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38908 ++220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
38909 ++3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
38910 ++4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
38911 ++10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
38912 ++1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
38913 ++5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
38914 ++137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
38915 ++125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
38916 ++0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38917 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38918 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38919 ++4 4 4 4 4 4
38920 ++5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
38921 ++193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38922 ++220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
38923 ++4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
38924 ++4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
38925 ++22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
38926 ++4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38927 ++1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
38928 ++166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
38929 ++125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
38930 ++4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38931 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38932 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38933 ++4 4 4 4 4 4
38934 ++4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
38935 ++220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
38936 ++205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
38937 ++24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
38938 ++4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
38939 ++4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
38940 ++4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
38941 ++2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
38942 ++156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
38943 ++137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
38944 ++5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38945 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38946 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38947 ++4 4 4 4 4 4
38948 ++5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
38949 ++125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
38950 ++205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38951 ++193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
38952 ++5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
38953 ++1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
38954 ++5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
38955 ++60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
38956 ++153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
38957 ++125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
38958 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38959 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38960 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38961 ++4 4 4 4 4 4
38962 ++4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38963 ++6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
38964 ++193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
38965 ++244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
38966 ++0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
38967 ++4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
38968 ++3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
38969 ++220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
38970 ++153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
38971 ++13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38972 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38973 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38974 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38975 ++4 4 4 4 4 4
38976 ++5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
38977 ++6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
38978 ++244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
38979 ++220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
38980 ++3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
38981 ++4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
38982 ++0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
38983 ++177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
38984 ++158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
38985 ++4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38986 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38987 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38988 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38989 ++4 4 4 4 4 4
38990 ++5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
38991 ++6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
38992 ++177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
38993 ++220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
38994 ++125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
38995 ++4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
38996 ++37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
38997 ++174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
38998 ++158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38999 ++4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39000 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39001 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39002 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39003 ++4 4 4 4 4 4
39004 ++4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
39005 ++4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39006 ++26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
39007 ++205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
39008 ++244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
39009 ++0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
39010 ++177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
39011 ++174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
39012 ++60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
39013 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39014 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39015 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39016 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39017 ++4 4 4 4 4 4
39018 ++5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
39019 ++6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
39020 ++6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
39021 ++220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
39022 ++220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
39023 ++0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
39024 ++220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
39025 ++174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
39026 ++4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
39027 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39028 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39029 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39030 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39031 ++4 4 4 4 4 4
39032 ++4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39033 ++6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
39034 ++4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
39035 ++220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
39036 ++205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
39037 ++60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
39038 ++177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
39039 ++190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
39040 ++4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39041 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39042 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39043 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39044 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39045 ++4 4 4 4 4 4
39046 ++4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
39047 ++4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
39048 ++6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39049 ++125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
39050 ++205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
39051 ++193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
39052 ++190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
39053 ++153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
39054 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39055 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39056 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39057 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39058 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39059 ++4 4 4 4 4 4
39060 ++4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39061 ++6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
39062 ++4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
39063 ++4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
39064 ++205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
39065 ++220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
39066 ++174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
39067 ++6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
39068 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39069 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39070 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39071 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39072 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39073 ++4 4 4 4 4 4
39074 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39075 ++5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
39076 ++6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39077 ++4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
39078 ++220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
39079 ++190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
39080 ++193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
39081 ++4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
39082 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39083 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39084 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39085 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39086 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39087 ++4 4 4 4 4 4
39088 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39089 ++4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
39090 ++4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
39091 ++6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
39092 ++174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
39093 ++193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
39094 ++193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
39095 ++6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
39096 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39097 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39098 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39099 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39100 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39101 ++4 4 4 4 4 4
39102 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39103 ++4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
39104 ++5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
39105 ++5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39106 ++6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
39107 ++193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
39108 ++60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
39109 ++5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39110 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39111 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39112 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39113 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39114 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39115 ++4 4 4 4 4 4
39116 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39117 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39118 ++4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39119 ++5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
39120 ++4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
39121 ++193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
39122 ++6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
39123 ++4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39124 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39125 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39126 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39127 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39128 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39129 ++4 4 4 4 4 4
39130 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39131 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39132 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39133 ++4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39134 ++6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
39135 ++153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
39136 ++6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
39137 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39138 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39139 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39140 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39141 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39142 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39143 ++4 4 4 4 4 4
39144 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39145 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39146 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39147 ++4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39148 ++6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39149 ++24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
39150 ++6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
39151 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39152 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39153 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39154 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39155 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39156 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39157 ++4 4 4 4 4 4
39158 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39159 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39160 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39161 ++4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39162 ++4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39163 ++6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39164 ++4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
39165 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39166 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39167 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39168 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39169 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39170 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39171 ++4 4 4 4 4 4
39172 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39173 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39174 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39175 ++4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
39176 ++5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
39177 ++6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
39178 ++6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
39179 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39180 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39181 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39182 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39183 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39184 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39185 ++4 4 4 4 4 4
39186 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39187 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39188 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39189 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39190 ++4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
39191 ++4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39192 ++6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39193 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39194 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39195 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39196 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39197 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39198 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39199 ++4 4 4 4 4 4
39200 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39201 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39202 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39203 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39204 ++4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
39205 ++6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
39206 ++4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39207 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39208 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39209 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39210 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39211 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39212 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39213 ++4 4 4 4 4 4
39214 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39215 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39216 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39217 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39218 ++4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
39219 ++4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
39220 ++5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39221 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39222 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39223 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39224 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39225 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39226 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39227 ++4 4 4 4 4 4
39228 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39229 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39230 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39231 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39232 ++4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
39233 ++5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
39234 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39235 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39236 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39237 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39238 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39239 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39240 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39241 ++4 4 4 4 4 4
39242 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39243 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39244 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39245 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39246 ++4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39247 ++5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
39248 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39249 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39250 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39251 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39252 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39253 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39254 ++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39255 ++4 4 4 4 4 4
39256 +diff -urNp linux-3.1.1/drivers/video/udlfb.c linux-3.1.1/drivers/video/udlfb.c
39257 +--- linux-3.1.1/drivers/video/udlfb.c 2011-11-11 15:19:27.000000000 -0500
39258 ++++ linux-3.1.1/drivers/video/udlfb.c 2011-11-16 18:39:08.000000000 -0500
39259 +@@ -585,11 +585,11 @@ int dlfb_handle_damage(struct dlfb_data
39260 + dlfb_urb_completion(urb);
39261 +
39262 + error:
39263 +- atomic_add(bytes_sent, &dev->bytes_sent);
39264 +- atomic_add(bytes_identical, &dev->bytes_identical);
39265 +- atomic_add(width*height*2, &dev->bytes_rendered);
39266 ++ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39267 ++ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39268 ++ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39269 + end_cycles = get_cycles();
39270 +- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39271 ++ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39272 + >> 10)), /* Kcycles */
39273 + &dev->cpu_kcycles_used);
39274 +
39275 +@@ -710,11 +710,11 @@ static void dlfb_dpy_deferred_io(struct
39276 + dlfb_urb_completion(urb);
39277 +
39278 + error:
39279 +- atomic_add(bytes_sent, &dev->bytes_sent);
39280 +- atomic_add(bytes_identical, &dev->bytes_identical);
39281 +- atomic_add(bytes_rendered, &dev->bytes_rendered);
39282 ++ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39283 ++ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39284 ++ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39285 + end_cycles = get_cycles();
39286 +- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39287 ++ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39288 + >> 10)), /* Kcycles */
39289 + &dev->cpu_kcycles_used);
39290 + }
39291 +@@ -1306,7 +1306,7 @@ static ssize_t metrics_bytes_rendered_sh
39292 + struct fb_info *fb_info = dev_get_drvdata(fbdev);
39293 + struct dlfb_data *dev = fb_info->par;
39294 + return snprintf(buf, PAGE_SIZE, "%u\n",
39295 +- atomic_read(&dev->bytes_rendered));
39296 ++ atomic_read_unchecked(&dev->bytes_rendered));
39297 + }
39298 +
39299 + static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39300 +@@ -1314,7 +1314,7 @@ static ssize_t metrics_bytes_identical_s
39301 + struct fb_info *fb_info = dev_get_drvdata(fbdev);
39302 + struct dlfb_data *dev = fb_info->par;
39303 + return snprintf(buf, PAGE_SIZE, "%u\n",
39304 +- atomic_read(&dev->bytes_identical));
39305 ++ atomic_read_unchecked(&dev->bytes_identical));
39306 + }
39307 +
39308 + static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39309 +@@ -1322,7 +1322,7 @@ static ssize_t metrics_bytes_sent_show(s
39310 + struct fb_info *fb_info = dev_get_drvdata(fbdev);
39311 + struct dlfb_data *dev = fb_info->par;
39312 + return snprintf(buf, PAGE_SIZE, "%u\n",
39313 +- atomic_read(&dev->bytes_sent));
39314 ++ atomic_read_unchecked(&dev->bytes_sent));
39315 + }
39316 +
39317 + static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39318 +@@ -1330,7 +1330,7 @@ static ssize_t metrics_cpu_kcycles_used_
39319 + struct fb_info *fb_info = dev_get_drvdata(fbdev);
39320 + struct dlfb_data *dev = fb_info->par;
39321 + return snprintf(buf, PAGE_SIZE, "%u\n",
39322 +- atomic_read(&dev->cpu_kcycles_used));
39323 ++ atomic_read_unchecked(&dev->cpu_kcycles_used));
39324 + }
39325 +
39326 + static ssize_t edid_show(
39327 +@@ -1387,10 +1387,10 @@ static ssize_t metrics_reset_store(struc
39328 + struct fb_info *fb_info = dev_get_drvdata(fbdev);
39329 + struct dlfb_data *dev = fb_info->par;
39330 +
39331 +- atomic_set(&dev->bytes_rendered, 0);
39332 +- atomic_set(&dev->bytes_identical, 0);
39333 +- atomic_set(&dev->bytes_sent, 0);
39334 +- atomic_set(&dev->cpu_kcycles_used, 0);
39335 ++ atomic_set_unchecked(&dev->bytes_rendered, 0);
39336 ++ atomic_set_unchecked(&dev->bytes_identical, 0);
39337 ++ atomic_set_unchecked(&dev->bytes_sent, 0);
39338 ++ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39339 +
39340 + return count;
39341 + }
39342 +diff -urNp linux-3.1.1/drivers/video/uvesafb.c linux-3.1.1/drivers/video/uvesafb.c
39343 +--- linux-3.1.1/drivers/video/uvesafb.c 2011-11-11 15:19:27.000000000 -0500
39344 ++++ linux-3.1.1/drivers/video/uvesafb.c 2011-11-16 18:39:08.000000000 -0500
39345 +@@ -19,6 +19,7 @@
39346 + #include <linux/io.h>
39347 + #include <linux/mutex.h>
39348 + #include <linux/slab.h>
39349 ++#include <linux/moduleloader.h>
39350 + #include <video/edid.h>
39351 + #include <video/uvesafb.h>
39352 + #ifdef CONFIG_X86
39353 +@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39354 + NULL,
39355 + };
39356 +
39357 +- return call_usermodehelper(v86d_path, argv, envp, 1);
39358 ++ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39359 + }
39360 +
39361 + /*
39362 +@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
39363 + if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39364 + par->pmi_setpal = par->ypan = 0;
39365 + } else {
39366 ++
39367 ++#ifdef CONFIG_PAX_KERNEXEC
39368 ++#ifdef CONFIG_MODULES
39369 ++ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39370 ++#endif
39371 ++ if (!par->pmi_code) {
39372 ++ par->pmi_setpal = par->ypan = 0;
39373 ++ return 0;
39374 ++ }
39375 ++#endif
39376 ++
39377 + par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39378 + + task->t.regs.edi);
39379 ++
39380 ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39381 ++ pax_open_kernel();
39382 ++ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39383 ++ pax_close_kernel();
39384 ++
39385 ++ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39386 ++ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39387 ++#else
39388 + par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39389 + par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39390 ++#endif
39391 ++
39392 + printk(KERN_INFO "uvesafb: protected mode interface info at "
39393 + "%04x:%04x\n",
39394 + (u16)task->t.regs.es, (u16)task->t.regs.edi);
39395 +@@ -1821,6 +1844,11 @@ out:
39396 + if (par->vbe_modes)
39397 + kfree(par->vbe_modes);
39398 +
39399 ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39400 ++ if (par->pmi_code)
39401 ++ module_free_exec(NULL, par->pmi_code);
39402 ++#endif
39403 ++
39404 + framebuffer_release(info);
39405 + return err;
39406 + }
39407 +@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
39408 + kfree(par->vbe_state_orig);
39409 + if (par->vbe_state_saved)
39410 + kfree(par->vbe_state_saved);
39411 ++
39412 ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39413 ++ if (par->pmi_code)
39414 ++ module_free_exec(NULL, par->pmi_code);
39415 ++#endif
39416 ++
39417 + }
39418 +
39419 + framebuffer_release(info);
39420 +diff -urNp linux-3.1.1/drivers/video/vesafb.c linux-3.1.1/drivers/video/vesafb.c
39421 +--- linux-3.1.1/drivers/video/vesafb.c 2011-11-11 15:19:27.000000000 -0500
39422 ++++ linux-3.1.1/drivers/video/vesafb.c 2011-11-16 18:39:08.000000000 -0500
39423 +@@ -9,6 +9,7 @@
39424 + */
39425 +
39426 + #include <linux/module.h>
39427 ++#include <linux/moduleloader.h>
39428 + #include <linux/kernel.h>
39429 + #include <linux/errno.h>
39430 + #include <linux/string.h>
39431 +@@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
39432 + static int vram_total __initdata; /* Set total amount of memory */
39433 + static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39434 + static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39435 +-static void (*pmi_start)(void) __read_mostly;
39436 +-static void (*pmi_pal) (void) __read_mostly;
39437 ++static void (*pmi_start)(void) __read_only;
39438 ++static void (*pmi_pal) (void) __read_only;
39439 + static int depth __read_mostly;
39440 + static int vga_compat __read_mostly;
39441 + /* --------------------------------------------------------------------- */
39442 +@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
39443 + unsigned int size_vmode;
39444 + unsigned int size_remap;
39445 + unsigned int size_total;
39446 ++ void *pmi_code = NULL;
39447 +
39448 + if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39449 + return -ENODEV;
39450 +@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
39451 + size_remap = size_total;
39452 + vesafb_fix.smem_len = size_remap;
39453 +
39454 +-#ifndef __i386__
39455 +- screen_info.vesapm_seg = 0;
39456 +-#endif
39457 +-
39458 + if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39459 + printk(KERN_WARNING
39460 + "vesafb: cannot reserve video memory at 0x%lx\n",
39461 +@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct pl
39462 + printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39463 + vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39464 +
39465 ++#ifdef __i386__
39466 ++
39467 ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39468 ++ pmi_code = module_alloc_exec(screen_info.vesapm_size);
39469 ++ if (!pmi_code)
39470 ++#elif !defined(CONFIG_PAX_KERNEXEC)
39471 ++ if (0)
39472 ++#endif
39473 ++
39474 ++#endif
39475 ++ screen_info.vesapm_seg = 0;
39476 ++
39477 + if (screen_info.vesapm_seg) {
39478 +- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39479 +- screen_info.vesapm_seg,screen_info.vesapm_off);
39480 ++ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39481 ++ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39482 + }
39483 +
39484 + if (screen_info.vesapm_seg < 0xc000)
39485 +@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct pl
39486 +
39487 + if (ypan || pmi_setpal) {
39488 + unsigned short *pmi_base;
39489 ++
39490 + pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39491 +- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39492 +- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39493 ++
39494 ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39495 ++ pax_open_kernel();
39496 ++ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39497 ++#else
39498 ++ pmi_code = pmi_base;
39499 ++#endif
39500 ++
39501 ++ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39502 ++ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39503 ++
39504 ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39505 ++ pmi_start = ktva_ktla(pmi_start);
39506 ++ pmi_pal = ktva_ktla(pmi_pal);
39507 ++ pax_close_kernel();
39508 ++#endif
39509 ++
39510 + printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39511 + if (pmi_base[3]) {
39512 + printk(KERN_INFO "vesafb: pmi: ports = ");
39513 +@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct pl
39514 + info->node, info->fix.id);
39515 + return 0;
39516 + err:
39517 ++
39518 ++#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39519 ++ module_free_exec(NULL, pmi_code);
39520 ++#endif
39521 ++
39522 + if (info->screen_base)
39523 + iounmap(info->screen_base);
39524 + framebuffer_release(info);
39525 +diff -urNp linux-3.1.1/drivers/video/via/via_clock.h linux-3.1.1/drivers/video/via/via_clock.h
39526 +--- linux-3.1.1/drivers/video/via/via_clock.h 2011-11-11 15:19:27.000000000 -0500
39527 ++++ linux-3.1.1/drivers/video/via/via_clock.h 2011-11-16 18:39:08.000000000 -0500
39528 +@@ -56,7 +56,7 @@ struct via_clock {
39529 +
39530 + void (*set_engine_pll_state)(u8 state);
39531 + void (*set_engine_pll)(struct via_pll_config config);
39532 +-};
39533 ++} __no_const;
39534 +
39535 +
39536 + static inline u32 get_pll_internal_frequency(u32 ref_freq,
39537 +diff -urNp linux-3.1.1/drivers/virtio/virtio_balloon.c linux-3.1.1/drivers/virtio/virtio_balloon.c
39538 +--- linux-3.1.1/drivers/virtio/virtio_balloon.c 2011-11-11 15:19:27.000000000 -0500
39539 ++++ linux-3.1.1/drivers/virtio/virtio_balloon.c 2011-11-16 18:40:29.000000000 -0500
39540 +@@ -174,6 +174,8 @@ static void update_balloon_stats(struct
39541 + struct sysinfo i;
39542 + int idx = 0;
39543 +
39544 ++ pax_track_stack();
39545 ++
39546 + all_vm_events(events);
39547 + si_meminfo(&i);
39548 +
39549 +diff -urNp linux-3.1.1/drivers/xen/xen-pciback/conf_space.h linux-3.1.1/drivers/xen/xen-pciback/conf_space.h
39550 +--- linux-3.1.1/drivers/xen/xen-pciback/conf_space.h 2011-11-11 15:19:27.000000000 -0500
39551 ++++ linux-3.1.1/drivers/xen/xen-pciback/conf_space.h 2011-11-16 18:39:08.000000000 -0500
39552 +@@ -44,15 +44,15 @@ struct config_field {
39553 + struct {
39554 + conf_dword_write write;
39555 + conf_dword_read read;
39556 +- } dw;
39557 ++ } __no_const dw;
39558 + struct {
39559 + conf_word_write write;
39560 + conf_word_read read;
39561 +- } w;
39562 ++ } __no_const w;
39563 + struct {
39564 + conf_byte_write write;
39565 + conf_byte_read read;
39566 +- } b;
39567 ++ } __no_const b;
39568 + } u;
39569 + struct list_head list;
39570 + };
39571 +diff -urNp linux-3.1.1/fs/9p/vfs_inode.c linux-3.1.1/fs/9p/vfs_inode.c
39572 +--- linux-3.1.1/fs/9p/vfs_inode.c 2011-11-11 15:19:27.000000000 -0500
39573 ++++ linux-3.1.1/fs/9p/vfs_inode.c 2011-11-16 18:39:08.000000000 -0500
39574 +@@ -1288,7 +1288,7 @@ static void *v9fs_vfs_follow_link(struct
39575 + void
39576 + v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39577 + {
39578 +- char *s = nd_get_link(nd);
39579 ++ const char *s = nd_get_link(nd);
39580 +
39581 + P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39582 + IS_ERR(s) ? "<error>" : s);
39583 +diff -urNp linux-3.1.1/fs/aio.c linux-3.1.1/fs/aio.c
39584 +--- linux-3.1.1/fs/aio.c 2011-11-11 15:19:27.000000000 -0500
39585 ++++ linux-3.1.1/fs/aio.c 2011-11-16 18:40:29.000000000 -0500
39586 +@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
39587 + size += sizeof(struct io_event) * nr_events;
39588 + nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39589 +
39590 +- if (nr_pages < 0)
39591 ++ if (nr_pages <= 0)
39592 + return -EINVAL;
39593 +
39594 + nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39595 +@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
39596 + struct aio_timeout to;
39597 + int retry = 0;
39598 +
39599 ++ pax_track_stack();
39600 ++
39601 + /* needed to zero any padding within an entry (there shouldn't be
39602 + * any, but C is fun!
39603 + */
39604 +@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
39605 + static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39606 + {
39607 + ssize_t ret;
39608 ++ struct iovec iovstack;
39609 +
39610 + #ifdef CONFIG_COMPAT
39611 + if (compat)
39612 + ret = compat_rw_copy_check_uvector(type,
39613 + (struct compat_iovec __user *)kiocb->ki_buf,
39614 +- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39615 ++ kiocb->ki_nbytes, 1, &iovstack,
39616 + &kiocb->ki_iovec);
39617 + else
39618 + #endif
39619 + ret = rw_copy_check_uvector(type,
39620 + (struct iovec __user *)kiocb->ki_buf,
39621 +- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39622 ++ kiocb->ki_nbytes, 1, &iovstack,
39623 + &kiocb->ki_iovec);
39624 + if (ret < 0)
39625 + goto out;
39626 +
39627 ++ if (kiocb->ki_iovec == &iovstack) {
39628 ++ kiocb->ki_inline_vec = iovstack;
39629 ++ kiocb->ki_iovec = &kiocb->ki_inline_vec;
39630 ++ }
39631 + kiocb->ki_nr_segs = kiocb->ki_nbytes;
39632 + kiocb->ki_cur_seg = 0;
39633 + /* ki_nbytes/left now reflect bytes instead of segs */
39634 +diff -urNp linux-3.1.1/fs/attr.c linux-3.1.1/fs/attr.c
39635 +--- linux-3.1.1/fs/attr.c 2011-11-11 15:19:27.000000000 -0500
39636 ++++ linux-3.1.1/fs/attr.c 2011-11-16 18:40:29.000000000 -0500
39637 +@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
39638 + unsigned long limit;
39639 +
39640 + limit = rlimit(RLIMIT_FSIZE);
39641 ++ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39642 + if (limit != RLIM_INFINITY && offset > limit)
39643 + goto out_sig;
39644 + if (offset > inode->i_sb->s_maxbytes)
39645 +diff -urNp linux-3.1.1/fs/autofs4/waitq.c linux-3.1.1/fs/autofs4/waitq.c
39646 +--- linux-3.1.1/fs/autofs4/waitq.c 2011-11-11 15:19:27.000000000 -0500
39647 ++++ linux-3.1.1/fs/autofs4/waitq.c 2011-11-16 18:39:08.000000000 -0500
39648 +@@ -60,7 +60,7 @@ static int autofs4_write(struct file *fi
39649 + {
39650 + unsigned long sigpipe, flags;
39651 + mm_segment_t fs;
39652 +- const char *data = (const char *)addr;
39653 ++ const char __user *data = (const char __force_user *)addr;
39654 + ssize_t wr = 0;
39655 +
39656 + /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39657 +diff -urNp linux-3.1.1/fs/befs/linuxvfs.c linux-3.1.1/fs/befs/linuxvfs.c
39658 +--- linux-3.1.1/fs/befs/linuxvfs.c 2011-11-11 15:19:27.000000000 -0500
39659 ++++ linux-3.1.1/fs/befs/linuxvfs.c 2011-11-16 18:39:08.000000000 -0500
39660 +@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry
39661 + {
39662 + befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39663 + if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39664 +- char *link = nd_get_link(nd);
39665 ++ const char *link = nd_get_link(nd);
39666 + if (!IS_ERR(link))
39667 + kfree(link);
39668 + }
39669 +diff -urNp linux-3.1.1/fs/binfmt_aout.c linux-3.1.1/fs/binfmt_aout.c
39670 +--- linux-3.1.1/fs/binfmt_aout.c 2011-11-11 15:19:27.000000000 -0500
39671 ++++ linux-3.1.1/fs/binfmt_aout.c 2011-11-16 18:40:29.000000000 -0500
39672 +@@ -16,6 +16,7 @@
39673 + #include <linux/string.h>
39674 + #include <linux/fs.h>
39675 + #include <linux/file.h>
39676 ++#include <linux/security.h>
39677 + #include <linux/stat.h>
39678 + #include <linux/fcntl.h>
39679 + #include <linux/ptrace.h>
39680 +@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
39681 + #endif
39682 + # define START_STACK(u) ((void __user *)u.start_stack)
39683 +
39684 ++ memset(&dump, 0, sizeof(dump));
39685 ++
39686 + fs = get_fs();
39687 + set_fs(KERNEL_DS);
39688 + has_dumped = 1;
39689 +@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
39690 +
39691 + /* If the size of the dump file exceeds the rlimit, then see what would happen
39692 + if we wrote the stack, but not the data area. */
39693 ++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39694 + if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39695 + dump.u_dsize = 0;
39696 +
39697 + /* Make sure we have enough room to write the stack and data areas. */
39698 ++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39699 + if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39700 + dump.u_ssize = 0;
39701 +
39702 +@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
39703 + rlim = rlimit(RLIMIT_DATA);
39704 + if (rlim >= RLIM_INFINITY)
39705 + rlim = ~0;
39706 ++
39707 ++ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39708 + if (ex.a_data + ex.a_bss > rlim)
39709 + return -ENOMEM;
39710 +
39711 +@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
39712 + install_exec_creds(bprm);
39713 + current->flags &= ~PF_FORKNOEXEC;
39714 +
39715 ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39716 ++ current->mm->pax_flags = 0UL;
39717 ++#endif
39718 ++
39719 ++#ifdef CONFIG_PAX_PAGEEXEC
39720 ++ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39721 ++ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39722 ++
39723 ++#ifdef CONFIG_PAX_EMUTRAMP
39724 ++ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39725 ++ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39726 ++#endif
39727 ++
39728 ++#ifdef CONFIG_PAX_MPROTECT
39729 ++ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39730 ++ current->mm->pax_flags |= MF_PAX_MPROTECT;
39731 ++#endif
39732 ++
39733 ++ }
39734 ++#endif
39735 ++
39736 + if (N_MAGIC(ex) == OMAGIC) {
39737 + unsigned long text_addr, map_size;
39738 + loff_t pos;
39739 +@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
39740 +
39741 + down_write(&current->mm->mmap_sem);
39742 + error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39743 +- PROT_READ | PROT_WRITE | PROT_EXEC,
39744 ++ PROT_READ | PROT_WRITE,
39745 + MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39746 + fd_offset + ex.a_text);
39747 + up_write(&current->mm->mmap_sem);
39748 +diff -urNp linux-3.1.1/fs/binfmt_elf.c linux-3.1.1/fs/binfmt_elf.c
39749 +--- linux-3.1.1/fs/binfmt_elf.c 2011-11-11 15:19:27.000000000 -0500
39750 ++++ linux-3.1.1/fs/binfmt_elf.c 2011-11-16 18:40:29.000000000 -0500
39751 +@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
39752 + #define elf_core_dump NULL
39753 + #endif
39754 +
39755 ++#ifdef CONFIG_PAX_MPROTECT
39756 ++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39757 ++#endif
39758 ++
39759 + #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39760 + #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39761 + #else
39762 +@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
39763 + .load_binary = load_elf_binary,
39764 + .load_shlib = load_elf_library,
39765 + .core_dump = elf_core_dump,
39766 ++
39767 ++#ifdef CONFIG_PAX_MPROTECT
39768 ++ .handle_mprotect= elf_handle_mprotect,
39769 ++#endif
39770 ++
39771 + .min_coredump = ELF_EXEC_PAGESIZE,
39772 + };
39773 +
39774 +@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
39775 +
39776 + static int set_brk(unsigned long start, unsigned long end)
39777 + {
39778 ++ unsigned long e = end;
39779 ++
39780 + start = ELF_PAGEALIGN(start);
39781 + end = ELF_PAGEALIGN(end);
39782 + if (end > start) {
39783 +@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
39784 + if (BAD_ADDR(addr))
39785 + return addr;
39786 + }
39787 +- current->mm->start_brk = current->mm->brk = end;
39788 ++ current->mm->start_brk = current->mm->brk = e;
39789 + return 0;
39790 + }
39791 +
39792 +@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
39793 + elf_addr_t __user *u_rand_bytes;
39794 + const char *k_platform = ELF_PLATFORM;
39795 + const char *k_base_platform = ELF_BASE_PLATFORM;
39796 +- unsigned char k_rand_bytes[16];
39797 ++ u32 k_rand_bytes[4];
39798 + int items;
39799 + elf_addr_t *elf_info;
39800 + int ei_index = 0;
39801 + const struct cred *cred = current_cred();
39802 + struct vm_area_struct *vma;
39803 ++ unsigned long saved_auxv[AT_VECTOR_SIZE];
39804 ++
39805 ++ pax_track_stack();
39806 +
39807 + /*
39808 + * In some cases (e.g. Hyper-Threading), we want to avoid L1
39809 +@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
39810 + * Generate 16 random bytes for userspace PRNG seeding.
39811 + */
39812 + get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39813 +- u_rand_bytes = (elf_addr_t __user *)
39814 +- STACK_ALLOC(p, sizeof(k_rand_bytes));
39815 ++ srandom32(k_rand_bytes[0] ^ random32());
39816 ++ srandom32(k_rand_bytes[1] ^ random32());
39817 ++ srandom32(k_rand_bytes[2] ^ random32());
39818 ++ srandom32(k_rand_bytes[3] ^ random32());
39819 ++ p = STACK_ROUND(p, sizeof(k_rand_bytes));
39820 ++ u_rand_bytes = (elf_addr_t __user *) p;
39821 + if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39822 + return -EFAULT;
39823 +
39824 +@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
39825 + return -EFAULT;
39826 + current->mm->env_end = p;
39827 +
39828 ++ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39829 ++
39830 + /* Put the elf_info on the stack in the right place. */
39831 + sp = (elf_addr_t __user *)envp + 1;
39832 +- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39833 ++ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39834 + return -EFAULT;
39835 + return 0;
39836 + }
39837 +@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
39838 + {
39839 + struct elf_phdr *elf_phdata;
39840 + struct elf_phdr *eppnt;
39841 +- unsigned long load_addr = 0;
39842 ++ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39843 + int load_addr_set = 0;
39844 + unsigned long last_bss = 0, elf_bss = 0;
39845 +- unsigned long error = ~0UL;
39846 ++ unsigned long error = -EINVAL;
39847 + unsigned long total_size;
39848 + int retval, i, size;
39849 +
39850 +@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
39851 + goto out_close;
39852 + }
39853 +
39854 ++#ifdef CONFIG_PAX_SEGMEXEC
39855 ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39856 ++ pax_task_size = SEGMEXEC_TASK_SIZE;
39857 ++#endif
39858 ++
39859 + eppnt = elf_phdata;
39860 + for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39861 + if (eppnt->p_type == PT_LOAD) {
39862 +@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
39863 + k = load_addr + eppnt->p_vaddr;
39864 + if (BAD_ADDR(k) ||
39865 + eppnt->p_filesz > eppnt->p_memsz ||
39866 +- eppnt->p_memsz > TASK_SIZE ||
39867 +- TASK_SIZE - eppnt->p_memsz < k) {
39868 ++ eppnt->p_memsz > pax_task_size ||
39869 ++ pax_task_size - eppnt->p_memsz < k) {
39870 + error = -ENOMEM;
39871 + goto out_close;
39872 + }
39873 +@@ -528,6 +553,193 @@ out:
39874 + return error;
39875 + }
39876 +
39877 ++#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
39878 ++static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
39879 ++{
39880 ++ unsigned long pax_flags = 0UL;
39881 ++
39882 ++#ifdef CONFIG_PAX_PAGEEXEC
39883 ++ if (elf_phdata->p_flags & PF_PAGEEXEC)
39884 ++ pax_flags |= MF_PAX_PAGEEXEC;
39885 ++#endif
39886 ++
39887 ++#ifdef CONFIG_PAX_SEGMEXEC
39888 ++ if (elf_phdata->p_flags & PF_SEGMEXEC)
39889 ++ pax_flags |= MF_PAX_SEGMEXEC;
39890 ++#endif
39891 ++
39892 ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39893 ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39894 ++ if ((__supported_pte_mask & _PAGE_NX))
39895 ++ pax_flags &= ~MF_PAX_SEGMEXEC;
39896 ++ else
39897 ++ pax_flags &= ~MF_PAX_PAGEEXEC;
39898 ++ }
39899 ++#endif
39900 ++
39901 ++#ifdef CONFIG_PAX_EMUTRAMP
39902 ++ if (elf_phdata->p_flags & PF_EMUTRAMP)
39903 ++ pax_flags |= MF_PAX_EMUTRAMP;
39904 ++#endif
39905 ++
39906 ++#ifdef CONFIG_PAX_MPROTECT
39907 ++ if (elf_phdata->p_flags & PF_MPROTECT)
39908 ++ pax_flags |= MF_PAX_MPROTECT;
39909 ++#endif
39910 ++
39911 ++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39912 ++ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39913 ++ pax_flags |= MF_PAX_RANDMMAP;
39914 ++#endif
39915 ++
39916 ++ return pax_flags;
39917 ++}
39918 ++#endif
39919 ++
39920 ++#ifdef CONFIG_PAX_PT_PAX_FLAGS
39921 ++static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
39922 ++{
39923 ++ unsigned long pax_flags = 0UL;
39924 ++
39925 ++#ifdef CONFIG_PAX_PAGEEXEC
39926 ++ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39927 ++ pax_flags |= MF_PAX_PAGEEXEC;
39928 ++#endif
39929 ++
39930 ++#ifdef CONFIG_PAX_SEGMEXEC
39931 ++ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39932 ++ pax_flags |= MF_PAX_SEGMEXEC;
39933 ++#endif
39934 ++
39935 ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39936 ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39937 ++ if ((__supported_pte_mask & _PAGE_NX))
39938 ++ pax_flags &= ~MF_PAX_SEGMEXEC;
39939 ++ else
39940 ++ pax_flags &= ~MF_PAX_PAGEEXEC;
39941 ++ }
39942 ++#endif
39943 ++
39944 ++#ifdef CONFIG_PAX_EMUTRAMP
39945 ++ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39946 ++ pax_flags |= MF_PAX_EMUTRAMP;
39947 ++#endif
39948 ++
39949 ++#ifdef CONFIG_PAX_MPROTECT
39950 ++ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39951 ++ pax_flags |= MF_PAX_MPROTECT;
39952 ++#endif
39953 ++
39954 ++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39955 ++ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39956 ++ pax_flags |= MF_PAX_RANDMMAP;
39957 ++#endif
39958 ++
39959 ++ return pax_flags;
39960 ++}
39961 ++#endif
39962 ++
39963 ++#ifdef CONFIG_PAX_EI_PAX
39964 ++static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39965 ++{
39966 ++ unsigned long pax_flags = 0UL;
39967 ++
39968 ++#ifdef CONFIG_PAX_PAGEEXEC
39969 ++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39970 ++ pax_flags |= MF_PAX_PAGEEXEC;
39971 ++#endif
39972 ++
39973 ++#ifdef CONFIG_PAX_SEGMEXEC
39974 ++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39975 ++ pax_flags |= MF_PAX_SEGMEXEC;
39976 ++#endif
39977 ++
39978 ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39979 ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39980 ++ if ((__supported_pte_mask & _PAGE_NX))
39981 ++ pax_flags &= ~MF_PAX_SEGMEXEC;
39982 ++ else
39983 ++ pax_flags &= ~MF_PAX_PAGEEXEC;
39984 ++ }
39985 ++#endif
39986 ++
39987 ++#ifdef CONFIG_PAX_EMUTRAMP
39988 ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39989 ++ pax_flags |= MF_PAX_EMUTRAMP;
39990 ++#endif
39991 ++
39992 ++#ifdef CONFIG_PAX_MPROTECT
39993 ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39994 ++ pax_flags |= MF_PAX_MPROTECT;
39995 ++#endif
39996 ++
39997 ++#ifdef CONFIG_PAX_ASLR
39998 ++ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39999 ++ pax_flags |= MF_PAX_RANDMMAP;
40000 ++#endif
40001 ++
40002 ++ return pax_flags;
40003 ++}
40004 ++#endif
40005 ++
40006 ++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
40007 ++static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
40008 ++{
40009 ++ unsigned long pax_flags = 0UL;
40010 ++
40011 ++#ifdef CONFIG_PAX_PT_PAX_FLAGS
40012 ++ unsigned long i;
40013 ++ int found_flags = 0;
40014 ++#endif
40015 ++
40016 ++#ifdef CONFIG_PAX_EI_PAX
40017 ++ pax_flags = pax_parse_ei_pax(elf_ex);
40018 ++#endif
40019 ++
40020 ++#ifdef CONFIG_PAX_PT_PAX_FLAGS
40021 ++ for (i = 0UL; i < elf_ex->e_phnum; i++)
40022 ++ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
40023 ++ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
40024 ++ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
40025 ++ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
40026 ++ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
40027 ++ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
40028 ++ return -EINVAL;
40029 ++
40030 ++#ifdef CONFIG_PAX_SOFTMODE
40031 ++ if (pax_softmode)
40032 ++ pax_flags = pax_parse_softmode(&elf_phdata[i]);
40033 ++ else
40034 ++#endif
40035 ++
40036 ++ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
40037 ++ found_flags = 1;
40038 ++ break;
40039 ++ }
40040 ++#endif
40041 ++
40042 ++#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
40043 ++ if (found_flags == 0) {
40044 ++ struct elf_phdr phdr;
40045 ++ memset(&phdr, 0, sizeof(phdr));
40046 ++ phdr.p_flags = PF_NOEMUTRAMP;
40047 ++#ifdef CONFIG_PAX_SOFTMODE
40048 ++ if (pax_softmode)
40049 ++ pax_flags = pax_parse_softmode(&phdr);
40050 ++ else
40051 ++#endif
40052 ++ pax_flags = pax_parse_hardmode(&phdr);
40053 ++ }
40054 ++#endif
40055 ++
40056 ++ if (0 > pax_check_flags(&pax_flags))
40057 ++ return -EINVAL;
40058 ++
40059 ++ current->mm->pax_flags = pax_flags;
40060 ++ return 0;
40061 ++}
40062 ++#endif
40063 ++
40064 + /*
40065 + * These are the functions used to load ELF style executables and shared
40066 + * libraries. There is no binary dependent code anywhere else.
40067 +@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
40068 + {
40069 + unsigned int random_variable = 0;
40070 +
40071 ++#ifdef CONFIG_PAX_RANDUSTACK
40072 ++ if (randomize_va_space)
40073 ++ return stack_top - current->mm->delta_stack;
40074 ++#endif
40075 ++
40076 + if ((current->flags & PF_RANDOMIZE) &&
40077 + !(current->personality & ADDR_NO_RANDOMIZE)) {
40078 + random_variable = get_random_int() & STACK_RND_MASK;
40079 +@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
40080 + unsigned long load_addr = 0, load_bias = 0;
40081 + int load_addr_set = 0;
40082 + char * elf_interpreter = NULL;
40083 +- unsigned long error;
40084 ++ unsigned long error = 0;
40085 + struct elf_phdr *elf_ppnt, *elf_phdata;
40086 + unsigned long elf_bss, elf_brk;
40087 + int retval, i;
40088 +@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
40089 + unsigned long start_code, end_code, start_data, end_data;
40090 + unsigned long reloc_func_desc __maybe_unused = 0;
40091 + int executable_stack = EXSTACK_DEFAULT;
40092 +- unsigned long def_flags = 0;
40093 + struct {
40094 + struct elfhdr elf_ex;
40095 + struct elfhdr interp_elf_ex;
40096 + } *loc;
40097 ++ unsigned long pax_task_size = TASK_SIZE;
40098 +
40099 + loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40100 + if (!loc) {
40101 +@@ -713,11 +930,81 @@ static int load_elf_binary(struct linux_
40102 +
40103 + /* OK, This is the point of no return */
40104 + current->flags &= ~PF_FORKNOEXEC;
40105 +- current->mm->def_flags = def_flags;
40106 ++
40107 ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40108 ++ current->mm->pax_flags = 0UL;
40109 ++#endif
40110 ++
40111 ++#ifdef CONFIG_PAX_DLRESOLVE
40112 ++ current->mm->call_dl_resolve = 0UL;
40113 ++#endif
40114 ++
40115 ++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40116 ++ current->mm->call_syscall = 0UL;
40117 ++#endif
40118 ++
40119 ++#ifdef CONFIG_PAX_ASLR
40120 ++ current->mm->delta_mmap = 0UL;
40121 ++ current->mm->delta_stack = 0UL;
40122 ++#endif
40123 ++
40124 ++ current->mm->def_flags = 0;
40125 ++
40126 ++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
40127 ++ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
40128 ++ send_sig(SIGKILL, current, 0);
40129 ++ goto out_free_dentry;
40130 ++ }
40131 ++#endif
40132 ++
40133 ++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40134 ++ pax_set_initial_flags(bprm);
40135 ++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40136 ++ if (pax_set_initial_flags_func)
40137 ++ (pax_set_initial_flags_func)(bprm);
40138 ++#endif
40139 ++
40140 ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40141 ++ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40142 ++ current->mm->context.user_cs_limit = PAGE_SIZE;
40143 ++ current->mm->def_flags |= VM_PAGEEXEC;
40144 ++ }
40145 ++#endif
40146 ++
40147 ++#ifdef CONFIG_PAX_SEGMEXEC
40148 ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40149 ++ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40150 ++ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40151 ++ pax_task_size = SEGMEXEC_TASK_SIZE;
40152 ++ current->mm->def_flags |= VM_NOHUGEPAGE;
40153 ++ }
40154 ++#endif
40155 ++
40156 ++#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40157 ++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40158 ++ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40159 ++ put_cpu();
40160 ++ }
40161 ++#endif
40162 +
40163 + /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40164 + may depend on the personality. */
40165 + SET_PERSONALITY(loc->elf_ex);
40166 ++
40167 ++#ifdef CONFIG_PAX_ASLR
40168 ++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40169 ++ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40170 ++ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40171 ++ }
40172 ++#endif
40173 ++
40174 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40175 ++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40176 ++ executable_stack = EXSTACK_DISABLE_X;
40177 ++ current->personality &= ~READ_IMPLIES_EXEC;
40178 ++ } else
40179 ++#endif
40180 ++
40181 + if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40182 + current->personality |= READ_IMPLIES_EXEC;
40183 +
40184 +@@ -808,6 +1095,20 @@ static int load_elf_binary(struct linux_
40185 + #else
40186 + load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40187 + #endif
40188 ++
40189 ++#ifdef CONFIG_PAX_RANDMMAP
40190 ++ /* PaX: randomize base address at the default exe base if requested */
40191 ++ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40192 ++#ifdef CONFIG_SPARC64
40193 ++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40194 ++#else
40195 ++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40196 ++#endif
40197 ++ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40198 ++ elf_flags |= MAP_FIXED;
40199 ++ }
40200 ++#endif
40201 ++
40202 + }
40203 +
40204 + error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40205 +@@ -840,9 +1141,9 @@ static int load_elf_binary(struct linux_
40206 + * allowed task size. Note that p_filesz must always be
40207 + * <= p_memsz so it is only necessary to check p_memsz.
40208 + */
40209 +- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40210 +- elf_ppnt->p_memsz > TASK_SIZE ||
40211 +- TASK_SIZE - elf_ppnt->p_memsz < k) {
40212 ++ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40213 ++ elf_ppnt->p_memsz > pax_task_size ||
40214 ++ pax_task_size - elf_ppnt->p_memsz < k) {
40215 + /* set_brk can never work. Avoid overflows. */
40216 + send_sig(SIGKILL, current, 0);
40217 + retval = -EINVAL;
40218 +@@ -870,6 +1171,11 @@ static int load_elf_binary(struct linux_
40219 + start_data += load_bias;
40220 + end_data += load_bias;
40221 +
40222 ++#ifdef CONFIG_PAX_RANDMMAP
40223 ++ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40224 ++ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40225 ++#endif
40226 ++
40227 + /* Calling set_brk effectively mmaps the pages that we need
40228 + * for the bss and break sections. We must do this before
40229 + * mapping in the interpreter, to make sure it doesn't wind
40230 +@@ -881,9 +1187,11 @@ static int load_elf_binary(struct linux_
40231 + goto out_free_dentry;
40232 + }
40233 + if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40234 +- send_sig(SIGSEGV, current, 0);
40235 +- retval = -EFAULT; /* Nobody gets to see this, but.. */
40236 +- goto out_free_dentry;
40237 ++ /*
40238 ++ * This bss-zeroing can fail if the ELF
40239 ++ * file specifies odd protections. So
40240 ++ * we don't check the return value
40241 ++ */
40242 + }
40243 +
40244 + if (elf_interpreter) {
40245 +@@ -1098,7 +1406,7 @@ out:
40246 + * Decide what to dump of a segment, part, all or none.
40247 + */
40248 + static unsigned long vma_dump_size(struct vm_area_struct *vma,
40249 +- unsigned long mm_flags)
40250 ++ unsigned long mm_flags, long signr)
40251 + {
40252 + #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40253 +
40254 +@@ -1132,7 +1440,7 @@ static unsigned long vma_dump_size(struc
40255 + if (vma->vm_file == NULL)
40256 + return 0;
40257 +
40258 +- if (FILTER(MAPPED_PRIVATE))
40259 ++ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40260 + goto whole;
40261 +
40262 + /*
40263 +@@ -1354,9 +1662,9 @@ static void fill_auxv_note(struct memelf
40264 + {
40265 + elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40266 + int i = 0;
40267 +- do
40268 ++ do {
40269 + i += 2;
40270 +- while (auxv[i - 2] != AT_NULL);
40271 ++ } while (auxv[i - 2] != AT_NULL);
40272 + fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40273 + }
40274 +
40275 +@@ -1862,14 +2170,14 @@ static void fill_extnum_info(struct elfh
40276 + }
40277 +
40278 + static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40279 +- unsigned long mm_flags)
40280 ++ struct coredump_params *cprm)
40281 + {
40282 + struct vm_area_struct *vma;
40283 + size_t size = 0;
40284 +
40285 + for (vma = first_vma(current, gate_vma); vma != NULL;
40286 + vma = next_vma(vma, gate_vma))
40287 +- size += vma_dump_size(vma, mm_flags);
40288 ++ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40289 + return size;
40290 + }
40291 +
40292 +@@ -1963,7 +2271,7 @@ static int elf_core_dump(struct coredump
40293 +
40294 + dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40295 +
40296 +- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40297 ++ offset += elf_core_vma_data_size(gate_vma, cprm);
40298 + offset += elf_core_extra_data_size();
40299 + e_shoff = offset;
40300 +
40301 +@@ -1977,10 +2285,12 @@ static int elf_core_dump(struct coredump
40302 + offset = dataoff;
40303 +
40304 + size += sizeof(*elf);
40305 ++ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40306 + if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40307 + goto end_coredump;
40308 +
40309 + size += sizeof(*phdr4note);
40310 ++ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40311 + if (size > cprm->limit
40312 + || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40313 + goto end_coredump;
40314 +@@ -1994,7 +2304,7 @@ static int elf_core_dump(struct coredump
40315 + phdr.p_offset = offset;
40316 + phdr.p_vaddr = vma->vm_start;
40317 + phdr.p_paddr = 0;
40318 +- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40319 ++ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40320 + phdr.p_memsz = vma->vm_end - vma->vm_start;
40321 + offset += phdr.p_filesz;
40322 + phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40323 +@@ -2005,6 +2315,7 @@ static int elf_core_dump(struct coredump
40324 + phdr.p_align = ELF_EXEC_PAGESIZE;
40325 +
40326 + size += sizeof(phdr);
40327 ++ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40328 + if (size > cprm->limit
40329 + || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40330 + goto end_coredump;
40331 +@@ -2029,7 +2340,7 @@ static int elf_core_dump(struct coredump
40332 + unsigned long addr;
40333 + unsigned long end;
40334 +
40335 +- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40336 ++ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40337 +
40338 + for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40339 + struct page *page;
40340 +@@ -2038,6 +2349,7 @@ static int elf_core_dump(struct coredump
40341 + page = get_dump_page(addr);
40342 + if (page) {
40343 + void *kaddr = kmap(page);
40344 ++ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40345 + stop = ((size += PAGE_SIZE) > cprm->limit) ||
40346 + !dump_write(cprm->file, kaddr,
40347 + PAGE_SIZE);
40348 +@@ -2055,6 +2367,7 @@ static int elf_core_dump(struct coredump
40349 +
40350 + if (e_phnum == PN_XNUM) {
40351 + size += sizeof(*shdr4extnum);
40352 ++ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40353 + if (size > cprm->limit
40354 + || !dump_write(cprm->file, shdr4extnum,
40355 + sizeof(*shdr4extnum)))
40356 +@@ -2075,6 +2388,97 @@ out:
40357 +
40358 + #endif /* CONFIG_ELF_CORE */
40359 +
40360 ++#ifdef CONFIG_PAX_MPROTECT
40361 ++/* PaX: non-PIC ELF libraries need relocations on their executable segments
40362 ++ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40363 ++ * we'll remove VM_MAYWRITE for good on RELRO segments.
40364 ++ *
40365 ++ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40366 ++ * basis because we want to allow the common case and not the special ones.
40367 ++ */
40368 ++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40369 ++{
40370 ++ struct elfhdr elf_h;
40371 ++ struct elf_phdr elf_p;
40372 ++ unsigned long i;
40373 ++ unsigned long oldflags;
40374 ++ bool is_textrel_rw, is_textrel_rx, is_relro;
40375 ++
40376 ++ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40377 ++ return;
40378 ++
40379 ++ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40380 ++ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40381 ++
40382 ++#ifdef CONFIG_PAX_ELFRELOCS
40383 ++ /* possible TEXTREL */
40384 ++ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40385 ++ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40386 ++#else
40387 ++ is_textrel_rw = false;
40388 ++ is_textrel_rx = false;
40389 ++#endif
40390 ++
40391 ++ /* possible RELRO */
40392 ++ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40393 ++
40394 ++ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40395 ++ return;
40396 ++
40397 ++ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40398 ++ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40399 ++
40400 ++#ifdef CONFIG_PAX_ETEXECRELOCS
40401 ++ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40402 ++#else
40403 ++ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40404 ++#endif
40405 ++
40406 ++ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40407 ++ !elf_check_arch(&elf_h) ||
40408 ++ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40409 ++ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40410 ++ return;
40411 ++
40412 ++ for (i = 0UL; i < elf_h.e_phnum; i++) {
40413 ++ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40414 ++ return;
40415 ++ switch (elf_p.p_type) {
40416 ++ case PT_DYNAMIC:
40417 ++ if (!is_textrel_rw && !is_textrel_rx)
40418 ++ continue;
40419 ++ i = 0UL;
40420 ++ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40421 ++ elf_dyn dyn;
40422 ++
40423 ++ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40424 ++ return;
40425 ++ if (dyn.d_tag == DT_NULL)
40426 ++ return;
40427 ++ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40428 ++ gr_log_textrel(vma);
40429 ++ if (is_textrel_rw)
40430 ++ vma->vm_flags |= VM_MAYWRITE;
40431 ++ else
40432 ++ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40433 ++ vma->vm_flags &= ~VM_MAYWRITE;
40434 ++ return;
40435 ++ }
40436 ++ i++;
40437 ++ }
40438 ++ return;
40439 ++
40440 ++ case PT_GNU_RELRO:
40441 ++ if (!is_relro)
40442 ++ continue;
40443 ++ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40444 ++ vma->vm_flags &= ~VM_MAYWRITE;
40445 ++ return;
40446 ++ }
40447 ++ }
40448 ++}
40449 ++#endif
40450 ++
40451 + static int __init init_elf_binfmt(void)
40452 + {
40453 + return register_binfmt(&elf_format);
40454 +diff -urNp linux-3.1.1/fs/binfmt_flat.c linux-3.1.1/fs/binfmt_flat.c
40455 +--- linux-3.1.1/fs/binfmt_flat.c 2011-11-11 15:19:27.000000000 -0500
40456 ++++ linux-3.1.1/fs/binfmt_flat.c 2011-11-16 18:39:08.000000000 -0500
40457 +@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
40458 + realdatastart = (unsigned long) -ENOMEM;
40459 + printk("Unable to allocate RAM for process data, errno %d\n",
40460 + (int)-realdatastart);
40461 ++ down_write(&current->mm->mmap_sem);
40462 + do_munmap(current->mm, textpos, text_len);
40463 ++ up_write(&current->mm->mmap_sem);
40464 + ret = realdatastart;
40465 + goto err;
40466 + }
40467 +@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
40468 + }
40469 + if (IS_ERR_VALUE(result)) {
40470 + printk("Unable to read data+bss, errno %d\n", (int)-result);
40471 ++ down_write(&current->mm->mmap_sem);
40472 + do_munmap(current->mm, textpos, text_len);
40473 + do_munmap(current->mm, realdatastart, len);
40474 ++ up_write(&current->mm->mmap_sem);
40475 + ret = result;
40476 + goto err;
40477 + }
40478 +@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
40479 + }
40480 + if (IS_ERR_VALUE(result)) {
40481 + printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40482 ++ down_write(&current->mm->mmap_sem);
40483 + do_munmap(current->mm, textpos, text_len + data_len + extra +
40484 + MAX_SHARED_LIBS * sizeof(unsigned long));
40485 ++ up_write(&current->mm->mmap_sem);
40486 + ret = result;
40487 + goto err;
40488 + }
40489 +diff -urNp linux-3.1.1/fs/bio.c linux-3.1.1/fs/bio.c
40490 +--- linux-3.1.1/fs/bio.c 2011-11-11 15:19:27.000000000 -0500
40491 ++++ linux-3.1.1/fs/bio.c 2011-11-16 18:39:08.000000000 -0500
40492 +@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
40493 + const int read = bio_data_dir(bio) == READ;
40494 + struct bio_map_data *bmd = bio->bi_private;
40495 + int i;
40496 +- char *p = bmd->sgvecs[0].iov_base;
40497 ++ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40498 +
40499 + __bio_for_each_segment(bvec, bio, i, 0) {
40500 + char *addr = page_address(bvec->bv_page);
40501 +diff -urNp linux-3.1.1/fs/block_dev.c linux-3.1.1/fs/block_dev.c
40502 +--- linux-3.1.1/fs/block_dev.c 2011-11-11 15:19:27.000000000 -0500
40503 ++++ linux-3.1.1/fs/block_dev.c 2011-11-16 18:39:08.000000000 -0500
40504 +@@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_de
40505 + else if (bdev->bd_contains == bdev)
40506 + return true; /* is a whole device which isn't held */
40507 +
40508 +- else if (whole->bd_holder == bd_may_claim)
40509 ++ else if (whole->bd_holder == (void *)bd_may_claim)
40510 + return true; /* is a partition of a device that is being partitioned */
40511 + else if (whole->bd_holder != NULL)
40512 + return false; /* is a partition of a held device */
40513 +diff -urNp linux-3.1.1/fs/btrfs/ctree.c linux-3.1.1/fs/btrfs/ctree.c
40514 +--- linux-3.1.1/fs/btrfs/ctree.c 2011-11-11 15:19:27.000000000 -0500
40515 ++++ linux-3.1.1/fs/btrfs/ctree.c 2011-11-16 18:39:08.000000000 -0500
40516 +@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(st
40517 + free_extent_buffer(buf);
40518 + add_root_to_dirty_list(root);
40519 + } else {
40520 +- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40521 +- parent_start = parent->start;
40522 +- else
40523 ++ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40524 ++ if (parent)
40525 ++ parent_start = parent->start;
40526 ++ else
40527 ++ parent_start = 0;
40528 ++ } else
40529 + parent_start = 0;
40530 +
40531 + WARN_ON(trans->transid != btrfs_header_generation(parent));
40532 +diff -urNp linux-3.1.1/fs/btrfs/inode.c linux-3.1.1/fs/btrfs/inode.c
40533 +--- linux-3.1.1/fs/btrfs/inode.c 2011-11-11 15:19:27.000000000 -0500
40534 ++++ linux-3.1.1/fs/btrfs/inode.c 2011-11-17 18:12:11.000000000 -0500
40535 +@@ -6922,7 +6922,7 @@ fail:
40536 + return -ENOMEM;
40537 + }
40538 +
40539 +-static int btrfs_getattr(struct vfsmount *mnt,
40540 ++int btrfs_getattr(struct vfsmount *mnt,
40541 + struct dentry *dentry, struct kstat *stat)
40542 + {
40543 + struct inode *inode = dentry->d_inode;
40544 +@@ -6934,6 +6934,14 @@ static int btrfs_getattr(struct vfsmount
40545 + return 0;
40546 + }
40547 +
40548 ++EXPORT_SYMBOL(btrfs_getattr);
40549 ++
40550 ++dev_t get_btrfs_dev_from_inode(struct inode *inode)
40551 ++{
40552 ++ return BTRFS_I(inode)->root->anon_dev;
40553 ++}
40554 ++EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40555 ++
40556 + /*
40557 + * If a file is moved, it will inherit the cow and compression flags of the new
40558 + * directory.
40559 +diff -urNp linux-3.1.1/fs/btrfs/ioctl.c linux-3.1.1/fs/btrfs/ioctl.c
40560 +--- linux-3.1.1/fs/btrfs/ioctl.c 2011-11-11 15:19:27.000000000 -0500
40561 ++++ linux-3.1.1/fs/btrfs/ioctl.c 2011-11-16 18:40:29.000000000 -0500
40562 +@@ -2704,9 +2704,12 @@ long btrfs_ioctl_space_info(struct btrfs
40563 + for (i = 0; i < num_types; i++) {
40564 + struct btrfs_space_info *tmp;
40565 +
40566 ++ /* Don't copy in more than we allocated */
40567 + if (!slot_count)
40568 + break;
40569 +
40570 ++ slot_count--;
40571 ++
40572 + info = NULL;
40573 + rcu_read_lock();
40574 + list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40575 +@@ -2728,15 +2731,12 @@ long btrfs_ioctl_space_info(struct btrfs
40576 + memcpy(dest, &space, sizeof(space));
40577 + dest++;
40578 + space_args.total_spaces++;
40579 +- slot_count--;
40580 + }
40581 +- if (!slot_count)
40582 +- break;
40583 + }
40584 + up_read(&info->groups_sem);
40585 + }
40586 +
40587 +- user_dest = (struct btrfs_ioctl_space_info *)
40588 ++ user_dest = (struct btrfs_ioctl_space_info __user *)
40589 + (arg + sizeof(struct btrfs_ioctl_space_args));
40590 +
40591 + if (copy_to_user(user_dest, dest_orig, alloc_size))
40592 +diff -urNp linux-3.1.1/fs/btrfs/relocation.c linux-3.1.1/fs/btrfs/relocation.c
40593 +--- linux-3.1.1/fs/btrfs/relocation.c 2011-11-11 15:19:27.000000000 -0500
40594 ++++ linux-3.1.1/fs/btrfs/relocation.c 2011-11-16 18:39:08.000000000 -0500
40595 +@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct bt
40596 + }
40597 + spin_unlock(&rc->reloc_root_tree.lock);
40598 +
40599 +- BUG_ON((struct btrfs_root *)node->data != root);
40600 ++ BUG_ON(!node || (struct btrfs_root *)node->data != root);
40601 +
40602 + if (!del) {
40603 + spin_lock(&rc->reloc_root_tree.lock);
40604 +diff -urNp linux-3.1.1/fs/cachefiles/bind.c linux-3.1.1/fs/cachefiles/bind.c
40605 +--- linux-3.1.1/fs/cachefiles/bind.c 2011-11-11 15:19:27.000000000 -0500
40606 ++++ linux-3.1.1/fs/cachefiles/bind.c 2011-11-16 18:39:08.000000000 -0500
40607 +@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
40608 + args);
40609 +
40610 + /* start by checking things over */
40611 +- ASSERT(cache->fstop_percent >= 0 &&
40612 +- cache->fstop_percent < cache->fcull_percent &&
40613 ++ ASSERT(cache->fstop_percent < cache->fcull_percent &&
40614 + cache->fcull_percent < cache->frun_percent &&
40615 + cache->frun_percent < 100);
40616 +
40617 +- ASSERT(cache->bstop_percent >= 0 &&
40618 +- cache->bstop_percent < cache->bcull_percent &&
40619 ++ ASSERT(cache->bstop_percent < cache->bcull_percent &&
40620 + cache->bcull_percent < cache->brun_percent &&
40621 + cache->brun_percent < 100);
40622 +
40623 +diff -urNp linux-3.1.1/fs/cachefiles/daemon.c linux-3.1.1/fs/cachefiles/daemon.c
40624 +--- linux-3.1.1/fs/cachefiles/daemon.c 2011-11-11 15:19:27.000000000 -0500
40625 ++++ linux-3.1.1/fs/cachefiles/daemon.c 2011-11-16 18:39:08.000000000 -0500
40626 +@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
40627 + if (n > buflen)
40628 + return -EMSGSIZE;
40629 +
40630 +- if (copy_to_user(_buffer, buffer, n) != 0)
40631 ++ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40632 + return -EFAULT;
40633 +
40634 + return n;
40635 +@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
40636 + if (test_bit(CACHEFILES_DEAD, &cache->flags))
40637 + return -EIO;
40638 +
40639 +- if (datalen < 0 || datalen > PAGE_SIZE - 1)
40640 ++ if (datalen > PAGE_SIZE - 1)
40641 + return -EOPNOTSUPP;
40642 +
40643 + /* drag the command string into the kernel so we can parse it */
40644 +@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
40645 + if (args[0] != '%' || args[1] != '\0')
40646 + return -EINVAL;
40647 +
40648 +- if (fstop < 0 || fstop >= cache->fcull_percent)
40649 ++ if (fstop >= cache->fcull_percent)
40650 + return cachefiles_daemon_range_error(cache, args);
40651 +
40652 + cache->fstop_percent = fstop;
40653 +@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
40654 + if (args[0] != '%' || args[1] != '\0')
40655 + return -EINVAL;
40656 +
40657 +- if (bstop < 0 || bstop >= cache->bcull_percent)
40658 ++ if (bstop >= cache->bcull_percent)
40659 + return cachefiles_daemon_range_error(cache, args);
40660 +
40661 + cache->bstop_percent = bstop;
40662 +diff -urNp linux-3.1.1/fs/cachefiles/internal.h linux-3.1.1/fs/cachefiles/internal.h
40663 +--- linux-3.1.1/fs/cachefiles/internal.h 2011-11-11 15:19:27.000000000 -0500
40664 ++++ linux-3.1.1/fs/cachefiles/internal.h 2011-11-16 18:39:08.000000000 -0500
40665 +@@ -57,7 +57,7 @@ struct cachefiles_cache {
40666 + wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40667 + struct rb_root active_nodes; /* active nodes (can't be culled) */
40668 + rwlock_t active_lock; /* lock for active_nodes */
40669 +- atomic_t gravecounter; /* graveyard uniquifier */
40670 ++ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40671 + unsigned frun_percent; /* when to stop culling (% files) */
40672 + unsigned fcull_percent; /* when to start culling (% files) */
40673 + unsigned fstop_percent; /* when to stop allocating (% files) */
40674 +@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
40675 + * proc.c
40676 + */
40677 + #ifdef CONFIG_CACHEFILES_HISTOGRAM
40678 +-extern atomic_t cachefiles_lookup_histogram[HZ];
40679 +-extern atomic_t cachefiles_mkdir_histogram[HZ];
40680 +-extern atomic_t cachefiles_create_histogram[HZ];
40681 ++extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40682 ++extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40683 ++extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40684 +
40685 + extern int __init cachefiles_proc_init(void);
40686 + extern void cachefiles_proc_cleanup(void);
40687 + static inline
40688 +-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40689 ++void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40690 + {
40691 + unsigned long jif = jiffies - start_jif;
40692 + if (jif >= HZ)
40693 + jif = HZ - 1;
40694 +- atomic_inc(&histogram[jif]);
40695 ++ atomic_inc_unchecked(&histogram[jif]);
40696 + }
40697 +
40698 + #else
40699 +diff -urNp linux-3.1.1/fs/cachefiles/namei.c linux-3.1.1/fs/cachefiles/namei.c
40700 +--- linux-3.1.1/fs/cachefiles/namei.c 2011-11-11 15:19:27.000000000 -0500
40701 ++++ linux-3.1.1/fs/cachefiles/namei.c 2011-11-16 18:39:08.000000000 -0500
40702 +@@ -318,7 +318,7 @@ try_again:
40703 + /* first step is to make up a grave dentry in the graveyard */
40704 + sprintf(nbuffer, "%08x%08x",
40705 + (uint32_t) get_seconds(),
40706 +- (uint32_t) atomic_inc_return(&cache->gravecounter));
40707 ++ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40708 +
40709 + /* do the multiway lock magic */
40710 + trap = lock_rename(cache->graveyard, dir);
40711 +diff -urNp linux-3.1.1/fs/cachefiles/proc.c linux-3.1.1/fs/cachefiles/proc.c
40712 +--- linux-3.1.1/fs/cachefiles/proc.c 2011-11-11 15:19:27.000000000 -0500
40713 ++++ linux-3.1.1/fs/cachefiles/proc.c 2011-11-16 18:39:08.000000000 -0500
40714 +@@ -14,9 +14,9 @@
40715 + #include <linux/seq_file.h>
40716 + #include "internal.h"
40717 +
40718 +-atomic_t cachefiles_lookup_histogram[HZ];
40719 +-atomic_t cachefiles_mkdir_histogram[HZ];
40720 +-atomic_t cachefiles_create_histogram[HZ];
40721 ++atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40722 ++atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40723 ++atomic_unchecked_t cachefiles_create_histogram[HZ];
40724 +
40725 + /*
40726 + * display the latency histogram
40727 +@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
40728 + return 0;
40729 + default:
40730 + index = (unsigned long) v - 3;
40731 +- x = atomic_read(&cachefiles_lookup_histogram[index]);
40732 +- y = atomic_read(&cachefiles_mkdir_histogram[index]);
40733 +- z = atomic_read(&cachefiles_create_histogram[index]);
40734 ++ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40735 ++ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40736 ++ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40737 + if (x == 0 && y == 0 && z == 0)
40738 + return 0;
40739 +
40740 +diff -urNp linux-3.1.1/fs/cachefiles/rdwr.c linux-3.1.1/fs/cachefiles/rdwr.c
40741 +--- linux-3.1.1/fs/cachefiles/rdwr.c 2011-11-11 15:19:27.000000000 -0500
40742 ++++ linux-3.1.1/fs/cachefiles/rdwr.c 2011-11-16 18:39:08.000000000 -0500
40743 +@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
40744 + old_fs = get_fs();
40745 + set_fs(KERNEL_DS);
40746 + ret = file->f_op->write(
40747 +- file, (const void __user *) data, len, &pos);
40748 ++ file, (const void __force_user *) data, len, &pos);
40749 + set_fs(old_fs);
40750 + kunmap(page);
40751 + if (ret != len)
40752 +diff -urNp linux-3.1.1/fs/ceph/dir.c linux-3.1.1/fs/ceph/dir.c
40753 +--- linux-3.1.1/fs/ceph/dir.c 2011-11-11 15:19:27.000000000 -0500
40754 ++++ linux-3.1.1/fs/ceph/dir.c 2011-11-16 18:39:08.000000000 -0500
40755 +@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *fil
40756 + struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
40757 + struct ceph_mds_client *mdsc = fsc->mdsc;
40758 + unsigned frag = fpos_frag(filp->f_pos);
40759 +- int off = fpos_off(filp->f_pos);
40760 ++ unsigned int off = fpos_off(filp->f_pos);
40761 + int err;
40762 + u32 ftype;
40763 + struct ceph_mds_reply_info_parsed *rinfo;
40764 +diff -urNp linux-3.1.1/fs/cifs/cifs_debug.c linux-3.1.1/fs/cifs/cifs_debug.c
40765 +--- linux-3.1.1/fs/cifs/cifs_debug.c 2011-11-11 15:19:27.000000000 -0500
40766 ++++ linux-3.1.1/fs/cifs/cifs_debug.c 2011-11-16 18:39:08.000000000 -0500
40767 +@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(str
40768 +
40769 + if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
40770 + #ifdef CONFIG_CIFS_STATS2
40771 +- atomic_set(&totBufAllocCount, 0);
40772 +- atomic_set(&totSmBufAllocCount, 0);
40773 ++ atomic_set_unchecked(&totBufAllocCount, 0);
40774 ++ atomic_set_unchecked(&totSmBufAllocCount, 0);
40775 + #endif /* CONFIG_CIFS_STATS2 */
40776 + spin_lock(&cifs_tcp_ses_lock);
40777 + list_for_each(tmp1, &cifs_tcp_ses_list) {
40778 +@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
40779 + tcon = list_entry(tmp3,
40780 + struct cifs_tcon,
40781 + tcon_list);
40782 +- atomic_set(&tcon->num_smbs_sent, 0);
40783 +- atomic_set(&tcon->num_writes, 0);
40784 +- atomic_set(&tcon->num_reads, 0);
40785 +- atomic_set(&tcon->num_oplock_brks, 0);
40786 +- atomic_set(&tcon->num_opens, 0);
40787 +- atomic_set(&tcon->num_posixopens, 0);
40788 +- atomic_set(&tcon->num_posixmkdirs, 0);
40789 +- atomic_set(&tcon->num_closes, 0);
40790 +- atomic_set(&tcon->num_deletes, 0);
40791 +- atomic_set(&tcon->num_mkdirs, 0);
40792 +- atomic_set(&tcon->num_rmdirs, 0);
40793 +- atomic_set(&tcon->num_renames, 0);
40794 +- atomic_set(&tcon->num_t2renames, 0);
40795 +- atomic_set(&tcon->num_ffirst, 0);
40796 +- atomic_set(&tcon->num_fnext, 0);
40797 +- atomic_set(&tcon->num_fclose, 0);
40798 +- atomic_set(&tcon->num_hardlinks, 0);
40799 +- atomic_set(&tcon->num_symlinks, 0);
40800 +- atomic_set(&tcon->num_locks, 0);
40801 ++ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40802 ++ atomic_set_unchecked(&tcon->num_writes, 0);
40803 ++ atomic_set_unchecked(&tcon->num_reads, 0);
40804 ++ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40805 ++ atomic_set_unchecked(&tcon->num_opens, 0);
40806 ++ atomic_set_unchecked(&tcon->num_posixopens, 0);
40807 ++ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40808 ++ atomic_set_unchecked(&tcon->num_closes, 0);
40809 ++ atomic_set_unchecked(&tcon->num_deletes, 0);
40810 ++ atomic_set_unchecked(&tcon->num_mkdirs, 0);
40811 ++ atomic_set_unchecked(&tcon->num_rmdirs, 0);
40812 ++ atomic_set_unchecked(&tcon->num_renames, 0);
40813 ++ atomic_set_unchecked(&tcon->num_t2renames, 0);
40814 ++ atomic_set_unchecked(&tcon->num_ffirst, 0);
40815 ++ atomic_set_unchecked(&tcon->num_fnext, 0);
40816 ++ atomic_set_unchecked(&tcon->num_fclose, 0);
40817 ++ atomic_set_unchecked(&tcon->num_hardlinks, 0);
40818 ++ atomic_set_unchecked(&tcon->num_symlinks, 0);
40819 ++ atomic_set_unchecked(&tcon->num_locks, 0);
40820 + }
40821 + }
40822 + }
40823 +@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct s
40824 + smBufAllocCount.counter, cifs_min_small);
40825 + #ifdef CONFIG_CIFS_STATS2
40826 + seq_printf(m, "Total Large %d Small %d Allocations\n",
40827 +- atomic_read(&totBufAllocCount),
40828 +- atomic_read(&totSmBufAllocCount));
40829 ++ atomic_read_unchecked(&totBufAllocCount),
40830 ++ atomic_read_unchecked(&totSmBufAllocCount));
40831 + #endif /* CONFIG_CIFS_STATS2 */
40832 +
40833 + seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
40834 +@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
40835 + if (tcon->need_reconnect)
40836 + seq_puts(m, "\tDISCONNECTED ");
40837 + seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40838 +- atomic_read(&tcon->num_smbs_sent),
40839 +- atomic_read(&tcon->num_oplock_brks));
40840 ++ atomic_read_unchecked(&tcon->num_smbs_sent),
40841 ++ atomic_read_unchecked(&tcon->num_oplock_brks));
40842 + seq_printf(m, "\nReads: %d Bytes: %lld",
40843 +- atomic_read(&tcon->num_reads),
40844 ++ atomic_read_unchecked(&tcon->num_reads),
40845 + (long long)(tcon->bytes_read));
40846 + seq_printf(m, "\nWrites: %d Bytes: %lld",
40847 +- atomic_read(&tcon->num_writes),
40848 ++ atomic_read_unchecked(&tcon->num_writes),
40849 + (long long)(tcon->bytes_written));
40850 + seq_printf(m, "\nFlushes: %d",
40851 +- atomic_read(&tcon->num_flushes));
40852 ++ atomic_read_unchecked(&tcon->num_flushes));
40853 + seq_printf(m, "\nLocks: %d HardLinks: %d "
40854 + "Symlinks: %d",
40855 +- atomic_read(&tcon->num_locks),
40856 +- atomic_read(&tcon->num_hardlinks),
40857 +- atomic_read(&tcon->num_symlinks));
40858 ++ atomic_read_unchecked(&tcon->num_locks),
40859 ++ atomic_read_unchecked(&tcon->num_hardlinks),
40860 ++ atomic_read_unchecked(&tcon->num_symlinks));
40861 + seq_printf(m, "\nOpens: %d Closes: %d "
40862 + "Deletes: %d",
40863 +- atomic_read(&tcon->num_opens),
40864 +- atomic_read(&tcon->num_closes),
40865 +- atomic_read(&tcon->num_deletes));
40866 ++ atomic_read_unchecked(&tcon->num_opens),
40867 ++ atomic_read_unchecked(&tcon->num_closes),
40868 ++ atomic_read_unchecked(&tcon->num_deletes));
40869 + seq_printf(m, "\nPosix Opens: %d "
40870 + "Posix Mkdirs: %d",
40871 +- atomic_read(&tcon->num_posixopens),
40872 +- atomic_read(&tcon->num_posixmkdirs));
40873 ++ atomic_read_unchecked(&tcon->num_posixopens),
40874 ++ atomic_read_unchecked(&tcon->num_posixmkdirs));
40875 + seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40876 +- atomic_read(&tcon->num_mkdirs),
40877 +- atomic_read(&tcon->num_rmdirs));
40878 ++ atomic_read_unchecked(&tcon->num_mkdirs),
40879 ++ atomic_read_unchecked(&tcon->num_rmdirs));
40880 + seq_printf(m, "\nRenames: %d T2 Renames %d",
40881 +- atomic_read(&tcon->num_renames),
40882 +- atomic_read(&tcon->num_t2renames));
40883 ++ atomic_read_unchecked(&tcon->num_renames),
40884 ++ atomic_read_unchecked(&tcon->num_t2renames));
40885 + seq_printf(m, "\nFindFirst: %d FNext %d "
40886 + "FClose %d",
40887 +- atomic_read(&tcon->num_ffirst),
40888 +- atomic_read(&tcon->num_fnext),
40889 +- atomic_read(&tcon->num_fclose));
40890 ++ atomic_read_unchecked(&tcon->num_ffirst),
40891 ++ atomic_read_unchecked(&tcon->num_fnext),
40892 ++ atomic_read_unchecked(&tcon->num_fclose));
40893 + }
40894 + }
40895 + }
40896 +diff -urNp linux-3.1.1/fs/cifs/cifsfs.c linux-3.1.1/fs/cifs/cifsfs.c
40897 +--- linux-3.1.1/fs/cifs/cifsfs.c 2011-11-11 15:19:27.000000000 -0500
40898 ++++ linux-3.1.1/fs/cifs/cifsfs.c 2011-11-16 18:39:08.000000000 -0500
40899 +@@ -981,7 +981,7 @@ cifs_init_request_bufs(void)
40900 + cifs_req_cachep = kmem_cache_create("cifs_request",
40901 + CIFSMaxBufSize +
40902 + MAX_CIFS_HDR_SIZE, 0,
40903 +- SLAB_HWCACHE_ALIGN, NULL);
40904 ++ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
40905 + if (cifs_req_cachep == NULL)
40906 + return -ENOMEM;
40907 +
40908 +@@ -1008,7 +1008,7 @@ cifs_init_request_bufs(void)
40909 + efficient to alloc 1 per page off the slab compared to 17K (5page)
40910 + alloc of large cifs buffers even when page debugging is on */
40911 + cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
40912 +- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
40913 ++ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
40914 + NULL);
40915 + if (cifs_sm_req_cachep == NULL) {
40916 + mempool_destroy(cifs_req_poolp);
40917 +@@ -1093,8 +1093,8 @@ init_cifs(void)
40918 + atomic_set(&bufAllocCount, 0);
40919 + atomic_set(&smBufAllocCount, 0);
40920 + #ifdef CONFIG_CIFS_STATS2
40921 +- atomic_set(&totBufAllocCount, 0);
40922 +- atomic_set(&totSmBufAllocCount, 0);
40923 ++ atomic_set_unchecked(&totBufAllocCount, 0);
40924 ++ atomic_set_unchecked(&totSmBufAllocCount, 0);
40925 + #endif /* CONFIG_CIFS_STATS2 */
40926 +
40927 + atomic_set(&midCount, 0);
40928 +diff -urNp linux-3.1.1/fs/cifs/cifsglob.h linux-3.1.1/fs/cifs/cifsglob.h
40929 +--- linux-3.1.1/fs/cifs/cifsglob.h 2011-11-11 15:19:27.000000000 -0500
40930 ++++ linux-3.1.1/fs/cifs/cifsglob.h 2011-11-16 18:39:08.000000000 -0500
40931 +@@ -381,28 +381,28 @@ struct cifs_tcon {
40932 + __u16 Flags; /* optional support bits */
40933 + enum statusEnum tidStatus;
40934 + #ifdef CONFIG_CIFS_STATS
40935 +- atomic_t num_smbs_sent;
40936 +- atomic_t num_writes;
40937 +- atomic_t num_reads;
40938 +- atomic_t num_flushes;
40939 +- atomic_t num_oplock_brks;
40940 +- atomic_t num_opens;
40941 +- atomic_t num_closes;
40942 +- atomic_t num_deletes;
40943 +- atomic_t num_mkdirs;
40944 +- atomic_t num_posixopens;
40945 +- atomic_t num_posixmkdirs;
40946 +- atomic_t num_rmdirs;
40947 +- atomic_t num_renames;
40948 +- atomic_t num_t2renames;
40949 +- atomic_t num_ffirst;
40950 +- atomic_t num_fnext;
40951 +- atomic_t num_fclose;
40952 +- atomic_t num_hardlinks;
40953 +- atomic_t num_symlinks;
40954 +- atomic_t num_locks;
40955 +- atomic_t num_acl_get;
40956 +- atomic_t num_acl_set;
40957 ++ atomic_unchecked_t num_smbs_sent;
40958 ++ atomic_unchecked_t num_writes;
40959 ++ atomic_unchecked_t num_reads;
40960 ++ atomic_unchecked_t num_flushes;
40961 ++ atomic_unchecked_t num_oplock_brks;
40962 ++ atomic_unchecked_t num_opens;
40963 ++ atomic_unchecked_t num_closes;
40964 ++ atomic_unchecked_t num_deletes;
40965 ++ atomic_unchecked_t num_mkdirs;
40966 ++ atomic_unchecked_t num_posixopens;
40967 ++ atomic_unchecked_t num_posixmkdirs;
40968 ++ atomic_unchecked_t num_rmdirs;
40969 ++ atomic_unchecked_t num_renames;
40970 ++ atomic_unchecked_t num_t2renames;
40971 ++ atomic_unchecked_t num_ffirst;
40972 ++ atomic_unchecked_t num_fnext;
40973 ++ atomic_unchecked_t num_fclose;
40974 ++ atomic_unchecked_t num_hardlinks;
40975 ++ atomic_unchecked_t num_symlinks;
40976 ++ atomic_unchecked_t num_locks;
40977 ++ atomic_unchecked_t num_acl_get;
40978 ++ atomic_unchecked_t num_acl_set;
40979 + #ifdef CONFIG_CIFS_STATS2
40980 + unsigned long long time_writes;
40981 + unsigned long long time_reads;
40982 +@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim
40983 + }
40984 +
40985 + #ifdef CONFIG_CIFS_STATS
40986 +-#define cifs_stats_inc atomic_inc
40987 ++#define cifs_stats_inc atomic_inc_unchecked
40988 +
40989 + static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
40990 + unsigned int bytes)
40991 +@@ -953,8 +953,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
40992 + /* Various Debug counters */
40993 + GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
40994 + #ifdef CONFIG_CIFS_STATS2
40995 +-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
40996 +-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
40997 ++GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
40998 ++GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
40999 + #endif
41000 + GLOBAL_EXTERN atomic_t smBufAllocCount;
41001 + GLOBAL_EXTERN atomic_t midCount;
41002 +diff -urNp linux-3.1.1/fs/cifs/link.c linux-3.1.1/fs/cifs/link.c
41003 +--- linux-3.1.1/fs/cifs/link.c 2011-11-11 15:19:27.000000000 -0500
41004 ++++ linux-3.1.1/fs/cifs/link.c 2011-11-16 18:39:08.000000000 -0500
41005 +@@ -593,7 +593,7 @@ symlink_exit:
41006 +
41007 + void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
41008 + {
41009 +- char *p = nd_get_link(nd);
41010 ++ const char *p = nd_get_link(nd);
41011 + if (!IS_ERR(p))
41012 + kfree(p);
41013 + }
41014 +diff -urNp linux-3.1.1/fs/cifs/misc.c linux-3.1.1/fs/cifs/misc.c
41015 +--- linux-3.1.1/fs/cifs/misc.c 2011-11-11 15:19:27.000000000 -0500
41016 ++++ linux-3.1.1/fs/cifs/misc.c 2011-11-16 18:39:08.000000000 -0500
41017 +@@ -156,7 +156,7 @@ cifs_buf_get(void)
41018 + memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
41019 + atomic_inc(&bufAllocCount);
41020 + #ifdef CONFIG_CIFS_STATS2
41021 +- atomic_inc(&totBufAllocCount);
41022 ++ atomic_inc_unchecked(&totBufAllocCount);
41023 + #endif /* CONFIG_CIFS_STATS2 */
41024 + }
41025 +
41026 +@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
41027 + /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
41028 + atomic_inc(&smBufAllocCount);
41029 + #ifdef CONFIG_CIFS_STATS2
41030 +- atomic_inc(&totSmBufAllocCount);
41031 ++ atomic_inc_unchecked(&totSmBufAllocCount);
41032 + #endif /* CONFIG_CIFS_STATS2 */
41033 +
41034 + }
41035 +diff -urNp linux-3.1.1/fs/coda/cache.c linux-3.1.1/fs/coda/cache.c
41036 +--- linux-3.1.1/fs/coda/cache.c 2011-11-11 15:19:27.000000000 -0500
41037 ++++ linux-3.1.1/fs/coda/cache.c 2011-11-16 18:39:08.000000000 -0500
41038 +@@ -24,7 +24,7 @@
41039 + #include "coda_linux.h"
41040 + #include "coda_cache.h"
41041 +
41042 +-static atomic_t permission_epoch = ATOMIC_INIT(0);
41043 ++static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41044 +
41045 + /* replace or extend an acl cache hit */
41046 + void coda_cache_enter(struct inode *inode, int mask)
41047 +@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
41048 + struct coda_inode_info *cii = ITOC(inode);
41049 +
41050 + spin_lock(&cii->c_lock);
41051 +- cii->c_cached_epoch = atomic_read(&permission_epoch);
41052 ++ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41053 + if (cii->c_uid != current_fsuid()) {
41054 + cii->c_uid = current_fsuid();
41055 + cii->c_cached_perm = mask;
41056 +@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
41057 + {
41058 + struct coda_inode_info *cii = ITOC(inode);
41059 + spin_lock(&cii->c_lock);
41060 +- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41061 ++ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41062 + spin_unlock(&cii->c_lock);
41063 + }
41064 +
41065 + /* remove all acl caches */
41066 + void coda_cache_clear_all(struct super_block *sb)
41067 + {
41068 +- atomic_inc(&permission_epoch);
41069 ++ atomic_inc_unchecked(&permission_epoch);
41070 + }
41071 +
41072 +
41073 +@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
41074 + spin_lock(&cii->c_lock);
41075 + hit = (mask & cii->c_cached_perm) == mask &&
41076 + cii->c_uid == current_fsuid() &&
41077 +- cii->c_cached_epoch == atomic_read(&permission_epoch);
41078 ++ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41079 + spin_unlock(&cii->c_lock);
41080 +
41081 + return hit;
41082 +diff -urNp linux-3.1.1/fs/compat_binfmt_elf.c linux-3.1.1/fs/compat_binfmt_elf.c
41083 +--- linux-3.1.1/fs/compat_binfmt_elf.c 2011-11-11 15:19:27.000000000 -0500
41084 ++++ linux-3.1.1/fs/compat_binfmt_elf.c 2011-11-16 18:39:08.000000000 -0500
41085 +@@ -30,11 +30,13 @@
41086 + #undef elf_phdr
41087 + #undef elf_shdr
41088 + #undef elf_note
41089 ++#undef elf_dyn
41090 + #undef elf_addr_t
41091 + #define elfhdr elf32_hdr
41092 + #define elf_phdr elf32_phdr
41093 + #define elf_shdr elf32_shdr
41094 + #define elf_note elf32_note
41095 ++#define elf_dyn Elf32_Dyn
41096 + #define elf_addr_t Elf32_Addr
41097 +
41098 + /*
41099 +diff -urNp linux-3.1.1/fs/compat.c linux-3.1.1/fs/compat.c
41100 +--- linux-3.1.1/fs/compat.c 2011-11-11 15:19:27.000000000 -0500
41101 ++++ linux-3.1.1/fs/compat.c 2011-11-16 18:40:29.000000000 -0500
41102 +@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const
41103 + static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41104 + {
41105 + compat_ino_t ino = stat->ino;
41106 +- typeof(ubuf->st_uid) uid = 0;
41107 +- typeof(ubuf->st_gid) gid = 0;
41108 ++ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41109 ++ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41110 + int err;
41111 +
41112 + SET_UID(uid, stat->uid);
41113 +@@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u3
41114 +
41115 + set_fs(KERNEL_DS);
41116 + /* The __user pointer cast is valid because of the set_fs() */
41117 +- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41118 ++ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41119 + set_fs(oldfs);
41120 + /* truncating is ok because it's a user address */
41121 + if (!ret)
41122 +@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
41123 + goto out;
41124 +
41125 + ret = -EINVAL;
41126 +- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41127 ++ if (nr_segs > UIO_MAXIOV)
41128 + goto out;
41129 + if (nr_segs > fast_segs) {
41130 + ret = -ENOMEM;
41131 +@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
41132 +
41133 + struct compat_readdir_callback {
41134 + struct compat_old_linux_dirent __user *dirent;
41135 ++ struct file * file;
41136 + int result;
41137 + };
41138 +
41139 +@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
41140 + buf->result = -EOVERFLOW;
41141 + return -EOVERFLOW;
41142 + }
41143 ++
41144 ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41145 ++ return 0;
41146 ++
41147 + buf->result++;
41148 + dirent = buf->dirent;
41149 + if (!access_ok(VERIFY_WRITE, dirent,
41150 +@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
41151 +
41152 + buf.result = 0;
41153 + buf.dirent = dirent;
41154 ++ buf.file = file;
41155 +
41156 + error = vfs_readdir(file, compat_fillonedir, &buf);
41157 + if (buf.result)
41158 +@@ -917,6 +923,7 @@ struct compat_linux_dirent {
41159 + struct compat_getdents_callback {
41160 + struct compat_linux_dirent __user *current_dir;
41161 + struct compat_linux_dirent __user *previous;
41162 ++ struct file * file;
41163 + int count;
41164 + int error;
41165 + };
41166 +@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
41167 + buf->error = -EOVERFLOW;
41168 + return -EOVERFLOW;
41169 + }
41170 ++
41171 ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41172 ++ return 0;
41173 ++
41174 + dirent = buf->previous;
41175 + if (dirent) {
41176 + if (__put_user(offset, &dirent->d_off))
41177 +@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
41178 + buf.previous = NULL;
41179 + buf.count = count;
41180 + buf.error = 0;
41181 ++ buf.file = file;
41182 +
41183 + error = vfs_readdir(file, compat_filldir, &buf);
41184 + if (error >= 0)
41185 +@@ -1006,6 +1018,7 @@ out:
41186 + struct compat_getdents_callback64 {
41187 + struct linux_dirent64 __user *current_dir;
41188 + struct linux_dirent64 __user *previous;
41189 ++ struct file * file;
41190 + int count;
41191 + int error;
41192 + };
41193 +@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
41194 + buf->error = -EINVAL; /* only used if we fail.. */
41195 + if (reclen > buf->count)
41196 + return -EINVAL;
41197 ++
41198 ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41199 ++ return 0;
41200 ++
41201 + dirent = buf->previous;
41202 +
41203 + if (dirent) {
41204 +@@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(un
41205 + buf.previous = NULL;
41206 + buf.count = count;
41207 + buf.error = 0;
41208 ++ buf.file = file;
41209 +
41210 + error = vfs_readdir(file, compat_filldir64, &buf);
41211 + if (error >= 0)
41212 + error = buf.error;
41213 + lastdirent = buf.previous;
41214 + if (lastdirent) {
41215 +- typeof(lastdirent->d_off) d_off = file->f_pos;
41216 ++ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41217 + if (__put_user_unaligned(d_off, &lastdirent->d_off))
41218 + error = -EFAULT;
41219 + else
41220 +@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat
41221 + struct fdtable *fdt;
41222 + long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
41223 +
41224 ++ pax_track_stack();
41225 ++
41226 + if (n < 0)
41227 + goto out_nofds;
41228 +
41229 +diff -urNp linux-3.1.1/fs/compat_ioctl.c linux-3.1.1/fs/compat_ioctl.c
41230 +--- linux-3.1.1/fs/compat_ioctl.c 2011-11-11 15:19:27.000000000 -0500
41231 ++++ linux-3.1.1/fs/compat_ioctl.c 2011-11-16 18:39:08.000000000 -0500
41232 +@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsi
41233 +
41234 + err = get_user(palp, &up->palette);
41235 + err |= get_user(length, &up->length);
41236 ++ if (err)
41237 ++ return -EFAULT;
41238 +
41239 + up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41240 + err = put_user(compat_ptr(palp), &up_native->palette);
41241 +@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned
41242 + return -EFAULT;
41243 + if (__get_user(udata, &ss32->iomem_base))
41244 + return -EFAULT;
41245 +- ss.iomem_base = compat_ptr(udata);
41246 ++ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41247 + if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41248 + __get_user(ss.port_high, &ss32->port_high))
41249 + return -EFAULT;
41250 +@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(stru
41251 + copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41252 + copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41253 + copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41254 +- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41255 ++ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41256 + return -EFAULT;
41257 +
41258 + return ioctl_preallocate(file, p);
41259 +@@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigne
41260 + static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41261 + {
41262 + unsigned int a, b;
41263 +- a = *(unsigned int *)p;
41264 +- b = *(unsigned int *)q;
41265 ++ a = *(const unsigned int *)p;
41266 ++ b = *(const unsigned int *)q;
41267 + if (a > b)
41268 + return 1;
41269 + if (a < b)
41270 +diff -urNp linux-3.1.1/fs/configfs/dir.c linux-3.1.1/fs/configfs/dir.c
41271 +--- linux-3.1.1/fs/configfs/dir.c 2011-11-11 15:19:27.000000000 -0500
41272 ++++ linux-3.1.1/fs/configfs/dir.c 2011-11-16 18:39:08.000000000 -0500
41273 +@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
41274 + }
41275 + for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41276 + struct configfs_dirent *next;
41277 +- const char * name;
41278 ++ const unsigned char * name;
41279 ++ char d_name[sizeof(next->s_dentry->d_iname)];
41280 + int len;
41281 + struct inode *inode = NULL;
41282 +
41283 +@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
41284 + continue;
41285 +
41286 + name = configfs_get_name(next);
41287 +- len = strlen(name);
41288 ++ if (next->s_dentry && name == next->s_dentry->d_iname) {
41289 ++ len = next->s_dentry->d_name.len;
41290 ++ memcpy(d_name, name, len);
41291 ++ name = d_name;
41292 ++ } else
41293 ++ len = strlen(name);
41294 +
41295 + /*
41296 + * We'll have a dentry and an inode for
41297 +diff -urNp linux-3.1.1/fs/dcache.c linux-3.1.1/fs/dcache.c
41298 +--- linux-3.1.1/fs/dcache.c 2011-11-11 15:19:27.000000000 -0500
41299 ++++ linux-3.1.1/fs/dcache.c 2011-11-16 18:39:08.000000000 -0500
41300 +@@ -2998,7 +2998,7 @@ void __init vfs_caches_init(unsigned lon
41301 + mempages -= reserve;
41302 +
41303 + names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41304 +- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41305 ++ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41306 +
41307 + dcache_init();
41308 + inode_init();
41309 +diff -urNp linux-3.1.1/fs/ecryptfs/inode.c linux-3.1.1/fs/ecryptfs/inode.c
41310 +--- linux-3.1.1/fs/ecryptfs/inode.c 2011-11-11 15:19:27.000000000 -0500
41311 ++++ linux-3.1.1/fs/ecryptfs/inode.c 2011-11-16 18:39:08.000000000 -0500
41312 +@@ -681,7 +681,7 @@ static int ecryptfs_readlink_lower(struc
41313 + old_fs = get_fs();
41314 + set_fs(get_ds());
41315 + rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41316 +- (char __user *)lower_buf,
41317 ++ (char __force_user *)lower_buf,
41318 + lower_bufsiz);
41319 + set_fs(old_fs);
41320 + if (rc < 0)
41321 +@@ -727,7 +727,7 @@ static void *ecryptfs_follow_link(struct
41322 + }
41323 + old_fs = get_fs();
41324 + set_fs(get_ds());
41325 +- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41326 ++ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41327 + set_fs(old_fs);
41328 + if (rc < 0) {
41329 + kfree(buf);
41330 +@@ -742,7 +742,7 @@ out:
41331 + static void
41332 + ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41333 + {
41334 +- char *buf = nd_get_link(nd);
41335 ++ const char *buf = nd_get_link(nd);
41336 + if (!IS_ERR(buf)) {
41337 + /* Free the char* */
41338 + kfree(buf);
41339 +diff -urNp linux-3.1.1/fs/ecryptfs/miscdev.c linux-3.1.1/fs/ecryptfs/miscdev.c
41340 +--- linux-3.1.1/fs/ecryptfs/miscdev.c 2011-11-11 15:19:27.000000000 -0500
41341 ++++ linux-3.1.1/fs/ecryptfs/miscdev.c 2011-11-16 18:39:08.000000000 -0500
41342 +@@ -328,7 +328,7 @@ check_list:
41343 + goto out_unlock_msg_ctx;
41344 + i = 5;
41345 + if (msg_ctx->msg) {
41346 +- if (copy_to_user(&buf[i], packet_length, packet_length_size))
41347 ++ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41348 + goto out_unlock_msg_ctx;
41349 + i += packet_length_size;
41350 + if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41351 +diff -urNp linux-3.1.1/fs/ecryptfs/read_write.c linux-3.1.1/fs/ecryptfs/read_write.c
41352 +--- linux-3.1.1/fs/ecryptfs/read_write.c 2011-11-11 15:19:27.000000000 -0500
41353 ++++ linux-3.1.1/fs/ecryptfs/read_write.c 2011-11-16 18:39:08.000000000 -0500
41354 +@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *e
41355 + return -EIO;
41356 + fs_save = get_fs();
41357 + set_fs(get_ds());
41358 +- rc = vfs_write(lower_file, data, size, &offset);
41359 ++ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41360 + set_fs(fs_save);
41361 + mark_inode_dirty_sync(ecryptfs_inode);
41362 + return rc;
41363 +@@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff
41364 + return -EIO;
41365 + fs_save = get_fs();
41366 + set_fs(get_ds());
41367 +- rc = vfs_read(lower_file, data, size, &offset);
41368 ++ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41369 + set_fs(fs_save);
41370 + return rc;
41371 + }
41372 +diff -urNp linux-3.1.1/fs/exec.c linux-3.1.1/fs/exec.c
41373 +--- linux-3.1.1/fs/exec.c 2011-11-11 15:19:27.000000000 -0500
41374 ++++ linux-3.1.1/fs/exec.c 2011-11-17 18:40:47.000000000 -0500
41375 +@@ -55,12 +55,24 @@
41376 + #include <linux/pipe_fs_i.h>
41377 + #include <linux/oom.h>
41378 + #include <linux/compat.h>
41379 ++#include <linux/random.h>
41380 ++#include <linux/seq_file.h>
41381 ++
41382 ++#ifdef CONFIG_PAX_REFCOUNT
41383 ++#include <linux/kallsyms.h>
41384 ++#include <linux/kdebug.h>
41385 ++#endif
41386 +
41387 + #include <asm/uaccess.h>
41388 + #include <asm/mmu_context.h>
41389 + #include <asm/tlb.h>
41390 + #include "internal.h"
41391 +
41392 ++#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41393 ++void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41394 ++EXPORT_SYMBOL(pax_set_initial_flags_func);
41395 ++#endif
41396 ++
41397 + int core_uses_pid;
41398 + char core_pattern[CORENAME_MAX_SIZE] = "core";
41399 + unsigned int core_pipe_limit;
41400 +@@ -70,7 +82,7 @@ struct core_name {
41401 + char *corename;
41402 + int used, size;
41403 + };
41404 +-static atomic_t call_count = ATOMIC_INIT(1);
41405 ++static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41406 +
41407 + /* The maximal length of core_pattern is also specified in sysctl.c */
41408 +
41409 +@@ -188,18 +200,10 @@ static struct page *get_arg_page(struct
41410 + int write)
41411 + {
41412 + struct page *page;
41413 +- int ret;
41414 +
41415 +-#ifdef CONFIG_STACK_GROWSUP
41416 +- if (write) {
41417 +- ret = expand_downwards(bprm->vma, pos);
41418 +- if (ret < 0)
41419 +- return NULL;
41420 +- }
41421 +-#endif
41422 +- ret = get_user_pages(current, bprm->mm, pos,
41423 +- 1, write, 1, &page, NULL);
41424 +- if (ret <= 0)
41425 ++ if (0 > expand_downwards(bprm->vma, pos))
41426 ++ return NULL;
41427 ++ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41428 + return NULL;
41429 +
41430 + if (write) {
41431 +@@ -274,6 +278,11 @@ static int __bprm_mm_init(struct linux_b
41432 + vma->vm_end = STACK_TOP_MAX;
41433 + vma->vm_start = vma->vm_end - PAGE_SIZE;
41434 + vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41435 ++
41436 ++#ifdef CONFIG_PAX_SEGMEXEC
41437 ++ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41438 ++#endif
41439 ++
41440 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41441 + INIT_LIST_HEAD(&vma->anon_vma_chain);
41442 +
41443 +@@ -288,6 +297,12 @@ static int __bprm_mm_init(struct linux_b
41444 + mm->stack_vm = mm->total_vm = 1;
41445 + up_write(&mm->mmap_sem);
41446 + bprm->p = vma->vm_end - sizeof(void *);
41447 ++
41448 ++#ifdef CONFIG_PAX_RANDUSTACK
41449 ++ if (randomize_va_space)
41450 ++ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41451 ++#endif
41452 ++
41453 + return 0;
41454 + err:
41455 + up_write(&mm->mmap_sem);
41456 +@@ -396,19 +411,7 @@ err:
41457 + return err;
41458 + }
41459 +
41460 +-struct user_arg_ptr {
41461 +-#ifdef CONFIG_COMPAT
41462 +- bool is_compat;
41463 +-#endif
41464 +- union {
41465 +- const char __user *const __user *native;
41466 +-#ifdef CONFIG_COMPAT
41467 +- compat_uptr_t __user *compat;
41468 +-#endif
41469 +- } ptr;
41470 +-};
41471 +-
41472 +-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41473 ++const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41474 + {
41475 + const char __user *native;
41476 +
41477 +@@ -417,14 +420,14 @@ static const char __user *get_user_arg_p
41478 + compat_uptr_t compat;
41479 +
41480 + if (get_user(compat, argv.ptr.compat + nr))
41481 +- return ERR_PTR(-EFAULT);
41482 ++ return (const char __force_user *)ERR_PTR(-EFAULT);
41483 +
41484 + return compat_ptr(compat);
41485 + }
41486 + #endif
41487 +
41488 + if (get_user(native, argv.ptr.native + nr))
41489 +- return ERR_PTR(-EFAULT);
41490 ++ return (const char __force_user *)ERR_PTR(-EFAULT);
41491 +
41492 + return native;
41493 + }
41494 +@@ -443,7 +446,7 @@ static int count(struct user_arg_ptr arg
41495 + if (!p)
41496 + break;
41497 +
41498 +- if (IS_ERR(p))
41499 ++ if (IS_ERR((const char __force_kernel *)p))
41500 + return -EFAULT;
41501 +
41502 + if (i++ >= max)
41503 +@@ -477,7 +480,7 @@ static int copy_strings(int argc, struct
41504 +
41505 + ret = -EFAULT;
41506 + str = get_user_arg_ptr(argv, argc);
41507 +- if (IS_ERR(str))
41508 ++ if (IS_ERR((const char __force_kernel *)str))
41509 + goto out;
41510 +
41511 + len = strnlen_user(str, MAX_ARG_STRLEN);
41512 +@@ -559,7 +562,7 @@ int copy_strings_kernel(int argc, const
41513 + int r;
41514 + mm_segment_t oldfs = get_fs();
41515 + struct user_arg_ptr argv = {
41516 +- .ptr.native = (const char __user *const __user *)__argv,
41517 ++ .ptr.native = (const char __force_user *const __force_user *)__argv,
41518 + };
41519 +
41520 + set_fs(KERNEL_DS);
41521 +@@ -594,7 +597,8 @@ static int shift_arg_pages(struct vm_are
41522 + unsigned long new_end = old_end - shift;
41523 + struct mmu_gather tlb;
41524 +
41525 +- BUG_ON(new_start > new_end);
41526 ++ if (new_start >= new_end || new_start < mmap_min_addr)
41527 ++ return -ENOMEM;
41528 +
41529 + /*
41530 + * ensure there are no vmas between where we want to go
41531 +@@ -603,6 +607,10 @@ static int shift_arg_pages(struct vm_are
41532 + if (vma != find_vma(mm, new_start))
41533 + return -EFAULT;
41534 +
41535 ++#ifdef CONFIG_PAX_SEGMEXEC
41536 ++ BUG_ON(pax_find_mirror_vma(vma));
41537 ++#endif
41538 ++
41539 + /*
41540 + * cover the whole range: [new_start, old_end)
41541 + */
41542 +@@ -683,10 +691,6 @@ int setup_arg_pages(struct linux_binprm
41543 + stack_top = arch_align_stack(stack_top);
41544 + stack_top = PAGE_ALIGN(stack_top);
41545 +
41546 +- if (unlikely(stack_top < mmap_min_addr) ||
41547 +- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41548 +- return -ENOMEM;
41549 +-
41550 + stack_shift = vma->vm_end - stack_top;
41551 +
41552 + bprm->p -= stack_shift;
41553 +@@ -698,8 +702,28 @@ int setup_arg_pages(struct linux_binprm
41554 + bprm->exec -= stack_shift;
41555 +
41556 + down_write(&mm->mmap_sem);
41557 ++
41558 ++ /* Move stack pages down in memory. */
41559 ++ if (stack_shift) {
41560 ++ ret = shift_arg_pages(vma, stack_shift);
41561 ++ if (ret)
41562 ++ goto out_unlock;
41563 ++ }
41564 ++
41565 + vm_flags = VM_STACK_FLAGS;
41566 +
41567 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41568 ++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41569 ++ vm_flags &= ~VM_EXEC;
41570 ++
41571 ++#ifdef CONFIG_PAX_MPROTECT
41572 ++ if (mm->pax_flags & MF_PAX_MPROTECT)
41573 ++ vm_flags &= ~VM_MAYEXEC;
41574 ++#endif
41575 ++
41576 ++ }
41577 ++#endif
41578 ++
41579 + /*
41580 + * Adjust stack execute permissions; explicitly enable for
41581 + * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41582 +@@ -718,13 +742,6 @@ int setup_arg_pages(struct linux_binprm
41583 + goto out_unlock;
41584 + BUG_ON(prev != vma);
41585 +
41586 +- /* Move stack pages down in memory. */
41587 +- if (stack_shift) {
41588 +- ret = shift_arg_pages(vma, stack_shift);
41589 +- if (ret)
41590 +- goto out_unlock;
41591 +- }
41592 +-
41593 + /* mprotect_fixup is overkill to remove the temporary stack flags */
41594 + vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41595 +
41596 +@@ -805,7 +822,7 @@ int kernel_read(struct file *file, loff_
41597 + old_fs = get_fs();
41598 + set_fs(get_ds());
41599 + /* The cast to a user pointer is valid due to the set_fs() */
41600 +- result = vfs_read(file, (void __user *)addr, count, &pos);
41601 ++ result = vfs_read(file, (void __force_user *)addr, count, &pos);
41602 + set_fs(old_fs);
41603 + return result;
41604 + }
41605 +@@ -1251,7 +1268,7 @@ int check_unsafe_exec(struct linux_binpr
41606 + }
41607 + rcu_read_unlock();
41608 +
41609 +- if (p->fs->users > n_fs) {
41610 ++ if (atomic_read(&p->fs->users) > n_fs) {
41611 + bprm->unsafe |= LSM_UNSAFE_SHARE;
41612 + } else {
41613 + res = -EAGAIN;
41614 +@@ -1454,6 +1471,11 @@ static int do_execve_common(const char *
41615 + struct user_arg_ptr envp,
41616 + struct pt_regs *regs)
41617 + {
41618 ++#ifdef CONFIG_GRKERNSEC
41619 ++ struct file *old_exec_file;
41620 ++ struct acl_subject_label *old_acl;
41621 ++ struct rlimit old_rlim[RLIM_NLIMITS];
41622 ++#endif
41623 + struct linux_binprm *bprm;
41624 + struct file *file;
41625 + struct files_struct *displaced;
41626 +@@ -1461,6 +1483,8 @@ static int do_execve_common(const char *
41627 + int retval;
41628 + const struct cred *cred = current_cred();
41629 +
41630 ++ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41631 ++
41632 + /*
41633 + * We move the actual failure in case of RLIMIT_NPROC excess from
41634 + * set*uid() to execve() because too many poorly written programs
41635 +@@ -1507,6 +1531,16 @@ static int do_execve_common(const char *
41636 + bprm->filename = filename;
41637 + bprm->interp = filename;
41638 +
41639 ++ if (gr_process_user_ban()) {
41640 ++ retval = -EPERM;
41641 ++ goto out_file;
41642 ++ }
41643 ++
41644 ++ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41645 ++ retval = -EACCES;
41646 ++ goto out_file;
41647 ++ }
41648 ++
41649 + retval = bprm_mm_init(bprm);
41650 + if (retval)
41651 + goto out_file;
41652 +@@ -1536,9 +1570,40 @@ static int do_execve_common(const char *
41653 + if (retval < 0)
41654 + goto out;
41655 +
41656 ++ if (!gr_tpe_allow(file)) {
41657 ++ retval = -EACCES;
41658 ++ goto out;
41659 ++ }
41660 ++
41661 ++ if (gr_check_crash_exec(file)) {
41662 ++ retval = -EACCES;
41663 ++ goto out;
41664 ++ }
41665 ++
41666 ++ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41667 ++
41668 ++ gr_handle_exec_args(bprm, argv);
41669 ++
41670 ++#ifdef CONFIG_GRKERNSEC
41671 ++ old_acl = current->acl;
41672 ++ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41673 ++ old_exec_file = current->exec_file;
41674 ++ get_file(file);
41675 ++ current->exec_file = file;
41676 ++#endif
41677 ++
41678 ++ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41679 ++ bprm->unsafe & LSM_UNSAFE_SHARE);
41680 ++ if (retval < 0)
41681 ++ goto out_fail;
41682 ++
41683 + retval = search_binary_handler(bprm,regs);
41684 + if (retval < 0)
41685 +- goto out;
41686 ++ goto out_fail;
41687 ++#ifdef CONFIG_GRKERNSEC
41688 ++ if (old_exec_file)
41689 ++ fput(old_exec_file);
41690 ++#endif
41691 +
41692 + /* execve succeeded */
41693 + current->fs->in_exec = 0;
41694 +@@ -1549,6 +1614,14 @@ static int do_execve_common(const char *
41695 + put_files_struct(displaced);
41696 + return retval;
41697 +
41698 ++out_fail:
41699 ++#ifdef CONFIG_GRKERNSEC
41700 ++ current->acl = old_acl;
41701 ++ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41702 ++ fput(current->exec_file);
41703 ++ current->exec_file = old_exec_file;
41704 ++#endif
41705 ++
41706 + out:
41707 + if (bprm->mm) {
41708 + acct_arg_size(bprm, 0);
41709 +@@ -1622,7 +1695,7 @@ static int expand_corename(struct core_n
41710 + {
41711 + char *old_corename = cn->corename;
41712 +
41713 +- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
41714 ++ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
41715 + cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
41716 +
41717 + if (!cn->corename) {
41718 +@@ -1719,7 +1792,7 @@ static int format_corename(struct core_n
41719 + int pid_in_pattern = 0;
41720 + int err = 0;
41721 +
41722 +- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
41723 ++ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
41724 + cn->corename = kmalloc(cn->size, GFP_KERNEL);
41725 + cn->used = 0;
41726 +
41727 +@@ -1816,6 +1889,218 @@ out:
41728 + return ispipe;
41729 + }
41730 +
41731 ++int pax_check_flags(unsigned long *flags)
41732 ++{
41733 ++ int retval = 0;
41734 ++
41735 ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41736 ++ if (*flags & MF_PAX_SEGMEXEC)
41737 ++ {
41738 ++ *flags &= ~MF_PAX_SEGMEXEC;
41739 ++ retval = -EINVAL;
41740 ++ }
41741 ++#endif
41742 ++
41743 ++ if ((*flags & MF_PAX_PAGEEXEC)
41744 ++
41745 ++#ifdef CONFIG_PAX_PAGEEXEC
41746 ++ && (*flags & MF_PAX_SEGMEXEC)
41747 ++#endif
41748 ++
41749 ++ )
41750 ++ {
41751 ++ *flags &= ~MF_PAX_PAGEEXEC;
41752 ++ retval = -EINVAL;
41753 ++ }
41754 ++
41755 ++ if ((*flags & MF_PAX_MPROTECT)
41756 ++
41757 ++#ifdef CONFIG_PAX_MPROTECT
41758 ++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41759 ++#endif
41760 ++
41761 ++ )
41762 ++ {
41763 ++ *flags &= ~MF_PAX_MPROTECT;
41764 ++ retval = -EINVAL;
41765 ++ }
41766 ++
41767 ++ if ((*flags & MF_PAX_EMUTRAMP)
41768 ++
41769 ++#ifdef CONFIG_PAX_EMUTRAMP
41770 ++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41771 ++#endif
41772 ++
41773 ++ )
41774 ++ {
41775 ++ *flags &= ~MF_PAX_EMUTRAMP;
41776 ++ retval = -EINVAL;
41777 ++ }
41778 ++
41779 ++ return retval;
41780 ++}
41781 ++
41782 ++EXPORT_SYMBOL(pax_check_flags);
41783 ++
41784 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41785 ++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41786 ++{
41787 ++ struct task_struct *tsk = current;
41788 ++ struct mm_struct *mm = current->mm;
41789 ++ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41790 ++ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41791 ++ char *path_exec = NULL;
41792 ++ char *path_fault = NULL;
41793 ++ unsigned long start = 0UL, end = 0UL, offset = 0UL;
41794 ++
41795 ++ if (buffer_exec && buffer_fault) {
41796 ++ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41797 ++
41798 ++ down_read(&mm->mmap_sem);
41799 ++ vma = mm->mmap;
41800 ++ while (vma && (!vma_exec || !vma_fault)) {
41801 ++ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41802 ++ vma_exec = vma;
41803 ++ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41804 ++ vma_fault = vma;
41805 ++ vma = vma->vm_next;
41806 ++ }
41807 ++ if (vma_exec) {
41808 ++ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41809 ++ if (IS_ERR(path_exec))
41810 ++ path_exec = "<path too long>";
41811 ++ else {
41812 ++ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41813 ++ if (path_exec) {
41814 ++ *path_exec = 0;
41815 ++ path_exec = buffer_exec;
41816 ++ } else
41817 ++ path_exec = "<path too long>";
41818 ++ }
41819 ++ }
41820 ++ if (vma_fault) {
41821 ++ start = vma_fault->vm_start;
41822 ++ end = vma_fault->vm_end;
41823 ++ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41824 ++ if (vma_fault->vm_file) {
41825 ++ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41826 ++ if (IS_ERR(path_fault))
41827 ++ path_fault = "<path too long>";
41828 ++ else {
41829 ++ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41830 ++ if (path_fault) {
41831 ++ *path_fault = 0;
41832 ++ path_fault = buffer_fault;
41833 ++ } else
41834 ++ path_fault = "<path too long>";
41835 ++ }
41836 ++ } else
41837 ++ path_fault = "<anonymous mapping>";
41838 ++ }
41839 ++ up_read(&mm->mmap_sem);
41840 ++ }
41841 ++ if (tsk->signal->curr_ip)
41842 ++ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41843 ++ else
41844 ++ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41845 ++ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41846 ++ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41847 ++ task_uid(tsk), task_euid(tsk), pc, sp);
41848 ++ free_page((unsigned long)buffer_exec);
41849 ++ free_page((unsigned long)buffer_fault);
41850 ++ pax_report_insns(regs, pc, sp);
41851 ++ do_coredump(SIGKILL, SIGKILL, regs);
41852 ++}
41853 ++#endif
41854 ++
41855 ++#ifdef CONFIG_PAX_REFCOUNT
41856 ++void pax_report_refcount_overflow(struct pt_regs *regs)
41857 ++{
41858 ++ if (current->signal->curr_ip)
41859 ++ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41860 ++ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41861 ++ else
41862 ++ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41863 ++ current->comm, task_pid_nr(current), current_uid(), current_euid());
41864 ++ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
41865 ++ show_regs(regs);
41866 ++ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
41867 ++}
41868 ++#endif
41869 ++
41870 ++#ifdef CONFIG_PAX_USERCOPY
41871 ++/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41872 ++int object_is_on_stack(const void *obj, unsigned long len)
41873 ++{
41874 ++ const void * const stack = task_stack_page(current);
41875 ++ const void * const stackend = stack + THREAD_SIZE;
41876 ++
41877 ++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41878 ++ const void *frame = NULL;
41879 ++ const void *oldframe;
41880 ++#endif
41881 ++
41882 ++ if (obj + len < obj)
41883 ++ return -1;
41884 ++
41885 ++ if (obj + len <= stack || stackend <= obj)
41886 ++ return 0;
41887 ++
41888 ++ if (obj < stack || stackend < obj + len)
41889 ++ return -1;
41890 ++
41891 ++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41892 ++ oldframe = __builtin_frame_address(1);
41893 ++ if (oldframe)
41894 ++ frame = __builtin_frame_address(2);
41895 ++ /*
41896 ++ low ----------------------------------------------> high
41897 ++ [saved bp][saved ip][args][local vars][saved bp][saved ip]
41898 ++ ^----------------^
41899 ++ allow copies only within here
41900 ++ */
41901 ++ while (stack <= frame && frame < stackend) {
41902 ++ /* if obj + len extends past the last frame, this
41903 ++ check won't pass and the next frame will be 0,
41904 ++ causing us to bail out and correctly report
41905 ++ the copy as invalid
41906 ++ */
41907 ++ if (obj + len <= frame)
41908 ++ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41909 ++ oldframe = frame;
41910 ++ frame = *(const void * const *)frame;
41911 ++ }
41912 ++ return -1;
41913 ++#else
41914 ++ return 1;
41915 ++#endif
41916 ++}
41917 ++
41918 ++NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41919 ++{
41920 ++ if (current->signal->curr_ip)
41921 ++ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41922 ++ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41923 ++ else
41924 ++ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41925 ++ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41926 ++ dump_stack();
41927 ++ gr_handle_kernel_exploit();
41928 ++ do_group_exit(SIGKILL);
41929 ++}
41930 ++#endif
41931 ++
41932 ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41933 ++void pax_track_stack(void)
41934 ++{
41935 ++ unsigned long sp = (unsigned long)&sp;
41936 ++ if (sp < current_thread_info()->lowest_stack &&
41937 ++ sp > (unsigned long)task_stack_page(current))
41938 ++ current_thread_info()->lowest_stack = sp;
41939 ++}
41940 ++EXPORT_SYMBOL(pax_track_stack);
41941 ++#endif
41942 ++
41943 + static int zap_process(struct task_struct *start, int exit_code)
41944 + {
41945 + struct task_struct *t;
41946 +@@ -2027,17 +2312,17 @@ static void wait_for_dump_helpers(struct
41947 + pipe = file->f_path.dentry->d_inode->i_pipe;
41948 +
41949 + pipe_lock(pipe);
41950 +- pipe->readers++;
41951 +- pipe->writers--;
41952 ++ atomic_inc(&pipe->readers);
41953 ++ atomic_dec(&pipe->writers);
41954 +
41955 +- while ((pipe->readers > 1) && (!signal_pending(current))) {
41956 ++ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41957 + wake_up_interruptible_sync(&pipe->wait);
41958 + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41959 + pipe_wait(pipe);
41960 + }
41961 +
41962 +- pipe->readers--;
41963 +- pipe->writers++;
41964 ++ atomic_dec(&pipe->readers);
41965 ++ atomic_inc(&pipe->writers);
41966 + pipe_unlock(pipe);
41967 +
41968 + }
41969 +@@ -2098,7 +2383,7 @@ void do_coredump(long signr, int exit_co
41970 + int retval = 0;
41971 + int flag = 0;
41972 + int ispipe;
41973 +- static atomic_t core_dump_count = ATOMIC_INIT(0);
41974 ++ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41975 + struct coredump_params cprm = {
41976 + .signr = signr,
41977 + .regs = regs,
41978 +@@ -2113,6 +2398,9 @@ void do_coredump(long signr, int exit_co
41979 +
41980 + audit_core_dumps(signr);
41981 +
41982 ++ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41983 ++ gr_handle_brute_attach(current, cprm.mm_flags);
41984 ++
41985 + binfmt = mm->binfmt;
41986 + if (!binfmt || !binfmt->core_dump)
41987 + goto fail;
41988 +@@ -2180,7 +2468,7 @@ void do_coredump(long signr, int exit_co
41989 + }
41990 + cprm.limit = RLIM_INFINITY;
41991 +
41992 +- dump_count = atomic_inc_return(&core_dump_count);
41993 ++ dump_count = atomic_inc_return_unchecked(&core_dump_count);
41994 + if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41995 + printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41996 + task_tgid_vnr(current), current->comm);
41997 +@@ -2207,6 +2495,8 @@ void do_coredump(long signr, int exit_co
41998 + } else {
41999 + struct inode *inode;
42000 +
42001 ++ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
42002 ++
42003 + if (cprm.limit < binfmt->min_coredump)
42004 + goto fail_unlock;
42005 +
42006 +@@ -2250,7 +2540,7 @@ close_fail:
42007 + filp_close(cprm.file, NULL);
42008 + fail_dropcount:
42009 + if (ispipe)
42010 +- atomic_dec(&core_dump_count);
42011 ++ atomic_dec_unchecked(&core_dump_count);
42012 + fail_unlock:
42013 + kfree(cn.corename);
42014 + fail_corename:
42015 +@@ -2269,7 +2559,7 @@ fail:
42016 + */
42017 + int dump_write(struct file *file, const void *addr, int nr)
42018 + {
42019 +- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
42020 ++ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
42021 + }
42022 + EXPORT_SYMBOL(dump_write);
42023 +
42024 +diff -urNp linux-3.1.1/fs/ext2/balloc.c linux-3.1.1/fs/ext2/balloc.c
42025 +--- linux-3.1.1/fs/ext2/balloc.c 2011-11-11 15:19:27.000000000 -0500
42026 ++++ linux-3.1.1/fs/ext2/balloc.c 2011-11-16 18:40:29.000000000 -0500
42027 +@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
42028 +
42029 + free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42030 + root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42031 +- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42032 ++ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42033 + sbi->s_resuid != current_fsuid() &&
42034 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42035 + return 0;
42036 +diff -urNp linux-3.1.1/fs/ext3/balloc.c linux-3.1.1/fs/ext3/balloc.c
42037 +--- linux-3.1.1/fs/ext3/balloc.c 2011-11-11 15:19:27.000000000 -0500
42038 ++++ linux-3.1.1/fs/ext3/balloc.c 2011-11-16 18:40:29.000000000 -0500
42039 +@@ -1446,7 +1446,7 @@ static int ext3_has_free_blocks(struct e
42040 +
42041 + free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42042 + root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42043 +- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42044 ++ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42045 + sbi->s_resuid != current_fsuid() &&
42046 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42047 + return 0;
42048 +diff -urNp linux-3.1.1/fs/ext4/balloc.c linux-3.1.1/fs/ext4/balloc.c
42049 +--- linux-3.1.1/fs/ext4/balloc.c 2011-11-11 15:19:27.000000000 -0500
42050 ++++ linux-3.1.1/fs/ext4/balloc.c 2011-11-16 18:40:29.000000000 -0500
42051 +@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct e
42052 + /* Hm, nope. Are (enough) root reserved blocks available? */
42053 + if (sbi->s_resuid == current_fsuid() ||
42054 + ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42055 +- capable(CAP_SYS_RESOURCE) ||
42056 +- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42057 ++ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42058 ++ capable_nolog(CAP_SYS_RESOURCE)) {
42059 +
42060 + if (free_blocks >= (nblocks + dirty_blocks))
42061 + return 1;
42062 +diff -urNp linux-3.1.1/fs/ext4/ext4.h linux-3.1.1/fs/ext4/ext4.h
42063 +--- linux-3.1.1/fs/ext4/ext4.h 2011-11-11 15:19:27.000000000 -0500
42064 ++++ linux-3.1.1/fs/ext4/ext4.h 2011-11-16 18:39:08.000000000 -0500
42065 +@@ -1180,19 +1180,19 @@ struct ext4_sb_info {
42066 + unsigned long s_mb_last_start;
42067 +
42068 + /* stats for buddy allocator */
42069 +- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42070 +- atomic_t s_bal_success; /* we found long enough chunks */
42071 +- atomic_t s_bal_allocated; /* in blocks */
42072 +- atomic_t s_bal_ex_scanned; /* total extents scanned */
42073 +- atomic_t s_bal_goals; /* goal hits */
42074 +- atomic_t s_bal_breaks; /* too long searches */
42075 +- atomic_t s_bal_2orders; /* 2^order hits */
42076 ++ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42077 ++ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42078 ++ atomic_unchecked_t s_bal_allocated; /* in blocks */
42079 ++ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42080 ++ atomic_unchecked_t s_bal_goals; /* goal hits */
42081 ++ atomic_unchecked_t s_bal_breaks; /* too long searches */
42082 ++ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42083 + spinlock_t s_bal_lock;
42084 + unsigned long s_mb_buddies_generated;
42085 + unsigned long long s_mb_generation_time;
42086 +- atomic_t s_mb_lost_chunks;
42087 +- atomic_t s_mb_preallocated;
42088 +- atomic_t s_mb_discarded;
42089 ++ atomic_unchecked_t s_mb_lost_chunks;
42090 ++ atomic_unchecked_t s_mb_preallocated;
42091 ++ atomic_unchecked_t s_mb_discarded;
42092 + atomic_t s_lock_busy;
42093 +
42094 + /* locality groups */
42095 +diff -urNp linux-3.1.1/fs/ext4/file.c linux-3.1.1/fs/ext4/file.c
42096 +--- linux-3.1.1/fs/ext4/file.c 2011-11-11 15:19:27.000000000 -0500
42097 ++++ linux-3.1.1/fs/ext4/file.c 2011-11-16 18:40:29.000000000 -0500
42098 +@@ -181,8 +181,8 @@ static int ext4_file_open(struct inode *
42099 + path.dentry = mnt->mnt_root;
42100 + cp = d_path(&path, buf, sizeof(buf));
42101 + if (!IS_ERR(cp)) {
42102 +- memcpy(sbi->s_es->s_last_mounted, cp,
42103 +- sizeof(sbi->s_es->s_last_mounted));
42104 ++ strlcpy(sbi->s_es->s_last_mounted, cp,
42105 ++ sizeof(sbi->s_es->s_last_mounted));
42106 + ext4_mark_super_dirty(sb);
42107 + }
42108 + }
42109 +diff -urNp linux-3.1.1/fs/ext4/ioctl.c linux-3.1.1/fs/ext4/ioctl.c
42110 +--- linux-3.1.1/fs/ext4/ioctl.c 2011-11-11 15:19:27.000000000 -0500
42111 ++++ linux-3.1.1/fs/ext4/ioctl.c 2011-11-16 18:39:08.000000000 -0500
42112 +@@ -348,7 +348,7 @@ mext_out:
42113 + if (!blk_queue_discard(q))
42114 + return -EOPNOTSUPP;
42115 +
42116 +- if (copy_from_user(&range, (struct fstrim_range *)arg,
42117 ++ if (copy_from_user(&range, (struct fstrim_range __user *)arg,
42118 + sizeof(range)))
42119 + return -EFAULT;
42120 +
42121 +@@ -358,7 +358,7 @@ mext_out:
42122 + if (ret < 0)
42123 + return ret;
42124 +
42125 +- if (copy_to_user((struct fstrim_range *)arg, &range,
42126 ++ if (copy_to_user((struct fstrim_range __user *)arg, &range,
42127 + sizeof(range)))
42128 + return -EFAULT;
42129 +
42130 +diff -urNp linux-3.1.1/fs/ext4/mballoc.c linux-3.1.1/fs/ext4/mballoc.c
42131 +--- linux-3.1.1/fs/ext4/mballoc.c 2011-11-11 15:19:27.000000000 -0500
42132 ++++ linux-3.1.1/fs/ext4/mballoc.c 2011-11-16 18:40:29.000000000 -0500
42133 +@@ -1795,7 +1795,7 @@ void ext4_mb_simple_scan_group(struct ex
42134 + BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42135 +
42136 + if (EXT4_SB(sb)->s_mb_stats)
42137 +- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42138 ++ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42139 +
42140 + break;
42141 + }
42142 +@@ -2089,7 +2089,7 @@ repeat:
42143 + ac->ac_status = AC_STATUS_CONTINUE;
42144 + ac->ac_flags |= EXT4_MB_HINT_FIRST;
42145 + cr = 3;
42146 +- atomic_inc(&sbi->s_mb_lost_chunks);
42147 ++ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42148 + goto repeat;
42149 + }
42150 + }
42151 +@@ -2132,6 +2132,8 @@ static int ext4_mb_seq_groups_show(struc
42152 + ext4_grpblk_t counters[16];
42153 + } sg;
42154 +
42155 ++ pax_track_stack();
42156 ++
42157 + group--;
42158 + if (group == 0)
42159 + seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
42160 +@@ -2573,25 +2575,25 @@ int ext4_mb_release(struct super_block *
42161 + if (sbi->s_mb_stats) {
42162 + ext4_msg(sb, KERN_INFO,
42163 + "mballoc: %u blocks %u reqs (%u success)",
42164 +- atomic_read(&sbi->s_bal_allocated),
42165 +- atomic_read(&sbi->s_bal_reqs),
42166 +- atomic_read(&sbi->s_bal_success));
42167 ++ atomic_read_unchecked(&sbi->s_bal_allocated),
42168 ++ atomic_read_unchecked(&sbi->s_bal_reqs),
42169 ++ atomic_read_unchecked(&sbi->s_bal_success));
42170 + ext4_msg(sb, KERN_INFO,
42171 + "mballoc: %u extents scanned, %u goal hits, "
42172 + "%u 2^N hits, %u breaks, %u lost",
42173 +- atomic_read(&sbi->s_bal_ex_scanned),
42174 +- atomic_read(&sbi->s_bal_goals),
42175 +- atomic_read(&sbi->s_bal_2orders),
42176 +- atomic_read(&sbi->s_bal_breaks),
42177 +- atomic_read(&sbi->s_mb_lost_chunks));
42178 ++ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42179 ++ atomic_read_unchecked(&sbi->s_bal_goals),
42180 ++ atomic_read_unchecked(&sbi->s_bal_2orders),
42181 ++ atomic_read_unchecked(&sbi->s_bal_breaks),
42182 ++ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42183 + ext4_msg(sb, KERN_INFO,
42184 + "mballoc: %lu generated and it took %Lu",
42185 + sbi->s_mb_buddies_generated,
42186 + sbi->s_mb_generation_time);
42187 + ext4_msg(sb, KERN_INFO,
42188 + "mballoc: %u preallocated, %u discarded",
42189 +- atomic_read(&sbi->s_mb_preallocated),
42190 +- atomic_read(&sbi->s_mb_discarded));
42191 ++ atomic_read_unchecked(&sbi->s_mb_preallocated),
42192 ++ atomic_read_unchecked(&sbi->s_mb_discarded));
42193 + }
42194 +
42195 + free_percpu(sbi->s_locality_groups);
42196 +@@ -3070,16 +3072,16 @@ static void ext4_mb_collect_stats(struct
42197 + struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42198 +
42199 + if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42200 +- atomic_inc(&sbi->s_bal_reqs);
42201 +- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42202 ++ atomic_inc_unchecked(&sbi->s_bal_reqs);
42203 ++ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42204 + if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42205 +- atomic_inc(&sbi->s_bal_success);
42206 +- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42207 ++ atomic_inc_unchecked(&sbi->s_bal_success);
42208 ++ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42209 + if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42210 + ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42211 +- atomic_inc(&sbi->s_bal_goals);
42212 ++ atomic_inc_unchecked(&sbi->s_bal_goals);
42213 + if (ac->ac_found > sbi->s_mb_max_to_scan)
42214 +- atomic_inc(&sbi->s_bal_breaks);
42215 ++ atomic_inc_unchecked(&sbi->s_bal_breaks);
42216 + }
42217 +
42218 + if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42219 +@@ -3477,7 +3479,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
42220 + trace_ext4_mb_new_inode_pa(ac, pa);
42221 +
42222 + ext4_mb_use_inode_pa(ac, pa);
42223 +- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42224 ++ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42225 +
42226 + ei = EXT4_I(ac->ac_inode);
42227 + grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42228 +@@ -3537,7 +3539,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
42229 + trace_ext4_mb_new_group_pa(ac, pa);
42230 +
42231 + ext4_mb_use_group_pa(ac, pa);
42232 +- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42233 ++ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42234 +
42235 + grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42236 + lg = ac->ac_lg;
42237 +@@ -3625,7 +3627,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
42238 + * from the bitmap and continue.
42239 + */
42240 + }
42241 +- atomic_add(free, &sbi->s_mb_discarded);
42242 ++ atomic_add_unchecked(free, &sbi->s_mb_discarded);
42243 +
42244 + return err;
42245 + }
42246 +@@ -3643,7 +3645,7 @@ ext4_mb_release_group_pa(struct ext4_bud
42247 + ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42248 + BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42249 + mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42250 +- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42251 ++ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42252 + trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42253 +
42254 + return 0;
42255 +diff -urNp linux-3.1.1/fs/fcntl.c linux-3.1.1/fs/fcntl.c
42256 +--- linux-3.1.1/fs/fcntl.c 2011-11-11 15:19:27.000000000 -0500
42257 ++++ linux-3.1.1/fs/fcntl.c 2011-11-16 23:40:25.000000000 -0500
42258 +@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
42259 + if (err)
42260 + return err;
42261 +
42262 ++ if (gr_handle_chroot_fowner(pid, type))
42263 ++ return -ENOENT;
42264 ++ if (gr_check_protected_task_fowner(pid, type))
42265 ++ return -EACCES;
42266 ++
42267 + f_modown(filp, pid, type, force);
42268 + return 0;
42269 + }
42270 +@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42271 +
42272 + static int f_setown_ex(struct file *filp, unsigned long arg)
42273 + {
42274 +- struct f_owner_ex * __user owner_p = (void * __user)arg;
42275 ++ struct f_owner_ex __user *owner_p = (void __user *)arg;
42276 + struct f_owner_ex owner;
42277 + struct pid *pid;
42278 + int type;
42279 +@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp
42280 +
42281 + static int f_getown_ex(struct file *filp, unsigned long arg)
42282 + {
42283 +- struct f_owner_ex * __user owner_p = (void * __user)arg;
42284 ++ struct f_owner_ex __user *owner_p = (void __user *)arg;
42285 + struct f_owner_ex owner;
42286 + int ret = 0;
42287 +
42288 +@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
42289 + switch (cmd) {
42290 + case F_DUPFD:
42291 + case F_DUPFD_CLOEXEC:
42292 ++ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42293 + if (arg >= rlimit(RLIMIT_NOFILE))
42294 + break;
42295 + err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42296 +diff -urNp linux-3.1.1/fs/fifo.c linux-3.1.1/fs/fifo.c
42297 +--- linux-3.1.1/fs/fifo.c 2011-11-11 15:19:27.000000000 -0500
42298 ++++ linux-3.1.1/fs/fifo.c 2011-11-16 18:39:08.000000000 -0500
42299 +@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
42300 + */
42301 + filp->f_op = &read_pipefifo_fops;
42302 + pipe->r_counter++;
42303 +- if (pipe->readers++ == 0)
42304 ++ if (atomic_inc_return(&pipe->readers) == 1)
42305 + wake_up_partner(inode);
42306 +
42307 +- if (!pipe->writers) {
42308 ++ if (!atomic_read(&pipe->writers)) {
42309 + if ((filp->f_flags & O_NONBLOCK)) {
42310 + /* suppress POLLHUP until we have
42311 + * seen a writer */
42312 +@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
42313 + * errno=ENXIO when there is no process reading the FIFO.
42314 + */
42315 + ret = -ENXIO;
42316 +- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42317 ++ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42318 + goto err;
42319 +
42320 + filp->f_op = &write_pipefifo_fops;
42321 + pipe->w_counter++;
42322 +- if (!pipe->writers++)
42323 ++ if (atomic_inc_return(&pipe->writers) == 1)
42324 + wake_up_partner(inode);
42325 +
42326 +- if (!pipe->readers) {
42327 ++ if (!atomic_read(&pipe->readers)) {
42328 + wait_for_partner(inode, &pipe->r_counter);
42329 + if (signal_pending(current))
42330 + goto err_wr;
42331 +@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
42332 + */
42333 + filp->f_op = &rdwr_pipefifo_fops;
42334 +
42335 +- pipe->readers++;
42336 +- pipe->writers++;
42337 ++ atomic_inc(&pipe->readers);
42338 ++ atomic_inc(&pipe->writers);
42339 + pipe->r_counter++;
42340 + pipe->w_counter++;
42341 +- if (pipe->readers == 1 || pipe->writers == 1)
42342 ++ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42343 + wake_up_partner(inode);
42344 + break;
42345 +
42346 +@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
42347 + return 0;
42348 +
42349 + err_rd:
42350 +- if (!--pipe->readers)
42351 ++ if (atomic_dec_and_test(&pipe->readers))
42352 + wake_up_interruptible(&pipe->wait);
42353 + ret = -ERESTARTSYS;
42354 + goto err;
42355 +
42356 + err_wr:
42357 +- if (!--pipe->writers)
42358 ++ if (atomic_dec_and_test(&pipe->writers))
42359 + wake_up_interruptible(&pipe->wait);
42360 + ret = -ERESTARTSYS;
42361 + goto err;
42362 +
42363 + err:
42364 +- if (!pipe->readers && !pipe->writers)
42365 ++ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42366 + free_pipe_info(inode);
42367 +
42368 + err_nocleanup:
42369 +diff -urNp linux-3.1.1/fs/file.c linux-3.1.1/fs/file.c
42370 +--- linux-3.1.1/fs/file.c 2011-11-11 15:19:27.000000000 -0500
42371 ++++ linux-3.1.1/fs/file.c 2011-11-16 18:40:29.000000000 -0500
42372 +@@ -15,6 +15,7 @@
42373 + #include <linux/slab.h>
42374 + #include <linux/vmalloc.h>
42375 + #include <linux/file.h>
42376 ++#include <linux/security.h>
42377 + #include <linux/fdtable.h>
42378 + #include <linux/bitops.h>
42379 + #include <linux/interrupt.h>
42380 +@@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
42381 + * N.B. For clone tasks sharing a files structure, this test
42382 + * will limit the total number of files that can be opened.
42383 + */
42384 ++ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42385 + if (nr >= rlimit(RLIMIT_NOFILE))
42386 + return -EMFILE;
42387 +
42388 +diff -urNp linux-3.1.1/fs/filesystems.c linux-3.1.1/fs/filesystems.c
42389 +--- linux-3.1.1/fs/filesystems.c 2011-11-11 15:19:27.000000000 -0500
42390 ++++ linux-3.1.1/fs/filesystems.c 2011-11-16 18:40:29.000000000 -0500
42391 +@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
42392 + int len = dot ? dot - name : strlen(name);
42393 +
42394 + fs = __get_fs_type(name, len);
42395 ++
42396 ++#ifdef CONFIG_GRKERNSEC_MODHARDEN
42397 ++ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42398 ++#else
42399 + if (!fs && (request_module("%.*s", len, name) == 0))
42400 ++#endif
42401 + fs = __get_fs_type(name, len);
42402 +
42403 + if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42404 +diff -urNp linux-3.1.1/fs/fscache/cookie.c linux-3.1.1/fs/fscache/cookie.c
42405 +--- linux-3.1.1/fs/fscache/cookie.c 2011-11-11 15:19:27.000000000 -0500
42406 ++++ linux-3.1.1/fs/fscache/cookie.c 2011-11-16 18:39:08.000000000 -0500
42407 +@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
42408 + parent ? (char *) parent->def->name : "<no-parent>",
42409 + def->name, netfs_data);
42410 +
42411 +- fscache_stat(&fscache_n_acquires);
42412 ++ fscache_stat_unchecked(&fscache_n_acquires);
42413 +
42414 + /* if there's no parent cookie, then we don't create one here either */
42415 + if (!parent) {
42416 +- fscache_stat(&fscache_n_acquires_null);
42417 ++ fscache_stat_unchecked(&fscache_n_acquires_null);
42418 + _leave(" [no parent]");
42419 + return NULL;
42420 + }
42421 +@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
42422 + /* allocate and initialise a cookie */
42423 + cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42424 + if (!cookie) {
42425 +- fscache_stat(&fscache_n_acquires_oom);
42426 ++ fscache_stat_unchecked(&fscache_n_acquires_oom);
42427 + _leave(" [ENOMEM]");
42428 + return NULL;
42429 + }
42430 +@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
42431 +
42432 + switch (cookie->def->type) {
42433 + case FSCACHE_COOKIE_TYPE_INDEX:
42434 +- fscache_stat(&fscache_n_cookie_index);
42435 ++ fscache_stat_unchecked(&fscache_n_cookie_index);
42436 + break;
42437 + case FSCACHE_COOKIE_TYPE_DATAFILE:
42438 +- fscache_stat(&fscache_n_cookie_data);
42439 ++ fscache_stat_unchecked(&fscache_n_cookie_data);
42440 + break;
42441 + default:
42442 +- fscache_stat(&fscache_n_cookie_special);
42443 ++ fscache_stat_unchecked(&fscache_n_cookie_special);
42444 + break;
42445 + }
42446 +
42447 +@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
42448 + if (fscache_acquire_non_index_cookie(cookie) < 0) {
42449 + atomic_dec(&parent->n_children);
42450 + __fscache_cookie_put(cookie);
42451 +- fscache_stat(&fscache_n_acquires_nobufs);
42452 ++ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42453 + _leave(" = NULL");
42454 + return NULL;
42455 + }
42456 + }
42457 +
42458 +- fscache_stat(&fscache_n_acquires_ok);
42459 ++ fscache_stat_unchecked(&fscache_n_acquires_ok);
42460 + _leave(" = %p", cookie);
42461 + return cookie;
42462 + }
42463 +@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
42464 + cache = fscache_select_cache_for_object(cookie->parent);
42465 + if (!cache) {
42466 + up_read(&fscache_addremove_sem);
42467 +- fscache_stat(&fscache_n_acquires_no_cache);
42468 ++ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42469 + _leave(" = -ENOMEDIUM [no cache]");
42470 + return -ENOMEDIUM;
42471 + }
42472 +@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
42473 + object = cache->ops->alloc_object(cache, cookie);
42474 + fscache_stat_d(&fscache_n_cop_alloc_object);
42475 + if (IS_ERR(object)) {
42476 +- fscache_stat(&fscache_n_object_no_alloc);
42477 ++ fscache_stat_unchecked(&fscache_n_object_no_alloc);
42478 + ret = PTR_ERR(object);
42479 + goto error;
42480 + }
42481 +
42482 +- fscache_stat(&fscache_n_object_alloc);
42483 ++ fscache_stat_unchecked(&fscache_n_object_alloc);
42484 +
42485 + object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42486 +
42487 +@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
42488 + struct fscache_object *object;
42489 + struct hlist_node *_p;
42490 +
42491 +- fscache_stat(&fscache_n_updates);
42492 ++ fscache_stat_unchecked(&fscache_n_updates);
42493 +
42494 + if (!cookie) {
42495 +- fscache_stat(&fscache_n_updates_null);
42496 ++ fscache_stat_unchecked(&fscache_n_updates_null);
42497 + _leave(" [no cookie]");
42498 + return;
42499 + }
42500 +@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
42501 + struct fscache_object *object;
42502 + unsigned long event;
42503 +
42504 +- fscache_stat(&fscache_n_relinquishes);
42505 ++ fscache_stat_unchecked(&fscache_n_relinquishes);
42506 + if (retire)
42507 +- fscache_stat(&fscache_n_relinquishes_retire);
42508 ++ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42509 +
42510 + if (!cookie) {
42511 +- fscache_stat(&fscache_n_relinquishes_null);
42512 ++ fscache_stat_unchecked(&fscache_n_relinquishes_null);
42513 + _leave(" [no cookie]");
42514 + return;
42515 + }
42516 +@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
42517 +
42518 + /* wait for the cookie to finish being instantiated (or to fail) */
42519 + if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42520 +- fscache_stat(&fscache_n_relinquishes_waitcrt);
42521 ++ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42522 + wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42523 + fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42524 + }
42525 +diff -urNp linux-3.1.1/fs/fscache/internal.h linux-3.1.1/fs/fscache/internal.h
42526 +--- linux-3.1.1/fs/fscache/internal.h 2011-11-11 15:19:27.000000000 -0500
42527 ++++ linux-3.1.1/fs/fscache/internal.h 2011-11-16 18:39:08.000000000 -0500
42528 +@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
42529 + extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42530 + extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42531 +
42532 +-extern atomic_t fscache_n_op_pend;
42533 +-extern atomic_t fscache_n_op_run;
42534 +-extern atomic_t fscache_n_op_enqueue;
42535 +-extern atomic_t fscache_n_op_deferred_release;
42536 +-extern atomic_t fscache_n_op_release;
42537 +-extern atomic_t fscache_n_op_gc;
42538 +-extern atomic_t fscache_n_op_cancelled;
42539 +-extern atomic_t fscache_n_op_rejected;
42540 +-
42541 +-extern atomic_t fscache_n_attr_changed;
42542 +-extern atomic_t fscache_n_attr_changed_ok;
42543 +-extern atomic_t fscache_n_attr_changed_nobufs;
42544 +-extern atomic_t fscache_n_attr_changed_nomem;
42545 +-extern atomic_t fscache_n_attr_changed_calls;
42546 +-
42547 +-extern atomic_t fscache_n_allocs;
42548 +-extern atomic_t fscache_n_allocs_ok;
42549 +-extern atomic_t fscache_n_allocs_wait;
42550 +-extern atomic_t fscache_n_allocs_nobufs;
42551 +-extern atomic_t fscache_n_allocs_intr;
42552 +-extern atomic_t fscache_n_allocs_object_dead;
42553 +-extern atomic_t fscache_n_alloc_ops;
42554 +-extern atomic_t fscache_n_alloc_op_waits;
42555 +-
42556 +-extern atomic_t fscache_n_retrievals;
42557 +-extern atomic_t fscache_n_retrievals_ok;
42558 +-extern atomic_t fscache_n_retrievals_wait;
42559 +-extern atomic_t fscache_n_retrievals_nodata;
42560 +-extern atomic_t fscache_n_retrievals_nobufs;
42561 +-extern atomic_t fscache_n_retrievals_intr;
42562 +-extern atomic_t fscache_n_retrievals_nomem;
42563 +-extern atomic_t fscache_n_retrievals_object_dead;
42564 +-extern atomic_t fscache_n_retrieval_ops;
42565 +-extern atomic_t fscache_n_retrieval_op_waits;
42566 +-
42567 +-extern atomic_t fscache_n_stores;
42568 +-extern atomic_t fscache_n_stores_ok;
42569 +-extern atomic_t fscache_n_stores_again;
42570 +-extern atomic_t fscache_n_stores_nobufs;
42571 +-extern atomic_t fscache_n_stores_oom;
42572 +-extern atomic_t fscache_n_store_ops;
42573 +-extern atomic_t fscache_n_store_calls;
42574 +-extern atomic_t fscache_n_store_pages;
42575 +-extern atomic_t fscache_n_store_radix_deletes;
42576 +-extern atomic_t fscache_n_store_pages_over_limit;
42577 +-
42578 +-extern atomic_t fscache_n_store_vmscan_not_storing;
42579 +-extern atomic_t fscache_n_store_vmscan_gone;
42580 +-extern atomic_t fscache_n_store_vmscan_busy;
42581 +-extern atomic_t fscache_n_store_vmscan_cancelled;
42582 +-
42583 +-extern atomic_t fscache_n_marks;
42584 +-extern atomic_t fscache_n_uncaches;
42585 +-
42586 +-extern atomic_t fscache_n_acquires;
42587 +-extern atomic_t fscache_n_acquires_null;
42588 +-extern atomic_t fscache_n_acquires_no_cache;
42589 +-extern atomic_t fscache_n_acquires_ok;
42590 +-extern atomic_t fscache_n_acquires_nobufs;
42591 +-extern atomic_t fscache_n_acquires_oom;
42592 +-
42593 +-extern atomic_t fscache_n_updates;
42594 +-extern atomic_t fscache_n_updates_null;
42595 +-extern atomic_t fscache_n_updates_run;
42596 +-
42597 +-extern atomic_t fscache_n_relinquishes;
42598 +-extern atomic_t fscache_n_relinquishes_null;
42599 +-extern atomic_t fscache_n_relinquishes_waitcrt;
42600 +-extern atomic_t fscache_n_relinquishes_retire;
42601 +-
42602 +-extern atomic_t fscache_n_cookie_index;
42603 +-extern atomic_t fscache_n_cookie_data;
42604 +-extern atomic_t fscache_n_cookie_special;
42605 +-
42606 +-extern atomic_t fscache_n_object_alloc;
42607 +-extern atomic_t fscache_n_object_no_alloc;
42608 +-extern atomic_t fscache_n_object_lookups;
42609 +-extern atomic_t fscache_n_object_lookups_negative;
42610 +-extern atomic_t fscache_n_object_lookups_positive;
42611 +-extern atomic_t fscache_n_object_lookups_timed_out;
42612 +-extern atomic_t fscache_n_object_created;
42613 +-extern atomic_t fscache_n_object_avail;
42614 +-extern atomic_t fscache_n_object_dead;
42615 +-
42616 +-extern atomic_t fscache_n_checkaux_none;
42617 +-extern atomic_t fscache_n_checkaux_okay;
42618 +-extern atomic_t fscache_n_checkaux_update;
42619 +-extern atomic_t fscache_n_checkaux_obsolete;
42620 ++extern atomic_unchecked_t fscache_n_op_pend;
42621 ++extern atomic_unchecked_t fscache_n_op_run;
42622 ++extern atomic_unchecked_t fscache_n_op_enqueue;
42623 ++extern atomic_unchecked_t fscache_n_op_deferred_release;
42624 ++extern atomic_unchecked_t fscache_n_op_release;
42625 ++extern atomic_unchecked_t fscache_n_op_gc;
42626 ++extern atomic_unchecked_t fscache_n_op_cancelled;
42627 ++extern atomic_unchecked_t fscache_n_op_rejected;
42628 ++
42629 ++extern atomic_unchecked_t fscache_n_attr_changed;
42630 ++extern atomic_unchecked_t fscache_n_attr_changed_ok;
42631 ++extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42632 ++extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42633 ++extern atomic_unchecked_t fscache_n_attr_changed_calls;
42634 ++
42635 ++extern atomic_unchecked_t fscache_n_allocs;
42636 ++extern atomic_unchecked_t fscache_n_allocs_ok;
42637 ++extern atomic_unchecked_t fscache_n_allocs_wait;
42638 ++extern atomic_unchecked_t fscache_n_allocs_nobufs;
42639 ++extern atomic_unchecked_t fscache_n_allocs_intr;
42640 ++extern atomic_unchecked_t fscache_n_allocs_object_dead;
42641 ++extern atomic_unchecked_t fscache_n_alloc_ops;
42642 ++extern atomic_unchecked_t fscache_n_alloc_op_waits;
42643 ++
42644 ++extern atomic_unchecked_t fscache_n_retrievals;
42645 ++extern atomic_unchecked_t fscache_n_retrievals_ok;
42646 ++extern atomic_unchecked_t fscache_n_retrievals_wait;
42647 ++extern atomic_unchecked_t fscache_n_retrievals_nodata;
42648 ++extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42649 ++extern atomic_unchecked_t fscache_n_retrievals_intr;
42650 ++extern atomic_unchecked_t fscache_n_retrievals_nomem;
42651 ++extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42652 ++extern atomic_unchecked_t fscache_n_retrieval_ops;
42653 ++extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42654 ++
42655 ++extern atomic_unchecked_t fscache_n_stores;
42656 ++extern atomic_unchecked_t fscache_n_stores_ok;
42657 ++extern atomic_unchecked_t fscache_n_stores_again;
42658 ++extern atomic_unchecked_t fscache_n_stores_nobufs;
42659 ++extern atomic_unchecked_t fscache_n_stores_oom;
42660 ++extern atomic_unchecked_t fscache_n_store_ops;
42661 ++extern atomic_unchecked_t fscache_n_store_calls;
42662 ++extern atomic_unchecked_t fscache_n_store_pages;
42663 ++extern atomic_unchecked_t fscache_n_store_radix_deletes;
42664 ++extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42665 ++
42666 ++extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42667 ++extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42668 ++extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42669 ++extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42670 ++
42671 ++extern atomic_unchecked_t fscache_n_marks;
42672 ++extern atomic_unchecked_t fscache_n_uncaches;
42673 ++
42674 ++extern atomic_unchecked_t fscache_n_acquires;
42675 ++extern atomic_unchecked_t fscache_n_acquires_null;
42676 ++extern atomic_unchecked_t fscache_n_acquires_no_cache;
42677 ++extern atomic_unchecked_t fscache_n_acquires_ok;
42678 ++extern atomic_unchecked_t fscache_n_acquires_nobufs;
42679 ++extern atomic_unchecked_t fscache_n_acquires_oom;
42680 ++
42681 ++extern atomic_unchecked_t fscache_n_updates;
42682 ++extern atomic_unchecked_t fscache_n_updates_null;
42683 ++extern atomic_unchecked_t fscache_n_updates_run;
42684 ++
42685 ++extern atomic_unchecked_t fscache_n_relinquishes;
42686 ++extern atomic_unchecked_t fscache_n_relinquishes_null;
42687 ++extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42688 ++extern atomic_unchecked_t fscache_n_relinquishes_retire;
42689 ++
42690 ++extern atomic_unchecked_t fscache_n_cookie_index;
42691 ++extern atomic_unchecked_t fscache_n_cookie_data;
42692 ++extern atomic_unchecked_t fscache_n_cookie_special;
42693 ++
42694 ++extern atomic_unchecked_t fscache_n_object_alloc;
42695 ++extern atomic_unchecked_t fscache_n_object_no_alloc;
42696 ++extern atomic_unchecked_t fscache_n_object_lookups;
42697 ++extern atomic_unchecked_t fscache_n_object_lookups_negative;
42698 ++extern atomic_unchecked_t fscache_n_object_lookups_positive;
42699 ++extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42700 ++extern atomic_unchecked_t fscache_n_object_created;
42701 ++extern atomic_unchecked_t fscache_n_object_avail;
42702 ++extern atomic_unchecked_t fscache_n_object_dead;
42703 ++
42704 ++extern atomic_unchecked_t fscache_n_checkaux_none;
42705 ++extern atomic_unchecked_t fscache_n_checkaux_okay;
42706 ++extern atomic_unchecked_t fscache_n_checkaux_update;
42707 ++extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42708 +
42709 + extern atomic_t fscache_n_cop_alloc_object;
42710 + extern atomic_t fscache_n_cop_lookup_object;
42711 +@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
42712 + atomic_inc(stat);
42713 + }
42714 +
42715 ++static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42716 ++{
42717 ++ atomic_inc_unchecked(stat);
42718 ++}
42719 ++
42720 + static inline void fscache_stat_d(atomic_t *stat)
42721 + {
42722 + atomic_dec(stat);
42723 +@@ -267,6 +272,7 @@ extern const struct file_operations fsca
42724 +
42725 + #define __fscache_stat(stat) (NULL)
42726 + #define fscache_stat(stat) do {} while (0)
42727 ++#define fscache_stat_unchecked(stat) do {} while (0)
42728 + #define fscache_stat_d(stat) do {} while (0)
42729 + #endif
42730 +
42731 +diff -urNp linux-3.1.1/fs/fscache/object.c linux-3.1.1/fs/fscache/object.c
42732 +--- linux-3.1.1/fs/fscache/object.c 2011-11-11 15:19:27.000000000 -0500
42733 ++++ linux-3.1.1/fs/fscache/object.c 2011-11-16 18:39:08.000000000 -0500
42734 +@@ -128,7 +128,7 @@ static void fscache_object_state_machine
42735 + /* update the object metadata on disk */
42736 + case FSCACHE_OBJECT_UPDATING:
42737 + clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42738 +- fscache_stat(&fscache_n_updates_run);
42739 ++ fscache_stat_unchecked(&fscache_n_updates_run);
42740 + fscache_stat(&fscache_n_cop_update_object);
42741 + object->cache->ops->update_object(object);
42742 + fscache_stat_d(&fscache_n_cop_update_object);
42743 +@@ -217,7 +217,7 @@ static void fscache_object_state_machine
42744 + spin_lock(&object->lock);
42745 + object->state = FSCACHE_OBJECT_DEAD;
42746 + spin_unlock(&object->lock);
42747 +- fscache_stat(&fscache_n_object_dead);
42748 ++ fscache_stat_unchecked(&fscache_n_object_dead);
42749 + goto terminal_transit;
42750 +
42751 + /* handle the parent cache of this object being withdrawn from
42752 +@@ -232,7 +232,7 @@ static void fscache_object_state_machine
42753 + spin_lock(&object->lock);
42754 + object->state = FSCACHE_OBJECT_DEAD;
42755 + spin_unlock(&object->lock);
42756 +- fscache_stat(&fscache_n_object_dead);
42757 ++ fscache_stat_unchecked(&fscache_n_object_dead);
42758 + goto terminal_transit;
42759 +
42760 + /* complain about the object being woken up once it is
42761 +@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
42762 + parent->cookie->def->name, cookie->def->name,
42763 + object->cache->tag->name);
42764 +
42765 +- fscache_stat(&fscache_n_object_lookups);
42766 ++ fscache_stat_unchecked(&fscache_n_object_lookups);
42767 + fscache_stat(&fscache_n_cop_lookup_object);
42768 + ret = object->cache->ops->lookup_object(object);
42769 + fscache_stat_d(&fscache_n_cop_lookup_object);
42770 +@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
42771 + if (ret == -ETIMEDOUT) {
42772 + /* probably stuck behind another object, so move this one to
42773 + * the back of the queue */
42774 +- fscache_stat(&fscache_n_object_lookups_timed_out);
42775 ++ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42776 + set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42777 + }
42778 +
42779 +@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
42780 +
42781 + spin_lock(&object->lock);
42782 + if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42783 +- fscache_stat(&fscache_n_object_lookups_negative);
42784 ++ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42785 +
42786 + /* transit here to allow write requests to begin stacking up
42787 + * and read requests to begin returning ENODATA */
42788 +@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
42789 + * result, in which case there may be data available */
42790 + spin_lock(&object->lock);
42791 + if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42792 +- fscache_stat(&fscache_n_object_lookups_positive);
42793 ++ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42794 +
42795 + clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42796 +
42797 +@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
42798 + set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42799 + } else {
42800 + ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42801 +- fscache_stat(&fscache_n_object_created);
42802 ++ fscache_stat_unchecked(&fscache_n_object_created);
42803 +
42804 + object->state = FSCACHE_OBJECT_AVAILABLE;
42805 + spin_unlock(&object->lock);
42806 +@@ -602,7 +602,7 @@ static void fscache_object_available(str
42807 + fscache_enqueue_dependents(object);
42808 +
42809 + fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42810 +- fscache_stat(&fscache_n_object_avail);
42811 ++ fscache_stat_unchecked(&fscache_n_object_avail);
42812 +
42813 + _leave("");
42814 + }
42815 +@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
42816 + enum fscache_checkaux result;
42817 +
42818 + if (!object->cookie->def->check_aux) {
42819 +- fscache_stat(&fscache_n_checkaux_none);
42820 ++ fscache_stat_unchecked(&fscache_n_checkaux_none);
42821 + return FSCACHE_CHECKAUX_OKAY;
42822 + }
42823 +
42824 +@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
42825 + switch (result) {
42826 + /* entry okay as is */
42827 + case FSCACHE_CHECKAUX_OKAY:
42828 +- fscache_stat(&fscache_n_checkaux_okay);
42829 ++ fscache_stat_unchecked(&fscache_n_checkaux_okay);
42830 + break;
42831 +
42832 + /* entry requires update */
42833 + case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42834 +- fscache_stat(&fscache_n_checkaux_update);
42835 ++ fscache_stat_unchecked(&fscache_n_checkaux_update);
42836 + break;
42837 +
42838 + /* entry requires deletion */
42839 + case FSCACHE_CHECKAUX_OBSOLETE:
42840 +- fscache_stat(&fscache_n_checkaux_obsolete);
42841 ++ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42842 + break;
42843 +
42844 + default:
42845 +diff -urNp linux-3.1.1/fs/fscache/operation.c linux-3.1.1/fs/fscache/operation.c
42846 +--- linux-3.1.1/fs/fscache/operation.c 2011-11-11 15:19:27.000000000 -0500
42847 ++++ linux-3.1.1/fs/fscache/operation.c 2011-11-16 18:39:08.000000000 -0500
42848 +@@ -17,7 +17,7 @@
42849 + #include <linux/slab.h>
42850 + #include "internal.h"
42851 +
42852 +-atomic_t fscache_op_debug_id;
42853 ++atomic_unchecked_t fscache_op_debug_id;
42854 + EXPORT_SYMBOL(fscache_op_debug_id);
42855 +
42856 + /**
42857 +@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fs
42858 + ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42859 + ASSERTCMP(atomic_read(&op->usage), >, 0);
42860 +
42861 +- fscache_stat(&fscache_n_op_enqueue);
42862 ++ fscache_stat_unchecked(&fscache_n_op_enqueue);
42863 + switch (op->flags & FSCACHE_OP_TYPE) {
42864 + case FSCACHE_OP_ASYNC:
42865 + _debug("queue async");
42866 +@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscach
42867 + wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42868 + if (op->processor)
42869 + fscache_enqueue_operation(op);
42870 +- fscache_stat(&fscache_n_op_run);
42871 ++ fscache_stat_unchecked(&fscache_n_op_run);
42872 + }
42873 +
42874 + /*
42875 +@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct f
42876 + if (object->n_ops > 1) {
42877 + atomic_inc(&op->usage);
42878 + list_add_tail(&op->pend_link, &object->pending_ops);
42879 +- fscache_stat(&fscache_n_op_pend);
42880 ++ fscache_stat_unchecked(&fscache_n_op_pend);
42881 + } else if (!list_empty(&object->pending_ops)) {
42882 + atomic_inc(&op->usage);
42883 + list_add_tail(&op->pend_link, &object->pending_ops);
42884 +- fscache_stat(&fscache_n_op_pend);
42885 ++ fscache_stat_unchecked(&fscache_n_op_pend);
42886 + fscache_start_operations(object);
42887 + } else {
42888 + ASSERTCMP(object->n_in_progress, ==, 0);
42889 +@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct f
42890 + object->n_exclusive++; /* reads and writes must wait */
42891 + atomic_inc(&op->usage);
42892 + list_add_tail(&op->pend_link, &object->pending_ops);
42893 +- fscache_stat(&fscache_n_op_pend);
42894 ++ fscache_stat_unchecked(&fscache_n_op_pend);
42895 + ret = 0;
42896 + } else {
42897 + /* not allowed to submit ops in any other state */
42898 +@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_obj
42899 + if (object->n_exclusive > 0) {
42900 + atomic_inc(&op->usage);
42901 + list_add_tail(&op->pend_link, &object->pending_ops);
42902 +- fscache_stat(&fscache_n_op_pend);
42903 ++ fscache_stat_unchecked(&fscache_n_op_pend);
42904 + } else if (!list_empty(&object->pending_ops)) {
42905 + atomic_inc(&op->usage);
42906 + list_add_tail(&op->pend_link, &object->pending_ops);
42907 +- fscache_stat(&fscache_n_op_pend);
42908 ++ fscache_stat_unchecked(&fscache_n_op_pend);
42909 + fscache_start_operations(object);
42910 + } else {
42911 + ASSERTCMP(object->n_exclusive, ==, 0);
42912 +@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_obj
42913 + object->n_ops++;
42914 + atomic_inc(&op->usage);
42915 + list_add_tail(&op->pend_link, &object->pending_ops);
42916 +- fscache_stat(&fscache_n_op_pend);
42917 ++ fscache_stat_unchecked(&fscache_n_op_pend);
42918 + ret = 0;
42919 + } else if (object->state == FSCACHE_OBJECT_DYING ||
42920 + object->state == FSCACHE_OBJECT_LC_DYING ||
42921 + object->state == FSCACHE_OBJECT_WITHDRAWING) {
42922 +- fscache_stat(&fscache_n_op_rejected);
42923 ++ fscache_stat_unchecked(&fscache_n_op_rejected);
42924 + ret = -ENOBUFS;
42925 + } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42926 + fscache_report_unexpected_submission(object, op, ostate);
42927 +@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_ope
42928 +
42929 + ret = -EBUSY;
42930 + if (!list_empty(&op->pend_link)) {
42931 +- fscache_stat(&fscache_n_op_cancelled);
42932 ++ fscache_stat_unchecked(&fscache_n_op_cancelled);
42933 + list_del_init(&op->pend_link);
42934 + object->n_ops--;
42935 + if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42936 +@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscach
42937 + if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42938 + BUG();
42939 +
42940 +- fscache_stat(&fscache_n_op_release);
42941 ++ fscache_stat_unchecked(&fscache_n_op_release);
42942 +
42943 + if (op->release) {
42944 + op->release(op);
42945 +@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscach
42946 + * lock, and defer it otherwise */
42947 + if (!spin_trylock(&object->lock)) {
42948 + _debug("defer put");
42949 +- fscache_stat(&fscache_n_op_deferred_release);
42950 ++ fscache_stat_unchecked(&fscache_n_op_deferred_release);
42951 +
42952 + cache = object->cache;
42953 + spin_lock(&cache->op_gc_list_lock);
42954 +@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_st
42955 +
42956 + _debug("GC DEFERRED REL OBJ%x OP%x",
42957 + object->debug_id, op->debug_id);
42958 +- fscache_stat(&fscache_n_op_gc);
42959 ++ fscache_stat_unchecked(&fscache_n_op_gc);
42960 +
42961 + ASSERTCMP(atomic_read(&op->usage), ==, 0);
42962 +
42963 +diff -urNp linux-3.1.1/fs/fscache/page.c linux-3.1.1/fs/fscache/page.c
42964 +--- linux-3.1.1/fs/fscache/page.c 2011-11-11 15:19:27.000000000 -0500
42965 ++++ linux-3.1.1/fs/fscache/page.c 2011-11-16 18:39:08.000000000 -0500
42966 +@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
42967 + val = radix_tree_lookup(&cookie->stores, page->index);
42968 + if (!val) {
42969 + rcu_read_unlock();
42970 +- fscache_stat(&fscache_n_store_vmscan_not_storing);
42971 ++ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42972 + __fscache_uncache_page(cookie, page);
42973 + return true;
42974 + }
42975 +@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
42976 + spin_unlock(&cookie->stores_lock);
42977 +
42978 + if (xpage) {
42979 +- fscache_stat(&fscache_n_store_vmscan_cancelled);
42980 +- fscache_stat(&fscache_n_store_radix_deletes);
42981 ++ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42982 ++ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42983 + ASSERTCMP(xpage, ==, page);
42984 + } else {
42985 +- fscache_stat(&fscache_n_store_vmscan_gone);
42986 ++ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42987 + }
42988 +
42989 + wake_up_bit(&cookie->flags, 0);
42990 +@@ -107,7 +107,7 @@ page_busy:
42991 + /* we might want to wait here, but that could deadlock the allocator as
42992 + * the work threads writing to the cache may all end up sleeping
42993 + * on memory allocation */
42994 +- fscache_stat(&fscache_n_store_vmscan_busy);
42995 ++ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42996 + return false;
42997 + }
42998 + EXPORT_SYMBOL(__fscache_maybe_release_page);
42999 +@@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
43000 + FSCACHE_COOKIE_STORING_TAG);
43001 + if (!radix_tree_tag_get(&cookie->stores, page->index,
43002 + FSCACHE_COOKIE_PENDING_TAG)) {
43003 +- fscache_stat(&fscache_n_store_radix_deletes);
43004 ++ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43005 + xpage = radix_tree_delete(&cookie->stores, page->index);
43006 + }
43007 + spin_unlock(&cookie->stores_lock);
43008 +@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
43009 +
43010 + _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
43011 +
43012 +- fscache_stat(&fscache_n_attr_changed_calls);
43013 ++ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
43014 +
43015 + if (fscache_object_is_active(object)) {
43016 + fscache_stat(&fscache_n_cop_attr_changed);
43017 +@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscach
43018 +
43019 + ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43020 +
43021 +- fscache_stat(&fscache_n_attr_changed);
43022 ++ fscache_stat_unchecked(&fscache_n_attr_changed);
43023 +
43024 + op = kzalloc(sizeof(*op), GFP_KERNEL);
43025 + if (!op) {
43026 +- fscache_stat(&fscache_n_attr_changed_nomem);
43027 ++ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
43028 + _leave(" = -ENOMEM");
43029 + return -ENOMEM;
43030 + }
43031 +@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscach
43032 + if (fscache_submit_exclusive_op(object, op) < 0)
43033 + goto nobufs;
43034 + spin_unlock(&cookie->lock);
43035 +- fscache_stat(&fscache_n_attr_changed_ok);
43036 ++ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43037 + fscache_put_operation(op);
43038 + _leave(" = 0");
43039 + return 0;
43040 +@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscach
43041 + nobufs:
43042 + spin_unlock(&cookie->lock);
43043 + kfree(op);
43044 +- fscache_stat(&fscache_n_attr_changed_nobufs);
43045 ++ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43046 + _leave(" = %d", -ENOBUFS);
43047 + return -ENOBUFS;
43048 + }
43049 +@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache
43050 + /* allocate a retrieval operation and attempt to submit it */
43051 + op = kzalloc(sizeof(*op), GFP_NOIO);
43052 + if (!op) {
43053 +- fscache_stat(&fscache_n_retrievals_nomem);
43054 ++ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43055 + return NULL;
43056 + }
43057 +
43058 +@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_loo
43059 + return 0;
43060 + }
43061 +
43062 +- fscache_stat(&fscache_n_retrievals_wait);
43063 ++ fscache_stat_unchecked(&fscache_n_retrievals_wait);
43064 +
43065 + jif = jiffies;
43066 + if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43067 + fscache_wait_bit_interruptible,
43068 + TASK_INTERRUPTIBLE) != 0) {
43069 +- fscache_stat(&fscache_n_retrievals_intr);
43070 ++ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43071 + _leave(" = -ERESTARTSYS");
43072 + return -ERESTARTSYS;
43073 + }
43074 +@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_loo
43075 + */
43076 + static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43077 + struct fscache_retrieval *op,
43078 +- atomic_t *stat_op_waits,
43079 +- atomic_t *stat_object_dead)
43080 ++ atomic_unchecked_t *stat_op_waits,
43081 ++ atomic_unchecked_t *stat_object_dead)
43082 + {
43083 + int ret;
43084 +
43085 +@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_ac
43086 + goto check_if_dead;
43087 +
43088 + _debug(">>> WT");
43089 +- fscache_stat(stat_op_waits);
43090 ++ fscache_stat_unchecked(stat_op_waits);
43091 + if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43092 + fscache_wait_bit_interruptible,
43093 + TASK_INTERRUPTIBLE) < 0) {
43094 +@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_ac
43095 +
43096 + check_if_dead:
43097 + if (unlikely(fscache_object_is_dead(object))) {
43098 +- fscache_stat(stat_object_dead);
43099 ++ fscache_stat_unchecked(stat_object_dead);
43100 + return -ENOBUFS;
43101 + }
43102 + return 0;
43103 +@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct
43104 +
43105 + _enter("%p,%p,,,", cookie, page);
43106 +
43107 +- fscache_stat(&fscache_n_retrievals);
43108 ++ fscache_stat_unchecked(&fscache_n_retrievals);
43109 +
43110 + if (hlist_empty(&cookie->backing_objects))
43111 + goto nobufs;
43112 +@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct
43113 + goto nobufs_unlock;
43114 + spin_unlock(&cookie->lock);
43115 +
43116 +- fscache_stat(&fscache_n_retrieval_ops);
43117 ++ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43118 +
43119 + /* pin the netfs read context in case we need to do the actual netfs
43120 + * read because we've encountered a cache read failure */
43121 +@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct
43122 +
43123 + error:
43124 + if (ret == -ENOMEM)
43125 +- fscache_stat(&fscache_n_retrievals_nomem);
43126 ++ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43127 + else if (ret == -ERESTARTSYS)
43128 +- fscache_stat(&fscache_n_retrievals_intr);
43129 ++ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43130 + else if (ret == -ENODATA)
43131 +- fscache_stat(&fscache_n_retrievals_nodata);
43132 ++ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43133 + else if (ret < 0)
43134 +- fscache_stat(&fscache_n_retrievals_nobufs);
43135 ++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43136 + else
43137 +- fscache_stat(&fscache_n_retrievals_ok);
43138 ++ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43139 +
43140 + fscache_put_retrieval(op);
43141 + _leave(" = %d", ret);
43142 +@@ -429,7 +429,7 @@ nobufs_unlock:
43143 + spin_unlock(&cookie->lock);
43144 + kfree(op);
43145 + nobufs:
43146 +- fscache_stat(&fscache_n_retrievals_nobufs);
43147 ++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43148 + _leave(" = -ENOBUFS");
43149 + return -ENOBUFS;
43150 + }
43151 +@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct
43152 +
43153 + _enter("%p,,%d,,,", cookie, *nr_pages);
43154 +
43155 +- fscache_stat(&fscache_n_retrievals);
43156 ++ fscache_stat_unchecked(&fscache_n_retrievals);
43157 +
43158 + if (hlist_empty(&cookie->backing_objects))
43159 + goto nobufs;
43160 +@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct
43161 + goto nobufs_unlock;
43162 + spin_unlock(&cookie->lock);
43163 +
43164 +- fscache_stat(&fscache_n_retrieval_ops);
43165 ++ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43166 +
43167 + /* pin the netfs read context in case we need to do the actual netfs
43168 + * read because we've encountered a cache read failure */
43169 +@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct
43170 +
43171 + error:
43172 + if (ret == -ENOMEM)
43173 +- fscache_stat(&fscache_n_retrievals_nomem);
43174 ++ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43175 + else if (ret == -ERESTARTSYS)
43176 +- fscache_stat(&fscache_n_retrievals_intr);
43177 ++ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43178 + else if (ret == -ENODATA)
43179 +- fscache_stat(&fscache_n_retrievals_nodata);
43180 ++ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43181 + else if (ret < 0)
43182 +- fscache_stat(&fscache_n_retrievals_nobufs);
43183 ++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43184 + else
43185 +- fscache_stat(&fscache_n_retrievals_ok);
43186 ++ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43187 +
43188 + fscache_put_retrieval(op);
43189 + _leave(" = %d", ret);
43190 +@@ -545,7 +545,7 @@ nobufs_unlock:
43191 + spin_unlock(&cookie->lock);
43192 + kfree(op);
43193 + nobufs:
43194 +- fscache_stat(&fscache_n_retrievals_nobufs);
43195 ++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43196 + _leave(" = -ENOBUFS");
43197 + return -ENOBUFS;
43198 + }
43199 +@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_
43200 +
43201 + _enter("%p,%p,,,", cookie, page);
43202 +
43203 +- fscache_stat(&fscache_n_allocs);
43204 ++ fscache_stat_unchecked(&fscache_n_allocs);
43205 +
43206 + if (hlist_empty(&cookie->backing_objects))
43207 + goto nobufs;
43208 +@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_
43209 + goto nobufs_unlock;
43210 + spin_unlock(&cookie->lock);
43211 +
43212 +- fscache_stat(&fscache_n_alloc_ops);
43213 ++ fscache_stat_unchecked(&fscache_n_alloc_ops);
43214 +
43215 + ret = fscache_wait_for_retrieval_activation(
43216 + object, op,
43217 +@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_
43218 +
43219 + error:
43220 + if (ret == -ERESTARTSYS)
43221 +- fscache_stat(&fscache_n_allocs_intr);
43222 ++ fscache_stat_unchecked(&fscache_n_allocs_intr);
43223 + else if (ret < 0)
43224 +- fscache_stat(&fscache_n_allocs_nobufs);
43225 ++ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43226 + else
43227 +- fscache_stat(&fscache_n_allocs_ok);
43228 ++ fscache_stat_unchecked(&fscache_n_allocs_ok);
43229 +
43230 + fscache_put_retrieval(op);
43231 + _leave(" = %d", ret);
43232 +@@ -625,7 +625,7 @@ nobufs_unlock:
43233 + spin_unlock(&cookie->lock);
43234 + kfree(op);
43235 + nobufs:
43236 +- fscache_stat(&fscache_n_allocs_nobufs);
43237 ++ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43238 + _leave(" = -ENOBUFS");
43239 + return -ENOBUFS;
43240 + }
43241 +@@ -666,7 +666,7 @@ static void fscache_write_op(struct fsca
43242 +
43243 + spin_lock(&cookie->stores_lock);
43244 +
43245 +- fscache_stat(&fscache_n_store_calls);
43246 ++ fscache_stat_unchecked(&fscache_n_store_calls);
43247 +
43248 + /* find a page to store */
43249 + page = NULL;
43250 +@@ -677,7 +677,7 @@ static void fscache_write_op(struct fsca
43251 + page = results[0];
43252 + _debug("gang %d [%lx]", n, page->index);
43253 + if (page->index > op->store_limit) {
43254 +- fscache_stat(&fscache_n_store_pages_over_limit);
43255 ++ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43256 + goto superseded;
43257 + }
43258 +
43259 +@@ -689,7 +689,7 @@ static void fscache_write_op(struct fsca
43260 + spin_unlock(&cookie->stores_lock);
43261 + spin_unlock(&object->lock);
43262 +
43263 +- fscache_stat(&fscache_n_store_pages);
43264 ++ fscache_stat_unchecked(&fscache_n_store_pages);
43265 + fscache_stat(&fscache_n_cop_write_page);
43266 + ret = object->cache->ops->write_page(op, page);
43267 + fscache_stat_d(&fscache_n_cop_write_page);
43268 +@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_
43269 + ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43270 + ASSERT(PageFsCache(page));
43271 +
43272 +- fscache_stat(&fscache_n_stores);
43273 ++ fscache_stat_unchecked(&fscache_n_stores);
43274 +
43275 + op = kzalloc(sizeof(*op), GFP_NOIO);
43276 + if (!op)
43277 +@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_
43278 + spin_unlock(&cookie->stores_lock);
43279 + spin_unlock(&object->lock);
43280 +
43281 +- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43282 ++ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43283 + op->store_limit = object->store_limit;
43284 +
43285 + if (fscache_submit_op(object, &op->op) < 0)
43286 +@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_
43287 +
43288 + spin_unlock(&cookie->lock);
43289 + radix_tree_preload_end();
43290 +- fscache_stat(&fscache_n_store_ops);
43291 +- fscache_stat(&fscache_n_stores_ok);
43292 ++ fscache_stat_unchecked(&fscache_n_store_ops);
43293 ++ fscache_stat_unchecked(&fscache_n_stores_ok);
43294 +
43295 + /* the work queue now carries its own ref on the object */
43296 + fscache_put_operation(&op->op);
43297 +@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_
43298 + return 0;
43299 +
43300 + already_queued:
43301 +- fscache_stat(&fscache_n_stores_again);
43302 ++ fscache_stat_unchecked(&fscache_n_stores_again);
43303 + already_pending:
43304 + spin_unlock(&cookie->stores_lock);
43305 + spin_unlock(&object->lock);
43306 + spin_unlock(&cookie->lock);
43307 + radix_tree_preload_end();
43308 + kfree(op);
43309 +- fscache_stat(&fscache_n_stores_ok);
43310 ++ fscache_stat_unchecked(&fscache_n_stores_ok);
43311 + _leave(" = 0");
43312 + return 0;
43313 +
43314 +@@ -851,14 +851,14 @@ nobufs:
43315 + spin_unlock(&cookie->lock);
43316 + radix_tree_preload_end();
43317 + kfree(op);
43318 +- fscache_stat(&fscache_n_stores_nobufs);
43319 ++ fscache_stat_unchecked(&fscache_n_stores_nobufs);
43320 + _leave(" = -ENOBUFS");
43321 + return -ENOBUFS;
43322 +
43323 + nomem_free:
43324 + kfree(op);
43325 + nomem:
43326 +- fscache_stat(&fscache_n_stores_oom);
43327 ++ fscache_stat_unchecked(&fscache_n_stores_oom);
43328 + _leave(" = -ENOMEM");
43329 + return -ENOMEM;
43330 + }
43331 +@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscac
43332 + ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43333 + ASSERTCMP(page, !=, NULL);
43334 +
43335 +- fscache_stat(&fscache_n_uncaches);
43336 ++ fscache_stat_unchecked(&fscache_n_uncaches);
43337 +
43338 + /* cache withdrawal may beat us to it */
43339 + if (!PageFsCache(page))
43340 +@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fs
43341 + unsigned long loop;
43342 +
43343 + #ifdef CONFIG_FSCACHE_STATS
43344 +- atomic_add(pagevec->nr, &fscache_n_marks);
43345 ++ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43346 + #endif
43347 +
43348 + for (loop = 0; loop < pagevec->nr; loop++) {
43349 +diff -urNp linux-3.1.1/fs/fscache/stats.c linux-3.1.1/fs/fscache/stats.c
43350 +--- linux-3.1.1/fs/fscache/stats.c 2011-11-11 15:19:27.000000000 -0500
43351 ++++ linux-3.1.1/fs/fscache/stats.c 2011-11-16 18:39:08.000000000 -0500
43352 +@@ -18,95 +18,95 @@
43353 + /*
43354 + * operation counters
43355 + */
43356 +-atomic_t fscache_n_op_pend;
43357 +-atomic_t fscache_n_op_run;
43358 +-atomic_t fscache_n_op_enqueue;
43359 +-atomic_t fscache_n_op_requeue;
43360 +-atomic_t fscache_n_op_deferred_release;
43361 +-atomic_t fscache_n_op_release;
43362 +-atomic_t fscache_n_op_gc;
43363 +-atomic_t fscache_n_op_cancelled;
43364 +-atomic_t fscache_n_op_rejected;
43365 +-
43366 +-atomic_t fscache_n_attr_changed;
43367 +-atomic_t fscache_n_attr_changed_ok;
43368 +-atomic_t fscache_n_attr_changed_nobufs;
43369 +-atomic_t fscache_n_attr_changed_nomem;
43370 +-atomic_t fscache_n_attr_changed_calls;
43371 +-
43372 +-atomic_t fscache_n_allocs;
43373 +-atomic_t fscache_n_allocs_ok;
43374 +-atomic_t fscache_n_allocs_wait;
43375 +-atomic_t fscache_n_allocs_nobufs;
43376 +-atomic_t fscache_n_allocs_intr;
43377 +-atomic_t fscache_n_allocs_object_dead;
43378 +-atomic_t fscache_n_alloc_ops;
43379 +-atomic_t fscache_n_alloc_op_waits;
43380 +-
43381 +-atomic_t fscache_n_retrievals;
43382 +-atomic_t fscache_n_retrievals_ok;
43383 +-atomic_t fscache_n_retrievals_wait;
43384 +-atomic_t fscache_n_retrievals_nodata;
43385 +-atomic_t fscache_n_retrievals_nobufs;
43386 +-atomic_t fscache_n_retrievals_intr;
43387 +-atomic_t fscache_n_retrievals_nomem;
43388 +-atomic_t fscache_n_retrievals_object_dead;
43389 +-atomic_t fscache_n_retrieval_ops;
43390 +-atomic_t fscache_n_retrieval_op_waits;
43391 +-
43392 +-atomic_t fscache_n_stores;
43393 +-atomic_t fscache_n_stores_ok;
43394 +-atomic_t fscache_n_stores_again;
43395 +-atomic_t fscache_n_stores_nobufs;
43396 +-atomic_t fscache_n_stores_oom;
43397 +-atomic_t fscache_n_store_ops;
43398 +-atomic_t fscache_n_store_calls;
43399 +-atomic_t fscache_n_store_pages;
43400 +-atomic_t fscache_n_store_radix_deletes;
43401 +-atomic_t fscache_n_store_pages_over_limit;
43402 +-
43403 +-atomic_t fscache_n_store_vmscan_not_storing;
43404 +-atomic_t fscache_n_store_vmscan_gone;
43405 +-atomic_t fscache_n_store_vmscan_busy;
43406 +-atomic_t fscache_n_store_vmscan_cancelled;
43407 +-
43408 +-atomic_t fscache_n_marks;
43409 +-atomic_t fscache_n_uncaches;
43410 +-
43411 +-atomic_t fscache_n_acquires;
43412 +-atomic_t fscache_n_acquires_null;
43413 +-atomic_t fscache_n_acquires_no_cache;
43414 +-atomic_t fscache_n_acquires_ok;
43415 +-atomic_t fscache_n_acquires_nobufs;
43416 +-atomic_t fscache_n_acquires_oom;
43417 +-
43418 +-atomic_t fscache_n_updates;
43419 +-atomic_t fscache_n_updates_null;
43420 +-atomic_t fscache_n_updates_run;
43421 +-
43422 +-atomic_t fscache_n_relinquishes;
43423 +-atomic_t fscache_n_relinquishes_null;
43424 +-atomic_t fscache_n_relinquishes_waitcrt;
43425 +-atomic_t fscache_n_relinquishes_retire;
43426 +-
43427 +-atomic_t fscache_n_cookie_index;
43428 +-atomic_t fscache_n_cookie_data;
43429 +-atomic_t fscache_n_cookie_special;
43430 +-
43431 +-atomic_t fscache_n_object_alloc;
43432 +-atomic_t fscache_n_object_no_alloc;
43433 +-atomic_t fscache_n_object_lookups;
43434 +-atomic_t fscache_n_object_lookups_negative;
43435 +-atomic_t fscache_n_object_lookups_positive;
43436 +-atomic_t fscache_n_object_lookups_timed_out;
43437 +-atomic_t fscache_n_object_created;
43438 +-atomic_t fscache_n_object_avail;
43439 +-atomic_t fscache_n_object_dead;
43440 +-
43441 +-atomic_t fscache_n_checkaux_none;
43442 +-atomic_t fscache_n_checkaux_okay;
43443 +-atomic_t fscache_n_checkaux_update;
43444 +-atomic_t fscache_n_checkaux_obsolete;
43445 ++atomic_unchecked_t fscache_n_op_pend;
43446 ++atomic_unchecked_t fscache_n_op_run;
43447 ++atomic_unchecked_t fscache_n_op_enqueue;
43448 ++atomic_unchecked_t fscache_n_op_requeue;
43449 ++atomic_unchecked_t fscache_n_op_deferred_release;
43450 ++atomic_unchecked_t fscache_n_op_release;
43451 ++atomic_unchecked_t fscache_n_op_gc;
43452 ++atomic_unchecked_t fscache_n_op_cancelled;
43453 ++atomic_unchecked_t fscache_n_op_rejected;
43454 ++
43455 ++atomic_unchecked_t fscache_n_attr_changed;
43456 ++atomic_unchecked_t fscache_n_attr_changed_ok;
43457 ++atomic_unchecked_t fscache_n_attr_changed_nobufs;
43458 ++atomic_unchecked_t fscache_n_attr_changed_nomem;
43459 ++atomic_unchecked_t fscache_n_attr_changed_calls;
43460 ++
43461 ++atomic_unchecked_t fscache_n_allocs;
43462 ++atomic_unchecked_t fscache_n_allocs_ok;
43463 ++atomic_unchecked_t fscache_n_allocs_wait;
43464 ++atomic_unchecked_t fscache_n_allocs_nobufs;
43465 ++atomic_unchecked_t fscache_n_allocs_intr;
43466 ++atomic_unchecked_t fscache_n_allocs_object_dead;
43467 ++atomic_unchecked_t fscache_n_alloc_ops;
43468 ++atomic_unchecked_t fscache_n_alloc_op_waits;
43469 ++
43470 ++atomic_unchecked_t fscache_n_retrievals;
43471 ++atomic_unchecked_t fscache_n_retrievals_ok;
43472 ++atomic_unchecked_t fscache_n_retrievals_wait;
43473 ++atomic_unchecked_t fscache_n_retrievals_nodata;
43474 ++atomic_unchecked_t fscache_n_retrievals_nobufs;
43475 ++atomic_unchecked_t fscache_n_retrievals_intr;
43476 ++atomic_unchecked_t fscache_n_retrievals_nomem;
43477 ++atomic_unchecked_t fscache_n_retrievals_object_dead;
43478 ++atomic_unchecked_t fscache_n_retrieval_ops;
43479 ++atomic_unchecked_t fscache_n_retrieval_op_waits;
43480 ++
43481 ++atomic_unchecked_t fscache_n_stores;
43482 ++atomic_unchecked_t fscache_n_stores_ok;
43483 ++atomic_unchecked_t fscache_n_stores_again;
43484 ++atomic_unchecked_t fscache_n_stores_nobufs;
43485 ++atomic_unchecked_t fscache_n_stores_oom;
43486 ++atomic_unchecked_t fscache_n_store_ops;
43487 ++atomic_unchecked_t fscache_n_store_calls;
43488 ++atomic_unchecked_t fscache_n_store_pages;
43489 ++atomic_unchecked_t fscache_n_store_radix_deletes;
43490 ++atomic_unchecked_t fscache_n_store_pages_over_limit;
43491 ++
43492 ++atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43493 ++atomic_unchecked_t fscache_n_store_vmscan_gone;
43494 ++atomic_unchecked_t fscache_n_store_vmscan_busy;
43495 ++atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43496 ++
43497 ++atomic_unchecked_t fscache_n_marks;
43498 ++atomic_unchecked_t fscache_n_uncaches;
43499 ++
43500 ++atomic_unchecked_t fscache_n_acquires;
43501 ++atomic_unchecked_t fscache_n_acquires_null;
43502 ++atomic_unchecked_t fscache_n_acquires_no_cache;
43503 ++atomic_unchecked_t fscache_n_acquires_ok;
43504 ++atomic_unchecked_t fscache_n_acquires_nobufs;
43505 ++atomic_unchecked_t fscache_n_acquires_oom;
43506 ++
43507 ++atomic_unchecked_t fscache_n_updates;
43508 ++atomic_unchecked_t fscache_n_updates_null;
43509 ++atomic_unchecked_t fscache_n_updates_run;
43510 ++
43511 ++atomic_unchecked_t fscache_n_relinquishes;
43512 ++atomic_unchecked_t fscache_n_relinquishes_null;
43513 ++atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43514 ++atomic_unchecked_t fscache_n_relinquishes_retire;
43515 ++
43516 ++atomic_unchecked_t fscache_n_cookie_index;
43517 ++atomic_unchecked_t fscache_n_cookie_data;
43518 ++atomic_unchecked_t fscache_n_cookie_special;
43519 ++
43520 ++atomic_unchecked_t fscache_n_object_alloc;
43521 ++atomic_unchecked_t fscache_n_object_no_alloc;
43522 ++atomic_unchecked_t fscache_n_object_lookups;
43523 ++atomic_unchecked_t fscache_n_object_lookups_negative;
43524 ++atomic_unchecked_t fscache_n_object_lookups_positive;
43525 ++atomic_unchecked_t fscache_n_object_lookups_timed_out;
43526 ++atomic_unchecked_t fscache_n_object_created;
43527 ++atomic_unchecked_t fscache_n_object_avail;
43528 ++atomic_unchecked_t fscache_n_object_dead;
43529 ++
43530 ++atomic_unchecked_t fscache_n_checkaux_none;
43531 ++atomic_unchecked_t fscache_n_checkaux_okay;
43532 ++atomic_unchecked_t fscache_n_checkaux_update;
43533 ++atomic_unchecked_t fscache_n_checkaux_obsolete;
43534 +
43535 + atomic_t fscache_n_cop_alloc_object;
43536 + atomic_t fscache_n_cop_lookup_object;
43537 +@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
43538 + seq_puts(m, "FS-Cache statistics\n");
43539 +
43540 + seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43541 +- atomic_read(&fscache_n_cookie_index),
43542 +- atomic_read(&fscache_n_cookie_data),
43543 +- atomic_read(&fscache_n_cookie_special));
43544 ++ atomic_read_unchecked(&fscache_n_cookie_index),
43545 ++ atomic_read_unchecked(&fscache_n_cookie_data),
43546 ++ atomic_read_unchecked(&fscache_n_cookie_special));
43547 +
43548 + seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43549 +- atomic_read(&fscache_n_object_alloc),
43550 +- atomic_read(&fscache_n_object_no_alloc),
43551 +- atomic_read(&fscache_n_object_avail),
43552 +- atomic_read(&fscache_n_object_dead));
43553 ++ atomic_read_unchecked(&fscache_n_object_alloc),
43554 ++ atomic_read_unchecked(&fscache_n_object_no_alloc),
43555 ++ atomic_read_unchecked(&fscache_n_object_avail),
43556 ++ atomic_read_unchecked(&fscache_n_object_dead));
43557 + seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43558 +- atomic_read(&fscache_n_checkaux_none),
43559 +- atomic_read(&fscache_n_checkaux_okay),
43560 +- atomic_read(&fscache_n_checkaux_update),
43561 +- atomic_read(&fscache_n_checkaux_obsolete));
43562 ++ atomic_read_unchecked(&fscache_n_checkaux_none),
43563 ++ atomic_read_unchecked(&fscache_n_checkaux_okay),
43564 ++ atomic_read_unchecked(&fscache_n_checkaux_update),
43565 ++ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43566 +
43567 + seq_printf(m, "Pages : mrk=%u unc=%u\n",
43568 +- atomic_read(&fscache_n_marks),
43569 +- atomic_read(&fscache_n_uncaches));
43570 ++ atomic_read_unchecked(&fscache_n_marks),
43571 ++ atomic_read_unchecked(&fscache_n_uncaches));
43572 +
43573 + seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
43574 + " oom=%u\n",
43575 +- atomic_read(&fscache_n_acquires),
43576 +- atomic_read(&fscache_n_acquires_null),
43577 +- atomic_read(&fscache_n_acquires_no_cache),
43578 +- atomic_read(&fscache_n_acquires_ok),
43579 +- atomic_read(&fscache_n_acquires_nobufs),
43580 +- atomic_read(&fscache_n_acquires_oom));
43581 ++ atomic_read_unchecked(&fscache_n_acquires),
43582 ++ atomic_read_unchecked(&fscache_n_acquires_null),
43583 ++ atomic_read_unchecked(&fscache_n_acquires_no_cache),
43584 ++ atomic_read_unchecked(&fscache_n_acquires_ok),
43585 ++ atomic_read_unchecked(&fscache_n_acquires_nobufs),
43586 ++ atomic_read_unchecked(&fscache_n_acquires_oom));
43587 +
43588 + seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
43589 +- atomic_read(&fscache_n_object_lookups),
43590 +- atomic_read(&fscache_n_object_lookups_negative),
43591 +- atomic_read(&fscache_n_object_lookups_positive),
43592 +- atomic_read(&fscache_n_object_created),
43593 +- atomic_read(&fscache_n_object_lookups_timed_out));
43594 ++ atomic_read_unchecked(&fscache_n_object_lookups),
43595 ++ atomic_read_unchecked(&fscache_n_object_lookups_negative),
43596 ++ atomic_read_unchecked(&fscache_n_object_lookups_positive),
43597 ++ atomic_read_unchecked(&fscache_n_object_created),
43598 ++ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
43599 +
43600 + seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
43601 +- atomic_read(&fscache_n_updates),
43602 +- atomic_read(&fscache_n_updates_null),
43603 +- atomic_read(&fscache_n_updates_run));
43604 ++ atomic_read_unchecked(&fscache_n_updates),
43605 ++ atomic_read_unchecked(&fscache_n_updates_null),
43606 ++ atomic_read_unchecked(&fscache_n_updates_run));
43607 +
43608 + seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43609 +- atomic_read(&fscache_n_relinquishes),
43610 +- atomic_read(&fscache_n_relinquishes_null),
43611 +- atomic_read(&fscache_n_relinquishes_waitcrt),
43612 +- atomic_read(&fscache_n_relinquishes_retire));
43613 ++ atomic_read_unchecked(&fscache_n_relinquishes),
43614 ++ atomic_read_unchecked(&fscache_n_relinquishes_null),
43615 ++ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43616 ++ atomic_read_unchecked(&fscache_n_relinquishes_retire));
43617 +
43618 + seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43619 +- atomic_read(&fscache_n_attr_changed),
43620 +- atomic_read(&fscache_n_attr_changed_ok),
43621 +- atomic_read(&fscache_n_attr_changed_nobufs),
43622 +- atomic_read(&fscache_n_attr_changed_nomem),
43623 +- atomic_read(&fscache_n_attr_changed_calls));
43624 ++ atomic_read_unchecked(&fscache_n_attr_changed),
43625 ++ atomic_read_unchecked(&fscache_n_attr_changed_ok),
43626 ++ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43627 ++ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43628 ++ atomic_read_unchecked(&fscache_n_attr_changed_calls));
43629 +
43630 + seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43631 +- atomic_read(&fscache_n_allocs),
43632 +- atomic_read(&fscache_n_allocs_ok),
43633 +- atomic_read(&fscache_n_allocs_wait),
43634 +- atomic_read(&fscache_n_allocs_nobufs),
43635 +- atomic_read(&fscache_n_allocs_intr));
43636 ++ atomic_read_unchecked(&fscache_n_allocs),
43637 ++ atomic_read_unchecked(&fscache_n_allocs_ok),
43638 ++ atomic_read_unchecked(&fscache_n_allocs_wait),
43639 ++ atomic_read_unchecked(&fscache_n_allocs_nobufs),
43640 ++ atomic_read_unchecked(&fscache_n_allocs_intr));
43641 + seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43642 +- atomic_read(&fscache_n_alloc_ops),
43643 +- atomic_read(&fscache_n_alloc_op_waits),
43644 +- atomic_read(&fscache_n_allocs_object_dead));
43645 ++ atomic_read_unchecked(&fscache_n_alloc_ops),
43646 ++ atomic_read_unchecked(&fscache_n_alloc_op_waits),
43647 ++ atomic_read_unchecked(&fscache_n_allocs_object_dead));
43648 +
43649 + seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43650 + " int=%u oom=%u\n",
43651 +- atomic_read(&fscache_n_retrievals),
43652 +- atomic_read(&fscache_n_retrievals_ok),
43653 +- atomic_read(&fscache_n_retrievals_wait),
43654 +- atomic_read(&fscache_n_retrievals_nodata),
43655 +- atomic_read(&fscache_n_retrievals_nobufs),
43656 +- atomic_read(&fscache_n_retrievals_intr),
43657 +- atomic_read(&fscache_n_retrievals_nomem));
43658 ++ atomic_read_unchecked(&fscache_n_retrievals),
43659 ++ atomic_read_unchecked(&fscache_n_retrievals_ok),
43660 ++ atomic_read_unchecked(&fscache_n_retrievals_wait),
43661 ++ atomic_read_unchecked(&fscache_n_retrievals_nodata),
43662 ++ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43663 ++ atomic_read_unchecked(&fscache_n_retrievals_intr),
43664 ++ atomic_read_unchecked(&fscache_n_retrievals_nomem));
43665 + seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43666 +- atomic_read(&fscache_n_retrieval_ops),
43667 +- atomic_read(&fscache_n_retrieval_op_waits),
43668 +- atomic_read(&fscache_n_retrievals_object_dead));
43669 ++ atomic_read_unchecked(&fscache_n_retrieval_ops),
43670 ++ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43671 ++ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43672 +
43673 + seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43674 +- atomic_read(&fscache_n_stores),
43675 +- atomic_read(&fscache_n_stores_ok),
43676 +- atomic_read(&fscache_n_stores_again),
43677 +- atomic_read(&fscache_n_stores_nobufs),
43678 +- atomic_read(&fscache_n_stores_oom));
43679 ++ atomic_read_unchecked(&fscache_n_stores),
43680 ++ atomic_read_unchecked(&fscache_n_stores_ok),
43681 ++ atomic_read_unchecked(&fscache_n_stores_again),
43682 ++ atomic_read_unchecked(&fscache_n_stores_nobufs),
43683 ++ atomic_read_unchecked(&fscache_n_stores_oom));
43684 + seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43685 +- atomic_read(&fscache_n_store_ops),
43686 +- atomic_read(&fscache_n_store_calls),
43687 +- atomic_read(&fscache_n_store_pages),
43688 +- atomic_read(&fscache_n_store_radix_deletes),
43689 +- atomic_read(&fscache_n_store_pages_over_limit));
43690 ++ atomic_read_unchecked(&fscache_n_store_ops),
43691 ++ atomic_read_unchecked(&fscache_n_store_calls),
43692 ++ atomic_read_unchecked(&fscache_n_store_pages),
43693 ++ atomic_read_unchecked(&fscache_n_store_radix_deletes),
43694 ++ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43695 +
43696 + seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43697 +- atomic_read(&fscache_n_store_vmscan_not_storing),
43698 +- atomic_read(&fscache_n_store_vmscan_gone),
43699 +- atomic_read(&fscache_n_store_vmscan_busy),
43700 +- atomic_read(&fscache_n_store_vmscan_cancelled));
43701 ++ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43702 ++ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43703 ++ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43704 ++ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43705 +
43706 + seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43707 +- atomic_read(&fscache_n_op_pend),
43708 +- atomic_read(&fscache_n_op_run),
43709 +- atomic_read(&fscache_n_op_enqueue),
43710 +- atomic_read(&fscache_n_op_cancelled),
43711 +- atomic_read(&fscache_n_op_rejected));
43712 ++ atomic_read_unchecked(&fscache_n_op_pend),
43713 ++ atomic_read_unchecked(&fscache_n_op_run),
43714 ++ atomic_read_unchecked(&fscache_n_op_enqueue),
43715 ++ atomic_read_unchecked(&fscache_n_op_cancelled),
43716 ++ atomic_read_unchecked(&fscache_n_op_rejected));
43717 + seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43718 +- atomic_read(&fscache_n_op_deferred_release),
43719 +- atomic_read(&fscache_n_op_release),
43720 +- atomic_read(&fscache_n_op_gc));
43721 ++ atomic_read_unchecked(&fscache_n_op_deferred_release),
43722 ++ atomic_read_unchecked(&fscache_n_op_release),
43723 ++ atomic_read_unchecked(&fscache_n_op_gc));
43724 +
43725 + seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43726 + atomic_read(&fscache_n_cop_alloc_object),
43727 +diff -urNp linux-3.1.1/fs/fs_struct.c linux-3.1.1/fs/fs_struct.c
43728 +--- linux-3.1.1/fs/fs_struct.c 2011-11-11 15:19:27.000000000 -0500
43729 ++++ linux-3.1.1/fs/fs_struct.c 2011-11-16 18:40:29.000000000 -0500
43730 +@@ -4,6 +4,7 @@
43731 + #include <linux/path.h>
43732 + #include <linux/slab.h>
43733 + #include <linux/fs_struct.h>
43734 ++#include <linux/grsecurity.h>
43735 + #include "internal.h"
43736 +
43737 + static inline void path_get_longterm(struct path *path)
43738 +@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
43739 + old_root = fs->root;
43740 + fs->root = *path;
43741 + path_get_longterm(path);
43742 ++ gr_set_chroot_entries(current, path);
43743 + write_seqcount_end(&fs->seq);
43744 + spin_unlock(&fs->lock);
43745 + if (old_root.dentry)
43746 +@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
43747 + && fs->root.mnt == old_root->mnt) {
43748 + path_get_longterm(new_root);
43749 + fs->root = *new_root;
43750 ++ gr_set_chroot_entries(p, new_root);
43751 + count++;
43752 + }
43753 + if (fs->pwd.dentry == old_root->dentry
43754 +@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
43755 + spin_lock(&fs->lock);
43756 + write_seqcount_begin(&fs->seq);
43757 + tsk->fs = NULL;
43758 +- kill = !--fs->users;
43759 ++ gr_clear_chroot_entries(tsk);
43760 ++ kill = !atomic_dec_return(&fs->users);
43761 + write_seqcount_end(&fs->seq);
43762 + spin_unlock(&fs->lock);
43763 + task_unlock(tsk);
43764 +@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
43765 + struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
43766 + /* We don't need to lock fs - think why ;-) */
43767 + if (fs) {
43768 +- fs->users = 1;
43769 ++ atomic_set(&fs->users, 1);
43770 + fs->in_exec = 0;
43771 + spin_lock_init(&fs->lock);
43772 + seqcount_init(&fs->seq);
43773 +@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
43774 + spin_lock(&old->lock);
43775 + fs->root = old->root;
43776 + path_get_longterm(&fs->root);
43777 ++ /* instead of calling gr_set_chroot_entries here,
43778 ++ we call it from every caller of this function
43779 ++ */
43780 + fs->pwd = old->pwd;
43781 + path_get_longterm(&fs->pwd);
43782 + spin_unlock(&old->lock);
43783 +@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
43784 +
43785 + task_lock(current);
43786 + spin_lock(&fs->lock);
43787 +- kill = !--fs->users;
43788 ++ kill = !atomic_dec_return(&fs->users);
43789 + current->fs = new_fs;
43790 ++ gr_set_chroot_entries(current, &new_fs->root);
43791 + spin_unlock(&fs->lock);
43792 + task_unlock(current);
43793 +
43794 +@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
43795 +
43796 + /* to be mentioned only in INIT_TASK */
43797 + struct fs_struct init_fs = {
43798 +- .users = 1,
43799 ++ .users = ATOMIC_INIT(1),
43800 + .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
43801 + .seq = SEQCNT_ZERO,
43802 + .umask = 0022,
43803 +@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
43804 + task_lock(current);
43805 +
43806 + spin_lock(&init_fs.lock);
43807 +- init_fs.users++;
43808 ++ atomic_inc(&init_fs.users);
43809 + spin_unlock(&init_fs.lock);
43810 +
43811 + spin_lock(&fs->lock);
43812 + current->fs = &init_fs;
43813 +- kill = !--fs->users;
43814 ++ gr_set_chroot_entries(current, &current->fs->root);
43815 ++ kill = !atomic_dec_return(&fs->users);
43816 + spin_unlock(&fs->lock);
43817 +
43818 + task_unlock(current);
43819 +diff -urNp linux-3.1.1/fs/fuse/cuse.c linux-3.1.1/fs/fuse/cuse.c
43820 +--- linux-3.1.1/fs/fuse/cuse.c 2011-11-11 15:19:27.000000000 -0500
43821 ++++ linux-3.1.1/fs/fuse/cuse.c 2011-11-16 18:39:08.000000000 -0500
43822 +@@ -586,10 +586,12 @@ static int __init cuse_init(void)
43823 + INIT_LIST_HEAD(&cuse_conntbl[i]);
43824 +
43825 + /* inherit and extend fuse_dev_operations */
43826 +- cuse_channel_fops = fuse_dev_operations;
43827 +- cuse_channel_fops.owner = THIS_MODULE;
43828 +- cuse_channel_fops.open = cuse_channel_open;
43829 +- cuse_channel_fops.release = cuse_channel_release;
43830 ++ pax_open_kernel();
43831 ++ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43832 ++ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43833 ++ *(void **)&cuse_channel_fops.open = cuse_channel_open;
43834 ++ *(void **)&cuse_channel_fops.release = cuse_channel_release;
43835 ++ pax_close_kernel();
43836 +
43837 + cuse_class = class_create(THIS_MODULE, "cuse");
43838 + if (IS_ERR(cuse_class))
43839 +diff -urNp linux-3.1.1/fs/fuse/dev.c linux-3.1.1/fs/fuse/dev.c
43840 +--- linux-3.1.1/fs/fuse/dev.c 2011-11-11 15:19:27.000000000 -0500
43841 ++++ linux-3.1.1/fs/fuse/dev.c 2011-11-16 18:39:08.000000000 -0500
43842 +@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(stru
43843 + ret = 0;
43844 + pipe_lock(pipe);
43845 +
43846 +- if (!pipe->readers) {
43847 ++ if (!atomic_read(&pipe->readers)) {
43848 + send_sig(SIGPIPE, current, 0);
43849 + if (!ret)
43850 + ret = -EPIPE;
43851 +diff -urNp linux-3.1.1/fs/fuse/dir.c linux-3.1.1/fs/fuse/dir.c
43852 +--- linux-3.1.1/fs/fuse/dir.c 2011-11-11 15:19:27.000000000 -0500
43853 ++++ linux-3.1.1/fs/fuse/dir.c 2011-11-16 18:39:08.000000000 -0500
43854 +@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *de
43855 + return link;
43856 + }
43857 +
43858 +-static void free_link(char *link)
43859 ++static void free_link(const char *link)
43860 + {
43861 + if (!IS_ERR(link))
43862 + free_page((unsigned long) link);
43863 +diff -urNp linux-3.1.1/fs/gfs2/inode.c linux-3.1.1/fs/gfs2/inode.c
43864 +--- linux-3.1.1/fs/gfs2/inode.c 2011-11-11 15:19:27.000000000 -0500
43865 ++++ linux-3.1.1/fs/gfs2/inode.c 2011-11-16 18:39:08.000000000 -0500
43866 +@@ -1517,7 +1517,7 @@ out:
43867 +
43868 + static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43869 + {
43870 +- char *s = nd_get_link(nd);
43871 ++ const char *s = nd_get_link(nd);
43872 + if (!IS_ERR(s))
43873 + kfree(s);
43874 + }
43875 +diff -urNp linux-3.1.1/fs/hfs/btree.c linux-3.1.1/fs/hfs/btree.c
43876 +--- linux-3.1.1/fs/hfs/btree.c 2011-11-11 15:19:27.000000000 -0500
43877 ++++ linux-3.1.1/fs/hfs/btree.c 2011-11-18 18:48:11.000000000 -0500
43878 +@@ -46,11 +46,27 @@ struct hfs_btree *hfs_btree_open(struct
43879 + case HFS_EXT_CNID:
43880 + hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
43881 + mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
43882 ++
43883 ++ if (HFS_I(tree->inode)->alloc_blocks >
43884 ++ HFS_I(tree->inode)->first_blocks) {
43885 ++ printk(KERN_ERR "hfs: invalid btree extent records\n");
43886 ++ unlock_new_inode(tree->inode);
43887 ++ goto free_inode;
43888 ++ }
43889 ++
43890 + tree->inode->i_mapping->a_ops = &hfs_btree_aops;
43891 + break;
43892 + case HFS_CAT_CNID:
43893 + hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
43894 + mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
43895 ++
43896 ++ if (!HFS_I(tree->inode)->first_blocks) {
43897 ++ printk(KERN_ERR "hfs: invalid btree extent records "
43898 ++ "(0 size).\n");
43899 ++ unlock_new_inode(tree->inode);
43900 ++ goto free_inode;
43901 ++ }
43902 ++
43903 + tree->inode->i_mapping->a_ops = &hfs_btree_aops;
43904 + break;
43905 + default:
43906 +@@ -59,11 +75,6 @@ struct hfs_btree *hfs_btree_open(struct
43907 + }
43908 + unlock_new_inode(tree->inode);
43909 +
43910 +- if (!HFS_I(tree->inode)->first_blocks) {
43911 +- printk(KERN_ERR "hfs: invalid btree extent records (0 size).\n");
43912 +- goto free_inode;
43913 +- }
43914 +-
43915 + mapping = tree->inode->i_mapping;
43916 + page = read_mapping_page(mapping, 0, NULL);
43917 + if (IS_ERR(page))
43918 +diff -urNp linux-3.1.1/fs/hfs/trans.c linux-3.1.1/fs/hfs/trans.c
43919 +--- linux-3.1.1/fs/hfs/trans.c 2011-11-11 15:19:27.000000000 -0500
43920 ++++ linux-3.1.1/fs/hfs/trans.c 2011-11-18 18:37:38.000000000 -0500
43921 +@@ -40,6 +40,8 @@ int hfs_mac2asc(struct super_block *sb,
43922 +
43923 + src = in->name;
43924 + srclen = in->len;
43925 ++ if (srclen > HFS_NAMELEN)
43926 ++ srclen = HFS_NAMELEN;
43927 + dst = out;
43928 + dstlen = HFS_MAX_NAMELEN;
43929 + if (nls_io) {
43930 +diff -urNp linux-3.1.1/fs/hfsplus/catalog.c linux-3.1.1/fs/hfsplus/catalog.c
43931 +--- linux-3.1.1/fs/hfsplus/catalog.c 2011-11-11 15:19:27.000000000 -0500
43932 ++++ linux-3.1.1/fs/hfsplus/catalog.c 2011-11-16 19:23:09.000000000 -0500
43933 +@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
43934 + int err;
43935 + u16 type;
43936 +
43937 ++ pax_track_stack();
43938 ++
43939 + hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43940 + err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43941 + if (err)
43942 +@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
43943 + int entry_size;
43944 + int err;
43945 +
43946 ++ pax_track_stack();
43947 ++
43948 + dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
43949 + str->name, cnid, inode->i_nlink);
43950 + err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
43951 +@@ -353,6 +357,8 @@ int hfsplus_rename_cat(u32 cnid,
43952 + int entry_size, type;
43953 + int err;
43954 +
43955 ++ pax_track_stack();
43956 ++
43957 + dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
43958 + cnid, src_dir->i_ino, src_name->name,
43959 + dst_dir->i_ino, dst_name->name);
43960 +diff -urNp linux-3.1.1/fs/hfsplus/dir.c linux-3.1.1/fs/hfsplus/dir.c
43961 +--- linux-3.1.1/fs/hfsplus/dir.c 2011-11-11 15:19:27.000000000 -0500
43962 ++++ linux-3.1.1/fs/hfsplus/dir.c 2011-11-16 18:40:29.000000000 -0500
43963 +@@ -131,6 +131,8 @@ static int hfsplus_readdir(struct file *
43964 + struct hfsplus_readdir_data *rd;
43965 + u16 type;
43966 +
43967 ++ pax_track_stack();
43968 ++
43969 + if (filp->f_pos >= inode->i_size)
43970 + return 0;
43971 +
43972 +diff -urNp linux-3.1.1/fs/hfsplus/inode.c linux-3.1.1/fs/hfsplus/inode.c
43973 +--- linux-3.1.1/fs/hfsplus/inode.c 2011-11-11 15:19:27.000000000 -0500
43974 ++++ linux-3.1.1/fs/hfsplus/inode.c 2011-11-16 18:40:29.000000000 -0500
43975 +@@ -501,6 +501,8 @@ int hfsplus_cat_read_inode(struct inode
43976 + int res = 0;
43977 + u16 type;
43978 +
43979 ++ pax_track_stack();
43980 ++
43981 + type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43982 +
43983 + HFSPLUS_I(inode)->linkid = 0;
43984 +@@ -564,6 +566,8 @@ int hfsplus_cat_write_inode(struct inode
43985 + struct hfs_find_data fd;
43986 + hfsplus_cat_entry entry;
43987 +
43988 ++ pax_track_stack();
43989 ++
43990 + if (HFSPLUS_IS_RSRC(inode))
43991 + main_inode = HFSPLUS_I(inode)->rsrc_inode;
43992 +
43993 +diff -urNp linux-3.1.1/fs/hfsplus/ioctl.c linux-3.1.1/fs/hfsplus/ioctl.c
43994 +--- linux-3.1.1/fs/hfsplus/ioctl.c 2011-11-11 15:19:27.000000000 -0500
43995 ++++ linux-3.1.1/fs/hfsplus/ioctl.c 2011-11-16 18:40:29.000000000 -0500
43996 +@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
43997 + struct hfsplus_cat_file *file;
43998 + int res;
43999 +
44000 ++ pax_track_stack();
44001 ++
44002 + if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
44003 + return -EOPNOTSUPP;
44004 +
44005 +@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
44006 + struct hfsplus_cat_file *file;
44007 + ssize_t res = 0;
44008 +
44009 ++ pax_track_stack();
44010 ++
44011 + if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
44012 + return -EOPNOTSUPP;
44013 +
44014 +diff -urNp linux-3.1.1/fs/hfsplus/super.c linux-3.1.1/fs/hfsplus/super.c
44015 +--- linux-3.1.1/fs/hfsplus/super.c 2011-11-11 15:19:27.000000000 -0500
44016 ++++ linux-3.1.1/fs/hfsplus/super.c 2011-11-16 19:23:30.000000000 -0500
44017 +@@ -347,6 +347,8 @@ static int hfsplus_fill_super(struct sup
44018 + u64 last_fs_block, last_fs_page;
44019 + int err;
44020 +
44021 ++ pax_track_stack();
44022 ++
44023 + err = -EINVAL;
44024 + sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
44025 + if (!sbi)
44026 +diff -urNp linux-3.1.1/fs/hugetlbfs/inode.c linux-3.1.1/fs/hugetlbfs/inode.c
44027 +--- linux-3.1.1/fs/hugetlbfs/inode.c 2011-11-11 15:19:27.000000000 -0500
44028 ++++ linux-3.1.1/fs/hugetlbfs/inode.c 2011-11-16 18:40:29.000000000 -0500
44029 +@@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs
44030 + .kill_sb = kill_litter_super,
44031 + };
44032 +
44033 +-static struct vfsmount *hugetlbfs_vfsmount;
44034 ++struct vfsmount *hugetlbfs_vfsmount;
44035 +
44036 + static int can_do_hugetlb_shm(void)
44037 + {
44038 +diff -urNp linux-3.1.1/fs/inode.c linux-3.1.1/fs/inode.c
44039 +--- linux-3.1.1/fs/inode.c 2011-11-11 15:19:27.000000000 -0500
44040 ++++ linux-3.1.1/fs/inode.c 2011-11-16 18:39:08.000000000 -0500
44041 +@@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
44042 +
44043 + #ifdef CONFIG_SMP
44044 + if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
44045 +- static atomic_t shared_last_ino;
44046 +- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
44047 ++ static atomic_unchecked_t shared_last_ino;
44048 ++ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
44049 +
44050 + res = next - LAST_INO_BATCH;
44051 + }
44052 +diff -urNp linux-3.1.1/fs/jbd/checkpoint.c linux-3.1.1/fs/jbd/checkpoint.c
44053 +--- linux-3.1.1/fs/jbd/checkpoint.c 2011-11-11 15:19:27.000000000 -0500
44054 ++++ linux-3.1.1/fs/jbd/checkpoint.c 2011-11-16 18:40:29.000000000 -0500
44055 +@@ -358,6 +358,8 @@ int log_do_checkpoint(journal_t *journal
44056 + tid_t this_tid;
44057 + int result;
44058 +
44059 ++ pax_track_stack();
44060 ++
44061 + jbd_debug(1, "Start checkpoint\n");
44062 +
44063 + /*
44064 +diff -urNp linux-3.1.1/fs/jffs2/compr_rtime.c linux-3.1.1/fs/jffs2/compr_rtime.c
44065 +--- linux-3.1.1/fs/jffs2/compr_rtime.c 2011-11-11 15:19:27.000000000 -0500
44066 ++++ linux-3.1.1/fs/jffs2/compr_rtime.c 2011-11-16 18:40:29.000000000 -0500
44067 +@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
44068 + int outpos = 0;
44069 + int pos=0;
44070 +
44071 ++ pax_track_stack();
44072 ++
44073 + memset(positions,0,sizeof(positions));
44074 +
44075 + while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
44076 +@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
44077 + int outpos = 0;
44078 + int pos=0;
44079 +
44080 ++ pax_track_stack();
44081 ++
44082 + memset(positions,0,sizeof(positions));
44083 +
44084 + while (outpos<destlen) {
44085 +diff -urNp linux-3.1.1/fs/jffs2/compr_rubin.c linux-3.1.1/fs/jffs2/compr_rubin.c
44086 +--- linux-3.1.1/fs/jffs2/compr_rubin.c 2011-11-11 15:19:27.000000000 -0500
44087 ++++ linux-3.1.1/fs/jffs2/compr_rubin.c 2011-11-16 18:40:29.000000000 -0500
44088 +@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
44089 + int ret;
44090 + uint32_t mysrclen, mydstlen;
44091 +
44092 ++ pax_track_stack();
44093 ++
44094 + mysrclen = *sourcelen;
44095 + mydstlen = *dstlen - 8;
44096 +
44097 +diff -urNp linux-3.1.1/fs/jffs2/erase.c linux-3.1.1/fs/jffs2/erase.c
44098 +--- linux-3.1.1/fs/jffs2/erase.c 2011-11-11 15:19:27.000000000 -0500
44099 ++++ linux-3.1.1/fs/jffs2/erase.c 2011-11-16 18:39:08.000000000 -0500
44100 +@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
44101 + struct jffs2_unknown_node marker = {
44102 + .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
44103 + .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44104 +- .totlen = cpu_to_je32(c->cleanmarker_size)
44105 ++ .totlen = cpu_to_je32(c->cleanmarker_size),
44106 ++ .hdr_crc = cpu_to_je32(0)
44107 + };
44108 +
44109 + jffs2_prealloc_raw_node_refs(c, jeb, 1);
44110 +diff -urNp linux-3.1.1/fs/jffs2/wbuf.c linux-3.1.1/fs/jffs2/wbuf.c
44111 +--- linux-3.1.1/fs/jffs2/wbuf.c 2011-11-11 15:19:27.000000000 -0500
44112 ++++ linux-3.1.1/fs/jffs2/wbuf.c 2011-11-16 18:39:08.000000000 -0500
44113 +@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
44114 + {
44115 + .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
44116 + .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44117 +- .totlen = constant_cpu_to_je32(8)
44118 ++ .totlen = constant_cpu_to_je32(8),
44119 ++ .hdr_crc = constant_cpu_to_je32(0)
44120 + };
44121 +
44122 + /*
44123 +diff -urNp linux-3.1.1/fs/jffs2/xattr.c linux-3.1.1/fs/jffs2/xattr.c
44124 +--- linux-3.1.1/fs/jffs2/xattr.c 2011-11-11 15:19:27.000000000 -0500
44125 ++++ linux-3.1.1/fs/jffs2/xattr.c 2011-11-16 18:40:29.000000000 -0500
44126 +@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
44127 +
44128 + BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
44129 +
44130 ++ pax_track_stack();
44131 ++
44132 + /* Phase.1 : Merge same xref */
44133 + for (i=0; i < XREF_TMPHASH_SIZE; i++)
44134 + xref_tmphash[i] = NULL;
44135 +diff -urNp linux-3.1.1/fs/jfs/super.c linux-3.1.1/fs/jfs/super.c
44136 +--- linux-3.1.1/fs/jfs/super.c 2011-11-11 15:19:27.000000000 -0500
44137 ++++ linux-3.1.1/fs/jfs/super.c 2011-11-16 18:39:08.000000000 -0500
44138 +@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
44139 +
44140 + jfs_inode_cachep =
44141 + kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
44142 +- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
44143 ++ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
44144 + init_once);
44145 + if (jfs_inode_cachep == NULL)
44146 + return -ENOMEM;
44147 +diff -urNp linux-3.1.1/fs/Kconfig.binfmt linux-3.1.1/fs/Kconfig.binfmt
44148 +--- linux-3.1.1/fs/Kconfig.binfmt 2011-11-11 15:19:27.000000000 -0500
44149 ++++ linux-3.1.1/fs/Kconfig.binfmt 2011-11-16 18:39:08.000000000 -0500
44150 +@@ -86,7 +86,7 @@ config HAVE_AOUT
44151 +
44152 + config BINFMT_AOUT
44153 + tristate "Kernel support for a.out and ECOFF binaries"
44154 +- depends on HAVE_AOUT
44155 ++ depends on HAVE_AOUT && BROKEN
44156 + ---help---
44157 + A.out (Assembler.OUTput) is a set of formats for libraries and
44158 + executables used in the earliest versions of UNIX. Linux used
44159 +diff -urNp linux-3.1.1/fs/libfs.c linux-3.1.1/fs/libfs.c
44160 +--- linux-3.1.1/fs/libfs.c 2011-11-11 15:19:27.000000000 -0500
44161 ++++ linux-3.1.1/fs/libfs.c 2011-11-16 18:39:08.000000000 -0500
44162 +@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, v
44163 +
44164 + for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
44165 + struct dentry *next;
44166 ++ char d_name[sizeof(next->d_iname)];
44167 ++ const unsigned char *name;
44168 ++
44169 + next = list_entry(p, struct dentry, d_u.d_child);
44170 + spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
44171 + if (!simple_positive(next)) {
44172 +@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, v
44173 +
44174 + spin_unlock(&next->d_lock);
44175 + spin_unlock(&dentry->d_lock);
44176 +- if (filldir(dirent, next->d_name.name,
44177 ++ name = next->d_name.name;
44178 ++ if (name == next->d_iname) {
44179 ++ memcpy(d_name, name, next->d_name.len);
44180 ++ name = d_name;
44181 ++ }
44182 ++ if (filldir(dirent, name,
44183 + next->d_name.len, filp->f_pos,
44184 + next->d_inode->i_ino,
44185 + dt_type(next->d_inode)) < 0)
44186 +diff -urNp linux-3.1.1/fs/lockd/clntproc.c linux-3.1.1/fs/lockd/clntproc.c
44187 +--- linux-3.1.1/fs/lockd/clntproc.c 2011-11-11 15:19:27.000000000 -0500
44188 ++++ linux-3.1.1/fs/lockd/clntproc.c 2011-11-16 18:40:29.000000000 -0500
44189 +@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
44190 + /*
44191 + * Cookie counter for NLM requests
44192 + */
44193 +-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
44194 ++static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
44195 +
44196 + void nlmclnt_next_cookie(struct nlm_cookie *c)
44197 + {
44198 +- u32 cookie = atomic_inc_return(&nlm_cookie);
44199 ++ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44200 +
44201 + memcpy(c->data, &cookie, 4);
44202 + c->len=4;
44203 +@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
44204 + struct nlm_rqst reqst, *req;
44205 + int status;
44206 +
44207 ++ pax_track_stack();
44208 ++
44209 + req = &reqst;
44210 + memset(req, 0, sizeof(*req));
44211 + locks_init_lock(&req->a_args.lock.fl);
44212 +diff -urNp linux-3.1.1/fs/locks.c linux-3.1.1/fs/locks.c
44213 +--- linux-3.1.1/fs/locks.c 2011-11-11 15:19:27.000000000 -0500
44214 ++++ linux-3.1.1/fs/locks.c 2011-11-16 18:39:08.000000000 -0500
44215 +@@ -2022,16 +2022,16 @@ void locks_remove_flock(struct file *fil
44216 + return;
44217 +
44218 + if (filp->f_op && filp->f_op->flock) {
44219 +- struct file_lock fl = {
44220 ++ struct file_lock flock = {
44221 + .fl_pid = current->tgid,
44222 + .fl_file = filp,
44223 + .fl_flags = FL_FLOCK,
44224 + .fl_type = F_UNLCK,
44225 + .fl_end = OFFSET_MAX,
44226 + };
44227 +- filp->f_op->flock(filp, F_SETLKW, &fl);
44228 +- if (fl.fl_ops && fl.fl_ops->fl_release_private)
44229 +- fl.fl_ops->fl_release_private(&fl);
44230 ++ filp->f_op->flock(filp, F_SETLKW, &flock);
44231 ++ if (flock.fl_ops && flock.fl_ops->fl_release_private)
44232 ++ flock.fl_ops->fl_release_private(&flock);
44233 + }
44234 +
44235 + lock_flocks();
44236 +diff -urNp linux-3.1.1/fs/logfs/super.c linux-3.1.1/fs/logfs/super.c
44237 +--- linux-3.1.1/fs/logfs/super.c 2011-11-11 15:19:27.000000000 -0500
44238 ++++ linux-3.1.1/fs/logfs/super.c 2011-11-16 18:40:29.000000000 -0500
44239 +@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
44240 + struct logfs_disk_super _ds1, *ds1 = &_ds1;
44241 + int err, valid0, valid1;
44242 +
44243 ++ pax_track_stack();
44244 ++
44245 + /* read first superblock */
44246 + err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
44247 + if (err)
44248 +diff -urNp linux-3.1.1/fs/namei.c linux-3.1.1/fs/namei.c
44249 +--- linux-3.1.1/fs/namei.c 2011-11-11 15:19:27.000000000 -0500
44250 ++++ linux-3.1.1/fs/namei.c 2011-11-17 00:36:54.000000000 -0500
44251 +@@ -283,14 +283,22 @@ int generic_permission(struct inode *ino
44252 +
44253 + if (S_ISDIR(inode->i_mode)) {
44254 + /* DACs are overridable for directories */
44255 +- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44256 +- return 0;
44257 + if (!(mask & MAY_WRITE))
44258 + if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44259 + return 0;
44260 ++ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44261 ++ return 0;
44262 + return -EACCES;
44263 + }
44264 + /*
44265 ++ * Searching includes executable on directories, else just read.
44266 ++ */
44267 ++ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44268 ++ if (mask == MAY_READ)
44269 ++ if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44270 ++ return 0;
44271 ++
44272 ++ /*
44273 + * Read/write DACs are always overridable.
44274 + * Executable DACs are overridable when there is
44275 + * at least one exec bit set.
44276 +@@ -299,14 +307,6 @@ int generic_permission(struct inode *ino
44277 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44278 + return 0;
44279 +
44280 +- /*
44281 +- * Searching includes executable on directories, else just read.
44282 +- */
44283 +- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44284 +- if (mask == MAY_READ)
44285 +- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44286 +- return 0;
44287 +-
44288 + return -EACCES;
44289 + }
44290 +
44291 +@@ -653,11 +653,19 @@ follow_link(struct path *link, struct na
44292 + return error;
44293 + }
44294 +
44295 ++ if (gr_handle_follow_link(dentry->d_parent->d_inode,
44296 ++ dentry->d_inode, dentry, nd->path.mnt)) {
44297 ++ error = -EACCES;
44298 ++ *p = ERR_PTR(error); /* no ->put_link(), please */
44299 ++ path_put(&nd->path);
44300 ++ return error;
44301 ++ }
44302 ++
44303 + nd->last_type = LAST_BIND;
44304 + *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44305 + error = PTR_ERR(*p);
44306 + if (!IS_ERR(*p)) {
44307 +- char *s = nd_get_link(nd);
44308 ++ const char *s = nd_get_link(nd);
44309 + error = 0;
44310 + if (s)
44311 + error = __vfs_follow_link(nd, s);
44312 +@@ -1622,6 +1630,12 @@ static int path_lookupat(int dfd, const
44313 + if (!err)
44314 + err = complete_walk(nd);
44315 +
44316 ++ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44317 ++ if (!err)
44318 ++ path_put(&nd->path);
44319 ++ err = -ENOENT;
44320 ++ }
44321 ++
44322 + if (!err && nd->flags & LOOKUP_DIRECTORY) {
44323 + if (!nd->inode->i_op->lookup) {
44324 + path_put(&nd->path);
44325 +@@ -1649,6 +1663,9 @@ static int do_path_lookup(int dfd, const
44326 + retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44327 +
44328 + if (likely(!retval)) {
44329 ++ if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44330 ++ return -ENOENT;
44331 ++
44332 + if (unlikely(!audit_dummy_context())) {
44333 + if (nd->path.dentry && nd->inode)
44334 + audit_inode(name, nd->path.dentry);
44335 +@@ -2049,7 +2066,27 @@ static int may_open(struct path *path, i
44336 + /*
44337 + * Ensure there are no outstanding leases on the file.
44338 + */
44339 +- return break_lease(inode, flag);
44340 ++ error = break_lease(inode, flag);
44341 ++
44342 ++ if (error)
44343 ++ return error;
44344 ++
44345 ++ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
44346 ++ error = -EPERM;
44347 ++ goto exit;
44348 ++ }
44349 ++
44350 ++ if (gr_handle_rawio(inode)) {
44351 ++ error = -EPERM;
44352 ++ goto exit;
44353 ++ }
44354 ++
44355 ++ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
44356 ++ error = -EACCES;
44357 ++ goto exit;
44358 ++ }
44359 ++exit:
44360 ++ return error;
44361 + }
44362 +
44363 + static int handle_truncate(struct file *filp)
44364 +@@ -2110,6 +2147,10 @@ static struct file *do_last(struct namei
44365 + error = complete_walk(nd);
44366 + if (error)
44367 + return ERR_PTR(error);
44368 ++ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44369 ++ error = -ENOENT;
44370 ++ goto exit;
44371 ++ }
44372 + audit_inode(pathname, nd->path.dentry);
44373 + if (open_flag & O_CREAT) {
44374 + error = -EISDIR;
44375 +@@ -2120,6 +2161,10 @@ static struct file *do_last(struct namei
44376 + error = complete_walk(nd);
44377 + if (error)
44378 + return ERR_PTR(error);
44379 ++ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44380 ++ error = -ENOENT;
44381 ++ goto exit;
44382 ++ }
44383 + audit_inode(pathname, dir);
44384 + goto ok;
44385 + }
44386 +@@ -2142,6 +2187,11 @@ static struct file *do_last(struct namei
44387 + if (error)
44388 + return ERR_PTR(-ECHILD);
44389 +
44390 ++ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44391 ++ error = -ENOENT;
44392 ++ goto exit;
44393 ++ }
44394 ++
44395 + error = -ENOTDIR;
44396 + if (nd->flags & LOOKUP_DIRECTORY) {
44397 + if (!nd->inode->i_op->lookup)
44398 +@@ -2181,6 +2231,12 @@ static struct file *do_last(struct namei
44399 + /* Negative dentry, just create the file */
44400 + if (!dentry->d_inode) {
44401 + int mode = op->mode;
44402 ++
44403 ++ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
44404 ++ error = -EACCES;
44405 ++ goto exit_mutex_unlock;
44406 ++ }
44407 ++
44408 + if (!IS_POSIXACL(dir->d_inode))
44409 + mode &= ~current_umask();
44410 + /*
44411 +@@ -2204,6 +2260,8 @@ static struct file *do_last(struct namei
44412 + error = vfs_create(dir->d_inode, dentry, mode, nd);
44413 + if (error)
44414 + goto exit_mutex_unlock;
44415 ++ else
44416 ++ gr_handle_create(path->dentry, path->mnt);
44417 + mutex_unlock(&dir->d_inode->i_mutex);
44418 + dput(nd->path.dentry);
44419 + nd->path.dentry = dentry;
44420 +@@ -2213,6 +2271,19 @@ static struct file *do_last(struct namei
44421 + /*
44422 + * It already exists.
44423 + */
44424 ++
44425 ++ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44426 ++ error = -ENOENT;
44427 ++ goto exit_mutex_unlock;
44428 ++ }
44429 ++
44430 ++ /* only check if O_CREAT is specified, all other checks need to go
44431 ++ into may_open */
44432 ++ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
44433 ++ error = -EACCES;
44434 ++ goto exit_mutex_unlock;
44435 ++ }
44436 ++
44437 + mutex_unlock(&dir->d_inode->i_mutex);
44438 + audit_inode(pathname, path->dentry);
44439 +
44440 +@@ -2425,6 +2496,11 @@ struct dentry *kern_path_create(int dfd,
44441 + *path = nd.path;
44442 + return dentry;
44443 + eexist:
44444 ++ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
44445 ++ dput(dentry);
44446 ++ dentry = ERR_PTR(-ENOENT);
44447 ++ goto fail;
44448 ++ }
44449 + dput(dentry);
44450 + dentry = ERR_PTR(-EEXIST);
44451 + fail:
44452 +@@ -2447,6 +2523,20 @@ struct dentry *user_path_create(int dfd,
44453 + }
44454 + EXPORT_SYMBOL(user_path_create);
44455 +
44456 ++static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
44457 ++{
44458 ++ char *tmp = getname(pathname);
44459 ++ struct dentry *res;
44460 ++ if (IS_ERR(tmp))
44461 ++ return ERR_CAST(tmp);
44462 ++ res = kern_path_create(dfd, tmp, path, is_dir);
44463 ++ if (IS_ERR(res))
44464 ++ putname(tmp);
44465 ++ else
44466 ++ *to = tmp;
44467 ++ return res;
44468 ++}
44469 ++
44470 + int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
44471 + {
44472 + int error = may_create(dir, dentry);
44473 +@@ -2514,6 +2604,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44474 + error = mnt_want_write(path.mnt);
44475 + if (error)
44476 + goto out_dput;
44477 ++
44478 ++ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
44479 ++ error = -EPERM;
44480 ++ goto out_drop_write;
44481 ++ }
44482 ++
44483 ++ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
44484 ++ error = -EACCES;
44485 ++ goto out_drop_write;
44486 ++ }
44487 ++
44488 + error = security_path_mknod(&path, dentry, mode, dev);
44489 + if (error)
44490 + goto out_drop_write;
44491 +@@ -2531,6 +2632,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44492 + }
44493 + out_drop_write:
44494 + mnt_drop_write(path.mnt);
44495 ++
44496 ++ if (!error)
44497 ++ gr_handle_create(dentry, path.mnt);
44498 + out_dput:
44499 + dput(dentry);
44500 + mutex_unlock(&path.dentry->d_inode->i_mutex);
44501 +@@ -2580,12 +2684,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
44502 + error = mnt_want_write(path.mnt);
44503 + if (error)
44504 + goto out_dput;
44505 ++
44506 ++ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
44507 ++ error = -EACCES;
44508 ++ goto out_drop_write;
44509 ++ }
44510 ++
44511 + error = security_path_mkdir(&path, dentry, mode);
44512 + if (error)
44513 + goto out_drop_write;
44514 + error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
44515 + out_drop_write:
44516 + mnt_drop_write(path.mnt);
44517 ++
44518 ++ if (!error)
44519 ++ gr_handle_create(dentry, path.mnt);
44520 + out_dput:
44521 + dput(dentry);
44522 + mutex_unlock(&path.dentry->d_inode->i_mutex);
44523 +@@ -2665,6 +2778,8 @@ static long do_rmdir(int dfd, const char
44524 + char * name;
44525 + struct dentry *dentry;
44526 + struct nameidata nd;
44527 ++ ino_t saved_ino = 0;
44528 ++ dev_t saved_dev = 0;
44529 +
44530 + error = user_path_parent(dfd, pathname, &nd, &name);
44531 + if (error)
44532 +@@ -2693,6 +2808,15 @@ static long do_rmdir(int dfd, const char
44533 + error = -ENOENT;
44534 + goto exit3;
44535 + }
44536 ++
44537 ++ saved_ino = dentry->d_inode->i_ino;
44538 ++ saved_dev = gr_get_dev_from_dentry(dentry);
44539 ++
44540 ++ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44541 ++ error = -EACCES;
44542 ++ goto exit3;
44543 ++ }
44544 ++
44545 + error = mnt_want_write(nd.path.mnt);
44546 + if (error)
44547 + goto exit3;
44548 +@@ -2700,6 +2824,8 @@ static long do_rmdir(int dfd, const char
44549 + if (error)
44550 + goto exit4;
44551 + error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44552 ++ if (!error && (saved_dev || saved_ino))
44553 ++ gr_handle_delete(saved_ino, saved_dev);
44554 + exit4:
44555 + mnt_drop_write(nd.path.mnt);
44556 + exit3:
44557 +@@ -2762,6 +2888,8 @@ static long do_unlinkat(int dfd, const c
44558 + struct dentry *dentry;
44559 + struct nameidata nd;
44560 + struct inode *inode = NULL;
44561 ++ ino_t saved_ino = 0;
44562 ++ dev_t saved_dev = 0;
44563 +
44564 + error = user_path_parent(dfd, pathname, &nd, &name);
44565 + if (error)
44566 +@@ -2784,6 +2912,16 @@ static long do_unlinkat(int dfd, const c
44567 + if (!inode)
44568 + goto slashes;
44569 + ihold(inode);
44570 ++
44571 ++ if (inode->i_nlink <= 1) {
44572 ++ saved_ino = inode->i_ino;
44573 ++ saved_dev = gr_get_dev_from_dentry(dentry);
44574 ++ }
44575 ++ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44576 ++ error = -EACCES;
44577 ++ goto exit2;
44578 ++ }
44579 ++
44580 + error = mnt_want_write(nd.path.mnt);
44581 + if (error)
44582 + goto exit2;
44583 +@@ -2791,6 +2929,8 @@ static long do_unlinkat(int dfd, const c
44584 + if (error)
44585 + goto exit3;
44586 + error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44587 ++ if (!error && (saved_ino || saved_dev))
44588 ++ gr_handle_delete(saved_ino, saved_dev);
44589 + exit3:
44590 + mnt_drop_write(nd.path.mnt);
44591 + exit2:
44592 +@@ -2866,10 +3006,18 @@ SYSCALL_DEFINE3(symlinkat, const char __
44593 + error = mnt_want_write(path.mnt);
44594 + if (error)
44595 + goto out_dput;
44596 ++
44597 ++ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
44598 ++ error = -EACCES;
44599 ++ goto out_drop_write;
44600 ++ }
44601 ++
44602 + error = security_path_symlink(&path, dentry, from);
44603 + if (error)
44604 + goto out_drop_write;
44605 + error = vfs_symlink(path.dentry->d_inode, dentry, from);
44606 ++ if (!error)
44607 ++ gr_handle_create(dentry, path.mnt);
44608 + out_drop_write:
44609 + mnt_drop_write(path.mnt);
44610 + out_dput:
44611 +@@ -2941,6 +3089,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44612 + {
44613 + struct dentry *new_dentry;
44614 + struct path old_path, new_path;
44615 ++ char *to;
44616 + int how = 0;
44617 + int error;
44618 +
44619 +@@ -2964,7 +3113,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44620 + if (error)
44621 + return error;
44622 +
44623 +- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
44624 ++ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
44625 + error = PTR_ERR(new_dentry);
44626 + if (IS_ERR(new_dentry))
44627 + goto out;
44628 +@@ -2975,13 +3124,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44629 + error = mnt_want_write(new_path.mnt);
44630 + if (error)
44631 + goto out_dput;
44632 ++
44633 ++ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44634 ++ old_path.dentry->d_inode,
44635 ++ old_path.dentry->d_inode->i_mode, to)) {
44636 ++ error = -EACCES;
44637 ++ goto out_drop_write;
44638 ++ }
44639 ++
44640 ++ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
44641 ++ old_path.dentry, old_path.mnt, to)) {
44642 ++ error = -EACCES;
44643 ++ goto out_drop_write;
44644 ++ }
44645 ++
44646 + error = security_path_link(old_path.dentry, &new_path, new_dentry);
44647 + if (error)
44648 + goto out_drop_write;
44649 + error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
44650 ++ if (!error)
44651 ++ gr_handle_create(new_dentry, new_path.mnt);
44652 + out_drop_write:
44653 + mnt_drop_write(new_path.mnt);
44654 + out_dput:
44655 ++ putname(to);
44656 + dput(new_dentry);
44657 + mutex_unlock(&new_path.dentry->d_inode->i_mutex);
44658 + path_put(&new_path);
44659 +@@ -3153,6 +3319,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44660 + char *to;
44661 + int error;
44662 +
44663 ++ pax_track_stack();
44664 ++
44665 + error = user_path_parent(olddfd, oldname, &oldnd, &from);
44666 + if (error)
44667 + goto exit;
44668 +@@ -3209,6 +3377,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44669 + if (new_dentry == trap)
44670 + goto exit5;
44671 +
44672 ++ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44673 ++ old_dentry, old_dir->d_inode, oldnd.path.mnt,
44674 ++ to);
44675 ++ if (error)
44676 ++ goto exit5;
44677 ++
44678 + error = mnt_want_write(oldnd.path.mnt);
44679 + if (error)
44680 + goto exit5;
44681 +@@ -3218,6 +3392,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44682 + goto exit6;
44683 + error = vfs_rename(old_dir->d_inode, old_dentry,
44684 + new_dir->d_inode, new_dentry);
44685 ++ if (!error)
44686 ++ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44687 ++ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44688 + exit6:
44689 + mnt_drop_write(oldnd.path.mnt);
44690 + exit5:
44691 +@@ -3243,6 +3420,8 @@ SYSCALL_DEFINE2(rename, const char __use
44692 +
44693 + int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44694 + {
44695 ++ char tmpbuf[64];
44696 ++ const char *newlink;
44697 + int len;
44698 +
44699 + len = PTR_ERR(link);
44700 +@@ -3252,7 +3431,14 @@ int vfs_readlink(struct dentry *dentry,
44701 + len = strlen(link);
44702 + if (len > (unsigned) buflen)
44703 + len = buflen;
44704 +- if (copy_to_user(buffer, link, len))
44705 ++
44706 ++ if (len < sizeof(tmpbuf)) {
44707 ++ memcpy(tmpbuf, link, len);
44708 ++ newlink = tmpbuf;
44709 ++ } else
44710 ++ newlink = link;
44711 ++
44712 ++ if (copy_to_user(buffer, newlink, len))
44713 + len = -EFAULT;
44714 + out:
44715 + return len;
44716 +diff -urNp linux-3.1.1/fs/namespace.c linux-3.1.1/fs/namespace.c
44717 +--- linux-3.1.1/fs/namespace.c 2011-11-11 15:19:27.000000000 -0500
44718 ++++ linux-3.1.1/fs/namespace.c 2011-11-16 18:40:29.000000000 -0500
44719 +@@ -1329,6 +1329,9 @@ static int do_umount(struct vfsmount *mn
44720 + if (!(sb->s_flags & MS_RDONLY))
44721 + retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44722 + up_write(&sb->s_umount);
44723 ++
44724 ++ gr_log_remount(mnt->mnt_devname, retval);
44725 ++
44726 + return retval;
44727 + }
44728 +
44729 +@@ -1348,6 +1351,9 @@ static int do_umount(struct vfsmount *mn
44730 + br_write_unlock(vfsmount_lock);
44731 + up_write(&namespace_sem);
44732 + release_mounts(&umount_list);
44733 ++
44734 ++ gr_log_unmount(mnt->mnt_devname, retval);
44735 ++
44736 + return retval;
44737 + }
44738 +
44739 +@@ -2339,6 +2345,16 @@ long do_mount(char *dev_name, char *dir_
44740 + MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44741 + MS_STRICTATIME);
44742 +
44743 ++ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44744 ++ retval = -EPERM;
44745 ++ goto dput_out;
44746 ++ }
44747 ++
44748 ++ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44749 ++ retval = -EPERM;
44750 ++ goto dput_out;
44751 ++ }
44752 ++
44753 + if (flags & MS_REMOUNT)
44754 + retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44755 + data_page);
44756 +@@ -2353,6 +2369,9 @@ long do_mount(char *dev_name, char *dir_
44757 + dev_name, data_page);
44758 + dput_out:
44759 + path_put(&path);
44760 ++
44761 ++ gr_log_mount(dev_name, dir_name, retval);
44762 ++
44763 + return retval;
44764 + }
44765 +
44766 +@@ -2576,6 +2595,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
44767 + if (error)
44768 + goto out2;
44769 +
44770 ++ if (gr_handle_chroot_pivot()) {
44771 ++ error = -EPERM;
44772 ++ goto out2;
44773 ++ }
44774 ++
44775 + get_fs_root(current->fs, &root);
44776 + error = lock_mount(&old);
44777 + if (error)
44778 +diff -urNp linux-3.1.1/fs/ncpfs/dir.c linux-3.1.1/fs/ncpfs/dir.c
44779 +--- linux-3.1.1/fs/ncpfs/dir.c 2011-11-11 15:19:27.000000000 -0500
44780 ++++ linux-3.1.1/fs/ncpfs/dir.c 2011-11-16 18:40:29.000000000 -0500
44781 +@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
44782 + int res, val = 0, len;
44783 + __u8 __name[NCP_MAXPATHLEN + 1];
44784 +
44785 ++ pax_track_stack();
44786 ++
44787 + if (dentry == dentry->d_sb->s_root)
44788 + return 1;
44789 +
44790 +@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
44791 + int error, res, len;
44792 + __u8 __name[NCP_MAXPATHLEN + 1];
44793 +
44794 ++ pax_track_stack();
44795 ++
44796 + error = -EIO;
44797 + if (!ncp_conn_valid(server))
44798 + goto finished;
44799 +@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
44800 + PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
44801 + dentry->d_parent->d_name.name, dentry->d_name.name, mode);
44802 +
44803 ++ pax_track_stack();
44804 ++
44805 + ncp_age_dentry(server, dentry);
44806 + len = sizeof(__name);
44807 + error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
44808 +@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
44809 + int error, len;
44810 + __u8 __name[NCP_MAXPATHLEN + 1];
44811 +
44812 ++ pax_track_stack();
44813 ++
44814 + DPRINTK("ncp_mkdir: making %s/%s\n",
44815 + dentry->d_parent->d_name.name, dentry->d_name.name);
44816 +
44817 +@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_
44818 + int old_len, new_len;
44819 + __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
44820 +
44821 ++ pax_track_stack();
44822 ++
44823 + DPRINTK("ncp_rename: %s/%s to %s/%s\n",
44824 + old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
44825 + new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
44826 +diff -urNp linux-3.1.1/fs/ncpfs/inode.c linux-3.1.1/fs/ncpfs/inode.c
44827 +--- linux-3.1.1/fs/ncpfs/inode.c 2011-11-11 15:19:27.000000000 -0500
44828 ++++ linux-3.1.1/fs/ncpfs/inode.c 2011-11-16 18:40:29.000000000 -0500
44829 +@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
44830 + #endif
44831 + struct ncp_entry_info finfo;
44832 +
44833 ++ pax_track_stack();
44834 ++
44835 + memset(&data, 0, sizeof(data));
44836 + server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
44837 + if (!server)
44838 +diff -urNp linux-3.1.1/fs/nfs/blocklayout/blocklayout.c linux-3.1.1/fs/nfs/blocklayout/blocklayout.c
44839 +--- linux-3.1.1/fs/nfs/blocklayout/blocklayout.c 2011-11-11 15:19:27.000000000 -0500
44840 ++++ linux-3.1.1/fs/nfs/blocklayout/blocklayout.c 2011-11-16 18:39:08.000000000 -0500
44841 +@@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block
44842 + */
44843 + struct parallel_io {
44844 + struct kref refcnt;
44845 +- struct rpc_call_ops call_ops;
44846 ++ rpc_call_ops_no_const call_ops;
44847 + void (*pnfs_callback) (void *data);
44848 + void *data;
44849 + };
44850 +diff -urNp linux-3.1.1/fs/nfs/inode.c linux-3.1.1/fs/nfs/inode.c
44851 +--- linux-3.1.1/fs/nfs/inode.c 2011-11-11 15:19:27.000000000 -0500
44852 ++++ linux-3.1.1/fs/nfs/inode.c 2011-11-16 18:39:08.000000000 -0500
44853 +@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
44854 + nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44855 + nfsi->attrtimeo_timestamp = jiffies;
44856 +
44857 +- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44858 ++ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44859 + if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44860 + nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44861 + else
44862 +@@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const st
44863 + return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44864 + }
44865 +
44866 +-static atomic_long_t nfs_attr_generation_counter;
44867 ++static atomic_long_unchecked_t nfs_attr_generation_counter;
44868 +
44869 + static unsigned long nfs_read_attr_generation_counter(void)
44870 + {
44871 +- return atomic_long_read(&nfs_attr_generation_counter);
44872 ++ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44873 + }
44874 +
44875 + unsigned long nfs_inc_attr_generation_counter(void)
44876 + {
44877 +- return atomic_long_inc_return(&nfs_attr_generation_counter);
44878 ++ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44879 + }
44880 +
44881 + void nfs_fattr_init(struct nfs_fattr *fattr)
44882 +diff -urNp linux-3.1.1/fs/nfsd/nfs4state.c linux-3.1.1/fs/nfsd/nfs4state.c
44883 +--- linux-3.1.1/fs/nfsd/nfs4state.c 2011-11-11 15:19:27.000000000 -0500
44884 ++++ linux-3.1.1/fs/nfsd/nfs4state.c 2011-11-16 18:40:29.000000000 -0500
44885 +@@ -3999,6 +3999,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44886 + unsigned int strhashval;
44887 + int err;
44888 +
44889 ++ pax_track_stack();
44890 ++
44891 + dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44892 + (long long) lock->lk_offset,
44893 + (long long) lock->lk_length);
44894 +diff -urNp linux-3.1.1/fs/nfsd/nfs4xdr.c linux-3.1.1/fs/nfsd/nfs4xdr.c
44895 +--- linux-3.1.1/fs/nfsd/nfs4xdr.c 2011-11-11 15:19:27.000000000 -0500
44896 ++++ linux-3.1.1/fs/nfsd/nfs4xdr.c 2011-11-16 18:40:29.000000000 -0500
44897 +@@ -1875,6 +1875,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44898 + .dentry = dentry,
44899 + };
44900 +
44901 ++ pax_track_stack();
44902 ++
44903 + BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44904 + BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44905 + BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44906 +diff -urNp linux-3.1.1/fs/nfsd/vfs.c linux-3.1.1/fs/nfsd/vfs.c
44907 +--- linux-3.1.1/fs/nfsd/vfs.c 2011-11-11 15:19:27.000000000 -0500
44908 ++++ linux-3.1.1/fs/nfsd/vfs.c 2011-11-16 18:39:08.000000000 -0500
44909 +@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44910 + } else {
44911 + oldfs = get_fs();
44912 + set_fs(KERNEL_DS);
44913 +- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44914 ++ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
44915 + set_fs(oldfs);
44916 + }
44917 +
44918 +@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44919 +
44920 + /* Write the data. */
44921 + oldfs = get_fs(); set_fs(KERNEL_DS);
44922 +- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44923 ++ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
44924 + set_fs(oldfs);
44925 + if (host_err < 0)
44926 + goto out_nfserr;
44927 +@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44928 + */
44929 +
44930 + oldfs = get_fs(); set_fs(KERNEL_DS);
44931 +- host_err = inode->i_op->readlink(dentry, buf, *lenp);
44932 ++ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
44933 + set_fs(oldfs);
44934 +
44935 + if (host_err < 0)
44936 +diff -urNp linux-3.1.1/fs/notify/fanotify/fanotify_user.c linux-3.1.1/fs/notify/fanotify/fanotify_user.c
44937 +--- linux-3.1.1/fs/notify/fanotify/fanotify_user.c 2011-11-11 15:19:27.000000000 -0500
44938 ++++ linux-3.1.1/fs/notify/fanotify/fanotify_user.c 2011-11-16 18:39:08.000000000 -0500
44939 +@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
44940 + goto out_close_fd;
44941 +
44942 + ret = -EFAULT;
44943 +- if (copy_to_user(buf, &fanotify_event_metadata,
44944 ++ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
44945 ++ copy_to_user(buf, &fanotify_event_metadata,
44946 + fanotify_event_metadata.event_len))
44947 + goto out_kill_access_response;
44948 +
44949 +diff -urNp linux-3.1.1/fs/notify/notification.c linux-3.1.1/fs/notify/notification.c
44950 +--- linux-3.1.1/fs/notify/notification.c 2011-11-11 15:19:27.000000000 -0500
44951 ++++ linux-3.1.1/fs/notify/notification.c 2011-11-16 18:39:08.000000000 -0500
44952 +@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44953 + * get set to 0 so it will never get 'freed'
44954 + */
44955 + static struct fsnotify_event *q_overflow_event;
44956 +-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44957 ++static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44958 +
44959 + /**
44960 + * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44961 +@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44962 + */
44963 + u32 fsnotify_get_cookie(void)
44964 + {
44965 +- return atomic_inc_return(&fsnotify_sync_cookie);
44966 ++ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44967 + }
44968 + EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44969 +
44970 +diff -urNp linux-3.1.1/fs/ntfs/dir.c linux-3.1.1/fs/ntfs/dir.c
44971 +--- linux-3.1.1/fs/ntfs/dir.c 2011-11-11 15:19:27.000000000 -0500
44972 ++++ linux-3.1.1/fs/ntfs/dir.c 2011-11-16 18:39:08.000000000 -0500
44973 +@@ -1329,7 +1329,7 @@ find_next_index_buffer:
44974 + ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44975 + ~(s64)(ndir->itype.index.block_size - 1)));
44976 + /* Bounds checks. */
44977 +- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44978 ++ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44979 + ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44980 + "inode 0x%lx or driver bug.", vdir->i_ino);
44981 + goto err_out;
44982 +diff -urNp linux-3.1.1/fs/ntfs/file.c linux-3.1.1/fs/ntfs/file.c
44983 +--- linux-3.1.1/fs/ntfs/file.c 2011-11-11 15:19:27.000000000 -0500
44984 ++++ linux-3.1.1/fs/ntfs/file.c 2011-11-16 18:39:08.000000000 -0500
44985 +@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_
44986 + #endif /* NTFS_RW */
44987 + };
44988 +
44989 +-const struct file_operations ntfs_empty_file_ops = {};
44990 ++const struct file_operations ntfs_empty_file_ops __read_only;
44991 +
44992 +-const struct inode_operations ntfs_empty_inode_ops = {};
44993 ++const struct inode_operations ntfs_empty_inode_ops __read_only;
44994 +diff -urNp linux-3.1.1/fs/ocfs2/localalloc.c linux-3.1.1/fs/ocfs2/localalloc.c
44995 +--- linux-3.1.1/fs/ocfs2/localalloc.c 2011-11-11 15:19:27.000000000 -0500
44996 ++++ linux-3.1.1/fs/ocfs2/localalloc.c 2011-11-16 18:39:08.000000000 -0500
44997 +@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
44998 + goto bail;
44999 + }
45000 +
45001 +- atomic_inc(&osb->alloc_stats.moves);
45002 ++ atomic_inc_unchecked(&osb->alloc_stats.moves);
45003 +
45004 + bail:
45005 + if (handle)
45006 +diff -urNp linux-3.1.1/fs/ocfs2/namei.c linux-3.1.1/fs/ocfs2/namei.c
45007 +--- linux-3.1.1/fs/ocfs2/namei.c 2011-11-11 15:19:27.000000000 -0500
45008 ++++ linux-3.1.1/fs/ocfs2/namei.c 2011-11-16 18:40:29.000000000 -0500
45009 +@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
45010 + struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
45011 + struct ocfs2_dir_lookup_result target_insert = { NULL, };
45012 +
45013 ++ pax_track_stack();
45014 ++
45015 + /* At some point it might be nice to break this function up a
45016 + * bit. */
45017 +
45018 +diff -urNp linux-3.1.1/fs/ocfs2/ocfs2.h linux-3.1.1/fs/ocfs2/ocfs2.h
45019 +--- linux-3.1.1/fs/ocfs2/ocfs2.h 2011-11-11 15:19:27.000000000 -0500
45020 ++++ linux-3.1.1/fs/ocfs2/ocfs2.h 2011-11-16 18:39:08.000000000 -0500
45021 +@@ -235,11 +235,11 @@ enum ocfs2_vol_state
45022 +
45023 + struct ocfs2_alloc_stats
45024 + {
45025 +- atomic_t moves;
45026 +- atomic_t local_data;
45027 +- atomic_t bitmap_data;
45028 +- atomic_t bg_allocs;
45029 +- atomic_t bg_extends;
45030 ++ atomic_unchecked_t moves;
45031 ++ atomic_unchecked_t local_data;
45032 ++ atomic_unchecked_t bitmap_data;
45033 ++ atomic_unchecked_t bg_allocs;
45034 ++ atomic_unchecked_t bg_extends;
45035 + };
45036 +
45037 + enum ocfs2_local_alloc_state
45038 +diff -urNp linux-3.1.1/fs/ocfs2/suballoc.c linux-3.1.1/fs/ocfs2/suballoc.c
45039 +--- linux-3.1.1/fs/ocfs2/suballoc.c 2011-11-11 15:19:27.000000000 -0500
45040 ++++ linux-3.1.1/fs/ocfs2/suballoc.c 2011-11-16 18:39:08.000000000 -0500
45041 +@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
45042 + mlog_errno(status);
45043 + goto bail;
45044 + }
45045 +- atomic_inc(&osb->alloc_stats.bg_extends);
45046 ++ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
45047 +
45048 + /* You should never ask for this much metadata */
45049 + BUG_ON(bits_wanted >
45050 +@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
45051 + mlog_errno(status);
45052 + goto bail;
45053 + }
45054 +- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45055 ++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45056 +
45057 + *suballoc_loc = res.sr_bg_blkno;
45058 + *suballoc_bit_start = res.sr_bit_offset;
45059 +@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
45060 + trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
45061 + res->sr_bits);
45062 +
45063 +- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45064 ++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45065 +
45066 + BUG_ON(res->sr_bits != 1);
45067 +
45068 +@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
45069 + mlog_errno(status);
45070 + goto bail;
45071 + }
45072 +- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45073 ++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45074 +
45075 + BUG_ON(res.sr_bits != 1);
45076 +
45077 +@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
45078 + cluster_start,
45079 + num_clusters);
45080 + if (!status)
45081 +- atomic_inc(&osb->alloc_stats.local_data);
45082 ++ atomic_inc_unchecked(&osb->alloc_stats.local_data);
45083 + } else {
45084 + if (min_clusters > (osb->bitmap_cpg - 1)) {
45085 + /* The only paths asking for contiguousness
45086 +@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
45087 + ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
45088 + res.sr_bg_blkno,
45089 + res.sr_bit_offset);
45090 +- atomic_inc(&osb->alloc_stats.bitmap_data);
45091 ++ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
45092 + *num_clusters = res.sr_bits;
45093 + }
45094 + }
45095 +diff -urNp linux-3.1.1/fs/ocfs2/super.c linux-3.1.1/fs/ocfs2/super.c
45096 +--- linux-3.1.1/fs/ocfs2/super.c 2011-11-11 15:19:27.000000000 -0500
45097 ++++ linux-3.1.1/fs/ocfs2/super.c 2011-11-16 18:39:08.000000000 -0500
45098 +@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
45099 + "%10s => GlobalAllocs: %d LocalAllocs: %d "
45100 + "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
45101 + "Stats",
45102 +- atomic_read(&osb->alloc_stats.bitmap_data),
45103 +- atomic_read(&osb->alloc_stats.local_data),
45104 +- atomic_read(&osb->alloc_stats.bg_allocs),
45105 +- atomic_read(&osb->alloc_stats.moves),
45106 +- atomic_read(&osb->alloc_stats.bg_extends));
45107 ++ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
45108 ++ atomic_read_unchecked(&osb->alloc_stats.local_data),
45109 ++ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
45110 ++ atomic_read_unchecked(&osb->alloc_stats.moves),
45111 ++ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
45112 +
45113 + out += snprintf(buf + out, len - out,
45114 + "%10s => State: %u Descriptor: %llu Size: %u bits "
45115 +@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct
45116 + spin_lock_init(&osb->osb_xattr_lock);
45117 + ocfs2_init_steal_slots(osb);
45118 +
45119 +- atomic_set(&osb->alloc_stats.moves, 0);
45120 +- atomic_set(&osb->alloc_stats.local_data, 0);
45121 +- atomic_set(&osb->alloc_stats.bitmap_data, 0);
45122 +- atomic_set(&osb->alloc_stats.bg_allocs, 0);
45123 +- atomic_set(&osb->alloc_stats.bg_extends, 0);
45124 ++ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
45125 ++ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
45126 ++ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
45127 ++ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
45128 ++ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
45129 +
45130 + /* Copy the blockcheck stats from the superblock probe */
45131 + osb->osb_ecc_stats = *stats;
45132 +diff -urNp linux-3.1.1/fs/ocfs2/symlink.c linux-3.1.1/fs/ocfs2/symlink.c
45133 +--- linux-3.1.1/fs/ocfs2/symlink.c 2011-11-11 15:19:27.000000000 -0500
45134 ++++ linux-3.1.1/fs/ocfs2/symlink.c 2011-11-16 18:39:08.000000000 -0500
45135 +@@ -142,7 +142,7 @@ bail:
45136 +
45137 + static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
45138 + {
45139 +- char *link = nd_get_link(nd);
45140 ++ const char *link = nd_get_link(nd);
45141 + if (!IS_ERR(link))
45142 + kfree(link);
45143 + }
45144 +diff -urNp linux-3.1.1/fs/open.c linux-3.1.1/fs/open.c
45145 +--- linux-3.1.1/fs/open.c 2011-11-11 15:19:27.000000000 -0500
45146 ++++ linux-3.1.1/fs/open.c 2011-11-17 19:07:55.000000000 -0500
45147 +@@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
45148 + error = locks_verify_truncate(inode, NULL, length);
45149 + if (!error)
45150 + error = security_path_truncate(&path);
45151 ++
45152 ++ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
45153 ++ error = -EACCES;
45154 ++
45155 + if (!error)
45156 + error = do_truncate(path.dentry, length, 0, NULL);
45157 +
45158 +@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
45159 + if (__mnt_is_readonly(path.mnt))
45160 + res = -EROFS;
45161 +
45162 ++ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
45163 ++ res = -EACCES;
45164 ++
45165 + out_path_release:
45166 + path_put(&path);
45167 + out:
45168 +@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
45169 + if (error)
45170 + goto dput_and_out;
45171 +
45172 ++ gr_log_chdir(path.dentry, path.mnt);
45173 ++
45174 + set_fs_pwd(current->fs, &path);
45175 +
45176 + dput_and_out:
45177 +@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
45178 + goto out_putf;
45179 +
45180 + error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
45181 ++
45182 ++ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
45183 ++ error = -EPERM;
45184 ++
45185 ++ if (!error)
45186 ++ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
45187 ++
45188 + if (!error)
45189 + set_fs_pwd(current->fs, &file->f_path);
45190 + out_putf:
45191 +@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __use
45192 + if (error)
45193 + goto dput_and_out;
45194 +
45195 ++ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
45196 ++ goto dput_and_out;
45197 ++
45198 + set_fs_root(current->fs, &path);
45199 ++
45200 ++ gr_handle_chroot_chdir(&path);
45201 ++
45202 + error = 0;
45203 + dput_and_out:
45204 + path_put(&path);
45205 +@@ -456,6 +478,16 @@ static int chmod_common(struct path *pat
45206 + if (error)
45207 + return error;
45208 + mutex_lock(&inode->i_mutex);
45209 ++
45210 ++ if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) {
45211 ++ error = -EACCES;
45212 ++ goto out_unlock;
45213 ++ }
45214 ++ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
45215 ++ error = -EACCES;
45216 ++ goto out_unlock;
45217 ++ }
45218 ++
45219 + error = security_path_chmod(path->dentry, path->mnt, mode);
45220 + if (error)
45221 + goto out_unlock;
45222 +@@ -506,6 +538,9 @@ static int chown_common(struct path *pat
45223 + int error;
45224 + struct iattr newattrs;
45225 +
45226 ++ if (!gr_acl_handle_chown(path->dentry, path->mnt))
45227 ++ return -EACCES;
45228 ++
45229 + newattrs.ia_valid = ATTR_CTIME;
45230 + if (user != (uid_t) -1) {
45231 + newattrs.ia_valid |= ATTR_UID;
45232 +diff -urNp linux-3.1.1/fs/partitions/ldm.c linux-3.1.1/fs/partitions/ldm.c
45233 +--- linux-3.1.1/fs/partitions/ldm.c 2011-11-11 15:19:27.000000000 -0500
45234 ++++ linux-3.1.1/fs/partitions/ldm.c 2011-11-17 19:08:15.000000000 -0500
45235 +@@ -1322,7 +1322,7 @@ static bool ldm_frag_add (const u8 *data
45236 + goto found;
45237 + }
45238 +
45239 +- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
45240 ++ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
45241 + if (!f) {
45242 + ldm_crit ("Out of memory.");
45243 + return false;
45244 +diff -urNp linux-3.1.1/fs/pipe.c linux-3.1.1/fs/pipe.c
45245 +--- linux-3.1.1/fs/pipe.c 2011-11-11 15:19:27.000000000 -0500
45246 ++++ linux-3.1.1/fs/pipe.c 2011-11-16 18:40:29.000000000 -0500
45247 +@@ -420,9 +420,9 @@ redo:
45248 + }
45249 + if (bufs) /* More to do? */
45250 + continue;
45251 +- if (!pipe->writers)
45252 ++ if (!atomic_read(&pipe->writers))
45253 + break;
45254 +- if (!pipe->waiting_writers) {
45255 ++ if (!atomic_read(&pipe->waiting_writers)) {
45256 + /* syscall merging: Usually we must not sleep
45257 + * if O_NONBLOCK is set, or if we got some data.
45258 + * But if a writer sleeps in kernel space, then
45259 +@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
45260 + mutex_lock(&inode->i_mutex);
45261 + pipe = inode->i_pipe;
45262 +
45263 +- if (!pipe->readers) {
45264 ++ if (!atomic_read(&pipe->readers)) {
45265 + send_sig(SIGPIPE, current, 0);
45266 + ret = -EPIPE;
45267 + goto out;
45268 +@@ -530,7 +530,7 @@ redo1:
45269 + for (;;) {
45270 + int bufs;
45271 +
45272 +- if (!pipe->readers) {
45273 ++ if (!atomic_read(&pipe->readers)) {
45274 + send_sig(SIGPIPE, current, 0);
45275 + if (!ret)
45276 + ret = -EPIPE;
45277 +@@ -616,9 +616,9 @@ redo2:
45278 + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45279 + do_wakeup = 0;
45280 + }
45281 +- pipe->waiting_writers++;
45282 ++ atomic_inc(&pipe->waiting_writers);
45283 + pipe_wait(pipe);
45284 +- pipe->waiting_writers--;
45285 ++ atomic_dec(&pipe->waiting_writers);
45286 + }
45287 + out:
45288 + mutex_unlock(&inode->i_mutex);
45289 +@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
45290 + mask = 0;
45291 + if (filp->f_mode & FMODE_READ) {
45292 + mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45293 +- if (!pipe->writers && filp->f_version != pipe->w_counter)
45294 ++ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45295 + mask |= POLLHUP;
45296 + }
45297 +
45298 +@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
45299 + * Most Unices do not set POLLERR for FIFOs but on Linux they
45300 + * behave exactly like pipes for poll().
45301 + */
45302 +- if (!pipe->readers)
45303 ++ if (!atomic_read(&pipe->readers))
45304 + mask |= POLLERR;
45305 + }
45306 +
45307 +@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
45308 +
45309 + mutex_lock(&inode->i_mutex);
45310 + pipe = inode->i_pipe;
45311 +- pipe->readers -= decr;
45312 +- pipe->writers -= decw;
45313 ++ atomic_sub(decr, &pipe->readers);
45314 ++ atomic_sub(decw, &pipe->writers);
45315 +
45316 +- if (!pipe->readers && !pipe->writers) {
45317 ++ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45318 + free_pipe_info(inode);
45319 + } else {
45320 + wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45321 +@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
45322 +
45323 + if (inode->i_pipe) {
45324 + ret = 0;
45325 +- inode->i_pipe->readers++;
45326 ++ atomic_inc(&inode->i_pipe->readers);
45327 + }
45328 +
45329 + mutex_unlock(&inode->i_mutex);
45330 +@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
45331 +
45332 + if (inode->i_pipe) {
45333 + ret = 0;
45334 +- inode->i_pipe->writers++;
45335 ++ atomic_inc(&inode->i_pipe->writers);
45336 + }
45337 +
45338 + mutex_unlock(&inode->i_mutex);
45339 +@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
45340 + if (inode->i_pipe) {
45341 + ret = 0;
45342 + if (filp->f_mode & FMODE_READ)
45343 +- inode->i_pipe->readers++;
45344 ++ atomic_inc(&inode->i_pipe->readers);
45345 + if (filp->f_mode & FMODE_WRITE)
45346 +- inode->i_pipe->writers++;
45347 ++ atomic_inc(&inode->i_pipe->writers);
45348 + }
45349 +
45350 + mutex_unlock(&inode->i_mutex);
45351 +@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45352 + inode->i_pipe = NULL;
45353 + }
45354 +
45355 +-static struct vfsmount *pipe_mnt __read_mostly;
45356 ++struct vfsmount *pipe_mnt __read_mostly;
45357 +
45358 + /*
45359 + * pipefs_dname() is called from d_path().
45360 +@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
45361 + goto fail_iput;
45362 + inode->i_pipe = pipe;
45363 +
45364 +- pipe->readers = pipe->writers = 1;
45365 ++ atomic_set(&pipe->readers, 1);
45366 ++ atomic_set(&pipe->writers, 1);
45367 + inode->i_fop = &rdwr_pipefifo_fops;
45368 +
45369 + /*
45370 +diff -urNp linux-3.1.1/fs/proc/array.c linux-3.1.1/fs/proc/array.c
45371 +--- linux-3.1.1/fs/proc/array.c 2011-11-11 15:19:27.000000000 -0500
45372 ++++ linux-3.1.1/fs/proc/array.c 2011-11-17 18:42:02.000000000 -0500
45373 +@@ -60,6 +60,7 @@
45374 + #include <linux/tty.h>
45375 + #include <linux/string.h>
45376 + #include <linux/mman.h>
45377 ++#include <linux/grsecurity.h>
45378 + #include <linux/proc_fs.h>
45379 + #include <linux/ioport.h>
45380 + #include <linux/uaccess.h>
45381 +@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
45382 + seq_putc(m, '\n');
45383 + }
45384 +
45385 ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45386 ++static inline void task_pax(struct seq_file *m, struct task_struct *p)
45387 ++{
45388 ++ if (p->mm)
45389 ++ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45390 ++ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45391 ++ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45392 ++ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45393 ++ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45394 ++ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45395 ++ else
45396 ++ seq_printf(m, "PaX:\t-----\n");
45397 ++}
45398 ++#endif
45399 ++
45400 + int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45401 + struct pid *pid, struct task_struct *task)
45402 + {
45403 +@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
45404 + task_cpus_allowed(m, task);
45405 + cpuset_task_status_allowed(m, task);
45406 + task_context_switch_counts(m, task);
45407 ++
45408 ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45409 ++ task_pax(m, task);
45410 ++#endif
45411 ++
45412 ++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45413 ++ task_grsec_rbac(m, task);
45414 ++#endif
45415 ++
45416 + return 0;
45417 + }
45418 +
45419 ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45420 ++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45421 ++ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45422 ++ _mm->pax_flags & MF_PAX_SEGMEXEC))
45423 ++#endif
45424 ++
45425 + static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45426 + struct pid *pid, struct task_struct *task, int whole)
45427 + {
45428 +@@ -378,6 +409,8 @@ static int do_task_stat(struct seq_file
45429 + char tcomm[sizeof(task->comm)];
45430 + unsigned long flags;
45431 +
45432 ++ pax_track_stack();
45433 ++
45434 + state = *get_task_state(task);
45435 + vsize = eip = esp = 0;
45436 + permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45437 +@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
45438 + gtime = task->gtime;
45439 + }
45440 +
45441 ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45442 ++ if (PAX_RAND_FLAGS(mm)) {
45443 ++ eip = 0;
45444 ++ esp = 0;
45445 ++ wchan = 0;
45446 ++ }
45447 ++#endif
45448 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
45449 ++ wchan = 0;
45450 ++ eip =0;
45451 ++ esp =0;
45452 ++#endif
45453 ++
45454 + /* scale priority and nice values from timeslices to -20..20 */
45455 + /* to make it look like a "normal" Unix priority/nice value */
45456 + priority = task_prio(task);
45457 +@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
45458 + vsize,
45459 + mm ? get_mm_rss(mm) : 0,
45460 + rsslim,
45461 ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45462 ++ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45463 ++ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45464 ++ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45465 ++#else
45466 + mm ? (permitted ? mm->start_code : 1) : 0,
45467 + mm ? (permitted ? mm->end_code : 1) : 0,
45468 + (permitted && mm) ? mm->start_stack : 0,
45469 ++#endif
45470 + esp,
45471 + eip,
45472 + /* The signal information here is obsolete.
45473 +@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
45474 +
45475 + return 0;
45476 + }
45477 ++
45478 ++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45479 ++int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45480 ++{
45481 ++ u32 curr_ip = 0;
45482 ++ unsigned long flags;
45483 ++
45484 ++ if (lock_task_sighand(task, &flags)) {
45485 ++ curr_ip = task->signal->curr_ip;
45486 ++ unlock_task_sighand(task, &flags);
45487 ++ }
45488 ++
45489 ++ return sprintf(buffer, "%pI4\n", &curr_ip);
45490 ++}
45491 ++#endif
45492 +diff -urNp linux-3.1.1/fs/proc/base.c linux-3.1.1/fs/proc/base.c
45493 +--- linux-3.1.1/fs/proc/base.c 2011-11-11 15:19:27.000000000 -0500
45494 ++++ linux-3.1.1/fs/proc/base.c 2011-11-17 18:43:19.000000000 -0500
45495 +@@ -107,6 +107,22 @@ struct pid_entry {
45496 + union proc_op op;
45497 + };
45498 +
45499 ++struct getdents_callback {
45500 ++ struct linux_dirent __user * current_dir;
45501 ++ struct linux_dirent __user * previous;
45502 ++ struct file * file;
45503 ++ int count;
45504 ++ int error;
45505 ++};
45506 ++
45507 ++static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45508 ++ loff_t offset, u64 ino, unsigned int d_type)
45509 ++{
45510 ++ struct getdents_callback * buf = (struct getdents_callback *) __buf;
45511 ++ buf->error = -EINVAL;
45512 ++ return 0;
45513 ++}
45514 ++
45515 + #define NOD(NAME, MODE, IOP, FOP, OP) { \
45516 + .name = (NAME), \
45517 + .len = sizeof(NAME) - 1, \
45518 +@@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_per
45519 + if (task == current)
45520 + return mm;
45521 +
45522 ++ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
45523 ++ return ERR_PTR(-EPERM);
45524 ++
45525 + /*
45526 + * If current is actively ptrace'ing, and would also be
45527 + * permitted to freshly attach with ptrace now, permit it.
45528 +@@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_
45529 + if (!mm->arg_end)
45530 + goto out_mm; /* Shh! No looking before we're done */
45531 +
45532 ++ if (gr_acl_handle_procpidmem(task))
45533 ++ goto out_mm;
45534 ++
45535 + len = mm->arg_end - mm->arg_start;
45536 +
45537 + if (len > PAGE_SIZE)
45538 +@@ -309,12 +331,28 @@ out:
45539 + return res;
45540 + }
45541 +
45542 ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45543 ++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45544 ++ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45545 ++ _mm->pax_flags & MF_PAX_SEGMEXEC))
45546 ++#endif
45547 ++
45548 + static int proc_pid_auxv(struct task_struct *task, char *buffer)
45549 + {
45550 + struct mm_struct *mm = mm_for_maps(task);
45551 + int res = PTR_ERR(mm);
45552 + if (mm && !IS_ERR(mm)) {
45553 + unsigned int nwords = 0;
45554 ++
45555 ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45556 ++ /* allow if we're currently ptracing this task */
45557 ++ if (PAX_RAND_FLAGS(mm) &&
45558 ++ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45559 ++ mmput(mm);
45560 ++ return 0;
45561 ++ }
45562 ++#endif
45563 ++
45564 + do {
45565 + nwords += 2;
45566 + } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45567 +@@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_str
45568 + }
45569 +
45570 +
45571 +-#ifdef CONFIG_KALLSYMS
45572 ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45573 + /*
45574 + * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45575 + * Returns the resolved symbol. If that fails, simply return the address.
45576 +@@ -367,7 +405,7 @@ static void unlock_trace(struct task_str
45577 + mutex_unlock(&task->signal->cred_guard_mutex);
45578 + }
45579 +
45580 +-#ifdef CONFIG_STACKTRACE
45581 ++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45582 +
45583 + #define MAX_STACK_TRACE_DEPTH 64
45584 +
45585 +@@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_s
45586 + return count;
45587 + }
45588 +
45589 +-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45590 ++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45591 + static int proc_pid_syscall(struct task_struct *task, char *buffer)
45592 + {
45593 + long nr;
45594 +@@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_
45595 + /************************************************************************/
45596 +
45597 + /* permission checks */
45598 +-static int proc_fd_access_allowed(struct inode *inode)
45599 ++static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45600 + {
45601 + struct task_struct *task;
45602 + int allowed = 0;
45603 +@@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct
45604 + */
45605 + task = get_proc_task(inode);
45606 + if (task) {
45607 +- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45608 ++ if (log)
45609 ++ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45610 ++ else
45611 ++ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45612 + put_task_struct(task);
45613 + }
45614 + return allowed;
45615 +@@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file
45616 + if (!task)
45617 + goto out_no_task;
45618 +
45619 ++ if (gr_acl_handle_procpidmem(task))
45620 ++ goto out;
45621 ++
45622 + ret = -ENOMEM;
45623 + page = (char *)__get_free_page(GFP_TEMPORARY);
45624 + if (!page)
45625 +@@ -1613,7 +1657,7 @@ static void *proc_pid_follow_link(struct
45626 + path_put(&nd->path);
45627 +
45628 + /* Are we allowed to snoop on the tasks file descriptors? */
45629 +- if (!proc_fd_access_allowed(inode))
45630 ++ if (!proc_fd_access_allowed(inode,0))
45631 + goto out;
45632 +
45633 + error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45634 +@@ -1652,8 +1696,18 @@ static int proc_pid_readlink(struct dent
45635 + struct path path;
45636 +
45637 + /* Are we allowed to snoop on the tasks file descriptors? */
45638 +- if (!proc_fd_access_allowed(inode))
45639 +- goto out;
45640 ++ /* logging this is needed for learning on chromium to work properly,
45641 ++ but we don't want to flood the logs from 'ps' which does a readlink
45642 ++ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45643 ++ CAP_SYS_PTRACE as it's not necessary for its basic functionality
45644 ++ */
45645 ++ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45646 ++ if (!proc_fd_access_allowed(inode,0))
45647 ++ goto out;
45648 ++ } else {
45649 ++ if (!proc_fd_access_allowed(inode,1))
45650 ++ goto out;
45651 ++ }
45652 +
45653 + error = PROC_I(inode)->op.proc_get_link(inode, &path);
45654 + if (error)
45655 +@@ -1718,7 +1772,11 @@ struct inode *proc_pid_make_inode(struct
45656 + rcu_read_lock();
45657 + cred = __task_cred(task);
45658 + inode->i_uid = cred->euid;
45659 ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45660 ++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45661 ++#else
45662 + inode->i_gid = cred->egid;
45663 ++#endif
45664 + rcu_read_unlock();
45665 + }
45666 + security_task_to_inode(task, inode);
45667 +@@ -1736,6 +1794,9 @@ int pid_getattr(struct vfsmount *mnt, st
45668 + struct inode *inode = dentry->d_inode;
45669 + struct task_struct *task;
45670 + const struct cred *cred;
45671 ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45672 ++ const struct cred *tmpcred = current_cred();
45673 ++#endif
45674 +
45675 + generic_fillattr(inode, stat);
45676 +
45677 +@@ -1743,13 +1804,41 @@ int pid_getattr(struct vfsmount *mnt, st
45678 + stat->uid = 0;
45679 + stat->gid = 0;
45680 + task = pid_task(proc_pid(inode), PIDTYPE_PID);
45681 ++
45682 ++ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45683 ++ rcu_read_unlock();
45684 ++ return -ENOENT;
45685 ++ }
45686 ++
45687 + if (task) {
45688 ++ cred = __task_cred(task);
45689 ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45690 ++ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45691 ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45692 ++ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45693 ++#endif
45694 ++ ) {
45695 ++#endif
45696 + if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45697 ++#ifdef CONFIG_GRKERNSEC_PROC_USER
45698 ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45699 ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45700 ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45701 ++#endif
45702 + task_dumpable(task)) {
45703 +- cred = __task_cred(task);
45704 + stat->uid = cred->euid;
45705 ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45706 ++ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45707 ++#else
45708 + stat->gid = cred->egid;
45709 ++#endif
45710 + }
45711 ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45712 ++ } else {
45713 ++ rcu_read_unlock();
45714 ++ return -ENOENT;
45715 ++ }
45716 ++#endif
45717 + }
45718 + rcu_read_unlock();
45719 + return 0;
45720 +@@ -1786,11 +1875,20 @@ int pid_revalidate(struct dentry *dentry
45721 +
45722 + if (task) {
45723 + if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45724 ++#ifdef CONFIG_GRKERNSEC_PROC_USER
45725 ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45726 ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45727 ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45728 ++#endif
45729 + task_dumpable(task)) {
45730 + rcu_read_lock();
45731 + cred = __task_cred(task);
45732 + inode->i_uid = cred->euid;
45733 ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45734 ++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45735 ++#else
45736 + inode->i_gid = cred->egid;
45737 ++#endif
45738 + rcu_read_unlock();
45739 + } else {
45740 + inode->i_uid = 0;
45741 +@@ -1908,7 +2006,8 @@ static int proc_fd_info(struct inode *in
45742 + int fd = proc_fd(inode);
45743 +
45744 + if (task) {
45745 +- files = get_files_struct(task);
45746 ++ if (!gr_acl_handle_procpidmem(task))
45747 ++ files = get_files_struct(task);
45748 + put_task_struct(task);
45749 + }
45750 + if (files) {
45751 +@@ -2176,11 +2275,21 @@ static const struct file_operations proc
45752 + */
45753 + static int proc_fd_permission(struct inode *inode, int mask)
45754 + {
45755 ++ struct task_struct *task;
45756 + int rv = generic_permission(inode, mask);
45757 +- if (rv == 0)
45758 +- return 0;
45759 ++
45760 + if (task_pid(current) == proc_pid(inode))
45761 + rv = 0;
45762 ++
45763 ++ task = get_proc_task(inode);
45764 ++ if (task == NULL)
45765 ++ return rv;
45766 ++
45767 ++ if (gr_acl_handle_procpidmem(task))
45768 ++ rv = -EACCES;
45769 ++
45770 ++ put_task_struct(task);
45771 ++
45772 + return rv;
45773 + }
45774 +
45775 +@@ -2290,6 +2399,9 @@ static struct dentry *proc_pident_lookup
45776 + if (!task)
45777 + goto out_no_task;
45778 +
45779 ++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45780 ++ goto out;
45781 ++
45782 + /*
45783 + * Yes, it does not scale. And it should not. Don't add
45784 + * new entries into /proc/<tgid>/ without very good reasons.
45785 +@@ -2334,6 +2446,9 @@ static int proc_pident_readdir(struct fi
45786 + if (!task)
45787 + goto out_no_task;
45788 +
45789 ++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45790 ++ goto out;
45791 ++
45792 + ret = 0;
45793 + i = filp->f_pos;
45794 + switch (i) {
45795 +@@ -2604,7 +2719,7 @@ static void *proc_self_follow_link(struc
45796 + static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45797 + void *cookie)
45798 + {
45799 +- char *s = nd_get_link(nd);
45800 ++ const char *s = nd_get_link(nd);
45801 + if (!IS_ERR(s))
45802 + __putname(s);
45803 + }
45804 +@@ -2802,7 +2917,7 @@ static const struct pid_entry tgid_base_
45805 + REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
45806 + #endif
45807 + REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45808 +-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45809 ++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45810 + INF("syscall", S_IRUGO, proc_pid_syscall),
45811 + #endif
45812 + INF("cmdline", S_IRUGO, proc_pid_cmdline),
45813 +@@ -2827,10 +2942,10 @@ static const struct pid_entry tgid_base_
45814 + #ifdef CONFIG_SECURITY
45815 + DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45816 + #endif
45817 +-#ifdef CONFIG_KALLSYMS
45818 ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45819 + INF("wchan", S_IRUGO, proc_pid_wchan),
45820 + #endif
45821 +-#ifdef CONFIG_STACKTRACE
45822 ++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45823 + ONE("stack", S_IRUGO, proc_pid_stack),
45824 + #endif
45825 + #ifdef CONFIG_SCHEDSTATS
45826 +@@ -2864,6 +2979,9 @@ static const struct pid_entry tgid_base_
45827 + #ifdef CONFIG_HARDWALL
45828 + INF("hardwall", S_IRUGO, proc_pid_hardwall),
45829 + #endif
45830 ++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45831 ++ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45832 ++#endif
45833 + };
45834 +
45835 + static int proc_tgid_base_readdir(struct file * filp,
45836 +@@ -2989,7 +3107,14 @@ static struct dentry *proc_pid_instantia
45837 + if (!inode)
45838 + goto out;
45839 +
45840 ++#ifdef CONFIG_GRKERNSEC_PROC_USER
45841 ++ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45842 ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45843 ++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45844 ++ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45845 ++#else
45846 + inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45847 ++#endif
45848 + inode->i_op = &proc_tgid_base_inode_operations;
45849 + inode->i_fop = &proc_tgid_base_operations;
45850 + inode->i_flags|=S_IMMUTABLE;
45851 +@@ -3031,7 +3156,14 @@ struct dentry *proc_pid_lookup(struct in
45852 + if (!task)
45853 + goto out;
45854 +
45855 ++ if (!has_group_leader_pid(task))
45856 ++ goto out_put_task;
45857 ++
45858 ++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45859 ++ goto out_put_task;
45860 ++
45861 + result = proc_pid_instantiate(dir, dentry, task, NULL);
45862 ++out_put_task:
45863 + put_task_struct(task);
45864 + out:
45865 + return result;
45866 +@@ -3096,6 +3228,11 @@ int proc_pid_readdir(struct file * filp,
45867 + {
45868 + unsigned int nr;
45869 + struct task_struct *reaper;
45870 ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45871 ++ const struct cred *tmpcred = current_cred();
45872 ++ const struct cred *itercred;
45873 ++#endif
45874 ++ filldir_t __filldir = filldir;
45875 + struct tgid_iter iter;
45876 + struct pid_namespace *ns;
45877 +
45878 +@@ -3119,8 +3256,27 @@ int proc_pid_readdir(struct file * filp,
45879 + for (iter = next_tgid(ns, iter);
45880 + iter.task;
45881 + iter.tgid += 1, iter = next_tgid(ns, iter)) {
45882 ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45883 ++ rcu_read_lock();
45884 ++ itercred = __task_cred(iter.task);
45885 ++#endif
45886 ++ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45887 ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45888 ++ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45889 ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45890 ++ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45891 ++#endif
45892 ++ )
45893 ++#endif
45894 ++ )
45895 ++ __filldir = &gr_fake_filldir;
45896 ++ else
45897 ++ __filldir = filldir;
45898 ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45899 ++ rcu_read_unlock();
45900 ++#endif
45901 + filp->f_pos = iter.tgid + TGID_OFFSET;
45902 +- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45903 ++ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45904 + put_task_struct(iter.task);
45905 + goto out;
45906 + }
45907 +@@ -3148,7 +3304,7 @@ static const struct pid_entry tid_base_s
45908 + REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45909 + #endif
45910 + REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45911 +-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45912 ++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45913 + INF("syscall", S_IRUGO, proc_pid_syscall),
45914 + #endif
45915 + INF("cmdline", S_IRUGO, proc_pid_cmdline),
45916 +@@ -3172,10 +3328,10 @@ static const struct pid_entry tid_base_s
45917 + #ifdef CONFIG_SECURITY
45918 + DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45919 + #endif
45920 +-#ifdef CONFIG_KALLSYMS
45921 ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45922 + INF("wchan", S_IRUGO, proc_pid_wchan),
45923 + #endif
45924 +-#ifdef CONFIG_STACKTRACE
45925 ++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45926 + ONE("stack", S_IRUGO, proc_pid_stack),
45927 + #endif
45928 + #ifdef CONFIG_SCHEDSTATS
45929 +diff -urNp linux-3.1.1/fs/proc/cmdline.c linux-3.1.1/fs/proc/cmdline.c
45930 +--- linux-3.1.1/fs/proc/cmdline.c 2011-11-11 15:19:27.000000000 -0500
45931 ++++ linux-3.1.1/fs/proc/cmdline.c 2011-11-16 18:40:29.000000000 -0500
45932 +@@ -23,7 +23,11 @@ static const struct file_operations cmdl
45933 +
45934 + static int __init proc_cmdline_init(void)
45935 + {
45936 ++#ifdef CONFIG_GRKERNSEC_PROC_ADD
45937 ++ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45938 ++#else
45939 + proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45940 ++#endif
45941 + return 0;
45942 + }
45943 + module_init(proc_cmdline_init);
45944 +diff -urNp linux-3.1.1/fs/proc/devices.c linux-3.1.1/fs/proc/devices.c
45945 +--- linux-3.1.1/fs/proc/devices.c 2011-11-11 15:19:27.000000000 -0500
45946 ++++ linux-3.1.1/fs/proc/devices.c 2011-11-16 18:40:29.000000000 -0500
45947 +@@ -64,7 +64,11 @@ static const struct file_operations proc
45948 +
45949 + static int __init proc_devices_init(void)
45950 + {
45951 ++#ifdef CONFIG_GRKERNSEC_PROC_ADD
45952 ++ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45953 ++#else
45954 + proc_create("devices", 0, NULL, &proc_devinfo_operations);
45955 ++#endif
45956 + return 0;
45957 + }
45958 + module_init(proc_devices_init);
45959 +diff -urNp linux-3.1.1/fs/proc/inode.c linux-3.1.1/fs/proc/inode.c
45960 +--- linux-3.1.1/fs/proc/inode.c 2011-11-11 15:19:27.000000000 -0500
45961 ++++ linux-3.1.1/fs/proc/inode.c 2011-11-16 18:40:29.000000000 -0500
45962 +@@ -18,12 +18,18 @@
45963 + #include <linux/module.h>
45964 + #include <linux/sysctl.h>
45965 + #include <linux/slab.h>
45966 ++#include <linux/grsecurity.h>
45967 +
45968 + #include <asm/system.h>
45969 + #include <asm/uaccess.h>
45970 +
45971 + #include "internal.h"
45972 +
45973 ++#ifdef CONFIG_PROC_SYSCTL
45974 ++extern const struct inode_operations proc_sys_inode_operations;
45975 ++extern const struct inode_operations proc_sys_dir_operations;
45976 ++#endif
45977 ++
45978 + static void proc_evict_inode(struct inode *inode)
45979 + {
45980 + struct proc_dir_entry *de;
45981 +@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inod
45982 + ns_ops = PROC_I(inode)->ns_ops;
45983 + if (ns_ops && ns_ops->put)
45984 + ns_ops->put(PROC_I(inode)->ns);
45985 ++
45986 ++#ifdef CONFIG_PROC_SYSCTL
45987 ++ if (inode->i_op == &proc_sys_inode_operations ||
45988 ++ inode->i_op == &proc_sys_dir_operations)
45989 ++ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
45990 ++#endif
45991 ++
45992 + }
45993 +
45994 + static struct kmem_cache * proc_inode_cachep;
45995 +@@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct supe
45996 + if (de->mode) {
45997 + inode->i_mode = de->mode;
45998 + inode->i_uid = de->uid;
45999 ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46000 ++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46001 ++#else
46002 + inode->i_gid = de->gid;
46003 ++#endif
46004 + }
46005 + if (de->size)
46006 + inode->i_size = de->size;
46007 +diff -urNp linux-3.1.1/fs/proc/internal.h linux-3.1.1/fs/proc/internal.h
46008 +--- linux-3.1.1/fs/proc/internal.h 2011-11-11 15:19:27.000000000 -0500
46009 ++++ linux-3.1.1/fs/proc/internal.h 2011-11-16 18:40:29.000000000 -0500
46010 +@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
46011 + struct pid *pid, struct task_struct *task);
46012 + extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46013 + struct pid *pid, struct task_struct *task);
46014 ++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46015 ++extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
46016 ++#endif
46017 + extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
46018 +
46019 + extern const struct file_operations proc_maps_operations;
46020 +diff -urNp linux-3.1.1/fs/proc/Kconfig linux-3.1.1/fs/proc/Kconfig
46021 +--- linux-3.1.1/fs/proc/Kconfig 2011-11-11 15:19:27.000000000 -0500
46022 ++++ linux-3.1.1/fs/proc/Kconfig 2011-11-16 18:40:29.000000000 -0500
46023 +@@ -30,12 +30,12 @@ config PROC_FS
46024 +
46025 + config PROC_KCORE
46026 + bool "/proc/kcore support" if !ARM
46027 +- depends on PROC_FS && MMU
46028 ++ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
46029 +
46030 + config PROC_VMCORE
46031 + bool "/proc/vmcore support"
46032 +- depends on PROC_FS && CRASH_DUMP
46033 +- default y
46034 ++ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
46035 ++ default n
46036 + help
46037 + Exports the dump image of crashed kernel in ELF format.
46038 +
46039 +@@ -59,8 +59,8 @@ config PROC_SYSCTL
46040 + limited in memory.
46041 +
46042 + config PROC_PAGE_MONITOR
46043 +- default y
46044 +- depends on PROC_FS && MMU
46045 ++ default n
46046 ++ depends on PROC_FS && MMU && !GRKERNSEC
46047 + bool "Enable /proc page monitoring" if EXPERT
46048 + help
46049 + Various /proc files exist to monitor process memory utilization:
46050 +diff -urNp linux-3.1.1/fs/proc/kcore.c linux-3.1.1/fs/proc/kcore.c
46051 +--- linux-3.1.1/fs/proc/kcore.c 2011-11-11 15:19:27.000000000 -0500
46052 ++++ linux-3.1.1/fs/proc/kcore.c 2011-11-16 18:40:29.000000000 -0500
46053 +@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
46054 + off_t offset = 0;
46055 + struct kcore_list *m;
46056 +
46057 ++ pax_track_stack();
46058 ++
46059 + /* setup ELF header */
46060 + elf = (struct elfhdr *) bufp;
46061 + bufp += sizeof(struct elfhdr);
46062 +@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
46063 + * the addresses in the elf_phdr on our list.
46064 + */
46065 + start = kc_offset_to_vaddr(*fpos - elf_buflen);
46066 +- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
46067 ++ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
46068 ++ if (tsz > buflen)
46069 + tsz = buflen;
46070 +-
46071 ++
46072 + while (buflen) {
46073 + struct kcore_list *m;
46074 +
46075 +@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
46076 + kfree(elf_buf);
46077 + } else {
46078 + if (kern_addr_valid(start)) {
46079 +- unsigned long n;
46080 ++ char *elf_buf;
46081 ++ mm_segment_t oldfs;
46082 +
46083 +- n = copy_to_user(buffer, (char *)start, tsz);
46084 +- /*
46085 +- * We cannot distingush between fault on source
46086 +- * and fault on destination. When this happens
46087 +- * we clear too and hope it will trigger the
46088 +- * EFAULT again.
46089 +- */
46090 +- if (n) {
46091 +- if (clear_user(buffer + tsz - n,
46092 +- n))
46093 ++ elf_buf = kmalloc(tsz, GFP_KERNEL);
46094 ++ if (!elf_buf)
46095 ++ return -ENOMEM;
46096 ++ oldfs = get_fs();
46097 ++ set_fs(KERNEL_DS);
46098 ++ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46099 ++ set_fs(oldfs);
46100 ++ if (copy_to_user(buffer, elf_buf, tsz)) {
46101 ++ kfree(elf_buf);
46102 + return -EFAULT;
46103 ++ }
46104 + }
46105 ++ set_fs(oldfs);
46106 ++ kfree(elf_buf);
46107 + } else {
46108 + if (clear_user(buffer, tsz))
46109 + return -EFAULT;
46110 +@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
46111 +
46112 + static int open_kcore(struct inode *inode, struct file *filp)
46113 + {
46114 ++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46115 ++ return -EPERM;
46116 ++#endif
46117 + if (!capable(CAP_SYS_RAWIO))
46118 + return -EPERM;
46119 + if (kcore_need_update)
46120 +diff -urNp linux-3.1.1/fs/proc/meminfo.c linux-3.1.1/fs/proc/meminfo.c
46121 +--- linux-3.1.1/fs/proc/meminfo.c 2011-11-11 15:19:27.000000000 -0500
46122 ++++ linux-3.1.1/fs/proc/meminfo.c 2011-11-16 18:40:29.000000000 -0500
46123 +@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
46124 + unsigned long pages[NR_LRU_LISTS];
46125 + int lru;
46126 +
46127 ++ pax_track_stack();
46128 ++
46129 + /*
46130 + * display in kilobytes.
46131 + */
46132 +@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
46133 + vmi.used >> 10,
46134 + vmi.largest_chunk >> 10
46135 + #ifdef CONFIG_MEMORY_FAILURE
46136 +- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46137 ++ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46138 + #endif
46139 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
46140 + ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
46141 +diff -urNp linux-3.1.1/fs/proc/nommu.c linux-3.1.1/fs/proc/nommu.c
46142 +--- linux-3.1.1/fs/proc/nommu.c 2011-11-11 15:19:27.000000000 -0500
46143 ++++ linux-3.1.1/fs/proc/nommu.c 2011-11-16 18:39:08.000000000 -0500
46144 +@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
46145 + if (len < 1)
46146 + len = 1;
46147 + seq_printf(m, "%*c", len, ' ');
46148 +- seq_path(m, &file->f_path, "");
46149 ++ seq_path(m, &file->f_path, "\n\\");
46150 + }
46151 +
46152 + seq_putc(m, '\n');
46153 +diff -urNp linux-3.1.1/fs/proc/proc_net.c linux-3.1.1/fs/proc/proc_net.c
46154 +--- linux-3.1.1/fs/proc/proc_net.c 2011-11-11 15:19:27.000000000 -0500
46155 ++++ linux-3.1.1/fs/proc/proc_net.c 2011-11-16 18:40:29.000000000 -0500
46156 +@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
46157 + struct task_struct *task;
46158 + struct nsproxy *ns;
46159 + struct net *net = NULL;
46160 ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46161 ++ const struct cred *cred = current_cred();
46162 ++#endif
46163 ++
46164 ++#ifdef CONFIG_GRKERNSEC_PROC_USER
46165 ++ if (cred->fsuid)
46166 ++ return net;
46167 ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46168 ++ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46169 ++ return net;
46170 ++#endif
46171 +
46172 + rcu_read_lock();
46173 + task = pid_task(proc_pid(dir), PIDTYPE_PID);
46174 +diff -urNp linux-3.1.1/fs/proc/proc_sysctl.c linux-3.1.1/fs/proc/proc_sysctl.c
46175 +--- linux-3.1.1/fs/proc/proc_sysctl.c 2011-11-11 15:19:27.000000000 -0500
46176 ++++ linux-3.1.1/fs/proc/proc_sysctl.c 2011-11-18 18:45:33.000000000 -0500
46177 +@@ -8,11 +8,13 @@
46178 + #include <linux/namei.h>
46179 + #include "internal.h"
46180 +
46181 ++extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46182 ++
46183 + static const struct dentry_operations proc_sys_dentry_operations;
46184 + static const struct file_operations proc_sys_file_operations;
46185 +-static const struct inode_operations proc_sys_inode_operations;
46186 ++const struct inode_operations proc_sys_inode_operations;
46187 + static const struct file_operations proc_sys_dir_file_operations;
46188 +-static const struct inode_operations proc_sys_dir_operations;
46189 ++const struct inode_operations proc_sys_dir_operations;
46190 +
46191 + static struct inode *proc_sys_make_inode(struct super_block *sb,
46192 + struct ctl_table_header *head, struct ctl_table *table)
46193 +@@ -121,8 +123,14 @@ static struct dentry *proc_sys_lookup(st
46194 +
46195 + err = NULL;
46196 + d_set_d_op(dentry, &proc_sys_dentry_operations);
46197 ++
46198 ++ gr_handle_proc_create(dentry, inode);
46199 ++
46200 + d_add(dentry, inode);
46201 +
46202 ++ if (gr_handle_sysctl(p, MAY_EXEC))
46203 ++ err = ERR_PTR(-ENOENT);
46204 ++
46205 + out:
46206 + sysctl_head_finish(head);
46207 + return err;
46208 +@@ -202,6 +210,9 @@ static int proc_sys_fill_cache(struct fi
46209 + return -ENOMEM;
46210 + } else {
46211 + d_set_d_op(child, &proc_sys_dentry_operations);
46212 ++
46213 ++ gr_handle_proc_create(child, inode);
46214 ++
46215 + d_add(child, inode);
46216 + }
46217 + } else {
46218 +@@ -230,6 +241,9 @@ static int scan(struct ctl_table_header
46219 + if (*pos < file->f_pos)
46220 + continue;
46221 +
46222 ++ if (gr_handle_sysctl(table, 0))
46223 ++ continue;
46224 ++
46225 + res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46226 + if (res)
46227 + return res;
46228 +@@ -355,6 +369,9 @@ static int proc_sys_getattr(struct vfsmo
46229 + if (IS_ERR(head))
46230 + return PTR_ERR(head);
46231 +
46232 ++ if (table && gr_handle_sysctl(table, MAY_EXEC))
46233 ++ return -ENOENT;
46234 ++
46235 + generic_fillattr(inode, stat);
46236 + if (table)
46237 + stat->mode = (stat->mode & S_IFMT) | table->mode;
46238 +@@ -370,17 +387,18 @@ static const struct file_operations proc
46239 + };
46240 +
46241 + static const struct file_operations proc_sys_dir_file_operations = {
46242 ++ .read = generic_read_dir,
46243 + .readdir = proc_sys_readdir,
46244 + .llseek = generic_file_llseek,
46245 + };
46246 +
46247 +-static const struct inode_operations proc_sys_inode_operations = {
46248 ++const struct inode_operations proc_sys_inode_operations = {
46249 + .permission = proc_sys_permission,
46250 + .setattr = proc_sys_setattr,
46251 + .getattr = proc_sys_getattr,
46252 + };
46253 +
46254 +-static const struct inode_operations proc_sys_dir_operations = {
46255 ++const struct inode_operations proc_sys_dir_operations = {
46256 + .lookup = proc_sys_lookup,
46257 + .permission = proc_sys_permission,
46258 + .setattr = proc_sys_setattr,
46259 +diff -urNp linux-3.1.1/fs/proc/root.c linux-3.1.1/fs/proc/root.c
46260 +--- linux-3.1.1/fs/proc/root.c 2011-11-11 15:19:27.000000000 -0500
46261 ++++ linux-3.1.1/fs/proc/root.c 2011-11-16 18:40:29.000000000 -0500
46262 +@@ -123,7 +123,15 @@ void __init proc_root_init(void)
46263 + #ifdef CONFIG_PROC_DEVICETREE
46264 + proc_device_tree_init();
46265 + #endif
46266 ++#ifdef CONFIG_GRKERNSEC_PROC_ADD
46267 ++#ifdef CONFIG_GRKERNSEC_PROC_USER
46268 ++ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46269 ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46270 ++ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46271 ++#endif
46272 ++#else
46273 + proc_mkdir("bus", NULL);
46274 ++#endif
46275 + proc_sys_init();
46276 + }
46277 +
46278 +diff -urNp linux-3.1.1/fs/proc/task_mmu.c linux-3.1.1/fs/proc/task_mmu.c
46279 +--- linux-3.1.1/fs/proc/task_mmu.c 2011-11-11 15:19:27.000000000 -0500
46280 ++++ linux-3.1.1/fs/proc/task_mmu.c 2011-11-16 18:40:29.000000000 -0500
46281 +@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
46282 + "VmExe:\t%8lu kB\n"
46283 + "VmLib:\t%8lu kB\n"
46284 + "VmPTE:\t%8lu kB\n"
46285 +- "VmSwap:\t%8lu kB\n",
46286 +- hiwater_vm << (PAGE_SHIFT-10),
46287 ++ "VmSwap:\t%8lu kB\n"
46288 ++
46289 ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46290 ++ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46291 ++#endif
46292 ++
46293 ++ ,hiwater_vm << (PAGE_SHIFT-10),
46294 + (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46295 + mm->locked_vm << (PAGE_SHIFT-10),
46296 + hiwater_rss << (PAGE_SHIFT-10),
46297 +@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
46298 + data << (PAGE_SHIFT-10),
46299 + mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46300 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46301 +- swap << (PAGE_SHIFT-10));
46302 ++ swap << (PAGE_SHIFT-10)
46303 ++
46304 ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46305 ++ , mm->context.user_cs_base, mm->context.user_cs_limit
46306 ++#endif
46307 ++
46308 ++ );
46309 + }
46310 +
46311 + unsigned long task_vsize(struct mm_struct *mm)
46312 +@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
46313 + return ret;
46314 + }
46315 +
46316 ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46317 ++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46318 ++ (_mm->pax_flags & MF_PAX_RANDMMAP || \
46319 ++ _mm->pax_flags & MF_PAX_SEGMEXEC))
46320 ++#endif
46321 ++
46322 + static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46323 + {
46324 + struct mm_struct *mm = vma->vm_mm;
46325 +@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
46326 + pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46327 + }
46328 +
46329 +- /* We don't show the stack guard page in /proc/maps */
46330 ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46331 ++ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46332 ++ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46333 ++#else
46334 + start = vma->vm_start;
46335 +- if (stack_guard_page_start(vma, start))
46336 +- start += PAGE_SIZE;
46337 + end = vma->vm_end;
46338 +- if (stack_guard_page_end(vma, end))
46339 +- end -= PAGE_SIZE;
46340 ++#endif
46341 +
46342 + seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46343 + start,
46344 +@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
46345 + flags & VM_WRITE ? 'w' : '-',
46346 + flags & VM_EXEC ? 'x' : '-',
46347 + flags & VM_MAYSHARE ? 's' : 'p',
46348 ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46349 ++ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46350 ++#else
46351 + pgoff,
46352 ++#endif
46353 + MAJOR(dev), MINOR(dev), ino, &len);
46354 +
46355 + /*
46356 +@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
46357 + */
46358 + if (file) {
46359 + pad_len_spaces(m, len);
46360 +- seq_path(m, &file->f_path, "\n");
46361 ++ seq_path(m, &file->f_path, "\n\\");
46362 + } else {
46363 + const char *name = arch_vma_name(vma);
46364 + if (!name) {
46365 +@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
46366 + if (vma->vm_start <= mm->brk &&
46367 + vma->vm_end >= mm->start_brk) {
46368 + name = "[heap]";
46369 +- } else if (vma->vm_start <= mm->start_stack &&
46370 +- vma->vm_end >= mm->start_stack) {
46371 ++ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46372 ++ (vma->vm_start <= mm->start_stack &&
46373 ++ vma->vm_end >= mm->start_stack)) {
46374 + name = "[stack]";
46375 + }
46376 + } else {
46377 +@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
46378 + };
46379 +
46380 + memset(&mss, 0, sizeof mss);
46381 +- mss.vma = vma;
46382 +- /* mmap_sem is held in m_start */
46383 +- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46384 +- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46385 +-
46386 ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46387 ++ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46388 ++#endif
46389 ++ mss.vma = vma;
46390 ++ /* mmap_sem is held in m_start */
46391 ++ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46392 ++ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46393 ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46394 ++ }
46395 ++#endif
46396 + show_map_vma(m, vma);
46397 +
46398 + seq_printf(m,
46399 +@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
46400 + "KernelPageSize: %8lu kB\n"
46401 + "MMUPageSize: %8lu kB\n"
46402 + "Locked: %8lu kB\n",
46403 ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46404 ++ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46405 ++#else
46406 + (vma->vm_end - vma->vm_start) >> 10,
46407 ++#endif
46408 + mss.resident >> 10,
46409 + (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46410 + mss.shared_clean >> 10,
46411 +@@ -1031,7 +1062,7 @@ static int show_numa_map(struct seq_file
46412 +
46413 + if (file) {
46414 + seq_printf(m, " file=");
46415 +- seq_path(m, &file->f_path, "\n\t= ");
46416 ++ seq_path(m, &file->f_path, "\n\t\\= ");
46417 + } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46418 + seq_printf(m, " heap");
46419 + } else if (vma->vm_start <= mm->start_stack &&
46420 +diff -urNp linux-3.1.1/fs/proc/task_nommu.c linux-3.1.1/fs/proc/task_nommu.c
46421 +--- linux-3.1.1/fs/proc/task_nommu.c 2011-11-11 15:19:27.000000000 -0500
46422 ++++ linux-3.1.1/fs/proc/task_nommu.c 2011-11-16 18:39:08.000000000 -0500
46423 +@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
46424 + else
46425 + bytes += kobjsize(mm);
46426 +
46427 +- if (current->fs && current->fs->users > 1)
46428 ++ if (current->fs && atomic_read(&current->fs->users) > 1)
46429 + sbytes += kobjsize(current->fs);
46430 + else
46431 + bytes += kobjsize(current->fs);
46432 +@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
46433 +
46434 + if (file) {
46435 + pad_len_spaces(m, len);
46436 +- seq_path(m, &file->f_path, "");
46437 ++ seq_path(m, &file->f_path, "\n\\");
46438 + } else if (mm) {
46439 + if (vma->vm_start <= mm->start_stack &&
46440 + vma->vm_end >= mm->start_stack) {
46441 +diff -urNp linux-3.1.1/fs/quota/netlink.c linux-3.1.1/fs/quota/netlink.c
46442 +--- linux-3.1.1/fs/quota/netlink.c 2011-11-11 15:19:27.000000000 -0500
46443 ++++ linux-3.1.1/fs/quota/netlink.c 2011-11-16 18:39:08.000000000 -0500
46444 +@@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
46445 + void quota_send_warning(short type, unsigned int id, dev_t dev,
46446 + const char warntype)
46447 + {
46448 +- static atomic_t seq;
46449 ++ static atomic_unchecked_t seq;
46450 + struct sk_buff *skb;
46451 + void *msg_head;
46452 + int ret;
46453 +@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
46454 + "VFS: Not enough memory to send quota warning.\n");
46455 + return;
46456 + }
46457 +- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46458 ++ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46459 + &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46460 + if (!msg_head) {
46461 + printk(KERN_ERR
46462 +diff -urNp linux-3.1.1/fs/readdir.c linux-3.1.1/fs/readdir.c
46463 +--- linux-3.1.1/fs/readdir.c 2011-11-11 15:19:27.000000000 -0500
46464 ++++ linux-3.1.1/fs/readdir.c 2011-11-16 18:40:29.000000000 -0500
46465 +@@ -17,6 +17,7 @@
46466 + #include <linux/security.h>
46467 + #include <linux/syscalls.h>
46468 + #include <linux/unistd.h>
46469 ++#include <linux/namei.h>
46470 +
46471 + #include <asm/uaccess.h>
46472 +
46473 +@@ -67,6 +68,7 @@ struct old_linux_dirent {
46474 +
46475 + struct readdir_callback {
46476 + struct old_linux_dirent __user * dirent;
46477 ++ struct file * file;
46478 + int result;
46479 + };
46480 +
46481 +@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
46482 + buf->result = -EOVERFLOW;
46483 + return -EOVERFLOW;
46484 + }
46485 ++
46486 ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46487 ++ return 0;
46488 ++
46489 + buf->result++;
46490 + dirent = buf->dirent;
46491 + if (!access_ok(VERIFY_WRITE, dirent,
46492 +@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
46493 +
46494 + buf.result = 0;
46495 + buf.dirent = dirent;
46496 ++ buf.file = file;
46497 +
46498 + error = vfs_readdir(file, fillonedir, &buf);
46499 + if (buf.result)
46500 +@@ -142,6 +149,7 @@ struct linux_dirent {
46501 + struct getdents_callback {
46502 + struct linux_dirent __user * current_dir;
46503 + struct linux_dirent __user * previous;
46504 ++ struct file * file;
46505 + int count;
46506 + int error;
46507 + };
46508 +@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
46509 + buf->error = -EOVERFLOW;
46510 + return -EOVERFLOW;
46511 + }
46512 ++
46513 ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46514 ++ return 0;
46515 ++
46516 + dirent = buf->previous;
46517 + if (dirent) {
46518 + if (__put_user(offset, &dirent->d_off))
46519 +@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
46520 + buf.previous = NULL;
46521 + buf.count = count;
46522 + buf.error = 0;
46523 ++ buf.file = file;
46524 +
46525 + error = vfs_readdir(file, filldir, &buf);
46526 + if (error >= 0)
46527 +@@ -229,6 +242,7 @@ out:
46528 + struct getdents_callback64 {
46529 + struct linux_dirent64 __user * current_dir;
46530 + struct linux_dirent64 __user * previous;
46531 ++ struct file *file;
46532 + int count;
46533 + int error;
46534 + };
46535 +@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
46536 + buf->error = -EINVAL; /* only used if we fail.. */
46537 + if (reclen > buf->count)
46538 + return -EINVAL;
46539 ++
46540 ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46541 ++ return 0;
46542 ++
46543 + dirent = buf->previous;
46544 + if (dirent) {
46545 + if (__put_user(offset, &dirent->d_off))
46546 +@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46547 +
46548 + buf.current_dir = dirent;
46549 + buf.previous = NULL;
46550 ++ buf.file = file;
46551 + buf.count = count;
46552 + buf.error = 0;
46553 +
46554 +@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46555 + error = buf.error;
46556 + lastdirent = buf.previous;
46557 + if (lastdirent) {
46558 +- typeof(lastdirent->d_off) d_off = file->f_pos;
46559 ++ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46560 + if (__put_user(d_off, &lastdirent->d_off))
46561 + error = -EFAULT;
46562 + else
46563 +diff -urNp linux-3.1.1/fs/reiserfs/dir.c linux-3.1.1/fs/reiserfs/dir.c
46564 +--- linux-3.1.1/fs/reiserfs/dir.c 2011-11-11 15:19:27.000000000 -0500
46565 ++++ linux-3.1.1/fs/reiserfs/dir.c 2011-11-16 18:40:29.000000000 -0500
46566 +@@ -75,6 +75,8 @@ int reiserfs_readdir_dentry(struct dentr
46567 + struct reiserfs_dir_entry de;
46568 + int ret = 0;
46569 +
46570 ++ pax_track_stack();
46571 ++
46572 + reiserfs_write_lock(inode->i_sb);
46573 +
46574 + reiserfs_check_lock_depth(inode->i_sb, "readdir");
46575 +diff -urNp linux-3.1.1/fs/reiserfs/do_balan.c linux-3.1.1/fs/reiserfs/do_balan.c
46576 +--- linux-3.1.1/fs/reiserfs/do_balan.c 2011-11-11 15:19:27.000000000 -0500
46577 ++++ linux-3.1.1/fs/reiserfs/do_balan.c 2011-11-16 18:39:08.000000000 -0500
46578 +@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
46579 + return;
46580 + }
46581 +
46582 +- atomic_inc(&(fs_generation(tb->tb_sb)));
46583 ++ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46584 + do_balance_starts(tb);
46585 +
46586 + /* balance leaf returns 0 except if combining L R and S into
46587 +diff -urNp linux-3.1.1/fs/reiserfs/journal.c linux-3.1.1/fs/reiserfs/journal.c
46588 +--- linux-3.1.1/fs/reiserfs/journal.c 2011-11-11 15:19:27.000000000 -0500
46589 ++++ linux-3.1.1/fs/reiserfs/journal.c 2011-11-16 18:40:29.000000000 -0500
46590 +@@ -2289,6 +2289,8 @@ static struct buffer_head *reiserfs_brea
46591 + struct buffer_head *bh;
46592 + int i, j;
46593 +
46594 ++ pax_track_stack();
46595 ++
46596 + bh = __getblk(dev, block, bufsize);
46597 + if (buffer_uptodate(bh))
46598 + return (bh);
46599 +diff -urNp linux-3.1.1/fs/reiserfs/namei.c linux-3.1.1/fs/reiserfs/namei.c
46600 +--- linux-3.1.1/fs/reiserfs/namei.c 2011-11-11 15:19:27.000000000 -0500
46601 ++++ linux-3.1.1/fs/reiserfs/namei.c 2011-11-16 18:40:29.000000000 -0500
46602 +@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
46603 + unsigned long savelink = 1;
46604 + struct timespec ctime;
46605 +
46606 ++ pax_track_stack();
46607 ++
46608 + /* three balancings: (1) old name removal, (2) new name insertion
46609 + and (3) maybe "save" link insertion
46610 + stat data updates: (1) old directory,
46611 +diff -urNp linux-3.1.1/fs/reiserfs/procfs.c linux-3.1.1/fs/reiserfs/procfs.c
46612 +--- linux-3.1.1/fs/reiserfs/procfs.c 2011-11-11 15:19:27.000000000 -0500
46613 ++++ linux-3.1.1/fs/reiserfs/procfs.c 2011-11-16 18:40:29.000000000 -0500
46614 +@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
46615 + "SMALL_TAILS " : "NO_TAILS ",
46616 + replay_only(sb) ? "REPLAY_ONLY " : "",
46617 + convert_reiserfs(sb) ? "CONV " : "",
46618 +- atomic_read(&r->s_generation_counter),
46619 ++ atomic_read_unchecked(&r->s_generation_counter),
46620 + SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46621 + SF(s_do_balance), SF(s_unneeded_left_neighbor),
46622 + SF(s_good_search_by_key_reada), SF(s_bmaps),
46623 +@@ -299,6 +299,8 @@ static int show_journal(struct seq_file
46624 + struct journal_params *jp = &rs->s_v1.s_journal;
46625 + char b[BDEVNAME_SIZE];
46626 +
46627 ++ pax_track_stack();
46628 ++
46629 + seq_printf(m, /* on-disk fields */
46630 + "jp_journal_1st_block: \t%i\n"
46631 + "jp_journal_dev: \t%s[%x]\n"
46632 +diff -urNp linux-3.1.1/fs/reiserfs/stree.c linux-3.1.1/fs/reiserfs/stree.c
46633 +--- linux-3.1.1/fs/reiserfs/stree.c 2011-11-11 15:19:27.000000000 -0500
46634 ++++ linux-3.1.1/fs/reiserfs/stree.c 2011-11-16 18:40:29.000000000 -0500
46635 +@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
46636 + int iter = 0;
46637 + #endif
46638 +
46639 ++ pax_track_stack();
46640 ++
46641 + BUG_ON(!th->t_trans_id);
46642 +
46643 + init_tb_struct(th, &s_del_balance, sb, path,
46644 +@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
46645 + int retval;
46646 + int quota_cut_bytes = 0;
46647 +
46648 ++ pax_track_stack();
46649 ++
46650 + BUG_ON(!th->t_trans_id);
46651 +
46652 + le_key2cpu_key(&cpu_key, key);
46653 +@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
46654 + int quota_cut_bytes;
46655 + loff_t tail_pos = 0;
46656 +
46657 ++ pax_track_stack();
46658 ++
46659 + BUG_ON(!th->t_trans_id);
46660 +
46661 + init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
46662 +@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
46663 + int retval;
46664 + int fs_gen;
46665 +
46666 ++ pax_track_stack();
46667 ++
46668 + BUG_ON(!th->t_trans_id);
46669 +
46670 + fs_gen = get_generation(inode->i_sb);
46671 +@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
46672 + int fs_gen = 0;
46673 + int quota_bytes = 0;
46674 +
46675 ++ pax_track_stack();
46676 ++
46677 + BUG_ON(!th->t_trans_id);
46678 +
46679 + if (inode) { /* Do we count quotas for item? */
46680 +diff -urNp linux-3.1.1/fs/reiserfs/super.c linux-3.1.1/fs/reiserfs/super.c
46681 +--- linux-3.1.1/fs/reiserfs/super.c 2011-11-11 15:19:27.000000000 -0500
46682 ++++ linux-3.1.1/fs/reiserfs/super.c 2011-11-16 18:40:29.000000000 -0500
46683 +@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
46684 + {.option_name = NULL}
46685 + };
46686 +
46687 ++ pax_track_stack();
46688 ++
46689 + *blocks = 0;
46690 + if (!options || !*options)
46691 + /* use default configuration: create tails, journaling on, no
46692 +diff -urNp linux-3.1.1/fs/select.c linux-3.1.1/fs/select.c
46693 +--- linux-3.1.1/fs/select.c 2011-11-11 15:19:27.000000000 -0500
46694 ++++ linux-3.1.1/fs/select.c 2011-11-16 18:40:29.000000000 -0500
46695 +@@ -20,6 +20,7 @@
46696 + #include <linux/module.h>
46697 + #include <linux/slab.h>
46698 + #include <linux/poll.h>
46699 ++#include <linux/security.h>
46700 + #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46701 + #include <linux/file.h>
46702 + #include <linux/fdtable.h>
46703 +@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
46704 + int retval, i, timed_out = 0;
46705 + unsigned long slack = 0;
46706 +
46707 ++ pax_track_stack();
46708 ++
46709 + rcu_read_lock();
46710 + retval = max_select_fd(n, fds);
46711 + rcu_read_unlock();
46712 +@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
46713 + /* Allocate small arguments on the stack to save memory and be faster */
46714 + long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
46715 +
46716 ++ pax_track_stack();
46717 ++
46718 + ret = -EINVAL;
46719 + if (n < 0)
46720 + goto out_nofds;
46721 +@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
46722 + struct poll_list *walk = head;
46723 + unsigned long todo = nfds;
46724 +
46725 ++ pax_track_stack();
46726 ++
46727 ++ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46728 + if (nfds > rlimit(RLIMIT_NOFILE))
46729 + return -EINVAL;
46730 +
46731 +diff -urNp linux-3.1.1/fs/seq_file.c linux-3.1.1/fs/seq_file.c
46732 +--- linux-3.1.1/fs/seq_file.c 2011-11-11 15:19:27.000000000 -0500
46733 ++++ linux-3.1.1/fs/seq_file.c 2011-11-16 18:39:08.000000000 -0500
46734 +@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
46735 + return 0;
46736 + }
46737 + if (!m->buf) {
46738 +- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46739 ++ m->size = PAGE_SIZE;
46740 ++ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46741 + if (!m->buf)
46742 + return -ENOMEM;
46743 + }
46744 +@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
46745 + Eoverflow:
46746 + m->op->stop(m, p);
46747 + kfree(m->buf);
46748 +- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46749 ++ m->size <<= 1;
46750 ++ m->buf = kmalloc(m->size, GFP_KERNEL);
46751 + return !m->buf ? -ENOMEM : -EAGAIN;
46752 + }
46753 +
46754 +@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
46755 + m->version = file->f_version;
46756 + /* grab buffer if we didn't have one */
46757 + if (!m->buf) {
46758 +- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46759 ++ m->size = PAGE_SIZE;
46760 ++ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46761 + if (!m->buf)
46762 + goto Enomem;
46763 + }
46764 +@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
46765 + goto Fill;
46766 + m->op->stop(m, p);
46767 + kfree(m->buf);
46768 +- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46769 ++ m->size <<= 1;
46770 ++ m->buf = kmalloc(m->size, GFP_KERNEL);
46771 + if (!m->buf)
46772 + goto Enomem;
46773 + m->count = 0;
46774 +@@ -549,7 +553,7 @@ static void single_stop(struct seq_file
46775 + int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46776 + void *data)
46777 + {
46778 +- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46779 ++ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46780 + int res = -ENOMEM;
46781 +
46782 + if (op) {
46783 +diff -urNp linux-3.1.1/fs/splice.c linux-3.1.1/fs/splice.c
46784 +--- linux-3.1.1/fs/splice.c 2011-11-11 15:19:27.000000000 -0500
46785 ++++ linux-3.1.1/fs/splice.c 2011-11-16 18:40:29.000000000 -0500
46786 +@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46787 + pipe_lock(pipe);
46788 +
46789 + for (;;) {
46790 +- if (!pipe->readers) {
46791 ++ if (!atomic_read(&pipe->readers)) {
46792 + send_sig(SIGPIPE, current, 0);
46793 + if (!ret)
46794 + ret = -EPIPE;
46795 +@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46796 + do_wakeup = 0;
46797 + }
46798 +
46799 +- pipe->waiting_writers++;
46800 ++ atomic_inc(&pipe->waiting_writers);
46801 + pipe_wait(pipe);
46802 +- pipe->waiting_writers--;
46803 ++ atomic_dec(&pipe->waiting_writers);
46804 + }
46805 +
46806 + pipe_unlock(pipe);
46807 +@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *
46808 + .spd_release = spd_release_page,
46809 + };
46810 +
46811 ++ pax_track_stack();
46812 ++
46813 + if (splice_grow_spd(pipe, &spd))
46814 + return -ENOMEM;
46815 +
46816 +@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file
46817 + old_fs = get_fs();
46818 + set_fs(get_ds());
46819 + /* The cast to a user pointer is valid due to the set_fs() */
46820 +- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46821 ++ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46822 + set_fs(old_fs);
46823 +
46824 + return res;
46825 +@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file
46826 + old_fs = get_fs();
46827 + set_fs(get_ds());
46828 + /* The cast to a user pointer is valid due to the set_fs() */
46829 +- res = vfs_write(file, (const char __user *)buf, count, &pos);
46830 ++ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46831 + set_fs(old_fs);
46832 +
46833 + return res;
46834 +@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct
46835 + .spd_release = spd_release_page,
46836 + };
46837 +
46838 ++ pax_track_stack();
46839 ++
46840 + if (splice_grow_spd(pipe, &spd))
46841 + return -ENOMEM;
46842 +
46843 +@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct
46844 + goto err;
46845 +
46846 + this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46847 +- vec[i].iov_base = (void __user *) page_address(page);
46848 ++ vec[i].iov_base = (void __force_user *) page_address(page);
46849 + vec[i].iov_len = this_len;
46850 + spd.pages[i] = page;
46851 + spd.nr_pages++;
46852 +@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46853 + int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46854 + {
46855 + while (!pipe->nrbufs) {
46856 +- if (!pipe->writers)
46857 ++ if (!atomic_read(&pipe->writers))
46858 + return 0;
46859 +
46860 +- if (!pipe->waiting_writers && sd->num_spliced)
46861 ++ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46862 + return 0;
46863 +
46864 + if (sd->flags & SPLICE_F_NONBLOCK)
46865 +@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct fi
46866 + * out of the pipe right after the splice_to_pipe(). So set
46867 + * PIPE_READERS appropriately.
46868 + */
46869 +- pipe->readers = 1;
46870 ++ atomic_set(&pipe->readers, 1);
46871 +
46872 + current->splice_pipe = pipe;
46873 + }
46874 +@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file
46875 + };
46876 + long ret;
46877 +
46878 ++ pax_track_stack();
46879 ++
46880 + pipe = get_pipe_info(file);
46881 + if (!pipe)
46882 + return -EBADF;
46883 +@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_
46884 + ret = -ERESTARTSYS;
46885 + break;
46886 + }
46887 +- if (!pipe->writers)
46888 ++ if (!atomic_read(&pipe->writers))
46889 + break;
46890 +- if (!pipe->waiting_writers) {
46891 ++ if (!atomic_read(&pipe->waiting_writers)) {
46892 + if (flags & SPLICE_F_NONBLOCK) {
46893 + ret = -EAGAIN;
46894 + break;
46895 +@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_
46896 + pipe_lock(pipe);
46897 +
46898 + while (pipe->nrbufs >= pipe->buffers) {
46899 +- if (!pipe->readers) {
46900 ++ if (!atomic_read(&pipe->readers)) {
46901 + send_sig(SIGPIPE, current, 0);
46902 + ret = -EPIPE;
46903 + break;
46904 +@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_
46905 + ret = -ERESTARTSYS;
46906 + break;
46907 + }
46908 +- pipe->waiting_writers++;
46909 ++ atomic_inc(&pipe->waiting_writers);
46910 + pipe_wait(pipe);
46911 +- pipe->waiting_writers--;
46912 ++ atomic_dec(&pipe->waiting_writers);
46913 + }
46914 +
46915 + pipe_unlock(pipe);
46916 +@@ -1819,14 +1825,14 @@ retry:
46917 + pipe_double_lock(ipipe, opipe);
46918 +
46919 + do {
46920 +- if (!opipe->readers) {
46921 ++ if (!atomic_read(&opipe->readers)) {
46922 + send_sig(SIGPIPE, current, 0);
46923 + if (!ret)
46924 + ret = -EPIPE;
46925 + break;
46926 + }
46927 +
46928 +- if (!ipipe->nrbufs && !ipipe->writers)
46929 ++ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46930 + break;
46931 +
46932 + /*
46933 +@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_i
46934 + pipe_double_lock(ipipe, opipe);
46935 +
46936 + do {
46937 +- if (!opipe->readers) {
46938 ++ if (!atomic_read(&opipe->readers)) {
46939 + send_sig(SIGPIPE, current, 0);
46940 + if (!ret)
46941 + ret = -EPIPE;
46942 +@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_i
46943 + * return EAGAIN if we have the potential of some data in the
46944 + * future, otherwise just return 0
46945 + */
46946 +- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46947 ++ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46948 + ret = -EAGAIN;
46949 +
46950 + pipe_unlock(ipipe);
46951 +diff -urNp linux-3.1.1/fs/sysfs/file.c linux-3.1.1/fs/sysfs/file.c
46952 +--- linux-3.1.1/fs/sysfs/file.c 2011-11-11 15:19:27.000000000 -0500
46953 ++++ linux-3.1.1/fs/sysfs/file.c 2011-11-16 18:39:08.000000000 -0500
46954 +@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46955 +
46956 + struct sysfs_open_dirent {
46957 + atomic_t refcnt;
46958 +- atomic_t event;
46959 ++ atomic_unchecked_t event;
46960 + wait_queue_head_t poll;
46961 + struct list_head buffers; /* goes through sysfs_buffer.list */
46962 + };
46963 +@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentr
46964 + if (!sysfs_get_active(attr_sd))
46965 + return -ENODEV;
46966 +
46967 +- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46968 ++ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46969 + count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46970 +
46971 + sysfs_put_active(attr_sd);
46972 +@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct
46973 + return -ENOMEM;
46974 +
46975 + atomic_set(&new_od->refcnt, 0);
46976 +- atomic_set(&new_od->event, 1);
46977 ++ atomic_set_unchecked(&new_od->event, 1);
46978 + init_waitqueue_head(&new_od->poll);
46979 + INIT_LIST_HEAD(&new_od->buffers);
46980 + goto retry;
46981 +@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct fi
46982 +
46983 + sysfs_put_active(attr_sd);
46984 +
46985 +- if (buffer->event != atomic_read(&od->event))
46986 ++ if (buffer->event != atomic_read_unchecked(&od->event))
46987 + goto trigger;
46988 +
46989 + return DEFAULT_POLLMASK;
46990 +@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_di
46991 +
46992 + od = sd->s_attr.open;
46993 + if (od) {
46994 +- atomic_inc(&od->event);
46995 ++ atomic_inc_unchecked(&od->event);
46996 + wake_up_interruptible(&od->poll);
46997 + }
46998 +
46999 +diff -urNp linux-3.1.1/fs/sysfs/mount.c linux-3.1.1/fs/sysfs/mount.c
47000 +--- linux-3.1.1/fs/sysfs/mount.c 2011-11-11 15:19:27.000000000 -0500
47001 ++++ linux-3.1.1/fs/sysfs/mount.c 2011-11-16 18:40:29.000000000 -0500
47002 +@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
47003 + .s_name = "",
47004 + .s_count = ATOMIC_INIT(1),
47005 + .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
47006 ++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
47007 ++ .s_mode = S_IFDIR | S_IRWXU,
47008 ++#else
47009 + .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
47010 ++#endif
47011 + .s_ino = 1,
47012 + };
47013 +
47014 +diff -urNp linux-3.1.1/fs/sysfs/symlink.c linux-3.1.1/fs/sysfs/symlink.c
47015 +--- linux-3.1.1/fs/sysfs/symlink.c 2011-11-11 15:19:27.000000000 -0500
47016 ++++ linux-3.1.1/fs/sysfs/symlink.c 2011-11-16 18:39:08.000000000 -0500
47017 +@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
47018 +
47019 + static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47020 + {
47021 +- char *page = nd_get_link(nd);
47022 ++ const char *page = nd_get_link(nd);
47023 + if (!IS_ERR(page))
47024 + free_page((unsigned long)page);
47025 + }
47026 +diff -urNp linux-3.1.1/fs/udf/inode.c linux-3.1.1/fs/udf/inode.c
47027 +--- linux-3.1.1/fs/udf/inode.c 2011-11-11 15:19:27.000000000 -0500
47028 ++++ linux-3.1.1/fs/udf/inode.c 2011-11-16 18:40:29.000000000 -0500
47029 +@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
47030 + int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
47031 + int lastblock = 0;
47032 +
47033 ++ pax_track_stack();
47034 ++
47035 + prev_epos.offset = udf_file_entry_alloc_offset(inode);
47036 + prev_epos.block = iinfo->i_location;
47037 + prev_epos.bh = NULL;
47038 +diff -urNp linux-3.1.1/fs/udf/misc.c linux-3.1.1/fs/udf/misc.c
47039 +--- linux-3.1.1/fs/udf/misc.c 2011-11-11 15:19:27.000000000 -0500
47040 ++++ linux-3.1.1/fs/udf/misc.c 2011-11-16 18:39:08.000000000 -0500
47041 +@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
47042 +
47043 + u8 udf_tag_checksum(const struct tag *t)
47044 + {
47045 +- u8 *data = (u8 *)t;
47046 ++ const u8 *data = (const u8 *)t;
47047 + u8 checksum = 0;
47048 + int i;
47049 + for (i = 0; i < sizeof(struct tag); ++i)
47050 +diff -urNp linux-3.1.1/fs/utimes.c linux-3.1.1/fs/utimes.c
47051 +--- linux-3.1.1/fs/utimes.c 2011-11-11 15:19:27.000000000 -0500
47052 ++++ linux-3.1.1/fs/utimes.c 2011-11-16 18:40:29.000000000 -0500
47053 +@@ -1,6 +1,7 @@
47054 + #include <linux/compiler.h>
47055 + #include <linux/file.h>
47056 + #include <linux/fs.h>
47057 ++#include <linux/security.h>
47058 + #include <linux/linkage.h>
47059 + #include <linux/mount.h>
47060 + #include <linux/namei.h>
47061 +@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
47062 + goto mnt_drop_write_and_out;
47063 + }
47064 + }
47065 ++
47066 ++ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
47067 ++ error = -EACCES;
47068 ++ goto mnt_drop_write_and_out;
47069 ++ }
47070 ++
47071 + mutex_lock(&inode->i_mutex);
47072 + error = notify_change(path->dentry, &newattrs);
47073 + mutex_unlock(&inode->i_mutex);
47074 +diff -urNp linux-3.1.1/fs/xattr_acl.c linux-3.1.1/fs/xattr_acl.c
47075 +--- linux-3.1.1/fs/xattr_acl.c 2011-11-11 15:19:27.000000000 -0500
47076 ++++ linux-3.1.1/fs/xattr_acl.c 2011-11-16 18:39:08.000000000 -0500
47077 +@@ -17,8 +17,8 @@
47078 + struct posix_acl *
47079 + posix_acl_from_xattr(const void *value, size_t size)
47080 + {
47081 +- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
47082 +- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
47083 ++ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
47084 ++ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
47085 + int count;
47086 + struct posix_acl *acl;
47087 + struct posix_acl_entry *acl_e;
47088 +diff -urNp linux-3.1.1/fs/xattr.c linux-3.1.1/fs/xattr.c
47089 +--- linux-3.1.1/fs/xattr.c 2011-11-11 15:19:27.000000000 -0500
47090 ++++ linux-3.1.1/fs/xattr.c 2011-11-16 18:40:29.000000000 -0500
47091 +@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
47092 + * Extended attribute SET operations
47093 + */
47094 + static long
47095 +-setxattr(struct dentry *d, const char __user *name, const void __user *value,
47096 ++setxattr(struct path *path, const char __user *name, const void __user *value,
47097 + size_t size, int flags)
47098 + {
47099 + int error;
47100 +@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __
47101 + return PTR_ERR(kvalue);
47102 + }
47103 +
47104 +- error = vfs_setxattr(d, kname, kvalue, size, flags);
47105 ++ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
47106 ++ error = -EACCES;
47107 ++ goto out;
47108 ++ }
47109 ++
47110 ++ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
47111 ++out:
47112 + kfree(kvalue);
47113 + return error;
47114 + }
47115 +@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
47116 + return error;
47117 + error = mnt_want_write(path.mnt);
47118 + if (!error) {
47119 +- error = setxattr(path.dentry, name, value, size, flags);
47120 ++ error = setxattr(&path, name, value, size, flags);
47121 + mnt_drop_write(path.mnt);
47122 + }
47123 + path_put(&path);
47124 +@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
47125 + return error;
47126 + error = mnt_want_write(path.mnt);
47127 + if (!error) {
47128 +- error = setxattr(path.dentry, name, value, size, flags);
47129 ++ error = setxattr(&path, name, value, size, flags);
47130 + mnt_drop_write(path.mnt);
47131 + }
47132 + path_put(&path);
47133 +@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
47134 + const void __user *,value, size_t, size, int, flags)
47135 + {
47136 + struct file *f;
47137 +- struct dentry *dentry;
47138 + int error = -EBADF;
47139 +
47140 + f = fget(fd);
47141 + if (!f)
47142 + return error;
47143 +- dentry = f->f_path.dentry;
47144 +- audit_inode(NULL, dentry);
47145 ++ audit_inode(NULL, f->f_path.dentry);
47146 + error = mnt_want_write_file(f);
47147 + if (!error) {
47148 +- error = setxattr(dentry, name, value, size, flags);
47149 ++ error = setxattr(&f->f_path, name, value, size, flags);
47150 + mnt_drop_write(f->f_path.mnt);
47151 + }
47152 + fput(f);
47153 +diff -urNp linux-3.1.1/fs/xfs/xfs_bmap.c linux-3.1.1/fs/xfs/xfs_bmap.c
47154 +--- linux-3.1.1/fs/xfs/xfs_bmap.c 2011-11-11 15:19:27.000000000 -0500
47155 ++++ linux-3.1.1/fs/xfs/xfs_bmap.c 2011-11-16 18:39:08.000000000 -0500
47156 +@@ -250,7 +250,7 @@ xfs_bmap_validate_ret(
47157 + int nmap,
47158 + int ret_nmap);
47159 + #else
47160 +-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
47161 ++#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
47162 + #endif /* DEBUG */
47163 +
47164 + STATIC int
47165 +diff -urNp linux-3.1.1/fs/xfs/xfs_dir2_sf.c linux-3.1.1/fs/xfs/xfs_dir2_sf.c
47166 +--- linux-3.1.1/fs/xfs/xfs_dir2_sf.c 2011-11-11 15:19:27.000000000 -0500
47167 ++++ linux-3.1.1/fs/xfs/xfs_dir2_sf.c 2011-11-16 18:39:08.000000000 -0500
47168 +@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
47169 + }
47170 +
47171 + ino = xfs_dir2_sfe_get_ino(sfp, sfep);
47172 +- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47173 ++ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
47174 ++ char name[sfep->namelen];
47175 ++ memcpy(name, sfep->name, sfep->namelen);
47176 ++ if (filldir(dirent, name, sfep->namelen,
47177 ++ off & 0x7fffffff, ino, DT_UNKNOWN)) {
47178 ++ *offset = off & 0x7fffffff;
47179 ++ return 0;
47180 ++ }
47181 ++ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47182 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
47183 + *offset = off & 0x7fffffff;
47184 + return 0;
47185 +diff -urNp linux-3.1.1/fs/xfs/xfs_ioctl.c linux-3.1.1/fs/xfs/xfs_ioctl.c
47186 +--- linux-3.1.1/fs/xfs/xfs_ioctl.c 2011-11-11 15:19:27.000000000 -0500
47187 ++++ linux-3.1.1/fs/xfs/xfs_ioctl.c 2011-11-16 18:39:08.000000000 -0500
47188 +@@ -128,7 +128,7 @@ xfs_find_handle(
47189 + }
47190 +
47191 + error = -EFAULT;
47192 +- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
47193 ++ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
47194 + copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
47195 + goto out_put;
47196 +
47197 +diff -urNp linux-3.1.1/fs/xfs/xfs_iops.c linux-3.1.1/fs/xfs/xfs_iops.c
47198 +--- linux-3.1.1/fs/xfs/xfs_iops.c 2011-11-11 15:19:27.000000000 -0500
47199 ++++ linux-3.1.1/fs/xfs/xfs_iops.c 2011-11-16 18:39:08.000000000 -0500
47200 +@@ -446,7 +446,7 @@ xfs_vn_put_link(
47201 + struct nameidata *nd,
47202 + void *p)
47203 + {
47204 +- char *s = nd_get_link(nd);
47205 ++ const char *s = nd_get_link(nd);
47206 +
47207 + if (!IS_ERR(s))
47208 + kfree(s);
47209 +diff -urNp linux-3.1.1/fs/xfs/xfs_vnodeops.c linux-3.1.1/fs/xfs/xfs_vnodeops.c
47210 +--- linux-3.1.1/fs/xfs/xfs_vnodeops.c 2011-11-11 15:19:27.000000000 -0500
47211 ++++ linux-3.1.1/fs/xfs/xfs_vnodeops.c 2011-11-18 18:54:56.000000000 -0500
47212 +@@ -123,13 +123,17 @@ xfs_readlink(
47213 +
47214 + xfs_ilock(ip, XFS_ILOCK_SHARED);
47215 +
47216 +- ASSERT(S_ISLNK(ip->i_d.di_mode));
47217 +- ASSERT(ip->i_d.di_size <= MAXPATHLEN);
47218 +-
47219 + pathlen = ip->i_d.di_size;
47220 + if (!pathlen)
47221 + goto out;
47222 +
47223 ++ if (pathlen > MAXPATHLEN) {
47224 ++ xfs_alert(mp, "%s: inode (%llu) symlink length (%d) too long",
47225 ++ __func__, (unsigned long long)ip->i_ino, pathlen);
47226 ++ ASSERT(0);
47227 ++ return XFS_ERROR(EFSCORRUPTED);
47228 ++ }
47229 ++
47230 + if (ip->i_df.if_flags & XFS_IFINLINE) {
47231 + memcpy(link, ip->i_df.if_u1.if_data, pathlen);
47232 + link[pathlen] = '\0';
47233 +diff -urNp linux-3.1.1/grsecurity/gracl_alloc.c linux-3.1.1/grsecurity/gracl_alloc.c
47234 +--- linux-3.1.1/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
47235 ++++ linux-3.1.1/grsecurity/gracl_alloc.c 2011-11-16 18:40:31.000000000 -0500
47236 +@@ -0,0 +1,105 @@
47237 ++#include <linux/kernel.h>
47238 ++#include <linux/mm.h>
47239 ++#include <linux/slab.h>
47240 ++#include <linux/vmalloc.h>
47241 ++#include <linux/gracl.h>
47242 ++#include <linux/grsecurity.h>
47243 ++
47244 ++static unsigned long alloc_stack_next = 1;
47245 ++static unsigned long alloc_stack_size = 1;
47246 ++static void **alloc_stack;
47247 ++
47248 ++static __inline__ int
47249 ++alloc_pop(void)
47250 ++{
47251 ++ if (alloc_stack_next == 1)
47252 ++ return 0;
47253 ++
47254 ++ kfree(alloc_stack[alloc_stack_next - 2]);
47255 ++
47256 ++ alloc_stack_next--;
47257 ++
47258 ++ return 1;
47259 ++}
47260 ++
47261 ++static __inline__ int
47262 ++alloc_push(void *buf)
47263 ++{
47264 ++ if (alloc_stack_next >= alloc_stack_size)
47265 ++ return 1;
47266 ++
47267 ++ alloc_stack[alloc_stack_next - 1] = buf;
47268 ++
47269 ++ alloc_stack_next++;
47270 ++
47271 ++ return 0;
47272 ++}
47273 ++
47274 ++void *
47275 ++acl_alloc(unsigned long len)
47276 ++{
47277 ++ void *ret = NULL;
47278 ++
47279 ++ if (!len || len > PAGE_SIZE)
47280 ++ goto out;
47281 ++
47282 ++ ret = kmalloc(len, GFP_KERNEL);
47283 ++
47284 ++ if (ret) {
47285 ++ if (alloc_push(ret)) {
47286 ++ kfree(ret);
47287 ++ ret = NULL;
47288 ++ }
47289 ++ }
47290 ++
47291 ++out:
47292 ++ return ret;
47293 ++}
47294 ++
47295 ++void *
47296 ++acl_alloc_num(unsigned long num, unsigned long len)
47297 ++{
47298 ++ if (!len || (num > (PAGE_SIZE / len)))
47299 ++ return NULL;
47300 ++
47301 ++ return acl_alloc(num * len);
47302 ++}
47303 ++
47304 ++void
47305 ++acl_free_all(void)
47306 ++{
47307 ++ if (gr_acl_is_enabled() || !alloc_stack)
47308 ++ return;
47309 ++
47310 ++ while (alloc_pop()) ;
47311 ++
47312 ++ if (alloc_stack) {
47313 ++ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
47314 ++ kfree(alloc_stack);
47315 ++ else
47316 ++ vfree(alloc_stack);
47317 ++ }
47318 ++
47319 ++ alloc_stack = NULL;
47320 ++ alloc_stack_size = 1;
47321 ++ alloc_stack_next = 1;
47322 ++
47323 ++ return;
47324 ++}
47325 ++
47326 ++int
47327 ++acl_alloc_stack_init(unsigned long size)
47328 ++{
47329 ++ if ((size * sizeof (void *)) <= PAGE_SIZE)
47330 ++ alloc_stack =
47331 ++ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
47332 ++ else
47333 ++ alloc_stack = (void **) vmalloc(size * sizeof (void *));
47334 ++
47335 ++ alloc_stack_size = size;
47336 ++
47337 ++ if (!alloc_stack)
47338 ++ return 0;
47339 ++ else
47340 ++ return 1;
47341 ++}
47342 +diff -urNp linux-3.1.1/grsecurity/gracl.c linux-3.1.1/grsecurity/gracl.c
47343 +--- linux-3.1.1/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
47344 ++++ linux-3.1.1/grsecurity/gracl.c 2011-11-16 19:31:00.000000000 -0500
47345 +@@ -0,0 +1,4156 @@
47346 ++#include <linux/kernel.h>
47347 ++#include <linux/module.h>
47348 ++#include <linux/sched.h>
47349 ++#include <linux/mm.h>
47350 ++#include <linux/file.h>
47351 ++#include <linux/fs.h>
47352 ++#include <linux/namei.h>
47353 ++#include <linux/mount.h>
47354 ++#include <linux/tty.h>
47355 ++#include <linux/proc_fs.h>
47356 ++#include <linux/lglock.h>
47357 ++#include <linux/slab.h>
47358 ++#include <linux/vmalloc.h>
47359 ++#include <linux/types.h>
47360 ++#include <linux/sysctl.h>
47361 ++#include <linux/netdevice.h>
47362 ++#include <linux/ptrace.h>
47363 ++#include <linux/gracl.h>
47364 ++#include <linux/gralloc.h>
47365 ++#include <linux/grsecurity.h>
47366 ++#include <linux/grinternal.h>
47367 ++#include <linux/pid_namespace.h>
47368 ++#include <linux/fdtable.h>
47369 ++#include <linux/percpu.h>
47370 ++
47371 ++#include <asm/uaccess.h>
47372 ++#include <asm/errno.h>
47373 ++#include <asm/mman.h>
47374 ++
47375 ++static struct acl_role_db acl_role_set;
47376 ++static struct name_db name_set;
47377 ++static struct inodev_db inodev_set;
47378 ++
47379 ++/* for keeping track of userspace pointers used for subjects, so we
47380 ++ can share references in the kernel as well
47381 ++*/
47382 ++
47383 ++static struct path real_root;
47384 ++
47385 ++static struct acl_subj_map_db subj_map_set;
47386 ++
47387 ++static struct acl_role_label *default_role;
47388 ++
47389 ++static struct acl_role_label *role_list;
47390 ++
47391 ++static u16 acl_sp_role_value;
47392 ++
47393 ++extern char *gr_shared_page[4];
47394 ++static DEFINE_MUTEX(gr_dev_mutex);
47395 ++DEFINE_RWLOCK(gr_inode_lock);
47396 ++
47397 ++struct gr_arg *gr_usermode;
47398 ++
47399 ++static unsigned int gr_status __read_only = GR_STATUS_INIT;
47400 ++
47401 ++extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
47402 ++extern void gr_clear_learn_entries(void);
47403 ++
47404 ++#ifdef CONFIG_GRKERNSEC_RESLOG
47405 ++extern void gr_log_resource(const struct task_struct *task,
47406 ++ const int res, const unsigned long wanted, const int gt);
47407 ++#endif
47408 ++
47409 ++unsigned char *gr_system_salt;
47410 ++unsigned char *gr_system_sum;
47411 ++
47412 ++static struct sprole_pw **acl_special_roles = NULL;
47413 ++static __u16 num_sprole_pws = 0;
47414 ++
47415 ++static struct acl_role_label *kernel_role = NULL;
47416 ++
47417 ++static unsigned int gr_auth_attempts = 0;
47418 ++static unsigned long gr_auth_expires = 0UL;
47419 ++
47420 ++#ifdef CONFIG_NET
47421 ++extern struct vfsmount *sock_mnt;
47422 ++#endif
47423 ++
47424 ++extern struct vfsmount *pipe_mnt;
47425 ++extern struct vfsmount *shm_mnt;
47426 ++#ifdef CONFIG_HUGETLBFS
47427 ++extern struct vfsmount *hugetlbfs_vfsmount;
47428 ++#endif
47429 ++
47430 ++static struct acl_object_label *fakefs_obj_rw;
47431 ++static struct acl_object_label *fakefs_obj_rwx;
47432 ++
47433 ++extern int gr_init_uidset(void);
47434 ++extern void gr_free_uidset(void);
47435 ++extern void gr_remove_uid(uid_t uid);
47436 ++extern int gr_find_uid(uid_t uid);
47437 ++
47438 ++DECLARE_BRLOCK(vfsmount_lock);
47439 ++
47440 ++__inline__ int
47441 ++gr_acl_is_enabled(void)
47442 ++{
47443 ++ return (gr_status & GR_READY);
47444 ++}
47445 ++
47446 ++#ifdef CONFIG_BTRFS_FS
47447 ++extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
47448 ++extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
47449 ++#endif
47450 ++
47451 ++static inline dev_t __get_dev(const struct dentry *dentry)
47452 ++{
47453 ++#ifdef CONFIG_BTRFS_FS
47454 ++ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
47455 ++ return get_btrfs_dev_from_inode(dentry->d_inode);
47456 ++ else
47457 ++#endif
47458 ++ return dentry->d_inode->i_sb->s_dev;
47459 ++}
47460 ++
47461 ++dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47462 ++{
47463 ++ return __get_dev(dentry);
47464 ++}
47465 ++
47466 ++static char gr_task_roletype_to_char(struct task_struct *task)
47467 ++{
47468 ++ switch (task->role->roletype &
47469 ++ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
47470 ++ GR_ROLE_SPECIAL)) {
47471 ++ case GR_ROLE_DEFAULT:
47472 ++ return 'D';
47473 ++ case GR_ROLE_USER:
47474 ++ return 'U';
47475 ++ case GR_ROLE_GROUP:
47476 ++ return 'G';
47477 ++ case GR_ROLE_SPECIAL:
47478 ++ return 'S';
47479 ++ }
47480 ++
47481 ++ return 'X';
47482 ++}
47483 ++
47484 ++char gr_roletype_to_char(void)
47485 ++{
47486 ++ return gr_task_roletype_to_char(current);
47487 ++}
47488 ++
47489 ++__inline__ int
47490 ++gr_acl_tpe_check(void)
47491 ++{
47492 ++ if (unlikely(!(gr_status & GR_READY)))
47493 ++ return 0;
47494 ++ if (current->role->roletype & GR_ROLE_TPE)
47495 ++ return 1;
47496 ++ else
47497 ++ return 0;
47498 ++}
47499 ++
47500 ++int
47501 ++gr_handle_rawio(const struct inode *inode)
47502 ++{
47503 ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47504 ++ if (inode && S_ISBLK(inode->i_mode) &&
47505 ++ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47506 ++ !capable(CAP_SYS_RAWIO))
47507 ++ return 1;
47508 ++#endif
47509 ++ return 0;
47510 ++}
47511 ++
47512 ++static int
47513 ++gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
47514 ++{
47515 ++ if (likely(lena != lenb))
47516 ++ return 0;
47517 ++
47518 ++ return !memcmp(a, b, lena);
47519 ++}
47520 ++
47521 ++static int prepend(char **buffer, int *buflen, const char *str, int namelen)
47522 ++{
47523 ++ *buflen -= namelen;
47524 ++ if (*buflen < 0)
47525 ++ return -ENAMETOOLONG;
47526 ++ *buffer -= namelen;
47527 ++ memcpy(*buffer, str, namelen);
47528 ++ return 0;
47529 ++}
47530 ++
47531 ++static int prepend_name(char **buffer, int *buflen, struct qstr *name)
47532 ++{
47533 ++ return prepend(buffer, buflen, name->name, name->len);
47534 ++}
47535 ++
47536 ++static int prepend_path(const struct path *path, struct path *root,
47537 ++ char **buffer, int *buflen)
47538 ++{
47539 ++ struct dentry *dentry = path->dentry;
47540 ++ struct vfsmount *vfsmnt = path->mnt;
47541 ++ bool slash = false;
47542 ++ int error = 0;
47543 ++
47544 ++ while (dentry != root->dentry || vfsmnt != root->mnt) {
47545 ++ struct dentry * parent;
47546 ++
47547 ++ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
47548 ++ /* Global root? */
47549 ++ if (vfsmnt->mnt_parent == vfsmnt) {
47550 ++ goto out;
47551 ++ }
47552 ++ dentry = vfsmnt->mnt_mountpoint;
47553 ++ vfsmnt = vfsmnt->mnt_parent;
47554 ++ continue;
47555 ++ }
47556 ++ parent = dentry->d_parent;
47557 ++ prefetch(parent);
47558 ++ spin_lock(&dentry->d_lock);
47559 ++ error = prepend_name(buffer, buflen, &dentry->d_name);
47560 ++ spin_unlock(&dentry->d_lock);
47561 ++ if (!error)
47562 ++ error = prepend(buffer, buflen, "/", 1);
47563 ++ if (error)
47564 ++ break;
47565 ++
47566 ++ slash = true;
47567 ++ dentry = parent;
47568 ++ }
47569 ++
47570 ++out:
47571 ++ if (!error && !slash)
47572 ++ error = prepend(buffer, buflen, "/", 1);
47573 ++
47574 ++ return error;
47575 ++}
47576 ++
47577 ++/* this must be called with vfsmount_lock and rename_lock held */
47578 ++
47579 ++static char *__our_d_path(const struct path *path, struct path *root,
47580 ++ char *buf, int buflen)
47581 ++{
47582 ++ char *res = buf + buflen;
47583 ++ int error;
47584 ++
47585 ++ prepend(&res, &buflen, "\0", 1);
47586 ++ error = prepend_path(path, root, &res, &buflen);
47587 ++ if (error)
47588 ++ return ERR_PTR(error);
47589 ++
47590 ++ return res;
47591 ++}
47592 ++
47593 ++static char *
47594 ++gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
47595 ++{
47596 ++ char *retval;
47597 ++
47598 ++ retval = __our_d_path(path, root, buf, buflen);
47599 ++ if (unlikely(IS_ERR(retval)))
47600 ++ retval = strcpy(buf, "<path too long>");
47601 ++ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
47602 ++ retval[1] = '\0';
47603 ++
47604 ++ return retval;
47605 ++}
47606 ++
47607 ++static char *
47608 ++__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47609 ++ char *buf, int buflen)
47610 ++{
47611 ++ struct path path;
47612 ++ char *res;
47613 ++
47614 ++ path.dentry = (struct dentry *)dentry;
47615 ++ path.mnt = (struct vfsmount *)vfsmnt;
47616 ++
47617 ++ /* we can use real_root.dentry, real_root.mnt, because this is only called
47618 ++ by the RBAC system */
47619 ++ res = gen_full_path(&path, &real_root, buf, buflen);
47620 ++
47621 ++ return res;
47622 ++}
47623 ++
47624 ++static char *
47625 ++d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47626 ++ char *buf, int buflen)
47627 ++{
47628 ++ char *res;
47629 ++ struct path path;
47630 ++ struct path root;
47631 ++ struct task_struct *reaper = &init_task;
47632 ++
47633 ++ path.dentry = (struct dentry *)dentry;
47634 ++ path.mnt = (struct vfsmount *)vfsmnt;
47635 ++
47636 ++ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
47637 ++ get_fs_root(reaper->fs, &root);
47638 ++
47639 ++ write_seqlock(&rename_lock);
47640 ++ br_read_lock(vfsmount_lock);
47641 ++ res = gen_full_path(&path, &root, buf, buflen);
47642 ++ br_read_unlock(vfsmount_lock);
47643 ++ write_sequnlock(&rename_lock);
47644 ++
47645 ++ path_put(&root);
47646 ++ return res;
47647 ++}
47648 ++
47649 ++static char *
47650 ++gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47651 ++{
47652 ++ char *ret;
47653 ++ write_seqlock(&rename_lock);
47654 ++ br_read_lock(vfsmount_lock);
47655 ++ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47656 ++ PAGE_SIZE);
47657 ++ br_read_unlock(vfsmount_lock);
47658 ++ write_sequnlock(&rename_lock);
47659 ++ return ret;
47660 ++}
47661 ++
47662 ++static char *
47663 ++gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47664 ++{
47665 ++ char *ret;
47666 ++ char *buf;
47667 ++ int buflen;
47668 ++
47669 ++ write_seqlock(&rename_lock);
47670 ++ br_read_lock(vfsmount_lock);
47671 ++ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47672 ++ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
47673 ++ buflen = (int)(ret - buf);
47674 ++ if (buflen >= 5)
47675 ++ prepend(&ret, &buflen, "/proc", 5);
47676 ++ else
47677 ++ ret = strcpy(buf, "<path too long>");
47678 ++ br_read_unlock(vfsmount_lock);
47679 ++ write_sequnlock(&rename_lock);
47680 ++ return ret;
47681 ++}
47682 ++
47683 ++char *
47684 ++gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
47685 ++{
47686 ++ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47687 ++ PAGE_SIZE);
47688 ++}
47689 ++
47690 ++char *
47691 ++gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
47692 ++{
47693 ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47694 ++ PAGE_SIZE);
47695 ++}
47696 ++
47697 ++char *
47698 ++gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
47699 ++{
47700 ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
47701 ++ PAGE_SIZE);
47702 ++}
47703 ++
47704 ++char *
47705 ++gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
47706 ++{
47707 ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
47708 ++ PAGE_SIZE);
47709 ++}
47710 ++
47711 ++char *
47712 ++gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
47713 ++{
47714 ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
47715 ++ PAGE_SIZE);
47716 ++}
47717 ++
47718 ++__inline__ __u32
47719 ++to_gr_audit(const __u32 reqmode)
47720 ++{
47721 ++ /* masks off auditable permission flags, then shifts them to create
47722 ++ auditing flags, and adds the special case of append auditing if
47723 ++ we're requesting write */
47724 ++ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47725 ++}
47726 ++
47727 ++struct acl_subject_label *
47728 ++lookup_subject_map(const struct acl_subject_label *userp)
47729 ++{
47730 ++ unsigned int index = shash(userp, subj_map_set.s_size);
47731 ++ struct subject_map *match;
47732 ++
47733 ++ match = subj_map_set.s_hash[index];
47734 ++
47735 ++ while (match && match->user != userp)
47736 ++ match = match->next;
47737 ++
47738 ++ if (match != NULL)
47739 ++ return match->kernel;
47740 ++ else
47741 ++ return NULL;
47742 ++}
47743 ++
47744 ++static void
47745 ++insert_subj_map_entry(struct subject_map *subjmap)
47746 ++{
47747 ++ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47748 ++ struct subject_map **curr;
47749 ++
47750 ++ subjmap->prev = NULL;
47751 ++
47752 ++ curr = &subj_map_set.s_hash[index];
47753 ++ if (*curr != NULL)
47754 ++ (*curr)->prev = subjmap;
47755 ++
47756 ++ subjmap->next = *curr;
47757 ++ *curr = subjmap;
47758 ++
47759 ++ return;
47760 ++}
47761 ++
47762 ++static struct acl_role_label *
47763 ++lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47764 ++ const gid_t gid)
47765 ++{
47766 ++ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47767 ++ struct acl_role_label *match;
47768 ++ struct role_allowed_ip *ipp;
47769 ++ unsigned int x;
47770 ++ u32 curr_ip = task->signal->curr_ip;
47771 ++
47772 ++ task->signal->saved_ip = curr_ip;
47773 ++
47774 ++ match = acl_role_set.r_hash[index];
47775 ++
47776 ++ while (match) {
47777 ++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47778 ++ for (x = 0; x < match->domain_child_num; x++) {
47779 ++ if (match->domain_children[x] == uid)
47780 ++ goto found;
47781 ++ }
47782 ++ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47783 ++ break;
47784 ++ match = match->next;
47785 ++ }
47786 ++found:
47787 ++ if (match == NULL) {
47788 ++ try_group:
47789 ++ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47790 ++ match = acl_role_set.r_hash[index];
47791 ++
47792 ++ while (match) {
47793 ++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47794 ++ for (x = 0; x < match->domain_child_num; x++) {
47795 ++ if (match->domain_children[x] == gid)
47796 ++ goto found2;
47797 ++ }
47798 ++ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47799 ++ break;
47800 ++ match = match->next;
47801 ++ }
47802 ++found2:
47803 ++ if (match == NULL)
47804 ++ match = default_role;
47805 ++ if (match->allowed_ips == NULL)
47806 ++ return match;
47807 ++ else {
47808 ++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47809 ++ if (likely
47810 ++ ((ntohl(curr_ip) & ipp->netmask) ==
47811 ++ (ntohl(ipp->addr) & ipp->netmask)))
47812 ++ return match;
47813 ++ }
47814 ++ match = default_role;
47815 ++ }
47816 ++ } else if (match->allowed_ips == NULL) {
47817 ++ return match;
47818 ++ } else {
47819 ++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47820 ++ if (likely
47821 ++ ((ntohl(curr_ip) & ipp->netmask) ==
47822 ++ (ntohl(ipp->addr) & ipp->netmask)))
47823 ++ return match;
47824 ++ }
47825 ++ goto try_group;
47826 ++ }
47827 ++
47828 ++ return match;
47829 ++}
47830 ++
47831 ++struct acl_subject_label *
47832 ++lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47833 ++ const struct acl_role_label *role)
47834 ++{
47835 ++ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47836 ++ struct acl_subject_label *match;
47837 ++
47838 ++ match = role->subj_hash[index];
47839 ++
47840 ++ while (match && (match->inode != ino || match->device != dev ||
47841 ++ (match->mode & GR_DELETED))) {
47842 ++ match = match->next;
47843 ++ }
47844 ++
47845 ++ if (match && !(match->mode & GR_DELETED))
47846 ++ return match;
47847 ++ else
47848 ++ return NULL;
47849 ++}
47850 ++
47851 ++struct acl_subject_label *
47852 ++lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47853 ++ const struct acl_role_label *role)
47854 ++{
47855 ++ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47856 ++ struct acl_subject_label *match;
47857 ++
47858 ++ match = role->subj_hash[index];
47859 ++
47860 ++ while (match && (match->inode != ino || match->device != dev ||
47861 ++ !(match->mode & GR_DELETED))) {
47862 ++ match = match->next;
47863 ++ }
47864 ++
47865 ++ if (match && (match->mode & GR_DELETED))
47866 ++ return match;
47867 ++ else
47868 ++ return NULL;
47869 ++}
47870 ++
47871 ++static struct acl_object_label *
47872 ++lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47873 ++ const struct acl_subject_label *subj)
47874 ++{
47875 ++ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47876 ++ struct acl_object_label *match;
47877 ++
47878 ++ match = subj->obj_hash[index];
47879 ++
47880 ++ while (match && (match->inode != ino || match->device != dev ||
47881 ++ (match->mode & GR_DELETED))) {
47882 ++ match = match->next;
47883 ++ }
47884 ++
47885 ++ if (match && !(match->mode & GR_DELETED))
47886 ++ return match;
47887 ++ else
47888 ++ return NULL;
47889 ++}
47890 ++
47891 ++static struct acl_object_label *
47892 ++lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47893 ++ const struct acl_subject_label *subj)
47894 ++{
47895 ++ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47896 ++ struct acl_object_label *match;
47897 ++
47898 ++ match = subj->obj_hash[index];
47899 ++
47900 ++ while (match && (match->inode != ino || match->device != dev ||
47901 ++ !(match->mode & GR_DELETED))) {
47902 ++ match = match->next;
47903 ++ }
47904 ++
47905 ++ if (match && (match->mode & GR_DELETED))
47906 ++ return match;
47907 ++
47908 ++ match = subj->obj_hash[index];
47909 ++
47910 ++ while (match && (match->inode != ino || match->device != dev ||
47911 ++ (match->mode & GR_DELETED))) {
47912 ++ match = match->next;
47913 ++ }
47914 ++
47915 ++ if (match && !(match->mode & GR_DELETED))
47916 ++ return match;
47917 ++ else
47918 ++ return NULL;
47919 ++}
47920 ++
47921 ++static struct name_entry *
47922 ++lookup_name_entry(const char *name)
47923 ++{
47924 ++ unsigned int len = strlen(name);
47925 ++ unsigned int key = full_name_hash(name, len);
47926 ++ unsigned int index = key % name_set.n_size;
47927 ++ struct name_entry *match;
47928 ++
47929 ++ match = name_set.n_hash[index];
47930 ++
47931 ++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47932 ++ match = match->next;
47933 ++
47934 ++ return match;
47935 ++}
47936 ++
47937 ++static struct name_entry *
47938 ++lookup_name_entry_create(const char *name)
47939 ++{
47940 ++ unsigned int len = strlen(name);
47941 ++ unsigned int key = full_name_hash(name, len);
47942 ++ unsigned int index = key % name_set.n_size;
47943 ++ struct name_entry *match;
47944 ++
47945 ++ match = name_set.n_hash[index];
47946 ++
47947 ++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47948 ++ !match->deleted))
47949 ++ match = match->next;
47950 ++
47951 ++ if (match && match->deleted)
47952 ++ return match;
47953 ++
47954 ++ match = name_set.n_hash[index];
47955 ++
47956 ++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47957 ++ match->deleted))
47958 ++ match = match->next;
47959 ++
47960 ++ if (match && !match->deleted)
47961 ++ return match;
47962 ++ else
47963 ++ return NULL;
47964 ++}
47965 ++
47966 ++static struct inodev_entry *
47967 ++lookup_inodev_entry(const ino_t ino, const dev_t dev)
47968 ++{
47969 ++ unsigned int index = fhash(ino, dev, inodev_set.i_size);
47970 ++ struct inodev_entry *match;
47971 ++
47972 ++ match = inodev_set.i_hash[index];
47973 ++
47974 ++ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47975 ++ match = match->next;
47976 ++
47977 ++ return match;
47978 ++}
47979 ++
47980 ++static void
47981 ++insert_inodev_entry(struct inodev_entry *entry)
47982 ++{
47983 ++ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47984 ++ inodev_set.i_size);
47985 ++ struct inodev_entry **curr;
47986 ++
47987 ++ entry->prev = NULL;
47988 ++
47989 ++ curr = &inodev_set.i_hash[index];
47990 ++ if (*curr != NULL)
47991 ++ (*curr)->prev = entry;
47992 ++
47993 ++ entry->next = *curr;
47994 ++ *curr = entry;
47995 ++
47996 ++ return;
47997 ++}
47998 ++
47999 ++static void
48000 ++__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
48001 ++{
48002 ++ unsigned int index =
48003 ++ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
48004 ++ struct acl_role_label **curr;
48005 ++ struct acl_role_label *tmp;
48006 ++
48007 ++ curr = &acl_role_set.r_hash[index];
48008 ++
48009 ++ /* if role was already inserted due to domains and already has
48010 ++ a role in the same bucket as it attached, then we need to
48011 ++ combine these two buckets
48012 ++ */
48013 ++ if (role->next) {
48014 ++ tmp = role->next;
48015 ++ while (tmp->next)
48016 ++ tmp = tmp->next;
48017 ++ tmp->next = *curr;
48018 ++ } else
48019 ++ role->next = *curr;
48020 ++ *curr = role;
48021 ++
48022 ++ return;
48023 ++}
48024 ++
48025 ++static void
48026 ++insert_acl_role_label(struct acl_role_label *role)
48027 ++{
48028 ++ int i;
48029 ++
48030 ++ if (role_list == NULL) {
48031 ++ role_list = role;
48032 ++ role->prev = NULL;
48033 ++ } else {
48034 ++ role->prev = role_list;
48035 ++ role_list = role;
48036 ++ }
48037 ++
48038 ++ /* used for hash chains */
48039 ++ role->next = NULL;
48040 ++
48041 ++ if (role->roletype & GR_ROLE_DOMAIN) {
48042 ++ for (i = 0; i < role->domain_child_num; i++)
48043 ++ __insert_acl_role_label(role, role->domain_children[i]);
48044 ++ } else
48045 ++ __insert_acl_role_label(role, role->uidgid);
48046 ++}
48047 ++
48048 ++static int
48049 ++insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
48050 ++{
48051 ++ struct name_entry **curr, *nentry;
48052 ++ struct inodev_entry *ientry;
48053 ++ unsigned int len = strlen(name);
48054 ++ unsigned int key = full_name_hash(name, len);
48055 ++ unsigned int index = key % name_set.n_size;
48056 ++
48057 ++ curr = &name_set.n_hash[index];
48058 ++
48059 ++ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
48060 ++ curr = &((*curr)->next);
48061 ++
48062 ++ if (*curr != NULL)
48063 ++ return 1;
48064 ++
48065 ++ nentry = acl_alloc(sizeof (struct name_entry));
48066 ++ if (nentry == NULL)
48067 ++ return 0;
48068 ++ ientry = acl_alloc(sizeof (struct inodev_entry));
48069 ++ if (ientry == NULL)
48070 ++ return 0;
48071 ++ ientry->nentry = nentry;
48072 ++
48073 ++ nentry->key = key;
48074 ++ nentry->name = name;
48075 ++ nentry->inode = inode;
48076 ++ nentry->device = device;
48077 ++ nentry->len = len;
48078 ++ nentry->deleted = deleted;
48079 ++
48080 ++ nentry->prev = NULL;
48081 ++ curr = &name_set.n_hash[index];
48082 ++ if (*curr != NULL)
48083 ++ (*curr)->prev = nentry;
48084 ++ nentry->next = *curr;
48085 ++ *curr = nentry;
48086 ++
48087 ++ /* insert us into the table searchable by inode/dev */
48088 ++ insert_inodev_entry(ientry);
48089 ++
48090 ++ return 1;
48091 ++}
48092 ++
48093 ++static void
48094 ++insert_acl_obj_label(struct acl_object_label *obj,
48095 ++ struct acl_subject_label *subj)
48096 ++{
48097 ++ unsigned int index =
48098 ++ fhash(obj->inode, obj->device, subj->obj_hash_size);
48099 ++ struct acl_object_label **curr;
48100 ++
48101 ++
48102 ++ obj->prev = NULL;
48103 ++
48104 ++ curr = &subj->obj_hash[index];
48105 ++ if (*curr != NULL)
48106 ++ (*curr)->prev = obj;
48107 ++
48108 ++ obj->next = *curr;
48109 ++ *curr = obj;
48110 ++
48111 ++ return;
48112 ++}
48113 ++
48114 ++static void
48115 ++insert_acl_subj_label(struct acl_subject_label *obj,
48116 ++ struct acl_role_label *role)
48117 ++{
48118 ++ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
48119 ++ struct acl_subject_label **curr;
48120 ++
48121 ++ obj->prev = NULL;
48122 ++
48123 ++ curr = &role->subj_hash[index];
48124 ++ if (*curr != NULL)
48125 ++ (*curr)->prev = obj;
48126 ++
48127 ++ obj->next = *curr;
48128 ++ *curr = obj;
48129 ++
48130 ++ return;
48131 ++}
48132 ++
48133 ++/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
48134 ++
48135 ++static void *
48136 ++create_table(__u32 * len, int elementsize)
48137 ++{
48138 ++ unsigned int table_sizes[] = {
48139 ++ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
48140 ++ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
48141 ++ 4194301, 8388593, 16777213, 33554393, 67108859
48142 ++ };
48143 ++ void *newtable = NULL;
48144 ++ unsigned int pwr = 0;
48145 ++
48146 ++ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
48147 ++ table_sizes[pwr] <= *len)
48148 ++ pwr++;
48149 ++
48150 ++ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
48151 ++ return newtable;
48152 ++
48153 ++ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
48154 ++ newtable =
48155 ++ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
48156 ++ else
48157 ++ newtable = vmalloc(table_sizes[pwr] * elementsize);
48158 ++
48159 ++ *len = table_sizes[pwr];
48160 ++
48161 ++ return newtable;
48162 ++}
48163 ++
48164 ++static int
48165 ++init_variables(const struct gr_arg *arg)
48166 ++{
48167 ++ struct task_struct *reaper = &init_task;
48168 ++ unsigned int stacksize;
48169 ++
48170 ++ subj_map_set.s_size = arg->role_db.num_subjects;
48171 ++ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
48172 ++ name_set.n_size = arg->role_db.num_objects;
48173 ++ inodev_set.i_size = arg->role_db.num_objects;
48174 ++
48175 ++ if (!subj_map_set.s_size || !acl_role_set.r_size ||
48176 ++ !name_set.n_size || !inodev_set.i_size)
48177 ++ return 1;
48178 ++
48179 ++ if (!gr_init_uidset())
48180 ++ return 1;
48181 ++
48182 ++ /* set up the stack that holds allocation info */
48183 ++
48184 ++ stacksize = arg->role_db.num_pointers + 5;
48185 ++
48186 ++ if (!acl_alloc_stack_init(stacksize))
48187 ++ return 1;
48188 ++
48189 ++ /* grab reference for the real root dentry and vfsmount */
48190 ++ get_fs_root(reaper->fs, &real_root);
48191 ++
48192 ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48193 ++ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
48194 ++#endif
48195 ++
48196 ++ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
48197 ++ if (fakefs_obj_rw == NULL)
48198 ++ return 1;
48199 ++ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
48200 ++
48201 ++ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
48202 ++ if (fakefs_obj_rwx == NULL)
48203 ++ return 1;
48204 ++ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
48205 ++
48206 ++ subj_map_set.s_hash =
48207 ++ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
48208 ++ acl_role_set.r_hash =
48209 ++ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
48210 ++ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
48211 ++ inodev_set.i_hash =
48212 ++ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
48213 ++
48214 ++ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
48215 ++ !name_set.n_hash || !inodev_set.i_hash)
48216 ++ return 1;
48217 ++
48218 ++ memset(subj_map_set.s_hash, 0,
48219 ++ sizeof(struct subject_map *) * subj_map_set.s_size);
48220 ++ memset(acl_role_set.r_hash, 0,
48221 ++ sizeof (struct acl_role_label *) * acl_role_set.r_size);
48222 ++ memset(name_set.n_hash, 0,
48223 ++ sizeof (struct name_entry *) * name_set.n_size);
48224 ++ memset(inodev_set.i_hash, 0,
48225 ++ sizeof (struct inodev_entry *) * inodev_set.i_size);
48226 ++
48227 ++ return 0;
48228 ++}
48229 ++
48230 ++/* free information not needed after startup
48231 ++ currently contains user->kernel pointer mappings for subjects
48232 ++*/
48233 ++
48234 ++static void
48235 ++free_init_variables(void)
48236 ++{
48237 ++ __u32 i;
48238 ++
48239 ++ if (subj_map_set.s_hash) {
48240 ++ for (i = 0; i < subj_map_set.s_size; i++) {
48241 ++ if (subj_map_set.s_hash[i]) {
48242 ++ kfree(subj_map_set.s_hash[i]);
48243 ++ subj_map_set.s_hash[i] = NULL;
48244 ++ }
48245 ++ }
48246 ++
48247 ++ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
48248 ++ PAGE_SIZE)
48249 ++ kfree(subj_map_set.s_hash);
48250 ++ else
48251 ++ vfree(subj_map_set.s_hash);
48252 ++ }
48253 ++
48254 ++ return;
48255 ++}
48256 ++
48257 ++static void
48258 ++free_variables(void)
48259 ++{
48260 ++ struct acl_subject_label *s;
48261 ++ struct acl_role_label *r;
48262 ++ struct task_struct *task, *task2;
48263 ++ unsigned int x;
48264 ++
48265 ++ gr_clear_learn_entries();
48266 ++
48267 ++ read_lock(&tasklist_lock);
48268 ++ do_each_thread(task2, task) {
48269 ++ task->acl_sp_role = 0;
48270 ++ task->acl_role_id = 0;
48271 ++ task->acl = NULL;
48272 ++ task->role = NULL;
48273 ++ } while_each_thread(task2, task);
48274 ++ read_unlock(&tasklist_lock);
48275 ++
48276 ++ /* release the reference to the real root dentry and vfsmount */
48277 ++ path_put(&real_root);
48278 ++
48279 ++ /* free all object hash tables */
48280 ++
48281 ++ FOR_EACH_ROLE_START(r)
48282 ++ if (r->subj_hash == NULL)
48283 ++ goto next_role;
48284 ++ FOR_EACH_SUBJECT_START(r, s, x)
48285 ++ if (s->obj_hash == NULL)
48286 ++ break;
48287 ++ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48288 ++ kfree(s->obj_hash);
48289 ++ else
48290 ++ vfree(s->obj_hash);
48291 ++ FOR_EACH_SUBJECT_END(s, x)
48292 ++ FOR_EACH_NESTED_SUBJECT_START(r, s)
48293 ++ if (s->obj_hash == NULL)
48294 ++ break;
48295 ++ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48296 ++ kfree(s->obj_hash);
48297 ++ else
48298 ++ vfree(s->obj_hash);
48299 ++ FOR_EACH_NESTED_SUBJECT_END(s)
48300 ++ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
48301 ++ kfree(r->subj_hash);
48302 ++ else
48303 ++ vfree(r->subj_hash);
48304 ++ r->subj_hash = NULL;
48305 ++next_role:
48306 ++ FOR_EACH_ROLE_END(r)
48307 ++
48308 ++ acl_free_all();
48309 ++
48310 ++ if (acl_role_set.r_hash) {
48311 ++ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
48312 ++ PAGE_SIZE)
48313 ++ kfree(acl_role_set.r_hash);
48314 ++ else
48315 ++ vfree(acl_role_set.r_hash);
48316 ++ }
48317 ++ if (name_set.n_hash) {
48318 ++ if ((name_set.n_size * sizeof (struct name_entry *)) <=
48319 ++ PAGE_SIZE)
48320 ++ kfree(name_set.n_hash);
48321 ++ else
48322 ++ vfree(name_set.n_hash);
48323 ++ }
48324 ++
48325 ++ if (inodev_set.i_hash) {
48326 ++ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
48327 ++ PAGE_SIZE)
48328 ++ kfree(inodev_set.i_hash);
48329 ++ else
48330 ++ vfree(inodev_set.i_hash);
48331 ++ }
48332 ++
48333 ++ gr_free_uidset();
48334 ++
48335 ++ memset(&name_set, 0, sizeof (struct name_db));
48336 ++ memset(&inodev_set, 0, sizeof (struct inodev_db));
48337 ++ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
48338 ++ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
48339 ++
48340 ++ default_role = NULL;
48341 ++ role_list = NULL;
48342 ++
48343 ++ return;
48344 ++}
48345 ++
48346 ++static __u32
48347 ++count_user_objs(struct acl_object_label *userp)
48348 ++{
48349 ++ struct acl_object_label o_tmp;
48350 ++ __u32 num = 0;
48351 ++
48352 ++ while (userp) {
48353 ++ if (copy_from_user(&o_tmp, userp,
48354 ++ sizeof (struct acl_object_label)))
48355 ++ break;
48356 ++
48357 ++ userp = o_tmp.prev;
48358 ++ num++;
48359 ++ }
48360 ++
48361 ++ return num;
48362 ++}
48363 ++
48364 ++static struct acl_subject_label *
48365 ++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
48366 ++
48367 ++static int
48368 ++copy_user_glob(struct acl_object_label *obj)
48369 ++{
48370 ++ struct acl_object_label *g_tmp, **guser;
48371 ++ unsigned int len;
48372 ++ char *tmp;
48373 ++
48374 ++ if (obj->globbed == NULL)
48375 ++ return 0;
48376 ++
48377 ++ guser = &obj->globbed;
48378 ++ while (*guser) {
48379 ++ g_tmp = (struct acl_object_label *)
48380 ++ acl_alloc(sizeof (struct acl_object_label));
48381 ++ if (g_tmp == NULL)
48382 ++ return -ENOMEM;
48383 ++
48384 ++ if (copy_from_user(g_tmp, *guser,
48385 ++ sizeof (struct acl_object_label)))
48386 ++ return -EFAULT;
48387 ++
48388 ++ len = strnlen_user(g_tmp->filename, PATH_MAX);
48389 ++
48390 ++ if (!len || len >= PATH_MAX)
48391 ++ return -EINVAL;
48392 ++
48393 ++ if ((tmp = (char *) acl_alloc(len)) == NULL)
48394 ++ return -ENOMEM;
48395 ++
48396 ++ if (copy_from_user(tmp, g_tmp->filename, len))
48397 ++ return -EFAULT;
48398 ++ tmp[len-1] = '\0';
48399 ++ g_tmp->filename = tmp;
48400 ++
48401 ++ *guser = g_tmp;
48402 ++ guser = &(g_tmp->next);
48403 ++ }
48404 ++
48405 ++ return 0;
48406 ++}
48407 ++
48408 ++static int
48409 ++copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
48410 ++ struct acl_role_label *role)
48411 ++{
48412 ++ struct acl_object_label *o_tmp;
48413 ++ unsigned int len;
48414 ++ int ret;
48415 ++ char *tmp;
48416 ++
48417 ++ while (userp) {
48418 ++ if ((o_tmp = (struct acl_object_label *)
48419 ++ acl_alloc(sizeof (struct acl_object_label))) == NULL)
48420 ++ return -ENOMEM;
48421 ++
48422 ++ if (copy_from_user(o_tmp, userp,
48423 ++ sizeof (struct acl_object_label)))
48424 ++ return -EFAULT;
48425 ++
48426 ++ userp = o_tmp->prev;
48427 ++
48428 ++ len = strnlen_user(o_tmp->filename, PATH_MAX);
48429 ++
48430 ++ if (!len || len >= PATH_MAX)
48431 ++ return -EINVAL;
48432 ++
48433 ++ if ((tmp = (char *) acl_alloc(len)) == NULL)
48434 ++ return -ENOMEM;
48435 ++
48436 ++ if (copy_from_user(tmp, o_tmp->filename, len))
48437 ++ return -EFAULT;
48438 ++ tmp[len-1] = '\0';
48439 ++ o_tmp->filename = tmp;
48440 ++
48441 ++ insert_acl_obj_label(o_tmp, subj);
48442 ++ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
48443 ++ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
48444 ++ return -ENOMEM;
48445 ++
48446 ++ ret = copy_user_glob(o_tmp);
48447 ++ if (ret)
48448 ++ return ret;
48449 ++
48450 ++ if (o_tmp->nested) {
48451 ++ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
48452 ++ if (IS_ERR(o_tmp->nested))
48453 ++ return PTR_ERR(o_tmp->nested);
48454 ++
48455 ++ /* insert into nested subject list */
48456 ++ o_tmp->nested->next = role->hash->first;
48457 ++ role->hash->first = o_tmp->nested;
48458 ++ }
48459 ++ }
48460 ++
48461 ++ return 0;
48462 ++}
48463 ++
48464 ++static __u32
48465 ++count_user_subjs(struct acl_subject_label *userp)
48466 ++{
48467 ++ struct acl_subject_label s_tmp;
48468 ++ __u32 num = 0;
48469 ++
48470 ++ while (userp) {
48471 ++ if (copy_from_user(&s_tmp, userp,
48472 ++ sizeof (struct acl_subject_label)))
48473 ++ break;
48474 ++
48475 ++ userp = s_tmp.prev;
48476 ++ /* do not count nested subjects against this count, since
48477 ++ they are not included in the hash table, but are
48478 ++ attached to objects. We have already counted
48479 ++ the subjects in userspace for the allocation
48480 ++ stack
48481 ++ */
48482 ++ if (!(s_tmp.mode & GR_NESTED))
48483 ++ num++;
48484 ++ }
48485 ++
48486 ++ return num;
48487 ++}
48488 ++
48489 ++static int
48490 ++copy_user_allowedips(struct acl_role_label *rolep)
48491 ++{
48492 ++ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
48493 ++
48494 ++ ruserip = rolep->allowed_ips;
48495 ++
48496 ++ while (ruserip) {
48497 ++ rlast = rtmp;
48498 ++
48499 ++ if ((rtmp = (struct role_allowed_ip *)
48500 ++ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
48501 ++ return -ENOMEM;
48502 ++
48503 ++ if (copy_from_user(rtmp, ruserip,
48504 ++ sizeof (struct role_allowed_ip)))
48505 ++ return -EFAULT;
48506 ++
48507 ++ ruserip = rtmp->prev;
48508 ++
48509 ++ if (!rlast) {
48510 ++ rtmp->prev = NULL;
48511 ++ rolep->allowed_ips = rtmp;
48512 ++ } else {
48513 ++ rlast->next = rtmp;
48514 ++ rtmp->prev = rlast;
48515 ++ }
48516 ++
48517 ++ if (!ruserip)
48518 ++ rtmp->next = NULL;
48519 ++ }
48520 ++
48521 ++ return 0;
48522 ++}
48523 ++
48524 ++static int
48525 ++copy_user_transitions(struct acl_role_label *rolep)
48526 ++{
48527 ++ struct role_transition *rusertp, *rtmp = NULL, *rlast;
48528 ++
48529 ++ unsigned int len;
48530 ++ char *tmp;
48531 ++
48532 ++ rusertp = rolep->transitions;
48533 ++
48534 ++ while (rusertp) {
48535 ++ rlast = rtmp;
48536 ++
48537 ++ if ((rtmp = (struct role_transition *)
48538 ++ acl_alloc(sizeof (struct role_transition))) == NULL)
48539 ++ return -ENOMEM;
48540 ++
48541 ++ if (copy_from_user(rtmp, rusertp,
48542 ++ sizeof (struct role_transition)))
48543 ++ return -EFAULT;
48544 ++
48545 ++ rusertp = rtmp->prev;
48546 ++
48547 ++ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
48548 ++
48549 ++ if (!len || len >= GR_SPROLE_LEN)
48550 ++ return -EINVAL;
48551 ++
48552 ++ if ((tmp = (char *) acl_alloc(len)) == NULL)
48553 ++ return -ENOMEM;
48554 ++
48555 ++ if (copy_from_user(tmp, rtmp->rolename, len))
48556 ++ return -EFAULT;
48557 ++ tmp[len-1] = '\0';
48558 ++ rtmp->rolename = tmp;
48559 ++
48560 ++ if (!rlast) {
48561 ++ rtmp->prev = NULL;
48562 ++ rolep->transitions = rtmp;
48563 ++ } else {
48564 ++ rlast->next = rtmp;
48565 ++ rtmp->prev = rlast;
48566 ++ }
48567 ++
48568 ++ if (!rusertp)
48569 ++ rtmp->next = NULL;
48570 ++ }
48571 ++
48572 ++ return 0;
48573 ++}
48574 ++
48575 ++static struct acl_subject_label *
48576 ++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
48577 ++{
48578 ++ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
48579 ++ unsigned int len;
48580 ++ char *tmp;
48581 ++ __u32 num_objs;
48582 ++ struct acl_ip_label **i_tmp, *i_utmp2;
48583 ++ struct gr_hash_struct ghash;
48584 ++ struct subject_map *subjmap;
48585 ++ unsigned int i_num;
48586 ++ int err;
48587 ++
48588 ++ s_tmp = lookup_subject_map(userp);
48589 ++
48590 ++ /* we've already copied this subject into the kernel, just return
48591 ++ the reference to it, and don't copy it over again
48592 ++ */
48593 ++ if (s_tmp)
48594 ++ return(s_tmp);
48595 ++
48596 ++ if ((s_tmp = (struct acl_subject_label *)
48597 ++ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
48598 ++ return ERR_PTR(-ENOMEM);
48599 ++
48600 ++ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
48601 ++ if (subjmap == NULL)
48602 ++ return ERR_PTR(-ENOMEM);
48603 ++
48604 ++ subjmap->user = userp;
48605 ++ subjmap->kernel = s_tmp;
48606 ++ insert_subj_map_entry(subjmap);
48607 ++
48608 ++ if (copy_from_user(s_tmp, userp,
48609 ++ sizeof (struct acl_subject_label)))
48610 ++ return ERR_PTR(-EFAULT);
48611 ++
48612 ++ len = strnlen_user(s_tmp->filename, PATH_MAX);
48613 ++
48614 ++ if (!len || len >= PATH_MAX)
48615 ++ return ERR_PTR(-EINVAL);
48616 ++
48617 ++ if ((tmp = (char *) acl_alloc(len)) == NULL)
48618 ++ return ERR_PTR(-ENOMEM);
48619 ++
48620 ++ if (copy_from_user(tmp, s_tmp->filename, len))
48621 ++ return ERR_PTR(-EFAULT);
48622 ++ tmp[len-1] = '\0';
48623 ++ s_tmp->filename = tmp;
48624 ++
48625 ++ if (!strcmp(s_tmp->filename, "/"))
48626 ++ role->root_label = s_tmp;
48627 ++
48628 ++ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
48629 ++ return ERR_PTR(-EFAULT);
48630 ++
48631 ++ /* copy user and group transition tables */
48632 ++
48633 ++ if (s_tmp->user_trans_num) {
48634 ++ uid_t *uidlist;
48635 ++
48636 ++ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
48637 ++ if (uidlist == NULL)
48638 ++ return ERR_PTR(-ENOMEM);
48639 ++ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
48640 ++ return ERR_PTR(-EFAULT);
48641 ++
48642 ++ s_tmp->user_transitions = uidlist;
48643 ++ }
48644 ++
48645 ++ if (s_tmp->group_trans_num) {
48646 ++ gid_t *gidlist;
48647 ++
48648 ++ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
48649 ++ if (gidlist == NULL)
48650 ++ return ERR_PTR(-ENOMEM);
48651 ++ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
48652 ++ return ERR_PTR(-EFAULT);
48653 ++
48654 ++ s_tmp->group_transitions = gidlist;
48655 ++ }
48656 ++
48657 ++ /* set up object hash table */
48658 ++ num_objs = count_user_objs(ghash.first);
48659 ++
48660 ++ s_tmp->obj_hash_size = num_objs;
48661 ++ s_tmp->obj_hash =
48662 ++ (struct acl_object_label **)
48663 ++ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
48664 ++
48665 ++ if (!s_tmp->obj_hash)
48666 ++ return ERR_PTR(-ENOMEM);
48667 ++
48668 ++ memset(s_tmp->obj_hash, 0,
48669 ++ s_tmp->obj_hash_size *
48670 ++ sizeof (struct acl_object_label *));
48671 ++
48672 ++ /* add in objects */
48673 ++ err = copy_user_objs(ghash.first, s_tmp, role);
48674 ++
48675 ++ if (err)
48676 ++ return ERR_PTR(err);
48677 ++
48678 ++ /* set pointer for parent subject */
48679 ++ if (s_tmp->parent_subject) {
48680 ++ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
48681 ++
48682 ++ if (IS_ERR(s_tmp2))
48683 ++ return s_tmp2;
48684 ++
48685 ++ s_tmp->parent_subject = s_tmp2;
48686 ++ }
48687 ++
48688 ++ /* add in ip acls */
48689 ++
48690 ++ if (!s_tmp->ip_num) {
48691 ++ s_tmp->ips = NULL;
48692 ++ goto insert;
48693 ++ }
48694 ++
48695 ++ i_tmp =
48696 ++ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
48697 ++ sizeof (struct acl_ip_label *));
48698 ++
48699 ++ if (!i_tmp)
48700 ++ return ERR_PTR(-ENOMEM);
48701 ++
48702 ++ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
48703 ++ *(i_tmp + i_num) =
48704 ++ (struct acl_ip_label *)
48705 ++ acl_alloc(sizeof (struct acl_ip_label));
48706 ++ if (!*(i_tmp + i_num))
48707 ++ return ERR_PTR(-ENOMEM);
48708 ++
48709 ++ if (copy_from_user
48710 ++ (&i_utmp2, s_tmp->ips + i_num,
48711 ++ sizeof (struct acl_ip_label *)))
48712 ++ return ERR_PTR(-EFAULT);
48713 ++
48714 ++ if (copy_from_user
48715 ++ (*(i_tmp + i_num), i_utmp2,
48716 ++ sizeof (struct acl_ip_label)))
48717 ++ return ERR_PTR(-EFAULT);
48718 ++
48719 ++ if ((*(i_tmp + i_num))->iface == NULL)
48720 ++ continue;
48721 ++
48722 ++ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48723 ++ if (!len || len >= IFNAMSIZ)
48724 ++ return ERR_PTR(-EINVAL);
48725 ++ tmp = acl_alloc(len);
48726 ++ if (tmp == NULL)
48727 ++ return ERR_PTR(-ENOMEM);
48728 ++ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48729 ++ return ERR_PTR(-EFAULT);
48730 ++ (*(i_tmp + i_num))->iface = tmp;
48731 ++ }
48732 ++
48733 ++ s_tmp->ips = i_tmp;
48734 ++
48735 ++insert:
48736 ++ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48737 ++ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48738 ++ return ERR_PTR(-ENOMEM);
48739 ++
48740 ++ return s_tmp;
48741 ++}
48742 ++
48743 ++static int
48744 ++copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48745 ++{
48746 ++ struct acl_subject_label s_pre;
48747 ++ struct acl_subject_label * ret;
48748 ++ int err;
48749 ++
48750 ++ while (userp) {
48751 ++ if (copy_from_user(&s_pre, userp,
48752 ++ sizeof (struct acl_subject_label)))
48753 ++ return -EFAULT;
48754 ++
48755 ++ /* do not add nested subjects here, add
48756 ++ while parsing objects
48757 ++ */
48758 ++
48759 ++ if (s_pre.mode & GR_NESTED) {
48760 ++ userp = s_pre.prev;
48761 ++ continue;
48762 ++ }
48763 ++
48764 ++ ret = do_copy_user_subj(userp, role);
48765 ++
48766 ++ err = PTR_ERR(ret);
48767 ++ if (IS_ERR(ret))
48768 ++ return err;
48769 ++
48770 ++ insert_acl_subj_label(ret, role);
48771 ++
48772 ++ userp = s_pre.prev;
48773 ++ }
48774 ++
48775 ++ return 0;
48776 ++}
48777 ++
48778 ++static int
48779 ++copy_user_acl(struct gr_arg *arg)
48780 ++{
48781 ++ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48782 ++ struct sprole_pw *sptmp;
48783 ++ struct gr_hash_struct *ghash;
48784 ++ uid_t *domainlist;
48785 ++ unsigned int r_num;
48786 ++ unsigned int len;
48787 ++ char *tmp;
48788 ++ int err = 0;
48789 ++ __u16 i;
48790 ++ __u32 num_subjs;
48791 ++
48792 ++ /* we need a default and kernel role */
48793 ++ if (arg->role_db.num_roles < 2)
48794 ++ return -EINVAL;
48795 ++
48796 ++ /* copy special role authentication info from userspace */
48797 ++
48798 ++ num_sprole_pws = arg->num_sprole_pws;
48799 ++ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48800 ++
48801 ++ if (!acl_special_roles) {
48802 ++ err = -ENOMEM;
48803 ++ goto cleanup;
48804 ++ }
48805 ++
48806 ++ for (i = 0; i < num_sprole_pws; i++) {
48807 ++ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48808 ++ if (!sptmp) {
48809 ++ err = -ENOMEM;
48810 ++ goto cleanup;
48811 ++ }
48812 ++ if (copy_from_user(sptmp, arg->sprole_pws + i,
48813 ++ sizeof (struct sprole_pw))) {
48814 ++ err = -EFAULT;
48815 ++ goto cleanup;
48816 ++ }
48817 ++
48818 ++ len =
48819 ++ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48820 ++
48821 ++ if (!len || len >= GR_SPROLE_LEN) {
48822 ++ err = -EINVAL;
48823 ++ goto cleanup;
48824 ++ }
48825 ++
48826 ++ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48827 ++ err = -ENOMEM;
48828 ++ goto cleanup;
48829 ++ }
48830 ++
48831 ++ if (copy_from_user(tmp, sptmp->rolename, len)) {
48832 ++ err = -EFAULT;
48833 ++ goto cleanup;
48834 ++ }
48835 ++ tmp[len-1] = '\0';
48836 ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48837 ++ printk(KERN_ALERT "Copying special role %s\n", tmp);
48838 ++#endif
48839 ++ sptmp->rolename = tmp;
48840 ++ acl_special_roles[i] = sptmp;
48841 ++ }
48842 ++
48843 ++ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48844 ++
48845 ++ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48846 ++ r_tmp = acl_alloc(sizeof (struct acl_role_label));
48847 ++
48848 ++ if (!r_tmp) {
48849 ++ err = -ENOMEM;
48850 ++ goto cleanup;
48851 ++ }
48852 ++
48853 ++ if (copy_from_user(&r_utmp2, r_utmp + r_num,
48854 ++ sizeof (struct acl_role_label *))) {
48855 ++ err = -EFAULT;
48856 ++ goto cleanup;
48857 ++ }
48858 ++
48859 ++ if (copy_from_user(r_tmp, r_utmp2,
48860 ++ sizeof (struct acl_role_label))) {
48861 ++ err = -EFAULT;
48862 ++ goto cleanup;
48863 ++ }
48864 ++
48865 ++ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48866 ++
48867 ++ if (!len || len >= PATH_MAX) {
48868 ++ err = -EINVAL;
48869 ++ goto cleanup;
48870 ++ }
48871 ++
48872 ++ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48873 ++ err = -ENOMEM;
48874 ++ goto cleanup;
48875 ++ }
48876 ++ if (copy_from_user(tmp, r_tmp->rolename, len)) {
48877 ++ err = -EFAULT;
48878 ++ goto cleanup;
48879 ++ }
48880 ++ tmp[len-1] = '\0';
48881 ++ r_tmp->rolename = tmp;
48882 ++
48883 ++ if (!strcmp(r_tmp->rolename, "default")
48884 ++ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48885 ++ default_role = r_tmp;
48886 ++ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48887 ++ kernel_role = r_tmp;
48888 ++ }
48889 ++
48890 ++ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48891 ++ err = -ENOMEM;
48892 ++ goto cleanup;
48893 ++ }
48894 ++ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48895 ++ err = -EFAULT;
48896 ++ goto cleanup;
48897 ++ }
48898 ++
48899 ++ r_tmp->hash = ghash;
48900 ++
48901 ++ num_subjs = count_user_subjs(r_tmp->hash->first);
48902 ++
48903 ++ r_tmp->subj_hash_size = num_subjs;
48904 ++ r_tmp->subj_hash =
48905 ++ (struct acl_subject_label **)
48906 ++ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48907 ++
48908 ++ if (!r_tmp->subj_hash) {
48909 ++ err = -ENOMEM;
48910 ++ goto cleanup;
48911 ++ }
48912 ++
48913 ++ err = copy_user_allowedips(r_tmp);
48914 ++ if (err)
48915 ++ goto cleanup;
48916 ++
48917 ++ /* copy domain info */
48918 ++ if (r_tmp->domain_children != NULL) {
48919 ++ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48920 ++ if (domainlist == NULL) {
48921 ++ err = -ENOMEM;
48922 ++ goto cleanup;
48923 ++ }
48924 ++ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48925 ++ err = -EFAULT;
48926 ++ goto cleanup;
48927 ++ }
48928 ++ r_tmp->domain_children = domainlist;
48929 ++ }
48930 ++
48931 ++ err = copy_user_transitions(r_tmp);
48932 ++ if (err)
48933 ++ goto cleanup;
48934 ++
48935 ++ memset(r_tmp->subj_hash, 0,
48936 ++ r_tmp->subj_hash_size *
48937 ++ sizeof (struct acl_subject_label *));
48938 ++
48939 ++ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48940 ++
48941 ++ if (err)
48942 ++ goto cleanup;
48943 ++
48944 ++ /* set nested subject list to null */
48945 ++ r_tmp->hash->first = NULL;
48946 ++
48947 ++ insert_acl_role_label(r_tmp);
48948 ++ }
48949 ++
48950 ++ goto return_err;
48951 ++ cleanup:
48952 ++ free_variables();
48953 ++ return_err:
48954 ++ return err;
48955 ++
48956 ++}
48957 ++
48958 ++static int
48959 ++gracl_init(struct gr_arg *args)
48960 ++{
48961 ++ int error = 0;
48962 ++
48963 ++ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48964 ++ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48965 ++
48966 ++ if (init_variables(args)) {
48967 ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48968 ++ error = -ENOMEM;
48969 ++ free_variables();
48970 ++ goto out;
48971 ++ }
48972 ++
48973 ++ error = copy_user_acl(args);
48974 ++ free_init_variables();
48975 ++ if (error) {
48976 ++ free_variables();
48977 ++ goto out;
48978 ++ }
48979 ++
48980 ++ if ((error = gr_set_acls(0))) {
48981 ++ free_variables();
48982 ++ goto out;
48983 ++ }
48984 ++
48985 ++ pax_open_kernel();
48986 ++ gr_status |= GR_READY;
48987 ++ pax_close_kernel();
48988 ++
48989 ++ out:
48990 ++ return error;
48991 ++}
48992 ++
48993 ++/* derived from glibc fnmatch() 0: match, 1: no match*/
48994 ++
48995 ++static int
48996 ++glob_match(const char *p, const char *n)
48997 ++{
48998 ++ char c;
48999 ++
49000 ++ while ((c = *p++) != '\0') {
49001 ++ switch (c) {
49002 ++ case '?':
49003 ++ if (*n == '\0')
49004 ++ return 1;
49005 ++ else if (*n == '/')
49006 ++ return 1;
49007 ++ break;
49008 ++ case '\\':
49009 ++ if (*n != c)
49010 ++ return 1;
49011 ++ break;
49012 ++ case '*':
49013 ++ for (c = *p++; c == '?' || c == '*'; c = *p++) {
49014 ++ if (*n == '/')
49015 ++ return 1;
49016 ++ else if (c == '?') {
49017 ++ if (*n == '\0')
49018 ++ return 1;
49019 ++ else
49020 ++ ++n;
49021 ++ }
49022 ++ }
49023 ++ if (c == '\0') {
49024 ++ return 0;
49025 ++ } else {
49026 ++ const char *endp;
49027 ++
49028 ++ if ((endp = strchr(n, '/')) == NULL)
49029 ++ endp = n + strlen(n);
49030 ++
49031 ++ if (c == '[') {
49032 ++ for (--p; n < endp; ++n)
49033 ++ if (!glob_match(p, n))
49034 ++ return 0;
49035 ++ } else if (c == '/') {
49036 ++ while (*n != '\0' && *n != '/')
49037 ++ ++n;
49038 ++ if (*n == '/' && !glob_match(p, n + 1))
49039 ++ return 0;
49040 ++ } else {
49041 ++ for (--p; n < endp; ++n)
49042 ++ if (*n == c && !glob_match(p, n))
49043 ++ return 0;
49044 ++ }
49045 ++
49046 ++ return 1;
49047 ++ }
49048 ++ case '[':
49049 ++ {
49050 ++ int not;
49051 ++ char cold;
49052 ++
49053 ++ if (*n == '\0' || *n == '/')
49054 ++ return 1;
49055 ++
49056 ++ not = (*p == '!' || *p == '^');
49057 ++ if (not)
49058 ++ ++p;
49059 ++
49060 ++ c = *p++;
49061 ++ for (;;) {
49062 ++ unsigned char fn = (unsigned char)*n;
49063 ++
49064 ++ if (c == '\0')
49065 ++ return 1;
49066 ++ else {
49067 ++ if (c == fn)
49068 ++ goto matched;
49069 ++ cold = c;
49070 ++ c = *p++;
49071 ++
49072 ++ if (c == '-' && *p != ']') {
49073 ++ unsigned char cend = *p++;
49074 ++
49075 ++ if (cend == '\0')
49076 ++ return 1;
49077 ++
49078 ++ if (cold <= fn && fn <= cend)
49079 ++ goto matched;
49080 ++
49081 ++ c = *p++;
49082 ++ }
49083 ++ }
49084 ++
49085 ++ if (c == ']')
49086 ++ break;
49087 ++ }
49088 ++ if (!not)
49089 ++ return 1;
49090 ++ break;
49091 ++ matched:
49092 ++ while (c != ']') {
49093 ++ if (c == '\0')
49094 ++ return 1;
49095 ++
49096 ++ c = *p++;
49097 ++ }
49098 ++ if (not)
49099 ++ return 1;
49100 ++ }
49101 ++ break;
49102 ++ default:
49103 ++ if (c != *n)
49104 ++ return 1;
49105 ++ }
49106 ++
49107 ++ ++n;
49108 ++ }
49109 ++
49110 ++ if (*n == '\0')
49111 ++ return 0;
49112 ++
49113 ++ if (*n == '/')
49114 ++ return 0;
49115 ++
49116 ++ return 1;
49117 ++}
49118 ++
49119 ++static struct acl_object_label *
49120 ++chk_glob_label(struct acl_object_label *globbed,
49121 ++ struct dentry *dentry, struct vfsmount *mnt, char **path)
49122 ++{
49123 ++ struct acl_object_label *tmp;
49124 ++
49125 ++ if (*path == NULL)
49126 ++ *path = gr_to_filename_nolock(dentry, mnt);
49127 ++
49128 ++ tmp = globbed;
49129 ++
49130 ++ while (tmp) {
49131 ++ if (!glob_match(tmp->filename, *path))
49132 ++ return tmp;
49133 ++ tmp = tmp->next;
49134 ++ }
49135 ++
49136 ++ return NULL;
49137 ++}
49138 ++
49139 ++static struct acl_object_label *
49140 ++__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49141 ++ const ino_t curr_ino, const dev_t curr_dev,
49142 ++ const struct acl_subject_label *subj, char **path, const int checkglob)
49143 ++{
49144 ++ struct acl_subject_label *tmpsubj;
49145 ++ struct acl_object_label *retval;
49146 ++ struct acl_object_label *retval2;
49147 ++
49148 ++ tmpsubj = (struct acl_subject_label *) subj;
49149 ++ read_lock(&gr_inode_lock);
49150 ++ do {
49151 ++ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
49152 ++ if (retval) {
49153 ++ if (checkglob && retval->globbed) {
49154 ++ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
49155 ++ (struct vfsmount *)orig_mnt, path);
49156 ++ if (retval2)
49157 ++ retval = retval2;
49158 ++ }
49159 ++ break;
49160 ++ }
49161 ++ } while ((tmpsubj = tmpsubj->parent_subject));
49162 ++ read_unlock(&gr_inode_lock);
49163 ++
49164 ++ return retval;
49165 ++}
49166 ++
49167 ++static __inline__ struct acl_object_label *
49168 ++full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49169 ++ struct dentry *curr_dentry,
49170 ++ const struct acl_subject_label *subj, char **path, const int checkglob)
49171 ++{
49172 ++ int newglob = checkglob;
49173 ++ ino_t inode;
49174 ++ dev_t device;
49175 ++
49176 ++ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
49177 ++ as we don't want a / * rule to match instead of the / object
49178 ++ don't do this for create lookups that call this function though, since they're looking up
49179 ++ on the parent and thus need globbing checks on all paths
49180 ++ */
49181 ++ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
49182 ++ newglob = GR_NO_GLOB;
49183 ++
49184 ++ spin_lock(&curr_dentry->d_lock);
49185 ++ inode = curr_dentry->d_inode->i_ino;
49186 ++ device = __get_dev(curr_dentry);
49187 ++ spin_unlock(&curr_dentry->d_lock);
49188 ++
49189 ++ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
49190 ++}
49191 ++
49192 ++static struct acl_object_label *
49193 ++__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49194 ++ const struct acl_subject_label *subj, char *path, const int checkglob)
49195 ++{
49196 ++ struct dentry *dentry = (struct dentry *) l_dentry;
49197 ++ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49198 ++ struct acl_object_label *retval;
49199 ++ struct dentry *parent;
49200 ++
49201 ++ write_seqlock(&rename_lock);
49202 ++ br_read_lock(vfsmount_lock);
49203 ++
49204 ++ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
49205 ++#ifdef CONFIG_NET
49206 ++ mnt == sock_mnt ||
49207 ++#endif
49208 ++#ifdef CONFIG_HUGETLBFS
49209 ++ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
49210 ++#endif
49211 ++ /* ignore Eric Biederman */
49212 ++ IS_PRIVATE(l_dentry->d_inode))) {
49213 ++ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
49214 ++ goto out;
49215 ++ }
49216 ++
49217 ++ for (;;) {
49218 ++ if (dentry == real_root.dentry && mnt == real_root.mnt)
49219 ++ break;
49220 ++
49221 ++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49222 ++ if (mnt->mnt_parent == mnt)
49223 ++ break;
49224 ++
49225 ++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49226 ++ if (retval != NULL)
49227 ++ goto out;
49228 ++
49229 ++ dentry = mnt->mnt_mountpoint;
49230 ++ mnt = mnt->mnt_parent;
49231 ++ continue;
49232 ++ }
49233 ++
49234 ++ parent = dentry->d_parent;
49235 ++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49236 ++ if (retval != NULL)
49237 ++ goto out;
49238 ++
49239 ++ dentry = parent;
49240 ++ }
49241 ++
49242 ++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49243 ++
49244 ++ /* real_root is pinned so we don't have to hold a reference */
49245 ++ if (retval == NULL)
49246 ++ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
49247 ++out:
49248 ++ br_read_unlock(vfsmount_lock);
49249 ++ write_sequnlock(&rename_lock);
49250 ++
49251 ++ BUG_ON(retval == NULL);
49252 ++
49253 ++ return retval;
49254 ++}
49255 ++
49256 ++static __inline__ struct acl_object_label *
49257 ++chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49258 ++ const struct acl_subject_label *subj)
49259 ++{
49260 ++ char *path = NULL;
49261 ++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
49262 ++}
49263 ++
49264 ++static __inline__ struct acl_object_label *
49265 ++chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49266 ++ const struct acl_subject_label *subj)
49267 ++{
49268 ++ char *path = NULL;
49269 ++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
49270 ++}
49271 ++
49272 ++static __inline__ struct acl_object_label *
49273 ++chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49274 ++ const struct acl_subject_label *subj, char *path)
49275 ++{
49276 ++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
49277 ++}
49278 ++
49279 ++static struct acl_subject_label *
49280 ++chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49281 ++ const struct acl_role_label *role)
49282 ++{
49283 ++ struct dentry *dentry = (struct dentry *) l_dentry;
49284 ++ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49285 ++ struct acl_subject_label *retval;
49286 ++ struct dentry *parent;
49287 ++
49288 ++ write_seqlock(&rename_lock);
49289 ++ br_read_lock(vfsmount_lock);
49290 ++
49291 ++ for (;;) {
49292 ++ if (dentry == real_root.dentry && mnt == real_root.mnt)
49293 ++ break;
49294 ++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49295 ++ if (mnt->mnt_parent == mnt)
49296 ++ break;
49297 ++
49298 ++ spin_lock(&dentry->d_lock);
49299 ++ read_lock(&gr_inode_lock);
49300 ++ retval =
49301 ++ lookup_acl_subj_label(dentry->d_inode->i_ino,
49302 ++ __get_dev(dentry), role);
49303 ++ read_unlock(&gr_inode_lock);
49304 ++ spin_unlock(&dentry->d_lock);
49305 ++ if (retval != NULL)
49306 ++ goto out;
49307 ++
49308 ++ dentry = mnt->mnt_mountpoint;
49309 ++ mnt = mnt->mnt_parent;
49310 ++ continue;
49311 ++ }
49312 ++
49313 ++ spin_lock(&dentry->d_lock);
49314 ++ read_lock(&gr_inode_lock);
49315 ++ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49316 ++ __get_dev(dentry), role);
49317 ++ read_unlock(&gr_inode_lock);
49318 ++ parent = dentry->d_parent;
49319 ++ spin_unlock(&dentry->d_lock);
49320 ++
49321 ++ if (retval != NULL)
49322 ++ goto out;
49323 ++
49324 ++ dentry = parent;
49325 ++ }
49326 ++
49327 ++ spin_lock(&dentry->d_lock);
49328 ++ read_lock(&gr_inode_lock);
49329 ++ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49330 ++ __get_dev(dentry), role);
49331 ++ read_unlock(&gr_inode_lock);
49332 ++ spin_unlock(&dentry->d_lock);
49333 ++
49334 ++ if (unlikely(retval == NULL)) {
49335 ++ /* real_root is pinned, we don't need to hold a reference */
49336 ++ read_lock(&gr_inode_lock);
49337 ++ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
49338 ++ __get_dev(real_root.dentry), role);
49339 ++ read_unlock(&gr_inode_lock);
49340 ++ }
49341 ++out:
49342 ++ br_read_unlock(vfsmount_lock);
49343 ++ write_sequnlock(&rename_lock);
49344 ++
49345 ++ BUG_ON(retval == NULL);
49346 ++
49347 ++ return retval;
49348 ++}
49349 ++
49350 ++static void
49351 ++gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
49352 ++{
49353 ++ struct task_struct *task = current;
49354 ++ const struct cred *cred = current_cred();
49355 ++
49356 ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49357 ++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49358 ++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49359 ++ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
49360 ++
49361 ++ return;
49362 ++}
49363 ++
49364 ++static void
49365 ++gr_log_learn_sysctl(const char *path, const __u32 mode)
49366 ++{
49367 ++ struct task_struct *task = current;
49368 ++ const struct cred *cred = current_cred();
49369 ++
49370 ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49371 ++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49372 ++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49373 ++ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
49374 ++
49375 ++ return;
49376 ++}
49377 ++
49378 ++static void
49379 ++gr_log_learn_id_change(const char type, const unsigned int real,
49380 ++ const unsigned int effective, const unsigned int fs)
49381 ++{
49382 ++ struct task_struct *task = current;
49383 ++ const struct cred *cred = current_cred();
49384 ++
49385 ++ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
49386 ++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49387 ++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49388 ++ type, real, effective, fs, &task->signal->saved_ip);
49389 ++
49390 ++ return;
49391 ++}
49392 ++
49393 ++__u32
49394 ++gr_search_file(const struct dentry * dentry, const __u32 mode,
49395 ++ const struct vfsmount * mnt)
49396 ++{
49397 ++ __u32 retval = mode;
49398 ++ struct acl_subject_label *curracl;
49399 ++ struct acl_object_label *currobj;
49400 ++
49401 ++ if (unlikely(!(gr_status & GR_READY)))
49402 ++ return (mode & ~GR_AUDITS);
49403 ++
49404 ++ curracl = current->acl;
49405 ++
49406 ++ currobj = chk_obj_label(dentry, mnt, curracl);
49407 ++ retval = currobj->mode & mode;
49408 ++
49409 ++ /* if we're opening a specified transfer file for writing
49410 ++ (e.g. /dev/initctl), then transfer our role to init
49411 ++ */
49412 ++ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
49413 ++ current->role->roletype & GR_ROLE_PERSIST)) {
49414 ++ struct task_struct *task = init_pid_ns.child_reaper;
49415 ++
49416 ++ if (task->role != current->role) {
49417 ++ task->acl_sp_role = 0;
49418 ++ task->acl_role_id = current->acl_role_id;
49419 ++ task->role = current->role;
49420 ++ rcu_read_lock();
49421 ++ read_lock(&grsec_exec_file_lock);
49422 ++ gr_apply_subject_to_task(task);
49423 ++ read_unlock(&grsec_exec_file_lock);
49424 ++ rcu_read_unlock();
49425 ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
49426 ++ }
49427 ++ }
49428 ++
49429 ++ if (unlikely
49430 ++ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
49431 ++ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
49432 ++ __u32 new_mode = mode;
49433 ++
49434 ++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49435 ++
49436 ++ retval = new_mode;
49437 ++
49438 ++ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
49439 ++ new_mode |= GR_INHERIT;
49440 ++
49441 ++ if (!(mode & GR_NOLEARN))
49442 ++ gr_log_learn(dentry, mnt, new_mode);
49443 ++ }
49444 ++
49445 ++ return retval;
49446 ++}
49447 ++
49448 ++struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
49449 ++ const struct dentry *parent,
49450 ++ const struct vfsmount *mnt)
49451 ++{
49452 ++ struct name_entry *match;
49453 ++ struct acl_object_label *matchpo;
49454 ++ struct acl_subject_label *curracl;
49455 ++ char *path;
49456 ++
49457 ++ if (unlikely(!(gr_status & GR_READY)))
49458 ++ return NULL;
49459 ++
49460 ++ preempt_disable();
49461 ++ path = gr_to_filename_rbac(new_dentry, mnt);
49462 ++ match = lookup_name_entry_create(path);
49463 ++
49464 ++ curracl = current->acl;
49465 ++
49466 ++ if (match) {
49467 ++ read_lock(&gr_inode_lock);
49468 ++ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
49469 ++ read_unlock(&gr_inode_lock);
49470 ++
49471 ++ if (matchpo) {
49472 ++ preempt_enable();
49473 ++ return matchpo;
49474 ++ }
49475 ++ }
49476 ++
49477 ++ // lookup parent
49478 ++
49479 ++ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
49480 ++
49481 ++ preempt_enable();
49482 ++ return matchpo;
49483 ++}
49484 ++
49485 ++__u32
49486 ++gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
49487 ++ const struct vfsmount * mnt, const __u32 mode)
49488 ++{
49489 ++ struct acl_object_label *matchpo;
49490 ++ __u32 retval;
49491 ++
49492 ++ if (unlikely(!(gr_status & GR_READY)))
49493 ++ return (mode & ~GR_AUDITS);
49494 ++
49495 ++ matchpo = gr_get_create_object(new_dentry, parent, mnt);
49496 ++
49497 ++ retval = matchpo->mode & mode;
49498 ++
49499 ++ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
49500 ++ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
49501 ++ __u32 new_mode = mode;
49502 ++
49503 ++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49504 ++
49505 ++ gr_log_learn(new_dentry, mnt, new_mode);
49506 ++ return new_mode;
49507 ++ }
49508 ++
49509 ++ return retval;
49510 ++}
49511 ++
49512 ++__u32
49513 ++gr_check_link(const struct dentry * new_dentry,
49514 ++ const struct dentry * parent_dentry,
49515 ++ const struct vfsmount * parent_mnt,
49516 ++ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
49517 ++{
49518 ++ struct acl_object_label *obj;
49519 ++ __u32 oldmode, newmode;
49520 ++ __u32 needmode;
49521 ++ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
49522 ++ GR_DELETE | GR_INHERIT;
49523 ++
49524 ++ if (unlikely(!(gr_status & GR_READY)))
49525 ++ return (GR_CREATE | GR_LINK);
49526 ++
49527 ++ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
49528 ++ oldmode = obj->mode;
49529 ++
49530 ++ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
49531 ++ newmode = obj->mode;
49532 ++
49533 ++ needmode = newmode & checkmodes;
49534 ++
49535 ++ // old name for hardlink must have at least the permissions of the new name
49536 ++ if ((oldmode & needmode) != needmode)
49537 ++ goto bad;
49538 ++
49539 ++ // if old name had restrictions/auditing, make sure the new name does as well
49540 ++ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
49541 ++
49542 ++ // don't allow hardlinking of suid/sgid files without permission
49543 ++ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49544 ++ needmode |= GR_SETID;
49545 ++
49546 ++ if ((newmode & needmode) != needmode)
49547 ++ goto bad;
49548 ++
49549 ++ // enforce minimum permissions
49550 ++ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
49551 ++ return newmode;
49552 ++bad:
49553 ++ needmode = oldmode;
49554 ++ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49555 ++ needmode |= GR_SETID;
49556 ++
49557 ++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49558 ++ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
49559 ++ return (GR_CREATE | GR_LINK);
49560 ++ } else if (newmode & GR_SUPPRESS)
49561 ++ return GR_SUPPRESS;
49562 ++ else
49563 ++ return 0;
49564 ++}
49565 ++
49566 ++int
49567 ++gr_check_hidden_task(const struct task_struct *task)
49568 ++{
49569 ++ if (unlikely(!(gr_status & GR_READY)))
49570 ++ return 0;
49571 ++
49572 ++ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
49573 ++ return 1;
49574 ++
49575 ++ return 0;
49576 ++}
49577 ++
49578 ++int
49579 ++gr_check_protected_task(const struct task_struct *task)
49580 ++{
49581 ++ if (unlikely(!(gr_status & GR_READY) || !task))
49582 ++ return 0;
49583 ++
49584 ++ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49585 ++ task->acl != current->acl)
49586 ++ return 1;
49587 ++
49588 ++ return 0;
49589 ++}
49590 ++
49591 ++int
49592 ++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49593 ++{
49594 ++ struct task_struct *p;
49595 ++ int ret = 0;
49596 ++
49597 ++ if (unlikely(!(gr_status & GR_READY) || !pid))
49598 ++ return ret;
49599 ++
49600 ++ read_lock(&tasklist_lock);
49601 ++ do_each_pid_task(pid, type, p) {
49602 ++ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49603 ++ p->acl != current->acl) {
49604 ++ ret = 1;
49605 ++ goto out;
49606 ++ }
49607 ++ } while_each_pid_task(pid, type, p);
49608 ++out:
49609 ++ read_unlock(&tasklist_lock);
49610 ++
49611 ++ return ret;
49612 ++}
49613 ++
49614 ++void
49615 ++gr_copy_label(struct task_struct *tsk)
49616 ++{
49617 ++ tsk->signal->used_accept = 0;
49618 ++ tsk->acl_sp_role = 0;
49619 ++ tsk->acl_role_id = current->acl_role_id;
49620 ++ tsk->acl = current->acl;
49621 ++ tsk->role = current->role;
49622 ++ tsk->signal->curr_ip = current->signal->curr_ip;
49623 ++ tsk->signal->saved_ip = current->signal->saved_ip;
49624 ++ if (current->exec_file)
49625 ++ get_file(current->exec_file);
49626 ++ tsk->exec_file = current->exec_file;
49627 ++ tsk->is_writable = current->is_writable;
49628 ++ if (unlikely(current->signal->used_accept)) {
49629 ++ current->signal->curr_ip = 0;
49630 ++ current->signal->saved_ip = 0;
49631 ++ }
49632 ++
49633 ++ return;
49634 ++}
49635 ++
49636 ++static void
49637 ++gr_set_proc_res(struct task_struct *task)
49638 ++{
49639 ++ struct acl_subject_label *proc;
49640 ++ unsigned short i;
49641 ++
49642 ++ proc = task->acl;
49643 ++
49644 ++ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
49645 ++ return;
49646 ++
49647 ++ for (i = 0; i < RLIM_NLIMITS; i++) {
49648 ++ if (!(proc->resmask & (1 << i)))
49649 ++ continue;
49650 ++
49651 ++ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
49652 ++ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
49653 ++ }
49654 ++
49655 ++ return;
49656 ++}
49657 ++
49658 ++extern int __gr_process_user_ban(struct user_struct *user);
49659 ++
49660 ++int
49661 ++gr_check_user_change(int real, int effective, int fs)
49662 ++{
49663 ++ unsigned int i;
49664 ++ __u16 num;
49665 ++ uid_t *uidlist;
49666 ++ int curuid;
49667 ++ int realok = 0;
49668 ++ int effectiveok = 0;
49669 ++ int fsok = 0;
49670 ++
49671 ++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49672 ++ struct user_struct *user;
49673 ++
49674 ++ if (real == -1)
49675 ++ goto skipit;
49676 ++
49677 ++ user = find_user(real);
49678 ++ if (user == NULL)
49679 ++ goto skipit;
49680 ++
49681 ++ if (__gr_process_user_ban(user)) {
49682 ++ /* for find_user */
49683 ++ free_uid(user);
49684 ++ return 1;
49685 ++ }
49686 ++
49687 ++ /* for find_user */
49688 ++ free_uid(user);
49689 ++
49690 ++skipit:
49691 ++#endif
49692 ++
49693 ++ if (unlikely(!(gr_status & GR_READY)))
49694 ++ return 0;
49695 ++
49696 ++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49697 ++ gr_log_learn_id_change('u', real, effective, fs);
49698 ++
49699 ++ num = current->acl->user_trans_num;
49700 ++ uidlist = current->acl->user_transitions;
49701 ++
49702 ++ if (uidlist == NULL)
49703 ++ return 0;
49704 ++
49705 ++ if (real == -1)
49706 ++ realok = 1;
49707 ++ if (effective == -1)
49708 ++ effectiveok = 1;
49709 ++ if (fs == -1)
49710 ++ fsok = 1;
49711 ++
49712 ++ if (current->acl->user_trans_type & GR_ID_ALLOW) {
49713 ++ for (i = 0; i < num; i++) {
49714 ++ curuid = (int)uidlist[i];
49715 ++ if (real == curuid)
49716 ++ realok = 1;
49717 ++ if (effective == curuid)
49718 ++ effectiveok = 1;
49719 ++ if (fs == curuid)
49720 ++ fsok = 1;
49721 ++ }
49722 ++ } else if (current->acl->user_trans_type & GR_ID_DENY) {
49723 ++ for (i = 0; i < num; i++) {
49724 ++ curuid = (int)uidlist[i];
49725 ++ if (real == curuid)
49726 ++ break;
49727 ++ if (effective == curuid)
49728 ++ break;
49729 ++ if (fs == curuid)
49730 ++ break;
49731 ++ }
49732 ++ /* not in deny list */
49733 ++ if (i == num) {
49734 ++ realok = 1;
49735 ++ effectiveok = 1;
49736 ++ fsok = 1;
49737 ++ }
49738 ++ }
49739 ++
49740 ++ if (realok && effectiveok && fsok)
49741 ++ return 0;
49742 ++ else {
49743 ++ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49744 ++ return 1;
49745 ++ }
49746 ++}
49747 ++
49748 ++int
49749 ++gr_check_group_change(int real, int effective, int fs)
49750 ++{
49751 ++ unsigned int i;
49752 ++ __u16 num;
49753 ++ gid_t *gidlist;
49754 ++ int curgid;
49755 ++ int realok = 0;
49756 ++ int effectiveok = 0;
49757 ++ int fsok = 0;
49758 ++
49759 ++ if (unlikely(!(gr_status & GR_READY)))
49760 ++ return 0;
49761 ++
49762 ++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49763 ++ gr_log_learn_id_change('g', real, effective, fs);
49764 ++
49765 ++ num = current->acl->group_trans_num;
49766 ++ gidlist = current->acl->group_transitions;
49767 ++
49768 ++ if (gidlist == NULL)
49769 ++ return 0;
49770 ++
49771 ++ if (real == -1)
49772 ++ realok = 1;
49773 ++ if (effective == -1)
49774 ++ effectiveok = 1;
49775 ++ if (fs == -1)
49776 ++ fsok = 1;
49777 ++
49778 ++ if (current->acl->group_trans_type & GR_ID_ALLOW) {
49779 ++ for (i = 0; i < num; i++) {
49780 ++ curgid = (int)gidlist[i];
49781 ++ if (real == curgid)
49782 ++ realok = 1;
49783 ++ if (effective == curgid)
49784 ++ effectiveok = 1;
49785 ++ if (fs == curgid)
49786 ++ fsok = 1;
49787 ++ }
49788 ++ } else if (current->acl->group_trans_type & GR_ID_DENY) {
49789 ++ for (i = 0; i < num; i++) {
49790 ++ curgid = (int)gidlist[i];
49791 ++ if (real == curgid)
49792 ++ break;
49793 ++ if (effective == curgid)
49794 ++ break;
49795 ++ if (fs == curgid)
49796 ++ break;
49797 ++ }
49798 ++ /* not in deny list */
49799 ++ if (i == num) {
49800 ++ realok = 1;
49801 ++ effectiveok = 1;
49802 ++ fsok = 1;
49803 ++ }
49804 ++ }
49805 ++
49806 ++ if (realok && effectiveok && fsok)
49807 ++ return 0;
49808 ++ else {
49809 ++ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49810 ++ return 1;
49811 ++ }
49812 ++}
49813 ++
49814 ++void
49815 ++gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49816 ++{
49817 ++ struct acl_role_label *role = task->role;
49818 ++ struct acl_subject_label *subj = NULL;
49819 ++ struct acl_object_label *obj;
49820 ++ struct file *filp;
49821 ++
49822 ++ if (unlikely(!(gr_status & GR_READY)))
49823 ++ return;
49824 ++
49825 ++ filp = task->exec_file;
49826 ++
49827 ++ /* kernel process, we'll give them the kernel role */
49828 ++ if (unlikely(!filp)) {
49829 ++ task->role = kernel_role;
49830 ++ task->acl = kernel_role->root_label;
49831 ++ return;
49832 ++ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49833 ++ role = lookup_acl_role_label(task, uid, gid);
49834 ++
49835 ++ /* perform subject lookup in possibly new role
49836 ++ we can use this result below in the case where role == task->role
49837 ++ */
49838 ++ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49839 ++
49840 ++ /* if we changed uid/gid, but result in the same role
49841 ++ and are using inheritance, don't lose the inherited subject
49842 ++ if current subject is other than what normal lookup
49843 ++ would result in, we arrived via inheritance, don't
49844 ++ lose subject
49845 ++ */
49846 ++ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49847 ++ (subj == task->acl)))
49848 ++ task->acl = subj;
49849 ++
49850 ++ task->role = role;
49851 ++
49852 ++ task->is_writable = 0;
49853 ++
49854 ++ /* ignore additional mmap checks for processes that are writable
49855 ++ by the default ACL */
49856 ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49857 ++ if (unlikely(obj->mode & GR_WRITE))
49858 ++ task->is_writable = 1;
49859 ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49860 ++ if (unlikely(obj->mode & GR_WRITE))
49861 ++ task->is_writable = 1;
49862 ++
49863 ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49864 ++ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49865 ++#endif
49866 ++
49867 ++ gr_set_proc_res(task);
49868 ++
49869 ++ return;
49870 ++}
49871 ++
49872 ++int
49873 ++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49874 ++ const int unsafe_share)
49875 ++{
49876 ++ struct task_struct *task = current;
49877 ++ struct acl_subject_label *newacl;
49878 ++ struct acl_object_label *obj;
49879 ++ __u32 retmode;
49880 ++
49881 ++ if (unlikely(!(gr_status & GR_READY)))
49882 ++ return 0;
49883 ++
49884 ++ newacl = chk_subj_label(dentry, mnt, task->role);
49885 ++
49886 ++ task_lock(task);
49887 ++ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49888 ++ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49889 ++ !(task->role->roletype & GR_ROLE_GOD) &&
49890 ++ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49891 ++ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49892 ++ task_unlock(task);
49893 ++ if (unsafe_share)
49894 ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49895 ++ else
49896 ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49897 ++ return -EACCES;
49898 ++ }
49899 ++ task_unlock(task);
49900 ++
49901 ++ obj = chk_obj_label(dentry, mnt, task->acl);
49902 ++ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49903 ++
49904 ++ if (!(task->acl->mode & GR_INHERITLEARN) &&
49905 ++ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49906 ++ if (obj->nested)
49907 ++ task->acl = obj->nested;
49908 ++ else
49909 ++ task->acl = newacl;
49910 ++ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49911 ++ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49912 ++
49913 ++ task->is_writable = 0;
49914 ++
49915 ++ /* ignore additional mmap checks for processes that are writable
49916 ++ by the default ACL */
49917 ++ obj = chk_obj_label(dentry, mnt, default_role->root_label);
49918 ++ if (unlikely(obj->mode & GR_WRITE))
49919 ++ task->is_writable = 1;
49920 ++ obj = chk_obj_label(dentry, mnt, task->role->root_label);
49921 ++ if (unlikely(obj->mode & GR_WRITE))
49922 ++ task->is_writable = 1;
49923 ++
49924 ++ gr_set_proc_res(task);
49925 ++
49926 ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49927 ++ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49928 ++#endif
49929 ++ return 0;
49930 ++}
49931 ++
49932 ++/* always called with valid inodev ptr */
49933 ++static void
49934 ++do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49935 ++{
49936 ++ struct acl_object_label *matchpo;
49937 ++ struct acl_subject_label *matchps;
49938 ++ struct acl_subject_label *subj;
49939 ++ struct acl_role_label *role;
49940 ++ unsigned int x;
49941 ++
49942 ++ FOR_EACH_ROLE_START(role)
49943 ++ FOR_EACH_SUBJECT_START(role, subj, x)
49944 ++ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49945 ++ matchpo->mode |= GR_DELETED;
49946 ++ FOR_EACH_SUBJECT_END(subj,x)
49947 ++ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49948 ++ if (subj->inode == ino && subj->device == dev)
49949 ++ subj->mode |= GR_DELETED;
49950 ++ FOR_EACH_NESTED_SUBJECT_END(subj)
49951 ++ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49952 ++ matchps->mode |= GR_DELETED;
49953 ++ FOR_EACH_ROLE_END(role)
49954 ++
49955 ++ inodev->nentry->deleted = 1;
49956 ++
49957 ++ return;
49958 ++}
49959 ++
49960 ++void
49961 ++gr_handle_delete(const ino_t ino, const dev_t dev)
49962 ++{
49963 ++ struct inodev_entry *inodev;
49964 ++
49965 ++ if (unlikely(!(gr_status & GR_READY)))
49966 ++ return;
49967 ++
49968 ++ write_lock(&gr_inode_lock);
49969 ++ inodev = lookup_inodev_entry(ino, dev);
49970 ++ if (inodev != NULL)
49971 ++ do_handle_delete(inodev, ino, dev);
49972 ++ write_unlock(&gr_inode_lock);
49973 ++
49974 ++ return;
49975 ++}
49976 ++
49977 ++static void
49978 ++update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49979 ++ const ino_t newinode, const dev_t newdevice,
49980 ++ struct acl_subject_label *subj)
49981 ++{
49982 ++ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49983 ++ struct acl_object_label *match;
49984 ++
49985 ++ match = subj->obj_hash[index];
49986 ++
49987 ++ while (match && (match->inode != oldinode ||
49988 ++ match->device != olddevice ||
49989 ++ !(match->mode & GR_DELETED)))
49990 ++ match = match->next;
49991 ++
49992 ++ if (match && (match->inode == oldinode)
49993 ++ && (match->device == olddevice)
49994 ++ && (match->mode & GR_DELETED)) {
49995 ++ if (match->prev == NULL) {
49996 ++ subj->obj_hash[index] = match->next;
49997 ++ if (match->next != NULL)
49998 ++ match->next->prev = NULL;
49999 ++ } else {
50000 ++ match->prev->next = match->next;
50001 ++ if (match->next != NULL)
50002 ++ match->next->prev = match->prev;
50003 ++ }
50004 ++ match->prev = NULL;
50005 ++ match->next = NULL;
50006 ++ match->inode = newinode;
50007 ++ match->device = newdevice;
50008 ++ match->mode &= ~GR_DELETED;
50009 ++
50010 ++ insert_acl_obj_label(match, subj);
50011 ++ }
50012 ++
50013 ++ return;
50014 ++}
50015 ++
50016 ++static void
50017 ++update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
50018 ++ const ino_t newinode, const dev_t newdevice,
50019 ++ struct acl_role_label *role)
50020 ++{
50021 ++ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
50022 ++ struct acl_subject_label *match;
50023 ++
50024 ++ match = role->subj_hash[index];
50025 ++
50026 ++ while (match && (match->inode != oldinode ||
50027 ++ match->device != olddevice ||
50028 ++ !(match->mode & GR_DELETED)))
50029 ++ match = match->next;
50030 ++
50031 ++ if (match && (match->inode == oldinode)
50032 ++ && (match->device == olddevice)
50033 ++ && (match->mode & GR_DELETED)) {
50034 ++ if (match->prev == NULL) {
50035 ++ role->subj_hash[index] = match->next;
50036 ++ if (match->next != NULL)
50037 ++ match->next->prev = NULL;
50038 ++ } else {
50039 ++ match->prev->next = match->next;
50040 ++ if (match->next != NULL)
50041 ++ match->next->prev = match->prev;
50042 ++ }
50043 ++ match->prev = NULL;
50044 ++ match->next = NULL;
50045 ++ match->inode = newinode;
50046 ++ match->device = newdevice;
50047 ++ match->mode &= ~GR_DELETED;
50048 ++
50049 ++ insert_acl_subj_label(match, role);
50050 ++ }
50051 ++
50052 ++ return;
50053 ++}
50054 ++
50055 ++static void
50056 ++update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
50057 ++ const ino_t newinode, const dev_t newdevice)
50058 ++{
50059 ++ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
50060 ++ struct inodev_entry *match;
50061 ++
50062 ++ match = inodev_set.i_hash[index];
50063 ++
50064 ++ while (match && (match->nentry->inode != oldinode ||
50065 ++ match->nentry->device != olddevice || !match->nentry->deleted))
50066 ++ match = match->next;
50067 ++
50068 ++ if (match && (match->nentry->inode == oldinode)
50069 ++ && (match->nentry->device == olddevice) &&
50070 ++ match->nentry->deleted) {
50071 ++ if (match->prev == NULL) {
50072 ++ inodev_set.i_hash[index] = match->next;
50073 ++ if (match->next != NULL)
50074 ++ match->next->prev = NULL;
50075 ++ } else {
50076 ++ match->prev->next = match->next;
50077 ++ if (match->next != NULL)
50078 ++ match->next->prev = match->prev;
50079 ++ }
50080 ++ match->prev = NULL;
50081 ++ match->next = NULL;
50082 ++ match->nentry->inode = newinode;
50083 ++ match->nentry->device = newdevice;
50084 ++ match->nentry->deleted = 0;
50085 ++
50086 ++ insert_inodev_entry(match);
50087 ++ }
50088 ++
50089 ++ return;
50090 ++}
50091 ++
50092 ++static void
50093 ++__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
50094 ++{
50095 ++ struct acl_subject_label *subj;
50096 ++ struct acl_role_label *role;
50097 ++ unsigned int x;
50098 ++
50099 ++ FOR_EACH_ROLE_START(role)
50100 ++ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
50101 ++
50102 ++ FOR_EACH_NESTED_SUBJECT_START(role, subj)
50103 ++ if ((subj->inode == ino) && (subj->device == dev)) {
50104 ++ subj->inode = ino;
50105 ++ subj->device = dev;
50106 ++ }
50107 ++ FOR_EACH_NESTED_SUBJECT_END(subj)
50108 ++ FOR_EACH_SUBJECT_START(role, subj, x)
50109 ++ update_acl_obj_label(matchn->inode, matchn->device,
50110 ++ ino, dev, subj);
50111 ++ FOR_EACH_SUBJECT_END(subj,x)
50112 ++ FOR_EACH_ROLE_END(role)
50113 ++
50114 ++ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
50115 ++
50116 ++ return;
50117 ++}
50118 ++
50119 ++static void
50120 ++do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
50121 ++ const struct vfsmount *mnt)
50122 ++{
50123 ++ ino_t ino = dentry->d_inode->i_ino;
50124 ++ dev_t dev = __get_dev(dentry);
50125 ++
50126 ++ __do_handle_create(matchn, ino, dev);
50127 ++
50128 ++ return;
50129 ++}
50130 ++
50131 ++void
50132 ++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50133 ++{
50134 ++ struct name_entry *matchn;
50135 ++
50136 ++ if (unlikely(!(gr_status & GR_READY)))
50137 ++ return;
50138 ++
50139 ++ preempt_disable();
50140 ++ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
50141 ++
50142 ++ if (unlikely((unsigned long)matchn)) {
50143 ++ write_lock(&gr_inode_lock);
50144 ++ do_handle_create(matchn, dentry, mnt);
50145 ++ write_unlock(&gr_inode_lock);
50146 ++ }
50147 ++ preempt_enable();
50148 ++
50149 ++ return;
50150 ++}
50151 ++
50152 ++void
50153 ++gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
50154 ++{
50155 ++ struct name_entry *matchn;
50156 ++
50157 ++ if (unlikely(!(gr_status & GR_READY)))
50158 ++ return;
50159 ++
50160 ++ preempt_disable();
50161 ++ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
50162 ++
50163 ++ if (unlikely((unsigned long)matchn)) {
50164 ++ write_lock(&gr_inode_lock);
50165 ++ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
50166 ++ write_unlock(&gr_inode_lock);
50167 ++ }
50168 ++ preempt_enable();
50169 ++
50170 ++ return;
50171 ++}
50172 ++
50173 ++void
50174 ++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50175 ++ struct dentry *old_dentry,
50176 ++ struct dentry *new_dentry,
50177 ++ struct vfsmount *mnt, const __u8 replace)
50178 ++{
50179 ++ struct name_entry *matchn;
50180 ++ struct inodev_entry *inodev;
50181 ++ struct inode *inode = new_dentry->d_inode;
50182 ++ ino_t old_ino = old_dentry->d_inode->i_ino;
50183 ++ dev_t old_dev = __get_dev(old_dentry);
50184 ++
50185 ++ /* vfs_rename swaps the name and parent link for old_dentry and
50186 ++ new_dentry
50187 ++ at this point, old_dentry has the new name, parent link, and inode
50188 ++ for the renamed file
50189 ++ if a file is being replaced by a rename, new_dentry has the inode
50190 ++ and name for the replaced file
50191 ++ */
50192 ++
50193 ++ if (unlikely(!(gr_status & GR_READY)))
50194 ++ return;
50195 ++
50196 ++ preempt_disable();
50197 ++ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
50198 ++
50199 ++ /* we wouldn't have to check d_inode if it weren't for
50200 ++ NFS silly-renaming
50201 ++ */
50202 ++
50203 ++ write_lock(&gr_inode_lock);
50204 ++ if (unlikely(replace && inode)) {
50205 ++ ino_t new_ino = inode->i_ino;
50206 ++ dev_t new_dev = __get_dev(new_dentry);
50207 ++
50208 ++ inodev = lookup_inodev_entry(new_ino, new_dev);
50209 ++ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
50210 ++ do_handle_delete(inodev, new_ino, new_dev);
50211 ++ }
50212 ++
50213 ++ inodev = lookup_inodev_entry(old_ino, old_dev);
50214 ++ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
50215 ++ do_handle_delete(inodev, old_ino, old_dev);
50216 ++
50217 ++ if (unlikely((unsigned long)matchn))
50218 ++ do_handle_create(matchn, old_dentry, mnt);
50219 ++
50220 ++ write_unlock(&gr_inode_lock);
50221 ++ preempt_enable();
50222 ++
50223 ++ return;
50224 ++}
50225 ++
50226 ++static int
50227 ++lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
50228 ++ unsigned char **sum)
50229 ++{
50230 ++ struct acl_role_label *r;
50231 ++ struct role_allowed_ip *ipp;
50232 ++ struct role_transition *trans;
50233 ++ unsigned int i;
50234 ++ int found = 0;
50235 ++ u32 curr_ip = current->signal->curr_ip;
50236 ++
50237 ++ current->signal->saved_ip = curr_ip;
50238 ++
50239 ++ /* check transition table */
50240 ++
50241 ++ for (trans = current->role->transitions; trans; trans = trans->next) {
50242 ++ if (!strcmp(rolename, trans->rolename)) {
50243 ++ found = 1;
50244 ++ break;
50245 ++ }
50246 ++ }
50247 ++
50248 ++ if (!found)
50249 ++ return 0;
50250 ++
50251 ++ /* handle special roles that do not require authentication
50252 ++ and check ip */
50253 ++
50254 ++ FOR_EACH_ROLE_START(r)
50255 ++ if (!strcmp(rolename, r->rolename) &&
50256 ++ (r->roletype & GR_ROLE_SPECIAL)) {
50257 ++ found = 0;
50258 ++ if (r->allowed_ips != NULL) {
50259 ++ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
50260 ++ if ((ntohl(curr_ip) & ipp->netmask) ==
50261 ++ (ntohl(ipp->addr) & ipp->netmask))
50262 ++ found = 1;
50263 ++ }
50264 ++ } else
50265 ++ found = 2;
50266 ++ if (!found)
50267 ++ return 0;
50268 ++
50269 ++ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
50270 ++ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
50271 ++ *salt = NULL;
50272 ++ *sum = NULL;
50273 ++ return 1;
50274 ++ }
50275 ++ }
50276 ++ FOR_EACH_ROLE_END(r)
50277 ++
50278 ++ for (i = 0; i < num_sprole_pws; i++) {
50279 ++ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
50280 ++ *salt = acl_special_roles[i]->salt;
50281 ++ *sum = acl_special_roles[i]->sum;
50282 ++ return 1;
50283 ++ }
50284 ++ }
50285 ++
50286 ++ return 0;
50287 ++}
50288 ++
50289 ++static void
50290 ++assign_special_role(char *rolename)
50291 ++{
50292 ++ struct acl_object_label *obj;
50293 ++ struct acl_role_label *r;
50294 ++ struct acl_role_label *assigned = NULL;
50295 ++ struct task_struct *tsk;
50296 ++ struct file *filp;
50297 ++
50298 ++ FOR_EACH_ROLE_START(r)
50299 ++ if (!strcmp(rolename, r->rolename) &&
50300 ++ (r->roletype & GR_ROLE_SPECIAL)) {
50301 ++ assigned = r;
50302 ++ break;
50303 ++ }
50304 ++ FOR_EACH_ROLE_END(r)
50305 ++
50306 ++ if (!assigned)
50307 ++ return;
50308 ++
50309 ++ read_lock(&tasklist_lock);
50310 ++ read_lock(&grsec_exec_file_lock);
50311 ++
50312 ++ tsk = current->real_parent;
50313 ++ if (tsk == NULL)
50314 ++ goto out_unlock;
50315 ++
50316 ++ filp = tsk->exec_file;
50317 ++ if (filp == NULL)
50318 ++ goto out_unlock;
50319 ++
50320 ++ tsk->is_writable = 0;
50321 ++
50322 ++ tsk->acl_sp_role = 1;
50323 ++ tsk->acl_role_id = ++acl_sp_role_value;
50324 ++ tsk->role = assigned;
50325 ++ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
50326 ++
50327 ++ /* ignore additional mmap checks for processes that are writable
50328 ++ by the default ACL */
50329 ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50330 ++ if (unlikely(obj->mode & GR_WRITE))
50331 ++ tsk->is_writable = 1;
50332 ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
50333 ++ if (unlikely(obj->mode & GR_WRITE))
50334 ++ tsk->is_writable = 1;
50335 ++
50336 ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50337 ++ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
50338 ++#endif
50339 ++
50340 ++out_unlock:
50341 ++ read_unlock(&grsec_exec_file_lock);
50342 ++ read_unlock(&tasklist_lock);
50343 ++ return;
50344 ++}
50345 ++
50346 ++int gr_check_secure_terminal(struct task_struct *task)
50347 ++{
50348 ++ struct task_struct *p, *p2, *p3;
50349 ++ struct files_struct *files;
50350 ++ struct fdtable *fdt;
50351 ++ struct file *our_file = NULL, *file;
50352 ++ int i;
50353 ++
50354 ++ if (task->signal->tty == NULL)
50355 ++ return 1;
50356 ++
50357 ++ files = get_files_struct(task);
50358 ++ if (files != NULL) {
50359 ++ rcu_read_lock();
50360 ++ fdt = files_fdtable(files);
50361 ++ for (i=0; i < fdt->max_fds; i++) {
50362 ++ file = fcheck_files(files, i);
50363 ++ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
50364 ++ get_file(file);
50365 ++ our_file = file;
50366 ++ }
50367 ++ }
50368 ++ rcu_read_unlock();
50369 ++ put_files_struct(files);
50370 ++ }
50371 ++
50372 ++ if (our_file == NULL)
50373 ++ return 1;
50374 ++
50375 ++ read_lock(&tasklist_lock);
50376 ++ do_each_thread(p2, p) {
50377 ++ files = get_files_struct(p);
50378 ++ if (files == NULL ||
50379 ++ (p->signal && p->signal->tty == task->signal->tty)) {
50380 ++ if (files != NULL)
50381 ++ put_files_struct(files);
50382 ++ continue;
50383 ++ }
50384 ++ rcu_read_lock();
50385 ++ fdt = files_fdtable(files);
50386 ++ for (i=0; i < fdt->max_fds; i++) {
50387 ++ file = fcheck_files(files, i);
50388 ++ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
50389 ++ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
50390 ++ p3 = task;
50391 ++ while (p3->pid > 0) {
50392 ++ if (p3 == p)
50393 ++ break;
50394 ++ p3 = p3->real_parent;
50395 ++ }
50396 ++ if (p3 == p)
50397 ++ break;
50398 ++ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
50399 ++ gr_handle_alertkill(p);
50400 ++ rcu_read_unlock();
50401 ++ put_files_struct(files);
50402 ++ read_unlock(&tasklist_lock);
50403 ++ fput(our_file);
50404 ++ return 0;
50405 ++ }
50406 ++ }
50407 ++ rcu_read_unlock();
50408 ++ put_files_struct(files);
50409 ++ } while_each_thread(p2, p);
50410 ++ read_unlock(&tasklist_lock);
50411 ++
50412 ++ fput(our_file);
50413 ++ return 1;
50414 ++}
50415 ++
50416 ++ssize_t
50417 ++write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
50418 ++{
50419 ++ struct gr_arg_wrapper uwrap;
50420 ++ unsigned char *sprole_salt = NULL;
50421 ++ unsigned char *sprole_sum = NULL;
50422 ++ int error = sizeof (struct gr_arg_wrapper);
50423 ++ int error2 = 0;
50424 ++
50425 ++ mutex_lock(&gr_dev_mutex);
50426 ++
50427 ++ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
50428 ++ error = -EPERM;
50429 ++ goto out;
50430 ++ }
50431 ++
50432 ++ if (count != sizeof (struct gr_arg_wrapper)) {
50433 ++ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
50434 ++ error = -EINVAL;
50435 ++ goto out;
50436 ++ }
50437 ++
50438 ++
50439 ++ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
50440 ++ gr_auth_expires = 0;
50441 ++ gr_auth_attempts = 0;
50442 ++ }
50443 ++
50444 ++ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
50445 ++ error = -EFAULT;
50446 ++ goto out;
50447 ++ }
50448 ++
50449 ++ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
50450 ++ error = -EINVAL;
50451 ++ goto out;
50452 ++ }
50453 ++
50454 ++ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
50455 ++ error = -EFAULT;
50456 ++ goto out;
50457 ++ }
50458 ++
50459 ++ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50460 ++ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50461 ++ time_after(gr_auth_expires, get_seconds())) {
50462 ++ error = -EBUSY;
50463 ++ goto out;
50464 ++ }
50465 ++
50466 ++ /* if non-root trying to do anything other than use a special role,
50467 ++ do not attempt authentication, do not count towards authentication
50468 ++ locking
50469 ++ */
50470 ++
50471 ++ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
50472 ++ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50473 ++ current_uid()) {
50474 ++ error = -EPERM;
50475 ++ goto out;
50476 ++ }
50477 ++
50478 ++ /* ensure pw and special role name are null terminated */
50479 ++
50480 ++ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
50481 ++ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
50482 ++
50483 ++ /* Okay.
50484 ++ * We have our enough of the argument structure..(we have yet
50485 ++ * to copy_from_user the tables themselves) . Copy the tables
50486 ++ * only if we need them, i.e. for loading operations. */
50487 ++
50488 ++ switch (gr_usermode->mode) {
50489 ++ case GR_STATUS:
50490 ++ if (gr_status & GR_READY) {
50491 ++ error = 1;
50492 ++ if (!gr_check_secure_terminal(current))
50493 ++ error = 3;
50494 ++ } else
50495 ++ error = 2;
50496 ++ goto out;
50497 ++ case GR_SHUTDOWN:
50498 ++ if ((gr_status & GR_READY)
50499 ++ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50500 ++ pax_open_kernel();
50501 ++ gr_status &= ~GR_READY;
50502 ++ pax_close_kernel();
50503 ++
50504 ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
50505 ++ free_variables();
50506 ++ memset(gr_usermode, 0, sizeof (struct gr_arg));
50507 ++ memset(gr_system_salt, 0, GR_SALT_LEN);
50508 ++ memset(gr_system_sum, 0, GR_SHA_LEN);
50509 ++ } else if (gr_status & GR_READY) {
50510 ++ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
50511 ++ error = -EPERM;
50512 ++ } else {
50513 ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
50514 ++ error = -EAGAIN;
50515 ++ }
50516 ++ break;
50517 ++ case GR_ENABLE:
50518 ++ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
50519 ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
50520 ++ else {
50521 ++ if (gr_status & GR_READY)
50522 ++ error = -EAGAIN;
50523 ++ else
50524 ++ error = error2;
50525 ++ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
50526 ++ }
50527 ++ break;
50528 ++ case GR_RELOAD:
50529 ++ if (!(gr_status & GR_READY)) {
50530 ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
50531 ++ error = -EAGAIN;
50532 ++ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50533 ++ preempt_disable();
50534 ++
50535 ++ pax_open_kernel();
50536 ++ gr_status &= ~GR_READY;
50537 ++ pax_close_kernel();
50538 ++
50539 ++ free_variables();
50540 ++ if (!(error2 = gracl_init(gr_usermode))) {
50541 ++ preempt_enable();
50542 ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
50543 ++ } else {
50544 ++ preempt_enable();
50545 ++ error = error2;
50546 ++ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50547 ++ }
50548 ++ } else {
50549 ++ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50550 ++ error = -EPERM;
50551 ++ }
50552 ++ break;
50553 ++ case GR_SEGVMOD:
50554 ++ if (unlikely(!(gr_status & GR_READY))) {
50555 ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
50556 ++ error = -EAGAIN;
50557 ++ break;
50558 ++ }
50559 ++
50560 ++ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50561 ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
50562 ++ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
50563 ++ struct acl_subject_label *segvacl;
50564 ++ segvacl =
50565 ++ lookup_acl_subj_label(gr_usermode->segv_inode,
50566 ++ gr_usermode->segv_device,
50567 ++ current->role);
50568 ++ if (segvacl) {
50569 ++ segvacl->crashes = 0;
50570 ++ segvacl->expires = 0;
50571 ++ }
50572 ++ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
50573 ++ gr_remove_uid(gr_usermode->segv_uid);
50574 ++ }
50575 ++ } else {
50576 ++ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
50577 ++ error = -EPERM;
50578 ++ }
50579 ++ break;
50580 ++ case GR_SPROLE:
50581 ++ case GR_SPROLEPAM:
50582 ++ if (unlikely(!(gr_status & GR_READY))) {
50583 ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
50584 ++ error = -EAGAIN;
50585 ++ break;
50586 ++ }
50587 ++
50588 ++ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
50589 ++ current->role->expires = 0;
50590 ++ current->role->auth_attempts = 0;
50591 ++ }
50592 ++
50593 ++ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50594 ++ time_after(current->role->expires, get_seconds())) {
50595 ++ error = -EBUSY;
50596 ++ goto out;
50597 ++ }
50598 ++
50599 ++ if (lookup_special_role_auth
50600 ++ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
50601 ++ && ((!sprole_salt && !sprole_sum)
50602 ++ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
50603 ++ char *p = "";
50604 ++ assign_special_role(gr_usermode->sp_role);
50605 ++ read_lock(&tasklist_lock);
50606 ++ if (current->real_parent)
50607 ++ p = current->real_parent->role->rolename;
50608 ++ read_unlock(&tasklist_lock);
50609 ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
50610 ++ p, acl_sp_role_value);
50611 ++ } else {
50612 ++ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
50613 ++ error = -EPERM;
50614 ++ if(!(current->role->auth_attempts++))
50615 ++ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50616 ++
50617 ++ goto out;
50618 ++ }
50619 ++ break;
50620 ++ case GR_UNSPROLE:
50621 ++ if (unlikely(!(gr_status & GR_READY))) {
50622 ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
50623 ++ error = -EAGAIN;
50624 ++ break;
50625 ++ }
50626 ++
50627 ++ if (current->role->roletype & GR_ROLE_SPECIAL) {
50628 ++ char *p = "";
50629 ++ int i = 0;
50630 ++
50631 ++ read_lock(&tasklist_lock);
50632 ++ if (current->real_parent) {
50633 ++ p = current->real_parent->role->rolename;
50634 ++ i = current->real_parent->acl_role_id;
50635 ++ }
50636 ++ read_unlock(&tasklist_lock);
50637 ++
50638 ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
50639 ++ gr_set_acls(1);
50640 ++ } else {
50641 ++ error = -EPERM;
50642 ++ goto out;
50643 ++ }
50644 ++ break;
50645 ++ default:
50646 ++ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
50647 ++ error = -EINVAL;
50648 ++ break;
50649 ++ }
50650 ++
50651 ++ if (error != -EPERM)
50652 ++ goto out;
50653 ++
50654 ++ if(!(gr_auth_attempts++))
50655 ++ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50656 ++
50657 ++ out:
50658 ++ mutex_unlock(&gr_dev_mutex);
50659 ++ return error;
50660 ++}
50661 ++
50662 ++/* must be called with
50663 ++ rcu_read_lock();
50664 ++ read_lock(&tasklist_lock);
50665 ++ read_lock(&grsec_exec_file_lock);
50666 ++*/
50667 ++int gr_apply_subject_to_task(struct task_struct *task)
50668 ++{
50669 ++ struct acl_object_label *obj;
50670 ++ char *tmpname;
50671 ++ struct acl_subject_label *tmpsubj;
50672 ++ struct file *filp;
50673 ++ struct name_entry *nmatch;
50674 ++
50675 ++ filp = task->exec_file;
50676 ++ if (filp == NULL)
50677 ++ return 0;
50678 ++
50679 ++ /* the following is to apply the correct subject
50680 ++ on binaries running when the RBAC system
50681 ++ is enabled, when the binaries have been
50682 ++ replaced or deleted since their execution
50683 ++ -----
50684 ++ when the RBAC system starts, the inode/dev
50685 ++ from exec_file will be one the RBAC system
50686 ++ is unaware of. It only knows the inode/dev
50687 ++ of the present file on disk, or the absence
50688 ++ of it.
50689 ++ */
50690 ++ preempt_disable();
50691 ++ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
50692 ++
50693 ++ nmatch = lookup_name_entry(tmpname);
50694 ++ preempt_enable();
50695 ++ tmpsubj = NULL;
50696 ++ if (nmatch) {
50697 ++ if (nmatch->deleted)
50698 ++ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
50699 ++ else
50700 ++ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
50701 ++ if (tmpsubj != NULL)
50702 ++ task->acl = tmpsubj;
50703 ++ }
50704 ++ if (tmpsubj == NULL)
50705 ++ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
50706 ++ task->role);
50707 ++ if (task->acl) {
50708 ++ task->is_writable = 0;
50709 ++ /* ignore additional mmap checks for processes that are writable
50710 ++ by the default ACL */
50711 ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50712 ++ if (unlikely(obj->mode & GR_WRITE))
50713 ++ task->is_writable = 1;
50714 ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50715 ++ if (unlikely(obj->mode & GR_WRITE))
50716 ++ task->is_writable = 1;
50717 ++
50718 ++ gr_set_proc_res(task);
50719 ++
50720 ++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50721 ++ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50722 ++#endif
50723 ++ } else {
50724 ++ return 1;
50725 ++ }
50726 ++
50727 ++ return 0;
50728 ++}
50729 ++
50730 ++int
50731 ++gr_set_acls(const int type)
50732 ++{
50733 ++ struct task_struct *task, *task2;
50734 ++ struct acl_role_label *role = current->role;
50735 ++ __u16 acl_role_id = current->acl_role_id;
50736 ++ const struct cred *cred;
50737 ++ int ret;
50738 ++
50739 ++ rcu_read_lock();
50740 ++ read_lock(&tasklist_lock);
50741 ++ read_lock(&grsec_exec_file_lock);
50742 ++ do_each_thread(task2, task) {
50743 ++ /* check to see if we're called from the exit handler,
50744 ++ if so, only replace ACLs that have inherited the admin
50745 ++ ACL */
50746 ++
50747 ++ if (type && (task->role != role ||
50748 ++ task->acl_role_id != acl_role_id))
50749 ++ continue;
50750 ++
50751 ++ task->acl_role_id = 0;
50752 ++ task->acl_sp_role = 0;
50753 ++
50754 ++ if (task->exec_file) {
50755 ++ cred = __task_cred(task);
50756 ++ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50757 ++ ret = gr_apply_subject_to_task(task);
50758 ++ if (ret) {
50759 ++ read_unlock(&grsec_exec_file_lock);
50760 ++ read_unlock(&tasklist_lock);
50761 ++ rcu_read_unlock();
50762 ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50763 ++ return ret;
50764 ++ }
50765 ++ } else {
50766 ++ // it's a kernel process
50767 ++ task->role = kernel_role;
50768 ++ task->acl = kernel_role->root_label;
50769 ++#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50770 ++ task->acl->mode &= ~GR_PROCFIND;
50771 ++#endif
50772 ++ }
50773 ++ } while_each_thread(task2, task);
50774 ++ read_unlock(&grsec_exec_file_lock);
50775 ++ read_unlock(&tasklist_lock);
50776 ++ rcu_read_unlock();
50777 ++
50778 ++ return 0;
50779 ++}
50780 ++
50781 ++void
50782 ++gr_learn_resource(const struct task_struct *task,
50783 ++ const int res, const unsigned long wanted, const int gt)
50784 ++{
50785 ++ struct acl_subject_label *acl;
50786 ++ const struct cred *cred;
50787 ++
50788 ++ if (unlikely((gr_status & GR_READY) &&
50789 ++ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50790 ++ goto skip_reslog;
50791 ++
50792 ++#ifdef CONFIG_GRKERNSEC_RESLOG
50793 ++ gr_log_resource(task, res, wanted, gt);
50794 ++#endif
50795 ++ skip_reslog:
50796 ++
50797 ++ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50798 ++ return;
50799 ++
50800 ++ acl = task->acl;
50801 ++
50802 ++ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50803 ++ !(acl->resmask & (1 << (unsigned short) res))))
50804 ++ return;
50805 ++
50806 ++ if (wanted >= acl->res[res].rlim_cur) {
50807 ++ unsigned long res_add;
50808 ++
50809 ++ res_add = wanted;
50810 ++ switch (res) {
50811 ++ case RLIMIT_CPU:
50812 ++ res_add += GR_RLIM_CPU_BUMP;
50813 ++ break;
50814 ++ case RLIMIT_FSIZE:
50815 ++ res_add += GR_RLIM_FSIZE_BUMP;
50816 ++ break;
50817 ++ case RLIMIT_DATA:
50818 ++ res_add += GR_RLIM_DATA_BUMP;
50819 ++ break;
50820 ++ case RLIMIT_STACK:
50821 ++ res_add += GR_RLIM_STACK_BUMP;
50822 ++ break;
50823 ++ case RLIMIT_CORE:
50824 ++ res_add += GR_RLIM_CORE_BUMP;
50825 ++ break;
50826 ++ case RLIMIT_RSS:
50827 ++ res_add += GR_RLIM_RSS_BUMP;
50828 ++ break;
50829 ++ case RLIMIT_NPROC:
50830 ++ res_add += GR_RLIM_NPROC_BUMP;
50831 ++ break;
50832 ++ case RLIMIT_NOFILE:
50833 ++ res_add += GR_RLIM_NOFILE_BUMP;
50834 ++ break;
50835 ++ case RLIMIT_MEMLOCK:
50836 ++ res_add += GR_RLIM_MEMLOCK_BUMP;
50837 ++ break;
50838 ++ case RLIMIT_AS:
50839 ++ res_add += GR_RLIM_AS_BUMP;
50840 ++ break;
50841 ++ case RLIMIT_LOCKS:
50842 ++ res_add += GR_RLIM_LOCKS_BUMP;
50843 ++ break;
50844 ++ case RLIMIT_SIGPENDING:
50845 ++ res_add += GR_RLIM_SIGPENDING_BUMP;
50846 ++ break;
50847 ++ case RLIMIT_MSGQUEUE:
50848 ++ res_add += GR_RLIM_MSGQUEUE_BUMP;
50849 ++ break;
50850 ++ case RLIMIT_NICE:
50851 ++ res_add += GR_RLIM_NICE_BUMP;
50852 ++ break;
50853 ++ case RLIMIT_RTPRIO:
50854 ++ res_add += GR_RLIM_RTPRIO_BUMP;
50855 ++ break;
50856 ++ case RLIMIT_RTTIME:
50857 ++ res_add += GR_RLIM_RTTIME_BUMP;
50858 ++ break;
50859 ++ }
50860 ++
50861 ++ acl->res[res].rlim_cur = res_add;
50862 ++
50863 ++ if (wanted > acl->res[res].rlim_max)
50864 ++ acl->res[res].rlim_max = res_add;
50865 ++
50866 ++ /* only log the subject filename, since resource logging is supported for
50867 ++ single-subject learning only */
50868 ++ rcu_read_lock();
50869 ++ cred = __task_cred(task);
50870 ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50871 ++ task->role->roletype, cred->uid, cred->gid, acl->filename,
50872 ++ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50873 ++ "", (unsigned long) res, &task->signal->saved_ip);
50874 ++ rcu_read_unlock();
50875 ++ }
50876 ++
50877 ++ return;
50878 ++}
50879 ++
50880 ++#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50881 ++void
50882 ++pax_set_initial_flags(struct linux_binprm *bprm)
50883 ++{
50884 ++ struct task_struct *task = current;
50885 ++ struct acl_subject_label *proc;
50886 ++ unsigned long flags;
50887 ++
50888 ++ if (unlikely(!(gr_status & GR_READY)))
50889 ++ return;
50890 ++
50891 ++ flags = pax_get_flags(task);
50892 ++
50893 ++ proc = task->acl;
50894 ++
50895 ++ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50896 ++ flags &= ~MF_PAX_PAGEEXEC;
50897 ++ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50898 ++ flags &= ~MF_PAX_SEGMEXEC;
50899 ++ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50900 ++ flags &= ~MF_PAX_RANDMMAP;
50901 ++ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50902 ++ flags &= ~MF_PAX_EMUTRAMP;
50903 ++ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50904 ++ flags &= ~MF_PAX_MPROTECT;
50905 ++
50906 ++ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50907 ++ flags |= MF_PAX_PAGEEXEC;
50908 ++ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50909 ++ flags |= MF_PAX_SEGMEXEC;
50910 ++ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50911 ++ flags |= MF_PAX_RANDMMAP;
50912 ++ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50913 ++ flags |= MF_PAX_EMUTRAMP;
50914 ++ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50915 ++ flags |= MF_PAX_MPROTECT;
50916 ++
50917 ++ pax_set_flags(task, flags);
50918 ++
50919 ++ return;
50920 ++}
50921 ++#endif
50922 ++
50923 ++#ifdef CONFIG_SYSCTL
50924 ++/* Eric Biederman likes breaking userland ABI and every inode-based security
50925 ++ system to save 35kb of memory */
50926 ++
50927 ++/* we modify the passed in filename, but adjust it back before returning */
50928 ++static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50929 ++{
50930 ++ struct name_entry *nmatch;
50931 ++ char *p, *lastp = NULL;
50932 ++ struct acl_object_label *obj = NULL, *tmp;
50933 ++ struct acl_subject_label *tmpsubj;
50934 ++ char c = '\0';
50935 ++
50936 ++ read_lock(&gr_inode_lock);
50937 ++
50938 ++ p = name + len - 1;
50939 ++ do {
50940 ++ nmatch = lookup_name_entry(name);
50941 ++ if (lastp != NULL)
50942 ++ *lastp = c;
50943 ++
50944 ++ if (nmatch == NULL)
50945 ++ goto next_component;
50946 ++ tmpsubj = current->acl;
50947 ++ do {
50948 ++ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50949 ++ if (obj != NULL) {
50950 ++ tmp = obj->globbed;
50951 ++ while (tmp) {
50952 ++ if (!glob_match(tmp->filename, name)) {
50953 ++ obj = tmp;
50954 ++ goto found_obj;
50955 ++ }
50956 ++ tmp = tmp->next;
50957 ++ }
50958 ++ goto found_obj;
50959 ++ }
50960 ++ } while ((tmpsubj = tmpsubj->parent_subject));
50961 ++next_component:
50962 ++ /* end case */
50963 ++ if (p == name)
50964 ++ break;
50965 ++
50966 ++ while (*p != '/')
50967 ++ p--;
50968 ++ if (p == name)
50969 ++ lastp = p + 1;
50970 ++ else {
50971 ++ lastp = p;
50972 ++ p--;
50973 ++ }
50974 ++ c = *lastp;
50975 ++ *lastp = '\0';
50976 ++ } while (1);
50977 ++found_obj:
50978 ++ read_unlock(&gr_inode_lock);
50979 ++ /* obj returned will always be non-null */
50980 ++ return obj;
50981 ++}
50982 ++
50983 ++/* returns 0 when allowing, non-zero on error
50984 ++ op of 0 is used for readdir, so we don't log the names of hidden files
50985 ++*/
50986 ++__u32
50987 ++gr_handle_sysctl(const struct ctl_table *table, const int op)
50988 ++{
50989 ++ struct ctl_table *tmp;
50990 ++ const char *proc_sys = "/proc/sys";
50991 ++ char *path;
50992 ++ struct acl_object_label *obj;
50993 ++ unsigned short len = 0, pos = 0, depth = 0, i;
50994 ++ __u32 err = 0;
50995 ++ __u32 mode = 0;
50996 ++
50997 ++ if (unlikely(!(gr_status & GR_READY)))
50998 ++ return 0;
50999 ++
51000 ++ /* for now, ignore operations on non-sysctl entries if it's not a
51001 ++ readdir*/
51002 ++ if (table->child != NULL && op != 0)
51003 ++ return 0;
51004 ++
51005 ++ mode |= GR_FIND;
51006 ++ /* it's only a read if it's an entry, read on dirs is for readdir */
51007 ++ if (op & MAY_READ)
51008 ++ mode |= GR_READ;
51009 ++ if (op & MAY_WRITE)
51010 ++ mode |= GR_WRITE;
51011 ++
51012 ++ preempt_disable();
51013 ++
51014 ++ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51015 ++
51016 ++ /* it's only a read/write if it's an actual entry, not a dir
51017 ++ (which are opened for readdir)
51018 ++ */
51019 ++
51020 ++ /* convert the requested sysctl entry into a pathname */
51021 ++
51022 ++ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51023 ++ len += strlen(tmp->procname);
51024 ++ len++;
51025 ++ depth++;
51026 ++ }
51027 ++
51028 ++ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
51029 ++ /* deny */
51030 ++ goto out;
51031 ++ }
51032 ++
51033 ++ memset(path, 0, PAGE_SIZE);
51034 ++
51035 ++ memcpy(path, proc_sys, strlen(proc_sys));
51036 ++
51037 ++ pos += strlen(proc_sys);
51038 ++
51039 ++ for (; depth > 0; depth--) {
51040 ++ path[pos] = '/';
51041 ++ pos++;
51042 ++ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51043 ++ if (depth == i) {
51044 ++ memcpy(path + pos, tmp->procname,
51045 ++ strlen(tmp->procname));
51046 ++ pos += strlen(tmp->procname);
51047 ++ }
51048 ++ i++;
51049 ++ }
51050 ++ }
51051 ++
51052 ++ obj = gr_lookup_by_name(path, pos);
51053 ++ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
51054 ++
51055 ++ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
51056 ++ ((err & mode) != mode))) {
51057 ++ __u32 new_mode = mode;
51058 ++
51059 ++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51060 ++
51061 ++ err = 0;
51062 ++ gr_log_learn_sysctl(path, new_mode);
51063 ++ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
51064 ++ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
51065 ++ err = -ENOENT;
51066 ++ } else if (!(err & GR_FIND)) {
51067 ++ err = -ENOENT;
51068 ++ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
51069 ++ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
51070 ++ path, (mode & GR_READ) ? " reading" : "",
51071 ++ (mode & GR_WRITE) ? " writing" : "");
51072 ++ err = -EACCES;
51073 ++ } else if ((err & mode) != mode) {
51074 ++ err = -EACCES;
51075 ++ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
51076 ++ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
51077 ++ path, (mode & GR_READ) ? " reading" : "",
51078 ++ (mode & GR_WRITE) ? " writing" : "");
51079 ++ err = 0;
51080 ++ } else
51081 ++ err = 0;
51082 ++
51083 ++ out:
51084 ++ preempt_enable();
51085 ++
51086 ++ return err;
51087 ++}
51088 ++#endif
51089 ++
51090 ++int
51091 ++gr_handle_proc_ptrace(struct task_struct *task)
51092 ++{
51093 ++ struct file *filp;
51094 ++ struct task_struct *tmp = task;
51095 ++ struct task_struct *curtemp = current;
51096 ++ __u32 retmode;
51097 ++
51098 ++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51099 ++ if (unlikely(!(gr_status & GR_READY)))
51100 ++ return 0;
51101 ++#endif
51102 ++
51103 ++ read_lock(&tasklist_lock);
51104 ++ read_lock(&grsec_exec_file_lock);
51105 ++ filp = task->exec_file;
51106 ++
51107 ++ while (tmp->pid > 0) {
51108 ++ if (tmp == curtemp)
51109 ++ break;
51110 ++ tmp = tmp->real_parent;
51111 ++ }
51112 ++
51113 ++ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51114 ++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
51115 ++ read_unlock(&grsec_exec_file_lock);
51116 ++ read_unlock(&tasklist_lock);
51117 ++ return 1;
51118 ++ }
51119 ++
51120 ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51121 ++ if (!(gr_status & GR_READY)) {
51122 ++ read_unlock(&grsec_exec_file_lock);
51123 ++ read_unlock(&tasklist_lock);
51124 ++ return 0;
51125 ++ }
51126 ++#endif
51127 ++
51128 ++ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
51129 ++ read_unlock(&grsec_exec_file_lock);
51130 ++ read_unlock(&tasklist_lock);
51131 ++
51132 ++ if (retmode & GR_NOPTRACE)
51133 ++ return 1;
51134 ++
51135 ++ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
51136 ++ && (current->acl != task->acl || (current->acl != current->role->root_label
51137 ++ && current->pid != task->pid)))
51138 ++ return 1;
51139 ++
51140 ++ return 0;
51141 ++}
51142 ++
51143 ++void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
51144 ++{
51145 ++ if (unlikely(!(gr_status & GR_READY)))
51146 ++ return;
51147 ++
51148 ++ if (!(current->role->roletype & GR_ROLE_GOD))
51149 ++ return;
51150 ++
51151 ++ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
51152 ++ p->role->rolename, gr_task_roletype_to_char(p),
51153 ++ p->acl->filename);
51154 ++}
51155 ++
51156 ++int
51157 ++gr_handle_ptrace(struct task_struct *task, const long request)
51158 ++{
51159 ++ struct task_struct *tmp = task;
51160 ++ struct task_struct *curtemp = current;
51161 ++ __u32 retmode;
51162 ++
51163 ++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51164 ++ if (unlikely(!(gr_status & GR_READY)))
51165 ++ return 0;
51166 ++#endif
51167 ++
51168 ++ read_lock(&tasklist_lock);
51169 ++ while (tmp->pid > 0) {
51170 ++ if (tmp == curtemp)
51171 ++ break;
51172 ++ tmp = tmp->real_parent;
51173 ++ }
51174 ++
51175 ++ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51176 ++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
51177 ++ read_unlock(&tasklist_lock);
51178 ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51179 ++ return 1;
51180 ++ }
51181 ++ read_unlock(&tasklist_lock);
51182 ++
51183 ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51184 ++ if (!(gr_status & GR_READY))
51185 ++ return 0;
51186 ++#endif
51187 ++
51188 ++ read_lock(&grsec_exec_file_lock);
51189 ++ if (unlikely(!task->exec_file)) {
51190 ++ read_unlock(&grsec_exec_file_lock);
51191 ++ return 0;
51192 ++ }
51193 ++
51194 ++ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
51195 ++ read_unlock(&grsec_exec_file_lock);
51196 ++
51197 ++ if (retmode & GR_NOPTRACE) {
51198 ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51199 ++ return 1;
51200 ++ }
51201 ++
51202 ++ if (retmode & GR_PTRACERD) {
51203 ++ switch (request) {
51204 ++ case PTRACE_SEIZE:
51205 ++ case PTRACE_POKETEXT:
51206 ++ case PTRACE_POKEDATA:
51207 ++ case PTRACE_POKEUSR:
51208 ++#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
51209 ++ case PTRACE_SETREGS:
51210 ++ case PTRACE_SETFPREGS:
51211 ++#endif
51212 ++#ifdef CONFIG_X86
51213 ++ case PTRACE_SETFPXREGS:
51214 ++#endif
51215 ++#ifdef CONFIG_ALTIVEC
51216 ++ case PTRACE_SETVRREGS:
51217 ++#endif
51218 ++ return 1;
51219 ++ default:
51220 ++ return 0;
51221 ++ }
51222 ++ } else if (!(current->acl->mode & GR_POVERRIDE) &&
51223 ++ !(current->role->roletype & GR_ROLE_GOD) &&
51224 ++ (current->acl != task->acl)) {
51225 ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51226 ++ return 1;
51227 ++ }
51228 ++
51229 ++ return 0;
51230 ++}
51231 ++
51232 ++static int is_writable_mmap(const struct file *filp)
51233 ++{
51234 ++ struct task_struct *task = current;
51235 ++ struct acl_object_label *obj, *obj2;
51236 ++
51237 ++ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
51238 ++ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
51239 ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51240 ++ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
51241 ++ task->role->root_label);
51242 ++ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
51243 ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
51244 ++ return 1;
51245 ++ }
51246 ++ }
51247 ++ return 0;
51248 ++}
51249 ++
51250 ++int
51251 ++gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
51252 ++{
51253 ++ __u32 mode;
51254 ++
51255 ++ if (unlikely(!file || !(prot & PROT_EXEC)))
51256 ++ return 1;
51257 ++
51258 ++ if (is_writable_mmap(file))
51259 ++ return 0;
51260 ++
51261 ++ mode =
51262 ++ gr_search_file(file->f_path.dentry,
51263 ++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51264 ++ file->f_path.mnt);
51265 ++
51266 ++ if (!gr_tpe_allow(file))
51267 ++ return 0;
51268 ++
51269 ++ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51270 ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51271 ++ return 0;
51272 ++ } else if (unlikely(!(mode & GR_EXEC))) {
51273 ++ return 0;
51274 ++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51275 ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51276 ++ return 1;
51277 ++ }
51278 ++
51279 ++ return 1;
51280 ++}
51281 ++
51282 ++int
51283 ++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
51284 ++{
51285 ++ __u32 mode;
51286 ++
51287 ++ if (unlikely(!file || !(prot & PROT_EXEC)))
51288 ++ return 1;
51289 ++
51290 ++ if (is_writable_mmap(file))
51291 ++ return 0;
51292 ++
51293 ++ mode =
51294 ++ gr_search_file(file->f_path.dentry,
51295 ++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51296 ++ file->f_path.mnt);
51297 ++
51298 ++ if (!gr_tpe_allow(file))
51299 ++ return 0;
51300 ++
51301 ++ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51302 ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51303 ++ return 0;
51304 ++ } else if (unlikely(!(mode & GR_EXEC))) {
51305 ++ return 0;
51306 ++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51307 ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51308 ++ return 1;
51309 ++ }
51310 ++
51311 ++ return 1;
51312 ++}
51313 ++
51314 ++void
51315 ++gr_acl_handle_psacct(struct task_struct *task, const long code)
51316 ++{
51317 ++ unsigned long runtime;
51318 ++ unsigned long cputime;
51319 ++ unsigned int wday, cday;
51320 ++ __u8 whr, chr;
51321 ++ __u8 wmin, cmin;
51322 ++ __u8 wsec, csec;
51323 ++ struct timespec timeval;
51324 ++
51325 ++ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
51326 ++ !(task->acl->mode & GR_PROCACCT)))
51327 ++ return;
51328 ++
51329 ++ do_posix_clock_monotonic_gettime(&timeval);
51330 ++ runtime = timeval.tv_sec - task->start_time.tv_sec;
51331 ++ wday = runtime / (3600 * 24);
51332 ++ runtime -= wday * (3600 * 24);
51333 ++ whr = runtime / 3600;
51334 ++ runtime -= whr * 3600;
51335 ++ wmin = runtime / 60;
51336 ++ runtime -= wmin * 60;
51337 ++ wsec = runtime;
51338 ++
51339 ++ cputime = (task->utime + task->stime) / HZ;
51340 ++ cday = cputime / (3600 * 24);
51341 ++ cputime -= cday * (3600 * 24);
51342 ++ chr = cputime / 3600;
51343 ++ cputime -= chr * 3600;
51344 ++ cmin = cputime / 60;
51345 ++ cputime -= cmin * 60;
51346 ++ csec = cputime;
51347 ++
51348 ++ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
51349 ++
51350 ++ return;
51351 ++}
51352 ++
51353 ++void gr_set_kernel_label(struct task_struct *task)
51354 ++{
51355 ++ if (gr_status & GR_READY) {
51356 ++ task->role = kernel_role;
51357 ++ task->acl = kernel_role->root_label;
51358 ++ }
51359 ++ return;
51360 ++}
51361 ++
51362 ++#ifdef CONFIG_TASKSTATS
51363 ++int gr_is_taskstats_denied(int pid)
51364 ++{
51365 ++ struct task_struct *task;
51366 ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51367 ++ const struct cred *cred;
51368 ++#endif
51369 ++ int ret = 0;
51370 ++
51371 ++ /* restrict taskstats viewing to un-chrooted root users
51372 ++ who have the 'view' subject flag if the RBAC system is enabled
51373 ++ */
51374 ++
51375 ++ rcu_read_lock();
51376 ++ read_lock(&tasklist_lock);
51377 ++ task = find_task_by_vpid(pid);
51378 ++ if (task) {
51379 ++#ifdef CONFIG_GRKERNSEC_CHROOT
51380 ++ if (proc_is_chrooted(task))
51381 ++ ret = -EACCES;
51382 ++#endif
51383 ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51384 ++ cred = __task_cred(task);
51385 ++#ifdef CONFIG_GRKERNSEC_PROC_USER
51386 ++ if (cred->uid != 0)
51387 ++ ret = -EACCES;
51388 ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51389 ++ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
51390 ++ ret = -EACCES;
51391 ++#endif
51392 ++#endif
51393 ++ if (gr_status & GR_READY) {
51394 ++ if (!(task->acl->mode & GR_VIEW))
51395 ++ ret = -EACCES;
51396 ++ }
51397 ++ } else
51398 ++ ret = -ENOENT;
51399 ++
51400 ++ read_unlock(&tasklist_lock);
51401 ++ rcu_read_unlock();
51402 ++
51403 ++ return ret;
51404 ++}
51405 ++#endif
51406 ++
51407 ++/* AUXV entries are filled via a descendant of search_binary_handler
51408 ++ after we've already applied the subject for the target
51409 ++*/
51410 ++int gr_acl_enable_at_secure(void)
51411 ++{
51412 ++ if (unlikely(!(gr_status & GR_READY)))
51413 ++ return 0;
51414 ++
51415 ++ if (current->acl->mode & GR_ATSECURE)
51416 ++ return 1;
51417 ++
51418 ++ return 0;
51419 ++}
51420 ++
51421 ++int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
51422 ++{
51423 ++ struct task_struct *task = current;
51424 ++ struct dentry *dentry = file->f_path.dentry;
51425 ++ struct vfsmount *mnt = file->f_path.mnt;
51426 ++ struct acl_object_label *obj, *tmp;
51427 ++ struct acl_subject_label *subj;
51428 ++ unsigned int bufsize;
51429 ++ int is_not_root;
51430 ++ char *path;
51431 ++ dev_t dev = __get_dev(dentry);
51432 ++
51433 ++ if (unlikely(!(gr_status & GR_READY)))
51434 ++ return 1;
51435 ++
51436 ++ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
51437 ++ return 1;
51438 ++
51439 ++ /* ignore Eric Biederman */
51440 ++ if (IS_PRIVATE(dentry->d_inode))
51441 ++ return 1;
51442 ++
51443 ++ subj = task->acl;
51444 ++ do {
51445 ++ obj = lookup_acl_obj_label(ino, dev, subj);
51446 ++ if (obj != NULL)
51447 ++ return (obj->mode & GR_FIND) ? 1 : 0;
51448 ++ } while ((subj = subj->parent_subject));
51449 ++
51450 ++ /* this is purely an optimization since we're looking for an object
51451 ++ for the directory we're doing a readdir on
51452 ++ if it's possible for any globbed object to match the entry we're
51453 ++ filling into the directory, then the object we find here will be
51454 ++ an anchor point with attached globbed objects
51455 ++ */
51456 ++ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
51457 ++ if (obj->globbed == NULL)
51458 ++ return (obj->mode & GR_FIND) ? 1 : 0;
51459 ++
51460 ++ is_not_root = ((obj->filename[0] == '/') &&
51461 ++ (obj->filename[1] == '\0')) ? 0 : 1;
51462 ++ bufsize = PAGE_SIZE - namelen - is_not_root;
51463 ++
51464 ++ /* check bufsize > PAGE_SIZE || bufsize == 0 */
51465 ++ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
51466 ++ return 1;
51467 ++
51468 ++ preempt_disable();
51469 ++ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51470 ++ bufsize);
51471 ++
51472 ++ bufsize = strlen(path);
51473 ++
51474 ++ /* if base is "/", don't append an additional slash */
51475 ++ if (is_not_root)
51476 ++ *(path + bufsize) = '/';
51477 ++ memcpy(path + bufsize + is_not_root, name, namelen);
51478 ++ *(path + bufsize + namelen + is_not_root) = '\0';
51479 ++
51480 ++ tmp = obj->globbed;
51481 ++ while (tmp) {
51482 ++ if (!glob_match(tmp->filename, path)) {
51483 ++ preempt_enable();
51484 ++ return (tmp->mode & GR_FIND) ? 1 : 0;
51485 ++ }
51486 ++ tmp = tmp->next;
51487 ++ }
51488 ++ preempt_enable();
51489 ++ return (obj->mode & GR_FIND) ? 1 : 0;
51490 ++}
51491 ++
51492 ++#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
51493 ++EXPORT_SYMBOL(gr_acl_is_enabled);
51494 ++#endif
51495 ++EXPORT_SYMBOL(gr_learn_resource);
51496 ++EXPORT_SYMBOL(gr_set_kernel_label);
51497 ++#ifdef CONFIG_SECURITY
51498 ++EXPORT_SYMBOL(gr_check_user_change);
51499 ++EXPORT_SYMBOL(gr_check_group_change);
51500 ++#endif
51501 ++
51502 +diff -urNp linux-3.1.1/grsecurity/gracl_cap.c linux-3.1.1/grsecurity/gracl_cap.c
51503 +--- linux-3.1.1/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
51504 ++++ linux-3.1.1/grsecurity/gracl_cap.c 2011-11-16 18:40:31.000000000 -0500
51505 +@@ -0,0 +1,101 @@
51506 ++#include <linux/kernel.h>
51507 ++#include <linux/module.h>
51508 ++#include <linux/sched.h>
51509 ++#include <linux/gracl.h>
51510 ++#include <linux/grsecurity.h>
51511 ++#include <linux/grinternal.h>
51512 ++
51513 ++extern const char *captab_log[];
51514 ++extern int captab_log_entries;
51515 ++
51516 ++int
51517 ++gr_acl_is_capable(const int cap)
51518 ++{
51519 ++ struct task_struct *task = current;
51520 ++ const struct cred *cred = current_cred();
51521 ++ struct acl_subject_label *curracl;
51522 ++ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51523 ++ kernel_cap_t cap_audit = __cap_empty_set;
51524 ++
51525 ++ if (!gr_acl_is_enabled())
51526 ++ return 1;
51527 ++
51528 ++ curracl = task->acl;
51529 ++
51530 ++ cap_drop = curracl->cap_lower;
51531 ++ cap_mask = curracl->cap_mask;
51532 ++ cap_audit = curracl->cap_invert_audit;
51533 ++
51534 ++ while ((curracl = curracl->parent_subject)) {
51535 ++ /* if the cap isn't specified in the current computed mask but is specified in the
51536 ++ current level subject, and is lowered in the current level subject, then add
51537 ++ it to the set of dropped capabilities
51538 ++ otherwise, add the current level subject's mask to the current computed mask
51539 ++ */
51540 ++ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51541 ++ cap_raise(cap_mask, cap);
51542 ++ if (cap_raised(curracl->cap_lower, cap))
51543 ++ cap_raise(cap_drop, cap);
51544 ++ if (cap_raised(curracl->cap_invert_audit, cap))
51545 ++ cap_raise(cap_audit, cap);
51546 ++ }
51547 ++ }
51548 ++
51549 ++ if (!cap_raised(cap_drop, cap)) {
51550 ++ if (cap_raised(cap_audit, cap))
51551 ++ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
51552 ++ return 1;
51553 ++ }
51554 ++
51555 ++ curracl = task->acl;
51556 ++
51557 ++ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
51558 ++ && cap_raised(cred->cap_effective, cap)) {
51559 ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51560 ++ task->role->roletype, cred->uid,
51561 ++ cred->gid, task->exec_file ?
51562 ++ gr_to_filename(task->exec_file->f_path.dentry,
51563 ++ task->exec_file->f_path.mnt) : curracl->filename,
51564 ++ curracl->filename, 0UL,
51565 ++ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
51566 ++ return 1;
51567 ++ }
51568 ++
51569 ++ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
51570 ++ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
51571 ++ return 0;
51572 ++}
51573 ++
51574 ++int
51575 ++gr_acl_is_capable_nolog(const int cap)
51576 ++{
51577 ++ struct acl_subject_label *curracl;
51578 ++ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51579 ++
51580 ++ if (!gr_acl_is_enabled())
51581 ++ return 1;
51582 ++
51583 ++ curracl = current->acl;
51584 ++
51585 ++ cap_drop = curracl->cap_lower;
51586 ++ cap_mask = curracl->cap_mask;
51587 ++
51588 ++ while ((curracl = curracl->parent_subject)) {
51589 ++ /* if the cap isn't specified in the current computed mask but is specified in the
51590 ++ current level subject, and is lowered in the current level subject, then add
51591 ++ it to the set of dropped capabilities
51592 ++ otherwise, add the current level subject's mask to the current computed mask
51593 ++ */
51594 ++ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51595 ++ cap_raise(cap_mask, cap);
51596 ++ if (cap_raised(curracl->cap_lower, cap))
51597 ++ cap_raise(cap_drop, cap);
51598 ++ }
51599 ++ }
51600 ++
51601 ++ if (!cap_raised(cap_drop, cap))
51602 ++ return 1;
51603 ++
51604 ++ return 0;
51605 ++}
51606 ++
51607 +diff -urNp linux-3.1.1/grsecurity/gracl_fs.c linux-3.1.1/grsecurity/gracl_fs.c
51608 +--- linux-3.1.1/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
51609 ++++ linux-3.1.1/grsecurity/gracl_fs.c 2011-11-17 00:25:32.000000000 -0500
51610 +@@ -0,0 +1,433 @@
51611 ++#include <linux/kernel.h>
51612 ++#include <linux/sched.h>
51613 ++#include <linux/types.h>
51614 ++#include <linux/fs.h>
51615 ++#include <linux/file.h>
51616 ++#include <linux/stat.h>
51617 ++#include <linux/grsecurity.h>
51618 ++#include <linux/grinternal.h>
51619 ++#include <linux/gracl.h>
51620 ++
51621 ++__u32
51622 ++gr_acl_handle_hidden_file(const struct dentry * dentry,
51623 ++ const struct vfsmount * mnt)
51624 ++{
51625 ++ __u32 mode;
51626 ++
51627 ++ if (unlikely(!dentry->d_inode))
51628 ++ return GR_FIND;
51629 ++
51630 ++ mode =
51631 ++ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
51632 ++
51633 ++ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
51634 ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51635 ++ return mode;
51636 ++ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
51637 ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51638 ++ return 0;
51639 ++ } else if (unlikely(!(mode & GR_FIND)))
51640 ++ return 0;
51641 ++
51642 ++ return GR_FIND;
51643 ++}
51644 ++
51645 ++__u32
51646 ++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
51647 ++ int acc_mode)
51648 ++{
51649 ++ __u32 reqmode = GR_FIND;
51650 ++ __u32 mode;
51651 ++
51652 ++ if (unlikely(!dentry->d_inode))
51653 ++ return reqmode;
51654 ++
51655 ++ if (acc_mode & MAY_APPEND)
51656 ++ reqmode |= GR_APPEND;
51657 ++ else if (acc_mode & MAY_WRITE)
51658 ++ reqmode |= GR_WRITE;
51659 ++ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
51660 ++ reqmode |= GR_READ;
51661 ++
51662 ++ mode =
51663 ++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51664 ++ mnt);
51665 ++
51666 ++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51667 ++ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51668 ++ reqmode & GR_READ ? " reading" : "",
51669 ++ reqmode & GR_WRITE ? " writing" : reqmode &
51670 ++ GR_APPEND ? " appending" : "");
51671 ++ return reqmode;
51672 ++ } else
51673 ++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51674 ++ {
51675 ++ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51676 ++ reqmode & GR_READ ? " reading" : "",
51677 ++ reqmode & GR_WRITE ? " writing" : reqmode &
51678 ++ GR_APPEND ? " appending" : "");
51679 ++ return 0;
51680 ++ } else if (unlikely((mode & reqmode) != reqmode))
51681 ++ return 0;
51682 ++
51683 ++ return reqmode;
51684 ++}
51685 ++
51686 ++__u32
51687 ++gr_acl_handle_creat(const struct dentry * dentry,
51688 ++ const struct dentry * p_dentry,
51689 ++ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
51690 ++ const int imode)
51691 ++{
51692 ++ __u32 reqmode = GR_WRITE | GR_CREATE;
51693 ++ __u32 mode;
51694 ++
51695 ++ if (acc_mode & MAY_APPEND)
51696 ++ reqmode |= GR_APPEND;
51697 ++ // if a directory was required or the directory already exists, then
51698 ++ // don't count this open as a read
51699 ++ if ((acc_mode & MAY_READ) &&
51700 ++ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
51701 ++ reqmode |= GR_READ;
51702 ++ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
51703 ++ reqmode |= GR_SETID;
51704 ++
51705 ++ mode =
51706 ++ gr_check_create(dentry, p_dentry, p_mnt,
51707 ++ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51708 ++
51709 ++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51710 ++ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51711 ++ reqmode & GR_READ ? " reading" : "",
51712 ++ reqmode & GR_WRITE ? " writing" : reqmode &
51713 ++ GR_APPEND ? " appending" : "");
51714 ++ return reqmode;
51715 ++ } else
51716 ++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51717 ++ {
51718 ++ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51719 ++ reqmode & GR_READ ? " reading" : "",
51720 ++ reqmode & GR_WRITE ? " writing" : reqmode &
51721 ++ GR_APPEND ? " appending" : "");
51722 ++ return 0;
51723 ++ } else if (unlikely((mode & reqmode) != reqmode))
51724 ++ return 0;
51725 ++
51726 ++ return reqmode;
51727 ++}
51728 ++
51729 ++__u32
51730 ++gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51731 ++ const int fmode)
51732 ++{
51733 ++ __u32 mode, reqmode = GR_FIND;
51734 ++
51735 ++ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51736 ++ reqmode |= GR_EXEC;
51737 ++ if (fmode & S_IWOTH)
51738 ++ reqmode |= GR_WRITE;
51739 ++ if (fmode & S_IROTH)
51740 ++ reqmode |= GR_READ;
51741 ++
51742 ++ mode =
51743 ++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51744 ++ mnt);
51745 ++
51746 ++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51747 ++ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51748 ++ reqmode & GR_READ ? " reading" : "",
51749 ++ reqmode & GR_WRITE ? " writing" : "",
51750 ++ reqmode & GR_EXEC ? " executing" : "");
51751 ++ return reqmode;
51752 ++ } else
51753 ++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51754 ++ {
51755 ++ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51756 ++ reqmode & GR_READ ? " reading" : "",
51757 ++ reqmode & GR_WRITE ? " writing" : "",
51758 ++ reqmode & GR_EXEC ? " executing" : "");
51759 ++ return 0;
51760 ++ } else if (unlikely((mode & reqmode) != reqmode))
51761 ++ return 0;
51762 ++
51763 ++ return reqmode;
51764 ++}
51765 ++
51766 ++static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51767 ++{
51768 ++ __u32 mode;
51769 ++
51770 ++ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51771 ++
51772 ++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51773 ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51774 ++ return mode;
51775 ++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51776 ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51777 ++ return 0;
51778 ++ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51779 ++ return 0;
51780 ++
51781 ++ return (reqmode);
51782 ++}
51783 ++
51784 ++__u32
51785 ++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51786 ++{
51787 ++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51788 ++}
51789 ++
51790 ++__u32
51791 ++gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51792 ++{
51793 ++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51794 ++}
51795 ++
51796 ++__u32
51797 ++gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51798 ++{
51799 ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51800 ++}
51801 ++
51802 ++__u32
51803 ++gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51804 ++{
51805 ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51806 ++}
51807 ++
51808 ++__u32
51809 ++gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51810 ++ mode_t mode)
51811 ++{
51812 ++ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51813 ++ return 1;
51814 ++
51815 ++ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51816 ++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51817 ++ GR_FCHMOD_ACL_MSG);
51818 ++ } else {
51819 ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51820 ++ }
51821 ++}
51822 ++
51823 ++__u32
51824 ++gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51825 ++ mode_t mode)
51826 ++{
51827 ++ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51828 ++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51829 ++ GR_CHMOD_ACL_MSG);
51830 ++ } else {
51831 ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51832 ++ }
51833 ++}
51834 ++
51835 ++__u32
51836 ++gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51837 ++{
51838 ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51839 ++}
51840 ++
51841 ++__u32
51842 ++gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51843 ++{
51844 ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51845 ++}
51846 ++
51847 ++__u32
51848 ++gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51849 ++{
51850 ++ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51851 ++}
51852 ++
51853 ++__u32
51854 ++gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51855 ++{
51856 ++ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51857 ++ GR_UNIXCONNECT_ACL_MSG);
51858 ++}
51859 ++
51860 ++/* hardlinks require at minimum create and link permission,
51861 ++ any additional privilege required is based on the
51862 ++ privilege of the file being linked to
51863 ++*/
51864 ++__u32
51865 ++gr_acl_handle_link(const struct dentry * new_dentry,
51866 ++ const struct dentry * parent_dentry,
51867 ++ const struct vfsmount * parent_mnt,
51868 ++ const struct dentry * old_dentry,
51869 ++ const struct vfsmount * old_mnt, const char *to)
51870 ++{
51871 ++ __u32 mode;
51872 ++ __u32 needmode = GR_CREATE | GR_LINK;
51873 ++ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51874 ++
51875 ++ mode =
51876 ++ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51877 ++ old_mnt);
51878 ++
51879 ++ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51880 ++ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51881 ++ return mode;
51882 ++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51883 ++ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51884 ++ return 0;
51885 ++ } else if (unlikely((mode & needmode) != needmode))
51886 ++ return 0;
51887 ++
51888 ++ return 1;
51889 ++}
51890 ++
51891 ++__u32
51892 ++gr_acl_handle_symlink(const struct dentry * new_dentry,
51893 ++ const struct dentry * parent_dentry,
51894 ++ const struct vfsmount * parent_mnt, const char *from)
51895 ++{
51896 ++ __u32 needmode = GR_WRITE | GR_CREATE;
51897 ++ __u32 mode;
51898 ++
51899 ++ mode =
51900 ++ gr_check_create(new_dentry, parent_dentry, parent_mnt,
51901 ++ GR_CREATE | GR_AUDIT_CREATE |
51902 ++ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51903 ++
51904 ++ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51905 ++ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51906 ++ return mode;
51907 ++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51908 ++ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51909 ++ return 0;
51910 ++ } else if (unlikely((mode & needmode) != needmode))
51911 ++ return 0;
51912 ++
51913 ++ return (GR_WRITE | GR_CREATE);
51914 ++}
51915 ++
51916 ++static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51917 ++{
51918 ++ __u32 mode;
51919 ++
51920 ++ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51921 ++
51922 ++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51923 ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51924 ++ return mode;
51925 ++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51926 ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51927 ++ return 0;
51928 ++ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51929 ++ return 0;
51930 ++
51931 ++ return (reqmode);
51932 ++}
51933 ++
51934 ++__u32
51935 ++gr_acl_handle_mknod(const struct dentry * new_dentry,
51936 ++ const struct dentry * parent_dentry,
51937 ++ const struct vfsmount * parent_mnt,
51938 ++ const int mode)
51939 ++{
51940 ++ __u32 reqmode = GR_WRITE | GR_CREATE;
51941 ++ if (unlikely(mode & (S_ISUID | S_ISGID)))
51942 ++ reqmode |= GR_SETID;
51943 ++
51944 ++ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51945 ++ reqmode, GR_MKNOD_ACL_MSG);
51946 ++}
51947 ++
51948 ++__u32
51949 ++gr_acl_handle_mkdir(const struct dentry *new_dentry,
51950 ++ const struct dentry *parent_dentry,
51951 ++ const struct vfsmount *parent_mnt)
51952 ++{
51953 ++ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51954 ++ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51955 ++}
51956 ++
51957 ++#define RENAME_CHECK_SUCCESS(old, new) \
51958 ++ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51959 ++ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51960 ++
51961 ++int
51962 ++gr_acl_handle_rename(struct dentry *new_dentry,
51963 ++ struct dentry *parent_dentry,
51964 ++ const struct vfsmount *parent_mnt,
51965 ++ struct dentry *old_dentry,
51966 ++ struct inode *old_parent_inode,
51967 ++ struct vfsmount *old_mnt, const char *newname)
51968 ++{
51969 ++ __u32 comp1, comp2;
51970 ++ int error = 0;
51971 ++
51972 ++ if (unlikely(!gr_acl_is_enabled()))
51973 ++ return 0;
51974 ++
51975 ++ if (!new_dentry->d_inode) {
51976 ++ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51977 ++ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51978 ++ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51979 ++ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51980 ++ GR_DELETE | GR_AUDIT_DELETE |
51981 ++ GR_AUDIT_READ | GR_AUDIT_WRITE |
51982 ++ GR_SUPPRESS, old_mnt);
51983 ++ } else {
51984 ++ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51985 ++ GR_CREATE | GR_DELETE |
51986 ++ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51987 ++ GR_AUDIT_READ | GR_AUDIT_WRITE |
51988 ++ GR_SUPPRESS, parent_mnt);
51989 ++ comp2 =
51990 ++ gr_search_file(old_dentry,
51991 ++ GR_READ | GR_WRITE | GR_AUDIT_READ |
51992 ++ GR_DELETE | GR_AUDIT_DELETE |
51993 ++ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
51994 ++ }
51995 ++
51996 ++ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
51997 ++ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
51998 ++ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51999 ++ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
52000 ++ && !(comp2 & GR_SUPPRESS)) {
52001 ++ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52002 ++ error = -EACCES;
52003 ++ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
52004 ++ error = -EACCES;
52005 ++
52006 ++ return error;
52007 ++}
52008 ++
52009 ++void
52010 ++gr_acl_handle_exit(void)
52011 ++{
52012 ++ u16 id;
52013 ++ char *rolename;
52014 ++ struct file *exec_file;
52015 ++
52016 ++ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
52017 ++ !(current->role->roletype & GR_ROLE_PERSIST))) {
52018 ++ id = current->acl_role_id;
52019 ++ rolename = current->role->rolename;
52020 ++ gr_set_acls(1);
52021 ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
52022 ++ }
52023 ++
52024 ++ write_lock(&grsec_exec_file_lock);
52025 ++ exec_file = current->exec_file;
52026 ++ current->exec_file = NULL;
52027 ++ write_unlock(&grsec_exec_file_lock);
52028 ++
52029 ++ if (exec_file)
52030 ++ fput(exec_file);
52031 ++}
52032 ++
52033 ++int
52034 ++gr_acl_handle_procpidmem(const struct task_struct *task)
52035 ++{
52036 ++ if (unlikely(!gr_acl_is_enabled()))
52037 ++ return 0;
52038 ++
52039 ++ if (task != current && task->acl->mode & GR_PROTPROCFD)
52040 ++ return -EACCES;
52041 ++
52042 ++ return 0;
52043 ++}
52044 +diff -urNp linux-3.1.1/grsecurity/gracl_ip.c linux-3.1.1/grsecurity/gracl_ip.c
52045 +--- linux-3.1.1/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
52046 ++++ linux-3.1.1/grsecurity/gracl_ip.c 2011-11-16 18:40:31.000000000 -0500
52047 +@@ -0,0 +1,381 @@
52048 ++#include <linux/kernel.h>
52049 ++#include <asm/uaccess.h>
52050 ++#include <asm/errno.h>
52051 ++#include <net/sock.h>
52052 ++#include <linux/file.h>
52053 ++#include <linux/fs.h>
52054 ++#include <linux/net.h>
52055 ++#include <linux/in.h>
52056 ++#include <linux/skbuff.h>
52057 ++#include <linux/ip.h>
52058 ++#include <linux/udp.h>
52059 ++#include <linux/types.h>
52060 ++#include <linux/sched.h>
52061 ++#include <linux/netdevice.h>
52062 ++#include <linux/inetdevice.h>
52063 ++#include <linux/gracl.h>
52064 ++#include <linux/grsecurity.h>
52065 ++#include <linux/grinternal.h>
52066 ++
52067 ++#define GR_BIND 0x01
52068 ++#define GR_CONNECT 0x02
52069 ++#define GR_INVERT 0x04
52070 ++#define GR_BINDOVERRIDE 0x08
52071 ++#define GR_CONNECTOVERRIDE 0x10
52072 ++#define GR_SOCK_FAMILY 0x20
52073 ++
52074 ++static const char * gr_protocols[IPPROTO_MAX] = {
52075 ++ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
52076 ++ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
52077 ++ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
52078 ++ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
52079 ++ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
52080 ++ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
52081 ++ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
52082 ++ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
52083 ++ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
52084 ++ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
52085 ++ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
52086 ++ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
52087 ++ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
52088 ++ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
52089 ++ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
52090 ++ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
52091 ++ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
52092 ++ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
52093 ++ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
52094 ++ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
52095 ++ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
52096 ++ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
52097 ++ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
52098 ++ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
52099 ++ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
52100 ++ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
52101 ++ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
52102 ++ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
52103 ++ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
52104 ++ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
52105 ++ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
52106 ++ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
52107 ++ };
52108 ++
52109 ++static const char * gr_socktypes[SOCK_MAX] = {
52110 ++ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
52111 ++ "unknown:7", "unknown:8", "unknown:9", "packet"
52112 ++ };
52113 ++
52114 ++static const char * gr_sockfamilies[AF_MAX+1] = {
52115 ++ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
52116 ++ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
52117 ++ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
52118 ++ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
52119 ++ };
52120 ++
52121 ++const char *
52122 ++gr_proto_to_name(unsigned char proto)
52123 ++{
52124 ++ return gr_protocols[proto];
52125 ++}
52126 ++
52127 ++const char *
52128 ++gr_socktype_to_name(unsigned char type)
52129 ++{
52130 ++ return gr_socktypes[type];
52131 ++}
52132 ++
52133 ++const char *
52134 ++gr_sockfamily_to_name(unsigned char family)
52135 ++{
52136 ++ return gr_sockfamilies[family];
52137 ++}
52138 ++
52139 ++int
52140 ++gr_search_socket(const int domain, const int type, const int protocol)
52141 ++{
52142 ++ struct acl_subject_label *curr;
52143 ++ const struct cred *cred = current_cred();
52144 ++
52145 ++ if (unlikely(!gr_acl_is_enabled()))
52146 ++ goto exit;
52147 ++
52148 ++ if ((domain < 0) || (type < 0) || (protocol < 0) ||
52149 ++ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
52150 ++ goto exit; // let the kernel handle it
52151 ++
52152 ++ curr = current->acl;
52153 ++
52154 ++ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
52155 ++ /* the family is allowed, if this is PF_INET allow it only if
52156 ++ the extra sock type/protocol checks pass */
52157 ++ if (domain == PF_INET)
52158 ++ goto inet_check;
52159 ++ goto exit;
52160 ++ } else {
52161 ++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52162 ++ __u32 fakeip = 0;
52163 ++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52164 ++ current->role->roletype, cred->uid,
52165 ++ cred->gid, current->exec_file ?
52166 ++ gr_to_filename(current->exec_file->f_path.dentry,
52167 ++ current->exec_file->f_path.mnt) :
52168 ++ curr->filename, curr->filename,
52169 ++ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
52170 ++ &current->signal->saved_ip);
52171 ++ goto exit;
52172 ++ }
52173 ++ goto exit_fail;
52174 ++ }
52175 ++
52176 ++inet_check:
52177 ++ /* the rest of this checking is for IPv4 only */
52178 ++ if (!curr->ips)
52179 ++ goto exit;
52180 ++
52181 ++ if ((curr->ip_type & (1 << type)) &&
52182 ++ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
52183 ++ goto exit;
52184 ++
52185 ++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52186 ++ /* we don't place acls on raw sockets , and sometimes
52187 ++ dgram/ip sockets are opened for ioctl and not
52188 ++ bind/connect, so we'll fake a bind learn log */
52189 ++ if (type == SOCK_RAW || type == SOCK_PACKET) {
52190 ++ __u32 fakeip = 0;
52191 ++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52192 ++ current->role->roletype, cred->uid,
52193 ++ cred->gid, current->exec_file ?
52194 ++ gr_to_filename(current->exec_file->f_path.dentry,
52195 ++ current->exec_file->f_path.mnt) :
52196 ++ curr->filename, curr->filename,
52197 ++ &fakeip, 0, type,
52198 ++ protocol, GR_CONNECT, &current->signal->saved_ip);
52199 ++ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
52200 ++ __u32 fakeip = 0;
52201 ++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52202 ++ current->role->roletype, cred->uid,
52203 ++ cred->gid, current->exec_file ?
52204 ++ gr_to_filename(current->exec_file->f_path.dentry,
52205 ++ current->exec_file->f_path.mnt) :
52206 ++ curr->filename, curr->filename,
52207 ++ &fakeip, 0, type,
52208 ++ protocol, GR_BIND, &current->signal->saved_ip);
52209 ++ }
52210 ++ /* we'll log when they use connect or bind */
52211 ++ goto exit;
52212 ++ }
52213 ++
52214 ++exit_fail:
52215 ++ if (domain == PF_INET)
52216 ++ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
52217 ++ gr_socktype_to_name(type), gr_proto_to_name(protocol));
52218 ++ else
52219 ++ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
52220 ++ gr_socktype_to_name(type), protocol);
52221 ++
52222 ++ return 0;
52223 ++exit:
52224 ++ return 1;
52225 ++}
52226 ++
52227 ++int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
52228 ++{
52229 ++ if ((ip->mode & mode) &&
52230 ++ (ip_port >= ip->low) &&
52231 ++ (ip_port <= ip->high) &&
52232 ++ ((ntohl(ip_addr) & our_netmask) ==
52233 ++ (ntohl(our_addr) & our_netmask))
52234 ++ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
52235 ++ && (ip->type & (1 << type))) {
52236 ++ if (ip->mode & GR_INVERT)
52237 ++ return 2; // specifically denied
52238 ++ else
52239 ++ return 1; // allowed
52240 ++ }
52241 ++
52242 ++ return 0; // not specifically allowed, may continue parsing
52243 ++}
52244 ++
52245 ++static int
52246 ++gr_search_connectbind(const int full_mode, struct sock *sk,
52247 ++ struct sockaddr_in *addr, const int type)
52248 ++{
52249 ++ char iface[IFNAMSIZ] = {0};
52250 ++ struct acl_subject_label *curr;
52251 ++ struct acl_ip_label *ip;
52252 ++ struct inet_sock *isk;
52253 ++ struct net_device *dev;
52254 ++ struct in_device *idev;
52255 ++ unsigned long i;
52256 ++ int ret;
52257 ++ int mode = full_mode & (GR_BIND | GR_CONNECT);
52258 ++ __u32 ip_addr = 0;
52259 ++ __u32 our_addr;
52260 ++ __u32 our_netmask;
52261 ++ char *p;
52262 ++ __u16 ip_port = 0;
52263 ++ const struct cred *cred = current_cred();
52264 ++
52265 ++ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
52266 ++ return 0;
52267 ++
52268 ++ curr = current->acl;
52269 ++ isk = inet_sk(sk);
52270 ++
52271 ++ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
52272 ++ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
52273 ++ addr->sin_addr.s_addr = curr->inaddr_any_override;
52274 ++ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
52275 ++ struct sockaddr_in saddr;
52276 ++ int err;
52277 ++
52278 ++ saddr.sin_family = AF_INET;
52279 ++ saddr.sin_addr.s_addr = curr->inaddr_any_override;
52280 ++ saddr.sin_port = isk->inet_sport;
52281 ++
52282 ++ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52283 ++ if (err)
52284 ++ return err;
52285 ++
52286 ++ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52287 ++ if (err)
52288 ++ return err;
52289 ++ }
52290 ++
52291 ++ if (!curr->ips)
52292 ++ return 0;
52293 ++
52294 ++ ip_addr = addr->sin_addr.s_addr;
52295 ++ ip_port = ntohs(addr->sin_port);
52296 ++
52297 ++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52298 ++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52299 ++ current->role->roletype, cred->uid,
52300 ++ cred->gid, current->exec_file ?
52301 ++ gr_to_filename(current->exec_file->f_path.dentry,
52302 ++ current->exec_file->f_path.mnt) :
52303 ++ curr->filename, curr->filename,
52304 ++ &ip_addr, ip_port, type,
52305 ++ sk->sk_protocol, mode, &current->signal->saved_ip);
52306 ++ return 0;
52307 ++ }
52308 ++
52309 ++ for (i = 0; i < curr->ip_num; i++) {
52310 ++ ip = *(curr->ips + i);
52311 ++ if (ip->iface != NULL) {
52312 ++ strncpy(iface, ip->iface, IFNAMSIZ - 1);
52313 ++ p = strchr(iface, ':');
52314 ++ if (p != NULL)
52315 ++ *p = '\0';
52316 ++ dev = dev_get_by_name(sock_net(sk), iface);
52317 ++ if (dev == NULL)
52318 ++ continue;
52319 ++ idev = in_dev_get(dev);
52320 ++ if (idev == NULL) {
52321 ++ dev_put(dev);
52322 ++ continue;
52323 ++ }
52324 ++ rcu_read_lock();
52325 ++ for_ifa(idev) {
52326 ++ if (!strcmp(ip->iface, ifa->ifa_label)) {
52327 ++ our_addr = ifa->ifa_address;
52328 ++ our_netmask = 0xffffffff;
52329 ++ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
52330 ++ if (ret == 1) {
52331 ++ rcu_read_unlock();
52332 ++ in_dev_put(idev);
52333 ++ dev_put(dev);
52334 ++ return 0;
52335 ++ } else if (ret == 2) {
52336 ++ rcu_read_unlock();
52337 ++ in_dev_put(idev);
52338 ++ dev_put(dev);
52339 ++ goto denied;
52340 ++ }
52341 ++ }
52342 ++ } endfor_ifa(idev);
52343 ++ rcu_read_unlock();
52344 ++ in_dev_put(idev);
52345 ++ dev_put(dev);
52346 ++ } else {
52347 ++ our_addr = ip->addr;
52348 ++ our_netmask = ip->netmask;
52349 ++ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
52350 ++ if (ret == 1)
52351 ++ return 0;
52352 ++ else if (ret == 2)
52353 ++ goto denied;
52354 ++ }
52355 ++ }
52356 ++
52357 ++denied:
52358 ++ if (mode == GR_BIND)
52359 ++ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
52360 ++ else if (mode == GR_CONNECT)
52361 ++ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
52362 ++
52363 ++ return -EACCES;
52364 ++}
52365 ++
52366 ++int
52367 ++gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
52368 ++{
52369 ++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
52370 ++}
52371 ++
52372 ++int
52373 ++gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
52374 ++{
52375 ++ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
52376 ++}
52377 ++
52378 ++int gr_search_listen(struct socket *sock)
52379 ++{
52380 ++ struct sock *sk = sock->sk;
52381 ++ struct sockaddr_in addr;
52382 ++
52383 ++ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
52384 ++ addr.sin_port = inet_sk(sk)->inet_sport;
52385 ++
52386 ++ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52387 ++}
52388 ++
52389 ++int gr_search_accept(struct socket *sock)
52390 ++{
52391 ++ struct sock *sk = sock->sk;
52392 ++ struct sockaddr_in addr;
52393 ++
52394 ++ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
52395 ++ addr.sin_port = inet_sk(sk)->inet_sport;
52396 ++
52397 ++ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52398 ++}
52399 ++
52400 ++int
52401 ++gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
52402 ++{
52403 ++ if (addr)
52404 ++ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
52405 ++ else {
52406 ++ struct sockaddr_in sin;
52407 ++ const struct inet_sock *inet = inet_sk(sk);
52408 ++
52409 ++ sin.sin_addr.s_addr = inet->inet_daddr;
52410 ++ sin.sin_port = inet->inet_dport;
52411 ++
52412 ++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52413 ++ }
52414 ++}
52415 ++
52416 ++int
52417 ++gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
52418 ++{
52419 ++ struct sockaddr_in sin;
52420 ++
52421 ++ if (unlikely(skb->len < sizeof (struct udphdr)))
52422 ++ return 0; // skip this packet
52423 ++
52424 ++ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
52425 ++ sin.sin_port = udp_hdr(skb)->source;
52426 ++
52427 ++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52428 ++}
52429 +diff -urNp linux-3.1.1/grsecurity/gracl_learn.c linux-3.1.1/grsecurity/gracl_learn.c
52430 +--- linux-3.1.1/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
52431 ++++ linux-3.1.1/grsecurity/gracl_learn.c 2011-11-16 18:40:31.000000000 -0500
52432 +@@ -0,0 +1,207 @@
52433 ++#include <linux/kernel.h>
52434 ++#include <linux/mm.h>
52435 ++#include <linux/sched.h>
52436 ++#include <linux/poll.h>
52437 ++#include <linux/string.h>
52438 ++#include <linux/file.h>
52439 ++#include <linux/types.h>
52440 ++#include <linux/vmalloc.h>
52441 ++#include <linux/grinternal.h>
52442 ++
52443 ++extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
52444 ++ size_t count, loff_t *ppos);
52445 ++extern int gr_acl_is_enabled(void);
52446 ++
52447 ++static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
52448 ++static int gr_learn_attached;
52449 ++
52450 ++/* use a 512k buffer */
52451 ++#define LEARN_BUFFER_SIZE (512 * 1024)
52452 ++
52453 ++static DEFINE_SPINLOCK(gr_learn_lock);
52454 ++static DEFINE_MUTEX(gr_learn_user_mutex);
52455 ++
52456 ++/* we need to maintain two buffers, so that the kernel context of grlearn
52457 ++ uses a semaphore around the userspace copying, and the other kernel contexts
52458 ++ use a spinlock when copying into the buffer, since they cannot sleep
52459 ++*/
52460 ++static char *learn_buffer;
52461 ++static char *learn_buffer_user;
52462 ++static int learn_buffer_len;
52463 ++static int learn_buffer_user_len;
52464 ++
52465 ++static ssize_t
52466 ++read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
52467 ++{
52468 ++ DECLARE_WAITQUEUE(wait, current);
52469 ++ ssize_t retval = 0;
52470 ++
52471 ++ add_wait_queue(&learn_wait, &wait);
52472 ++ set_current_state(TASK_INTERRUPTIBLE);
52473 ++ do {
52474 ++ mutex_lock(&gr_learn_user_mutex);
52475 ++ spin_lock(&gr_learn_lock);
52476 ++ if (learn_buffer_len)
52477 ++ break;
52478 ++ spin_unlock(&gr_learn_lock);
52479 ++ mutex_unlock(&gr_learn_user_mutex);
52480 ++ if (file->f_flags & O_NONBLOCK) {
52481 ++ retval = -EAGAIN;
52482 ++ goto out;
52483 ++ }
52484 ++ if (signal_pending(current)) {
52485 ++ retval = -ERESTARTSYS;
52486 ++ goto out;
52487 ++ }
52488 ++
52489 ++ schedule();
52490 ++ } while (1);
52491 ++
52492 ++ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
52493 ++ learn_buffer_user_len = learn_buffer_len;
52494 ++ retval = learn_buffer_len;
52495 ++ learn_buffer_len = 0;
52496 ++
52497 ++ spin_unlock(&gr_learn_lock);
52498 ++
52499 ++ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
52500 ++ retval = -EFAULT;
52501 ++
52502 ++ mutex_unlock(&gr_learn_user_mutex);
52503 ++out:
52504 ++ set_current_state(TASK_RUNNING);
52505 ++ remove_wait_queue(&learn_wait, &wait);
52506 ++ return retval;
52507 ++}
52508 ++
52509 ++static unsigned int
52510 ++poll_learn(struct file * file, poll_table * wait)
52511 ++{
52512 ++ poll_wait(file, &learn_wait, wait);
52513 ++
52514 ++ if (learn_buffer_len)
52515 ++ return (POLLIN | POLLRDNORM);
52516 ++
52517 ++ return 0;
52518 ++}
52519 ++
52520 ++void
52521 ++gr_clear_learn_entries(void)
52522 ++{
52523 ++ char *tmp;
52524 ++
52525 ++ mutex_lock(&gr_learn_user_mutex);
52526 ++ spin_lock(&gr_learn_lock);
52527 ++ tmp = learn_buffer;
52528 ++ learn_buffer = NULL;
52529 ++ spin_unlock(&gr_learn_lock);
52530 ++ if (tmp)
52531 ++ vfree(tmp);
52532 ++ if (learn_buffer_user != NULL) {
52533 ++ vfree(learn_buffer_user);
52534 ++ learn_buffer_user = NULL;
52535 ++ }
52536 ++ learn_buffer_len = 0;
52537 ++ mutex_unlock(&gr_learn_user_mutex);
52538 ++
52539 ++ return;
52540 ++}
52541 ++
52542 ++void
52543 ++gr_add_learn_entry(const char *fmt, ...)
52544 ++{
52545 ++ va_list args;
52546 ++ unsigned int len;
52547 ++
52548 ++ if (!gr_learn_attached)
52549 ++ return;
52550 ++
52551 ++ spin_lock(&gr_learn_lock);
52552 ++
52553 ++ /* leave a gap at the end so we know when it's "full" but don't have to
52554 ++ compute the exact length of the string we're trying to append
52555 ++ */
52556 ++ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
52557 ++ spin_unlock(&gr_learn_lock);
52558 ++ wake_up_interruptible(&learn_wait);
52559 ++ return;
52560 ++ }
52561 ++ if (learn_buffer == NULL) {
52562 ++ spin_unlock(&gr_learn_lock);
52563 ++ return;
52564 ++ }
52565 ++
52566 ++ va_start(args, fmt);
52567 ++ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
52568 ++ va_end(args);
52569 ++
52570 ++ learn_buffer_len += len + 1;
52571 ++
52572 ++ spin_unlock(&gr_learn_lock);
52573 ++ wake_up_interruptible(&learn_wait);
52574 ++
52575 ++ return;
52576 ++}
52577 ++
52578 ++static int
52579 ++open_learn(struct inode *inode, struct file *file)
52580 ++{
52581 ++ if (file->f_mode & FMODE_READ && gr_learn_attached)
52582 ++ return -EBUSY;
52583 ++ if (file->f_mode & FMODE_READ) {
52584 ++ int retval = 0;
52585 ++ mutex_lock(&gr_learn_user_mutex);
52586 ++ if (learn_buffer == NULL)
52587 ++ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
52588 ++ if (learn_buffer_user == NULL)
52589 ++ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
52590 ++ if (learn_buffer == NULL) {
52591 ++ retval = -ENOMEM;
52592 ++ goto out_error;
52593 ++ }
52594 ++ if (learn_buffer_user == NULL) {
52595 ++ retval = -ENOMEM;
52596 ++ goto out_error;
52597 ++ }
52598 ++ learn_buffer_len = 0;
52599 ++ learn_buffer_user_len = 0;
52600 ++ gr_learn_attached = 1;
52601 ++out_error:
52602 ++ mutex_unlock(&gr_learn_user_mutex);
52603 ++ return retval;
52604 ++ }
52605 ++ return 0;
52606 ++}
52607 ++
52608 ++static int
52609 ++close_learn(struct inode *inode, struct file *file)
52610 ++{
52611 ++ if (file->f_mode & FMODE_READ) {
52612 ++ char *tmp = NULL;
52613 ++ mutex_lock(&gr_learn_user_mutex);
52614 ++ spin_lock(&gr_learn_lock);
52615 ++ tmp = learn_buffer;
52616 ++ learn_buffer = NULL;
52617 ++ spin_unlock(&gr_learn_lock);
52618 ++ if (tmp)
52619 ++ vfree(tmp);
52620 ++ if (learn_buffer_user != NULL) {
52621 ++ vfree(learn_buffer_user);
52622 ++ learn_buffer_user = NULL;
52623 ++ }
52624 ++ learn_buffer_len = 0;
52625 ++ learn_buffer_user_len = 0;
52626 ++ gr_learn_attached = 0;
52627 ++ mutex_unlock(&gr_learn_user_mutex);
52628 ++ }
52629 ++
52630 ++ return 0;
52631 ++}
52632 ++
52633 ++const struct file_operations grsec_fops = {
52634 ++ .read = read_learn,
52635 ++ .write = write_grsec_handler,
52636 ++ .open = open_learn,
52637 ++ .release = close_learn,
52638 ++ .poll = poll_learn,
52639 ++};
52640 +diff -urNp linux-3.1.1/grsecurity/gracl_res.c linux-3.1.1/grsecurity/gracl_res.c
52641 +--- linux-3.1.1/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
52642 ++++ linux-3.1.1/grsecurity/gracl_res.c 2011-11-16 18:40:31.000000000 -0500
52643 +@@ -0,0 +1,68 @@
52644 ++#include <linux/kernel.h>
52645 ++#include <linux/sched.h>
52646 ++#include <linux/gracl.h>
52647 ++#include <linux/grinternal.h>
52648 ++
52649 ++static const char *restab_log[] = {
52650 ++ [RLIMIT_CPU] = "RLIMIT_CPU",
52651 ++ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
52652 ++ [RLIMIT_DATA] = "RLIMIT_DATA",
52653 ++ [RLIMIT_STACK] = "RLIMIT_STACK",
52654 ++ [RLIMIT_CORE] = "RLIMIT_CORE",
52655 ++ [RLIMIT_RSS] = "RLIMIT_RSS",
52656 ++ [RLIMIT_NPROC] = "RLIMIT_NPROC",
52657 ++ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
52658 ++ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
52659 ++ [RLIMIT_AS] = "RLIMIT_AS",
52660 ++ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
52661 ++ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
52662 ++ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
52663 ++ [RLIMIT_NICE] = "RLIMIT_NICE",
52664 ++ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
52665 ++ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
52666 ++ [GR_CRASH_RES] = "RLIMIT_CRASH"
52667 ++};
52668 ++
52669 ++void
52670 ++gr_log_resource(const struct task_struct *task,
52671 ++ const int res, const unsigned long wanted, const int gt)
52672 ++{
52673 ++ const struct cred *cred;
52674 ++ unsigned long rlim;
52675 ++
52676 ++ if (!gr_acl_is_enabled() && !grsec_resource_logging)
52677 ++ return;
52678 ++
52679 ++ // not yet supported resource
52680 ++ if (unlikely(!restab_log[res]))
52681 ++ return;
52682 ++
52683 ++ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
52684 ++ rlim = task_rlimit_max(task, res);
52685 ++ else
52686 ++ rlim = task_rlimit(task, res);
52687 ++
52688 ++ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
52689 ++ return;
52690 ++
52691 ++ rcu_read_lock();
52692 ++ cred = __task_cred(task);
52693 ++
52694 ++ if (res == RLIMIT_NPROC &&
52695 ++ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
52696 ++ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
52697 ++ goto out_rcu_unlock;
52698 ++ else if (res == RLIMIT_MEMLOCK &&
52699 ++ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
52700 ++ goto out_rcu_unlock;
52701 ++ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
52702 ++ goto out_rcu_unlock;
52703 ++ rcu_read_unlock();
52704 ++
52705 ++ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
52706 ++
52707 ++ return;
52708 ++out_rcu_unlock:
52709 ++ rcu_read_unlock();
52710 ++ return;
52711 ++}
52712 +diff -urNp linux-3.1.1/grsecurity/gracl_segv.c linux-3.1.1/grsecurity/gracl_segv.c
52713 +--- linux-3.1.1/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
52714 ++++ linux-3.1.1/grsecurity/gracl_segv.c 2011-11-16 18:40:31.000000000 -0500
52715 +@@ -0,0 +1,299 @@
52716 ++#include <linux/kernel.h>
52717 ++#include <linux/mm.h>
52718 ++#include <asm/uaccess.h>
52719 ++#include <asm/errno.h>
52720 ++#include <asm/mman.h>
52721 ++#include <net/sock.h>
52722 ++#include <linux/file.h>
52723 ++#include <linux/fs.h>
52724 ++#include <linux/net.h>
52725 ++#include <linux/in.h>
52726 ++#include <linux/slab.h>
52727 ++#include <linux/types.h>
52728 ++#include <linux/sched.h>
52729 ++#include <linux/timer.h>
52730 ++#include <linux/gracl.h>
52731 ++#include <linux/grsecurity.h>
52732 ++#include <linux/grinternal.h>
52733 ++
52734 ++static struct crash_uid *uid_set;
52735 ++static unsigned short uid_used;
52736 ++static DEFINE_SPINLOCK(gr_uid_lock);
52737 ++extern rwlock_t gr_inode_lock;
52738 ++extern struct acl_subject_label *
52739 ++ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52740 ++ struct acl_role_label *role);
52741 ++
52742 ++#ifdef CONFIG_BTRFS_FS
52743 ++extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
52744 ++extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
52745 ++#endif
52746 ++
52747 ++static inline dev_t __get_dev(const struct dentry *dentry)
52748 ++{
52749 ++#ifdef CONFIG_BTRFS_FS
52750 ++ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
52751 ++ return get_btrfs_dev_from_inode(dentry->d_inode);
52752 ++ else
52753 ++#endif
52754 ++ return dentry->d_inode->i_sb->s_dev;
52755 ++}
52756 ++
52757 ++int
52758 ++gr_init_uidset(void)
52759 ++{
52760 ++ uid_set =
52761 ++ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52762 ++ uid_used = 0;
52763 ++
52764 ++ return uid_set ? 1 : 0;
52765 ++}
52766 ++
52767 ++void
52768 ++gr_free_uidset(void)
52769 ++{
52770 ++ if (uid_set)
52771 ++ kfree(uid_set);
52772 ++
52773 ++ return;
52774 ++}
52775 ++
52776 ++int
52777 ++gr_find_uid(const uid_t uid)
52778 ++{
52779 ++ struct crash_uid *tmp = uid_set;
52780 ++ uid_t buid;
52781 ++ int low = 0, high = uid_used - 1, mid;
52782 ++
52783 ++ while (high >= low) {
52784 ++ mid = (low + high) >> 1;
52785 ++ buid = tmp[mid].uid;
52786 ++ if (buid == uid)
52787 ++ return mid;
52788 ++ if (buid > uid)
52789 ++ high = mid - 1;
52790 ++ if (buid < uid)
52791 ++ low = mid + 1;
52792 ++ }
52793 ++
52794 ++ return -1;
52795 ++}
52796 ++
52797 ++static __inline__ void
52798 ++gr_insertsort(void)
52799 ++{
52800 ++ unsigned short i, j;
52801 ++ struct crash_uid index;
52802 ++
52803 ++ for (i = 1; i < uid_used; i++) {
52804 ++ index = uid_set[i];
52805 ++ j = i;
52806 ++ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52807 ++ uid_set[j] = uid_set[j - 1];
52808 ++ j--;
52809 ++ }
52810 ++ uid_set[j] = index;
52811 ++ }
52812 ++
52813 ++ return;
52814 ++}
52815 ++
52816 ++static __inline__ void
52817 ++gr_insert_uid(const uid_t uid, const unsigned long expires)
52818 ++{
52819 ++ int loc;
52820 ++
52821 ++ if (uid_used == GR_UIDTABLE_MAX)
52822 ++ return;
52823 ++
52824 ++ loc = gr_find_uid(uid);
52825 ++
52826 ++ if (loc >= 0) {
52827 ++ uid_set[loc].expires = expires;
52828 ++ return;
52829 ++ }
52830 ++
52831 ++ uid_set[uid_used].uid = uid;
52832 ++ uid_set[uid_used].expires = expires;
52833 ++ uid_used++;
52834 ++
52835 ++ gr_insertsort();
52836 ++
52837 ++ return;
52838 ++}
52839 ++
52840 ++void
52841 ++gr_remove_uid(const unsigned short loc)
52842 ++{
52843 ++ unsigned short i;
52844 ++
52845 ++ for (i = loc + 1; i < uid_used; i++)
52846 ++ uid_set[i - 1] = uid_set[i];
52847 ++
52848 ++ uid_used--;
52849 ++
52850 ++ return;
52851 ++}
52852 ++
52853 ++int
52854 ++gr_check_crash_uid(const uid_t uid)
52855 ++{
52856 ++ int loc;
52857 ++ int ret = 0;
52858 ++
52859 ++ if (unlikely(!gr_acl_is_enabled()))
52860 ++ return 0;
52861 ++
52862 ++ spin_lock(&gr_uid_lock);
52863 ++ loc = gr_find_uid(uid);
52864 ++
52865 ++ if (loc < 0)
52866 ++ goto out_unlock;
52867 ++
52868 ++ if (time_before_eq(uid_set[loc].expires, get_seconds()))
52869 ++ gr_remove_uid(loc);
52870 ++ else
52871 ++ ret = 1;
52872 ++
52873 ++out_unlock:
52874 ++ spin_unlock(&gr_uid_lock);
52875 ++ return ret;
52876 ++}
52877 ++
52878 ++static __inline__ int
52879 ++proc_is_setxid(const struct cred *cred)
52880 ++{
52881 ++ if (cred->uid != cred->euid || cred->uid != cred->suid ||
52882 ++ cred->uid != cred->fsuid)
52883 ++ return 1;
52884 ++ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52885 ++ cred->gid != cred->fsgid)
52886 ++ return 1;
52887 ++
52888 ++ return 0;
52889 ++}
52890 ++
52891 ++extern int gr_fake_force_sig(int sig, struct task_struct *t);
52892 ++
52893 ++void
52894 ++gr_handle_crash(struct task_struct *task, const int sig)
52895 ++{
52896 ++ struct acl_subject_label *curr;
52897 ++ struct task_struct *tsk, *tsk2;
52898 ++ const struct cred *cred;
52899 ++ const struct cred *cred2;
52900 ++
52901 ++ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52902 ++ return;
52903 ++
52904 ++ if (unlikely(!gr_acl_is_enabled()))
52905 ++ return;
52906 ++
52907 ++ curr = task->acl;
52908 ++
52909 ++ if (!(curr->resmask & (1 << GR_CRASH_RES)))
52910 ++ return;
52911 ++
52912 ++ if (time_before_eq(curr->expires, get_seconds())) {
52913 ++ curr->expires = 0;
52914 ++ curr->crashes = 0;
52915 ++ }
52916 ++
52917 ++ curr->crashes++;
52918 ++
52919 ++ if (!curr->expires)
52920 ++ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52921 ++
52922 ++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52923 ++ time_after(curr->expires, get_seconds())) {
52924 ++ rcu_read_lock();
52925 ++ cred = __task_cred(task);
52926 ++ if (cred->uid && proc_is_setxid(cred)) {
52927 ++ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52928 ++ spin_lock(&gr_uid_lock);
52929 ++ gr_insert_uid(cred->uid, curr->expires);
52930 ++ spin_unlock(&gr_uid_lock);
52931 ++ curr->expires = 0;
52932 ++ curr->crashes = 0;
52933 ++ read_lock(&tasklist_lock);
52934 ++ do_each_thread(tsk2, tsk) {
52935 ++ cred2 = __task_cred(tsk);
52936 ++ if (tsk != task && cred2->uid == cred->uid)
52937 ++ gr_fake_force_sig(SIGKILL, tsk);
52938 ++ } while_each_thread(tsk2, tsk);
52939 ++ read_unlock(&tasklist_lock);
52940 ++ } else {
52941 ++ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52942 ++ read_lock(&tasklist_lock);
52943 ++ read_lock(&grsec_exec_file_lock);
52944 ++ do_each_thread(tsk2, tsk) {
52945 ++ if (likely(tsk != task)) {
52946 ++ // if this thread has the same subject as the one that triggered
52947 ++ // RES_CRASH and it's the same binary, kill it
52948 ++ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
52949 ++ gr_fake_force_sig(SIGKILL, tsk);
52950 ++ }
52951 ++ } while_each_thread(tsk2, tsk);
52952 ++ read_unlock(&grsec_exec_file_lock);
52953 ++ read_unlock(&tasklist_lock);
52954 ++ }
52955 ++ rcu_read_unlock();
52956 ++ }
52957 ++
52958 ++ return;
52959 ++}
52960 ++
52961 ++int
52962 ++gr_check_crash_exec(const struct file *filp)
52963 ++{
52964 ++ struct acl_subject_label *curr;
52965 ++
52966 ++ if (unlikely(!gr_acl_is_enabled()))
52967 ++ return 0;
52968 ++
52969 ++ read_lock(&gr_inode_lock);
52970 ++ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52971 ++ __get_dev(filp->f_path.dentry),
52972 ++ current->role);
52973 ++ read_unlock(&gr_inode_lock);
52974 ++
52975 ++ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52976 ++ (!curr->crashes && !curr->expires))
52977 ++ return 0;
52978 ++
52979 ++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52980 ++ time_after(curr->expires, get_seconds()))
52981 ++ return 1;
52982 ++ else if (time_before_eq(curr->expires, get_seconds())) {
52983 ++ curr->crashes = 0;
52984 ++ curr->expires = 0;
52985 ++ }
52986 ++
52987 ++ return 0;
52988 ++}
52989 ++
52990 ++void
52991 ++gr_handle_alertkill(struct task_struct *task)
52992 ++{
52993 ++ struct acl_subject_label *curracl;
52994 ++ __u32 curr_ip;
52995 ++ struct task_struct *p, *p2;
52996 ++
52997 ++ if (unlikely(!gr_acl_is_enabled()))
52998 ++ return;
52999 ++
53000 ++ curracl = task->acl;
53001 ++ curr_ip = task->signal->curr_ip;
53002 ++
53003 ++ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
53004 ++ read_lock(&tasklist_lock);
53005 ++ do_each_thread(p2, p) {
53006 ++ if (p->signal->curr_ip == curr_ip)
53007 ++ gr_fake_force_sig(SIGKILL, p);
53008 ++ } while_each_thread(p2, p);
53009 ++ read_unlock(&tasklist_lock);
53010 ++ } else if (curracl->mode & GR_KILLPROC)
53011 ++ gr_fake_force_sig(SIGKILL, task);
53012 ++
53013 ++ return;
53014 ++}
53015 +diff -urNp linux-3.1.1/grsecurity/gracl_shm.c linux-3.1.1/grsecurity/gracl_shm.c
53016 +--- linux-3.1.1/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
53017 ++++ linux-3.1.1/grsecurity/gracl_shm.c 2011-11-16 18:40:31.000000000 -0500
53018 +@@ -0,0 +1,40 @@
53019 ++#include <linux/kernel.h>
53020 ++#include <linux/mm.h>
53021 ++#include <linux/sched.h>
53022 ++#include <linux/file.h>
53023 ++#include <linux/ipc.h>
53024 ++#include <linux/gracl.h>
53025 ++#include <linux/grsecurity.h>
53026 ++#include <linux/grinternal.h>
53027 ++
53028 ++int
53029 ++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53030 ++ const time_t shm_createtime, const uid_t cuid, const int shmid)
53031 ++{
53032 ++ struct task_struct *task;
53033 ++
53034 ++ if (!gr_acl_is_enabled())
53035 ++ return 1;
53036 ++
53037 ++ rcu_read_lock();
53038 ++ read_lock(&tasklist_lock);
53039 ++
53040 ++ task = find_task_by_vpid(shm_cprid);
53041 ++
53042 ++ if (unlikely(!task))
53043 ++ task = find_task_by_vpid(shm_lapid);
53044 ++
53045 ++ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
53046 ++ (task->pid == shm_lapid)) &&
53047 ++ (task->acl->mode & GR_PROTSHM) &&
53048 ++ (task->acl != current->acl))) {
53049 ++ read_unlock(&tasklist_lock);
53050 ++ rcu_read_unlock();
53051 ++ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
53052 ++ return 0;
53053 ++ }
53054 ++ read_unlock(&tasklist_lock);
53055 ++ rcu_read_unlock();
53056 ++
53057 ++ return 1;
53058 ++}
53059 +diff -urNp linux-3.1.1/grsecurity/grsec_chdir.c linux-3.1.1/grsecurity/grsec_chdir.c
53060 +--- linux-3.1.1/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
53061 ++++ linux-3.1.1/grsecurity/grsec_chdir.c 2011-11-16 18:40:31.000000000 -0500
53062 +@@ -0,0 +1,19 @@
53063 ++#include <linux/kernel.h>
53064 ++#include <linux/sched.h>
53065 ++#include <linux/fs.h>
53066 ++#include <linux/file.h>
53067 ++#include <linux/grsecurity.h>
53068 ++#include <linux/grinternal.h>
53069 ++
53070 ++void
53071 ++gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
53072 ++{
53073 ++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53074 ++ if ((grsec_enable_chdir && grsec_enable_group &&
53075 ++ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
53076 ++ !grsec_enable_group)) {
53077 ++ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
53078 ++ }
53079 ++#endif
53080 ++ return;
53081 ++}
53082 +diff -urNp linux-3.1.1/grsecurity/grsec_chroot.c linux-3.1.1/grsecurity/grsec_chroot.c
53083 +--- linux-3.1.1/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
53084 ++++ linux-3.1.1/grsecurity/grsec_chroot.c 2011-11-16 18:40:31.000000000 -0500
53085 +@@ -0,0 +1,351 @@
53086 ++#include <linux/kernel.h>
53087 ++#include <linux/module.h>
53088 ++#include <linux/sched.h>
53089 ++#include <linux/file.h>
53090 ++#include <linux/fs.h>
53091 ++#include <linux/mount.h>
53092 ++#include <linux/types.h>
53093 ++#include <linux/pid_namespace.h>
53094 ++#include <linux/grsecurity.h>
53095 ++#include <linux/grinternal.h>
53096 ++
53097 ++void gr_set_chroot_entries(struct task_struct *task, struct path *path)
53098 ++{
53099 ++#ifdef CONFIG_GRKERNSEC
53100 ++ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
53101 ++ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
53102 ++ task->gr_is_chrooted = 1;
53103 ++ else
53104 ++ task->gr_is_chrooted = 0;
53105 ++
53106 ++ task->gr_chroot_dentry = path->dentry;
53107 ++#endif
53108 ++ return;
53109 ++}
53110 ++
53111 ++void gr_clear_chroot_entries(struct task_struct *task)
53112 ++{
53113 ++#ifdef CONFIG_GRKERNSEC
53114 ++ task->gr_is_chrooted = 0;
53115 ++ task->gr_chroot_dentry = NULL;
53116 ++#endif
53117 ++ return;
53118 ++}
53119 ++
53120 ++int
53121 ++gr_handle_chroot_unix(const pid_t pid)
53122 ++{
53123 ++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53124 ++ struct task_struct *p;
53125 ++
53126 ++ if (unlikely(!grsec_enable_chroot_unix))
53127 ++ return 1;
53128 ++
53129 ++ if (likely(!proc_is_chrooted(current)))
53130 ++ return 1;
53131 ++
53132 ++ rcu_read_lock();
53133 ++ read_lock(&tasklist_lock);
53134 ++ p = find_task_by_vpid_unrestricted(pid);
53135 ++ if (unlikely(p && !have_same_root(current, p))) {
53136 ++ read_unlock(&tasklist_lock);
53137 ++ rcu_read_unlock();
53138 ++ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
53139 ++ return 0;
53140 ++ }
53141 ++ read_unlock(&tasklist_lock);
53142 ++ rcu_read_unlock();
53143 ++#endif
53144 ++ return 1;
53145 ++}
53146 ++
53147 ++int
53148 ++gr_handle_chroot_nice(void)
53149 ++{
53150 ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53151 ++ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
53152 ++ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
53153 ++ return -EPERM;
53154 ++ }
53155 ++#endif
53156 ++ return 0;
53157 ++}
53158 ++
53159 ++int
53160 ++gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
53161 ++{
53162 ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53163 ++ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
53164 ++ && proc_is_chrooted(current)) {
53165 ++ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
53166 ++ return -EACCES;
53167 ++ }
53168 ++#endif
53169 ++ return 0;
53170 ++}
53171 ++
53172 ++int
53173 ++gr_handle_chroot_rawio(const struct inode *inode)
53174 ++{
53175 ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53176 ++ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
53177 ++ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
53178 ++ return 1;
53179 ++#endif
53180 ++ return 0;
53181 ++}
53182 ++
53183 ++int
53184 ++gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
53185 ++{
53186 ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53187 ++ struct task_struct *p;
53188 ++ int ret = 0;
53189 ++ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
53190 ++ return ret;
53191 ++
53192 ++ read_lock(&tasklist_lock);
53193 ++ do_each_pid_task(pid, type, p) {
53194 ++ if (!have_same_root(current, p)) {
53195 ++ ret = 1;
53196 ++ goto out;
53197 ++ }
53198 ++ } while_each_pid_task(pid, type, p);
53199 ++out:
53200 ++ read_unlock(&tasklist_lock);
53201 ++ return ret;
53202 ++#endif
53203 ++ return 0;
53204 ++}
53205 ++
53206 ++int
53207 ++gr_pid_is_chrooted(struct task_struct *p)
53208 ++{
53209 ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53210 ++ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
53211 ++ return 0;
53212 ++
53213 ++ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
53214 ++ !have_same_root(current, p)) {
53215 ++ return 1;
53216 ++ }
53217 ++#endif
53218 ++ return 0;
53219 ++}
53220 ++
53221 ++EXPORT_SYMBOL(gr_pid_is_chrooted);
53222 ++
53223 ++#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
53224 ++int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
53225 ++{
53226 ++ struct path path, currentroot;
53227 ++ int ret = 0;
53228 ++
53229 ++ path.dentry = (struct dentry *)u_dentry;
53230 ++ path.mnt = (struct vfsmount *)u_mnt;
53231 ++ get_fs_root(current->fs, &currentroot);
53232 ++ if (path_is_under(&path, &currentroot))
53233 ++ ret = 1;
53234 ++ path_put(&currentroot);
53235 ++
53236 ++ return ret;
53237 ++}
53238 ++#endif
53239 ++
53240 ++int
53241 ++gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
53242 ++{
53243 ++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53244 ++ if (!grsec_enable_chroot_fchdir)
53245 ++ return 1;
53246 ++
53247 ++ if (!proc_is_chrooted(current))
53248 ++ return 1;
53249 ++ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
53250 ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
53251 ++ return 0;
53252 ++ }
53253 ++#endif
53254 ++ return 1;
53255 ++}
53256 ++
53257 ++int
53258 ++gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53259 ++ const time_t shm_createtime)
53260 ++{
53261 ++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53262 ++ struct task_struct *p;
53263 ++ time_t starttime;
53264 ++
53265 ++ if (unlikely(!grsec_enable_chroot_shmat))
53266 ++ return 1;
53267 ++
53268 ++ if (likely(!proc_is_chrooted(current)))
53269 ++ return 1;
53270 ++
53271 ++ rcu_read_lock();
53272 ++ read_lock(&tasklist_lock);
53273 ++
53274 ++ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
53275 ++ starttime = p->start_time.tv_sec;
53276 ++ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
53277 ++ if (have_same_root(current, p)) {
53278 ++ goto allow;
53279 ++ } else {
53280 ++ read_unlock(&tasklist_lock);
53281 ++ rcu_read_unlock();
53282 ++ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53283 ++ return 0;
53284 ++ }
53285 ++ }
53286 ++ /* creator exited, pid reuse, fall through to next check */
53287 ++ }
53288 ++ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
53289 ++ if (unlikely(!have_same_root(current, p))) {
53290 ++ read_unlock(&tasklist_lock);
53291 ++ rcu_read_unlock();
53292 ++ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53293 ++ return 0;
53294 ++ }
53295 ++ }
53296 ++
53297 ++allow:
53298 ++ read_unlock(&tasklist_lock);
53299 ++ rcu_read_unlock();
53300 ++#endif
53301 ++ return 1;
53302 ++}
53303 ++
53304 ++void
53305 ++gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
53306 ++{
53307 ++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53308 ++ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
53309 ++ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
53310 ++#endif
53311 ++ return;
53312 ++}
53313 ++
53314 ++int
53315 ++gr_handle_chroot_mknod(const struct dentry *dentry,
53316 ++ const struct vfsmount *mnt, const int mode)
53317 ++{
53318 ++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53319 ++ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
53320 ++ proc_is_chrooted(current)) {
53321 ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
53322 ++ return -EPERM;
53323 ++ }
53324 ++#endif
53325 ++ return 0;
53326 ++}
53327 ++
53328 ++int
53329 ++gr_handle_chroot_mount(const struct dentry *dentry,
53330 ++ const struct vfsmount *mnt, const char *dev_name)
53331 ++{
53332 ++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53333 ++ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
53334 ++ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
53335 ++ return -EPERM;
53336 ++ }
53337 ++#endif
53338 ++ return 0;
53339 ++}
53340 ++
53341 ++int
53342 ++gr_handle_chroot_pivot(void)
53343 ++{
53344 ++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53345 ++ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
53346 ++ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
53347 ++ return -EPERM;
53348 ++ }
53349 ++#endif
53350 ++ return 0;
53351 ++}
53352 ++
53353 ++int
53354 ++gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
53355 ++{
53356 ++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53357 ++ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
53358 ++ !gr_is_outside_chroot(dentry, mnt)) {
53359 ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
53360 ++ return -EPERM;
53361 ++ }
53362 ++#endif
53363 ++ return 0;
53364 ++}
53365 ++
53366 ++extern const char *captab_log[];
53367 ++extern int captab_log_entries;
53368 ++
53369 ++int
53370 ++gr_chroot_is_capable(const int cap)
53371 ++{
53372 ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53373 ++ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
53374 ++ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
53375 ++ if (cap_raised(chroot_caps, cap)) {
53376 ++ const struct cred *creds = current_cred();
53377 ++ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
53378 ++ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
53379 ++ }
53380 ++ return 0;
53381 ++ }
53382 ++ }
53383 ++#endif
53384 ++ return 1;
53385 ++}
53386 ++
53387 ++int
53388 ++gr_chroot_is_capable_nolog(const int cap)
53389 ++{
53390 ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53391 ++ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
53392 ++ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
53393 ++ if (cap_raised(chroot_caps, cap)) {
53394 ++ return 0;
53395 ++ }
53396 ++ }
53397 ++#endif
53398 ++ return 1;
53399 ++}
53400 ++
53401 ++int
53402 ++gr_handle_chroot_sysctl(const int op)
53403 ++{
53404 ++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53405 ++ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
53406 ++ proc_is_chrooted(current))
53407 ++ return -EACCES;
53408 ++#endif
53409 ++ return 0;
53410 ++}
53411 ++
53412 ++void
53413 ++gr_handle_chroot_chdir(struct path *path)
53414 ++{
53415 ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53416 ++ if (grsec_enable_chroot_chdir)
53417 ++ set_fs_pwd(current->fs, path);
53418 ++#endif
53419 ++ return;
53420 ++}
53421 ++
53422 ++int
53423 ++gr_handle_chroot_chmod(const struct dentry *dentry,
53424 ++ const struct vfsmount *mnt, const int mode)
53425 ++{
53426 ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53427 ++ /* allow chmod +s on directories, but not files */
53428 ++ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
53429 ++ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
53430 ++ proc_is_chrooted(current)) {
53431 ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
53432 ++ return -EPERM;
53433 ++ }
53434 ++#endif
53435 ++ return 0;
53436 ++}
53437 +diff -urNp linux-3.1.1/grsecurity/grsec_disabled.c linux-3.1.1/grsecurity/grsec_disabled.c
53438 +--- linux-3.1.1/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
53439 ++++ linux-3.1.1/grsecurity/grsec_disabled.c 2011-11-17 00:16:25.000000000 -0500
53440 +@@ -0,0 +1,439 @@
53441 ++#include <linux/kernel.h>
53442 ++#include <linux/module.h>
53443 ++#include <linux/sched.h>
53444 ++#include <linux/file.h>
53445 ++#include <linux/fs.h>
53446 ++#include <linux/kdev_t.h>
53447 ++#include <linux/net.h>
53448 ++#include <linux/in.h>
53449 ++#include <linux/ip.h>
53450 ++#include <linux/skbuff.h>
53451 ++#include <linux/sysctl.h>
53452 ++
53453 ++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
53454 ++void
53455 ++pax_set_initial_flags(struct linux_binprm *bprm)
53456 ++{
53457 ++ return;
53458 ++}
53459 ++#endif
53460 ++
53461 ++#ifdef CONFIG_SYSCTL
53462 ++__u32
53463 ++gr_handle_sysctl(const struct ctl_table * table, const int op)
53464 ++{
53465 ++ return 0;
53466 ++}
53467 ++#endif
53468 ++
53469 ++#ifdef CONFIG_TASKSTATS
53470 ++int gr_is_taskstats_denied(int pid)
53471 ++{
53472 ++ return 0;
53473 ++}
53474 ++#endif
53475 ++
53476 ++int
53477 ++gr_acl_is_enabled(void)
53478 ++{
53479 ++ return 0;
53480 ++}
53481 ++
53482 ++void
53483 ++gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53484 ++{
53485 ++ return;
53486 ++}
53487 ++
53488 ++int
53489 ++gr_handle_rawio(const struct inode *inode)
53490 ++{
53491 ++ return 0;
53492 ++}
53493 ++
53494 ++void
53495 ++gr_acl_handle_psacct(struct task_struct *task, const long code)
53496 ++{
53497 ++ return;
53498 ++}
53499 ++
53500 ++int
53501 ++gr_handle_ptrace(struct task_struct *task, const long request)
53502 ++{
53503 ++ return 0;
53504 ++}
53505 ++
53506 ++int
53507 ++gr_handle_proc_ptrace(struct task_struct *task)
53508 ++{
53509 ++ return 0;
53510 ++}
53511 ++
53512 ++void
53513 ++gr_learn_resource(const struct task_struct *task,
53514 ++ const int res, const unsigned long wanted, const int gt)
53515 ++{
53516 ++ return;
53517 ++}
53518 ++
53519 ++int
53520 ++gr_set_acls(const int type)
53521 ++{
53522 ++ return 0;
53523 ++}
53524 ++
53525 ++int
53526 ++gr_check_hidden_task(const struct task_struct *tsk)
53527 ++{
53528 ++ return 0;
53529 ++}
53530 ++
53531 ++int
53532 ++gr_check_protected_task(const struct task_struct *task)
53533 ++{
53534 ++ return 0;
53535 ++}
53536 ++
53537 ++int
53538 ++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53539 ++{
53540 ++ return 0;
53541 ++}
53542 ++
53543 ++void
53544 ++gr_copy_label(struct task_struct *tsk)
53545 ++{
53546 ++ return;
53547 ++}
53548 ++
53549 ++void
53550 ++gr_set_pax_flags(struct task_struct *task)
53551 ++{
53552 ++ return;
53553 ++}
53554 ++
53555 ++int
53556 ++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53557 ++ const int unsafe_share)
53558 ++{
53559 ++ return 0;
53560 ++}
53561 ++
53562 ++void
53563 ++gr_handle_delete(const ino_t ino, const dev_t dev)
53564 ++{
53565 ++ return;
53566 ++}
53567 ++
53568 ++void
53569 ++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53570 ++{
53571 ++ return;
53572 ++}
53573 ++
53574 ++void
53575 ++gr_handle_crash(struct task_struct *task, const int sig)
53576 ++{
53577 ++ return;
53578 ++}
53579 ++
53580 ++int
53581 ++gr_check_crash_exec(const struct file *filp)
53582 ++{
53583 ++ return 0;
53584 ++}
53585 ++
53586 ++int
53587 ++gr_check_crash_uid(const uid_t uid)
53588 ++{
53589 ++ return 0;
53590 ++}
53591 ++
53592 ++void
53593 ++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53594 ++ struct dentry *old_dentry,
53595 ++ struct dentry *new_dentry,
53596 ++ struct vfsmount *mnt, const __u8 replace)
53597 ++{
53598 ++ return;
53599 ++}
53600 ++
53601 ++int
53602 ++gr_search_socket(const int family, const int type, const int protocol)
53603 ++{
53604 ++ return 1;
53605 ++}
53606 ++
53607 ++int
53608 ++gr_search_connectbind(const int mode, const struct socket *sock,
53609 ++ const struct sockaddr_in *addr)
53610 ++{
53611 ++ return 0;
53612 ++}
53613 ++
53614 ++void
53615 ++gr_handle_alertkill(struct task_struct *task)
53616 ++{
53617 ++ return;
53618 ++}
53619 ++
53620 ++__u32
53621 ++gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
53622 ++{
53623 ++ return 1;
53624 ++}
53625 ++
53626 ++__u32
53627 ++gr_acl_handle_hidden_file(const struct dentry * dentry,
53628 ++ const struct vfsmount * mnt)
53629 ++{
53630 ++ return 1;
53631 ++}
53632 ++
53633 ++__u32
53634 ++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
53635 ++ int acc_mode)
53636 ++{
53637 ++ return 1;
53638 ++}
53639 ++
53640 ++__u32
53641 ++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53642 ++{
53643 ++ return 1;
53644 ++}
53645 ++
53646 ++__u32
53647 ++gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
53648 ++{
53649 ++ return 1;
53650 ++}
53651 ++
53652 ++int
53653 ++gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
53654 ++ unsigned int *vm_flags)
53655 ++{
53656 ++ return 1;
53657 ++}
53658 ++
53659 ++__u32
53660 ++gr_acl_handle_truncate(const struct dentry * dentry,
53661 ++ const struct vfsmount * mnt)
53662 ++{
53663 ++ return 1;
53664 ++}
53665 ++
53666 ++__u32
53667 ++gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
53668 ++{
53669 ++ return 1;
53670 ++}
53671 ++
53672 ++__u32
53673 ++gr_acl_handle_access(const struct dentry * dentry,
53674 ++ const struct vfsmount * mnt, const int fmode)
53675 ++{
53676 ++ return 1;
53677 ++}
53678 ++
53679 ++__u32
53680 ++gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
53681 ++ mode_t mode)
53682 ++{
53683 ++ return 1;
53684 ++}
53685 ++
53686 ++__u32
53687 ++gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
53688 ++ mode_t mode)
53689 ++{
53690 ++ return 1;
53691 ++}
53692 ++
53693 ++__u32
53694 ++gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
53695 ++{
53696 ++ return 1;
53697 ++}
53698 ++
53699 ++__u32
53700 ++gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53701 ++{
53702 ++ return 1;
53703 ++}
53704 ++
53705 ++void
53706 ++grsecurity_init(void)
53707 ++{
53708 ++ return;
53709 ++}
53710 ++
53711 ++__u32
53712 ++gr_acl_handle_mknod(const struct dentry * new_dentry,
53713 ++ const struct dentry * parent_dentry,
53714 ++ const struct vfsmount * parent_mnt,
53715 ++ const int mode)
53716 ++{
53717 ++ return 1;
53718 ++}
53719 ++
53720 ++__u32
53721 ++gr_acl_handle_mkdir(const struct dentry * new_dentry,
53722 ++ const struct dentry * parent_dentry,
53723 ++ const struct vfsmount * parent_mnt)
53724 ++{
53725 ++ return 1;
53726 ++}
53727 ++
53728 ++__u32
53729 ++gr_acl_handle_symlink(const struct dentry * new_dentry,
53730 ++ const struct dentry * parent_dentry,
53731 ++ const struct vfsmount * parent_mnt, const char *from)
53732 ++{
53733 ++ return 1;
53734 ++}
53735 ++
53736 ++__u32
53737 ++gr_acl_handle_link(const struct dentry * new_dentry,
53738 ++ const struct dentry * parent_dentry,
53739 ++ const struct vfsmount * parent_mnt,
53740 ++ const struct dentry * old_dentry,
53741 ++ const struct vfsmount * old_mnt, const char *to)
53742 ++{
53743 ++ return 1;
53744 ++}
53745 ++
53746 ++int
53747 ++gr_acl_handle_rename(const struct dentry *new_dentry,
53748 ++ const struct dentry *parent_dentry,
53749 ++ const struct vfsmount *parent_mnt,
53750 ++ const struct dentry *old_dentry,
53751 ++ const struct inode *old_parent_inode,
53752 ++ const struct vfsmount *old_mnt, const char *newname)
53753 ++{
53754 ++ return 0;
53755 ++}
53756 ++
53757 ++int
53758 ++gr_acl_handle_filldir(const struct file *file, const char *name,
53759 ++ const int namelen, const ino_t ino)
53760 ++{
53761 ++ return 1;
53762 ++}
53763 ++
53764 ++int
53765 ++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53766 ++ const time_t shm_createtime, const uid_t cuid, const int shmid)
53767 ++{
53768 ++ return 1;
53769 ++}
53770 ++
53771 ++int
53772 ++gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53773 ++{
53774 ++ return 0;
53775 ++}
53776 ++
53777 ++int
53778 ++gr_search_accept(const struct socket *sock)
53779 ++{
53780 ++ return 0;
53781 ++}
53782 ++
53783 ++int
53784 ++gr_search_listen(const struct socket *sock)
53785 ++{
53786 ++ return 0;
53787 ++}
53788 ++
53789 ++int
53790 ++gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53791 ++{
53792 ++ return 0;
53793 ++}
53794 ++
53795 ++__u32
53796 ++gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53797 ++{
53798 ++ return 1;
53799 ++}
53800 ++
53801 ++__u32
53802 ++gr_acl_handle_creat(const struct dentry * dentry,
53803 ++ const struct dentry * p_dentry,
53804 ++ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
53805 ++ const int imode)
53806 ++{
53807 ++ return 1;
53808 ++}
53809 ++
53810 ++void
53811 ++gr_acl_handle_exit(void)
53812 ++{
53813 ++ return;
53814 ++}
53815 ++
53816 ++int
53817 ++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53818 ++{
53819 ++ return 1;
53820 ++}
53821 ++
53822 ++void
53823 ++gr_set_role_label(const uid_t uid, const gid_t gid)
53824 ++{
53825 ++ return;
53826 ++}
53827 ++
53828 ++int
53829 ++gr_acl_handle_procpidmem(const struct task_struct *task)
53830 ++{
53831 ++ return 0;
53832 ++}
53833 ++
53834 ++int
53835 ++gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53836 ++{
53837 ++ return 0;
53838 ++}
53839 ++
53840 ++int
53841 ++gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53842 ++{
53843 ++ return 0;
53844 ++}
53845 ++
53846 ++void
53847 ++gr_set_kernel_label(struct task_struct *task)
53848 ++{
53849 ++ return;
53850 ++}
53851 ++
53852 ++int
53853 ++gr_check_user_change(int real, int effective, int fs)
53854 ++{
53855 ++ return 0;
53856 ++}
53857 ++
53858 ++int
53859 ++gr_check_group_change(int real, int effective, int fs)
53860 ++{
53861 ++ return 0;
53862 ++}
53863 ++
53864 ++int gr_acl_enable_at_secure(void)
53865 ++{
53866 ++ return 0;
53867 ++}
53868 ++
53869 ++dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53870 ++{
53871 ++ return dentry->d_inode->i_sb->s_dev;
53872 ++}
53873 ++
53874 ++EXPORT_SYMBOL(gr_learn_resource);
53875 ++EXPORT_SYMBOL(gr_set_kernel_label);
53876 ++#ifdef CONFIG_SECURITY
53877 ++EXPORT_SYMBOL(gr_check_user_change);
53878 ++EXPORT_SYMBOL(gr_check_group_change);
53879 ++#endif
53880 +diff -urNp linux-3.1.1/grsecurity/grsec_exec.c linux-3.1.1/grsecurity/grsec_exec.c
53881 +--- linux-3.1.1/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53882 ++++ linux-3.1.1/grsecurity/grsec_exec.c 2011-11-16 18:40:31.000000000 -0500
53883 +@@ -0,0 +1,146 @@
53884 ++#include <linux/kernel.h>
53885 ++#include <linux/sched.h>
53886 ++#include <linux/file.h>
53887 ++#include <linux/binfmts.h>
53888 ++#include <linux/fs.h>
53889 ++#include <linux/types.h>
53890 ++#include <linux/grdefs.h>
53891 ++#include <linux/grsecurity.h>
53892 ++#include <linux/grinternal.h>
53893 ++#include <linux/capability.h>
53894 ++#include <linux/module.h>
53895 ++
53896 ++#include <asm/uaccess.h>
53897 ++
53898 ++#ifdef CONFIG_GRKERNSEC_EXECLOG
53899 ++static char gr_exec_arg_buf[132];
53900 ++static DEFINE_MUTEX(gr_exec_arg_mutex);
53901 ++#endif
53902 ++
53903 ++extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
53904 ++
53905 ++void
53906 ++gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
53907 ++{
53908 ++#ifdef CONFIG_GRKERNSEC_EXECLOG
53909 ++ char *grarg = gr_exec_arg_buf;
53910 ++ unsigned int i, x, execlen = 0;
53911 ++ char c;
53912 ++
53913 ++ if (!((grsec_enable_execlog && grsec_enable_group &&
53914 ++ in_group_p(grsec_audit_gid))
53915 ++ || (grsec_enable_execlog && !grsec_enable_group)))
53916 ++ return;
53917 ++
53918 ++ mutex_lock(&gr_exec_arg_mutex);
53919 ++ memset(grarg, 0, sizeof(gr_exec_arg_buf));
53920 ++
53921 ++ for (i = 0; i < bprm->argc && execlen < 128; i++) {
53922 ++ const char __user *p;
53923 ++ unsigned int len;
53924 ++
53925 ++ p = get_user_arg_ptr(argv, i);
53926 ++ if (IS_ERR(p))
53927 ++ goto log;
53928 ++
53929 ++ len = strnlen_user(p, 128 - execlen);
53930 ++ if (len > 128 - execlen)
53931 ++ len = 128 - execlen;
53932 ++ else if (len > 0)
53933 ++ len--;
53934 ++ if (copy_from_user(grarg + execlen, p, len))
53935 ++ goto log;
53936 ++
53937 ++ /* rewrite unprintable characters */
53938 ++ for (x = 0; x < len; x++) {
53939 ++ c = *(grarg + execlen + x);
53940 ++ if (c < 32 || c > 126)
53941 ++ *(grarg + execlen + x) = ' ';
53942 ++ }
53943 ++
53944 ++ execlen += len;
53945 ++ *(grarg + execlen) = ' ';
53946 ++ *(grarg + execlen + 1) = '\0';
53947 ++ execlen++;
53948 ++ }
53949 ++
53950 ++ log:
53951 ++ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53952 ++ bprm->file->f_path.mnt, grarg);
53953 ++ mutex_unlock(&gr_exec_arg_mutex);
53954 ++#endif
53955 ++ return;
53956 ++}
53957 ++
53958 ++#ifdef CONFIG_GRKERNSEC
53959 ++extern int gr_acl_is_capable(const int cap);
53960 ++extern int gr_acl_is_capable_nolog(const int cap);
53961 ++extern int gr_chroot_is_capable(const int cap);
53962 ++extern int gr_chroot_is_capable_nolog(const int cap);
53963 ++#endif
53964 ++
53965 ++const char *captab_log[] = {
53966 ++ "CAP_CHOWN",
53967 ++ "CAP_DAC_OVERRIDE",
53968 ++ "CAP_DAC_READ_SEARCH",
53969 ++ "CAP_FOWNER",
53970 ++ "CAP_FSETID",
53971 ++ "CAP_KILL",
53972 ++ "CAP_SETGID",
53973 ++ "CAP_SETUID",
53974 ++ "CAP_SETPCAP",
53975 ++ "CAP_LINUX_IMMUTABLE",
53976 ++ "CAP_NET_BIND_SERVICE",
53977 ++ "CAP_NET_BROADCAST",
53978 ++ "CAP_NET_ADMIN",
53979 ++ "CAP_NET_RAW",
53980 ++ "CAP_IPC_LOCK",
53981 ++ "CAP_IPC_OWNER",
53982 ++ "CAP_SYS_MODULE",
53983 ++ "CAP_SYS_RAWIO",
53984 ++ "CAP_SYS_CHROOT",
53985 ++ "CAP_SYS_PTRACE",
53986 ++ "CAP_SYS_PACCT",
53987 ++ "CAP_SYS_ADMIN",
53988 ++ "CAP_SYS_BOOT",
53989 ++ "CAP_SYS_NICE",
53990 ++ "CAP_SYS_RESOURCE",
53991 ++ "CAP_SYS_TIME",
53992 ++ "CAP_SYS_TTY_CONFIG",
53993 ++ "CAP_MKNOD",
53994 ++ "CAP_LEASE",
53995 ++ "CAP_AUDIT_WRITE",
53996 ++ "CAP_AUDIT_CONTROL",
53997 ++ "CAP_SETFCAP",
53998 ++ "CAP_MAC_OVERRIDE",
53999 ++ "CAP_MAC_ADMIN",
54000 ++ "CAP_SYSLOG",
54001 ++ "CAP_WAKE_ALARM"
54002 ++};
54003 ++
54004 ++int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
54005 ++
54006 ++int gr_is_capable(const int cap)
54007 ++{
54008 ++#ifdef CONFIG_GRKERNSEC
54009 ++ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
54010 ++ return 1;
54011 ++ return 0;
54012 ++#else
54013 ++ return 1;
54014 ++#endif
54015 ++}
54016 ++
54017 ++int gr_is_capable_nolog(const int cap)
54018 ++{
54019 ++#ifdef CONFIG_GRKERNSEC
54020 ++ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
54021 ++ return 1;
54022 ++ return 0;
54023 ++#else
54024 ++ return 1;
54025 ++#endif
54026 ++}
54027 ++
54028 ++EXPORT_SYMBOL(gr_is_capable);
54029 ++EXPORT_SYMBOL(gr_is_capable_nolog);
54030 +diff -urNp linux-3.1.1/grsecurity/grsec_fifo.c linux-3.1.1/grsecurity/grsec_fifo.c
54031 +--- linux-3.1.1/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
54032 ++++ linux-3.1.1/grsecurity/grsec_fifo.c 2011-11-16 18:40:31.000000000 -0500
54033 +@@ -0,0 +1,24 @@
54034 ++#include <linux/kernel.h>
54035 ++#include <linux/sched.h>
54036 ++#include <linux/fs.h>
54037 ++#include <linux/file.h>
54038 ++#include <linux/grinternal.h>
54039 ++
54040 ++int
54041 ++gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
54042 ++ const struct dentry *dir, const int flag, const int acc_mode)
54043 ++{
54044 ++#ifdef CONFIG_GRKERNSEC_FIFO
54045 ++ const struct cred *cred = current_cred();
54046 ++
54047 ++ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
54048 ++ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
54049 ++ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
54050 ++ (cred->fsuid != dentry->d_inode->i_uid)) {
54051 ++ if (!inode_permission(dentry->d_inode, acc_mode))
54052 ++ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
54053 ++ return -EACCES;
54054 ++ }
54055 ++#endif
54056 ++ return 0;
54057 ++}
54058 +diff -urNp linux-3.1.1/grsecurity/grsec_fork.c linux-3.1.1/grsecurity/grsec_fork.c
54059 +--- linux-3.1.1/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
54060 ++++ linux-3.1.1/grsecurity/grsec_fork.c 2011-11-16 18:40:31.000000000 -0500
54061 +@@ -0,0 +1,23 @@
54062 ++#include <linux/kernel.h>
54063 ++#include <linux/sched.h>
54064 ++#include <linux/grsecurity.h>
54065 ++#include <linux/grinternal.h>
54066 ++#include <linux/errno.h>
54067 ++
54068 ++void
54069 ++gr_log_forkfail(const int retval)
54070 ++{
54071 ++#ifdef CONFIG_GRKERNSEC_FORKFAIL
54072 ++ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
54073 ++ switch (retval) {
54074 ++ case -EAGAIN:
54075 ++ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
54076 ++ break;
54077 ++ case -ENOMEM:
54078 ++ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
54079 ++ break;
54080 ++ }
54081 ++ }
54082 ++#endif
54083 ++ return;
54084 ++}
54085 +diff -urNp linux-3.1.1/grsecurity/grsec_init.c linux-3.1.1/grsecurity/grsec_init.c
54086 +--- linux-3.1.1/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
54087 ++++ linux-3.1.1/grsecurity/grsec_init.c 2011-11-16 18:40:31.000000000 -0500
54088 +@@ -0,0 +1,269 @@
54089 ++#include <linux/kernel.h>
54090 ++#include <linux/sched.h>
54091 ++#include <linux/mm.h>
54092 ++#include <linux/gracl.h>
54093 ++#include <linux/slab.h>
54094 ++#include <linux/vmalloc.h>
54095 ++#include <linux/percpu.h>
54096 ++#include <linux/module.h>
54097 ++
54098 ++int grsec_enable_brute;
54099 ++int grsec_enable_link;
54100 ++int grsec_enable_dmesg;
54101 ++int grsec_enable_harden_ptrace;
54102 ++int grsec_enable_fifo;
54103 ++int grsec_enable_execlog;
54104 ++int grsec_enable_signal;
54105 ++int grsec_enable_forkfail;
54106 ++int grsec_enable_audit_ptrace;
54107 ++int grsec_enable_time;
54108 ++int grsec_enable_audit_textrel;
54109 ++int grsec_enable_group;
54110 ++int grsec_audit_gid;
54111 ++int grsec_enable_chdir;
54112 ++int grsec_enable_mount;
54113 ++int grsec_enable_rofs;
54114 ++int grsec_enable_chroot_findtask;
54115 ++int grsec_enable_chroot_mount;
54116 ++int grsec_enable_chroot_shmat;
54117 ++int grsec_enable_chroot_fchdir;
54118 ++int grsec_enable_chroot_double;
54119 ++int grsec_enable_chroot_pivot;
54120 ++int grsec_enable_chroot_chdir;
54121 ++int grsec_enable_chroot_chmod;
54122 ++int grsec_enable_chroot_mknod;
54123 ++int grsec_enable_chroot_nice;
54124 ++int grsec_enable_chroot_execlog;
54125 ++int grsec_enable_chroot_caps;
54126 ++int grsec_enable_chroot_sysctl;
54127 ++int grsec_enable_chroot_unix;
54128 ++int grsec_enable_tpe;
54129 ++int grsec_tpe_gid;
54130 ++int grsec_enable_blackhole;
54131 ++#ifdef CONFIG_IPV6_MODULE
54132 ++EXPORT_SYMBOL(grsec_enable_blackhole);
54133 ++#endif
54134 ++int grsec_lastack_retries;
54135 ++int grsec_enable_tpe_all;
54136 ++int grsec_enable_tpe_invert;
54137 ++int grsec_enable_socket_all;
54138 ++int grsec_socket_all_gid;
54139 ++int grsec_enable_socket_client;
54140 ++int grsec_socket_client_gid;
54141 ++int grsec_enable_socket_server;
54142 ++int grsec_socket_server_gid;
54143 ++int grsec_resource_logging;
54144 ++int grsec_disable_privio;
54145 ++int grsec_enable_log_rwxmaps;
54146 ++int grsec_lock;
54147 ++
54148 ++DEFINE_SPINLOCK(grsec_alert_lock);
54149 ++unsigned long grsec_alert_wtime = 0;
54150 ++unsigned long grsec_alert_fyet = 0;
54151 ++
54152 ++DEFINE_SPINLOCK(grsec_audit_lock);
54153 ++
54154 ++DEFINE_RWLOCK(grsec_exec_file_lock);
54155 ++
54156 ++char *gr_shared_page[4];
54157 ++
54158 ++char *gr_alert_log_fmt;
54159 ++char *gr_audit_log_fmt;
54160 ++char *gr_alert_log_buf;
54161 ++char *gr_audit_log_buf;
54162 ++
54163 ++extern struct gr_arg *gr_usermode;
54164 ++extern unsigned char *gr_system_salt;
54165 ++extern unsigned char *gr_system_sum;
54166 ++
54167 ++void __init
54168 ++grsecurity_init(void)
54169 ++{
54170 ++ int j;
54171 ++ /* create the per-cpu shared pages */
54172 ++
54173 ++#ifdef CONFIG_X86
54174 ++ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
54175 ++#endif
54176 ++
54177 ++ for (j = 0; j < 4; j++) {
54178 ++ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
54179 ++ if (gr_shared_page[j] == NULL) {
54180 ++ panic("Unable to allocate grsecurity shared page");
54181 ++ return;
54182 ++ }
54183 ++ }
54184 ++
54185 ++ /* allocate log buffers */
54186 ++ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
54187 ++ if (!gr_alert_log_fmt) {
54188 ++ panic("Unable to allocate grsecurity alert log format buffer");
54189 ++ return;
54190 ++ }
54191 ++ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
54192 ++ if (!gr_audit_log_fmt) {
54193 ++ panic("Unable to allocate grsecurity audit log format buffer");
54194 ++ return;
54195 ++ }
54196 ++ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54197 ++ if (!gr_alert_log_buf) {
54198 ++ panic("Unable to allocate grsecurity alert log buffer");
54199 ++ return;
54200 ++ }
54201 ++ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54202 ++ if (!gr_audit_log_buf) {
54203 ++ panic("Unable to allocate grsecurity audit log buffer");
54204 ++ return;
54205 ++ }
54206 ++
54207 ++ /* allocate memory for authentication structure */
54208 ++ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
54209 ++ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
54210 ++ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
54211 ++
54212 ++ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
54213 ++ panic("Unable to allocate grsecurity authentication structure");
54214 ++ return;
54215 ++ }
54216 ++
54217 ++
54218 ++#ifdef CONFIG_GRKERNSEC_IO
54219 ++#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
54220 ++ grsec_disable_privio = 1;
54221 ++#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
54222 ++ grsec_disable_privio = 1;
54223 ++#else
54224 ++ grsec_disable_privio = 0;
54225 ++#endif
54226 ++#endif
54227 ++
54228 ++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
54229 ++ /* for backward compatibility, tpe_invert always defaults to on if
54230 ++ enabled in the kernel
54231 ++ */
54232 ++ grsec_enable_tpe_invert = 1;
54233 ++#endif
54234 ++
54235 ++#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
54236 ++#ifndef CONFIG_GRKERNSEC_SYSCTL
54237 ++ grsec_lock = 1;
54238 ++#endif
54239 ++
54240 ++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54241 ++ grsec_enable_audit_textrel = 1;
54242 ++#endif
54243 ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54244 ++ grsec_enable_log_rwxmaps = 1;
54245 ++#endif
54246 ++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
54247 ++ grsec_enable_group = 1;
54248 ++ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
54249 ++#endif
54250 ++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54251 ++ grsec_enable_chdir = 1;
54252 ++#endif
54253 ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54254 ++ grsec_enable_harden_ptrace = 1;
54255 ++#endif
54256 ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54257 ++ grsec_enable_mount = 1;
54258 ++#endif
54259 ++#ifdef CONFIG_GRKERNSEC_LINK
54260 ++ grsec_enable_link = 1;
54261 ++#endif
54262 ++#ifdef CONFIG_GRKERNSEC_BRUTE
54263 ++ grsec_enable_brute = 1;
54264 ++#endif
54265 ++#ifdef CONFIG_GRKERNSEC_DMESG
54266 ++ grsec_enable_dmesg = 1;
54267 ++#endif
54268 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54269 ++ grsec_enable_blackhole = 1;
54270 ++ grsec_lastack_retries = 4;
54271 ++#endif
54272 ++#ifdef CONFIG_GRKERNSEC_FIFO
54273 ++ grsec_enable_fifo = 1;
54274 ++#endif
54275 ++#ifdef CONFIG_GRKERNSEC_EXECLOG
54276 ++ grsec_enable_execlog = 1;
54277 ++#endif
54278 ++#ifdef CONFIG_GRKERNSEC_SIGNAL
54279 ++ grsec_enable_signal = 1;
54280 ++#endif
54281 ++#ifdef CONFIG_GRKERNSEC_FORKFAIL
54282 ++ grsec_enable_forkfail = 1;
54283 ++#endif
54284 ++#ifdef CONFIG_GRKERNSEC_TIME
54285 ++ grsec_enable_time = 1;
54286 ++#endif
54287 ++#ifdef CONFIG_GRKERNSEC_RESLOG
54288 ++ grsec_resource_logging = 1;
54289 ++#endif
54290 ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54291 ++ grsec_enable_chroot_findtask = 1;
54292 ++#endif
54293 ++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54294 ++ grsec_enable_chroot_unix = 1;
54295 ++#endif
54296 ++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54297 ++ grsec_enable_chroot_mount = 1;
54298 ++#endif
54299 ++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54300 ++ grsec_enable_chroot_fchdir = 1;
54301 ++#endif
54302 ++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54303 ++ grsec_enable_chroot_shmat = 1;
54304 ++#endif
54305 ++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54306 ++ grsec_enable_audit_ptrace = 1;
54307 ++#endif
54308 ++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54309 ++ grsec_enable_chroot_double = 1;
54310 ++#endif
54311 ++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54312 ++ grsec_enable_chroot_pivot = 1;
54313 ++#endif
54314 ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54315 ++ grsec_enable_chroot_chdir = 1;
54316 ++#endif
54317 ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54318 ++ grsec_enable_chroot_chmod = 1;
54319 ++#endif
54320 ++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54321 ++ grsec_enable_chroot_mknod = 1;
54322 ++#endif
54323 ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54324 ++ grsec_enable_chroot_nice = 1;
54325 ++#endif
54326 ++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54327 ++ grsec_enable_chroot_execlog = 1;
54328 ++#endif
54329 ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54330 ++ grsec_enable_chroot_caps = 1;
54331 ++#endif
54332 ++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54333 ++ grsec_enable_chroot_sysctl = 1;
54334 ++#endif
54335 ++#ifdef CONFIG_GRKERNSEC_TPE
54336 ++ grsec_enable_tpe = 1;
54337 ++ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
54338 ++#ifdef CONFIG_GRKERNSEC_TPE_ALL
54339 ++ grsec_enable_tpe_all = 1;
54340 ++#endif
54341 ++#endif
54342 ++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54343 ++ grsec_enable_socket_all = 1;
54344 ++ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
54345 ++#endif
54346 ++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54347 ++ grsec_enable_socket_client = 1;
54348 ++ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
54349 ++#endif
54350 ++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54351 ++ grsec_enable_socket_server = 1;
54352 ++ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
54353 ++#endif
54354 ++#endif
54355 ++
54356 ++ return;
54357 ++}
54358 +diff -urNp linux-3.1.1/grsecurity/grsec_link.c linux-3.1.1/grsecurity/grsec_link.c
54359 +--- linux-3.1.1/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
54360 ++++ linux-3.1.1/grsecurity/grsec_link.c 2011-11-16 18:40:31.000000000 -0500
54361 +@@ -0,0 +1,43 @@
54362 ++#include <linux/kernel.h>
54363 ++#include <linux/sched.h>
54364 ++#include <linux/fs.h>
54365 ++#include <linux/file.h>
54366 ++#include <linux/grinternal.h>
54367 ++
54368 ++int
54369 ++gr_handle_follow_link(const struct inode *parent,
54370 ++ const struct inode *inode,
54371 ++ const struct dentry *dentry, const struct vfsmount *mnt)
54372 ++{
54373 ++#ifdef CONFIG_GRKERNSEC_LINK
54374 ++ const struct cred *cred = current_cred();
54375 ++
54376 ++ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
54377 ++ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
54378 ++ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
54379 ++ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
54380 ++ return -EACCES;
54381 ++ }
54382 ++#endif
54383 ++ return 0;
54384 ++}
54385 ++
54386 ++int
54387 ++gr_handle_hardlink(const struct dentry *dentry,
54388 ++ const struct vfsmount *mnt,
54389 ++ struct inode *inode, const int mode, const char *to)
54390 ++{
54391 ++#ifdef CONFIG_GRKERNSEC_LINK
54392 ++ const struct cred *cred = current_cred();
54393 ++
54394 ++ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
54395 ++ (!S_ISREG(mode) || (mode & S_ISUID) ||
54396 ++ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
54397 ++ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
54398 ++ !capable(CAP_FOWNER) && cred->uid) {
54399 ++ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
54400 ++ return -EPERM;
54401 ++ }
54402 ++#endif
54403 ++ return 0;
54404 ++}
54405 +diff -urNp linux-3.1.1/grsecurity/grsec_log.c linux-3.1.1/grsecurity/grsec_log.c
54406 +--- linux-3.1.1/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
54407 ++++ linux-3.1.1/grsecurity/grsec_log.c 2011-11-16 18:40:31.000000000 -0500
54408 +@@ -0,0 +1,322 @@
54409 ++#include <linux/kernel.h>
54410 ++#include <linux/sched.h>
54411 ++#include <linux/file.h>
54412 ++#include <linux/tty.h>
54413 ++#include <linux/fs.h>
54414 ++#include <linux/grinternal.h>
54415 ++
54416 ++#ifdef CONFIG_TREE_PREEMPT_RCU
54417 ++#define DISABLE_PREEMPT() preempt_disable()
54418 ++#define ENABLE_PREEMPT() preempt_enable()
54419 ++#else
54420 ++#define DISABLE_PREEMPT()
54421 ++#define ENABLE_PREEMPT()
54422 ++#endif
54423 ++
54424 ++#define BEGIN_LOCKS(x) \
54425 ++ DISABLE_PREEMPT(); \
54426 ++ rcu_read_lock(); \
54427 ++ read_lock(&tasklist_lock); \
54428 ++ read_lock(&grsec_exec_file_lock); \
54429 ++ if (x != GR_DO_AUDIT) \
54430 ++ spin_lock(&grsec_alert_lock); \
54431 ++ else \
54432 ++ spin_lock(&grsec_audit_lock)
54433 ++
54434 ++#define END_LOCKS(x) \
54435 ++ if (x != GR_DO_AUDIT) \
54436 ++ spin_unlock(&grsec_alert_lock); \
54437 ++ else \
54438 ++ spin_unlock(&grsec_audit_lock); \
54439 ++ read_unlock(&grsec_exec_file_lock); \
54440 ++ read_unlock(&tasklist_lock); \
54441 ++ rcu_read_unlock(); \
54442 ++ ENABLE_PREEMPT(); \
54443 ++ if (x == GR_DONT_AUDIT) \
54444 ++ gr_handle_alertkill(current)
54445 ++
54446 ++enum {
54447 ++ FLOODING,
54448 ++ NO_FLOODING
54449 ++};
54450 ++
54451 ++extern char *gr_alert_log_fmt;
54452 ++extern char *gr_audit_log_fmt;
54453 ++extern char *gr_alert_log_buf;
54454 ++extern char *gr_audit_log_buf;
54455 ++
54456 ++static int gr_log_start(int audit)
54457 ++{
54458 ++ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
54459 ++ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
54460 ++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54461 ++#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
54462 ++ unsigned long curr_secs = get_seconds();
54463 ++
54464 ++ if (audit == GR_DO_AUDIT)
54465 ++ goto set_fmt;
54466 ++
54467 ++ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
54468 ++ grsec_alert_wtime = curr_secs;
54469 ++ grsec_alert_fyet = 0;
54470 ++ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
54471 ++ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
54472 ++ grsec_alert_fyet++;
54473 ++ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
54474 ++ grsec_alert_wtime = curr_secs;
54475 ++ grsec_alert_fyet++;
54476 ++ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
54477 ++ return FLOODING;
54478 ++ }
54479 ++ else return FLOODING;
54480 ++
54481 ++set_fmt:
54482 ++#endif
54483 ++ memset(buf, 0, PAGE_SIZE);
54484 ++ if (current->signal->curr_ip && gr_acl_is_enabled()) {
54485 ++ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
54486 ++ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54487 ++ } else if (current->signal->curr_ip) {
54488 ++ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
54489 ++ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
54490 ++ } else if (gr_acl_is_enabled()) {
54491 ++ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
54492 ++ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54493 ++ } else {
54494 ++ sprintf(fmt, "%s%s", loglevel, "grsec: ");
54495 ++ strcpy(buf, fmt);
54496 ++ }
54497 ++
54498 ++ return NO_FLOODING;
54499 ++}
54500 ++
54501 ++static void gr_log_middle(int audit, const char *msg, va_list ap)
54502 ++ __attribute__ ((format (printf, 2, 0)));
54503 ++
54504 ++static void gr_log_middle(int audit, const char *msg, va_list ap)
54505 ++{
54506 ++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54507 ++ unsigned int len = strlen(buf);
54508 ++
54509 ++ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54510 ++
54511 ++ return;
54512 ++}
54513 ++
54514 ++static void gr_log_middle_varargs(int audit, const char *msg, ...)
54515 ++ __attribute__ ((format (printf, 2, 3)));
54516 ++
54517 ++static void gr_log_middle_varargs(int audit, const char *msg, ...)
54518 ++{
54519 ++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54520 ++ unsigned int len = strlen(buf);
54521 ++ va_list ap;
54522 ++
54523 ++ va_start(ap, msg);
54524 ++ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54525 ++ va_end(ap);
54526 ++
54527 ++ return;
54528 ++}
54529 ++
54530 ++static void gr_log_end(int audit, int append_default)
54531 ++{
54532 ++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54533 ++
54534 ++ if (append_default) {
54535 ++ unsigned int len = strlen(buf);
54536 ++ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
54537 ++ }
54538 ++
54539 ++ printk("%s\n", buf);
54540 ++
54541 ++ return;
54542 ++}
54543 ++
54544 ++void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
54545 ++{
54546 ++ int logtype;
54547 ++ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
54548 ++ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
54549 ++ void *voidptr = NULL;
54550 ++ int num1 = 0, num2 = 0;
54551 ++ unsigned long ulong1 = 0, ulong2 = 0;
54552 ++ struct dentry *dentry = NULL;
54553 ++ struct vfsmount *mnt = NULL;
54554 ++ struct file *file = NULL;
54555 ++ struct task_struct *task = NULL;
54556 ++ const struct cred *cred, *pcred;
54557 ++ va_list ap;
54558 ++
54559 ++ BEGIN_LOCKS(audit);
54560 ++ logtype = gr_log_start(audit);
54561 ++ if (logtype == FLOODING) {
54562 ++ END_LOCKS(audit);
54563 ++ return;
54564 ++ }
54565 ++ va_start(ap, argtypes);
54566 ++ switch (argtypes) {
54567 ++ case GR_TTYSNIFF:
54568 ++ task = va_arg(ap, struct task_struct *);
54569 ++ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
54570 ++ break;
54571 ++ case GR_SYSCTL_HIDDEN:
54572 ++ str1 = va_arg(ap, char *);
54573 ++ gr_log_middle_varargs(audit, msg, result, str1);
54574 ++ break;
54575 ++ case GR_RBAC:
54576 ++ dentry = va_arg(ap, struct dentry *);
54577 ++ mnt = va_arg(ap, struct vfsmount *);
54578 ++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
54579 ++ break;
54580 ++ case GR_RBAC_STR:
54581 ++ dentry = va_arg(ap, struct dentry *);
54582 ++ mnt = va_arg(ap, struct vfsmount *);
54583 ++ str1 = va_arg(ap, char *);
54584 ++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
54585 ++ break;
54586 ++ case GR_STR_RBAC:
54587 ++ str1 = va_arg(ap, char *);
54588 ++ dentry = va_arg(ap, struct dentry *);
54589 ++ mnt = va_arg(ap, struct vfsmount *);
54590 ++ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
54591 ++ break;
54592 ++ case GR_RBAC_MODE2:
54593 ++ dentry = va_arg(ap, struct dentry *);
54594 ++ mnt = va_arg(ap, struct vfsmount *);
54595 ++ str1 = va_arg(ap, char *);
54596 ++ str2 = va_arg(ap, char *);
54597 ++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
54598 ++ break;
54599 ++ case GR_RBAC_MODE3:
54600 ++ dentry = va_arg(ap, struct dentry *);
54601 ++ mnt = va_arg(ap, struct vfsmount *);
54602 ++ str1 = va_arg(ap, char *);
54603 ++ str2 = va_arg(ap, char *);
54604 ++ str3 = va_arg(ap, char *);
54605 ++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
54606 ++ break;
54607 ++ case GR_FILENAME:
54608 ++ dentry = va_arg(ap, struct dentry *);
54609 ++ mnt = va_arg(ap, struct vfsmount *);
54610 ++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
54611 ++ break;
54612 ++ case GR_STR_FILENAME:
54613 ++ str1 = va_arg(ap, char *);
54614 ++ dentry = va_arg(ap, struct dentry *);
54615 ++ mnt = va_arg(ap, struct vfsmount *);
54616 ++ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
54617 ++ break;
54618 ++ case GR_FILENAME_STR:
54619 ++ dentry = va_arg(ap, struct dentry *);
54620 ++ mnt = va_arg(ap, struct vfsmount *);
54621 ++ str1 = va_arg(ap, char *);
54622 ++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
54623 ++ break;
54624 ++ case GR_FILENAME_TWO_INT:
54625 ++ dentry = va_arg(ap, struct dentry *);
54626 ++ mnt = va_arg(ap, struct vfsmount *);
54627 ++ num1 = va_arg(ap, int);
54628 ++ num2 = va_arg(ap, int);
54629 ++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
54630 ++ break;
54631 ++ case GR_FILENAME_TWO_INT_STR:
54632 ++ dentry = va_arg(ap, struct dentry *);
54633 ++ mnt = va_arg(ap, struct vfsmount *);
54634 ++ num1 = va_arg(ap, int);
54635 ++ num2 = va_arg(ap, int);
54636 ++ str1 = va_arg(ap, char *);
54637 ++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
54638 ++ break;
54639 ++ case GR_TEXTREL:
54640 ++ file = va_arg(ap, struct file *);
54641 ++ ulong1 = va_arg(ap, unsigned long);
54642 ++ ulong2 = va_arg(ap, unsigned long);
54643 ++ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
54644 ++ break;
54645 ++ case GR_PTRACE:
54646 ++ task = va_arg(ap, struct task_struct *);
54647 ++ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
54648 ++ break;
54649 ++ case GR_RESOURCE:
54650 ++ task = va_arg(ap, struct task_struct *);
54651 ++ cred = __task_cred(task);
54652 ++ pcred = __task_cred(task->real_parent);
54653 ++ ulong1 = va_arg(ap, unsigned long);
54654 ++ str1 = va_arg(ap, char *);
54655 ++ ulong2 = va_arg(ap, unsigned long);
54656 ++ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54657 ++ break;
54658 ++ case GR_CAP:
54659 ++ task = va_arg(ap, struct task_struct *);
54660 ++ cred = __task_cred(task);
54661 ++ pcred = __task_cred(task->real_parent);
54662 ++ str1 = va_arg(ap, char *);
54663 ++ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54664 ++ break;
54665 ++ case GR_SIG:
54666 ++ str1 = va_arg(ap, char *);
54667 ++ voidptr = va_arg(ap, void *);
54668 ++ gr_log_middle_varargs(audit, msg, str1, voidptr);
54669 ++ break;
54670 ++ case GR_SIG2:
54671 ++ task = va_arg(ap, struct task_struct *);
54672 ++ cred = __task_cred(task);
54673 ++ pcred = __task_cred(task->real_parent);
54674 ++ num1 = va_arg(ap, int);
54675 ++ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54676 ++ break;
54677 ++ case GR_CRASH1:
54678 ++ task = va_arg(ap, struct task_struct *);
54679 ++ cred = __task_cred(task);
54680 ++ pcred = __task_cred(task->real_parent);
54681 ++ ulong1 = va_arg(ap, unsigned long);
54682 ++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
54683 ++ break;
54684 ++ case GR_CRASH2:
54685 ++ task = va_arg(ap, struct task_struct *);
54686 ++ cred = __task_cred(task);
54687 ++ pcred = __task_cred(task->real_parent);
54688 ++ ulong1 = va_arg(ap, unsigned long);
54689 ++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
54690 ++ break;
54691 ++ case GR_RWXMAP:
54692 ++ file = va_arg(ap, struct file *);
54693 ++ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
54694 ++ break;
54695 ++ case GR_PSACCT:
54696 ++ {
54697 ++ unsigned int wday, cday;
54698 ++ __u8 whr, chr;
54699 ++ __u8 wmin, cmin;
54700 ++ __u8 wsec, csec;
54701 ++ char cur_tty[64] = { 0 };
54702 ++ char parent_tty[64] = { 0 };
54703 ++
54704 ++ task = va_arg(ap, struct task_struct *);
54705 ++ wday = va_arg(ap, unsigned int);
54706 ++ cday = va_arg(ap, unsigned int);
54707 ++ whr = va_arg(ap, int);
54708 ++ chr = va_arg(ap, int);
54709 ++ wmin = va_arg(ap, int);
54710 ++ cmin = va_arg(ap, int);
54711 ++ wsec = va_arg(ap, int);
54712 ++ csec = va_arg(ap, int);
54713 ++ ulong1 = va_arg(ap, unsigned long);
54714 ++ cred = __task_cred(task);
54715 ++ pcred = __task_cred(task->real_parent);
54716 ++
54717 ++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54718 ++ }
54719 ++ break;
54720 ++ default:
54721 ++ gr_log_middle(audit, msg, ap);
54722 ++ }
54723 ++ va_end(ap);
54724 ++ // these don't need DEFAULTSECARGS printed on the end
54725 ++ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
54726 ++ gr_log_end(audit, 0);
54727 ++ else
54728 ++ gr_log_end(audit, 1);
54729 ++ END_LOCKS(audit);
54730 ++}
54731 +diff -urNp linux-3.1.1/grsecurity/grsec_mem.c linux-3.1.1/grsecurity/grsec_mem.c
54732 +--- linux-3.1.1/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54733 ++++ linux-3.1.1/grsecurity/grsec_mem.c 2011-11-16 18:40:31.000000000 -0500
54734 +@@ -0,0 +1,33 @@
54735 ++#include <linux/kernel.h>
54736 ++#include <linux/sched.h>
54737 ++#include <linux/mm.h>
54738 ++#include <linux/mman.h>
54739 ++#include <linux/grinternal.h>
54740 ++
54741 ++void
54742 ++gr_handle_ioperm(void)
54743 ++{
54744 ++ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54745 ++ return;
54746 ++}
54747 ++
54748 ++void
54749 ++gr_handle_iopl(void)
54750 ++{
54751 ++ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54752 ++ return;
54753 ++}
54754 ++
54755 ++void
54756 ++gr_handle_mem_readwrite(u64 from, u64 to)
54757 ++{
54758 ++ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54759 ++ return;
54760 ++}
54761 ++
54762 ++void
54763 ++gr_handle_vm86(void)
54764 ++{
54765 ++ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54766 ++ return;
54767 ++}
54768 +diff -urNp linux-3.1.1/grsecurity/grsec_mount.c linux-3.1.1/grsecurity/grsec_mount.c
54769 +--- linux-3.1.1/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54770 ++++ linux-3.1.1/grsecurity/grsec_mount.c 2011-11-16 18:40:31.000000000 -0500
54771 +@@ -0,0 +1,62 @@
54772 ++#include <linux/kernel.h>
54773 ++#include <linux/sched.h>
54774 ++#include <linux/mount.h>
54775 ++#include <linux/grsecurity.h>
54776 ++#include <linux/grinternal.h>
54777 ++
54778 ++void
54779 ++gr_log_remount(const char *devname, const int retval)
54780 ++{
54781 ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54782 ++ if (grsec_enable_mount && (retval >= 0))
54783 ++ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54784 ++#endif
54785 ++ return;
54786 ++}
54787 ++
54788 ++void
54789 ++gr_log_unmount(const char *devname, const int retval)
54790 ++{
54791 ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54792 ++ if (grsec_enable_mount && (retval >= 0))
54793 ++ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54794 ++#endif
54795 ++ return;
54796 ++}
54797 ++
54798 ++void
54799 ++gr_log_mount(const char *from, const char *to, const int retval)
54800 ++{
54801 ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54802 ++ if (grsec_enable_mount && (retval >= 0))
54803 ++ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54804 ++#endif
54805 ++ return;
54806 ++}
54807 ++
54808 ++int
54809 ++gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54810 ++{
54811 ++#ifdef CONFIG_GRKERNSEC_ROFS
54812 ++ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54813 ++ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54814 ++ return -EPERM;
54815 ++ } else
54816 ++ return 0;
54817 ++#endif
54818 ++ return 0;
54819 ++}
54820 ++
54821 ++int
54822 ++gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54823 ++{
54824 ++#ifdef CONFIG_GRKERNSEC_ROFS
54825 ++ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54826 ++ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54827 ++ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54828 ++ return -EPERM;
54829 ++ } else
54830 ++ return 0;
54831 ++#endif
54832 ++ return 0;
54833 ++}
54834 +diff -urNp linux-3.1.1/grsecurity/grsec_pax.c linux-3.1.1/grsecurity/grsec_pax.c
54835 +--- linux-3.1.1/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54836 ++++ linux-3.1.1/grsecurity/grsec_pax.c 2011-11-16 18:40:31.000000000 -0500
54837 +@@ -0,0 +1,36 @@
54838 ++#include <linux/kernel.h>
54839 ++#include <linux/sched.h>
54840 ++#include <linux/mm.h>
54841 ++#include <linux/file.h>
54842 ++#include <linux/grinternal.h>
54843 ++#include <linux/grsecurity.h>
54844 ++
54845 ++void
54846 ++gr_log_textrel(struct vm_area_struct * vma)
54847 ++{
54848 ++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54849 ++ if (grsec_enable_audit_textrel)
54850 ++ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54851 ++#endif
54852 ++ return;
54853 ++}
54854 ++
54855 ++void
54856 ++gr_log_rwxmmap(struct file *file)
54857 ++{
54858 ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54859 ++ if (grsec_enable_log_rwxmaps)
54860 ++ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54861 ++#endif
54862 ++ return;
54863 ++}
54864 ++
54865 ++void
54866 ++gr_log_rwxmprotect(struct file *file)
54867 ++{
54868 ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54869 ++ if (grsec_enable_log_rwxmaps)
54870 ++ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54871 ++#endif
54872 ++ return;
54873 ++}
54874 +diff -urNp linux-3.1.1/grsecurity/grsec_ptrace.c linux-3.1.1/grsecurity/grsec_ptrace.c
54875 +--- linux-3.1.1/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54876 ++++ linux-3.1.1/grsecurity/grsec_ptrace.c 2011-11-16 18:40:31.000000000 -0500
54877 +@@ -0,0 +1,14 @@
54878 ++#include <linux/kernel.h>
54879 ++#include <linux/sched.h>
54880 ++#include <linux/grinternal.h>
54881 ++#include <linux/grsecurity.h>
54882 ++
54883 ++void
54884 ++gr_audit_ptrace(struct task_struct *task)
54885 ++{
54886 ++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54887 ++ if (grsec_enable_audit_ptrace)
54888 ++ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54889 ++#endif
54890 ++ return;
54891 ++}
54892 +diff -urNp linux-3.1.1/grsecurity/grsec_sig.c linux-3.1.1/grsecurity/grsec_sig.c
54893 +--- linux-3.1.1/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54894 ++++ linux-3.1.1/grsecurity/grsec_sig.c 2011-11-16 18:40:31.000000000 -0500
54895 +@@ -0,0 +1,206 @@
54896 ++#include <linux/kernel.h>
54897 ++#include <linux/sched.h>
54898 ++#include <linux/delay.h>
54899 ++#include <linux/grsecurity.h>
54900 ++#include <linux/grinternal.h>
54901 ++#include <linux/hardirq.h>
54902 ++
54903 ++char *signames[] = {
54904 ++ [SIGSEGV] = "Segmentation fault",
54905 ++ [SIGILL] = "Illegal instruction",
54906 ++ [SIGABRT] = "Abort",
54907 ++ [SIGBUS] = "Invalid alignment/Bus error"
54908 ++};
54909 ++
54910 ++void
54911 ++gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54912 ++{
54913 ++#ifdef CONFIG_GRKERNSEC_SIGNAL
54914 ++ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54915 ++ (sig == SIGABRT) || (sig == SIGBUS))) {
54916 ++ if (t->pid == current->pid) {
54917 ++ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54918 ++ } else {
54919 ++ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54920 ++ }
54921 ++ }
54922 ++#endif
54923 ++ return;
54924 ++}
54925 ++
54926 ++int
54927 ++gr_handle_signal(const struct task_struct *p, const int sig)
54928 ++{
54929 ++#ifdef CONFIG_GRKERNSEC
54930 ++ if (current->pid > 1 && gr_check_protected_task(p)) {
54931 ++ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54932 ++ return -EPERM;
54933 ++ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54934 ++ return -EPERM;
54935 ++ }
54936 ++#endif
54937 ++ return 0;
54938 ++}
54939 ++
54940 ++#ifdef CONFIG_GRKERNSEC
54941 ++extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54942 ++
54943 ++int gr_fake_force_sig(int sig, struct task_struct *t)
54944 ++{
54945 ++ unsigned long int flags;
54946 ++ int ret, blocked, ignored;
54947 ++ struct k_sigaction *action;
54948 ++
54949 ++ spin_lock_irqsave(&t->sighand->siglock, flags);
54950 ++ action = &t->sighand->action[sig-1];
54951 ++ ignored = action->sa.sa_handler == SIG_IGN;
54952 ++ blocked = sigismember(&t->blocked, sig);
54953 ++ if (blocked || ignored) {
54954 ++ action->sa.sa_handler = SIG_DFL;
54955 ++ if (blocked) {
54956 ++ sigdelset(&t->blocked, sig);
54957 ++ recalc_sigpending_and_wake(t);
54958 ++ }
54959 ++ }
54960 ++ if (action->sa.sa_handler == SIG_DFL)
54961 ++ t->signal->flags &= ~SIGNAL_UNKILLABLE;
54962 ++ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54963 ++
54964 ++ spin_unlock_irqrestore(&t->sighand->siglock, flags);
54965 ++
54966 ++ return ret;
54967 ++}
54968 ++#endif
54969 ++
54970 ++#ifdef CONFIG_GRKERNSEC_BRUTE
54971 ++#define GR_USER_BAN_TIME (15 * 60)
54972 ++
54973 ++static int __get_dumpable(unsigned long mm_flags)
54974 ++{
54975 ++ int ret;
54976 ++
54977 ++ ret = mm_flags & MMF_DUMPABLE_MASK;
54978 ++ return (ret >= 2) ? 2 : ret;
54979 ++}
54980 ++#endif
54981 ++
54982 ++void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54983 ++{
54984 ++#ifdef CONFIG_GRKERNSEC_BRUTE
54985 ++ uid_t uid = 0;
54986 ++
54987 ++ if (!grsec_enable_brute)
54988 ++ return;
54989 ++
54990 ++ rcu_read_lock();
54991 ++ read_lock(&tasklist_lock);
54992 ++ read_lock(&grsec_exec_file_lock);
54993 ++ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
54994 ++ p->real_parent->brute = 1;
54995 ++ else {
54996 ++ const struct cred *cred = __task_cred(p), *cred2;
54997 ++ struct task_struct *tsk, *tsk2;
54998 ++
54999 ++ if (!__get_dumpable(mm_flags) && cred->uid) {
55000 ++ struct user_struct *user;
55001 ++
55002 ++ uid = cred->uid;
55003 ++
55004 ++ /* this is put upon execution past expiration */
55005 ++ user = find_user(uid);
55006 ++ if (user == NULL)
55007 ++ goto unlock;
55008 ++ user->banned = 1;
55009 ++ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
55010 ++ if (user->ban_expires == ~0UL)
55011 ++ user->ban_expires--;
55012 ++
55013 ++ do_each_thread(tsk2, tsk) {
55014 ++ cred2 = __task_cred(tsk);
55015 ++ if (tsk != p && cred2->uid == uid)
55016 ++ gr_fake_force_sig(SIGKILL, tsk);
55017 ++ } while_each_thread(tsk2, tsk);
55018 ++ }
55019 ++ }
55020 ++unlock:
55021 ++ read_unlock(&grsec_exec_file_lock);
55022 ++ read_unlock(&tasklist_lock);
55023 ++ rcu_read_unlock();
55024 ++
55025 ++ if (uid)
55026 ++ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
55027 ++
55028 ++#endif
55029 ++ return;
55030 ++}
55031 ++
55032 ++void gr_handle_brute_check(void)
55033 ++{
55034 ++#ifdef CONFIG_GRKERNSEC_BRUTE
55035 ++ if (current->brute)
55036 ++ msleep(30 * 1000);
55037 ++#endif
55038 ++ return;
55039 ++}
55040 ++
55041 ++void gr_handle_kernel_exploit(void)
55042 ++{
55043 ++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
55044 ++ const struct cred *cred;
55045 ++ struct task_struct *tsk, *tsk2;
55046 ++ struct user_struct *user;
55047 ++ uid_t uid;
55048 ++
55049 ++ if (in_irq() || in_serving_softirq() || in_nmi())
55050 ++ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
55051 ++
55052 ++ uid = current_uid();
55053 ++
55054 ++ if (uid == 0)
55055 ++ panic("grsec: halting the system due to suspicious kernel crash caused by root");
55056 ++ else {
55057 ++ /* kill all the processes of this user, hold a reference
55058 ++ to their creds struct, and prevent them from creating
55059 ++ another process until system reset
55060 ++ */
55061 ++ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
55062 ++ /* we intentionally leak this ref */
55063 ++ user = get_uid(current->cred->user);
55064 ++ if (user) {
55065 ++ user->banned = 1;
55066 ++ user->ban_expires = ~0UL;
55067 ++ }
55068 ++
55069 ++ read_lock(&tasklist_lock);
55070 ++ do_each_thread(tsk2, tsk) {
55071 ++ cred = __task_cred(tsk);
55072 ++ if (cred->uid == uid)
55073 ++ gr_fake_force_sig(SIGKILL, tsk);
55074 ++ } while_each_thread(tsk2, tsk);
55075 ++ read_unlock(&tasklist_lock);
55076 ++ }
55077 ++#endif
55078 ++}
55079 ++
55080 ++int __gr_process_user_ban(struct user_struct *user)
55081 ++{
55082 ++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55083 ++ if (unlikely(user->banned)) {
55084 ++ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
55085 ++ user->banned = 0;
55086 ++ user->ban_expires = 0;
55087 ++ free_uid(user);
55088 ++ } else
55089 ++ return -EPERM;
55090 ++ }
55091 ++#endif
55092 ++ return 0;
55093 ++}
55094 ++
55095 ++int gr_process_user_ban(void)
55096 ++{
55097 ++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55098 ++ return __gr_process_user_ban(current->cred->user);
55099 ++#endif
55100 ++ return 0;
55101 ++}
55102 +diff -urNp linux-3.1.1/grsecurity/grsec_sock.c linux-3.1.1/grsecurity/grsec_sock.c
55103 +--- linux-3.1.1/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
55104 ++++ linux-3.1.1/grsecurity/grsec_sock.c 2011-11-16 18:40:31.000000000 -0500
55105 +@@ -0,0 +1,244 @@
55106 ++#include <linux/kernel.h>
55107 ++#include <linux/module.h>
55108 ++#include <linux/sched.h>
55109 ++#include <linux/file.h>
55110 ++#include <linux/net.h>
55111 ++#include <linux/in.h>
55112 ++#include <linux/ip.h>
55113 ++#include <net/sock.h>
55114 ++#include <net/inet_sock.h>
55115 ++#include <linux/grsecurity.h>
55116 ++#include <linux/grinternal.h>
55117 ++#include <linux/gracl.h>
55118 ++
55119 ++extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
55120 ++extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
55121 ++
55122 ++EXPORT_SYMBOL(gr_search_udp_recvmsg);
55123 ++EXPORT_SYMBOL(gr_search_udp_sendmsg);
55124 ++
55125 ++#ifdef CONFIG_UNIX_MODULE
55126 ++EXPORT_SYMBOL(gr_acl_handle_unix);
55127 ++EXPORT_SYMBOL(gr_acl_handle_mknod);
55128 ++EXPORT_SYMBOL(gr_handle_chroot_unix);
55129 ++EXPORT_SYMBOL(gr_handle_create);
55130 ++#endif
55131 ++
55132 ++#ifdef CONFIG_GRKERNSEC
55133 ++#define gr_conn_table_size 32749
55134 ++struct conn_table_entry {
55135 ++ struct conn_table_entry *next;
55136 ++ struct signal_struct *sig;
55137 ++};
55138 ++
55139 ++struct conn_table_entry *gr_conn_table[gr_conn_table_size];
55140 ++DEFINE_SPINLOCK(gr_conn_table_lock);
55141 ++
55142 ++extern const char * gr_socktype_to_name(unsigned char type);
55143 ++extern const char * gr_proto_to_name(unsigned char proto);
55144 ++extern const char * gr_sockfamily_to_name(unsigned char family);
55145 ++
55146 ++static __inline__ int
55147 ++conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
55148 ++{
55149 ++ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
55150 ++}
55151 ++
55152 ++static __inline__ int
55153 ++conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
55154 ++ __u16 sport, __u16 dport)
55155 ++{
55156 ++ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
55157 ++ sig->gr_sport == sport && sig->gr_dport == dport))
55158 ++ return 1;
55159 ++ else
55160 ++ return 0;
55161 ++}
55162 ++
55163 ++static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
55164 ++{
55165 ++ struct conn_table_entry **match;
55166 ++ unsigned int index;
55167 ++
55168 ++ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55169 ++ sig->gr_sport, sig->gr_dport,
55170 ++ gr_conn_table_size);
55171 ++
55172 ++ newent->sig = sig;
55173 ++
55174 ++ match = &gr_conn_table[index];
55175 ++ newent->next = *match;
55176 ++ *match = newent;
55177 ++
55178 ++ return;
55179 ++}
55180 ++
55181 ++static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
55182 ++{
55183 ++ struct conn_table_entry *match, *last = NULL;
55184 ++ unsigned int index;
55185 ++
55186 ++ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55187 ++ sig->gr_sport, sig->gr_dport,
55188 ++ gr_conn_table_size);
55189 ++
55190 ++ match = gr_conn_table[index];
55191 ++ while (match && !conn_match(match->sig,
55192 ++ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
55193 ++ sig->gr_dport)) {
55194 ++ last = match;
55195 ++ match = match->next;
55196 ++ }
55197 ++
55198 ++ if (match) {
55199 ++ if (last)
55200 ++ last->next = match->next;
55201 ++ else
55202 ++ gr_conn_table[index] = NULL;
55203 ++ kfree(match);
55204 ++ }
55205 ++
55206 ++ return;
55207 ++}
55208 ++
55209 ++static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
55210 ++ __u16 sport, __u16 dport)
55211 ++{
55212 ++ struct conn_table_entry *match;
55213 ++ unsigned int index;
55214 ++
55215 ++ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
55216 ++
55217 ++ match = gr_conn_table[index];
55218 ++ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
55219 ++ match = match->next;
55220 ++
55221 ++ if (match)
55222 ++ return match->sig;
55223 ++ else
55224 ++ return NULL;
55225 ++}
55226 ++
55227 ++#endif
55228 ++
55229 ++void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
55230 ++{
55231 ++#ifdef CONFIG_GRKERNSEC
55232 ++ struct signal_struct *sig = task->signal;
55233 ++ struct conn_table_entry *newent;
55234 ++
55235 ++ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
55236 ++ if (newent == NULL)
55237 ++ return;
55238 ++ /* no bh lock needed since we are called with bh disabled */
55239 ++ spin_lock(&gr_conn_table_lock);
55240 ++ gr_del_task_from_ip_table_nolock(sig);
55241 ++ sig->gr_saddr = inet->inet_rcv_saddr;
55242 ++ sig->gr_daddr = inet->inet_daddr;
55243 ++ sig->gr_sport = inet->inet_sport;
55244 ++ sig->gr_dport = inet->inet_dport;
55245 ++ gr_add_to_task_ip_table_nolock(sig, newent);
55246 ++ spin_unlock(&gr_conn_table_lock);
55247 ++#endif
55248 ++ return;
55249 ++}
55250 ++
55251 ++void gr_del_task_from_ip_table(struct task_struct *task)
55252 ++{
55253 ++#ifdef CONFIG_GRKERNSEC
55254 ++ spin_lock_bh(&gr_conn_table_lock);
55255 ++ gr_del_task_from_ip_table_nolock(task->signal);
55256 ++ spin_unlock_bh(&gr_conn_table_lock);
55257 ++#endif
55258 ++ return;
55259 ++}
55260 ++
55261 ++void
55262 ++gr_attach_curr_ip(const struct sock *sk)
55263 ++{
55264 ++#ifdef CONFIG_GRKERNSEC
55265 ++ struct signal_struct *p, *set;
55266 ++ const struct inet_sock *inet = inet_sk(sk);
55267 ++
55268 ++ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
55269 ++ return;
55270 ++
55271 ++ set = current->signal;
55272 ++
55273 ++ spin_lock_bh(&gr_conn_table_lock);
55274 ++ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
55275 ++ inet->inet_dport, inet->inet_sport);
55276 ++ if (unlikely(p != NULL)) {
55277 ++ set->curr_ip = p->curr_ip;
55278 ++ set->used_accept = 1;
55279 ++ gr_del_task_from_ip_table_nolock(p);
55280 ++ spin_unlock_bh(&gr_conn_table_lock);
55281 ++ return;
55282 ++ }
55283 ++ spin_unlock_bh(&gr_conn_table_lock);
55284 ++
55285 ++ set->curr_ip = inet->inet_daddr;
55286 ++ set->used_accept = 1;
55287 ++#endif
55288 ++ return;
55289 ++}
55290 ++
55291 ++int
55292 ++gr_handle_sock_all(const int family, const int type, const int protocol)
55293 ++{
55294 ++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55295 ++ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
55296 ++ (family != AF_UNIX)) {
55297 ++ if (family == AF_INET)
55298 ++ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
55299 ++ else
55300 ++ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
55301 ++ return -EACCES;
55302 ++ }
55303 ++#endif
55304 ++ return 0;
55305 ++}
55306 ++
55307 ++int
55308 ++gr_handle_sock_server(const struct sockaddr *sck)
55309 ++{
55310 ++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55311 ++ if (grsec_enable_socket_server &&
55312 ++ in_group_p(grsec_socket_server_gid) &&
55313 ++ sck && (sck->sa_family != AF_UNIX) &&
55314 ++ (sck->sa_family != AF_LOCAL)) {
55315 ++ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
55316 ++ return -EACCES;
55317 ++ }
55318 ++#endif
55319 ++ return 0;
55320 ++}
55321 ++
55322 ++int
55323 ++gr_handle_sock_server_other(const struct sock *sck)
55324 ++{
55325 ++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55326 ++ if (grsec_enable_socket_server &&
55327 ++ in_group_p(grsec_socket_server_gid) &&
55328 ++ sck && (sck->sk_family != AF_UNIX) &&
55329 ++ (sck->sk_family != AF_LOCAL)) {
55330 ++ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
55331 ++ return -EACCES;
55332 ++ }
55333 ++#endif
55334 ++ return 0;
55335 ++}
55336 ++
55337 ++int
55338 ++gr_handle_sock_client(const struct sockaddr *sck)
55339 ++{
55340 ++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55341 ++ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
55342 ++ sck && (sck->sa_family != AF_UNIX) &&
55343 ++ (sck->sa_family != AF_LOCAL)) {
55344 ++ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
55345 ++ return -EACCES;
55346 ++ }
55347 ++#endif
55348 ++ return 0;
55349 ++}
55350 +diff -urNp linux-3.1.1/grsecurity/grsec_sysctl.c linux-3.1.1/grsecurity/grsec_sysctl.c
55351 +--- linux-3.1.1/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
55352 ++++ linux-3.1.1/grsecurity/grsec_sysctl.c 2011-11-16 18:40:31.000000000 -0500
55353 +@@ -0,0 +1,433 @@
55354 ++#include <linux/kernel.h>
55355 ++#include <linux/sched.h>
55356 ++#include <linux/sysctl.h>
55357 ++#include <linux/grsecurity.h>
55358 ++#include <linux/grinternal.h>
55359 ++
55360 ++int
55361 ++gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
55362 ++{
55363 ++#ifdef CONFIG_GRKERNSEC_SYSCTL
55364 ++ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
55365 ++ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
55366 ++ return -EACCES;
55367 ++ }
55368 ++#endif
55369 ++ return 0;
55370 ++}
55371 ++
55372 ++#ifdef CONFIG_GRKERNSEC_ROFS
55373 ++static int __maybe_unused one = 1;
55374 ++#endif
55375 ++
55376 ++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
55377 ++struct ctl_table grsecurity_table[] = {
55378 ++#ifdef CONFIG_GRKERNSEC_SYSCTL
55379 ++#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
55380 ++#ifdef CONFIG_GRKERNSEC_IO
55381 ++ {
55382 ++ .procname = "disable_priv_io",
55383 ++ .data = &grsec_disable_privio,
55384 ++ .maxlen = sizeof(int),
55385 ++ .mode = 0600,
55386 ++ .proc_handler = &proc_dointvec,
55387 ++ },
55388 ++#endif
55389 ++#endif
55390 ++#ifdef CONFIG_GRKERNSEC_LINK
55391 ++ {
55392 ++ .procname = "linking_restrictions",
55393 ++ .data = &grsec_enable_link,
55394 ++ .maxlen = sizeof(int),
55395 ++ .mode = 0600,
55396 ++ .proc_handler = &proc_dointvec,
55397 ++ },
55398 ++#endif
55399 ++#ifdef CONFIG_GRKERNSEC_BRUTE
55400 ++ {
55401 ++ .procname = "deter_bruteforce",
55402 ++ .data = &grsec_enable_brute,
55403 ++ .maxlen = sizeof(int),
55404 ++ .mode = 0600,
55405 ++ .proc_handler = &proc_dointvec,
55406 ++ },
55407 ++#endif
55408 ++#ifdef CONFIG_GRKERNSEC_FIFO
55409 ++ {
55410 ++ .procname = "fifo_restrictions",
55411 ++ .data = &grsec_enable_fifo,
55412 ++ .maxlen = sizeof(int),
55413 ++ .mode = 0600,
55414 ++ .proc_handler = &proc_dointvec,
55415 ++ },
55416 ++#endif
55417 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55418 ++ {
55419 ++ .procname = "ip_blackhole",
55420 ++ .data = &grsec_enable_blackhole,
55421 ++ .maxlen = sizeof(int),
55422 ++ .mode = 0600,
55423 ++ .proc_handler = &proc_dointvec,
55424 ++ },
55425 ++ {
55426 ++ .procname = "lastack_retries",
55427 ++ .data = &grsec_lastack_retries,
55428 ++ .maxlen = sizeof(int),
55429 ++ .mode = 0600,
55430 ++ .proc_handler = &proc_dointvec,
55431 ++ },
55432 ++#endif
55433 ++#ifdef CONFIG_GRKERNSEC_EXECLOG
55434 ++ {
55435 ++ .procname = "exec_logging",
55436 ++ .data = &grsec_enable_execlog,
55437 ++ .maxlen = sizeof(int),
55438 ++ .mode = 0600,
55439 ++ .proc_handler = &proc_dointvec,
55440 ++ },
55441 ++#endif
55442 ++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55443 ++ {
55444 ++ .procname = "rwxmap_logging",
55445 ++ .data = &grsec_enable_log_rwxmaps,
55446 ++ .maxlen = sizeof(int),
55447 ++ .mode = 0600,
55448 ++ .proc_handler = &proc_dointvec,
55449 ++ },
55450 ++#endif
55451 ++#ifdef CONFIG_GRKERNSEC_SIGNAL
55452 ++ {
55453 ++ .procname = "signal_logging",
55454 ++ .data = &grsec_enable_signal,
55455 ++ .maxlen = sizeof(int),
55456 ++ .mode = 0600,
55457 ++ .proc_handler = &proc_dointvec,
55458 ++ },
55459 ++#endif
55460 ++#ifdef CONFIG_GRKERNSEC_FORKFAIL
55461 ++ {
55462 ++ .procname = "forkfail_logging",
55463 ++ .data = &grsec_enable_forkfail,
55464 ++ .maxlen = sizeof(int),
55465 ++ .mode = 0600,
55466 ++ .proc_handler = &proc_dointvec,
55467 ++ },
55468 ++#endif
55469 ++#ifdef CONFIG_GRKERNSEC_TIME
55470 ++ {
55471 ++ .procname = "timechange_logging",
55472 ++ .data = &grsec_enable_time,
55473 ++ .maxlen = sizeof(int),
55474 ++ .mode = 0600,
55475 ++ .proc_handler = &proc_dointvec,
55476 ++ },
55477 ++#endif
55478 ++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55479 ++ {
55480 ++ .procname = "chroot_deny_shmat",
55481 ++ .data = &grsec_enable_chroot_shmat,
55482 ++ .maxlen = sizeof(int),
55483 ++ .mode = 0600,
55484 ++ .proc_handler = &proc_dointvec,
55485 ++ },
55486 ++#endif
55487 ++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55488 ++ {
55489 ++ .procname = "chroot_deny_unix",
55490 ++ .data = &grsec_enable_chroot_unix,
55491 ++ .maxlen = sizeof(int),
55492 ++ .mode = 0600,
55493 ++ .proc_handler = &proc_dointvec,
55494 ++ },
55495 ++#endif
55496 ++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55497 ++ {
55498 ++ .procname = "chroot_deny_mount",
55499 ++ .data = &grsec_enable_chroot_mount,
55500 ++ .maxlen = sizeof(int),
55501 ++ .mode = 0600,
55502 ++ .proc_handler = &proc_dointvec,
55503 ++ },
55504 ++#endif
55505 ++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55506 ++ {
55507 ++ .procname = "chroot_deny_fchdir",
55508 ++ .data = &grsec_enable_chroot_fchdir,
55509 ++ .maxlen = sizeof(int),
55510 ++ .mode = 0600,
55511 ++ .proc_handler = &proc_dointvec,
55512 ++ },
55513 ++#endif
55514 ++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55515 ++ {
55516 ++ .procname = "chroot_deny_chroot",
55517 ++ .data = &grsec_enable_chroot_double,
55518 ++ .maxlen = sizeof(int),
55519 ++ .mode = 0600,
55520 ++ .proc_handler = &proc_dointvec,
55521 ++ },
55522 ++#endif
55523 ++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55524 ++ {
55525 ++ .procname = "chroot_deny_pivot",
55526 ++ .data = &grsec_enable_chroot_pivot,
55527 ++ .maxlen = sizeof(int),
55528 ++ .mode = 0600,
55529 ++ .proc_handler = &proc_dointvec,
55530 ++ },
55531 ++#endif
55532 ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55533 ++ {
55534 ++ .procname = "chroot_enforce_chdir",
55535 ++ .data = &grsec_enable_chroot_chdir,
55536 ++ .maxlen = sizeof(int),
55537 ++ .mode = 0600,
55538 ++ .proc_handler = &proc_dointvec,
55539 ++ },
55540 ++#endif
55541 ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55542 ++ {
55543 ++ .procname = "chroot_deny_chmod",
55544 ++ .data = &grsec_enable_chroot_chmod,
55545 ++ .maxlen = sizeof(int),
55546 ++ .mode = 0600,
55547 ++ .proc_handler = &proc_dointvec,
55548 ++ },
55549 ++#endif
55550 ++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55551 ++ {
55552 ++ .procname = "chroot_deny_mknod",
55553 ++ .data = &grsec_enable_chroot_mknod,
55554 ++ .maxlen = sizeof(int),
55555 ++ .mode = 0600,
55556 ++ .proc_handler = &proc_dointvec,
55557 ++ },
55558 ++#endif
55559 ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55560 ++ {
55561 ++ .procname = "chroot_restrict_nice",
55562 ++ .data = &grsec_enable_chroot_nice,
55563 ++ .maxlen = sizeof(int),
55564 ++ .mode = 0600,
55565 ++ .proc_handler = &proc_dointvec,
55566 ++ },
55567 ++#endif
55568 ++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55569 ++ {
55570 ++ .procname = "chroot_execlog",
55571 ++ .data = &grsec_enable_chroot_execlog,
55572 ++ .maxlen = sizeof(int),
55573 ++ .mode = 0600,
55574 ++ .proc_handler = &proc_dointvec,
55575 ++ },
55576 ++#endif
55577 ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55578 ++ {
55579 ++ .procname = "chroot_caps",
55580 ++ .data = &grsec_enable_chroot_caps,
55581 ++ .maxlen = sizeof(int),
55582 ++ .mode = 0600,
55583 ++ .proc_handler = &proc_dointvec,
55584 ++ },
55585 ++#endif
55586 ++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55587 ++ {
55588 ++ .procname = "chroot_deny_sysctl",
55589 ++ .data = &grsec_enable_chroot_sysctl,
55590 ++ .maxlen = sizeof(int),
55591 ++ .mode = 0600,
55592 ++ .proc_handler = &proc_dointvec,
55593 ++ },
55594 ++#endif
55595 ++#ifdef CONFIG_GRKERNSEC_TPE
55596 ++ {
55597 ++ .procname = "tpe",
55598 ++ .data = &grsec_enable_tpe,
55599 ++ .maxlen = sizeof(int),
55600 ++ .mode = 0600,
55601 ++ .proc_handler = &proc_dointvec,
55602 ++ },
55603 ++ {
55604 ++ .procname = "tpe_gid",
55605 ++ .data = &grsec_tpe_gid,
55606 ++ .maxlen = sizeof(int),
55607 ++ .mode = 0600,
55608 ++ .proc_handler = &proc_dointvec,
55609 ++ },
55610 ++#endif
55611 ++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55612 ++ {
55613 ++ .procname = "tpe_invert",
55614 ++ .data = &grsec_enable_tpe_invert,
55615 ++ .maxlen = sizeof(int),
55616 ++ .mode = 0600,
55617 ++ .proc_handler = &proc_dointvec,
55618 ++ },
55619 ++#endif
55620 ++#ifdef CONFIG_GRKERNSEC_TPE_ALL
55621 ++ {
55622 ++ .procname = "tpe_restrict_all",
55623 ++ .data = &grsec_enable_tpe_all,
55624 ++ .maxlen = sizeof(int),
55625 ++ .mode = 0600,
55626 ++ .proc_handler = &proc_dointvec,
55627 ++ },
55628 ++#endif
55629 ++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55630 ++ {
55631 ++ .procname = "socket_all",
55632 ++ .data = &grsec_enable_socket_all,
55633 ++ .maxlen = sizeof(int),
55634 ++ .mode = 0600,
55635 ++ .proc_handler = &proc_dointvec,
55636 ++ },
55637 ++ {
55638 ++ .procname = "socket_all_gid",
55639 ++ .data = &grsec_socket_all_gid,
55640 ++ .maxlen = sizeof(int),
55641 ++ .mode = 0600,
55642 ++ .proc_handler = &proc_dointvec,
55643 ++ },
55644 ++#endif
55645 ++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55646 ++ {
55647 ++ .procname = "socket_client",
55648 ++ .data = &grsec_enable_socket_client,
55649 ++ .maxlen = sizeof(int),
55650 ++ .mode = 0600,
55651 ++ .proc_handler = &proc_dointvec,
55652 ++ },
55653 ++ {
55654 ++ .procname = "socket_client_gid",
55655 ++ .data = &grsec_socket_client_gid,
55656 ++ .maxlen = sizeof(int),
55657 ++ .mode = 0600,
55658 ++ .proc_handler = &proc_dointvec,
55659 ++ },
55660 ++#endif
55661 ++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55662 ++ {
55663 ++ .procname = "socket_server",
55664 ++ .data = &grsec_enable_socket_server,
55665 ++ .maxlen = sizeof(int),
55666 ++ .mode = 0600,
55667 ++ .proc_handler = &proc_dointvec,
55668 ++ },
55669 ++ {
55670 ++ .procname = "socket_server_gid",
55671 ++ .data = &grsec_socket_server_gid,
55672 ++ .maxlen = sizeof(int),
55673 ++ .mode = 0600,
55674 ++ .proc_handler = &proc_dointvec,
55675 ++ },
55676 ++#endif
55677 ++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55678 ++ {
55679 ++ .procname = "audit_group",
55680 ++ .data = &grsec_enable_group,
55681 ++ .maxlen = sizeof(int),
55682 ++ .mode = 0600,
55683 ++ .proc_handler = &proc_dointvec,
55684 ++ },
55685 ++ {
55686 ++ .procname = "audit_gid",
55687 ++ .data = &grsec_audit_gid,
55688 ++ .maxlen = sizeof(int),
55689 ++ .mode = 0600,
55690 ++ .proc_handler = &proc_dointvec,
55691 ++ },
55692 ++#endif
55693 ++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55694 ++ {
55695 ++ .procname = "audit_chdir",
55696 ++ .data = &grsec_enable_chdir,
55697 ++ .maxlen = sizeof(int),
55698 ++ .mode = 0600,
55699 ++ .proc_handler = &proc_dointvec,
55700 ++ },
55701 ++#endif
55702 ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55703 ++ {
55704 ++ .procname = "audit_mount",
55705 ++ .data = &grsec_enable_mount,
55706 ++ .maxlen = sizeof(int),
55707 ++ .mode = 0600,
55708 ++ .proc_handler = &proc_dointvec,
55709 ++ },
55710 ++#endif
55711 ++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55712 ++ {
55713 ++ .procname = "audit_textrel",
55714 ++ .data = &grsec_enable_audit_textrel,
55715 ++ .maxlen = sizeof(int),
55716 ++ .mode = 0600,
55717 ++ .proc_handler = &proc_dointvec,
55718 ++ },
55719 ++#endif
55720 ++#ifdef CONFIG_GRKERNSEC_DMESG
55721 ++ {
55722 ++ .procname = "dmesg",
55723 ++ .data = &grsec_enable_dmesg,
55724 ++ .maxlen = sizeof(int),
55725 ++ .mode = 0600,
55726 ++ .proc_handler = &proc_dointvec,
55727 ++ },
55728 ++#endif
55729 ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55730 ++ {
55731 ++ .procname = "chroot_findtask",
55732 ++ .data = &grsec_enable_chroot_findtask,
55733 ++ .maxlen = sizeof(int),
55734 ++ .mode = 0600,
55735 ++ .proc_handler = &proc_dointvec,
55736 ++ },
55737 ++#endif
55738 ++#ifdef CONFIG_GRKERNSEC_RESLOG
55739 ++ {
55740 ++ .procname = "resource_logging",
55741 ++ .data = &grsec_resource_logging,
55742 ++ .maxlen = sizeof(int),
55743 ++ .mode = 0600,
55744 ++ .proc_handler = &proc_dointvec,
55745 ++ },
55746 ++#endif
55747 ++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55748 ++ {
55749 ++ .procname = "audit_ptrace",
55750 ++ .data = &grsec_enable_audit_ptrace,
55751 ++ .maxlen = sizeof(int),
55752 ++ .mode = 0600,
55753 ++ .proc_handler = &proc_dointvec,
55754 ++ },
55755 ++#endif
55756 ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55757 ++ {
55758 ++ .procname = "harden_ptrace",
55759 ++ .data = &grsec_enable_harden_ptrace,
55760 ++ .maxlen = sizeof(int),
55761 ++ .mode = 0600,
55762 ++ .proc_handler = &proc_dointvec,
55763 ++ },
55764 ++#endif
55765 ++ {
55766 ++ .procname = "grsec_lock",
55767 ++ .data = &grsec_lock,
55768 ++ .maxlen = sizeof(int),
55769 ++ .mode = 0600,
55770 ++ .proc_handler = &proc_dointvec,
55771 ++ },
55772 ++#endif
55773 ++#ifdef CONFIG_GRKERNSEC_ROFS
55774 ++ {
55775 ++ .procname = "romount_protect",
55776 ++ .data = &grsec_enable_rofs,
55777 ++ .maxlen = sizeof(int),
55778 ++ .mode = 0600,
55779 ++ .proc_handler = &proc_dointvec_minmax,
55780 ++ .extra1 = &one,
55781 ++ .extra2 = &one,
55782 ++ },
55783 ++#endif
55784 ++ { }
55785 ++};
55786 ++#endif
55787 +diff -urNp linux-3.1.1/grsecurity/grsec_time.c linux-3.1.1/grsecurity/grsec_time.c
55788 +--- linux-3.1.1/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55789 ++++ linux-3.1.1/grsecurity/grsec_time.c 2011-11-16 18:40:31.000000000 -0500
55790 +@@ -0,0 +1,16 @@
55791 ++#include <linux/kernel.h>
55792 ++#include <linux/sched.h>
55793 ++#include <linux/grinternal.h>
55794 ++#include <linux/module.h>
55795 ++
55796 ++void
55797 ++gr_log_timechange(void)
55798 ++{
55799 ++#ifdef CONFIG_GRKERNSEC_TIME
55800 ++ if (grsec_enable_time)
55801 ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55802 ++#endif
55803 ++ return;
55804 ++}
55805 ++
55806 ++EXPORT_SYMBOL(gr_log_timechange);
55807 +diff -urNp linux-3.1.1/grsecurity/grsec_tpe.c linux-3.1.1/grsecurity/grsec_tpe.c
55808 +--- linux-3.1.1/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55809 ++++ linux-3.1.1/grsecurity/grsec_tpe.c 2011-11-16 18:40:31.000000000 -0500
55810 +@@ -0,0 +1,39 @@
55811 ++#include <linux/kernel.h>
55812 ++#include <linux/sched.h>
55813 ++#include <linux/file.h>
55814 ++#include <linux/fs.h>
55815 ++#include <linux/grinternal.h>
55816 ++
55817 ++extern int gr_acl_tpe_check(void);
55818 ++
55819 ++int
55820 ++gr_tpe_allow(const struct file *file)
55821 ++{
55822 ++#ifdef CONFIG_GRKERNSEC
55823 ++ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55824 ++ const struct cred *cred = current_cred();
55825 ++
55826 ++ if (cred->uid && ((grsec_enable_tpe &&
55827 ++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55828 ++ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55829 ++ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55830 ++#else
55831 ++ in_group_p(grsec_tpe_gid)
55832 ++#endif
55833 ++ ) || gr_acl_tpe_check()) &&
55834 ++ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55835 ++ (inode->i_mode & S_IWOTH))))) {
55836 ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55837 ++ return 0;
55838 ++ }
55839 ++#ifdef CONFIG_GRKERNSEC_TPE_ALL
55840 ++ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55841 ++ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55842 ++ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55843 ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55844 ++ return 0;
55845 ++ }
55846 ++#endif
55847 ++#endif
55848 ++ return 1;
55849 ++}
55850 +diff -urNp linux-3.1.1/grsecurity/grsum.c linux-3.1.1/grsecurity/grsum.c
55851 +--- linux-3.1.1/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55852 ++++ linux-3.1.1/grsecurity/grsum.c 2011-11-16 18:40:31.000000000 -0500
55853 +@@ -0,0 +1,61 @@
55854 ++#include <linux/err.h>
55855 ++#include <linux/kernel.h>
55856 ++#include <linux/sched.h>
55857 ++#include <linux/mm.h>
55858 ++#include <linux/scatterlist.h>
55859 ++#include <linux/crypto.h>
55860 ++#include <linux/gracl.h>
55861 ++
55862 ++
55863 ++#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55864 ++#error "crypto and sha256 must be built into the kernel"
55865 ++#endif
55866 ++
55867 ++int
55868 ++chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55869 ++{
55870 ++ char *p;
55871 ++ struct crypto_hash *tfm;
55872 ++ struct hash_desc desc;
55873 ++ struct scatterlist sg;
55874 ++ unsigned char temp_sum[GR_SHA_LEN];
55875 ++ volatile int retval = 0;
55876 ++ volatile int dummy = 0;
55877 ++ unsigned int i;
55878 ++
55879 ++ sg_init_table(&sg, 1);
55880 ++
55881 ++ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55882 ++ if (IS_ERR(tfm)) {
55883 ++ /* should never happen, since sha256 should be built in */
55884 ++ return 1;
55885 ++ }
55886 ++
55887 ++ desc.tfm = tfm;
55888 ++ desc.flags = 0;
55889 ++
55890 ++ crypto_hash_init(&desc);
55891 ++
55892 ++ p = salt;
55893 ++ sg_set_buf(&sg, p, GR_SALT_LEN);
55894 ++ crypto_hash_update(&desc, &sg, sg.length);
55895 ++
55896 ++ p = entry->pw;
55897 ++ sg_set_buf(&sg, p, strlen(p));
55898 ++
55899 ++ crypto_hash_update(&desc, &sg, sg.length);
55900 ++
55901 ++ crypto_hash_final(&desc, temp_sum);
55902 ++
55903 ++ memset(entry->pw, 0, GR_PW_LEN);
55904 ++
55905 ++ for (i = 0; i < GR_SHA_LEN; i++)
55906 ++ if (sum[i] != temp_sum[i])
55907 ++ retval = 1;
55908 ++ else
55909 ++ dummy = 1; // waste a cycle
55910 ++
55911 ++ crypto_free_hash(tfm);
55912 ++
55913 ++ return retval;
55914 ++}
55915 +diff -urNp linux-3.1.1/grsecurity/Kconfig linux-3.1.1/grsecurity/Kconfig
55916 +--- linux-3.1.1/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
55917 ++++ linux-3.1.1/grsecurity/Kconfig 2011-11-16 18:40:31.000000000 -0500
55918 +@@ -0,0 +1,1037 @@
55919 ++#
55920 ++# grsecurity configuration
55921 ++#
55922 ++
55923 ++menu "Grsecurity"
55924 ++
55925 ++config GRKERNSEC
55926 ++ bool "Grsecurity"
55927 ++ select CRYPTO
55928 ++ select CRYPTO_SHA256
55929 ++ help
55930 ++ If you say Y here, you will be able to configure many features
55931 ++ that will enhance the security of your system. It is highly
55932 ++ recommended that you say Y here and read through the help
55933 ++ for each option so that you fully understand the features and
55934 ++ can evaluate their usefulness for your machine.
55935 ++
55936 ++choice
55937 ++ prompt "Security Level"
55938 ++ depends on GRKERNSEC
55939 ++ default GRKERNSEC_CUSTOM
55940 ++
55941 ++config GRKERNSEC_LOW
55942 ++ bool "Low"
55943 ++ select GRKERNSEC_LINK
55944 ++ select GRKERNSEC_FIFO
55945 ++ select GRKERNSEC_RANDNET
55946 ++ select GRKERNSEC_DMESG
55947 ++ select GRKERNSEC_CHROOT
55948 ++ select GRKERNSEC_CHROOT_CHDIR
55949 ++
55950 ++ help
55951 ++ If you choose this option, several of the grsecurity options will
55952 ++ be enabled that will give you greater protection against a number
55953 ++ of attacks, while assuring that none of your software will have any
55954 ++ conflicts with the additional security measures. If you run a lot
55955 ++ of unusual software, or you are having problems with the higher
55956 ++ security levels, you should say Y here. With this option, the
55957 ++ following features are enabled:
55958 ++
55959 ++ - Linking restrictions
55960 ++ - FIFO restrictions
55961 ++ - Restricted dmesg
55962 ++ - Enforced chdir("/") on chroot
55963 ++ - Runtime module disabling
55964 ++
55965 ++config GRKERNSEC_MEDIUM
55966 ++ bool "Medium"
55967 ++ select PAX
55968 ++ select PAX_EI_PAX
55969 ++ select PAX_PT_PAX_FLAGS
55970 ++ select PAX_HAVE_ACL_FLAGS
55971 ++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55972 ++ select GRKERNSEC_CHROOT
55973 ++ select GRKERNSEC_CHROOT_SYSCTL
55974 ++ select GRKERNSEC_LINK
55975 ++ select GRKERNSEC_FIFO
55976 ++ select GRKERNSEC_DMESG
55977 ++ select GRKERNSEC_RANDNET
55978 ++ select GRKERNSEC_FORKFAIL
55979 ++ select GRKERNSEC_TIME
55980 ++ select GRKERNSEC_SIGNAL
55981 ++ select GRKERNSEC_CHROOT
55982 ++ select GRKERNSEC_CHROOT_UNIX
55983 ++ select GRKERNSEC_CHROOT_MOUNT
55984 ++ select GRKERNSEC_CHROOT_PIVOT
55985 ++ select GRKERNSEC_CHROOT_DOUBLE
55986 ++ select GRKERNSEC_CHROOT_CHDIR
55987 ++ select GRKERNSEC_CHROOT_MKNOD
55988 ++ select GRKERNSEC_PROC
55989 ++ select GRKERNSEC_PROC_USERGROUP
55990 ++ select PAX_RANDUSTACK
55991 ++ select PAX_ASLR
55992 ++ select PAX_RANDMMAP
55993 ++ select PAX_REFCOUNT if (X86 || SPARC64)
55994 ++ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55995 ++
55996 ++ help
55997 ++ If you say Y here, several features in addition to those included
55998 ++ in the low additional security level will be enabled. These
55999 ++ features provide even more security to your system, though in rare
56000 ++ cases they may be incompatible with very old or poorly written
56001 ++ software. If you enable this option, make sure that your auth
56002 ++ service (identd) is running as gid 1001. With this option,
56003 ++ the following features (in addition to those provided in the
56004 ++ low additional security level) will be enabled:
56005 ++
56006 ++ - Failed fork logging
56007 ++ - Time change logging
56008 ++ - Signal logging
56009 ++ - Deny mounts in chroot
56010 ++ - Deny double chrooting
56011 ++ - Deny sysctl writes in chroot
56012 ++ - Deny mknod in chroot
56013 ++ - Deny access to abstract AF_UNIX sockets out of chroot
56014 ++ - Deny pivot_root in chroot
56015 ++ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
56016 ++ - /proc restrictions with special GID set to 10 (usually wheel)
56017 ++ - Address Space Layout Randomization (ASLR)
56018 ++ - Prevent exploitation of most refcount overflows
56019 ++ - Bounds checking of copying between the kernel and userland
56020 ++
56021 ++config GRKERNSEC_HIGH
56022 ++ bool "High"
56023 ++ select GRKERNSEC_LINK
56024 ++ select GRKERNSEC_FIFO
56025 ++ select GRKERNSEC_DMESG
56026 ++ select GRKERNSEC_FORKFAIL
56027 ++ select GRKERNSEC_TIME
56028 ++ select GRKERNSEC_SIGNAL
56029 ++ select GRKERNSEC_CHROOT
56030 ++ select GRKERNSEC_CHROOT_SHMAT
56031 ++ select GRKERNSEC_CHROOT_UNIX
56032 ++ select GRKERNSEC_CHROOT_MOUNT
56033 ++ select GRKERNSEC_CHROOT_FCHDIR
56034 ++ select GRKERNSEC_CHROOT_PIVOT
56035 ++ select GRKERNSEC_CHROOT_DOUBLE
56036 ++ select GRKERNSEC_CHROOT_CHDIR
56037 ++ select GRKERNSEC_CHROOT_MKNOD
56038 ++ select GRKERNSEC_CHROOT_CAPS
56039 ++ select GRKERNSEC_CHROOT_SYSCTL
56040 ++ select GRKERNSEC_CHROOT_FINDTASK
56041 ++ select GRKERNSEC_SYSFS_RESTRICT
56042 ++ select GRKERNSEC_PROC
56043 ++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
56044 ++ select GRKERNSEC_HIDESYM
56045 ++ select GRKERNSEC_BRUTE
56046 ++ select GRKERNSEC_PROC_USERGROUP
56047 ++ select GRKERNSEC_KMEM
56048 ++ select GRKERNSEC_RESLOG
56049 ++ select GRKERNSEC_RANDNET
56050 ++ select GRKERNSEC_PROC_ADD
56051 ++ select GRKERNSEC_CHROOT_CHMOD
56052 ++ select GRKERNSEC_CHROOT_NICE
56053 ++ select GRKERNSEC_AUDIT_MOUNT
56054 ++ select GRKERNSEC_MODHARDEN if (MODULES)
56055 ++ select GRKERNSEC_HARDEN_PTRACE
56056 ++ select GRKERNSEC_VM86 if (X86_32)
56057 ++ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
56058 ++ select PAX
56059 ++ select PAX_RANDUSTACK
56060 ++ select PAX_ASLR
56061 ++ select PAX_RANDMMAP
56062 ++ select PAX_NOEXEC
56063 ++ select PAX_MPROTECT
56064 ++ select PAX_EI_PAX
56065 ++ select PAX_PT_PAX_FLAGS
56066 ++ select PAX_HAVE_ACL_FLAGS
56067 ++ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
56068 ++ select PAX_MEMORY_UDEREF if (X86 && !XEN)
56069 ++ select PAX_RANDKSTACK if (X86_TSC && X86)
56070 ++ select PAX_SEGMEXEC if (X86_32)
56071 ++ select PAX_PAGEEXEC
56072 ++ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
56073 ++ select PAX_EMUTRAMP if (PARISC)
56074 ++ select PAX_EMUSIGRT if (PARISC)
56075 ++ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
56076 ++ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
56077 ++ select PAX_REFCOUNT if (X86 || SPARC64)
56078 ++ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
56079 ++ help
56080 ++ If you say Y here, many of the features of grsecurity will be
56081 ++ enabled, which will protect you against many kinds of attacks
56082 ++ against your system. The heightened security comes at a cost
56083 ++ of an increased chance of incompatibilities with rare software
56084 ++ on your machine. Since this security level enables PaX, you should
56085 ++ view <http://pax.grsecurity.net> and read about the PaX
56086 ++ project. While you are there, download chpax and run it on
56087 ++ binaries that cause problems with PaX. Also remember that
56088 ++ since the /proc restrictions are enabled, you must run your
56089 ++ identd as gid 1001. This security level enables the following
56090 ++ features in addition to those listed in the low and medium
56091 ++ security levels:
56092 ++
56093 ++ - Additional /proc restrictions
56094 ++ - Chmod restrictions in chroot
56095 ++ - No signals, ptrace, or viewing of processes outside of chroot
56096 ++ - Capability restrictions in chroot
56097 ++ - Deny fchdir out of chroot
56098 ++ - Priority restrictions in chroot
56099 ++ - Segmentation-based implementation of PaX
56100 ++ - Mprotect restrictions
56101 ++ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
56102 ++ - Kernel stack randomization
56103 ++ - Mount/unmount/remount logging
56104 ++ - Kernel symbol hiding
56105 ++ - Hardening of module auto-loading
56106 ++ - Ptrace restrictions
56107 ++ - Restricted vm86 mode
56108 ++ - Restricted sysfs/debugfs
56109 ++ - Active kernel exploit response
56110 ++
56111 ++config GRKERNSEC_CUSTOM
56112 ++ bool "Custom"
56113 ++ help
56114 ++ If you say Y here, you will be able to configure every grsecurity
56115 ++ option, which allows you to enable many more features that aren't
56116 ++ covered in the basic security levels. These additional features
56117 ++ include TPE, socket restrictions, and the sysctl system for
56118 ++ grsecurity. It is advised that you read through the help for
56119 ++ each option to determine its usefulness in your situation.
56120 ++
56121 ++endchoice
56122 ++
56123 ++menu "Address Space Protection"
56124 ++depends on GRKERNSEC
56125 ++
56126 ++config GRKERNSEC_KMEM
56127 ++ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
56128 ++ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
56129 ++ help
56130 ++ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
56131 ++ be written to or read from to modify or leak the contents of the running
56132 ++ kernel. /dev/port will also not be allowed to be opened. If you have module
56133 ++ support disabled, enabling this will close up four ways that are
56134 ++ currently used to insert malicious code into the running kernel.
56135 ++ Even with all these features enabled, we still highly recommend that
56136 ++ you use the RBAC system, as it is still possible for an attacker to
56137 ++ modify the running kernel through privileged I/O granted by ioperm/iopl.
56138 ++ If you are not using XFree86, you may be able to stop this additional
56139 ++ case by enabling the 'Disable privileged I/O' option. Though nothing
56140 ++ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
56141 ++ but only to video memory, which is the only writing we allow in this
56142 ++ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
56143 ++ not be allowed to mprotect it with PROT_WRITE later.
56144 ++ It is highly recommended that you say Y here if you meet all the
56145 ++ conditions above.
56146 ++
56147 ++config GRKERNSEC_VM86
56148 ++ bool "Restrict VM86 mode"
56149 ++ depends on X86_32
56150 ++
56151 ++ help
56152 ++ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
56153 ++ make use of a special execution mode on 32bit x86 processors called
56154 ++ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
56155 ++ video cards and will still work with this option enabled. The purpose
56156 ++ of the option is to prevent exploitation of emulation errors in
56157 ++ virtualization of vm86 mode like the one discovered in VMWare in 2009.
56158 ++ Nearly all users should be able to enable this option.
56159 ++
56160 ++config GRKERNSEC_IO
56161 ++ bool "Disable privileged I/O"
56162 ++ depends on X86
56163 ++ select RTC_CLASS
56164 ++ select RTC_INTF_DEV
56165 ++ select RTC_DRV_CMOS
56166 ++
56167 ++ help
56168 ++ If you say Y here, all ioperm and iopl calls will return an error.
56169 ++ Ioperm and iopl can be used to modify the running kernel.
56170 ++ Unfortunately, some programs need this access to operate properly,
56171 ++ the most notable of which are XFree86 and hwclock. hwclock can be
56172 ++ remedied by having RTC support in the kernel, so real-time
56173 ++ clock support is enabled if this option is enabled, to ensure
56174 ++ that hwclock operates correctly. XFree86 still will not
56175 ++ operate correctly with this option enabled, so DO NOT CHOOSE Y
56176 ++ IF YOU USE XFree86. If you use XFree86 and you still want to
56177 ++ protect your kernel against modification, use the RBAC system.
56178 ++
56179 ++config GRKERNSEC_PROC_MEMMAP
56180 ++ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
56181 ++ default y if (PAX_NOEXEC || PAX_ASLR)
56182 ++ depends on PAX_NOEXEC || PAX_ASLR
56183 ++ help
56184 ++ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
56185 ++ give no information about the addresses of its mappings if
56186 ++ PaX features that rely on random addresses are enabled on the task.
56187 ++ If you use PaX it is greatly recommended that you say Y here as it
56188 ++ closes up a hole that makes the full ASLR useless for suid
56189 ++ binaries.
56190 ++
56191 ++config GRKERNSEC_BRUTE
56192 ++ bool "Deter exploit bruteforcing"
56193 ++ help
56194 ++ If you say Y here, attempts to bruteforce exploits against forking
56195 ++ daemons such as apache or sshd, as well as against suid/sgid binaries
56196 ++ will be deterred. When a child of a forking daemon is killed by PaX
56197 ++ or crashes due to an illegal instruction or other suspicious signal,
56198 ++ the parent process will be delayed 30 seconds upon every subsequent
56199 ++ fork until the administrator is able to assess the situation and
56200 ++ restart the daemon.
56201 ++ In the suid/sgid case, the attempt is logged, the user has all their
56202 ++ processes terminated, and they are prevented from executing any further
56203 ++ processes for 15 minutes.
56204 ++ It is recommended that you also enable signal logging in the auditing
56205 ++ section so that logs are generated when a process triggers a suspicious
56206 ++ signal.
56207 ++ If the sysctl option is enabled, a sysctl option with name
56208 ++ "deter_bruteforce" is created.
56209 ++
56210 ++
56211 ++config GRKERNSEC_MODHARDEN
56212 ++ bool "Harden module auto-loading"
56213 ++ depends on MODULES
56214 ++ help
56215 ++ If you say Y here, module auto-loading in response to use of some
56216 ++ feature implemented by an unloaded module will be restricted to
56217 ++ root users. Enabling this option helps defend against attacks
56218 ++ by unprivileged users who abuse the auto-loading behavior to
56219 ++ cause a vulnerable module to load that is then exploited.
56220 ++
56221 ++ If this option prevents a legitimate use of auto-loading for a
56222 ++ non-root user, the administrator can execute modprobe manually
56223 ++ with the exact name of the module mentioned in the alert log.
56224 ++ Alternatively, the administrator can add the module to the list
56225 ++ of modules loaded at boot by modifying init scripts.
56226 ++
56227 ++ Modification of init scripts will most likely be needed on
56228 ++ Ubuntu servers with encrypted home directory support enabled,
56229 ++ as the first non-root user logging in will cause the ecb(aes),
56230 ++ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
56231 ++
56232 ++config GRKERNSEC_HIDESYM
56233 ++ bool "Hide kernel symbols"
56234 ++ help
56235 ++ If you say Y here, getting information on loaded modules, and
56236 ++ displaying all kernel symbols through a syscall will be restricted
56237 ++ to users with CAP_SYS_MODULE. For software compatibility reasons,
56238 ++ /proc/kallsyms will be restricted to the root user. The RBAC
56239 ++ system can hide that entry even from root.
56240 ++
56241 ++ This option also prevents leaking of kernel addresses through
56242 ++ several /proc entries.
56243 ++
56244 ++ Note that this option is only effective provided the following
56245 ++ conditions are met:
56246 ++ 1) The kernel using grsecurity is not precompiled by some distribution
56247 ++ 2) You have also enabled GRKERNSEC_DMESG
56248 ++ 3) You are using the RBAC system and hiding other files such as your
56249 ++ kernel image and System.map. Alternatively, enabling this option
56250 ++ causes the permissions on /boot, /lib/modules, and the kernel
56251 ++ source directory to change at compile time to prevent
56252 ++ reading by non-root users.
56253 ++ If the above conditions are met, this option will aid in providing a
56254 ++ useful protection against local kernel exploitation of overflows
56255 ++ and arbitrary read/write vulnerabilities.
56256 ++
56257 ++config GRKERNSEC_KERN_LOCKOUT
56258 ++ bool "Active kernel exploit response"
56259 ++ depends on X86 || ARM || PPC || SPARC
56260 ++ help
56261 ++ If you say Y here, when a PaX alert is triggered due to suspicious
56262 ++ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
56263 ++ or an OOPs occurs due to bad memory accesses, instead of just
56264 ++ terminating the offending process (and potentially allowing
56265 ++ a subsequent exploit from the same user), we will take one of two
56266 ++ actions:
56267 ++ If the user was root, we will panic the system
56268 ++ If the user was non-root, we will log the attempt, terminate
56269 ++ all processes owned by the user, then prevent them from creating
56270 ++ any new processes until the system is restarted
56271 ++ This deters repeated kernel exploitation/bruteforcing attempts
56272 ++ and is useful for later forensics.
56273 ++
56274 ++endmenu
56275 ++menu "Role Based Access Control Options"
56276 ++depends on GRKERNSEC
56277 ++
56278 ++config GRKERNSEC_RBAC_DEBUG
56279 ++ bool
56280 ++
56281 ++config GRKERNSEC_NO_RBAC
56282 ++ bool "Disable RBAC system"
56283 ++ help
56284 ++ If you say Y here, the /dev/grsec device will be removed from the kernel,
56285 ++ preventing the RBAC system from being enabled. You should only say Y
56286 ++ here if you have no intention of using the RBAC system, so as to prevent
56287 ++ an attacker with root access from misusing the RBAC system to hide files
56288 ++ and processes when loadable module support and /dev/[k]mem have been
56289 ++ locked down.
56290 ++
56291 ++config GRKERNSEC_ACL_HIDEKERN
56292 ++ bool "Hide kernel processes"
56293 ++ help
56294 ++ If you say Y here, all kernel threads will be hidden to all
56295 ++ processes but those whose subject has the "view hidden processes"
56296 ++ flag.
56297 ++
56298 ++config GRKERNSEC_ACL_MAXTRIES
56299 ++ int "Maximum tries before password lockout"
56300 ++ default 3
56301 ++ help
56302 ++ This option enforces the maximum number of times a user can attempt
56303 ++ to authorize themselves with the grsecurity RBAC system before being
56304 ++ denied the ability to attempt authorization again for a specified time.
56305 ++ The lower the number, the harder it will be to brute-force a password.
56306 ++
56307 ++config GRKERNSEC_ACL_TIMEOUT
56308 ++ int "Time to wait after max password tries, in seconds"
56309 ++ default 30
56310 ++ help
56311 ++ This option specifies the time the user must wait after attempting to
56312 ++ authorize to the RBAC system with the maximum number of invalid
56313 ++ passwords. The higher the number, the harder it will be to brute-force
56314 ++ a password.
56315 ++
56316 ++endmenu
56317 ++menu "Filesystem Protections"
56318 ++depends on GRKERNSEC
56319 ++
56320 ++config GRKERNSEC_PROC
56321 ++ bool "Proc restrictions"
56322 ++ help
56323 ++ If you say Y here, the permissions of the /proc filesystem
56324 ++ will be altered to enhance system security and privacy. You MUST
56325 ++ choose either a user only restriction or a user and group restriction.
56326 ++ Depending upon the option you choose, you can either restrict users to
56327 ++ see only the processes they themselves run, or choose a group that can
56328 ++ view all processes and files normally restricted to root if you choose
56329 ++ the "restrict to user only" option. NOTE: If you're running identd as
56330 ++ a non-root user, you will have to run it as the group you specify here.
56331 ++
56332 ++config GRKERNSEC_PROC_USER
56333 ++ bool "Restrict /proc to user only"
56334 ++ depends on GRKERNSEC_PROC
56335 ++ help
56336 ++ If you say Y here, non-root users will only be able to view their own
56337 ++ processes, and restricts them from viewing network-related information,
56338 ++ and viewing kernel symbol and module information.
56339 ++
56340 ++config GRKERNSEC_PROC_USERGROUP
56341 ++ bool "Allow special group"
56342 ++ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
56343 ++ help
56344 ++ If you say Y here, you will be able to select a group that will be
56345 ++ able to view all processes and network-related information. If you've
56346 ++ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
56347 ++ remain hidden. This option is useful if you want to run identd as
56348 ++ a non-root user.
56349 ++
56350 ++config GRKERNSEC_PROC_GID
56351 ++ int "GID for special group"
56352 ++ depends on GRKERNSEC_PROC_USERGROUP
56353 ++ default 1001
56354 ++
56355 ++config GRKERNSEC_PROC_ADD
56356 ++ bool "Additional restrictions"
56357 ++ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
56358 ++ help
56359 ++ If you say Y here, additional restrictions will be placed on
56360 ++ /proc that keep normal users from viewing device information and
56361 ++ slabinfo information that could be useful for exploits.
56362 ++
56363 ++config GRKERNSEC_LINK
56364 ++ bool "Linking restrictions"
56365 ++ help
56366 ++ If you say Y here, /tmp race exploits will be prevented, since users
56367 ++ will no longer be able to follow symlinks owned by other users in
56368 ++ world-writable +t directories (e.g. /tmp), unless the owner of the
56369 ++	  symlink is the owner of the directory. Users will also not be
56370 ++ able to hardlink to files they do not own. If the sysctl option is
56371 ++ enabled, a sysctl option with name "linking_restrictions" is created.
56372 ++
56373 ++config GRKERNSEC_FIFO
56374 ++ bool "FIFO restrictions"
56375 ++ help
56376 ++ If you say Y here, users will not be able to write to FIFOs they don't
56377 ++ own in world-writable +t directories (e.g. /tmp), unless the owner of
56378 ++ the FIFO is the same owner of the directory it's held in. If the sysctl
56379 ++ option is enabled, a sysctl option with name "fifo_restrictions" is
56380 ++ created.
56381 ++
56382 ++config GRKERNSEC_SYSFS_RESTRICT
56383 ++ bool "Sysfs/debugfs restriction"
56384 ++ depends on SYSFS
56385 ++ help
56386 ++ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
56387 ++ any filesystem normally mounted under it (e.g. debugfs) will only
56388 ++ be accessible by root. These filesystems generally provide access
56389 ++ to hardware and debug information that isn't appropriate for unprivileged
56390 ++ users of the system. Sysfs and debugfs have also become a large source
56391 ++ of new vulnerabilities, ranging from infoleaks to local compromise.
56392 ++ There has been very little oversight with an eye toward security involved
56393 ++ in adding new exporters of information to these filesystems, so their
56394 ++ use is discouraged.
56395 ++ This option is equivalent to a chmod 0700 of the mount paths.
56396 ++
56397 ++config GRKERNSEC_ROFS
56398 ++ bool "Runtime read-only mount protection"
56399 ++ help
56400 ++ If you say Y here, a sysctl option with name "romount_protect" will
56401 ++ be created. By setting this option to 1 at runtime, filesystems
56402 ++ will be protected in the following ways:
56403 ++ * No new writable mounts will be allowed
56404 ++ * Existing read-only mounts won't be able to be remounted read/write
56405 ++ * Write operations will be denied on all block devices
56406 ++ This option acts independently of grsec_lock: once it is set to 1,
56407 ++ it cannot be turned off. Therefore, please be mindful of the resulting
56408 ++ behavior if this option is enabled in an init script on a read-only
56409 ++ filesystem. This feature is mainly intended for secure embedded systems.
56410 ++
56411 ++config GRKERNSEC_CHROOT
56412 ++ bool "Chroot jail restrictions"
56413 ++ help
56414 ++ If you say Y here, you will be able to choose several options that will
56415 ++ make breaking out of a chrooted jail much more difficult. If you
56416 ++ encounter no software incompatibilities with the following options, it
56417 ++ is recommended that you enable each one.
56418 ++
56419 ++config GRKERNSEC_CHROOT_MOUNT
56420 ++ bool "Deny mounts"
56421 ++ depends on GRKERNSEC_CHROOT
56422 ++ help
56423 ++ If you say Y here, processes inside a chroot will not be able to
56424 ++ mount or remount filesystems. If the sysctl option is enabled, a
56425 ++ sysctl option with name "chroot_deny_mount" is created.
56426 ++
56427 ++config GRKERNSEC_CHROOT_DOUBLE
56428 ++ bool "Deny double-chroots"
56429 ++ depends on GRKERNSEC_CHROOT
56430 ++ help
56431 ++ If you say Y here, processes inside a chroot will not be able to chroot
56432 ++ again outside the chroot. This is a widely used method of breaking
56433 ++ out of a chroot jail and should not be allowed. If the sysctl
56434 ++ option is enabled, a sysctl option with name
56435 ++ "chroot_deny_chroot" is created.
56436 ++
56437 ++config GRKERNSEC_CHROOT_PIVOT
56438 ++ bool "Deny pivot_root in chroot"
56439 ++ depends on GRKERNSEC_CHROOT
56440 ++ help
56441 ++ If you say Y here, processes inside a chroot will not be able to use
56442 ++ a function called pivot_root() that was introduced in Linux 2.3.41. It
56443 ++ works similar to chroot in that it changes the root filesystem. This
56444 ++ function could be misused in a chrooted process to attempt to break out
56445 ++ of the chroot, and therefore should not be allowed. If the sysctl
56446 ++ option is enabled, a sysctl option with name "chroot_deny_pivot" is
56447 ++ created.
56448 ++
56449 ++config GRKERNSEC_CHROOT_CHDIR
56450 ++ bool "Enforce chdir(\"/\") on all chroots"
56451 ++ depends on GRKERNSEC_CHROOT
56452 ++ help
56453 ++ If you say Y here, the current working directory of all newly-chrooted
56454 ++	  applications will be set to the root directory of the chroot.
56455 ++ The man page on chroot(2) states:
56456 ++ Note that this call does not change the current working
56457 ++ directory, so that `.' can be outside the tree rooted at
56458 ++ `/'. In particular, the super-user can escape from a
56459 ++ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56460 ++
56461 ++ It is recommended that you say Y here, since it's not known to break
56462 ++ any software. If the sysctl option is enabled, a sysctl option with
56463 ++ name "chroot_enforce_chdir" is created.
56464 ++
56465 ++config GRKERNSEC_CHROOT_CHMOD
56466 ++ bool "Deny (f)chmod +s"
56467 ++ depends on GRKERNSEC_CHROOT
56468 ++ help
56469 ++ If you say Y here, processes inside a chroot will not be able to chmod
56470 ++ or fchmod files to make them have suid or sgid bits. This protects
56471 ++ against another published method of breaking a chroot. If the sysctl
56472 ++ option is enabled, a sysctl option with name "chroot_deny_chmod" is
56473 ++ created.
56474 ++
56475 ++config GRKERNSEC_CHROOT_FCHDIR
56476 ++ bool "Deny fchdir out of chroot"
56477 ++ depends on GRKERNSEC_CHROOT
56478 ++ help
56479 ++ If you say Y here, a well-known method of breaking chroots by fchdir'ing
56480 ++ to a file descriptor of the chrooting process that points to a directory
56481 ++ outside the filesystem will be stopped. If the sysctl option
56482 ++ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56483 ++
56484 ++config GRKERNSEC_CHROOT_MKNOD
56485 ++ bool "Deny mknod"
56486 ++ depends on GRKERNSEC_CHROOT
56487 ++ help
56488 ++ If you say Y here, processes inside a chroot will not be allowed to
56489 ++ mknod. The problem with using mknod inside a chroot is that it
56490 ++ would allow an attacker to create a device entry that is the same
56491 ++	  as one on the physical root of your system, which could be
56492 ++	  anything from the console device to a device for your harddrive (which
56493 ++ they could then use to wipe the drive or steal data). It is recommended
56494 ++ that you say Y here, unless you run into software incompatibilities.
56495 ++ If the sysctl option is enabled, a sysctl option with name
56496 ++ "chroot_deny_mknod" is created.
56497 ++
56498 ++config GRKERNSEC_CHROOT_SHMAT
56499 ++ bool "Deny shmat() out of chroot"
56500 ++ depends on GRKERNSEC_CHROOT
56501 ++ help
56502 ++ If you say Y here, processes inside a chroot will not be able to attach
56503 ++ to shared memory segments that were created outside of the chroot jail.
56504 ++ It is recommended that you say Y here. If the sysctl option is enabled,
56505 ++ a sysctl option with name "chroot_deny_shmat" is created.
56506 ++
56507 ++config GRKERNSEC_CHROOT_UNIX
56508 ++ bool "Deny access to abstract AF_UNIX sockets out of chroot"
56509 ++ depends on GRKERNSEC_CHROOT
56510 ++ help
56511 ++ If you say Y here, processes inside a chroot will not be able to
56512 ++ connect to abstract (meaning not belonging to a filesystem) Unix
56513 ++ domain sockets that were bound outside of a chroot. It is recommended
56514 ++ that you say Y here. If the sysctl option is enabled, a sysctl option
56515 ++ with name "chroot_deny_unix" is created.
56516 ++
56517 ++config GRKERNSEC_CHROOT_FINDTASK
56518 ++ bool "Protect outside processes"
56519 ++ depends on GRKERNSEC_CHROOT
56520 ++ help
56521 ++ If you say Y here, processes inside a chroot will not be able to
56522 ++ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56523 ++ getsid, or view any process outside of the chroot. If the sysctl
56524 ++ option is enabled, a sysctl option with name "chroot_findtask" is
56525 ++ created.
56526 ++
56527 ++config GRKERNSEC_CHROOT_NICE
56528 ++ bool "Restrict priority changes"
56529 ++ depends on GRKERNSEC_CHROOT
56530 ++ help
56531 ++ If you say Y here, processes inside a chroot will not be able to raise
56532 ++ the priority of processes in the chroot, or alter the priority of
56533 ++ processes outside the chroot. This provides more security than simply
56534 ++ removing CAP_SYS_NICE from the process' capability set. If the
56535 ++ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56536 ++ is created.
56537 ++
56538 ++config GRKERNSEC_CHROOT_SYSCTL
56539 ++ bool "Deny sysctl writes"
56540 ++ depends on GRKERNSEC_CHROOT
56541 ++ help
56542 ++ If you say Y here, an attacker in a chroot will not be able to
56543 ++ write to sysctl entries, either by sysctl(2) or through a /proc
56544 ++ interface. It is strongly recommended that you say Y here. If the
56545 ++ sysctl option is enabled, a sysctl option with name
56546 ++ "chroot_deny_sysctl" is created.
56547 ++
56548 ++config GRKERNSEC_CHROOT_CAPS
56549 ++ bool "Capability restrictions"
56550 ++ depends on GRKERNSEC_CHROOT
56551 ++ help
56552 ++ If you say Y here, the capabilities on all processes within a
56553 ++ chroot jail will be lowered to stop module insertion, raw i/o,
56554 ++ system and net admin tasks, rebooting the system, modifying immutable
56555 ++ files, modifying IPC owned by another, and changing the system time.
56556 ++ This is left an option because it can break some apps. Disable this
56557 ++ if your chrooted apps are having problems performing those kinds of
56558 ++ tasks. If the sysctl option is enabled, a sysctl option with
56559 ++ name "chroot_caps" is created.
56560 ++
56561 ++endmenu
56562 ++menu "Kernel Auditing"
56563 ++depends on GRKERNSEC
56564 ++
56565 ++config GRKERNSEC_AUDIT_GROUP
56566 ++ bool "Single group for auditing"
56567 ++ help
56568 ++ If you say Y here, the exec, chdir, and (un)mount logging features
56569 ++ will only operate on a group you specify. This option is recommended
56570 ++ if you only want to watch certain users instead of having a large
56571 ++ amount of logs from the entire system. If the sysctl option is enabled,
56572 ++ a sysctl option with name "audit_group" is created.
56573 ++
56574 ++config GRKERNSEC_AUDIT_GID
56575 ++ int "GID for auditing"
56576 ++ depends on GRKERNSEC_AUDIT_GROUP
56577 ++ default 1007
56578 ++
56579 ++config GRKERNSEC_EXECLOG
56580 ++ bool "Exec logging"
56581 ++ help
56582 ++ If you say Y here, all execve() calls will be logged (since the
56583 ++ other exec*() calls are frontends to execve(), all execution
56584 ++ will be logged). Useful for shell-servers that like to keep track
56585 ++ of their users. If the sysctl option is enabled, a sysctl option with
56586 ++ name "exec_logging" is created.
56587 ++ WARNING: This option when enabled will produce a LOT of logs, especially
56588 ++ on an active system.
56589 ++
56590 ++config GRKERNSEC_RESLOG
56591 ++ bool "Resource logging"
56592 ++ help
56593 ++ If you say Y here, all attempts to overstep resource limits will
56594 ++ be logged with the resource name, the requested size, and the current
56595 ++ limit. It is highly recommended that you say Y here. If the sysctl
56596 ++ option is enabled, a sysctl option with name "resource_logging" is
56597 ++ created. If the RBAC system is enabled, the sysctl value is ignored.
56598 ++
56599 ++config GRKERNSEC_CHROOT_EXECLOG
56600 ++ bool "Log execs within chroot"
56601 ++ help
56602 ++ If you say Y here, all executions inside a chroot jail will be logged
56603 ++ to syslog. This can cause a large amount of logs if certain
56604 ++ applications (eg. djb's daemontools) are installed on the system, and
56605 ++ is therefore left as an option. If the sysctl option is enabled, a
56606 ++ sysctl option with name "chroot_execlog" is created.
56607 ++
56608 ++config GRKERNSEC_AUDIT_PTRACE
56609 ++ bool "Ptrace logging"
56610 ++ help
56611 ++ If you say Y here, all attempts to attach to a process via ptrace
56612 ++ will be logged. If the sysctl option is enabled, a sysctl option
56613 ++ with name "audit_ptrace" is created.
56614 ++
56615 ++config GRKERNSEC_AUDIT_CHDIR
56616 ++ bool "Chdir logging"
56617 ++ help
56618 ++ If you say Y here, all chdir() calls will be logged. If the sysctl
56619 ++ option is enabled, a sysctl option with name "audit_chdir" is created.
56620 ++
56621 ++config GRKERNSEC_AUDIT_MOUNT
56622 ++ bool "(Un)Mount logging"
56623 ++ help
56624 ++ If you say Y here, all mounts and unmounts will be logged. If the
56625 ++ sysctl option is enabled, a sysctl option with name "audit_mount" is
56626 ++ created.
56627 ++
56628 ++config GRKERNSEC_SIGNAL
56629 ++ bool "Signal logging"
56630 ++ help
56631 ++ If you say Y here, certain important signals will be logged, such as
56632 ++ SIGSEGV, which will as a result inform you of when an error in a program
56633 ++ occurred, which in some cases could mean a possible exploit attempt.
56634 ++ If the sysctl option is enabled, a sysctl option with name
56635 ++ "signal_logging" is created.
56636 ++
56637 ++config GRKERNSEC_FORKFAIL
56638 ++ bool "Fork failure logging"
56639 ++ help
56640 ++ If you say Y here, all failed fork() attempts will be logged.
56641 ++ This could suggest a fork bomb, or someone attempting to overstep
56642 ++ their process limit. If the sysctl option is enabled, a sysctl option
56643 ++ with name "forkfail_logging" is created.
56644 ++
56645 ++config GRKERNSEC_TIME
56646 ++ bool "Time change logging"
56647 ++ help
56648 ++ If you say Y here, any changes of the system clock will be logged.
56649 ++ If the sysctl option is enabled, a sysctl option with name
56650 ++ "timechange_logging" is created.
56651 ++
56652 ++config GRKERNSEC_PROC_IPADDR
56653 ++ bool "/proc/<pid>/ipaddr support"
56654 ++ help
56655 ++ If you say Y here, a new entry will be added to each /proc/<pid>
56656 ++ directory that contains the IP address of the person using the task.
56657 ++ The IP is carried across local TCP and AF_UNIX stream sockets.
56658 ++ This information can be useful for IDS/IPSes to perform remote response
56659 ++ to a local attack. The entry is readable by only the owner of the
56660 ++ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56661 ++ the RBAC system), and thus does not create privacy concerns.
56662 ++
56663 ++config GRKERNSEC_RWXMAP_LOG
56664 ++ bool 'Denied RWX mmap/mprotect logging'
56665 ++ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56666 ++ help
56667 ++ If you say Y here, calls to mmap() and mprotect() with explicit
56668 ++ usage of PROT_WRITE and PROT_EXEC together will be logged when
56669 ++ denied by the PAX_MPROTECT feature. If the sysctl option is
56670 ++ enabled, a sysctl option with name "rwxmap_logging" is created.
56671 ++
56672 ++config GRKERNSEC_AUDIT_TEXTREL
56673 ++ bool 'ELF text relocations logging (READ HELP)'
56674 ++ depends on PAX_MPROTECT
56675 ++ help
56676 ++ If you say Y here, text relocations will be logged with the filename
56677 ++ of the offending library or binary. The purpose of the feature is
56678 ++ to help Linux distribution developers get rid of libraries and
56679 ++ binaries that need text relocations which hinder the future progress
56680 ++ of PaX. Only Linux distribution developers should say Y here, and
56681 ++ never on a production machine, as this option creates an information
56682 ++ leak that could aid an attacker in defeating the randomization of
56683 ++ a single memory region. If the sysctl option is enabled, a sysctl
56684 ++ option with name "audit_textrel" is created.
56685 ++
56686 ++endmenu
56687 ++
56688 ++menu "Executable Protections"
56689 ++depends on GRKERNSEC
56690 ++
56691 ++config GRKERNSEC_DMESG
56692 ++ bool "Dmesg(8) restriction"
56693 ++ help
56694 ++ If you say Y here, non-root users will not be able to use dmesg(8)
56695 ++ to view up to the last 4kb of messages in the kernel's log buffer.
56696 ++ The kernel's log buffer often contains kernel addresses and other
56697 ++ identifying information useful to an attacker in fingerprinting a
56698 ++ system for a targeted exploit.
56699 ++ If the sysctl option is enabled, a sysctl option with name "dmesg" is
56700 ++ created.
56701 ++
56702 ++config GRKERNSEC_HARDEN_PTRACE
56703 ++ bool "Deter ptrace-based process snooping"
56704 ++ help
56705 ++ If you say Y here, TTY sniffers and other malicious monitoring
56706 ++ programs implemented through ptrace will be defeated. If you
56707 ++ have been using the RBAC system, this option has already been
56708 ++ enabled for several years for all users, with the ability to make
56709 ++ fine-grained exceptions.
56710 ++
56711 ++ This option only affects the ability of non-root users to ptrace
56712 ++ processes that are not a descendant of the ptracing process.
56713 ++ This means that strace ./binary and gdb ./binary will still work,
56714 ++ but attaching to arbitrary processes will not. If the sysctl
56715 ++ option is enabled, a sysctl option with name "harden_ptrace" is
56716 ++ created.
56717 ++
56718 ++config GRKERNSEC_TPE
56719 ++ bool "Trusted Path Execution (TPE)"
56720 ++ help
56721 ++ If you say Y here, you will be able to choose a gid to add to the
56722 ++ supplementary groups of users you want to mark as "untrusted."
56723 ++ These users will not be able to execute any files that are not in
56724 ++ root-owned directories writable only by root. If the sysctl option
56725 ++ is enabled, a sysctl option with name "tpe" is created.
56726 ++
56727 ++config GRKERNSEC_TPE_ALL
56728 ++ bool "Partially restrict all non-root users"
56729 ++ depends on GRKERNSEC_TPE
56730 ++ help
56731 ++ If you say Y here, all non-root users will be covered under
56732 ++ a weaker TPE restriction. This is separate from, and in addition to,
56733 ++ the main TPE options that you have selected elsewhere. Thus, if a
56734 ++ "trusted" GID is chosen, this restriction applies to even that GID.
56735 ++ Under this restriction, all non-root users will only be allowed to
56736 ++ execute files in directories they own that are not group or
56737 ++ world-writable, or in directories owned by root and writable only by
56738 ++ root. If the sysctl option is enabled, a sysctl option with name
56739 ++ "tpe_restrict_all" is created.
56740 ++
56741 ++config GRKERNSEC_TPE_INVERT
56742 ++ bool "Invert GID option"
56743 ++ depends on GRKERNSEC_TPE
56744 ++ help
56745 ++ If you say Y here, the group you specify in the TPE configuration will
56746 ++ decide what group TPE restrictions will be *disabled* for. This
56747 ++ option is useful if you want TPE restrictions to be applied to most
56748 ++ users on the system. If the sysctl option is enabled, a sysctl option
56749 ++ with name "tpe_invert" is created. Unlike other sysctl options, this
56750 ++ entry will default to on for backward-compatibility.
56751 ++
56752 ++config GRKERNSEC_TPE_GID
56753 ++ int "GID for untrusted users"
56754 ++ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56755 ++ default 1005
56756 ++ help
56757 ++ Setting this GID determines what group TPE restrictions will be
56758 ++ *enabled* for. If the sysctl option is enabled, a sysctl option
56759 ++ with name "tpe_gid" is created.
56760 ++
56761 ++config GRKERNSEC_TPE_GID
56762 ++ int "GID for trusted users"
56763 ++ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56764 ++ default 1005
56765 ++ help
56766 ++ Setting this GID determines what group TPE restrictions will be
56767 ++ *disabled* for. If the sysctl option is enabled, a sysctl option
56768 ++ with name "tpe_gid" is created.
56769 ++
56770 ++endmenu
56771 ++menu "Network Protections"
56772 ++depends on GRKERNSEC
56773 ++
56774 ++config GRKERNSEC_RANDNET
56775 ++ bool "Larger entropy pools"
56776 ++ help
56777 ++ If you say Y here, the entropy pools used for many features of Linux
56778 ++ and grsecurity will be doubled in size. Since several grsecurity
56779 ++ features use additional randomness, it is recommended that you say Y
56780 ++ here. Saying Y here has a similar effect as modifying
56781 ++ /proc/sys/kernel/random/poolsize.
56782 ++
56783 ++config GRKERNSEC_BLACKHOLE
56784 ++ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56785 ++ depends on NET
56786 ++ help
56787 ++ If you say Y here, neither TCP resets nor ICMP
56788 ++ destination-unreachable packets will be sent in response to packets
56789 ++ sent to ports for which no associated listening process exists.
56790 ++ This feature supports both IPV4 and IPV6 and exempts the
56791 ++ loopback interface from blackholing. Enabling this feature
56792 ++ makes a host more resilient to DoS attacks and reduces network
56793 ++ visibility against scanners.
56794 ++
56795 ++ The blackhole feature as-implemented is equivalent to the FreeBSD
56796 ++ blackhole feature, as it prevents RST responses to all packets, not
56797 ++ just SYNs. Under most application behavior this causes no
56798 ++ problems, but applications (like haproxy) may not close certain
56799 ++ connections in a way that cleanly terminates them on the remote
56800 ++ end, leaving the remote host in LAST_ACK state. Because of this
56801 ++ side-effect and to prevent intentional LAST_ACK DoSes, this
56802 ++ feature also adds automatic mitigation against such attacks.
56803 ++ The mitigation drastically reduces the amount of time a socket
56804 ++ can spend in LAST_ACK state. If you're using haproxy and not
56805 ++ all servers it connects to have this option enabled, consider
56806 ++ disabling this feature on the haproxy host.
56807 ++
56808 ++ If the sysctl option is enabled, two sysctl options with names
56809 ++ "ip_blackhole" and "lastack_retries" will be created.
56810 ++ While "ip_blackhole" takes the standard zero/non-zero on/off
56811 ++ toggle, "lastack_retries" uses the same kinds of values as
56812 ++ "tcp_retries1" and "tcp_retries2". The default value of 4
56813 ++ prevents a socket from lasting more than 45 seconds in LAST_ACK
56814 ++ state.
56815 ++
56816 ++config GRKERNSEC_SOCKET
56817 ++ bool "Socket restrictions"
56818 ++ depends on NET
56819 ++ help
56820 ++ If you say Y here, you will be able to choose from several options.
56821 ++ If you assign a GID on your system and add it to the supplementary
56822 ++ groups of users you want to restrict socket access to, this patch
56823 ++ will perform up to three things, based on the option(s) you choose.
56824 ++
56825 ++config GRKERNSEC_SOCKET_ALL
56826 ++ bool "Deny any sockets to group"
56827 ++ depends on GRKERNSEC_SOCKET
56828 ++ help
56829 ++ If you say Y here, you will be able to choose a GID whose users will
56830 ++ be unable to connect to other hosts from your machine or run server
56831 ++ applications from your machine. If the sysctl option is enabled, a
56832 ++ sysctl option with name "socket_all" is created.
56833 ++
56834 ++config GRKERNSEC_SOCKET_ALL_GID
56835 ++ int "GID to deny all sockets for"
56836 ++ depends on GRKERNSEC_SOCKET_ALL
56837 ++ default 1004
56838 ++ help
56839 ++ Here you can choose the GID to disable socket access for. Remember to
56840 ++ add the users you want socket access disabled for to the GID
56841 ++ specified here. If the sysctl option is enabled, a sysctl option
56842 ++ with name "socket_all_gid" is created.
56843 ++
56844 ++config GRKERNSEC_SOCKET_CLIENT
56845 ++ bool "Deny client sockets to group"
56846 ++ depends on GRKERNSEC_SOCKET
56847 ++ help
56848 ++ If you say Y here, you will be able to choose a GID whose users will
56849 ++ be unable to connect to other hosts from your machine, but will be
56850 ++ able to run servers. If this option is enabled, all users in the group
56851 ++ you specify will have to use passive mode when initiating ftp transfers
56852 ++ from the shell on your machine. If the sysctl option is enabled, a
56853 ++ sysctl option with name "socket_client" is created.
56854 ++
56855 ++config GRKERNSEC_SOCKET_CLIENT_GID
56856 ++ int "GID to deny client sockets for"
56857 ++ depends on GRKERNSEC_SOCKET_CLIENT
56858 ++ default 1003
56859 ++ help
56860 ++ Here you can choose the GID to disable client socket access for.
56861 ++ Remember to add the users you want client socket access disabled for to
56862 ++ the GID specified here. If the sysctl option is enabled, a sysctl
56863 ++ option with name "socket_client_gid" is created.
56864 ++
56865 ++config GRKERNSEC_SOCKET_SERVER
56866 ++ bool "Deny server sockets to group"
56867 ++ depends on GRKERNSEC_SOCKET
56868 ++ help
56869 ++ If you say Y here, you will be able to choose a GID whose users will
56870 ++ be unable to run server applications from your machine. If the sysctl
56871 ++ option is enabled, a sysctl option with name "socket_server" is created.
56872 ++
56873 ++config GRKERNSEC_SOCKET_SERVER_GID
56874 ++ int "GID to deny server sockets for"
56875 ++ depends on GRKERNSEC_SOCKET_SERVER
56876 ++ default 1002
56877 ++ help
56878 ++ Here you can choose the GID to disable server socket access for.
56879 ++ Remember to add the users you want server socket access disabled for to
56880 ++ the GID specified here. If the sysctl option is enabled, a sysctl
56881 ++ option with name "socket_server_gid" is created.
56882 ++
56883 ++endmenu
56884 ++menu "Sysctl support"
56885 ++depends on GRKERNSEC && SYSCTL
56886 ++
56887 ++config GRKERNSEC_SYSCTL
56888 ++ bool "Sysctl support"
56889 ++ help
56890 ++ If you say Y here, you will be able to change the options that
56891 ++ grsecurity runs with at bootup, without having to recompile your
56892 ++ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56893 ++ to enable (1) or disable (0) various features. All the sysctl entries
56894 ++ are mutable until the "grsec_lock" entry is set to a non-zero value.
56895 ++ All features enabled in the kernel configuration are disabled at boot
56896 ++ if you do not say Y to the "Turn on features by default" option.
56897 ++ All options should be set at startup, and the grsec_lock entry should
56898 ++ be set to a non-zero value after all the options are set.
56899 ++ *THIS IS EXTREMELY IMPORTANT*
56900 ++
56901 ++config GRKERNSEC_SYSCTL_DISTRO
56902 ++ bool "Extra sysctl support for distro makers (READ HELP)"
56903 ++ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56904 ++ help
56905 ++ If you say Y here, additional sysctl options will be created
56906 ++ for features that affect processes running as root. Therefore,
56907 ++ it is critical when using this option that the grsec_lock entry be
56908 ++ enabled after boot. Only distros with prebuilt kernel packages
56909 ++ with this option enabled that can ensure grsec_lock is enabled
56910 ++ after boot should use this option.
56911 ++ *Failure to set grsec_lock after boot makes all grsec features
56912 ++ this option covers useless*
56913 ++
56914 ++ Currently this option creates the following sysctl entries:
56915 ++ "Disable Privileged I/O": "disable_priv_io"
56916 ++
56917 ++config GRKERNSEC_SYSCTL_ON
56918 ++ bool "Turn on features by default"
56919 ++ depends on GRKERNSEC_SYSCTL
56920 ++ help
56921 ++ If you say Y here, instead of having all features enabled in the
56922 ++ kernel configuration disabled at boot time, the features will be
56923 ++ enabled at boot time. It is recommended you say Y here unless
56924 ++ there is some reason you would want all sysctl-tunable features to
56925 ++ be disabled by default. As mentioned elsewhere, it is important
56926 ++ to enable the grsec_lock entry once you have finished modifying
56927 ++ the sysctl entries.
56928 ++
56929 ++endmenu
56930 ++menu "Logging Options"
56931 ++depends on GRKERNSEC
56932 ++
56933 ++config GRKERNSEC_FLOODTIME
56934 ++ int "Seconds in between log messages (minimum)"
56935 ++ default 10
56936 ++ help
56937 ++ This option allows you to enforce the number of seconds between
56938 ++ grsecurity log messages. The default should be suitable for most
56939 ++ people, however, if you choose to change it, choose a value small enough
56940 ++ to allow informative logs to be produced, but large enough to
56941 ++ prevent flooding.
56942 ++
56943 ++config GRKERNSEC_FLOODBURST
56944 ++ int "Number of messages in a burst (maximum)"
56945 ++ default 6
56946 ++ help
56947 ++ This option allows you to choose the maximum number of messages allowed
56948 ++ within the flood time interval you chose in a separate option. The
56949 ++ default should be suitable for most people, however if you find that
56950 ++ many of your logs are being interpreted as flooding, you may want to
56951 ++ raise this value.
56952 ++
56953 ++endmenu
56954 ++
56955 ++endmenu
56956 +diff -urNp linux-3.1.1/grsecurity/Makefile linux-3.1.1/grsecurity/Makefile
56957 +--- linux-3.1.1/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
56958 ++++ linux-3.1.1/grsecurity/Makefile 2011-11-16 18:40:31.000000000 -0500
56959 +@@ -0,0 +1,36 @@
56960 ++# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56961 ++# during 2001-2009 it has been completely redesigned by Brad Spengler
56962 ++# into an RBAC system
56963 ++#
56964 ++# All code in this directory and various hooks inserted throughout the kernel
56965 ++# are copyright Brad Spengler - Open Source Security, Inc., and released
56966 ++# under the GPL v2 or higher
56967 ++
56968 ++obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56969 ++ grsec_mount.o grsec_sig.o grsec_sysctl.o \
56970 ++ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56971 ++
56972 ++obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56973 ++ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56974 ++ gracl_learn.o grsec_log.o
56975 ++obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56976 ++
56977 ++ifdef CONFIG_NET
56978 ++obj-y += grsec_sock.o
56979 ++obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56980 ++endif
56981 ++
56982 ++ifndef CONFIG_GRKERNSEC
56983 ++obj-y += grsec_disabled.o
56984 ++endif
56985 ++
56986 ++ifdef CONFIG_GRKERNSEC_HIDESYM
56987 ++extra-y := grsec_hidesym.o
56988 ++$(obj)/grsec_hidesym.o:
56989 ++ @-chmod -f 500 /boot
56990 ++ @-chmod -f 500 /lib/modules
56991 ++ @-chmod -f 500 /lib64/modules
56992 ++ @-chmod -f 500 /lib32/modules
56993 ++ @-chmod -f 700 .
56994 ++ @echo ' grsec: protected kernel image paths'
56995 ++endif
56996 +diff -urNp linux-3.1.1/include/acpi/acpi_bus.h linux-3.1.1/include/acpi/acpi_bus.h
56997 +--- linux-3.1.1/include/acpi/acpi_bus.h 2011-11-11 15:19:27.000000000 -0500
56998 ++++ linux-3.1.1/include/acpi/acpi_bus.h 2011-11-16 18:39:08.000000000 -0500
56999 +@@ -107,7 +107,7 @@ struct acpi_device_ops {
57000 + acpi_op_bind bind;
57001 + acpi_op_unbind unbind;
57002 + acpi_op_notify notify;
57003 +-};
57004 ++} __no_const;
57005 +
57006 + #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
57007 +
57008 +diff -urNp linux-3.1.1/include/asm-generic/atomic-long.h linux-3.1.1/include/asm-generic/atomic-long.h
57009 +--- linux-3.1.1/include/asm-generic/atomic-long.h 2011-11-11 15:19:27.000000000 -0500
57010 ++++ linux-3.1.1/include/asm-generic/atomic-long.h 2011-11-16 18:39:08.000000000 -0500
57011 +@@ -22,6 +22,12 @@
57012 +
57013 + typedef atomic64_t atomic_long_t;
57014 +
57015 ++#ifdef CONFIG_PAX_REFCOUNT
57016 ++typedef atomic64_unchecked_t atomic_long_unchecked_t;
57017 ++#else
57018 ++typedef atomic64_t atomic_long_unchecked_t;
57019 ++#endif
57020 ++
57021 + #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
57022 +
57023 + static inline long atomic_long_read(atomic_long_t *l)
57024 +@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
57025 + return (long)atomic64_read(v);
57026 + }
57027 +
57028 ++#ifdef CONFIG_PAX_REFCOUNT
57029 ++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57030 ++{
57031 ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57032 ++
57033 ++ return (long)atomic64_read_unchecked(v);
57034 ++}
57035 ++#endif
57036 ++
57037 + static inline void atomic_long_set(atomic_long_t *l, long i)
57038 + {
57039 + atomic64_t *v = (atomic64_t *)l;
57040 +@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
57041 + atomic64_set(v, i);
57042 + }
57043 +
57044 ++#ifdef CONFIG_PAX_REFCOUNT
57045 ++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57046 ++{
57047 ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57048 ++
57049 ++ atomic64_set_unchecked(v, i);
57050 ++}
57051 ++#endif
57052 ++
57053 + static inline void atomic_long_inc(atomic_long_t *l)
57054 + {
57055 + atomic64_t *v = (atomic64_t *)l;
57056 +@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
57057 + atomic64_inc(v);
57058 + }
57059 +
57060 ++#ifdef CONFIG_PAX_REFCOUNT
57061 ++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57062 ++{
57063 ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57064 ++
57065 ++ atomic64_inc_unchecked(v);
57066 ++}
57067 ++#endif
57068 ++
57069 + static inline void atomic_long_dec(atomic_long_t *l)
57070 + {
57071 + atomic64_t *v = (atomic64_t *)l;
57072 +@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
57073 + atomic64_dec(v);
57074 + }
57075 +
57076 ++#ifdef CONFIG_PAX_REFCOUNT
57077 ++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57078 ++{
57079 ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57080 ++
57081 ++ atomic64_dec_unchecked(v);
57082 ++}
57083 ++#endif
57084 ++
57085 + static inline void atomic_long_add(long i, atomic_long_t *l)
57086 + {
57087 + atomic64_t *v = (atomic64_t *)l;
57088 +@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
57089 + atomic64_add(i, v);
57090 + }
57091 +
57092 ++#ifdef CONFIG_PAX_REFCOUNT
57093 ++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57094 ++{
57095 ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57096 ++
57097 ++ atomic64_add_unchecked(i, v);
57098 ++}
57099 ++#endif
57100 ++
57101 + static inline void atomic_long_sub(long i, atomic_long_t *l)
57102 + {
57103 + atomic64_t *v = (atomic64_t *)l;
57104 +@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
57105 + atomic64_sub(i, v);
57106 + }
57107 +
57108 ++#ifdef CONFIG_PAX_REFCOUNT
57109 ++static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57110 ++{
57111 ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57112 ++
57113 ++ atomic64_sub_unchecked(i, v);
57114 ++}
57115 ++#endif
57116 ++
57117 + static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57118 + {
57119 + atomic64_t *v = (atomic64_t *)l;
57120 +@@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
57121 + return (long)atomic64_inc_return(v);
57122 + }
57123 +
57124 ++#ifdef CONFIG_PAX_REFCOUNT
57125 ++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57126 ++{
57127 ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57128 ++
57129 ++ return (long)atomic64_inc_return_unchecked(v);
57130 ++}
57131 ++#endif
57132 ++
57133 + static inline long atomic_long_dec_return(atomic_long_t *l)
57134 + {
57135 + atomic64_t *v = (atomic64_t *)l;
57136 +@@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
57137 +
57138 + typedef atomic_t atomic_long_t;
57139 +
57140 ++#ifdef CONFIG_PAX_REFCOUNT
57141 ++typedef atomic_unchecked_t atomic_long_unchecked_t;
57142 ++#else
57143 ++typedef atomic_t atomic_long_unchecked_t;
57144 ++#endif
57145 ++
57146 + #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57147 + static inline long atomic_long_read(atomic_long_t *l)
57148 + {
57149 +@@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
57150 + return (long)atomic_read(v);
57151 + }
57152 +
57153 ++#ifdef CONFIG_PAX_REFCOUNT
57154 ++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57155 ++{
57156 ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57157 ++
57158 ++ return (long)atomic_read_unchecked(v);
57159 ++}
57160 ++#endif
57161 ++
57162 + static inline void atomic_long_set(atomic_long_t *l, long i)
57163 + {
57164 + atomic_t *v = (atomic_t *)l;
57165 +@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
57166 + atomic_set(v, i);
57167 + }
57168 +
57169 ++#ifdef CONFIG_PAX_REFCOUNT
57170 ++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57171 ++{
57172 ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57173 ++
57174 ++ atomic_set_unchecked(v, i);
57175 ++}
57176 ++#endif
57177 ++
57178 + static inline void atomic_long_inc(atomic_long_t *l)
57179 + {
57180 + atomic_t *v = (atomic_t *)l;
57181 +@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
57182 + atomic_inc(v);
57183 + }
57184 +
57185 ++#ifdef CONFIG_PAX_REFCOUNT
57186 ++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57187 ++{
57188 ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57189 ++
57190 ++ atomic_inc_unchecked(v);
57191 ++}
57192 ++#endif
57193 ++
57194 + static inline void atomic_long_dec(atomic_long_t *l)
57195 + {
57196 + atomic_t *v = (atomic_t *)l;
57197 +@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
57198 + atomic_dec(v);
57199 + }
57200 +
57201 ++#ifdef CONFIG_PAX_REFCOUNT
57202 ++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57203 ++{
57204 ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57205 ++
57206 ++ atomic_dec_unchecked(v);
57207 ++}
57208 ++#endif
57209 ++
57210 + static inline void atomic_long_add(long i, atomic_long_t *l)
57211 + {
57212 + atomic_t *v = (atomic_t *)l;
57213 +@@ -176,6 +287,15 @@ static inline void atomic_long_add(long
57214 + atomic_add(i, v);
57215 + }
57216 +
57217 ++#ifdef CONFIG_PAX_REFCOUNT
57218 ++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57219 ++{
57220 ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57221 ++
57222 ++ atomic_add_unchecked(i, v);
57223 ++}
57224 ++#endif
57225 ++
57226 + static inline void atomic_long_sub(long i, atomic_long_t *l)
57227 + {
57228 + atomic_t *v = (atomic_t *)l;
57229 +@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
57230 + atomic_sub(i, v);
57231 + }
57232 +
57233 ++#ifdef CONFIG_PAX_REFCOUNT
57234 ++static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57235 ++{
57236 ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57237 ++
57238 ++ atomic_sub_unchecked(i, v);
57239 ++}
57240 ++#endif
57241 ++
57242 + static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57243 + {
57244 + atomic_t *v = (atomic_t *)l;
57245 +@@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
57246 + return (long)atomic_inc_return(v);
57247 + }
57248 +
57249 ++#ifdef CONFIG_PAX_REFCOUNT
57250 ++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57251 ++{
57252 ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57253 ++
57254 ++ return (long)atomic_inc_return_unchecked(v);
57255 ++}
57256 ++#endif
57257 ++
57258 + static inline long atomic_long_dec_return(atomic_long_t *l)
57259 + {
57260 + atomic_t *v = (atomic_t *)l;
57261 +@@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
57262 +
57263 + #endif /* BITS_PER_LONG == 64 */
57264 +
57265 ++#ifdef CONFIG_PAX_REFCOUNT
57266 ++static inline void pax_refcount_needs_these_functions(void)
57267 ++{
57268 ++ atomic_read_unchecked((atomic_unchecked_t *)NULL);
57269 ++ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57270 ++ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57271 ++ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57272 ++ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57273 ++ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57274 ++ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57275 ++ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57276 ++ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57277 ++ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57278 ++ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57279 ++
57280 ++ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57281 ++ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57282 ++ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57283 ++ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
57284 ++ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57285 ++ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57286 ++ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57287 ++}
57288 ++#else
57289 ++#define atomic_read_unchecked(v) atomic_read(v)
57290 ++#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57291 ++#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57292 ++#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57293 ++#define atomic_inc_unchecked(v) atomic_inc(v)
57294 ++#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57295 ++#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57296 ++#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57297 ++#define atomic_dec_unchecked(v) atomic_dec(v)
57298 ++#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57299 ++#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57300 ++
57301 ++#define atomic_long_read_unchecked(v) atomic_long_read(v)
57302 ++#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57303 ++#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57304 ++#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
57305 ++#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57306 ++#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57307 ++#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57308 ++#endif
57309 ++
57310 + #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
57311 +diff -urNp linux-3.1.1/include/asm-generic/cache.h linux-3.1.1/include/asm-generic/cache.h
57312 +--- linux-3.1.1/include/asm-generic/cache.h 2011-11-11 15:19:27.000000000 -0500
57313 ++++ linux-3.1.1/include/asm-generic/cache.h 2011-11-16 18:39:08.000000000 -0500
57314 +@@ -6,7 +6,7 @@
57315 + * cache lines need to provide their own cache.h.
57316 + */
57317 +
57318 +-#define L1_CACHE_SHIFT 5
57319 +-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57320 ++#define L1_CACHE_SHIFT 5UL
57321 ++#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57322 +
57323 + #endif /* __ASM_GENERIC_CACHE_H */
57324 +diff -urNp linux-3.1.1/include/asm-generic/int-l64.h linux-3.1.1/include/asm-generic/int-l64.h
57325 +--- linux-3.1.1/include/asm-generic/int-l64.h 2011-11-11 15:19:27.000000000 -0500
57326 ++++ linux-3.1.1/include/asm-generic/int-l64.h 2011-11-16 18:39:08.000000000 -0500
57327 +@@ -46,6 +46,8 @@ typedef unsigned int u32;
57328 + typedef signed long s64;
57329 + typedef unsigned long u64;
57330 +
57331 ++typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57332 ++
57333 + #define S8_C(x) x
57334 + #define U8_C(x) x ## U
57335 + #define S16_C(x) x
57336 +diff -urNp linux-3.1.1/include/asm-generic/int-ll64.h linux-3.1.1/include/asm-generic/int-ll64.h
57337 +--- linux-3.1.1/include/asm-generic/int-ll64.h 2011-11-11 15:19:27.000000000 -0500
57338 ++++ linux-3.1.1/include/asm-generic/int-ll64.h 2011-11-16 18:39:08.000000000 -0500
57339 +@@ -51,6 +51,8 @@ typedef unsigned int u32;
57340 + typedef signed long long s64;
57341 + typedef unsigned long long u64;
57342 +
57343 ++typedef unsigned long long intoverflow_t;
57344 ++
57345 + #define S8_C(x) x
57346 + #define U8_C(x) x ## U
57347 + #define S16_C(x) x
57348 +diff -urNp linux-3.1.1/include/asm-generic/kmap_types.h linux-3.1.1/include/asm-generic/kmap_types.h
57349 +--- linux-3.1.1/include/asm-generic/kmap_types.h 2011-11-11 15:19:27.000000000 -0500
57350 ++++ linux-3.1.1/include/asm-generic/kmap_types.h 2011-11-16 18:39:08.000000000 -0500
57351 +@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57352 + KMAP_D(17) KM_NMI,
57353 + KMAP_D(18) KM_NMI_PTE,
57354 + KMAP_D(19) KM_KDB,
57355 ++KMAP_D(20) KM_CLEARPAGE,
57356 + /*
57357 + * Remember to update debug_kmap_atomic() when adding new kmap types!
57358 + */
57359 +-KMAP_D(20) KM_TYPE_NR
57360 ++KMAP_D(21) KM_TYPE_NR
57361 + };
57362 +
57363 + #undef KMAP_D
57364 +diff -urNp linux-3.1.1/include/asm-generic/pgtable.h linux-3.1.1/include/asm-generic/pgtable.h
57365 +--- linux-3.1.1/include/asm-generic/pgtable.h 2011-11-11 15:19:27.000000000 -0500
57366 ++++ linux-3.1.1/include/asm-generic/pgtable.h 2011-11-16 18:39:08.000000000 -0500
57367 +@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57368 + #endif /* __HAVE_ARCH_PMD_WRITE */
57369 + #endif
57370 +
57371 ++#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57372 ++static inline unsigned long pax_open_kernel(void) { return 0; }
57373 ++#endif
57374 ++
57375 ++#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57376 ++static inline unsigned long pax_close_kernel(void) { return 0; }
57377 ++#endif
57378 ++
57379 + #endif /* !__ASSEMBLY__ */
57380 +
57381 + #endif /* _ASM_GENERIC_PGTABLE_H */
57382 +diff -urNp linux-3.1.1/include/asm-generic/pgtable-nopmd.h linux-3.1.1/include/asm-generic/pgtable-nopmd.h
57383 +--- linux-3.1.1/include/asm-generic/pgtable-nopmd.h 2011-11-11 15:19:27.000000000 -0500
57384 ++++ linux-3.1.1/include/asm-generic/pgtable-nopmd.h 2011-11-16 18:39:08.000000000 -0500
57385 +@@ -1,14 +1,19 @@
57386 + #ifndef _PGTABLE_NOPMD_H
57387 + #define _PGTABLE_NOPMD_H
57388 +
57389 +-#ifndef __ASSEMBLY__
57390 +-
57391 + #include <asm-generic/pgtable-nopud.h>
57392 +
57393 +-struct mm_struct;
57394 +-
57395 + #define __PAGETABLE_PMD_FOLDED
57396 +
57397 ++#define PMD_SHIFT PUD_SHIFT
57398 ++#define PTRS_PER_PMD 1
57399 ++#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57400 ++#define PMD_MASK (~(PMD_SIZE-1))
57401 ++
57402 ++#ifndef __ASSEMBLY__
57403 ++
57404 ++struct mm_struct;
57405 ++
57406 + /*
57407 + * Having the pmd type consist of a pud gets the size right, and allows
57408 + * us to conceptually access the pud entry that this pmd is folded into
57409 +@@ -16,11 +21,6 @@ struct mm_struct;
57410 + */
57411 + typedef struct { pud_t pud; } pmd_t;
57412 +
57413 +-#define PMD_SHIFT PUD_SHIFT
57414 +-#define PTRS_PER_PMD 1
57415 +-#define PMD_SIZE (1UL << PMD_SHIFT)
57416 +-#define PMD_MASK (~(PMD_SIZE-1))
57417 +-
57418 + /*
57419 + * The "pud_xxx()" functions here are trivial for a folded two-level
57420 + * setup: the pmd is never bad, and a pmd always exists (as it's folded
57421 +diff -urNp linux-3.1.1/include/asm-generic/pgtable-nopud.h linux-3.1.1/include/asm-generic/pgtable-nopud.h
57422 +--- linux-3.1.1/include/asm-generic/pgtable-nopud.h 2011-11-11 15:19:27.000000000 -0500
57423 ++++ linux-3.1.1/include/asm-generic/pgtable-nopud.h 2011-11-16 18:39:08.000000000 -0500
57424 +@@ -1,10 +1,15 @@
57425 + #ifndef _PGTABLE_NOPUD_H
57426 + #define _PGTABLE_NOPUD_H
57427 +
57428 +-#ifndef __ASSEMBLY__
57429 +-
57430 + #define __PAGETABLE_PUD_FOLDED
57431 +
57432 ++#define PUD_SHIFT PGDIR_SHIFT
57433 ++#define PTRS_PER_PUD 1
57434 ++#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57435 ++#define PUD_MASK (~(PUD_SIZE-1))
57436 ++
57437 ++#ifndef __ASSEMBLY__
57438 ++
57439 + /*
57440 + * Having the pud type consist of a pgd gets the size right, and allows
57441 + * us to conceptually access the pgd entry that this pud is folded into
57442 +@@ -12,11 +17,6 @@
57443 + */
57444 + typedef struct { pgd_t pgd; } pud_t;
57445 +
57446 +-#define PUD_SHIFT PGDIR_SHIFT
57447 +-#define PTRS_PER_PUD 1
57448 +-#define PUD_SIZE (1UL << PUD_SHIFT)
57449 +-#define PUD_MASK (~(PUD_SIZE-1))
57450 +-
57451 + /*
57452 + * The "pgd_xxx()" functions here are trivial for a folded two-level
57453 + * setup: the pud is never bad, and a pud always exists (as it's folded
57454 +diff -urNp linux-3.1.1/include/asm-generic/vmlinux.lds.h linux-3.1.1/include/asm-generic/vmlinux.lds.h
57455 +--- linux-3.1.1/include/asm-generic/vmlinux.lds.h 2011-11-11 15:19:27.000000000 -0500
57456 ++++ linux-3.1.1/include/asm-generic/vmlinux.lds.h 2011-11-16 18:39:08.000000000 -0500
57457 +@@ -217,6 +217,7 @@
57458 + .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57459 + VMLINUX_SYMBOL(__start_rodata) = .; \
57460 + *(.rodata) *(.rodata.*) \
57461 ++ *(.data..read_only) \
57462 + *(__vermagic) /* Kernel version magic */ \
57463 + . = ALIGN(8); \
57464 + VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57465 +@@ -723,17 +724,18 @@
57466 + * section in the linker script will go there too. @phdr should have
57467 + * a leading colon.
57468 + *
57469 +- * Note that this macros defines __per_cpu_load as an absolute symbol.
57470 ++ * Note that this macros defines per_cpu_load as an absolute symbol.
57471 + * If there is no need to put the percpu section at a predetermined
57472 + * address, use PERCPU_SECTION.
57473 + */
57474 + #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57475 +- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57476 +- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57477 ++ per_cpu_load = .; \
57478 ++ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57479 + - LOAD_OFFSET) { \
57480 ++ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57481 + PERCPU_INPUT(cacheline) \
57482 + } phdr \
57483 +- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57484 ++ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57485 +
57486 + /**
57487 + * PERCPU_SECTION - define output section for percpu area, simple version
57488 +diff -urNp linux-3.1.1/include/drm/drm_crtc_helper.h linux-3.1.1/include/drm/drm_crtc_helper.h
57489 +--- linux-3.1.1/include/drm/drm_crtc_helper.h 2011-11-11 15:19:27.000000000 -0500
57490 ++++ linux-3.1.1/include/drm/drm_crtc_helper.h 2011-11-16 18:39:08.000000000 -0500
57491 +@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57492 +
57493 + /* disable crtc when not in use - more explicit than dpms off */
57494 + void (*disable)(struct drm_crtc *crtc);
57495 +-};
57496 ++} __no_const;
57497 +
57498 + struct drm_encoder_helper_funcs {
57499 + void (*dpms)(struct drm_encoder *encoder, int mode);
57500 +@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57501 + struct drm_connector *connector);
57502 + /* disable encoder when not in use - more explicit than dpms off */
57503 + void (*disable)(struct drm_encoder *encoder);
57504 +-};
57505 ++} __no_const;
57506 +
57507 + struct drm_connector_helper_funcs {
57508 + int (*get_modes)(struct drm_connector *connector);
57509 +diff -urNp linux-3.1.1/include/drm/drmP.h linux-3.1.1/include/drm/drmP.h
57510 +--- linux-3.1.1/include/drm/drmP.h 2011-11-11 15:19:27.000000000 -0500
57511 ++++ linux-3.1.1/include/drm/drmP.h 2011-11-16 18:39:08.000000000 -0500
57512 +@@ -73,6 +73,7 @@
57513 + #include <linux/workqueue.h>
57514 + #include <linux/poll.h>
57515 + #include <asm/pgalloc.h>
57516 ++#include <asm/local.h>
57517 + #include "drm.h"
57518 +
57519 + #include <linux/idr.h>
57520 +@@ -1035,7 +1036,7 @@ struct drm_device {
57521 +
57522 + /** \name Usage Counters */
57523 + /*@{ */
57524 +- int open_count; /**< Outstanding files open */
57525 ++ local_t open_count; /**< Outstanding files open */
57526 + atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57527 + atomic_t vma_count; /**< Outstanding vma areas open */
57528 + int buf_use; /**< Buffers in use -- cannot alloc */
57529 +@@ -1046,7 +1047,7 @@ struct drm_device {
57530 + /*@{ */
57531 + unsigned long counters;
57532 + enum drm_stat_type types[15];
57533 +- atomic_t counts[15];
57534 ++ atomic_unchecked_t counts[15];
57535 + /*@} */
57536 +
57537 + struct list_head filelist;
57538 +diff -urNp linux-3.1.1/include/drm/ttm/ttm_memory.h linux-3.1.1/include/drm/ttm/ttm_memory.h
57539 +--- linux-3.1.1/include/drm/ttm/ttm_memory.h 2011-11-11 15:19:27.000000000 -0500
57540 ++++ linux-3.1.1/include/drm/ttm/ttm_memory.h 2011-11-16 18:39:08.000000000 -0500
57541 +@@ -47,7 +47,7 @@
57542 +
57543 + struct ttm_mem_shrink {
57544 + int (*do_shrink) (struct ttm_mem_shrink *);
57545 +-};
57546 ++} __no_const;
57547 +
57548 + /**
57549 + * struct ttm_mem_global - Global memory accounting structure.
57550 +diff -urNp linux-3.1.1/include/linux/a.out.h linux-3.1.1/include/linux/a.out.h
57551 +--- linux-3.1.1/include/linux/a.out.h 2011-11-11 15:19:27.000000000 -0500
57552 ++++ linux-3.1.1/include/linux/a.out.h 2011-11-16 18:39:08.000000000 -0500
57553 +@@ -39,6 +39,14 @@ enum machine_type {
57554 + M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57555 + };
57556 +
57557 ++/* Constants for the N_FLAGS field */
57558 ++#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57559 ++#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57560 ++#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57561 ++#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57562 ++/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57563 ++#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57564 ++
57565 + #if !defined (N_MAGIC)
57566 + #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57567 + #endif
57568 +diff -urNp linux-3.1.1/include/linux/atmdev.h linux-3.1.1/include/linux/atmdev.h
57569 +--- linux-3.1.1/include/linux/atmdev.h 2011-11-11 15:19:27.000000000 -0500
57570 ++++ linux-3.1.1/include/linux/atmdev.h 2011-11-16 18:39:08.000000000 -0500
57571 +@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57572 + #endif
57573 +
57574 + struct k_atm_aal_stats {
57575 +-#define __HANDLE_ITEM(i) atomic_t i
57576 ++#define __HANDLE_ITEM(i) atomic_unchecked_t i
57577 + __AAL_STAT_ITEMS
57578 + #undef __HANDLE_ITEM
57579 + };
57580 +diff -urNp linux-3.1.1/include/linux/binfmts.h linux-3.1.1/include/linux/binfmts.h
57581 +--- linux-3.1.1/include/linux/binfmts.h 2011-11-11 15:19:27.000000000 -0500
57582 ++++ linux-3.1.1/include/linux/binfmts.h 2011-11-16 18:39:08.000000000 -0500
57583 +@@ -88,6 +88,7 @@ struct linux_binfmt {
57584 + int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57585 + int (*load_shlib)(struct file *);
57586 + int (*core_dump)(struct coredump_params *cprm);
57587 ++ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57588 + unsigned long min_coredump; /* minimal dump size */
57589 + };
57590 +
57591 +diff -urNp linux-3.1.1/include/linux/blkdev.h linux-3.1.1/include/linux/blkdev.h
57592 +--- linux-3.1.1/include/linux/blkdev.h 2011-11-11 15:19:27.000000000 -0500
57593 ++++ linux-3.1.1/include/linux/blkdev.h 2011-11-16 18:39:08.000000000 -0500
57594 +@@ -1321,7 +1321,7 @@ struct block_device_operations {
57595 + /* this callback is with swap_lock and sometimes page table lock held */
57596 + void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57597 + struct module *owner;
57598 +-};
57599 ++} __do_const;
57600 +
57601 + extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57602 + unsigned long);
57603 +diff -urNp linux-3.1.1/include/linux/blktrace_api.h linux-3.1.1/include/linux/blktrace_api.h
57604 +--- linux-3.1.1/include/linux/blktrace_api.h 2011-11-11 15:19:27.000000000 -0500
57605 ++++ linux-3.1.1/include/linux/blktrace_api.h 2011-11-16 18:39:08.000000000 -0500
57606 +@@ -162,7 +162,7 @@ struct blk_trace {
57607 + struct dentry *dir;
57608 + struct dentry *dropped_file;
57609 + struct dentry *msg_file;
57610 +- atomic_t dropped;
57611 ++ atomic_unchecked_t dropped;
57612 + };
57613 +
57614 + extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57615 +diff -urNp linux-3.1.1/include/linux/byteorder/little_endian.h linux-3.1.1/include/linux/byteorder/little_endian.h
57616 +--- linux-3.1.1/include/linux/byteorder/little_endian.h 2011-11-11 15:19:27.000000000 -0500
57617 ++++ linux-3.1.1/include/linux/byteorder/little_endian.h 2011-11-16 18:39:08.000000000 -0500
57618 +@@ -42,51 +42,51 @@
57619 +
57620 + static inline __le64 __cpu_to_le64p(const __u64 *p)
57621 + {
57622 +- return (__force __le64)*p;
57623 ++ return (__force const __le64)*p;
57624 + }
57625 + static inline __u64 __le64_to_cpup(const __le64 *p)
57626 + {
57627 +- return (__force __u64)*p;
57628 ++ return (__force const __u64)*p;
57629 + }
57630 + static inline __le32 __cpu_to_le32p(const __u32 *p)
57631 + {
57632 +- return (__force __le32)*p;
57633 ++ return (__force const __le32)*p;
57634 + }
57635 + static inline __u32 __le32_to_cpup(const __le32 *p)
57636 + {
57637 +- return (__force __u32)*p;
57638 ++ return (__force const __u32)*p;
57639 + }
57640 + static inline __le16 __cpu_to_le16p(const __u16 *p)
57641 + {
57642 +- return (__force __le16)*p;
57643 ++ return (__force const __le16)*p;
57644 + }
57645 + static inline __u16 __le16_to_cpup(const __le16 *p)
57646 + {
57647 +- return (__force __u16)*p;
57648 ++ return (__force const __u16)*p;
57649 + }
57650 + static inline __be64 __cpu_to_be64p(const __u64 *p)
57651 + {
57652 +- return (__force __be64)__swab64p(p);
57653 ++ return (__force const __be64)__swab64p(p);
57654 + }
57655 + static inline __u64 __be64_to_cpup(const __be64 *p)
57656 + {
57657 +- return __swab64p((__u64 *)p);
57658 ++ return __swab64p((const __u64 *)p);
57659 + }
57660 + static inline __be32 __cpu_to_be32p(const __u32 *p)
57661 + {
57662 +- return (__force __be32)__swab32p(p);
57663 ++ return (__force const __be32)__swab32p(p);
57664 + }
57665 + static inline __u32 __be32_to_cpup(const __be32 *p)
57666 + {
57667 +- return __swab32p((__u32 *)p);
57668 ++ return __swab32p((const __u32 *)p);
57669 + }
57670 + static inline __be16 __cpu_to_be16p(const __u16 *p)
57671 + {
57672 +- return (__force __be16)__swab16p(p);
57673 ++ return (__force const __be16)__swab16p(p);
57674 + }
57675 + static inline __u16 __be16_to_cpup(const __be16 *p)
57676 + {
57677 +- return __swab16p((__u16 *)p);
57678 ++ return __swab16p((const __u16 *)p);
57679 + }
57680 + #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57681 + #define __le64_to_cpus(x) do { (void)(x); } while (0)
57682 +diff -urNp linux-3.1.1/include/linux/cache.h linux-3.1.1/include/linux/cache.h
57683 +--- linux-3.1.1/include/linux/cache.h 2011-11-11 15:19:27.000000000 -0500
57684 ++++ linux-3.1.1/include/linux/cache.h 2011-11-16 18:39:08.000000000 -0500
57685 +@@ -16,6 +16,10 @@
57686 + #define __read_mostly
57687 + #endif
57688 +
57689 ++#ifndef __read_only
57690 ++#define __read_only __read_mostly
57691 ++#endif
57692 ++
57693 + #ifndef ____cacheline_aligned
57694 + #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57695 + #endif
57696 +diff -urNp linux-3.1.1/include/linux/capability.h linux-3.1.1/include/linux/capability.h
57697 +--- linux-3.1.1/include/linux/capability.h 2011-11-11 15:19:27.000000000 -0500
57698 ++++ linux-3.1.1/include/linux/capability.h 2011-11-16 18:40:31.000000000 -0500
57699 +@@ -547,6 +547,9 @@ extern bool capable(int cap);
57700 + extern bool ns_capable(struct user_namespace *ns, int cap);
57701 + extern bool task_ns_capable(struct task_struct *t, int cap);
57702 + extern bool nsown_capable(int cap);
57703 ++extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
57704 ++extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
57705 ++extern bool capable_nolog(int cap);
57706 +
57707 + /* audit system wants to get cap info from files as well */
57708 + extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
57709 +diff -urNp linux-3.1.1/include/linux/cleancache.h linux-3.1.1/include/linux/cleancache.h
57710 +--- linux-3.1.1/include/linux/cleancache.h 2011-11-11 15:19:27.000000000 -0500
57711 ++++ linux-3.1.1/include/linux/cleancache.h 2011-11-16 18:39:08.000000000 -0500
57712 +@@ -31,7 +31,7 @@ struct cleancache_ops {
57713 + void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
57714 + void (*flush_inode)(int, struct cleancache_filekey);
57715 + void (*flush_fs)(int);
57716 +-};
57717 ++} __no_const;
57718 +
57719 + extern struct cleancache_ops
57720 + cleancache_register_ops(struct cleancache_ops *ops);
57721 +diff -urNp linux-3.1.1/include/linux/compiler-gcc4.h linux-3.1.1/include/linux/compiler-gcc4.h
57722 +--- linux-3.1.1/include/linux/compiler-gcc4.h 2011-11-11 15:19:27.000000000 -0500
57723 ++++ linux-3.1.1/include/linux/compiler-gcc4.h 2011-11-16 18:39:08.000000000 -0500
57724 +@@ -31,6 +31,12 @@
57725 +
57726 +
57727 + #if __GNUC_MINOR__ >= 5
57728 ++
57729 ++#ifdef CONSTIFY_PLUGIN
57730 ++#define __no_const __attribute__((no_const))
57731 ++#define __do_const __attribute__((do_const))
57732 ++#endif
57733 ++
57734 + /*
57735 + * Mark a position in code as unreachable. This can be used to
57736 + * suppress control flow warnings after asm blocks that transfer
57737 +@@ -46,6 +52,11 @@
57738 + #define __noclone __attribute__((__noclone__))
57739 +
57740 + #endif
57741 ++
57742 ++#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57743 ++#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57744 ++#define __bos0(ptr) __bos((ptr), 0)
57745 ++#define __bos1(ptr) __bos((ptr), 1)
57746 + #endif
57747 +
57748 + #if __GNUC_MINOR__ > 0
57749 +diff -urNp linux-3.1.1/include/linux/compiler.h linux-3.1.1/include/linux/compiler.h
57750 +--- linux-3.1.1/include/linux/compiler.h 2011-11-11 15:19:27.000000000 -0500
57751 ++++ linux-3.1.1/include/linux/compiler.h 2011-11-16 18:39:08.000000000 -0500
57752 +@@ -5,31 +5,62 @@
57753 +
57754 + #ifdef __CHECKER__
57755 + # define __user __attribute__((noderef, address_space(1)))
57756 ++# define __force_user __force __user
57757 + # define __kernel __attribute__((address_space(0)))
57758 ++# define __force_kernel __force __kernel
57759 + # define __safe __attribute__((safe))
57760 + # define __force __attribute__((force))
57761 + # define __nocast __attribute__((nocast))
57762 + # define __iomem __attribute__((noderef, address_space(2)))
57763 ++# define __force_iomem __force __iomem
57764 + # define __acquires(x) __attribute__((context(x,0,1)))
57765 + # define __releases(x) __attribute__((context(x,1,0)))
57766 + # define __acquire(x) __context__(x,1)
57767 + # define __release(x) __context__(x,-1)
57768 + # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
57769 + # define __percpu __attribute__((noderef, address_space(3)))
57770 ++# define __force_percpu __force __percpu
57771 + #ifdef CONFIG_SPARSE_RCU_POINTER
57772 + # define __rcu __attribute__((noderef, address_space(4)))
57773 ++# define __force_rcu __force __rcu
57774 + #else
57775 + # define __rcu
57776 ++# define __force_rcu
57777 + #endif
57778 + extern void __chk_user_ptr(const volatile void __user *);
57779 + extern void __chk_io_ptr(const volatile void __iomem *);
57780 ++#elif defined(CHECKER_PLUGIN)
57781 ++//# define __user
57782 ++//# define __force_user
57783 ++//# define __kernel
57784 ++//# define __force_kernel
57785 ++# define __safe
57786 ++# define __force
57787 ++# define __nocast
57788 ++# define __iomem
57789 ++# define __force_iomem
57790 ++# define __chk_user_ptr(x) (void)0
57791 ++# define __chk_io_ptr(x) (void)0
57792 ++# define __builtin_warning(x, y...) (1)
57793 ++# define __acquires(x)
57794 ++# define __releases(x)
57795 ++# define __acquire(x) (void)0
57796 ++# define __release(x) (void)0
57797 ++# define __cond_lock(x,c) (c)
57798 ++# define __percpu
57799 ++# define __force_percpu
57800 ++# define __rcu
57801 ++# define __force_rcu
57802 + #else
57803 + # define __user
57804 ++# define __force_user
57805 + # define __kernel
57806 ++# define __force_kernel
57807 + # define __safe
57808 + # define __force
57809 + # define __nocast
57810 + # define __iomem
57811 ++# define __force_iomem
57812 + # define __chk_user_ptr(x) (void)0
57813 + # define __chk_io_ptr(x) (void)0
57814 + # define __builtin_warning(x, y...) (1)
57815 +@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile
57816 + # define __release(x) (void)0
57817 + # define __cond_lock(x,c) (c)
57818 + # define __percpu
57819 ++# define __force_percpu
57820 + # define __rcu
57821 ++# define __force_rcu
57822 + #endif
57823 +
57824 + #ifdef __KERNEL__
57825 +@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_
57826 + # define __attribute_const__ /* unimplemented */
57827 + #endif
57828 +
57829 ++#ifndef __no_const
57830 ++# define __no_const
57831 ++#endif
57832 ++
57833 ++#ifndef __do_const
57834 ++# define __do_const
57835 ++#endif
57836 ++
57837 + /*
57838 + * Tell gcc if a function is cold. The compiler will assume any path
57839 + * directly leading to the call is unlikely.
57840 +@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_
57841 + #define __cold
57842 + #endif
57843 +
57844 ++#ifndef __alloc_size
57845 ++#define __alloc_size(...)
57846 ++#endif
57847 ++
57848 ++#ifndef __bos
57849 ++#define __bos(ptr, arg)
57850 ++#endif
57851 ++
57852 ++#ifndef __bos0
57853 ++#define __bos0(ptr)
57854 ++#endif
57855 ++
57856 ++#ifndef __bos1
57857 ++#define __bos1(ptr)
57858 ++#endif
57859 ++
57860 + /* Simple shorthand for a section definition */
57861 + #ifndef __section
57862 + # define __section(S) __attribute__ ((__section__(#S)))
57863 +@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_
57864 + * use is to mediate communication between process-level code and irq/NMI
57865 + * handlers, all running on the same CPU.
57866 + */
57867 +-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57868 ++#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57869 ++#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57870 +
57871 + #endif /* __LINUX_COMPILER_H */
57872 +diff -urNp linux-3.1.1/include/linux/cpuset.h linux-3.1.1/include/linux/cpuset.h
57873 +--- linux-3.1.1/include/linux/cpuset.h 2011-11-11 15:19:27.000000000 -0500
57874 ++++ linux-3.1.1/include/linux/cpuset.h 2011-11-16 18:39:08.000000000 -0500
57875 +@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
57876 + * nodemask.
57877 + */
57878 + smp_mb();
57879 +- --ACCESS_ONCE(current->mems_allowed_change_disable);
57880 ++ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
57881 + }
57882 +
57883 + static inline void set_mems_allowed(nodemask_t nodemask)
57884 +diff -urNp linux-3.1.1/include/linux/crypto.h linux-3.1.1/include/linux/crypto.h
57885 +--- linux-3.1.1/include/linux/crypto.h 2011-11-11 15:19:27.000000000 -0500
57886 ++++ linux-3.1.1/include/linux/crypto.h 2011-11-16 18:39:08.000000000 -0500
57887 +@@ -361,7 +361,7 @@ struct cipher_tfm {
57888 + const u8 *key, unsigned int keylen);
57889 + void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57890 + void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57891 +-};
57892 ++} __no_const;
57893 +
57894 + struct hash_tfm {
57895 + int (*init)(struct hash_desc *desc);
57896 +@@ -382,13 +382,13 @@ struct compress_tfm {
57897 + int (*cot_decompress)(struct crypto_tfm *tfm,
57898 + const u8 *src, unsigned int slen,
57899 + u8 *dst, unsigned int *dlen);
57900 +-};
57901 ++} __no_const;
57902 +
57903 + struct rng_tfm {
57904 + int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57905 + unsigned int dlen);
57906 + int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57907 +-};
57908 ++} __no_const;
57909 +
57910 + #define crt_ablkcipher crt_u.ablkcipher
57911 + #define crt_aead crt_u.aead
57912 +diff -urNp linux-3.1.1/include/linux/decompress/mm.h linux-3.1.1/include/linux/decompress/mm.h
57913 +--- linux-3.1.1/include/linux/decompress/mm.h 2011-11-11 15:19:27.000000000 -0500
57914 ++++ linux-3.1.1/include/linux/decompress/mm.h 2011-11-16 18:39:08.000000000 -0500
57915 +@@ -77,7 +77,7 @@ static void free(void *where)
57916 + * warnings when not needed (indeed large_malloc / large_free are not
57917 + * needed by inflate */
57918 +
57919 +-#define malloc(a) kmalloc(a, GFP_KERNEL)
57920 ++#define malloc(a) kmalloc((a), GFP_KERNEL)
57921 + #define free(a) kfree(a)
57922 +
57923 + #define large_malloc(a) vmalloc(a)
57924 +diff -urNp linux-3.1.1/include/linux/dma-mapping.h linux-3.1.1/include/linux/dma-mapping.h
57925 +--- linux-3.1.1/include/linux/dma-mapping.h 2011-11-11 15:19:27.000000000 -0500
57926 ++++ linux-3.1.1/include/linux/dma-mapping.h 2011-11-16 18:39:08.000000000 -0500
57927 +@@ -42,7 +42,7 @@ struct dma_map_ops {
57928 + int (*dma_supported)(struct device *dev, u64 mask);
57929 + int (*set_dma_mask)(struct device *dev, u64 mask);
57930 + int is_phys;
57931 +-};
57932 ++} __do_const;
57933 +
57934 + #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57935 +
57936 +diff -urNp linux-3.1.1/include/linux/efi.h linux-3.1.1/include/linux/efi.h
57937 +--- linux-3.1.1/include/linux/efi.h 2011-11-11 15:19:27.000000000 -0500
57938 ++++ linux-3.1.1/include/linux/efi.h 2011-11-16 18:39:08.000000000 -0500
57939 +@@ -446,7 +446,7 @@ struct efivar_operations {
57940 + efi_get_variable_t *get_variable;
57941 + efi_get_next_variable_t *get_next_variable;
57942 + efi_set_variable_t *set_variable;
57943 +-};
57944 ++} __no_const;
57945 +
57946 + struct efivars {
57947 + /*
57948 +diff -urNp linux-3.1.1/include/linux/elf.h linux-3.1.1/include/linux/elf.h
57949 +--- linux-3.1.1/include/linux/elf.h 2011-11-11 15:19:27.000000000 -0500
57950 ++++ linux-3.1.1/include/linux/elf.h 2011-11-16 18:39:08.000000000 -0500
57951 +@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57952 + #define PT_GNU_EH_FRAME 0x6474e550
57953 +
57954 + #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57955 ++#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57956 ++
57957 ++#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57958 ++
57959 ++/* Constants for the e_flags field */
57960 ++#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57961 ++#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57962 ++#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57963 ++#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57964 ++/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57965 ++#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57966 +
57967 + /*
57968 + * Extended Numbering
57969 +@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
57970 + #define DT_DEBUG 21
57971 + #define DT_TEXTREL 22
57972 + #define DT_JMPREL 23
57973 ++#define DT_FLAGS 30
57974 ++ #define DF_TEXTREL 0x00000004
57975 + #define DT_ENCODING 32
57976 + #define OLD_DT_LOOS 0x60000000
57977 + #define DT_LOOS 0x6000000d
57978 +@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
57979 + #define PF_W 0x2
57980 + #define PF_X 0x1
57981 +
57982 ++#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57983 ++#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57984 ++#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57985 ++#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57986 ++#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57987 ++#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57988 ++/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57989 ++/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57990 ++#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57991 ++#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57992 ++#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57993 ++#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57994 ++
57995 + typedef struct elf32_phdr{
57996 + Elf32_Word p_type;
57997 + Elf32_Off p_offset;
57998 +@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
57999 + #define EI_OSABI 7
58000 + #define EI_PAD 8
58001 +
58002 ++#define EI_PAX 14
58003 ++
58004 + #define ELFMAG0 0x7f /* EI_MAG */
58005 + #define ELFMAG1 'E'
58006 + #define ELFMAG2 'L'
58007 +@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
58008 + #define elf_note elf32_note
58009 + #define elf_addr_t Elf32_Off
58010 + #define Elf_Half Elf32_Half
58011 ++#define elf_dyn Elf32_Dyn
58012 +
58013 + #else
58014 +
58015 +@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
58016 + #define elf_note elf64_note
58017 + #define elf_addr_t Elf64_Off
58018 + #define Elf_Half Elf64_Half
58019 ++#define elf_dyn Elf64_Dyn
58020 +
58021 + #endif
58022 +
58023 +diff -urNp linux-3.1.1/include/linux/firewire.h linux-3.1.1/include/linux/firewire.h
58024 +--- linux-3.1.1/include/linux/firewire.h 2011-11-11 15:19:27.000000000 -0500
58025 ++++ linux-3.1.1/include/linux/firewire.h 2011-11-16 18:39:08.000000000 -0500
58026 +@@ -428,7 +428,7 @@ struct fw_iso_context {
58027 + union {
58028 + fw_iso_callback_t sc;
58029 + fw_iso_mc_callback_t mc;
58030 +- } callback;
58031 ++ } __no_const callback;
58032 + void *callback_data;
58033 + };
58034 +
58035 +diff -urNp linux-3.1.1/include/linux/fscache-cache.h linux-3.1.1/include/linux/fscache-cache.h
58036 +--- linux-3.1.1/include/linux/fscache-cache.h 2011-11-11 15:19:27.000000000 -0500
58037 ++++ linux-3.1.1/include/linux/fscache-cache.h 2011-11-16 18:39:08.000000000 -0500
58038 +@@ -102,7 +102,7 @@ struct fscache_operation {
58039 + fscache_operation_release_t release;
58040 + };
58041 +
58042 +-extern atomic_t fscache_op_debug_id;
58043 ++extern atomic_unchecked_t fscache_op_debug_id;
58044 + extern void fscache_op_work_func(struct work_struct *work);
58045 +
58046 + extern void fscache_enqueue_operation(struct fscache_operation *);
58047 +@@ -122,7 +122,7 @@ static inline void fscache_operation_ini
58048 + {
58049 + INIT_WORK(&op->work, fscache_op_work_func);
58050 + atomic_set(&op->usage, 1);
58051 +- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58052 ++ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58053 + op->processor = processor;
58054 + op->release = release;
58055 + INIT_LIST_HEAD(&op->pend_link);
58056 +diff -urNp linux-3.1.1/include/linux/fs.h linux-3.1.1/include/linux/fs.h
58057 +--- linux-3.1.1/include/linux/fs.h 2011-11-11 15:19:27.000000000 -0500
58058 ++++ linux-3.1.1/include/linux/fs.h 2011-11-16 23:39:39.000000000 -0500
58059 +@@ -1588,7 +1588,8 @@ struct file_operations {
58060 + int (*setlease)(struct file *, long, struct file_lock **);
58061 + long (*fallocate)(struct file *file, int mode, loff_t offset,
58062 + loff_t len);
58063 +-};
58064 ++} __do_const;
58065 ++typedef struct file_operations __no_const file_operations_no_const;
58066 +
58067 + struct inode_operations {
58068 + struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
58069 +diff -urNp linux-3.1.1/include/linux/fsnotify.h linux-3.1.1/include/linux/fsnotify.h
58070 +--- linux-3.1.1/include/linux/fsnotify.h 2011-11-11 15:19:27.000000000 -0500
58071 ++++ linux-3.1.1/include/linux/fsnotify.h 2011-11-16 18:39:08.000000000 -0500
58072 +@@ -314,7 +314,7 @@ static inline void fsnotify_change(struc
58073 + */
58074 + static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
58075 + {
58076 +- return kstrdup(name, GFP_KERNEL);
58077 ++ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
58078 + }
58079 +
58080 + /*
58081 +diff -urNp linux-3.1.1/include/linux/fs_struct.h linux-3.1.1/include/linux/fs_struct.h
58082 +--- linux-3.1.1/include/linux/fs_struct.h 2011-11-11 15:19:27.000000000 -0500
58083 ++++ linux-3.1.1/include/linux/fs_struct.h 2011-11-16 18:39:08.000000000 -0500
58084 +@@ -6,7 +6,7 @@
58085 + #include <linux/seqlock.h>
58086 +
58087 + struct fs_struct {
58088 +- int users;
58089 ++ atomic_t users;
58090 + spinlock_t lock;
58091 + seqcount_t seq;
58092 + int umask;
58093 +diff -urNp linux-3.1.1/include/linux/ftrace_event.h linux-3.1.1/include/linux/ftrace_event.h
58094 +--- linux-3.1.1/include/linux/ftrace_event.h 2011-11-11 15:19:27.000000000 -0500
58095 ++++ linux-3.1.1/include/linux/ftrace_event.h 2011-11-16 18:39:08.000000000 -0500
58096 +@@ -97,7 +97,7 @@ struct trace_event_functions {
58097 + trace_print_func raw;
58098 + trace_print_func hex;
58099 + trace_print_func binary;
58100 +-};
58101 ++} __no_const;
58102 +
58103 + struct trace_event {
58104 + struct hlist_node node;
58105 +@@ -252,7 +252,7 @@ extern int trace_define_field(struct ftr
58106 + extern int trace_add_event_call(struct ftrace_event_call *call);
58107 + extern void trace_remove_event_call(struct ftrace_event_call *call);
58108 +
58109 +-#define is_signed_type(type) (((type)(-1)) < 0)
58110 ++#define is_signed_type(type) (((type)(-1)) < (type)1)
58111 +
58112 + int trace_set_clr_event(const char *system, const char *event, int set);
58113 +
58114 +diff -urNp linux-3.1.1/include/linux/genhd.h linux-3.1.1/include/linux/genhd.h
58115 +--- linux-3.1.1/include/linux/genhd.h 2011-11-11 15:19:27.000000000 -0500
58116 ++++ linux-3.1.1/include/linux/genhd.h 2011-11-16 18:39:08.000000000 -0500
58117 +@@ -184,7 +184,7 @@ struct gendisk {
58118 + struct kobject *slave_dir;
58119 +
58120 + struct timer_rand_state *random;
58121 +- atomic_t sync_io; /* RAID */
58122 ++ atomic_unchecked_t sync_io; /* RAID */
58123 + struct disk_events *ev;
58124 + #ifdef CONFIG_BLK_DEV_INTEGRITY
58125 + struct blk_integrity *integrity;
58126 +diff -urNp linux-3.1.1/include/linux/gracl.h linux-3.1.1/include/linux/gracl.h
58127 +--- linux-3.1.1/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
58128 ++++ linux-3.1.1/include/linux/gracl.h 2011-11-16 18:40:31.000000000 -0500
58129 +@@ -0,0 +1,317 @@
58130 ++#ifndef GR_ACL_H
58131 ++#define GR_ACL_H
58132 ++
58133 ++#include <linux/grdefs.h>
58134 ++#include <linux/resource.h>
58135 ++#include <linux/capability.h>
58136 ++#include <linux/dcache.h>
58137 ++#include <asm/resource.h>
58138 ++
58139 ++/* Major status information */
58140 ++
58141 ++#define GR_VERSION "grsecurity 2.2.2"
58142 ++#define GRSECURITY_VERSION 0x2202
58143 ++
58144 ++enum {
58145 ++ GR_SHUTDOWN = 0,
58146 ++ GR_ENABLE = 1,
58147 ++ GR_SPROLE = 2,
58148 ++ GR_RELOAD = 3,
58149 ++ GR_SEGVMOD = 4,
58150 ++ GR_STATUS = 5,
58151 ++ GR_UNSPROLE = 6,
58152 ++ GR_PASSSET = 7,
58153 ++ GR_SPROLEPAM = 8,
58154 ++};
58155 ++
58156 ++/* Password setup definitions
58157 ++ * kernel/grhash.c */
58158 ++enum {
58159 ++ GR_PW_LEN = 128,
58160 ++ GR_SALT_LEN = 16,
58161 ++ GR_SHA_LEN = 32,
58162 ++};
58163 ++
58164 ++enum {
58165 ++ GR_SPROLE_LEN = 64,
58166 ++};
58167 ++
58168 ++enum {
58169 ++ GR_NO_GLOB = 0,
58170 ++ GR_REG_GLOB,
58171 ++ GR_CREATE_GLOB
58172 ++};
58173 ++
58174 ++#define GR_NLIMITS 32
58175 ++
58176 ++/* Begin Data Structures */
58177 ++
58178 ++struct sprole_pw {
58179 ++ unsigned char *rolename;
58180 ++ unsigned char salt[GR_SALT_LEN];
58181 ++ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58182 ++};
58183 ++
58184 ++struct name_entry {
58185 ++ __u32 key;
58186 ++ ino_t inode;
58187 ++ dev_t device;
58188 ++ char *name;
58189 ++ __u16 len;
58190 ++ __u8 deleted;
58191 ++ struct name_entry *prev;
58192 ++ struct name_entry *next;
58193 ++};
58194 ++
58195 ++struct inodev_entry {
58196 ++ struct name_entry *nentry;
58197 ++ struct inodev_entry *prev;
58198 ++ struct inodev_entry *next;
58199 ++};
58200 ++
58201 ++struct acl_role_db {
58202 ++ struct acl_role_label **r_hash;
58203 ++ __u32 r_size;
58204 ++};
58205 ++
58206 ++struct inodev_db {
58207 ++ struct inodev_entry **i_hash;
58208 ++ __u32 i_size;
58209 ++};
58210 ++
58211 ++struct name_db {
58212 ++ struct name_entry **n_hash;
58213 ++ __u32 n_size;
58214 ++};
58215 ++
58216 ++struct crash_uid {
58217 ++ uid_t uid;
58218 ++ unsigned long expires;
58219 ++};
58220 ++
58221 ++struct gr_hash_struct {
58222 ++ void **table;
58223 ++ void **nametable;
58224 ++ void *first;
58225 ++ __u32 table_size;
58226 ++ __u32 used_size;
58227 ++ int type;
58228 ++};
58229 ++
58230 ++/* Userspace Grsecurity ACL data structures */
58231 ++
58232 ++struct acl_subject_label {
58233 ++ char *filename;
58234 ++ ino_t inode;
58235 ++ dev_t device;
58236 ++ __u32 mode;
58237 ++ kernel_cap_t cap_mask;
58238 ++ kernel_cap_t cap_lower;
58239 ++ kernel_cap_t cap_invert_audit;
58240 ++
58241 ++ struct rlimit res[GR_NLIMITS];
58242 ++ __u32 resmask;
58243 ++
58244 ++ __u8 user_trans_type;
58245 ++ __u8 group_trans_type;
58246 ++ uid_t *user_transitions;
58247 ++ gid_t *group_transitions;
58248 ++ __u16 user_trans_num;
58249 ++ __u16 group_trans_num;
58250 ++
58251 ++ __u32 sock_families[2];
58252 ++ __u32 ip_proto[8];
58253 ++ __u32 ip_type;
58254 ++ struct acl_ip_label **ips;
58255 ++ __u32 ip_num;
58256 ++ __u32 inaddr_any_override;
58257 ++
58258 ++ __u32 crashes;
58259 ++ unsigned long expires;
58260 ++
58261 ++ struct acl_subject_label *parent_subject;
58262 ++ struct gr_hash_struct *hash;
58263 ++ struct acl_subject_label *prev;
58264 ++ struct acl_subject_label *next;
58265 ++
58266 ++ struct acl_object_label **obj_hash;
58267 ++ __u32 obj_hash_size;
58268 ++ __u16 pax_flags;
58269 ++};
58270 ++
58271 ++struct role_allowed_ip {
58272 ++ __u32 addr;
58273 ++ __u32 netmask;
58274 ++
58275 ++ struct role_allowed_ip *prev;
58276 ++ struct role_allowed_ip *next;
58277 ++};
58278 ++
58279 ++struct role_transition {
58280 ++ char *rolename;
58281 ++
58282 ++ struct role_transition *prev;
58283 ++ struct role_transition *next;
58284 ++};
58285 ++
58286 ++struct acl_role_label {
58287 ++ char *rolename;
58288 ++ uid_t uidgid;
58289 ++ __u16 roletype;
58290 ++
58291 ++ __u16 auth_attempts;
58292 ++ unsigned long expires;
58293 ++
58294 ++ struct acl_subject_label *root_label;
58295 ++ struct gr_hash_struct *hash;
58296 ++
58297 ++ struct acl_role_label *prev;
58298 ++ struct acl_role_label *next;
58299 ++
58300 ++ struct role_transition *transitions;
58301 ++ struct role_allowed_ip *allowed_ips;
58302 ++ uid_t *domain_children;
58303 ++ __u16 domain_child_num;
58304 ++
58305 ++ struct acl_subject_label **subj_hash;
58306 ++ __u32 subj_hash_size;
58307 ++};
58308 ++
58309 ++struct user_acl_role_db {
58310 ++ struct acl_role_label **r_table;
58311 ++ __u32 num_pointers; /* Number of allocations to track */
58312 ++ __u32 num_roles; /* Number of roles */
58313 ++ __u32 num_domain_children; /* Number of domain children */
58314 ++ __u32 num_subjects; /* Number of subjects */
58315 ++ __u32 num_objects; /* Number of objects */
58316 ++};
58317 ++
58318 ++struct acl_object_label {
58319 ++ char *filename;
58320 ++ ino_t inode;
58321 ++ dev_t device;
58322 ++ __u32 mode;
58323 ++
58324 ++ struct acl_subject_label *nested;
58325 ++ struct acl_object_label *globbed;
58326 ++
58327 ++ /* next two structures not used */
58328 ++
58329 ++ struct acl_object_label *prev;
58330 ++ struct acl_object_label *next;
58331 ++};
58332 ++
58333 ++struct acl_ip_label {
58334 ++ char *iface;
58335 ++ __u32 addr;
58336 ++ __u32 netmask;
58337 ++ __u16 low, high;
58338 ++ __u8 mode;
58339 ++ __u32 type;
58340 ++ __u32 proto[8];
58341 ++
58342 ++ /* next two structures not used */
58343 ++
58344 ++ struct acl_ip_label *prev;
58345 ++ struct acl_ip_label *next;
58346 ++};
58347 ++
58348 ++struct gr_arg {
58349 ++ struct user_acl_role_db role_db;
58350 ++ unsigned char pw[GR_PW_LEN];
58351 ++ unsigned char salt[GR_SALT_LEN];
58352 ++ unsigned char sum[GR_SHA_LEN];
58353 ++ unsigned char sp_role[GR_SPROLE_LEN];
58354 ++ struct sprole_pw *sprole_pws;
58355 ++ dev_t segv_device;
58356 ++ ino_t segv_inode;
58357 ++ uid_t segv_uid;
58358 ++ __u16 num_sprole_pws;
58359 ++ __u16 mode;
58360 ++};
58361 ++
58362 ++struct gr_arg_wrapper {
58363 ++ struct gr_arg *arg;
58364 ++ __u32 version;
58365 ++ __u32 size;
58366 ++};
58367 ++
58368 ++struct subject_map {
58369 ++ struct acl_subject_label *user;
58370 ++ struct acl_subject_label *kernel;
58371 ++ struct subject_map *prev;
58372 ++ struct subject_map *next;
58373 ++};
58374 ++
58375 ++struct acl_subj_map_db {
58376 ++ struct subject_map **s_hash;
58377 ++ __u32 s_size;
58378 ++};
58379 ++
58380 ++/* End Data Structures Section */
58381 ++
58382 ++/* Hash functions generated by empirical testing by Brad Spengler
58383 ++ Makes good use of the low bits of the inode. Generally 0-1 times
58384 ++ in loop for successful match. 0-3 for unsuccessful match.
58385 ++ Shift/add algorithm with modulus of table size and an XOR*/
58386 ++
58387 ++static __inline__ unsigned int
58388 ++rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58389 ++{
58390 ++ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58391 ++}
58392 ++
58393 ++ static __inline__ unsigned int
58394 ++shash(const struct acl_subject_label *userp, const unsigned int sz)
58395 ++{
58396 ++ return ((const unsigned long)userp % sz);
58397 ++}
58398 ++
58399 ++static __inline__ unsigned int
58400 ++fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58401 ++{
58402 ++ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58403 ++}
58404 ++
58405 ++static __inline__ unsigned int
58406 ++nhash(const char *name, const __u16 len, const unsigned int sz)
58407 ++{
58408 ++ return full_name_hash((const unsigned char *)name, len) % sz;
58409 ++}
58410 ++
58411 ++#define FOR_EACH_ROLE_START(role) \
58412 ++ role = role_list; \
58413 ++ while (role) {
58414 ++
58415 ++#define FOR_EACH_ROLE_END(role) \
58416 ++ role = role->prev; \
58417 ++ }
58418 ++
58419 ++#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58420 ++ subj = NULL; \
58421 ++ iter = 0; \
58422 ++ while (iter < role->subj_hash_size) { \
58423 ++ if (subj == NULL) \
58424 ++ subj = role->subj_hash[iter]; \
58425 ++ if (subj == NULL) { \
58426 ++ iter++; \
58427 ++ continue; \
58428 ++ }
58429 ++
58430 ++#define FOR_EACH_SUBJECT_END(subj,iter) \
58431 ++ subj = subj->next; \
58432 ++ if (subj == NULL) \
58433 ++ iter++; \
58434 ++ }
58435 ++
58436 ++
58437 ++#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58438 ++ subj = role->hash->first; \
58439 ++ while (subj != NULL) {
58440 ++
58441 ++#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58442 ++ subj = subj->next; \
58443 ++ }
58444 ++
58445 ++#endif
58446 ++
58447 +diff -urNp linux-3.1.1/include/linux/gralloc.h linux-3.1.1/include/linux/gralloc.h
58448 +--- linux-3.1.1/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58449 ++++ linux-3.1.1/include/linux/gralloc.h 2011-11-16 18:40:31.000000000 -0500
58450 +@@ -0,0 +1,9 @@
58451 ++#ifndef __GRALLOC_H
58452 ++#define __GRALLOC_H
58453 ++
58454 ++void acl_free_all(void);
58455 ++int acl_alloc_stack_init(unsigned long size);
58456 ++void *acl_alloc(unsigned long len);
58457 ++void *acl_alloc_num(unsigned long num, unsigned long len);
58458 ++
58459 ++#endif
58460 +diff -urNp linux-3.1.1/include/linux/grdefs.h linux-3.1.1/include/linux/grdefs.h
58461 +--- linux-3.1.1/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58462 ++++ linux-3.1.1/include/linux/grdefs.h 2011-11-16 18:40:31.000000000 -0500
58463 +@@ -0,0 +1,140 @@
58464 ++#ifndef GRDEFS_H
58465 ++#define GRDEFS_H
58466 ++
58467 ++/* Begin grsecurity status declarations */
58468 ++
58469 ++enum {
58470 ++ GR_READY = 0x01,
58471 ++ GR_STATUS_INIT = 0x00 // disabled state
58472 ++};
58473 ++
58474 ++/* Begin ACL declarations */
58475 ++
58476 ++/* Role flags */
58477 ++
58478 ++enum {
58479 ++ GR_ROLE_USER = 0x0001,
58480 ++ GR_ROLE_GROUP = 0x0002,
58481 ++ GR_ROLE_DEFAULT = 0x0004,
58482 ++ GR_ROLE_SPECIAL = 0x0008,
58483 ++ GR_ROLE_AUTH = 0x0010,
58484 ++ GR_ROLE_NOPW = 0x0020,
58485 ++ GR_ROLE_GOD = 0x0040,
58486 ++ GR_ROLE_LEARN = 0x0080,
58487 ++ GR_ROLE_TPE = 0x0100,
58488 ++ GR_ROLE_DOMAIN = 0x0200,
58489 ++ GR_ROLE_PAM = 0x0400,
58490 ++ GR_ROLE_PERSIST = 0x0800
58491 ++};
58492 ++
58493 ++/* ACL Subject and Object mode flags */
58494 ++enum {
58495 ++ GR_DELETED = 0x80000000
58496 ++};
58497 ++
58498 ++/* ACL Object-only mode flags */
58499 ++enum {
58500 ++ GR_READ = 0x00000001,
58501 ++ GR_APPEND = 0x00000002,
58502 ++ GR_WRITE = 0x00000004,
58503 ++ GR_EXEC = 0x00000008,
58504 ++ GR_FIND = 0x00000010,
58505 ++ GR_INHERIT = 0x00000020,
58506 ++ GR_SETID = 0x00000040,
58507 ++ GR_CREATE = 0x00000080,
58508 ++ GR_DELETE = 0x00000100,
58509 ++ GR_LINK = 0x00000200,
58510 ++ GR_AUDIT_READ = 0x00000400,
58511 ++ GR_AUDIT_APPEND = 0x00000800,
58512 ++ GR_AUDIT_WRITE = 0x00001000,
58513 ++ GR_AUDIT_EXEC = 0x00002000,
58514 ++ GR_AUDIT_FIND = 0x00004000,
58515 ++ GR_AUDIT_INHERIT= 0x00008000,
58516 ++ GR_AUDIT_SETID = 0x00010000,
58517 ++ GR_AUDIT_CREATE = 0x00020000,
58518 ++ GR_AUDIT_DELETE = 0x00040000,
58519 ++ GR_AUDIT_LINK = 0x00080000,
58520 ++ GR_PTRACERD = 0x00100000,
58521 ++ GR_NOPTRACE = 0x00200000,
58522 ++ GR_SUPPRESS = 0x00400000,
58523 ++ GR_NOLEARN = 0x00800000,
58524 ++ GR_INIT_TRANSFER= 0x01000000
58525 ++};
58526 ++
58527 ++#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58528 ++ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58529 ++ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58530 ++
58531 ++/* ACL subject-only mode flags */
58532 ++enum {
58533 ++ GR_KILL = 0x00000001,
58534 ++ GR_VIEW = 0x00000002,
58535 ++ GR_PROTECTED = 0x00000004,
58536 ++ GR_LEARN = 0x00000008,
58537 ++ GR_OVERRIDE = 0x00000010,
58538 ++ /* just a placeholder, this mode is only used in userspace */
58539 ++ GR_DUMMY = 0x00000020,
58540 ++ GR_PROTSHM = 0x00000040,
58541 ++ GR_KILLPROC = 0x00000080,
58542 ++ GR_KILLIPPROC = 0x00000100,
58543 ++ /* just a placeholder, this mode is only used in userspace */
58544 ++ GR_NOTROJAN = 0x00000200,
58545 ++ GR_PROTPROCFD = 0x00000400,
58546 ++ GR_PROCACCT = 0x00000800,
58547 ++ GR_RELAXPTRACE = 0x00001000,
58548 ++ GR_NESTED = 0x00002000,
58549 ++ GR_INHERITLEARN = 0x00004000,
58550 ++ GR_PROCFIND = 0x00008000,
58551 ++ GR_POVERRIDE = 0x00010000,
58552 ++ GR_KERNELAUTH = 0x00020000,
58553 ++ GR_ATSECURE = 0x00040000,
58554 ++ GR_SHMEXEC = 0x00080000
58555 ++};
58556 ++
58557 ++enum {
58558 ++ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58559 ++ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58560 ++ GR_PAX_ENABLE_MPROTECT = 0x0004,
58561 ++ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58562 ++ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58563 ++ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58564 ++ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58565 ++ GR_PAX_DISABLE_MPROTECT = 0x0400,
58566 ++ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58567 ++ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58568 ++};
58569 ++
58570 ++enum {
58571 ++ GR_ID_USER = 0x01,
58572 ++ GR_ID_GROUP = 0x02,
58573 ++};
58574 ++
58575 ++enum {
58576 ++ GR_ID_ALLOW = 0x01,
58577 ++ GR_ID_DENY = 0x02,
58578 ++};
58579 ++
58580 ++#define GR_CRASH_RES 31
58581 ++#define GR_UIDTABLE_MAX 500
58582 ++
58583 ++/* begin resource learning section */
58584 ++enum {
58585 ++ GR_RLIM_CPU_BUMP = 60,
58586 ++ GR_RLIM_FSIZE_BUMP = 50000,
58587 ++ GR_RLIM_DATA_BUMP = 10000,
58588 ++ GR_RLIM_STACK_BUMP = 1000,
58589 ++ GR_RLIM_CORE_BUMP = 10000,
58590 ++ GR_RLIM_RSS_BUMP = 500000,
58591 ++ GR_RLIM_NPROC_BUMP = 1,
58592 ++ GR_RLIM_NOFILE_BUMP = 5,
58593 ++ GR_RLIM_MEMLOCK_BUMP = 50000,
58594 ++ GR_RLIM_AS_BUMP = 500000,
58595 ++ GR_RLIM_LOCKS_BUMP = 2,
58596 ++ GR_RLIM_SIGPENDING_BUMP = 5,
58597 ++ GR_RLIM_MSGQUEUE_BUMP = 10000,
58598 ++ GR_RLIM_NICE_BUMP = 1,
58599 ++ GR_RLIM_RTPRIO_BUMP = 1,
58600 ++ GR_RLIM_RTTIME_BUMP = 1000000
58601 ++};
58602 ++
58603 ++#endif
58604 +diff -urNp linux-3.1.1/include/linux/grinternal.h linux-3.1.1/include/linux/grinternal.h
58605 +--- linux-3.1.1/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58606 ++++ linux-3.1.1/include/linux/grinternal.h 2011-11-16 18:40:31.000000000 -0500
58607 +@@ -0,0 +1,220 @@
58608 ++#ifndef __GRINTERNAL_H
58609 ++#define __GRINTERNAL_H
58610 ++
58611 ++#ifdef CONFIG_GRKERNSEC
58612 ++
58613 ++#include <linux/fs.h>
58614 ++#include <linux/mnt_namespace.h>
58615 ++#include <linux/nsproxy.h>
58616 ++#include <linux/gracl.h>
58617 ++#include <linux/grdefs.h>
58618 ++#include <linux/grmsg.h>
58619 ++
58620 ++void gr_add_learn_entry(const char *fmt, ...)
58621 ++ __attribute__ ((format (printf, 1, 2)));
58622 ++__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58623 ++ const struct vfsmount *mnt);
58624 ++__u32 gr_check_create(const struct dentry *new_dentry,
58625 ++ const struct dentry *parent,
58626 ++ const struct vfsmount *mnt, const __u32 mode);
58627 ++int gr_check_protected_task(const struct task_struct *task);
58628 ++__u32 to_gr_audit(const __u32 reqmode);
58629 ++int gr_set_acls(const int type);
58630 ++int gr_apply_subject_to_task(struct task_struct *task);
58631 ++int gr_acl_is_enabled(void);
58632 ++char gr_roletype_to_char(void);
58633 ++
58634 ++void gr_handle_alertkill(struct task_struct *task);
58635 ++char *gr_to_filename(const struct dentry *dentry,
58636 ++ const struct vfsmount *mnt);
58637 ++char *gr_to_filename1(const struct dentry *dentry,
58638 ++ const struct vfsmount *mnt);
58639 ++char *gr_to_filename2(const struct dentry *dentry,
58640 ++ const struct vfsmount *mnt);
58641 ++char *gr_to_filename3(const struct dentry *dentry,
58642 ++ const struct vfsmount *mnt);
58643 ++
58644 ++extern int grsec_enable_harden_ptrace;
58645 ++extern int grsec_enable_link;
58646 ++extern int grsec_enable_fifo;
58647 ++extern int grsec_enable_execve;
58648 ++extern int grsec_enable_shm;
58649 ++extern int grsec_enable_execlog;
58650 ++extern int grsec_enable_signal;
58651 ++extern int grsec_enable_audit_ptrace;
58652 ++extern int grsec_enable_forkfail;
58653 ++extern int grsec_enable_time;
58654 ++extern int grsec_enable_rofs;
58655 ++extern int grsec_enable_chroot_shmat;
58656 ++extern int grsec_enable_chroot_mount;
58657 ++extern int grsec_enable_chroot_double;
58658 ++extern int grsec_enable_chroot_pivot;
58659 ++extern int grsec_enable_chroot_chdir;
58660 ++extern int grsec_enable_chroot_chmod;
58661 ++extern int grsec_enable_chroot_mknod;
58662 ++extern int grsec_enable_chroot_fchdir;
58663 ++extern int grsec_enable_chroot_nice;
58664 ++extern int grsec_enable_chroot_execlog;
58665 ++extern int grsec_enable_chroot_caps;
58666 ++extern int grsec_enable_chroot_sysctl;
58667 ++extern int grsec_enable_chroot_unix;
58668 ++extern int grsec_enable_tpe;
58669 ++extern int grsec_tpe_gid;
58670 ++extern int grsec_enable_tpe_all;
58671 ++extern int grsec_enable_tpe_invert;
58672 ++extern int grsec_enable_socket_all;
58673 ++extern int grsec_socket_all_gid;
58674 ++extern int grsec_enable_socket_client;
58675 ++extern int grsec_socket_client_gid;
58676 ++extern int grsec_enable_socket_server;
58677 ++extern int grsec_socket_server_gid;
58678 ++extern int grsec_audit_gid;
58679 ++extern int grsec_enable_group;
58680 ++extern int grsec_enable_audit_textrel;
58681 ++extern int grsec_enable_log_rwxmaps;
58682 ++extern int grsec_enable_mount;
58683 ++extern int grsec_enable_chdir;
58684 ++extern int grsec_resource_logging;
58685 ++extern int grsec_enable_blackhole;
58686 ++extern int grsec_lastack_retries;
58687 ++extern int grsec_enable_brute;
58688 ++extern int grsec_lock;
58689 ++
58690 ++extern spinlock_t grsec_alert_lock;
58691 ++extern unsigned long grsec_alert_wtime;
58692 ++extern unsigned long grsec_alert_fyet;
58693 ++
58694 ++extern spinlock_t grsec_audit_lock;
58695 ++
58696 ++extern rwlock_t grsec_exec_file_lock;
58697 ++
58698 ++#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58699 ++ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58700 ++ (tsk)->exec_file->f_vfsmnt) : "/")
58701 ++
58702 ++#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58703 ++ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58704 ++ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58705 ++
58706 ++#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58707 ++ gr_to_filename((tsk)->exec_file->f_path.dentry, \
58708 ++ (tsk)->exec_file->f_vfsmnt) : "/")
58709 ++
58710 ++#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58711 ++ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58712 ++ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58713 ++
58714 ++#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58715 ++
58716 ++#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58717 ++
58718 ++#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58719 ++ (task)->pid, (cred)->uid, \
58720 ++ (cred)->euid, (cred)->gid, (cred)->egid, \
58721 ++ gr_parent_task_fullpath(task), \
58722 ++ (task)->real_parent->comm, (task)->real_parent->pid, \
58723 ++ (pcred)->uid, (pcred)->euid, \
58724 ++ (pcred)->gid, (pcred)->egid
58725 ++
58726 ++#define GR_CHROOT_CAPS {{ \
58727 ++ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58728 ++ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58729 ++ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58730 ++ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58731 ++ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58732 ++ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
58733 ++ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
58734 ++
58735 ++#define security_learn(normal_msg,args...) \
58736 ++({ \
58737 ++ read_lock(&grsec_exec_file_lock); \
58738 ++ gr_add_learn_entry(normal_msg "\n", ## args); \
58739 ++ read_unlock(&grsec_exec_file_lock); \
58740 ++})
58741 ++
58742 ++enum {
58743 ++ GR_DO_AUDIT,
58744 ++ GR_DONT_AUDIT,
58745 ++ /* used for non-audit messages that we shouldn't kill the task on */
58746 ++ GR_DONT_AUDIT_GOOD
58747 ++};
58748 ++
58749 ++enum {
58750 ++ GR_TTYSNIFF,
58751 ++ GR_RBAC,
58752 ++ GR_RBAC_STR,
58753 ++ GR_STR_RBAC,
58754 ++ GR_RBAC_MODE2,
58755 ++ GR_RBAC_MODE3,
58756 ++ GR_FILENAME,
58757 ++ GR_SYSCTL_HIDDEN,
58758 ++ GR_NOARGS,
58759 ++ GR_ONE_INT,
58760 ++ GR_ONE_INT_TWO_STR,
58761 ++ GR_ONE_STR,
58762 ++ GR_STR_INT,
58763 ++ GR_TWO_STR_INT,
58764 ++ GR_TWO_INT,
58765 ++ GR_TWO_U64,
58766 ++ GR_THREE_INT,
58767 ++ GR_FIVE_INT_TWO_STR,
58768 ++ GR_TWO_STR,
58769 ++ GR_THREE_STR,
58770 ++ GR_FOUR_STR,
58771 ++ GR_STR_FILENAME,
58772 ++ GR_FILENAME_STR,
58773 ++ GR_FILENAME_TWO_INT,
58774 ++ GR_FILENAME_TWO_INT_STR,
58775 ++ GR_TEXTREL,
58776 ++ GR_PTRACE,
58777 ++ GR_RESOURCE,
58778 ++ GR_CAP,
58779 ++ GR_SIG,
58780 ++ GR_SIG2,
58781 ++ GR_CRASH1,
58782 ++ GR_CRASH2,
58783 ++ GR_PSACCT,
58784 ++ GR_RWXMAP
58785 ++};
58786 ++
58787 ++#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58788 ++#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58789 ++#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58790 ++#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58791 ++#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58792 ++#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58793 ++#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58794 ++#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58795 ++#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58796 ++#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58797 ++#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58798 ++#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58799 ++#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58800 ++#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58801 ++#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58802 ++#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58803 ++#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58804 ++#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58805 ++#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58806 ++#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58807 ++#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58808 ++#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58809 ++#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58810 ++#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58811 ++#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58812 ++#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58813 ++#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58814 ++#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58815 ++#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58816 ++#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58817 ++#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58818 ++#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58819 ++#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58820 ++#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58821 ++#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58822 ++
58823 ++void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58824 ++
58825 ++#endif
58826 ++
58827 ++#endif
58828 +diff -urNp linux-3.1.1/include/linux/grmsg.h linux-3.1.1/include/linux/grmsg.h
58829 +--- linux-3.1.1/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58830 ++++ linux-3.1.1/include/linux/grmsg.h 2011-11-16 18:40:31.000000000 -0500
58831 +@@ -0,0 +1,108 @@
58832 ++#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58833 ++#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58834 ++#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58835 ++#define GR_STOPMOD_MSG "denied modification of module state by "
58836 ++#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58837 ++#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58838 ++#define GR_IOPERM_MSG "denied use of ioperm() by "
58839 ++#define GR_IOPL_MSG "denied use of iopl() by "
58840 ++#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58841 ++#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58842 ++#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58843 ++#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58844 ++#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58845 ++#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58846 ++#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58847 ++#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58848 ++#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58849 ++#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58850 ++#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58851 ++#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58852 ++#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58853 ++#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58854 ++#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58855 ++#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58856 ++#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58857 ++#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58858 ++#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58859 ++#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58860 ++#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58861 ++#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58862 ++#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58863 ++#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58864 ++#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58865 ++#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58866 ++#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58867 ++#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58868 ++#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58869 ++#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58870 ++#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58871 ++#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58872 ++#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58873 ++#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58874 ++#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58875 ++#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58876 ++#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58877 ++#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58878 ++#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58879 ++#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58880 ++#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58881 ++#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58882 ++#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58883 ++#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58884 ++#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
58885 ++#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58886 ++#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58887 ++#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58888 ++#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58889 ++#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58890 ++#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58891 ++#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58892 ++#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58893 ++#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58894 ++#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58895 ++#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58896 ++#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58897 ++#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58898 ++#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58899 ++#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58900 ++#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58901 ++#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58902 ++#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58903 ++#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58904 ++#define GR_FAILFORK_MSG "failed fork with errno %s by "
58905 ++#define GR_NICE_CHROOT_MSG "denied priority change by "
58906 ++#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58907 ++#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58908 ++#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58909 ++#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58910 ++#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58911 ++#define GR_TIME_MSG "time set by "
58912 ++#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58913 ++#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58914 ++#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58915 ++#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58916 ++#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58917 ++#define GR_BIND_MSG "denied bind() by "
58918 ++#define GR_CONNECT_MSG "denied connect() by "
58919 ++#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58920 ++#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58921 ++#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58922 ++#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58923 ++#define GR_CAP_ACL_MSG "use of %s denied for "
58924 ++#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
58925 ++#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58926 ++#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58927 ++#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58928 ++#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58929 ++#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58930 ++#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58931 ++#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58932 ++#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58933 ++#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58934 ++#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58935 ++#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58936 ++#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58937 ++#define GR_VM86_MSG "denied use of vm86 by "
58938 ++#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58939 ++#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
58940 +diff -urNp linux-3.1.1/include/linux/grsecurity.h linux-3.1.1/include/linux/grsecurity.h
58941 +--- linux-3.1.1/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
58942 ++++ linux-3.1.1/include/linux/grsecurity.h 2011-11-17 00:16:10.000000000 -0500
58943 +@@ -0,0 +1,228 @@
58944 ++#ifndef GR_SECURITY_H
58945 ++#define GR_SECURITY_H
58946 ++#include <linux/fs.h>
58947 ++#include <linux/fs_struct.h>
58948 ++#include <linux/binfmts.h>
58949 ++#include <linux/gracl.h>
58950 ++
58951 ++/* notify of brain-dead configs */
58952 ++#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58953 ++#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58954 ++#endif
58955 ++#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58956 ++#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58957 ++#endif
58958 ++#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58959 ++#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58960 ++#endif
58961 ++#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58962 ++#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58963 ++#endif
58964 ++#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58965 ++#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58966 ++#endif
58967 ++#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
58968 ++#error "CONFIG_PAX enabled, but no PaX options are enabled."
58969 ++#endif
58970 ++
58971 ++#include <linux/compat.h>
58972 ++
58973 ++struct user_arg_ptr {
58974 ++#ifdef CONFIG_COMPAT
58975 ++ bool is_compat;
58976 ++#endif
58977 ++ union {
58978 ++ const char __user *const __user *native;
58979 ++#ifdef CONFIG_COMPAT
58980 ++ compat_uptr_t __user *compat;
58981 ++#endif
58982 ++ } ptr;
58983 ++};
58984 ++
58985 ++void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58986 ++void gr_handle_brute_check(void);
58987 ++void gr_handle_kernel_exploit(void);
58988 ++int gr_process_user_ban(void);
58989 ++
58990 ++char gr_roletype_to_char(void);
58991 ++
58992 ++int gr_acl_enable_at_secure(void);
58993 ++
58994 ++int gr_check_user_change(int real, int effective, int fs);
58995 ++int gr_check_group_change(int real, int effective, int fs);
58996 ++
58997 ++void gr_del_task_from_ip_table(struct task_struct *p);
58998 ++
58999 ++int gr_pid_is_chrooted(struct task_struct *p);
59000 ++int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59001 ++int gr_handle_chroot_nice(void);
59002 ++int gr_handle_chroot_sysctl(const int op);
59003 ++int gr_handle_chroot_setpriority(struct task_struct *p,
59004 ++ const int niceval);
59005 ++int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59006 ++int gr_handle_chroot_chroot(const struct dentry *dentry,
59007 ++ const struct vfsmount *mnt);
59008 ++void gr_handle_chroot_chdir(struct path *path);
59009 ++int gr_handle_chroot_chmod(const struct dentry *dentry,
59010 ++ const struct vfsmount *mnt, const int mode);
59011 ++int gr_handle_chroot_mknod(const struct dentry *dentry,
59012 ++ const struct vfsmount *mnt, const int mode);
59013 ++int gr_handle_chroot_mount(const struct dentry *dentry,
59014 ++ const struct vfsmount *mnt,
59015 ++ const char *dev_name);
59016 ++int gr_handle_chroot_pivot(void);
59017 ++int gr_handle_chroot_unix(const pid_t pid);
59018 ++
59019 ++int gr_handle_rawio(const struct inode *inode);
59020 ++
59021 ++void gr_handle_ioperm(void);
59022 ++void gr_handle_iopl(void);
59023 ++
59024 ++int gr_tpe_allow(const struct file *file);
59025 ++
59026 ++void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59027 ++void gr_clear_chroot_entries(struct task_struct *task);
59028 ++
59029 ++void gr_log_forkfail(const int retval);
59030 ++void gr_log_timechange(void);
59031 ++void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59032 ++void gr_log_chdir(const struct dentry *dentry,
59033 ++ const struct vfsmount *mnt);
59034 ++void gr_log_chroot_exec(const struct dentry *dentry,
59035 ++ const struct vfsmount *mnt);
59036 ++void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
59037 ++void gr_log_remount(const char *devname, const int retval);
59038 ++void gr_log_unmount(const char *devname, const int retval);
59039 ++void gr_log_mount(const char *from, const char *to, const int retval);
59040 ++void gr_log_textrel(struct vm_area_struct *vma);
59041 ++void gr_log_rwxmmap(struct file *file);
59042 ++void gr_log_rwxmprotect(struct file *file);
59043 ++
59044 ++int gr_handle_follow_link(const struct inode *parent,
59045 ++ const struct inode *inode,
59046 ++ const struct dentry *dentry,
59047 ++ const struct vfsmount *mnt);
59048 ++int gr_handle_fifo(const struct dentry *dentry,
59049 ++ const struct vfsmount *mnt,
59050 ++ const struct dentry *dir, const int flag,
59051 ++ const int acc_mode);
59052 ++int gr_handle_hardlink(const struct dentry *dentry,
59053 ++ const struct vfsmount *mnt,
59054 ++ struct inode *inode,
59055 ++ const int mode, const char *to);
59056 ++
59057 ++int gr_is_capable(const int cap);
59058 ++int gr_is_capable_nolog(const int cap);
59059 ++void gr_learn_resource(const struct task_struct *task, const int limit,
59060 ++ const unsigned long wanted, const int gt);
59061 ++void gr_copy_label(struct task_struct *tsk);
59062 ++void gr_handle_crash(struct task_struct *task, const int sig);
59063 ++int gr_handle_signal(const struct task_struct *p, const int sig);
59064 ++int gr_check_crash_uid(const uid_t uid);
59065 ++int gr_check_protected_task(const struct task_struct *task);
59066 ++int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59067 ++int gr_acl_handle_mmap(const struct file *file,
59068 ++ const unsigned long prot);
59069 ++int gr_acl_handle_mprotect(const struct file *file,
59070 ++ const unsigned long prot);
59071 ++int gr_check_hidden_task(const struct task_struct *tsk);
59072 ++__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59073 ++ const struct vfsmount *mnt);
59074 ++__u32 gr_acl_handle_utime(const struct dentry *dentry,
59075 ++ const struct vfsmount *mnt);
59076 ++__u32 gr_acl_handle_access(const struct dentry *dentry,
59077 ++ const struct vfsmount *mnt, const int fmode);
59078 ++__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
59079 ++ const struct vfsmount *mnt, mode_t mode);
59080 ++__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59081 ++ const struct vfsmount *mnt, mode_t mode);
59082 ++__u32 gr_acl_handle_chown(const struct dentry *dentry,
59083 ++ const struct vfsmount *mnt);
59084 ++__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59085 ++ const struct vfsmount *mnt);
59086 ++int gr_handle_ptrace(struct task_struct *task, const long request);
59087 ++int gr_handle_proc_ptrace(struct task_struct *task);
59088 ++__u32 gr_acl_handle_execve(const struct dentry *dentry,
59089 ++ const struct vfsmount *mnt);
59090 ++int gr_check_crash_exec(const struct file *filp);
59091 ++int gr_acl_is_enabled(void);
59092 ++void gr_set_kernel_label(struct task_struct *task);
59093 ++void gr_set_role_label(struct task_struct *task, const uid_t uid,
59094 ++ const gid_t gid);
59095 ++int gr_set_proc_label(const struct dentry *dentry,
59096 ++ const struct vfsmount *mnt,
59097 ++ const int unsafe_share);
59098 ++__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59099 ++ const struct vfsmount *mnt);
59100 ++__u32 gr_acl_handle_open(const struct dentry *dentry,
59101 ++ const struct vfsmount *mnt, int acc_mode);
59102 ++__u32 gr_acl_handle_creat(const struct dentry *dentry,
59103 ++ const struct dentry *p_dentry,
59104 ++ const struct vfsmount *p_mnt,
59105 ++ int open_flags, int acc_mode, const int imode);
59106 ++void gr_handle_create(const struct dentry *dentry,
59107 ++ const struct vfsmount *mnt);
59108 ++void gr_handle_proc_create(const struct dentry *dentry,
59109 ++ const struct inode *inode);
59110 ++__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59111 ++ const struct dentry *parent_dentry,
59112 ++ const struct vfsmount *parent_mnt,
59113 ++ const int mode);
59114 ++__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59115 ++ const struct dentry *parent_dentry,
59116 ++ const struct vfsmount *parent_mnt);
59117 ++__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59118 ++ const struct vfsmount *mnt);
59119 ++void gr_handle_delete(const ino_t ino, const dev_t dev);
59120 ++__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59121 ++ const struct vfsmount *mnt);
59122 ++__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59123 ++ const struct dentry *parent_dentry,
59124 ++ const struct vfsmount *parent_mnt,
59125 ++ const char *from);
59126 ++__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59127 ++ const struct dentry *parent_dentry,
59128 ++ const struct vfsmount *parent_mnt,
59129 ++ const struct dentry *old_dentry,
59130 ++ const struct vfsmount *old_mnt, const char *to);
59131 ++int gr_acl_handle_rename(struct dentry *new_dentry,
59132 ++ struct dentry *parent_dentry,
59133 ++ const struct vfsmount *parent_mnt,
59134 ++ struct dentry *old_dentry,
59135 ++ struct inode *old_parent_inode,
59136 ++ struct vfsmount *old_mnt, const char *newname);
59137 ++void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59138 ++ struct dentry *old_dentry,
59139 ++ struct dentry *new_dentry,
59140 ++ struct vfsmount *mnt, const __u8 replace);
59141 ++__u32 gr_check_link(const struct dentry *new_dentry,
59142 ++ const struct dentry *parent_dentry,
59143 ++ const struct vfsmount *parent_mnt,
59144 ++ const struct dentry *old_dentry,
59145 ++ const struct vfsmount *old_mnt);
59146 ++int gr_acl_handle_filldir(const struct file *file, const char *name,
59147 ++ const unsigned int namelen, const ino_t ino);
59148 ++
59149 ++__u32 gr_acl_handle_unix(const struct dentry *dentry,
59150 ++ const struct vfsmount *mnt);
59151 ++void gr_acl_handle_exit(void);
59152 ++void gr_acl_handle_psacct(struct task_struct *task, const long code);
59153 ++int gr_acl_handle_procpidmem(const struct task_struct *task);
59154 ++int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59155 ++int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59156 ++void gr_audit_ptrace(struct task_struct *task);
59157 ++dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59158 ++
59159 ++#ifdef CONFIG_GRKERNSEC
59160 ++void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59161 ++void gr_handle_vm86(void);
59162 ++void gr_handle_mem_readwrite(u64 from, u64 to);
59163 ++
59164 ++extern int grsec_enable_dmesg;
59165 ++extern int grsec_disable_privio;
59166 ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59167 ++extern int grsec_enable_chroot_findtask;
59168 ++#endif
59169 ++#endif
59170 ++
59171 ++#endif
59172 +diff -urNp linux-3.1.1/include/linux/grsock.h linux-3.1.1/include/linux/grsock.h
59173 +--- linux-3.1.1/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
59174 ++++ linux-3.1.1/include/linux/grsock.h 2011-11-16 18:40:31.000000000 -0500
59175 +@@ -0,0 +1,19 @@
59176 ++#ifndef __GRSOCK_H
59177 ++#define __GRSOCK_H
59178 ++
59179 ++extern void gr_attach_curr_ip(const struct sock *sk);
59180 ++extern int gr_handle_sock_all(const int family, const int type,
59181 ++ const int protocol);
59182 ++extern int gr_handle_sock_server(const struct sockaddr *sck);
59183 ++extern int gr_handle_sock_server_other(const struct sock *sck);
59184 ++extern int gr_handle_sock_client(const struct sockaddr *sck);
59185 ++extern int gr_search_connect(struct socket * sock,
59186 ++ struct sockaddr_in * addr);
59187 ++extern int gr_search_bind(struct socket * sock,
59188 ++ struct sockaddr_in * addr);
59189 ++extern int gr_search_listen(struct socket * sock);
59190 ++extern int gr_search_accept(struct socket * sock);
59191 ++extern int gr_search_socket(const int domain, const int type,
59192 ++ const int protocol);
59193 ++
59194 ++#endif
59195 +diff -urNp linux-3.1.1/include/linux/hid.h linux-3.1.1/include/linux/hid.h
59196 +--- linux-3.1.1/include/linux/hid.h 2011-11-11 15:19:27.000000000 -0500
59197 ++++ linux-3.1.1/include/linux/hid.h 2011-11-16 18:39:08.000000000 -0500
59198 +@@ -676,7 +676,7 @@ struct hid_ll_driver {
59199 + unsigned int code, int value);
59200 +
59201 + int (*parse)(struct hid_device *hdev);
59202 +-};
59203 ++} __no_const;
59204 +
59205 + #define PM_HINT_FULLON 1<<5
59206 + #define PM_HINT_NORMAL 1<<1
59207 +diff -urNp linux-3.1.1/include/linux/highmem.h linux-3.1.1/include/linux/highmem.h
59208 +--- linux-3.1.1/include/linux/highmem.h 2011-11-11 15:19:27.000000000 -0500
59209 ++++ linux-3.1.1/include/linux/highmem.h 2011-11-16 18:39:08.000000000 -0500
59210 +@@ -185,6 +185,18 @@ static inline void clear_highpage(struct
59211 + kunmap_atomic(kaddr, KM_USER0);
59212 + }
59213 +
59214 ++static inline void sanitize_highpage(struct page *page)
59215 ++{
59216 ++ void *kaddr;
59217 ++ unsigned long flags;
59218 ++
59219 ++ local_irq_save(flags);
59220 ++ kaddr = kmap_atomic(page, KM_CLEARPAGE);
59221 ++ clear_page(kaddr);
59222 ++ kunmap_atomic(kaddr, KM_CLEARPAGE);
59223 ++ local_irq_restore(flags);
59224 ++}
59225 ++
59226 + static inline void zero_user_segments(struct page *page,
59227 + unsigned start1, unsigned end1,
59228 + unsigned start2, unsigned end2)
59229 +diff -urNp linux-3.1.1/include/linux/i2c.h linux-3.1.1/include/linux/i2c.h
59230 +--- linux-3.1.1/include/linux/i2c.h 2011-11-11 15:19:27.000000000 -0500
59231 ++++ linux-3.1.1/include/linux/i2c.h 2011-11-16 18:39:08.000000000 -0500
59232 +@@ -346,6 +346,7 @@ struct i2c_algorithm {
59233 + /* To determine what the adapter supports */
59234 + u32 (*functionality) (struct i2c_adapter *);
59235 + };
59236 ++typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59237 +
59238 + /*
59239 + * i2c_adapter is the structure used to identify a physical i2c bus along
59240 +diff -urNp linux-3.1.1/include/linux/i2o.h linux-3.1.1/include/linux/i2o.h
59241 +--- linux-3.1.1/include/linux/i2o.h 2011-11-11 15:19:27.000000000 -0500
59242 ++++ linux-3.1.1/include/linux/i2o.h 2011-11-16 18:39:08.000000000 -0500
59243 +@@ -564,7 +564,7 @@ struct i2o_controller {
59244 + struct i2o_device *exec; /* Executive */
59245 + #if BITS_PER_LONG == 64
59246 + spinlock_t context_list_lock; /* lock for context_list */
59247 +- atomic_t context_list_counter; /* needed for unique contexts */
59248 ++ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59249 + struct list_head context_list; /* list of context id's
59250 + and pointers */
59251 + #endif
59252 +diff -urNp linux-3.1.1/include/linux/init.h linux-3.1.1/include/linux/init.h
59253 +--- linux-3.1.1/include/linux/init.h 2011-11-11 15:19:27.000000000 -0500
59254 ++++ linux-3.1.1/include/linux/init.h 2011-11-16 18:39:08.000000000 -0500
59255 +@@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
59256 +
59257 + /* Each module must use one module_init(). */
59258 + #define module_init(initfn) \
59259 +- static inline initcall_t __inittest(void) \
59260 ++ static inline __used initcall_t __inittest(void) \
59261 + { return initfn; } \
59262 + int init_module(void) __attribute__((alias(#initfn)));
59263 +
59264 + /* This is only required if you want to be unloadable. */
59265 + #define module_exit(exitfn) \
59266 +- static inline exitcall_t __exittest(void) \
59267 ++ static inline __used exitcall_t __exittest(void) \
59268 + { return exitfn; } \
59269 + void cleanup_module(void) __attribute__((alias(#exitfn)));
59270 +
59271 +diff -urNp linux-3.1.1/include/linux/init_task.h linux-3.1.1/include/linux/init_task.h
59272 +--- linux-3.1.1/include/linux/init_task.h 2011-11-11 15:19:27.000000000 -0500
59273 ++++ linux-3.1.1/include/linux/init_task.h 2011-11-16 18:39:08.000000000 -0500
59274 +@@ -126,6 +126,12 @@ extern struct cred init_cred;
59275 + # define INIT_PERF_EVENTS(tsk)
59276 + #endif
59277 +
59278 ++#ifdef CONFIG_X86
59279 ++#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59280 ++#else
59281 ++#define INIT_TASK_THREAD_INFO
59282 ++#endif
59283 ++
59284 + /*
59285 + * INIT_TASK is used to set up the first task table, touch at
59286 + * your own risk!. Base=0, limit=0x1fffff (=2MB)
59287 +@@ -164,6 +170,7 @@ extern struct cred init_cred;
59288 + RCU_INIT_POINTER(.cred, &init_cred), \
59289 + .comm = "swapper", \
59290 + .thread = INIT_THREAD, \
59291 ++ INIT_TASK_THREAD_INFO \
59292 + .fs = &init_fs, \
59293 + .files = &init_files, \
59294 + .signal = &init_signals, \
59295 +diff -urNp linux-3.1.1/include/linux/intel-iommu.h linux-3.1.1/include/linux/intel-iommu.h
59296 +--- linux-3.1.1/include/linux/intel-iommu.h 2011-11-11 15:19:27.000000000 -0500
59297 ++++ linux-3.1.1/include/linux/intel-iommu.h 2011-11-16 18:39:08.000000000 -0500
59298 +@@ -296,7 +296,7 @@ struct iommu_flush {
59299 + u8 fm, u64 type);
59300 + void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59301 + unsigned int size_order, u64 type);
59302 +-};
59303 ++} __no_const;
59304 +
59305 + enum {
59306 + SR_DMAR_FECTL_REG,
59307 +diff -urNp linux-3.1.1/include/linux/interrupt.h linux-3.1.1/include/linux/interrupt.h
59308 +--- linux-3.1.1/include/linux/interrupt.h 2011-11-11 15:19:27.000000000 -0500
59309 ++++ linux-3.1.1/include/linux/interrupt.h 2011-11-16 18:39:08.000000000 -0500
59310 +@@ -425,7 +425,7 @@ enum
59311 + /* map softirq index to softirq name. update 'softirq_to_name' in
59312 + * kernel/softirq.c when adding a new softirq.
59313 + */
59314 +-extern char *softirq_to_name[NR_SOFTIRQS];
59315 ++extern const char * const softirq_to_name[NR_SOFTIRQS];
59316 +
59317 + /* softirq mask and active fields moved to irq_cpustat_t in
59318 + * asm/hardirq.h to get better cache usage. KAO
59319 +@@ -433,12 +433,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
59320 +
59321 + struct softirq_action
59322 + {
59323 +- void (*action)(struct softirq_action *);
59324 ++ void (*action)(void);
59325 + };
59326 +
59327 + asmlinkage void do_softirq(void);
59328 + asmlinkage void __do_softirq(void);
59329 +-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59330 ++extern void open_softirq(int nr, void (*action)(void));
59331 + extern void softirq_init(void);
59332 + static inline void __raise_softirq_irqoff(unsigned int nr)
59333 + {
59334 +diff -urNp linux-3.1.1/include/linux/kallsyms.h linux-3.1.1/include/linux/kallsyms.h
59335 +--- linux-3.1.1/include/linux/kallsyms.h 2011-11-11 15:19:27.000000000 -0500
59336 ++++ linux-3.1.1/include/linux/kallsyms.h 2011-11-16 18:40:31.000000000 -0500
59337 +@@ -15,7 +15,8 @@
59338 +
59339 + struct module;
59340 +
59341 +-#ifdef CONFIG_KALLSYMS
59342 ++#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59343 ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59344 + /* Lookup the address for a symbol. Returns 0 if not found. */
59345 + unsigned long kallsyms_lookup_name(const char *name);
59346 +
59347 +@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
59348 + /* Stupid that this does nothing, but I didn't create this mess. */
59349 + #define __print_symbol(fmt, addr)
59350 + #endif /*CONFIG_KALLSYMS*/
59351 ++#else /* when included by kallsyms.c, vsnprintf.c, or
59352 ++ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59353 ++extern void __print_symbol(const char *fmt, unsigned long address);
59354 ++extern int sprint_backtrace(char *buffer, unsigned long address);
59355 ++extern int sprint_symbol(char *buffer, unsigned long address);
59356 ++const char *kallsyms_lookup(unsigned long addr,
59357 ++ unsigned long *symbolsize,
59358 ++ unsigned long *offset,
59359 ++ char **modname, char *namebuf);
59360 ++#endif
59361 +
59362 + /* This macro allows us to keep printk typechecking */
59363 + static void __check_printsym_format(const char *fmt, ...)
59364 +diff -urNp linux-3.1.1/include/linux/kgdb.h linux-3.1.1/include/linux/kgdb.h
59365 +--- linux-3.1.1/include/linux/kgdb.h 2011-11-11 15:19:27.000000000 -0500
59366 ++++ linux-3.1.1/include/linux/kgdb.h 2011-11-16 18:39:08.000000000 -0500
59367 +@@ -53,7 +53,7 @@ extern int kgdb_connected;
59368 + extern int kgdb_io_module_registered;
59369 +
59370 + extern atomic_t kgdb_setting_breakpoint;
59371 +-extern atomic_t kgdb_cpu_doing_single_step;
59372 ++extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59373 +
59374 + extern struct task_struct *kgdb_usethread;
59375 + extern struct task_struct *kgdb_contthread;
59376 +@@ -251,7 +251,7 @@ struct kgdb_arch {
59377 + void (*disable_hw_break)(struct pt_regs *regs);
59378 + void (*remove_all_hw_break)(void);
59379 + void (*correct_hw_break)(void);
59380 +-};
59381 ++} __do_const;
59382 +
59383 + /**
59384 + * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59385 +@@ -276,7 +276,7 @@ struct kgdb_io {
59386 + void (*pre_exception) (void);
59387 + void (*post_exception) (void);
59388 + int is_console;
59389 +-};
59390 ++} __do_const;
59391 +
59392 + extern struct kgdb_arch arch_kgdb_ops;
59393 +
59394 +diff -urNp linux-3.1.1/include/linux/kmod.h linux-3.1.1/include/linux/kmod.h
59395 +--- linux-3.1.1/include/linux/kmod.h 2011-11-11 15:19:27.000000000 -0500
59396 ++++ linux-3.1.1/include/linux/kmod.h 2011-11-16 18:40:31.000000000 -0500
59397 +@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
59398 + * usually useless though. */
59399 + extern int __request_module(bool wait, const char *name, ...) \
59400 + __attribute__((format(printf, 2, 3)));
59401 ++extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59402 ++ __attribute__((format(printf, 3, 4)));
59403 + #define request_module(mod...) __request_module(true, mod)
59404 + #define request_module_nowait(mod...) __request_module(false, mod)
59405 + #define try_then_request_module(x, mod...) \
59406 +diff -urNp linux-3.1.1/include/linux/kvm_host.h linux-3.1.1/include/linux/kvm_host.h
59407 +--- linux-3.1.1/include/linux/kvm_host.h 2011-11-11 15:19:27.000000000 -0500
59408 ++++ linux-3.1.1/include/linux/kvm_host.h 2011-11-16 18:39:08.000000000 -0500
59409 +@@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59410 + void vcpu_load(struct kvm_vcpu *vcpu);
59411 + void vcpu_put(struct kvm_vcpu *vcpu);
59412 +
59413 +-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59414 ++int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59415 + struct module *module);
59416 + void kvm_exit(void);
59417 +
59418 +@@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59419 + struct kvm_guest_debug *dbg);
59420 + int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59421 +
59422 +-int kvm_arch_init(void *opaque);
59423 ++int kvm_arch_init(const void *opaque);
59424 + void kvm_arch_exit(void);
59425 +
59426 + int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59427 +diff -urNp linux-3.1.1/include/linux/libata.h linux-3.1.1/include/linux/libata.h
59428 +--- linux-3.1.1/include/linux/libata.h 2011-11-11 15:19:27.000000000 -0500
59429 ++++ linux-3.1.1/include/linux/libata.h 2011-11-16 18:39:08.000000000 -0500
59430 +@@ -909,7 +909,7 @@ struct ata_port_operations {
59431 + * fields must be pointers.
59432 + */
59433 + const struct ata_port_operations *inherits;
59434 +-};
59435 ++} __do_const;
59436 +
59437 + struct ata_port_info {
59438 + unsigned long flags;
59439 +diff -urNp linux-3.1.1/include/linux/mca.h linux-3.1.1/include/linux/mca.h
59440 +--- linux-3.1.1/include/linux/mca.h 2011-11-11 15:19:27.000000000 -0500
59441 ++++ linux-3.1.1/include/linux/mca.h 2011-11-16 18:39:08.000000000 -0500
59442 +@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59443 + int region);
59444 + void * (*mca_transform_memory)(struct mca_device *,
59445 + void *memory);
59446 +-};
59447 ++} __no_const;
59448 +
59449 + struct mca_bus {
59450 + u64 default_dma_mask;
59451 +diff -urNp linux-3.1.1/include/linux/memory.h linux-3.1.1/include/linux/memory.h
59452 +--- linux-3.1.1/include/linux/memory.h 2011-11-11 15:19:27.000000000 -0500
59453 ++++ linux-3.1.1/include/linux/memory.h 2011-11-16 18:39:08.000000000 -0500
59454 +@@ -144,7 +144,7 @@ struct memory_accessor {
59455 + size_t count);
59456 + ssize_t (*write)(struct memory_accessor *, const char *buf,
59457 + off_t offset, size_t count);
59458 +-};
59459 ++} __no_const;
59460 +
59461 + /*
59462 + * Kernel text modification mutex, used for code patching. Users of this lock
59463 +diff -urNp linux-3.1.1/include/linux/mfd/abx500.h linux-3.1.1/include/linux/mfd/abx500.h
59464 +--- linux-3.1.1/include/linux/mfd/abx500.h 2011-11-11 15:19:27.000000000 -0500
59465 ++++ linux-3.1.1/include/linux/mfd/abx500.h 2011-11-16 18:39:08.000000000 -0500
59466 +@@ -234,6 +234,7 @@ struct abx500_ops {
59467 + int (*event_registers_startup_state_get) (struct device *, u8 *);
59468 + int (*startup_irq_enabled) (struct device *, unsigned int);
59469 + };
59470 ++typedef struct abx500_ops __no_const abx500_ops_no_const;
59471 +
59472 + int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59473 + void abx500_remove_ops(struct device *dev);
59474 +diff -urNp linux-3.1.1/include/linux/mm.h linux-3.1.1/include/linux/mm.h
59475 +--- linux-3.1.1/include/linux/mm.h 2011-11-11 15:19:27.000000000 -0500
59476 ++++ linux-3.1.1/include/linux/mm.h 2011-11-16 18:39:08.000000000 -0500
59477 +@@ -114,7 +114,14 @@ extern unsigned int kobjsize(const void
59478 +
59479 + #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59480 + #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59481 ++
59482 ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59483 ++#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59484 ++#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59485 ++#else
59486 + #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59487 ++#endif
59488 ++
59489 + #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59490 + #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59491 +
59492 +@@ -1011,34 +1018,6 @@ int set_page_dirty(struct page *page);
59493 + int set_page_dirty_lock(struct page *page);
59494 + int clear_page_dirty_for_io(struct page *page);
59495 +
59496 +-/* Is the vma a continuation of the stack vma above it? */
59497 +-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59498 +-{
59499 +- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59500 +-}
59501 +-
59502 +-static inline int stack_guard_page_start(struct vm_area_struct *vma,
59503 +- unsigned long addr)
59504 +-{
59505 +- return (vma->vm_flags & VM_GROWSDOWN) &&
59506 +- (vma->vm_start == addr) &&
59507 +- !vma_growsdown(vma->vm_prev, addr);
59508 +-}
59509 +-
59510 +-/* Is the vma a continuation of the stack vma below it? */
59511 +-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59512 +-{
59513 +- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59514 +-}
59515 +-
59516 +-static inline int stack_guard_page_end(struct vm_area_struct *vma,
59517 +- unsigned long addr)
59518 +-{
59519 +- return (vma->vm_flags & VM_GROWSUP) &&
59520 +- (vma->vm_end == addr) &&
59521 +- !vma_growsup(vma->vm_next, addr);
59522 +-}
59523 +-
59524 + extern unsigned long move_page_tables(struct vm_area_struct *vma,
59525 + unsigned long old_addr, struct vm_area_struct *new_vma,
59526 + unsigned long new_addr, unsigned long len);
59527 +@@ -1133,6 +1112,15 @@ static inline void sync_mm_rss(struct ta
59528 + }
59529 + #endif
59530 +
59531 ++#ifdef CONFIG_MMU
59532 ++pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59533 ++#else
59534 ++static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59535 ++{
59536 ++ return __pgprot(0);
59537 ++}
59538 ++#endif
59539 ++
59540 + int vma_wants_writenotify(struct vm_area_struct *vma);
59541 +
59542 + extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59543 +@@ -1417,6 +1405,7 @@ out:
59544 + }
59545 +
59546 + extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59547 ++extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59548 +
59549 + extern unsigned long do_brk(unsigned long, unsigned long);
59550 +
59551 +@@ -1474,6 +1463,10 @@ extern struct vm_area_struct * find_vma(
59552 + extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59553 + struct vm_area_struct **pprev);
59554 +
59555 ++extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59556 ++extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59557 ++extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59558 ++
59559 + /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59560 + NULL if none. Assume start_addr < end_addr. */
59561 + static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59562 +@@ -1490,15 +1483,6 @@ static inline unsigned long vma_pages(st
59563 + return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59564 + }
59565 +
59566 +-#ifdef CONFIG_MMU
59567 +-pgprot_t vm_get_page_prot(unsigned long vm_flags);
59568 +-#else
59569 +-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
59570 +-{
59571 +- return __pgprot(0);
59572 +-}
59573 +-#endif
59574 +-
59575 + struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59576 + int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59577 + unsigned long pfn, unsigned long size, pgprot_t);
59578 +@@ -1612,7 +1596,7 @@ extern int unpoison_memory(unsigned long
59579 + extern int sysctl_memory_failure_early_kill;
59580 + extern int sysctl_memory_failure_recovery;
59581 + extern void shake_page(struct page *p, int access);
59582 +-extern atomic_long_t mce_bad_pages;
59583 ++extern atomic_long_unchecked_t mce_bad_pages;
59584 + extern int soft_offline_page(struct page *page, int flags);
59585 +
59586 + extern void dump_page(struct page *page);
59587 +@@ -1626,5 +1610,11 @@ extern void copy_user_huge_page(struct p
59588 + unsigned int pages_per_huge_page);
59589 + #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
59590 +
59591 ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59592 ++extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59593 ++#else
59594 ++static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59595 ++#endif
59596 ++
59597 + #endif /* __KERNEL__ */
59598 + #endif /* _LINUX_MM_H */
59599 +diff -urNp linux-3.1.1/include/linux/mm_types.h linux-3.1.1/include/linux/mm_types.h
59600 +--- linux-3.1.1/include/linux/mm_types.h 2011-11-11 15:19:27.000000000 -0500
59601 ++++ linux-3.1.1/include/linux/mm_types.h 2011-11-16 18:39:08.000000000 -0500
59602 +@@ -230,6 +230,8 @@ struct vm_area_struct {
59603 + #ifdef CONFIG_NUMA
59604 + struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59605 + #endif
59606 ++
59607 ++ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59608 + };
59609 +
59610 + struct core_thread {
59611 +@@ -362,6 +364,24 @@ struct mm_struct {
59612 + #ifdef CONFIG_CPUMASK_OFFSTACK
59613 + struct cpumask cpumask_allocation;
59614 + #endif
59615 ++
59616 ++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59617 ++ unsigned long pax_flags;
59618 ++#endif
59619 ++
59620 ++#ifdef CONFIG_PAX_DLRESOLVE
59621 ++ unsigned long call_dl_resolve;
59622 ++#endif
59623 ++
59624 ++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59625 ++ unsigned long call_syscall;
59626 ++#endif
59627 ++
59628 ++#ifdef CONFIG_PAX_ASLR
59629 ++ unsigned long delta_mmap; /* randomized offset */
59630 ++ unsigned long delta_stack; /* randomized offset */
59631 ++#endif
59632 ++
59633 + };
59634 +
59635 + static inline void mm_init_cpumask(struct mm_struct *mm)
59636 +diff -urNp linux-3.1.1/include/linux/mmu_notifier.h linux-3.1.1/include/linux/mmu_notifier.h
59637 +--- linux-3.1.1/include/linux/mmu_notifier.h 2011-11-11 15:19:27.000000000 -0500
59638 ++++ linux-3.1.1/include/linux/mmu_notifier.h 2011-11-16 18:39:08.000000000 -0500
59639 +@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
59640 + */
59641 + #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59642 + ({ \
59643 +- pte_t __pte; \
59644 ++ pte_t ___pte; \
59645 + struct vm_area_struct *___vma = __vma; \
59646 + unsigned long ___address = __address; \
59647 +- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59648 ++ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59649 + mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59650 +- __pte; \
59651 ++ ___pte; \
59652 + })
59653 +
59654 + #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
59655 +diff -urNp linux-3.1.1/include/linux/mmzone.h linux-3.1.1/include/linux/mmzone.h
59656 +--- linux-3.1.1/include/linux/mmzone.h 2011-11-11 15:19:27.000000000 -0500
59657 ++++ linux-3.1.1/include/linux/mmzone.h 2011-11-16 18:39:08.000000000 -0500
59658 +@@ -356,7 +356,7 @@ struct zone {
59659 + unsigned long flags; /* zone flags, see below */
59660 +
59661 + /* Zone statistics */
59662 +- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59663 ++ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59664 +
59665 + /*
59666 + * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
59667 +diff -urNp linux-3.1.1/include/linux/mod_devicetable.h linux-3.1.1/include/linux/mod_devicetable.h
59668 +--- linux-3.1.1/include/linux/mod_devicetable.h 2011-11-11 15:19:27.000000000 -0500
59669 ++++ linux-3.1.1/include/linux/mod_devicetable.h 2011-11-16 18:39:08.000000000 -0500
59670 +@@ -12,7 +12,7 @@
59671 + typedef unsigned long kernel_ulong_t;
59672 + #endif
59673 +
59674 +-#define PCI_ANY_ID (~0)
59675 ++#define PCI_ANY_ID ((__u16)~0)
59676 +
59677 + struct pci_device_id {
59678 + __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59679 +@@ -131,7 +131,7 @@ struct usb_device_id {
59680 + #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59681 + #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59682 +
59683 +-#define HID_ANY_ID (~0)
59684 ++#define HID_ANY_ID (~0U)
59685 +
59686 + struct hid_device_id {
59687 + __u16 bus;
59688 +diff -urNp linux-3.1.1/include/linux/module.h linux-3.1.1/include/linux/module.h
59689 +--- linux-3.1.1/include/linux/module.h 2011-11-11 15:19:27.000000000 -0500
59690 ++++ linux-3.1.1/include/linux/module.h 2011-11-16 18:39:08.000000000 -0500
59691 +@@ -16,6 +16,7 @@
59692 + #include <linux/kobject.h>
59693 + #include <linux/moduleparam.h>
59694 + #include <linux/tracepoint.h>
59695 ++#include <linux/fs.h>
59696 +
59697 + #include <linux/percpu.h>
59698 + #include <asm/module.h>
59699 +@@ -327,19 +328,16 @@ struct module
59700 + int (*init)(void);
59701 +
59702 + /* If this is non-NULL, vfree after init() returns */
59703 +- void *module_init;
59704 ++ void *module_init_rx, *module_init_rw;
59705 +
59706 + /* Here is the actual code + data, vfree'd on unload. */
59707 +- void *module_core;
59708 ++ void *module_core_rx, *module_core_rw;
59709 +
59710 + /* Here are the sizes of the init and core sections */
59711 +- unsigned int init_size, core_size;
59712 ++ unsigned int init_size_rw, core_size_rw;
59713 +
59714 + /* The size of the executable code in each section. */
59715 +- unsigned int init_text_size, core_text_size;
59716 +-
59717 +- /* Size of RO sections of the module (text+rodata) */
59718 +- unsigned int init_ro_size, core_ro_size;
59719 ++ unsigned int init_size_rx, core_size_rx;
59720 +
59721 + /* Arch-specific module values */
59722 + struct mod_arch_specific arch;
59723 +@@ -395,6 +393,10 @@ struct module
59724 + #ifdef CONFIG_EVENT_TRACING
59725 + struct ftrace_event_call **trace_events;
59726 + unsigned int num_trace_events;
59727 ++ struct file_operations trace_id;
59728 ++ struct file_operations trace_enable;
59729 ++ struct file_operations trace_format;
59730 ++ struct file_operations trace_filter;
59731 + #endif
59732 + #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59733 + unsigned int num_ftrace_callsites;
59734 +@@ -445,16 +447,46 @@ bool is_module_address(unsigned long add
59735 + bool is_module_percpu_address(unsigned long addr);
59736 + bool is_module_text_address(unsigned long addr);
59737 +
59738 ++static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59739 ++{
59740 ++
59741 ++#ifdef CONFIG_PAX_KERNEXEC
59742 ++ if (ktla_ktva(addr) >= (unsigned long)start &&
59743 ++ ktla_ktva(addr) < (unsigned long)start + size)
59744 ++ return 1;
59745 ++#endif
59746 ++
59747 ++ return ((void *)addr >= start && (void *)addr < start + size);
59748 ++}
59749 ++
59750 ++static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59751 ++{
59752 ++ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59753 ++}
59754 ++
59755 ++static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59756 ++{
59757 ++ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59758 ++}
59759 ++
59760 ++static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59761 ++{
59762 ++ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59763 ++}
59764 ++
59765 ++static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59766 ++{
59767 ++ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59768 ++}
59769 ++
59770 + static inline int within_module_core(unsigned long addr, struct module *mod)
59771 + {
59772 +- return (unsigned long)mod->module_core <= addr &&
59773 +- addr < (unsigned long)mod->module_core + mod->core_size;
59774 ++ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59775 + }
59776 +
59777 + static inline int within_module_init(unsigned long addr, struct module *mod)
59778 + {
59779 +- return (unsigned long)mod->module_init <= addr &&
59780 +- addr < (unsigned long)mod->module_init + mod->init_size;
59781 ++ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59782 + }
59783 +
59784 + /* Search for module by name: must hold module_mutex. */
59785 +diff -urNp linux-3.1.1/include/linux/moduleloader.h linux-3.1.1/include/linux/moduleloader.h
59786 +--- linux-3.1.1/include/linux/moduleloader.h 2011-11-11 15:19:27.000000000 -0500
59787 ++++ linux-3.1.1/include/linux/moduleloader.h 2011-11-16 18:39:08.000000000 -0500
59788 +@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(st
59789 + sections. Returns NULL on failure. */
59790 + void *module_alloc(unsigned long size);
59791 +
59792 ++#ifdef CONFIG_PAX_KERNEXEC
59793 ++void *module_alloc_exec(unsigned long size);
59794 ++#else
59795 ++#define module_alloc_exec(x) module_alloc(x)
59796 ++#endif
59797 ++
59798 + /* Free memory returned from module_alloc. */
59799 + void module_free(struct module *mod, void *module_region);
59800 +
59801 ++#ifdef CONFIG_PAX_KERNEXEC
59802 ++void module_free_exec(struct module *mod, void *module_region);
59803 ++#else
59804 ++#define module_free_exec(x, y) module_free((x), (y))
59805 ++#endif
59806 ++
59807 + /* Apply the given relocation to the (simplified) ELF. Return -error
59808 + or 0. */
59809 + int apply_relocate(Elf_Shdr *sechdrs,
59810 +diff -urNp linux-3.1.1/include/linux/moduleparam.h linux-3.1.1/include/linux/moduleparam.h
59811 +--- linux-3.1.1/include/linux/moduleparam.h 2011-11-11 15:19:27.000000000 -0500
59812 ++++ linux-3.1.1/include/linux/moduleparam.h 2011-11-16 18:39:08.000000000 -0500
59813 +@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
59814 + * @len is usually just sizeof(string).
59815 + */
59816 + #define module_param_string(name, string, len, perm) \
59817 +- static const struct kparam_string __param_string_##name \
59818 ++ static const struct kparam_string __param_string_##name __used \
59819 + = { len, string }; \
59820 + __module_param_call(MODULE_PARAM_PREFIX, name, \
59821 + &param_ops_string, \
59822 +@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
59823 + * module_param_named() for why this might be necessary.
59824 + */
59825 + #define module_param_array_named(name, array, type, nump, perm) \
59826 +- static const struct kparam_array __param_arr_##name \
59827 ++ static const struct kparam_array __param_arr_##name __used \
59828 + = { .max = ARRAY_SIZE(array), .num = nump, \
59829 + .ops = &param_ops_##type, \
59830 + .elemsize = sizeof(array[0]), .elem = array }; \
59831 +diff -urNp linux-3.1.1/include/linux/namei.h linux-3.1.1/include/linux/namei.h
59832 +--- linux-3.1.1/include/linux/namei.h 2011-11-11 15:19:27.000000000 -0500
59833 ++++ linux-3.1.1/include/linux/namei.h 2011-11-16 18:39:08.000000000 -0500
59834 +@@ -24,7 +24,7 @@ struct nameidata {
59835 + unsigned seq;
59836 + int last_type;
59837 + unsigned depth;
59838 +- char *saved_names[MAX_NESTED_LINKS + 1];
59839 ++ const char *saved_names[MAX_NESTED_LINKS + 1];
59840 +
59841 + /* Intent data */
59842 + union {
59843 +@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
59844 + extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59845 + extern void unlock_rename(struct dentry *, struct dentry *);
59846 +
59847 +-static inline void nd_set_link(struct nameidata *nd, char *path)
59848 ++static inline void nd_set_link(struct nameidata *nd, const char *path)
59849 + {
59850 + nd->saved_names[nd->depth] = path;
59851 + }
59852 +
59853 +-static inline char *nd_get_link(struct nameidata *nd)
59854 ++static inline const char *nd_get_link(const struct nameidata *nd)
59855 + {
59856 + return nd->saved_names[nd->depth];
59857 + }
59858 +diff -urNp linux-3.1.1/include/linux/netdevice.h linux-3.1.1/include/linux/netdevice.h
59859 +--- linux-3.1.1/include/linux/netdevice.h 2011-11-11 15:19:27.000000000 -0500
59860 ++++ linux-3.1.1/include/linux/netdevice.h 2011-11-16 18:39:08.000000000 -0500
59861 +@@ -944,6 +944,7 @@ struct net_device_ops {
59862 + int (*ndo_set_features)(struct net_device *dev,
59863 + u32 features);
59864 + };
59865 ++typedef struct net_device_ops __no_const net_device_ops_no_const;
59866 +
59867 + /*
59868 + * The DEVICE structure.
59869 +diff -urNp linux-3.1.1/include/linux/netfilter/xt_gradm.h linux-3.1.1/include/linux/netfilter/xt_gradm.h
59870 +--- linux-3.1.1/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59871 ++++ linux-3.1.1/include/linux/netfilter/xt_gradm.h 2011-11-16 18:40:31.000000000 -0500
59872 +@@ -0,0 +1,9 @@
59873 ++#ifndef _LINUX_NETFILTER_XT_GRADM_H
59874 ++#define _LINUX_NETFILTER_XT_GRADM_H 1
59875 ++
59876 ++struct xt_gradm_mtinfo {
59877 ++ __u16 flags;
59878 ++ __u16 invflags;
59879 ++};
59880 ++
59881 ++#endif
59882 +diff -urNp linux-3.1.1/include/linux/of_pdt.h linux-3.1.1/include/linux/of_pdt.h
59883 +--- linux-3.1.1/include/linux/of_pdt.h 2011-11-11 15:19:27.000000000 -0500
59884 ++++ linux-3.1.1/include/linux/of_pdt.h 2011-11-16 18:39:08.000000000 -0500
59885 +@@ -32,7 +32,7 @@ struct of_pdt_ops {
59886 +
59887 + /* return 0 on success; fill in 'len' with number of bytes in path */
59888 + int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
59889 +-};
59890 ++} __no_const;
59891 +
59892 + extern void *prom_early_alloc(unsigned long size);
59893 +
59894 +diff -urNp linux-3.1.1/include/linux/oprofile.h linux-3.1.1/include/linux/oprofile.h
59895 +--- linux-3.1.1/include/linux/oprofile.h 2011-11-11 15:19:27.000000000 -0500
59896 ++++ linux-3.1.1/include/linux/oprofile.h 2011-11-16 18:39:08.000000000 -0500
59897 +@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
59898 + int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59899 + char const * name, ulong * val);
59900 +
59901 +-/** Create a file for read-only access to an atomic_t. */
59902 ++/** Create a file for read-only access to an atomic_unchecked_t. */
59903 + int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59904 +- char const * name, atomic_t * val);
59905 ++ char const * name, atomic_unchecked_t * val);
59906 +
59907 + /** create a directory */
59908 + struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59909 +diff -urNp linux-3.1.1/include/linux/padata.h linux-3.1.1/include/linux/padata.h
59910 +--- linux-3.1.1/include/linux/padata.h 2011-11-11 15:19:27.000000000 -0500
59911 ++++ linux-3.1.1/include/linux/padata.h 2011-11-16 18:39:08.000000000 -0500
59912 +@@ -129,7 +129,7 @@ struct parallel_data {
59913 + struct padata_instance *pinst;
59914 + struct padata_parallel_queue __percpu *pqueue;
59915 + struct padata_serial_queue __percpu *squeue;
59916 +- atomic_t seq_nr;
59917 ++ atomic_unchecked_t seq_nr;
59918 + atomic_t reorder_objects;
59919 + atomic_t refcnt;
59920 + unsigned int max_seq_nr;
59921 +diff -urNp linux-3.1.1/include/linux/perf_event.h linux-3.1.1/include/linux/perf_event.h
59922 +--- linux-3.1.1/include/linux/perf_event.h 2011-11-11 15:19:27.000000000 -0500
59923 ++++ linux-3.1.1/include/linux/perf_event.h 2011-11-16 18:39:08.000000000 -0500
59924 +@@ -745,8 +745,8 @@ struct perf_event {
59925 +
59926 + enum perf_event_active_state state;
59927 + unsigned int attach_state;
59928 +- local64_t count;
59929 +- atomic64_t child_count;
59930 ++ local64_t count; /* PaX: fix it one day */
59931 ++ atomic64_unchecked_t child_count;
59932 +
59933 + /*
59934 + * These are the total time in nanoseconds that the event
59935 +@@ -797,8 +797,8 @@ struct perf_event {
59936 + * These accumulate total time (in nanoseconds) that children
59937 + * events have been enabled and running, respectively.
59938 + */
59939 +- atomic64_t child_total_time_enabled;
59940 +- atomic64_t child_total_time_running;
59941 ++ atomic64_unchecked_t child_total_time_enabled;
59942 ++ atomic64_unchecked_t child_total_time_running;
59943 +
59944 + /*
59945 + * Protect attach/detach and child_list:
59946 +diff -urNp linux-3.1.1/include/linux/pipe_fs_i.h linux-3.1.1/include/linux/pipe_fs_i.h
59947 +--- linux-3.1.1/include/linux/pipe_fs_i.h 2011-11-11 15:19:27.000000000 -0500
59948 ++++ linux-3.1.1/include/linux/pipe_fs_i.h 2011-11-16 18:39:08.000000000 -0500
59949 +@@ -46,9 +46,9 @@ struct pipe_buffer {
59950 + struct pipe_inode_info {
59951 + wait_queue_head_t wait;
59952 + unsigned int nrbufs, curbuf, buffers;
59953 +- unsigned int readers;
59954 +- unsigned int writers;
59955 +- unsigned int waiting_writers;
59956 ++ atomic_t readers;
59957 ++ atomic_t writers;
59958 ++ atomic_t waiting_writers;
59959 + unsigned int r_counter;
59960 + unsigned int w_counter;
59961 + struct page *tmp_page;
59962 +diff -urNp linux-3.1.1/include/linux/pm_runtime.h linux-3.1.1/include/linux/pm_runtime.h
59963 +--- linux-3.1.1/include/linux/pm_runtime.h 2011-11-11 15:19:27.000000000 -0500
59964 ++++ linux-3.1.1/include/linux/pm_runtime.h 2011-11-16 18:39:08.000000000 -0500
59965 +@@ -99,7 +99,7 @@ static inline bool pm_runtime_callbacks_
59966 +
59967 + static inline void pm_runtime_mark_last_busy(struct device *dev)
59968 + {
59969 +- ACCESS_ONCE(dev->power.last_busy) = jiffies;
59970 ++ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
59971 + }
59972 +
59973 + #else /* !CONFIG_PM_RUNTIME */
59974 +diff -urNp linux-3.1.1/include/linux/poison.h linux-3.1.1/include/linux/poison.h
59975 +--- linux-3.1.1/include/linux/poison.h 2011-11-11 15:19:27.000000000 -0500
59976 ++++ linux-3.1.1/include/linux/poison.h 2011-11-16 18:39:08.000000000 -0500
59977 +@@ -19,8 +19,8 @@
59978 + * under normal circumstances, used to verify that nobody uses
59979 + * non-initialized list entries.
59980 + */
59981 +-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
59982 +-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
59983 ++#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
59984 ++#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
59985 +
59986 + /********** include/linux/timer.h **********/
59987 + /*
59988 +diff -urNp linux-3.1.1/include/linux/preempt.h linux-3.1.1/include/linux/preempt.h
59989 +--- linux-3.1.1/include/linux/preempt.h 2011-11-11 15:19:27.000000000 -0500
59990 ++++ linux-3.1.1/include/linux/preempt.h 2011-11-16 18:39:08.000000000 -0500
59991 +@@ -123,7 +123,7 @@ struct preempt_ops {
59992 + void (*sched_in)(struct preempt_notifier *notifier, int cpu);
59993 + void (*sched_out)(struct preempt_notifier *notifier,
59994 + struct task_struct *next);
59995 +-};
59996 ++} __no_const;
59997 +
59998 + /**
59999 + * preempt_notifier - key for installing preemption notifiers
60000 +diff -urNp linux-3.1.1/include/linux/proc_fs.h linux-3.1.1/include/linux/proc_fs.h
60001 +--- linux-3.1.1/include/linux/proc_fs.h 2011-11-11 15:19:27.000000000 -0500
60002 ++++ linux-3.1.1/include/linux/proc_fs.h 2011-11-16 18:40:31.000000000 -0500
60003 +@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
60004 + return proc_create_data(name, mode, parent, proc_fops, NULL);
60005 + }
60006 +
60007 ++static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60008 ++ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60009 ++{
60010 ++#ifdef CONFIG_GRKERNSEC_PROC_USER
60011 ++ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60012 ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60013 ++ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60014 ++#else
60015 ++ return proc_create_data(name, mode, parent, proc_fops, NULL);
60016 ++#endif
60017 ++}
60018 ++
60019 ++
60020 + static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60021 + mode_t mode, struct proc_dir_entry *base,
60022 + read_proc_t *read_proc, void * data)
60023 +@@ -258,7 +271,7 @@ union proc_op {
60024 + int (*proc_show)(struct seq_file *m,
60025 + struct pid_namespace *ns, struct pid *pid,
60026 + struct task_struct *task);
60027 +-};
60028 ++} __no_const;
60029 +
60030 + struct ctl_table_header;
60031 + struct ctl_table;
60032 +diff -urNp linux-3.1.1/include/linux/ptrace.h linux-3.1.1/include/linux/ptrace.h
60033 +--- linux-3.1.1/include/linux/ptrace.h 2011-11-11 15:19:27.000000000 -0500
60034 ++++ linux-3.1.1/include/linux/ptrace.h 2011-11-16 18:40:31.000000000 -0500
60035 +@@ -129,10 +129,10 @@ extern void __ptrace_unlink(struct task_
60036 + extern void exit_ptrace(struct task_struct *tracer);
60037 + #define PTRACE_MODE_READ 1
60038 + #define PTRACE_MODE_ATTACH 2
60039 +-/* Returns 0 on success, -errno on denial. */
60040 +-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60041 + /* Returns true on success, false on denial. */
60042 + extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60043 ++/* Returns true on success, false on denial. */
60044 ++extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60045 +
60046 + static inline int ptrace_reparented(struct task_struct *child)
60047 + {
60048 +diff -urNp linux-3.1.1/include/linux/random.h linux-3.1.1/include/linux/random.h
60049 +--- linux-3.1.1/include/linux/random.h 2011-11-11 15:19:27.000000000 -0500
60050 ++++ linux-3.1.1/include/linux/random.h 2011-11-16 18:39:08.000000000 -0500
60051 +@@ -69,12 +69,17 @@ void srandom32(u32 seed);
60052 +
60053 + u32 prandom32(struct rnd_state *);
60054 +
60055 ++static inline unsigned long pax_get_random_long(void)
60056 ++{
60057 ++ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60058 ++}
60059 ++
60060 + /*
60061 + * Handle minimum values for seeds
60062 + */
60063 + static inline u32 __seed(u32 x, u32 m)
60064 + {
60065 +- return (x < m) ? x + m : x;
60066 ++ return (x <= m) ? x + m + 1 : x;
60067 + }
60068 +
60069 + /**
60070 +diff -urNp linux-3.1.1/include/linux/reboot.h linux-3.1.1/include/linux/reboot.h
60071 +--- linux-3.1.1/include/linux/reboot.h 2011-11-11 15:19:27.000000000 -0500
60072 ++++ linux-3.1.1/include/linux/reboot.h 2011-11-16 18:39:08.000000000 -0500
60073 +@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(st
60074 + * Architecture-specific implementations of sys_reboot commands.
60075 + */
60076 +
60077 +-extern void machine_restart(char *cmd);
60078 +-extern void machine_halt(void);
60079 +-extern void machine_power_off(void);
60080 ++extern void machine_restart(char *cmd) __noreturn;
60081 ++extern void machine_halt(void) __noreturn;
60082 ++extern void machine_power_off(void) __noreturn;
60083 +
60084 + extern void machine_shutdown(void);
60085 + struct pt_regs;
60086 +@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struc
60087 + */
60088 +
60089 + extern void kernel_restart_prepare(char *cmd);
60090 +-extern void kernel_restart(char *cmd);
60091 +-extern void kernel_halt(void);
60092 +-extern void kernel_power_off(void);
60093 ++extern void kernel_restart(char *cmd) __noreturn;
60094 ++extern void kernel_halt(void) __noreturn;
60095 ++extern void kernel_power_off(void) __noreturn;
60096 +
60097 + extern int C_A_D; /* for sysctl */
60098 + void ctrl_alt_del(void);
60099 +@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
60100 + * Emergency restart, callable from an interrupt handler.
60101 + */
60102 +
60103 +-extern void emergency_restart(void);
60104 ++extern void emergency_restart(void) __noreturn;
60105 + #include <asm/emergency-restart.h>
60106 +
60107 + #endif
60108 +diff -urNp linux-3.1.1/include/linux/reiserfs_fs.h linux-3.1.1/include/linux/reiserfs_fs.h
60109 +--- linux-3.1.1/include/linux/reiserfs_fs.h 2011-11-11 15:19:27.000000000 -0500
60110 ++++ linux-3.1.1/include/linux/reiserfs_fs.h 2011-11-16 18:39:08.000000000 -0500
60111 +@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
60112 + #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60113 +
60114 + #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60115 +-#define get_generation(s) atomic_read (&fs_generation(s))
60116 ++#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60117 + #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60118 + #define __fs_changed(gen,s) (gen != get_generation (s))
60119 + #define fs_changed(gen,s) \
60120 +diff -urNp linux-3.1.1/include/linux/reiserfs_fs_sb.h linux-3.1.1/include/linux/reiserfs_fs_sb.h
60121 +--- linux-3.1.1/include/linux/reiserfs_fs_sb.h 2011-11-11 15:19:27.000000000 -0500
60122 ++++ linux-3.1.1/include/linux/reiserfs_fs_sb.h 2011-11-16 18:39:08.000000000 -0500
60123 +@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
60124 + /* Comment? -Hans */
60125 + wait_queue_head_t s_wait;
60126 + /* To be obsoleted soon by per buffer seals.. -Hans */
60127 +- atomic_t s_generation_counter; // increased by one every time the
60128 ++ atomic_unchecked_t s_generation_counter; // increased by one every time the
60129 + // tree gets re-balanced
60130 + unsigned long s_properties; /* File system properties. Currently holds
60131 + on-disk FS format */
60132 +diff -urNp linux-3.1.1/include/linux/relay.h linux-3.1.1/include/linux/relay.h
60133 +--- linux-3.1.1/include/linux/relay.h 2011-11-11 15:19:27.000000000 -0500
60134 ++++ linux-3.1.1/include/linux/relay.h 2011-11-16 18:39:08.000000000 -0500
60135 +@@ -159,7 +159,7 @@ struct rchan_callbacks
60136 + * The callback should return 0 if successful, negative if not.
60137 + */
60138 + int (*remove_buf_file)(struct dentry *dentry);
60139 +-};
60140 ++} __no_const;
60141 +
60142 + /*
60143 + * CONFIG_RELAY kernel API, kernel/relay.c
60144 +diff -urNp linux-3.1.1/include/linux/rfkill.h linux-3.1.1/include/linux/rfkill.h
60145 +--- linux-3.1.1/include/linux/rfkill.h 2011-11-11 15:19:27.000000000 -0500
60146 ++++ linux-3.1.1/include/linux/rfkill.h 2011-11-16 18:39:08.000000000 -0500
60147 +@@ -147,6 +147,7 @@ struct rfkill_ops {
60148 + void (*query)(struct rfkill *rfkill, void *data);
60149 + int (*set_block)(void *data, bool blocked);
60150 + };
60151 ++typedef struct rfkill_ops __no_const rfkill_ops_no_const;
60152 +
60153 + #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
60154 + /**
60155 +diff -urNp linux-3.1.1/include/linux/rmap.h linux-3.1.1/include/linux/rmap.h
60156 +--- linux-3.1.1/include/linux/rmap.h 2011-11-11 15:19:27.000000000 -0500
60157 ++++ linux-3.1.1/include/linux/rmap.h 2011-11-16 18:39:08.000000000 -0500
60158 +@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
60159 + void anon_vma_init(void); /* create anon_vma_cachep */
60160 + int anon_vma_prepare(struct vm_area_struct *);
60161 + void unlink_anon_vmas(struct vm_area_struct *);
60162 +-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60163 +-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60164 ++int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60165 ++int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60166 + void __anon_vma_link(struct vm_area_struct *);
60167 +
60168 + static inline void anon_vma_merge(struct vm_area_struct *vma,
60169 +diff -urNp linux-3.1.1/include/linux/sched.h linux-3.1.1/include/linux/sched.h
60170 +--- linux-3.1.1/include/linux/sched.h 2011-11-11 15:19:27.000000000 -0500
60171 ++++ linux-3.1.1/include/linux/sched.h 2011-11-16 18:40:31.000000000 -0500
60172 +@@ -100,6 +100,7 @@ struct bio_list;
60173 + struct fs_struct;
60174 + struct perf_event_context;
60175 + struct blk_plug;
60176 ++struct linux_binprm;
60177 +
60178 + /*
60179 + * List of flags we want to share for kernel threads,
60180 +@@ -380,10 +381,13 @@ struct user_namespace;
60181 + #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60182 +
60183 + extern int sysctl_max_map_count;
60184 ++extern unsigned long sysctl_heap_stack_gap;
60185 +
60186 + #include <linux/aio.h>
60187 +
60188 + #ifdef CONFIG_MMU
60189 ++extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60190 ++extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60191 + extern void arch_pick_mmap_layout(struct mm_struct *mm);
60192 + extern unsigned long
60193 + arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60194 +@@ -629,6 +633,17 @@ struct signal_struct {
60195 + #ifdef CONFIG_TASKSTATS
60196 + struct taskstats *stats;
60197 + #endif
60198 ++
60199 ++#ifdef CONFIG_GRKERNSEC
60200 ++ u32 curr_ip;
60201 ++ u32 saved_ip;
60202 ++ u32 gr_saddr;
60203 ++ u32 gr_daddr;
60204 ++ u16 gr_sport;
60205 ++ u16 gr_dport;
60206 ++ u8 used_accept:1;
60207 ++#endif
60208 ++
60209 + #ifdef CONFIG_AUDIT
60210 + unsigned audit_tty;
60211 + struct tty_audit_buf *tty_audit_buf;
60212 +@@ -710,6 +725,11 @@ struct user_struct {
60213 + struct key *session_keyring; /* UID's default session keyring */
60214 + #endif
60215 +
60216 ++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60217 ++ unsigned int banned;
60218 ++ unsigned long ban_expires;
60219 ++#endif
60220 ++
60221 + /* Hash table maintenance information */
60222 + struct hlist_node uidhash_node;
60223 + uid_t uid;
60224 +@@ -1340,8 +1360,8 @@ struct task_struct {
60225 + struct list_head thread_group;
60226 +
60227 + struct completion *vfork_done; /* for vfork() */
60228 +- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60229 +- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60230 ++ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60231 ++ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60232 +
60233 + cputime_t utime, stime, utimescaled, stimescaled;
60234 + cputime_t gtime;
60235 +@@ -1357,13 +1377,6 @@ struct task_struct {
60236 + struct task_cputime cputime_expires;
60237 + struct list_head cpu_timers[3];
60238 +
60239 +-/* process credentials */
60240 +- const struct cred __rcu *real_cred; /* objective and real subjective task
60241 +- * credentials (COW) */
60242 +- const struct cred __rcu *cred; /* effective (overridable) subjective task
60243 +- * credentials (COW) */
60244 +- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60245 +-
60246 + char comm[TASK_COMM_LEN]; /* executable name excluding path
60247 + - access with [gs]et_task_comm (which lock
60248 + it with task_lock())
60249 +@@ -1380,8 +1393,16 @@ struct task_struct {
60250 + #endif
60251 + /* CPU-specific state of this task */
60252 + struct thread_struct thread;
60253 ++/* thread_info moved to task_struct */
60254 ++#ifdef CONFIG_X86
60255 ++ struct thread_info tinfo;
60256 ++#endif
60257 + /* filesystem information */
60258 + struct fs_struct *fs;
60259 ++
60260 ++ const struct cred __rcu *cred; /* effective (overridable) subjective task
60261 ++ * credentials (COW) */
60262 ++
60263 + /* open file information */
60264 + struct files_struct *files;
60265 + /* namespaces */
60266 +@@ -1428,6 +1449,11 @@ struct task_struct {
60267 + struct rt_mutex_waiter *pi_blocked_on;
60268 + #endif
60269 +
60270 ++/* process credentials */
60271 ++ const struct cred __rcu *real_cred; /* objective and real subjective task
60272 ++ * credentials (COW) */
60273 ++ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60274 ++
60275 + #ifdef CONFIG_DEBUG_MUTEXES
60276 + /* mutex deadlock detection */
60277 + struct mutex_waiter *blocked_on;
60278 +@@ -1537,6 +1563,21 @@ struct task_struct {
60279 + unsigned long default_timer_slack_ns;
60280 +
60281 + struct list_head *scm_work_list;
60282 ++
60283 ++#ifdef CONFIG_GRKERNSEC
60284 ++ /* grsecurity */
60285 ++ struct dentry *gr_chroot_dentry;
60286 ++ struct acl_subject_label *acl;
60287 ++ struct acl_role_label *role;
60288 ++ struct file *exec_file;
60289 ++ u16 acl_role_id;
60290 ++ /* is this the task that authenticated to the special role */
60291 ++ u8 acl_sp_role;
60292 ++ u8 is_writable;
60293 ++ u8 brute;
60294 ++ u8 gr_is_chrooted;
60295 ++#endif
60296 ++
60297 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60298 + /* Index of current stored address in ret_stack */
60299 + int curr_ret_stack;
60300 +@@ -1571,6 +1612,57 @@ struct task_struct {
60301 + #endif
60302 + };
60303 +
60304 ++#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60305 ++#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60306 ++#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60307 ++#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60308 ++/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60309 ++#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60310 ++
60311 ++#ifdef CONFIG_PAX_SOFTMODE
60312 ++extern int pax_softmode;
60313 ++#endif
60314 ++
60315 ++extern int pax_check_flags(unsigned long *);
60316 ++
60317 ++/* if tsk != current then task_lock must be held on it */
60318 ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60319 ++static inline unsigned long pax_get_flags(struct task_struct *tsk)
60320 ++{
60321 ++ if (likely(tsk->mm))
60322 ++ return tsk->mm->pax_flags;
60323 ++ else
60324 ++ return 0UL;
60325 ++}
60326 ++
60327 ++/* if tsk != current then task_lock must be held on it */
60328 ++static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60329 ++{
60330 ++ if (likely(tsk->mm)) {
60331 ++ tsk->mm->pax_flags = flags;
60332 ++ return 0;
60333 ++ }
60334 ++ return -EINVAL;
60335 ++}
60336 ++#endif
60337 ++
60338 ++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60339 ++extern void pax_set_initial_flags(struct linux_binprm *bprm);
60340 ++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60341 ++extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60342 ++#endif
60343 ++
60344 ++extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60345 ++extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
60346 ++extern void pax_report_refcount_overflow(struct pt_regs *regs);
60347 ++extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60348 ++
60349 ++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60350 ++extern void pax_track_stack(void);
60351 ++#else
60352 ++static inline void pax_track_stack(void) {}
60353 ++#endif
60354 ++
60355 + /* Future-safe accessor for struct task_struct's cpus_allowed. */
60356 + #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60357 +
60358 +@@ -2074,7 +2166,9 @@ void yield(void);
60359 + extern struct exec_domain default_exec_domain;
60360 +
60361 + union thread_union {
60362 ++#ifndef CONFIG_X86
60363 + struct thread_info thread_info;
60364 ++#endif
60365 + unsigned long stack[THREAD_SIZE/sizeof(long)];
60366 + };
60367 +
60368 +@@ -2107,6 +2201,7 @@ extern struct pid_namespace init_pid_ns;
60369 + */
60370 +
60371 + extern struct task_struct *find_task_by_vpid(pid_t nr);
60372 ++extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60373 + extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60374 + struct pid_namespace *ns);
60375 +
60376 +@@ -2243,7 +2338,7 @@ extern void __cleanup_sighand(struct sig
60377 + extern void exit_itimers(struct signal_struct *);
60378 + extern void flush_itimer_signals(void);
60379 +
60380 +-extern NORET_TYPE void do_group_exit(int);
60381 ++extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60382 +
60383 + extern void daemonize(const char *, ...);
60384 + extern int allow_signal(int);
60385 +@@ -2408,13 +2503,17 @@ static inline unsigned long *end_of_stac
60386 +
60387 + #endif
60388 +
60389 +-static inline int object_is_on_stack(void *obj)
60390 ++static inline int object_starts_on_stack(void *obj)
60391 + {
60392 +- void *stack = task_stack_page(current);
60393 ++ const void *stack = task_stack_page(current);
60394 +
60395 + return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60396 + }
60397 +
60398 ++#ifdef CONFIG_PAX_USERCOPY
60399 ++extern int object_is_on_stack(const void *obj, unsigned long len);
60400 ++#endif
60401 ++
60402 + extern void thread_info_cache_init(void);
60403 +
60404 + #ifdef CONFIG_DEBUG_STACK_USAGE
60405 +diff -urNp linux-3.1.1/include/linux/screen_info.h linux-3.1.1/include/linux/screen_info.h
60406 +--- linux-3.1.1/include/linux/screen_info.h 2011-11-11 15:19:27.000000000 -0500
60407 ++++ linux-3.1.1/include/linux/screen_info.h 2011-11-16 18:39:08.000000000 -0500
60408 +@@ -43,7 +43,8 @@ struct screen_info {
60409 + __u16 pages; /* 0x32 */
60410 + __u16 vesa_attributes; /* 0x34 */
60411 + __u32 capabilities; /* 0x36 */
60412 +- __u8 _reserved[6]; /* 0x3a */
60413 ++ __u16 vesapm_size; /* 0x3a */
60414 ++ __u8 _reserved[4]; /* 0x3c */
60415 + } __attribute__((packed));
60416 +
60417 + #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60418 +diff -urNp linux-3.1.1/include/linux/security.h linux-3.1.1/include/linux/security.h
60419 +--- linux-3.1.1/include/linux/security.h 2011-11-11 15:19:27.000000000 -0500
60420 ++++ linux-3.1.1/include/linux/security.h 2011-11-16 18:40:31.000000000 -0500
60421 +@@ -36,6 +36,7 @@
60422 + #include <linux/key.h>
60423 + #include <linux/xfrm.h>
60424 + #include <linux/slab.h>
60425 ++#include <linux/grsecurity.h>
60426 + #include <net/flow.h>
60427 +
60428 + /* Maximum number of letters for an LSM name string */
60429 +diff -urNp linux-3.1.1/include/linux/seq_file.h linux-3.1.1/include/linux/seq_file.h
60430 +--- linux-3.1.1/include/linux/seq_file.h 2011-11-11 15:19:27.000000000 -0500
60431 ++++ linux-3.1.1/include/linux/seq_file.h 2011-11-16 18:39:08.000000000 -0500
60432 +@@ -33,6 +33,7 @@ struct seq_operations {
60433 + void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60434 + int (*show) (struct seq_file *m, void *v);
60435 + };
60436 ++typedef struct seq_operations __no_const seq_operations_no_const;
60437 +
60438 + #define SEQ_SKIP 1
60439 +
60440 +diff -urNp linux-3.1.1/include/linux/shm.h linux-3.1.1/include/linux/shm.h
60441 +--- linux-3.1.1/include/linux/shm.h 2011-11-11 15:19:27.000000000 -0500
60442 ++++ linux-3.1.1/include/linux/shm.h 2011-11-16 18:59:58.000000000 -0500
60443 +@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the ke
60444 +
60445 + /* The task created the shm object. NULL if the task is dead. */
60446 + struct task_struct *shm_creator;
60447 ++#ifdef CONFIG_GRKERNSEC
60448 ++ time_t shm_createtime;
60449 ++ pid_t shm_lapid;
60450 ++#endif
60451 + };
60452 +
60453 + /* shm_mode upper byte flags */
60454 +diff -urNp linux-3.1.1/include/linux/skbuff.h linux-3.1.1/include/linux/skbuff.h
60455 +--- linux-3.1.1/include/linux/skbuff.h 2011-11-11 15:19:27.000000000 -0500
60456 ++++ linux-3.1.1/include/linux/skbuff.h 2011-11-16 18:39:08.000000000 -0500
60457 +@@ -610,7 +610,7 @@ static inline struct skb_shared_hwtstamp
60458 + */
60459 + static inline int skb_queue_empty(const struct sk_buff_head *list)
60460 + {
60461 +- return list->next == (struct sk_buff *)list;
60462 ++ return list->next == (const struct sk_buff *)list;
60463 + }
60464 +
60465 + /**
60466 +@@ -623,7 +623,7 @@ static inline int skb_queue_empty(const
60467 + static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60468 + const struct sk_buff *skb)
60469 + {
60470 +- return skb->next == (struct sk_buff *)list;
60471 ++ return skb->next == (const struct sk_buff *)list;
60472 + }
60473 +
60474 + /**
60475 +@@ -636,7 +636,7 @@ static inline bool skb_queue_is_last(con
60476 + static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60477 + const struct sk_buff *skb)
60478 + {
60479 +- return skb->prev == (struct sk_buff *)list;
60480 ++ return skb->prev == (const struct sk_buff *)list;
60481 + }
60482 +
60483 + /**
60484 +@@ -1458,7 +1458,7 @@ static inline int pskb_network_may_pull(
60485 + * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
60486 + */
60487 + #ifndef NET_SKB_PAD
60488 +-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
60489 ++#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
60490 + #endif
60491 +
60492 + extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60493 +diff -urNp linux-3.1.1/include/linux/slab_def.h linux-3.1.1/include/linux/slab_def.h
60494 +--- linux-3.1.1/include/linux/slab_def.h 2011-11-11 15:19:27.000000000 -0500
60495 ++++ linux-3.1.1/include/linux/slab_def.h 2011-11-16 18:39:08.000000000 -0500
60496 +@@ -68,10 +68,10 @@ struct kmem_cache {
60497 + unsigned long node_allocs;
60498 + unsigned long node_frees;
60499 + unsigned long node_overflow;
60500 +- atomic_t allochit;
60501 +- atomic_t allocmiss;
60502 +- atomic_t freehit;
60503 +- atomic_t freemiss;
60504 ++ atomic_unchecked_t allochit;
60505 ++ atomic_unchecked_t allocmiss;
60506 ++ atomic_unchecked_t freehit;
60507 ++ atomic_unchecked_t freemiss;
60508 +
60509 + /*
60510 + * If debugging is enabled, then the allocator can add additional
60511 +diff -urNp linux-3.1.1/include/linux/slab.h linux-3.1.1/include/linux/slab.h
60512 +--- linux-3.1.1/include/linux/slab.h 2011-11-11 15:19:27.000000000 -0500
60513 ++++ linux-3.1.1/include/linux/slab.h 2011-11-16 18:39:08.000000000 -0500
60514 +@@ -11,12 +11,20 @@
60515 +
60516 + #include <linux/gfp.h>
60517 + #include <linux/types.h>
60518 ++#include <linux/err.h>
60519 +
60520 + /*
60521 + * Flags to pass to kmem_cache_create().
60522 + * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60523 + */
60524 + #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60525 ++
60526 ++#ifdef CONFIG_PAX_USERCOPY
60527 ++#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60528 ++#else
60529 ++#define SLAB_USERCOPY 0x00000000UL
60530 ++#endif
60531 ++
60532 + #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60533 + #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60534 + #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60535 +@@ -87,10 +95,13 @@
60536 + * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60537 + * Both make kfree a no-op.
60538 + */
60539 +-#define ZERO_SIZE_PTR ((void *)16)
60540 ++#define ZERO_SIZE_PTR \
60541 ++({ \
60542 ++ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60543 ++ (void *)(-MAX_ERRNO-1L); \
60544 ++})
60545 +
60546 +-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60547 +- (unsigned long)ZERO_SIZE_PTR)
60548 ++#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60549 +
60550 + /*
60551 + * struct kmem_cache related prototypes
60552 +@@ -161,6 +172,7 @@ void * __must_check krealloc(const void
60553 + void kfree(const void *);
60554 + void kzfree(const void *);
60555 + size_t ksize(const void *);
60556 ++void check_object_size(const void *ptr, unsigned long n, bool to);
60557 +
60558 + /*
60559 + * Allocator specific definitions. These are mainly used to establish optimized
60560 +@@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t
60561 +
60562 + void __init kmem_cache_init_late(void);
60563 +
60564 ++#define kmalloc(x, y) \
60565 ++({ \
60566 ++ void *___retval; \
60567 ++ intoverflow_t ___x = (intoverflow_t)x; \
60568 ++ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
60569 ++ ___retval = NULL; \
60570 ++ else \
60571 ++ ___retval = kmalloc((size_t)___x, (y)); \
60572 ++ ___retval; \
60573 ++})
60574 ++
60575 ++#define kmalloc_node(x, y, z) \
60576 ++({ \
60577 ++ void *___retval; \
60578 ++ intoverflow_t ___x = (intoverflow_t)x; \
60579 ++ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60580 ++ ___retval = NULL; \
60581 ++ else \
60582 ++ ___retval = kmalloc_node((size_t)___x, (y), (z));\
60583 ++ ___retval; \
60584 ++})
60585 ++
60586 ++#define kzalloc(x, y) \
60587 ++({ \
60588 ++ void *___retval; \
60589 ++ intoverflow_t ___x = (intoverflow_t)x; \
60590 ++ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
60591 ++ ___retval = NULL; \
60592 ++ else \
60593 ++ ___retval = kzalloc((size_t)___x, (y)); \
60594 ++ ___retval; \
60595 ++})
60596 ++
60597 ++#define __krealloc(x, y, z) \
60598 ++({ \
60599 ++ void *___retval; \
60600 ++ intoverflow_t ___y = (intoverflow_t)y; \
60601 ++ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
60602 ++ ___retval = NULL; \
60603 ++ else \
60604 ++ ___retval = __krealloc((x), (size_t)___y, (z)); \
60605 ++ ___retval; \
60606 ++})
60607 ++
60608 ++#define krealloc(x, y, z) \
60609 ++({ \
60610 ++ void *___retval; \
60611 ++ intoverflow_t ___y = (intoverflow_t)y; \
60612 ++ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
60613 ++ ___retval = NULL; \
60614 ++ else \
60615 ++ ___retval = krealloc((x), (size_t)___y, (z)); \
60616 ++ ___retval; \
60617 ++})
60618 ++
60619 + #endif /* _LINUX_SLAB_H */
60620 +diff -urNp linux-3.1.1/include/linux/slub_def.h linux-3.1.1/include/linux/slub_def.h
60621 +--- linux-3.1.1/include/linux/slub_def.h 2011-11-11 15:19:27.000000000 -0500
60622 ++++ linux-3.1.1/include/linux/slub_def.h 2011-11-16 18:39:08.000000000 -0500
60623 +@@ -85,7 +85,7 @@ struct kmem_cache {
60624 + struct kmem_cache_order_objects max;
60625 + struct kmem_cache_order_objects min;
60626 + gfp_t allocflags; /* gfp flags to use on each alloc */
60627 +- int refcount; /* Refcount for slab cache destroy */
60628 ++ atomic_t refcount; /* Refcount for slab cache destroy */
60629 + void (*ctor)(void *);
60630 + int inuse; /* Offset to metadata */
60631 + int align; /* Alignment */
60632 +@@ -211,7 +211,7 @@ static __always_inline struct kmem_cache
60633 + }
60634 +
60635 + void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60636 +-void *__kmalloc(size_t size, gfp_t flags);
60637 ++void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60638 +
60639 + static __always_inline void *
60640 + kmalloc_order(size_t size, gfp_t flags, unsigned int order)
60641 +diff -urNp linux-3.1.1/include/linux/sonet.h linux-3.1.1/include/linux/sonet.h
60642 +--- linux-3.1.1/include/linux/sonet.h 2011-11-11 15:19:27.000000000 -0500
60643 ++++ linux-3.1.1/include/linux/sonet.h 2011-11-16 18:39:08.000000000 -0500
60644 +@@ -61,7 +61,7 @@ struct sonet_stats {
60645 + #include <linux/atomic.h>
60646 +
60647 + struct k_sonet_stats {
60648 +-#define __HANDLE_ITEM(i) atomic_t i
60649 ++#define __HANDLE_ITEM(i) atomic_unchecked_t i
60650 + __SONET_ITEMS
60651 + #undef __HANDLE_ITEM
60652 + };
60653 +diff -urNp linux-3.1.1/include/linux/sunrpc/clnt.h linux-3.1.1/include/linux/sunrpc/clnt.h
60654 +--- linux-3.1.1/include/linux/sunrpc/clnt.h 2011-11-11 15:19:27.000000000 -0500
60655 ++++ linux-3.1.1/include/linux/sunrpc/clnt.h 2011-11-16 18:39:08.000000000 -0500
60656 +@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
60657 + {
60658 + switch (sap->sa_family) {
60659 + case AF_INET:
60660 +- return ntohs(((struct sockaddr_in *)sap)->sin_port);
60661 ++ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60662 + case AF_INET6:
60663 +- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60664 ++ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60665 + }
60666 + return 0;
60667 + }
60668 +@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
60669 + static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60670 + const struct sockaddr *src)
60671 + {
60672 +- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60673 ++ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60674 + struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60675 +
60676 + dsin->sin_family = ssin->sin_family;
60677 +@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
60678 + if (sa->sa_family != AF_INET6)
60679 + return 0;
60680 +
60681 +- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60682 ++ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60683 + }
60684 +
60685 + #endif /* __KERNEL__ */
60686 +diff -urNp linux-3.1.1/include/linux/sunrpc/sched.h linux-3.1.1/include/linux/sunrpc/sched.h
60687 +--- linux-3.1.1/include/linux/sunrpc/sched.h 2011-11-11 15:19:27.000000000 -0500
60688 ++++ linux-3.1.1/include/linux/sunrpc/sched.h 2011-11-16 18:39:08.000000000 -0500
60689 +@@ -105,6 +105,7 @@ struct rpc_call_ops {
60690 + void (*rpc_call_done)(struct rpc_task *, void *);
60691 + void (*rpc_release)(void *);
60692 + };
60693 ++typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
60694 +
60695 + struct rpc_task_setup {
60696 + struct rpc_task *task;
60697 +diff -urNp linux-3.1.1/include/linux/sunrpc/svc_rdma.h linux-3.1.1/include/linux/sunrpc/svc_rdma.h
60698 +--- linux-3.1.1/include/linux/sunrpc/svc_rdma.h 2011-11-11 15:19:27.000000000 -0500
60699 ++++ linux-3.1.1/include/linux/sunrpc/svc_rdma.h 2011-11-16 18:39:08.000000000 -0500
60700 +@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60701 + extern unsigned int svcrdma_max_requests;
60702 + extern unsigned int svcrdma_max_req_size;
60703 +
60704 +-extern atomic_t rdma_stat_recv;
60705 +-extern atomic_t rdma_stat_read;
60706 +-extern atomic_t rdma_stat_write;
60707 +-extern atomic_t rdma_stat_sq_starve;
60708 +-extern atomic_t rdma_stat_rq_starve;
60709 +-extern atomic_t rdma_stat_rq_poll;
60710 +-extern atomic_t rdma_stat_rq_prod;
60711 +-extern atomic_t rdma_stat_sq_poll;
60712 +-extern atomic_t rdma_stat_sq_prod;
60713 ++extern atomic_unchecked_t rdma_stat_recv;
60714 ++extern atomic_unchecked_t rdma_stat_read;
60715 ++extern atomic_unchecked_t rdma_stat_write;
60716 ++extern atomic_unchecked_t rdma_stat_sq_starve;
60717 ++extern atomic_unchecked_t rdma_stat_rq_starve;
60718 ++extern atomic_unchecked_t rdma_stat_rq_poll;
60719 ++extern atomic_unchecked_t rdma_stat_rq_prod;
60720 ++extern atomic_unchecked_t rdma_stat_sq_poll;
60721 ++extern atomic_unchecked_t rdma_stat_sq_prod;
60722 +
60723 + #define RPCRDMA_VERSION 1
60724 +
60725 +diff -urNp linux-3.1.1/include/linux/sysctl.h linux-3.1.1/include/linux/sysctl.h
60726 +--- linux-3.1.1/include/linux/sysctl.h 2011-11-11 15:19:27.000000000 -0500
60727 ++++ linux-3.1.1/include/linux/sysctl.h 2011-11-16 18:40:31.000000000 -0500
60728 +@@ -155,7 +155,11 @@ enum
60729 + KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60730 + };
60731 +
60732 +-
60733 ++#ifdef CONFIG_PAX_SOFTMODE
60734 ++enum {
60735 ++ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60736 ++};
60737 ++#endif
60738 +
60739 + /* CTL_VM names: */
60740 + enum
60741 +@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
60742 +
60743 + extern int proc_dostring(struct ctl_table *, int,
60744 + void __user *, size_t *, loff_t *);
60745 ++extern int proc_dostring_modpriv(struct ctl_table *, int,
60746 ++ void __user *, size_t *, loff_t *);
60747 + extern int proc_dointvec(struct ctl_table *, int,
60748 + void __user *, size_t *, loff_t *);
60749 + extern int proc_dointvec_minmax(struct ctl_table *, int,
60750 +diff -urNp linux-3.1.1/include/linux/tty_ldisc.h linux-3.1.1/include/linux/tty_ldisc.h
60751 +--- linux-3.1.1/include/linux/tty_ldisc.h 2011-11-11 15:19:27.000000000 -0500
60752 ++++ linux-3.1.1/include/linux/tty_ldisc.h 2011-11-16 18:39:08.000000000 -0500
60753 +@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
60754 +
60755 + struct module *owner;
60756 +
60757 +- int refcount;
60758 ++ atomic_t refcount;
60759 + };
60760 +
60761 + struct tty_ldisc {
60762 +diff -urNp linux-3.1.1/include/linux/types.h linux-3.1.1/include/linux/types.h
60763 +--- linux-3.1.1/include/linux/types.h 2011-11-11 15:19:27.000000000 -0500
60764 ++++ linux-3.1.1/include/linux/types.h 2011-11-16 18:39:08.000000000 -0500
60765 +@@ -213,10 +213,26 @@ typedef struct {
60766 + int counter;
60767 + } atomic_t;
60768 +
60769 ++#ifdef CONFIG_PAX_REFCOUNT
60770 ++typedef struct {
60771 ++ int counter;
60772 ++} atomic_unchecked_t;
60773 ++#else
60774 ++typedef atomic_t atomic_unchecked_t;
60775 ++#endif
60776 ++
60777 + #ifdef CONFIG_64BIT
60778 + typedef struct {
60779 + long counter;
60780 + } atomic64_t;
60781 ++
60782 ++#ifdef CONFIG_PAX_REFCOUNT
60783 ++typedef struct {
60784 ++ long counter;
60785 ++} atomic64_unchecked_t;
60786 ++#else
60787 ++typedef atomic64_t atomic64_unchecked_t;
60788 ++#endif
60789 + #endif
60790 +
60791 + struct list_head {
60792 +diff -urNp linux-3.1.1/include/linux/uaccess.h linux-3.1.1/include/linux/uaccess.h
60793 +--- linux-3.1.1/include/linux/uaccess.h 2011-11-11 15:19:27.000000000 -0500
60794 ++++ linux-3.1.1/include/linux/uaccess.h 2011-11-16 18:39:08.000000000 -0500
60795 +@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60796 + long ret; \
60797 + mm_segment_t old_fs = get_fs(); \
60798 + \
60799 +- set_fs(KERNEL_DS); \
60800 + pagefault_disable(); \
60801 +- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60802 +- pagefault_enable(); \
60803 ++ set_fs(KERNEL_DS); \
60804 ++ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
60805 + set_fs(old_fs); \
60806 ++ pagefault_enable(); \
60807 + ret; \
60808 + })
60809 +
60810 +diff -urNp linux-3.1.1/include/linux/unaligned/access_ok.h linux-3.1.1/include/linux/unaligned/access_ok.h
60811 +--- linux-3.1.1/include/linux/unaligned/access_ok.h 2011-11-11 15:19:27.000000000 -0500
60812 ++++ linux-3.1.1/include/linux/unaligned/access_ok.h 2011-11-16 18:39:08.000000000 -0500
60813 +@@ -6,32 +6,32 @@
60814 +
60815 + static inline u16 get_unaligned_le16(const void *p)
60816 + {
60817 +- return le16_to_cpup((__le16 *)p);
60818 ++ return le16_to_cpup((const __le16 *)p);
60819 + }
60820 +
60821 + static inline u32 get_unaligned_le32(const void *p)
60822 + {
60823 +- return le32_to_cpup((__le32 *)p);
60824 ++ return le32_to_cpup((const __le32 *)p);
60825 + }
60826 +
60827 + static inline u64 get_unaligned_le64(const void *p)
60828 + {
60829 +- return le64_to_cpup((__le64 *)p);
60830 ++ return le64_to_cpup((const __le64 *)p);
60831 + }
60832 +
60833 + static inline u16 get_unaligned_be16(const void *p)
60834 + {
60835 +- return be16_to_cpup((__be16 *)p);
60836 ++ return be16_to_cpup((const __be16 *)p);
60837 + }
60838 +
60839 + static inline u32 get_unaligned_be32(const void *p)
60840 + {
60841 +- return be32_to_cpup((__be32 *)p);
60842 ++ return be32_to_cpup((const __be32 *)p);
60843 + }
60844 +
60845 + static inline u64 get_unaligned_be64(const void *p)
60846 + {
60847 +- return be64_to_cpup((__be64 *)p);
60848 ++ return be64_to_cpup((const __be64 *)p);
60849 + }
60850 +
60851 + static inline void put_unaligned_le16(u16 val, void *p)
60852 +diff -urNp linux-3.1.1/include/linux/vermagic.h linux-3.1.1/include/linux/vermagic.h
60853 +--- linux-3.1.1/include/linux/vermagic.h 2011-11-11 15:19:27.000000000 -0500
60854 ++++ linux-3.1.1/include/linux/vermagic.h 2011-11-16 18:54:54.000000000 -0500
60855 +@@ -26,9 +26,35 @@
60856 + #define MODULE_ARCH_VERMAGIC ""
60857 + #endif
60858 +
60859 ++#ifdef CONFIG_PAX_REFCOUNT
60860 ++#define MODULE_PAX_REFCOUNT "REFCOUNT "
60861 ++#else
60862 ++#define MODULE_PAX_REFCOUNT ""
60863 ++#endif
60864 ++
60865 ++#ifdef CONSTIFY_PLUGIN
60866 ++#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
60867 ++#else
60868 ++#define MODULE_CONSTIFY_PLUGIN ""
60869 ++#endif
60870 ++
60871 ++#ifdef STACKLEAK_PLUGIN
60872 ++#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
60873 ++#else
60874 ++#define MODULE_STACKLEAK_PLUGIN ""
60875 ++#endif
60876 ++
60877 ++#ifdef CONFIG_GRKERNSEC
60878 ++#define MODULE_GRSEC "GRSEC "
60879 ++#else
60880 ++#define MODULE_GRSEC ""
60881 ++#endif
60882 ++
60883 + #define VERMAGIC_STRING \
60884 + UTS_RELEASE " " \
60885 + MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
60886 + MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
60887 +- MODULE_ARCH_VERMAGIC
60888 ++ MODULE_ARCH_VERMAGIC \
60889 ++ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
60890 ++ MODULE_GRSEC
60891 +
60892 +diff -urNp linux-3.1.1/include/linux/vmalloc.h linux-3.1.1/include/linux/vmalloc.h
60893 +--- linux-3.1.1/include/linux/vmalloc.h 2011-11-11 15:19:27.000000000 -0500
60894 ++++ linux-3.1.1/include/linux/vmalloc.h 2011-11-16 18:39:08.000000000 -0500
60895 +@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining
60896 + #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
60897 + #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
60898 + #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
60899 ++
60900 ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
60901 ++#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
60902 ++#endif
60903 ++
60904 + /* bits [20..32] reserved for arch specific ioremap internals */
60905 +
60906 + /*
60907 +@@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
60908 + # endif
60909 + #endif
60910 +
60911 ++#define vmalloc(x) \
60912 ++({ \
60913 ++ void *___retval; \
60914 ++ intoverflow_t ___x = (intoverflow_t)x; \
60915 ++ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
60916 ++ ___retval = NULL; \
60917 ++ else \
60918 ++ ___retval = vmalloc((unsigned long)___x); \
60919 ++ ___retval; \
60920 ++})
60921 ++
60922 ++#define vzalloc(x) \
60923 ++({ \
60924 ++ void *___retval; \
60925 ++ intoverflow_t ___x = (intoverflow_t)x; \
60926 ++ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
60927 ++ ___retval = NULL; \
60928 ++ else \
60929 ++ ___retval = vzalloc((unsigned long)___x); \
60930 ++ ___retval; \
60931 ++})
60932 ++
60933 ++#define __vmalloc(x, y, z) \
60934 ++({ \
60935 ++ void *___retval; \
60936 ++ intoverflow_t ___x = (intoverflow_t)x; \
60937 ++ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
60938 ++ ___retval = NULL; \
60939 ++ else \
60940 ++ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
60941 ++ ___retval; \
60942 ++})
60943 ++
60944 ++#define vmalloc_user(x) \
60945 ++({ \
60946 ++ void *___retval; \
60947 ++ intoverflow_t ___x = (intoverflow_t)x; \
60948 ++ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
60949 ++ ___retval = NULL; \
60950 ++ else \
60951 ++ ___retval = vmalloc_user((unsigned long)___x); \
60952 ++ ___retval; \
60953 ++})
60954 ++
60955 ++#define vmalloc_exec(x) \
60956 ++({ \
60957 ++ void *___retval; \
60958 ++ intoverflow_t ___x = (intoverflow_t)x; \
60959 ++ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
60960 ++ ___retval = NULL; \
60961 ++ else \
60962 ++ ___retval = vmalloc_exec((unsigned long)___x); \
60963 ++ ___retval; \
60964 ++})
60965 ++
60966 ++#define vmalloc_node(x, y) \
60967 ++({ \
60968 ++ void *___retval; \
60969 ++ intoverflow_t ___x = (intoverflow_t)x; \
60970 ++ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
60971 ++ ___retval = NULL; \
60972 ++ else \
60973 ++ ___retval = vmalloc_node((unsigned long)___x, (y));\
60974 ++ ___retval; \
60975 ++})
60976 ++
60977 ++#define vzalloc_node(x, y) \
60978 ++({ \
60979 ++ void *___retval; \
60980 ++ intoverflow_t ___x = (intoverflow_t)x; \
60981 ++ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
60982 ++ ___retval = NULL; \
60983 ++ else \
60984 ++ ___retval = vzalloc_node((unsigned long)___x, (y));\
60985 ++ ___retval; \
60986 ++})
60987 ++
60988 ++#define vmalloc_32(x) \
60989 ++({ \
60990 ++ void *___retval; \
60991 ++ intoverflow_t ___x = (intoverflow_t)x; \
60992 ++ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
60993 ++ ___retval = NULL; \
60994 ++ else \
60995 ++ ___retval = vmalloc_32((unsigned long)___x); \
60996 ++ ___retval; \
60997 ++})
60998 ++
60999 ++#define vmalloc_32_user(x) \
61000 ++({ \
61001 ++void *___retval; \
61002 ++ intoverflow_t ___x = (intoverflow_t)x; \
61003 ++ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61004 ++ ___retval = NULL; \
61005 ++ else \
61006 ++ ___retval = vmalloc_32_user((unsigned long)___x);\
61007 ++ ___retval; \
61008 ++})
61009 ++
61010 + #endif /* _LINUX_VMALLOC_H */
61011 +diff -urNp linux-3.1.1/include/linux/vmstat.h linux-3.1.1/include/linux/vmstat.h
61012 +--- linux-3.1.1/include/linux/vmstat.h 2011-11-11 15:19:27.000000000 -0500
61013 ++++ linux-3.1.1/include/linux/vmstat.h 2011-11-16 18:39:08.000000000 -0500
61014 +@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(in
61015 + /*
61016 + * Zone based page accounting with per cpu differentials.
61017 + */
61018 +-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61019 ++extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61020 +
61021 + static inline void zone_page_state_add(long x, struct zone *zone,
61022 + enum zone_stat_item item)
61023 + {
61024 +- atomic_long_add(x, &zone->vm_stat[item]);
61025 +- atomic_long_add(x, &vm_stat[item]);
61026 ++ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61027 ++ atomic_long_add_unchecked(x, &vm_stat[item]);
61028 + }
61029 +
61030 + static inline unsigned long global_page_state(enum zone_stat_item item)
61031 + {
61032 +- long x = atomic_long_read(&vm_stat[item]);
61033 ++ long x = atomic_long_read_unchecked(&vm_stat[item]);
61034 + #ifdef CONFIG_SMP
61035 + if (x < 0)
61036 + x = 0;
61037 +@@ -109,7 +109,7 @@ static inline unsigned long global_page_
61038 + static inline unsigned long zone_page_state(struct zone *zone,
61039 + enum zone_stat_item item)
61040 + {
61041 +- long x = atomic_long_read(&zone->vm_stat[item]);
61042 ++ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61043 + #ifdef CONFIG_SMP
61044 + if (x < 0)
61045 + x = 0;
61046 +@@ -126,7 +126,7 @@ static inline unsigned long zone_page_st
61047 + static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61048 + enum zone_stat_item item)
61049 + {
61050 +- long x = atomic_long_read(&zone->vm_stat[item]);
61051 ++ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61052 +
61053 + #ifdef CONFIG_SMP
61054 + int cpu;
61055 +@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state
61056 +
61057 + static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61058 + {
61059 +- atomic_long_inc(&zone->vm_stat[item]);
61060 +- atomic_long_inc(&vm_stat[item]);
61061 ++ atomic_long_inc_unchecked(&zone->vm_stat[item]);
61062 ++ atomic_long_inc_unchecked(&vm_stat[item]);
61063 + }
61064 +
61065 + static inline void __inc_zone_page_state(struct page *page,
61066 +@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state
61067 +
61068 + static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61069 + {
61070 +- atomic_long_dec(&zone->vm_stat[item]);
61071 +- atomic_long_dec(&vm_stat[item]);
61072 ++ atomic_long_dec_unchecked(&zone->vm_stat[item]);
61073 ++ atomic_long_dec_unchecked(&vm_stat[item]);
61074 + }
61075 +
61076 + static inline void __dec_zone_page_state(struct page *page,
61077 +diff -urNp linux-3.1.1/include/media/saa7146_vv.h linux-3.1.1/include/media/saa7146_vv.h
61078 +--- linux-3.1.1/include/media/saa7146_vv.h 2011-11-11 15:19:27.000000000 -0500
61079 ++++ linux-3.1.1/include/media/saa7146_vv.h 2011-11-16 18:39:08.000000000 -0500
61080 +@@ -163,7 +163,7 @@ struct saa7146_ext_vv
61081 + int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
61082 +
61083 + /* the extension can override this */
61084 +- struct v4l2_ioctl_ops ops;
61085 ++ v4l2_ioctl_ops_no_const ops;
61086 + /* pointer to the saa7146 core ops */
61087 + const struct v4l2_ioctl_ops *core_ops;
61088 +
61089 +diff -urNp linux-3.1.1/include/media/v4l2-dev.h linux-3.1.1/include/media/v4l2-dev.h
61090 +--- linux-3.1.1/include/media/v4l2-dev.h 2011-11-11 15:19:27.000000000 -0500
61091 ++++ linux-3.1.1/include/media/v4l2-dev.h 2011-11-16 18:39:08.000000000 -0500
61092 +@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
61093 +
61094 +
61095 + struct v4l2_file_operations {
61096 +- struct module *owner;
61097 ++ struct module * const owner;
61098 + ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61099 + ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61100 + unsigned int (*poll) (struct file *, struct poll_table_struct *);
61101 +@@ -68,6 +68,7 @@ struct v4l2_file_operations {
61102 + int (*open) (struct file *);
61103 + int (*release) (struct file *);
61104 + };
61105 ++typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61106 +
61107 + /*
61108 + * Newer version of video_device, handled by videodev2.c
61109 +diff -urNp linux-3.1.1/include/media/v4l2-ioctl.h linux-3.1.1/include/media/v4l2-ioctl.h
61110 +--- linux-3.1.1/include/media/v4l2-ioctl.h 2011-11-11 15:19:27.000000000 -0500
61111 ++++ linux-3.1.1/include/media/v4l2-ioctl.h 2011-11-17 18:44:20.000000000 -0500
61112 +@@ -272,7 +272,7 @@ struct v4l2_ioctl_ops {
61113 + long (*vidioc_default) (struct file *file, void *fh,
61114 + bool valid_prio, int cmd, void *arg);
61115 + };
61116 +-
61117 ++typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61118 +
61119 + /* v4l debugging and diagnostics */
61120 +
61121 +diff -urNp linux-3.1.1/include/net/caif/caif_hsi.h linux-3.1.1/include/net/caif/caif_hsi.h
61122 +--- linux-3.1.1/include/net/caif/caif_hsi.h 2011-11-11 15:19:27.000000000 -0500
61123 ++++ linux-3.1.1/include/net/caif/caif_hsi.h 2011-11-16 18:39:08.000000000 -0500
61124 +@@ -94,7 +94,7 @@ struct cfhsi_drv {
61125 + void (*rx_done_cb) (struct cfhsi_drv *drv);
61126 + void (*wake_up_cb) (struct cfhsi_drv *drv);
61127 + void (*wake_down_cb) (struct cfhsi_drv *drv);
61128 +-};
61129 ++} __no_const;
61130 +
61131 + /* Structure implemented by HSI device. */
61132 + struct cfhsi_dev {
61133 +diff -urNp linux-3.1.1/include/net/caif/cfctrl.h linux-3.1.1/include/net/caif/cfctrl.h
61134 +--- linux-3.1.1/include/net/caif/cfctrl.h 2011-11-11 15:19:27.000000000 -0500
61135 ++++ linux-3.1.1/include/net/caif/cfctrl.h 2011-11-16 18:39:08.000000000 -0500
61136 +@@ -52,7 +52,7 @@ struct cfctrl_rsp {
61137 + void (*radioset_rsp)(void);
61138 + void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61139 + struct cflayer *client_layer);
61140 +-};
61141 ++} __no_const;
61142 +
61143 + /* Link Setup Parameters for CAIF-Links. */
61144 + struct cfctrl_link_param {
61145 +@@ -101,8 +101,8 @@ struct cfctrl_request_info {
61146 + struct cfctrl {
61147 + struct cfsrvl serv;
61148 + struct cfctrl_rsp res;
61149 +- atomic_t req_seq_no;
61150 +- atomic_t rsp_seq_no;
61151 ++ atomic_unchecked_t req_seq_no;
61152 ++ atomic_unchecked_t rsp_seq_no;
61153 + struct list_head list;
61154 + /* Protects from simultaneous access to first_req list */
61155 + spinlock_t info_list_lock;
61156 +diff -urNp linux-3.1.1/include/net/flow.h linux-3.1.1/include/net/flow.h
61157 +--- linux-3.1.1/include/net/flow.h 2011-11-11 15:19:27.000000000 -0500
61158 ++++ linux-3.1.1/include/net/flow.h 2011-11-16 18:39:08.000000000 -0500
61159 +@@ -207,6 +207,6 @@ extern struct flow_cache_object *flow_ca
61160 + u8 dir, flow_resolve_t resolver, void *ctx);
61161 +
61162 + extern void flow_cache_flush(void);
61163 +-extern atomic_t flow_cache_genid;
61164 ++extern atomic_unchecked_t flow_cache_genid;
61165 +
61166 + #endif
61167 +diff -urNp linux-3.1.1/include/net/inetpeer.h linux-3.1.1/include/net/inetpeer.h
61168 +--- linux-3.1.1/include/net/inetpeer.h 2011-11-11 15:19:27.000000000 -0500
61169 ++++ linux-3.1.1/include/net/inetpeer.h 2011-11-16 18:39:08.000000000 -0500
61170 +@@ -47,8 +47,8 @@ struct inet_peer {
61171 + */
61172 + union {
61173 + struct {
61174 +- atomic_t rid; /* Frag reception counter */
61175 +- atomic_t ip_id_count; /* IP ID for the next packet */
61176 ++ atomic_unchecked_t rid; /* Frag reception counter */
61177 ++ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61178 + __u32 tcp_ts;
61179 + __u32 tcp_ts_stamp;
61180 + };
61181 +@@ -112,11 +112,11 @@ static inline int inet_getid(struct inet
61182 + more++;
61183 + inet_peer_refcheck(p);
61184 + do {
61185 +- old = atomic_read(&p->ip_id_count);
61186 ++ old = atomic_read_unchecked(&p->ip_id_count);
61187 + new = old + more;
61188 + if (!new)
61189 + new = 1;
61190 +- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
61191 ++ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
61192 + return new;
61193 + }
61194 +
61195 +diff -urNp linux-3.1.1/include/net/ip_fib.h linux-3.1.1/include/net/ip_fib.h
61196 +--- linux-3.1.1/include/net/ip_fib.h 2011-11-11 15:19:27.000000000 -0500
61197 ++++ linux-3.1.1/include/net/ip_fib.h 2011-11-16 18:39:08.000000000 -0500
61198 +@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
61199 +
61200 + #define FIB_RES_SADDR(net, res) \
61201 + ((FIB_RES_NH(res).nh_saddr_genid == \
61202 +- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61203 ++ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61204 + FIB_RES_NH(res).nh_saddr : \
61205 + fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61206 + #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61207 +diff -urNp linux-3.1.1/include/net/ip_vs.h linux-3.1.1/include/net/ip_vs.h
61208 +--- linux-3.1.1/include/net/ip_vs.h 2011-11-11 15:19:27.000000000 -0500
61209 ++++ linux-3.1.1/include/net/ip_vs.h 2011-11-16 18:39:08.000000000 -0500
61210 +@@ -509,7 +509,7 @@ struct ip_vs_conn {
61211 + struct ip_vs_conn *control; /* Master control connection */
61212 + atomic_t n_control; /* Number of controlled ones */
61213 + struct ip_vs_dest *dest; /* real server */
61214 +- atomic_t in_pkts; /* incoming packet counter */
61215 ++ atomic_unchecked_t in_pkts; /* incoming packet counter */
61216 +
61217 + /* packet transmitter for different forwarding methods. If it
61218 + mangles the packet, it must return NF_DROP or better NF_STOLEN,
61219 +@@ -647,7 +647,7 @@ struct ip_vs_dest {
61220 + __be16 port; /* port number of the server */
61221 + union nf_inet_addr addr; /* IP address of the server */
61222 + volatile unsigned flags; /* dest status flags */
61223 +- atomic_t conn_flags; /* flags to copy to conn */
61224 ++ atomic_unchecked_t conn_flags; /* flags to copy to conn */
61225 + atomic_t weight; /* server weight */
61226 +
61227 + atomic_t refcnt; /* reference counter */
61228 +diff -urNp linux-3.1.1/include/net/irda/ircomm_core.h linux-3.1.1/include/net/irda/ircomm_core.h
61229 +--- linux-3.1.1/include/net/irda/ircomm_core.h 2011-11-11 15:19:27.000000000 -0500
61230 ++++ linux-3.1.1/include/net/irda/ircomm_core.h 2011-11-16 18:39:08.000000000 -0500
61231 +@@ -51,7 +51,7 @@ typedef struct {
61232 + int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61233 + int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61234 + struct ircomm_info *);
61235 +-} call_t;
61236 ++} __no_const call_t;
61237 +
61238 + struct ircomm_cb {
61239 + irda_queue_t queue;
61240 +diff -urNp linux-3.1.1/include/net/irda/ircomm_tty.h linux-3.1.1/include/net/irda/ircomm_tty.h
61241 +--- linux-3.1.1/include/net/irda/ircomm_tty.h 2011-11-11 15:19:27.000000000 -0500
61242 ++++ linux-3.1.1/include/net/irda/ircomm_tty.h 2011-11-16 18:39:08.000000000 -0500
61243 +@@ -35,6 +35,7 @@
61244 + #include <linux/termios.h>
61245 + #include <linux/timer.h>
61246 + #include <linux/tty.h> /* struct tty_struct */
61247 ++#include <asm/local.h>
61248 +
61249 + #include <net/irda/irias_object.h>
61250 + #include <net/irda/ircomm_core.h>
61251 +@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61252 + unsigned short close_delay;
61253 + unsigned short closing_wait; /* time to wait before closing */
61254 +
61255 +- int open_count;
61256 +- int blocked_open; /* # of blocked opens */
61257 ++ local_t open_count;
61258 ++ local_t blocked_open; /* # of blocked opens */
61259 +
61260 + /* Protect concurent access to :
61261 + * o self->open_count
61262 +diff -urNp linux-3.1.1/include/net/iucv/af_iucv.h linux-3.1.1/include/net/iucv/af_iucv.h
61263 +--- linux-3.1.1/include/net/iucv/af_iucv.h 2011-11-11 15:19:27.000000000 -0500
61264 ++++ linux-3.1.1/include/net/iucv/af_iucv.h 2011-11-16 18:39:08.000000000 -0500
61265 +@@ -87,7 +87,7 @@ struct iucv_sock {
61266 + struct iucv_sock_list {
61267 + struct hlist_head head;
61268 + rwlock_t lock;
61269 +- atomic_t autobind_name;
61270 ++ atomic_unchecked_t autobind_name;
61271 + };
61272 +
61273 + unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61274 +diff -urNp linux-3.1.1/include/net/lapb.h linux-3.1.1/include/net/lapb.h
61275 +--- linux-3.1.1/include/net/lapb.h 2011-11-11 15:19:27.000000000 -0500
61276 ++++ linux-3.1.1/include/net/lapb.h 2011-11-16 18:39:08.000000000 -0500
61277 +@@ -95,7 +95,7 @@ struct lapb_cb {
61278 + struct sk_buff_head write_queue;
61279 + struct sk_buff_head ack_queue;
61280 + unsigned char window;
61281 +- struct lapb_register_struct callbacks;
61282 ++ struct lapb_register_struct *callbacks;
61283 +
61284 + /* FRMR control information */
61285 + struct lapb_frame frmr_data;
61286 +diff -urNp linux-3.1.1/include/net/neighbour.h linux-3.1.1/include/net/neighbour.h
61287 +--- linux-3.1.1/include/net/neighbour.h 2011-11-11 15:19:27.000000000 -0500
61288 ++++ linux-3.1.1/include/net/neighbour.h 2011-11-16 18:39:08.000000000 -0500
61289 +@@ -122,7 +122,7 @@ struct neigh_ops {
61290 + void (*error_report)(struct neighbour *, struct sk_buff *);
61291 + int (*output)(struct neighbour *, struct sk_buff *);
61292 + int (*connected_output)(struct neighbour *, struct sk_buff *);
61293 +-};
61294 ++} __do_const;
61295 +
61296 + struct pneigh_entry {
61297 + struct pneigh_entry *next;
61298 +diff -urNp linux-3.1.1/include/net/netlink.h linux-3.1.1/include/net/netlink.h
61299 +--- linux-3.1.1/include/net/netlink.h 2011-11-11 15:19:27.000000000 -0500
61300 ++++ linux-3.1.1/include/net/netlink.h 2011-11-16 18:39:08.000000000 -0500
61301 +@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
61302 + static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61303 + {
61304 + if (mark)
61305 +- skb_trim(skb, (unsigned char *) mark - skb->data);
61306 ++ skb_trim(skb, (const unsigned char *) mark - skb->data);
61307 + }
61308 +
61309 + /**
61310 +diff -urNp linux-3.1.1/include/net/netns/ipv4.h linux-3.1.1/include/net/netns/ipv4.h
61311 +--- linux-3.1.1/include/net/netns/ipv4.h 2011-11-11 15:19:27.000000000 -0500
61312 ++++ linux-3.1.1/include/net/netns/ipv4.h 2011-11-16 18:39:08.000000000 -0500
61313 +@@ -56,8 +56,8 @@ struct netns_ipv4 {
61314 +
61315 + unsigned int sysctl_ping_group_range[2];
61316 +
61317 +- atomic_t rt_genid;
61318 +- atomic_t dev_addr_genid;
61319 ++ atomic_unchecked_t rt_genid;
61320 ++ atomic_unchecked_t dev_addr_genid;
61321 +
61322 + #ifdef CONFIG_IP_MROUTE
61323 + #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61324 +diff -urNp linux-3.1.1/include/net/sctp/sctp.h linux-3.1.1/include/net/sctp/sctp.h
61325 +--- linux-3.1.1/include/net/sctp/sctp.h 2011-11-11 15:19:27.000000000 -0500
61326 ++++ linux-3.1.1/include/net/sctp/sctp.h 2011-11-16 18:39:08.000000000 -0500
61327 +@@ -318,9 +318,9 @@ do { \
61328 +
61329 + #else /* SCTP_DEBUG */
61330 +
61331 +-#define SCTP_DEBUG_PRINTK(whatever...)
61332 +-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61333 +-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61334 ++#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61335 ++#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61336 ++#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61337 + #define SCTP_ENABLE_DEBUG
61338 + #define SCTP_DISABLE_DEBUG
61339 + #define SCTP_ASSERT(expr, str, func)
61340 +diff -urNp linux-3.1.1/include/net/sock.h linux-3.1.1/include/net/sock.h
61341 +--- linux-3.1.1/include/net/sock.h 2011-11-11 15:19:27.000000000 -0500
61342 ++++ linux-3.1.1/include/net/sock.h 2011-11-16 18:39:08.000000000 -0500
61343 +@@ -278,7 +278,7 @@ struct sock {
61344 + #ifdef CONFIG_RPS
61345 + __u32 sk_rxhash;
61346 + #endif
61347 +- atomic_t sk_drops;
61348 ++ atomic_unchecked_t sk_drops;
61349 + int sk_rcvbuf;
61350 +
61351 + struct sk_filter __rcu *sk_filter;
61352 +@@ -1391,7 +1391,7 @@ static inline void sk_nocaps_add(struct
61353 + }
61354 +
61355 + static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
61356 +- char __user *from, char *to,
61357 ++ char __user *from, unsigned char *to,
61358 + int copy, int offset)
61359 + {
61360 + if (skb->ip_summed == CHECKSUM_NONE) {
61361 +diff -urNp linux-3.1.1/include/net/tcp.h linux-3.1.1/include/net/tcp.h
61362 +--- linux-3.1.1/include/net/tcp.h 2011-11-11 15:19:27.000000000 -0500
61363 ++++ linux-3.1.1/include/net/tcp.h 2011-11-16 18:39:08.000000000 -0500
61364 +@@ -1401,8 +1401,8 @@ enum tcp_seq_states {
61365 + struct tcp_seq_afinfo {
61366 + char *name;
61367 + sa_family_t family;
61368 +- struct file_operations seq_fops;
61369 +- struct seq_operations seq_ops;
61370 ++ file_operations_no_const seq_fops;
61371 ++ seq_operations_no_const seq_ops;
61372 + };
61373 +
61374 + struct tcp_iter_state {
61375 +diff -urNp linux-3.1.1/include/net/udp.h linux-3.1.1/include/net/udp.h
61376 +--- linux-3.1.1/include/net/udp.h 2011-11-11 15:19:27.000000000 -0500
61377 ++++ linux-3.1.1/include/net/udp.h 2011-11-16 18:39:08.000000000 -0500
61378 +@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
61379 + char *name;
61380 + sa_family_t family;
61381 + struct udp_table *udp_table;
61382 +- struct file_operations seq_fops;
61383 +- struct seq_operations seq_ops;
61384 ++ file_operations_no_const seq_fops;
61385 ++ seq_operations_no_const seq_ops;
61386 + };
61387 +
61388 + struct udp_iter_state {
61389 +diff -urNp linux-3.1.1/include/net/xfrm.h linux-3.1.1/include/net/xfrm.h
61390 +--- linux-3.1.1/include/net/xfrm.h 2011-11-11 15:19:27.000000000 -0500
61391 ++++ linux-3.1.1/include/net/xfrm.h 2011-11-16 18:39:08.000000000 -0500
61392 +@@ -505,7 +505,7 @@ struct xfrm_policy {
61393 + struct timer_list timer;
61394 +
61395 + struct flow_cache_object flo;
61396 +- atomic_t genid;
61397 ++ atomic_unchecked_t genid;
61398 + u32 priority;
61399 + u32 index;
61400 + struct xfrm_mark mark;
61401 +diff -urNp linux-3.1.1/include/rdma/iw_cm.h linux-3.1.1/include/rdma/iw_cm.h
61402 +--- linux-3.1.1/include/rdma/iw_cm.h 2011-11-11 15:19:27.000000000 -0500
61403 ++++ linux-3.1.1/include/rdma/iw_cm.h 2011-11-16 18:39:08.000000000 -0500
61404 +@@ -120,7 +120,7 @@ struct iw_cm_verbs {
61405 + int backlog);
61406 +
61407 + int (*destroy_listen)(struct iw_cm_id *cm_id);
61408 +-};
61409 ++} __no_const;
61410 +
61411 + /**
61412 + * iw_create_cm_id - Create an IW CM identifier.
61413 +diff -urNp linux-3.1.1/include/scsi/libfc.h linux-3.1.1/include/scsi/libfc.h
61414 +--- linux-3.1.1/include/scsi/libfc.h 2011-11-11 15:19:27.000000000 -0500
61415 ++++ linux-3.1.1/include/scsi/libfc.h 2011-11-16 18:39:08.000000000 -0500
61416 +@@ -758,6 +758,7 @@ struct libfc_function_template {
61417 + */
61418 + void (*disc_stop_final) (struct fc_lport *);
61419 + };
61420 ++typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61421 +
61422 + /**
61423 + * struct fc_disc - Discovery context
61424 +@@ -861,7 +862,7 @@ struct fc_lport {
61425 + struct fc_vport *vport;
61426 +
61427 + /* Operational Information */
61428 +- struct libfc_function_template tt;
61429 ++ libfc_function_template_no_const tt;
61430 + u8 link_up;
61431 + u8 qfull;
61432 + enum fc_lport_state state;
61433 +diff -urNp linux-3.1.1/include/scsi/scsi_device.h linux-3.1.1/include/scsi/scsi_device.h
61434 +--- linux-3.1.1/include/scsi/scsi_device.h 2011-11-11 15:19:27.000000000 -0500
61435 ++++ linux-3.1.1/include/scsi/scsi_device.h 2011-11-16 18:39:08.000000000 -0500
61436 +@@ -161,9 +161,9 @@ struct scsi_device {
61437 + unsigned int max_device_blocked; /* what device_blocked counts down from */
61438 + #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61439 +
61440 +- atomic_t iorequest_cnt;
61441 +- atomic_t iodone_cnt;
61442 +- atomic_t ioerr_cnt;
61443 ++ atomic_unchecked_t iorequest_cnt;
61444 ++ atomic_unchecked_t iodone_cnt;
61445 ++ atomic_unchecked_t ioerr_cnt;
61446 +
61447 + struct device sdev_gendev,
61448 + sdev_dev;
61449 +diff -urNp linux-3.1.1/include/scsi/scsi_transport_fc.h linux-3.1.1/include/scsi/scsi_transport_fc.h
61450 +--- linux-3.1.1/include/scsi/scsi_transport_fc.h 2011-11-11 15:19:27.000000000 -0500
61451 ++++ linux-3.1.1/include/scsi/scsi_transport_fc.h 2011-11-16 18:39:08.000000000 -0500
61452 +@@ -711,7 +711,7 @@ struct fc_function_template {
61453 + unsigned long show_host_system_hostname:1;
61454 +
61455 + unsigned long disable_target_scan:1;
61456 +-};
61457 ++} __do_const;
61458 +
61459 +
61460 + /**
61461 +diff -urNp linux-3.1.1/include/sound/ak4xxx-adda.h linux-3.1.1/include/sound/ak4xxx-adda.h
61462 +--- linux-3.1.1/include/sound/ak4xxx-adda.h 2011-11-11 15:19:27.000000000 -0500
61463 ++++ linux-3.1.1/include/sound/ak4xxx-adda.h 2011-11-16 18:39:08.000000000 -0500
61464 +@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61465 + void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61466 + unsigned char val);
61467 + void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61468 +-};
61469 ++} __no_const;
61470 +
61471 + #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61472 +
61473 +diff -urNp linux-3.1.1/include/sound/hwdep.h linux-3.1.1/include/sound/hwdep.h
61474 +--- linux-3.1.1/include/sound/hwdep.h 2011-11-11 15:19:27.000000000 -0500
61475 ++++ linux-3.1.1/include/sound/hwdep.h 2011-11-16 18:39:08.000000000 -0500
61476 +@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61477 + struct snd_hwdep_dsp_status *status);
61478 + int (*dsp_load)(struct snd_hwdep *hw,
61479 + struct snd_hwdep_dsp_image *image);
61480 +-};
61481 ++} __no_const;
61482 +
61483 + struct snd_hwdep {
61484 + struct snd_card *card;
61485 +diff -urNp linux-3.1.1/include/sound/info.h linux-3.1.1/include/sound/info.h
61486 +--- linux-3.1.1/include/sound/info.h 2011-11-11 15:19:27.000000000 -0500
61487 ++++ linux-3.1.1/include/sound/info.h 2011-11-16 18:39:08.000000000 -0500
61488 +@@ -44,7 +44,7 @@ struct snd_info_entry_text {
61489 + struct snd_info_buffer *buffer);
61490 + void (*write)(struct snd_info_entry *entry,
61491 + struct snd_info_buffer *buffer);
61492 +-};
61493 ++} __no_const;
61494 +
61495 + struct snd_info_entry_ops {
61496 + int (*open)(struct snd_info_entry *entry,
61497 +diff -urNp linux-3.1.1/include/sound/pcm.h linux-3.1.1/include/sound/pcm.h
61498 +--- linux-3.1.1/include/sound/pcm.h 2011-11-11 15:19:27.000000000 -0500
61499 ++++ linux-3.1.1/include/sound/pcm.h 2011-11-16 18:39:08.000000000 -0500
61500 +@@ -81,6 +81,7 @@ struct snd_pcm_ops {
61501 + int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61502 + int (*ack)(struct snd_pcm_substream *substream);
61503 + };
61504 ++typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61505 +
61506 + /*
61507 + *
61508 +diff -urNp linux-3.1.1/include/sound/sb16_csp.h linux-3.1.1/include/sound/sb16_csp.h
61509 +--- linux-3.1.1/include/sound/sb16_csp.h 2011-11-11 15:19:27.000000000 -0500
61510 ++++ linux-3.1.1/include/sound/sb16_csp.h 2011-11-16 18:39:08.000000000 -0500
61511 +@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
61512 + int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61513 + int (*csp_stop) (struct snd_sb_csp * p);
61514 + int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61515 +-};
61516 ++} __no_const;
61517 +
61518 + /*
61519 + * CSP private data
61520 +diff -urNp linux-3.1.1/include/sound/soc.h linux-3.1.1/include/sound/soc.h
61521 +--- linux-3.1.1/include/sound/soc.h 2011-11-11 15:19:27.000000000 -0500
61522 ++++ linux-3.1.1/include/sound/soc.h 2011-11-16 18:39:08.000000000 -0500
61523 +@@ -676,7 +676,7 @@ struct snd_soc_platform_driver {
61524 + /* platform IO - used for platform DAPM */
61525 + unsigned int (*read)(struct snd_soc_platform *, unsigned int);
61526 + int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
61527 +-};
61528 ++} __do_const;
61529 +
61530 + struct snd_soc_platform {
61531 + const char *name;
61532 +diff -urNp linux-3.1.1/include/sound/ymfpci.h linux-3.1.1/include/sound/ymfpci.h
61533 +--- linux-3.1.1/include/sound/ymfpci.h 2011-11-11 15:19:27.000000000 -0500
61534 ++++ linux-3.1.1/include/sound/ymfpci.h 2011-11-16 18:39:08.000000000 -0500
61535 +@@ -358,7 +358,7 @@ struct snd_ymfpci {
61536 + spinlock_t reg_lock;
61537 + spinlock_t voice_lock;
61538 + wait_queue_head_t interrupt_sleep;
61539 +- atomic_t interrupt_sleep_count;
61540 ++ atomic_unchecked_t interrupt_sleep_count;
61541 + struct snd_info_entry *proc_entry;
61542 + const struct firmware *dsp_microcode;
61543 + const struct firmware *controller_microcode;
61544 +diff -urNp linux-3.1.1/include/target/target_core_base.h linux-3.1.1/include/target/target_core_base.h
61545 +--- linux-3.1.1/include/target/target_core_base.h 2011-11-11 15:19:27.000000000 -0500
61546 ++++ linux-3.1.1/include/target/target_core_base.h 2011-11-16 18:39:08.000000000 -0500
61547 +@@ -356,7 +356,7 @@ struct t10_reservation_ops {
61548 + int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
61549 + int (*t10_pr_register)(struct se_cmd *);
61550 + int (*t10_pr_clear)(struct se_cmd *);
61551 +-};
61552 ++} __no_const;
61553 +
61554 + struct t10_reservation {
61555 + /* Reservation effects all target ports */
61556 +@@ -496,8 +496,8 @@ struct se_cmd {
61557 + atomic_t t_task_cdbs_left;
61558 + atomic_t t_task_cdbs_ex_left;
61559 + atomic_t t_task_cdbs_timeout_left;
61560 +- atomic_t t_task_cdbs_sent;
61561 +- atomic_t t_transport_aborted;
61562 ++ atomic_unchecked_t t_task_cdbs_sent;
61563 ++ atomic_unchecked_t t_transport_aborted;
61564 + atomic_t t_transport_active;
61565 + atomic_t t_transport_complete;
61566 + atomic_t t_transport_queue_active;
61567 +@@ -744,7 +744,7 @@ struct se_device {
61568 + atomic_t active_cmds;
61569 + atomic_t simple_cmds;
61570 + atomic_t depth_left;
61571 +- atomic_t dev_ordered_id;
61572 ++ atomic_unchecked_t dev_ordered_id;
61573 + atomic_t dev_tur_active;
61574 + atomic_t execute_tasks;
61575 + atomic_t dev_status_thr_count;
61576 +diff -urNp linux-3.1.1/include/trace/events/irq.h linux-3.1.1/include/trace/events/irq.h
61577 +--- linux-3.1.1/include/trace/events/irq.h 2011-11-11 15:19:27.000000000 -0500
61578 ++++ linux-3.1.1/include/trace/events/irq.h 2011-11-16 18:39:08.000000000 -0500
61579 +@@ -36,7 +36,7 @@ struct softirq_action;
61580 + */
61581 + TRACE_EVENT(irq_handler_entry,
61582 +
61583 +- TP_PROTO(int irq, struct irqaction *action),
61584 ++ TP_PROTO(int irq, const struct irqaction *action),
61585 +
61586 + TP_ARGS(irq, action),
61587 +
61588 +@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
61589 + */
61590 + TRACE_EVENT(irq_handler_exit,
61591 +
61592 +- TP_PROTO(int irq, struct irqaction *action, int ret),
61593 ++ TP_PROTO(int irq, const struct irqaction *action, int ret),
61594 +
61595 + TP_ARGS(irq, action, ret),
61596 +
61597 +diff -urNp linux-3.1.1/include/video/udlfb.h linux-3.1.1/include/video/udlfb.h
61598 +--- linux-3.1.1/include/video/udlfb.h 2011-11-11 15:19:27.000000000 -0500
61599 ++++ linux-3.1.1/include/video/udlfb.h 2011-11-16 18:39:08.000000000 -0500
61600 +@@ -51,10 +51,10 @@ struct dlfb_data {
61601 + int base8;
61602 + u32 pseudo_palette[256];
61603 + /* blit-only rendering path metrics, exposed through sysfs */
61604 +- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61605 +- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
61606 +- atomic_t bytes_sent; /* to usb, after compression including overhead */
61607 +- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
61608 ++ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61609 ++ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
61610 ++ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
61611 ++ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
61612 + };
61613 +
61614 + #define NR_USB_REQUEST_I2C_SUB_IO 0x02
61615 +diff -urNp linux-3.1.1/include/video/uvesafb.h linux-3.1.1/include/video/uvesafb.h
61616 +--- linux-3.1.1/include/video/uvesafb.h 2011-11-11 15:19:27.000000000 -0500
61617 ++++ linux-3.1.1/include/video/uvesafb.h 2011-11-16 18:39:08.000000000 -0500
61618 +@@ -177,6 +177,7 @@ struct uvesafb_par {
61619 + u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61620 + u8 pmi_setpal; /* PMI for palette changes */
61621 + u16 *pmi_base; /* protected mode interface location */
61622 ++ u8 *pmi_code; /* protected mode code location */
61623 + void *pmi_start;
61624 + void *pmi_pal;
61625 + u8 *vbe_state_orig; /*
61626 +diff -urNp linux-3.1.1/init/do_mounts.c linux-3.1.1/init/do_mounts.c
61627 +--- linux-3.1.1/init/do_mounts.c 2011-11-11 15:19:27.000000000 -0500
61628 ++++ linux-3.1.1/init/do_mounts.c 2011-11-16 18:39:08.000000000 -0500
61629 +@@ -287,11 +287,11 @@ static void __init get_fs_names(char *pa
61630 +
61631 + static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61632 + {
61633 +- int err = sys_mount(name, "/root", fs, flags, data);
61634 ++ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
61635 + if (err)
61636 + return err;
61637 +
61638 +- sys_chdir((const char __user __force *)"/root");
61639 ++ sys_chdir((const char __force_user*)"/root");
61640 + ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61641 + printk(KERN_INFO
61642 + "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61643 +@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
61644 + va_start(args, fmt);
61645 + vsprintf(buf, fmt, args);
61646 + va_end(args);
61647 +- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61648 ++ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61649 + if (fd >= 0) {
61650 + sys_ioctl(fd, FDEJECT, 0);
61651 + sys_close(fd);
61652 + }
61653 + printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61654 +- fd = sys_open("/dev/console", O_RDWR, 0);
61655 ++ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
61656 + if (fd >= 0) {
61657 + sys_ioctl(fd, TCGETS, (long)&termios);
61658 + termios.c_lflag &= ~ICANON;
61659 + sys_ioctl(fd, TCSETSF, (long)&termios);
61660 +- sys_read(fd, &c, 1);
61661 ++ sys_read(fd, (char __user *)&c, 1);
61662 + termios.c_lflag |= ICANON;
61663 + sys_ioctl(fd, TCSETSF, (long)&termios);
61664 + sys_close(fd);
61665 +@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
61666 + mount_root();
61667 + out:
61668 + devtmpfs_mount("dev");
61669 +- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61670 +- sys_chroot((const char __user __force *)".");
61671 ++ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61672 ++ sys_chroot((const char __force_user *)".");
61673 + }
61674 +diff -urNp linux-3.1.1/init/do_mounts.h linux-3.1.1/init/do_mounts.h
61675 +--- linux-3.1.1/init/do_mounts.h 2011-11-11 15:19:27.000000000 -0500
61676 ++++ linux-3.1.1/init/do_mounts.h 2011-11-16 18:39:08.000000000 -0500
61677 +@@ -15,15 +15,15 @@ extern int root_mountflags;
61678 +
61679 + static inline int create_dev(char *name, dev_t dev)
61680 + {
61681 +- sys_unlink(name);
61682 +- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61683 ++ sys_unlink((char __force_user *)name);
61684 ++ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
61685 + }
61686 +
61687 + #if BITS_PER_LONG == 32
61688 + static inline u32 bstat(char *name)
61689 + {
61690 + struct stat64 stat;
61691 +- if (sys_stat64(name, &stat) != 0)
61692 ++ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
61693 + return 0;
61694 + if (!S_ISBLK(stat.st_mode))
61695 + return 0;
61696 +@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
61697 + static inline u32 bstat(char *name)
61698 + {
61699 + struct stat stat;
61700 +- if (sys_newstat(name, &stat) != 0)
61701 ++ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
61702 + return 0;
61703 + if (!S_ISBLK(stat.st_mode))
61704 + return 0;
61705 +diff -urNp linux-3.1.1/init/do_mounts_initrd.c linux-3.1.1/init/do_mounts_initrd.c
61706 +--- linux-3.1.1/init/do_mounts_initrd.c 2011-11-11 15:19:27.000000000 -0500
61707 ++++ linux-3.1.1/init/do_mounts_initrd.c 2011-11-16 18:39:08.000000000 -0500
61708 +@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
61709 + create_dev("/dev/root.old", Root_RAM0);
61710 + /* mount initrd on rootfs' /root */
61711 + mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61712 +- sys_mkdir("/old", 0700);
61713 +- root_fd = sys_open("/", 0, 0);
61714 +- old_fd = sys_open("/old", 0, 0);
61715 ++ sys_mkdir((const char __force_user *)"/old", 0700);
61716 ++ root_fd = sys_open((const char __force_user *)"/", 0, 0);
61717 ++ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
61718 + /* move initrd over / and chdir/chroot in initrd root */
61719 +- sys_chdir("/root");
61720 +- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61721 +- sys_chroot(".");
61722 ++ sys_chdir((const char __force_user *)"/root");
61723 ++ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
61724 ++ sys_chroot((const char __force_user *)".");
61725 +
61726 + /*
61727 + * In case that a resume from disk is carried out by linuxrc or one of
61728 +@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
61729 +
61730 + /* move initrd to rootfs' /old */
61731 + sys_fchdir(old_fd);
61732 +- sys_mount("/", ".", NULL, MS_MOVE, NULL);
61733 ++ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
61734 + /* switch root and cwd back to / of rootfs */
61735 + sys_fchdir(root_fd);
61736 +- sys_chroot(".");
61737 ++ sys_chroot((const char __force_user *)".");
61738 + sys_close(old_fd);
61739 + sys_close(root_fd);
61740 +
61741 + if (new_decode_dev(real_root_dev) == Root_RAM0) {
61742 +- sys_chdir("/old");
61743 ++ sys_chdir((const char __force_user *)"/old");
61744 + return;
61745 + }
61746 +
61747 +@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
61748 + mount_root();
61749 +
61750 + printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61751 +- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61752 ++ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
61753 + if (!error)
61754 + printk("okay\n");
61755 + else {
61756 +- int fd = sys_open("/dev/root.old", O_RDWR, 0);
61757 ++ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
61758 + if (error == -ENOENT)
61759 + printk("/initrd does not exist. Ignored.\n");
61760 + else
61761 + printk("failed\n");
61762 + printk(KERN_NOTICE "Unmounting old root\n");
61763 +- sys_umount("/old", MNT_DETACH);
61764 ++ sys_umount((char __force_user *)"/old", MNT_DETACH);
61765 + printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61766 + if (fd < 0) {
61767 + error = fd;
61768 +@@ -116,11 +116,11 @@ int __init initrd_load(void)
61769 + * mounted in the normal path.
61770 + */
61771 + if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61772 +- sys_unlink("/initrd.image");
61773 ++ sys_unlink((const char __force_user *)"/initrd.image");
61774 + handle_initrd();
61775 + return 1;
61776 + }
61777 + }
61778 +- sys_unlink("/initrd.image");
61779 ++ sys_unlink((const char __force_user *)"/initrd.image");
61780 + return 0;
61781 + }
61782 +diff -urNp linux-3.1.1/init/do_mounts_md.c linux-3.1.1/init/do_mounts_md.c
61783 +--- linux-3.1.1/init/do_mounts_md.c 2011-11-11 15:19:27.000000000 -0500
61784 ++++ linux-3.1.1/init/do_mounts_md.c 2011-11-16 18:39:08.000000000 -0500
61785 +@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61786 + partitioned ? "_d" : "", minor,
61787 + md_setup_args[ent].device_names);
61788 +
61789 +- fd = sys_open(name, 0, 0);
61790 ++ fd = sys_open((char __force_user *)name, 0, 0);
61791 + if (fd < 0) {
61792 + printk(KERN_ERR "md: open failed - cannot start "
61793 + "array %s\n", name);
61794 +@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61795 + * array without it
61796 + */
61797 + sys_close(fd);
61798 +- fd = sys_open(name, 0, 0);
61799 ++ fd = sys_open((char __force_user *)name, 0, 0);
61800 + sys_ioctl(fd, BLKRRPART, 0);
61801 + }
61802 + sys_close(fd);
61803 +@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61804 +
61805 + wait_for_device_probe();
61806 +
61807 +- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
61808 ++ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
61809 + if (fd >= 0) {
61810 + sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61811 + sys_close(fd);
61812 +diff -urNp linux-3.1.1/init/initramfs.c linux-3.1.1/init/initramfs.c
61813 +--- linux-3.1.1/init/initramfs.c 2011-11-11 15:19:27.000000000 -0500
61814 ++++ linux-3.1.1/init/initramfs.c 2011-11-16 18:39:08.000000000 -0500
61815 +@@ -74,7 +74,7 @@ static void __init free_hash(void)
61816 + }
61817 + }
61818 +
61819 +-static long __init do_utime(char __user *filename, time_t mtime)
61820 ++static long __init do_utime(__force char __user *filename, time_t mtime)
61821 + {
61822 + struct timespec t[2];
61823 +
61824 +@@ -109,7 +109,7 @@ static void __init dir_utime(void)
61825 + struct dir_entry *de, *tmp;
61826 + list_for_each_entry_safe(de, tmp, &dir_list, list) {
61827 + list_del(&de->list);
61828 +- do_utime(de->name, de->mtime);
61829 ++ do_utime((char __force_user *)de->name, de->mtime);
61830 + kfree(de->name);
61831 + kfree(de);
61832 + }
61833 +@@ -271,7 +271,7 @@ static int __init maybe_link(void)
61834 + if (nlink >= 2) {
61835 + char *old = find_link(major, minor, ino, mode, collected);
61836 + if (old)
61837 +- return (sys_link(old, collected) < 0) ? -1 : 1;
61838 ++ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
61839 + }
61840 + return 0;
61841 + }
61842 +@@ -280,11 +280,11 @@ static void __init clean_path(char *path
61843 + {
61844 + struct stat st;
61845 +
61846 +- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61847 ++ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
61848 + if (S_ISDIR(st.st_mode))
61849 +- sys_rmdir(path);
61850 ++ sys_rmdir((char __force_user *)path);
61851 + else
61852 +- sys_unlink(path);
61853 ++ sys_unlink((char __force_user *)path);
61854 + }
61855 + }
61856 +
61857 +@@ -305,7 +305,7 @@ static int __init do_name(void)
61858 + int openflags = O_WRONLY|O_CREAT;
61859 + if (ml != 1)
61860 + openflags |= O_TRUNC;
61861 +- wfd = sys_open(collected, openflags, mode);
61862 ++ wfd = sys_open((char __force_user *)collected, openflags, mode);
61863 +
61864 + if (wfd >= 0) {
61865 + sys_fchown(wfd, uid, gid);
61866 +@@ -317,17 +317,17 @@ static int __init do_name(void)
61867 + }
61868 + }
61869 + } else if (S_ISDIR(mode)) {
61870 +- sys_mkdir(collected, mode);
61871 +- sys_chown(collected, uid, gid);
61872 +- sys_chmod(collected, mode);
61873 ++ sys_mkdir((char __force_user *)collected, mode);
61874 ++ sys_chown((char __force_user *)collected, uid, gid);
61875 ++ sys_chmod((char __force_user *)collected, mode);
61876 + dir_add(collected, mtime);
61877 + } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61878 + S_ISFIFO(mode) || S_ISSOCK(mode)) {
61879 + if (maybe_link() == 0) {
61880 +- sys_mknod(collected, mode, rdev);
61881 +- sys_chown(collected, uid, gid);
61882 +- sys_chmod(collected, mode);
61883 +- do_utime(collected, mtime);
61884 ++ sys_mknod((char __force_user *)collected, mode, rdev);
61885 ++ sys_chown((char __force_user *)collected, uid, gid);
61886 ++ sys_chmod((char __force_user *)collected, mode);
61887 ++ do_utime((char __force_user *)collected, mtime);
61888 + }
61889 + }
61890 + return 0;
61891 +@@ -336,15 +336,15 @@ static int __init do_name(void)
61892 + static int __init do_copy(void)
61893 + {
61894 + if (count >= body_len) {
61895 +- sys_write(wfd, victim, body_len);
61896 ++ sys_write(wfd, (char __force_user *)victim, body_len);
61897 + sys_close(wfd);
61898 +- do_utime(vcollected, mtime);
61899 ++ do_utime((char __force_user *)vcollected, mtime);
61900 + kfree(vcollected);
61901 + eat(body_len);
61902 + state = SkipIt;
61903 + return 0;
61904 + } else {
61905 +- sys_write(wfd, victim, count);
61906 ++ sys_write(wfd, (char __force_user *)victim, count);
61907 + body_len -= count;
61908 + eat(count);
61909 + return 1;
61910 +@@ -355,9 +355,9 @@ static int __init do_symlink(void)
61911 + {
61912 + collected[N_ALIGN(name_len) + body_len] = '\0';
61913 + clean_path(collected, 0);
61914 +- sys_symlink(collected + N_ALIGN(name_len), collected);
61915 +- sys_lchown(collected, uid, gid);
61916 +- do_utime(collected, mtime);
61917 ++ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
61918 ++ sys_lchown((char __force_user *)collected, uid, gid);
61919 ++ do_utime((char __force_user *)collected, mtime);
61920 + state = SkipIt;
61921 + next_state = Reset;
61922 + return 0;
61923 +diff -urNp linux-3.1.1/init/Kconfig linux-3.1.1/init/Kconfig
61924 +--- linux-3.1.1/init/Kconfig 2011-11-11 15:19:27.000000000 -0500
61925 ++++ linux-3.1.1/init/Kconfig 2011-11-16 18:39:08.000000000 -0500
61926 +@@ -1202,7 +1202,7 @@ config SLUB_DEBUG
61927 +
61928 + config COMPAT_BRK
61929 + bool "Disable heap randomization"
61930 +- default y
61931 ++ default n
61932 + help
61933 + Randomizing heap placement makes heap exploits harder, but it
61934 + also breaks ancient binaries (including anything libc5 based).
61935 +diff -urNp linux-3.1.1/init/main.c linux-3.1.1/init/main.c
61936 +--- linux-3.1.1/init/main.c 2011-11-11 15:19:27.000000000 -0500
61937 ++++ linux-3.1.1/init/main.c 2011-11-16 18:40:44.000000000 -0500
61938 +@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
61939 + extern void tc_init(void);
61940 + #endif
61941 +
61942 ++extern void grsecurity_init(void);
61943 ++
61944 + /*
61945 + * Debug helper: via this flag we know that we are in 'early bootup code'
61946 + * where only the boot processor is running with IRQ disabled. This means
61947 +@@ -149,6 +151,49 @@ static int __init set_reset_devices(char
61948 +
61949 + __setup("reset_devices", set_reset_devices);
61950 +
61951 ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
61952 ++extern char pax_enter_kernel_user[];
61953 ++extern char pax_exit_kernel_user[];
61954 ++extern pgdval_t clone_pgd_mask;
61955 ++#endif
61956 ++
61957 ++#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
61958 ++static int __init setup_pax_nouderef(char *str)
61959 ++{
61960 ++#ifdef CONFIG_X86_32
61961 ++ unsigned int cpu;
61962 ++ struct desc_struct *gdt;
61963 ++
61964 ++ for (cpu = 0; cpu < NR_CPUS; cpu++) {
61965 ++ gdt = get_cpu_gdt_table(cpu);
61966 ++ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
61967 ++ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
61968 ++ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
61969 ++ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
61970 ++ }
61971 ++ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
61972 ++#else
61973 ++ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
61974 ++ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
61975 ++ clone_pgd_mask = ~(pgdval_t)0UL;
61976 ++#endif
61977 ++
61978 ++ return 0;
61979 ++}
61980 ++early_param("pax_nouderef", setup_pax_nouderef);
61981 ++#endif
61982 ++
61983 ++#ifdef CONFIG_PAX_SOFTMODE
61984 ++int pax_softmode;
61985 ++
61986 ++static int __init setup_pax_softmode(char *str)
61987 ++{
61988 ++ get_option(&str, &pax_softmode);
61989 ++ return 1;
61990 ++}
61991 ++__setup("pax_softmode=", setup_pax_softmode);
61992 ++#endif
61993 ++
61994 + static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
61995 + const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
61996 + static const char *panic_later, *panic_param;
61997 +@@ -678,6 +723,7 @@ int __init_or_module do_one_initcall(ini
61998 + {
61999 + int count = preempt_count();
62000 + int ret;
62001 ++ const char *msg1 = "", *msg2 = "";
62002 +
62003 + if (initcall_debug)
62004 + ret = do_one_initcall_debug(fn);
62005 +@@ -690,15 +736,15 @@ int __init_or_module do_one_initcall(ini
62006 + sprintf(msgbuf, "error code %d ", ret);
62007 +
62008 + if (preempt_count() != count) {
62009 +- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62010 ++ msg1 = " preemption imbalance";
62011 + preempt_count() = count;
62012 + }
62013 + if (irqs_disabled()) {
62014 +- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62015 ++ msg2 = " disabled interrupts";
62016 + local_irq_enable();
62017 + }
62018 +- if (msgbuf[0]) {
62019 +- printk("initcall %pF returned with %s\n", fn, msgbuf);
62020 ++ if (msgbuf[0] || *msg1 || *msg2) {
62021 ++ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62022 + }
62023 +
62024 + return ret;
62025 +@@ -817,7 +863,7 @@ static int __init kernel_init(void * unu
62026 + do_basic_setup();
62027 +
62028 + /* Open the /dev/console on the rootfs, this should never fail */
62029 +- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
62030 ++ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
62031 + printk(KERN_WARNING "Warning: unable to open an initial console.\n");
62032 +
62033 + (void) sys_dup(0);
62034 +@@ -830,11 +876,13 @@ static int __init kernel_init(void * unu
62035 + if (!ramdisk_execute_command)
62036 + ramdisk_execute_command = "/init";
62037 +
62038 +- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62039 ++ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
62040 + ramdisk_execute_command = NULL;
62041 + prepare_namespace();
62042 + }
62043 +
62044 ++ grsecurity_init();
62045 ++
62046 + /*
62047 + * Ok, we have completed the initial bootup, and
62048 + * we're essentially up and running. Get rid of the
62049 +diff -urNp linux-3.1.1/ipc/mqueue.c linux-3.1.1/ipc/mqueue.c
62050 +--- linux-3.1.1/ipc/mqueue.c 2011-11-11 15:19:27.000000000 -0500
62051 ++++ linux-3.1.1/ipc/mqueue.c 2011-11-16 18:40:44.000000000 -0500
62052 +@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(st
62053 + mq_bytes = (mq_msg_tblsz +
62054 + (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62055 +
62056 ++ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62057 + spin_lock(&mq_lock);
62058 + if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62059 + u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
62060 +diff -urNp linux-3.1.1/ipc/msg.c linux-3.1.1/ipc/msg.c
62061 +--- linux-3.1.1/ipc/msg.c 2011-11-11 15:19:27.000000000 -0500
62062 ++++ linux-3.1.1/ipc/msg.c 2011-11-16 18:39:08.000000000 -0500
62063 +@@ -309,18 +309,19 @@ static inline int msg_security(struct ke
62064 + return security_msg_queue_associate(msq, msgflg);
62065 + }
62066 +
62067 ++static struct ipc_ops msg_ops = {
62068 ++ .getnew = newque,
62069 ++ .associate = msg_security,
62070 ++ .more_checks = NULL
62071 ++};
62072 ++
62073 + SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62074 + {
62075 + struct ipc_namespace *ns;
62076 +- struct ipc_ops msg_ops;
62077 + struct ipc_params msg_params;
62078 +
62079 + ns = current->nsproxy->ipc_ns;
62080 +
62081 +- msg_ops.getnew = newque;
62082 +- msg_ops.associate = msg_security;
62083 +- msg_ops.more_checks = NULL;
62084 +-
62085 + msg_params.key = key;
62086 + msg_params.flg = msgflg;
62087 +
62088 +diff -urNp linux-3.1.1/ipc/sem.c linux-3.1.1/ipc/sem.c
62089 +--- linux-3.1.1/ipc/sem.c 2011-11-11 15:19:27.000000000 -0500
62090 ++++ linux-3.1.1/ipc/sem.c 2011-11-16 18:40:44.000000000 -0500
62091 +@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
62092 + return 0;
62093 + }
62094 +
62095 ++static struct ipc_ops sem_ops = {
62096 ++ .getnew = newary,
62097 ++ .associate = sem_security,
62098 ++ .more_checks = sem_more_checks
62099 ++};
62100 ++
62101 + SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62102 + {
62103 + struct ipc_namespace *ns;
62104 +- struct ipc_ops sem_ops;
62105 + struct ipc_params sem_params;
62106 +
62107 + ns = current->nsproxy->ipc_ns;
62108 +@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
62109 + if (nsems < 0 || nsems > ns->sc_semmsl)
62110 + return -EINVAL;
62111 +
62112 +- sem_ops.getnew = newary;
62113 +- sem_ops.associate = sem_security;
62114 +- sem_ops.more_checks = sem_more_checks;
62115 +-
62116 + sem_params.key = key;
62117 + sem_params.flg = semflg;
62118 + sem_params.u.nsems = nsems;
62119 +@@ -848,6 +849,8 @@ static int semctl_main(struct ipc_namesp
62120 + int nsems;
62121 + struct list_head tasks;
62122 +
62123 ++ pax_track_stack();
62124 ++
62125 + sma = sem_lock_check(ns, semid);
62126 + if (IS_ERR(sma))
62127 + return PTR_ERR(sma);
62128 +@@ -1295,6 +1298,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
62129 + struct ipc_namespace *ns;
62130 + struct list_head tasks;
62131 +
62132 ++ pax_track_stack();
62133 ++
62134 + ns = current->nsproxy->ipc_ns;
62135 +
62136 + if (nsops < 1 || semid < 0)
62137 +diff -urNp linux-3.1.1/ipc/shm.c linux-3.1.1/ipc/shm.c
62138 +--- linux-3.1.1/ipc/shm.c 2011-11-11 15:19:27.000000000 -0500
62139 ++++ linux-3.1.1/ipc/shm.c 2011-11-16 18:40:44.000000000 -0500
62140 +@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
62141 + static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62142 + #endif
62143 +
62144 ++#ifdef CONFIG_GRKERNSEC
62145 ++extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62146 ++ const time_t shm_createtime, const uid_t cuid,
62147 ++ const int shmid);
62148 ++extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62149 ++ const time_t shm_createtime);
62150 ++#endif
62151 ++
62152 + void shm_init_ns(struct ipc_namespace *ns)
62153 + {
62154 + ns->shm_ctlmax = SHMMAX;
62155 +@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *
62156 + shp->shm_lprid = 0;
62157 + shp->shm_atim = shp->shm_dtim = 0;
62158 + shp->shm_ctim = get_seconds();
62159 ++#ifdef CONFIG_GRKERNSEC
62160 ++ {
62161 ++ struct timespec timeval;
62162 ++ do_posix_clock_monotonic_gettime(&timeval);
62163 ++
62164 ++ shp->shm_createtime = timeval.tv_sec;
62165 ++ }
62166 ++#endif
62167 + shp->shm_segsz = size;
62168 + shp->shm_nattch = 0;
62169 + shp->shm_file = file;
62170 +@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct
62171 + return 0;
62172 + }
62173 +
62174 ++static struct ipc_ops shm_ops = {
62175 ++ .getnew = newseg,
62176 ++ .associate = shm_security,
62177 ++ .more_checks = shm_more_checks
62178 ++};
62179 ++
62180 + SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62181 + {
62182 + struct ipc_namespace *ns;
62183 +- struct ipc_ops shm_ops;
62184 + struct ipc_params shm_params;
62185 +
62186 + ns = current->nsproxy->ipc_ns;
62187 +
62188 +- shm_ops.getnew = newseg;
62189 +- shm_ops.associate = shm_security;
62190 +- shm_ops.more_checks = shm_more_checks;
62191 +-
62192 + shm_params.key = key;
62193 + shm_params.flg = shmflg;
62194 + shm_params.u.size = size;
62195 +@@ -870,8 +887,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
62196 + case SHM_LOCK:
62197 + case SHM_UNLOCK:
62198 + {
62199 +- struct file *uninitialized_var(shm_file);
62200 +-
62201 + lru_add_drain_all(); /* drain pagevecs to lru lists */
62202 +
62203 + shp = shm_lock_check(ns, shmid);
62204 +@@ -1004,9 +1019,21 @@ long do_shmat(int shmid, char __user *sh
62205 + if (err)
62206 + goto out_unlock;
62207 +
62208 ++#ifdef CONFIG_GRKERNSEC
62209 ++ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62210 ++ shp->shm_perm.cuid, shmid) ||
62211 ++ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62212 ++ err = -EACCES;
62213 ++ goto out_unlock;
62214 ++ }
62215 ++#endif
62216 ++
62217 + path = shp->shm_file->f_path;
62218 + path_get(&path);
62219 + shp->shm_nattch++;
62220 ++#ifdef CONFIG_GRKERNSEC
62221 ++ shp->shm_lapid = current->pid;
62222 ++#endif
62223 + size = i_size_read(path.dentry->d_inode);
62224 + shm_unlock(shp);
62225 +
62226 +diff -urNp linux-3.1.1/kernel/acct.c linux-3.1.1/kernel/acct.c
62227 +--- linux-3.1.1/kernel/acct.c 2011-11-11 15:19:27.000000000 -0500
62228 ++++ linux-3.1.1/kernel/acct.c 2011-11-16 18:39:08.000000000 -0500
62229 +@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
62230 + */
62231 + flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62232 + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62233 +- file->f_op->write(file, (char *)&ac,
62234 ++ file->f_op->write(file, (char __force_user *)&ac,
62235 + sizeof(acct_t), &file->f_pos);
62236 + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62237 + set_fs(fs);
62238 +diff -urNp linux-3.1.1/kernel/audit.c linux-3.1.1/kernel/audit.c
62239 +--- linux-3.1.1/kernel/audit.c 2011-11-11 15:19:27.000000000 -0500
62240 ++++ linux-3.1.1/kernel/audit.c 2011-11-16 18:39:08.000000000 -0500
62241 +@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
62242 + 3) suppressed due to audit_rate_limit
62243 + 4) suppressed due to audit_backlog_limit
62244 + */
62245 +-static atomic_t audit_lost = ATOMIC_INIT(0);
62246 ++static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62247 +
62248 + /* The netlink socket. */
62249 + static struct sock *audit_sock;
62250 +@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
62251 + unsigned long now;
62252 + int print;
62253 +
62254 +- atomic_inc(&audit_lost);
62255 ++ atomic_inc_unchecked(&audit_lost);
62256 +
62257 + print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62258 +
62259 +@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
62260 + printk(KERN_WARNING
62261 + "audit: audit_lost=%d audit_rate_limit=%d "
62262 + "audit_backlog_limit=%d\n",
62263 +- atomic_read(&audit_lost),
62264 ++ atomic_read_unchecked(&audit_lost),
62265 + audit_rate_limit,
62266 + audit_backlog_limit);
62267 + audit_panic(message);
62268 +@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_b
62269 + status_set.pid = audit_pid;
62270 + status_set.rate_limit = audit_rate_limit;
62271 + status_set.backlog_limit = audit_backlog_limit;
62272 +- status_set.lost = atomic_read(&audit_lost);
62273 ++ status_set.lost = atomic_read_unchecked(&audit_lost);
62274 + status_set.backlog = skb_queue_len(&audit_skb_queue);
62275 + audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62276 + &status_set, sizeof(status_set));
62277 +diff -urNp linux-3.1.1/kernel/auditsc.c linux-3.1.1/kernel/auditsc.c
62278 +--- linux-3.1.1/kernel/auditsc.c 2011-11-11 15:19:27.000000000 -0500
62279 ++++ linux-3.1.1/kernel/auditsc.c 2011-11-16 18:39:08.000000000 -0500
62280 +@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_conte
62281 + }
62282 +
62283 + /* global counter which is incremented every time something logs in */
62284 +-static atomic_t session_id = ATOMIC_INIT(0);
62285 ++static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62286 +
62287 + /**
62288 + * audit_set_loginuid - set a task's audit_context loginuid
62289 +@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT
62290 + */
62291 + int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62292 + {
62293 +- unsigned int sessionid = atomic_inc_return(&session_id);
62294 ++ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62295 + struct audit_context *context = task->audit_context;
62296 +
62297 + if (context && context->in_syscall) {
62298 +diff -urNp linux-3.1.1/kernel/capability.c linux-3.1.1/kernel/capability.c
62299 +--- linux-3.1.1/kernel/capability.c 2011-11-11 15:19:27.000000000 -0500
62300 ++++ linux-3.1.1/kernel/capability.c 2011-11-16 18:40:44.000000000 -0500
62301 +@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
62302 + * before modification is attempted and the application
62303 + * fails.
62304 + */
62305 ++ if (tocopy > ARRAY_SIZE(kdata))
62306 ++ return -EFAULT;
62307 ++
62308 + if (copy_to_user(dataptr, kdata, tocopy
62309 + * sizeof(struct __user_cap_data_struct))) {
62310 + return -EFAULT;
62311 +@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *n
62312 + BUG();
62313 + }
62314 +
62315 +- if (security_capable(ns, current_cred(), cap) == 0) {
62316 ++ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
62317 + current->flags |= PF_SUPERPRIV;
62318 + return true;
62319 + }
62320 +@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *n
62321 + }
62322 + EXPORT_SYMBOL(ns_capable);
62323 +
62324 ++bool ns_capable_nolog(struct user_namespace *ns, int cap)
62325 ++{
62326 ++ if (unlikely(!cap_valid(cap))) {
62327 ++ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62328 ++ BUG();
62329 ++ }
62330 ++
62331 ++ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
62332 ++ current->flags |= PF_SUPERPRIV;
62333 ++ return true;
62334 ++ }
62335 ++ return false;
62336 ++}
62337 ++EXPORT_SYMBOL(ns_capable_nolog);
62338 ++
62339 ++bool capable_nolog(int cap)
62340 ++{
62341 ++ return ns_capable_nolog(&init_user_ns, cap);
62342 ++}
62343 ++EXPORT_SYMBOL(capable_nolog);
62344 ++
62345 + /**
62346 + * task_ns_capable - Determine whether current task has a superior
62347 + * capability targeted at a specific task's user namespace.
62348 +@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct
62349 + }
62350 + EXPORT_SYMBOL(task_ns_capable);
62351 +
62352 ++bool task_ns_capable_nolog(struct task_struct *t, int cap)
62353 ++{
62354 ++ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
62355 ++}
62356 ++EXPORT_SYMBOL(task_ns_capable_nolog);
62357 ++
62358 + /**
62359 + * nsown_capable - Check superior capability to one's own user_ns
62360 + * @cap: The capability in question
62361 +diff -urNp linux-3.1.1/kernel/cgroup.c linux-3.1.1/kernel/cgroup.c
62362 +--- linux-3.1.1/kernel/cgroup.c 2011-11-11 15:19:27.000000000 -0500
62363 ++++ linux-3.1.1/kernel/cgroup.c 2011-11-16 18:40:44.000000000 -0500
62364 +@@ -595,6 +595,8 @@ static struct css_set *find_css_set(
62365 + struct hlist_head *hhead;
62366 + struct cg_cgroup_link *link;
62367 +
62368 ++ pax_track_stack();
62369 ++
62370 + /* First see if we already have a cgroup group that matches
62371 + * the desired set */
62372 + read_lock(&css_set_lock);
62373 +diff -urNp linux-3.1.1/kernel/compat.c linux-3.1.1/kernel/compat.c
62374 +--- linux-3.1.1/kernel/compat.c 2011-11-11 15:19:27.000000000 -0500
62375 ++++ linux-3.1.1/kernel/compat.c 2011-11-16 18:40:44.000000000 -0500
62376 +@@ -13,6 +13,7 @@
62377 +
62378 + #include <linux/linkage.h>
62379 + #include <linux/compat.h>
62380 ++#include <linux/module.h>
62381 + #include <linux/errno.h>
62382 + #include <linux/time.h>
62383 + #include <linux/signal.h>
62384 +@@ -167,7 +168,7 @@ static long compat_nanosleep_restart(str
62385 + mm_segment_t oldfs;
62386 + long ret;
62387 +
62388 +- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
62389 ++ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
62390 + oldfs = get_fs();
62391 + set_fs(KERNEL_DS);
62392 + ret = hrtimer_nanosleep_restart(restart);
62393 +@@ -199,7 +200,7 @@ asmlinkage long compat_sys_nanosleep(str
62394 + oldfs = get_fs();
62395 + set_fs(KERNEL_DS);
62396 + ret = hrtimer_nanosleep(&tu,
62397 +- rmtp ? (struct timespec __user *)&rmt : NULL,
62398 ++ rmtp ? (struct timespec __force_user *)&rmt : NULL,
62399 + HRTIMER_MODE_REL, CLOCK_MONOTONIC);
62400 + set_fs(oldfs);
62401 +
62402 +@@ -308,7 +309,7 @@ asmlinkage long compat_sys_sigpending(co
62403 + mm_segment_t old_fs = get_fs();
62404 +
62405 + set_fs(KERNEL_DS);
62406 +- ret = sys_sigpending((old_sigset_t __user *) &s);
62407 ++ ret = sys_sigpending((old_sigset_t __force_user *) &s);
62408 + set_fs(old_fs);
62409 + if (ret == 0)
62410 + ret = put_user(s, set);
62411 +@@ -331,8 +332,8 @@ asmlinkage long compat_sys_sigprocmask(i
62412 + old_fs = get_fs();
62413 + set_fs(KERNEL_DS);
62414 + ret = sys_sigprocmask(how,
62415 +- set ? (old_sigset_t __user *) &s : NULL,
62416 +- oset ? (old_sigset_t __user *) &s : NULL);
62417 ++ set ? (old_sigset_t __force_user *) &s : NULL,
62418 ++ oset ? (old_sigset_t __force_user *) &s : NULL);
62419 + set_fs(old_fs);
62420 + if (ret == 0)
62421 + if (oset)
62422 +@@ -369,7 +370,7 @@ asmlinkage long compat_sys_old_getrlimit
62423 + mm_segment_t old_fs = get_fs();
62424 +
62425 + set_fs(KERNEL_DS);
62426 +- ret = sys_old_getrlimit(resource, &r);
62427 ++ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
62428 + set_fs(old_fs);
62429 +
62430 + if (!ret) {
62431 +@@ -441,7 +442,7 @@ asmlinkage long compat_sys_getrusage(int
62432 + mm_segment_t old_fs = get_fs();
62433 +
62434 + set_fs(KERNEL_DS);
62435 +- ret = sys_getrusage(who, (struct rusage __user *) &r);
62436 ++ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
62437 + set_fs(old_fs);
62438 +
62439 + if (ret)
62440 +@@ -468,8 +469,8 @@ compat_sys_wait4(compat_pid_t pid, compa
62441 + set_fs (KERNEL_DS);
62442 + ret = sys_wait4(pid,
62443 + (stat_addr ?
62444 +- (unsigned int __user *) &status : NULL),
62445 +- options, (struct rusage __user *) &r);
62446 ++ (unsigned int __force_user *) &status : NULL),
62447 ++ options, (struct rusage __force_user *) &r);
62448 + set_fs (old_fs);
62449 +
62450 + if (ret > 0) {
62451 +@@ -494,8 +495,8 @@ asmlinkage long compat_sys_waitid(int wh
62452 + memset(&info, 0, sizeof(info));
62453 +
62454 + set_fs(KERNEL_DS);
62455 +- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
62456 +- uru ? (struct rusage __user *)&ru : NULL);
62457 ++ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
62458 ++ uru ? (struct rusage __force_user *)&ru : NULL);
62459 + set_fs(old_fs);
62460 +
62461 + if ((ret < 0) || (info.si_signo == 0))
62462 +@@ -625,8 +626,8 @@ long compat_sys_timer_settime(timer_t ti
62463 + oldfs = get_fs();
62464 + set_fs(KERNEL_DS);
62465 + err = sys_timer_settime(timer_id, flags,
62466 +- (struct itimerspec __user *) &newts,
62467 +- (struct itimerspec __user *) &oldts);
62468 ++ (struct itimerspec __force_user *) &newts,
62469 ++ (struct itimerspec __force_user *) &oldts);
62470 + set_fs(oldfs);
62471 + if (!err && old && put_compat_itimerspec(old, &oldts))
62472 + return -EFAULT;
62473 +@@ -643,7 +644,7 @@ long compat_sys_timer_gettime(timer_t ti
62474 + oldfs = get_fs();
62475 + set_fs(KERNEL_DS);
62476 + err = sys_timer_gettime(timer_id,
62477 +- (struct itimerspec __user *) &ts);
62478 ++ (struct itimerspec __force_user *) &ts);
62479 + set_fs(oldfs);
62480 + if (!err && put_compat_itimerspec(setting, &ts))
62481 + return -EFAULT;
62482 +@@ -662,7 +663,7 @@ long compat_sys_clock_settime(clockid_t
62483 + oldfs = get_fs();
62484 + set_fs(KERNEL_DS);
62485 + err = sys_clock_settime(which_clock,
62486 +- (struct timespec __user *) &ts);
62487 ++ (struct timespec __force_user *) &ts);
62488 + set_fs(oldfs);
62489 + return err;
62490 + }
62491 +@@ -677,7 +678,7 @@ long compat_sys_clock_gettime(clockid_t
62492 + oldfs = get_fs();
62493 + set_fs(KERNEL_DS);
62494 + err = sys_clock_gettime(which_clock,
62495 +- (struct timespec __user *) &ts);
62496 ++ (struct timespec __force_user *) &ts);
62497 + set_fs(oldfs);
62498 + if (!err && put_compat_timespec(&ts, tp))
62499 + return -EFAULT;
62500 +@@ -697,7 +698,7 @@ long compat_sys_clock_adjtime(clockid_t
62501 +
62502 + oldfs = get_fs();
62503 + set_fs(KERNEL_DS);
62504 +- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
62505 ++ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
62506 + set_fs(oldfs);
62507 +
62508 + err = compat_put_timex(utp, &txc);
62509 +@@ -717,7 +718,7 @@ long compat_sys_clock_getres(clockid_t w
62510 + oldfs = get_fs();
62511 + set_fs(KERNEL_DS);
62512 + err = sys_clock_getres(which_clock,
62513 +- (struct timespec __user *) &ts);
62514 ++ (struct timespec __force_user *) &ts);
62515 + set_fs(oldfs);
62516 + if (!err && tp && put_compat_timespec(&ts, tp))
62517 + return -EFAULT;
62518 +@@ -729,9 +730,9 @@ static long compat_clock_nanosleep_resta
62519 + long err;
62520 + mm_segment_t oldfs;
62521 + struct timespec tu;
62522 +- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
62523 ++ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
62524 +
62525 +- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
62526 ++ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
62527 + oldfs = get_fs();
62528 + set_fs(KERNEL_DS);
62529 + err = clock_nanosleep_restart(restart);
62530 +@@ -763,8 +764,8 @@ long compat_sys_clock_nanosleep(clockid_
62531 + oldfs = get_fs();
62532 + set_fs(KERNEL_DS);
62533 + err = sys_clock_nanosleep(which_clock, flags,
62534 +- (struct timespec __user *) &in,
62535 +- (struct timespec __user *) &out);
62536 ++ (struct timespec __force_user *) &in,
62537 ++ (struct timespec __force_user *) &out);
62538 + set_fs(oldfs);
62539 +
62540 + if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
62541 +diff -urNp linux-3.1.1/kernel/configs.c linux-3.1.1/kernel/configs.c
62542 +--- linux-3.1.1/kernel/configs.c 2011-11-11 15:19:27.000000000 -0500
62543 ++++ linux-3.1.1/kernel/configs.c 2011-11-16 18:40:44.000000000 -0500
62544 +@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
62545 + struct proc_dir_entry *entry;
62546 +
62547 + /* create the current config file */
62548 ++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62549 ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62550 ++ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62551 ++ &ikconfig_file_ops);
62552 ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62553 ++ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62554 ++ &ikconfig_file_ops);
62555 ++#endif
62556 ++#else
62557 + entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62558 + &ikconfig_file_ops);
62559 ++#endif
62560 ++
62561 + if (!entry)
62562 + return -ENOMEM;
62563 +
62564 +diff -urNp linux-3.1.1/kernel/cred.c linux-3.1.1/kernel/cred.c
62565 +--- linux-3.1.1/kernel/cred.c 2011-11-11 15:19:27.000000000 -0500
62566 ++++ linux-3.1.1/kernel/cred.c 2011-11-16 18:40:44.000000000 -0500
62567 +@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
62568 + */
62569 + void __put_cred(struct cred *cred)
62570 + {
62571 ++ pax_track_stack();
62572 ++
62573 + kdebug("__put_cred(%p{%d,%d})", cred,
62574 + atomic_read(&cred->usage),
62575 + read_cred_subscribers(cred));
62576 +@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
62577 + {
62578 + struct cred *cred;
62579 +
62580 ++ pax_track_stack();
62581 ++
62582 + kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62583 + atomic_read(&tsk->cred->usage),
62584 + read_cred_subscribers(tsk->cred));
62585 +@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
62586 + {
62587 + const struct cred *cred;
62588 +
62589 ++ pax_track_stack();
62590 ++
62591 + rcu_read_lock();
62592 +
62593 + do {
62594 +@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
62595 + {
62596 + struct cred *new;
62597 +
62598 ++ pax_track_stack();
62599 ++
62600 + new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62601 + if (!new)
62602 + return NULL;
62603 +@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
62604 + const struct cred *old;
62605 + struct cred *new;
62606 +
62607 ++ pax_track_stack();
62608 ++
62609 + validate_process_creds();
62610 +
62611 + new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62612 +@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
62613 + struct thread_group_cred *tgcred = NULL;
62614 + struct cred *new;
62615 +
62616 ++ pax_track_stack();
62617 ++
62618 + #ifdef CONFIG_KEYS
62619 + tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62620 + if (!tgcred)
62621 +@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
62622 + struct cred *new;
62623 + int ret;
62624 +
62625 ++ pax_track_stack();
62626 ++
62627 + if (
62628 + #ifdef CONFIG_KEYS
62629 + !p->cred->thread_keyring &&
62630 +@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
62631 + struct task_struct *task = current;
62632 + const struct cred *old = task->real_cred;
62633 +
62634 ++ pax_track_stack();
62635 ++
62636 + kdebug("commit_creds(%p{%d,%d})", new,
62637 + atomic_read(&new->usage),
62638 + read_cred_subscribers(new));
62639 +@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
62640 +
62641 + get_cred(new); /* we will require a ref for the subj creds too */
62642 +
62643 ++ gr_set_role_label(task, new->uid, new->gid);
62644 ++
62645 + /* dumpability changes */
62646 + if (old->euid != new->euid ||
62647 + old->egid != new->egid ||
62648 +@@ -549,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
62649 + */
62650 + void abort_creds(struct cred *new)
62651 + {
62652 ++ pax_track_stack();
62653 ++
62654 + kdebug("abort_creds(%p{%d,%d})", new,
62655 + atomic_read(&new->usage),
62656 + read_cred_subscribers(new));
62657 +@@ -572,6 +592,8 @@ const struct cred *override_creds(const
62658 + {
62659 + const struct cred *old = current->cred;
62660 +
62661 ++ pax_track_stack();
62662 ++
62663 + kdebug("override_creds(%p{%d,%d})", new,
62664 + atomic_read(&new->usage),
62665 + read_cred_subscribers(new));
62666 +@@ -601,6 +623,8 @@ void revert_creds(const struct cred *old
62667 + {
62668 + const struct cred *override = current->cred;
62669 +
62670 ++ pax_track_stack();
62671 ++
62672 + kdebug("revert_creds(%p{%d,%d})", old,
62673 + atomic_read(&old->usage),
62674 + read_cred_subscribers(old));
62675 +@@ -647,6 +671,8 @@ struct cred *prepare_kernel_cred(struct
62676 + const struct cred *old;
62677 + struct cred *new;
62678 +
62679 ++ pax_track_stack();
62680 ++
62681 + new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62682 + if (!new)
62683 + return NULL;
62684 +@@ -701,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62685 + */
62686 + int set_security_override(struct cred *new, u32 secid)
62687 + {
62688 ++ pax_track_stack();
62689 ++
62690 + return security_kernel_act_as(new, secid);
62691 + }
62692 + EXPORT_SYMBOL(set_security_override);
62693 +@@ -720,6 +748,8 @@ int set_security_override_from_ctx(struc
62694 + u32 secid;
62695 + int ret;
62696 +
62697 ++ pax_track_stack();
62698 ++
62699 + ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62700 + if (ret < 0)
62701 + return ret;
62702 +diff -urNp linux-3.1.1/kernel/debug/debug_core.c linux-3.1.1/kernel/debug/debug_core.c
62703 +--- linux-3.1.1/kernel/debug/debug_core.c 2011-11-11 15:19:27.000000000 -0500
62704 ++++ linux-3.1.1/kernel/debug/debug_core.c 2011-11-16 18:39:08.000000000 -0500
62705 +@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
62706 + */
62707 + static atomic_t masters_in_kgdb;
62708 + static atomic_t slaves_in_kgdb;
62709 +-static atomic_t kgdb_break_tasklet_var;
62710 ++static atomic_unchecked_t kgdb_break_tasklet_var;
62711 + atomic_t kgdb_setting_breakpoint;
62712 +
62713 + struct task_struct *kgdb_usethread;
62714 +@@ -129,7 +129,7 @@ int kgdb_single_step;
62715 + static pid_t kgdb_sstep_pid;
62716 +
62717 + /* to keep track of the CPU which is doing the single stepping*/
62718 +-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62719 ++atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62720 +
62721 + /*
62722 + * If you are debugging a problem where roundup (the collection of
62723 +@@ -542,7 +542,7 @@ return_normal:
62724 + * kernel will only try for the value of sstep_tries before
62725 + * giving up and continuing on.
62726 + */
62727 +- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
62728 ++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
62729 + (kgdb_info[cpu].task &&
62730 + kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
62731 + atomic_set(&kgdb_active, -1);
62732 +@@ -636,8 +636,8 @@ cpu_master_loop:
62733 + }
62734 +
62735 + kgdb_restore:
62736 +- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
62737 +- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
62738 ++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
62739 ++ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
62740 + if (kgdb_info[sstep_cpu].task)
62741 + kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
62742 + else
62743 +@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
62744 + static void kgdb_tasklet_bpt(unsigned long ing)
62745 + {
62746 + kgdb_breakpoint();
62747 +- atomic_set(&kgdb_break_tasklet_var, 0);
62748 ++ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
62749 + }
62750 +
62751 + static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
62752 +
62753 + void kgdb_schedule_breakpoint(void)
62754 + {
62755 +- if (atomic_read(&kgdb_break_tasklet_var) ||
62756 ++ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
62757 + atomic_read(&kgdb_active) != -1 ||
62758 + atomic_read(&kgdb_setting_breakpoint))
62759 + return;
62760 +- atomic_inc(&kgdb_break_tasklet_var);
62761 ++ atomic_inc_unchecked(&kgdb_break_tasklet_var);
62762 + tasklet_schedule(&kgdb_tasklet_breakpoint);
62763 + }
62764 + EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
62765 +diff -urNp linux-3.1.1/kernel/debug/kdb/kdb_main.c linux-3.1.1/kernel/debug/kdb/kdb_main.c
62766 +--- linux-3.1.1/kernel/debug/kdb/kdb_main.c 2011-11-11 15:19:27.000000000 -0500
62767 ++++ linux-3.1.1/kernel/debug/kdb/kdb_main.c 2011-11-16 18:39:08.000000000 -0500
62768 +@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
62769 + list_for_each_entry(mod, kdb_modules, list) {
62770 +
62771 + kdb_printf("%-20s%8u 0x%p ", mod->name,
62772 +- mod->core_size, (void *)mod);
62773 ++ mod->core_size_rx + mod->core_size_rw, (void *)mod);
62774 + #ifdef CONFIG_MODULE_UNLOAD
62775 + kdb_printf("%4d ", module_refcount(mod));
62776 + #endif
62777 +@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
62778 + kdb_printf(" (Loading)");
62779 + else
62780 + kdb_printf(" (Live)");
62781 +- kdb_printf(" 0x%p", mod->module_core);
62782 ++ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
62783 +
62784 + #ifdef CONFIG_MODULE_UNLOAD
62785 + {
62786 +diff -urNp linux-3.1.1/kernel/events/core.c linux-3.1.1/kernel/events/core.c
62787 +--- linux-3.1.1/kernel/events/core.c 2011-11-11 15:19:27.000000000 -0500
62788 ++++ linux-3.1.1/kernel/events/core.c 2011-11-16 18:39:08.000000000 -0500
62789 +@@ -172,7 +172,7 @@ int perf_proc_update_handler(struct ctl_
62790 + return 0;
62791 + }
62792 +
62793 +-static atomic64_t perf_event_id;
62794 ++static atomic64_unchecked_t perf_event_id;
62795 +
62796 + static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
62797 + enum event_type_t event_type);
62798 +@@ -2535,7 +2535,7 @@ static void __perf_event_read(void *info
62799 +
62800 + static inline u64 perf_event_count(struct perf_event *event)
62801 + {
62802 +- return local64_read(&event->count) + atomic64_read(&event->child_count);
62803 ++ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
62804 + }
62805 +
62806 + static u64 perf_event_read(struct perf_event *event)
62807 +@@ -3060,9 +3060,9 @@ u64 perf_event_read_value(struct perf_ev
62808 + mutex_lock(&event->child_mutex);
62809 + total += perf_event_read(event);
62810 + *enabled += event->total_time_enabled +
62811 +- atomic64_read(&event->child_total_time_enabled);
62812 ++ atomic64_read_unchecked(&event->child_total_time_enabled);
62813 + *running += event->total_time_running +
62814 +- atomic64_read(&event->child_total_time_running);
62815 ++ atomic64_read_unchecked(&event->child_total_time_running);
62816 +
62817 + list_for_each_entry(child, &event->child_list, child_list) {
62818 + total += perf_event_read(child);
62819 +@@ -3448,10 +3448,10 @@ void perf_event_update_userpage(struct p
62820 + userpg->offset -= local64_read(&event->hw.prev_count);
62821 +
62822 + userpg->time_enabled = enabled +
62823 +- atomic64_read(&event->child_total_time_enabled);
62824 ++ atomic64_read_unchecked(&event->child_total_time_enabled);
62825 +
62826 + userpg->time_running = running +
62827 +- atomic64_read(&event->child_total_time_running);
62828 ++ atomic64_read_unchecked(&event->child_total_time_running);
62829 +
62830 + barrier();
62831 + ++userpg->lock;
62832 +@@ -3822,11 +3822,11 @@ static void perf_output_read_one(struct
62833 + values[n++] = perf_event_count(event);
62834 + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
62835 + values[n++] = enabled +
62836 +- atomic64_read(&event->child_total_time_enabled);
62837 ++ atomic64_read_unchecked(&event->child_total_time_enabled);
62838 + }
62839 + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
62840 + values[n++] = running +
62841 +- atomic64_read(&event->child_total_time_running);
62842 ++ atomic64_read_unchecked(&event->child_total_time_running);
62843 + }
62844 + if (read_format & PERF_FORMAT_ID)
62845 + values[n++] = primary_event_id(event);
62846 +@@ -4477,12 +4477,12 @@ static void perf_event_mmap_event(struct
62847 + * need to add enough zero bytes after the string to handle
62848 + * the 64bit alignment we do later.
62849 + */
62850 +- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
62851 ++ buf = kzalloc(PATH_MAX, GFP_KERNEL);
62852 + if (!buf) {
62853 + name = strncpy(tmp, "//enomem", sizeof(tmp));
62854 + goto got_name;
62855 + }
62856 +- name = d_path(&file->f_path, buf, PATH_MAX);
62857 ++ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
62858 + if (IS_ERR(name)) {
62859 + name = strncpy(tmp, "//toolong", sizeof(tmp));
62860 + goto got_name;
62861 +@@ -5833,7 +5833,7 @@ perf_event_alloc(struct perf_event_attr
62862 + event->parent = parent_event;
62863 +
62864 + event->ns = get_pid_ns(current->nsproxy->pid_ns);
62865 +- event->id = atomic64_inc_return(&perf_event_id);
62866 ++ event->id = atomic64_inc_return_unchecked(&perf_event_id);
62867 +
62868 + event->state = PERF_EVENT_STATE_INACTIVE;
62869 +
62870 +@@ -6355,10 +6355,10 @@ static void sync_child_event(struct perf
62871 + /*
62872 + * Add back the child's count to the parent's count:
62873 + */
62874 +- atomic64_add(child_val, &parent_event->child_count);
62875 +- atomic64_add(child_event->total_time_enabled,
62876 ++ atomic64_add_unchecked(child_val, &parent_event->child_count);
62877 ++ atomic64_add_unchecked(child_event->total_time_enabled,
62878 + &parent_event->child_total_time_enabled);
62879 +- atomic64_add(child_event->total_time_running,
62880 ++ atomic64_add_unchecked(child_event->total_time_running,
62881 + &parent_event->child_total_time_running);
62882 +
62883 + /*
62884 +diff -urNp linux-3.1.1/kernel/exit.c linux-3.1.1/kernel/exit.c
62885 +--- linux-3.1.1/kernel/exit.c 2011-11-11 15:19:27.000000000 -0500
62886 ++++ linux-3.1.1/kernel/exit.c 2011-11-16 19:33:48.000000000 -0500
62887 +@@ -57,6 +57,10 @@
62888 + #include <asm/pgtable.h>
62889 + #include <asm/mmu_context.h>
62890 +
62891 ++#ifdef CONFIG_GRKERNSEC
62892 ++extern rwlock_t grsec_exec_file_lock;
62893 ++#endif
62894 ++
62895 + static void exit_mm(struct task_struct * tsk);
62896 +
62897 + static void __unhash_process(struct task_struct *p, bool group_dead)
62898 +@@ -168,6 +172,10 @@ void release_task(struct task_struct * p
62899 + struct task_struct *leader;
62900 + int zap_leader;
62901 + repeat:
62902 ++#ifdef CONFIG_NET
62903 ++ gr_del_task_from_ip_table(p);
62904 ++#endif
62905 ++
62906 + /* don't need to get the RCU readlock here - the process is dead and
62907 + * can't be modifying its own credentials. But shut RCU-lockdep up */
62908 + rcu_read_lock();
62909 +@@ -324,11 +332,22 @@ static void reparent_to_kthreadd(void)
62910 + {
62911 + write_lock_irq(&tasklist_lock);
62912 +
62913 ++#ifdef CONFIG_GRKERNSEC
62914 ++ write_lock(&grsec_exec_file_lock);
62915 ++ if (current->exec_file) {
62916 ++ fput(current->exec_file);
62917 ++ current->exec_file = NULL;
62918 ++ }
62919 ++ write_unlock(&grsec_exec_file_lock);
62920 ++#endif
62921 ++
62922 + ptrace_unlink(current);
62923 + /* Reparent to init */
62924 + current->real_parent = current->parent = kthreadd_task;
62925 + list_move_tail(&current->sibling, &current->real_parent->children);
62926 +
62927 ++ gr_set_kernel_label(current);
62928 ++
62929 + /* Set the exit signal to SIGCHLD so we signal init on exit */
62930 + current->exit_signal = SIGCHLD;
62931 +
62932 +@@ -380,7 +399,7 @@ int allow_signal(int sig)
62933 + * know it'll be handled, so that they don't get converted to
62934 + * SIGKILL or just silently dropped.
62935 + */
62936 +- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62937 ++ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62938 + recalc_sigpending();
62939 + spin_unlock_irq(&current->sighand->siglock);
62940 + return 0;
62941 +@@ -416,6 +435,17 @@ void daemonize(const char *name, ...)
62942 + vsnprintf(current->comm, sizeof(current->comm), name, args);
62943 + va_end(args);
62944 +
62945 ++#ifdef CONFIG_GRKERNSEC
62946 ++ write_lock(&grsec_exec_file_lock);
62947 ++ if (current->exec_file) {
62948 ++ fput(current->exec_file);
62949 ++ current->exec_file = NULL;
62950 ++ }
62951 ++ write_unlock(&grsec_exec_file_lock);
62952 ++#endif
62953 ++
62954 ++ gr_set_kernel_label(current);
62955 ++
62956 + /*
62957 + * If we were started as result of loading a module, close all of the
62958 + * user space pages. We don't need them, and if we didn't close them
62959 +@@ -895,6 +925,8 @@ NORET_TYPE void do_exit(long code)
62960 + struct task_struct *tsk = current;
62961 + int group_dead;
62962 +
62963 ++ set_fs(USER_DS);
62964 ++
62965 + profile_task_exit(tsk);
62966 +
62967 + WARN_ON(blk_needs_flush_plug(tsk));
62968 +@@ -911,7 +943,6 @@ NORET_TYPE void do_exit(long code)
62969 + * mm_release()->clear_child_tid() from writing to a user-controlled
62970 + * kernel address.
62971 + */
62972 +- set_fs(USER_DS);
62973 +
62974 + ptrace_event(PTRACE_EVENT_EXIT, code);
62975 +
62976 +@@ -973,6 +1004,9 @@ NORET_TYPE void do_exit(long code)
62977 + tsk->exit_code = code;
62978 + taskstats_exit(tsk, group_dead);
62979 +
62980 ++ gr_acl_handle_psacct(tsk, code);
62981 ++ gr_acl_handle_exit();
62982 ++
62983 + exit_mm(tsk);
62984 +
62985 + if (group_dead)
62986 +diff -urNp linux-3.1.1/kernel/fork.c linux-3.1.1/kernel/fork.c
62987 +--- linux-3.1.1/kernel/fork.c 2011-11-11 15:19:27.000000000 -0500
62988 ++++ linux-3.1.1/kernel/fork.c 2011-11-16 19:36:31.000000000 -0500
62989 +@@ -285,7 +285,7 @@ static struct task_struct *dup_task_stru
62990 + *stackend = STACK_END_MAGIC; /* for overflow detection */
62991 +
62992 + #ifdef CONFIG_CC_STACKPROTECTOR
62993 +- tsk->stack_canary = get_random_int();
62994 ++ tsk->stack_canary = pax_get_random_long();
62995 + #endif
62996 +
62997 + /*
62998 +@@ -309,13 +309,77 @@ out:
62999 + }
63000 +
63001 + #ifdef CONFIG_MMU
63002 ++static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
63003 ++{
63004 ++ struct vm_area_struct *tmp;
63005 ++ unsigned long charge;
63006 ++ struct mempolicy *pol;
63007 ++ struct file *file;
63008 ++
63009 ++ charge = 0;
63010 ++ if (mpnt->vm_flags & VM_ACCOUNT) {
63011 ++ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63012 ++ if (security_vm_enough_memory(len))
63013 ++ goto fail_nomem;
63014 ++ charge = len;
63015 ++ }
63016 ++ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63017 ++ if (!tmp)
63018 ++ goto fail_nomem;
63019 ++ *tmp = *mpnt;
63020 ++ tmp->vm_mm = mm;
63021 ++ INIT_LIST_HEAD(&tmp->anon_vma_chain);
63022 ++ pol = mpol_dup(vma_policy(mpnt));
63023 ++ if (IS_ERR(pol))
63024 ++ goto fail_nomem_policy;
63025 ++ vma_set_policy(tmp, pol);
63026 ++ if (anon_vma_fork(tmp, mpnt))
63027 ++ goto fail_nomem_anon_vma_fork;
63028 ++ tmp->vm_flags &= ~VM_LOCKED;
63029 ++ tmp->vm_next = tmp->vm_prev = NULL;
63030 ++ tmp->vm_mirror = NULL;
63031 ++ file = tmp->vm_file;
63032 ++ if (file) {
63033 ++ struct inode *inode = file->f_path.dentry->d_inode;
63034 ++ struct address_space *mapping = file->f_mapping;
63035 ++
63036 ++ get_file(file);
63037 ++ if (tmp->vm_flags & VM_DENYWRITE)
63038 ++ atomic_dec(&inode->i_writecount);
63039 ++ mutex_lock(&mapping->i_mmap_mutex);
63040 ++ if (tmp->vm_flags & VM_SHARED)
63041 ++ mapping->i_mmap_writable++;
63042 ++ flush_dcache_mmap_lock(mapping);
63043 ++ /* insert tmp into the share list, just after mpnt */
63044 ++ vma_prio_tree_add(tmp, mpnt);
63045 ++ flush_dcache_mmap_unlock(mapping);
63046 ++ mutex_unlock(&mapping->i_mmap_mutex);
63047 ++ }
63048 ++
63049 ++ /*
63050 ++ * Clear hugetlb-related page reserves for children. This only
63051 ++ * affects MAP_PRIVATE mappings. Faults generated by the child
63052 ++ * are not guaranteed to succeed, even if read-only
63053 ++ */
63054 ++ if (is_vm_hugetlb_page(tmp))
63055 ++ reset_vma_resv_huge_pages(tmp);
63056 ++
63057 ++ return tmp;
63058 ++
63059 ++fail_nomem_anon_vma_fork:
63060 ++ mpol_put(pol);
63061 ++fail_nomem_policy:
63062 ++ kmem_cache_free(vm_area_cachep, tmp);
63063 ++fail_nomem:
63064 ++ vm_unacct_memory(charge);
63065 ++ return NULL;
63066 ++}
63067 ++
63068 + static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63069 + {
63070 + struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
63071 + struct rb_node **rb_link, *rb_parent;
63072 + int retval;
63073 +- unsigned long charge;
63074 +- struct mempolicy *pol;
63075 +
63076 + down_write(&oldmm->mmap_sem);
63077 + flush_cache_dup_mm(oldmm);
63078 +@@ -327,8 +391,8 @@ static int dup_mmap(struct mm_struct *mm
63079 + mm->locked_vm = 0;
63080 + mm->mmap = NULL;
63081 + mm->mmap_cache = NULL;
63082 +- mm->free_area_cache = oldmm->mmap_base;
63083 +- mm->cached_hole_size = ~0UL;
63084 ++ mm->free_area_cache = oldmm->free_area_cache;
63085 ++ mm->cached_hole_size = oldmm->cached_hole_size;
63086 + mm->map_count = 0;
63087 + cpumask_clear(mm_cpumask(mm));
63088 + mm->mm_rb = RB_ROOT;
63089 +@@ -344,8 +408,6 @@ static int dup_mmap(struct mm_struct *mm
63090 +
63091 + prev = NULL;
63092 + for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63093 +- struct file *file;
63094 +-
63095 + if (mpnt->vm_flags & VM_DONTCOPY) {
63096 + long pages = vma_pages(mpnt);
63097 + mm->total_vm -= pages;
63098 +@@ -353,55 +415,13 @@ static int dup_mmap(struct mm_struct *mm
63099 + -pages);
63100 + continue;
63101 + }
63102 +- charge = 0;
63103 +- if (mpnt->vm_flags & VM_ACCOUNT) {
63104 +- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63105 +- if (security_vm_enough_memory(len))
63106 +- goto fail_nomem;
63107 +- charge = len;
63108 +- }
63109 +- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63110 +- if (!tmp)
63111 +- goto fail_nomem;
63112 +- *tmp = *mpnt;
63113 +- INIT_LIST_HEAD(&tmp->anon_vma_chain);
63114 +- pol = mpol_dup(vma_policy(mpnt));
63115 +- retval = PTR_ERR(pol);
63116 +- if (IS_ERR(pol))
63117 +- goto fail_nomem_policy;
63118 +- vma_set_policy(tmp, pol);
63119 +- tmp->vm_mm = mm;
63120 +- if (anon_vma_fork(tmp, mpnt))
63121 +- goto fail_nomem_anon_vma_fork;
63122 +- tmp->vm_flags &= ~VM_LOCKED;
63123 +- tmp->vm_next = tmp->vm_prev = NULL;
63124 +- file = tmp->vm_file;
63125 +- if (file) {
63126 +- struct inode *inode = file->f_path.dentry->d_inode;
63127 +- struct address_space *mapping = file->f_mapping;
63128 +-
63129 +- get_file(file);
63130 +- if (tmp->vm_flags & VM_DENYWRITE)
63131 +- atomic_dec(&inode->i_writecount);
63132 +- mutex_lock(&mapping->i_mmap_mutex);
63133 +- if (tmp->vm_flags & VM_SHARED)
63134 +- mapping->i_mmap_writable++;
63135 +- flush_dcache_mmap_lock(mapping);
63136 +- /* insert tmp into the share list, just after mpnt */
63137 +- vma_prio_tree_add(tmp, mpnt);
63138 +- flush_dcache_mmap_unlock(mapping);
63139 +- mutex_unlock(&mapping->i_mmap_mutex);
63140 ++ tmp = dup_vma(mm, mpnt);
63141 ++ if (!tmp) {
63142 ++ retval = -ENOMEM;
63143 ++ goto out;
63144 + }
63145 +
63146 + /*
63147 +- * Clear hugetlb-related page reserves for children. This only
63148 +- * affects MAP_PRIVATE mappings. Faults generated by the child
63149 +- * are not guaranteed to succeed, even if read-only
63150 +- */
63151 +- if (is_vm_hugetlb_page(tmp))
63152 +- reset_vma_resv_huge_pages(tmp);
63153 +-
63154 +- /*
63155 + * Link in the new vma and copy the page table entries.
63156 + */
63157 + *pprev = tmp;
63158 +@@ -422,6 +442,31 @@ static int dup_mmap(struct mm_struct *mm
63159 + if (retval)
63160 + goto out;
63161 + }
63162 ++
63163 ++#ifdef CONFIG_PAX_SEGMEXEC
63164 ++ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63165 ++ struct vm_area_struct *mpnt_m;
63166 ++
63167 ++ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63168 ++ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63169 ++
63170 ++ if (!mpnt->vm_mirror)
63171 ++ continue;
63172 ++
63173 ++ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63174 ++ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63175 ++ mpnt->vm_mirror = mpnt_m;
63176 ++ } else {
63177 ++ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63178 ++ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63179 ++ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63180 ++ mpnt->vm_mirror->vm_mirror = mpnt;
63181 ++ }
63182 ++ }
63183 ++ BUG_ON(mpnt_m);
63184 ++ }
63185 ++#endif
63186 ++
63187 + /* a new mm has just been created */
63188 + arch_dup_mmap(oldmm, mm);
63189 + retval = 0;
63190 +@@ -430,14 +475,6 @@ out:
63191 + flush_tlb_mm(oldmm);
63192 + up_write(&oldmm->mmap_sem);
63193 + return retval;
63194 +-fail_nomem_anon_vma_fork:
63195 +- mpol_put(pol);
63196 +-fail_nomem_policy:
63197 +- kmem_cache_free(vm_area_cachep, tmp);
63198 +-fail_nomem:
63199 +- retval = -ENOMEM;
63200 +- vm_unacct_memory(charge);
63201 +- goto out;
63202 + }
63203 +
63204 + static inline int mm_alloc_pgd(struct mm_struct *mm)
63205 +@@ -837,13 +874,14 @@ static int copy_fs(unsigned long clone_f
63206 + spin_unlock(&fs->lock);
63207 + return -EAGAIN;
63208 + }
63209 +- fs->users++;
63210 ++ atomic_inc(&fs->users);
63211 + spin_unlock(&fs->lock);
63212 + return 0;
63213 + }
63214 + tsk->fs = copy_fs_struct(fs);
63215 + if (!tsk->fs)
63216 + return -ENOMEM;
63217 ++ gr_set_chroot_entries(tsk, &tsk->fs->root);
63218 + return 0;
63219 + }
63220 +
63221 +@@ -1105,6 +1143,9 @@ static struct task_struct *copy_process(
63222 + DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63223 + #endif
63224 + retval = -EAGAIN;
63225 ++
63226 ++ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63227 ++
63228 + if (atomic_read(&p->real_cred->user->processes) >=
63229 + task_rlimit(p, RLIMIT_NPROC)) {
63230 + if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63231 +@@ -1264,6 +1305,8 @@ static struct task_struct *copy_process(
63232 + if (clone_flags & CLONE_THREAD)
63233 + p->tgid = current->tgid;
63234 +
63235 ++ gr_copy_label(p);
63236 ++
63237 + p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63238 + /*
63239 + * Clear TID on mm_release()?
63240 +@@ -1428,6 +1471,8 @@ bad_fork_cleanup_count:
63241 + bad_fork_free:
63242 + free_task(p);
63243 + fork_out:
63244 ++ gr_log_forkfail(retval);
63245 ++
63246 + return ERR_PTR(retval);
63247 + }
63248 +
63249 +@@ -1528,6 +1573,8 @@ long do_fork(unsigned long clone_flags,
63250 + if (clone_flags & CLONE_PARENT_SETTID)
63251 + put_user(nr, parent_tidptr);
63252 +
63253 ++ gr_handle_brute_check();
63254 ++
63255 + if (clone_flags & CLONE_VFORK) {
63256 + p->vfork_done = &vfork;
63257 + init_completion(&vfork);
63258 +@@ -1637,7 +1684,7 @@ static int unshare_fs(unsigned long unsh
63259 + return 0;
63260 +
63261 + /* don't need lock here; in the worst case we'll do useless copy */
63262 +- if (fs->users == 1)
63263 ++ if (atomic_read(&fs->users) == 1)
63264 + return 0;
63265 +
63266 + *new_fsp = copy_fs_struct(fs);
63267 +@@ -1726,7 +1773,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
63268 + fs = current->fs;
63269 + spin_lock(&fs->lock);
63270 + current->fs = new_fs;
63271 +- if (--fs->users)
63272 ++ gr_set_chroot_entries(current, &current->fs->root);
63273 ++ if (atomic_dec_return(&fs->users))
63274 + new_fs = NULL;
63275 + else
63276 + new_fs = fs;
63277 +diff -urNp linux-3.1.1/kernel/futex.c linux-3.1.1/kernel/futex.c
63278 +--- linux-3.1.1/kernel/futex.c 2011-11-11 15:19:27.000000000 -0500
63279 ++++ linux-3.1.1/kernel/futex.c 2011-11-16 18:40:44.000000000 -0500
63280 +@@ -54,6 +54,7 @@
63281 + #include <linux/mount.h>
63282 + #include <linux/pagemap.h>
63283 + #include <linux/syscalls.h>
63284 ++#include <linux/ptrace.h>
63285 + #include <linux/signal.h>
63286 + #include <linux/module.h>
63287 + #include <linux/magic.h>
63288 +@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fsh
63289 + struct page *page, *page_head;
63290 + int err, ro = 0;
63291 +
63292 ++#ifdef CONFIG_PAX_SEGMEXEC
63293 ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63294 ++ return -EFAULT;
63295 ++#endif
63296 ++
63297 + /*
63298 + * The futex address must be "naturally" aligned.
63299 + */
63300 +@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr,
63301 + struct futex_q q = futex_q_init;
63302 + int ret;
63303 +
63304 ++ pax_track_stack();
63305 ++
63306 + if (!bitset)
63307 + return -EINVAL;
63308 + q.bitset = bitset;
63309 +@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __u
63310 + struct futex_q q = futex_q_init;
63311 + int res, ret;
63312 +
63313 ++ pax_track_stack();
63314 ++
63315 + if (!bitset)
63316 + return -EINVAL;
63317 +
63318 +@@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63319 + {
63320 + struct robust_list_head __user *head;
63321 + unsigned long ret;
63322 ++#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63323 + const struct cred *cred = current_cred(), *pcred;
63324 ++#endif
63325 +
63326 + if (!futex_cmpxchg_enabled)
63327 + return -ENOSYS;
63328 +@@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63329 + if (!p)
63330 + goto err_unlock;
63331 + ret = -EPERM;
63332 ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63333 ++ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63334 ++ goto err_unlock;
63335 ++#else
63336 + pcred = __task_cred(p);
63337 + /* If victim is in different user_ns, then uids are not
63338 + comparable, so we must have CAP_SYS_PTRACE */
63339 +@@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63340 + !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
63341 + goto err_unlock;
63342 + ok:
63343 ++#endif
63344 + head = p->robust_list;
63345 + rcu_read_unlock();
63346 + }
63347 +@@ -2712,6 +2729,7 @@ static int __init futex_init(void)
63348 + {
63349 + u32 curval;
63350 + int i;
63351 ++ mm_segment_t oldfs;
63352 +
63353 + /*
63354 + * This will fail and we want it. Some arch implementations do
63355 +@@ -2723,8 +2741,11 @@ static int __init futex_init(void)
63356 + * implementation, the non-functional ones will return
63357 + * -ENOSYS.
63358 + */
63359 ++ oldfs = get_fs();
63360 ++ set_fs(USER_DS);
63361 + if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
63362 + futex_cmpxchg_enabled = 1;
63363 ++ set_fs(oldfs);
63364 +
63365 + for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
63366 + plist_head_init(&futex_queues[i].chain);
63367 +diff -urNp linux-3.1.1/kernel/futex_compat.c linux-3.1.1/kernel/futex_compat.c
63368 +--- linux-3.1.1/kernel/futex_compat.c 2011-11-11 15:19:27.000000000 -0500
63369 ++++ linux-3.1.1/kernel/futex_compat.c 2011-11-16 18:40:44.000000000 -0500
63370 +@@ -10,6 +10,7 @@
63371 + #include <linux/compat.h>
63372 + #include <linux/nsproxy.h>
63373 + #include <linux/futex.h>
63374 ++#include <linux/ptrace.h>
63375 +
63376 + #include <asm/uaccess.h>
63377 +
63378 +@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
63379 + {
63380 + struct compat_robust_list_head __user *head;
63381 + unsigned long ret;
63382 +- const struct cred *cred = current_cred(), *pcred;
63383 ++#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63384 ++ const struct cred *cred = current_cred();
63385 ++ const struct cred *pcred;
63386 ++#endif
63387 +
63388 + if (!futex_cmpxchg_enabled)
63389 + return -ENOSYS;
63390 +@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
63391 + if (!p)
63392 + goto err_unlock;
63393 + ret = -EPERM;
63394 ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63395 ++ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63396 ++ goto err_unlock;
63397 ++#else
63398 + pcred = __task_cred(p);
63399 + /* If victim is in different user_ns, then uids are not
63400 + comparable, so we must have CAP_SYS_PTRACE */
63401 +@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
63402 + !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
63403 + goto err_unlock;
63404 + ok:
63405 ++#endif
63406 + head = p->compat_robust_list;
63407 + rcu_read_unlock();
63408 + }
63409 +diff -urNp linux-3.1.1/kernel/gcov/base.c linux-3.1.1/kernel/gcov/base.c
63410 +--- linux-3.1.1/kernel/gcov/base.c 2011-11-11 15:19:27.000000000 -0500
63411 ++++ linux-3.1.1/kernel/gcov/base.c 2011-11-16 18:39:08.000000000 -0500
63412 +@@ -102,11 +102,6 @@ void gcov_enable_events(void)
63413 + }
63414 +
63415 + #ifdef CONFIG_MODULES
63416 +-static inline int within(void *addr, void *start, unsigned long size)
63417 +-{
63418 +- return ((addr >= start) && (addr < start + size));
63419 +-}
63420 +-
63421 + /* Update list and generate events when modules are unloaded. */
63422 + static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63423 + void *data)
63424 +@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
63425 + prev = NULL;
63426 + /* Remove entries located in module from linked list. */
63427 + for (info = gcov_info_head; info; info = info->next) {
63428 +- if (within(info, mod->module_core, mod->core_size)) {
63429 ++ if (within_module_core_rw((unsigned long)info, mod)) {
63430 + if (prev)
63431 + prev->next = info->next;
63432 + else
63433 +diff -urNp linux-3.1.1/kernel/hrtimer.c linux-3.1.1/kernel/hrtimer.c
63434 +--- linux-3.1.1/kernel/hrtimer.c 2011-11-11 15:19:27.000000000 -0500
63435 ++++ linux-3.1.1/kernel/hrtimer.c 2011-11-16 18:39:08.000000000 -0500
63436 +@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
63437 + local_irq_restore(flags);
63438 + }
63439 +
63440 +-static void run_hrtimer_softirq(struct softirq_action *h)
63441 ++static void run_hrtimer_softirq(void)
63442 + {
63443 + hrtimer_peek_ahead_timers();
63444 + }
63445 +diff -urNp linux-3.1.1/kernel/jump_label.c linux-3.1.1/kernel/jump_label.c
63446 +--- linux-3.1.1/kernel/jump_label.c 2011-11-11 15:19:27.000000000 -0500
63447 ++++ linux-3.1.1/kernel/jump_label.c 2011-11-16 18:39:08.000000000 -0500
63448 +@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entr
63449 +
63450 + size = (((unsigned long)stop - (unsigned long)start)
63451 + / sizeof(struct jump_entry));
63452 ++ pax_open_kernel();
63453 + sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
63454 ++ pax_close_kernel();
63455 + }
63456 +
63457 + static void jump_label_update(struct jump_label_key *key, int enable);
63458 +@@ -297,10 +299,12 @@ static void jump_label_invalidate_module
63459 + struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
63460 + struct jump_entry *iter;
63461 +
63462 ++ pax_open_kernel();
63463 + for (iter = iter_start; iter < iter_stop; iter++) {
63464 + if (within_module_init(iter->code, mod))
63465 + iter->code = 0;
63466 + }
63467 ++ pax_close_kernel();
63468 + }
63469 +
63470 + static int
63471 +diff -urNp linux-3.1.1/kernel/kallsyms.c linux-3.1.1/kernel/kallsyms.c
63472 +--- linux-3.1.1/kernel/kallsyms.c 2011-11-11 15:19:27.000000000 -0500
63473 ++++ linux-3.1.1/kernel/kallsyms.c 2011-11-16 18:40:44.000000000 -0500
63474 +@@ -11,6 +11,9 @@
63475 + * Changed the compression method from stem compression to "table lookup"
63476 + * compression (see scripts/kallsyms.c for a more complete description)
63477 + */
63478 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
63479 ++#define __INCLUDED_BY_HIDESYM 1
63480 ++#endif
63481 + #include <linux/kallsyms.h>
63482 + #include <linux/module.h>
63483 + #include <linux/init.h>
63484 +@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
63485 +
63486 + static inline int is_kernel_inittext(unsigned long addr)
63487 + {
63488 ++ if (system_state != SYSTEM_BOOTING)
63489 ++ return 0;
63490 ++
63491 + if (addr >= (unsigned long)_sinittext
63492 + && addr <= (unsigned long)_einittext)
63493 + return 1;
63494 + return 0;
63495 + }
63496 +
63497 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63498 ++#ifdef CONFIG_MODULES
63499 ++static inline int is_module_text(unsigned long addr)
63500 ++{
63501 ++ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63502 ++ return 1;
63503 ++
63504 ++ addr = ktla_ktva(addr);
63505 ++ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63506 ++}
63507 ++#else
63508 ++static inline int is_module_text(unsigned long addr)
63509 ++{
63510 ++ return 0;
63511 ++}
63512 ++#endif
63513 ++#endif
63514 ++
63515 + static inline int is_kernel_text(unsigned long addr)
63516 + {
63517 + if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63518 +@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
63519 +
63520 + static inline int is_kernel(unsigned long addr)
63521 + {
63522 ++
63523 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63524 ++ if (is_kernel_text(addr) || is_kernel_inittext(addr))
63525 ++ return 1;
63526 ++
63527 ++ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63528 ++#else
63529 + if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63530 ++#endif
63531 ++
63532 + return 1;
63533 + return in_gate_area_no_mm(addr);
63534 + }
63535 +
63536 + static int is_ksym_addr(unsigned long addr)
63537 + {
63538 ++
63539 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63540 ++ if (is_module_text(addr))
63541 ++ return 0;
63542 ++#endif
63543 ++
63544 + if (all_var)
63545 + return is_kernel(addr);
63546 +
63547 +@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
63548 +
63549 + static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63550 + {
63551 +- iter->name[0] = '\0';
63552 + iter->nameoff = get_symbol_offset(new_pos);
63553 + iter->pos = new_pos;
63554 + }
63555 +@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
63556 + {
63557 + struct kallsym_iter *iter = m->private;
63558 +
63559 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
63560 ++ if (current_uid())
63561 ++ return 0;
63562 ++#endif
63563 ++
63564 + /* Some debugging symbols have no name. Ignore them. */
63565 + if (!iter->name[0])
63566 + return 0;
63567 +@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
63568 + struct kallsym_iter *iter;
63569 + int ret;
63570 +
63571 +- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63572 ++ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63573 + if (!iter)
63574 + return -ENOMEM;
63575 + reset_iter(iter, 0);
63576 +diff -urNp linux-3.1.1/kernel/kexec.c linux-3.1.1/kernel/kexec.c
63577 +--- linux-3.1.1/kernel/kexec.c 2011-11-11 15:19:27.000000000 -0500
63578 ++++ linux-3.1.1/kernel/kexec.c 2011-11-16 18:39:08.000000000 -0500
63579 +@@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(un
63580 + unsigned long flags)
63581 + {
63582 + struct compat_kexec_segment in;
63583 +- struct kexec_segment out, __user *ksegments;
63584 ++ struct kexec_segment out;
63585 ++ struct kexec_segment __user *ksegments;
63586 + unsigned long i, result;
63587 +
63588 + /* Don't allow clients that don't understand the native
63589 +diff -urNp linux-3.1.1/kernel/kmod.c linux-3.1.1/kernel/kmod.c
63590 +--- linux-3.1.1/kernel/kmod.c 2011-11-11 15:19:27.000000000 -0500
63591 ++++ linux-3.1.1/kernel/kmod.c 2011-11-16 18:40:44.000000000 -0500
63592 +@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63593 + * If module auto-loading support is disabled then this function
63594 + * becomes a no-operation.
63595 + */
63596 +-int __request_module(bool wait, const char *fmt, ...)
63597 ++static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63598 + {
63599 +- va_list args;
63600 + char module_name[MODULE_NAME_LEN];
63601 + unsigned int max_modprobes;
63602 + int ret;
63603 +- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63604 ++ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63605 + static char *envp[] = { "HOME=/",
63606 + "TERM=linux",
63607 + "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63608 +@@ -88,9 +87,7 @@ int __request_module(bool wait, const ch
63609 + #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
63610 + static int kmod_loop_msg;
63611 +
63612 +- va_start(args, fmt);
63613 +- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63614 +- va_end(args);
63615 ++ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63616 + if (ret >= MODULE_NAME_LEN)
63617 + return -ENAMETOOLONG;
63618 +
63619 +@@ -98,6 +95,20 @@ int __request_module(bool wait, const ch
63620 + if (ret)
63621 + return ret;
63622 +
63623 ++#ifdef CONFIG_GRKERNSEC_MODHARDEN
63624 ++ if (!current_uid()) {
63625 ++ /* hack to workaround consolekit/udisks stupidity */
63626 ++ read_lock(&tasklist_lock);
63627 ++ if (!strcmp(current->comm, "mount") &&
63628 ++ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63629 ++ read_unlock(&tasklist_lock);
63630 ++ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63631 ++ return -EPERM;
63632 ++ }
63633 ++ read_unlock(&tasklist_lock);
63634 ++ }
63635 ++#endif
63636 ++
63637 + /* If modprobe needs a service that is in a module, we get a recursive
63638 + * loop. Limit the number of running kmod threads to max_threads/2 or
63639 + * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63640 +@@ -133,6 +144,47 @@ int __request_module(bool wait, const ch
63641 + atomic_dec(&kmod_concurrent);
63642 + return ret;
63643 + }
63644 ++
63645 ++int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63646 ++{
63647 ++ va_list args;
63648 ++ int ret;
63649 ++
63650 ++ va_start(args, fmt);
63651 ++ ret = ____request_module(wait, module_param, fmt, args);
63652 ++ va_end(args);
63653 ++
63654 ++ return ret;
63655 ++}
63656 ++
63657 ++int __request_module(bool wait, const char *fmt, ...)
63658 ++{
63659 ++ va_list args;
63660 ++ int ret;
63661 ++
63662 ++#ifdef CONFIG_GRKERNSEC_MODHARDEN
63663 ++ if (current_uid()) {
63664 ++ char module_param[MODULE_NAME_LEN];
63665 ++
63666 ++ memset(module_param, 0, sizeof(module_param));
63667 ++
63668 ++ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63669 ++
63670 ++ va_start(args, fmt);
63671 ++ ret = ____request_module(wait, module_param, fmt, args);
63672 ++ va_end(args);
63673 ++
63674 ++ return ret;
63675 ++ }
63676 ++#endif
63677 ++
63678 ++ va_start(args, fmt);
63679 ++ ret = ____request_module(wait, NULL, fmt, args);
63680 ++ va_end(args);
63681 ++
63682 ++ return ret;
63683 ++}
63684 ++
63685 + EXPORT_SYMBOL(__request_module);
63686 + #endif /* CONFIG_MODULES */
63687 +
63688 +@@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
63689 + *
63690 + * Thus the __user pointer cast is valid here.
63691 + */
63692 +- sys_wait4(pid, (int __user *)&ret, 0, NULL);
63693 ++ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
63694 +
63695 + /*
63696 + * If ret is 0, either ____call_usermodehelper failed and the
63697 +diff -urNp linux-3.1.1/kernel/kprobes.c linux-3.1.1/kernel/kprobes.c
63698 +--- linux-3.1.1/kernel/kprobes.c 2011-11-11 15:19:27.000000000 -0500
63699 ++++ linux-3.1.1/kernel/kprobes.c 2011-11-16 18:39:08.000000000 -0500
63700 +@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
63701 + * kernel image and loaded module images reside. This is required
63702 + * so x86_64 can correctly handle the %rip-relative fixups.
63703 + */
63704 +- kip->insns = module_alloc(PAGE_SIZE);
63705 ++ kip->insns = module_alloc_exec(PAGE_SIZE);
63706 + if (!kip->insns) {
63707 + kfree(kip);
63708 + return NULL;
63709 +@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
63710 + */
63711 + if (!list_is_singular(&kip->list)) {
63712 + list_del(&kip->list);
63713 +- module_free(NULL, kip->insns);
63714 ++ module_free_exec(NULL, kip->insns);
63715 + kfree(kip);
63716 + }
63717 + return 1;
63718 +@@ -1949,7 +1949,7 @@ static int __init init_kprobes(void)
63719 + {
63720 + int i, err = 0;
63721 + unsigned long offset = 0, size = 0;
63722 +- char *modname, namebuf[128];
63723 ++ char *modname, namebuf[KSYM_NAME_LEN];
63724 + const char *symbol_name;
63725 + void *addr;
63726 + struct kprobe_blackpoint *kb;
63727 +@@ -2075,7 +2075,7 @@ static int __kprobes show_kprobe_addr(st
63728 + const char *sym = NULL;
63729 + unsigned int i = *(loff_t *) v;
63730 + unsigned long offset = 0;
63731 +- char *modname, namebuf[128];
63732 ++ char *modname, namebuf[KSYM_NAME_LEN];
63733 +
63734 + head = &kprobe_table[i];
63735 + preempt_disable();
63736 +diff -urNp linux-3.1.1/kernel/lockdep.c linux-3.1.1/kernel/lockdep.c
63737 +--- linux-3.1.1/kernel/lockdep.c 2011-11-11 15:19:27.000000000 -0500
63738 ++++ linux-3.1.1/kernel/lockdep.c 2011-11-16 18:39:08.000000000 -0500
63739 +@@ -583,6 +583,10 @@ static int static_obj(void *obj)
63740 + end = (unsigned long) &_end,
63741 + addr = (unsigned long) obj;
63742 +
63743 ++#ifdef CONFIG_PAX_KERNEXEC
63744 ++ start = ktla_ktva(start);
63745 ++#endif
63746 ++
63747 + /*
63748 + * static variable?
63749 + */
63750 +@@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *
63751 + if (!static_obj(lock->key)) {
63752 + debug_locks_off();
63753 + printk("INFO: trying to register non-static key.\n");
63754 ++ printk("lock:%pS key:%pS.\n", lock, lock->key);
63755 + printk("the code is fine but needs lockdep annotation.\n");
63756 + printk("turning off the locking correctness validator.\n");
63757 + dump_stack();
63758 +@@ -2948,7 +2953,7 @@ static int __lock_acquire(struct lockdep
63759 + if (!class)
63760 + return 0;
63761 + }
63762 +- atomic_inc((atomic_t *)&class->ops);
63763 ++ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
63764 + if (very_verbose(class)) {
63765 + printk("\nacquire class [%p] %s", class->key, class->name);
63766 + if (class->name_version > 1)
63767 +diff -urNp linux-3.1.1/kernel/lockdep_proc.c linux-3.1.1/kernel/lockdep_proc.c
63768 +--- linux-3.1.1/kernel/lockdep_proc.c 2011-11-11 15:19:27.000000000 -0500
63769 ++++ linux-3.1.1/kernel/lockdep_proc.c 2011-11-16 18:39:08.000000000 -0500
63770 +@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63771 +
63772 + static void print_name(struct seq_file *m, struct lock_class *class)
63773 + {
63774 +- char str[128];
63775 ++ char str[KSYM_NAME_LEN];
63776 + const char *name = class->name;
63777 +
63778 + if (!name) {
63779 +diff -urNp linux-3.1.1/kernel/module.c linux-3.1.1/kernel/module.c
63780 +--- linux-3.1.1/kernel/module.c 2011-11-11 15:19:27.000000000 -0500
63781 ++++ linux-3.1.1/kernel/module.c 2011-11-16 18:40:44.000000000 -0500
63782 +@@ -58,6 +58,7 @@
63783 + #include <linux/jump_label.h>
63784 + #include <linux/pfn.h>
63785 + #include <linux/bsearch.h>
63786 ++#include <linux/grsecurity.h>
63787 +
63788 + #define CREATE_TRACE_POINTS
63789 + #include <trace/events/module.h>
63790 +@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
63791 +
63792 + /* Bounds of module allocation, for speeding __module_address.
63793 + * Protected by module_mutex. */
63794 +-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63795 ++static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63796 ++static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63797 +
63798 + int register_module_notifier(struct notifier_block * nb)
63799 + {
63800 +@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(cons
63801 + return true;
63802 +
63803 + list_for_each_entry_rcu(mod, &modules, list) {
63804 +- struct symsearch arr[] = {
63805 ++ struct symsearch modarr[] = {
63806 + { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63807 + NOT_GPL_ONLY, false },
63808 + { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63809 +@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(cons
63810 + #endif
63811 + };
63812 +
63813 +- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63814 ++ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63815 + return true;
63816 + }
63817 + return false;
63818 +@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(
63819 + static int percpu_modalloc(struct module *mod,
63820 + unsigned long size, unsigned long align)
63821 + {
63822 +- if (align > PAGE_SIZE) {
63823 ++ if (align-1 >= PAGE_SIZE) {
63824 + printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63825 + mod->name, align, PAGE_SIZE);
63826 + align = PAGE_SIZE;
63827 +@@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
63828 + */
63829 + #ifdef CONFIG_SYSFS
63830 +
63831 +-#ifdef CONFIG_KALLSYMS
63832 ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63833 + static inline bool sect_empty(const Elf_Shdr *sect)
63834 + {
63835 + return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
63836 +@@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base
63837 +
63838 + static void unset_module_core_ro_nx(struct module *mod)
63839 + {
63840 +- set_page_attributes(mod->module_core + mod->core_text_size,
63841 +- mod->module_core + mod->core_size,
63842 ++ set_page_attributes(mod->module_core_rw,
63843 ++ mod->module_core_rw + mod->core_size_rw,
63844 + set_memory_x);
63845 +- set_page_attributes(mod->module_core,
63846 +- mod->module_core + mod->core_ro_size,
63847 ++ set_page_attributes(mod->module_core_rx,
63848 ++ mod->module_core_rx + mod->core_size_rx,
63849 + set_memory_rw);
63850 + }
63851 +
63852 + static void unset_module_init_ro_nx(struct module *mod)
63853 + {
63854 +- set_page_attributes(mod->module_init + mod->init_text_size,
63855 +- mod->module_init + mod->init_size,
63856 ++ set_page_attributes(mod->module_init_rw,
63857 ++ mod->module_init_rw + mod->init_size_rw,
63858 + set_memory_x);
63859 +- set_page_attributes(mod->module_init,
63860 +- mod->module_init + mod->init_ro_size,
63861 ++ set_page_attributes(mod->module_init_rx,
63862 ++ mod->module_init_rx + mod->init_size_rx,
63863 + set_memory_rw);
63864 + }
63865 +
63866 +@@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
63867 +
63868 + mutex_lock(&module_mutex);
63869 + list_for_each_entry_rcu(mod, &modules, list) {
63870 +- if ((mod->module_core) && (mod->core_text_size)) {
63871 +- set_page_attributes(mod->module_core,
63872 +- mod->module_core + mod->core_text_size,
63873 ++ if ((mod->module_core_rx) && (mod->core_size_rx)) {
63874 ++ set_page_attributes(mod->module_core_rx,
63875 ++ mod->module_core_rx + mod->core_size_rx,
63876 + set_memory_rw);
63877 + }
63878 +- if ((mod->module_init) && (mod->init_text_size)) {
63879 +- set_page_attributes(mod->module_init,
63880 +- mod->module_init + mod->init_text_size,
63881 ++ if ((mod->module_init_rx) && (mod->init_size_rx)) {
63882 ++ set_page_attributes(mod->module_init_rx,
63883 ++ mod->module_init_rx + mod->init_size_rx,
63884 + set_memory_rw);
63885 + }
63886 + }
63887 +@@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
63888 +
63889 + mutex_lock(&module_mutex);
63890 + list_for_each_entry_rcu(mod, &modules, list) {
63891 +- if ((mod->module_core) && (mod->core_text_size)) {
63892 +- set_page_attributes(mod->module_core,
63893 +- mod->module_core + mod->core_text_size,
63894 ++ if ((mod->module_core_rx) && (mod->core_size_rx)) {
63895 ++ set_page_attributes(mod->module_core_rx,
63896 ++ mod->module_core_rx + mod->core_size_rx,
63897 + set_memory_ro);
63898 + }
63899 +- if ((mod->module_init) && (mod->init_text_size)) {
63900 +- set_page_attributes(mod->module_init,
63901 +- mod->module_init + mod->init_text_size,
63902 ++ if ((mod->module_init_rx) && (mod->init_size_rx)) {
63903 ++ set_page_attributes(mod->module_init_rx,
63904 ++ mod->module_init_rx + mod->init_size_rx,
63905 + set_memory_ro);
63906 + }
63907 + }
63908 +@@ -1748,16 +1750,19 @@ static void free_module(struct module *m
63909 +
63910 + /* This may be NULL, but that's OK */
63911 + unset_module_init_ro_nx(mod);
63912 +- module_free(mod, mod->module_init);
63913 ++ module_free(mod, mod->module_init_rw);
63914 ++ module_free_exec(mod, mod->module_init_rx);
63915 + kfree(mod->args);
63916 + percpu_modfree(mod);
63917 +
63918 + /* Free lock-classes: */
63919 +- lockdep_free_key_range(mod->module_core, mod->core_size);
63920 ++ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63921 ++ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63922 +
63923 + /* Finally, free the core (containing the module structure) */
63924 + unset_module_core_ro_nx(mod);
63925 +- module_free(mod, mod->module_core);
63926 ++ module_free_exec(mod, mod->module_core_rx);
63927 ++ module_free(mod, mod->module_core_rw);
63928 +
63929 + #ifdef CONFIG_MPU
63930 + update_protections(current->mm);
63931 +@@ -1826,10 +1831,31 @@ static int simplify_symbols(struct modul
63932 + unsigned int i;
63933 + int ret = 0;
63934 + const struct kernel_symbol *ksym;
63935 ++#ifdef CONFIG_GRKERNSEC_MODHARDEN
63936 ++ int is_fs_load = 0;
63937 ++ int register_filesystem_found = 0;
63938 ++ char *p;
63939 ++
63940 ++ p = strstr(mod->args, "grsec_modharden_fs");
63941 ++ if (p) {
63942 ++ char *endptr = p + strlen("grsec_modharden_fs");
63943 ++ /* copy \0 as well */
63944 ++ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63945 ++ is_fs_load = 1;
63946 ++ }
63947 ++#endif
63948 +
63949 + for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
63950 + const char *name = info->strtab + sym[i].st_name;
63951 +
63952 ++#ifdef CONFIG_GRKERNSEC_MODHARDEN
63953 ++ /* it's a real shame this will never get ripped and copied
63954 ++ upstream! ;(
63955 ++ */
63956 ++ if (is_fs_load && !strcmp(name, "register_filesystem"))
63957 ++ register_filesystem_found = 1;
63958 ++#endif
63959 ++
63960 + switch (sym[i].st_shndx) {
63961 + case SHN_COMMON:
63962 + /* We compiled with -fno-common. These are not
63963 +@@ -1850,7 +1876,9 @@ static int simplify_symbols(struct modul
63964 + ksym = resolve_symbol_wait(mod, info, name);
63965 + /* Ok if resolved. */
63966 + if (ksym && !IS_ERR(ksym)) {
63967 ++ pax_open_kernel();
63968 + sym[i].st_value = ksym->value;
63969 ++ pax_close_kernel();
63970 + break;
63971 + }
63972 +
63973 +@@ -1869,11 +1897,20 @@ static int simplify_symbols(struct modul
63974 + secbase = (unsigned long)mod_percpu(mod);
63975 + else
63976 + secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
63977 ++ pax_open_kernel();
63978 + sym[i].st_value += secbase;
63979 ++ pax_close_kernel();
63980 + break;
63981 + }
63982 + }
63983 +
63984 ++#ifdef CONFIG_GRKERNSEC_MODHARDEN
63985 ++ if (is_fs_load && !register_filesystem_found) {
63986 ++ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63987 ++ ret = -EPERM;
63988 ++ }
63989 ++#endif
63990 ++
63991 + return ret;
63992 + }
63993 +
63994 +@@ -1977,22 +2014,12 @@ static void layout_sections(struct modul
63995 + || s->sh_entsize != ~0UL
63996 + || strstarts(sname, ".init"))
63997 + continue;
63998 +- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63999 ++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64000 ++ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64001 ++ else
64002 ++ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64003 + DEBUGP("\t%s\n", name);
64004 + }
64005 +- switch (m) {
64006 +- case 0: /* executable */
64007 +- mod->core_size = debug_align(mod->core_size);
64008 +- mod->core_text_size = mod->core_size;
64009 +- break;
64010 +- case 1: /* RO: text and ro-data */
64011 +- mod->core_size = debug_align(mod->core_size);
64012 +- mod->core_ro_size = mod->core_size;
64013 +- break;
64014 +- case 3: /* whole core */
64015 +- mod->core_size = debug_align(mod->core_size);
64016 +- break;
64017 +- }
64018 + }
64019 +
64020 + DEBUGP("Init section allocation order:\n");
64021 +@@ -2006,23 +2033,13 @@ static void layout_sections(struct modul
64022 + || s->sh_entsize != ~0UL
64023 + || !strstarts(sname, ".init"))
64024 + continue;
64025 +- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64026 +- | INIT_OFFSET_MASK);
64027 ++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64028 ++ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64029 ++ else
64030 ++ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64031 ++ s->sh_entsize |= INIT_OFFSET_MASK;
64032 + DEBUGP("\t%s\n", sname);
64033 + }
64034 +- switch (m) {
64035 +- case 0: /* executable */
64036 +- mod->init_size = debug_align(mod->init_size);
64037 +- mod->init_text_size = mod->init_size;
64038 +- break;
64039 +- case 1: /* RO: text and ro-data */
64040 +- mod->init_size = debug_align(mod->init_size);
64041 +- mod->init_ro_size = mod->init_size;
64042 +- break;
64043 +- case 3: /* whole init */
64044 +- mod->init_size = debug_align(mod->init_size);
64045 +- break;
64046 +- }
64047 + }
64048 + }
64049 +
64050 +@@ -2187,7 +2204,7 @@ static void layout_symtab(struct module
64051 +
64052 + /* Put symbol section at end of init part of module. */
64053 + symsect->sh_flags |= SHF_ALLOC;
64054 +- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64055 ++ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64056 + info->index.sym) | INIT_OFFSET_MASK;
64057 + DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
64058 +
64059 +@@ -2204,19 +2221,19 @@ static void layout_symtab(struct module
64060 + }
64061 +
64062 + /* Append room for core symbols at end of core part. */
64063 +- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64064 +- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
64065 ++ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64066 ++ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
64067 +
64068 + /* Put string table section at end of init part of module. */
64069 + strsect->sh_flags |= SHF_ALLOC;
64070 +- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64071 ++ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64072 + info->index.str) | INIT_OFFSET_MASK;
64073 + DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
64074 +
64075 + /* Append room for core symbols' strings at end of core part. */
64076 +- info->stroffs = mod->core_size;
64077 ++ info->stroffs = mod->core_size_rx;
64078 + __set_bit(0, info->strmap);
64079 +- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
64080 ++ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
64081 + }
64082 +
64083 + static void add_kallsyms(struct module *mod, const struct load_info *info)
64084 +@@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *
64085 + /* Make sure we get permanent strtab: don't use info->strtab. */
64086 + mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
64087 +
64088 ++ pax_open_kernel();
64089 ++
64090 + /* Set types up while we still have access to sections. */
64091 + for (i = 0; i < mod->num_symtab; i++)
64092 + mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
64093 +
64094 +- mod->core_symtab = dst = mod->module_core + info->symoffs;
64095 ++ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
64096 + src = mod->symtab;
64097 + *dst = *src;
64098 + for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64099 +@@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *
64100 + }
64101 + mod->core_num_syms = ndst;
64102 +
64103 +- mod->core_strtab = s = mod->module_core + info->stroffs;
64104 ++ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
64105 + for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
64106 + if (test_bit(i, info->strmap))
64107 + *++s = mod->strtab[i];
64108 ++
64109 ++ pax_close_kernel();
64110 + }
64111 + #else
64112 + static inline void layout_symtab(struct module *mod, struct load_info *info)
64113 +@@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long
64114 + return size == 0 ? NULL : vmalloc_exec(size);
64115 + }
64116 +
64117 +-static void *module_alloc_update_bounds(unsigned long size)
64118 ++static void *module_alloc_update_bounds_rw(unsigned long size)
64119 + {
64120 + void *ret = module_alloc(size);
64121 +
64122 + if (ret) {
64123 + mutex_lock(&module_mutex);
64124 + /* Update module bounds. */
64125 +- if ((unsigned long)ret < module_addr_min)
64126 +- module_addr_min = (unsigned long)ret;
64127 +- if ((unsigned long)ret + size > module_addr_max)
64128 +- module_addr_max = (unsigned long)ret + size;
64129 ++ if ((unsigned long)ret < module_addr_min_rw)
64130 ++ module_addr_min_rw = (unsigned long)ret;
64131 ++ if ((unsigned long)ret + size > module_addr_max_rw)
64132 ++ module_addr_max_rw = (unsigned long)ret + size;
64133 ++ mutex_unlock(&module_mutex);
64134 ++ }
64135 ++ return ret;
64136 ++}
64137 ++
64138 ++static void *module_alloc_update_bounds_rx(unsigned long size)
64139 ++{
64140 ++ void *ret = module_alloc_exec(size);
64141 ++
64142 ++ if (ret) {
64143 ++ mutex_lock(&module_mutex);
64144 ++ /* Update module bounds. */
64145 ++ if ((unsigned long)ret < module_addr_min_rx)
64146 ++ module_addr_min_rx = (unsigned long)ret;
64147 ++ if ((unsigned long)ret + size > module_addr_max_rx)
64148 ++ module_addr_max_rx = (unsigned long)ret + size;
64149 + mutex_unlock(&module_mutex);
64150 + }
64151 + return ret;
64152 +@@ -2589,7 +2626,7 @@ static int move_module(struct module *mo
64153 + void *ptr;
64154 +
64155 + /* Do the allocs. */
64156 +- ptr = module_alloc_update_bounds(mod->core_size);
64157 ++ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64158 + /*
64159 + * The pointer to this block is stored in the module structure
64160 + * which is inside the block. Just mark it as not being a
64161 +@@ -2599,23 +2636,50 @@ static int move_module(struct module *mo
64162 + if (!ptr)
64163 + return -ENOMEM;
64164 +
64165 +- memset(ptr, 0, mod->core_size);
64166 +- mod->module_core = ptr;
64167 ++ memset(ptr, 0, mod->core_size_rw);
64168 ++ mod->module_core_rw = ptr;
64169 +
64170 +- ptr = module_alloc_update_bounds(mod->init_size);
64171 ++ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64172 + /*
64173 + * The pointer to this block is stored in the module structure
64174 + * which is inside the block. This block doesn't need to be
64175 + * scanned as it contains data and code that will be freed
64176 + * after the module is initialized.
64177 + */
64178 +- kmemleak_ignore(ptr);
64179 +- if (!ptr && mod->init_size) {
64180 +- module_free(mod, mod->module_core);
64181 ++ kmemleak_not_leak(ptr);
64182 ++ if (!ptr && mod->init_size_rw) {
64183 ++ module_free(mod, mod->module_core_rw);
64184 + return -ENOMEM;
64185 + }
64186 +- memset(ptr, 0, mod->init_size);
64187 +- mod->module_init = ptr;
64188 ++ memset(ptr, 0, mod->init_size_rw);
64189 ++ mod->module_init_rw = ptr;
64190 ++
64191 ++ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64192 ++ kmemleak_not_leak(ptr);
64193 ++ if (!ptr) {
64194 ++ module_free(mod, mod->module_init_rw);
64195 ++ module_free(mod, mod->module_core_rw);
64196 ++ return -ENOMEM;
64197 ++ }
64198 ++
64199 ++ pax_open_kernel();
64200 ++ memset(ptr, 0, mod->core_size_rx);
64201 ++ pax_close_kernel();
64202 ++ mod->module_core_rx = ptr;
64203 ++
64204 ++ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64205 ++ kmemleak_not_leak(ptr);
64206 ++ if (!ptr && mod->init_size_rx) {
64207 ++ module_free_exec(mod, mod->module_core_rx);
64208 ++ module_free(mod, mod->module_init_rw);
64209 ++ module_free(mod, mod->module_core_rw);
64210 ++ return -ENOMEM;
64211 ++ }
64212 ++
64213 ++ pax_open_kernel();
64214 ++ memset(ptr, 0, mod->init_size_rx);
64215 ++ pax_close_kernel();
64216 ++ mod->module_init_rx = ptr;
64217 +
64218 + /* Transfer each section which specifies SHF_ALLOC */
64219 + DEBUGP("final section addresses:\n");
64220 +@@ -2626,16 +2690,45 @@ static int move_module(struct module *mo
64221 + if (!(shdr->sh_flags & SHF_ALLOC))
64222 + continue;
64223 +
64224 +- if (shdr->sh_entsize & INIT_OFFSET_MASK)
64225 +- dest = mod->module_init
64226 +- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64227 +- else
64228 +- dest = mod->module_core + shdr->sh_entsize;
64229 ++ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64230 ++ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64231 ++ dest = mod->module_init_rw
64232 ++ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64233 ++ else
64234 ++ dest = mod->module_init_rx
64235 ++ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64236 ++ } else {
64237 ++ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64238 ++ dest = mod->module_core_rw + shdr->sh_entsize;
64239 ++ else
64240 ++ dest = mod->module_core_rx + shdr->sh_entsize;
64241 ++ }
64242 ++
64243 ++ if (shdr->sh_type != SHT_NOBITS) {
64244 ++
64245 ++#ifdef CONFIG_PAX_KERNEXEC
64246 ++#ifdef CONFIG_X86_64
64247 ++ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64248 ++ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64249 ++#endif
64250 ++ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64251 ++ pax_open_kernel();
64252 ++ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64253 ++ pax_close_kernel();
64254 ++ } else
64255 ++#endif
64256 +
64257 +- if (shdr->sh_type != SHT_NOBITS)
64258 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64259 ++ }
64260 + /* Update sh_addr to point to copy in image. */
64261 +- shdr->sh_addr = (unsigned long)dest;
64262 ++
64263 ++#ifdef CONFIG_PAX_KERNEXEC
64264 ++ if (shdr->sh_flags & SHF_EXECINSTR)
64265 ++ shdr->sh_addr = ktva_ktla((unsigned long)dest);
64266 ++ else
64267 ++#endif
64268 ++
64269 ++ shdr->sh_addr = (unsigned long)dest;
64270 + DEBUGP("\t0x%lx %s\n",
64271 + shdr->sh_addr, info->secstrings + shdr->sh_name);
64272 + }
64273 +@@ -2686,12 +2779,12 @@ static void flush_module_icache(const st
64274 + * Do it before processing of module parameters, so the module
64275 + * can provide parameter accessor functions of its own.
64276 + */
64277 +- if (mod->module_init)
64278 +- flush_icache_range((unsigned long)mod->module_init,
64279 +- (unsigned long)mod->module_init
64280 +- + mod->init_size);
64281 +- flush_icache_range((unsigned long)mod->module_core,
64282 +- (unsigned long)mod->module_core + mod->core_size);
64283 ++ if (mod->module_init_rx)
64284 ++ flush_icache_range((unsigned long)mod->module_init_rx,
64285 ++ (unsigned long)mod->module_init_rx
64286 ++ + mod->init_size_rx);
64287 ++ flush_icache_range((unsigned long)mod->module_core_rx,
64288 ++ (unsigned long)mod->module_core_rx + mod->core_size_rx);
64289 +
64290 + set_fs(old_fs);
64291 + }
64292 +@@ -2771,8 +2864,10 @@ static void module_deallocate(struct mod
64293 + {
64294 + kfree(info->strmap);
64295 + percpu_modfree(mod);
64296 +- module_free(mod, mod->module_init);
64297 +- module_free(mod, mod->module_core);
64298 ++ module_free_exec(mod, mod->module_init_rx);
64299 ++ module_free_exec(mod, mod->module_core_rx);
64300 ++ module_free(mod, mod->module_init_rw);
64301 ++ module_free(mod, mod->module_core_rw);
64302 + }
64303 +
64304 + int __weak module_finalize(const Elf_Ehdr *hdr,
64305 +@@ -2836,9 +2931,38 @@ static struct module *load_module(void _
64306 + if (err)
64307 + goto free_unload;
64308 +
64309 ++ /* Now copy in args */
64310 ++ mod->args = strndup_user(uargs, ~0UL >> 1);
64311 ++ if (IS_ERR(mod->args)) {
64312 ++ err = PTR_ERR(mod->args);
64313 ++ goto free_unload;
64314 ++ }
64315 ++
64316 + /* Set up MODINFO_ATTR fields */
64317 + setup_modinfo(mod, &info);
64318 +
64319 ++#ifdef CONFIG_GRKERNSEC_MODHARDEN
64320 ++ {
64321 ++ char *p, *p2;
64322 ++
64323 ++ if (strstr(mod->args, "grsec_modharden_netdev")) {
64324 ++ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64325 ++ err = -EPERM;
64326 ++ goto free_modinfo;
64327 ++ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64328 ++ p += strlen("grsec_modharden_normal");
64329 ++ p2 = strstr(p, "_");
64330 ++ if (p2) {
64331 ++ *p2 = '\0';
64332 ++ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64333 ++ *p2 = '_';
64334 ++ }
64335 ++ err = -EPERM;
64336 ++ goto free_modinfo;
64337 ++ }
64338 ++ }
64339 ++#endif
64340 ++
64341 + /* Fix up syms, so that st_value is a pointer to location. */
64342 + err = simplify_symbols(mod, &info);
64343 + if (err < 0)
64344 +@@ -2854,13 +2978,6 @@ static struct module *load_module(void _
64345 +
64346 + flush_module_icache(mod);
64347 +
64348 +- /* Now copy in args */
64349 +- mod->args = strndup_user(uargs, ~0UL >> 1);
64350 +- if (IS_ERR(mod->args)) {
64351 +- err = PTR_ERR(mod->args);
64352 +- goto free_arch_cleanup;
64353 +- }
64354 +-
64355 + /* Mark state as coming so strong_try_module_get() ignores us. */
64356 + mod->state = MODULE_STATE_COMING;
64357 +
64358 +@@ -2920,11 +3037,10 @@ static struct module *load_module(void _
64359 + unlock:
64360 + mutex_unlock(&module_mutex);
64361 + synchronize_sched();
64362 +- kfree(mod->args);
64363 +- free_arch_cleanup:
64364 + module_arch_cleanup(mod);
64365 + free_modinfo:
64366 + free_modinfo(mod);
64367 ++ kfree(mod->args);
64368 + free_unload:
64369 + module_unload_free(mod);
64370 + free_module:
64371 +@@ -2965,16 +3081,16 @@ SYSCALL_DEFINE3(init_module, void __user
64372 + MODULE_STATE_COMING, mod);
64373 +
64374 + /* Set RO and NX regions for core */
64375 +- set_section_ro_nx(mod->module_core,
64376 +- mod->core_text_size,
64377 +- mod->core_ro_size,
64378 +- mod->core_size);
64379 ++ set_section_ro_nx(mod->module_core_rx,
64380 ++ mod->core_size_rx,
64381 ++ mod->core_size_rx,
64382 ++ mod->core_size_rx);
64383 +
64384 + /* Set RO and NX regions for init */
64385 +- set_section_ro_nx(mod->module_init,
64386 +- mod->init_text_size,
64387 +- mod->init_ro_size,
64388 +- mod->init_size);
64389 ++ set_section_ro_nx(mod->module_init_rx,
64390 ++ mod->init_size_rx,
64391 ++ mod->init_size_rx,
64392 ++ mod->init_size_rx);
64393 +
64394 + do_mod_ctors(mod);
64395 + /* Start the module */
64396 +@@ -3020,11 +3136,12 @@ SYSCALL_DEFINE3(init_module, void __user
64397 + mod->strtab = mod->core_strtab;
64398 + #endif
64399 + unset_module_init_ro_nx(mod);
64400 +- module_free(mod, mod->module_init);
64401 +- mod->module_init = NULL;
64402 +- mod->init_size = 0;
64403 +- mod->init_ro_size = 0;
64404 +- mod->init_text_size = 0;
64405 ++ module_free(mod, mod->module_init_rw);
64406 ++ module_free_exec(mod, mod->module_init_rx);
64407 ++ mod->module_init_rw = NULL;
64408 ++ mod->module_init_rx = NULL;
64409 ++ mod->init_size_rw = 0;
64410 ++ mod->init_size_rx = 0;
64411 + mutex_unlock(&module_mutex);
64412 +
64413 + return 0;
64414 +@@ -3055,10 +3172,16 @@ static const char *get_ksymbol(struct mo
64415 + unsigned long nextval;
64416 +
64417 + /* At worse, next value is at end of module */
64418 +- if (within_module_init(addr, mod))
64419 +- nextval = (unsigned long)mod->module_init+mod->init_text_size;
64420 ++ if (within_module_init_rx(addr, mod))
64421 ++ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64422 ++ else if (within_module_init_rw(addr, mod))
64423 ++ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64424 ++ else if (within_module_core_rx(addr, mod))
64425 ++ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64426 ++ else if (within_module_core_rw(addr, mod))
64427 ++ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64428 + else
64429 +- nextval = (unsigned long)mod->module_core+mod->core_text_size;
64430 ++ return NULL;
64431 +
64432 + /* Scan for closest preceding symbol, and next symbol. (ELF
64433 + starts real symbols at 1). */
64434 +@@ -3304,7 +3427,7 @@ static int m_show(struct seq_file *m, vo
64435 + char buf[8];
64436 +
64437 + seq_printf(m, "%s %u",
64438 +- mod->name, mod->init_size + mod->core_size);
64439 ++ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64440 + print_unload_info(m, mod);
64441 +
64442 + /* Informative for users. */
64443 +@@ -3313,7 +3436,7 @@ static int m_show(struct seq_file *m, vo
64444 + mod->state == MODULE_STATE_COMING ? "Loading":
64445 + "Live");
64446 + /* Used by oprofile and other similar tools. */
64447 +- seq_printf(m, " 0x%pK", mod->module_core);
64448 ++ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
64449 +
64450 + /* Taints info */
64451 + if (mod->taints)
64452 +@@ -3349,7 +3472,17 @@ static const struct file_operations proc
64453 +
64454 + static int __init proc_modules_init(void)
64455 + {
64456 ++#ifndef CONFIG_GRKERNSEC_HIDESYM
64457 ++#ifdef CONFIG_GRKERNSEC_PROC_USER
64458 ++ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64459 ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64460 ++ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64461 ++#else
64462 + proc_create("modules", 0, NULL, &proc_modules_operations);
64463 ++#endif
64464 ++#else
64465 ++ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64466 ++#endif
64467 + return 0;
64468 + }
64469 + module_init(proc_modules_init);
64470 +@@ -3408,12 +3541,12 @@ struct module *__module_address(unsigned
64471 + {
64472 + struct module *mod;
64473 +
64474 +- if (addr < module_addr_min || addr > module_addr_max)
64475 ++ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64476 ++ (addr < module_addr_min_rw || addr > module_addr_max_rw))
64477 + return NULL;
64478 +
64479 + list_for_each_entry_rcu(mod, &modules, list)
64480 +- if (within_module_core(addr, mod)
64481 +- || within_module_init(addr, mod))
64482 ++ if (within_module_init(addr, mod) || within_module_core(addr, mod))
64483 + return mod;
64484 + return NULL;
64485 + }
64486 +@@ -3447,11 +3580,20 @@ bool is_module_text_address(unsigned lon
64487 + */
64488 + struct module *__module_text_address(unsigned long addr)
64489 + {
64490 +- struct module *mod = __module_address(addr);
64491 ++ struct module *mod;
64492 ++
64493 ++#ifdef CONFIG_X86_32
64494 ++ addr = ktla_ktva(addr);
64495 ++#endif
64496 ++
64497 ++ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64498 ++ return NULL;
64499 ++
64500 ++ mod = __module_address(addr);
64501 ++
64502 + if (mod) {
64503 + /* Make sure it's within the text section. */
64504 +- if (!within(addr, mod->module_init, mod->init_text_size)
64505 +- && !within(addr, mod->module_core, mod->core_text_size))
64506 ++ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64507 + mod = NULL;
64508 + }
64509 + return mod;
64510 +diff -urNp linux-3.1.1/kernel/mutex.c linux-3.1.1/kernel/mutex.c
64511 +--- linux-3.1.1/kernel/mutex.c 2011-11-11 15:19:27.000000000 -0500
64512 ++++ linux-3.1.1/kernel/mutex.c 2011-11-16 18:39:08.000000000 -0500
64513 +@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock,
64514 + spin_lock_mutex(&lock->wait_lock, flags);
64515 +
64516 + debug_mutex_lock_common(lock, &waiter);
64517 +- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64518 ++ debug_mutex_add_waiter(lock, &waiter, task);
64519 +
64520 + /* add waiting tasks to the end of the waitqueue (FIFO): */
64521 + list_add_tail(&waiter.list, &lock->wait_list);
64522 +@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock,
64523 + * TASK_UNINTERRUPTIBLE case.)
64524 + */
64525 + if (unlikely(signal_pending_state(state, task))) {
64526 +- mutex_remove_waiter(lock, &waiter,
64527 +- task_thread_info(task));
64528 ++ mutex_remove_waiter(lock, &waiter, task);
64529 + mutex_release(&lock->dep_map, 1, ip);
64530 + spin_unlock_mutex(&lock->wait_lock, flags);
64531 +
64532 +@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock,
64533 + done:
64534 + lock_acquired(&lock->dep_map, ip);
64535 + /* got the lock - rejoice! */
64536 +- mutex_remove_waiter(lock, &waiter, current_thread_info());
64537 ++ mutex_remove_waiter(lock, &waiter, task);
64538 + mutex_set_owner(lock);
64539 +
64540 + /* set it to 0 if there are no waiters left: */
64541 +diff -urNp linux-3.1.1/kernel/mutex-debug.c linux-3.1.1/kernel/mutex-debug.c
64542 +--- linux-3.1.1/kernel/mutex-debug.c 2011-11-11 15:19:27.000000000 -0500
64543 ++++ linux-3.1.1/kernel/mutex-debug.c 2011-11-16 18:39:08.000000000 -0500
64544 +@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64545 + }
64546 +
64547 + void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64548 +- struct thread_info *ti)
64549 ++ struct task_struct *task)
64550 + {
64551 + SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64552 +
64553 + /* Mark the current thread as blocked on the lock: */
64554 +- ti->task->blocked_on = waiter;
64555 ++ task->blocked_on = waiter;
64556 + }
64557 +
64558 + void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64559 +- struct thread_info *ti)
64560 ++ struct task_struct *task)
64561 + {
64562 + DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64563 +- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64564 +- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64565 +- ti->task->blocked_on = NULL;
64566 ++ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64567 ++ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64568 ++ task->blocked_on = NULL;
64569 +
64570 + list_del_init(&waiter->list);
64571 + waiter->task = NULL;
64572 +diff -urNp linux-3.1.1/kernel/mutex-debug.h linux-3.1.1/kernel/mutex-debug.h
64573 +--- linux-3.1.1/kernel/mutex-debug.h 2011-11-11 15:19:27.000000000 -0500
64574 ++++ linux-3.1.1/kernel/mutex-debug.h 2011-11-16 18:39:08.000000000 -0500
64575 +@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
64576 + extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64577 + extern void debug_mutex_add_waiter(struct mutex *lock,
64578 + struct mutex_waiter *waiter,
64579 +- struct thread_info *ti);
64580 ++ struct task_struct *task);
64581 + extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64582 +- struct thread_info *ti);
64583 ++ struct task_struct *task);
64584 + extern void debug_mutex_unlock(struct mutex *lock);
64585 + extern void debug_mutex_init(struct mutex *lock, const char *name,
64586 + struct lock_class_key *key);
64587 +diff -urNp linux-3.1.1/kernel/padata.c linux-3.1.1/kernel/padata.c
64588 +--- linux-3.1.1/kernel/padata.c 2011-11-11 15:19:27.000000000 -0500
64589 ++++ linux-3.1.1/kernel/padata.c 2011-11-16 18:39:08.000000000 -0500
64590 +@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
64591 + padata->pd = pd;
64592 + padata->cb_cpu = cb_cpu;
64593 +
64594 +- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
64595 +- atomic_set(&pd->seq_nr, -1);
64596 ++ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
64597 ++ atomic_set_unchecked(&pd->seq_nr, -1);
64598 +
64599 +- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
64600 ++ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
64601 +
64602 + target_cpu = padata_cpu_hash(padata);
64603 + queue = per_cpu_ptr(pd->pqueue, target_cpu);
64604 +@@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
64605 + padata_init_pqueues(pd);
64606 + padata_init_squeues(pd);
64607 + setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
64608 +- atomic_set(&pd->seq_nr, -1);
64609 ++ atomic_set_unchecked(&pd->seq_nr, -1);
64610 + atomic_set(&pd->reorder_objects, 0);
64611 + atomic_set(&pd->refcnt, 0);
64612 + pd->pinst = pinst;
64613 +diff -urNp linux-3.1.1/kernel/panic.c linux-3.1.1/kernel/panic.c
64614 +--- linux-3.1.1/kernel/panic.c 2011-11-11 15:19:27.000000000 -0500
64615 ++++ linux-3.1.1/kernel/panic.c 2011-11-16 18:40:44.000000000 -0500
64616 +@@ -371,7 +371,7 @@ static void warn_slowpath_common(const c
64617 + const char *board;
64618 +
64619 + printk(KERN_WARNING "------------[ cut here ]------------\n");
64620 +- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64621 ++ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64622 + board = dmi_get_system_info(DMI_PRODUCT_NAME);
64623 + if (board)
64624 + printk(KERN_WARNING "Hardware name: %s\n", board);
64625 +@@ -426,7 +426,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64626 + */
64627 + void __stack_chk_fail(void)
64628 + {
64629 +- panic("stack-protector: Kernel stack is corrupted in: %p\n",
64630 ++ dump_stack();
64631 ++ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64632 + __builtin_return_address(0));
64633 + }
64634 + EXPORT_SYMBOL(__stack_chk_fail);
64635 +diff -urNp linux-3.1.1/kernel/pid.c linux-3.1.1/kernel/pid.c
64636 +--- linux-3.1.1/kernel/pid.c 2011-11-11 15:19:27.000000000 -0500
64637 ++++ linux-3.1.1/kernel/pid.c 2011-11-16 18:40:44.000000000 -0500
64638 +@@ -33,6 +33,7 @@
64639 + #include <linux/rculist.h>
64640 + #include <linux/bootmem.h>
64641 + #include <linux/hash.h>
64642 ++#include <linux/security.h>
64643 + #include <linux/pid_namespace.h>
64644 + #include <linux/init_task.h>
64645 + #include <linux/syscalls.h>
64646 +@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64647 +
64648 + int pid_max = PID_MAX_DEFAULT;
64649 +
64650 +-#define RESERVED_PIDS 300
64651 ++#define RESERVED_PIDS 500
64652 +
64653 + int pid_max_min = RESERVED_PIDS + 1;
64654 + int pid_max_max = PID_MAX_LIMIT;
64655 +@@ -418,8 +419,15 @@ EXPORT_SYMBOL(pid_task);
64656 + */
64657 + struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64658 + {
64659 ++ struct task_struct *task;
64660 ++
64661 + rcu_lockdep_assert(rcu_read_lock_held());
64662 +- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64663 ++ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64664 ++
64665 ++ if (gr_pid_is_chrooted(task))
64666 ++ return NULL;
64667 ++
64668 ++ return task;
64669 + }
64670 +
64671 + struct task_struct *find_task_by_vpid(pid_t vnr)
64672 +@@ -427,6 +435,12 @@ struct task_struct *find_task_by_vpid(pi
64673 + return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64674 + }
64675 +
64676 ++struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64677 ++{
64678 ++ rcu_lockdep_assert(rcu_read_lock_held());
64679 ++ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64680 ++}
64681 ++
64682 + struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64683 + {
64684 + struct pid *pid;
64685 +diff -urNp linux-3.1.1/kernel/posix-cpu-timers.c linux-3.1.1/kernel/posix-cpu-timers.c
64686 +--- linux-3.1.1/kernel/posix-cpu-timers.c 2011-11-11 15:19:27.000000000 -0500
64687 ++++ linux-3.1.1/kernel/posix-cpu-timers.c 2011-11-16 18:40:44.000000000 -0500
64688 +@@ -6,6 +6,7 @@
64689 + #include <linux/posix-timers.h>
64690 + #include <linux/errno.h>
64691 + #include <linux/math64.h>
64692 ++#include <linux/security.h>
64693 + #include <asm/uaccess.h>
64694 + #include <linux/kernel_stat.h>
64695 + #include <trace/events/timer.h>
64696 +@@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
64697 +
64698 + static __init int init_posix_cpu_timers(void)
64699 + {
64700 +- struct k_clock process = {
64701 ++ static struct k_clock process = {
64702 + .clock_getres = process_cpu_clock_getres,
64703 + .clock_get = process_cpu_clock_get,
64704 + .timer_create = process_cpu_timer_create,
64705 + .nsleep = process_cpu_nsleep,
64706 + .nsleep_restart = process_cpu_nsleep_restart,
64707 + };
64708 +- struct k_clock thread = {
64709 ++ static struct k_clock thread = {
64710 + .clock_getres = thread_cpu_clock_getres,
64711 + .clock_get = thread_cpu_clock_get,
64712 + .timer_create = thread_cpu_timer_create,
64713 +diff -urNp linux-3.1.1/kernel/posix-timers.c linux-3.1.1/kernel/posix-timers.c
64714 +--- linux-3.1.1/kernel/posix-timers.c 2011-11-11 15:19:27.000000000 -0500
64715 ++++ linux-3.1.1/kernel/posix-timers.c 2011-11-16 18:40:44.000000000 -0500
64716 +@@ -43,6 +43,7 @@
64717 + #include <linux/idr.h>
64718 + #include <linux/posix-clock.h>
64719 + #include <linux/posix-timers.h>
64720 ++#include <linux/grsecurity.h>
64721 + #include <linux/syscalls.h>
64722 + #include <linux/wait.h>
64723 + #include <linux/workqueue.h>
64724 +@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
64725 + * which we beg off on and pass to do_sys_settimeofday().
64726 + */
64727 +
64728 +-static struct k_clock posix_clocks[MAX_CLOCKS];
64729 ++static struct k_clock *posix_clocks[MAX_CLOCKS];
64730 +
64731 + /*
64732 + * These ones are defined below.
64733 +@@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
64734 + */
64735 + static __init int init_posix_timers(void)
64736 + {
64737 +- struct k_clock clock_realtime = {
64738 ++ static struct k_clock clock_realtime = {
64739 + .clock_getres = hrtimer_get_res,
64740 + .clock_get = posix_clock_realtime_get,
64741 + .clock_set = posix_clock_realtime_set,
64742 +@@ -239,7 +240,7 @@ static __init int init_posix_timers(void
64743 + .timer_get = common_timer_get,
64744 + .timer_del = common_timer_del,
64745 + };
64746 +- struct k_clock clock_monotonic = {
64747 ++ static struct k_clock clock_monotonic = {
64748 + .clock_getres = hrtimer_get_res,
64749 + .clock_get = posix_ktime_get_ts,
64750 + .nsleep = common_nsleep,
64751 +@@ -249,19 +250,19 @@ static __init int init_posix_timers(void
64752 + .timer_get = common_timer_get,
64753 + .timer_del = common_timer_del,
64754 + };
64755 +- struct k_clock clock_monotonic_raw = {
64756 ++ static struct k_clock clock_monotonic_raw = {
64757 + .clock_getres = hrtimer_get_res,
64758 + .clock_get = posix_get_monotonic_raw,
64759 + };
64760 +- struct k_clock clock_realtime_coarse = {
64761 ++ static struct k_clock clock_realtime_coarse = {
64762 + .clock_getres = posix_get_coarse_res,
64763 + .clock_get = posix_get_realtime_coarse,
64764 + };
64765 +- struct k_clock clock_monotonic_coarse = {
64766 ++ static struct k_clock clock_monotonic_coarse = {
64767 + .clock_getres = posix_get_coarse_res,
64768 + .clock_get = posix_get_monotonic_coarse,
64769 + };
64770 +- struct k_clock clock_boottime = {
64771 ++ static struct k_clock clock_boottime = {
64772 + .clock_getres = hrtimer_get_res,
64773 + .clock_get = posix_get_boottime,
64774 + .nsleep = common_nsleep,
64775 +@@ -272,6 +273,8 @@ static __init int init_posix_timers(void
64776 + .timer_del = common_timer_del,
64777 + };
64778 +
64779 ++ pax_track_stack();
64780 ++
64781 + posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
64782 + posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
64783 + posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64784 +@@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
64785 + return;
64786 + }
64787 +
64788 +- posix_clocks[clock_id] = *new_clock;
64789 ++ posix_clocks[clock_id] = new_clock;
64790 + }
64791 + EXPORT_SYMBOL_GPL(posix_timers_register_clock);
64792 +
64793 +@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock
64794 + return (id & CLOCKFD_MASK) == CLOCKFD ?
64795 + &clock_posix_dynamic : &clock_posix_cpu;
64796 +
64797 +- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
64798 ++ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
64799 + return NULL;
64800 +- return &posix_clocks[id];
64801 ++ return posix_clocks[id];
64802 + }
64803 +
64804 + static int common_timer_create(struct k_itimer *new_timer)
64805 +@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64806 + if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64807 + return -EFAULT;
64808 +
64809 ++ /* only the CLOCK_REALTIME clock can be set, all other clocks
64810 ++ have their clock_set fptr set to a nosettime dummy function
64811 ++ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
64812 ++ call common_clock_set, which calls do_sys_settimeofday, which
64813 ++ we hook
64814 ++ */
64815 ++
64816 + return kc->clock_set(which_clock, &new_tp);
64817 + }
64818 +
64819 +diff -urNp linux-3.1.1/kernel/power/poweroff.c linux-3.1.1/kernel/power/poweroff.c
64820 +--- linux-3.1.1/kernel/power/poweroff.c 2011-11-11 15:19:27.000000000 -0500
64821 ++++ linux-3.1.1/kernel/power/poweroff.c 2011-11-16 18:39:08.000000000 -0500
64822 +@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64823 + .enable_mask = SYSRQ_ENABLE_BOOT,
64824 + };
64825 +
64826 +-static int pm_sysrq_init(void)
64827 ++static int __init pm_sysrq_init(void)
64828 + {
64829 + register_sysrq_key('o', &sysrq_poweroff_op);
64830 + return 0;
64831 +diff -urNp linux-3.1.1/kernel/power/process.c linux-3.1.1/kernel/power/process.c
64832 +--- linux-3.1.1/kernel/power/process.c 2011-11-11 15:19:27.000000000 -0500
64833 ++++ linux-3.1.1/kernel/power/process.c 2011-11-16 18:39:08.000000000 -0500
64834 +@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
64835 + u64 elapsed_csecs64;
64836 + unsigned int elapsed_csecs;
64837 + bool wakeup = false;
64838 ++ bool timedout = false;
64839 +
64840 + do_gettimeofday(&start);
64841 +
64842 +@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
64843 +
64844 + while (true) {
64845 + todo = 0;
64846 ++ if (time_after(jiffies, end_time))
64847 ++ timedout = true;
64848 + read_lock(&tasklist_lock);
64849 + do_each_thread(g, p) {
64850 + if (frozen(p) || !freezable(p))
64851 +@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
64852 + * try_to_stop() after schedule() in ptrace/signal
64853 + * stop sees TIF_FREEZE.
64854 + */
64855 +- if (!task_is_stopped_or_traced(p) &&
64856 +- !freezer_should_skip(p))
64857 ++ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64858 + todo++;
64859 ++ if (timedout) {
64860 ++ printk(KERN_ERR "Task refusing to freeze:\n");
64861 ++ sched_show_task(p);
64862 ++ }
64863 ++ }
64864 + } while_each_thread(g, p);
64865 + read_unlock(&tasklist_lock);
64866 +
64867 +@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
64868 + todo += wq_busy;
64869 + }
64870 +
64871 +- if (!todo || time_after(jiffies, end_time))
64872 ++ if (!todo || timedout)
64873 + break;
64874 +
64875 + if (pm_wakeup_pending()) {
64876 +diff -urNp linux-3.1.1/kernel/printk.c linux-3.1.1/kernel/printk.c
64877 +--- linux-3.1.1/kernel/printk.c 2011-11-11 15:19:27.000000000 -0500
64878 ++++ linux-3.1.1/kernel/printk.c 2011-11-16 19:38:11.000000000 -0500
64879 +@@ -313,6 +313,11 @@ static int check_syslog_permissions(int
64880 + if (from_file && type != SYSLOG_ACTION_OPEN)
64881 + return 0;
64882 +
64883 ++#ifdef CONFIG_GRKERNSEC_DMESG
64884 ++ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
64885 ++ return -EPERM;
64886 ++#endif
64887 ++
64888 + if (syslog_action_restricted(type)) {
64889 + if (capable(CAP_SYSLOG))
64890 + return 0;
64891 +diff -urNp linux-3.1.1/kernel/profile.c linux-3.1.1/kernel/profile.c
64892 +--- linux-3.1.1/kernel/profile.c 2011-11-11 15:19:27.000000000 -0500
64893 ++++ linux-3.1.1/kernel/profile.c 2011-11-16 18:39:08.000000000 -0500
64894 +@@ -39,7 +39,7 @@ struct profile_hit {
64895 + /* Oprofile timer tick hook */
64896 + static int (*timer_hook)(struct pt_regs *) __read_mostly;
64897 +
64898 +-static atomic_t *prof_buffer;
64899 ++static atomic_unchecked_t *prof_buffer;
64900 + static unsigned long prof_len, prof_shift;
64901 +
64902 + int prof_on __read_mostly;
64903 +@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
64904 + hits[i].pc = 0;
64905 + continue;
64906 + }
64907 +- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64908 ++ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64909 + hits[i].hits = hits[i].pc = 0;
64910 + }
64911 + }
64912 +@@ -342,9 +342,9 @@ static void do_profile_hits(int type, vo
64913 + * Add the current hit(s) and flush the write-queue out
64914 + * to the global buffer:
64915 + */
64916 +- atomic_add(nr_hits, &prof_buffer[pc]);
64917 ++ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64918 + for (i = 0; i < NR_PROFILE_HIT; ++i) {
64919 +- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64920 ++ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64921 + hits[i].pc = hits[i].hits = 0;
64922 + }
64923 + out:
64924 +@@ -419,7 +419,7 @@ static void do_profile_hits(int type, vo
64925 + {
64926 + unsigned long pc;
64927 + pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64928 +- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64929 ++ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64930 + }
64931 + #endif /* !CONFIG_SMP */
64932 +
64933 +@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
64934 + return -EFAULT;
64935 + buf++; p++; count--; read++;
64936 + }
64937 +- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64938 ++ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64939 + if (copy_to_user(buf, (void *)pnt, count))
64940 + return -EFAULT;
64941 + read += count;
64942 +@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
64943 + }
64944 + #endif
64945 + profile_discard_flip_buffers();
64946 +- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64947 ++ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64948 + return count;
64949 + }
64950 +
64951 +diff -urNp linux-3.1.1/kernel/ptrace.c linux-3.1.1/kernel/ptrace.c
64952 +--- linux-3.1.1/kernel/ptrace.c 2011-11-11 15:19:27.000000000 -0500
64953 ++++ linux-3.1.1/kernel/ptrace.c 2011-11-16 19:50:22.000000000 -0500
64954 +@@ -161,7 +161,8 @@ int ptrace_check_attach(struct task_stru
64955 + return ret;
64956 + }
64957 +
64958 +-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64959 ++static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64960 ++ unsigned int log)
64961 + {
64962 + const struct cred *cred = current_cred(), *tcred;
64963 +
64964 +@@ -187,7 +188,8 @@ int __ptrace_may_access(struct task_stru
64965 + cred->gid == tcred->sgid &&
64966 + cred->gid == tcred->gid))
64967 + goto ok;
64968 +- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
64969 ++ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
64970 ++ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
64971 + goto ok;
64972 + rcu_read_unlock();
64973 + return -EPERM;
64974 +@@ -196,7 +198,9 @@ ok:
64975 + smp_rmb();
64976 + if (task->mm)
64977 + dumpable = get_dumpable(task->mm);
64978 +- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
64979 ++ if (!dumpable &&
64980 ++ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
64981 ++ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
64982 + return -EPERM;
64983 +
64984 + return security_ptrace_access_check(task, mode);
64985 +@@ -206,7 +210,16 @@ bool ptrace_may_access(struct task_struc
64986 + {
64987 + int err;
64988 + task_lock(task);
64989 +- err = __ptrace_may_access(task, mode);
64990 ++ err = __ptrace_may_access(task, mode, 0);
64991 ++ task_unlock(task);
64992 ++ return !err;
64993 ++}
64994 ++
64995 ++bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
64996 ++{
64997 ++ int err;
64998 ++ task_lock(task);
64999 ++ err = __ptrace_may_access(task, mode, 1);
65000 + task_unlock(task);
65001 + return !err;
65002 + }
65003 +@@ -251,7 +264,7 @@ static int ptrace_attach(struct task_str
65004 + goto out;
65005 +
65006 + task_lock(task);
65007 +- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65008 ++ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65009 + task_unlock(task);
65010 + if (retval)
65011 + goto unlock_creds;
65012 +@@ -266,7 +279,7 @@ static int ptrace_attach(struct task_str
65013 + task->ptrace = PT_PTRACED;
65014 + if (seize)
65015 + task->ptrace |= PT_SEIZED;
65016 +- if (task_ns_capable(task, CAP_SYS_PTRACE))
65017 ++ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
65018 + task->ptrace |= PT_PTRACE_CAP;
65019 +
65020 + __ptrace_link(task, current);
65021 +@@ -461,6 +474,8 @@ int ptrace_readdata(struct task_struct *
65022 + {
65023 + int copied = 0;
65024 +
65025 ++ pax_track_stack();
65026 ++
65027 + while (len > 0) {
65028 + char buf[128];
65029 + int this_len, retval;
65030 +@@ -472,7 +487,7 @@ int ptrace_readdata(struct task_struct *
65031 + break;
65032 + return -EIO;
65033 + }
65034 +- if (copy_to_user(dst, buf, retval))
65035 ++ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
65036 + return -EFAULT;
65037 + copied += retval;
65038 + src += retval;
65039 +@@ -486,6 +501,8 @@ int ptrace_writedata(struct task_struct
65040 + {
65041 + int copied = 0;
65042 +
65043 ++ pax_track_stack();
65044 ++
65045 + while (len > 0) {
65046 + char buf[128];
65047 + int this_len, retval;
65048 +@@ -669,10 +686,12 @@ int ptrace_request(struct task_struct *c
65049 + bool seized = child->ptrace & PT_SEIZED;
65050 + int ret = -EIO;
65051 + siginfo_t siginfo, *si;
65052 +- void __user *datavp = (void __user *) data;
65053 ++ void __user *datavp = (__force void __user *) data;
65054 + unsigned long __user *datalp = datavp;
65055 + unsigned long flags;
65056 +
65057 ++ pax_track_stack();
65058 ++
65059 + switch (request) {
65060 + case PTRACE_PEEKTEXT:
65061 + case PTRACE_PEEKDATA:
65062 +@@ -871,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
65063 + goto out;
65064 + }
65065 +
65066 ++ if (gr_handle_ptrace(child, request)) {
65067 ++ ret = -EPERM;
65068 ++ goto out_put_task_struct;
65069 ++ }
65070 ++
65071 + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65072 + ret = ptrace_attach(child, request, data);
65073 + /*
65074 + * Some architectures need to do book-keeping after
65075 + * a ptrace attach.
65076 + */
65077 +- if (!ret)
65078 ++ if (!ret) {
65079 + arch_ptrace_attach(child);
65080 ++ gr_audit_ptrace(child);
65081 ++ }
65082 + goto out_put_task_struct;
65083 + }
65084 +
65085 +@@ -904,7 +930,7 @@ int generic_ptrace_peekdata(struct task_
65086 + copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65087 + if (copied != sizeof(tmp))
65088 + return -EIO;
65089 +- return put_user(tmp, (unsigned long __user *)data);
65090 ++ return put_user(tmp, (__force unsigned long __user *)data);
65091 + }
65092 +
65093 + int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
65094 +@@ -927,6 +953,8 @@ int compat_ptrace_request(struct task_st
65095 + siginfo_t siginfo;
65096 + int ret;
65097 +
65098 ++ pax_track_stack();
65099 ++
65100 + switch (request) {
65101 + case PTRACE_PEEKTEXT:
65102 + case PTRACE_PEEKDATA:
65103 +@@ -1014,14 +1042,21 @@ asmlinkage long compat_sys_ptrace(compat
65104 + goto out;
65105 + }
65106 +
65107 ++ if (gr_handle_ptrace(child, request)) {
65108 ++ ret = -EPERM;
65109 ++ goto out_put_task_struct;
65110 ++ }
65111 ++
65112 + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65113 + ret = ptrace_attach(child, request, data);
65114 + /*
65115 + * Some architectures need to do book-keeping after
65116 + * a ptrace attach.
65117 + */
65118 +- if (!ret)
65119 ++ if (!ret) {
65120 + arch_ptrace_attach(child);
65121 ++ gr_audit_ptrace(child);
65122 ++ }
65123 + goto out_put_task_struct;
65124 + }
65125 +
65126 +diff -urNp linux-3.1.1/kernel/rcutorture.c linux-3.1.1/kernel/rcutorture.c
65127 +--- linux-3.1.1/kernel/rcutorture.c 2011-11-11 15:19:27.000000000 -0500
65128 ++++ linux-3.1.1/kernel/rcutorture.c 2011-11-16 18:39:08.000000000 -0500
65129 +@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
65130 + { 0 };
65131 + static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65132 + { 0 };
65133 +-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65134 +-static atomic_t n_rcu_torture_alloc;
65135 +-static atomic_t n_rcu_torture_alloc_fail;
65136 +-static atomic_t n_rcu_torture_free;
65137 +-static atomic_t n_rcu_torture_mberror;
65138 +-static atomic_t n_rcu_torture_error;
65139 ++static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65140 ++static atomic_unchecked_t n_rcu_torture_alloc;
65141 ++static atomic_unchecked_t n_rcu_torture_alloc_fail;
65142 ++static atomic_unchecked_t n_rcu_torture_free;
65143 ++static atomic_unchecked_t n_rcu_torture_mberror;
65144 ++static atomic_unchecked_t n_rcu_torture_error;
65145 + static long n_rcu_torture_boost_ktrerror;
65146 + static long n_rcu_torture_boost_rterror;
65147 + static long n_rcu_torture_boost_failure;
65148 +@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65149 +
65150 + spin_lock_bh(&rcu_torture_lock);
65151 + if (list_empty(&rcu_torture_freelist)) {
65152 +- atomic_inc(&n_rcu_torture_alloc_fail);
65153 ++ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65154 + spin_unlock_bh(&rcu_torture_lock);
65155 + return NULL;
65156 + }
65157 +- atomic_inc(&n_rcu_torture_alloc);
65158 ++ atomic_inc_unchecked(&n_rcu_torture_alloc);
65159 + p = rcu_torture_freelist.next;
65160 + list_del_init(p);
65161 + spin_unlock_bh(&rcu_torture_lock);
65162 +@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65163 + static void
65164 + rcu_torture_free(struct rcu_torture *p)
65165 + {
65166 +- atomic_inc(&n_rcu_torture_free);
65167 ++ atomic_inc_unchecked(&n_rcu_torture_free);
65168 + spin_lock_bh(&rcu_torture_lock);
65169 + list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65170 + spin_unlock_bh(&rcu_torture_lock);
65171 +@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65172 + i = rp->rtort_pipe_count;
65173 + if (i > RCU_TORTURE_PIPE_LEN)
65174 + i = RCU_TORTURE_PIPE_LEN;
65175 +- atomic_inc(&rcu_torture_wcount[i]);
65176 ++ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65177 + if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65178 + rp->rtort_mbtest = 0;
65179 + rcu_torture_free(rp);
65180 +@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_fr
65181 + i = rp->rtort_pipe_count;
65182 + if (i > RCU_TORTURE_PIPE_LEN)
65183 + i = RCU_TORTURE_PIPE_LEN;
65184 +- atomic_inc(&rcu_torture_wcount[i]);
65185 ++ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65186 + if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65187 + rp->rtort_mbtest = 0;
65188 + list_del(&rp->rtort_free);
65189 +@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
65190 + i = old_rp->rtort_pipe_count;
65191 + if (i > RCU_TORTURE_PIPE_LEN)
65192 + i = RCU_TORTURE_PIPE_LEN;
65193 +- atomic_inc(&rcu_torture_wcount[i]);
65194 ++ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65195 + old_rp->rtort_pipe_count++;
65196 + cur_ops->deferred_free(old_rp);
65197 + }
65198 +@@ -950,7 +950,7 @@ static void rcu_torture_timer(unsigned l
65199 + return;
65200 + }
65201 + if (p->rtort_mbtest == 0)
65202 +- atomic_inc(&n_rcu_torture_mberror);
65203 ++ atomic_inc_unchecked(&n_rcu_torture_mberror);
65204 + spin_lock(&rand_lock);
65205 + cur_ops->read_delay(&rand);
65206 + n_rcu_torture_timers++;
65207 +@@ -1011,7 +1011,7 @@ rcu_torture_reader(void *arg)
65208 + continue;
65209 + }
65210 + if (p->rtort_mbtest == 0)
65211 +- atomic_inc(&n_rcu_torture_mberror);
65212 ++ atomic_inc_unchecked(&n_rcu_torture_mberror);
65213 + cur_ops->read_delay(&rand);
65214 + preempt_disable();
65215 + pipe_count = p->rtort_pipe_count;
65216 +@@ -1070,16 +1070,16 @@ rcu_torture_printk(char *page)
65217 + rcu_torture_current,
65218 + rcu_torture_current_version,
65219 + list_empty(&rcu_torture_freelist),
65220 +- atomic_read(&n_rcu_torture_alloc),
65221 +- atomic_read(&n_rcu_torture_alloc_fail),
65222 +- atomic_read(&n_rcu_torture_free),
65223 +- atomic_read(&n_rcu_torture_mberror),
65224 ++ atomic_read_unchecked(&n_rcu_torture_alloc),
65225 ++ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65226 ++ atomic_read_unchecked(&n_rcu_torture_free),
65227 ++ atomic_read_unchecked(&n_rcu_torture_mberror),
65228 + n_rcu_torture_boost_ktrerror,
65229 + n_rcu_torture_boost_rterror,
65230 + n_rcu_torture_boost_failure,
65231 + n_rcu_torture_boosts,
65232 + n_rcu_torture_timers);
65233 +- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65234 ++ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65235 + n_rcu_torture_boost_ktrerror != 0 ||
65236 + n_rcu_torture_boost_rterror != 0 ||
65237 + n_rcu_torture_boost_failure != 0)
65238 +@@ -1087,7 +1087,7 @@ rcu_torture_printk(char *page)
65239 + cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65240 + if (i > 1) {
65241 + cnt += sprintf(&page[cnt], "!!! ");
65242 +- atomic_inc(&n_rcu_torture_error);
65243 ++ atomic_inc_unchecked(&n_rcu_torture_error);
65244 + WARN_ON_ONCE(1);
65245 + }
65246 + cnt += sprintf(&page[cnt], "Reader Pipe: ");
65247 +@@ -1101,7 +1101,7 @@ rcu_torture_printk(char *page)
65248 + cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65249 + for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65250 + cnt += sprintf(&page[cnt], " %d",
65251 +- atomic_read(&rcu_torture_wcount[i]));
65252 ++ atomic_read_unchecked(&rcu_torture_wcount[i]));
65253 + }
65254 + cnt += sprintf(&page[cnt], "\n");
65255 + if (cur_ops->stats)
65256 +@@ -1410,7 +1410,7 @@ rcu_torture_cleanup(void)
65257 +
65258 + if (cur_ops->cleanup)
65259 + cur_ops->cleanup();
65260 +- if (atomic_read(&n_rcu_torture_error))
65261 ++ if (atomic_read_unchecked(&n_rcu_torture_error))
65262 + rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65263 + else
65264 + rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65265 +@@ -1474,17 +1474,17 @@ rcu_torture_init(void)
65266 +
65267 + rcu_torture_current = NULL;
65268 + rcu_torture_current_version = 0;
65269 +- atomic_set(&n_rcu_torture_alloc, 0);
65270 +- atomic_set(&n_rcu_torture_alloc_fail, 0);
65271 +- atomic_set(&n_rcu_torture_free, 0);
65272 +- atomic_set(&n_rcu_torture_mberror, 0);
65273 +- atomic_set(&n_rcu_torture_error, 0);
65274 ++ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65275 ++ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65276 ++ atomic_set_unchecked(&n_rcu_torture_free, 0);
65277 ++ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65278 ++ atomic_set_unchecked(&n_rcu_torture_error, 0);
65279 + n_rcu_torture_boost_ktrerror = 0;
65280 + n_rcu_torture_boost_rterror = 0;
65281 + n_rcu_torture_boost_failure = 0;
65282 + n_rcu_torture_boosts = 0;
65283 + for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65284 +- atomic_set(&rcu_torture_wcount[i], 0);
65285 ++ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65286 + for_each_possible_cpu(cpu) {
65287 + for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65288 + per_cpu(rcu_torture_count, cpu)[i] = 0;
65289 +diff -urNp linux-3.1.1/kernel/rcutree.c linux-3.1.1/kernel/rcutree.c
65290 +--- linux-3.1.1/kernel/rcutree.c 2011-11-11 15:19:27.000000000 -0500
65291 ++++ linux-3.1.1/kernel/rcutree.c 2011-11-16 18:39:08.000000000 -0500
65292 +@@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
65293 + }
65294 + /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65295 + smp_mb__before_atomic_inc(); /* See above. */
65296 +- atomic_inc(&rdtp->dynticks);
65297 ++ atomic_inc_unchecked(&rdtp->dynticks);
65298 + smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65299 +- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65300 ++ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65301 + local_irq_restore(flags);
65302 +
65303 + /* If the interrupt queued a callback, get out of dyntick mode. */
65304 +@@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
65305 + return;
65306 + }
65307 + smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65308 +- atomic_inc(&rdtp->dynticks);
65309 ++ atomic_inc_unchecked(&rdtp->dynticks);
65310 + /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65311 + smp_mb__after_atomic_inc(); /* See above. */
65312 +- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65313 ++ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65314 + local_irq_restore(flags);
65315 + }
65316 +
65317 +@@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
65318 + struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65319 +
65320 + if (rdtp->dynticks_nmi_nesting == 0 &&
65321 +- (atomic_read(&rdtp->dynticks) & 0x1))
65322 ++ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65323 + return;
65324 + rdtp->dynticks_nmi_nesting++;
65325 + smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65326 +- atomic_inc(&rdtp->dynticks);
65327 ++ atomic_inc_unchecked(&rdtp->dynticks);
65328 + /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65329 + smp_mb__after_atomic_inc(); /* See above. */
65330 +- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65331 ++ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65332 + }
65333 +
65334 + /**
65335 +@@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
65336 + return;
65337 + /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65338 + smp_mb__before_atomic_inc(); /* See above. */
65339 +- atomic_inc(&rdtp->dynticks);
65340 ++ atomic_inc_unchecked(&rdtp->dynticks);
65341 + smp_mb__after_atomic_inc(); /* Force delay to next write. */
65342 +- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65343 ++ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65344 + }
65345 +
65346 + /**
65347 +@@ -469,7 +469,7 @@ void rcu_irq_exit(void)
65348 + */
65349 + static int dyntick_save_progress_counter(struct rcu_data *rdp)
65350 + {
65351 +- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
65352 ++ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65353 + return 0;
65354 + }
65355 +
65356 +@@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(stru
65357 + unsigned long curr;
65358 + unsigned long snap;
65359 +
65360 +- curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
65361 ++ curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65362 + snap = (unsigned long)rdp->dynticks_snap;
65363 +
65364 + /*
65365 +@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state
65366 + /*
65367 + * Do softirq processing for the current CPU.
65368 + */
65369 +-static void rcu_process_callbacks(struct softirq_action *unused)
65370 ++static void rcu_process_callbacks(void)
65371 + {
65372 + __rcu_process_callbacks(&rcu_sched_state,
65373 + &__get_cpu_var(rcu_sched_data));
65374 +diff -urNp linux-3.1.1/kernel/rcutree.h linux-3.1.1/kernel/rcutree.h
65375 +--- linux-3.1.1/kernel/rcutree.h 2011-11-11 15:19:27.000000000 -0500
65376 ++++ linux-3.1.1/kernel/rcutree.h 2011-11-16 18:39:08.000000000 -0500
65377 +@@ -86,7 +86,7 @@
65378 + struct rcu_dynticks {
65379 + int dynticks_nesting; /* Track irq/process nesting level. */
65380 + int dynticks_nmi_nesting; /* Track NMI nesting level. */
65381 +- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
65382 ++ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
65383 + };
65384 +
65385 + /* RCU's kthread states for tracing. */
65386 +diff -urNp linux-3.1.1/kernel/rcutree_plugin.h linux-3.1.1/kernel/rcutree_plugin.h
65387 +--- linux-3.1.1/kernel/rcutree_plugin.h 2011-11-11 15:19:27.000000000 -0500
65388 ++++ linux-3.1.1/kernel/rcutree_plugin.h 2011-11-16 18:39:08.000000000 -0500
65389 +@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
65390 +
65391 + /* Clean up and exit. */
65392 + smp_mb(); /* ensure expedited GP seen before counter increment. */
65393 +- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
65394 ++ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
65395 + unlock_mb_ret:
65396 + mutex_unlock(&sync_rcu_preempt_exp_mutex);
65397 + mb_ret:
65398 +@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
65399 +
65400 + #else /* #ifndef CONFIG_SMP */
65401 +
65402 +-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
65403 +-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
65404 ++static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
65405 ++static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
65406 +
65407 + static int synchronize_sched_expedited_cpu_stop(void *data)
65408 + {
65409 +@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
65410 + int firstsnap, s, snap, trycount = 0;
65411 +
65412 + /* Note that atomic_inc_return() implies full memory barrier. */
65413 +- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
65414 ++ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
65415 + get_online_cpus();
65416 +
65417 + /*
65418 +@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
65419 + }
65420 +
65421 + /* Check to see if someone else did our work for us. */
65422 +- s = atomic_read(&sync_sched_expedited_done);
65423 ++ s = atomic_read_unchecked(&sync_sched_expedited_done);
65424 + if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
65425 + smp_mb(); /* ensure test happens before caller kfree */
65426 + return;
65427 +@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
65428 + * grace period works for us.
65429 + */
65430 + get_online_cpus();
65431 +- snap = atomic_read(&sync_sched_expedited_started) - 1;
65432 ++ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
65433 + smp_mb(); /* ensure read is before try_stop_cpus(). */
65434 + }
65435 +
65436 +@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
65437 + * than we did beat us to the punch.
65438 + */
65439 + do {
65440 +- s = atomic_read(&sync_sched_expedited_done);
65441 ++ s = atomic_read_unchecked(&sync_sched_expedited_done);
65442 + if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
65443 + smp_mb(); /* ensure test happens before caller kfree */
65444 + break;
65445 + }
65446 +- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
65447 ++ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
65448 +
65449 + put_online_cpus();
65450 + }
65451 +diff -urNp linux-3.1.1/kernel/relay.c linux-3.1.1/kernel/relay.c
65452 +--- linux-3.1.1/kernel/relay.c 2011-11-11 15:19:27.000000000 -0500
65453 ++++ linux-3.1.1/kernel/relay.c 2011-11-16 18:40:44.000000000 -0500
65454 +@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
65455 + };
65456 + ssize_t ret;
65457 +
65458 ++ pax_track_stack();
65459 ++
65460 + if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65461 + return 0;
65462 + if (splice_grow_spd(pipe, &spd))
65463 +diff -urNp linux-3.1.1/kernel/resource.c linux-3.1.1/kernel/resource.c
65464 +--- linux-3.1.1/kernel/resource.c 2011-11-11 15:19:27.000000000 -0500
65465 ++++ linux-3.1.1/kernel/resource.c 2011-11-16 18:40:44.000000000 -0500
65466 +@@ -141,8 +141,18 @@ static const struct file_operations proc
65467 +
65468 + static int __init ioresources_init(void)
65469 + {
65470 ++#ifdef CONFIG_GRKERNSEC_PROC_ADD
65471 ++#ifdef CONFIG_GRKERNSEC_PROC_USER
65472 ++ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65473 ++ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65474 ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65475 ++ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65476 ++ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65477 ++#endif
65478 ++#else
65479 + proc_create("ioports", 0, NULL, &proc_ioports_operations);
65480 + proc_create("iomem", 0, NULL, &proc_iomem_operations);
65481 ++#endif
65482 + return 0;
65483 + }
65484 + __initcall(ioresources_init);
65485 +diff -urNp linux-3.1.1/kernel/rtmutex-tester.c linux-3.1.1/kernel/rtmutex-tester.c
65486 +--- linux-3.1.1/kernel/rtmutex-tester.c 2011-11-11 15:19:27.000000000 -0500
65487 ++++ linux-3.1.1/kernel/rtmutex-tester.c 2011-11-16 18:39:08.000000000 -0500
65488 +@@ -20,7 +20,7 @@
65489 + #define MAX_RT_TEST_MUTEXES 8
65490 +
65491 + static spinlock_t rttest_lock;
65492 +-static atomic_t rttest_event;
65493 ++static atomic_unchecked_t rttest_event;
65494 +
65495 + struct test_thread_data {
65496 + int opcode;
65497 +@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
65498 +
65499 + case RTTEST_LOCKCONT:
65500 + td->mutexes[td->opdata] = 1;
65501 +- td->event = atomic_add_return(1, &rttest_event);
65502 ++ td->event = atomic_add_return_unchecked(1, &rttest_event);
65503 + return 0;
65504 +
65505 + case RTTEST_RESET:
65506 +@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
65507 + return 0;
65508 +
65509 + case RTTEST_RESETEVENT:
65510 +- atomic_set(&rttest_event, 0);
65511 ++ atomic_set_unchecked(&rttest_event, 0);
65512 + return 0;
65513 +
65514 + default:
65515 +@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
65516 + return ret;
65517 +
65518 + td->mutexes[id] = 1;
65519 +- td->event = atomic_add_return(1, &rttest_event);
65520 ++ td->event = atomic_add_return_unchecked(1, &rttest_event);
65521 + rt_mutex_lock(&mutexes[id]);
65522 +- td->event = atomic_add_return(1, &rttest_event);
65523 ++ td->event = atomic_add_return_unchecked(1, &rttest_event);
65524 + td->mutexes[id] = 4;
65525 + return 0;
65526 +
65527 +@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
65528 + return ret;
65529 +
65530 + td->mutexes[id] = 1;
65531 +- td->event = atomic_add_return(1, &rttest_event);
65532 ++ td->event = atomic_add_return_unchecked(1, &rttest_event);
65533 + ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65534 +- td->event = atomic_add_return(1, &rttest_event);
65535 ++ td->event = atomic_add_return_unchecked(1, &rttest_event);
65536 + td->mutexes[id] = ret ? 0 : 4;
65537 + return ret ? -EINTR : 0;
65538 +
65539 +@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
65540 + if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65541 + return ret;
65542 +
65543 +- td->event = atomic_add_return(1, &rttest_event);
65544 ++ td->event = atomic_add_return_unchecked(1, &rttest_event);
65545 + rt_mutex_unlock(&mutexes[id]);
65546 +- td->event = atomic_add_return(1, &rttest_event);
65547 ++ td->event = atomic_add_return_unchecked(1, &rttest_event);
65548 + td->mutexes[id] = 0;
65549 + return 0;
65550 +
65551 +@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
65552 + break;
65553 +
65554 + td->mutexes[dat] = 2;
65555 +- td->event = atomic_add_return(1, &rttest_event);
65556 ++ td->event = atomic_add_return_unchecked(1, &rttest_event);
65557 + break;
65558 +
65559 + default:
65560 +@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
65561 + return;
65562 +
65563 + td->mutexes[dat] = 3;
65564 +- td->event = atomic_add_return(1, &rttest_event);
65565 ++ td->event = atomic_add_return_unchecked(1, &rttest_event);
65566 + break;
65567 +
65568 + case RTTEST_LOCKNOWAIT:
65569 +@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
65570 + return;
65571 +
65572 + td->mutexes[dat] = 1;
65573 +- td->event = atomic_add_return(1, &rttest_event);
65574 ++ td->event = atomic_add_return_unchecked(1, &rttest_event);
65575 + return;
65576 +
65577 + default:
65578 +diff -urNp linux-3.1.1/kernel/sched_autogroup.c linux-3.1.1/kernel/sched_autogroup.c
65579 +--- linux-3.1.1/kernel/sched_autogroup.c 2011-11-11 15:19:27.000000000 -0500
65580 ++++ linux-3.1.1/kernel/sched_autogroup.c 2011-11-16 18:39:08.000000000 -0500
65581 +@@ -7,7 +7,7 @@
65582 +
65583 + unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
65584 + static struct autogroup autogroup_default;
65585 +-static atomic_t autogroup_seq_nr;
65586 ++static atomic_unchecked_t autogroup_seq_nr;
65587 +
65588 + static void __init autogroup_init(struct task_struct *init_task)
65589 + {
65590 +@@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
65591 +
65592 + kref_init(&ag->kref);
65593 + init_rwsem(&ag->lock);
65594 +- ag->id = atomic_inc_return(&autogroup_seq_nr);
65595 ++ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
65596 + ag->tg = tg;
65597 + #ifdef CONFIG_RT_GROUP_SCHED
65598 + /*
65599 +diff -urNp linux-3.1.1/kernel/sched.c linux-3.1.1/kernel/sched.c
65600 +--- linux-3.1.1/kernel/sched.c 2011-11-11 15:19:27.000000000 -0500
65601 ++++ linux-3.1.1/kernel/sched.c 2011-11-16 18:40:44.000000000 -0500
65602 +@@ -4264,6 +4264,8 @@ static void __sched __schedule(void)
65603 + struct rq *rq;
65604 + int cpu;
65605 +
65606 ++ pax_track_stack();
65607 ++
65608 + need_resched:
65609 + preempt_disable();
65610 + cpu = smp_processor_id();
65611 +@@ -4950,6 +4952,8 @@ int can_nice(const struct task_struct *p
65612 + /* convert nice value [19,-20] to rlimit style value [1,40] */
65613 + int nice_rlim = 20 - nice;
65614 +
65615 ++ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65616 ++
65617 + return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
65618 + capable(CAP_SYS_NICE));
65619 + }
65620 +@@ -4983,7 +4987,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65621 + if (nice > 19)
65622 + nice = 19;
65623 +
65624 +- if (increment < 0 && !can_nice(current, nice))
65625 ++ if (increment < 0 && (!can_nice(current, nice) ||
65626 ++ gr_handle_chroot_nice()))
65627 + return -EPERM;
65628 +
65629 + retval = security_task_setnice(current, nice);
65630 +@@ -5127,6 +5132,7 @@ recheck:
65631 + unsigned long rlim_rtprio =
65632 + task_rlimit(p, RLIMIT_RTPRIO);
65633 +
65634 ++ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
65635 + /* can't set/change the rt policy */
65636 + if (policy != p->policy && !rlim_rtprio)
65637 + return -EPERM;
65638 +diff -urNp linux-3.1.1/kernel/sched_fair.c linux-3.1.1/kernel/sched_fair.c
65639 +--- linux-3.1.1/kernel/sched_fair.c 2011-11-11 15:19:27.000000000 -0500
65640 ++++ linux-3.1.1/kernel/sched_fair.c 2011-11-16 18:39:08.000000000 -0500
65641 +@@ -4062,7 +4062,7 @@ static void nohz_idle_balance(int this_c
65642 + * run_rebalance_domains is triggered when needed from the scheduler tick.
65643 + * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
65644 + */
65645 +-static void run_rebalance_domains(struct softirq_action *h)
65646 ++static void run_rebalance_domains(void)
65647 + {
65648 + int this_cpu = smp_processor_id();
65649 + struct rq *this_rq = cpu_rq(this_cpu);
65650 +diff -urNp linux-3.1.1/kernel/signal.c linux-3.1.1/kernel/signal.c
65651 +--- linux-3.1.1/kernel/signal.c 2011-11-11 15:19:27.000000000 -0500
65652 ++++ linux-3.1.1/kernel/signal.c 2011-11-16 19:30:04.000000000 -0500
65653 +@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
65654 +
65655 + int print_fatal_signals __read_mostly;
65656 +
65657 +-static void __user *sig_handler(struct task_struct *t, int sig)
65658 ++static __sighandler_t sig_handler(struct task_struct *t, int sig)
65659 + {
65660 + return t->sighand->action[sig - 1].sa.sa_handler;
65661 + }
65662 +
65663 +-static int sig_handler_ignored(void __user *handler, int sig)
65664 ++static int sig_handler_ignored(__sighandler_t handler, int sig)
65665 + {
65666 + /* Is it explicitly or implicitly ignored? */
65667 + return handler == SIG_IGN ||
65668 +@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
65669 + static int sig_task_ignored(struct task_struct *t, int sig,
65670 + int from_ancestor_ns)
65671 + {
65672 +- void __user *handler;
65673 ++ __sighandler_t handler;
65674 +
65675 + handler = sig_handler(t, sig);
65676 +
65677 +@@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_st
65678 + atomic_inc(&user->sigpending);
65679 + rcu_read_unlock();
65680 +
65681 ++ if (!override_rlimit)
65682 ++ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65683 ++
65684 + if (override_rlimit ||
65685 + atomic_read(&user->sigpending) <=
65686 + task_rlimit(t, RLIMIT_SIGPENDING)) {
65687 +@@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct
65688 +
65689 + int unhandled_signal(struct task_struct *tsk, int sig)
65690 + {
65691 +- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65692 ++ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65693 + if (is_global_init(tsk))
65694 + return 1;
65695 + if (handler != SIG_IGN && handler != SIG_DFL)
65696 +@@ -815,6 +818,13 @@ static int check_kill_permission(int sig
65697 + }
65698 + }
65699 +
65700 ++ /* allow glibc communication via tgkill to other threads in our
65701 ++ thread group */
65702 ++ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65703 ++ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65704 ++ && gr_handle_signal(t, sig))
65705 ++ return -EPERM;
65706 ++
65707 + return security_task_kill(t, info, sig, 0);
65708 + }
65709 +
65710 +@@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct si
65711 + return send_signal(sig, info, p, 1);
65712 + }
65713 +
65714 +-static int
65715 ++int
65716 + specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65717 + {
65718 + return send_signal(sig, info, t, 0);
65719 +@@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *
65720 + unsigned long int flags;
65721 + int ret, blocked, ignored;
65722 + struct k_sigaction *action;
65723 ++ int is_unhandled = 0;
65724 +
65725 + spin_lock_irqsave(&t->sighand->siglock, flags);
65726 + action = &t->sighand->action[sig-1];
65727 +@@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *
65728 + }
65729 + if (action->sa.sa_handler == SIG_DFL)
65730 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
65731 ++ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65732 ++ is_unhandled = 1;
65733 + ret = specific_send_sig_info(sig, info, t);
65734 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
65735 +
65736 ++ /* only deal with unhandled signals, java etc trigger SIGSEGV during
65737 ++ normal operation */
65738 ++ if (is_unhandled) {
65739 ++ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65740 ++ gr_handle_crash(t, sig);
65741 ++ }
65742 ++
65743 + return ret;
65744 + }
65745 +
65746 +@@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct
65747 + ret = check_kill_permission(sig, info, p);
65748 + rcu_read_unlock();
65749 +
65750 +- if (!ret && sig)
65751 ++ if (!ret && sig) {
65752 + ret = do_send_sig_info(sig, info, p, true);
65753 ++ if (!ret)
65754 ++ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65755 ++ }
65756 +
65757 + return ret;
65758 + }
65759 +@@ -1909,6 +1932,8 @@ static void ptrace_do_notify(int signr,
65760 + {
65761 + siginfo_t info;
65762 +
65763 ++ pax_track_stack();
65764 ++
65765 + memset(&info, 0, sizeof info);
65766 + info.si_signo = signr;
65767 + info.si_code = exit_code;
65768 +@@ -2748,7 +2773,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65769 + int error = -ESRCH;
65770 +
65771 + rcu_read_lock();
65772 +- p = find_task_by_vpid(pid);
65773 ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65774 ++ /* allow glibc communication via tgkill to other threads in our
65775 ++ thread group */
65776 ++ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65777 ++ sig == (SIGRTMIN+1) && tgid == info->si_pid)
65778 ++ p = find_task_by_vpid_unrestricted(pid);
65779 ++ else
65780 ++#endif
65781 ++ p = find_task_by_vpid(pid);
65782 + if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65783 + error = check_kill_permission(sig, info, p);
65784 + /*
65785 +diff -urNp linux-3.1.1/kernel/smp.c linux-3.1.1/kernel/smp.c
65786 +--- linux-3.1.1/kernel/smp.c 2011-11-11 15:19:27.000000000 -0500
65787 ++++ linux-3.1.1/kernel/smp.c 2011-11-16 18:39:08.000000000 -0500
65788 +@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t fu
65789 + }
65790 + EXPORT_SYMBOL(smp_call_function);
65791 +
65792 +-void ipi_call_lock(void)
65793 ++void ipi_call_lock(void) __acquires(call_function.lock)
65794 + {
65795 + raw_spin_lock(&call_function.lock);
65796 + }
65797 +
65798 +-void ipi_call_unlock(void)
65799 ++void ipi_call_unlock(void) __releases(call_function.lock)
65800 + {
65801 + raw_spin_unlock(&call_function.lock);
65802 + }
65803 +
65804 +-void ipi_call_lock_irq(void)
65805 ++void ipi_call_lock_irq(void) __acquires(call_function.lock)
65806 + {
65807 + raw_spin_lock_irq(&call_function.lock);
65808 + }
65809 +
65810 +-void ipi_call_unlock_irq(void)
65811 ++void ipi_call_unlock_irq(void) __releases(call_function.lock)
65812 + {
65813 + raw_spin_unlock_irq(&call_function.lock);
65814 + }
65815 +diff -urNp linux-3.1.1/kernel/softirq.c linux-3.1.1/kernel/softirq.c
65816 +--- linux-3.1.1/kernel/softirq.c 2011-11-11 15:19:27.000000000 -0500
65817 ++++ linux-3.1.1/kernel/softirq.c 2011-11-16 18:39:08.000000000 -0500
65818 +@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65819 +
65820 + DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65821 +
65822 +-char *softirq_to_name[NR_SOFTIRQS] = {
65823 ++const char * const softirq_to_name[NR_SOFTIRQS] = {
65824 + "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65825 + "TASKLET", "SCHED", "HRTIMER", "RCU"
65826 + };
65827 +@@ -235,7 +235,7 @@ restart:
65828 + kstat_incr_softirqs_this_cpu(vec_nr);
65829 +
65830 + trace_softirq_entry(vec_nr);
65831 +- h->action(h);
65832 ++ h->action();
65833 + trace_softirq_exit(vec_nr);
65834 + if (unlikely(prev_count != preempt_count())) {
65835 + printk(KERN_ERR "huh, entered softirq %u %s %p"
65836 +@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
65837 + local_irq_restore(flags);
65838 + }
65839 +
65840 +-void open_softirq(int nr, void (*action)(struct softirq_action *))
65841 ++void open_softirq(int nr, void (*action)(void))
65842 + {
65843 +- softirq_vec[nr].action = action;
65844 ++ pax_open_kernel();
65845 ++ *(void **)&softirq_vec[nr].action = action;
65846 ++ pax_close_kernel();
65847 + }
65848 +
65849 + /*
65850 +@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct
65851 +
65852 + EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65853 +
65854 +-static void tasklet_action(struct softirq_action *a)
65855 ++static void tasklet_action(void)
65856 + {
65857 + struct tasklet_struct *list;
65858 +
65859 +@@ -476,7 +478,7 @@ static void tasklet_action(struct softir
65860 + }
65861 + }
65862 +
65863 +-static void tasklet_hi_action(struct softirq_action *a)
65864 ++static void tasklet_hi_action(void)
65865 + {
65866 + struct tasklet_struct *list;
65867 +
65868 +diff -urNp linux-3.1.1/kernel/sys.c linux-3.1.1/kernel/sys.c
65869 +--- linux-3.1.1/kernel/sys.c 2011-11-11 15:19:27.000000000 -0500
65870 ++++ linux-3.1.1/kernel/sys.c 2011-11-16 18:40:44.000000000 -0500
65871 +@@ -157,6 +157,12 @@ static int set_one_prio(struct task_stru
65872 + error = -EACCES;
65873 + goto out;
65874 + }
65875 ++
65876 ++ if (gr_handle_chroot_setpriority(p, niceval)) {
65877 ++ error = -EACCES;
65878 ++ goto out;
65879 ++ }
65880 ++
65881 + no_nice = security_task_setnice(p, niceval);
65882 + if (no_nice) {
65883 + error = no_nice;
65884 +@@ -571,6 +577,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
65885 + goto error;
65886 + }
65887 +
65888 ++ if (gr_check_group_change(new->gid, new->egid, -1))
65889 ++ goto error;
65890 ++
65891 + if (rgid != (gid_t) -1 ||
65892 + (egid != (gid_t) -1 && egid != old->gid))
65893 + new->sgid = new->egid;
65894 +@@ -600,6 +609,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65895 + old = current_cred();
65896 +
65897 + retval = -EPERM;
65898 ++
65899 ++ if (gr_check_group_change(gid, gid, gid))
65900 ++ goto error;
65901 ++
65902 + if (nsown_capable(CAP_SETGID))
65903 + new->gid = new->egid = new->sgid = new->fsgid = gid;
65904 + else if (gid == old->gid || gid == old->sgid)
65905 +@@ -687,6 +700,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
65906 + goto error;
65907 + }
65908 +
65909 ++ if (gr_check_user_change(new->uid, new->euid, -1))
65910 ++ goto error;
65911 ++
65912 + if (new->uid != old->uid) {
65913 + retval = set_user(new);
65914 + if (retval < 0)
65915 +@@ -731,6 +747,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65916 + old = current_cred();
65917 +
65918 + retval = -EPERM;
65919 ++
65920 ++ if (gr_check_crash_uid(uid))
65921 ++ goto error;
65922 ++ if (gr_check_user_change(uid, uid, uid))
65923 ++ goto error;
65924 ++
65925 + if (nsown_capable(CAP_SETUID)) {
65926 + new->suid = new->uid = uid;
65927 + if (uid != old->uid) {
65928 +@@ -785,6 +807,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
65929 + goto error;
65930 + }
65931 +
65932 ++ if (gr_check_user_change(ruid, euid, -1))
65933 ++ goto error;
65934 ++
65935 + if (ruid != (uid_t) -1) {
65936 + new->uid = ruid;
65937 + if (ruid != old->uid) {
65938 +@@ -849,6 +874,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
65939 + goto error;
65940 + }
65941 +
65942 ++ if (gr_check_group_change(rgid, egid, -1))
65943 ++ goto error;
65944 ++
65945 + if (rgid != (gid_t) -1)
65946 + new->gid = rgid;
65947 + if (egid != (gid_t) -1)
65948 +@@ -895,6 +923,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65949 + old = current_cred();
65950 + old_fsuid = old->fsuid;
65951 +
65952 ++ if (gr_check_user_change(-1, -1, uid))
65953 ++ goto error;
65954 ++
65955 + if (uid == old->uid || uid == old->euid ||
65956 + uid == old->suid || uid == old->fsuid ||
65957 + nsown_capable(CAP_SETUID)) {
65958 +@@ -905,6 +936,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65959 + }
65960 + }
65961 +
65962 ++error:
65963 + abort_creds(new);
65964 + return old_fsuid;
65965 +
65966 +@@ -931,12 +963,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65967 + if (gid == old->gid || gid == old->egid ||
65968 + gid == old->sgid || gid == old->fsgid ||
65969 + nsown_capable(CAP_SETGID)) {
65970 ++ if (gr_check_group_change(-1, -1, gid))
65971 ++ goto error;
65972 ++
65973 + if (gid != old_fsgid) {
65974 + new->fsgid = gid;
65975 + goto change_okay;
65976 + }
65977 + }
65978 +
65979 ++error:
65980 + abort_creds(new);
65981 + return old_fsgid;
65982 +
65983 +@@ -1242,19 +1278,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_
65984 + return -EFAULT;
65985 +
65986 + down_read(&uts_sem);
65987 +- error = __copy_to_user(&name->sysname, &utsname()->sysname,
65988 ++ error = __copy_to_user(name->sysname, &utsname()->sysname,
65989 + __OLD_UTS_LEN);
65990 + error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
65991 +- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
65992 ++ error |= __copy_to_user(name->nodename, &utsname()->nodename,
65993 + __OLD_UTS_LEN);
65994 + error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
65995 +- error |= __copy_to_user(&name->release, &utsname()->release,
65996 ++ error |= __copy_to_user(name->release, &utsname()->release,
65997 + __OLD_UTS_LEN);
65998 + error |= __put_user(0, name->release + __OLD_UTS_LEN);
65999 +- error |= __copy_to_user(&name->version, &utsname()->version,
66000 ++ error |= __copy_to_user(name->version, &utsname()->version,
66001 + __OLD_UTS_LEN);
66002 + error |= __put_user(0, name->version + __OLD_UTS_LEN);
66003 +- error |= __copy_to_user(&name->machine, &utsname()->machine,
66004 ++ error |= __copy_to_user(name->machine, &utsname()->machine,
66005 + __OLD_UTS_LEN);
66006 + error |= __put_user(0, name->machine + __OLD_UTS_LEN);
66007 + up_read(&uts_sem);
66008 +@@ -1717,7 +1753,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
66009 + error = get_dumpable(me->mm);
66010 + break;
66011 + case PR_SET_DUMPABLE:
66012 +- if (arg2 < 0 || arg2 > 1) {
66013 ++ if (arg2 > 1) {
66014 + error = -EINVAL;
66015 + break;
66016 + }
66017 +diff -urNp linux-3.1.1/kernel/sysctl_binary.c linux-3.1.1/kernel/sysctl_binary.c
66018 +--- linux-3.1.1/kernel/sysctl_binary.c 2011-11-11 15:19:27.000000000 -0500
66019 ++++ linux-3.1.1/kernel/sysctl_binary.c 2011-11-16 18:39:08.000000000 -0500
66020 +@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *f
66021 + int i;
66022 +
66023 + set_fs(KERNEL_DS);
66024 +- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66025 ++ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66026 + set_fs(old_fs);
66027 + if (result < 0)
66028 + goto out_kfree;
66029 +@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *f
66030 + }
66031 +
66032 + set_fs(KERNEL_DS);
66033 +- result = vfs_write(file, buffer, str - buffer, &pos);
66034 ++ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66035 + set_fs(old_fs);
66036 + if (result < 0)
66037 + goto out_kfree;
66038 +@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file
66039 + int i;
66040 +
66041 + set_fs(KERNEL_DS);
66042 +- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66043 ++ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66044 + set_fs(old_fs);
66045 + if (result < 0)
66046 + goto out_kfree;
66047 +@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file
66048 + }
66049 +
66050 + set_fs(KERNEL_DS);
66051 +- result = vfs_write(file, buffer, str - buffer, &pos);
66052 ++ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66053 + set_fs(old_fs);
66054 + if (result < 0)
66055 + goto out_kfree;
66056 +@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *fil
66057 + int i;
66058 +
66059 + set_fs(KERNEL_DS);
66060 +- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66061 ++ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66062 + set_fs(old_fs);
66063 + if (result < 0)
66064 + goto out;
66065 +@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struc
66066 + __le16 dnaddr;
66067 +
66068 + set_fs(KERNEL_DS);
66069 +- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66070 ++ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66071 + set_fs(old_fs);
66072 + if (result < 0)
66073 + goto out;
66074 +@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struc
66075 + le16_to_cpu(dnaddr) & 0x3ff);
66076 +
66077 + set_fs(KERNEL_DS);
66078 +- result = vfs_write(file, buf, len, &pos);
66079 ++ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
66080 + set_fs(old_fs);
66081 + if (result < 0)
66082 + goto out;
66083 +diff -urNp linux-3.1.1/kernel/sysctl.c linux-3.1.1/kernel/sysctl.c
66084 +--- linux-3.1.1/kernel/sysctl.c 2011-11-11 15:19:27.000000000 -0500
66085 ++++ linux-3.1.1/kernel/sysctl.c 2011-11-16 18:40:44.000000000 -0500
66086 +@@ -85,6 +85,13 @@
66087 +
66088 +
66089 + #if defined(CONFIG_SYSCTL)
66090 ++#include <linux/grsecurity.h>
66091 ++#include <linux/grinternal.h>
66092 ++
66093 ++extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66094 ++extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66095 ++ const int op);
66096 ++extern int gr_handle_chroot_sysctl(const int op);
66097 +
66098 + /* External variables not in a header file. */
66099 + extern int sysctl_overcommit_memory;
66100 +@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_tabl
66101 + }
66102 +
66103 + #endif
66104 ++extern struct ctl_table grsecurity_table[];
66105 +
66106 + static struct ctl_table root_table[];
66107 + static struct ctl_table_root sysctl_table_root;
66108 +@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
66109 + int sysctl_legacy_va_layout;
66110 + #endif
66111 +
66112 ++#ifdef CONFIG_PAX_SOFTMODE
66113 ++static ctl_table pax_table[] = {
66114 ++ {
66115 ++ .procname = "softmode",
66116 ++ .data = &pax_softmode,
66117 ++ .maxlen = sizeof(unsigned int),
66118 ++ .mode = 0600,
66119 ++ .proc_handler = &proc_dointvec,
66120 ++ },
66121 ++
66122 ++ { }
66123 ++};
66124 ++#endif
66125 ++
66126 + /* The default sysctl tables: */
66127 +
66128 + static struct ctl_table root_table[] = {
66129 +@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
66130 + #endif
66131 +
66132 + static struct ctl_table kern_table[] = {
66133 ++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66134 ++ {
66135 ++ .procname = "grsecurity",
66136 ++ .mode = 0500,
66137 ++ .child = grsecurity_table,
66138 ++ },
66139 ++#endif
66140 ++
66141 ++#ifdef CONFIG_PAX_SOFTMODE
66142 ++ {
66143 ++ .procname = "pax",
66144 ++ .mode = 0500,
66145 ++ .child = pax_table,
66146 ++ },
66147 ++#endif
66148 ++
66149 + {
66150 + .procname = "sched_child_runs_first",
66151 + .data = &sysctl_sched_child_runs_first,
66152 +@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
66153 + .data = &modprobe_path,
66154 + .maxlen = KMOD_PATH_LEN,
66155 + .mode = 0644,
66156 +- .proc_handler = proc_dostring,
66157 ++ .proc_handler = proc_dostring_modpriv,
66158 + },
66159 + {
66160 + .procname = "modules_disabled",
66161 +@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
66162 + .extra1 = &zero,
66163 + .extra2 = &one,
66164 + },
66165 ++#endif
66166 + {
66167 + .procname = "kptr_restrict",
66168 + .data = &kptr_restrict,
66169 + .maxlen = sizeof(int),
66170 + .mode = 0644,
66171 + .proc_handler = proc_dmesg_restrict,
66172 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
66173 ++ .extra1 = &two,
66174 ++#else
66175 + .extra1 = &zero,
66176 ++#endif
66177 + .extra2 = &two,
66178 + },
66179 +-#endif
66180 + {
66181 + .procname = "ngroups_max",
66182 + .data = &ngroups_max,
66183 +@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
66184 + .proc_handler = proc_dointvec_minmax,
66185 + .extra1 = &zero,
66186 + },
66187 ++ {
66188 ++ .procname = "heap_stack_gap",
66189 ++ .data = &sysctl_heap_stack_gap,
66190 ++ .maxlen = sizeof(sysctl_heap_stack_gap),
66191 ++ .mode = 0644,
66192 ++ .proc_handler = proc_doulongvec_minmax,
66193 ++ },
66194 + #else
66195 + {
66196 + .procname = "nr_trim_pages",
66197 +@@ -1709,6 +1758,17 @@ static int test_perm(int mode, int op)
66198 + int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66199 + {
66200 + int mode;
66201 ++ int error;
66202 ++
66203 ++ if (table->parent != NULL && table->parent->procname != NULL &&
66204 ++ table->procname != NULL &&
66205 ++ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66206 ++ return -EACCES;
66207 ++ if (gr_handle_chroot_sysctl(op))
66208 ++ return -EACCES;
66209 ++ error = gr_handle_sysctl(table, op);
66210 ++ if (error)
66211 ++ return error;
66212 +
66213 + if (root->permissions)
66214 + mode = root->permissions(root, current->nsproxy, table);
66215 +@@ -2113,6 +2173,16 @@ int proc_dostring(struct ctl_table *tabl
66216 + buffer, lenp, ppos);
66217 + }
66218 +
66219 ++int proc_dostring_modpriv(struct ctl_table *table, int write,
66220 ++ void __user *buffer, size_t *lenp, loff_t *ppos)
66221 ++{
66222 ++ if (write && !capable(CAP_SYS_MODULE))
66223 ++ return -EPERM;
66224 ++
66225 ++ return _proc_do_string(table->data, table->maxlen, write,
66226 ++ buffer, lenp, ppos);
66227 ++}
66228 ++
66229 + static size_t proc_skip_spaces(char **buf)
66230 + {
66231 + size_t ret;
66232 +@@ -2218,6 +2288,8 @@ static int proc_put_long(void __user **b
66233 + len = strlen(tmp);
66234 + if (len > *size)
66235 + len = *size;
66236 ++ if (len > sizeof(tmp))
66237 ++ len = sizeof(tmp);
66238 + if (copy_to_user(*buf, tmp, len))
66239 + return -EFAULT;
66240 + *size -= len;
66241 +@@ -2534,8 +2606,11 @@ static int __do_proc_doulongvec_minmax(v
66242 + *i = val;
66243 + } else {
66244 + val = convdiv * (*i) / convmul;
66245 +- if (!first)
66246 ++ if (!first) {
66247 + err = proc_put_char(&buffer, &left, '\t');
66248 ++ if (err)
66249 ++ break;
66250 ++ }
66251 + err = proc_put_long(&buffer, &left, val, false);
66252 + if (err)
66253 + break;
66254 +@@ -2930,6 +3005,12 @@ int proc_dostring(struct ctl_table *tabl
66255 + return -ENOSYS;
66256 + }
66257 +
66258 ++int proc_dostring_modpriv(struct ctl_table *table, int write,
66259 ++ void __user *buffer, size_t *lenp, loff_t *ppos)
66260 ++{
66261 ++ return -ENOSYS;
66262 ++}
66263 ++
66264 + int proc_dointvec(struct ctl_table *table, int write,
66265 + void __user *buffer, size_t *lenp, loff_t *ppos)
66266 + {
66267 +@@ -2986,6 +3067,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66268 + EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66269 + EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66270 + EXPORT_SYMBOL(proc_dostring);
66271 ++EXPORT_SYMBOL(proc_dostring_modpriv);
66272 + EXPORT_SYMBOL(proc_doulongvec_minmax);
66273 + EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66274 + EXPORT_SYMBOL(register_sysctl_table);
66275 +diff -urNp linux-3.1.1/kernel/sysctl_check.c linux-3.1.1/kernel/sysctl_check.c
66276 +--- linux-3.1.1/kernel/sysctl_check.c 2011-11-11 15:19:27.000000000 -0500
66277 ++++ linux-3.1.1/kernel/sysctl_check.c 2011-11-16 18:40:44.000000000 -0500
66278 +@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
66279 + set_fail(&fail, table, "Directory with extra2");
66280 + } else {
66281 + if ((table->proc_handler == proc_dostring) ||
66282 ++ (table->proc_handler == proc_dostring_modpriv) ||
66283 + (table->proc_handler == proc_dointvec) ||
66284 + (table->proc_handler == proc_dointvec_minmax) ||
66285 + (table->proc_handler == proc_dointvec_jiffies) ||
66286 +diff -urNp linux-3.1.1/kernel/taskstats.c linux-3.1.1/kernel/taskstats.c
66287 +--- linux-3.1.1/kernel/taskstats.c 2011-11-11 15:19:27.000000000 -0500
66288 ++++ linux-3.1.1/kernel/taskstats.c 2011-11-16 19:35:09.000000000 -0500
66289 +@@ -27,9 +27,12 @@
66290 + #include <linux/cgroup.h>
66291 + #include <linux/fs.h>
66292 + #include <linux/file.h>
66293 ++#include <linux/grsecurity.h>
66294 + #include <net/genetlink.h>
66295 + #include <linux/atomic.h>
66296 +
66297 ++extern int gr_is_taskstats_denied(int pid);
66298 ++
66299 + /*
66300 + * Maximum length of a cpumask that can be specified in
66301 + * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66302 +@@ -556,6 +559,9 @@ err:
66303 +
66304 + static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
66305 + {
66306 ++ if (gr_is_taskstats_denied(current->pid))
66307 ++ return -EACCES;
66308 ++
66309 + if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
66310 + return cmd_attr_register_cpumask(info);
66311 + else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
66312 +diff -urNp linux-3.1.1/kernel/time/alarmtimer.c linux-3.1.1/kernel/time/alarmtimer.c
66313 +--- linux-3.1.1/kernel/time/alarmtimer.c 2011-11-11 15:19:27.000000000 -0500
66314 ++++ linux-3.1.1/kernel/time/alarmtimer.c 2011-11-16 18:39:08.000000000 -0500
66315 +@@ -693,7 +693,7 @@ static int __init alarmtimer_init(void)
66316 + {
66317 + int error = 0;
66318 + int i;
66319 +- struct k_clock alarm_clock = {
66320 ++ static struct k_clock alarm_clock = {
66321 + .clock_getres = alarm_clock_getres,
66322 + .clock_get = alarm_clock_get,
66323 + .timer_create = alarm_timer_create,
66324 +diff -urNp linux-3.1.1/kernel/time/tick-broadcast.c linux-3.1.1/kernel/time/tick-broadcast.c
66325 +--- linux-3.1.1/kernel/time/tick-broadcast.c 2011-11-11 15:19:27.000000000 -0500
66326 ++++ linux-3.1.1/kernel/time/tick-broadcast.c 2011-11-16 18:39:08.000000000 -0500
66327 +@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
66328 + * then clear the broadcast bit.
66329 + */
66330 + if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66331 +- int cpu = smp_processor_id();
66332 ++ cpu = smp_processor_id();
66333 +
66334 + cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66335 + tick_broadcast_clear_oneshot(cpu);
66336 +diff -urNp linux-3.1.1/kernel/time/timekeeping.c linux-3.1.1/kernel/time/timekeeping.c
66337 +--- linux-3.1.1/kernel/time/timekeeping.c 2011-11-11 15:19:27.000000000 -0500
66338 ++++ linux-3.1.1/kernel/time/timekeeping.c 2011-11-16 18:40:44.000000000 -0500
66339 +@@ -14,6 +14,7 @@
66340 + #include <linux/init.h>
66341 + #include <linux/mm.h>
66342 + #include <linux/sched.h>
66343 ++#include <linux/grsecurity.h>
66344 + #include <linux/syscore_ops.h>
66345 + #include <linux/clocksource.h>
66346 + #include <linux/jiffies.h>
66347 +@@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
66348 + if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66349 + return -EINVAL;
66350 +
66351 ++ gr_log_timechange();
66352 ++
66353 + write_seqlock_irqsave(&xtime_lock, flags);
66354 +
66355 + timekeeping_forward_now();
66356 +diff -urNp linux-3.1.1/kernel/time/timer_list.c linux-3.1.1/kernel/time/timer_list.c
66357 +--- linux-3.1.1/kernel/time/timer_list.c 2011-11-11 15:19:27.000000000 -0500
66358 ++++ linux-3.1.1/kernel/time/timer_list.c 2011-11-16 18:40:44.000000000 -0500
66359 +@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66360 +
66361 + static void print_name_offset(struct seq_file *m, void *sym)
66362 + {
66363 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
66364 ++ SEQ_printf(m, "<%p>", NULL);
66365 ++#else
66366 + char symname[KSYM_NAME_LEN];
66367 +
66368 + if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66369 + SEQ_printf(m, "<%pK>", sym);
66370 + else
66371 + SEQ_printf(m, "%s", symname);
66372 ++#endif
66373 + }
66374 +
66375 + static void
66376 +@@ -112,7 +116,11 @@ next_one:
66377 + static void
66378 + print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66379 + {
66380 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
66381 ++ SEQ_printf(m, " .base: %p\n", NULL);
66382 ++#else
66383 + SEQ_printf(m, " .base: %pK\n", base);
66384 ++#endif
66385 + SEQ_printf(m, " .index: %d\n",
66386 + base->index);
66387 + SEQ_printf(m, " .resolution: %Lu nsecs\n",
66388 +@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
66389 + {
66390 + struct proc_dir_entry *pe;
66391 +
66392 ++#ifdef CONFIG_GRKERNSEC_PROC_ADD
66393 ++ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66394 ++#else
66395 + pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66396 ++#endif
66397 + if (!pe)
66398 + return -ENOMEM;
66399 + return 0;
66400 +diff -urNp linux-3.1.1/kernel/time/timer_stats.c linux-3.1.1/kernel/time/timer_stats.c
66401 +--- linux-3.1.1/kernel/time/timer_stats.c 2011-11-11 15:19:27.000000000 -0500
66402 ++++ linux-3.1.1/kernel/time/timer_stats.c 2011-11-16 18:40:44.000000000 -0500
66403 +@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66404 + static unsigned long nr_entries;
66405 + static struct entry entries[MAX_ENTRIES];
66406 +
66407 +-static atomic_t overflow_count;
66408 ++static atomic_unchecked_t overflow_count;
66409 +
66410 + /*
66411 + * The entries are in a hash-table, for fast lookup:
66412 +@@ -140,7 +140,7 @@ static void reset_entries(void)
66413 + nr_entries = 0;
66414 + memset(entries, 0, sizeof(entries));
66415 + memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66416 +- atomic_set(&overflow_count, 0);
66417 ++ atomic_set_unchecked(&overflow_count, 0);
66418 + }
66419 +
66420 + static struct entry *alloc_entry(void)
66421 +@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66422 + if (likely(entry))
66423 + entry->count++;
66424 + else
66425 +- atomic_inc(&overflow_count);
66426 ++ atomic_inc_unchecked(&overflow_count);
66427 +
66428 + out_unlock:
66429 + raw_spin_unlock_irqrestore(lock, flags);
66430 +@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66431 +
66432 + static void print_name_offset(struct seq_file *m, unsigned long addr)
66433 + {
66434 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
66435 ++ seq_printf(m, "<%p>", NULL);
66436 ++#else
66437 + char symname[KSYM_NAME_LEN];
66438 +
66439 + if (lookup_symbol_name(addr, symname) < 0)
66440 + seq_printf(m, "<%p>", (void *)addr);
66441 + else
66442 + seq_printf(m, "%s", symname);
66443 ++#endif
66444 + }
66445 +
66446 + static int tstats_show(struct seq_file *m, void *v)
66447 +@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66448 +
66449 + seq_puts(m, "Timer Stats Version: v0.2\n");
66450 + seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66451 +- if (atomic_read(&overflow_count))
66452 ++ if (atomic_read_unchecked(&overflow_count))
66453 + seq_printf(m, "Overflow: %d entries\n",
66454 +- atomic_read(&overflow_count));
66455 ++ atomic_read_unchecked(&overflow_count));
66456 +
66457 + for (i = 0; i < nr_entries; i++) {
66458 + entry = entries + i;
66459 +@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
66460 + {
66461 + struct proc_dir_entry *pe;
66462 +
66463 ++#ifdef CONFIG_GRKERNSEC_PROC_ADD
66464 ++ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66465 ++#else
66466 + pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66467 ++#endif
66468 + if (!pe)
66469 + return -ENOMEM;
66470 + return 0;
66471 +diff -urNp linux-3.1.1/kernel/time.c linux-3.1.1/kernel/time.c
66472 +--- linux-3.1.1/kernel/time.c 2011-11-11 15:19:27.000000000 -0500
66473 ++++ linux-3.1.1/kernel/time.c 2011-11-16 18:40:44.000000000 -0500
66474 +@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
66475 + return error;
66476 +
66477 + if (tz) {
66478 ++ /* we log in do_settimeofday called below, so don't log twice
66479 ++ */
66480 ++ if (!tv)
66481 ++ gr_log_timechange();
66482 ++
66483 + /* SMP safe, global irq locking makes it work. */
66484 + sys_tz = *tz;
66485 + update_vsyscall_tz();
66486 +diff -urNp linux-3.1.1/kernel/timer.c linux-3.1.1/kernel/timer.c
66487 +--- linux-3.1.1/kernel/timer.c 2011-11-11 15:19:27.000000000 -0500
66488 ++++ linux-3.1.1/kernel/timer.c 2011-11-16 18:39:08.000000000 -0500
66489 +@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
66490 + /*
66491 + * This function runs timers and the timer-tq in bottom half context.
66492 + */
66493 +-static void run_timer_softirq(struct softirq_action *h)
66494 ++static void run_timer_softirq(void)
66495 + {
66496 + struct tvec_base *base = __this_cpu_read(tvec_bases);
66497 +
66498 +diff -urNp linux-3.1.1/kernel/trace/blktrace.c linux-3.1.1/kernel/trace/blktrace.c
66499 +--- linux-3.1.1/kernel/trace/blktrace.c 2011-11-11 15:19:27.000000000 -0500
66500 ++++ linux-3.1.1/kernel/trace/blktrace.c 2011-11-16 18:39:08.000000000 -0500
66501 +@@ -323,7 +323,7 @@ static ssize_t blk_dropped_read(struct f
66502 + struct blk_trace *bt = filp->private_data;
66503 + char buf[16];
66504 +
66505 +- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66506 ++ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66507 +
66508 + return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66509 + }
66510 +@@ -388,7 +388,7 @@ static int blk_subbuf_start_callback(str
66511 + return 1;
66512 +
66513 + bt = buf->chan->private_data;
66514 +- atomic_inc(&bt->dropped);
66515 ++ atomic_inc_unchecked(&bt->dropped);
66516 + return 0;
66517 + }
66518 +
66519 +@@ -489,7 +489,7 @@ int do_blk_trace_setup(struct request_qu
66520 +
66521 + bt->dir = dir;
66522 + bt->dev = dev;
66523 +- atomic_set(&bt->dropped, 0);
66524 ++ atomic_set_unchecked(&bt->dropped, 0);
66525 +
66526 + ret = -EIO;
66527 + bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66528 +diff -urNp linux-3.1.1/kernel/trace/ftrace.c linux-3.1.1/kernel/trace/ftrace.c
66529 +--- linux-3.1.1/kernel/trace/ftrace.c 2011-11-11 15:19:27.000000000 -0500
66530 ++++ linux-3.1.1/kernel/trace/ftrace.c 2011-11-16 18:39:08.000000000 -0500
66531 +@@ -1585,12 +1585,17 @@ ftrace_code_disable(struct module *mod,
66532 + if (unlikely(ftrace_disabled))
66533 + return 0;
66534 +
66535 ++ ret = ftrace_arch_code_modify_prepare();
66536 ++ FTRACE_WARN_ON(ret);
66537 ++ if (ret)
66538 ++ return 0;
66539 ++
66540 + ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66541 ++ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66542 + if (ret) {
66543 + ftrace_bug(ret, ip);
66544 +- return 0;
66545 + }
66546 +- return 1;
66547 ++ return ret ? 0 : 1;
66548 + }
66549 +
66550 + /*
66551 +@@ -2607,7 +2612,7 @@ static void ftrace_free_entry_rcu(struct
66552 +
66553 + int
66554 + register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
66555 +- void *data)
66556 ++ void *data)
66557 + {
66558 + struct ftrace_func_probe *entry;
66559 + struct ftrace_page *pg;
66560 +diff -urNp linux-3.1.1/kernel/trace/trace.c linux-3.1.1/kernel/trace/trace.c
66561 +--- linux-3.1.1/kernel/trace/trace.c 2011-11-11 15:19:27.000000000 -0500
66562 ++++ linux-3.1.1/kernel/trace/trace.c 2011-11-16 18:40:44.000000000 -0500
66563 +@@ -3451,6 +3451,8 @@ static ssize_t tracing_splice_read_pipe(
66564 + size_t rem;
66565 + unsigned int i;
66566 +
66567 ++ pax_track_stack();
66568 ++
66569 + if (splice_grow_spd(pipe, &spd))
66570 + return -ENOMEM;
66571 +
66572 +@@ -3926,6 +3928,8 @@ tracing_buffers_splice_read(struct file
66573 + int entries, size, i;
66574 + size_t ret;
66575 +
66576 ++ pax_track_stack();
66577 ++
66578 + if (splice_grow_spd(pipe, &spd))
66579 + return -ENOMEM;
66580 +
66581 +@@ -4093,10 +4097,9 @@ static const struct file_operations trac
66582 + };
66583 + #endif
66584 +
66585 +-static struct dentry *d_tracer;
66586 +-
66587 + struct dentry *tracing_init_dentry(void)
66588 + {
66589 ++ static struct dentry *d_tracer;
66590 + static int once;
66591 +
66592 + if (d_tracer)
66593 +@@ -4116,10 +4119,9 @@ struct dentry *tracing_init_dentry(void)
66594 + return d_tracer;
66595 + }
66596 +
66597 +-static struct dentry *d_percpu;
66598 +-
66599 + struct dentry *tracing_dentry_percpu(void)
66600 + {
66601 ++ static struct dentry *d_percpu;
66602 + static int once;
66603 + struct dentry *d_tracer;
66604 +
66605 +diff -urNp linux-3.1.1/kernel/trace/trace_events.c linux-3.1.1/kernel/trace/trace_events.c
66606 +--- linux-3.1.1/kernel/trace/trace_events.c 2011-11-11 15:19:27.000000000 -0500
66607 ++++ linux-3.1.1/kernel/trace/trace_events.c 2011-11-16 18:39:08.000000000 -0500
66608 +@@ -1300,10 +1300,6 @@ static LIST_HEAD(ftrace_module_file_list
66609 + struct ftrace_module_file_ops {
66610 + struct list_head list;
66611 + struct module *mod;
66612 +- struct file_operations id;
66613 +- struct file_operations enable;
66614 +- struct file_operations format;
66615 +- struct file_operations filter;
66616 + };
66617 +
66618 + static struct ftrace_module_file_ops *
66619 +@@ -1324,17 +1320,12 @@ trace_create_file_ops(struct module *mod
66620 +
66621 + file_ops->mod = mod;
66622 +
66623 +- file_ops->id = ftrace_event_id_fops;
66624 +- file_ops->id.owner = mod;
66625 +-
66626 +- file_ops->enable = ftrace_enable_fops;
66627 +- file_ops->enable.owner = mod;
66628 +-
66629 +- file_ops->filter = ftrace_event_filter_fops;
66630 +- file_ops->filter.owner = mod;
66631 +-
66632 +- file_ops->format = ftrace_event_format_fops;
66633 +- file_ops->format.owner = mod;
66634 ++ pax_open_kernel();
66635 ++ *(void **)&mod->trace_id.owner = mod;
66636 ++ *(void **)&mod->trace_enable.owner = mod;
66637 ++ *(void **)&mod->trace_filter.owner = mod;
66638 ++ *(void **)&mod->trace_format.owner = mod;
66639 ++ pax_close_kernel();
66640 +
66641 + list_add(&file_ops->list, &ftrace_module_file_list);
66642 +
66643 +@@ -1358,8 +1349,8 @@ static void trace_module_add_events(stru
66644 +
66645 + for_each_event(call, start, end) {
66646 + __trace_add_event_call(*call, mod,
66647 +- &file_ops->id, &file_ops->enable,
66648 +- &file_ops->filter, &file_ops->format);
66649 ++ &mod->trace_id, &mod->trace_enable,
66650 ++ &mod->trace_filter, &mod->trace_format);
66651 + }
66652 + }
66653 +
66654 +diff -urNp linux-3.1.1/kernel/trace/trace_kprobe.c linux-3.1.1/kernel/trace/trace_kprobe.c
66655 +--- linux-3.1.1/kernel/trace/trace_kprobe.c 2011-11-11 15:19:27.000000000 -0500
66656 ++++ linux-3.1.1/kernel/trace/trace_kprobe.c 2011-11-16 18:39:08.000000000 -0500
66657 +@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66658 + long ret;
66659 + int maxlen = get_rloc_len(*(u32 *)dest);
66660 + u8 *dst = get_rloc_data(dest);
66661 +- u8 *src = addr;
66662 ++ const u8 __user *src = (const u8 __force_user *)addr;
66663 + mm_segment_t old_fs = get_fs();
66664 + if (!maxlen)
66665 + return;
66666 +@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66667 + pagefault_disable();
66668 + do
66669 + ret = __copy_from_user_inatomic(dst++, src++, 1);
66670 +- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
66671 ++ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
66672 + dst[-1] = '\0';
66673 + pagefault_enable();
66674 + set_fs(old_fs);
66675 +@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66676 + ((u8 *)get_rloc_data(dest))[0] = '\0';
66677 + *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
66678 + } else
66679 +- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
66680 ++ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
66681 + get_rloc_offs(*(u32 *)dest));
66682 + }
66683 + /* Return the length of string -- including null terminal byte */
66684 +@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(me
66685 + set_fs(KERNEL_DS);
66686 + pagefault_disable();
66687 + do {
66688 +- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
66689 ++ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
66690 + len++;
66691 + } while (c && ret == 0 && len < MAX_STRING_SIZE);
66692 + pagefault_enable();
66693 +diff -urNp linux-3.1.1/kernel/trace/trace_mmiotrace.c linux-3.1.1/kernel/trace/trace_mmiotrace.c
66694 +--- linux-3.1.1/kernel/trace/trace_mmiotrace.c 2011-11-11 15:19:27.000000000 -0500
66695 ++++ linux-3.1.1/kernel/trace/trace_mmiotrace.c 2011-11-16 18:39:08.000000000 -0500
66696 +@@ -24,7 +24,7 @@ struct header_iter {
66697 + static struct trace_array *mmio_trace_array;
66698 + static bool overrun_detected;
66699 + static unsigned long prev_overruns;
66700 +-static atomic_t dropped_count;
66701 ++static atomic_unchecked_t dropped_count;
66702 +
66703 + static void mmio_reset_data(struct trace_array *tr)
66704 + {
66705 +@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
66706 +
66707 + static unsigned long count_overruns(struct trace_iterator *iter)
66708 + {
66709 +- unsigned long cnt = atomic_xchg(&dropped_count, 0);
66710 ++ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66711 + unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66712 +
66713 + if (over > prev_overruns)
66714 +@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
66715 + event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66716 + sizeof(*entry), 0, pc);
66717 + if (!event) {
66718 +- atomic_inc(&dropped_count);
66719 ++ atomic_inc_unchecked(&dropped_count);
66720 + return;
66721 + }
66722 + entry = ring_buffer_event_data(event);
66723 +@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
66724 + event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66725 + sizeof(*entry), 0, pc);
66726 + if (!event) {
66727 +- atomic_inc(&dropped_count);
66728 ++ atomic_inc_unchecked(&dropped_count);
66729 + return;
66730 + }
66731 + entry = ring_buffer_event_data(event);
66732 +diff -urNp linux-3.1.1/kernel/trace/trace_output.c linux-3.1.1/kernel/trace/trace_output.c
66733 +--- linux-3.1.1/kernel/trace/trace_output.c 2011-11-11 15:19:27.000000000 -0500
66734 ++++ linux-3.1.1/kernel/trace/trace_output.c 2011-11-16 18:39:08.000000000 -0500
66735 +@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
66736 +
66737 + p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66738 + if (!IS_ERR(p)) {
66739 +- p = mangle_path(s->buffer + s->len, p, "\n");
66740 ++ p = mangle_path(s->buffer + s->len, p, "\n\\");
66741 + if (p) {
66742 + s->len = p - s->buffer;
66743 + return 1;
66744 +diff -urNp linux-3.1.1/kernel/trace/trace_stack.c linux-3.1.1/kernel/trace/trace_stack.c
66745 +--- linux-3.1.1/kernel/trace/trace_stack.c 2011-11-11 15:19:27.000000000 -0500
66746 ++++ linux-3.1.1/kernel/trace/trace_stack.c 2011-11-16 18:39:08.000000000 -0500
66747 +@@ -50,7 +50,7 @@ static inline void check_stack(void)
66748 + return;
66749 +
66750 + /* we do not handle interrupt stacks yet */
66751 +- if (!object_is_on_stack(&this_size))
66752 ++ if (!object_starts_on_stack(&this_size))
66753 + return;
66754 +
66755 + local_irq_save(flags);
66756 +diff -urNp linux-3.1.1/kernel/trace/trace_workqueue.c linux-3.1.1/kernel/trace/trace_workqueue.c
66757 +--- linux-3.1.1/kernel/trace/trace_workqueue.c 2011-11-11 15:19:27.000000000 -0500
66758 ++++ linux-3.1.1/kernel/trace/trace_workqueue.c 2011-11-16 18:39:08.000000000 -0500
66759 +@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
66760 + int cpu;
66761 + pid_t pid;
66762 + /* Can be inserted from interrupt or user context, need to be atomic */
66763 +- atomic_t inserted;
66764 ++ atomic_unchecked_t inserted;
66765 + /*
66766 + * Don't need to be atomic, works are serialized in a single workqueue thread
66767 + * on a single CPU.
66768 +@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
66769 + spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66770 + list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66771 + if (node->pid == wq_thread->pid) {
66772 +- atomic_inc(&node->inserted);
66773 ++ atomic_inc_unchecked(&node->inserted);
66774 + goto found;
66775 + }
66776 + }
66777 +@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
66778 + tsk = get_pid_task(pid, PIDTYPE_PID);
66779 + if (tsk) {
66780 + seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66781 +- atomic_read(&cws->inserted), cws->executed,
66782 ++ atomic_read_unchecked(&cws->inserted), cws->executed,
66783 + tsk->comm);
66784 + put_task_struct(tsk);
66785 + }
66786 +diff -urNp linux-3.1.1/lib/bitmap.c linux-3.1.1/lib/bitmap.c
66787 +--- linux-3.1.1/lib/bitmap.c 2011-11-11 15:19:27.000000000 -0500
66788 ++++ linux-3.1.1/lib/bitmap.c 2011-11-16 18:39:08.000000000 -0500
66789 +@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsi
66790 + {
66791 + int c, old_c, totaldigits, ndigits, nchunks, nbits;
66792 + u32 chunk;
66793 +- const char __user *ubuf = buf;
66794 ++ const char __user *ubuf = (const char __force_user *)buf;
66795 +
66796 + bitmap_zero(maskp, nmaskbits);
66797 +
66798 +@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user
66799 + {
66800 + if (!access_ok(VERIFY_READ, ubuf, ulen))
66801 + return -EFAULT;
66802 +- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
66803 ++ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
66804 + }
66805 + EXPORT_SYMBOL(bitmap_parse_user);
66806 +
66807 +@@ -594,7 +594,7 @@ static int __bitmap_parselist(const char
66808 + {
66809 + unsigned a, b;
66810 + int c, old_c, totaldigits;
66811 +- const char __user *ubuf = buf;
66812 ++ const char __user *ubuf = (const char __force_user *)buf;
66813 + int exp_digit, in_range;
66814 +
66815 + totaldigits = c = 0;
66816 +@@ -694,7 +694,7 @@ int bitmap_parselist_user(const char __u
66817 + {
66818 + if (!access_ok(VERIFY_READ, ubuf, ulen))
66819 + return -EFAULT;
66820 +- return __bitmap_parselist((const char *)ubuf,
66821 ++ return __bitmap_parselist((const char __force_kernel *)ubuf,
66822 + ulen, 1, maskp, nmaskbits);
66823 + }
66824 + EXPORT_SYMBOL(bitmap_parselist_user);
66825 +diff -urNp linux-3.1.1/lib/bug.c linux-3.1.1/lib/bug.c
66826 +--- linux-3.1.1/lib/bug.c 2011-11-11 15:19:27.000000000 -0500
66827 ++++ linux-3.1.1/lib/bug.c 2011-11-16 18:39:08.000000000 -0500
66828 +@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
66829 + return BUG_TRAP_TYPE_NONE;
66830 +
66831 + bug = find_bug(bugaddr);
66832 ++ if (!bug)
66833 ++ return BUG_TRAP_TYPE_NONE;
66834 +
66835 + file = NULL;
66836 + line = 0;
66837 +diff -urNp linux-3.1.1/lib/debugobjects.c linux-3.1.1/lib/debugobjects.c
66838 +--- linux-3.1.1/lib/debugobjects.c 2011-11-11 15:19:27.000000000 -0500
66839 ++++ linux-3.1.1/lib/debugobjects.c 2011-11-16 18:39:08.000000000 -0500
66840 +@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
66841 + if (limit > 4)
66842 + return;
66843 +
66844 +- is_on_stack = object_is_on_stack(addr);
66845 ++ is_on_stack = object_starts_on_stack(addr);
66846 + if (is_on_stack == onstack)
66847 + return;
66848 +
66849 +diff -urNp linux-3.1.1/lib/devres.c linux-3.1.1/lib/devres.c
66850 +--- linux-3.1.1/lib/devres.c 2011-11-11 15:19:27.000000000 -0500
66851 ++++ linux-3.1.1/lib/devres.c 2011-11-16 18:39:08.000000000 -0500
66852 +@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
66853 + void devm_iounmap(struct device *dev, void __iomem *addr)
66854 + {
66855 + WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
66856 +- (void *)addr));
66857 ++ (void __force *)addr));
66858 + iounmap(addr);
66859 + }
66860 + EXPORT_SYMBOL(devm_iounmap);
66861 +@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *de
66862 + {
66863 + ioport_unmap(addr);
66864 + WARN_ON(devres_destroy(dev, devm_ioport_map_release,
66865 +- devm_ioport_map_match, (void *)addr));
66866 ++ devm_ioport_map_match, (void __force *)addr));
66867 + }
66868 + EXPORT_SYMBOL(devm_ioport_unmap);
66869 +
66870 +diff -urNp linux-3.1.1/lib/dma-debug.c linux-3.1.1/lib/dma-debug.c
66871 +--- linux-3.1.1/lib/dma-debug.c 2011-11-11 15:19:27.000000000 -0500
66872 ++++ linux-3.1.1/lib/dma-debug.c 2011-11-16 18:39:08.000000000 -0500
66873 +@@ -870,7 +870,7 @@ out:
66874 +
66875 + static void check_for_stack(struct device *dev, void *addr)
66876 + {
66877 +- if (object_is_on_stack(addr))
66878 ++ if (object_starts_on_stack(addr))
66879 + err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66880 + "stack [addr=%p]\n", addr);
66881 + }
66882 +diff -urNp linux-3.1.1/lib/extable.c linux-3.1.1/lib/extable.c
66883 +--- linux-3.1.1/lib/extable.c 2011-11-11 15:19:27.000000000 -0500
66884 ++++ linux-3.1.1/lib/extable.c 2011-11-16 18:39:08.000000000 -0500
66885 +@@ -13,6 +13,7 @@
66886 + #include <linux/init.h>
66887 + #include <linux/sort.h>
66888 + #include <asm/uaccess.h>
66889 ++#include <asm/pgtable.h>
66890 +
66891 + #ifndef ARCH_HAS_SORT_EXTABLE
66892 + /*
66893 +@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const v
66894 + void sort_extable(struct exception_table_entry *start,
66895 + struct exception_table_entry *finish)
66896 + {
66897 ++ pax_open_kernel();
66898 + sort(start, finish - start, sizeof(struct exception_table_entry),
66899 + cmp_ex, NULL);
66900 ++ pax_close_kernel();
66901 + }
66902 +
66903 + #ifdef CONFIG_MODULES
66904 +diff -urNp linux-3.1.1/lib/inflate.c linux-3.1.1/lib/inflate.c
66905 +--- linux-3.1.1/lib/inflate.c 2011-11-11 15:19:27.000000000 -0500
66906 ++++ linux-3.1.1/lib/inflate.c 2011-11-16 18:39:08.000000000 -0500
66907 +@@ -269,7 +269,7 @@ static void free(void *where)
66908 + malloc_ptr = free_mem_ptr;
66909 + }
66910 + #else
66911 +-#define malloc(a) kmalloc(a, GFP_KERNEL)
66912 ++#define malloc(a) kmalloc((a), GFP_KERNEL)
66913 + #define free(a) kfree(a)
66914 + #endif
66915 +
66916 +diff -urNp linux-3.1.1/lib/Kconfig.debug linux-3.1.1/lib/Kconfig.debug
66917 +--- linux-3.1.1/lib/Kconfig.debug 2011-11-11 15:19:27.000000000 -0500
66918 ++++ linux-3.1.1/lib/Kconfig.debug 2011-11-16 18:40:44.000000000 -0500
66919 +@@ -1091,6 +1091,7 @@ config LATENCYTOP
66920 + depends on DEBUG_KERNEL
66921 + depends on STACKTRACE_SUPPORT
66922 + depends on PROC_FS
66923 ++ depends on !GRKERNSEC_HIDESYM
66924 + select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
66925 + select KALLSYMS
66926 + select KALLSYMS_ALL
66927 +diff -urNp linux-3.1.1/lib/kref.c linux-3.1.1/lib/kref.c
66928 +--- linux-3.1.1/lib/kref.c 2011-11-11 15:19:27.000000000 -0500
66929 ++++ linux-3.1.1/lib/kref.c 2011-11-16 18:39:08.000000000 -0500
66930 +@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
66931 + */
66932 + int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66933 + {
66934 +- WARN_ON(release == NULL);
66935 ++ BUG_ON(release == NULL);
66936 + WARN_ON(release == (void (*)(struct kref *))kfree);
66937 +
66938 + if (atomic_dec_and_test(&kref->refcount)) {
66939 +diff -urNp linux-3.1.1/lib/radix-tree.c linux-3.1.1/lib/radix-tree.c
66940 +--- linux-3.1.1/lib/radix-tree.c 2011-11-11 15:19:27.000000000 -0500
66941 ++++ linux-3.1.1/lib/radix-tree.c 2011-11-16 18:39:08.000000000 -0500
66942 +@@ -80,7 +80,7 @@ struct radix_tree_preload {
66943 + int nr;
66944 + struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66945 + };
66946 +-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66947 ++static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66948 +
66949 + static inline void *ptr_to_indirect(void *ptr)
66950 + {
66951 +diff -urNp linux-3.1.1/lib/vsprintf.c linux-3.1.1/lib/vsprintf.c
66952 +--- linux-3.1.1/lib/vsprintf.c 2011-11-11 15:19:27.000000000 -0500
66953 ++++ linux-3.1.1/lib/vsprintf.c 2011-11-16 18:40:44.000000000 -0500
66954 +@@ -16,6 +16,9 @@
66955 + * - scnprintf and vscnprintf
66956 + */
66957 +
66958 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
66959 ++#define __INCLUDED_BY_HIDESYM 1
66960 ++#endif
66961 + #include <stdarg.h>
66962 + #include <linux/module.h>
66963 + #include <linux/types.h>
66964 +@@ -432,7 +435,7 @@ char *symbol_string(char *buf, char *end
66965 + char sym[KSYM_SYMBOL_LEN];
66966 + if (ext == 'B')
66967 + sprint_backtrace(sym, value);
66968 +- else if (ext != 'f' && ext != 's')
66969 ++ else if (ext != 'f' && ext != 's' && ext != 'a')
66970 + sprint_symbol(sym, value);
66971 + else
66972 + kallsyms_lookup(value, NULL, NULL, NULL, sym);
66973 +@@ -796,7 +799,11 @@ char *uuid_string(char *buf, char *end,
66974 + return string(buf, end, uuid, spec);
66975 + }
66976 +
66977 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
66978 ++int kptr_restrict __read_mostly = 2;
66979 ++#else
66980 + int kptr_restrict __read_mostly;
66981 ++#endif
66982 +
66983 + /*
66984 + * Show a '%p' thing. A kernel extension is that the '%p' is followed
66985 +@@ -810,6 +817,8 @@ int kptr_restrict __read_mostly;
66986 + * - 'S' For symbolic direct pointers with offset
66987 + * - 's' For symbolic direct pointers without offset
66988 + * - 'B' For backtraced symbolic direct pointers with offset
66989 ++ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
66990 ++ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
66991 + * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
66992 + * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
66993 + * - 'M' For a 6-byte MAC address, it prints the address in the
66994 +@@ -854,12 +863,12 @@ char *pointer(const char *fmt, char *buf
66995 + {
66996 + if (!ptr && *fmt != 'K') {
66997 + /*
66998 +- * Print (null) with the same width as a pointer so it makes
66999 ++ * Print (nil) with the same width as a pointer so it makes
67000 + * tabular output look nice.
67001 + */
67002 + if (spec.field_width == -1)
67003 + spec.field_width = 2 * sizeof(void *);
67004 +- return string(buf, end, "(null)", spec);
67005 ++ return string(buf, end, "(nil)", spec);
67006 + }
67007 +
67008 + switch (*fmt) {
67009 +@@ -869,6 +878,13 @@ char *pointer(const char *fmt, char *buf
67010 + /* Fallthrough */
67011 + case 'S':
67012 + case 's':
67013 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
67014 ++ break;
67015 ++#else
67016 ++ return symbol_string(buf, end, ptr, spec, *fmt);
67017 ++#endif
67018 ++ case 'A':
67019 ++ case 'a':
67020 + case 'B':
67021 + return symbol_string(buf, end, ptr, spec, *fmt);
67022 + case 'R':
67023 +@@ -1627,11 +1643,11 @@ int bstr_printf(char *buf, size_t size,
67024 + typeof(type) value; \
67025 + if (sizeof(type) == 8) { \
67026 + args = PTR_ALIGN(args, sizeof(u32)); \
67027 +- *(u32 *)&value = *(u32 *)args; \
67028 +- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67029 ++ *(u32 *)&value = *(const u32 *)args; \
67030 ++ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67031 + } else { \
67032 + args = PTR_ALIGN(args, sizeof(type)); \
67033 +- value = *(typeof(type) *)args; \
67034 ++ value = *(const typeof(type) *)args; \
67035 + } \
67036 + args += sizeof(type); \
67037 + value; \
67038 +@@ -1694,7 +1710,7 @@ int bstr_printf(char *buf, size_t size,
67039 + case FORMAT_TYPE_STR: {
67040 + const char *str_arg = args;
67041 + args += strlen(str_arg) + 1;
67042 +- str = string(str, end, (char *)str_arg, spec);
67043 ++ str = string(str, end, str_arg, spec);
67044 + break;
67045 + }
67046 +
67047 +diff -urNp linux-3.1.1/localversion-grsec linux-3.1.1/localversion-grsec
67048 +--- linux-3.1.1/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
67049 ++++ linux-3.1.1/localversion-grsec 2011-11-16 18:40:44.000000000 -0500
67050 +@@ -0,0 +1 @@
67051 ++-grsec
67052 +diff -urNp linux-3.1.1/Makefile linux-3.1.1/Makefile
67053 +--- linux-3.1.1/Makefile 2011-11-11 15:19:27.000000000 -0500
67054 ++++ linux-3.1.1/Makefile 2011-11-17 18:56:01.000000000 -0500
67055 +@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
67056 +
67057 + HOSTCC = gcc
67058 + HOSTCXX = g++
67059 +-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
67060 +-HOSTCXXFLAGS = -O2
67061 ++HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
67062 ++HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
67063 ++HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
67064 +
67065 + # Decide whether to build built-in, modular, or both.
67066 + # Normally, just do built-in.
67067 +@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
67068 + # Rules shared between *config targets and build targets
67069 +
67070 + # Basic helpers built in scripts/
67071 +-PHONY += scripts_basic
67072 +-scripts_basic:
67073 ++PHONY += scripts_basic gcc-plugins
67074 ++scripts_basic: gcc-plugins
67075 + $(Q)$(MAKE) $(build)=scripts/basic
67076 + $(Q)rm -f .tmp_quiet_recordmcount
67077 +
67078 +@@ -564,6 +565,37 @@ else
67079 + KBUILD_CFLAGS += -O2
67080 + endif
67081 +
67082 ++ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
67083 ++CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
67084 ++ifdef CONFIG_PAX_MEMORY_STACKLEAK
67085 ++STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
67086 ++STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
67087 ++endif
67088 ++ifdef CONFIG_KALLOCSTAT_PLUGIN
67089 ++KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
67090 ++endif
67091 ++ifdef CONFIG_PAX_KERNEXEC_PLUGIN
67092 ++KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
67093 ++endif
67094 ++ifdef CONFIG_CHECKER_PLUGIN
67095 ++ifeq ($(call cc-ifversion, -ge, 0406, y), y)
67096 ++CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
67097 ++endif
67098 ++endif
67099 ++GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
67100 ++export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
67101 ++gcc-plugins:
67102 ++ $(Q)$(MAKE) $(build)=tools/gcc
67103 ++else
67104 ++gcc-plugins:
67105 ++ifeq ($(call cc-ifversion, -ge, 0405, y), y)
67106 ++ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
67107 ++else
67108 ++ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
67109 ++endif
67110 ++ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
67111 ++endif
67112 ++
67113 + include $(srctree)/arch/$(SRCARCH)/Makefile
67114 +
67115 + ifneq ($(CONFIG_FRAME_WARN),0)
67116 +@@ -708,7 +740,7 @@ export mod_strip_cmd
67117 +
67118 +
67119 + ifeq ($(KBUILD_EXTMOD),)
67120 +-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
67121 ++core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
67122 +
67123 + vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
67124 + $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
67125 +@@ -932,6 +964,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-mai
67126 +
67127 + # The actual objects are generated when descending,
67128 + # make sure no implicit rule kicks in
67129 ++$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
67130 + $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
67131 +
67132 + # Handle descending into subdirectories listed in $(vmlinux-dirs)
67133 +@@ -941,7 +974,7 @@ $(sort $(vmlinux-init) $(vmlinux-main))
67134 + # Error messages still appears in the original language
67135 +
67136 + PHONY += $(vmlinux-dirs)
67137 +-$(vmlinux-dirs): prepare scripts
67138 ++$(vmlinux-dirs): gcc-plugins prepare scripts
67139 + $(Q)$(MAKE) $(build)=$@
67140 +
67141 + # Store (new) KERNELRELASE string in include/config/kernel.release
67142 +@@ -986,6 +1019,7 @@ prepare0: archprepare FORCE
67143 + $(Q)$(MAKE) $(build)=. missing-syscalls
67144 +
67145 + # All the preparing..
67146 ++prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
67147 + prepare: prepare0
67148 +
67149 + # Generate some files
67150 +@@ -1087,6 +1121,7 @@ all: modules
67151 + # using awk while concatenating to the final file.
67152 +
67153 + PHONY += modules
67154 ++modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
67155 + modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
67156 + $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
67157 + @$(kecho) ' Building modules, stage 2.';
67158 +@@ -1102,7 +1137,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
67159 +
67160 + # Target to prepare building external modules
67161 + PHONY += modules_prepare
67162 +-modules_prepare: prepare scripts
67163 ++modules_prepare: gcc-plugins prepare scripts
67164 +
67165 + # Target to install modules
67166 + PHONY += modules_install
67167 +@@ -1198,7 +1233,7 @@ distclean: mrproper
67168 + @find $(srctree) $(RCS_FIND_IGNORE) \
67169 + \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
67170 + -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
67171 +- -o -name '.*.rej' -o -size 0 \
67172 ++ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
67173 + -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
67174 + -type f -print | xargs rm -f
67175 +
67176 +@@ -1360,6 +1395,7 @@ PHONY += $(module-dirs) modules
67177 + $(module-dirs): crmodverdir $(objtree)/Module.symvers
67178 + $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
67179 +
67180 ++modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
67181 + modules: $(module-dirs)
67182 + @$(kecho) ' Building modules, stage 2.';
67183 + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
67184 +@@ -1486,17 +1522,19 @@ else
67185 + target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
67186 + endif
67187 +
67188 +-%.s: %.c prepare scripts FORCE
67189 ++%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
67190 ++%.s: %.c gcc-plugins prepare scripts FORCE
67191 + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67192 + %.i: %.c prepare scripts FORCE
67193 + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67194 +-%.o: %.c prepare scripts FORCE
67195 ++%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
67196 ++%.o: %.c gcc-plugins prepare scripts FORCE
67197 + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67198 + %.lst: %.c prepare scripts FORCE
67199 + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67200 +-%.s: %.S prepare scripts FORCE
67201 ++%.s: %.S gcc-plugins prepare scripts FORCE
67202 + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67203 +-%.o: %.S prepare scripts FORCE
67204 ++%.o: %.S gcc-plugins prepare scripts FORCE
67205 + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67206 + %.symtypes: %.c prepare scripts FORCE
67207 + $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
67208 +@@ -1506,11 +1544,13 @@ endif
67209 + $(cmd_crmodverdir)
67210 + $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
67211 + $(build)=$(build-dir)
67212 +-%/: prepare scripts FORCE
67213 ++%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
67214 ++%/: gcc-plugins prepare scripts FORCE
67215 + $(cmd_crmodverdir)
67216 + $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
67217 + $(build)=$(build-dir)
67218 +-%.ko: prepare scripts FORCE
67219 ++%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
67220 ++%.ko: gcc-plugins prepare scripts FORCE
67221 + $(cmd_crmodverdir)
67222 + $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
67223 + $(build)=$(build-dir) $(@:.ko=.o)
67224 +diff -urNp linux-3.1.1/mm/filemap.c linux-3.1.1/mm/filemap.c
67225 +--- linux-3.1.1/mm/filemap.c 2011-11-11 15:19:27.000000000 -0500
67226 ++++ linux-3.1.1/mm/filemap.c 2011-11-16 18:40:44.000000000 -0500
67227 +@@ -1784,7 +1784,7 @@ int generic_file_mmap(struct file * file
67228 + struct address_space *mapping = file->f_mapping;
67229 +
67230 + if (!mapping->a_ops->readpage)
67231 +- return -ENOEXEC;
67232 ++ return -ENODEV;
67233 + file_accessed(file);
67234 + vma->vm_ops = &generic_file_vm_ops;
67235 + vma->vm_flags |= VM_CAN_NONLINEAR;
67236 +@@ -2190,6 +2190,7 @@ inline int generic_write_checks(struct f
67237 + *pos = i_size_read(inode);
67238 +
67239 + if (limit != RLIM_INFINITY) {
67240 ++ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67241 + if (*pos >= limit) {
67242 + send_sig(SIGXFSZ, current, 0);
67243 + return -EFBIG;
67244 +diff -urNp linux-3.1.1/mm/fremap.c linux-3.1.1/mm/fremap.c
67245 +--- linux-3.1.1/mm/fremap.c 2011-11-11 15:19:27.000000000 -0500
67246 ++++ linux-3.1.1/mm/fremap.c 2011-11-16 18:39:08.000000000 -0500
67247 +@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67248 + retry:
67249 + vma = find_vma(mm, start);
67250 +
67251 ++#ifdef CONFIG_PAX_SEGMEXEC
67252 ++ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67253 ++ goto out;
67254 ++#endif
67255 ++
67256 + /*
67257 + * Make sure the vma is shared, that it supports prefaulting,
67258 + * and that the remapped range is valid and fully within
67259 +diff -urNp linux-3.1.1/mm/highmem.c linux-3.1.1/mm/highmem.c
67260 +--- linux-3.1.1/mm/highmem.c 2011-11-11 15:19:27.000000000 -0500
67261 ++++ linux-3.1.1/mm/highmem.c 2011-11-16 18:39:08.000000000 -0500
67262 +@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67263 + * So no dangers, even with speculative execution.
67264 + */
67265 + page = pte_page(pkmap_page_table[i]);
67266 ++ pax_open_kernel();
67267 + pte_clear(&init_mm, (unsigned long)page_address(page),
67268 + &pkmap_page_table[i]);
67269 +-
67270 ++ pax_close_kernel();
67271 + set_page_address(page, NULL);
67272 + need_flush = 1;
67273 + }
67274 +@@ -186,9 +187,11 @@ start:
67275 + }
67276 + }
67277 + vaddr = PKMAP_ADDR(last_pkmap_nr);
67278 ++
67279 ++ pax_open_kernel();
67280 + set_pte_at(&init_mm, vaddr,
67281 + &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67282 +-
67283 ++ pax_close_kernel();
67284 + pkmap_count[last_pkmap_nr] = 1;
67285 + set_page_address(page, (void *)vaddr);
67286 +
67287 +diff -urNp linux-3.1.1/mm/huge_memory.c linux-3.1.1/mm/huge_memory.c
67288 +--- linux-3.1.1/mm/huge_memory.c 2011-11-11 15:19:27.000000000 -0500
67289 ++++ linux-3.1.1/mm/huge_memory.c 2011-11-16 18:39:08.000000000 -0500
67290 +@@ -702,7 +702,7 @@ out:
67291 + * run pte_offset_map on the pmd, if an huge pmd could
67292 + * materialize from under us from a different thread.
67293 + */
67294 +- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67295 ++ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67296 + return VM_FAULT_OOM;
67297 + /* if an huge pmd materialized from under us just retry later */
67298 + if (unlikely(pmd_trans_huge(*pmd)))
67299 +diff -urNp linux-3.1.1/mm/hugetlb.c linux-3.1.1/mm/hugetlb.c
67300 +--- linux-3.1.1/mm/hugetlb.c 2011-11-11 15:19:27.000000000 -0500
67301 ++++ linux-3.1.1/mm/hugetlb.c 2011-11-16 18:39:08.000000000 -0500
67302 +@@ -2346,6 +2346,27 @@ static int unmap_ref_private(struct mm_s
67303 + return 1;
67304 + }
67305 +
67306 ++#ifdef CONFIG_PAX_SEGMEXEC
67307 ++static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67308 ++{
67309 ++ struct mm_struct *mm = vma->vm_mm;
67310 ++ struct vm_area_struct *vma_m;
67311 ++ unsigned long address_m;
67312 ++ pte_t *ptep_m;
67313 ++
67314 ++ vma_m = pax_find_mirror_vma(vma);
67315 ++ if (!vma_m)
67316 ++ return;
67317 ++
67318 ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67319 ++ address_m = address + SEGMEXEC_TASK_SIZE;
67320 ++ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67321 ++ get_page(page_m);
67322 ++ hugepage_add_anon_rmap(page_m, vma_m, address_m);
67323 ++ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67324 ++}
67325 ++#endif
67326 ++
67327 + /*
67328 + * Hugetlb_cow() should be called with page lock of the original hugepage held.
67329 + */
67330 +@@ -2447,6 +2468,11 @@ retry_avoidcopy:
67331 + make_huge_pte(vma, new_page, 1));
67332 + page_remove_rmap(old_page);
67333 + hugepage_add_new_anon_rmap(new_page, vma, address);
67334 ++
67335 ++#ifdef CONFIG_PAX_SEGMEXEC
67336 ++ pax_mirror_huge_pte(vma, address, new_page);
67337 ++#endif
67338 ++
67339 + /* Make the old page be freed below */
67340 + new_page = old_page;
67341 + mmu_notifier_invalidate_range_end(mm,
67342 +@@ -2598,6 +2624,10 @@ retry:
67343 + && (vma->vm_flags & VM_SHARED)));
67344 + set_huge_pte_at(mm, address, ptep, new_pte);
67345 +
67346 ++#ifdef CONFIG_PAX_SEGMEXEC
67347 ++ pax_mirror_huge_pte(vma, address, page);
67348 ++#endif
67349 ++
67350 + if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67351 + /* Optimization, do the COW without a second fault */
67352 + ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67353 +@@ -2627,6 +2657,10 @@ int hugetlb_fault(struct mm_struct *mm,
67354 + static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67355 + struct hstate *h = hstate_vma(vma);
67356 +
67357 ++#ifdef CONFIG_PAX_SEGMEXEC
67358 ++ struct vm_area_struct *vma_m;
67359 ++#endif
67360 ++
67361 + ptep = huge_pte_offset(mm, address);
67362 + if (ptep) {
67363 + entry = huge_ptep_get(ptep);
67364 +@@ -2638,6 +2672,26 @@ int hugetlb_fault(struct mm_struct *mm,
67365 + VM_FAULT_SET_HINDEX(h - hstates);
67366 + }
67367 +
67368 ++#ifdef CONFIG_PAX_SEGMEXEC
67369 ++ vma_m = pax_find_mirror_vma(vma);
67370 ++ if (vma_m) {
67371 ++ unsigned long address_m;
67372 ++
67373 ++ if (vma->vm_start > vma_m->vm_start) {
67374 ++ address_m = address;
67375 ++ address -= SEGMEXEC_TASK_SIZE;
67376 ++ vma = vma_m;
67377 ++ h = hstate_vma(vma);
67378 ++ } else
67379 ++ address_m = address + SEGMEXEC_TASK_SIZE;
67380 ++
67381 ++ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67382 ++ return VM_FAULT_OOM;
67383 ++ address_m &= HPAGE_MASK;
67384 ++ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67385 ++ }
67386 ++#endif
67387 ++
67388 + ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67389 + if (!ptep)
67390 + return VM_FAULT_OOM;
67391 +diff -urNp linux-3.1.1/mm/internal.h linux-3.1.1/mm/internal.h
67392 +--- linux-3.1.1/mm/internal.h 2011-11-11 15:19:27.000000000 -0500
67393 ++++ linux-3.1.1/mm/internal.h 2011-11-16 18:39:08.000000000 -0500
67394 +@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page
67395 + * in mm/page_alloc.c
67396 + */
67397 + extern void __free_pages_bootmem(struct page *page, unsigned int order);
67398 ++extern void free_compound_page(struct page *page);
67399 + extern void prep_compound_page(struct page *page, unsigned long order);
67400 + #ifdef CONFIG_MEMORY_FAILURE
67401 + extern bool is_free_buddy_page(struct page *page);
67402 +diff -urNp linux-3.1.1/mm/Kconfig linux-3.1.1/mm/Kconfig
67403 +--- linux-3.1.1/mm/Kconfig 2011-11-11 15:19:27.000000000 -0500
67404 ++++ linux-3.1.1/mm/Kconfig 2011-11-17 18:57:00.000000000 -0500
67405 +@@ -238,10 +238,10 @@ config KSM
67406 + root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
67407 +
67408 + config DEFAULT_MMAP_MIN_ADDR
67409 +- int "Low address space to protect from user allocation"
67410 ++ int "Low address space to protect from user allocation"
67411 + depends on MMU
67412 +- default 4096
67413 +- help
67414 ++ default 65536
67415 ++ help
67416 + This is the portion of low virtual memory which should be protected
67417 + from userspace allocation. Keeping a user from writing to low pages
67418 + can help reduce the impact of kernel NULL pointer bugs.
67419 +diff -urNp linux-3.1.1/mm/kmemleak.c linux-3.1.1/mm/kmemleak.c
67420 +--- linux-3.1.1/mm/kmemleak.c 2011-11-11 15:19:27.000000000 -0500
67421 ++++ linux-3.1.1/mm/kmemleak.c 2011-11-16 18:40:44.000000000 -0500
67422 +@@ -357,7 +357,7 @@ static void print_unreferenced(struct se
67423 +
67424 + for (i = 0; i < object->trace_len; i++) {
67425 + void *ptr = (void *)object->trace[i];
67426 +- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67427 ++ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67428 + }
67429 + }
67430 +
67431 +diff -urNp linux-3.1.1/mm/maccess.c linux-3.1.1/mm/maccess.c
67432 +--- linux-3.1.1/mm/maccess.c 2011-11-11 15:19:27.000000000 -0500
67433 ++++ linux-3.1.1/mm/maccess.c 2011-11-16 18:39:08.000000000 -0500
67434 +@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, cons
67435 + set_fs(KERNEL_DS);
67436 + pagefault_disable();
67437 + ret = __copy_from_user_inatomic(dst,
67438 +- (__force const void __user *)src, size);
67439 ++ (const void __force_user *)src, size);
67440 + pagefault_enable();
67441 + set_fs(old_fs);
67442 +
67443 +@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, con
67444 +
67445 + set_fs(KERNEL_DS);
67446 + pagefault_disable();
67447 +- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
67448 ++ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
67449 + pagefault_enable();
67450 + set_fs(old_fs);
67451 +
67452 +diff -urNp linux-3.1.1/mm/madvise.c linux-3.1.1/mm/madvise.c
67453 +--- linux-3.1.1/mm/madvise.c 2011-11-11 15:19:27.000000000 -0500
67454 ++++ linux-3.1.1/mm/madvise.c 2011-11-16 18:39:08.000000000 -0500
67455 +@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
67456 + pgoff_t pgoff;
67457 + unsigned long new_flags = vma->vm_flags;
67458 +
67459 ++#ifdef CONFIG_PAX_SEGMEXEC
67460 ++ struct vm_area_struct *vma_m;
67461 ++#endif
67462 ++
67463 + switch (behavior) {
67464 + case MADV_NORMAL:
67465 + new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67466 +@@ -110,6 +114,13 @@ success:
67467 + /*
67468 + * vm_flags is protected by the mmap_sem held in write mode.
67469 + */
67470 ++
67471 ++#ifdef CONFIG_PAX_SEGMEXEC
67472 ++ vma_m = pax_find_mirror_vma(vma);
67473 ++ if (vma_m)
67474 ++ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67475 ++#endif
67476 ++
67477 + vma->vm_flags = new_flags;
67478 +
67479 + out:
67480 +@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
67481 + struct vm_area_struct ** prev,
67482 + unsigned long start, unsigned long end)
67483 + {
67484 ++
67485 ++#ifdef CONFIG_PAX_SEGMEXEC
67486 ++ struct vm_area_struct *vma_m;
67487 ++#endif
67488 ++
67489 + *prev = vma;
67490 + if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67491 + return -EINVAL;
67492 +@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
67493 + zap_page_range(vma, start, end - start, &details);
67494 + } else
67495 + zap_page_range(vma, start, end - start, NULL);
67496 ++
67497 ++#ifdef CONFIG_PAX_SEGMEXEC
67498 ++ vma_m = pax_find_mirror_vma(vma);
67499 ++ if (vma_m) {
67500 ++ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67501 ++ struct zap_details details = {
67502 ++ .nonlinear_vma = vma_m,
67503 ++ .last_index = ULONG_MAX,
67504 ++ };
67505 ++ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67506 ++ } else
67507 ++ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67508 ++ }
67509 ++#endif
67510 ++
67511 + return 0;
67512 + }
67513 +
67514 +@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67515 + if (end < start)
67516 + goto out;
67517 +
67518 ++#ifdef CONFIG_PAX_SEGMEXEC
67519 ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67520 ++ if (end > SEGMEXEC_TASK_SIZE)
67521 ++ goto out;
67522 ++ } else
67523 ++#endif
67524 ++
67525 ++ if (end > TASK_SIZE)
67526 ++ goto out;
67527 ++
67528 + error = 0;
67529 + if (end == start)
67530 + goto out;
67531 +diff -urNp linux-3.1.1/mm/memory.c linux-3.1.1/mm/memory.c
67532 +--- linux-3.1.1/mm/memory.c 2011-11-11 15:19:27.000000000 -0500
67533 ++++ linux-3.1.1/mm/memory.c 2011-11-16 18:39:08.000000000 -0500
67534 +@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct
67535 + return;
67536 +
67537 + pmd = pmd_offset(pud, start);
67538 ++
67539 ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67540 + pud_clear(pud);
67541 + pmd_free_tlb(tlb, pmd, start);
67542 ++#endif
67543 ++
67544 + }
67545 +
67546 + static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67547 +@@ -489,9 +493,12 @@ static inline void free_pud_range(struct
67548 + if (end - 1 > ceiling - 1)
67549 + return;
67550 +
67551 ++#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67552 + pud = pud_offset(pgd, start);
67553 + pgd_clear(pgd);
67554 + pud_free_tlb(tlb, pud, start);
67555 ++#endif
67556 ++
67557 + }
67558 +
67559 + /*
67560 +@@ -1566,12 +1573,6 @@ no_page_table:
67561 + return page;
67562 + }
67563 +
67564 +-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67565 +-{
67566 +- return stack_guard_page_start(vma, addr) ||
67567 +- stack_guard_page_end(vma, addr+PAGE_SIZE);
67568 +-}
67569 +-
67570 + /**
67571 + * __get_user_pages() - pin user pages in memory
67572 + * @tsk: task_struct of target task
67573 +@@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct
67574 + (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67575 + i = 0;
67576 +
67577 +- do {
67578 ++ while (nr_pages) {
67579 + struct vm_area_struct *vma;
67580 +
67581 +- vma = find_extend_vma(mm, start);
67582 ++ vma = find_vma(mm, start);
67583 + if (!vma && in_gate_area(mm, start)) {
67584 + unsigned long pg = start & PAGE_MASK;
67585 + pgd_t *pgd;
67586 +@@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct
67587 + goto next_page;
67588 + }
67589 +
67590 +- if (!vma ||
67591 ++ if (!vma || start < vma->vm_start ||
67592 + (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67593 + !(vm_flags & vma->vm_flags))
67594 + return i ? : -EFAULT;
67595 +@@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct
67596 + int ret;
67597 + unsigned int fault_flags = 0;
67598 +
67599 +- /* For mlock, just skip the stack guard page. */
67600 +- if (foll_flags & FOLL_MLOCK) {
67601 +- if (stack_guard_page(vma, start))
67602 +- goto next_page;
67603 +- }
67604 + if (foll_flags & FOLL_WRITE)
67605 + fault_flags |= FAULT_FLAG_WRITE;
67606 + if (nonblocking)
67607 +@@ -1800,7 +1796,7 @@ next_page:
67608 + start += PAGE_SIZE;
67609 + nr_pages--;
67610 + } while (nr_pages && start < vma->vm_end);
67611 +- } while (nr_pages);
67612 ++ }
67613 + return i;
67614 + }
67615 + EXPORT_SYMBOL(__get_user_pages);
67616 +@@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_st
67617 + page_add_file_rmap(page);
67618 + set_pte_at(mm, addr, pte, mk_pte(page, prot));
67619 +
67620 ++#ifdef CONFIG_PAX_SEGMEXEC
67621 ++ pax_mirror_file_pte(vma, addr, page, ptl);
67622 ++#endif
67623 ++
67624 + retval = 0;
67625 + pte_unmap_unlock(pte, ptl);
67626 + return retval;
67627 +@@ -2041,10 +2041,22 @@ out:
67628 + int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67629 + struct page *page)
67630 + {
67631 ++
67632 ++#ifdef CONFIG_PAX_SEGMEXEC
67633 ++ struct vm_area_struct *vma_m;
67634 ++#endif
67635 ++
67636 + if (addr < vma->vm_start || addr >= vma->vm_end)
67637 + return -EFAULT;
67638 + if (!page_count(page))
67639 + return -EINVAL;
67640 ++
67641 ++#ifdef CONFIG_PAX_SEGMEXEC
67642 ++ vma_m = pax_find_mirror_vma(vma);
67643 ++ if (vma_m)
67644 ++ vma_m->vm_flags |= VM_INSERTPAGE;
67645 ++#endif
67646 ++
67647 + vma->vm_flags |= VM_INSERTPAGE;
67648 + return insert_page(vma, addr, page, vma->vm_page_prot);
67649 + }
67650 +@@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struc
67651 + unsigned long pfn)
67652 + {
67653 + BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67654 ++ BUG_ON(vma->vm_mirror);
67655 +
67656 + if (addr < vma->vm_start || addr >= vma->vm_end)
67657 + return -EFAULT;
67658 +@@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct
67659 + copy_user_highpage(dst, src, va, vma);
67660 + }
67661 +
67662 ++#ifdef CONFIG_PAX_SEGMEXEC
67663 ++static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67664 ++{
67665 ++ struct mm_struct *mm = vma->vm_mm;
67666 ++ spinlock_t *ptl;
67667 ++ pte_t *pte, entry;
67668 ++
67669 ++ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67670 ++ entry = *pte;
67671 ++ if (!pte_present(entry)) {
67672 ++ if (!pte_none(entry)) {
67673 ++ BUG_ON(pte_file(entry));
67674 ++ free_swap_and_cache(pte_to_swp_entry(entry));
67675 ++ pte_clear_not_present_full(mm, address, pte, 0);
67676 ++ }
67677 ++ } else {
67678 ++ struct page *page;
67679 ++
67680 ++ flush_cache_page(vma, address, pte_pfn(entry));
67681 ++ entry = ptep_clear_flush(vma, address, pte);
67682 ++ BUG_ON(pte_dirty(entry));
67683 ++ page = vm_normal_page(vma, address, entry);
67684 ++ if (page) {
67685 ++ update_hiwater_rss(mm);
67686 ++ if (PageAnon(page))
67687 ++ dec_mm_counter_fast(mm, MM_ANONPAGES);
67688 ++ else
67689 ++ dec_mm_counter_fast(mm, MM_FILEPAGES);
67690 ++ page_remove_rmap(page);
67691 ++ page_cache_release(page);
67692 ++ }
67693 ++ }
67694 ++ pte_unmap_unlock(pte, ptl);
67695 ++}
67696 ++
67697 ++/* PaX: if vma is mirrored, synchronize the mirror's PTE
67698 ++ *
67699 ++ * the ptl of the lower mapped page is held on entry and is not released on exit
67700 ++ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67701 ++ */
67702 ++static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67703 ++{
67704 ++ struct mm_struct *mm = vma->vm_mm;
67705 ++ unsigned long address_m;
67706 ++ spinlock_t *ptl_m;
67707 ++ struct vm_area_struct *vma_m;
67708 ++ pmd_t *pmd_m;
67709 ++ pte_t *pte_m, entry_m;
67710 ++
67711 ++ BUG_ON(!page_m || !PageAnon(page_m));
67712 ++
67713 ++ vma_m = pax_find_mirror_vma(vma);
67714 ++ if (!vma_m)
67715 ++ return;
67716 ++
67717 ++ BUG_ON(!PageLocked(page_m));
67718 ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67719 ++ address_m = address + SEGMEXEC_TASK_SIZE;
67720 ++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67721 ++ pte_m = pte_offset_map(pmd_m, address_m);
67722 ++ ptl_m = pte_lockptr(mm, pmd_m);
67723 ++ if (ptl != ptl_m) {
67724 ++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67725 ++ if (!pte_none(*pte_m))
67726 ++ goto out;
67727 ++ }
67728 ++
67729 ++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67730 ++ page_cache_get(page_m);
67731 ++ page_add_anon_rmap(page_m, vma_m, address_m);
67732 ++ inc_mm_counter_fast(mm, MM_ANONPAGES);
67733 ++ set_pte_at(mm, address_m, pte_m, entry_m);
67734 ++ update_mmu_cache(vma_m, address_m, entry_m);
67735 ++out:
67736 ++ if (ptl != ptl_m)
67737 ++ spin_unlock(ptl_m);
67738 ++ pte_unmap(pte_m);
67739 ++ unlock_page(page_m);
67740 ++}
67741 ++
67742 ++void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67743 ++{
67744 ++ struct mm_struct *mm = vma->vm_mm;
67745 ++ unsigned long address_m;
67746 ++ spinlock_t *ptl_m;
67747 ++ struct vm_area_struct *vma_m;
67748 ++ pmd_t *pmd_m;
67749 ++ pte_t *pte_m, entry_m;
67750 ++
67751 ++ BUG_ON(!page_m || PageAnon(page_m));
67752 ++
67753 ++ vma_m = pax_find_mirror_vma(vma);
67754 ++ if (!vma_m)
67755 ++ return;
67756 ++
67757 ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67758 ++ address_m = address + SEGMEXEC_TASK_SIZE;
67759 ++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67760 ++ pte_m = pte_offset_map(pmd_m, address_m);
67761 ++ ptl_m = pte_lockptr(mm, pmd_m);
67762 ++ if (ptl != ptl_m) {
67763 ++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67764 ++ if (!pte_none(*pte_m))
67765 ++ goto out;
67766 ++ }
67767 ++
67768 ++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67769 ++ page_cache_get(page_m);
67770 ++ page_add_file_rmap(page_m);
67771 ++ inc_mm_counter_fast(mm, MM_FILEPAGES);
67772 ++ set_pte_at(mm, address_m, pte_m, entry_m);
67773 ++ update_mmu_cache(vma_m, address_m, entry_m);
67774 ++out:
67775 ++ if (ptl != ptl_m)
67776 ++ spin_unlock(ptl_m);
67777 ++ pte_unmap(pte_m);
67778 ++}
67779 ++
67780 ++static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67781 ++{
67782 ++ struct mm_struct *mm = vma->vm_mm;
67783 ++ unsigned long address_m;
67784 ++ spinlock_t *ptl_m;
67785 ++ struct vm_area_struct *vma_m;
67786 ++ pmd_t *pmd_m;
67787 ++ pte_t *pte_m, entry_m;
67788 ++
67789 ++ vma_m = pax_find_mirror_vma(vma);
67790 ++ if (!vma_m)
67791 ++ return;
67792 ++
67793 ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67794 ++ address_m = address + SEGMEXEC_TASK_SIZE;
67795 ++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67796 ++ pte_m = pte_offset_map(pmd_m, address_m);
67797 ++ ptl_m = pte_lockptr(mm, pmd_m);
67798 ++ if (ptl != ptl_m) {
67799 ++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67800 ++ if (!pte_none(*pte_m))
67801 ++ goto out;
67802 ++ }
67803 ++
67804 ++ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67805 ++ set_pte_at(mm, address_m, pte_m, entry_m);
67806 ++out:
67807 ++ if (ptl != ptl_m)
67808 ++ spin_unlock(ptl_m);
67809 ++ pte_unmap(pte_m);
67810 ++}
67811 ++
67812 ++static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67813 ++{
67814 ++ struct page *page_m;
67815 ++ pte_t entry;
67816 ++
67817 ++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67818 ++ goto out;
67819 ++
67820 ++ entry = *pte;
67821 ++ page_m = vm_normal_page(vma, address, entry);
67822 ++ if (!page_m)
67823 ++ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67824 ++ else if (PageAnon(page_m)) {
67825 ++ if (pax_find_mirror_vma(vma)) {
67826 ++ pte_unmap_unlock(pte, ptl);
67827 ++ lock_page(page_m);
67828 ++ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67829 ++ if (pte_same(entry, *pte))
67830 ++ pax_mirror_anon_pte(vma, address, page_m, ptl);
67831 ++ else
67832 ++ unlock_page(page_m);
67833 ++ }
67834 ++ } else
67835 ++ pax_mirror_file_pte(vma, address, page_m, ptl);
67836 ++
67837 ++out:
67838 ++ pte_unmap_unlock(pte, ptl);
67839 ++}
67840 ++#endif
67841 ++
67842 + /*
67843 + * This routine handles present pages, when users try to write
67844 + * to a shared page. It is done by copying the page to a new address
67845 +@@ -2656,6 +2849,12 @@ gotten:
67846 + */
67847 + page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67848 + if (likely(pte_same(*page_table, orig_pte))) {
67849 ++
67850 ++#ifdef CONFIG_PAX_SEGMEXEC
67851 ++ if (pax_find_mirror_vma(vma))
67852 ++ BUG_ON(!trylock_page(new_page));
67853 ++#endif
67854 ++
67855 + if (old_page) {
67856 + if (!PageAnon(old_page)) {
67857 + dec_mm_counter_fast(mm, MM_FILEPAGES);
67858 +@@ -2707,6 +2906,10 @@ gotten:
67859 + page_remove_rmap(old_page);
67860 + }
67861 +
67862 ++#ifdef CONFIG_PAX_SEGMEXEC
67863 ++ pax_mirror_anon_pte(vma, address, new_page, ptl);
67864 ++#endif
67865 ++
67866 + /* Free the old page.. */
67867 + new_page = old_page;
67868 + ret |= VM_FAULT_WRITE;
67869 +@@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct
67870 + swap_free(entry);
67871 + if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67872 + try_to_free_swap(page);
67873 ++
67874 ++#ifdef CONFIG_PAX_SEGMEXEC
67875 ++ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67876 ++#endif
67877 ++
67878 + unlock_page(page);
67879 + if (swapcache) {
67880 + /*
67881 +@@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct
67882 +
67883 + /* No need to invalidate - it was non-present before */
67884 + update_mmu_cache(vma, address, page_table);
67885 ++
67886 ++#ifdef CONFIG_PAX_SEGMEXEC
67887 ++ pax_mirror_anon_pte(vma, address, page, ptl);
67888 ++#endif
67889 ++
67890 + unlock:
67891 + pte_unmap_unlock(page_table, ptl);
67892 + out:
67893 +@@ -3028,40 +3241,6 @@ out_release:
67894 + }
67895 +
67896 + /*
67897 +- * This is like a special single-page "expand_{down|up}wards()",
67898 +- * except we must first make sure that 'address{-|+}PAGE_SIZE'
67899 +- * doesn't hit another vma.
67900 +- */
67901 +-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67902 +-{
67903 +- address &= PAGE_MASK;
67904 +- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67905 +- struct vm_area_struct *prev = vma->vm_prev;
67906 +-
67907 +- /*
67908 +- * Is there a mapping abutting this one below?
67909 +- *
67910 +- * That's only ok if it's the same stack mapping
67911 +- * that has gotten split..
67912 +- */
67913 +- if (prev && prev->vm_end == address)
67914 +- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67915 +-
67916 +- expand_downwards(vma, address - PAGE_SIZE);
67917 +- }
67918 +- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67919 +- struct vm_area_struct *next = vma->vm_next;
67920 +-
67921 +- /* As VM_GROWSDOWN but s/below/above/ */
67922 +- if (next && next->vm_start == address + PAGE_SIZE)
67923 +- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67924 +-
67925 +- expand_upwards(vma, address + PAGE_SIZE);
67926 +- }
67927 +- return 0;
67928 +-}
67929 +-
67930 +-/*
67931 + * We enter with non-exclusive mmap_sem (to exclude vma changes,
67932 + * but allow concurrent faults), and pte mapped but not yet locked.
67933 + * We return with mmap_sem still held, but pte unmapped and unlocked.
67934 +@@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_s
67935 + unsigned long address, pte_t *page_table, pmd_t *pmd,
67936 + unsigned int flags)
67937 + {
67938 +- struct page *page;
67939 ++ struct page *page = NULL;
67940 + spinlock_t *ptl;
67941 + pte_t entry;
67942 +
67943 +- pte_unmap(page_table);
67944 +-
67945 +- /* Check if we need to add a guard page to the stack */
67946 +- if (check_stack_guard_page(vma, address) < 0)
67947 +- return VM_FAULT_SIGBUS;
67948 +-
67949 +- /* Use the zero-page for reads */
67950 + if (!(flags & FAULT_FLAG_WRITE)) {
67951 + entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67952 + vma->vm_page_prot));
67953 +- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67954 ++ ptl = pte_lockptr(mm, pmd);
67955 ++ spin_lock(ptl);
67956 + if (!pte_none(*page_table))
67957 + goto unlock;
67958 + goto setpte;
67959 + }
67960 +
67961 + /* Allocate our own private page. */
67962 ++ pte_unmap(page_table);
67963 ++
67964 + if (unlikely(anon_vma_prepare(vma)))
67965 + goto oom;
67966 + page = alloc_zeroed_user_highpage_movable(vma, address);
67967 +@@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_s
67968 + if (!pte_none(*page_table))
67969 + goto release;
67970 +
67971 ++#ifdef CONFIG_PAX_SEGMEXEC
67972 ++ if (pax_find_mirror_vma(vma))
67973 ++ BUG_ON(!trylock_page(page));
67974 ++#endif
67975 ++
67976 + inc_mm_counter_fast(mm, MM_ANONPAGES);
67977 + page_add_new_anon_rmap(page, vma, address);
67978 + setpte:
67979 +@@ -3116,6 +3296,12 @@ setpte:
67980 +
67981 + /* No need to invalidate - it was non-present before */
67982 + update_mmu_cache(vma, address, page_table);
67983 ++
67984 ++#ifdef CONFIG_PAX_SEGMEXEC
67985 ++ if (page)
67986 ++ pax_mirror_anon_pte(vma, address, page, ptl);
67987 ++#endif
67988 ++
67989 + unlock:
67990 + pte_unmap_unlock(page_table, ptl);
67991 + return 0;
67992 +@@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *
67993 + */
67994 + /* Only go through if we didn't race with anybody else... */
67995 + if (likely(pte_same(*page_table, orig_pte))) {
67996 ++
67997 ++#ifdef CONFIG_PAX_SEGMEXEC
67998 ++ if (anon && pax_find_mirror_vma(vma))
67999 ++ BUG_ON(!trylock_page(page));
68000 ++#endif
68001 ++
68002 + flush_icache_page(vma, page);
68003 + entry = mk_pte(page, vma->vm_page_prot);
68004 + if (flags & FAULT_FLAG_WRITE)
68005 +@@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *
68006 +
68007 + /* no need to invalidate: a not-present page won't be cached */
68008 + update_mmu_cache(vma, address, page_table);
68009 ++
68010 ++#ifdef CONFIG_PAX_SEGMEXEC
68011 ++ if (anon)
68012 ++ pax_mirror_anon_pte(vma, address, page, ptl);
68013 ++ else
68014 ++ pax_mirror_file_pte(vma, address, page, ptl);
68015 ++#endif
68016 ++
68017 + } else {
68018 + if (cow_page)
68019 + mem_cgroup_uncharge_page(cow_page);
68020 +@@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *m
68021 + if (flags & FAULT_FLAG_WRITE)
68022 + flush_tlb_fix_spurious_fault(vma, address);
68023 + }
68024 ++
68025 ++#ifdef CONFIG_PAX_SEGMEXEC
68026 ++ pax_mirror_pte(vma, address, pte, pmd, ptl);
68027 ++ return 0;
68028 ++#endif
68029 ++
68030 + unlock:
68031 + pte_unmap_unlock(pte, ptl);
68032 + return 0;
68033 +@@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm
68034 + pmd_t *pmd;
68035 + pte_t *pte;
68036 +
68037 ++#ifdef CONFIG_PAX_SEGMEXEC
68038 ++ struct vm_area_struct *vma_m;
68039 ++#endif
68040 ++
68041 + __set_current_state(TASK_RUNNING);
68042 +
68043 + count_vm_event(PGFAULT);
68044 +@@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm
68045 + if (unlikely(is_vm_hugetlb_page(vma)))
68046 + return hugetlb_fault(mm, vma, address, flags);
68047 +
68048 ++#ifdef CONFIG_PAX_SEGMEXEC
68049 ++ vma_m = pax_find_mirror_vma(vma);
68050 ++ if (vma_m) {
68051 ++ unsigned long address_m;
68052 ++ pgd_t *pgd_m;
68053 ++ pud_t *pud_m;
68054 ++ pmd_t *pmd_m;
68055 ++
68056 ++ if (vma->vm_start > vma_m->vm_start) {
68057 ++ address_m = address;
68058 ++ address -= SEGMEXEC_TASK_SIZE;
68059 ++ vma = vma_m;
68060 ++ } else
68061 ++ address_m = address + SEGMEXEC_TASK_SIZE;
68062 ++
68063 ++ pgd_m = pgd_offset(mm, address_m);
68064 ++ pud_m = pud_alloc(mm, pgd_m, address_m);
68065 ++ if (!pud_m)
68066 ++ return VM_FAULT_OOM;
68067 ++ pmd_m = pmd_alloc(mm, pud_m, address_m);
68068 ++ if (!pmd_m)
68069 ++ return VM_FAULT_OOM;
68070 ++ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
68071 ++ return VM_FAULT_OOM;
68072 ++ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68073 ++ }
68074 ++#endif
68075 ++
68076 + pgd = pgd_offset(mm, address);
68077 + pud = pud_alloc(mm, pgd, address);
68078 + if (!pud)
68079 +@@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm
68080 + * run pte_offset_map on the pmd, if an huge pmd could
68081 + * materialize from under us from a different thread.
68082 + */
68083 +- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
68084 ++ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68085 + return VM_FAULT_OOM;
68086 + /* if an huge pmd materialized from under us just retry later */
68087 + if (unlikely(pmd_trans_huge(*pmd)))
68088 +@@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
68089 + gate_vma.vm_start = FIXADDR_USER_START;
68090 + gate_vma.vm_end = FIXADDR_USER_END;
68091 + gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68092 +- gate_vma.vm_page_prot = __P101;
68093 ++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68094 + /*
68095 + * Make sure the vDSO gets into every core dump.
68096 + * Dumping its contents makes post-mortem fully interpretable later
68097 +diff -urNp linux-3.1.1/mm/memory-failure.c linux-3.1.1/mm/memory-failure.c
68098 +--- linux-3.1.1/mm/memory-failure.c 2011-11-11 15:19:27.000000000 -0500
68099 ++++ linux-3.1.1/mm/memory-failure.c 2011-11-16 18:39:08.000000000 -0500
68100 +@@ -60,7 +60,7 @@ int sysctl_memory_failure_early_kill __r
68101 +
68102 + int sysctl_memory_failure_recovery __read_mostly = 1;
68103 +
68104 +-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68105 ++atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68106 +
68107 + #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
68108 +
68109 +@@ -201,7 +201,7 @@ static int kill_proc_ao(struct task_stru
68110 + si.si_signo = SIGBUS;
68111 + si.si_errno = 0;
68112 + si.si_code = BUS_MCEERR_AO;
68113 +- si.si_addr = (void *)addr;
68114 ++ si.si_addr = (void __user *)addr;
68115 + #ifdef __ARCH_SI_TRAPNO
68116 + si.si_trapno = trapno;
68117 + #endif
68118 +@@ -1009,7 +1009,7 @@ int __memory_failure(unsigned long pfn,
68119 + }
68120 +
68121 + nr_pages = 1 << compound_trans_order(hpage);
68122 +- atomic_long_add(nr_pages, &mce_bad_pages);
68123 ++ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
68124 +
68125 + /*
68126 + * We need/can do nothing about count=0 pages.
68127 +@@ -1039,7 +1039,7 @@ int __memory_failure(unsigned long pfn,
68128 + if (!PageHWPoison(hpage)
68129 + || (hwpoison_filter(p) && TestClearPageHWPoison(p))
68130 + || (p != hpage && TestSetPageHWPoison(hpage))) {
68131 +- atomic_long_sub(nr_pages, &mce_bad_pages);
68132 ++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68133 + return 0;
68134 + }
68135 + set_page_hwpoison_huge_page(hpage);
68136 +@@ -1097,7 +1097,7 @@ int __memory_failure(unsigned long pfn,
68137 + }
68138 + if (hwpoison_filter(p)) {
68139 + if (TestClearPageHWPoison(p))
68140 +- atomic_long_sub(nr_pages, &mce_bad_pages);
68141 ++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68142 + unlock_page(hpage);
68143 + put_page(hpage);
68144 + return 0;
68145 +@@ -1314,7 +1314,7 @@ int unpoison_memory(unsigned long pfn)
68146 + return 0;
68147 + }
68148 + if (TestClearPageHWPoison(p))
68149 +- atomic_long_sub(nr_pages, &mce_bad_pages);
68150 ++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68151 + pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
68152 + return 0;
68153 + }
68154 +@@ -1328,7 +1328,7 @@ int unpoison_memory(unsigned long pfn)
68155 + */
68156 + if (TestClearPageHWPoison(page)) {
68157 + pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
68158 +- atomic_long_sub(nr_pages, &mce_bad_pages);
68159 ++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68160 + freeit = 1;
68161 + if (PageHuge(page))
68162 + clear_page_hwpoison_huge_page(page);
68163 +@@ -1441,7 +1441,7 @@ static int soft_offline_huge_page(struct
68164 + }
68165 + done:
68166 + if (!PageHWPoison(hpage))
68167 +- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
68168 ++ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
68169 + set_page_hwpoison_huge_page(hpage);
68170 + dequeue_hwpoisoned_huge_page(hpage);
68171 + /* keep elevated page count for bad page */
68172 +@@ -1572,7 +1572,7 @@ int soft_offline_page(struct page *page,
68173 + return ret;
68174 +
68175 + done:
68176 +- atomic_long_add(1, &mce_bad_pages);
68177 ++ atomic_long_add_unchecked(1, &mce_bad_pages);
68178 + SetPageHWPoison(page);
68179 + /* keep elevated page count for bad page */
68180 + return ret;
68181 +diff -urNp linux-3.1.1/mm/mempolicy.c linux-3.1.1/mm/mempolicy.c
68182 +--- linux-3.1.1/mm/mempolicy.c 2011-11-11 15:19:27.000000000 -0500
68183 ++++ linux-3.1.1/mm/mempolicy.c 2011-11-16 18:40:44.000000000 -0500
68184 +@@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct
68185 + unsigned long vmstart;
68186 + unsigned long vmend;
68187 +
68188 ++#ifdef CONFIG_PAX_SEGMEXEC
68189 ++ struct vm_area_struct *vma_m;
68190 ++#endif
68191 ++
68192 + vma = find_vma_prev(mm, start, &prev);
68193 + if (!vma || vma->vm_start > start)
68194 + return -EFAULT;
68195 +@@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct
68196 + err = policy_vma(vma, new_pol);
68197 + if (err)
68198 + goto out;
68199 ++
68200 ++#ifdef CONFIG_PAX_SEGMEXEC
68201 ++ vma_m = pax_find_mirror_vma(vma);
68202 ++ if (vma_m) {
68203 ++ err = policy_vma(vma_m, new_pol);
68204 ++ if (err)
68205 ++ goto out;
68206 ++ }
68207 ++#endif
68208 ++
68209 + }
68210 +
68211 + out:
68212 +@@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start
68213 +
68214 + if (end < start)
68215 + return -EINVAL;
68216 ++
68217 ++#ifdef CONFIG_PAX_SEGMEXEC
68218 ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68219 ++ if (end > SEGMEXEC_TASK_SIZE)
68220 ++ return -EINVAL;
68221 ++ } else
68222 ++#endif
68223 ++
68224 ++ if (end > TASK_SIZE)
68225 ++ return -EINVAL;
68226 ++
68227 + if (end == start)
68228 + return 0;
68229 +
68230 +@@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68231 + if (!mm)
68232 + goto out;
68233 +
68234 ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68235 ++ if (mm != current->mm &&
68236 ++ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68237 ++ err = -EPERM;
68238 ++ goto out;
68239 ++ }
68240 ++#endif
68241 ++
68242 + /*
68243 + * Check if this process has the right to modify the specified
68244 + * process. The right exists if the process has administrative
68245 +@@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68246 + rcu_read_lock();
68247 + tcred = __task_cred(task);
68248 + if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68249 +- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68250 +- !capable(CAP_SYS_NICE)) {
68251 ++ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68252 + rcu_read_unlock();
68253 + err = -EPERM;
68254 + goto out;
68255 +diff -urNp linux-3.1.1/mm/migrate.c linux-3.1.1/mm/migrate.c
68256 +--- linux-3.1.1/mm/migrate.c 2011-11-11 15:19:27.000000000 -0500
68257 ++++ linux-3.1.1/mm/migrate.c 2011-11-16 18:40:44.000000000 -0500
68258 +@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struc
68259 + unsigned long chunk_start;
68260 + int err;
68261 +
68262 ++ pax_track_stack();
68263 ++
68264 + task_nodes = cpuset_mems_allowed(task);
68265 +
68266 + err = -ENOMEM;
68267 +@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68268 + if (!mm)
68269 + return -EINVAL;
68270 +
68271 ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68272 ++ if (mm != current->mm &&
68273 ++ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68274 ++ err = -EPERM;
68275 ++ goto out;
68276 ++ }
68277 ++#endif
68278 ++
68279 + /*
68280 + * Check if this process has the right to modify the specified
68281 + * process. The right exists if the process has administrative
68282 +@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68283 + rcu_read_lock();
68284 + tcred = __task_cred(task);
68285 + if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68286 +- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68287 +- !capable(CAP_SYS_NICE)) {
68288 ++ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68289 + rcu_read_unlock();
68290 + err = -EPERM;
68291 + goto out;
68292 +diff -urNp linux-3.1.1/mm/mlock.c linux-3.1.1/mm/mlock.c
68293 +--- linux-3.1.1/mm/mlock.c 2011-11-11 15:19:27.000000000 -0500
68294 ++++ linux-3.1.1/mm/mlock.c 2011-11-16 18:40:44.000000000 -0500
68295 +@@ -13,6 +13,7 @@
68296 + #include <linux/pagemap.h>
68297 + #include <linux/mempolicy.h>
68298 + #include <linux/syscalls.h>
68299 ++#include <linux/security.h>
68300 + #include <linux/sched.h>
68301 + #include <linux/module.h>
68302 + #include <linux/rmap.h>
68303 +@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
68304 + return -EINVAL;
68305 + if (end == start)
68306 + return 0;
68307 ++ if (end > TASK_SIZE)
68308 ++ return -EINVAL;
68309 ++
68310 + vma = find_vma_prev(current->mm, start, &prev);
68311 + if (!vma || vma->vm_start > start)
68312 + return -ENOMEM;
68313 +@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
68314 + for (nstart = start ; ; ) {
68315 + vm_flags_t newflags;
68316 +
68317 ++#ifdef CONFIG_PAX_SEGMEXEC
68318 ++ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68319 ++ break;
68320 ++#endif
68321 ++
68322 + /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68323 +
68324 + newflags = vma->vm_flags | VM_LOCKED;
68325 +@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
68326 + lock_limit >>= PAGE_SHIFT;
68327 +
68328 + /* check against resource limits */
68329 ++ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68330 + if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68331 + error = do_mlock(start, len, 1);
68332 + up_write(&current->mm->mmap_sem);
68333 +@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
68334 + static int do_mlockall(int flags)
68335 + {
68336 + struct vm_area_struct * vma, * prev = NULL;
68337 +- unsigned int def_flags = 0;
68338 +
68339 + if (flags & MCL_FUTURE)
68340 +- def_flags = VM_LOCKED;
68341 +- current->mm->def_flags = def_flags;
68342 ++ current->mm->def_flags |= VM_LOCKED;
68343 ++ else
68344 ++ current->mm->def_flags &= ~VM_LOCKED;
68345 + if (flags == MCL_FUTURE)
68346 + goto out;
68347 +
68348 + for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68349 + vm_flags_t newflags;
68350 +
68351 ++#ifdef CONFIG_PAX_SEGMEXEC
68352 ++ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68353 ++ break;
68354 ++#endif
68355 ++
68356 ++ BUG_ON(vma->vm_end > TASK_SIZE);
68357 + newflags = vma->vm_flags | VM_LOCKED;
68358 + if (!(flags & MCL_CURRENT))
68359 + newflags &= ~VM_LOCKED;
68360 +@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68361 + lock_limit >>= PAGE_SHIFT;
68362 +
68363 + ret = -ENOMEM;
68364 ++ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68365 + if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68366 + capable(CAP_IPC_LOCK))
68367 + ret = do_mlockall(flags);
68368 +diff -urNp linux-3.1.1/mm/mmap.c linux-3.1.1/mm/mmap.c
68369 +--- linux-3.1.1/mm/mmap.c 2011-11-11 15:19:27.000000000 -0500
68370 ++++ linux-3.1.1/mm/mmap.c 2011-11-16 18:40:44.000000000 -0500
68371 +@@ -46,6 +46,16 @@
68372 + #define arch_rebalance_pgtables(addr, len) (addr)
68373 + #endif
68374 +
68375 ++static inline void verify_mm_writelocked(struct mm_struct *mm)
68376 ++{
68377 ++#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68378 ++ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68379 ++ up_read(&mm->mmap_sem);
68380 ++ BUG();
68381 ++ }
68382 ++#endif
68383 ++}
68384 ++
68385 + static void unmap_region(struct mm_struct *mm,
68386 + struct vm_area_struct *vma, struct vm_area_struct *prev,
68387 + unsigned long start, unsigned long end);
68388 +@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
68389 + * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68390 + *
68391 + */
68392 +-pgprot_t protection_map[16] = {
68393 ++pgprot_t protection_map[16] __read_only = {
68394 + __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68395 + __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68396 + };
68397 +
68398 +-pgprot_t vm_get_page_prot(unsigned long vm_flags)
68399 ++pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68400 + {
68401 +- return __pgprot(pgprot_val(protection_map[vm_flags &
68402 ++ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68403 + (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68404 + pgprot_val(arch_vm_get_page_prot(vm_flags)));
68405 ++
68406 ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68407 ++ if (!(__supported_pte_mask & _PAGE_NX) &&
68408 ++ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68409 ++ (vm_flags & (VM_READ | VM_WRITE)))
68410 ++ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68411 ++#endif
68412 ++
68413 ++ return prot;
68414 + }
68415 + EXPORT_SYMBOL(vm_get_page_prot);
68416 +
68417 + int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68418 + int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
68419 + int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68420 ++unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68421 + /*
68422 + * Make sure vm_committed_as in one cacheline and not cacheline shared with
68423 + * other variables. It can be updated by several CPUs frequently.
68424 +@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma
68425 + struct vm_area_struct *next = vma->vm_next;
68426 +
68427 + might_sleep();
68428 ++ BUG_ON(vma->vm_mirror);
68429 + if (vma->vm_ops && vma->vm_ops->close)
68430 + vma->vm_ops->close(vma);
68431 + if (vma->vm_file) {
68432 +@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68433 + * not page aligned -Ram Gupta
68434 + */
68435 + rlim = rlimit(RLIMIT_DATA);
68436 ++ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68437 + if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68438 + (mm->end_data - mm->start_data) > rlim)
68439 + goto out;
68440 +@@ -689,6 +711,12 @@ static int
68441 + can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68442 + struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68443 + {
68444 ++
68445 ++#ifdef CONFIG_PAX_SEGMEXEC
68446 ++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68447 ++ return 0;
68448 ++#endif
68449 ++
68450 + if (is_mergeable_vma(vma, file, vm_flags) &&
68451 + is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68452 + if (vma->vm_pgoff == vm_pgoff)
68453 +@@ -708,6 +736,12 @@ static int
68454 + can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68455 + struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68456 + {
68457 ++
68458 ++#ifdef CONFIG_PAX_SEGMEXEC
68459 ++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68460 ++ return 0;
68461 ++#endif
68462 ++
68463 + if (is_mergeable_vma(vma, file, vm_flags) &&
68464 + is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68465 + pgoff_t vm_pglen;
68466 +@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struc
68467 + struct vm_area_struct *vma_merge(struct mm_struct *mm,
68468 + struct vm_area_struct *prev, unsigned long addr,
68469 + unsigned long end, unsigned long vm_flags,
68470 +- struct anon_vma *anon_vma, struct file *file,
68471 ++ struct anon_vma *anon_vma, struct file *file,
68472 + pgoff_t pgoff, struct mempolicy *policy)
68473 + {
68474 + pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68475 + struct vm_area_struct *area, *next;
68476 + int err;
68477 +
68478 ++#ifdef CONFIG_PAX_SEGMEXEC
68479 ++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68480 ++ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68481 ++
68482 ++ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68483 ++#endif
68484 ++
68485 + /*
68486 + * We later require that vma->vm_flags == vm_flags,
68487 + * so this tests vma->vm_flags & VM_SPECIAL, too.
68488 +@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct
68489 + if (next && next->vm_end == end) /* cases 6, 7, 8 */
68490 + next = next->vm_next;
68491 +
68492 ++#ifdef CONFIG_PAX_SEGMEXEC
68493 ++ if (prev)
68494 ++ prev_m = pax_find_mirror_vma(prev);
68495 ++ if (area)
68496 ++ area_m = pax_find_mirror_vma(area);
68497 ++ if (next)
68498 ++ next_m = pax_find_mirror_vma(next);
68499 ++#endif
68500 ++
68501 + /*
68502 + * Can it merge with the predecessor?
68503 + */
68504 +@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct
68505 + /* cases 1, 6 */
68506 + err = vma_adjust(prev, prev->vm_start,
68507 + next->vm_end, prev->vm_pgoff, NULL);
68508 +- } else /* cases 2, 5, 7 */
68509 ++
68510 ++#ifdef CONFIG_PAX_SEGMEXEC
68511 ++ if (!err && prev_m)
68512 ++ err = vma_adjust(prev_m, prev_m->vm_start,
68513 ++ next_m->vm_end, prev_m->vm_pgoff, NULL);
68514 ++#endif
68515 ++
68516 ++ } else { /* cases 2, 5, 7 */
68517 + err = vma_adjust(prev, prev->vm_start,
68518 + end, prev->vm_pgoff, NULL);
68519 ++
68520 ++#ifdef CONFIG_PAX_SEGMEXEC
68521 ++ if (!err && prev_m)
68522 ++ err = vma_adjust(prev_m, prev_m->vm_start,
68523 ++ end_m, prev_m->vm_pgoff, NULL);
68524 ++#endif
68525 ++
68526 ++ }
68527 + if (err)
68528 + return NULL;
68529 + khugepaged_enter_vma_merge(prev);
68530 +@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct
68531 + mpol_equal(policy, vma_policy(next)) &&
68532 + can_vma_merge_before(next, vm_flags,
68533 + anon_vma, file, pgoff+pglen)) {
68534 +- if (prev && addr < prev->vm_end) /* case 4 */
68535 ++ if (prev && addr < prev->vm_end) { /* case 4 */
68536 + err = vma_adjust(prev, prev->vm_start,
68537 + addr, prev->vm_pgoff, NULL);
68538 +- else /* cases 3, 8 */
68539 ++
68540 ++#ifdef CONFIG_PAX_SEGMEXEC
68541 ++ if (!err && prev_m)
68542 ++ err = vma_adjust(prev_m, prev_m->vm_start,
68543 ++ addr_m, prev_m->vm_pgoff, NULL);
68544 ++#endif
68545 ++
68546 ++ } else { /* cases 3, 8 */
68547 + err = vma_adjust(area, addr, next->vm_end,
68548 + next->vm_pgoff - pglen, NULL);
68549 ++
68550 ++#ifdef CONFIG_PAX_SEGMEXEC
68551 ++ if (!err && area_m)
68552 ++ err = vma_adjust(area_m, addr_m, next_m->vm_end,
68553 ++ next_m->vm_pgoff - pglen, NULL);
68554 ++#endif
68555 ++
68556 ++ }
68557 + if (err)
68558 + return NULL;
68559 + khugepaged_enter_vma_merge(area);
68560 +@@ -921,14 +1001,11 @@ none:
68561 + void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68562 + struct file *file, long pages)
68563 + {
68564 +- const unsigned long stack_flags
68565 +- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68566 +-
68567 + if (file) {
68568 + mm->shared_vm += pages;
68569 + if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68570 + mm->exec_vm += pages;
68571 +- } else if (flags & stack_flags)
68572 ++ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68573 + mm->stack_vm += pages;
68574 + if (flags & (VM_RESERVED|VM_IO))
68575 + mm->reserved_vm += pages;
68576 +@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file
68577 + * (the exception is when the underlying filesystem is noexec
68578 + * mounted, in which case we dont add PROT_EXEC.)
68579 + */
68580 +- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68581 ++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68582 + if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68583 + prot |= PROT_EXEC;
68584 +
68585 +@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file
68586 + /* Obtain the address to map to. we verify (or select) it and ensure
68587 + * that it represents a valid section of the address space.
68588 + */
68589 +- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68590 ++ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68591 + if (addr & ~PAGE_MASK)
68592 + return addr;
68593 +
68594 +@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file
68595 + vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68596 + mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68597 +
68598 ++#ifdef CONFIG_PAX_MPROTECT
68599 ++ if (mm->pax_flags & MF_PAX_MPROTECT) {
68600 ++#ifndef CONFIG_PAX_MPROTECT_COMPAT
68601 ++ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68602 ++ gr_log_rwxmmap(file);
68603 ++
68604 ++#ifdef CONFIG_PAX_EMUPLT
68605 ++ vm_flags &= ~VM_EXEC;
68606 ++#else
68607 ++ return -EPERM;
68608 ++#endif
68609 ++
68610 ++ }
68611 ++
68612 ++ if (!(vm_flags & VM_EXEC))
68613 ++ vm_flags &= ~VM_MAYEXEC;
68614 ++#else
68615 ++ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68616 ++ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68617 ++#endif
68618 ++ else
68619 ++ vm_flags &= ~VM_MAYWRITE;
68620 ++ }
68621 ++#endif
68622 ++
68623 ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68624 ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68625 ++ vm_flags &= ~VM_PAGEEXEC;
68626 ++#endif
68627 ++
68628 + if (flags & MAP_LOCKED)
68629 + if (!can_do_mlock())
68630 + return -EPERM;
68631 +@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file
68632 + locked += mm->locked_vm;
68633 + lock_limit = rlimit(RLIMIT_MEMLOCK);
68634 + lock_limit >>= PAGE_SHIFT;
68635 ++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68636 + if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68637 + return -EAGAIN;
68638 + }
68639 +@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file
68640 + if (error)
68641 + return error;
68642 +
68643 ++ if (!gr_acl_handle_mmap(file, prot))
68644 ++ return -EACCES;
68645 ++
68646 + return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68647 + }
68648 + EXPORT_SYMBOL(do_mmap_pgoff);
68649 +@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area
68650 + vm_flags_t vm_flags = vma->vm_flags;
68651 +
68652 + /* If it was private or non-writable, the write bit is already clear */
68653 +- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68654 ++ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68655 + return 0;
68656 +
68657 + /* The backer wishes to know when pages are first written to? */
68658 +@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *f
68659 + unsigned long charged = 0;
68660 + struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68661 +
68662 ++#ifdef CONFIG_PAX_SEGMEXEC
68663 ++ struct vm_area_struct *vma_m = NULL;
68664 ++#endif
68665 ++
68666 ++ /*
68667 ++ * mm->mmap_sem is required to protect against another thread
68668 ++ * changing the mappings in case we sleep.
68669 ++ */
68670 ++ verify_mm_writelocked(mm);
68671 ++
68672 + /* Clear old maps */
68673 + error = -ENOMEM;
68674 +-munmap_back:
68675 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68676 + if (vma && vma->vm_start < addr + len) {
68677 + if (do_munmap(mm, addr, len))
68678 + return -ENOMEM;
68679 +- goto munmap_back;
68680 ++ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68681 ++ BUG_ON(vma && vma->vm_start < addr + len);
68682 + }
68683 +
68684 + /* Check against address space limit. */
68685 +@@ -1258,6 +1379,16 @@ munmap_back:
68686 + goto unacct_error;
68687 + }
68688 +
68689 ++#ifdef CONFIG_PAX_SEGMEXEC
68690 ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68691 ++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68692 ++ if (!vma_m) {
68693 ++ error = -ENOMEM;
68694 ++ goto free_vma;
68695 ++ }
68696 ++ }
68697 ++#endif
68698 ++
68699 + vma->vm_mm = mm;
68700 + vma->vm_start = addr;
68701 + vma->vm_end = addr + len;
68702 +@@ -1281,6 +1412,19 @@ munmap_back:
68703 + error = file->f_op->mmap(file, vma);
68704 + if (error)
68705 + goto unmap_and_free_vma;
68706 ++
68707 ++#ifdef CONFIG_PAX_SEGMEXEC
68708 ++ if (vma_m && (vm_flags & VM_EXECUTABLE))
68709 ++ added_exe_file_vma(mm);
68710 ++#endif
68711 ++
68712 ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68713 ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68714 ++ vma->vm_flags |= VM_PAGEEXEC;
68715 ++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68716 ++ }
68717 ++#endif
68718 ++
68719 + if (vm_flags & VM_EXECUTABLE)
68720 + added_exe_file_vma(mm);
68721 +
68722 +@@ -1316,6 +1460,11 @@ munmap_back:
68723 + vma_link(mm, vma, prev, rb_link, rb_parent);
68724 + file = vma->vm_file;
68725 +
68726 ++#ifdef CONFIG_PAX_SEGMEXEC
68727 ++ if (vma_m)
68728 ++ BUG_ON(pax_mirror_vma(vma_m, vma));
68729 ++#endif
68730 ++
68731 + /* Once vma denies write, undo our temporary denial count */
68732 + if (correct_wcount)
68733 + atomic_inc(&inode->i_writecount);
68734 +@@ -1324,6 +1473,7 @@ out:
68735 +
68736 + mm->total_vm += len >> PAGE_SHIFT;
68737 + vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68738 ++ track_exec_limit(mm, addr, addr + len, vm_flags);
68739 + if (vm_flags & VM_LOCKED) {
68740 + if (!mlock_vma_pages_range(vma, addr, addr + len))
68741 + mm->locked_vm += (len >> PAGE_SHIFT);
68742 +@@ -1341,6 +1491,12 @@ unmap_and_free_vma:
68743 + unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68744 + charged = 0;
68745 + free_vma:
68746 ++
68747 ++#ifdef CONFIG_PAX_SEGMEXEC
68748 ++ if (vma_m)
68749 ++ kmem_cache_free(vm_area_cachep, vma_m);
68750 ++#endif
68751 ++
68752 + kmem_cache_free(vm_area_cachep, vma);
68753 + unacct_error:
68754 + if (charged)
68755 +@@ -1348,6 +1504,44 @@ unacct_error:
68756 + return error;
68757 + }
68758 +
68759 ++bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68760 ++{
68761 ++ if (!vma) {
68762 ++#ifdef CONFIG_STACK_GROWSUP
68763 ++ if (addr > sysctl_heap_stack_gap)
68764 ++ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68765 ++ else
68766 ++ vma = find_vma(current->mm, 0);
68767 ++ if (vma && (vma->vm_flags & VM_GROWSUP))
68768 ++ return false;
68769 ++#endif
68770 ++ return true;
68771 ++ }
68772 ++
68773 ++ if (addr + len > vma->vm_start)
68774 ++ return false;
68775 ++
68776 ++ if (vma->vm_flags & VM_GROWSDOWN)
68777 ++ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68778 ++#ifdef CONFIG_STACK_GROWSUP
68779 ++ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68780 ++ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68781 ++#endif
68782 ++
68783 ++ return true;
68784 ++}
68785 ++
68786 ++unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68787 ++{
68788 ++ if (vma->vm_start < len)
68789 ++ return -ENOMEM;
68790 ++ if (!(vma->vm_flags & VM_GROWSDOWN))
68791 ++ return vma->vm_start - len;
68792 ++ if (sysctl_heap_stack_gap <= vma->vm_start - len)
68793 ++ return vma->vm_start - len - sysctl_heap_stack_gap;
68794 ++ return -ENOMEM;
68795 ++}
68796 ++
68797 + /* Get an address range which is currently unmapped.
68798 + * For shmat() with addr=0.
68799 + *
68800 +@@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp
68801 + if (flags & MAP_FIXED)
68802 + return addr;
68803 +
68804 ++#ifdef CONFIG_PAX_RANDMMAP
68805 ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68806 ++#endif
68807 ++
68808 + if (addr) {
68809 + addr = PAGE_ALIGN(addr);
68810 +- vma = find_vma(mm, addr);
68811 +- if (TASK_SIZE - len >= addr &&
68812 +- (!vma || addr + len <= vma->vm_start))
68813 +- return addr;
68814 ++ if (TASK_SIZE - len >= addr) {
68815 ++ vma = find_vma(mm, addr);
68816 ++ if (check_heap_stack_gap(vma, addr, len))
68817 ++ return addr;
68818 ++ }
68819 + }
68820 + if (len > mm->cached_hole_size) {
68821 +- start_addr = addr = mm->free_area_cache;
68822 ++ start_addr = addr = mm->free_area_cache;
68823 + } else {
68824 +- start_addr = addr = TASK_UNMAPPED_BASE;
68825 +- mm->cached_hole_size = 0;
68826 ++ start_addr = addr = mm->mmap_base;
68827 ++ mm->cached_hole_size = 0;
68828 + }
68829 +
68830 + full_search:
68831 +@@ -1396,34 +1595,40 @@ full_search:
68832 + * Start a new search - just in case we missed
68833 + * some holes.
68834 + */
68835 +- if (start_addr != TASK_UNMAPPED_BASE) {
68836 +- addr = TASK_UNMAPPED_BASE;
68837 +- start_addr = addr;
68838 ++ if (start_addr != mm->mmap_base) {
68839 ++ start_addr = addr = mm->mmap_base;
68840 + mm->cached_hole_size = 0;
68841 + goto full_search;
68842 + }
68843 + return -ENOMEM;
68844 + }
68845 +- if (!vma || addr + len <= vma->vm_start) {
68846 +- /*
68847 +- * Remember the place where we stopped the search:
68848 +- */
68849 +- mm->free_area_cache = addr + len;
68850 +- return addr;
68851 +- }
68852 ++ if (check_heap_stack_gap(vma, addr, len))
68853 ++ break;
68854 + if (addr + mm->cached_hole_size < vma->vm_start)
68855 + mm->cached_hole_size = vma->vm_start - addr;
68856 + addr = vma->vm_end;
68857 + }
68858 ++
68859 ++ /*
68860 ++ * Remember the place where we stopped the search:
68861 ++ */
68862 ++ mm->free_area_cache = addr + len;
68863 ++ return addr;
68864 + }
68865 + #endif
68866 +
68867 + void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68868 + {
68869 ++
68870 ++#ifdef CONFIG_PAX_SEGMEXEC
68871 ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68872 ++ return;
68873 ++#endif
68874 ++
68875 + /*
68876 + * Is this a new hole at the lowest possible address?
68877 + */
68878 +- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68879 ++ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68880 + mm->free_area_cache = addr;
68881 + mm->cached_hole_size = ~0UL;
68882 + }
68883 +@@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct fi
68884 + {
68885 + struct vm_area_struct *vma;
68886 + struct mm_struct *mm = current->mm;
68887 +- unsigned long addr = addr0;
68888 ++ unsigned long base = mm->mmap_base, addr = addr0;
68889 +
68890 + /* requested length too big for entire address space */
68891 + if (len > TASK_SIZE)
68892 +@@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct fi
68893 + if (flags & MAP_FIXED)
68894 + return addr;
68895 +
68896 ++#ifdef CONFIG_PAX_RANDMMAP
68897 ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68898 ++#endif
68899 ++
68900 + /* requesting a specific address */
68901 + if (addr) {
68902 + addr = PAGE_ALIGN(addr);
68903 +- vma = find_vma(mm, addr);
68904 +- if (TASK_SIZE - len >= addr &&
68905 +- (!vma || addr + len <= vma->vm_start))
68906 +- return addr;
68907 ++ if (TASK_SIZE - len >= addr) {
68908 ++ vma = find_vma(mm, addr);
68909 ++ if (check_heap_stack_gap(vma, addr, len))
68910 ++ return addr;
68911 ++ }
68912 + }
68913 +
68914 + /* check if free_area_cache is useful for us */
68915 +@@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct fi
68916 + /* make sure it can fit in the remaining address space */
68917 + if (addr > len) {
68918 + vma = find_vma(mm, addr-len);
68919 +- if (!vma || addr <= vma->vm_start)
68920 ++ if (check_heap_stack_gap(vma, addr - len, len))
68921 + /* remember the address as a hint for next time */
68922 + return (mm->free_area_cache = addr-len);
68923 + }
68924 +@@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct fi
68925 + * return with success:
68926 + */
68927 + vma = find_vma(mm, addr);
68928 +- if (!vma || addr+len <= vma->vm_start)
68929 ++ if (check_heap_stack_gap(vma, addr, len))
68930 + /* remember the address as a hint for next time */
68931 + return (mm->free_area_cache = addr);
68932 +
68933 +@@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct fi
68934 + mm->cached_hole_size = vma->vm_start - addr;
68935 +
68936 + /* try just below the current vma->vm_start */
68937 +- addr = vma->vm_start-len;
68938 +- } while (len < vma->vm_start);
68939 ++ addr = skip_heap_stack_gap(vma, len);
68940 ++ } while (!IS_ERR_VALUE(addr));
68941 +
68942 + bottomup:
68943 + /*
68944 +@@ -1507,13 +1717,21 @@ bottomup:
68945 + * can happen with large stack limits and large mmap()
68946 + * allocations.
68947 + */
68948 ++ mm->mmap_base = TASK_UNMAPPED_BASE;
68949 ++
68950 ++#ifdef CONFIG_PAX_RANDMMAP
68951 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
68952 ++ mm->mmap_base += mm->delta_mmap;
68953 ++#endif
68954 ++
68955 ++ mm->free_area_cache = mm->mmap_base;
68956 + mm->cached_hole_size = ~0UL;
68957 +- mm->free_area_cache = TASK_UNMAPPED_BASE;
68958 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68959 + /*
68960 + * Restore the topdown base:
68961 + */
68962 +- mm->free_area_cache = mm->mmap_base;
68963 ++ mm->mmap_base = base;
68964 ++ mm->free_area_cache = base;
68965 + mm->cached_hole_size = ~0UL;
68966 +
68967 + return addr;
68968 +@@ -1522,6 +1740,12 @@ bottomup:
68969 +
68970 + void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68971 + {
68972 ++
68973 ++#ifdef CONFIG_PAX_SEGMEXEC
68974 ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68975 ++ return;
68976 ++#endif
68977 ++
68978 + /*
68979 + * Is this a new hole at the highest possible address?
68980 + */
68981 +@@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_s
68982 + mm->free_area_cache = addr;
68983 +
68984 + /* dont allow allocations above current base */
68985 +- if (mm->free_area_cache > mm->mmap_base)
68986 ++ if (mm->free_area_cache > mm->mmap_base) {
68987 + mm->free_area_cache = mm->mmap_base;
68988 ++ mm->cached_hole_size = ~0UL;
68989 ++ }
68990 + }
68991 +
68992 + unsigned long
68993 +@@ -1638,6 +1864,28 @@ out:
68994 + return prev ? prev->vm_next : vma;
68995 + }
68996 +
68997 ++#ifdef CONFIG_PAX_SEGMEXEC
68998 ++struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
68999 ++{
69000 ++ struct vm_area_struct *vma_m;
69001 ++
69002 ++ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69003 ++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69004 ++ BUG_ON(vma->vm_mirror);
69005 ++ return NULL;
69006 ++ }
69007 ++ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69008 ++ vma_m = vma->vm_mirror;
69009 ++ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69010 ++ BUG_ON(vma->vm_file != vma_m->vm_file);
69011 ++ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69012 ++ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
69013 ++ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
69014 ++ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69015 ++ return vma_m;
69016 ++}
69017 ++#endif
69018 ++
69019 + /*
69020 + * Verify that the stack growth is acceptable and
69021 + * update accounting. This is shared with both the
69022 +@@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_a
69023 + return -ENOMEM;
69024 +
69025 + /* Stack limit test */
69026 ++ gr_learn_resource(current, RLIMIT_STACK, size, 1);
69027 + if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
69028 + return -ENOMEM;
69029 +
69030 +@@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_a
69031 + locked = mm->locked_vm + grow;
69032 + limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
69033 + limit >>= PAGE_SHIFT;
69034 ++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69035 + if (locked > limit && !capable(CAP_IPC_LOCK))
69036 + return -ENOMEM;
69037 + }
69038 +@@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_a
69039 + * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69040 + * vma is the last one with address > vma->vm_end. Have to extend vma.
69041 + */
69042 ++#ifndef CONFIG_IA64
69043 ++static
69044 ++#endif
69045 + int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69046 + {
69047 + int error;
69048 ++ bool locknext;
69049 +
69050 + if (!(vma->vm_flags & VM_GROWSUP))
69051 + return -EFAULT;
69052 +
69053 ++ /* Also guard against wrapping around to address 0. */
69054 ++ if (address < PAGE_ALIGN(address+1))
69055 ++ address = PAGE_ALIGN(address+1);
69056 ++ else
69057 ++ return -ENOMEM;
69058 ++
69059 + /*
69060 + * We must make sure the anon_vma is allocated
69061 + * so that the anon_vma locking is not a noop.
69062 + */
69063 + if (unlikely(anon_vma_prepare(vma)))
69064 + return -ENOMEM;
69065 ++ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69066 ++ if (locknext && anon_vma_prepare(vma->vm_next))
69067 ++ return -ENOMEM;
69068 + vma_lock_anon_vma(vma);
69069 ++ if (locknext)
69070 ++ vma_lock_anon_vma(vma->vm_next);
69071 +
69072 + /*
69073 + * vma->vm_start/vm_end cannot change under us because the caller
69074 + * is required to hold the mmap_sem in read mode. We need the
69075 +- * anon_vma lock to serialize against concurrent expand_stacks.
69076 +- * Also guard against wrapping around to address 0.
69077 ++ * anon_vma locks to serialize against concurrent expand_stacks
69078 ++ * and expand_upwards.
69079 + */
69080 +- if (address < PAGE_ALIGN(address+4))
69081 +- address = PAGE_ALIGN(address+4);
69082 +- else {
69083 +- vma_unlock_anon_vma(vma);
69084 +- return -ENOMEM;
69085 +- }
69086 + error = 0;
69087 +
69088 + /* Somebody else might have raced and expanded it already */
69089 +- if (address > vma->vm_end) {
69090 ++ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69091 ++ error = -ENOMEM;
69092 ++ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69093 + unsigned long size, grow;
69094 +
69095 + size = address - vma->vm_start;
69096 +@@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct
69097 + }
69098 + }
69099 + }
69100 ++ if (locknext)
69101 ++ vma_unlock_anon_vma(vma->vm_next);
69102 + vma_unlock_anon_vma(vma);
69103 + khugepaged_enter_vma_merge(vma);
69104 + return error;
69105 +@@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_stru
69106 + unsigned long address)
69107 + {
69108 + int error;
69109 ++ bool lockprev = false;
69110 ++ struct vm_area_struct *prev;
69111 +
69112 + /*
69113 + * We must make sure the anon_vma is allocated
69114 +@@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_stru
69115 + if (error)
69116 + return error;
69117 +
69118 ++ prev = vma->vm_prev;
69119 ++#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69120 ++ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69121 ++#endif
69122 ++ if (lockprev && anon_vma_prepare(prev))
69123 ++ return -ENOMEM;
69124 ++ if (lockprev)
69125 ++ vma_lock_anon_vma(prev);
69126 ++
69127 + vma_lock_anon_vma(vma);
69128 +
69129 + /*
69130 +@@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_stru
69131 + */
69132 +
69133 + /* Somebody else might have raced and expanded it already */
69134 +- if (address < vma->vm_start) {
69135 ++ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69136 ++ error = -ENOMEM;
69137 ++ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69138 + unsigned long size, grow;
69139 +
69140 ++#ifdef CONFIG_PAX_SEGMEXEC
69141 ++ struct vm_area_struct *vma_m;
69142 ++
69143 ++ vma_m = pax_find_mirror_vma(vma);
69144 ++#endif
69145 ++
69146 + size = vma->vm_end - address;
69147 + grow = (vma->vm_start - address) >> PAGE_SHIFT;
69148 +
69149 +@@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_stru
69150 + if (!error) {
69151 + vma->vm_start = address;
69152 + vma->vm_pgoff -= grow;
69153 ++ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69154 ++
69155 ++#ifdef CONFIG_PAX_SEGMEXEC
69156 ++ if (vma_m) {
69157 ++ vma_m->vm_start -= grow << PAGE_SHIFT;
69158 ++ vma_m->vm_pgoff -= grow;
69159 ++ }
69160 ++#endif
69161 ++
69162 + perf_event_mmap(vma);
69163 + }
69164 + }
69165 + }
69166 + vma_unlock_anon_vma(vma);
69167 ++ if (lockprev)
69168 ++ vma_unlock_anon_vma(prev);
69169 + khugepaged_enter_vma_merge(vma);
69170 + return error;
69171 + }
69172 +@@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_st
69173 + do {
69174 + long nrpages = vma_pages(vma);
69175 +
69176 ++#ifdef CONFIG_PAX_SEGMEXEC
69177 ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69178 ++ vma = remove_vma(vma);
69179 ++ continue;
69180 ++ }
69181 ++#endif
69182 ++
69183 + mm->total_vm -= nrpages;
69184 + vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69185 + vma = remove_vma(vma);
69186 +@@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_str
69187 + insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69188 + vma->vm_prev = NULL;
69189 + do {
69190 ++
69191 ++#ifdef CONFIG_PAX_SEGMEXEC
69192 ++ if (vma->vm_mirror) {
69193 ++ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69194 ++ vma->vm_mirror->vm_mirror = NULL;
69195 ++ vma->vm_mirror->vm_flags &= ~VM_EXEC;
69196 ++ vma->vm_mirror = NULL;
69197 ++ }
69198 ++#endif
69199 ++
69200 + rb_erase(&vma->vm_rb, &mm->mm_rb);
69201 + mm->map_count--;
69202 + tail_vma = vma;
69203 +@@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct
69204 + struct vm_area_struct *new;
69205 + int err = -ENOMEM;
69206 +
69207 ++#ifdef CONFIG_PAX_SEGMEXEC
69208 ++ struct vm_area_struct *vma_m, *new_m = NULL;
69209 ++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69210 ++#endif
69211 ++
69212 + if (is_vm_hugetlb_page(vma) && (addr &
69213 + ~(huge_page_mask(hstate_vma(vma)))))
69214 + return -EINVAL;
69215 +
69216 ++#ifdef CONFIG_PAX_SEGMEXEC
69217 ++ vma_m = pax_find_mirror_vma(vma);
69218 ++#endif
69219 ++
69220 + new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69221 + if (!new)
69222 + goto out_err;
69223 +
69224 ++#ifdef CONFIG_PAX_SEGMEXEC
69225 ++ if (vma_m) {
69226 ++ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69227 ++ if (!new_m) {
69228 ++ kmem_cache_free(vm_area_cachep, new);
69229 ++ goto out_err;
69230 ++ }
69231 ++ }
69232 ++#endif
69233 ++
69234 + /* most fields are the same, copy all, and then fixup */
69235 + *new = *vma;
69236 +
69237 +@@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct
69238 + new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69239 + }
69240 +
69241 ++#ifdef CONFIG_PAX_SEGMEXEC
69242 ++ if (vma_m) {
69243 ++ *new_m = *vma_m;
69244 ++ INIT_LIST_HEAD(&new_m->anon_vma_chain);
69245 ++ new_m->vm_mirror = new;
69246 ++ new->vm_mirror = new_m;
69247 ++
69248 ++ if (new_below)
69249 ++ new_m->vm_end = addr_m;
69250 ++ else {
69251 ++ new_m->vm_start = addr_m;
69252 ++ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69253 ++ }
69254 ++ }
69255 ++#endif
69256 ++
69257 + pol = mpol_dup(vma_policy(vma));
69258 + if (IS_ERR(pol)) {
69259 + err = PTR_ERR(pol);
69260 +@@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct
69261 + else
69262 + err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69263 +
69264 ++#ifdef CONFIG_PAX_SEGMEXEC
69265 ++ if (!err && vma_m) {
69266 ++ if (anon_vma_clone(new_m, vma_m))
69267 ++ goto out_free_mpol;
69268 ++
69269 ++ mpol_get(pol);
69270 ++ vma_set_policy(new_m, pol);
69271 ++
69272 ++ if (new_m->vm_file) {
69273 ++ get_file(new_m->vm_file);
69274 ++ if (vma_m->vm_flags & VM_EXECUTABLE)
69275 ++ added_exe_file_vma(mm);
69276 ++ }
69277 ++
69278 ++ if (new_m->vm_ops && new_m->vm_ops->open)
69279 ++ new_m->vm_ops->open(new_m);
69280 ++
69281 ++ if (new_below)
69282 ++ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69283 ++ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69284 ++ else
69285 ++ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69286 ++
69287 ++ if (err) {
69288 ++ if (new_m->vm_ops && new_m->vm_ops->close)
69289 ++ new_m->vm_ops->close(new_m);
69290 ++ if (new_m->vm_file) {
69291 ++ if (vma_m->vm_flags & VM_EXECUTABLE)
69292 ++ removed_exe_file_vma(mm);
69293 ++ fput(new_m->vm_file);
69294 ++ }
69295 ++ mpol_put(pol);
69296 ++ }
69297 ++ }
69298 ++#endif
69299 ++
69300 + /* Success. */
69301 + if (!err)
69302 + return 0;
69303 +@@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct
69304 + removed_exe_file_vma(mm);
69305 + fput(new->vm_file);
69306 + }
69307 +- unlink_anon_vmas(new);
69308 + out_free_mpol:
69309 + mpol_put(pol);
69310 + out_free_vma:
69311 ++
69312 ++#ifdef CONFIG_PAX_SEGMEXEC
69313 ++ if (new_m) {
69314 ++ unlink_anon_vmas(new_m);
69315 ++ kmem_cache_free(vm_area_cachep, new_m);
69316 ++ }
69317 ++#endif
69318 ++
69319 ++ unlink_anon_vmas(new);
69320 + kmem_cache_free(vm_area_cachep, new);
69321 + out_err:
69322 + return err;
69323 +@@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct
69324 + int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69325 + unsigned long addr, int new_below)
69326 + {
69327 ++
69328 ++#ifdef CONFIG_PAX_SEGMEXEC
69329 ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69330 ++ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69331 ++ if (mm->map_count >= sysctl_max_map_count-1)
69332 ++ return -ENOMEM;
69333 ++ } else
69334 ++#endif
69335 ++
69336 + if (mm->map_count >= sysctl_max_map_count)
69337 + return -ENOMEM;
69338 +
69339 +@@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, stru
69340 + * work. This now handles partial unmappings.
69341 + * Jeremy Fitzhardinge <jeremy@××××.org>
69342 + */
69343 ++#ifdef CONFIG_PAX_SEGMEXEC
69344 + int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69345 + {
69346 ++ int ret = __do_munmap(mm, start, len);
69347 ++ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69348 ++ return ret;
69349 ++
69350 ++ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69351 ++}
69352 ++
69353 ++int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69354 ++#else
69355 ++int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69356 ++#endif
69357 ++{
69358 + unsigned long end;
69359 + struct vm_area_struct *vma, *prev, *last;
69360 +
69361 ++ /*
69362 ++ * mm->mmap_sem is required to protect against another thread
69363 ++ * changing the mappings in case we sleep.
69364 ++ */
69365 ++ verify_mm_writelocked(mm);
69366 ++
69367 + if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69368 + return -EINVAL;
69369 +
69370 +@@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsi
69371 + /* Fix up all other VM information */
69372 + remove_vma_list(mm, vma);
69373 +
69374 ++ track_exec_limit(mm, start, end, 0UL);
69375 ++
69376 + return 0;
69377 + }
69378 +
69379 +@@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
69380 +
69381 + profile_munmap(addr);
69382 +
69383 ++#ifdef CONFIG_PAX_SEGMEXEC
69384 ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69385 ++ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69386 ++ return -EINVAL;
69387 ++#endif
69388 ++
69389 + down_write(&mm->mmap_sem);
69390 + ret = do_munmap(mm, addr, len);
69391 + up_write(&mm->mmap_sem);
69392 + return ret;
69393 + }
69394 +
69395 +-static inline void verify_mm_writelocked(struct mm_struct *mm)
69396 +-{
69397 +-#ifdef CONFIG_DEBUG_VM
69398 +- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69399 +- WARN_ON(1);
69400 +- up_read(&mm->mmap_sem);
69401 +- }
69402 +-#endif
69403 +-}
69404 +-
69405 + /*
69406 + * this is really a simplified "do_mmap". it only handles
69407 + * anonymous maps. eventually we may be able to do some
69408 +@@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr,
69409 + struct rb_node ** rb_link, * rb_parent;
69410 + pgoff_t pgoff = addr >> PAGE_SHIFT;
69411 + int error;
69412 ++ unsigned long charged;
69413 +
69414 + len = PAGE_ALIGN(len);
69415 + if (!len)
69416 +@@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr,
69417 +
69418 + flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69419 +
69420 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69421 ++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69422 ++ flags &= ~VM_EXEC;
69423 ++
69424 ++#ifdef CONFIG_PAX_MPROTECT
69425 ++ if (mm->pax_flags & MF_PAX_MPROTECT)
69426 ++ flags &= ~VM_MAYEXEC;
69427 ++#endif
69428 ++
69429 ++ }
69430 ++#endif
69431 ++
69432 + error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69433 + if (error & ~PAGE_MASK)
69434 + return error;
69435 +
69436 ++ charged = len >> PAGE_SHIFT;
69437 ++
69438 + /*
69439 + * mlock MCL_FUTURE?
69440 + */
69441 + if (mm->def_flags & VM_LOCKED) {
69442 + unsigned long locked, lock_limit;
69443 +- locked = len >> PAGE_SHIFT;
69444 ++ locked = charged;
69445 + locked += mm->locked_vm;
69446 + lock_limit = rlimit(RLIMIT_MEMLOCK);
69447 + lock_limit >>= PAGE_SHIFT;
69448 +@@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr,
69449 + /*
69450 + * Clear old maps. this also does some error checking for us
69451 + */
69452 +- munmap_back:
69453 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69454 + if (vma && vma->vm_start < addr + len) {
69455 + if (do_munmap(mm, addr, len))
69456 + return -ENOMEM;
69457 +- goto munmap_back;
69458 ++ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69459 ++ BUG_ON(vma && vma->vm_start < addr + len);
69460 + }
69461 +
69462 + /* Check against address space limits *after* clearing old maps... */
69463 +- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69464 ++ if (!may_expand_vm(mm, charged))
69465 + return -ENOMEM;
69466 +
69467 + if (mm->map_count > sysctl_max_map_count)
69468 + return -ENOMEM;
69469 +
69470 +- if (security_vm_enough_memory(len >> PAGE_SHIFT))
69471 ++ if (security_vm_enough_memory(charged))
69472 + return -ENOMEM;
69473 +
69474 + /* Can we just expand an old private anonymous mapping? */
69475 +@@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr,
69476 + */
69477 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69478 + if (!vma) {
69479 +- vm_unacct_memory(len >> PAGE_SHIFT);
69480 ++ vm_unacct_memory(charged);
69481 + return -ENOMEM;
69482 + }
69483 +
69484 +@@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr,
69485 + vma_link(mm, vma, prev, rb_link, rb_parent);
69486 + out:
69487 + perf_event_mmap(vma);
69488 +- mm->total_vm += len >> PAGE_SHIFT;
69489 ++ mm->total_vm += charged;
69490 + if (flags & VM_LOCKED) {
69491 + if (!mlock_vma_pages_range(vma, addr, addr + len))
69492 +- mm->locked_vm += (len >> PAGE_SHIFT);
69493 ++ mm->locked_vm += charged;
69494 + }
69495 ++ track_exec_limit(mm, addr, addr + len, flags);
69496 + return addr;
69497 + }
69498 +
69499 +@@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm)
69500 + * Walk the list again, actually closing and freeing it,
69501 + * with preemption enabled, without holding any MM locks.
69502 + */
69503 +- while (vma)
69504 ++ while (vma) {
69505 ++ vma->vm_mirror = NULL;
69506 + vma = remove_vma(vma);
69507 ++ }
69508 +
69509 + BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69510 + }
69511 +@@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct *
69512 + struct vm_area_struct * __vma, * prev;
69513 + struct rb_node ** rb_link, * rb_parent;
69514 +
69515 ++#ifdef CONFIG_PAX_SEGMEXEC
69516 ++ struct vm_area_struct *vma_m = NULL;
69517 ++#endif
69518 ++
69519 ++ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
69520 ++ return -EPERM;
69521 ++
69522 + /*
69523 + * The vm_pgoff of a purely anonymous vma should be irrelevant
69524 + * until its first write fault, when page's anon_vma and index
69525 +@@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct *
69526 + if ((vma->vm_flags & VM_ACCOUNT) &&
69527 + security_vm_enough_memory_mm(mm, vma_pages(vma)))
69528 + return -ENOMEM;
69529 ++
69530 ++#ifdef CONFIG_PAX_SEGMEXEC
69531 ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69532 ++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69533 ++ if (!vma_m)
69534 ++ return -ENOMEM;
69535 ++ }
69536 ++#endif
69537 ++
69538 + vma_link(mm, vma, prev, rb_link, rb_parent);
69539 ++
69540 ++#ifdef CONFIG_PAX_SEGMEXEC
69541 ++ if (vma_m)
69542 ++ BUG_ON(pax_mirror_vma(vma_m, vma));
69543 ++#endif
69544 ++
69545 + return 0;
69546 + }
69547 +
69548 +@@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct v
69549 + struct rb_node **rb_link, *rb_parent;
69550 + struct mempolicy *pol;
69551 +
69552 ++ BUG_ON(vma->vm_mirror);
69553 ++
69554 + /*
69555 + * If anonymous vma has not yet been faulted, update new pgoff
69556 + * to match new location, to increase its chance of merging.
69557 +@@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct v
69558 + return NULL;
69559 + }
69560 +
69561 ++#ifdef CONFIG_PAX_SEGMEXEC
69562 ++long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69563 ++{
69564 ++ struct vm_area_struct *prev_m;
69565 ++ struct rb_node **rb_link_m, *rb_parent_m;
69566 ++ struct mempolicy *pol_m;
69567 ++
69568 ++ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69569 ++ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69570 ++ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69571 ++ *vma_m = *vma;
69572 ++ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
69573 ++ if (anon_vma_clone(vma_m, vma))
69574 ++ return -ENOMEM;
69575 ++ pol_m = vma_policy(vma_m);
69576 ++ mpol_get(pol_m);
69577 ++ vma_set_policy(vma_m, pol_m);
69578 ++ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69579 ++ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69580 ++ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69581 ++ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69582 ++ if (vma_m->vm_file)
69583 ++ get_file(vma_m->vm_file);
69584 ++ if (vma_m->vm_ops && vma_m->vm_ops->open)
69585 ++ vma_m->vm_ops->open(vma_m);
69586 ++ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69587 ++ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69588 ++ vma_m->vm_mirror = vma;
69589 ++ vma->vm_mirror = vma_m;
69590 ++ return 0;
69591 ++}
69592 ++#endif
69593 ++
69594 + /*
69595 + * Return true if the calling process may expand its vm space by the passed
69596 + * number of pages
69597 +@@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm,
69598 + unsigned long lim;
69599 +
69600 + lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
69601 +-
69602 ++ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69603 + if (cur + npages > lim)
69604 + return 0;
69605 + return 1;
69606 +@@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_st
69607 + vma->vm_start = addr;
69608 + vma->vm_end = addr + len;
69609 +
69610 ++#ifdef CONFIG_PAX_MPROTECT
69611 ++ if (mm->pax_flags & MF_PAX_MPROTECT) {
69612 ++#ifndef CONFIG_PAX_MPROTECT_COMPAT
69613 ++ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69614 ++ return -EPERM;
69615 ++ if (!(vm_flags & VM_EXEC))
69616 ++ vm_flags &= ~VM_MAYEXEC;
69617 ++#else
69618 ++ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69619 ++ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69620 ++#endif
69621 ++ else
69622 ++ vm_flags &= ~VM_MAYWRITE;
69623 ++ }
69624 ++#endif
69625 ++
69626 + vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69627 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69628 +
69629 +diff -urNp linux-3.1.1/mm/mprotect.c linux-3.1.1/mm/mprotect.c
69630 +--- linux-3.1.1/mm/mprotect.c 2011-11-11 15:19:27.000000000 -0500
69631 ++++ linux-3.1.1/mm/mprotect.c 2011-11-16 18:40:44.000000000 -0500
69632 +@@ -23,10 +23,16 @@
69633 + #include <linux/mmu_notifier.h>
69634 + #include <linux/migrate.h>
69635 + #include <linux/perf_event.h>
69636 ++
69637 ++#ifdef CONFIG_PAX_MPROTECT
69638 ++#include <linux/elf.h>
69639 ++#endif
69640 ++
69641 + #include <asm/uaccess.h>
69642 + #include <asm/pgtable.h>
69643 + #include <asm/cacheflush.h>
69644 + #include <asm/tlbflush.h>
69645 ++#include <asm/mmu_context.h>
69646 +
69647 + #ifndef pgprot_modify
69648 + static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69649 +@@ -141,6 +147,48 @@ static void change_protection(struct vm_
69650 + flush_tlb_range(vma, start, end);
69651 + }
69652 +
69653 ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69654 ++/* called while holding the mmap semaphore for writing except stack expansion */
69655 ++void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69656 ++{
69657 ++ unsigned long oldlimit, newlimit = 0UL;
69658 ++
69659 ++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
69660 ++ return;
69661 ++
69662 ++ spin_lock(&mm->page_table_lock);
69663 ++ oldlimit = mm->context.user_cs_limit;
69664 ++ if ((prot & VM_EXEC) && oldlimit < end)
69665 ++ /* USER_CS limit moved up */
69666 ++ newlimit = end;
69667 ++ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69668 ++ /* USER_CS limit moved down */
69669 ++ newlimit = start;
69670 ++
69671 ++ if (newlimit) {
69672 ++ mm->context.user_cs_limit = newlimit;
69673 ++
69674 ++#ifdef CONFIG_SMP
69675 ++ wmb();
69676 ++ cpus_clear(mm->context.cpu_user_cs_mask);
69677 ++ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69678 ++#endif
69679 ++
69680 ++ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69681 ++ }
69682 ++ spin_unlock(&mm->page_table_lock);
69683 ++ if (newlimit == end) {
69684 ++ struct vm_area_struct *vma = find_vma(mm, oldlimit);
69685 ++
69686 ++ for (; vma && vma->vm_start < end; vma = vma->vm_next)
69687 ++ if (is_vm_hugetlb_page(vma))
69688 ++ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69689 ++ else
69690 ++ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69691 ++ }
69692 ++}
69693 ++#endif
69694 ++
69695 + int
69696 + mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69697 + unsigned long start, unsigned long end, unsigned long newflags)
69698 +@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
69699 + int error;
69700 + int dirty_accountable = 0;
69701 +
69702 ++#ifdef CONFIG_PAX_SEGMEXEC
69703 ++ struct vm_area_struct *vma_m = NULL;
69704 ++ unsigned long start_m, end_m;
69705 ++
69706 ++ start_m = start + SEGMEXEC_TASK_SIZE;
69707 ++ end_m = end + SEGMEXEC_TASK_SIZE;
69708 ++#endif
69709 ++
69710 + if (newflags == oldflags) {
69711 + *pprev = vma;
69712 + return 0;
69713 + }
69714 +
69715 ++ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69716 ++ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69717 ++
69718 ++ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69719 ++ return -ENOMEM;
69720 ++
69721 ++ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69722 ++ return -ENOMEM;
69723 ++ }
69724 ++
69725 + /*
69726 + * If we make a private mapping writable we increase our commit;
69727 + * but (without finer accounting) cannot reduce our commit if we
69728 +@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
69729 + }
69730 + }
69731 +
69732 ++#ifdef CONFIG_PAX_SEGMEXEC
69733 ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69734 ++ if (start != vma->vm_start) {
69735 ++ error = split_vma(mm, vma, start, 1);
69736 ++ if (error)
69737 ++ goto fail;
69738 ++ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69739 ++ *pprev = (*pprev)->vm_next;
69740 ++ }
69741 ++
69742 ++ if (end != vma->vm_end) {
69743 ++ error = split_vma(mm, vma, end, 0);
69744 ++ if (error)
69745 ++ goto fail;
69746 ++ }
69747 ++
69748 ++ if (pax_find_mirror_vma(vma)) {
69749 ++ error = __do_munmap(mm, start_m, end_m - start_m);
69750 ++ if (error)
69751 ++ goto fail;
69752 ++ } else {
69753 ++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69754 ++ if (!vma_m) {
69755 ++ error = -ENOMEM;
69756 ++ goto fail;
69757 ++ }
69758 ++ vma->vm_flags = newflags;
69759 ++ error = pax_mirror_vma(vma_m, vma);
69760 ++ if (error) {
69761 ++ vma->vm_flags = oldflags;
69762 ++ goto fail;
69763 ++ }
69764 ++ }
69765 ++ }
69766 ++#endif
69767 ++
69768 + /*
69769 + * First try to merge with previous and/or next vma.
69770 + */
69771 +@@ -204,9 +306,21 @@ success:
69772 + * vm_flags and vm_page_prot are protected by the mmap_sem
69773 + * held in write mode.
69774 + */
69775 ++
69776 ++#ifdef CONFIG_PAX_SEGMEXEC
69777 ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69778 ++ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69779 ++#endif
69780 ++
69781 + vma->vm_flags = newflags;
69782 ++
69783 ++#ifdef CONFIG_PAX_MPROTECT
69784 ++ if (mm->binfmt && mm->binfmt->handle_mprotect)
69785 ++ mm->binfmt->handle_mprotect(vma, newflags);
69786 ++#endif
69787 ++
69788 + vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69789 +- vm_get_page_prot(newflags));
69790 ++ vm_get_page_prot(vma->vm_flags));
69791 +
69792 + if (vma_wants_writenotify(vma)) {
69793 + vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69794 +@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69795 + end = start + len;
69796 + if (end <= start)
69797 + return -ENOMEM;
69798 ++
69799 ++#ifdef CONFIG_PAX_SEGMEXEC
69800 ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69801 ++ if (end > SEGMEXEC_TASK_SIZE)
69802 ++ return -EINVAL;
69803 ++ } else
69804 ++#endif
69805 ++
69806 ++ if (end > TASK_SIZE)
69807 ++ return -EINVAL;
69808 ++
69809 + if (!arch_validate_prot(prot))
69810 + return -EINVAL;
69811 +
69812 +@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69813 + /*
69814 + * Does the application expect PROT_READ to imply PROT_EXEC:
69815 + */
69816 +- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69817 ++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69818 + prot |= PROT_EXEC;
69819 +
69820 + vm_flags = calc_vm_prot_bits(prot);
69821 +@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69822 + if (start > vma->vm_start)
69823 + prev = vma;
69824 +
69825 ++#ifdef CONFIG_PAX_MPROTECT
69826 ++ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69827 ++ current->mm->binfmt->handle_mprotect(vma, vm_flags);
69828 ++#endif
69829 ++
69830 + for (nstart = start ; ; ) {
69831 + unsigned long newflags;
69832 +
69833 +@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69834 +
69835 + /* newflags >> 4 shift VM_MAY% in place of VM_% */
69836 + if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69837 ++ if (prot & (PROT_WRITE | PROT_EXEC))
69838 ++ gr_log_rwxmprotect(vma->vm_file);
69839 ++
69840 ++ error = -EACCES;
69841 ++ goto out;
69842 ++ }
69843 ++
69844 ++ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69845 + error = -EACCES;
69846 + goto out;
69847 + }
69848 +@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69849 + error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69850 + if (error)
69851 + goto out;
69852 ++
69853 ++ track_exec_limit(current->mm, nstart, tmp, vm_flags);
69854 ++
69855 + nstart = tmp;
69856 +
69857 + if (nstart < prev->vm_end)
69858 +diff -urNp linux-3.1.1/mm/mremap.c linux-3.1.1/mm/mremap.c
69859 +--- linux-3.1.1/mm/mremap.c 2011-11-11 15:19:27.000000000 -0500
69860 ++++ linux-3.1.1/mm/mremap.c 2011-11-16 18:39:08.000000000 -0500
69861 +@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
69862 + continue;
69863 + pte = ptep_clear_flush(vma, old_addr, old_pte);
69864 + pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69865 ++
69866 ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69867 ++ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69868 ++ pte = pte_exprotect(pte);
69869 ++#endif
69870 ++
69871 + set_pte_at(mm, new_addr, new_pte, pte);
69872 + }
69873 +
69874 +@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
69875 + if (is_vm_hugetlb_page(vma))
69876 + goto Einval;
69877 +
69878 ++#ifdef CONFIG_PAX_SEGMEXEC
69879 ++ if (pax_find_mirror_vma(vma))
69880 ++ goto Einval;
69881 ++#endif
69882 ++
69883 + /* We can't remap across vm area boundaries */
69884 + if (old_len > vma->vm_end - addr)
69885 + goto Efault;
69886 +@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned
69887 + unsigned long ret = -EINVAL;
69888 + unsigned long charged = 0;
69889 + unsigned long map_flags;
69890 ++ unsigned long pax_task_size = TASK_SIZE;
69891 +
69892 + if (new_addr & ~PAGE_MASK)
69893 + goto out;
69894 +
69895 +- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69896 ++#ifdef CONFIG_PAX_SEGMEXEC
69897 ++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69898 ++ pax_task_size = SEGMEXEC_TASK_SIZE;
69899 ++#endif
69900 ++
69901 ++ pax_task_size -= PAGE_SIZE;
69902 ++
69903 ++ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69904 + goto out;
69905 +
69906 + /* Check if the location we're moving into overlaps the
69907 + * old location at all, and fail if it does.
69908 + */
69909 +- if ((new_addr <= addr) && (new_addr+new_len) > addr)
69910 +- goto out;
69911 +-
69912 +- if ((addr <= new_addr) && (addr+old_len) > new_addr)
69913 ++ if (addr + old_len > new_addr && new_addr + new_len > addr)
69914 + goto out;
69915 +
69916 + ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69917 +@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long ad
69918 + struct vm_area_struct *vma;
69919 + unsigned long ret = -EINVAL;
69920 + unsigned long charged = 0;
69921 ++ unsigned long pax_task_size = TASK_SIZE;
69922 +
69923 + if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69924 + goto out;
69925 +@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long ad
69926 + if (!new_len)
69927 + goto out;
69928 +
69929 ++#ifdef CONFIG_PAX_SEGMEXEC
69930 ++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69931 ++ pax_task_size = SEGMEXEC_TASK_SIZE;
69932 ++#endif
69933 ++
69934 ++ pax_task_size -= PAGE_SIZE;
69935 ++
69936 ++ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69937 ++ old_len > pax_task_size || addr > pax_task_size-old_len)
69938 ++ goto out;
69939 ++
69940 + if (flags & MREMAP_FIXED) {
69941 + if (flags & MREMAP_MAYMOVE)
69942 + ret = mremap_to(addr, old_len, new_addr, new_len);
69943 +@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long ad
69944 + addr + new_len);
69945 + }
69946 + ret = addr;
69947 ++ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69948 + goto out;
69949 + }
69950 + }
69951 +@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long ad
69952 + ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69953 + if (ret)
69954 + goto out;
69955 ++
69956 ++ map_flags = vma->vm_flags;
69957 + ret = move_vma(vma, addr, old_len, new_len, new_addr);
69958 ++ if (!(ret & ~PAGE_MASK)) {
69959 ++ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69960 ++ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69961 ++ }
69962 + }
69963 + out:
69964 + if (ret & ~PAGE_MASK)
69965 +diff -urNp linux-3.1.1/mm/nobootmem.c linux-3.1.1/mm/nobootmem.c
69966 +--- linux-3.1.1/mm/nobootmem.c 2011-11-11 15:19:27.000000000 -0500
69967 ++++ linux-3.1.1/mm/nobootmem.c 2011-11-16 18:39:08.000000000 -0500
69968 +@@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
69969 + unsigned long __init free_all_memory_core_early(int nodeid)
69970 + {
69971 + int i;
69972 +- u64 start, end;
69973 ++ u64 start, end, startrange, endrange;
69974 + unsigned long count = 0;
69975 +- struct range *range = NULL;
69976 ++ struct range *range = NULL, rangerange = { 0, 0 };
69977 + int nr_range;
69978 +
69979 + nr_range = get_free_all_memory_range(&range, nodeid);
69980 ++ startrange = __pa(range) >> PAGE_SHIFT;
69981 ++ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
69982 +
69983 + for (i = 0; i < nr_range; i++) {
69984 + start = range[i].start;
69985 + end = range[i].end;
69986 ++ if (start <= endrange && startrange < end) {
69987 ++ BUG_ON(rangerange.start | rangerange.end);
69988 ++ rangerange = range[i];
69989 ++ continue;
69990 ++ }
69991 + count += end - start;
69992 + __free_pages_memory(start, end);
69993 + }
69994 ++ start = rangerange.start;
69995 ++ end = rangerange.end;
69996 ++ count += end - start;
69997 ++ __free_pages_memory(start, end);
69998 +
69999 + return count;
70000 + }
70001 +diff -urNp linux-3.1.1/mm/nommu.c linux-3.1.1/mm/nommu.c
70002 +--- linux-3.1.1/mm/nommu.c 2011-11-11 15:19:27.000000000 -0500
70003 ++++ linux-3.1.1/mm/nommu.c 2011-11-16 18:39:08.000000000 -0500
70004 +@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMI
70005 + int sysctl_overcommit_ratio = 50; /* default is 50% */
70006 + int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70007 + int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70008 +-int heap_stack_gap = 0;
70009 +
70010 + atomic_long_t mmap_pages_allocated;
70011 +
70012 +@@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct m
70013 + EXPORT_SYMBOL(find_vma);
70014 +
70015 + /*
70016 +- * find a VMA
70017 +- * - we don't extend stack VMAs under NOMMU conditions
70018 +- */
70019 +-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70020 +-{
70021 +- return find_vma(mm, addr);
70022 +-}
70023 +-
70024 +-/*
70025 + * expand a stack to a given address
70026 + * - not supported under NOMMU conditions
70027 + */
70028 +@@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, stru
70029 +
70030 + /* most fields are the same, copy all, and then fixup */
70031 + *new = *vma;
70032 ++ INIT_LIST_HEAD(&new->anon_vma_chain);
70033 + *region = *vma->vm_region;
70034 + new->vm_region = region;
70035 +
70036 +diff -urNp linux-3.1.1/mm/oom_kill.c linux-3.1.1/mm/oom_kill.c
70037 +--- linux-3.1.1/mm/oom_kill.c 2011-11-11 15:19:27.000000000 -0500
70038 ++++ linux-3.1.1/mm/oom_kill.c 2011-11-18 18:44:21.000000000 -0500
70039 +@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct t
70040 + unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
70041 + const nodemask_t *nodemask, unsigned long totalpages)
70042 + {
70043 +- int points;
70044 ++ long points;
70045 +
70046 + if (oom_unkillable_task(p, mem, nodemask))
70047 + return 0;
70048 +diff -urNp linux-3.1.1/mm/page_alloc.c linux-3.1.1/mm/page_alloc.c
70049 +--- linux-3.1.1/mm/page_alloc.c 2011-11-11 15:19:27.000000000 -0500
70050 ++++ linux-3.1.1/mm/page_alloc.c 2011-11-16 18:40:44.000000000 -0500
70051 +@@ -340,7 +340,7 @@ out:
70052 + * This usage means that zero-order pages may not be compound.
70053 + */
70054 +
70055 +-static void free_compound_page(struct page *page)
70056 ++void free_compound_page(struct page *page)
70057 + {
70058 + __free_pages_ok(page, compound_order(page));
70059 + }
70060 +@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct pa
70061 + int i;
70062 + int bad = 0;
70063 +
70064 ++#ifdef CONFIG_PAX_MEMORY_SANITIZE
70065 ++ unsigned long index = 1UL << order;
70066 ++#endif
70067 ++
70068 + trace_mm_page_free_direct(page, order);
70069 + kmemcheck_free_shadow(page, order);
70070 +
70071 +@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct pa
70072 + debug_check_no_obj_freed(page_address(page),
70073 + PAGE_SIZE << order);
70074 + }
70075 ++
70076 ++#ifdef CONFIG_PAX_MEMORY_SANITIZE
70077 ++ for (; index; --index)
70078 ++ sanitize_highpage(page + index - 1);
70079 ++#endif
70080 ++
70081 + arch_free_page(page, order);
70082 + kernel_map_pages(page, 1 << order, 0);
70083 +
70084 +@@ -783,8 +793,10 @@ static int prep_new_page(struct page *pa
70085 + arch_alloc_page(page, order);
70086 + kernel_map_pages(page, 1 << order, 1);
70087 +
70088 ++#ifndef CONFIG_PAX_MEMORY_SANITIZE
70089 + if (gfp_flags & __GFP_ZERO)
70090 + prep_zero_page(page, order, gfp_flags);
70091 ++#endif
70092 +
70093 + if (order && (gfp_flags & __GFP_COMP))
70094 + prep_compound_page(page, order);
70095 +@@ -2539,6 +2551,8 @@ void show_free_areas(unsigned int filter
70096 + int cpu;
70097 + struct zone *zone;
70098 +
70099 ++ pax_track_stack();
70100 ++
70101 + for_each_populated_zone(zone) {
70102 + if (skip_free_areas_node(filter, zone_to_nid(zone)))
70103 + continue;
70104 +@@ -3350,7 +3364,13 @@ static int pageblock_is_reserved(unsigne
70105 + unsigned long pfn;
70106 +
70107 + for (pfn = start_pfn; pfn < end_pfn; pfn++) {
70108 ++#ifdef CONFIG_X86_32
70109 ++ /* boot failures in VMware 8 on 32bit vanilla since
70110 ++ this change */
70111 ++ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
70112 ++#else
70113 + if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
70114 ++#endif
70115 + return 1;
70116 + }
70117 + return 0;
70118 +diff -urNp linux-3.1.1/mm/percpu.c linux-3.1.1/mm/percpu.c
70119 +--- linux-3.1.1/mm/percpu.c 2011-11-11 15:19:27.000000000 -0500
70120 ++++ linux-3.1.1/mm/percpu.c 2011-11-16 18:39:08.000000000 -0500
70121 +@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
70122 + static unsigned int pcpu_last_unit_cpu __read_mostly;
70123 +
70124 + /* the address of the first chunk which starts with the kernel static area */
70125 +-void *pcpu_base_addr __read_mostly;
70126 ++void *pcpu_base_addr __read_only;
70127 + EXPORT_SYMBOL_GPL(pcpu_base_addr);
70128 +
70129 + static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70130 +diff -urNp linux-3.1.1/mm/rmap.c linux-3.1.1/mm/rmap.c
70131 +--- linux-3.1.1/mm/rmap.c 2011-11-11 15:19:27.000000000 -0500
70132 ++++ linux-3.1.1/mm/rmap.c 2011-11-16 18:39:08.000000000 -0500
70133 +@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_stru
70134 + struct anon_vma *anon_vma = vma->anon_vma;
70135 + struct anon_vma_chain *avc;
70136 +
70137 ++#ifdef CONFIG_PAX_SEGMEXEC
70138 ++ struct anon_vma_chain *avc_m = NULL;
70139 ++#endif
70140 ++
70141 + might_sleep();
70142 + if (unlikely(!anon_vma)) {
70143 + struct mm_struct *mm = vma->vm_mm;
70144 +@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_stru
70145 + if (!avc)
70146 + goto out_enomem;
70147 +
70148 ++#ifdef CONFIG_PAX_SEGMEXEC
70149 ++ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70150 ++ if (!avc_m)
70151 ++ goto out_enomem_free_avc;
70152 ++#endif
70153 ++
70154 + anon_vma = find_mergeable_anon_vma(vma);
70155 + allocated = NULL;
70156 + if (!anon_vma) {
70157 +@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_stru
70158 + /* page_table_lock to protect against threads */
70159 + spin_lock(&mm->page_table_lock);
70160 + if (likely(!vma->anon_vma)) {
70161 ++
70162 ++#ifdef CONFIG_PAX_SEGMEXEC
70163 ++ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70164 ++
70165 ++ if (vma_m) {
70166 ++ BUG_ON(vma_m->anon_vma);
70167 ++ vma_m->anon_vma = anon_vma;
70168 ++ avc_m->anon_vma = anon_vma;
70169 ++ avc_m->vma = vma;
70170 ++ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70171 ++ list_add(&avc_m->same_anon_vma, &anon_vma->head);
70172 ++ avc_m = NULL;
70173 ++ }
70174 ++#endif
70175 ++
70176 + vma->anon_vma = anon_vma;
70177 + avc->anon_vma = anon_vma;
70178 + avc->vma = vma;
70179 +@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_stru
70180 +
70181 + if (unlikely(allocated))
70182 + put_anon_vma(allocated);
70183 ++
70184 ++#ifdef CONFIG_PAX_SEGMEXEC
70185 ++ if (unlikely(avc_m))
70186 ++ anon_vma_chain_free(avc_m);
70187 ++#endif
70188 ++
70189 + if (unlikely(avc))
70190 + anon_vma_chain_free(avc);
70191 + }
70192 + return 0;
70193 +
70194 + out_enomem_free_avc:
70195 ++
70196 ++#ifdef CONFIG_PAX_SEGMEXEC
70197 ++ if (avc_m)
70198 ++ anon_vma_chain_free(avc_m);
70199 ++#endif
70200 ++
70201 + anon_vma_chain_free(avc);
70202 + out_enomem:
70203 + return -ENOMEM;
70204 +@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct v
70205 + * Attach the anon_vmas from src to dst.
70206 + * Returns 0 on success, -ENOMEM on failure.
70207 + */
70208 +-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70209 ++int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70210 + {
70211 + struct anon_vma_chain *avc, *pavc;
70212 + struct anon_vma *root = NULL;
70213 +@@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct
70214 + * the corresponding VMA in the parent process is attached to.
70215 + * Returns 0 on success, non-zero on failure.
70216 + */
70217 +-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70218 ++int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70219 + {
70220 + struct anon_vma_chain *avc;
70221 + struct anon_vma *anon_vma;
70222 +diff -urNp linux-3.1.1/mm/shmem.c linux-3.1.1/mm/shmem.c
70223 +--- linux-3.1.1/mm/shmem.c 2011-11-11 15:19:27.000000000 -0500
70224 ++++ linux-3.1.1/mm/shmem.c 2011-11-16 19:28:28.000000000 -0500
70225 +@@ -31,7 +31,7 @@
70226 + #include <linux/module.h>
70227 + #include <linux/swap.h>
70228 +
70229 +-static struct vfsmount *shm_mnt;
70230 ++struct vfsmount *shm_mnt;
70231 +
70232 + #ifdef CONFIG_SHMEM
70233 + /*
70234 +@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
70235 + #define BOGO_DIRENT_SIZE 20
70236 +
70237 + /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
70238 +-#define SHORT_SYMLINK_LEN 128
70239 ++#define SHORT_SYMLINK_LEN 64
70240 +
70241 + struct shmem_xattr {
70242 + struct list_head list; /* anchored by shmem_inode_info->xattr_list */
70243 +@@ -769,6 +769,8 @@ static struct page *shmem_swapin(swp_ent
70244 + struct mempolicy mpol, *spol;
70245 + struct vm_area_struct pvma;
70246 +
70247 ++ pax_track_stack();
70248 ++
70249 + spol = mpol_cond_copy(&mpol,
70250 + mpol_shared_policy_lookup(&info->policy, index));
70251 +
70252 +@@ -2149,8 +2151,7 @@ int shmem_fill_super(struct super_block
70253 + int err = -ENOMEM;
70254 +
70255 + /* Round up to L1_CACHE_BYTES to resist false sharing */
70256 +- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70257 +- L1_CACHE_BYTES), GFP_KERNEL);
70258 ++ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70259 + if (!sbinfo)
70260 + return -ENOMEM;
70261 +
70262 +diff -urNp linux-3.1.1/mm/slab.c linux-3.1.1/mm/slab.c
70263 +--- linux-3.1.1/mm/slab.c 2011-11-11 15:19:27.000000000 -0500
70264 ++++ linux-3.1.1/mm/slab.c 2011-11-16 18:40:44.000000000 -0500
70265 +@@ -151,7 +151,7 @@
70266 +
70267 + /* Legal flag mask for kmem_cache_create(). */
70268 + #if DEBUG
70269 +-# define CREATE_MASK (SLAB_RED_ZONE | \
70270 ++# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70271 + SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70272 + SLAB_CACHE_DMA | \
70273 + SLAB_STORE_USER | \
70274 +@@ -159,7 +159,7 @@
70275 + SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70276 + SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70277 + #else
70278 +-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70279 ++# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70280 + SLAB_CACHE_DMA | \
70281 + SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70282 + SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70283 +@@ -288,7 +288,7 @@ struct kmem_list3 {
70284 + * Need this for bootstrapping a per node allocator.
70285 + */
70286 + #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70287 +-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70288 ++static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70289 + #define CACHE_CACHE 0
70290 + #define SIZE_AC MAX_NUMNODES
70291 + #define SIZE_L3 (2 * MAX_NUMNODES)
70292 +@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_
70293 + if ((x)->max_freeable < i) \
70294 + (x)->max_freeable = i; \
70295 + } while (0)
70296 +-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70297 +-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70298 +-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70299 +-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70300 ++#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70301 ++#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70302 ++#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70303 ++#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70304 + #else
70305 + #define STATS_INC_ACTIVE(x) do { } while (0)
70306 + #define STATS_DEC_ACTIVE(x) do { } while (0)
70307 +@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct
70308 + * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70309 + */
70310 + static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70311 +- const struct slab *slab, void *obj)
70312 ++ const struct slab *slab, const void *obj)
70313 + {
70314 + u32 offset = (obj - slab->s_mem);
70315 + return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70316 +@@ -564,7 +564,7 @@ struct cache_names {
70317 + static struct cache_names __initdata cache_names[] = {
70318 + #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
70319 + #include <linux/kmalloc_sizes.h>
70320 +- {NULL,}
70321 ++ {NULL}
70322 + #undef CACHE
70323 + };
70324 +
70325 +@@ -1571,7 +1571,7 @@ void __init kmem_cache_init(void)
70326 + sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70327 + sizes[INDEX_AC].cs_size,
70328 + ARCH_KMALLOC_MINALIGN,
70329 +- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70330 ++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70331 + NULL);
70332 +
70333 + if (INDEX_AC != INDEX_L3) {
70334 +@@ -1579,7 +1579,7 @@ void __init kmem_cache_init(void)
70335 + kmem_cache_create(names[INDEX_L3].name,
70336 + sizes[INDEX_L3].cs_size,
70337 + ARCH_KMALLOC_MINALIGN,
70338 +- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70339 ++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70340 + NULL);
70341 + }
70342 +
70343 +@@ -1597,7 +1597,7 @@ void __init kmem_cache_init(void)
70344 + sizes->cs_cachep = kmem_cache_create(names->name,
70345 + sizes->cs_size,
70346 + ARCH_KMALLOC_MINALIGN,
70347 +- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70348 ++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70349 + NULL);
70350 + }
70351 + #ifdef CONFIG_ZONE_DMA
70352 +@@ -4324,10 +4324,10 @@ static int s_show(struct seq_file *m, vo
70353 + }
70354 + /* cpu stats */
70355 + {
70356 +- unsigned long allochit = atomic_read(&cachep->allochit);
70357 +- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70358 +- unsigned long freehit = atomic_read(&cachep->freehit);
70359 +- unsigned long freemiss = atomic_read(&cachep->freemiss);
70360 ++ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70361 ++ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70362 ++ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70363 ++ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70364 +
70365 + seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70366 + allochit, allocmiss, freehit, freemiss);
70367 +@@ -4584,15 +4584,70 @@ static const struct file_operations proc
70368 +
70369 + static int __init slab_proc_init(void)
70370 + {
70371 +- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
70372 ++ mode_t gr_mode = S_IRUGO;
70373 ++
70374 ++#ifdef CONFIG_GRKERNSEC_PROC_ADD
70375 ++ gr_mode = S_IRUSR;
70376 ++#endif
70377 ++
70378 ++ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
70379 + #ifdef CONFIG_DEBUG_SLAB_LEAK
70380 +- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70381 ++ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
70382 + #endif
70383 + return 0;
70384 + }
70385 + module_init(slab_proc_init);
70386 + #endif
70387 +
70388 ++void check_object_size(const void *ptr, unsigned long n, bool to)
70389 ++{
70390 ++
70391 ++#ifdef CONFIG_PAX_USERCOPY
70392 ++ struct page *page;
70393 ++ struct kmem_cache *cachep = NULL;
70394 ++ struct slab *slabp;
70395 ++ unsigned int objnr;
70396 ++ unsigned long offset;
70397 ++ const char *type;
70398 ++
70399 ++ if (!n)
70400 ++ return;
70401 ++
70402 ++ type = "<null>";
70403 ++ if (ZERO_OR_NULL_PTR(ptr))
70404 ++ goto report;
70405 ++
70406 ++ if (!virt_addr_valid(ptr))
70407 ++ return;
70408 ++
70409 ++ page = virt_to_head_page(ptr);
70410 ++
70411 ++ type = "<process stack>";
70412 ++ if (!PageSlab(page)) {
70413 ++ if (object_is_on_stack(ptr, n) == -1)
70414 ++ goto report;
70415 ++ return;
70416 ++ }
70417 ++
70418 ++ cachep = page_get_cache(page);
70419 ++ type = cachep->name;
70420 ++ if (!(cachep->flags & SLAB_USERCOPY))
70421 ++ goto report;
70422 ++
70423 ++ slabp = page_get_slab(page);
70424 ++ objnr = obj_to_index(cachep, slabp, ptr);
70425 ++ BUG_ON(objnr >= cachep->num);
70426 ++ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70427 ++ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70428 ++ return;
70429 ++
70430 ++report:
70431 ++ pax_report_usercopy(ptr, n, to, type);
70432 ++#endif
70433 ++
70434 ++}
70435 ++EXPORT_SYMBOL(check_object_size);
70436 ++
70437 + /**
70438 + * ksize - get the actual amount of memory allocated for a given object
70439 + * @objp: Pointer to the object
70440 +diff -urNp linux-3.1.1/mm/slob.c linux-3.1.1/mm/slob.c
70441 +--- linux-3.1.1/mm/slob.c 2011-11-11 15:19:27.000000000 -0500
70442 ++++ linux-3.1.1/mm/slob.c 2011-11-16 18:39:08.000000000 -0500
70443 +@@ -29,7 +29,7 @@
70444 + * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70445 + * alloc_pages() directly, allocating compound pages so the page order
70446 + * does not have to be separately tracked, and also stores the exact
70447 +- * allocation size in page->private so that it can be used to accurately
70448 ++ * allocation size in slob_page->size so that it can be used to accurately
70449 + * provide ksize(). These objects are detected in kfree() because slob_page()
70450 + * is false for them.
70451 + *
70452 +@@ -58,6 +58,7 @@
70453 + */
70454 +
70455 + #include <linux/kernel.h>
70456 ++#include <linux/sched.h>
70457 + #include <linux/slab.h>
70458 + #include <linux/mm.h>
70459 + #include <linux/swap.h> /* struct reclaim_state */
70460 +@@ -102,7 +103,8 @@ struct slob_page {
70461 + unsigned long flags; /* mandatory */
70462 + atomic_t _count; /* mandatory */
70463 + slobidx_t units; /* free units left in page */
70464 +- unsigned long pad[2];
70465 ++ unsigned long pad[1];
70466 ++ unsigned long size; /* size when >=PAGE_SIZE */
70467 + slob_t *free; /* first free slob_t in page */
70468 + struct list_head list; /* linked list of free pages */
70469 + };
70470 +@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
70471 + */
70472 + static inline int is_slob_page(struct slob_page *sp)
70473 + {
70474 +- return PageSlab((struct page *)sp);
70475 ++ return PageSlab((struct page *)sp) && !sp->size;
70476 + }
70477 +
70478 + static inline void set_slob_page(struct slob_page *sp)
70479 +@@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
70480 +
70481 + static inline struct slob_page *slob_page(const void *addr)
70482 + {
70483 +- return (struct slob_page *)virt_to_page(addr);
70484 ++ return (struct slob_page *)virt_to_head_page(addr);
70485 + }
70486 +
70487 + /*
70488 +@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
70489 + /*
70490 + * Return the size of a slob block.
70491 + */
70492 +-static slobidx_t slob_units(slob_t *s)
70493 ++static slobidx_t slob_units(const slob_t *s)
70494 + {
70495 + if (s->units > 0)
70496 + return s->units;
70497 +@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
70498 + /*
70499 + * Return the next free slob block pointer after this one.
70500 + */
70501 +-static slob_t *slob_next(slob_t *s)
70502 ++static slob_t *slob_next(const slob_t *s)
70503 + {
70504 + slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70505 + slobidx_t next;
70506 +@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
70507 + /*
70508 + * Returns true if s is the last free block in its page.
70509 + */
70510 +-static int slob_last(slob_t *s)
70511 ++static int slob_last(const slob_t *s)
70512 + {
70513 + return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70514 + }
70515 +@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
70516 + if (!page)
70517 + return NULL;
70518 +
70519 ++ set_slob_page(page);
70520 + return page_address(page);
70521 + }
70522 +
70523 +@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
70524 + if (!b)
70525 + return NULL;
70526 + sp = slob_page(b);
70527 +- set_slob_page(sp);
70528 +
70529 + spin_lock_irqsave(&slob_lock, flags);
70530 + sp->units = SLOB_UNITS(PAGE_SIZE);
70531 + sp->free = b;
70532 ++ sp->size = 0;
70533 + INIT_LIST_HEAD(&sp->list);
70534 + set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70535 + set_slob_page_free(sp, slob_list);
70536 +@@ -476,10 +479,9 @@ out:
70537 + * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
70538 + */
70539 +
70540 +-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70541 ++static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70542 + {
70543 +- unsigned int *m;
70544 +- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70545 ++ slob_t *m;
70546 + void *ret;
70547 +
70548 + gfp &= gfp_allowed_mask;
70549 +@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t
70550 +
70551 + if (!m)
70552 + return NULL;
70553 +- *m = size;
70554 ++ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70555 ++ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70556 ++ m[0].units = size;
70557 ++ m[1].units = align;
70558 + ret = (void *)m + align;
70559 +
70560 + trace_kmalloc_node(_RET_IP_, ret,
70561 +@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t
70562 + gfp |= __GFP_COMP;
70563 + ret = slob_new_pages(gfp, order, node);
70564 + if (ret) {
70565 +- struct page *page;
70566 +- page = virt_to_page(ret);
70567 +- page->private = size;
70568 ++ struct slob_page *sp;
70569 ++ sp = slob_page(ret);
70570 ++ sp->size = size;
70571 + }
70572 +
70573 + trace_kmalloc_node(_RET_IP_, ret,
70574 + size, PAGE_SIZE << order, gfp, node);
70575 + }
70576 +
70577 +- kmemleak_alloc(ret, size, 1, gfp);
70578 ++ return ret;
70579 ++}
70580 ++
70581 ++void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70582 ++{
70583 ++ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70584 ++ void *ret = __kmalloc_node_align(size, gfp, node, align);
70585 ++
70586 ++ if (!ZERO_OR_NULL_PTR(ret))
70587 ++ kmemleak_alloc(ret, size, 1, gfp);
70588 + return ret;
70589 + }
70590 + EXPORT_SYMBOL(__kmalloc_node);
70591 +@@ -533,13 +547,92 @@ void kfree(const void *block)
70592 + sp = slob_page(block);
70593 + if (is_slob_page(sp)) {
70594 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70595 +- unsigned int *m = (unsigned int *)(block - align);
70596 +- slob_free(m, *m + align);
70597 +- } else
70598 ++ slob_t *m = (slob_t *)(block - align);
70599 ++ slob_free(m, m[0].units + align);
70600 ++ } else {
70601 ++ clear_slob_page(sp);
70602 ++ free_slob_page(sp);
70603 ++ sp->size = 0;
70604 + put_page(&sp->page);
70605 ++ }
70606 + }
70607 + EXPORT_SYMBOL(kfree);
70608 +
70609 ++void check_object_size(const void *ptr, unsigned long n, bool to)
70610 ++{
70611 ++
70612 ++#ifdef CONFIG_PAX_USERCOPY
70613 ++ struct slob_page *sp;
70614 ++ const slob_t *free;
70615 ++ const void *base;
70616 ++ unsigned long flags;
70617 ++ const char *type;
70618 ++
70619 ++ if (!n)
70620 ++ return;
70621 ++
70622 ++ type = "<null>";
70623 ++ if (ZERO_OR_NULL_PTR(ptr))
70624 ++ goto report;
70625 ++
70626 ++ if (!virt_addr_valid(ptr))
70627 ++ return;
70628 ++
70629 ++ type = "<process stack>";
70630 ++ sp = slob_page(ptr);
70631 ++ if (!PageSlab((struct page*)sp)) {
70632 ++ if (object_is_on_stack(ptr, n) == -1)
70633 ++ goto report;
70634 ++ return;
70635 ++ }
70636 ++
70637 ++ type = "<slob>";
70638 ++ if (sp->size) {
70639 ++ base = page_address(&sp->page);
70640 ++ if (base <= ptr && n <= sp->size - (ptr - base))
70641 ++ return;
70642 ++ goto report;
70643 ++ }
70644 ++
70645 ++ /* some tricky double walking to find the chunk */
70646 ++ spin_lock_irqsave(&slob_lock, flags);
70647 ++ base = (void *)((unsigned long)ptr & PAGE_MASK);
70648 ++ free = sp->free;
70649 ++
70650 ++ while (!slob_last(free) && (void *)free <= ptr) {
70651 ++ base = free + slob_units(free);
70652 ++ free = slob_next(free);
70653 ++ }
70654 ++
70655 ++ while (base < (void *)free) {
70656 ++ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70657 ++ int size = SLOB_UNIT * SLOB_UNITS(m + align);
70658 ++ int offset;
70659 ++
70660 ++ if (ptr < base + align)
70661 ++ break;
70662 ++
70663 ++ offset = ptr - base - align;
70664 ++ if (offset >= m) {
70665 ++ base += size;
70666 ++ continue;
70667 ++ }
70668 ++
70669 ++ if (n > m - offset)
70670 ++ break;
70671 ++
70672 ++ spin_unlock_irqrestore(&slob_lock, flags);
70673 ++ return;
70674 ++ }
70675 ++
70676 ++ spin_unlock_irqrestore(&slob_lock, flags);
70677 ++report:
70678 ++ pax_report_usercopy(ptr, n, to, type);
70679 ++#endif
70680 ++
70681 ++}
70682 ++EXPORT_SYMBOL(check_object_size);
70683 ++
70684 + /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70685 + size_t ksize(const void *block)
70686 + {
70687 +@@ -552,10 +645,10 @@ size_t ksize(const void *block)
70688 + sp = slob_page(block);
70689 + if (is_slob_page(sp)) {
70690 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70691 +- unsigned int *m = (unsigned int *)(block - align);
70692 +- return SLOB_UNITS(*m) * SLOB_UNIT;
70693 ++ slob_t *m = (slob_t *)(block - align);
70694 ++ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70695 + } else
70696 +- return sp->page.private;
70697 ++ return sp->size;
70698 + }
70699 + EXPORT_SYMBOL(ksize);
70700 +
70701 +@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(con
70702 + {
70703 + struct kmem_cache *c;
70704 +
70705 ++#ifdef CONFIG_PAX_USERCOPY
70706 ++ c = __kmalloc_node_align(sizeof(struct kmem_cache),
70707 ++ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70708 ++#else
70709 + c = slob_alloc(sizeof(struct kmem_cache),
70710 + GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70711 ++#endif
70712 +
70713 + if (c) {
70714 + c->name = name;
70715 +@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_
70716 +
70717 + lockdep_trace_alloc(flags);
70718 +
70719 ++#ifdef CONFIG_PAX_USERCOPY
70720 ++ b = __kmalloc_node_align(c->size, flags, node, c->align);
70721 ++#else
70722 + if (c->size < PAGE_SIZE) {
70723 + b = slob_alloc(c->size, flags, c->align, node);
70724 + trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70725 + SLOB_UNITS(c->size) * SLOB_UNIT,
70726 + flags, node);
70727 + } else {
70728 ++ struct slob_page *sp;
70729 ++
70730 + b = slob_new_pages(flags, get_order(c->size), node);
70731 ++ sp = slob_page(b);
70732 ++ sp->size = c->size;
70733 + trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70734 + PAGE_SIZE << get_order(c->size),
70735 + flags, node);
70736 + }
70737 ++#endif
70738 +
70739 + if (c->ctor)
70740 + c->ctor(b);
70741 +@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70742 +
70743 + static void __kmem_cache_free(void *b, int size)
70744 + {
70745 +- if (size < PAGE_SIZE)
70746 ++ struct slob_page *sp = slob_page(b);
70747 ++
70748 ++ if (is_slob_page(sp))
70749 + slob_free(b, size);
70750 +- else
70751 ++ else {
70752 ++ clear_slob_page(sp);
70753 ++ free_slob_page(sp);
70754 ++ sp->size = 0;
70755 + slob_free_pages(b, get_order(size));
70756 ++ }
70757 + }
70758 +
70759 + static void kmem_rcu_free(struct rcu_head *head)
70760 +@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_hea
70761 +
70762 + void kmem_cache_free(struct kmem_cache *c, void *b)
70763 + {
70764 ++ int size = c->size;
70765 ++
70766 ++#ifdef CONFIG_PAX_USERCOPY
70767 ++ if (size + c->align < PAGE_SIZE) {
70768 ++ size += c->align;
70769 ++ b -= c->align;
70770 ++ }
70771 ++#endif
70772 ++
70773 + kmemleak_free_recursive(b, c->flags);
70774 + if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70775 + struct slob_rcu *slob_rcu;
70776 +- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70777 +- slob_rcu->size = c->size;
70778 ++ slob_rcu = b + (size - sizeof(struct slob_rcu));
70779 ++ slob_rcu->size = size;
70780 + call_rcu(&slob_rcu->head, kmem_rcu_free);
70781 + } else {
70782 +- __kmem_cache_free(b, c->size);
70783 ++ __kmem_cache_free(b, size);
70784 + }
70785 +
70786 ++#ifdef CONFIG_PAX_USERCOPY
70787 ++ trace_kfree(_RET_IP_, b);
70788 ++#else
70789 + trace_kmem_cache_free(_RET_IP_, b);
70790 ++#endif
70791 ++
70792 + }
70793 + EXPORT_SYMBOL(kmem_cache_free);
70794 +
70795 +diff -urNp linux-3.1.1/mm/slub.c linux-3.1.1/mm/slub.c
70796 +--- linux-3.1.1/mm/slub.c 2011-11-11 15:19:27.000000000 -0500
70797 ++++ linux-3.1.1/mm/slub.c 2011-11-16 19:27:25.000000000 -0500
70798 +@@ -208,7 +208,7 @@ struct track {
70799 +
70800 + enum track_item { TRACK_ALLOC, TRACK_FREE };
70801 +
70802 +-#ifdef CONFIG_SYSFS
70803 ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70804 + static int sysfs_slab_add(struct kmem_cache *);
70805 + static int sysfs_slab_alias(struct kmem_cache *, const char *);
70806 + static void sysfs_slab_remove(struct kmem_cache *);
70807 +@@ -556,7 +556,7 @@ static void print_track(const char *s, s
70808 + if (!t->addr)
70809 + return;
70810 +
70811 +- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70812 ++ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70813 + s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70814 + #ifdef CONFIG_STACKTRACE
70815 + {
70816 +@@ -2456,6 +2456,8 @@ void kmem_cache_free(struct kmem_cache *
70817 +
70818 + page = virt_to_head_page(x);
70819 +
70820 ++ BUG_ON(!PageSlab(page));
70821 ++
70822 + slab_free(s, page, x, _RET_IP_);
70823 +
70824 + trace_kmem_cache_free(_RET_IP_, x);
70825 +@@ -2489,7 +2491,7 @@ static int slub_min_objects;
70826 + * Merge control. If this is set then no merging of slab caches will occur.
70827 + * (Could be removed. This was introduced to pacify the merge skeptics.)
70828 + */
70829 +-static int slub_nomerge;
70830 ++static int slub_nomerge = 1;
70831 +
70832 + /*
70833 + * Calculate the order of allocation given an slab object size.
70834 +@@ -2912,7 +2914,7 @@ static int kmem_cache_open(struct kmem_c
70835 + * list to avoid pounding the page allocator excessively.
70836 + */
70837 + set_min_partial(s, ilog2(s->size));
70838 +- s->refcount = 1;
70839 ++ atomic_set(&s->refcount, 1);
70840 + #ifdef CONFIG_NUMA
70841 + s->remote_node_defrag_ratio = 1000;
70842 + #endif
70843 +@@ -3017,8 +3019,7 @@ static inline int kmem_cache_close(struc
70844 + void kmem_cache_destroy(struct kmem_cache *s)
70845 + {
70846 + down_write(&slub_lock);
70847 +- s->refcount--;
70848 +- if (!s->refcount) {
70849 ++ if (atomic_dec_and_test(&s->refcount)) {
70850 + list_del(&s->list);
70851 + if (kmem_cache_close(s)) {
70852 + printk(KERN_ERR "SLUB %s: %s called for cache that "
70853 +@@ -3228,6 +3229,50 @@ void *__kmalloc_node(size_t size, gfp_t
70854 + EXPORT_SYMBOL(__kmalloc_node);
70855 + #endif
70856 +
70857 ++void check_object_size(const void *ptr, unsigned long n, bool to)
70858 ++{
70859 ++
70860 ++#ifdef CONFIG_PAX_USERCOPY
70861 ++ struct page *page;
70862 ++ struct kmem_cache *s = NULL;
70863 ++ unsigned long offset;
70864 ++ const char *type;
70865 ++
70866 ++ if (!n)
70867 ++ return;
70868 ++
70869 ++ type = "<null>";
70870 ++ if (ZERO_OR_NULL_PTR(ptr))
70871 ++ goto report;
70872 ++
70873 ++ if (!virt_addr_valid(ptr))
70874 ++ return;
70875 ++
70876 ++ page = virt_to_head_page(ptr);
70877 ++
70878 ++ type = "<process stack>";
70879 ++ if (!PageSlab(page)) {
70880 ++ if (object_is_on_stack(ptr, n) == -1)
70881 ++ goto report;
70882 ++ return;
70883 ++ }
70884 ++
70885 ++ s = page->slab;
70886 ++ type = s->name;
70887 ++ if (!(s->flags & SLAB_USERCOPY))
70888 ++ goto report;
70889 ++
70890 ++ offset = (ptr - page_address(page)) % s->size;
70891 ++ if (offset <= s->objsize && n <= s->objsize - offset)
70892 ++ return;
70893 ++
70894 ++report:
70895 ++ pax_report_usercopy(ptr, n, to, type);
70896 ++#endif
70897 ++
70898 ++}
70899 ++EXPORT_SYMBOL(check_object_size);
70900 ++
70901 + size_t ksize(const void *object)
70902 + {
70903 + struct page *page;
70904 +@@ -3502,7 +3547,7 @@ static void __init kmem_cache_bootstrap_
70905 + int node;
70906 +
70907 + list_add(&s->list, &slab_caches);
70908 +- s->refcount = -1;
70909 ++ atomic_set(&s->refcount, -1);
70910 +
70911 + for_each_node_state(node, N_NORMAL_MEMORY) {
70912 + struct kmem_cache_node *n = get_node(s, node);
70913 +@@ -3619,17 +3664,17 @@ void __init kmem_cache_init(void)
70914 +
70915 + /* Caches that are not of the two-to-the-power-of size */
70916 + if (KMALLOC_MIN_SIZE <= 32) {
70917 +- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
70918 ++ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
70919 + caches++;
70920 + }
70921 +
70922 + if (KMALLOC_MIN_SIZE <= 64) {
70923 +- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
70924 ++ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
70925 + caches++;
70926 + }
70927 +
70928 + for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70929 +- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
70930 ++ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
70931 + caches++;
70932 + }
70933 +
70934 +@@ -3697,7 +3742,7 @@ static int slab_unmergeable(struct kmem_
70935 + /*
70936 + * We may have set a slab to be unmergeable during bootstrap.
70937 + */
70938 +- if (s->refcount < 0)
70939 ++ if (atomic_read(&s->refcount) < 0)
70940 + return 1;
70941 +
70942 + return 0;
70943 +@@ -3756,7 +3801,7 @@ struct kmem_cache *kmem_cache_create(con
70944 + down_write(&slub_lock);
70945 + s = find_mergeable(size, align, flags, name, ctor);
70946 + if (s) {
70947 +- s->refcount++;
70948 ++ atomic_inc(&s->refcount);
70949 + /*
70950 + * Adjust the object sizes so that we clear
70951 + * the complete object on kzalloc.
70952 +@@ -3765,7 +3810,7 @@ struct kmem_cache *kmem_cache_create(con
70953 + s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
70954 +
70955 + if (sysfs_slab_alias(s, name)) {
70956 +- s->refcount--;
70957 ++ atomic_dec(&s->refcount);
70958 + goto err;
70959 + }
70960 + up_write(&slub_lock);
70961 +@@ -3893,7 +3938,7 @@ void *__kmalloc_node_track_caller(size_t
70962 + }
70963 + #endif
70964 +
70965 +-#ifdef CONFIG_SYSFS
70966 ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70967 + static int count_inuse(struct page *page)
70968 + {
70969 + return page->inuse;
70970 +@@ -4280,12 +4325,12 @@ static void resiliency_test(void)
70971 + validate_slab_cache(kmalloc_caches[9]);
70972 + }
70973 + #else
70974 +-#ifdef CONFIG_SYSFS
70975 ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70976 + static void resiliency_test(void) {};
70977 + #endif
70978 + #endif
70979 +
70980 +-#ifdef CONFIG_SYSFS
70981 ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70982 + enum slab_stat_type {
70983 + SL_ALL, /* All slabs */
70984 + SL_PARTIAL, /* Only partially allocated slabs */
70985 +@@ -4495,7 +4540,7 @@ SLAB_ATTR_RO(ctor);
70986 +
70987 + static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70988 + {
70989 +- return sprintf(buf, "%d\n", s->refcount - 1);
70990 ++ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70991 + }
70992 + SLAB_ATTR_RO(aliases);
70993 +
70994 +@@ -5025,6 +5070,7 @@ static char *create_unique_id(struct kme
70995 + return name;
70996 + }
70997 +
70998 ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
70999 + static int sysfs_slab_add(struct kmem_cache *s)
71000 + {
71001 + int err;
71002 +@@ -5087,6 +5133,7 @@ static void sysfs_slab_remove(struct kme
71003 + kobject_del(&s->kobj);
71004 + kobject_put(&s->kobj);
71005 + }
71006 ++#endif
71007 +
71008 + /*
71009 + * Need to buffer aliases during bootup until sysfs becomes
71010 +@@ -5100,6 +5147,7 @@ struct saved_alias {
71011 +
71012 + static struct saved_alias *alias_list;
71013 +
71014 ++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71015 + static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71016 + {
71017 + struct saved_alias *al;
71018 +@@ -5122,6 +5170,7 @@ static int sysfs_slab_alias(struct kmem_
71019 + alias_list = al;
71020 + return 0;
71021 + }
71022 ++#endif
71023 +
71024 + static int __init slab_sysfs_init(void)
71025 + {
71026 +@@ -5257,7 +5306,13 @@ static const struct file_operations proc
71027 +
71028 + static int __init slab_proc_init(void)
71029 + {
71030 +- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
71031 ++ mode_t gr_mode = S_IRUGO;
71032 ++
71033 ++#ifdef CONFIG_GRKERNSEC_PROC_ADD
71034 ++ gr_mode = S_IRUSR;
71035 ++#endif
71036 ++
71037 ++ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
71038 + return 0;
71039 + }
71040 + module_init(slab_proc_init);
71041 +diff -urNp linux-3.1.1/mm/swap.c linux-3.1.1/mm/swap.c
71042 +--- linux-3.1.1/mm/swap.c 2011-11-11 15:19:27.000000000 -0500
71043 ++++ linux-3.1.1/mm/swap.c 2011-11-16 18:39:08.000000000 -0500
71044 +@@ -31,6 +31,7 @@
71045 + #include <linux/backing-dev.h>
71046 + #include <linux/memcontrol.h>
71047 + #include <linux/gfp.h>
71048 ++#include <linux/hugetlb.h>
71049 +
71050 + #include "internal.h"
71051 +
71052 +@@ -71,6 +72,8 @@ static void __put_compound_page(struct p
71053 +
71054 + __page_cache_release(page);
71055 + dtor = get_compound_page_dtor(page);
71056 ++ if (!PageHuge(page))
71057 ++ BUG_ON(dtor != free_compound_page);
71058 + (*dtor)(page);
71059 + }
71060 +
71061 +diff -urNp linux-3.1.1/mm/swapfile.c linux-3.1.1/mm/swapfile.c
71062 +--- linux-3.1.1/mm/swapfile.c 2011-11-11 15:19:27.000000000 -0500
71063 ++++ linux-3.1.1/mm/swapfile.c 2011-11-16 18:39:08.000000000 -0500
71064 +@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
71065 +
71066 + static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
71067 + /* Activity counter to indicate that a swapon or swapoff has occurred */
71068 +-static atomic_t proc_poll_event = ATOMIC_INIT(0);
71069 ++static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
71070 +
71071 + static inline unsigned char swap_count(unsigned char ent)
71072 + {
71073 +@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
71074 + }
71075 + filp_close(swap_file, NULL);
71076 + err = 0;
71077 +- atomic_inc(&proc_poll_event);
71078 ++ atomic_inc_unchecked(&proc_poll_event);
71079 + wake_up_interruptible(&proc_poll_wait);
71080 +
71081 + out_dput:
71082 +@@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *
71083 +
71084 + poll_wait(file, &proc_poll_wait, wait);
71085 +
71086 +- if (seq->poll_event != atomic_read(&proc_poll_event)) {
71087 +- seq->poll_event = atomic_read(&proc_poll_event);
71088 ++ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
71089 ++ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71090 + return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
71091 + }
71092 +
71093 +@@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inod
71094 + return ret;
71095 +
71096 + seq = file->private_data;
71097 +- seq->poll_event = atomic_read(&proc_poll_event);
71098 ++ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71099 + return 0;
71100 + }
71101 +
71102 +@@ -2124,7 +2124,7 @@ SYSCALL_DEFINE2(swapon, const char __use
71103 + (p->flags & SWP_DISCARDABLE) ? "D" : "");
71104 +
71105 + mutex_unlock(&swapon_mutex);
71106 +- atomic_inc(&proc_poll_event);
71107 ++ atomic_inc_unchecked(&proc_poll_event);
71108 + wake_up_interruptible(&proc_poll_wait);
71109 +
71110 + if (S_ISREG(inode->i_mode))
71111 +diff -urNp linux-3.1.1/mm/util.c linux-3.1.1/mm/util.c
71112 +--- linux-3.1.1/mm/util.c 2011-11-11 15:19:27.000000000 -0500
71113 ++++ linux-3.1.1/mm/util.c 2011-11-16 18:39:08.000000000 -0500
71114 +@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
71115 + * allocated buffer. Use this if you don't want to free the buffer immediately
71116 + * like, for example, with RCU.
71117 + */
71118 ++#undef __krealloc
71119 + void *__krealloc(const void *p, size_t new_size, gfp_t flags)
71120 + {
71121 + void *ret;
71122 +@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
71123 + * behaves exactly like kmalloc(). If @size is 0 and @p is not a
71124 + * %NULL pointer, the object pointed to is freed.
71125 + */
71126 ++#undef krealloc
71127 + void *krealloc(const void *p, size_t new_size, gfp_t flags)
71128 + {
71129 + void *ret;
71130 +@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *m
71131 + void arch_pick_mmap_layout(struct mm_struct *mm)
71132 + {
71133 + mm->mmap_base = TASK_UNMAPPED_BASE;
71134 ++
71135 ++#ifdef CONFIG_PAX_RANDMMAP
71136 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
71137 ++ mm->mmap_base += mm->delta_mmap;
71138 ++#endif
71139 ++
71140 + mm->get_unmapped_area = arch_get_unmapped_area;
71141 + mm->unmap_area = arch_unmap_area;
71142 + }
71143 +diff -urNp linux-3.1.1/mm/vmalloc.c linux-3.1.1/mm/vmalloc.c
71144 +--- linux-3.1.1/mm/vmalloc.c 2011-11-11 15:19:27.000000000 -0500
71145 ++++ linux-3.1.1/mm/vmalloc.c 2011-11-16 18:40:44.000000000 -0500
71146 +@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
71147 +
71148 + pte = pte_offset_kernel(pmd, addr);
71149 + do {
71150 +- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71151 +- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71152 ++
71153 ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71154 ++ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71155 ++ BUG_ON(!pte_exec(*pte));
71156 ++ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71157 ++ continue;
71158 ++ }
71159 ++#endif
71160 ++
71161 ++ {
71162 ++ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71163 ++ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71164 ++ }
71165 + } while (pte++, addr += PAGE_SIZE, addr != end);
71166 + }
71167 +
71168 +@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
71169 + unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71170 + {
71171 + pte_t *pte;
71172 ++ int ret = -ENOMEM;
71173 +
71174 + /*
71175 + * nr is a running index into the array which helps higher level
71176 +@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
71177 + pte = pte_alloc_kernel(pmd, addr);
71178 + if (!pte)
71179 + return -ENOMEM;
71180 ++
71181 ++ pax_open_kernel();
71182 + do {
71183 + struct page *page = pages[*nr];
71184 +
71185 +- if (WARN_ON(!pte_none(*pte)))
71186 +- return -EBUSY;
71187 +- if (WARN_ON(!page))
71188 +- return -ENOMEM;
71189 ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71190 ++ if (pgprot_val(prot) & _PAGE_NX)
71191 ++#endif
71192 ++
71193 ++ if (WARN_ON(!pte_none(*pte))) {
71194 ++ ret = -EBUSY;
71195 ++ goto out;
71196 ++ }
71197 ++ if (WARN_ON(!page)) {
71198 ++ ret = -ENOMEM;
71199 ++ goto out;
71200 ++ }
71201 + set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71202 + (*nr)++;
71203 + } while (pte++, addr += PAGE_SIZE, addr != end);
71204 +- return 0;
71205 ++ ret = 0;
71206 ++out:
71207 ++ pax_close_kernel();
71208 ++ return ret;
71209 + }
71210 +
71211 + static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71212 +@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
71213 + * and fall back on vmalloc() if that fails. Others
71214 + * just put it in the vmalloc space.
71215 + */
71216 +-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71217 ++#ifdef CONFIG_MODULES
71218 ++#ifdef MODULES_VADDR
71219 + unsigned long addr = (unsigned long)x;
71220 + if (addr >= MODULES_VADDR && addr < MODULES_END)
71221 + return 1;
71222 + #endif
71223 ++
71224 ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71225 ++ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71226 ++ return 1;
71227 ++#endif
71228 ++
71229 ++#endif
71230 ++
71231 + return is_vmalloc_addr(x);
71232 + }
71233 +
71234 +@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
71235 +
71236 + if (!pgd_none(*pgd)) {
71237 + pud_t *pud = pud_offset(pgd, addr);
71238 ++#ifdef CONFIG_X86
71239 ++ if (!pud_large(*pud))
71240 ++#endif
71241 + if (!pud_none(*pud)) {
71242 + pmd_t *pmd = pmd_offset(pud, addr);
71243 ++#ifdef CONFIG_X86
71244 ++ if (!pmd_large(*pmd))
71245 ++#endif
71246 + if (!pmd_none(*pmd)) {
71247 + pte_t *ptep, pte;
71248 +
71249 +@@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_n
71250 + struct vm_struct *area;
71251 +
71252 + BUG_ON(in_interrupt());
71253 ++
71254 ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71255 ++ if (flags & VM_KERNEXEC) {
71256 ++ if (start != VMALLOC_START || end != VMALLOC_END)
71257 ++ return NULL;
71258 ++ start = (unsigned long)MODULES_EXEC_VADDR;
71259 ++ end = (unsigned long)MODULES_EXEC_END;
71260 ++ }
71261 ++#endif
71262 ++
71263 + if (flags & VM_IOREMAP) {
71264 + int bit = fls(size);
71265 +
71266 +@@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned
71267 + if (count > totalram_pages)
71268 + return NULL;
71269 +
71270 ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71271 ++ if (!(pgprot_val(prot) & _PAGE_NX))
71272 ++ flags |= VM_KERNEXEC;
71273 ++#endif
71274 ++
71275 + area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71276 + __builtin_return_address(0));
71277 + if (!area)
71278 +@@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long
71279 + if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71280 + return NULL;
71281 +
71282 ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71283 ++ if (!(pgprot_val(prot) & _PAGE_NX))
71284 ++ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
71285 ++ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
71286 ++ else
71287 ++#endif
71288 ++
71289 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
71290 + start, end, node, gfp_mask, caller);
71291 +
71292 +@@ -1672,6 +1734,7 @@ static void *__vmalloc_node(unsigned lon
71293 + gfp_mask, prot, node, caller);
71294 + }
71295 +
71296 ++#undef __vmalloc
71297 + void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71298 + {
71299 + return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71300 +@@ -1695,6 +1758,7 @@ static inline void *__vmalloc_node_flags
71301 + * For tight control over page level allocator and protection flags
71302 + * use __vmalloc() instead.
71303 + */
71304 ++#undef vmalloc
71305 + void *vmalloc(unsigned long size)
71306 + {
71307 + return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71308 +@@ -1711,6 +1775,7 @@ EXPORT_SYMBOL(vmalloc);
71309 + * For tight control over page level allocator and protection flags
71310 + * use __vmalloc() instead.
71311 + */
71312 ++#undef vzalloc
71313 + void *vzalloc(unsigned long size)
71314 + {
71315 + return __vmalloc_node_flags(size, -1,
71316 +@@ -1725,6 +1790,7 @@ EXPORT_SYMBOL(vzalloc);
71317 + * The resulting memory area is zeroed so it can be mapped to userspace
71318 + * without leaking data.
71319 + */
71320 ++#undef vmalloc_user
71321 + void *vmalloc_user(unsigned long size)
71322 + {
71323 + struct vm_struct *area;
71324 +@@ -1752,6 +1818,7 @@ EXPORT_SYMBOL(vmalloc_user);
71325 + * For tight control over page level allocator and protection flags
71326 + * use __vmalloc() instead.
71327 + */
71328 ++#undef vmalloc_node
71329 + void *vmalloc_node(unsigned long size, int node)
71330 + {
71331 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71332 +@@ -1771,6 +1838,7 @@ EXPORT_SYMBOL(vmalloc_node);
71333 + * For tight control over page level allocator and protection flags
71334 + * use __vmalloc_node() instead.
71335 + */
71336 ++#undef vzalloc_node
71337 + void *vzalloc_node(unsigned long size, int node)
71338 + {
71339 + return __vmalloc_node_flags(size, node,
71340 +@@ -1793,10 +1861,10 @@ EXPORT_SYMBOL(vzalloc_node);
71341 + * For tight control over page level allocator and protection flags
71342 + * use __vmalloc() instead.
71343 + */
71344 +-
71345 ++#undef vmalloc_exec
71346 + void *vmalloc_exec(unsigned long size)
71347 + {
71348 +- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71349 ++ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71350 + -1, __builtin_return_address(0));
71351 + }
71352 +
71353 +@@ -1815,6 +1883,7 @@ void *vmalloc_exec(unsigned long size)
71354 + * Allocate enough 32bit PA addressable pages to cover @size from the
71355 + * page level allocator and map them into contiguous kernel virtual space.
71356 + */
71357 ++#undef vmalloc_32
71358 + void *vmalloc_32(unsigned long size)
71359 + {
71360 + return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
71361 +@@ -1829,6 +1898,7 @@ EXPORT_SYMBOL(vmalloc_32);
71362 + * The resulting memory area is 32bit addressable and zeroed so it can be
71363 + * mapped to userspace without leaking data.
71364 + */
71365 ++#undef vmalloc_32_user
71366 + void *vmalloc_32_user(unsigned long size)
71367 + {
71368 + struct vm_struct *area;
71369 +@@ -2091,6 +2161,8 @@ int remap_vmalloc_range(struct vm_area_s
71370 + unsigned long uaddr = vma->vm_start;
71371 + unsigned long usize = vma->vm_end - vma->vm_start;
71372 +
71373 ++ BUG_ON(vma->vm_mirror);
71374 ++
71375 + if ((PAGE_SIZE-1) & (unsigned long)addr)
71376 + return -EINVAL;
71377 +
71378 +diff -urNp linux-3.1.1/mm/vmstat.c linux-3.1.1/mm/vmstat.c
71379 +--- linux-3.1.1/mm/vmstat.c 2011-11-11 15:19:27.000000000 -0500
71380 ++++ linux-3.1.1/mm/vmstat.c 2011-11-16 18:40:44.000000000 -0500
71381 +@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
71382 + *
71383 + * vm_stat contains the global counters
71384 + */
71385 +-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71386 ++atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71387 + EXPORT_SYMBOL(vm_stat);
71388 +
71389 + #ifdef CONFIG_SMP
71390 +@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
71391 + v = p->vm_stat_diff[i];
71392 + p->vm_stat_diff[i] = 0;
71393 + local_irq_restore(flags);
71394 +- atomic_long_add(v, &zone->vm_stat[i]);
71395 ++ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
71396 + global_diff[i] += v;
71397 + #ifdef CONFIG_NUMA
71398 + /* 3 seconds idle till flush */
71399 +@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
71400 +
71401 + for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
71402 + if (global_diff[i])
71403 +- atomic_long_add(global_diff[i], &vm_stat[i]);
71404 ++ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
71405 + }
71406 +
71407 + #endif
71408 +@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
71409 + start_cpu_timer(cpu);
71410 + #endif
71411 + #ifdef CONFIG_PROC_FS
71412 +- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
71413 +- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
71414 +- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
71415 +- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
71416 ++ {
71417 ++ mode_t gr_mode = S_IRUGO;
71418 ++#ifdef CONFIG_GRKERNSEC_PROC_ADD
71419 ++ gr_mode = S_IRUSR;
71420 ++#endif
71421 ++ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
71422 ++ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
71423 ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71424 ++ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
71425 ++#else
71426 ++ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
71427 ++#endif
71428 ++ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
71429 ++ }
71430 + #endif
71431 + return 0;
71432 + }
71433 +diff -urNp linux-3.1.1/net/8021q/vlan.c linux-3.1.1/net/8021q/vlan.c
71434 +--- linux-3.1.1/net/8021q/vlan.c 2011-11-11 15:19:27.000000000 -0500
71435 ++++ linux-3.1.1/net/8021q/vlan.c 2011-11-16 18:39:08.000000000 -0500
71436 +@@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net
71437 + err = -EPERM;
71438 + if (!capable(CAP_NET_ADMIN))
71439 + break;
71440 +- if ((args.u.name_type >= 0) &&
71441 +- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
71442 ++ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
71443 + struct vlan_net *vn;
71444 +
71445 + vn = net_generic(net, vlan_net_id);
71446 +diff -urNp linux-3.1.1/net/9p/trans_fd.c linux-3.1.1/net/9p/trans_fd.c
71447 +--- linux-3.1.1/net/9p/trans_fd.c 2011-11-11 15:19:27.000000000 -0500
71448 ++++ linux-3.1.1/net/9p/trans_fd.c 2011-11-16 18:39:08.000000000 -0500
71449 +@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client
71450 + oldfs = get_fs();
71451 + set_fs(get_ds());
71452 + /* The cast to a user pointer is valid due to the set_fs() */
71453 +- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
71454 ++ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
71455 + set_fs(oldfs);
71456 +
71457 + if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
71458 +diff -urNp linux-3.1.1/net/9p/trans_virtio.c linux-3.1.1/net/9p/trans_virtio.c
71459 +--- linux-3.1.1/net/9p/trans_virtio.c 2011-11-11 15:19:27.000000000 -0500
71460 ++++ linux-3.1.1/net/9p/trans_virtio.c 2011-11-16 18:39:08.000000000 -0500
71461 +@@ -327,7 +327,7 @@ req_retry_pinned:
71462 + } else {
71463 + char *pbuf;
71464 + if (req->tc->pubuf)
71465 +- pbuf = (__force char *) req->tc->pubuf;
71466 ++ pbuf = (char __force_kernel *) req->tc->pubuf;
71467 + else
71468 + pbuf = req->tc->pkbuf;
71469 + outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
71470 +@@ -357,7 +357,7 @@ req_retry_pinned:
71471 + } else {
71472 + char *pbuf;
71473 + if (req->tc->pubuf)
71474 +- pbuf = (__force char *) req->tc->pubuf;
71475 ++ pbuf = (char __force_kernel *) req->tc->pubuf;
71476 + else
71477 + pbuf = req->tc->pkbuf;
71478 +
71479 +diff -urNp linux-3.1.1/net/atm/atm_misc.c linux-3.1.1/net/atm/atm_misc.c
71480 +--- linux-3.1.1/net/atm/atm_misc.c 2011-11-11 15:19:27.000000000 -0500
71481 ++++ linux-3.1.1/net/atm/atm_misc.c 2011-11-16 18:39:08.000000000 -0500
71482 +@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
71483 + if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71484 + return 1;
71485 + atm_return(vcc, truesize);
71486 +- atomic_inc(&vcc->stats->rx_drop);
71487 ++ atomic_inc_unchecked(&vcc->stats->rx_drop);
71488 + return 0;
71489 + }
71490 + EXPORT_SYMBOL(atm_charge);
71491 +@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
71492 + }
71493 + }
71494 + atm_return(vcc, guess);
71495 +- atomic_inc(&vcc->stats->rx_drop);
71496 ++ atomic_inc_unchecked(&vcc->stats->rx_drop);
71497 + return NULL;
71498 + }
71499 + EXPORT_SYMBOL(atm_alloc_charge);
71500 +@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
71501 +
71502 + void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71503 + {
71504 +-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71505 ++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71506 + __SONET_ITEMS
71507 + #undef __HANDLE_ITEM
71508 + }
71509 +@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
71510 +
71511 + void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71512 + {
71513 +-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71514 ++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71515 + __SONET_ITEMS
71516 + #undef __HANDLE_ITEM
71517 + }
71518 +diff -urNp linux-3.1.1/net/atm/lec.h linux-3.1.1/net/atm/lec.h
71519 +--- linux-3.1.1/net/atm/lec.h 2011-11-11 15:19:27.000000000 -0500
71520 ++++ linux-3.1.1/net/atm/lec.h 2011-11-16 18:39:08.000000000 -0500
71521 +@@ -48,7 +48,7 @@ struct lane2_ops {
71522 + const u8 *tlvs, u32 sizeoftlvs);
71523 + void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71524 + const u8 *tlvs, u32 sizeoftlvs);
71525 +-};
71526 ++} __no_const;
71527 +
71528 + /*
71529 + * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71530 +diff -urNp linux-3.1.1/net/atm/mpc.h linux-3.1.1/net/atm/mpc.h
71531 +--- linux-3.1.1/net/atm/mpc.h 2011-11-11 15:19:27.000000000 -0500
71532 ++++ linux-3.1.1/net/atm/mpc.h 2011-11-16 18:39:08.000000000 -0500
71533 +@@ -33,7 +33,7 @@ struct mpoa_client {
71534 + struct mpc_parameters parameters; /* parameters for this client */
71535 +
71536 + const struct net_device_ops *old_ops;
71537 +- struct net_device_ops new_ops;
71538 ++ net_device_ops_no_const new_ops;
71539 + };
71540 +
71541 +
71542 +diff -urNp linux-3.1.1/net/atm/mpoa_caches.c linux-3.1.1/net/atm/mpoa_caches.c
71543 +--- linux-3.1.1/net/atm/mpoa_caches.c 2011-11-11 15:19:27.000000000 -0500
71544 ++++ linux-3.1.1/net/atm/mpoa_caches.c 2011-11-16 18:40:44.000000000 -0500
71545 +@@ -255,6 +255,8 @@ static void check_resolving_entries(stru
71546 + struct timeval now;
71547 + struct k_message msg;
71548 +
71549 ++ pax_track_stack();
71550 ++
71551 + do_gettimeofday(&now);
71552 +
71553 + read_lock_bh(&client->ingress_lock);
71554 +diff -urNp linux-3.1.1/net/atm/proc.c linux-3.1.1/net/atm/proc.c
71555 +--- linux-3.1.1/net/atm/proc.c 2011-11-11 15:19:27.000000000 -0500
71556 ++++ linux-3.1.1/net/atm/proc.c 2011-11-16 18:39:08.000000000 -0500
71557 +@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
71558 + const struct k_atm_aal_stats *stats)
71559 + {
71560 + seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71561 +- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
71562 +- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
71563 +- atomic_read(&stats->rx_drop));
71564 ++ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71565 ++ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71566 ++ atomic_read_unchecked(&stats->rx_drop));
71567 + }
71568 +
71569 + static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71570 +diff -urNp linux-3.1.1/net/atm/resources.c linux-3.1.1/net/atm/resources.c
71571 +--- linux-3.1.1/net/atm/resources.c 2011-11-11 15:19:27.000000000 -0500
71572 ++++ linux-3.1.1/net/atm/resources.c 2011-11-16 18:39:08.000000000 -0500
71573 +@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
71574 + static void copy_aal_stats(struct k_atm_aal_stats *from,
71575 + struct atm_aal_stats *to)
71576 + {
71577 +-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71578 ++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71579 + __AAL_STAT_ITEMS
71580 + #undef __HANDLE_ITEM
71581 + }
71582 +@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
71583 + static void subtract_aal_stats(struct k_atm_aal_stats *from,
71584 + struct atm_aal_stats *to)
71585 + {
71586 +-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71587 ++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71588 + __AAL_STAT_ITEMS
71589 + #undef __HANDLE_ITEM
71590 + }
71591 +diff -urNp linux-3.1.1/net/batman-adv/hard-interface.c linux-3.1.1/net/batman-adv/hard-interface.c
71592 +--- linux-3.1.1/net/batman-adv/hard-interface.c 2011-11-11 15:19:27.000000000 -0500
71593 ++++ linux-3.1.1/net/batman-adv/hard-interface.c 2011-11-16 18:39:08.000000000 -0500
71594 +@@ -347,8 +347,8 @@ int hardif_enable_interface(struct hard_
71595 + hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
71596 + dev_add_pack(&hard_iface->batman_adv_ptype);
71597 +
71598 +- atomic_set(&hard_iface->seqno, 1);
71599 +- atomic_set(&hard_iface->frag_seqno, 1);
71600 ++ atomic_set_unchecked(&hard_iface->seqno, 1);
71601 ++ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
71602 + bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
71603 + hard_iface->net_dev->name);
71604 +
71605 +diff -urNp linux-3.1.1/net/batman-adv/routing.c linux-3.1.1/net/batman-adv/routing.c
71606 +--- linux-3.1.1/net/batman-adv/routing.c 2011-11-11 15:19:27.000000000 -0500
71607 ++++ linux-3.1.1/net/batman-adv/routing.c 2011-11-16 18:39:08.000000000 -0500
71608 +@@ -656,7 +656,7 @@ void receive_bat_packet(const struct eth
71609 + return;
71610 +
71611 + /* could be changed by schedule_own_packet() */
71612 +- if_incoming_seqno = atomic_read(&if_incoming->seqno);
71613 ++ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
71614 +
71615 + has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
71616 +
71617 +diff -urNp linux-3.1.1/net/batman-adv/send.c linux-3.1.1/net/batman-adv/send.c
71618 +--- linux-3.1.1/net/batman-adv/send.c 2011-11-11 15:19:27.000000000 -0500
71619 ++++ linux-3.1.1/net/batman-adv/send.c 2011-11-16 18:39:08.000000000 -0500
71620 +@@ -326,7 +326,7 @@ void schedule_own_packet(struct hard_ifa
71621 +
71622 + /* change sequence number to network order */
71623 + batman_packet->seqno =
71624 +- htonl((uint32_t)atomic_read(&hard_iface->seqno));
71625 ++ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
71626 +
71627 + batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
71628 + batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
71629 +@@ -343,7 +343,7 @@ void schedule_own_packet(struct hard_ifa
71630 + else
71631 + batman_packet->gw_flags = NO_FLAGS;
71632 +
71633 +- atomic_inc(&hard_iface->seqno);
71634 ++ atomic_inc_unchecked(&hard_iface->seqno);
71635 +
71636 + slide_own_bcast_window(hard_iface);
71637 + send_time = own_send_time(bat_priv);
71638 +diff -urNp linux-3.1.1/net/batman-adv/soft-interface.c linux-3.1.1/net/batman-adv/soft-interface.c
71639 +--- linux-3.1.1/net/batman-adv/soft-interface.c 2011-11-11 15:19:27.000000000 -0500
71640 ++++ linux-3.1.1/net/batman-adv/soft-interface.c 2011-11-16 18:39:08.000000000 -0500
71641 +@@ -632,7 +632,7 @@ static int interface_tx(struct sk_buff *
71642 +
71643 + /* set broadcast sequence number */
71644 + bcast_packet->seqno =
71645 +- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
71646 ++ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
71647 +
71648 + add_bcast_packet_to_list(bat_priv, skb, 1);
71649 +
71650 +@@ -824,7 +824,7 @@ struct net_device *softif_create(const c
71651 + atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
71652 +
71653 + atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
71654 +- atomic_set(&bat_priv->bcast_seqno, 1);
71655 ++ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
71656 + atomic_set(&bat_priv->ttvn, 0);
71657 + atomic_set(&bat_priv->tt_local_changes, 0);
71658 + atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
71659 +diff -urNp linux-3.1.1/net/batman-adv/types.h linux-3.1.1/net/batman-adv/types.h
71660 +--- linux-3.1.1/net/batman-adv/types.h 2011-11-11 15:19:27.000000000 -0500
71661 ++++ linux-3.1.1/net/batman-adv/types.h 2011-11-16 18:39:08.000000000 -0500
71662 +@@ -38,8 +38,8 @@ struct hard_iface {
71663 + int16_t if_num;
71664 + char if_status;
71665 + struct net_device *net_dev;
71666 +- atomic_t seqno;
71667 +- atomic_t frag_seqno;
71668 ++ atomic_unchecked_t seqno;
71669 ++ atomic_unchecked_t frag_seqno;
71670 + unsigned char *packet_buff;
71671 + int packet_len;
71672 + struct kobject *hardif_obj;
71673 +@@ -153,7 +153,7 @@ struct bat_priv {
71674 + atomic_t orig_interval; /* uint */
71675 + atomic_t hop_penalty; /* uint */
71676 + atomic_t log_level; /* uint */
71677 +- atomic_t bcast_seqno;
71678 ++ atomic_unchecked_t bcast_seqno;
71679 + atomic_t bcast_queue_left;
71680 + atomic_t batman_queue_left;
71681 + atomic_t ttvn; /* tranlation table version number */
71682 +diff -urNp linux-3.1.1/net/batman-adv/unicast.c linux-3.1.1/net/batman-adv/unicast.c
71683 +--- linux-3.1.1/net/batman-adv/unicast.c 2011-11-11 15:19:27.000000000 -0500
71684 ++++ linux-3.1.1/net/batman-adv/unicast.c 2011-11-16 18:39:08.000000000 -0500
71685 +@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, s
71686 + frag1->flags = UNI_FRAG_HEAD | large_tail;
71687 + frag2->flags = large_tail;
71688 +
71689 +- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
71690 ++ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
71691 + frag1->seqno = htons(seqno - 1);
71692 + frag2->seqno = htons(seqno);
71693 +
71694 +diff -urNp linux-3.1.1/net/bluetooth/hci_conn.c linux-3.1.1/net/bluetooth/hci_conn.c
71695 +--- linux-3.1.1/net/bluetooth/hci_conn.c 2011-11-11 15:19:27.000000000 -0500
71696 ++++ linux-3.1.1/net/bluetooth/hci_conn.c 2011-11-16 18:39:08.000000000 -0500
71697 +@@ -218,7 +218,7 @@ void hci_le_start_enc(struct hci_conn *c
71698 + cp.handle = cpu_to_le16(conn->handle);
71699 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
71700 + cp.ediv = ediv;
71701 +- memcpy(cp.rand, rand, sizeof(rand));
71702 ++ memcpy(cp.rand, rand, sizeof(cp.rand));
71703 +
71704 + hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
71705 + }
71706 +@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *c
71707 + memset(&cp, 0, sizeof(cp));
71708 +
71709 + cp.handle = cpu_to_le16(conn->handle);
71710 +- memcpy(cp.ltk, ltk, sizeof(ltk));
71711 ++ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
71712 +
71713 + hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
71714 + }
71715 +diff -urNp linux-3.1.1/net/bridge/br_multicast.c linux-3.1.1/net/bridge/br_multicast.c
71716 +--- linux-3.1.1/net/bridge/br_multicast.c 2011-11-11 15:19:27.000000000 -0500
71717 ++++ linux-3.1.1/net/bridge/br_multicast.c 2011-11-16 18:39:08.000000000 -0500
71718 +@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct
71719 + nexthdr = ip6h->nexthdr;
71720 + offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
71721 +
71722 +- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
71723 ++ if (nexthdr != IPPROTO_ICMPV6)
71724 + return 0;
71725 +
71726 + /* Okay, we found ICMPv6 header */
71727 +diff -urNp linux-3.1.1/net/bridge/netfilter/ebtables.c linux-3.1.1/net/bridge/netfilter/ebtables.c
71728 +--- linux-3.1.1/net/bridge/netfilter/ebtables.c 2011-11-11 15:19:27.000000000 -0500
71729 ++++ linux-3.1.1/net/bridge/netfilter/ebtables.c 2011-11-16 18:40:44.000000000 -0500
71730 +@@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *s
71731 + tmp.valid_hooks = t->table->valid_hooks;
71732 + }
71733 + mutex_unlock(&ebt_mutex);
71734 +- if (copy_to_user(user, &tmp, *len) != 0){
71735 ++ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
71736 + BUGPRINT("c2u Didn't work\n");
71737 + ret = -EFAULT;
71738 + break;
71739 +@@ -1781,6 +1781,8 @@ static int compat_copy_everything_to_use
71740 + int ret;
71741 + void __user *pos;
71742 +
71743 ++ pax_track_stack();
71744 ++
71745 + memset(&tinfo, 0, sizeof(tinfo));
71746 +
71747 + if (cmd == EBT_SO_GET_ENTRIES) {
71748 +diff -urNp linux-3.1.1/net/caif/caif_socket.c linux-3.1.1/net/caif/caif_socket.c
71749 +--- linux-3.1.1/net/caif/caif_socket.c 2011-11-11 15:19:27.000000000 -0500
71750 ++++ linux-3.1.1/net/caif/caif_socket.c 2011-11-16 18:39:08.000000000 -0500
71751 +@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
71752 + #ifdef CONFIG_DEBUG_FS
71753 + struct debug_fs_counter {
71754 + atomic_t caif_nr_socks;
71755 +- atomic_t caif_sock_create;
71756 +- atomic_t num_connect_req;
71757 +- atomic_t num_connect_resp;
71758 +- atomic_t num_connect_fail_resp;
71759 +- atomic_t num_disconnect;
71760 +- atomic_t num_remote_shutdown_ind;
71761 +- atomic_t num_tx_flow_off_ind;
71762 +- atomic_t num_tx_flow_on_ind;
71763 +- atomic_t num_rx_flow_off;
71764 +- atomic_t num_rx_flow_on;
71765 ++ atomic_unchecked_t caif_sock_create;
71766 ++ atomic_unchecked_t num_connect_req;
71767 ++ atomic_unchecked_t num_connect_resp;
71768 ++ atomic_unchecked_t num_connect_fail_resp;
71769 ++ atomic_unchecked_t num_disconnect;
71770 ++ atomic_unchecked_t num_remote_shutdown_ind;
71771 ++ atomic_unchecked_t num_tx_flow_off_ind;
71772 ++ atomic_unchecked_t num_tx_flow_on_ind;
71773 ++ atomic_unchecked_t num_rx_flow_off;
71774 ++ atomic_unchecked_t num_rx_flow_on;
71775 + };
71776 + static struct debug_fs_counter cnt;
71777 + #define dbfs_atomic_inc(v) atomic_inc_return(v)
71778 ++#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
71779 + #define dbfs_atomic_dec(v) atomic_dec_return(v)
71780 + #else
71781 + #define dbfs_atomic_inc(v) 0
71782 +@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct soc
71783 + atomic_read(&cf_sk->sk.sk_rmem_alloc),
71784 + sk_rcvbuf_lowwater(cf_sk));
71785 + set_rx_flow_off(cf_sk);
71786 +- dbfs_atomic_inc(&cnt.num_rx_flow_off);
71787 ++ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71788 + caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71789 + }
71790 +
71791 +@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct soc
71792 + set_rx_flow_off(cf_sk);
71793 + if (net_ratelimit())
71794 + pr_debug("sending flow OFF due to rmem_schedule\n");
71795 +- dbfs_atomic_inc(&cnt.num_rx_flow_off);
71796 ++ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
71797 + caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
71798 + }
71799 + skb->dev = NULL;
71800 +@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer
71801 + switch (flow) {
71802 + case CAIF_CTRLCMD_FLOW_ON_IND:
71803 + /* OK from modem to start sending again */
71804 +- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
71805 ++ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
71806 + set_tx_flow_on(cf_sk);
71807 + cf_sk->sk.sk_state_change(&cf_sk->sk);
71808 + break;
71809 +
71810 + case CAIF_CTRLCMD_FLOW_OFF_IND:
71811 + /* Modem asks us to shut up */
71812 +- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
71813 ++ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
71814 + set_tx_flow_off(cf_sk);
71815 + cf_sk->sk.sk_state_change(&cf_sk->sk);
71816 + break;
71817 +@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer
71818 + /* We're now connected */
71819 + caif_client_register_refcnt(&cf_sk->layer,
71820 + cfsk_hold, cfsk_put);
71821 +- dbfs_atomic_inc(&cnt.num_connect_resp);
71822 ++ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
71823 + cf_sk->sk.sk_state = CAIF_CONNECTED;
71824 + set_tx_flow_on(cf_sk);
71825 + cf_sk->sk.sk_state_change(&cf_sk->sk);
71826 +@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer
71827 +
71828 + case CAIF_CTRLCMD_INIT_FAIL_RSP:
71829 + /* Connect request failed */
71830 +- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
71831 ++ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
71832 + cf_sk->sk.sk_err = ECONNREFUSED;
71833 + cf_sk->sk.sk_state = CAIF_DISCONNECTED;
71834 + cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71835 +@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer
71836 +
71837 + case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
71838 + /* Modem has closed this connection, or device is down. */
71839 +- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
71840 ++ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
71841 + cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
71842 + cf_sk->sk.sk_err = ECONNRESET;
71843 + set_rx_flow_on(cf_sk);
71844 +@@ -297,7 +298,7 @@ static void caif_check_flow_release(stru
71845 + return;
71846 +
71847 + if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
71848 +- dbfs_atomic_inc(&cnt.num_rx_flow_on);
71849 ++ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
71850 + set_rx_flow_on(cf_sk);
71851 + caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
71852 + }
71853 +@@ -854,7 +855,7 @@ static int caif_connect(struct socket *s
71854 + /*ifindex = id of the interface.*/
71855 + cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
71856 +
71857 +- dbfs_atomic_inc(&cnt.num_connect_req);
71858 ++ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
71859 + cf_sk->layer.receive = caif_sktrecv_cb;
71860 +
71861 + err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
71862 +@@ -943,7 +944,7 @@ static int caif_release(struct socket *s
71863 + spin_unlock_bh(&sk->sk_receive_queue.lock);
71864 + sock->sk = NULL;
71865 +
71866 +- dbfs_atomic_inc(&cnt.num_disconnect);
71867 ++ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
71868 +
71869 + WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
71870 + if (cf_sk->debugfs_socket_dir != NULL)
71871 +@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net,
71872 + cf_sk->conn_req.protocol = protocol;
71873 + /* Increase the number of sockets created. */
71874 + dbfs_atomic_inc(&cnt.caif_nr_socks);
71875 +- num = dbfs_atomic_inc(&cnt.caif_sock_create);
71876 ++ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
71877 + #ifdef CONFIG_DEBUG_FS
71878 + if (!IS_ERR(debugfsdir)) {
71879 +
71880 +diff -urNp linux-3.1.1/net/caif/cfctrl.c linux-3.1.1/net/caif/cfctrl.c
71881 +--- linux-3.1.1/net/caif/cfctrl.c 2011-11-11 15:19:27.000000000 -0500
71882 ++++ linux-3.1.1/net/caif/cfctrl.c 2011-11-16 18:40:44.000000000 -0500
71883 +@@ -9,6 +9,7 @@
71884 + #include <linux/stddef.h>
71885 + #include <linux/spinlock.h>
71886 + #include <linux/slab.h>
71887 ++#include <linux/sched.h>
71888 + #include <net/caif/caif_layer.h>
71889 + #include <net/caif/cfpkt.h>
71890 + #include <net/caif/cfctrl.h>
71891 +@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
71892 + dev_info.id = 0xff;
71893 + memset(this, 0, sizeof(*this));
71894 + cfsrvl_init(&this->serv, 0, &dev_info, false);
71895 +- atomic_set(&this->req_seq_no, 1);
71896 +- atomic_set(&this->rsp_seq_no, 1);
71897 ++ atomic_set_unchecked(&this->req_seq_no, 1);
71898 ++ atomic_set_unchecked(&this->rsp_seq_no, 1);
71899 + this->serv.layer.receive = cfctrl_recv;
71900 + sprintf(this->serv.layer.name, "ctrl");
71901 + this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
71902 +@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfc
71903 + struct cfctrl_request_info *req)
71904 + {
71905 + spin_lock_bh(&ctrl->info_list_lock);
71906 +- atomic_inc(&ctrl->req_seq_no);
71907 +- req->sequence_no = atomic_read(&ctrl->req_seq_no);
71908 ++ atomic_inc_unchecked(&ctrl->req_seq_no);
71909 ++ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
71910 + list_add_tail(&req->list, &ctrl->list);
71911 + spin_unlock_bh(&ctrl->info_list_lock);
71912 + }
71913 +@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctr
71914 + if (p != first)
71915 + pr_warn("Requests are not received in order\n");
71916 +
71917 +- atomic_set(&ctrl->rsp_seq_no,
71918 ++ atomic_set_unchecked(&ctrl->rsp_seq_no,
71919 + p->sequence_no);
71920 + list_del(&p->list);
71921 + goto out;
71922 +@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *l
71923 + struct cfctrl *cfctrl = container_obj(layer);
71924 + struct cfctrl_request_info rsp, *req;
71925 +
71926 ++ pax_track_stack();
71927 +
71928 + cfpkt_extr_head(pkt, &cmdrsp, 1);
71929 + cmd = cmdrsp & CFCTRL_CMD_MASK;
71930 +diff -urNp linux-3.1.1/net/compat.c linux-3.1.1/net/compat.c
71931 +--- linux-3.1.1/net/compat.c 2011-11-11 15:19:27.000000000 -0500
71932 ++++ linux-3.1.1/net/compat.c 2011-11-16 18:39:08.000000000 -0500
71933 +@@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kms
71934 + __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
71935 + __get_user(kmsg->msg_flags, &umsg->msg_flags))
71936 + return -EFAULT;
71937 +- kmsg->msg_name = compat_ptr(tmp1);
71938 +- kmsg->msg_iov = compat_ptr(tmp2);
71939 +- kmsg->msg_control = compat_ptr(tmp3);
71940 ++ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
71941 ++ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
71942 ++ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
71943 + return 0;
71944 + }
71945 +
71946 +@@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *k
71947 +
71948 + if (kern_msg->msg_namelen) {
71949 + if (mode == VERIFY_READ) {
71950 +- int err = move_addr_to_kernel(kern_msg->msg_name,
71951 ++ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
71952 + kern_msg->msg_namelen,
71953 + kern_address);
71954 + if (err < 0)
71955 +@@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *k
71956 + kern_msg->msg_name = NULL;
71957 +
71958 + tot_len = iov_from_user_compat_to_kern(kern_iov,
71959 +- (struct compat_iovec __user *)kern_msg->msg_iov,
71960 ++ (struct compat_iovec __force_user *)kern_msg->msg_iov,
71961 + kern_msg->msg_iovlen);
71962 + if (tot_len >= 0)
71963 + kern_msg->msg_iov = kern_iov;
71964 +@@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *k
71965 +
71966 + #define CMSG_COMPAT_FIRSTHDR(msg) \
71967 + (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
71968 +- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
71969 ++ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
71970 + (struct compat_cmsghdr __user *)NULL)
71971 +
71972 + #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
71973 + ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
71974 + (ucmlen) <= (unsigned long) \
71975 + ((mhdr)->msg_controllen - \
71976 +- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
71977 ++ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
71978 +
71979 + static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
71980 + struct compat_cmsghdr __user *cmsg, int cmsg_len)
71981 + {
71982 + char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
71983 +- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
71984 ++ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
71985 + msg->msg_controllen)
71986 + return NULL;
71987 + return (struct compat_cmsghdr __user *)ptr;
71988 +@@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
71989 + {
71990 + struct compat_timeval ctv;
71991 + struct compat_timespec cts[3];
71992 +- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
71993 ++ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
71994 + struct compat_cmsghdr cmhdr;
71995 + int cmlen;
71996 +
71997 +@@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
71998 +
71999 + void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
72000 + {
72001 +- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72002 ++ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72003 + int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
72004 + int fdnum = scm->fp->count;
72005 + struct file **fp = scm->fp->fp;
72006 +@@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct so
72007 + return -EFAULT;
72008 + old_fs = get_fs();
72009 + set_fs(KERNEL_DS);
72010 +- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
72011 ++ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
72012 + set_fs(old_fs);
72013 +
72014 + return err;
72015 +@@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct so
72016 + len = sizeof(ktime);
72017 + old_fs = get_fs();
72018 + set_fs(KERNEL_DS);
72019 +- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
72020 ++ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
72021 + set_fs(old_fs);
72022 +
72023 + if (!err) {
72024 +@@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *so
72025 + case MCAST_JOIN_GROUP:
72026 + case MCAST_LEAVE_GROUP:
72027 + {
72028 +- struct compat_group_req __user *gr32 = (void *)optval;
72029 ++ struct compat_group_req __user *gr32 = (void __user *)optval;
72030 + struct group_req __user *kgr =
72031 + compat_alloc_user_space(sizeof(struct group_req));
72032 + u32 interface;
72033 +@@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *so
72034 + case MCAST_BLOCK_SOURCE:
72035 + case MCAST_UNBLOCK_SOURCE:
72036 + {
72037 +- struct compat_group_source_req __user *gsr32 = (void *)optval;
72038 ++ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
72039 + struct group_source_req __user *kgsr = compat_alloc_user_space(
72040 + sizeof(struct group_source_req));
72041 + u32 interface;
72042 +@@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *so
72043 + }
72044 + case MCAST_MSFILTER:
72045 + {
72046 +- struct compat_group_filter __user *gf32 = (void *)optval;
72047 ++ struct compat_group_filter __user *gf32 = (void __user *)optval;
72048 + struct group_filter __user *kgf;
72049 + u32 interface, fmode, numsrc;
72050 +
72051 +@@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *so
72052 + char __user *optval, int __user *optlen,
72053 + int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
72054 + {
72055 +- struct compat_group_filter __user *gf32 = (void *)optval;
72056 ++ struct compat_group_filter __user *gf32 = (void __user *)optval;
72057 + struct group_filter __user *kgf;
72058 + int __user *koptlen;
72059 + u32 interface, fmode, numsrc;
72060 +diff -urNp linux-3.1.1/net/core/datagram.c linux-3.1.1/net/core/datagram.c
72061 +--- linux-3.1.1/net/core/datagram.c 2011-11-11 15:19:27.000000000 -0500
72062 ++++ linux-3.1.1/net/core/datagram.c 2011-11-16 18:39:08.000000000 -0500
72063 +@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
72064 + }
72065 +
72066 + kfree_skb(skb);
72067 +- atomic_inc(&sk->sk_drops);
72068 ++ atomic_inc_unchecked(&sk->sk_drops);
72069 + sk_mem_reclaim_partial(sk);
72070 +
72071 + return err;
72072 +diff -urNp linux-3.1.1/net/core/dev.c linux-3.1.1/net/core/dev.c
72073 +--- linux-3.1.1/net/core/dev.c 2011-11-11 15:19:27.000000000 -0500
72074 ++++ linux-3.1.1/net/core/dev.c 2011-11-16 18:40:44.000000000 -0500
72075 +@@ -1135,10 +1135,14 @@ void dev_load(struct net *net, const cha
72076 + if (no_module && capable(CAP_NET_ADMIN))
72077 + no_module = request_module("netdev-%s", name);
72078 + if (no_module && capable(CAP_SYS_MODULE)) {
72079 ++#ifdef CONFIG_GRKERNSEC_MODHARDEN
72080 ++ ___request_module(true, "grsec_modharden_netdev", "%s", name);
72081 ++#else
72082 + if (!request_module("%s", name))
72083 + pr_err("Loading kernel module for a network device "
72084 + "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
72085 + "instead\n", name);
72086 ++#endif
72087 + }
72088 + }
72089 + EXPORT_SYMBOL(dev_load);
72090 +@@ -1977,7 +1981,7 @@ static int illegal_highdma(struct net_de
72091 +
72092 + struct dev_gso_cb {
72093 + void (*destructor)(struct sk_buff *skb);
72094 +-};
72095 ++} __no_const;
72096 +
72097 + #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
72098 +
72099 +@@ -2930,7 +2934,7 @@ int netif_rx_ni(struct sk_buff *skb)
72100 + }
72101 + EXPORT_SYMBOL(netif_rx_ni);
72102 +
72103 +-static void net_tx_action(struct softirq_action *h)
72104 ++static void net_tx_action(void)
72105 + {
72106 + struct softnet_data *sd = &__get_cpu_var(softnet_data);
72107 +
72108 +@@ -3779,7 +3783,7 @@ void netif_napi_del(struct napi_struct *
72109 + }
72110 + EXPORT_SYMBOL(netif_napi_del);
72111 +
72112 +-static void net_rx_action(struct softirq_action *h)
72113 ++static void net_rx_action(void)
72114 + {
72115 + struct softnet_data *sd = &__get_cpu_var(softnet_data);
72116 + unsigned long time_limit = jiffies + 2;
72117 +diff -urNp linux-3.1.1/net/core/flow.c linux-3.1.1/net/core/flow.c
72118 +--- linux-3.1.1/net/core/flow.c 2011-11-11 15:19:27.000000000 -0500
72119 ++++ linux-3.1.1/net/core/flow.c 2011-11-16 18:39:08.000000000 -0500
72120 +@@ -61,7 +61,7 @@ struct flow_cache {
72121 + struct timer_list rnd_timer;
72122 + };
72123 +
72124 +-atomic_t flow_cache_genid = ATOMIC_INIT(0);
72125 ++atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
72126 + EXPORT_SYMBOL(flow_cache_genid);
72127 + static struct flow_cache flow_cache_global;
72128 + static struct kmem_cache *flow_cachep __read_mostly;
72129 +@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsig
72130 +
72131 + static int flow_entry_valid(struct flow_cache_entry *fle)
72132 + {
72133 +- if (atomic_read(&flow_cache_genid) != fle->genid)
72134 ++ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
72135 + return 0;
72136 + if (fle->object && !fle->object->ops->check(fle->object))
72137 + return 0;
72138 +@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const
72139 + hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
72140 + fcp->hash_count++;
72141 + }
72142 +- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
72143 ++ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
72144 + flo = fle->object;
72145 + if (!flo)
72146 + goto ret_object;
72147 +@@ -280,7 +280,7 @@ nocache:
72148 + }
72149 + flo = resolver(net, key, family, dir, flo, ctx);
72150 + if (fle) {
72151 +- fle->genid = atomic_read(&flow_cache_genid);
72152 ++ fle->genid = atomic_read_unchecked(&flow_cache_genid);
72153 + if (!IS_ERR(flo))
72154 + fle->object = flo;
72155 + else
72156 +diff -urNp linux-3.1.1/net/core/iovec.c linux-3.1.1/net/core/iovec.c
72157 +--- linux-3.1.1/net/core/iovec.c 2011-11-11 15:19:27.000000000 -0500
72158 ++++ linux-3.1.1/net/core/iovec.c 2011-11-16 18:39:08.000000000 -0500
72159 +@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struc
72160 + if (m->msg_namelen) {
72161 + if (mode == VERIFY_READ) {
72162 + void __user *namep;
72163 +- namep = (void __user __force *) m->msg_name;
72164 ++ namep = (void __force_user *) m->msg_name;
72165 + err = move_addr_to_kernel(namep, m->msg_namelen,
72166 + address);
72167 + if (err < 0)
72168 +@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struc
72169 + }
72170 +
72171 + size = m->msg_iovlen * sizeof(struct iovec);
72172 +- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72173 ++ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72174 + return -EFAULT;
72175 +
72176 + m->msg_iov = iov;
72177 +diff -urNp linux-3.1.1/net/core/rtnetlink.c linux-3.1.1/net/core/rtnetlink.c
72178 +--- linux-3.1.1/net/core/rtnetlink.c 2011-11-11 15:19:27.000000000 -0500
72179 ++++ linux-3.1.1/net/core/rtnetlink.c 2011-11-16 18:39:08.000000000 -0500
72180 +@@ -57,7 +57,7 @@ struct rtnl_link {
72181 + rtnl_doit_func doit;
72182 + rtnl_dumpit_func dumpit;
72183 + rtnl_calcit_func calcit;
72184 +-};
72185 ++} __no_const;
72186 +
72187 + static DEFINE_MUTEX(rtnl_mutex);
72188 + static u16 min_ifinfo_dump_size;
72189 +diff -urNp linux-3.1.1/net/core/scm.c linux-3.1.1/net/core/scm.c
72190 +--- linux-3.1.1/net/core/scm.c 2011-11-11 15:19:27.000000000 -0500
72191 ++++ linux-3.1.1/net/core/scm.c 2011-11-16 18:39:08.000000000 -0500
72192 +@@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send);
72193 + int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72194 + {
72195 + struct cmsghdr __user *cm
72196 +- = (__force struct cmsghdr __user *)msg->msg_control;
72197 ++ = (struct cmsghdr __force_user *)msg->msg_control;
72198 + struct cmsghdr cmhdr;
72199 + int cmlen = CMSG_LEN(len);
72200 + int err;
72201 +@@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int le
72202 + err = -EFAULT;
72203 + if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72204 + goto out;
72205 +- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72206 ++ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72207 + goto out;
72208 + cmlen = CMSG_SPACE(len);
72209 + if (msg->msg_controllen < cmlen)
72210 +@@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg);
72211 + void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72212 + {
72213 + struct cmsghdr __user *cm
72214 +- = (__force struct cmsghdr __user*)msg->msg_control;
72215 ++ = (struct cmsghdr __force_user *)msg->msg_control;
72216 +
72217 + int fdmax = 0;
72218 + int fdnum = scm->fp->count;
72219 +@@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg,
72220 + if (fdnum < fdmax)
72221 + fdmax = fdnum;
72222 +
72223 +- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72224 ++ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72225 + i++, cmfptr++)
72226 + {
72227 + int new_fd;
72228 +diff -urNp linux-3.1.1/net/core/skbuff.c linux-3.1.1/net/core/skbuff.c
72229 +--- linux-3.1.1/net/core/skbuff.c 2011-11-11 15:19:27.000000000 -0500
72230 ++++ linux-3.1.1/net/core/skbuff.c 2011-11-16 18:40:44.000000000 -0500
72231 +@@ -1650,6 +1650,8 @@ int skb_splice_bits(struct sk_buff *skb,
72232 + struct sock *sk = skb->sk;
72233 + int ret = 0;
72234 +
72235 ++ pax_track_stack();
72236 ++
72237 + if (splice_grow_spd(pipe, &spd))
72238 + return -ENOMEM;
72239 +
72240 +diff -urNp linux-3.1.1/net/core/sock.c linux-3.1.1/net/core/sock.c
72241 +--- linux-3.1.1/net/core/sock.c 2011-11-11 15:19:27.000000000 -0500
72242 ++++ linux-3.1.1/net/core/sock.c 2011-11-16 18:40:44.000000000 -0500
72243 +@@ -293,7 +293,7 @@ int sock_queue_rcv_skb(struct sock *sk,
72244 + */
72245 + if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
72246 + (unsigned)sk->sk_rcvbuf) {
72247 +- atomic_inc(&sk->sk_drops);
72248 ++ atomic_inc_unchecked(&sk->sk_drops);
72249 + trace_sock_rcvqueue_full(sk, skb);
72250 + return -ENOMEM;
72251 + }
72252 +@@ -303,7 +303,7 @@ int sock_queue_rcv_skb(struct sock *sk,
72253 + return err;
72254 +
72255 + if (!sk_rmem_schedule(sk, skb->truesize)) {
72256 +- atomic_inc(&sk->sk_drops);
72257 ++ atomic_inc_unchecked(&sk->sk_drops);
72258 + return -ENOBUFS;
72259 + }
72260 +
72261 +@@ -323,7 +323,7 @@ int sock_queue_rcv_skb(struct sock *sk,
72262 + skb_dst_force(skb);
72263 +
72264 + spin_lock_irqsave(&list->lock, flags);
72265 +- skb->dropcount = atomic_read(&sk->sk_drops);
72266 ++ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72267 + __skb_queue_tail(list, skb);
72268 + spin_unlock_irqrestore(&list->lock, flags);
72269 +
72270 +@@ -343,7 +343,7 @@ int sk_receive_skb(struct sock *sk, stru
72271 + skb->dev = NULL;
72272 +
72273 + if (sk_rcvqueues_full(sk, skb)) {
72274 +- atomic_inc(&sk->sk_drops);
72275 ++ atomic_inc_unchecked(&sk->sk_drops);
72276 + goto discard_and_relse;
72277 + }
72278 + if (nested)
72279 +@@ -361,7 +361,7 @@ int sk_receive_skb(struct sock *sk, stru
72280 + mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
72281 + } else if (sk_add_backlog(sk, skb)) {
72282 + bh_unlock_sock(sk);
72283 +- atomic_inc(&sk->sk_drops);
72284 ++ atomic_inc_unchecked(&sk->sk_drops);
72285 + goto discard_and_relse;
72286 + }
72287 +
72288 +@@ -924,7 +924,7 @@ int sock_getsockopt(struct socket *sock,
72289 + if (len > sizeof(peercred))
72290 + len = sizeof(peercred);
72291 + cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
72292 +- if (copy_to_user(optval, &peercred, len))
72293 ++ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
72294 + return -EFAULT;
72295 + goto lenout;
72296 + }
72297 +@@ -937,7 +937,7 @@ int sock_getsockopt(struct socket *sock,
72298 + return -ENOTCONN;
72299 + if (lv < len)
72300 + return -EINVAL;
72301 +- if (copy_to_user(optval, address, len))
72302 ++ if (len > sizeof(address) || copy_to_user(optval, address, len))
72303 + return -EFAULT;
72304 + goto lenout;
72305 + }
72306 +@@ -970,7 +970,7 @@ int sock_getsockopt(struct socket *sock,
72307 +
72308 + if (len > lv)
72309 + len = lv;
72310 +- if (copy_to_user(optval, &v, len))
72311 ++ if (len > sizeof(v) || copy_to_user(optval, &v, len))
72312 + return -EFAULT;
72313 + lenout:
72314 + if (put_user(len, optlen))
72315 +@@ -2029,7 +2029,7 @@ void sock_init_data(struct socket *sock,
72316 + */
72317 + smp_wmb();
72318 + atomic_set(&sk->sk_refcnt, 1);
72319 +- atomic_set(&sk->sk_drops, 0);
72320 ++ atomic_set_unchecked(&sk->sk_drops, 0);
72321 + }
72322 + EXPORT_SYMBOL(sock_init_data);
72323 +
72324 +diff -urNp linux-3.1.1/net/decnet/sysctl_net_decnet.c linux-3.1.1/net/decnet/sysctl_net_decnet.c
72325 +--- linux-3.1.1/net/decnet/sysctl_net_decnet.c 2011-11-11 15:19:27.000000000 -0500
72326 ++++ linux-3.1.1/net/decnet/sysctl_net_decnet.c 2011-11-16 18:39:08.000000000 -0500
72327 +@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_t
72328 +
72329 + if (len > *lenp) len = *lenp;
72330 +
72331 +- if (copy_to_user(buffer, addr, len))
72332 ++ if (len > sizeof addr || copy_to_user(buffer, addr, len))
72333 + return -EFAULT;
72334 +
72335 + *lenp = len;
72336 +@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table
72337 +
72338 + if (len > *lenp) len = *lenp;
72339 +
72340 +- if (copy_to_user(buffer, devname, len))
72341 ++ if (len > sizeof devname || copy_to_user(buffer, devname, len))
72342 + return -EFAULT;
72343 +
72344 + *lenp = len;
72345 +diff -urNp linux-3.1.1/net/econet/Kconfig linux-3.1.1/net/econet/Kconfig
72346 +--- linux-3.1.1/net/econet/Kconfig 2011-11-11 15:19:27.000000000 -0500
72347 ++++ linux-3.1.1/net/econet/Kconfig 2011-11-16 18:40:44.000000000 -0500
72348 +@@ -4,7 +4,7 @@
72349 +
72350 + config ECONET
72351 + tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
72352 +- depends on EXPERIMENTAL && INET
72353 ++ depends on EXPERIMENTAL && INET && BROKEN
72354 + ---help---
72355 + Econet is a fairly old and slow networking protocol mainly used by
72356 + Acorn computers to access file and print servers. It uses native
72357 +diff -urNp linux-3.1.1/net/ipv4/fib_frontend.c linux-3.1.1/net/ipv4/fib_frontend.c
72358 +--- linux-3.1.1/net/ipv4/fib_frontend.c 2011-11-11 15:19:27.000000000 -0500
72359 ++++ linux-3.1.1/net/ipv4/fib_frontend.c 2011-11-16 18:39:08.000000000 -0500
72360 +@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct not
72361 + #ifdef CONFIG_IP_ROUTE_MULTIPATH
72362 + fib_sync_up(dev);
72363 + #endif
72364 +- atomic_inc(&net->ipv4.dev_addr_genid);
72365 ++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72366 + rt_cache_flush(dev_net(dev), -1);
72367 + break;
72368 + case NETDEV_DOWN:
72369 + fib_del_ifaddr(ifa, NULL);
72370 +- atomic_inc(&net->ipv4.dev_addr_genid);
72371 ++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72372 + if (ifa->ifa_dev->ifa_list == NULL) {
72373 + /* Last address was deleted from this interface.
72374 + * Disable IP.
72375 +@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notif
72376 + #ifdef CONFIG_IP_ROUTE_MULTIPATH
72377 + fib_sync_up(dev);
72378 + #endif
72379 +- atomic_inc(&net->ipv4.dev_addr_genid);
72380 ++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72381 + rt_cache_flush(dev_net(dev), -1);
72382 + break;
72383 + case NETDEV_DOWN:
72384 +diff -urNp linux-3.1.1/net/ipv4/fib_semantics.c linux-3.1.1/net/ipv4/fib_semantics.c
72385 +--- linux-3.1.1/net/ipv4/fib_semantics.c 2011-11-11 15:19:27.000000000 -0500
72386 ++++ linux-3.1.1/net/ipv4/fib_semantics.c 2011-11-16 18:39:08.000000000 -0500
72387 +@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct n
72388 + nh->nh_saddr = inet_select_addr(nh->nh_dev,
72389 + nh->nh_gw,
72390 + nh->nh_parent->fib_scope);
72391 +- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
72392 ++ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
72393 +
72394 + return nh->nh_saddr;
72395 + }
72396 +diff -urNp linux-3.1.1/net/ipv4/inet_diag.c linux-3.1.1/net/ipv4/inet_diag.c
72397 +--- linux-3.1.1/net/ipv4/inet_diag.c 2011-11-11 15:19:27.000000000 -0500
72398 ++++ linux-3.1.1/net/ipv4/inet_diag.c 2011-11-16 18:40:44.000000000 -0500
72399 +@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
72400 + r->idiag_retrans = 0;
72401 +
72402 + r->id.idiag_if = sk->sk_bound_dev_if;
72403 ++
72404 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
72405 ++ r->id.idiag_cookie[0] = 0;
72406 ++ r->id.idiag_cookie[1] = 0;
72407 ++#else
72408 + r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
72409 + r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
72410 ++#endif
72411 +
72412 + r->id.idiag_sport = inet->inet_sport;
72413 + r->id.idiag_dport = inet->inet_dport;
72414 +@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
72415 + r->idiag_family = tw->tw_family;
72416 + r->idiag_retrans = 0;
72417 + r->id.idiag_if = tw->tw_bound_dev_if;
72418 ++
72419 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
72420 ++ r->id.idiag_cookie[0] = 0;
72421 ++ r->id.idiag_cookie[1] = 0;
72422 ++#else
72423 + r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
72424 + r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
72425 ++#endif
72426 ++
72427 + r->id.idiag_sport = tw->tw_sport;
72428 + r->id.idiag_dport = tw->tw_dport;
72429 + r->id.idiag_src[0] = tw->tw_rcv_saddr;
72430 +@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
72431 + if (sk == NULL)
72432 + goto unlock;
72433 +
72434 ++#ifndef CONFIG_GRKERNSEC_HIDESYM
72435 + err = -ESTALE;
72436 + if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
72437 + req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
72438 + ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
72439 + (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
72440 + goto out;
72441 ++#endif
72442 +
72443 + err = -ENOMEM;
72444 + rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
72445 +@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
72446 + r->idiag_retrans = req->retrans;
72447 +
72448 + r->id.idiag_if = sk->sk_bound_dev_if;
72449 ++
72450 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
72451 ++ r->id.idiag_cookie[0] = 0;
72452 ++ r->id.idiag_cookie[1] = 0;
72453 ++#else
72454 + r->id.idiag_cookie[0] = (u32)(unsigned long)req;
72455 + r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
72456 ++#endif
72457 +
72458 + tmo = req->expires - jiffies;
72459 + if (tmo < 0)
72460 +diff -urNp linux-3.1.1/net/ipv4/inet_hashtables.c linux-3.1.1/net/ipv4/inet_hashtables.c
72461 +--- linux-3.1.1/net/ipv4/inet_hashtables.c 2011-11-11 15:19:27.000000000 -0500
72462 ++++ linux-3.1.1/net/ipv4/inet_hashtables.c 2011-11-16 18:40:44.000000000 -0500
72463 +@@ -18,12 +18,15 @@
72464 + #include <linux/sched.h>
72465 + #include <linux/slab.h>
72466 + #include <linux/wait.h>
72467 ++#include <linux/security.h>
72468 +
72469 + #include <net/inet_connection_sock.h>
72470 + #include <net/inet_hashtables.h>
72471 + #include <net/secure_seq.h>
72472 + #include <net/ip.h>
72473 +
72474 ++extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
72475 ++
72476 + /*
72477 + * Allocate and initialize a new local port bind bucket.
72478 + * The bindhash mutex for snum's hash chain must be held here.
72479 +@@ -530,6 +533,8 @@ ok:
72480 + twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
72481 + spin_unlock(&head->lock);
72482 +
72483 ++ gr_update_task_in_ip_table(current, inet_sk(sk));
72484 ++
72485 + if (tw) {
72486 + inet_twsk_deschedule(tw, death_row);
72487 + while (twrefcnt) {
72488 +diff -urNp linux-3.1.1/net/ipv4/inetpeer.c linux-3.1.1/net/ipv4/inetpeer.c
72489 +--- linux-3.1.1/net/ipv4/inetpeer.c 2011-11-11 15:19:27.000000000 -0500
72490 ++++ linux-3.1.1/net/ipv4/inetpeer.c 2011-11-16 19:18:22.000000000 -0500
72491 +@@ -400,6 +400,8 @@ struct inet_peer *inet_getpeer(const str
72492 + unsigned int sequence;
72493 + int invalidated, gccnt = 0;
72494 +
72495 ++ pax_track_stack();
72496 ++
72497 + /* Attempt a lockless lookup first.
72498 + * Because of a concurrent writer, we might not find an existing entry.
72499 + */
72500 +@@ -436,8 +438,8 @@ relookup:
72501 + if (p) {
72502 + p->daddr = *daddr;
72503 + atomic_set(&p->refcnt, 1);
72504 +- atomic_set(&p->rid, 0);
72505 +- atomic_set(&p->ip_id_count,
72506 ++ atomic_set_unchecked(&p->rid, 0);
72507 ++ atomic_set_unchecked(&p->ip_id_count,
72508 + (daddr->family == AF_INET) ?
72509 + secure_ip_id(daddr->addr.a4) :
72510 + secure_ipv6_id(daddr->addr.a6));
72511 +diff -urNp linux-3.1.1/net/ipv4/ipconfig.c linux-3.1.1/net/ipv4/ipconfig.c
72512 +--- linux-3.1.1/net/ipv4/ipconfig.c 2011-11-11 15:19:27.000000000 -0500
72513 ++++ linux-3.1.1/net/ipv4/ipconfig.c 2011-11-16 18:39:08.000000000 -0500
72514 +@@ -313,7 +313,7 @@ static int __init ic_devinet_ioctl(unsig
72515 +
72516 + mm_segment_t oldfs = get_fs();
72517 + set_fs(get_ds());
72518 +- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72519 ++ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72520 + set_fs(oldfs);
72521 + return res;
72522 + }
72523 +@@ -324,7 +324,7 @@ static int __init ic_dev_ioctl(unsigned
72524 +
72525 + mm_segment_t oldfs = get_fs();
72526 + set_fs(get_ds());
72527 +- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72528 ++ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72529 + set_fs(oldfs);
72530 + return res;
72531 + }
72532 +@@ -335,7 +335,7 @@ static int __init ic_route_ioctl(unsigne
72533 +
72534 + mm_segment_t oldfs = get_fs();
72535 + set_fs(get_ds());
72536 +- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
72537 ++ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
72538 + set_fs(oldfs);
72539 + return res;
72540 + }
72541 +diff -urNp linux-3.1.1/net/ipv4/ip_fragment.c linux-3.1.1/net/ipv4/ip_fragment.c
72542 +--- linux-3.1.1/net/ipv4/ip_fragment.c 2011-11-11 15:19:27.000000000 -0500
72543 ++++ linux-3.1.1/net/ipv4/ip_fragment.c 2011-11-16 18:39:08.000000000 -0500
72544 +@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct
72545 + return 0;
72546 +
72547 + start = qp->rid;
72548 +- end = atomic_inc_return(&peer->rid);
72549 ++ end = atomic_inc_return_unchecked(&peer->rid);
72550 + qp->rid = end;
72551 +
72552 + rc = qp->q.fragments && (end - start) > max;
72553 +diff -urNp linux-3.1.1/net/ipv4/ip_sockglue.c linux-3.1.1/net/ipv4/ip_sockglue.c
72554 +--- linux-3.1.1/net/ipv4/ip_sockglue.c 2011-11-11 15:19:27.000000000 -0500
72555 ++++ linux-3.1.1/net/ipv4/ip_sockglue.c 2011-11-16 18:40:44.000000000 -0500
72556 +@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock
72557 + int val;
72558 + int len;
72559 +
72560 ++ pax_track_stack();
72561 ++
72562 + if (level != SOL_IP)
72563 + return -EOPNOTSUPP;
72564 +
72565 +@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock
72566 + len = min_t(unsigned int, len, opt->optlen);
72567 + if (put_user(len, optlen))
72568 + return -EFAULT;
72569 +- if (copy_to_user(optval, opt->__data, len))
72570 ++ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
72571 ++ copy_to_user(optval, opt->__data, len))
72572 + return -EFAULT;
72573 + return 0;
72574 + }
72575 +@@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock
72576 + if (sk->sk_type != SOCK_STREAM)
72577 + return -ENOPROTOOPT;
72578 +
72579 +- msg.msg_control = optval;
72580 ++ msg.msg_control = (void __force_kernel *)optval;
72581 + msg.msg_controllen = len;
72582 + msg.msg_flags = flags;
72583 +
72584 +diff -urNp linux-3.1.1/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-3.1.1/net/ipv4/netfilter/nf_nat_snmp_basic.c
72585 +--- linux-3.1.1/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-11-11 15:19:27.000000000 -0500
72586 ++++ linux-3.1.1/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-11-16 18:39:08.000000000 -0500
72587 +@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
72588 +
72589 + *len = 0;
72590 +
72591 +- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
72592 ++ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
72593 + if (*octets == NULL) {
72594 + if (net_ratelimit())
72595 + pr_notice("OOM in bsalg (%d)\n", __LINE__);
72596 +diff -urNp linux-3.1.1/net/ipv4/ping.c linux-3.1.1/net/ipv4/ping.c
72597 +--- linux-3.1.1/net/ipv4/ping.c 2011-11-11 15:19:27.000000000 -0500
72598 ++++ linux-3.1.1/net/ipv4/ping.c 2011-11-16 18:39:08.000000000 -0500
72599 +@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock
72600 + sk_rmem_alloc_get(sp),
72601 + 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72602 + atomic_read(&sp->sk_refcnt), sp,
72603 +- atomic_read(&sp->sk_drops), len);
72604 ++ atomic_read_unchecked(&sp->sk_drops), len);
72605 + }
72606 +
72607 + static int ping_seq_show(struct seq_file *seq, void *v)
72608 +diff -urNp linux-3.1.1/net/ipv4/raw.c linux-3.1.1/net/ipv4/raw.c
72609 +--- linux-3.1.1/net/ipv4/raw.c 2011-11-11 15:19:27.000000000 -0500
72610 ++++ linux-3.1.1/net/ipv4/raw.c 2011-11-17 18:58:40.000000000 -0500
72611 +@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
72612 + int raw_rcv(struct sock *sk, struct sk_buff *skb)
72613 + {
72614 + if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
72615 +- atomic_inc(&sk->sk_drops);
72616 ++ atomic_inc_unchecked(&sk->sk_drops);
72617 + kfree_skb(skb);
72618 + return NET_RX_DROP;
72619 + }
72620 +@@ -737,16 +737,20 @@ static int raw_init(struct sock *sk)
72621 +
72622 + static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
72623 + {
72624 ++ struct icmp_filter filter;
72625 ++
72626 + if (optlen > sizeof(struct icmp_filter))
72627 + optlen = sizeof(struct icmp_filter);
72628 +- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
72629 ++ if (copy_from_user(&filter, optval, optlen))
72630 + return -EFAULT;
72631 ++ raw_sk(sk)->filter = filter;
72632 + return 0;
72633 + }
72634 +
72635 + static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
72636 + {
72637 + int len, ret = -EFAULT;
72638 ++ struct icmp_filter filter;
72639 +
72640 + if (get_user(len, optlen))
72641 + goto out;
72642 +@@ -756,8 +760,8 @@ static int raw_geticmpfilter(struct sock
72643 + if (len > sizeof(struct icmp_filter))
72644 + len = sizeof(struct icmp_filter);
72645 + ret = -EFAULT;
72646 +- if (put_user(len, optlen) ||
72647 +- copy_to_user(optval, &raw_sk(sk)->filter, len))
72648 ++ filter = raw_sk(sk)->filter;
72649 ++ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
72650 + goto out;
72651 + ret = 0;
72652 + out: return ret;
72653 +@@ -985,7 +989,13 @@ static void raw_sock_seq_show(struct seq
72654 + sk_wmem_alloc_get(sp),
72655 + sk_rmem_alloc_get(sp),
72656 + 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72657 +- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72658 ++ atomic_read(&sp->sk_refcnt),
72659 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
72660 ++ NULL,
72661 ++#else
72662 ++ sp,
72663 ++#endif
72664 ++ atomic_read_unchecked(&sp->sk_drops));
72665 + }
72666 +
72667 + static int raw_seq_show(struct seq_file *seq, void *v)
72668 +diff -urNp linux-3.1.1/net/ipv4/route.c linux-3.1.1/net/ipv4/route.c
72669 +--- linux-3.1.1/net/ipv4/route.c 2011-11-11 15:19:27.000000000 -0500
72670 ++++ linux-3.1.1/net/ipv4/route.c 2011-11-16 18:39:08.000000000 -0500
72671 +@@ -308,7 +308,7 @@ static inline unsigned int rt_hash(__be3
72672 +
72673 + static inline int rt_genid(struct net *net)
72674 + {
72675 +- return atomic_read(&net->ipv4.rt_genid);
72676 ++ return atomic_read_unchecked(&net->ipv4.rt_genid);
72677 + }
72678 +
72679 + #ifdef CONFIG_PROC_FS
72680 +@@ -837,7 +837,7 @@ static void rt_cache_invalidate(struct n
72681 + unsigned char shuffle;
72682 +
72683 + get_random_bytes(&shuffle, sizeof(shuffle));
72684 +- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
72685 ++ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
72686 + }
72687 +
72688 + /*
72689 +@@ -2872,7 +2872,7 @@ static int rt_fill_info(struct net *net,
72690 + error = rt->dst.error;
72691 + if (peer) {
72692 + inet_peer_refcheck(rt->peer);
72693 +- id = atomic_read(&peer->ip_id_count) & 0xffff;
72694 ++ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
72695 + if (peer->tcp_ts_stamp) {
72696 + ts = peer->tcp_ts;
72697 + tsage = get_seconds() - peer->tcp_ts_stamp;
72698 +diff -urNp linux-3.1.1/net/ipv4/tcp.c linux-3.1.1/net/ipv4/tcp.c
72699 +--- linux-3.1.1/net/ipv4/tcp.c 2011-11-11 15:19:27.000000000 -0500
72700 ++++ linux-3.1.1/net/ipv4/tcp.c 2011-11-16 18:40:44.000000000 -0500
72701 +@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock
72702 + int val;
72703 + int err = 0;
72704 +
72705 ++ pax_track_stack();
72706 ++
72707 + /* These are data/string values, all the others are ints */
72708 + switch (optname) {
72709 + case TCP_CONGESTION: {
72710 +@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock
72711 + struct tcp_sock *tp = tcp_sk(sk);
72712 + int val, len;
72713 +
72714 ++ pax_track_stack();
72715 ++
72716 + if (get_user(len, optlen))
72717 + return -EFAULT;
72718 +
72719 +diff -urNp linux-3.1.1/net/ipv4/tcp_ipv4.c linux-3.1.1/net/ipv4/tcp_ipv4.c
72720 +--- linux-3.1.1/net/ipv4/tcp_ipv4.c 2011-11-11 15:19:27.000000000 -0500
72721 ++++ linux-3.1.1/net/ipv4/tcp_ipv4.c 2011-11-16 18:40:44.000000000 -0500
72722 +@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
72723 + int sysctl_tcp_low_latency __read_mostly;
72724 + EXPORT_SYMBOL(sysctl_tcp_low_latency);
72725 +
72726 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72727 ++extern int grsec_enable_blackhole;
72728 ++#endif
72729 +
72730 + #ifdef CONFIG_TCP_MD5SIG
72731 + static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
72732 +@@ -1622,6 +1625,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
72733 + return 0;
72734 +
72735 + reset:
72736 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72737 ++ if (!grsec_enable_blackhole)
72738 ++#endif
72739 + tcp_v4_send_reset(rsk, skb);
72740 + discard:
72741 + kfree_skb(skb);
72742 +@@ -1684,12 +1690,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
72743 + TCP_SKB_CB(skb)->sacked = 0;
72744 +
72745 + sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72746 +- if (!sk)
72747 ++ if (!sk) {
72748 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72749 ++ ret = 1;
72750 ++#endif
72751 + goto no_tcp_socket;
72752 +-
72753 ++ }
72754 + process:
72755 +- if (sk->sk_state == TCP_TIME_WAIT)
72756 ++ if (sk->sk_state == TCP_TIME_WAIT) {
72757 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72758 ++ ret = 2;
72759 ++#endif
72760 + goto do_time_wait;
72761 ++ }
72762 +
72763 + if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
72764 + NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
72765 +@@ -1739,6 +1752,10 @@ no_tcp_socket:
72766 + bad_packet:
72767 + TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72768 + } else {
72769 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72770 ++ if (!grsec_enable_blackhole || (ret == 1 &&
72771 ++ (skb->dev->flags & IFF_LOOPBACK)))
72772 ++#endif
72773 + tcp_v4_send_reset(NULL, skb);
72774 + }
72775 +
72776 +@@ -2403,7 +2420,11 @@ static void get_openreq4(struct sock *sk
72777 + 0, /* non standard timer */
72778 + 0, /* open_requests have no inode */
72779 + atomic_read(&sk->sk_refcnt),
72780 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
72781 ++ NULL,
72782 ++#else
72783 + req,
72784 ++#endif
72785 + len);
72786 + }
72787 +
72788 +@@ -2453,7 +2474,12 @@ static void get_tcp4_sock(struct sock *s
72789 + sock_i_uid(sk),
72790 + icsk->icsk_probes_out,
72791 + sock_i_ino(sk),
72792 +- atomic_read(&sk->sk_refcnt), sk,
72793 ++ atomic_read(&sk->sk_refcnt),
72794 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
72795 ++ NULL,
72796 ++#else
72797 ++ sk,
72798 ++#endif
72799 + jiffies_to_clock_t(icsk->icsk_rto),
72800 + jiffies_to_clock_t(icsk->icsk_ack.ato),
72801 + (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
72802 +@@ -2481,7 +2507,13 @@ static void get_timewait4_sock(struct in
72803 + " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
72804 + i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
72805 + 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72806 +- atomic_read(&tw->tw_refcnt), tw, len);
72807 ++ atomic_read(&tw->tw_refcnt),
72808 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
72809 ++ NULL,
72810 ++#else
72811 ++ tw,
72812 ++#endif
72813 ++ len);
72814 + }
72815 +
72816 + #define TMPSZ 150
72817 +diff -urNp linux-3.1.1/net/ipv4/tcp_minisocks.c linux-3.1.1/net/ipv4/tcp_minisocks.c
72818 +--- linux-3.1.1/net/ipv4/tcp_minisocks.c 2011-11-11 15:19:27.000000000 -0500
72819 ++++ linux-3.1.1/net/ipv4/tcp_minisocks.c 2011-11-16 18:40:44.000000000 -0500
72820 +@@ -27,6 +27,10 @@
72821 + #include <net/inet_common.h>
72822 + #include <net/xfrm.h>
72823 +
72824 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72825 ++extern int grsec_enable_blackhole;
72826 ++#endif
72827 ++
72828 + int sysctl_tcp_syncookies __read_mostly = 1;
72829 + EXPORT_SYMBOL(sysctl_tcp_syncookies);
72830 +
72831 +@@ -750,6 +754,10 @@ listen_overflow:
72832 +
72833 + embryonic_reset:
72834 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
72835 ++
72836 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72837 ++ if (!grsec_enable_blackhole)
72838 ++#endif
72839 + if (!(flg & TCP_FLAG_RST))
72840 + req->rsk_ops->send_reset(sk, skb);
72841 +
72842 +diff -urNp linux-3.1.1/net/ipv4/tcp_output.c linux-3.1.1/net/ipv4/tcp_output.c
72843 +--- linux-3.1.1/net/ipv4/tcp_output.c 2011-11-11 15:19:27.000000000 -0500
72844 ++++ linux-3.1.1/net/ipv4/tcp_output.c 2011-11-16 18:40:44.000000000 -0500
72845 +@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
72846 + int mss;
72847 + int s_data_desired = 0;
72848 +
72849 ++ pax_track_stack();
72850 ++
72851 + if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
72852 + s_data_desired = cvp->s_data_desired;
72853 + skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
72854 +diff -urNp linux-3.1.1/net/ipv4/tcp_probe.c linux-3.1.1/net/ipv4/tcp_probe.c
72855 +--- linux-3.1.1/net/ipv4/tcp_probe.c 2011-11-11 15:19:27.000000000 -0500
72856 ++++ linux-3.1.1/net/ipv4/tcp_probe.c 2011-11-16 18:39:08.000000000 -0500
72857 +@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
72858 + if (cnt + width >= len)
72859 + break;
72860 +
72861 +- if (copy_to_user(buf + cnt, tbuf, width))
72862 ++ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
72863 + return -EFAULT;
72864 + cnt += width;
72865 + }
72866 +diff -urNp linux-3.1.1/net/ipv4/tcp_timer.c linux-3.1.1/net/ipv4/tcp_timer.c
72867 +--- linux-3.1.1/net/ipv4/tcp_timer.c 2011-11-11 15:19:27.000000000 -0500
72868 ++++ linux-3.1.1/net/ipv4/tcp_timer.c 2011-11-16 18:40:44.000000000 -0500
72869 +@@ -22,6 +22,10 @@
72870 + #include <linux/gfp.h>
72871 + #include <net/tcp.h>
72872 +
72873 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72874 ++extern int grsec_lastack_retries;
72875 ++#endif
72876 ++
72877 + int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
72878 + int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
72879 + int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
72880 +@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
72881 + }
72882 + }
72883 +
72884 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72885 ++ if ((sk->sk_state == TCP_LAST_ACK) &&
72886 ++ (grsec_lastack_retries > 0) &&
72887 ++ (grsec_lastack_retries < retry_until))
72888 ++ retry_until = grsec_lastack_retries;
72889 ++#endif
72890 ++
72891 + if (retransmits_timed_out(sk, retry_until,
72892 + syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
72893 + /* Has it gone just too far? */
72894 +diff -urNp linux-3.1.1/net/ipv4/udp.c linux-3.1.1/net/ipv4/udp.c
72895 +--- linux-3.1.1/net/ipv4/udp.c 2011-11-11 15:19:27.000000000 -0500
72896 ++++ linux-3.1.1/net/ipv4/udp.c 2011-11-16 19:17:54.000000000 -0500
72897 +@@ -86,6 +86,7 @@
72898 + #include <linux/types.h>
72899 + #include <linux/fcntl.h>
72900 + #include <linux/module.h>
72901 ++#include <linux/security.h>
72902 + #include <linux/socket.h>
72903 + #include <linux/sockios.h>
72904 + #include <linux/igmp.h>
72905 +@@ -108,6 +109,10 @@
72906 + #include <trace/events/udp.h>
72907 + #include "udp_impl.h"
72908 +
72909 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72910 ++extern int grsec_enable_blackhole;
72911 ++#endif
72912 ++
72913 + struct udp_table udp_table __read_mostly;
72914 + EXPORT_SYMBOL(udp_table);
72915 +
72916 +@@ -565,6 +570,9 @@ found:
72917 + return s;
72918 + }
72919 +
72920 ++extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
72921 ++extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
72922 ++
72923 + /*
72924 + * This routine is called by the ICMP module when it gets some
72925 + * sort of error condition. If err < 0 then the socket should
72926 +@@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
72927 + dport = usin->sin_port;
72928 + if (dport == 0)
72929 + return -EINVAL;
72930 ++
72931 ++ err = gr_search_udp_sendmsg(sk, usin);
72932 ++ if (err)
72933 ++ return err;
72934 + } else {
72935 + if (sk->sk_state != TCP_ESTABLISHED)
72936 + return -EDESTADDRREQ;
72937 ++
72938 ++ err = gr_search_udp_sendmsg(sk, NULL);
72939 ++ if (err)
72940 ++ return err;
72941 ++
72942 + daddr = inet->inet_daddr;
72943 + dport = inet->inet_dport;
72944 + /* Open fast path for connected socket.
72945 +@@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(
72946 + udp_lib_checksum_complete(skb)) {
72947 + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
72948 + IS_UDPLITE(sk));
72949 +- atomic_inc(&sk->sk_drops);
72950 ++ atomic_inc_unchecked(&sk->sk_drops);
72951 + __skb_unlink(skb, rcvq);
72952 + __skb_queue_tail(&list_kill, skb);
72953 + }
72954 +@@ -1185,6 +1202,10 @@ try_again:
72955 + if (!skb)
72956 + goto out;
72957 +
72958 ++ err = gr_search_udp_recvmsg(sk, skb);
72959 ++ if (err)
72960 ++ goto out_free;
72961 ++
72962 + ulen = skb->len - sizeof(struct udphdr);
72963 + if (len > ulen)
72964 + len = ulen;
72965 +@@ -1485,7 +1506,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
72966 +
72967 + drop:
72968 + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
72969 +- atomic_inc(&sk->sk_drops);
72970 ++ atomic_inc_unchecked(&sk->sk_drops);
72971 + kfree_skb(skb);
72972 + return -1;
72973 + }
72974 +@@ -1504,7 +1525,7 @@ static void flush_stack(struct sock **st
72975 + skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
72976 +
72977 + if (!skb1) {
72978 +- atomic_inc(&sk->sk_drops);
72979 ++ atomic_inc_unchecked(&sk->sk_drops);
72980 + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
72981 + IS_UDPLITE(sk));
72982 + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
72983 +@@ -1673,6 +1694,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
72984 + goto csum_error;
72985 +
72986 + UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
72987 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72988 ++ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72989 ++#endif
72990 + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
72991 +
72992 + /*
72993 +@@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock
72994 + sk_wmem_alloc_get(sp),
72995 + sk_rmem_alloc_get(sp),
72996 + 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72997 +- atomic_read(&sp->sk_refcnt), sp,
72998 +- atomic_read(&sp->sk_drops), len);
72999 ++ atomic_read(&sp->sk_refcnt),
73000 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
73001 ++ NULL,
73002 ++#else
73003 ++ sp,
73004 ++#endif
73005 ++ atomic_read_unchecked(&sp->sk_drops), len);
73006 + }
73007 +
73008 + int udp4_seq_show(struct seq_file *seq, void *v)
73009 +diff -urNp linux-3.1.1/net/ipv6/addrconf.c linux-3.1.1/net/ipv6/addrconf.c
73010 +--- linux-3.1.1/net/ipv6/addrconf.c 2011-11-11 15:19:27.000000000 -0500
73011 ++++ linux-3.1.1/net/ipv6/addrconf.c 2011-11-16 18:39:08.000000000 -0500
73012 +@@ -2083,7 +2083,7 @@ int addrconf_set_dstaddr(struct net *net
73013 + p.iph.ihl = 5;
73014 + p.iph.protocol = IPPROTO_IPV6;
73015 + p.iph.ttl = 64;
73016 +- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
73017 ++ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
73018 +
73019 + if (ops->ndo_do_ioctl) {
73020 + mm_segment_t oldfs = get_fs();
73021 +diff -urNp linux-3.1.1/net/ipv6/inet6_connection_sock.c linux-3.1.1/net/ipv6/inet6_connection_sock.c
73022 +--- linux-3.1.1/net/ipv6/inet6_connection_sock.c 2011-11-11 15:19:27.000000000 -0500
73023 ++++ linux-3.1.1/net/ipv6/inet6_connection_sock.c 2011-11-16 18:39:08.000000000 -0500
73024 +@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
73025 + #ifdef CONFIG_XFRM
73026 + {
73027 + struct rt6_info *rt = (struct rt6_info *)dst;
73028 +- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
73029 ++ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
73030 + }
73031 + #endif
73032 + }
73033 +@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
73034 + #ifdef CONFIG_XFRM
73035 + if (dst) {
73036 + struct rt6_info *rt = (struct rt6_info *)dst;
73037 +- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
73038 ++ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
73039 + __sk_dst_reset(sk);
73040 + dst = NULL;
73041 + }
73042 +diff -urNp linux-3.1.1/net/ipv6/ipv6_sockglue.c linux-3.1.1/net/ipv6/ipv6_sockglue.c
73043 +--- linux-3.1.1/net/ipv6/ipv6_sockglue.c 2011-11-11 15:19:27.000000000 -0500
73044 ++++ linux-3.1.1/net/ipv6/ipv6_sockglue.c 2011-11-16 18:40:44.000000000 -0500
73045 +@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
73046 + int val, valbool;
73047 + int retv = -ENOPROTOOPT;
73048 +
73049 ++ pax_track_stack();
73050 ++
73051 + if (optval == NULL)
73052 + val=0;
73053 + else {
73054 +@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
73055 + int len;
73056 + int val;
73057 +
73058 ++ pax_track_stack();
73059 ++
73060 + if (ip6_mroute_opt(optname))
73061 + return ip6_mroute_getsockopt(sk, optname, optval, optlen);
73062 +
73063 +@@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct soc
73064 + if (sk->sk_type != SOCK_STREAM)
73065 + return -ENOPROTOOPT;
73066 +
73067 +- msg.msg_control = optval;
73068 ++ msg.msg_control = (void __force_kernel *)optval;
73069 + msg.msg_controllen = len;
73070 + msg.msg_flags = flags;
73071 +
73072 +diff -urNp linux-3.1.1/net/ipv6/raw.c linux-3.1.1/net/ipv6/raw.c
73073 +--- linux-3.1.1/net/ipv6/raw.c 2011-11-11 15:19:27.000000000 -0500
73074 ++++ linux-3.1.1/net/ipv6/raw.c 2011-11-16 18:40:44.000000000 -0500
73075 +@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
73076 + {
73077 + if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
73078 + skb_checksum_complete(skb)) {
73079 +- atomic_inc(&sk->sk_drops);
73080 ++ atomic_inc_unchecked(&sk->sk_drops);
73081 + kfree_skb(skb);
73082 + return NET_RX_DROP;
73083 + }
73084 +@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
73085 + struct raw6_sock *rp = raw6_sk(sk);
73086 +
73087 + if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
73088 +- atomic_inc(&sk->sk_drops);
73089 ++ atomic_inc_unchecked(&sk->sk_drops);
73090 + kfree_skb(skb);
73091 + return NET_RX_DROP;
73092 + }
73093 +@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
73094 +
73095 + if (inet->hdrincl) {
73096 + if (skb_checksum_complete(skb)) {
73097 +- atomic_inc(&sk->sk_drops);
73098 ++ atomic_inc_unchecked(&sk->sk_drops);
73099 + kfree_skb(skb);
73100 + return NET_RX_DROP;
73101 + }
73102 +@@ -601,7 +601,7 @@ out:
73103 + return err;
73104 + }
73105 +
73106 +-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
73107 ++static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
73108 + struct flowi6 *fl6, struct dst_entry **dstp,
73109 + unsigned int flags)
73110 + {
73111 +@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
73112 + u16 proto;
73113 + int err;
73114 +
73115 ++ pax_track_stack();
73116 ++
73117 + /* Rough check on arithmetic overflow,
73118 + better check is made in ip6_append_data().
73119 + */
73120 +@@ -909,12 +911,15 @@ do_confirm:
73121 + static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
73122 + char __user *optval, int optlen)
73123 + {
73124 ++ struct icmp6_filter filter;
73125 ++
73126 + switch (optname) {
73127 + case ICMPV6_FILTER:
73128 + if (optlen > sizeof(struct icmp6_filter))
73129 + optlen = sizeof(struct icmp6_filter);
73130 +- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
73131 ++ if (copy_from_user(&filter, optval, optlen))
73132 + return -EFAULT;
73133 ++ raw6_sk(sk)->filter = filter;
73134 + return 0;
73135 + default:
73136 + return -ENOPROTOOPT;
73137 +@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
73138 + char __user *optval, int __user *optlen)
73139 + {
73140 + int len;
73141 ++ struct icmp6_filter filter;
73142 +
73143 + switch (optname) {
73144 + case ICMPV6_FILTER:
73145 +@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
73146 + len = sizeof(struct icmp6_filter);
73147 + if (put_user(len, optlen))
73148 + return -EFAULT;
73149 +- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73150 ++ filter = raw6_sk(sk)->filter;
73151 ++ if (len > sizeof filter || copy_to_user(optval, &filter, len))
73152 + return -EFAULT;
73153 + return 0;
73154 + default:
73155 +@@ -1245,7 +1252,13 @@ static void raw6_sock_seq_show(struct se
73156 + 0, 0L, 0,
73157 + sock_i_uid(sp), 0,
73158 + sock_i_ino(sp),
73159 +- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73160 ++ atomic_read(&sp->sk_refcnt),
73161 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
73162 ++ NULL,
73163 ++#else
73164 ++ sp,
73165 ++#endif
73166 ++ atomic_read_unchecked(&sp->sk_drops));
73167 + }
73168 +
73169 + static int raw6_seq_show(struct seq_file *seq, void *v)
73170 +diff -urNp linux-3.1.1/net/ipv6/tcp_ipv6.c linux-3.1.1/net/ipv6/tcp_ipv6.c
73171 +--- linux-3.1.1/net/ipv6/tcp_ipv6.c 2011-11-11 15:19:27.000000000 -0500
73172 ++++ linux-3.1.1/net/ipv6/tcp_ipv6.c 2011-11-16 18:40:44.000000000 -0500
73173 +@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
73174 + }
73175 + #endif
73176 +
73177 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73178 ++extern int grsec_enable_blackhole;
73179 ++#endif
73180 ++
73181 + static void tcp_v6_hash(struct sock *sk)
73182 + {
73183 + if (sk->sk_state != TCP_CLOSE) {
73184 +@@ -1647,6 +1651,9 @@ static int tcp_v6_do_rcv(struct sock *sk
73185 + return 0;
73186 +
73187 + reset:
73188 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73189 ++ if (!grsec_enable_blackhole)
73190 ++#endif
73191 + tcp_v6_send_reset(sk, skb);
73192 + discard:
73193 + if (opt_skb)
73194 +@@ -1726,12 +1733,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
73195 + TCP_SKB_CB(skb)->sacked = 0;
73196 +
73197 + sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73198 +- if (!sk)
73199 ++ if (!sk) {
73200 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73201 ++ ret = 1;
73202 ++#endif
73203 + goto no_tcp_socket;
73204 ++ }
73205 +
73206 + process:
73207 +- if (sk->sk_state == TCP_TIME_WAIT)
73208 ++ if (sk->sk_state == TCP_TIME_WAIT) {
73209 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73210 ++ ret = 2;
73211 ++#endif
73212 + goto do_time_wait;
73213 ++ }
73214 +
73215 + if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73216 + NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73217 +@@ -1779,6 +1794,10 @@ no_tcp_socket:
73218 + bad_packet:
73219 + TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73220 + } else {
73221 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73222 ++ if (!grsec_enable_blackhole || (ret == 1 &&
73223 ++ (skb->dev->flags & IFF_LOOPBACK)))
73224 ++#endif
73225 + tcp_v6_send_reset(NULL, skb);
73226 + }
73227 +
73228 +@@ -2039,7 +2058,13 @@ static void get_openreq6(struct seq_file
73229 + uid,
73230 + 0, /* non standard timer */
73231 + 0, /* open_requests have no inode */
73232 +- 0, req);
73233 ++ 0,
73234 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
73235 ++ NULL
73236 ++#else
73237 ++ req
73238 ++#endif
73239 ++ );
73240 + }
73241 +
73242 + static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73243 +@@ -2089,7 +2114,12 @@ static void get_tcp6_sock(struct seq_fil
73244 + sock_i_uid(sp),
73245 + icsk->icsk_probes_out,
73246 + sock_i_ino(sp),
73247 +- atomic_read(&sp->sk_refcnt), sp,
73248 ++ atomic_read(&sp->sk_refcnt),
73249 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
73250 ++ NULL,
73251 ++#else
73252 ++ sp,
73253 ++#endif
73254 + jiffies_to_clock_t(icsk->icsk_rto),
73255 + jiffies_to_clock_t(icsk->icsk_ack.ato),
73256 + (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73257 +@@ -2124,7 +2154,13 @@ static void get_timewait6_sock(struct se
73258 + dest->s6_addr32[2], dest->s6_addr32[3], destp,
73259 + tw->tw_substate, 0, 0,
73260 + 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73261 +- atomic_read(&tw->tw_refcnt), tw);
73262 ++ atomic_read(&tw->tw_refcnt),
73263 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
73264 ++ NULL
73265 ++#else
73266 ++ tw
73267 ++#endif
73268 ++ );
73269 + }
73270 +
73271 + static int tcp6_seq_show(struct seq_file *seq, void *v)
73272 +diff -urNp linux-3.1.1/net/ipv6/udp.c linux-3.1.1/net/ipv6/udp.c
73273 +--- linux-3.1.1/net/ipv6/udp.c 2011-11-11 15:19:27.000000000 -0500
73274 ++++ linux-3.1.1/net/ipv6/udp.c 2011-11-16 18:40:44.000000000 -0500
73275 +@@ -50,6 +50,10 @@
73276 + #include <linux/seq_file.h>
73277 + #include "udp_impl.h"
73278 +
73279 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73280 ++extern int grsec_enable_blackhole;
73281 ++#endif
73282 ++
73283 + int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73284 + {
73285 + const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73286 +@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
73287 +
73288 + return 0;
73289 + drop:
73290 +- atomic_inc(&sk->sk_drops);
73291 ++ atomic_inc_unchecked(&sk->sk_drops);
73292 + drop_no_sk_drops_inc:
73293 + UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73294 + kfree_skb(skb);
73295 +@@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
73296 + continue;
73297 + }
73298 + drop:
73299 +- atomic_inc(&sk->sk_drops);
73300 ++ atomic_inc_unchecked(&sk->sk_drops);
73301 + UDP6_INC_STATS_BH(sock_net(sk),
73302 + UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73303 + UDP6_INC_STATS_BH(sock_net(sk),
73304 +@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
73305 + UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73306 + proto == IPPROTO_UDPLITE);
73307 +
73308 ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73309 ++ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73310 ++#endif
73311 + icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73312 +
73313 + kfree_skb(skb);
73314 +@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
73315 + if (!sock_owned_by_user(sk))
73316 + udpv6_queue_rcv_skb(sk, skb);
73317 + else if (sk_add_backlog(sk, skb)) {
73318 +- atomic_inc(&sk->sk_drops);
73319 ++ atomic_inc_unchecked(&sk->sk_drops);
73320 + bh_unlock_sock(sk);
73321 + sock_put(sk);
73322 + goto discard;
73323 +@@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
73324 + 0, 0L, 0,
73325 + sock_i_uid(sp), 0,
73326 + sock_i_ino(sp),
73327 +- atomic_read(&sp->sk_refcnt), sp,
73328 +- atomic_read(&sp->sk_drops));
73329 ++ atomic_read(&sp->sk_refcnt),
73330 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
73331 ++ NULL,
73332 ++#else
73333 ++ sp,
73334 ++#endif
73335 ++ atomic_read_unchecked(&sp->sk_drops));
73336 + }
73337 +
73338 + int udp6_seq_show(struct seq_file *seq, void *v)
73339 +diff -urNp linux-3.1.1/net/irda/ircomm/ircomm_tty.c linux-3.1.1/net/irda/ircomm/ircomm_tty.c
73340 +--- linux-3.1.1/net/irda/ircomm/ircomm_tty.c 2011-11-11 15:19:27.000000000 -0500
73341 ++++ linux-3.1.1/net/irda/ircomm/ircomm_tty.c 2011-11-16 18:39:08.000000000 -0500
73342 +@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(st
73343 + add_wait_queue(&self->open_wait, &wait);
73344 +
73345 + IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73346 +- __FILE__,__LINE__, tty->driver->name, self->open_count );
73347 ++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73348 +
73349 + /* As far as I can see, we protect open_count - Jean II */
73350 + spin_lock_irqsave(&self->spinlock, flags);
73351 + if (!tty_hung_up_p(filp)) {
73352 + extra_count = 1;
73353 +- self->open_count--;
73354 ++ local_dec(&self->open_count);
73355 + }
73356 + spin_unlock_irqrestore(&self->spinlock, flags);
73357 +- self->blocked_open++;
73358 ++ local_inc(&self->blocked_open);
73359 +
73360 + while (1) {
73361 + if (tty->termios->c_cflag & CBAUD) {
73362 +@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(st
73363 + }
73364 +
73365 + IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
73366 +- __FILE__,__LINE__, tty->driver->name, self->open_count );
73367 ++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73368 +
73369 + schedule();
73370 + }
73371 +@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(st
73372 + if (extra_count) {
73373 + /* ++ is not atomic, so this should be protected - Jean II */
73374 + spin_lock_irqsave(&self->spinlock, flags);
73375 +- self->open_count++;
73376 ++ local_inc(&self->open_count);
73377 + spin_unlock_irqrestore(&self->spinlock, flags);
73378 + }
73379 +- self->blocked_open--;
73380 ++ local_dec(&self->blocked_open);
73381 +
73382 + IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
73383 +- __FILE__,__LINE__, tty->driver->name, self->open_count);
73384 ++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
73385 +
73386 + if (!retval)
73387 + self->flags |= ASYNC_NORMAL_ACTIVE;
73388 +@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_st
73389 + }
73390 + /* ++ is not atomic, so this should be protected - Jean II */
73391 + spin_lock_irqsave(&self->spinlock, flags);
73392 +- self->open_count++;
73393 ++ local_inc(&self->open_count);
73394 +
73395 + tty->driver_data = self;
73396 + self->tty = tty;
73397 + spin_unlock_irqrestore(&self->spinlock, flags);
73398 +
73399 + IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
73400 +- self->line, self->open_count);
73401 ++ self->line, local_read(&self->open_count));
73402 +
73403 + /* Not really used by us, but lets do it anyway */
73404 + self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
73405 +@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_
73406 + return;
73407 + }
73408 +
73409 +- if ((tty->count == 1) && (self->open_count != 1)) {
73410 ++ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
73411 + /*
73412 + * Uh, oh. tty->count is 1, which means that the tty
73413 + * structure will be freed. state->count should always
73414 +@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_
73415 + */
73416 + IRDA_DEBUG(0, "%s(), bad serial port count; "
73417 + "tty->count is 1, state->count is %d\n", __func__ ,
73418 +- self->open_count);
73419 +- self->open_count = 1;
73420 ++ local_read(&self->open_count));
73421 ++ local_set(&self->open_count, 1);
73422 + }
73423 +
73424 +- if (--self->open_count < 0) {
73425 ++ if (local_dec_return(&self->open_count) < 0) {
73426 + IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
73427 +- __func__, self->line, self->open_count);
73428 +- self->open_count = 0;
73429 ++ __func__, self->line, local_read(&self->open_count));
73430 ++ local_set(&self->open_count, 0);
73431 + }
73432 +- if (self->open_count) {
73433 ++ if (local_read(&self->open_count)) {
73434 + spin_unlock_irqrestore(&self->spinlock, flags);
73435 +
73436 + IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
73437 +@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_
73438 + tty->closing = 0;
73439 + self->tty = NULL;
73440 +
73441 +- if (self->blocked_open) {
73442 ++ if (local_read(&self->blocked_open)) {
73443 + if (self->close_delay)
73444 + schedule_timeout_interruptible(self->close_delay);
73445 + wake_up_interruptible(&self->open_wait);
73446 +@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty
73447 + spin_lock_irqsave(&self->spinlock, flags);
73448 + self->flags &= ~ASYNC_NORMAL_ACTIVE;
73449 + self->tty = NULL;
73450 +- self->open_count = 0;
73451 ++ local_set(&self->open_count, 0);
73452 + spin_unlock_irqrestore(&self->spinlock, flags);
73453 +
73454 + wake_up_interruptible(&self->open_wait);
73455 +@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct
73456 + seq_putc(m, '\n');
73457 +
73458 + seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
73459 +- seq_printf(m, "Open count: %d\n", self->open_count);
73460 ++ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
73461 + seq_printf(m, "Max data size: %d\n", self->max_data_size);
73462 + seq_printf(m, "Max header size: %d\n", self->max_header_size);
73463 +
73464 +diff -urNp linux-3.1.1/net/iucv/af_iucv.c linux-3.1.1/net/iucv/af_iucv.c
73465 +--- linux-3.1.1/net/iucv/af_iucv.c 2011-11-11 15:19:27.000000000 -0500
73466 ++++ linux-3.1.1/net/iucv/af_iucv.c 2011-11-16 18:39:08.000000000 -0500
73467 +@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct soc
73468 +
73469 + write_lock_bh(&iucv_sk_list.lock);
73470 +
73471 +- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
73472 ++ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73473 + while (__iucv_get_sock_by_name(name)) {
73474 + sprintf(name, "%08x",
73475 +- atomic_inc_return(&iucv_sk_list.autobind_name));
73476 ++ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73477 + }
73478 +
73479 + write_unlock_bh(&iucv_sk_list.lock);
73480 +diff -urNp linux-3.1.1/net/Kconfig linux-3.1.1/net/Kconfig
73481 +--- linux-3.1.1/net/Kconfig 2011-11-11 15:19:27.000000000 -0500
73482 ++++ linux-3.1.1/net/Kconfig 2011-11-18 19:02:18.000000000 -0500
73483 +@@ -239,6 +239,7 @@ config BPF_JIT
73484 + bool "enable BPF Just In Time compiler"
73485 + depends on HAVE_BPF_JIT
73486 + depends on MODULES
73487 ++ depends on !GRKERNSEC
73488 + ---help---
73489 + Berkeley Packet Filter filtering capabilities are normally handled
73490 + by an interpreter. This option allows kernel to generate a native
73491 +diff -urNp linux-3.1.1/net/key/af_key.c linux-3.1.1/net/key/af_key.c
73492 +--- linux-3.1.1/net/key/af_key.c 2011-11-11 15:19:27.000000000 -0500
73493 ++++ linux-3.1.1/net/key/af_key.c 2011-11-16 18:40:44.000000000 -0500
73494 +@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
73495 + struct xfrm_migrate m[XFRM_MAX_DEPTH];
73496 + struct xfrm_kmaddress k;
73497 +
73498 ++ pax_track_stack();
73499 ++
73500 + if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
73501 + ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
73502 + !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
73503 +@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
73504 + static u32 get_acqseq(void)
73505 + {
73506 + u32 res;
73507 +- static atomic_t acqseq;
73508 ++ static atomic_unchecked_t acqseq;
73509 +
73510 + do {
73511 +- res = atomic_inc_return(&acqseq);
73512 ++ res = atomic_inc_return_unchecked(&acqseq);
73513 + } while (!res);
73514 + return res;
73515 + }
73516 +diff -urNp linux-3.1.1/net/lapb/lapb_iface.c linux-3.1.1/net/lapb/lapb_iface.c
73517 +--- linux-3.1.1/net/lapb/lapb_iface.c 2011-11-11 15:19:27.000000000 -0500
73518 ++++ linux-3.1.1/net/lapb/lapb_iface.c 2011-11-16 18:39:08.000000000 -0500
73519 +@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
73520 + goto out;
73521 +
73522 + lapb->dev = dev;
73523 +- lapb->callbacks = *callbacks;
73524 ++ lapb->callbacks = callbacks;
73525 +
73526 + __lapb_insert_cb(lapb);
73527 +
73528 +@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
73529 +
73530 + void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
73531 + {
73532 +- if (lapb->callbacks.connect_confirmation)
73533 +- lapb->callbacks.connect_confirmation(lapb->dev, reason);
73534 ++ if (lapb->callbacks->connect_confirmation)
73535 ++ lapb->callbacks->connect_confirmation(lapb->dev, reason);
73536 + }
73537 +
73538 + void lapb_connect_indication(struct lapb_cb *lapb, int reason)
73539 + {
73540 +- if (lapb->callbacks.connect_indication)
73541 +- lapb->callbacks.connect_indication(lapb->dev, reason);
73542 ++ if (lapb->callbacks->connect_indication)
73543 ++ lapb->callbacks->connect_indication(lapb->dev, reason);
73544 + }
73545 +
73546 + void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
73547 + {
73548 +- if (lapb->callbacks.disconnect_confirmation)
73549 +- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
73550 ++ if (lapb->callbacks->disconnect_confirmation)
73551 ++ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
73552 + }
73553 +
73554 + void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
73555 + {
73556 +- if (lapb->callbacks.disconnect_indication)
73557 +- lapb->callbacks.disconnect_indication(lapb->dev, reason);
73558 ++ if (lapb->callbacks->disconnect_indication)
73559 ++ lapb->callbacks->disconnect_indication(lapb->dev, reason);
73560 + }
73561 +
73562 + int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
73563 + {
73564 +- if (lapb->callbacks.data_indication)
73565 +- return lapb->callbacks.data_indication(lapb->dev, skb);
73566 ++ if (lapb->callbacks->data_indication)
73567 ++ return lapb->callbacks->data_indication(lapb->dev, skb);
73568 +
73569 + kfree_skb(skb);
73570 + return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
73571 +@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
73572 + {
73573 + int used = 0;
73574 +
73575 +- if (lapb->callbacks.data_transmit) {
73576 +- lapb->callbacks.data_transmit(lapb->dev, skb);
73577 ++ if (lapb->callbacks->data_transmit) {
73578 ++ lapb->callbacks->data_transmit(lapb->dev, skb);
73579 + used = 1;
73580 + }
73581 +
73582 +diff -urNp linux-3.1.1/net/mac80211/debugfs_sta.c linux-3.1.1/net/mac80211/debugfs_sta.c
73583 +--- linux-3.1.1/net/mac80211/debugfs_sta.c 2011-11-11 15:19:27.000000000 -0500
73584 ++++ linux-3.1.1/net/mac80211/debugfs_sta.c 2011-11-16 18:40:44.000000000 -0500
73585 +@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struc
73586 + struct tid_ampdu_rx *tid_rx;
73587 + struct tid_ampdu_tx *tid_tx;
73588 +
73589 ++ pax_track_stack();
73590 ++
73591 + rcu_read_lock();
73592 +
73593 + p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
73594 +@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct f
73595 + struct sta_info *sta = file->private_data;
73596 + struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
73597 +
73598 ++ pax_track_stack();
73599 ++
73600 + p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
73601 + htc->ht_supported ? "" : "not ");
73602 + if (htc->ht_supported) {
73603 +diff -urNp linux-3.1.1/net/mac80211/ieee80211_i.h linux-3.1.1/net/mac80211/ieee80211_i.h
73604 +--- linux-3.1.1/net/mac80211/ieee80211_i.h 2011-11-11 15:19:27.000000000 -0500
73605 ++++ linux-3.1.1/net/mac80211/ieee80211_i.h 2011-11-16 18:39:08.000000000 -0500
73606 +@@ -27,6 +27,7 @@
73607 + #include <net/ieee80211_radiotap.h>
73608 + #include <net/cfg80211.h>
73609 + #include <net/mac80211.h>
73610 ++#include <asm/local.h>
73611 + #include "key.h"
73612 + #include "sta_info.h"
73613 +
73614 +@@ -754,7 +755,7 @@ struct ieee80211_local {
73615 + /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73616 + spinlock_t queue_stop_reason_lock;
73617 +
73618 +- int open_count;
73619 ++ local_t open_count;
73620 + int monitors, cooked_mntrs;
73621 + /* number of interfaces with corresponding FIF_ flags */
73622 + int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
73623 +diff -urNp linux-3.1.1/net/mac80211/iface.c linux-3.1.1/net/mac80211/iface.c
73624 +--- linux-3.1.1/net/mac80211/iface.c 2011-11-11 15:19:27.000000000 -0500
73625 ++++ linux-3.1.1/net/mac80211/iface.c 2011-11-16 18:39:08.000000000 -0500
73626 +@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
73627 + break;
73628 + }
73629 +
73630 +- if (local->open_count == 0) {
73631 ++ if (local_read(&local->open_count) == 0) {
73632 + res = drv_start(local);
73633 + if (res)
73634 + goto err_del_bss;
73635 +@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
73636 + memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
73637 +
73638 + if (!is_valid_ether_addr(dev->dev_addr)) {
73639 +- if (!local->open_count)
73640 ++ if (!local_read(&local->open_count))
73641 + drv_stop(local);
73642 + return -EADDRNOTAVAIL;
73643 + }
73644 +@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
73645 + mutex_unlock(&local->mtx);
73646 +
73647 + if (coming_up)
73648 +- local->open_count++;
73649 ++ local_inc(&local->open_count);
73650 +
73651 + if (hw_reconf_flags) {
73652 + ieee80211_hw_config(local, hw_reconf_flags);
73653 +@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
73654 + err_del_interface:
73655 + drv_remove_interface(local, &sdata->vif);
73656 + err_stop:
73657 +- if (!local->open_count)
73658 ++ if (!local_read(&local->open_count))
73659 + drv_stop(local);
73660 + err_del_bss:
73661 + sdata->bss = NULL;
73662 +@@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct iee
73663 + }
73664 +
73665 + if (going_down)
73666 +- local->open_count--;
73667 ++ local_dec(&local->open_count);
73668 +
73669 + switch (sdata->vif.type) {
73670 + case NL80211_IFTYPE_AP_VLAN:
73671 +@@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct iee
73672 +
73673 + ieee80211_recalc_ps(local, -1);
73674 +
73675 +- if (local->open_count == 0) {
73676 ++ if (local_read(&local->open_count) == 0) {
73677 + if (local->ops->napi_poll)
73678 + napi_disable(&local->napi);
73679 + ieee80211_clear_tx_pending(local);
73680 +diff -urNp linux-3.1.1/net/mac80211/main.c linux-3.1.1/net/mac80211/main.c
73681 +--- linux-3.1.1/net/mac80211/main.c 2011-11-11 15:19:27.000000000 -0500
73682 ++++ linux-3.1.1/net/mac80211/main.c 2011-11-16 18:39:08.000000000 -0500
73683 +@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211
73684 + local->hw.conf.power_level = power;
73685 + }
73686 +
73687 +- if (changed && local->open_count) {
73688 ++ if (changed && local_read(&local->open_count)) {
73689 + ret = drv_config(local, changed);
73690 + /*
73691 + * Goal:
73692 +diff -urNp linux-3.1.1/net/mac80211/mlme.c linux-3.1.1/net/mac80211/mlme.c
73693 +--- linux-3.1.1/net/mac80211/mlme.c 2011-11-11 15:19:27.000000000 -0500
73694 ++++ linux-3.1.1/net/mac80211/mlme.c 2011-11-16 18:40:44.000000000 -0500
73695 +@@ -1464,6 +1464,8 @@ static bool ieee80211_assoc_success(stru
73696 + bool have_higher_than_11mbit = false;
73697 + u16 ap_ht_cap_flags;
73698 +
73699 ++ pax_track_stack();
73700 ++
73701 + /* AssocResp and ReassocResp have identical structure */
73702 +
73703 + aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
73704 +diff -urNp linux-3.1.1/net/mac80211/pm.c linux-3.1.1/net/mac80211/pm.c
73705 +--- linux-3.1.1/net/mac80211/pm.c 2011-11-11 15:19:27.000000000 -0500
73706 ++++ linux-3.1.1/net/mac80211/pm.c 2011-11-16 18:39:08.000000000 -0500
73707 +@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211
73708 + struct ieee80211_sub_if_data *sdata;
73709 + struct sta_info *sta;
73710 +
73711 +- if (!local->open_count)
73712 ++ if (!local_read(&local->open_count))
73713 + goto suspend;
73714 +
73715 + ieee80211_scan_cancel(local);
73716 +@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211
73717 + cancel_work_sync(&local->dynamic_ps_enable_work);
73718 + del_timer_sync(&local->dynamic_ps_timer);
73719 +
73720 +- local->wowlan = wowlan && local->open_count;
73721 ++ local->wowlan = wowlan && local_read(&local->open_count);
73722 + if (local->wowlan) {
73723 + int err = drv_suspend(local, wowlan);
73724 + if (err < 0) {
73725 +@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211
73726 + }
73727 +
73728 + /* stop hardware - this must stop RX */
73729 +- if (local->open_count)
73730 ++ if (local_read(&local->open_count))
73731 + ieee80211_stop_device(local);
73732 +
73733 + suspend:
73734 +diff -urNp linux-3.1.1/net/mac80211/rate.c linux-3.1.1/net/mac80211/rate.c
73735 +--- linux-3.1.1/net/mac80211/rate.c 2011-11-11 15:19:27.000000000 -0500
73736 ++++ linux-3.1.1/net/mac80211/rate.c 2011-11-16 18:39:08.000000000 -0500
73737 +@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
73738 +
73739 + ASSERT_RTNL();
73740 +
73741 +- if (local->open_count)
73742 ++ if (local_read(&local->open_count))
73743 + return -EBUSY;
73744 +
73745 + if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
73746 +diff -urNp linux-3.1.1/net/mac80211/rc80211_pid_debugfs.c linux-3.1.1/net/mac80211/rc80211_pid_debugfs.c
73747 +--- linux-3.1.1/net/mac80211/rc80211_pid_debugfs.c 2011-11-11 15:19:27.000000000 -0500
73748 ++++ linux-3.1.1/net/mac80211/rc80211_pid_debugfs.c 2011-11-16 18:39:08.000000000 -0500
73749 +@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
73750 +
73751 + spin_unlock_irqrestore(&events->lock, status);
73752 +
73753 +- if (copy_to_user(buf, pb, p))
73754 ++ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
73755 + return -EFAULT;
73756 +
73757 + return p;
73758 +diff -urNp linux-3.1.1/net/mac80211/util.c linux-3.1.1/net/mac80211/util.c
73759 +--- linux-3.1.1/net/mac80211/util.c 2011-11-11 15:19:27.000000000 -0500
73760 ++++ linux-3.1.1/net/mac80211/util.c 2011-11-16 18:39:08.000000000 -0500
73761 +@@ -1166,7 +1166,7 @@ int ieee80211_reconfig(struct ieee80211_
73762 + drv_set_coverage_class(local, hw->wiphy->coverage_class);
73763 +
73764 + /* everything else happens only if HW was up & running */
73765 +- if (!local->open_count)
73766 ++ if (!local_read(&local->open_count))
73767 + goto wake_up;
73768 +
73769 + /*
73770 +diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_conn.c linux-3.1.1/net/netfilter/ipvs/ip_vs_conn.c
73771 +--- linux-3.1.1/net/netfilter/ipvs/ip_vs_conn.c 2011-11-11 15:19:27.000000000 -0500
73772 ++++ linux-3.1.1/net/netfilter/ipvs/ip_vs_conn.c 2011-11-16 18:39:08.000000000 -0500
73773 +@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
73774 + /* Increase the refcnt counter of the dest */
73775 + atomic_inc(&dest->refcnt);
73776 +
73777 +- conn_flags = atomic_read(&dest->conn_flags);
73778 ++ conn_flags = atomic_read_unchecked(&dest->conn_flags);
73779 + if (cp->protocol != IPPROTO_UDP)
73780 + conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
73781 + /* Bind with the destination and its corresponding transmitter */
73782 +@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
73783 + atomic_set(&cp->refcnt, 1);
73784 +
73785 + atomic_set(&cp->n_control, 0);
73786 +- atomic_set(&cp->in_pkts, 0);
73787 ++ atomic_set_unchecked(&cp->in_pkts, 0);
73788 +
73789 + atomic_inc(&ipvs->conn_count);
73790 + if (flags & IP_VS_CONN_F_NO_CPORT)
73791 +@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
73792 +
73793 + /* Don't drop the entry if its number of incoming packets is not
73794 + located in [0, 8] */
73795 +- i = atomic_read(&cp->in_pkts);
73796 ++ i = atomic_read_unchecked(&cp->in_pkts);
73797 + if (i > 8 || i < 0) return 0;
73798 +
73799 + if (!todrop_rate[i]) return 0;
73800 +diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_core.c linux-3.1.1/net/netfilter/ipvs/ip_vs_core.c
73801 +--- linux-3.1.1/net/netfilter/ipvs/ip_vs_core.c 2011-11-11 15:19:27.000000000 -0500
73802 ++++ linux-3.1.1/net/netfilter/ipvs/ip_vs_core.c 2011-11-16 18:39:08.000000000 -0500
73803 +@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
73804 + ret = cp->packet_xmit(skb, cp, pd->pp);
73805 + /* do not touch skb anymore */
73806 +
73807 +- atomic_inc(&cp->in_pkts);
73808 ++ atomic_inc_unchecked(&cp->in_pkts);
73809 + ip_vs_conn_put(cp);
73810 + return ret;
73811 + }
73812 +@@ -1612,7 +1612,7 @@ ip_vs_in(unsigned int hooknum, struct sk
73813 + if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
73814 + pkts = sysctl_sync_threshold(ipvs);
73815 + else
73816 +- pkts = atomic_add_return(1, &cp->in_pkts);
73817 ++ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73818 +
73819 + if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
73820 + cp->protocol == IPPROTO_SCTP) {
73821 +diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_ctl.c linux-3.1.1/net/netfilter/ipvs/ip_vs_ctl.c
73822 +--- linux-3.1.1/net/netfilter/ipvs/ip_vs_ctl.c 2011-11-11 15:19:27.000000000 -0500
73823 ++++ linux-3.1.1/net/netfilter/ipvs/ip_vs_ctl.c 2011-11-16 19:13:12.000000000 -0500
73824 +@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
73825 + ip_vs_rs_hash(ipvs, dest);
73826 + write_unlock_bh(&ipvs->rs_lock);
73827 + }
73828 +- atomic_set(&dest->conn_flags, conn_flags);
73829 ++ atomic_set_unchecked(&dest->conn_flags, conn_flags);
73830 +
73831 + /* bind the service */
73832 + if (!dest->svc) {
73833 +@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
73834 + " %-7s %-6d %-10d %-10d\n",
73835 + &dest->addr.in6,
73836 + ntohs(dest->port),
73837 +- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73838 ++ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73839 + atomic_read(&dest->weight),
73840 + atomic_read(&dest->activeconns),
73841 + atomic_read(&dest->inactconns));
73842 +@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
73843 + "%-7s %-6d %-10d %-10d\n",
73844 + ntohl(dest->addr.ip),
73845 + ntohs(dest->port),
73846 +- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73847 ++ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73848 + atomic_read(&dest->weight),
73849 + atomic_read(&dest->activeconns),
73850 + atomic_read(&dest->inactconns));
73851 +@@ -2285,6 +2285,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
73852 + struct ip_vs_dest_user_kern udest;
73853 + struct netns_ipvs *ipvs = net_ipvs(net);
73854 +
73855 ++ pax_track_stack();
73856 ++
73857 + if (!capable(CAP_NET_ADMIN))
73858 + return -EPERM;
73859 +
73860 +@@ -2508,7 +2510,7 @@ __ip_vs_get_dest_entries(struct net *net
73861 +
73862 + entry.addr = dest->addr.ip;
73863 + entry.port = dest->port;
73864 +- entry.conn_flags = atomic_read(&dest->conn_flags);
73865 ++ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
73866 + entry.weight = atomic_read(&dest->weight);
73867 + entry.u_threshold = dest->u_threshold;
73868 + entry.l_threshold = dest->l_threshold;
73869 +@@ -3041,7 +3043,7 @@ static int ip_vs_genl_fill_dest(struct s
73870 + NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
73871 +
73872 + NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
73873 +- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73874 ++ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73875 + NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
73876 + NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
73877 + NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
73878 +diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_sync.c linux-3.1.1/net/netfilter/ipvs/ip_vs_sync.c
73879 +--- linux-3.1.1/net/netfilter/ipvs/ip_vs_sync.c 2011-11-11 15:19:27.000000000 -0500
73880 ++++ linux-3.1.1/net/netfilter/ipvs/ip_vs_sync.c 2011-11-16 18:39:08.000000000 -0500
73881 +@@ -649,7 +649,7 @@ control:
73882 + * i.e only increment in_pkts for Templates.
73883 + */
73884 + if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
73885 +- int pkts = atomic_add_return(1, &cp->in_pkts);
73886 ++ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73887 +
73888 + if (pkts % sysctl_sync_period(ipvs) != 1)
73889 + return;
73890 +@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *
73891 +
73892 + if (opt)
73893 + memcpy(&cp->in_seq, opt, sizeof(*opt));
73894 +- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73895 ++ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
73896 + cp->state = state;
73897 + cp->old_state = cp->state;
73898 + /*
73899 +diff -urNp linux-3.1.1/net/netfilter/ipvs/ip_vs_xmit.c linux-3.1.1/net/netfilter/ipvs/ip_vs_xmit.c
73900 +--- linux-3.1.1/net/netfilter/ipvs/ip_vs_xmit.c 2011-11-11 15:19:27.000000000 -0500
73901 ++++ linux-3.1.1/net/netfilter/ipvs/ip_vs_xmit.c 2011-11-16 18:39:08.000000000 -0500
73902 +@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
73903 + else
73904 + rc = NF_ACCEPT;
73905 + /* do not touch skb anymore */
73906 +- atomic_inc(&cp->in_pkts);
73907 ++ atomic_inc_unchecked(&cp->in_pkts);
73908 + goto out;
73909 + }
73910 +
73911 +@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
73912 + else
73913 + rc = NF_ACCEPT;
73914 + /* do not touch skb anymore */
73915 +- atomic_inc(&cp->in_pkts);
73916 ++ atomic_inc_unchecked(&cp->in_pkts);
73917 + goto out;
73918 + }
73919 +
73920 +diff -urNp linux-3.1.1/net/netfilter/Kconfig linux-3.1.1/net/netfilter/Kconfig
73921 +--- linux-3.1.1/net/netfilter/Kconfig 2011-11-11 15:19:27.000000000 -0500
73922 ++++ linux-3.1.1/net/netfilter/Kconfig 2011-11-16 18:40:44.000000000 -0500
73923 +@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
73924 +
73925 + To compile it as a module, choose M here. If unsure, say N.
73926 +
73927 ++config NETFILTER_XT_MATCH_GRADM
73928 ++ tristate '"gradm" match support'
73929 ++ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73930 ++ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73931 ++ ---help---
73932 ++ The gradm match allows to match on grsecurity RBAC being enabled.
73933 ++ It is useful when iptables rules are applied early on bootup to
73934 ++ prevent connections to the machine (except from a trusted host)
73935 ++ while the RBAC system is disabled.
73936 ++
73937 + config NETFILTER_XT_MATCH_HASHLIMIT
73938 + tristate '"hashlimit" match support'
73939 + depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
73940 +diff -urNp linux-3.1.1/net/netfilter/Makefile linux-3.1.1/net/netfilter/Makefile
73941 +--- linux-3.1.1/net/netfilter/Makefile 2011-11-11 15:19:27.000000000 -0500
73942 ++++ linux-3.1.1/net/netfilter/Makefile 2011-11-16 18:40:44.000000000 -0500
73943 +@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
73944 + obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
73945 + obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73946 + obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73947 ++obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73948 + obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73949 + obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73950 + obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73951 +diff -urNp linux-3.1.1/net/netfilter/nfnetlink_log.c linux-3.1.1/net/netfilter/nfnetlink_log.c
73952 +--- linux-3.1.1/net/netfilter/nfnetlink_log.c 2011-11-11 15:19:27.000000000 -0500
73953 ++++ linux-3.1.1/net/netfilter/nfnetlink_log.c 2011-11-16 18:39:08.000000000 -0500
73954 +@@ -70,7 +70,7 @@ struct nfulnl_instance {
73955 + };
73956 +
73957 + static DEFINE_SPINLOCK(instances_lock);
73958 +-static atomic_t global_seq;
73959 ++static atomic_unchecked_t global_seq;
73960 +
73961 + #define INSTANCE_BUCKETS 16
73962 + static struct hlist_head instance_table[INSTANCE_BUCKETS];
73963 +@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_ins
73964 + /* global sequence number */
73965 + if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
73966 + NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
73967 +- htonl(atomic_inc_return(&global_seq)));
73968 ++ htonl(atomic_inc_return_unchecked(&global_seq)));
73969 +
73970 + if (data_len) {
73971 + struct nlattr *nla;
73972 +diff -urNp linux-3.1.1/net/netfilter/xt_gradm.c linux-3.1.1/net/netfilter/xt_gradm.c
73973 +--- linux-3.1.1/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
73974 ++++ linux-3.1.1/net/netfilter/xt_gradm.c 2011-11-16 18:40:44.000000000 -0500
73975 +@@ -0,0 +1,51 @@
73976 ++/*
73977 ++ * gradm match for netfilter
73978 ++ * Copyright © Zbigniew Krzystolik, 2010
73979 ++ *
73980 ++ * This program is free software; you can redistribute it and/or modify
73981 ++ * it under the terms of the GNU General Public License; either version
73982 ++ * 2 or 3 as published by the Free Software Foundation.
73983 ++ */
73984 ++#include <linux/module.h>
73985 ++#include <linux/moduleparam.h>
73986 ++#include <linux/skbuff.h>
73987 ++#include <linux/netfilter/x_tables.h>
73988 ++#include <linux/grsecurity.h>
73989 ++#include <linux/netfilter/xt_gradm.h>
73990 ++
73991 ++static bool
73992 ++gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
73993 ++{
73994 ++ const struct xt_gradm_mtinfo *info = par->matchinfo;
73995 ++ bool retval = false;
73996 ++ if (gr_acl_is_enabled())
73997 ++ retval = true;
73998 ++ return retval ^ info->invflags;
73999 ++}
74000 ++
74001 ++static struct xt_match gradm_mt_reg __read_mostly = {
74002 ++ .name = "gradm",
74003 ++ .revision = 0,
74004 ++ .family = NFPROTO_UNSPEC,
74005 ++ .match = gradm_mt,
74006 ++ .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
74007 ++ .me = THIS_MODULE,
74008 ++};
74009 ++
74010 ++static int __init gradm_mt_init(void)
74011 ++{
74012 ++ return xt_register_match(&gradm_mt_reg);
74013 ++}
74014 ++
74015 ++static void __exit gradm_mt_exit(void)
74016 ++{
74017 ++ xt_unregister_match(&gradm_mt_reg);
74018 ++}
74019 ++
74020 ++module_init(gradm_mt_init);
74021 ++module_exit(gradm_mt_exit);
74022 ++MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@××××××××××.pl>");
74023 ++MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
74024 ++MODULE_LICENSE("GPL");
74025 ++MODULE_ALIAS("ipt_gradm");
74026 ++MODULE_ALIAS("ip6t_gradm");
74027 +diff -urNp linux-3.1.1/net/netfilter/xt_statistic.c linux-3.1.1/net/netfilter/xt_statistic.c
74028 +--- linux-3.1.1/net/netfilter/xt_statistic.c 2011-11-11 15:19:27.000000000 -0500
74029 ++++ linux-3.1.1/net/netfilter/xt_statistic.c 2011-11-16 18:39:08.000000000 -0500
74030 +@@ -18,7 +18,7 @@
74031 + #include <linux/netfilter/x_tables.h>
74032 +
74033 + struct xt_statistic_priv {
74034 +- atomic_t count;
74035 ++ atomic_unchecked_t count;
74036 + } ____cacheline_aligned_in_smp;
74037 +
74038 + MODULE_LICENSE("GPL");
74039 +@@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb,
74040 + break;
74041 + case XT_STATISTIC_MODE_NTH:
74042 + do {
74043 +- oval = atomic_read(&info->master->count);
74044 ++ oval = atomic_read_unchecked(&info->master->count);
74045 + nval = (oval == info->u.nth.every) ? 0 : oval + 1;
74046 +- } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
74047 ++ } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
74048 + if (nval == 0)
74049 + ret = !ret;
74050 + break;
74051 +@@ -63,7 +63,7 @@ static int statistic_mt_check(const stru
74052 + info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
74053 + if (info->master == NULL)
74054 + return -ENOMEM;
74055 +- atomic_set(&info->master->count, info->u.nth.count);
74056 ++ atomic_set_unchecked(&info->master->count, info->u.nth.count);
74057 +
74058 + return 0;
74059 + }
74060 +diff -urNp linux-3.1.1/net/netlink/af_netlink.c linux-3.1.1/net/netlink/af_netlink.c
74061 +--- linux-3.1.1/net/netlink/af_netlink.c 2011-11-11 15:19:27.000000000 -0500
74062 ++++ linux-3.1.1/net/netlink/af_netlink.c 2011-11-16 18:39:08.000000000 -0500
74063 +@@ -742,7 +742,7 @@ static void netlink_overrun(struct sock
74064 + sk->sk_error_report(sk);
74065 + }
74066 + }
74067 +- atomic_inc(&sk->sk_drops);
74068 ++ atomic_inc_unchecked(&sk->sk_drops);
74069 + }
74070 +
74071 + static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
74072 +@@ -2000,7 +2000,7 @@ static int netlink_seq_show(struct seq_f
74073 + sk_wmem_alloc_get(s),
74074 + nlk->cb,
74075 + atomic_read(&s->sk_refcnt),
74076 +- atomic_read(&s->sk_drops),
74077 ++ atomic_read_unchecked(&s->sk_drops),
74078 + sock_i_ino(s)
74079 + );
74080 +
74081 +diff -urNp linux-3.1.1/net/netrom/af_netrom.c linux-3.1.1/net/netrom/af_netrom.c
74082 +--- linux-3.1.1/net/netrom/af_netrom.c 2011-11-11 15:19:27.000000000 -0500
74083 ++++ linux-3.1.1/net/netrom/af_netrom.c 2011-11-16 18:40:44.000000000 -0500
74084 +@@ -839,6 +839,7 @@ static int nr_getname(struct socket *soc
74085 + struct sock *sk = sock->sk;
74086 + struct nr_sock *nr = nr_sk(sk);
74087 +
74088 ++ memset(sax, 0, sizeof(*sax));
74089 + lock_sock(sk);
74090 + if (peer != 0) {
74091 + if (sk->sk_state != TCP_ESTABLISHED) {
74092 +@@ -853,7 +854,6 @@ static int nr_getname(struct socket *soc
74093 + *uaddr_len = sizeof(struct full_sockaddr_ax25);
74094 + } else {
74095 + sax->fsa_ax25.sax25_family = AF_NETROM;
74096 +- sax->fsa_ax25.sax25_ndigis = 0;
74097 + sax->fsa_ax25.sax25_call = nr->source_addr;
74098 + *uaddr_len = sizeof(struct sockaddr_ax25);
74099 + }
74100 +diff -urNp linux-3.1.1/net/packet/af_packet.c linux-3.1.1/net/packet/af_packet.c
74101 +--- linux-3.1.1/net/packet/af_packet.c 2011-11-11 15:19:27.000000000 -0500
74102 ++++ linux-3.1.1/net/packet/af_packet.c 2011-11-16 18:39:08.000000000 -0500
74103 +@@ -954,7 +954,7 @@ static int packet_rcv(struct sk_buff *sk
74104 +
74105 + spin_lock(&sk->sk_receive_queue.lock);
74106 + po->stats.tp_packets++;
74107 +- skb->dropcount = atomic_read(&sk->sk_drops);
74108 ++ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74109 + __skb_queue_tail(&sk->sk_receive_queue, skb);
74110 + spin_unlock(&sk->sk_receive_queue.lock);
74111 + sk->sk_data_ready(sk, skb->len);
74112 +@@ -963,7 +963,7 @@ static int packet_rcv(struct sk_buff *sk
74113 + drop_n_acct:
74114 + spin_lock(&sk->sk_receive_queue.lock);
74115 + po->stats.tp_drops++;
74116 +- atomic_inc(&sk->sk_drops);
74117 ++ atomic_inc_unchecked(&sk->sk_drops);
74118 + spin_unlock(&sk->sk_receive_queue.lock);
74119 +
74120 + drop_n_restore:
74121 +@@ -2479,7 +2479,7 @@ static int packet_getsockopt(struct sock
74122 + case PACKET_HDRLEN:
74123 + if (len > sizeof(int))
74124 + len = sizeof(int);
74125 +- if (copy_from_user(&val, optval, len))
74126 ++ if (len > sizeof(val) || copy_from_user(&val, optval, len))
74127 + return -EFAULT;
74128 + switch (val) {
74129 + case TPACKET_V1:
74130 +@@ -2526,7 +2526,7 @@ static int packet_getsockopt(struct sock
74131 +
74132 + if (put_user(len, optlen))
74133 + return -EFAULT;
74134 +- if (copy_to_user(optval, data, len))
74135 ++ if (len > sizeof(st) || copy_to_user(optval, data, len))
74136 + return -EFAULT;
74137 + return 0;
74138 + }
74139 +diff -urNp linux-3.1.1/net/phonet/af_phonet.c linux-3.1.1/net/phonet/af_phonet.c
74140 +--- linux-3.1.1/net/phonet/af_phonet.c 2011-11-11 15:19:27.000000000 -0500
74141 ++++ linux-3.1.1/net/phonet/af_phonet.c 2011-11-16 18:40:44.000000000 -0500
74142 +@@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
74143 + {
74144 + struct phonet_protocol *pp;
74145 +
74146 +- if (protocol >= PHONET_NPROTO)
74147 ++ if (protocol < 0 || protocol >= PHONET_NPROTO)
74148 + return NULL;
74149 +
74150 + rcu_read_lock();
74151 +@@ -469,7 +469,7 @@ int __init_or_module phonet_proto_regist
74152 + {
74153 + int err = 0;
74154 +
74155 +- if (protocol >= PHONET_NPROTO)
74156 ++ if (protocol < 0 || protocol >= PHONET_NPROTO)
74157 + return -EINVAL;
74158 +
74159 + err = proto_register(pp->prot, 1);
74160 +diff -urNp linux-3.1.1/net/phonet/pep.c linux-3.1.1/net/phonet/pep.c
74161 +--- linux-3.1.1/net/phonet/pep.c 2011-11-11 15:19:27.000000000 -0500
74162 ++++ linux-3.1.1/net/phonet/pep.c 2011-11-16 18:39:08.000000000 -0500
74163 +@@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk,
74164 +
74165 + case PNS_PEP_CTRL_REQ:
74166 + if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
74167 +- atomic_inc(&sk->sk_drops);
74168 ++ atomic_inc_unchecked(&sk->sk_drops);
74169 + break;
74170 + }
74171 + __skb_pull(skb, 4);
74172 +@@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk,
74173 + }
74174 +
74175 + if (pn->rx_credits == 0) {
74176 +- atomic_inc(&sk->sk_drops);
74177 ++ atomic_inc_unchecked(&sk->sk_drops);
74178 + err = -ENOBUFS;
74179 + break;
74180 + }
74181 +@@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct so
74182 + }
74183 +
74184 + if (pn->rx_credits == 0) {
74185 +- atomic_inc(&sk->sk_drops);
74186 ++ atomic_inc_unchecked(&sk->sk_drops);
74187 + err = NET_RX_DROP;
74188 + break;
74189 + }
74190 +diff -urNp linux-3.1.1/net/phonet/socket.c linux-3.1.1/net/phonet/socket.c
74191 +--- linux-3.1.1/net/phonet/socket.c 2011-11-11 15:19:27.000000000 -0500
74192 ++++ linux-3.1.1/net/phonet/socket.c 2011-11-16 18:40:44.000000000 -0500
74193 +@@ -612,8 +612,13 @@ static int pn_sock_seq_show(struct seq_f
74194 + pn->resource, sk->sk_state,
74195 + sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
74196 + sock_i_uid(sk), sock_i_ino(sk),
74197 +- atomic_read(&sk->sk_refcnt), sk,
74198 +- atomic_read(&sk->sk_drops), &len);
74199 ++ atomic_read(&sk->sk_refcnt),
74200 ++#ifdef CONFIG_GRKERNSEC_HIDESYM
74201 ++ NULL,
74202 ++#else
74203 ++ sk,
74204 ++#endif
74205 ++ atomic_read_unchecked(&sk->sk_drops), &len);
74206 + }
74207 + seq_printf(seq, "%*s\n", 127 - len, "");
74208 + return 0;
74209 +diff -urNp linux-3.1.1/net/rds/cong.c linux-3.1.1/net/rds/cong.c
74210 +--- linux-3.1.1/net/rds/cong.c 2011-11-11 15:19:27.000000000 -0500
74211 ++++ linux-3.1.1/net/rds/cong.c 2011-11-16 18:39:08.000000000 -0500
74212 +@@ -77,7 +77,7 @@
74213 + * finds that the saved generation number is smaller than the global generation
74214 + * number, it wakes up the process.
74215 + */
74216 +-static atomic_t rds_cong_generation = ATOMIC_INIT(0);
74217 ++static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
74218 +
74219 + /*
74220 + * Congestion monitoring
74221 +@@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
74222 + rdsdebug("waking map %p for %pI4\n",
74223 + map, &map->m_addr);
74224 + rds_stats_inc(s_cong_update_received);
74225 +- atomic_inc(&rds_cong_generation);
74226 ++ atomic_inc_unchecked(&rds_cong_generation);
74227 + if (waitqueue_active(&map->m_waitq))
74228 + wake_up(&map->m_waitq);
74229 + if (waitqueue_active(&rds_poll_waitq))
74230 +@@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
74231 +
74232 + int rds_cong_updated_since(unsigned long *recent)
74233 + {
74234 +- unsigned long gen = atomic_read(&rds_cong_generation);
74235 ++ unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
74236 +
74237 + if (likely(*recent == gen))
74238 + return 0;
74239 +diff -urNp linux-3.1.1/net/rds/ib_cm.c linux-3.1.1/net/rds/ib_cm.c
74240 +--- linux-3.1.1/net/rds/ib_cm.c 2011-11-11 15:19:27.000000000 -0500
74241 ++++ linux-3.1.1/net/rds/ib_cm.c 2011-11-16 18:39:08.000000000 -0500
74242 +@@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_con
74243 + /* Clear the ACK state */
74244 + clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74245 + #ifdef KERNEL_HAS_ATOMIC64
74246 +- atomic64_set(&ic->i_ack_next, 0);
74247 ++ atomic64_set_unchecked(&ic->i_ack_next, 0);
74248 + #else
74249 + ic->i_ack_next = 0;
74250 + #endif
74251 +diff -urNp linux-3.1.1/net/rds/ib.h linux-3.1.1/net/rds/ib.h
74252 +--- linux-3.1.1/net/rds/ib.h 2011-11-11 15:19:27.000000000 -0500
74253 ++++ linux-3.1.1/net/rds/ib.h 2011-11-16 18:39:08.000000000 -0500
74254 +@@ -128,7 +128,7 @@ struct rds_ib_connection {
74255 + /* sending acks */
74256 + unsigned long i_ack_flags;
74257 + #ifdef KERNEL_HAS_ATOMIC64
74258 +- atomic64_t i_ack_next; /* next ACK to send */
74259 ++ atomic64_unchecked_t i_ack_next; /* next ACK to send */
74260 + #else
74261 + spinlock_t i_ack_lock; /* protect i_ack_next */
74262 + u64 i_ack_next; /* next ACK to send */
74263 +diff -urNp linux-3.1.1/net/rds/ib_recv.c linux-3.1.1/net/rds/ib_recv.c
74264 +--- linux-3.1.1/net/rds/ib_recv.c 2011-11-11 15:19:27.000000000 -0500
74265 ++++ linux-3.1.1/net/rds/ib_recv.c 2011-11-16 18:39:08.000000000 -0500
74266 +@@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
74267 + static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
74268 + int ack_required)
74269 + {
74270 +- atomic64_set(&ic->i_ack_next, seq);
74271 ++ atomic64_set_unchecked(&ic->i_ack_next, seq);
74272 + if (ack_required) {
74273 + smp_mb__before_clear_bit();
74274 + set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74275 +@@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
74276 + clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74277 + smp_mb__after_clear_bit();
74278 +
74279 +- return atomic64_read(&ic->i_ack_next);
74280 ++ return atomic64_read_unchecked(&ic->i_ack_next);
74281 + }
74282 + #endif
74283 +
74284 +diff -urNp linux-3.1.1/net/rds/iw_cm.c linux-3.1.1/net/rds/iw_cm.c
74285 +--- linux-3.1.1/net/rds/iw_cm.c 2011-11-11 15:19:27.000000000 -0500
74286 ++++ linux-3.1.1/net/rds/iw_cm.c 2011-11-16 18:39:08.000000000 -0500
74287 +@@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_con
74288 + /* Clear the ACK state */
74289 + clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74290 + #ifdef KERNEL_HAS_ATOMIC64
74291 +- atomic64_set(&ic->i_ack_next, 0);
74292 ++ atomic64_set_unchecked(&ic->i_ack_next, 0);
74293 + #else
74294 + ic->i_ack_next = 0;
74295 + #endif
74296 +diff -urNp linux-3.1.1/net/rds/iw.h linux-3.1.1/net/rds/iw.h
74297 +--- linux-3.1.1/net/rds/iw.h 2011-11-11 15:19:27.000000000 -0500
74298 ++++ linux-3.1.1/net/rds/iw.h 2011-11-16 18:39:08.000000000 -0500
74299 +@@ -134,7 +134,7 @@ struct rds_iw_connection {
74300 + /* sending acks */
74301 + unsigned long i_ack_flags;
74302 + #ifdef KERNEL_HAS_ATOMIC64
74303 +- atomic64_t i_ack_next; /* next ACK to send */
74304 ++ atomic64_unchecked_t i_ack_next; /* next ACK to send */
74305 + #else
74306 + spinlock_t i_ack_lock; /* protect i_ack_next */
74307 + u64 i_ack_next; /* next ACK to send */
74308 +diff -urNp linux-3.1.1/net/rds/iw_rdma.c linux-3.1.1/net/rds/iw_rdma.c
74309 +--- linux-3.1.1/net/rds/iw_rdma.c 2011-11-11 15:19:27.000000000 -0500
74310 ++++ linux-3.1.1/net/rds/iw_rdma.c 2011-11-16 18:40:44.000000000 -0500
74311 +@@ -184,6 +184,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
74312 + struct rdma_cm_id *pcm_id;
74313 + int rc;
74314 +
74315 ++ pax_track_stack();
74316 ++
74317 + src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
74318 + dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
74319 +
74320 +diff -urNp linux-3.1.1/net/rds/iw_recv.c linux-3.1.1/net/rds/iw_recv.c
74321 +--- linux-3.1.1/net/rds/iw_recv.c 2011-11-11 15:19:27.000000000 -0500
74322 ++++ linux-3.1.1/net/rds/iw_recv.c 2011-11-16 18:39:08.000000000 -0500
74323 +@@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
74324 + static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
74325 + int ack_required)
74326 + {
74327 +- atomic64_set(&ic->i_ack_next, seq);
74328 ++ atomic64_set_unchecked(&ic->i_ack_next, seq);
74329 + if (ack_required) {
74330 + smp_mb__before_clear_bit();
74331 + set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74332 +@@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
74333 + clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74334 + smp_mb__after_clear_bit();
74335 +
74336 +- return atomic64_read(&ic->i_ack_next);
74337 ++ return atomic64_read_unchecked(&ic->i_ack_next);
74338 + }
74339 + #endif
74340 +
74341 +diff -urNp linux-3.1.1/net/rds/tcp.c linux-3.1.1/net/rds/tcp.c
74342 +--- linux-3.1.1/net/rds/tcp.c 2011-11-11 15:19:27.000000000 -0500
74343 ++++ linux-3.1.1/net/rds/tcp.c 2011-11-16 18:39:08.000000000 -0500
74344 +@@ -58,7 +58,7 @@ void rds_tcp_nonagle(struct socket *sock
74345 + int val = 1;
74346 +
74347 + set_fs(KERNEL_DS);
74348 +- sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
74349 ++ sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
74350 + sizeof(val));
74351 + set_fs(oldfs);
74352 + }
74353 +diff -urNp linux-3.1.1/net/rds/tcp_send.c linux-3.1.1/net/rds/tcp_send.c
74354 +--- linux-3.1.1/net/rds/tcp_send.c 2011-11-11 15:19:27.000000000 -0500
74355 ++++ linux-3.1.1/net/rds/tcp_send.c 2011-11-16 18:39:08.000000000 -0500
74356 +@@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *
74357 +
74358 + oldfs = get_fs();
74359 + set_fs(KERNEL_DS);
74360 +- sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
74361 ++ sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
74362 + sizeof(val));
74363 + set_fs(oldfs);
74364 + }
74365 +diff -urNp linux-3.1.1/net/rxrpc/af_rxrpc.c linux-3.1.1/net/rxrpc/af_rxrpc.c
74366 +--- linux-3.1.1/net/rxrpc/af_rxrpc.c 2011-11-11 15:19:27.000000000 -0500
74367 ++++ linux-3.1.1/net/rxrpc/af_rxrpc.c 2011-11-16 18:39:08.000000000 -0500
74368 +@@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_
74369 + __be32 rxrpc_epoch;
74370 +
74371 + /* current debugging ID */
74372 +-atomic_t rxrpc_debug_id;
74373 ++atomic_unchecked_t rxrpc_debug_id;
74374 +
74375 + /* count of skbs currently in use */
74376 + atomic_t rxrpc_n_skbs;
74377 +diff -urNp linux-3.1.1/net/rxrpc/ar-ack.c linux-3.1.1/net/rxrpc/ar-ack.c
74378 +--- linux-3.1.1/net/rxrpc/ar-ack.c 2011-11-11 15:19:27.000000000 -0500
74379 ++++ linux-3.1.1/net/rxrpc/ar-ack.c 2011-11-16 18:40:44.000000000 -0500
74380 +@@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
74381 +
74382 + _enter("{%d,%d,%d,%d},",
74383 + call->acks_hard, call->acks_unacked,
74384 +- atomic_read(&call->sequence),
74385 ++ atomic_read_unchecked(&call->sequence),
74386 + CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
74387 +
74388 + stop = 0;
74389 +@@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
74390 +
74391 + /* each Tx packet has a new serial number */
74392 + sp->hdr.serial =
74393 +- htonl(atomic_inc_return(&call->conn->serial));
74394 ++ htonl(atomic_inc_return_unchecked(&call->conn->serial));
74395 +
74396 + hdr = (struct rxrpc_header *) txb->head;
74397 + hdr->serial = sp->hdr.serial;
74398 +@@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struc
74399 + */
74400 + static void rxrpc_clear_tx_window(struct rxrpc_call *call)
74401 + {
74402 +- rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
74403 ++ rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
74404 + }
74405 +
74406 + /*
74407 +@@ -629,7 +629,7 @@ process_further:
74408 +
74409 + latest = ntohl(sp->hdr.serial);
74410 + hard = ntohl(ack.firstPacket);
74411 +- tx = atomic_read(&call->sequence);
74412 ++ tx = atomic_read_unchecked(&call->sequence);
74413 +
74414 + _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74415 + latest,
74416 +@@ -842,6 +842,8 @@ void rxrpc_process_call(struct work_stru
74417 + u32 abort_code = RX_PROTOCOL_ERROR;
74418 + u8 *acks = NULL;
74419 +
74420 ++ pax_track_stack();
74421 ++
74422 + //printk("\n--------------------\n");
74423 + _enter("{%d,%s,%lx} [%lu]",
74424 + call->debug_id, rxrpc_call_states[call->state], call->events,
74425 +@@ -1161,7 +1163,7 @@ void rxrpc_process_call(struct work_stru
74426 + goto maybe_reschedule;
74427 +
74428 + send_ACK_with_skew:
74429 +- ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
74430 ++ ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
74431 + ntohl(ack.serial));
74432 + send_ACK:
74433 + mtu = call->conn->trans->peer->if_mtu;
74434 +@@ -1173,7 +1175,7 @@ send_ACK:
74435 + ackinfo.rxMTU = htonl(5692);
74436 + ackinfo.jumbo_max = htonl(4);
74437 +
74438 +- hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74439 ++ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74440 + _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74441 + ntohl(hdr.serial),
74442 + ntohs(ack.maxSkew),
74443 +@@ -1191,7 +1193,7 @@ send_ACK:
74444 + send_message:
74445 + _debug("send message");
74446 +
74447 +- hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74448 ++ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74449 + _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
74450 + send_message_2:
74451 +
74452 +diff -urNp linux-3.1.1/net/rxrpc/ar-call.c linux-3.1.1/net/rxrpc/ar-call.c
74453 +--- linux-3.1.1/net/rxrpc/ar-call.c 2011-11-11 15:19:27.000000000 -0500
74454 ++++ linux-3.1.1/net/rxrpc/ar-call.c 2011-11-16 18:39:08.000000000 -0500
74455 +@@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
74456 + spin_lock_init(&call->lock);
74457 + rwlock_init(&call->state_lock);
74458 + atomic_set(&call->usage, 1);
74459 +- call->debug_id = atomic_inc_return(&rxrpc_debug_id);
74460 ++ call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74461 + call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
74462 +
74463 + memset(&call->sock_node, 0xed, sizeof(call->sock_node));
74464 +diff -urNp linux-3.1.1/net/rxrpc/ar-connection.c linux-3.1.1/net/rxrpc/ar-connection.c
74465 +--- linux-3.1.1/net/rxrpc/ar-connection.c 2011-11-11 15:19:27.000000000 -0500
74466 ++++ linux-3.1.1/net/rxrpc/ar-connection.c 2011-11-16 18:39:08.000000000 -0500
74467 +@@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
74468 + rwlock_init(&conn->lock);
74469 + spin_lock_init(&conn->state_lock);
74470 + atomic_set(&conn->usage, 1);
74471 +- conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
74472 ++ conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74473 + conn->avail_calls = RXRPC_MAXCALLS;
74474 + conn->size_align = 4;
74475 + conn->header_size = sizeof(struct rxrpc_header);
74476 +diff -urNp linux-3.1.1/net/rxrpc/ar-connevent.c linux-3.1.1/net/rxrpc/ar-connevent.c
74477 +--- linux-3.1.1/net/rxrpc/ar-connevent.c 2011-11-11 15:19:27.000000000 -0500
74478 ++++ linux-3.1.1/net/rxrpc/ar-connevent.c 2011-11-16 18:39:08.000000000 -0500
74479 +@@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
74480 +
74481 + len = iov[0].iov_len + iov[1].iov_len;
74482 +
74483 +- hdr.serial = htonl(atomic_inc_return(&conn->serial));
74484 ++ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74485 + _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
74486 +
74487 + ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
74488 +diff -urNp linux-3.1.1/net/rxrpc/ar-input.c linux-3.1.1/net/rxrpc/ar-input.c
74489 +--- linux-3.1.1/net/rxrpc/ar-input.c 2011-11-11 15:19:27.000000000 -0500
74490 ++++ linux-3.1.1/net/rxrpc/ar-input.c 2011-11-16 18:39:08.000000000 -0500
74491 +@@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
74492 + /* track the latest serial number on this connection for ACK packet
74493 + * information */
74494 + serial = ntohl(sp->hdr.serial);
74495 +- hi_serial = atomic_read(&call->conn->hi_serial);
74496 ++ hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
74497 + while (serial > hi_serial)
74498 +- hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
74499 ++ hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
74500 + serial);
74501 +
74502 + /* request ACK generation for any ACK or DATA packet that requests
74503 +diff -urNp linux-3.1.1/net/rxrpc/ar-internal.h linux-3.1.1/net/rxrpc/ar-internal.h
74504 +--- linux-3.1.1/net/rxrpc/ar-internal.h 2011-11-11 15:19:27.000000000 -0500
74505 ++++ linux-3.1.1/net/rxrpc/ar-internal.h 2011-11-16 18:39:08.000000000 -0500
74506 +@@ -272,8 +272,8 @@ struct rxrpc_connection {
74507 + int error; /* error code for local abort */
74508 + int debug_id; /* debug ID for printks */
74509 + unsigned call_counter; /* call ID counter */
74510 +- atomic_t serial; /* packet serial number counter */
74511 +- atomic_t hi_serial; /* highest serial number received */
74512 ++ atomic_unchecked_t serial; /* packet serial number counter */
74513 ++ atomic_unchecked_t hi_serial; /* highest serial number received */
74514 + u8 avail_calls; /* number of calls available */
74515 + u8 size_align; /* data size alignment (for security) */
74516 + u8 header_size; /* rxrpc + security header size */
74517 +@@ -346,7 +346,7 @@ struct rxrpc_call {
74518 + spinlock_t lock;
74519 + rwlock_t state_lock; /* lock for state transition */
74520 + atomic_t usage;
74521 +- atomic_t sequence; /* Tx data packet sequence counter */
74522 ++ atomic_unchecked_t sequence; /* Tx data packet sequence counter */
74523 + u32 abort_code; /* local/remote abort code */
74524 + enum { /* current state of call */
74525 + RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
74526 +@@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
74527 + */
74528 + extern atomic_t rxrpc_n_skbs;
74529 + extern __be32 rxrpc_epoch;
74530 +-extern atomic_t rxrpc_debug_id;
74531 ++extern atomic_unchecked_t rxrpc_debug_id;
74532 + extern struct workqueue_struct *rxrpc_workqueue;
74533 +
74534 + /*
74535 +diff -urNp linux-3.1.1/net/rxrpc/ar-local.c linux-3.1.1/net/rxrpc/ar-local.c
74536 +--- linux-3.1.1/net/rxrpc/ar-local.c 2011-11-11 15:19:27.000000000 -0500
74537 ++++ linux-3.1.1/net/rxrpc/ar-local.c 2011-11-16 18:39:08.000000000 -0500
74538 +@@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
74539 + spin_lock_init(&local->lock);
74540 + rwlock_init(&local->services_lock);
74541 + atomic_set(&local->usage, 1);
74542 +- local->debug_id = atomic_inc_return(&rxrpc_debug_id);
74543 ++ local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74544 + memcpy(&local->srx, srx, sizeof(*srx));
74545 + }
74546 +
74547 +diff -urNp linux-3.1.1/net/rxrpc/ar-output.c linux-3.1.1/net/rxrpc/ar-output.c
74548 +--- linux-3.1.1/net/rxrpc/ar-output.c 2011-11-11 15:19:27.000000000 -0500
74549 ++++ linux-3.1.1/net/rxrpc/ar-output.c 2011-11-16 18:39:08.000000000 -0500
74550 +@@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb
74551 + sp->hdr.cid = call->cid;
74552 + sp->hdr.callNumber = call->call_id;
74553 + sp->hdr.seq =
74554 +- htonl(atomic_inc_return(&call->sequence));
74555 ++ htonl(atomic_inc_return_unchecked(&call->sequence));
74556 + sp->hdr.serial =
74557 +- htonl(atomic_inc_return(&conn->serial));
74558 ++ htonl(atomic_inc_return_unchecked(&conn->serial));
74559 + sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
74560 + sp->hdr.userStatus = 0;
74561 + sp->hdr.securityIndex = conn->security_ix;
74562 +diff -urNp linux-3.1.1/net/rxrpc/ar-peer.c linux-3.1.1/net/rxrpc/ar-peer.c
74563 +--- linux-3.1.1/net/rxrpc/ar-peer.c 2011-11-11 15:19:27.000000000 -0500
74564 ++++ linux-3.1.1/net/rxrpc/ar-peer.c 2011-11-16 18:39:08.000000000 -0500
74565 +@@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
74566 + INIT_LIST_HEAD(&peer->error_targets);
74567 + spin_lock_init(&peer->lock);
74568 + atomic_set(&peer->usage, 1);
74569 +- peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
74570 ++ peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74571 + memcpy(&peer->srx, srx, sizeof(*srx));
74572 +
74573 + rxrpc_assess_MTU_size(peer);
74574 +diff -urNp linux-3.1.1/net/rxrpc/ar-proc.c linux-3.1.1/net/rxrpc/ar-proc.c
74575 +--- linux-3.1.1/net/rxrpc/ar-proc.c 2011-11-11 15:19:27.000000000 -0500
74576 ++++ linux-3.1.1/net/rxrpc/ar-proc.c 2011-11-16 18:39:08.000000000 -0500
74577 +@@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
74578 + atomic_read(&conn->usage),
74579 + rxrpc_conn_states[conn->state],
74580 + key_serial(conn->key),
74581 +- atomic_read(&conn->serial),
74582 +- atomic_read(&conn->hi_serial));
74583 ++ atomic_read_unchecked(&conn->serial),
74584 ++ atomic_read_unchecked(&conn->hi_serial));
74585 +
74586 + return 0;
74587 + }
74588 +diff -urNp linux-3.1.1/net/rxrpc/ar-transport.c linux-3.1.1/net/rxrpc/ar-transport.c
74589 +--- linux-3.1.1/net/rxrpc/ar-transport.c 2011-11-11 15:19:27.000000000 -0500
74590 ++++ linux-3.1.1/net/rxrpc/ar-transport.c 2011-11-16 18:39:08.000000000 -0500
74591 +@@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
74592 + spin_lock_init(&trans->client_lock);
74593 + rwlock_init(&trans->conn_lock);
74594 + atomic_set(&trans->usage, 1);
74595 +- trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
74596 ++ trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74597 +
74598 + if (peer->srx.transport.family == AF_INET) {
74599 + switch (peer->srx.transport_type) {
74600 +diff -urNp linux-3.1.1/net/rxrpc/rxkad.c linux-3.1.1/net/rxrpc/rxkad.c
74601 +--- linux-3.1.1/net/rxrpc/rxkad.c 2011-11-11 15:19:27.000000000 -0500
74602 ++++ linux-3.1.1/net/rxrpc/rxkad.c 2011-11-16 18:40:44.000000000 -0500
74603 +@@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(c
74604 + u16 check;
74605 + int nsg;
74606 +
74607 ++ pax_track_stack();
74608 ++
74609 + sp = rxrpc_skb(skb);
74610 +
74611 + _enter("");
74612 +@@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(cons
74613 + u16 check;
74614 + int nsg;
74615 +
74616 ++ pax_track_stack();
74617 ++
74618 + _enter("");
74619 +
74620 + sp = rxrpc_skb(skb);
74621 +@@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct
74622 +
74623 + len = iov[0].iov_len + iov[1].iov_len;
74624 +
74625 +- hdr.serial = htonl(atomic_inc_return(&conn->serial));
74626 ++ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74627 + _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
74628 +
74629 + ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
74630 +@@ -660,7 +664,7 @@ static int rxkad_send_response(struct rx
74631 +
74632 + len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
74633 +
74634 +- hdr->serial = htonl(atomic_inc_return(&conn->serial));
74635 ++ hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74636 + _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
74637 +
74638 + ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
74639 +diff -urNp linux-3.1.1/net/sctp/proc.c linux-3.1.1/net/sctp/proc.c
74640 +--- linux-3.1.1/net/sctp/proc.c 2011-11-11 15:19:27.000000000 -0500
74641 ++++ linux-3.1.1/net/sctp/proc.c 2011-11-16 18:40:44.000000000 -0500
74642 +@@ -318,7 +318,8 @@ static int sctp_assocs_seq_show(struct s
74643 + seq_printf(seq,
74644 + "%8pK %8pK %-3d %-3d %-2d %-4d "
74645 + "%4d %8d %8d %7d %5lu %-5d %5d ",
74646 +- assoc, sk, sctp_sk(sk)->type, sk->sk_state,
74647 ++ assoc, sk,
74648 ++ sctp_sk(sk)->type, sk->sk_state,
74649 + assoc->state, hash,
74650 + assoc->assoc_id,
74651 + assoc->sndbuf_used,
74652 +diff -urNp linux-3.1.1/net/sctp/socket.c linux-3.1.1/net/sctp/socket.c
74653 +--- linux-3.1.1/net/sctp/socket.c 2011-11-11 15:19:27.000000000 -0500
74654 ++++ linux-3.1.1/net/sctp/socket.c 2011-11-16 18:39:08.000000000 -0500
74655 +@@ -4575,7 +4575,7 @@ static int sctp_getsockopt_peer_addrs(st
74656 + addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
74657 + if (space_left < addrlen)
74658 + return -ENOMEM;
74659 +- if (copy_to_user(to, &temp, addrlen))
74660 ++ if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
74661 + return -EFAULT;
74662 + to += addrlen;
74663 + cnt++;
74664 +diff -urNp linux-3.1.1/net/socket.c linux-3.1.1/net/socket.c
74665 +--- linux-3.1.1/net/socket.c 2011-11-11 15:19:27.000000000 -0500
74666 ++++ linux-3.1.1/net/socket.c 2011-11-16 18:40:44.000000000 -0500
74667 +@@ -88,6 +88,7 @@
74668 + #include <linux/nsproxy.h>
74669 + #include <linux/magic.h>
74670 + #include <linux/slab.h>
74671 ++#include <linux/in.h>
74672 +
74673 + #include <asm/uaccess.h>
74674 + #include <asm/unistd.h>
74675 +@@ -105,6 +106,8 @@
74676 + #include <linux/sockios.h>
74677 + #include <linux/atalk.h>
74678 +
74679 ++#include <linux/grsock.h>
74680 ++
74681 + static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
74682 + static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
74683 + unsigned long nr_segs, loff_t pos);
74684 +@@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struc
74685 + &sockfs_dentry_operations, SOCKFS_MAGIC);
74686 + }
74687 +
74688 +-static struct vfsmount *sock_mnt __read_mostly;
74689 ++struct vfsmount *sock_mnt __read_mostly;
74690 +
74691 + static struct file_system_type sock_fs_type = {
74692 + .name = "sockfs",
74693 +@@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int f
74694 + return -EAFNOSUPPORT;
74695 + if (type < 0 || type >= SOCK_MAX)
74696 + return -EINVAL;
74697 ++ if (protocol < 0)
74698 ++ return -EINVAL;
74699 +
74700 + /* Compatibility.
74701 +
74702 +@@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int
74703 + if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
74704 + flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
74705 +
74706 ++ if(!gr_search_socket(family, type, protocol)) {
74707 ++ retval = -EACCES;
74708 ++ goto out;
74709 ++ }
74710 ++
74711 ++ if (gr_handle_sock_all(family, type, protocol)) {
74712 ++ retval = -EACCES;
74713 ++ goto out;
74714 ++ }
74715 ++
74716 + retval = sock_create(family, type, protocol, &sock);
74717 + if (retval < 0)
74718 + goto out;
74719 +@@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
74720 + if (sock) {
74721 + err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
74722 + if (err >= 0) {
74723 ++ if (gr_handle_sock_server((struct sockaddr *)&address)) {
74724 ++ err = -EACCES;
74725 ++ goto error;
74726 ++ }
74727 ++ err = gr_search_bind(sock, (struct sockaddr_in *)&address);
74728 ++ if (err)
74729 ++ goto error;
74730 ++
74731 + err = security_socket_bind(sock,
74732 + (struct sockaddr *)&address,
74733 + addrlen);
74734 +@@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
74735 + (struct sockaddr *)
74736 + &address, addrlen);
74737 + }
74738 ++error:
74739 + fput_light(sock->file, fput_needed);
74740 + }
74741 + return err;
74742 +@@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
74743 + if ((unsigned)backlog > somaxconn)
74744 + backlog = somaxconn;
74745 +
74746 ++ if (gr_handle_sock_server_other(sock->sk)) {
74747 ++ err = -EPERM;
74748 ++ goto error;
74749 ++ }
74750 ++
74751 ++ err = gr_search_listen(sock);
74752 ++ if (err)
74753 ++ goto error;
74754 ++
74755 + err = security_socket_listen(sock, backlog);
74756 + if (!err)
74757 + err = sock->ops->listen(sock, backlog);
74758 +
74759 ++error:
74760 + fput_light(sock->file, fput_needed);
74761 + }
74762 + return err;
74763 +@@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
74764 + newsock->type = sock->type;
74765 + newsock->ops = sock->ops;
74766 +
74767 ++ if (gr_handle_sock_server_other(sock->sk)) {
74768 ++ err = -EPERM;
74769 ++ sock_release(newsock);
74770 ++ goto out_put;
74771 ++ }
74772 ++
74773 ++ err = gr_search_accept(sock);
74774 ++ if (err) {
74775 ++ sock_release(newsock);
74776 ++ goto out_put;
74777 ++ }
74778 ++
74779 + /*
74780 + * We don't need try_module_get here, as the listening socket (sock)
74781 + * has the protocol module (sock->ops->owner) held.
74782 +@@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
74783 + fd_install(newfd, newfile);
74784 + err = newfd;
74785 +
74786 ++ gr_attach_curr_ip(newsock->sk);
74787 ++
74788 + out_put:
74789 + fput_light(sock->file, fput_needed);
74790 + out:
74791 +@@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
74792 + int, addrlen)
74793 + {
74794 + struct socket *sock;
74795 ++ struct sockaddr *sck;
74796 + struct sockaddr_storage address;
74797 + int err, fput_needed;
74798 +
74799 +@@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
74800 + if (err < 0)
74801 + goto out_put;
74802 +
74803 ++ sck = (struct sockaddr *)&address;
74804 ++
74805 ++ if (gr_handle_sock_client(sck)) {
74806 ++ err = -EACCES;
74807 ++ goto out_put;
74808 ++ }
74809 ++
74810 ++ err = gr_search_connect(sock, (struct sockaddr_in *)sck);
74811 ++ if (err)
74812 ++ goto out_put;
74813 ++
74814 + err =
74815 + security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
74816 + if (err)
74817 +@@ -1890,6 +1950,8 @@ static int __sys_sendmsg(struct socket *
74818 + unsigned char *ctl_buf = ctl;
74819 + int err, ctl_len, iov_size, total_len;
74820 +
74821 ++ pax_track_stack();
74822 ++
74823 + err = -EFAULT;
74824 + if (MSG_CMSG_COMPAT & flags) {
74825 + if (get_compat_msghdr(msg_sys, msg_compat))
74826 +@@ -1950,7 +2012,7 @@ static int __sys_sendmsg(struct socket *
74827 + * checking falls down on this.
74828 + */
74829 + if (copy_from_user(ctl_buf,
74830 +- (void __user __force *)msg_sys->msg_control,
74831 ++ (void __force_user *)msg_sys->msg_control,
74832 + ctl_len))
74833 + goto out_freectl;
74834 + msg_sys->msg_control = ctl_buf;
74835 +@@ -2120,7 +2182,7 @@ static int __sys_recvmsg(struct socket *
74836 + * kernel msghdr to use the kernel address space)
74837 + */
74838 +
74839 +- uaddr = (__force void __user *)msg_sys->msg_name;
74840 ++ uaddr = (void __force_user *)msg_sys->msg_name;
74841 + uaddr_len = COMPAT_NAMELEN(msg);
74842 + if (MSG_CMSG_COMPAT & flags) {
74843 + err = verify_compat_iovec(msg_sys, iov,
74844 +@@ -2748,7 +2810,7 @@ static int ethtool_ioctl(struct net *net
74845 + }
74846 +
74847 + ifr = compat_alloc_user_space(buf_size);
74848 +- rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
74849 ++ rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
74850 +
74851 + if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
74852 + return -EFAULT;
74853 +@@ -2772,12 +2834,12 @@ static int ethtool_ioctl(struct net *net
74854 + offsetof(struct ethtool_rxnfc, fs.ring_cookie));
74855 +
74856 + if (copy_in_user(rxnfc, compat_rxnfc,
74857 +- (void *)(&rxnfc->fs.m_ext + 1) -
74858 +- (void *)rxnfc) ||
74859 ++ (void __user *)(&rxnfc->fs.m_ext + 1) -
74860 ++ (void __user *)rxnfc) ||
74861 + copy_in_user(&rxnfc->fs.ring_cookie,
74862 + &compat_rxnfc->fs.ring_cookie,
74863 +- (void *)(&rxnfc->fs.location + 1) -
74864 +- (void *)&rxnfc->fs.ring_cookie) ||
74865 ++ (void __user *)(&rxnfc->fs.location + 1) -
74866 ++ (void __user *)&rxnfc->fs.ring_cookie) ||
74867 + copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
74868 + sizeof(rxnfc->rule_cnt)))
74869 + return -EFAULT;
74870 +@@ -2789,12 +2851,12 @@ static int ethtool_ioctl(struct net *net
74871 +
74872 + if (convert_out) {
74873 + if (copy_in_user(compat_rxnfc, rxnfc,
74874 +- (const void *)(&rxnfc->fs.m_ext + 1) -
74875 +- (const void *)rxnfc) ||
74876 ++ (const void __user *)(&rxnfc->fs.m_ext + 1) -
74877 ++ (const void __user *)rxnfc) ||
74878 + copy_in_user(&compat_rxnfc->fs.ring_cookie,
74879 + &rxnfc->fs.ring_cookie,
74880 +- (const void *)(&rxnfc->fs.location + 1) -
74881 +- (const void *)&rxnfc->fs.ring_cookie) ||
74882 ++ (const void __user *)(&rxnfc->fs.location + 1) -
74883 ++ (const void __user *)&rxnfc->fs.ring_cookie) ||
74884 + copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
74885 + sizeof(rxnfc->rule_cnt)))
74886 + return -EFAULT;
74887 +@@ -2864,7 +2926,7 @@ static int bond_ioctl(struct net *net, u
74888 + old_fs = get_fs();
74889 + set_fs(KERNEL_DS);
74890 + err = dev_ioctl(net, cmd,
74891 +- (struct ifreq __user __force *) &kifr);
74892 ++ (struct ifreq __force_user *) &kifr);
74893 + set_fs(old_fs);
74894 +
74895 + return err;
74896 +@@ -2973,7 +3035,7 @@ static int compat_sioc_ifmap(struct net
74897 +
74898 + old_fs = get_fs();
74899 + set_fs(KERNEL_DS);
74900 +- err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
74901 ++ err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
74902 + set_fs(old_fs);
74903 +
74904 + if (cmd == SIOCGIFMAP && !err) {
74905 +@@ -3078,7 +3140,7 @@ static int routing_ioctl(struct net *net
74906 + ret |= __get_user(rtdev, &(ur4->rt_dev));
74907 + if (rtdev) {
74908 + ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
74909 +- r4.rt_dev = (char __user __force *)devname;
74910 ++ r4.rt_dev = (char __force_user *)devname;
74911 + devname[15] = 0;
74912 + } else
74913 + r4.rt_dev = NULL;
74914 +@@ -3318,8 +3380,8 @@ int kernel_getsockopt(struct socket *soc
74915 + int __user *uoptlen;
74916 + int err;
74917 +
74918 +- uoptval = (char __user __force *) optval;
74919 +- uoptlen = (int __user __force *) optlen;
74920 ++ uoptval = (char __force_user *) optval;
74921 ++ uoptlen = (int __force_user *) optlen;
74922 +
74923 + set_fs(KERNEL_DS);
74924 + if (level == SOL_SOCKET)
74925 +@@ -3339,7 +3401,7 @@ int kernel_setsockopt(struct socket *soc
74926 + char __user *uoptval;
74927 + int err;
74928 +
74929 +- uoptval = (char __user __force *) optval;
74930 ++ uoptval = (char __force_user *) optval;
74931 +
74932 + set_fs(KERNEL_DS);
74933 + if (level == SOL_SOCKET)
74934 +diff -urNp linux-3.1.1/net/sunrpc/sched.c linux-3.1.1/net/sunrpc/sched.c
74935 +--- linux-3.1.1/net/sunrpc/sched.c 2011-11-11 15:19:27.000000000 -0500
74936 ++++ linux-3.1.1/net/sunrpc/sched.c 2011-11-16 18:39:08.000000000 -0500
74937 +@@ -238,9 +238,9 @@ static int rpc_wait_bit_killable(void *w
74938 + #ifdef RPC_DEBUG
74939 + static void rpc_task_set_debuginfo(struct rpc_task *task)
74940 + {
74941 +- static atomic_t rpc_pid;
74942 ++ static atomic_unchecked_t rpc_pid;
74943 +
74944 +- task->tk_pid = atomic_inc_return(&rpc_pid);
74945 ++ task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
74946 + }
74947 + #else
74948 + static inline void rpc_task_set_debuginfo(struct rpc_task *task)
74949 +diff -urNp linux-3.1.1/net/sunrpc/svcsock.c linux-3.1.1/net/sunrpc/svcsock.c
74950 +--- linux-3.1.1/net/sunrpc/svcsock.c 2011-11-11 15:19:27.000000000 -0500
74951 ++++ linux-3.1.1/net/sunrpc/svcsock.c 2011-11-16 18:39:08.000000000 -0500
74952 +@@ -394,7 +394,7 @@ static int svc_partial_recvfrom(struct s
74953 + int buflen, unsigned int base)
74954 + {
74955 + size_t save_iovlen;
74956 +- void __user *save_iovbase;
74957 ++ void *save_iovbase;
74958 + unsigned int i;
74959 + int ret;
74960 +
74961 +diff -urNp linux-3.1.1/net/sunrpc/xprtrdma/svc_rdma.c linux-3.1.1/net/sunrpc/xprtrdma/svc_rdma.c
74962 +--- linux-3.1.1/net/sunrpc/xprtrdma/svc_rdma.c 2011-11-11 15:19:27.000000000 -0500
74963 ++++ linux-3.1.1/net/sunrpc/xprtrdma/svc_rdma.c 2011-11-16 18:39:08.000000000 -0500
74964 +@@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCR
74965 + static unsigned int min_max_inline = 4096;
74966 + static unsigned int max_max_inline = 65536;
74967 +
74968 +-atomic_t rdma_stat_recv;
74969 +-atomic_t rdma_stat_read;
74970 +-atomic_t rdma_stat_write;
74971 +-atomic_t rdma_stat_sq_starve;
74972 +-atomic_t rdma_stat_rq_starve;
74973 +-atomic_t rdma_stat_rq_poll;
74974 +-atomic_t rdma_stat_rq_prod;
74975 +-atomic_t rdma_stat_sq_poll;
74976 +-atomic_t rdma_stat_sq_prod;
74977 ++atomic_unchecked_t rdma_stat_recv;
74978 ++atomic_unchecked_t rdma_stat_read;
74979 ++atomic_unchecked_t rdma_stat_write;
74980 ++atomic_unchecked_t rdma_stat_sq_starve;
74981 ++atomic_unchecked_t rdma_stat_rq_starve;
74982 ++atomic_unchecked_t rdma_stat_rq_poll;
74983 ++atomic_unchecked_t rdma_stat_rq_prod;
74984 ++atomic_unchecked_t rdma_stat_sq_poll;
74985 ++atomic_unchecked_t rdma_stat_sq_prod;
74986 +
74987 + /* Temporary NFS request map and context caches */
74988 + struct kmem_cache *svc_rdma_map_cachep;
74989 +@@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *ta
74990 + len -= *ppos;
74991 + if (len > *lenp)
74992 + len = *lenp;
74993 +- if (len && copy_to_user(buffer, str_buf, len))
74994 ++ if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
74995 + return -EFAULT;
74996 + *lenp = len;
74997 + *ppos += len;
74998 +@@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] =
74999 + {
75000 + .procname = "rdma_stat_read",
75001 + .data = &rdma_stat_read,
75002 +- .maxlen = sizeof(atomic_t),
75003 ++ .maxlen = sizeof(atomic_unchecked_t),
75004 + .mode = 0644,
75005 + .proc_handler = read_reset_stat,
75006 + },
75007 + {
75008 + .procname = "rdma_stat_recv",
75009 + .data = &rdma_stat_recv,
75010 +- .maxlen = sizeof(atomic_t),
75011 ++ .maxlen = sizeof(atomic_unchecked_t),
75012 + .mode = 0644,
75013 + .proc_handler = read_reset_stat,
75014 + },
75015 + {
75016 + .procname = "rdma_stat_write",
75017 + .data = &rdma_stat_write,
75018 +- .maxlen = sizeof(atomic_t),
75019 ++ .maxlen = sizeof(atomic_unchecked_t),
75020 + .mode = 0644,
75021 + .proc_handler = read_reset_stat,
75022 + },
75023 + {
75024 + .procname = "rdma_stat_sq_starve",
75025 + .data = &rdma_stat_sq_starve,
75026 +- .maxlen = sizeof(atomic_t),
75027 ++ .maxlen = sizeof(atomic_unchecked_t),
75028 + .mode = 0644,
75029 + .proc_handler = read_reset_stat,
75030 + },
75031 + {
75032 + .procname = "rdma_stat_rq_starve",
75033 + .data = &rdma_stat_rq_starve,
75034 +- .maxlen = sizeof(atomic_t),
75035 ++ .maxlen = sizeof(atomic_unchecked_t),
75036 + .mode = 0644,
75037 + .proc_handler = read_reset_stat,
75038 + },
75039 + {
75040 + .procname = "rdma_stat_rq_poll",
75041 + .data = &rdma_stat_rq_poll,
75042 +- .maxlen = sizeof(atomic_t),
75043 ++ .maxlen = sizeof(atomic_unchecked_t),
75044 + .mode = 0644,
75045 + .proc_handler = read_reset_stat,
75046 + },
75047 + {
75048 + .procname = "rdma_stat_rq_prod",
75049 + .data = &rdma_stat_rq_prod,
75050 +- .maxlen = sizeof(atomic_t),
75051 ++ .maxlen = sizeof(atomic_unchecked_t),
75052 + .mode = 0644,
75053 + .proc_handler = read_reset_stat,
75054 + },
75055 + {
75056 + .procname = "rdma_stat_sq_poll",
75057 + .data = &rdma_stat_sq_poll,
75058 +- .maxlen = sizeof(atomic_t),
75059 ++ .maxlen = sizeof(atomic_unchecked_t),
75060 + .mode = 0644,
75061 + .proc_handler = read_reset_stat,
75062 + },
75063 + {
75064 + .procname = "rdma_stat_sq_prod",
75065 + .data = &rdma_stat_sq_prod,
75066 +- .maxlen = sizeof(atomic_t),
75067 ++ .maxlen = sizeof(atomic_unchecked_t),
75068 + .mode = 0644,
75069 + .proc_handler = read_reset_stat,
75070 + },
75071 +diff -urNp linux-3.1.1/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-3.1.1/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75072 +--- linux-3.1.1/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-11-11 15:19:27.000000000 -0500
75073 ++++ linux-3.1.1/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-11-16 18:39:08.000000000 -0500
75074 +@@ -499,7 +499,7 @@ next_sge:
75075 + svc_rdma_put_context(ctxt, 0);
75076 + goto out;
75077 + }
75078 +- atomic_inc(&rdma_stat_read);
75079 ++ atomic_inc_unchecked(&rdma_stat_read);
75080 +
75081 + if (read_wr.num_sge < chl_map->ch[ch_no].count) {
75082 + chl_map->ch[ch_no].count -= read_wr.num_sge;
75083 +@@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
75084 + dto_q);
75085 + list_del_init(&ctxt->dto_q);
75086 + } else {
75087 +- atomic_inc(&rdma_stat_rq_starve);
75088 ++ atomic_inc_unchecked(&rdma_stat_rq_starve);
75089 + clear_bit(XPT_DATA, &xprt->xpt_flags);
75090 + ctxt = NULL;
75091 + }
75092 +@@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
75093 + dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
75094 + ctxt, rdma_xprt, rqstp, ctxt->wc_status);
75095 + BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
75096 +- atomic_inc(&rdma_stat_recv);
75097 ++ atomic_inc_unchecked(&rdma_stat_recv);
75098 +
75099 + /* Build up the XDR from the receive buffers. */
75100 + rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
75101 +diff -urNp linux-3.1.1/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-3.1.1/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75102 +--- linux-3.1.1/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-11-11 15:19:27.000000000 -0500
75103 ++++ linux-3.1.1/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-11-16 18:39:08.000000000 -0500
75104 +@@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
75105 + write_wr.wr.rdma.remote_addr = to;
75106 +
75107 + /* Post It */
75108 +- atomic_inc(&rdma_stat_write);
75109 ++ atomic_inc_unchecked(&rdma_stat_write);
75110 + if (svc_rdma_send(xprt, &write_wr))
75111 + goto err;
75112 + return 0;
75113 +diff -urNp linux-3.1.1/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-3.1.1/net/sunrpc/xprtrdma/svc_rdma_transport.c
75114 +--- linux-3.1.1/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-11-11 15:19:27.000000000 -0500
75115 ++++ linux-3.1.1/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-11-16 18:39:08.000000000 -0500
75116 +@@ -299,7 +299,7 @@ static void rq_cq_reap(struct svcxprt_rd
75117 + return;
75118 +
75119 + ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
75120 +- atomic_inc(&rdma_stat_rq_poll);
75121 ++ atomic_inc_unchecked(&rdma_stat_rq_poll);
75122 +
75123 + while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
75124 + ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
75125 +@@ -321,7 +321,7 @@ static void rq_cq_reap(struct svcxprt_rd
75126 + }
75127 +
75128 + if (ctxt)
75129 +- atomic_inc(&rdma_stat_rq_prod);
75130 ++ atomic_inc_unchecked(&rdma_stat_rq_prod);
75131 +
75132 + set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
75133 + /*
75134 +@@ -393,7 +393,7 @@ static void sq_cq_reap(struct svcxprt_rd
75135 + return;
75136 +
75137 + ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
75138 +- atomic_inc(&rdma_stat_sq_poll);
75139 ++ atomic_inc_unchecked(&rdma_stat_sq_poll);
75140 + while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
75141 + if (wc.status != IB_WC_SUCCESS)
75142 + /* Close the transport */
75143 +@@ -411,7 +411,7 @@ static void sq_cq_reap(struct svcxprt_rd
75144 + }
75145 +
75146 + if (ctxt)
75147 +- atomic_inc(&rdma_stat_sq_prod);
75148 ++ atomic_inc_unchecked(&rdma_stat_sq_prod);
75149 + }
75150 +
75151 + static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
75152 +@@ -1273,7 +1273,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
75153 + spin_lock_bh(&xprt->sc_lock);
75154 + if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
75155 + spin_unlock_bh(&xprt->sc_lock);
75156 +- atomic_inc(&rdma_stat_sq_starve);
75157 ++ atomic_inc_unchecked(&rdma_stat_sq_starve);
75158 +
75159 + /* See if we can opportunistically reap SQ WR to make room */
75160 + sq_cq_reap(xprt);
75161 +diff -urNp linux-3.1.1/net/sysctl_net.c linux-3.1.1/net/sysctl_net.c
75162 +--- linux-3.1.1/net/sysctl_net.c 2011-11-11 15:19:27.000000000 -0500
75163 ++++ linux-3.1.1/net/sysctl_net.c 2011-11-16 18:40:44.000000000 -0500
75164 +@@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
75165 + struct ctl_table *table)
75166 + {
75167 + /* Allow network administrator to have same access as root. */
75168 +- if (capable(CAP_NET_ADMIN)) {
75169 ++ if (capable_nolog(CAP_NET_ADMIN)) {
75170 + int mode = (table->mode >> 6) & 7;
75171 + return (mode << 6) | (mode << 3) | mode;
75172 + }
75173 +diff -urNp linux-3.1.1/net/tipc/link.c linux-3.1.1/net/tipc/link.c
75174 +--- linux-3.1.1/net/tipc/link.c 2011-11-11 15:19:27.000000000 -0500
75175 ++++ linux-3.1.1/net/tipc/link.c 2011-11-16 18:39:08.000000000 -0500
75176 +@@ -1170,7 +1170,7 @@ static int link_send_sections_long(struc
75177 + struct tipc_msg fragm_hdr;
75178 + struct sk_buff *buf, *buf_chain, *prev;
75179 + u32 fragm_crs, fragm_rest, hsz, sect_rest;
75180 +- const unchar *sect_crs;
75181 ++ const unchar __user *sect_crs;
75182 + int curr_sect;
75183 + u32 fragm_no;
75184 +
75185 +@@ -1214,7 +1214,7 @@ again:
75186 +
75187 + if (!sect_rest) {
75188 + sect_rest = msg_sect[++curr_sect].iov_len;
75189 +- sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
75190 ++ sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
75191 + }
75192 +
75193 + if (sect_rest < fragm_rest)
75194 +@@ -1233,7 +1233,7 @@ error:
75195 + }
75196 + } else
75197 + skb_copy_to_linear_data_offset(buf, fragm_crs,
75198 +- sect_crs, sz);
75199 ++ (const void __force_kernel *)sect_crs, sz);
75200 + sect_crs += sz;
75201 + sect_rest -= sz;
75202 + fragm_crs += sz;
75203 +diff -urNp linux-3.1.1/net/tipc/msg.c linux-3.1.1/net/tipc/msg.c
75204 +--- linux-3.1.1/net/tipc/msg.c 2011-11-11 15:19:27.000000000 -0500
75205 ++++ linux-3.1.1/net/tipc/msg.c 2011-11-16 18:39:08.000000000 -0500
75206 +@@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr,
75207 + msg_sect[cnt].iov_len);
75208 + else
75209 + skb_copy_to_linear_data_offset(*buf, pos,
75210 +- msg_sect[cnt].iov_base,
75211 ++ (const void __force_kernel *)msg_sect[cnt].iov_base,
75212 + msg_sect[cnt].iov_len);
75213 + pos += msg_sect[cnt].iov_len;
75214 + }
75215 +diff -urNp linux-3.1.1/net/tipc/subscr.c linux-3.1.1/net/tipc/subscr.c
75216 +--- linux-3.1.1/net/tipc/subscr.c 2011-11-11 15:19:27.000000000 -0500
75217 ++++ linux-3.1.1/net/tipc/subscr.c 2011-11-16 18:39:08.000000000 -0500
75218 +@@ -101,7 +101,7 @@ static void subscr_send_event(struct sub
75219 + {
75220 + struct iovec msg_sect;
75221 +
75222 +- msg_sect.iov_base = (void *)&sub->evt;
75223 ++ msg_sect.iov_base = (void __force_user *)&sub->evt;
75224 + msg_sect.iov_len = sizeof(struct tipc_event);
75225 +
75226 + sub->evt.event = htohl(event, sub->swap);
75227 +diff -urNp linux-3.1.1/net/unix/af_unix.c linux-3.1.1/net/unix/af_unix.c
75228 +--- linux-3.1.1/net/unix/af_unix.c 2011-11-11 15:19:27.000000000 -0500
75229 ++++ linux-3.1.1/net/unix/af_unix.c 2011-11-16 19:17:17.000000000 -0500
75230 +@@ -767,6 +767,12 @@ static struct sock *unix_find_other(stru
75231 + err = -ECONNREFUSED;
75232 + if (!S_ISSOCK(inode->i_mode))
75233 + goto put_fail;
75234 ++
75235 ++ if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
75236 ++ err = -EACCES;
75237 ++ goto put_fail;
75238 ++ }
75239 ++
75240 + u = unix_find_socket_byinode(inode);
75241 + if (!u)
75242 + goto put_fail;
75243 +@@ -787,6 +793,13 @@ static struct sock *unix_find_other(stru
75244 + if (u) {
75245 + struct dentry *dentry;
75246 + dentry = unix_sk(u)->dentry;
75247 ++
75248 ++ if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
75249 ++ err = -EPERM;
75250 ++ sock_put(u);
75251 ++ goto fail;
75252 ++ }
75253 ++
75254 + if (dentry)
75255 + touch_atime(unix_sk(u)->mnt, dentry);
75256 + } else
75257 +@@ -869,11 +882,18 @@ static int unix_bind(struct socket *sock
75258 + err = security_path_mknod(&path, dentry, mode, 0);
75259 + if (err)
75260 + goto out_mknod_drop_write;
75261 ++ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
75262 ++ err = -EACCES;
75263 ++ goto out_mknod_drop_write;
75264 ++ }
75265 + err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
75266 + out_mknod_drop_write:
75267 + mnt_drop_write(path.mnt);
75268 + if (err)
75269 + goto out_mknod_dput;
75270 ++
75271 ++ gr_handle_create(dentry, path.mnt);
75272 ++
75273 + mutex_unlock(&path.dentry->d_inode->i_mutex);
75274 + dput(path.dentry);
75275 + path.dentry = dentry;
75276 +diff -urNp linux-3.1.1/net/wireless/core.h linux-3.1.1/net/wireless/core.h
75277 +--- linux-3.1.1/net/wireless/core.h 2011-11-11 15:19:27.000000000 -0500
75278 ++++ linux-3.1.1/net/wireless/core.h 2011-11-16 18:39:08.000000000 -0500
75279 +@@ -27,7 +27,7 @@ struct cfg80211_registered_device {
75280 + struct mutex mtx;
75281 +
75282 + /* rfkill support */
75283 +- struct rfkill_ops rfkill_ops;
75284 ++ rfkill_ops_no_const rfkill_ops;
75285 + struct rfkill *rfkill;
75286 + struct work_struct rfkill_sync;
75287 +
75288 +diff -urNp linux-3.1.1/net/wireless/wext-core.c linux-3.1.1/net/wireless/wext-core.c
75289 +--- linux-3.1.1/net/wireless/wext-core.c 2011-11-11 15:19:27.000000000 -0500
75290 ++++ linux-3.1.1/net/wireless/wext-core.c 2011-11-16 18:39:08.000000000 -0500
75291 +@@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struc
75292 + */
75293 +
75294 + /* Support for very large requests */
75295 +- if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
75296 +- (user_length > descr->max_tokens)) {
75297 ++ if (user_length > descr->max_tokens) {
75298 + /* Allow userspace to GET more than max so
75299 + * we can support any size GET requests.
75300 + * There is still a limit : -ENOMEM.
75301 +@@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struc
75302 + }
75303 + }
75304 +
75305 +- if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
75306 +- /*
75307 +- * If this is a GET, but not NOMAX, it means that the extra
75308 +- * data is not bounded by userspace, but by max_tokens. Thus
75309 +- * set the length to max_tokens. This matches the extra data
75310 +- * allocation.
75311 +- * The driver should fill it with the number of tokens it
75312 +- * provided, and it may check iwp->length rather than having
75313 +- * knowledge of max_tokens. If the driver doesn't change the
75314 +- * iwp->length, this ioctl just copies back max_token tokens
75315 +- * filled with zeroes. Hopefully the driver isn't claiming
75316 +- * them to be valid data.
75317 +- */
75318 +- iwp->length = descr->max_tokens;
75319 +- }
75320 +-
75321 + err = handler(dev, info, (union iwreq_data *) iwp, extra);
75322 +
75323 + iwp->length += essid_compat;
75324 +diff -urNp linux-3.1.1/net/xfrm/xfrm_policy.c linux-3.1.1/net/xfrm/xfrm_policy.c
75325 +--- linux-3.1.1/net/xfrm/xfrm_policy.c 2011-11-11 15:19:27.000000000 -0500
75326 ++++ linux-3.1.1/net/xfrm/xfrm_policy.c 2011-11-16 18:39:08.000000000 -0500
75327 +@@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm
75328 + {
75329 + policy->walk.dead = 1;
75330 +
75331 +- atomic_inc(&policy->genid);
75332 ++ atomic_inc_unchecked(&policy->genid);
75333 +
75334 + if (del_timer(&policy->timer))
75335 + xfrm_pol_put(policy);
75336 +@@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct x
75337 + hlist_add_head(&policy->bydst, chain);
75338 + xfrm_pol_hold(policy);
75339 + net->xfrm.policy_count[dir]++;
75340 +- atomic_inc(&flow_cache_genid);
75341 ++ atomic_inc_unchecked(&flow_cache_genid);
75342 + if (delpol)
75343 + __xfrm_policy_unlink(delpol, dir);
75344 + policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
75345 +@@ -1530,7 +1530,7 @@ free_dst:
75346 + goto out;
75347 + }
75348 +
75349 +-static int inline
75350 ++static inline int
75351 + xfrm_dst_alloc_copy(void **target, const void *src, int size)
75352 + {
75353 + if (!*target) {
75354 +@@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const
75355 + return 0;
75356 + }
75357 +
75358 +-static int inline
75359 ++static inline int
75360 + xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
75361 + {
75362 + #ifdef CONFIG_XFRM_SUB_POLICY
75363 +@@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry
75364 + #endif
75365 + }
75366 +
75367 +-static int inline
75368 ++static inline int
75369 + xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
75370 + {
75371 + #ifdef CONFIG_XFRM_SUB_POLICY
75372 +@@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xf
75373 +
75374 + xdst->num_pols = num_pols;
75375 + memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
75376 +- xdst->policy_genid = atomic_read(&pols[0]->genid);
75377 ++ xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
75378 +
75379 + return xdst;
75380 + }
75381 +@@ -2335,7 +2335,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
75382 + if (xdst->xfrm_genid != dst->xfrm->genid)
75383 + return 0;
75384 + if (xdst->num_pols > 0 &&
75385 +- xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
75386 ++ xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
75387 + return 0;
75388 +
75389 + mtu = dst_mtu(dst->child);
75390 +@@ -2870,7 +2870,7 @@ static int xfrm_policy_migrate(struct xf
75391 + sizeof(pol->xfrm_vec[i].saddr));
75392 + pol->xfrm_vec[i].encap_family = mp->new_family;
75393 + /* flush bundles */
75394 +- atomic_inc(&pol->genid);
75395 ++ atomic_inc_unchecked(&pol->genid);
75396 + }
75397 + }
75398 +
75399 +diff -urNp linux-3.1.1/net/xfrm/xfrm_user.c linux-3.1.1/net/xfrm/xfrm_user.c
75400 +--- linux-3.1.1/net/xfrm/xfrm_user.c 2011-11-11 15:19:27.000000000 -0500
75401 ++++ linux-3.1.1/net/xfrm/xfrm_user.c 2011-11-16 18:40:44.000000000 -0500
75402 +@@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm
75403 + struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
75404 + int i;
75405 +
75406 ++ pax_track_stack();
75407 ++
75408 + if (xp->xfrm_nr == 0)
75409 + return 0;
75410 +
75411 +@@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buf
75412 + int err;
75413 + int n = 0;
75414 +
75415 ++ pax_track_stack();
75416 ++
75417 + if (attrs[XFRMA_MIGRATE] == NULL)
75418 + return -EINVAL;
75419 +
75420 +diff -urNp linux-3.1.1/scripts/basic/fixdep.c linux-3.1.1/scripts/basic/fixdep.c
75421 +--- linux-3.1.1/scripts/basic/fixdep.c 2011-11-11 15:19:27.000000000 -0500
75422 ++++ linux-3.1.1/scripts/basic/fixdep.c 2011-11-16 18:39:08.000000000 -0500
75423 +@@ -161,7 +161,7 @@ static unsigned int strhash(const char *
75424 + /*
75425 + * Lookup a value in the configuration string.
75426 + */
75427 +-static int is_defined_config(const char *name, int len, unsigned int hash)
75428 ++static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
75429 + {
75430 + struct item *aux;
75431 +
75432 +@@ -211,10 +211,10 @@ static void clear_config(void)
75433 + /*
75434 + * Record the use of a CONFIG_* word.
75435 + */
75436 +-static void use_config(const char *m, int slen)
75437 ++static void use_config(const char *m, unsigned int slen)
75438 + {
75439 + unsigned int hash = strhash(m, slen);
75440 +- int c, i;
75441 ++ unsigned int c, i;
75442 +
75443 + if (is_defined_config(m, slen, hash))
75444 + return;
75445 +@@ -235,9 +235,9 @@ static void use_config(const char *m, in
75446 +
75447 + static void parse_config_file(const char *map, size_t len)
75448 + {
75449 +- const int *end = (const int *) (map + len);
75450 ++ const unsigned int *end = (const unsigned int *) (map + len);
75451 + /* start at +1, so that p can never be < map */
75452 +- const int *m = (const int *) map + 1;
75453 ++ const unsigned int *m = (const unsigned int *) map + 1;
75454 + const char *p, *q;
75455 +
75456 + for (; m < end; m++) {
75457 +@@ -405,7 +405,7 @@ static void print_deps(void)
75458 + static void traps(void)
75459 + {
75460 + static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
75461 +- int *p = (int *)test;
75462 ++ unsigned int *p = (unsigned int *)test;
75463 +
75464 + if (*p != INT_CONF) {
75465 + fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
75466 +diff -urNp linux-3.1.1/scripts/gcc-plugin.sh linux-3.1.1/scripts/gcc-plugin.sh
75467 +--- linux-3.1.1/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
75468 ++++ linux-3.1.1/scripts/gcc-plugin.sh 2011-11-16 18:39:08.000000000 -0500
75469 +@@ -0,0 +1,2 @@
75470 ++#!/bin/sh
75471 ++echo "#include \"gcc-plugin.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
75472 +diff -urNp linux-3.1.1/scripts/Makefile.build linux-3.1.1/scripts/Makefile.build
75473 +--- linux-3.1.1/scripts/Makefile.build 2011-11-11 15:19:27.000000000 -0500
75474 ++++ linux-3.1.1/scripts/Makefile.build 2011-11-16 18:40:44.000000000 -0500
75475 +@@ -109,7 +109,7 @@ endif
75476 + endif
75477 +
75478 + # Do not include host rules unless needed
75479 +-ifneq ($(hostprogs-y)$(hostprogs-m),)
75480 ++ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
75481 + include scripts/Makefile.host
75482 + endif
75483 +
75484 +diff -urNp linux-3.1.1/scripts/Makefile.clean linux-3.1.1/scripts/Makefile.clean
75485 +--- linux-3.1.1/scripts/Makefile.clean 2011-11-11 15:19:27.000000000 -0500
75486 ++++ linux-3.1.1/scripts/Makefile.clean 2011-11-16 18:39:08.000000000 -0500
75487 +@@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
75488 + __clean-files := $(extra-y) $(always) \
75489 + $(targets) $(clean-files) \
75490 + $(host-progs) \
75491 +- $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
75492 ++ $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
75493 ++ $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
75494 +
75495 + __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
75496 +
75497 +diff -urNp linux-3.1.1/scripts/Makefile.host linux-3.1.1/scripts/Makefile.host
75498 +--- linux-3.1.1/scripts/Makefile.host 2011-11-11 15:19:27.000000000 -0500
75499 ++++ linux-3.1.1/scripts/Makefile.host 2011-11-16 18:39:08.000000000 -0500
75500 +@@ -31,6 +31,7 @@
75501 + # Note: Shared libraries consisting of C++ files are not supported
75502 +
75503 + __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
75504 ++__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
75505 +
75506 + # C code
75507 + # Executables compiled from a single .c file
75508 +@@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
75509 + # Shared libaries (only .c supported)
75510 + # Shared libraries (.so) - all .so files referenced in "xxx-objs"
75511 + host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
75512 ++host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
75513 + # Remove .so files from "xxx-objs"
75514 + host-cobjs := $(filter-out %.so,$(host-cobjs))
75515 +
75516 +diff -urNp linux-3.1.1/scripts/mod/file2alias.c linux-3.1.1/scripts/mod/file2alias.c
75517 +--- linux-3.1.1/scripts/mod/file2alias.c 2011-11-11 15:19:27.000000000 -0500
75518 ++++ linux-3.1.1/scripts/mod/file2alias.c 2011-11-16 18:39:08.000000000 -0500
75519 +@@ -72,7 +72,7 @@ static void device_id_check(const char *
75520 + unsigned long size, unsigned long id_size,
75521 + void *symval)
75522 + {
75523 +- int i;
75524 ++ unsigned int i;
75525 +
75526 + if (size % id_size || size < id_size) {
75527 + if (cross_build != 0)
75528 +@@ -102,7 +102,7 @@ static void device_id_check(const char *
75529 + /* USB is special because the bcdDevice can be matched against a numeric range */
75530 + /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
75531 + static void do_usb_entry(struct usb_device_id *id,
75532 +- unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
75533 ++ unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
75534 + unsigned char range_lo, unsigned char range_hi,
75535 + unsigned char max, struct module *mod)
75536 + {
75537 +@@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct us
75538 + {
75539 + unsigned int devlo, devhi;
75540 + unsigned char chi, clo, max;
75541 +- int ndigits;
75542 ++ unsigned int ndigits;
75543 +
75544 + id->match_flags = TO_NATIVE(id->match_flags);
75545 + id->idVendor = TO_NATIVE(id->idVendor);
75546 +@@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
75547 + for (i = 0; i < count; i++) {
75548 + const char *id = (char *)devs[i].id;
75549 + char acpi_id[sizeof(devs[0].id)];
75550 +- int j;
75551 ++ unsigned int j;
75552 +
75553 + buf_printf(&mod->dev_table_buf,
75554 + "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
75555 +@@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
75556 +
75557 + for (j = 0; j < PNP_MAX_DEVICES; j++) {
75558 + const char *id = (char *)card->devs[j].id;
75559 +- int i2, j2;
75560 ++ unsigned int i2, j2;
75561 + int dup = 0;
75562 +
75563 + if (!id[0])
75564 +@@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
75565 + /* add an individual alias for every device entry */
75566 + if (!dup) {
75567 + char acpi_id[sizeof(card->devs[0].id)];
75568 +- int k;
75569 ++ unsigned int k;
75570 +
75571 + buf_printf(&mod->dev_table_buf,
75572 + "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
75573 +@@ -786,7 +786,7 @@ static void dmi_ascii_filter(char *d, co
75574 + static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
75575 + char *alias)
75576 + {
75577 +- int i, j;
75578 ++ unsigned int i, j;
75579 +
75580 + sprintf(alias, "dmi*");
75581 +
75582 +diff -urNp linux-3.1.1/scripts/mod/modpost.c linux-3.1.1/scripts/mod/modpost.c
75583 +--- linux-3.1.1/scripts/mod/modpost.c 2011-11-11 15:19:27.000000000 -0500
75584 ++++ linux-3.1.1/scripts/mod/modpost.c 2011-11-16 18:39:08.000000000 -0500
75585 +@@ -919,6 +919,7 @@ enum mismatch {
75586 + ANY_INIT_TO_ANY_EXIT,
75587 + ANY_EXIT_TO_ANY_INIT,
75588 + EXPORT_TO_INIT_EXIT,
75589 ++ DATA_TO_TEXT
75590 + };
75591 +
75592 + struct sectioncheck {
75593 +@@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[]
75594 + .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
75595 + .mismatch = EXPORT_TO_INIT_EXIT,
75596 + .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
75597 ++},
75598 ++/* Do not reference code from writable data */
75599 ++{
75600 ++ .fromsec = { DATA_SECTIONS, NULL },
75601 ++ .tosec = { TEXT_SECTIONS, NULL },
75602 ++ .mismatch = DATA_TO_TEXT
75603 + }
75604 + };
75605 +
75606 +@@ -1149,10 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct e
75607 + continue;
75608 + if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
75609 + continue;
75610 +- if (sym->st_value == addr)
75611 +- return sym;
75612 + /* Find a symbol nearby - addr are maybe negative */
75613 + d = sym->st_value - addr;
75614 ++ if (d == 0)
75615 ++ return sym;
75616 + if (d < 0)
75617 + d = addr - sym->st_value;
75618 + if (d < distance) {
75619 +@@ -1431,6 +1438,14 @@ static void report_sec_mismatch(const ch
75620 + tosym, prl_to, prl_to, tosym);
75621 + free(prl_to);
75622 + break;
75623 ++ case DATA_TO_TEXT:
75624 ++/*
75625 ++ fprintf(stderr,
75626 ++ "The variable %s references\n"
75627 ++ "the %s %s%s%s\n",
75628 ++ fromsym, to, sec2annotation(tosec), tosym, to_p);
75629 ++*/
75630 ++ break;
75631 + }
75632 + fprintf(stderr, "\n");
75633 + }
75634 +@@ -1656,7 +1671,7 @@ static void section_rel(const char *modn
75635 + static void check_sec_ref(struct module *mod, const char *modname,
75636 + struct elf_info *elf)
75637 + {
75638 +- int i;
75639 ++ unsigned int i;
75640 + Elf_Shdr *sechdrs = elf->sechdrs;
75641 +
75642 + /* Walk through all sections */
75643 +@@ -1754,7 +1769,7 @@ void __attribute__((format(printf, 2, 3)
75644 + va_end(ap);
75645 + }
75646 +
75647 +-void buf_write(struct buffer *buf, const char *s, int len)
75648 ++void buf_write(struct buffer *buf, const char *s, unsigned int len)
75649 + {
75650 + if (buf->size - buf->pos < len) {
75651 + buf->size += len + SZ;
75652 +@@ -1966,7 +1981,7 @@ static void write_if_changed(struct buff
75653 + if (fstat(fileno(file), &st) < 0)
75654 + goto close_write;
75655 +
75656 +- if (st.st_size != b->pos)
75657 ++ if (st.st_size != (off_t)b->pos)
75658 + goto close_write;
75659 +
75660 + tmp = NOFAIL(malloc(b->pos));
75661 +diff -urNp linux-3.1.1/scripts/mod/modpost.h linux-3.1.1/scripts/mod/modpost.h
75662 +--- linux-3.1.1/scripts/mod/modpost.h 2011-11-11 15:19:27.000000000 -0500
75663 ++++ linux-3.1.1/scripts/mod/modpost.h 2011-11-16 18:39:08.000000000 -0500
75664 +@@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
75665 +
75666 + struct buffer {
75667 + char *p;
75668 +- int pos;
75669 +- int size;
75670 ++ unsigned int pos;
75671 ++ unsigned int size;
75672 + };
75673 +
75674 + void __attribute__((format(printf, 2, 3)))
75675 + buf_printf(struct buffer *buf, const char *fmt, ...);
75676 +
75677 + void
75678 +-buf_write(struct buffer *buf, const char *s, int len);
75679 ++buf_write(struct buffer *buf, const char *s, unsigned int len);
75680 +
75681 + struct module {
75682 + struct module *next;
75683 +diff -urNp linux-3.1.1/scripts/mod/sumversion.c linux-3.1.1/scripts/mod/sumversion.c
75684 +--- linux-3.1.1/scripts/mod/sumversion.c 2011-11-11 15:19:27.000000000 -0500
75685 ++++ linux-3.1.1/scripts/mod/sumversion.c 2011-11-16 18:39:08.000000000 -0500
75686 +@@ -470,7 +470,7 @@ static void write_version(const char *fi
75687 + goto out;
75688 + }
75689 +
75690 +- if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
75691 ++ if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
75692 + warn("writing sum in %s failed: %s\n",
75693 + filename, strerror(errno));
75694 + goto out;
75695 +diff -urNp linux-3.1.1/scripts/pnmtologo.c linux-3.1.1/scripts/pnmtologo.c
75696 +--- linux-3.1.1/scripts/pnmtologo.c 2011-11-11 15:19:27.000000000 -0500
75697 ++++ linux-3.1.1/scripts/pnmtologo.c 2011-11-16 18:39:08.000000000 -0500
75698 +@@ -237,14 +237,14 @@ static void write_header(void)
75699 + fprintf(out, " * Linux logo %s\n", logoname);
75700 + fputs(" */\n\n", out);
75701 + fputs("#include <linux/linux_logo.h>\n\n", out);
75702 +- fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
75703 ++ fprintf(out, "static unsigned char %s_data[] = {\n",
75704 + logoname);
75705 + }
75706 +
75707 + static void write_footer(void)
75708 + {
75709 + fputs("\n};\n\n", out);
75710 +- fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
75711 ++ fprintf(out, "const struct linux_logo %s = {\n", logoname);
75712 + fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
75713 + fprintf(out, "\t.width\t\t= %d,\n", logo_width);
75714 + fprintf(out, "\t.height\t\t= %d,\n", logo_height);
75715 +@@ -374,7 +374,7 @@ static void write_logo_clut224(void)
75716 + fputs("\n};\n\n", out);
75717 +
75718 + /* write logo clut */
75719 +- fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
75720 ++ fprintf(out, "static unsigned char %s_clut[] = {\n",
75721 + logoname);
75722 + write_hex_cnt = 0;
75723 + for (i = 0; i < logo_clutsize; i++) {
75724 +diff -urNp linux-3.1.1/security/apparmor/lsm.c linux-3.1.1/security/apparmor/lsm.c
75725 +--- linux-3.1.1/security/apparmor/lsm.c 2011-11-11 15:19:27.000000000 -0500
75726 ++++ linux-3.1.1/security/apparmor/lsm.c 2011-11-16 18:40:44.000000000 -0500
75727 +@@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struc
75728 + return error;
75729 + }
75730 +
75731 +-static struct security_operations apparmor_ops = {
75732 ++static struct security_operations apparmor_ops __read_only = {
75733 + .name = "apparmor",
75734 +
75735 + .ptrace_access_check = apparmor_ptrace_access_check,
75736 +diff -urNp linux-3.1.1/security/commoncap.c linux-3.1.1/security/commoncap.c
75737 +--- linux-3.1.1/security/commoncap.c 2011-11-11 15:19:27.000000000 -0500
75738 ++++ linux-3.1.1/security/commoncap.c 2011-11-16 18:40:44.000000000 -0500
75739 +@@ -28,6 +28,7 @@
75740 + #include <linux/prctl.h>
75741 + #include <linux/securebits.h>
75742 + #include <linux/user_namespace.h>
75743 ++#include <net/sock.h>
75744 +
75745 + /*
75746 + * If a non-root user executes a setuid-root binary in
75747 +@@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, st
75748 +
75749 + int cap_netlink_recv(struct sk_buff *skb, int cap)
75750 + {
75751 +- if (!cap_raised(current_cap(), cap))
75752 ++ if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
75753 + return -EPERM;
75754 + return 0;
75755 + }
75756 +@@ -575,6 +576,9 @@ int cap_bprm_secureexec(struct linux_bin
75757 + {
75758 + const struct cred *cred = current_cred();
75759 +
75760 ++ if (gr_acl_enable_at_secure())
75761 ++ return 1;
75762 ++
75763 + if (cred->uid != 0) {
75764 + if (bprm->cap_effective)
75765 + return 1;
75766 +diff -urNp linux-3.1.1/security/integrity/ima/ima_api.c linux-3.1.1/security/integrity/ima/ima_api.c
75767 +--- linux-3.1.1/security/integrity/ima/ima_api.c 2011-11-11 15:19:27.000000000 -0500
75768 ++++ linux-3.1.1/security/integrity/ima/ima_api.c 2011-11-16 18:39:08.000000000 -0500
75769 +@@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
75770 + int result;
75771 +
75772 + /* can overflow, only indicator */
75773 +- atomic_long_inc(&ima_htable.violations);
75774 ++ atomic_long_inc_unchecked(&ima_htable.violations);
75775 +
75776 + entry = kmalloc(sizeof(*entry), GFP_KERNEL);
75777 + if (!entry) {
75778 +diff -urNp linux-3.1.1/security/integrity/ima/ima_fs.c linux-3.1.1/security/integrity/ima/ima_fs.c
75779 +--- linux-3.1.1/security/integrity/ima/ima_fs.c 2011-11-11 15:19:27.000000000 -0500
75780 ++++ linux-3.1.1/security/integrity/ima/ima_fs.c 2011-11-16 18:39:08.000000000 -0500
75781 +@@ -28,12 +28,12 @@
75782 + static int valid_policy = 1;
75783 + #define TMPBUFLEN 12
75784 + static ssize_t ima_show_htable_value(char __user *buf, size_t count,
75785 +- loff_t *ppos, atomic_long_t *val)
75786 ++ loff_t *ppos, atomic_long_unchecked_t *val)
75787 + {
75788 + char tmpbuf[TMPBUFLEN];
75789 + ssize_t len;
75790 +
75791 +- len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
75792 ++ len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
75793 + return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
75794 + }
75795 +
75796 +diff -urNp linux-3.1.1/security/integrity/ima/ima.h linux-3.1.1/security/integrity/ima/ima.h
75797 +--- linux-3.1.1/security/integrity/ima/ima.h 2011-11-11 15:19:27.000000000 -0500
75798 ++++ linux-3.1.1/security/integrity/ima/ima.h 2011-11-16 18:39:08.000000000 -0500
75799 +@@ -85,8 +85,8 @@ void ima_add_violation(struct inode *ino
75800 + extern spinlock_t ima_queue_lock;
75801 +
75802 + struct ima_h_table {
75803 +- atomic_long_t len; /* number of stored measurements in the list */
75804 +- atomic_long_t violations;
75805 ++ atomic_long_unchecked_t len; /* number of stored measurements in the list */
75806 ++ atomic_long_unchecked_t violations;
75807 + struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
75808 + };
75809 + extern struct ima_h_table ima_htable;
75810 +diff -urNp linux-3.1.1/security/integrity/ima/ima_queue.c linux-3.1.1/security/integrity/ima/ima_queue.c
75811 +--- linux-3.1.1/security/integrity/ima/ima_queue.c 2011-11-11 15:19:27.000000000 -0500
75812 ++++ linux-3.1.1/security/integrity/ima/ima_queue.c 2011-11-16 18:39:08.000000000 -0500
75813 +@@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
75814 + INIT_LIST_HEAD(&qe->later);
75815 + list_add_tail_rcu(&qe->later, &ima_measurements);
75816 +
75817 +- atomic_long_inc(&ima_htable.len);
75818 ++ atomic_long_inc_unchecked(&ima_htable.len);
75819 + key = ima_hash_key(entry->digest);
75820 + hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
75821 + return 0;
75822 +diff -urNp linux-3.1.1/security/Kconfig linux-3.1.1/security/Kconfig
75823 +--- linux-3.1.1/security/Kconfig 2011-11-11 15:19:27.000000000 -0500
75824 ++++ linux-3.1.1/security/Kconfig 2011-11-16 18:40:44.000000000 -0500
75825 +@@ -4,6 +4,558 @@
75826 +
75827 + menu "Security options"
75828 +
75829 ++source grsecurity/Kconfig
75830 ++
75831 ++menu "PaX"
75832 ++
75833 ++ config ARCH_TRACK_EXEC_LIMIT
75834 ++ bool
75835 ++
75836 ++ config PAX_KERNEXEC_PLUGIN
75837 ++ bool
75838 ++
75839 ++ config PAX_PER_CPU_PGD
75840 ++ bool
75841 ++
75842 ++ config TASK_SIZE_MAX_SHIFT
75843 ++ int
75844 ++ depends on X86_64
75845 ++ default 47 if !PAX_PER_CPU_PGD
75846 ++ default 42 if PAX_PER_CPU_PGD
75847 ++
75848 ++ config PAX_ENABLE_PAE
75849 ++ bool
75850 ++ default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
75851 ++
75852 ++config PAX
75853 ++ bool "Enable various PaX features"
75854 ++ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
75855 ++ help
75856 ++ This allows you to enable various PaX features. PaX adds
75857 ++ intrusion prevention mechanisms to the kernel that reduce
75858 ++ the risks posed by exploitable memory corruption bugs.
75859 ++
75860 ++menu "PaX Control"
75861 ++ depends on PAX
75862 ++
75863 ++config PAX_SOFTMODE
75864 ++ bool 'Support soft mode'
75865 ++ select PAX_PT_PAX_FLAGS
75866 ++ help
75867 ++ Enabling this option will allow you to run PaX in soft mode, that
75868 ++ is, PaX features will not be enforced by default, only on executables
75869 ++ marked explicitly. You must also enable PT_PAX_FLAGS support as it
75870 ++ is the only way to mark executables for soft mode use.
75871 ++
75872 ++ Soft mode can be activated by using the "pax_softmode=1" kernel command
75873 ++ line option on boot. Furthermore you can control various PaX features
75874 ++ at runtime via the entries in /proc/sys/kernel/pax.
75875 ++
75876 ++config PAX_EI_PAX
75877 ++ bool 'Use legacy ELF header marking'
75878 ++ help
75879 ++ Enabling this option will allow you to control PaX features on
75880 ++ a per executable basis via the 'chpax' utility available at
75881 ++ http://pax.grsecurity.net/. The control flags will be read from
75882 ++ an otherwise reserved part of the ELF header. This marking has
75883 ++ numerous drawbacks (no support for soft-mode, toolchain does not
75884 ++ know about the non-standard use of the ELF header) therefore it
75885 ++ has been deprecated in favour of PT_PAX_FLAGS support.
75886 ++
75887 ++ Note that if you enable PT_PAX_FLAGS marking support as well,
75888 ++ the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
75889 ++
75890 ++config PAX_PT_PAX_FLAGS
75891 ++ bool 'Use ELF program header marking'
75892 ++ help
75893 ++ Enabling this option will allow you to control PaX features on
75894 ++ a per executable basis via the 'paxctl' utility available at
75895 ++ http://pax.grsecurity.net/. The control flags will be read from
75896 ++ a PaX specific ELF program header (PT_PAX_FLAGS). This marking
75897 ++ has the benefits of supporting both soft mode and being fully
75898 ++ integrated into the toolchain (the binutils patch is available
75899 ++ from http://pax.grsecurity.net).
75900 ++
75901 ++ If your toolchain does not support PT_PAX_FLAGS markings,
75902 ++ you can create one in most cases with 'paxctl -C'.
75903 ++
75904 ++ Note that if you enable the legacy EI_PAX marking support as well,
75905 ++ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
75906 ++
75907 ++choice
75908 ++ prompt 'MAC system integration'
75909 ++ default PAX_HAVE_ACL_FLAGS
75910 ++ help
75911 ++ Mandatory Access Control systems have the option of controlling
75912 ++ PaX flags on a per executable basis, choose the method supported
75913 ++ by your particular system.
75914 ++
75915 ++ - "none": if your MAC system does not interact with PaX,
75916 ++ - "direct": if your MAC system defines pax_set_initial_flags() itself,
75917 ++ - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
75918 ++
75919 ++ NOTE: this option is for developers/integrators only.
75920 ++
75921 ++ config PAX_NO_ACL_FLAGS
75922 ++ bool 'none'
75923 ++
75924 ++ config PAX_HAVE_ACL_FLAGS
75925 ++ bool 'direct'
75926 ++
75927 ++ config PAX_HOOK_ACL_FLAGS
75928 ++ bool 'hook'
75929 ++endchoice
75930 ++
75931 ++endmenu
75932 ++
75933 ++menu "Non-executable pages"
75934 ++ depends on PAX
75935 ++
75936 ++config PAX_NOEXEC
75937 ++ bool "Enforce non-executable pages"
75938 ++ depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
75939 ++ help
75940 ++ By design some architectures do not allow for protecting memory
75941 ++ pages against execution or even if they do, Linux does not make
75942 ++ use of this feature. In practice this means that if a page is
75943 ++ readable (such as the stack or heap) it is also executable.
75944 ++
75945 ++ There is a well known exploit technique that makes use of this
75946 ++ fact and a common programming mistake where an attacker can
75947 ++ introduce code of his choice somewhere in the attacked program's
75948 ++ memory (typically the stack or the heap) and then execute it.
75949 ++
75950 ++ If the attacked program was running with different (typically
75951 ++ higher) privileges than that of the attacker, then he can elevate
75952 ++ his own privilege level (e.g. get a root shell, write to files for
75953 ++ which he does not have write access to, etc).
75954 ++
75955 ++ Enabling this option will let you choose from various features
75956 ++ that prevent the injection and execution of 'foreign' code in
75957 ++ a program.
75958 ++
75959 ++ This will also break programs that rely on the old behaviour and
75960 ++ expect that dynamically allocated memory via the malloc() family
75961 ++ of functions is executable (which it is not). Notable examples
75962 ++ are the XFree86 4.x server, the java runtime and wine.
75963 ++
75964 ++config PAX_PAGEEXEC
75965 ++ bool "Paging based non-executable pages"
75966 ++ depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
75967 ++ select S390_SWITCH_AMODE if S390
75968 ++ select S390_EXEC_PROTECT if S390
75969 ++ select ARCH_TRACK_EXEC_LIMIT if X86_32
75970 ++ help
75971 ++ This implementation is based on the paging feature of the CPU.
75972 ++ On i386 without hardware non-executable bit support there is a
75973 ++ variable but usually low performance impact, however on Intel's
75974 ++ P4 core based CPUs it is very high so you should not enable this
75975 ++ for kernels meant to be used on such CPUs.
75976 ++
75977 ++ On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
75978 ++ with hardware non-executable bit support there is no performance
75979 ++ impact, on ppc the impact is negligible.
75980 ++
75981 ++ Note that several architectures require various emulations due to
75982 ++ badly designed userland ABIs, this will cause a performance impact
75983 ++ but will disappear as soon as userland is fixed. For example, ppc
75984 ++ userland MUST have been built with secure-plt by a recent toolchain.
75985 ++
75986 ++config PAX_SEGMEXEC
75987 ++ bool "Segmentation based non-executable pages"
75988 ++ depends on PAX_NOEXEC && X86_32
75989 ++ help
75990 ++ This implementation is based on the segmentation feature of the
75991 ++ CPU and has a very small performance impact, however applications
75992 ++ will be limited to a 1.5 GB address space instead of the normal
75993 ++ 3 GB.
75994 ++
75995 ++config PAX_EMUTRAMP
75996 ++ bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
75997 ++ default y if PARISC
75998 ++ help
75999 ++ There are some programs and libraries that for one reason or
76000 ++ another attempt to execute special small code snippets from
76001 ++ non-executable memory pages. Most notable examples are the
76002 ++ signal handler return code generated by the kernel itself and
76003 ++ the GCC trampolines.
76004 ++
76005 ++ If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
76006 ++ such programs will no longer work under your kernel.
76007 ++
76008 ++ As a remedy you can say Y here and use the 'chpax' or 'paxctl'
76009 ++ utilities to enable trampoline emulation for the affected programs
76010 ++ yet still have the protection provided by the non-executable pages.
76011 ++
76012 ++ On parisc you MUST enable this option and EMUSIGRT as well, otherwise
76013 ++ your system will not even boot.
76014 ++
76015 ++ Alternatively you can say N here and use the 'chpax' or 'paxctl'
76016 ++ utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
76017 ++ for the affected files.
76018 ++
76019 ++ NOTE: enabling this feature *may* open up a loophole in the
76020 ++ protection provided by non-executable pages that an attacker
76021 ++ could abuse. Therefore the best solution is to not have any
76022 ++ files on your system that would require this option. This can
76023 ++ be achieved by not using libc5 (which relies on the kernel
76024 ++ signal handler return code) and not using or rewriting programs
76025 ++ that make use of the nested function implementation of GCC.
76026 ++ Skilled users can just fix GCC itself so that it implements
76027 ++ nested function calls in a way that does not interfere with PaX.
76028 ++
76029 ++config PAX_EMUSIGRT
76030 ++ bool "Automatically emulate sigreturn trampolines"
76031 ++ depends on PAX_EMUTRAMP && PARISC
76032 ++ default y
76033 ++ help
76034 ++ Enabling this option will have the kernel automatically detect
76035 ++ and emulate signal return trampolines executing on the stack
76036 ++ that would otherwise lead to task termination.
76037 ++
76038 ++ This solution is intended as a temporary one for users with
76039 ++ legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
76040 ++ Modula-3 runtime, etc) or executables linked to such, basically
76041 ++ everything that does not specify its own SA_RESTORER function in
76042 ++ normal executable memory like glibc 2.1+ does.
76043 ++
76044 ++ On parisc you MUST enable this option, otherwise your system will
76045 ++ not even boot.
76046 ++
76047 ++ NOTE: this feature cannot be disabled on a per executable basis
76048 ++ and since it *does* open up a loophole in the protection provided
76049 ++ by non-executable pages, the best solution is to not have any
76050 ++ files on your system that would require this option.
76051 ++
76052 ++config PAX_MPROTECT
76053 ++ bool "Restrict mprotect()"
76054 ++ depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
76055 ++ help
76056 ++ Enabling this option will prevent programs from
76057 ++ - changing the executable status of memory pages that were
76058 ++ not originally created as executable,
76059 ++ - making read-only executable pages writable again,
76060 ++ - creating executable pages from anonymous memory,
76061 ++ - making read-only-after-relocations (RELRO) data pages writable again.
76062 ++
76063 ++ You should say Y here to complete the protection provided by
76064 ++ the enforcement of non-executable pages.
76065 ++
76066 ++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76067 ++ this feature on a per file basis.
76068 ++
76069 ++config PAX_MPROTECT_COMPAT
76070 ++ bool "Use legacy/compat protection demoting (read help)"
76071 ++ depends on PAX_MPROTECT
76072 ++ default n
76073 ++ help
76074 ++ The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
76075 ++ by sending the proper error code to the application. For some broken
76076 ++ userland, this can cause problems with Python or other applications. The
76077 ++ current implementation however allows for applications like clamav to
76078 ++ detect if JIT compilation/execution is allowed and to fall back gracefully
76079 ++ to an interpreter-based mode if it does not. While we encourage everyone
76080 ++ to use the current implementation as-is and push upstream to fix broken
76081 ++ userland (note that the RWX logging option can assist with this), in some
76082 ++ environments this may not be possible. Having to disable MPROTECT
76083 ++ completely on certain binaries reduces the security benefit of PaX,
76084 ++ so this option is provided for those environments to revert to the old
76085 ++ behavior.
76086 ++
76087 ++config PAX_ELFRELOCS
76088 ++ bool "Allow ELF text relocations (read help)"
76089 ++ depends on PAX_MPROTECT
76090 ++ default n
76091 ++ help
76092 ++ Non-executable pages and mprotect() restrictions are effective
76093 ++ in preventing the introduction of new executable code into an
76094 ++ attacked task's address space. There remain only two venues
76095 ++ for this kind of attack: if the attacker can execute already
76096 ++ existing code in the attacked task then he can either have it
76097 ++ create and mmap() a file containing his code or have it mmap()
76098 ++ an already existing ELF library that does not have position
76099 ++ independent code in it and use mprotect() on it to make it
76100 ++ writable and copy his code there. While protecting against
76101 ++ the former approach is beyond PaX, the latter can be prevented
76102 ++ by having only PIC ELF libraries on one's system (which do not
76103 ++ need to relocate their code). If you are sure this is your case,
76104 ++ as is the case with all modern Linux distributions, then leave
76105 ++ this option disabled. You should say 'n' here.
76106 ++
76107 ++config PAX_ETEXECRELOCS
76108 ++ bool "Allow ELF ET_EXEC text relocations"
76109 ++ depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
76110 ++ select PAX_ELFRELOCS
76111 ++ default y
76112 ++ help
76113 ++ On some architectures there are incorrectly created applications
76114 ++ that require text relocations and would not work without enabling
76115 ++ this option. If you are an alpha, ia64 or parisc user, you should
76116 ++ enable this option and disable it once you have made sure that
76117 ++ none of your applications need it.
76118 ++
76119 ++config PAX_EMUPLT
76120 ++ bool "Automatically emulate ELF PLT"
76121 ++ depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
76122 ++ default y
76123 ++ help
76124 ++ Enabling this option will have the kernel automatically detect
76125 ++ and emulate the Procedure Linkage Table entries in ELF files.
76126 ++ On some architectures such entries are in writable memory, and
76127 ++ become non-executable leading to task termination. Therefore
76128 ++ it is mandatory that you enable this option on alpha, parisc,
76129 ++ sparc and sparc64, otherwise your system would not even boot.
76130 ++
76131 ++ NOTE: this feature *does* open up a loophole in the protection
76132 ++ provided by the non-executable pages, therefore the proper
76133 ++ solution is to modify the toolchain to produce a PLT that does
76134 ++ not need to be writable.
76135 ++
76136 ++config PAX_DLRESOLVE
76137 ++ bool 'Emulate old glibc resolver stub'
76138 ++ depends on PAX_EMUPLT && SPARC
76139 ++ default n
76140 ++ help
76141 ++ This option is needed if userland has an old glibc (before 2.4)
76142 ++ that puts a 'save' instruction into the runtime generated resolver
76143 ++ stub that needs special emulation.
76144 ++
76145 ++config PAX_KERNEXEC
76146 ++ bool "Enforce non-executable kernel pages"
76147 ++ depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
76148 ++ select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
76149 ++ select PAX_KERNEXEC_PLUGIN if X86_64
76150 ++ help
76151 ++ This is the kernel land equivalent of PAGEEXEC and MPROTECT,
76152 ++ that is, enabling this option will make it harder to inject
76153 ++ and execute 'foreign' code in kernel memory itself.
76154 ++
76155 ++ Note that on x86_64 kernels there is a known regression when
76156 ++ this feature and KVM/VMX are both enabled in the host kernel.
76157 ++
76158 ++config PAX_KERNEXEC_MODULE_TEXT
76159 ++ int "Minimum amount of memory reserved for module code"
76160 ++ default "4"
76161 ++ depends on PAX_KERNEXEC && X86_32 && MODULES
76162 ++ help
76163 ++ Due to implementation details the kernel must reserve a fixed
76164 ++ amount of memory for module code at compile time that cannot be
76165 ++ changed at runtime. Here you can specify the minimum amount
76166 ++ in MB that will be reserved. Due to the same implementation
76167 ++ details this size will always be rounded up to the next 2/4 MB
76168 ++ boundary (depends on PAE) so the actually available memory for
76169 ++ module code will usually be more than this minimum.
76170 ++
76171 ++ The default 4 MB should be enough for most users but if you have
76172 ++ an excessive number of modules (e.g., most distribution configs
76173 ++ compile many drivers as modules) or use huge modules such as
76174 ++ nvidia's kernel driver, you will need to adjust this amount.
76175 ++ A good rule of thumb is to look at your currently loaded kernel
76176 ++ modules and add up their sizes.
76177 ++
76178 ++endmenu
76179 ++
76180 ++menu "Address Space Layout Randomization"
76181 ++ depends on PAX
76182 ++
76183 ++config PAX_ASLR
76184 ++ bool "Address Space Layout Randomization"
76185 ++ depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
76186 ++ help
76187 ++ Many if not most exploit techniques rely on the knowledge of
76188 ++ certain addresses in the attacked program. The following options
76189 ++ will allow the kernel to apply a certain amount of randomization
76190 ++ to specific parts of the program thereby forcing an attacker to
76191 ++ guess them in most cases. Any failed guess will most likely crash
76192 ++ the attacked program which allows the kernel to detect such attempts
76193 ++ and react on them. PaX itself provides no reaction mechanisms,
76194 ++ instead it is strongly encouraged that you make use of Nergal's
76195 ++ segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
76196 ++ (http://www.grsecurity.net/) built-in crash detection features or
76197 ++ develop one yourself.
76198 ++
76199 ++ By saying Y here you can choose to randomize the following areas:
76200 ++ - top of the task's kernel stack
76201 ++ - top of the task's userland stack
76202 ++ - base address for mmap() requests that do not specify one
76203 ++ (this includes all libraries)
76204 ++ - base address of the main executable
76205 ++
76206 ++ It is strongly recommended to say Y here as address space layout
76207 ++ randomization has negligible impact on performance yet it provides
76208 ++ a very effective protection.
76209 ++
76210 ++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76211 ++ this feature on a per file basis.
76212 ++
76213 ++config PAX_RANDKSTACK
76214 ++ bool "Randomize kernel stack base"
76215 ++ depends on X86_TSC && X86
76216 ++ help
76217 ++ By saying Y here the kernel will randomize every task's kernel
76218 ++ stack on every system call. This will not only force an attacker
76219 ++ to guess it but also prevent him from making use of possible
76220 ++ leaked information about it.
76221 ++
76222 ++ Since the kernel stack is a rather scarce resource, randomization
76223 ++ may cause unexpected stack overflows, therefore you should very
76224 ++ carefully test your system. Note that once enabled in the kernel
76225 ++ configuration, this feature cannot be disabled on a per file basis.
76226 ++
76227 ++config PAX_RANDUSTACK
76228 ++ bool "Randomize user stack base"
76229 ++ depends on PAX_ASLR
76230 ++ help
76231 ++ By saying Y here the kernel will randomize every task's userland
76232 ++ stack. The randomization is done in two steps where the second
76233 ++ one may apply a big amount of shift to the top of the stack and
76234 ++ cause problems for programs that want to use lots of memory (more
76235 ++ than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
76236 ++ For this reason the second step can be controlled by 'chpax' or
76237 ++ 'paxctl' on a per file basis.
76238 ++
76239 ++config PAX_RANDMMAP
76240 ++ bool "Randomize mmap() base"
76241 ++ depends on PAX_ASLR
76242 ++ help
76243 ++ By saying Y here the kernel will use a randomized base address for
76244 ++ mmap() requests that do not specify one themselves. As a result
76245 ++ all dynamically loaded libraries will appear at random addresses
76246 ++ and therefore be harder to exploit by a technique where an attacker
76247 ++ attempts to execute library code for his purposes (e.g. spawn a
76248 ++ shell from an exploited program that is running at an elevated
76249 ++ privilege level).
76250 ++
76251 ++ Furthermore, if a program is relinked as a dynamic ELF file, its
76252 ++ base address will be randomized as well, completing the full
76253 ++ randomization of the address space layout. Attacking such programs
76254 ++ becomes a guess game. You can find an example of doing this at
76255 ++ http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
76256 ++ http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
76257 ++
76258 ++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
76259 ++ feature on a per file basis.
76260 ++
76261 ++endmenu
76262 ++
76263 ++menu "Miscellaneous hardening features"
76264 ++
76265 ++config PAX_MEMORY_SANITIZE
76266 ++ bool "Sanitize all freed memory"
76267 ++ help
76268 ++ By saying Y here the kernel will erase memory pages as soon as they
76269 ++ are freed. This in turn reduces the lifetime of data stored in the
76270 ++ pages, making it less likely that sensitive information such as
76271 ++ passwords, cryptographic secrets, etc stay in memory for too long.
76272 ++
76273 ++ This is especially useful for programs whose runtime is short, long
76274 ++ lived processes and the kernel itself benefit from this as long as
76275 ++ they operate on whole memory pages and ensure timely freeing of pages
76276 ++ that may hold sensitive information.
76277 ++
76278 ++ The tradeoff is performance impact, on a single CPU system kernel
76279 ++ compilation sees a 3% slowdown, other systems and workloads may vary
76280 ++ and you are advised to test this feature on your expected workload
76281 ++ before deploying it.
76282 ++
76283 ++ Note that this feature does not protect data stored in live pages,
76284 ++ e.g., process memory swapped to disk may stay there for a long time.
76285 ++
76286 ++config PAX_MEMORY_STACKLEAK
76287 ++ bool "Sanitize kernel stack"
76288 ++ depends on X86
76289 ++ help
76290 ++ By saying Y here the kernel will erase the kernel stack before it
76291 ++ returns from a system call. This in turn reduces the information
76292 ++ that a kernel stack leak bug can reveal.
76293 ++
76294 ++ Note that such a bug can still leak information that was put on
76295 ++ the stack by the current system call (the one eventually triggering
76296 ++ the bug) but traces of earlier system calls on the kernel stack
76297 ++ cannot leak anymore.
76298 ++
76299 ++ The tradeoff is performance impact: on a single CPU system kernel
76300 ++ compilation sees a 1% slowdown, other systems and workloads may vary
76301 ++ and you are advised to test this feature on your expected workload
76302 ++ before deploying it.
76303 ++
76304 ++ Note: full support for this feature requires gcc with plugin support
76305 ++ so make sure your compiler is at least gcc 4.5.0 (cross compilation
76306 ++ is not supported). Using older gcc versions means that functions
76307 ++ with large enough stack frames may leave uninitialized memory behind
76308 ++ that may be exposed to a later syscall leaking the stack.
76309 ++
76310 ++config PAX_MEMORY_UDEREF
76311 ++ bool "Prevent invalid userland pointer dereference"
76312 ++ depends on X86 && !UML_X86 && !XEN
76313 ++ select PAX_PER_CPU_PGD if X86_64
76314 ++ help
76315 ++ By saying Y here the kernel will be prevented from dereferencing
76316 ++ userland pointers in contexts where the kernel expects only kernel
76317 ++ pointers. This is both a useful runtime debugging feature and a
76318 ++ security measure that prevents exploiting a class of kernel bugs.
76319 ++
76320 ++ The tradeoff is that some virtualization solutions may experience
76321 ++ a huge slowdown and therefore you should not enable this feature
76322 ++ for kernels meant to run in such environments. Whether a given VM
76323 ++ solution is affected or not is best determined by simply trying it
76324 ++ out, the performance impact will be obvious right on boot as this
76325 ++ mechanism engages from very early on. A good rule of thumb is that
76326 ++ VMs running on CPUs without hardware virtualization support (i.e.,
76327 ++ the majority of IA-32 CPUs) will likely experience the slowdown.
76328 ++
76329 ++config PAX_REFCOUNT
76330 ++ bool "Prevent various kernel object reference counter overflows"
76331 ++ depends on GRKERNSEC && (X86 || SPARC64)
76332 ++ help
76333 ++ By saying Y here the kernel will detect and prevent overflowing
76334 ++ various (but not all) kinds of object reference counters. Such
76335 ++ overflows can normally occur due to bugs only and are often, if
76336 ++ not always, exploitable.
76337 ++
76338 ++ The tradeoff is that data structures protected by an overflowed
76339 ++ refcount will never be freed and therefore will leak memory. Note
76340 ++ that this leak also happens even without this protection but in
76341 ++ that case the overflow can eventually trigger the freeing of the
76342 ++ data structure while it is still being used elsewhere, resulting
76343 ++ in the exploitable situation that this feature prevents.
76344 ++
76345 ++ Since this has a negligible performance impact, you should enable
76346 ++ this feature.
76347 ++
76348 ++config PAX_USERCOPY
76349 ++ bool "Harden heap object copies between kernel and userland"
76350 ++ depends on X86 || PPC || SPARC || ARM
76351 ++ depends on GRKERNSEC && (SLAB || SLUB || SLOB)
76352 ++ help
76353 ++ By saying Y here the kernel will enforce the size of heap objects
76354 ++ when they are copied in either direction between the kernel and
76355 ++ userland, even if only a part of the heap object is copied.
76356 ++
76357 ++ Specifically, this checking prevents information leaking from the
76358 ++ kernel heap during kernel to userland copies (if the kernel heap
76359 ++ object is otherwise fully initialized) and prevents kernel heap
76360 ++ overflows during userland to kernel copies.
76361 ++
76362 ++ Note that the current implementation provides the strictest bounds
76363 ++ checks for the SLUB allocator.
76364 ++
76365 ++ Enabling this option also enables per-slab cache protection against
76366 ++ data in a given cache being copied into/out of via userland
76367 ++ accessors. Though the whitelist of regions will be reduced over
76368 ++ time, it notably protects important data structures like task structs.
76369 ++
76370 ++ If frame pointers are enabled on x86, this option will also restrict
76371 ++ copies into and out of the kernel stack to local variables within a
76372 ++ single frame.
76373 ++
76374 ++ Since this has a negligible performance impact, you should enable
76375 ++ this feature.
76376 ++
76377 ++endmenu
76378 ++
76379 ++endmenu
76380 ++
76381 + config KEYS
76382 + bool "Enable access key retention support"
76383 + help
76384 +@@ -167,7 +719,7 @@ config INTEL_TXT
76385 + config LSM_MMAP_MIN_ADDR
76386 + int "Low address space for LSM to protect from user allocation"
76387 + depends on SECURITY && SECURITY_SELINUX
76388 +- default 32768 if ARM
76389 ++ default 32768 if ALPHA || ARM || PARISC || SPARC32
76390 + default 65536
76391 + help
76392 + This is the portion of low virtual memory which should be protected
76393 +diff -urNp linux-3.1.1/security/keys/compat.c linux-3.1.1/security/keys/compat.c
76394 +--- linux-3.1.1/security/keys/compat.c 2011-11-11 15:19:27.000000000 -0500
76395 ++++ linux-3.1.1/security/keys/compat.c 2011-11-16 18:39:08.000000000 -0500
76396 +@@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
76397 + if (ret == 0)
76398 + goto no_payload_free;
76399 +
76400 +- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
76401 ++ ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
76402 +
76403 + if (iov != iovstack)
76404 + kfree(iov);
76405 +diff -urNp linux-3.1.1/security/keys/keyctl.c linux-3.1.1/security/keys/keyctl.c
76406 +--- linux-3.1.1/security/keys/keyctl.c 2011-11-11 15:19:27.000000000 -0500
76407 ++++ linux-3.1.1/security/keys/keyctl.c 2011-11-16 18:39:08.000000000 -0500
76408 +@@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(str
76409 + /*
76410 + * Copy the iovec data from userspace
76411 + */
76412 +-static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
76413 ++static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
76414 + unsigned ioc)
76415 + {
76416 + for (; ioc > 0; ioc--) {
76417 +@@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *b
76418 + * If successful, 0 will be returned.
76419 + */
76420 + long keyctl_instantiate_key_common(key_serial_t id,
76421 +- const struct iovec *payload_iov,
76422 ++ const struct iovec __user *payload_iov,
76423 + unsigned ioc,
76424 + size_t plen,
76425 + key_serial_t ringid)
76426 +@@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t
76427 + [0].iov_len = plen
76428 + };
76429 +
76430 +- return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
76431 ++ return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
76432 + }
76433 +
76434 + return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
76435 +@@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_seri
76436 + if (ret == 0)
76437 + goto no_payload_free;
76438 +
76439 +- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
76440 ++ ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
76441 +
76442 + if (iov != iovstack)
76443 + kfree(iov);
76444 +diff -urNp linux-3.1.1/security/keys/keyring.c linux-3.1.1/security/keys/keyring.c
76445 +--- linux-3.1.1/security/keys/keyring.c 2011-11-11 15:19:27.000000000 -0500
76446 ++++ linux-3.1.1/security/keys/keyring.c 2011-11-16 18:39:08.000000000 -0500
76447 +@@ -214,15 +214,15 @@ static long keyring_read(const struct ke
76448 + ret = -EFAULT;
76449 +
76450 + for (loop = 0; loop < klist->nkeys; loop++) {
76451 ++ key_serial_t serial;
76452 + key = klist->keys[loop];
76453 ++ serial = key->serial;
76454 +
76455 + tmp = sizeof(key_serial_t);
76456 + if (tmp > buflen)
76457 + tmp = buflen;
76458 +
76459 +- if (copy_to_user(buffer,
76460 +- &key->serial,
76461 +- tmp) != 0)
76462 ++ if (copy_to_user(buffer, &serial, tmp))
76463 + goto error;
76464 +
76465 + buflen -= tmp;
76466 +diff -urNp linux-3.1.1/security/keys/user_defined.c linux-3.1.1/security/keys/user_defined.c
76467 +--- linux-3.1.1/security/keys/user_defined.c 2011-11-11 15:19:27.000000000 -0500
76468 ++++ linux-3.1.1/security/keys/user_defined.c 2011-11-18 18:38:28.000000000 -0500
76469 +@@ -102,7 +102,8 @@ int user_update(struct key *key, const v
76470 + key->expiry = 0;
76471 + }
76472 +
76473 +- kfree_rcu(zap, rcu);
76474 ++ if (zap)
76475 ++ kfree_rcu(zap, rcu);
76476 +
76477 + error:
76478 + return ret;
76479 +diff -urNp linux-3.1.1/security/min_addr.c linux-3.1.1/security/min_addr.c
76480 +--- linux-3.1.1/security/min_addr.c 2011-11-11 15:19:27.000000000 -0500
76481 ++++ linux-3.1.1/security/min_addr.c 2011-11-16 18:40:44.000000000 -0500
76482 +@@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
76483 + */
76484 + static void update_mmap_min_addr(void)
76485 + {
76486 ++#ifndef SPARC
76487 + #ifdef CONFIG_LSM_MMAP_MIN_ADDR
76488 + if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
76489 + mmap_min_addr = dac_mmap_min_addr;
76490 +@@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
76491 + #else
76492 + mmap_min_addr = dac_mmap_min_addr;
76493 + #endif
76494 ++#endif
76495 + }
76496 +
76497 + /*
76498 +diff -urNp linux-3.1.1/security/security.c linux-3.1.1/security/security.c
76499 +--- linux-3.1.1/security/security.c 2011-11-11 15:19:27.000000000 -0500
76500 ++++ linux-3.1.1/security/security.c 2011-11-16 18:40:44.000000000 -0500
76501 +@@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI
76502 + /* things that live in capability.c */
76503 + extern void __init security_fixup_ops(struct security_operations *ops);
76504 +
76505 +-static struct security_operations *security_ops;
76506 +-static struct security_operations default_security_ops = {
76507 ++static struct security_operations *security_ops __read_only;
76508 ++static struct security_operations default_security_ops __read_only = {
76509 + .name = "default",
76510 + };
76511 +
76512 +@@ -67,7 +67,9 @@ int __init security_init(void)
76513 +
76514 + void reset_security_ops(void)
76515 + {
76516 ++ pax_open_kernel();
76517 + security_ops = &default_security_ops;
76518 ++ pax_close_kernel();
76519 + }
76520 +
76521 + /* Save user chosen LSM */
76522 +diff -urNp linux-3.1.1/security/selinux/hooks.c linux-3.1.1/security/selinux/hooks.c
76523 +--- linux-3.1.1/security/selinux/hooks.c 2011-11-11 15:19:27.000000000 -0500
76524 ++++ linux-3.1.1/security/selinux/hooks.c 2011-11-16 18:40:44.000000000 -0500
76525 +@@ -93,7 +93,6 @@
76526 + #define NUM_SEL_MNT_OPTS 5
76527 +
76528 + extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
76529 +-extern struct security_operations *security_ops;
76530 +
76531 + /* SECMARK reference count */
76532 + atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
76533 +@@ -5455,7 +5454,7 @@ static int selinux_key_getsecurity(struc
76534 +
76535 + #endif
76536 +
76537 +-static struct security_operations selinux_ops = {
76538 ++static struct security_operations selinux_ops __read_only = {
76539 + .name = "selinux",
76540 +
76541 + .ptrace_access_check = selinux_ptrace_access_check,
76542 +diff -urNp linux-3.1.1/security/selinux/include/xfrm.h linux-3.1.1/security/selinux/include/xfrm.h
76543 +--- linux-3.1.1/security/selinux/include/xfrm.h 2011-11-11 15:19:27.000000000 -0500
76544 ++++ linux-3.1.1/security/selinux/include/xfrm.h 2011-11-16 18:39:08.000000000 -0500
76545 +@@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
76546 +
76547 + static inline void selinux_xfrm_notify_policyload(void)
76548 + {
76549 +- atomic_inc(&flow_cache_genid);
76550 ++ atomic_inc_unchecked(&flow_cache_genid);
76551 + }
76552 + #else
76553 + static inline int selinux_xfrm_enabled(void)
76554 +diff -urNp linux-3.1.1/security/selinux/ss/services.c linux-3.1.1/security/selinux/ss/services.c
76555 +--- linux-3.1.1/security/selinux/ss/services.c 2011-11-11 15:19:27.000000000 -0500
76556 ++++ linux-3.1.1/security/selinux/ss/services.c 2011-11-16 18:40:44.000000000 -0500
76557 +@@ -1814,6 +1814,8 @@ int security_load_policy(void *data, siz
76558 + int rc = 0;
76559 + struct policy_file file = { data, len }, *fp = &file;
76560 +
76561 ++ pax_track_stack();
76562 ++
76563 + if (!ss_initialized) {
76564 + avtab_cache_init();
76565 + rc = policydb_read(&policydb, fp);
76566 +diff -urNp linux-3.1.1/security/smack/smack_lsm.c linux-3.1.1/security/smack/smack_lsm.c
76567 +--- linux-3.1.1/security/smack/smack_lsm.c 2011-11-11 15:19:27.000000000 -0500
76568 ++++ linux-3.1.1/security/smack/smack_lsm.c 2011-11-16 18:39:08.000000000 -0500
76569 +@@ -3393,7 +3393,7 @@ static int smack_inode_getsecctx(struct
76570 + return 0;
76571 + }
76572 +
76573 +-struct security_operations smack_ops = {
76574 ++struct security_operations smack_ops __read_only = {
76575 + .name = "smack",
76576 +
76577 + .ptrace_access_check = smack_ptrace_access_check,
76578 +diff -urNp linux-3.1.1/security/tomoyo/tomoyo.c linux-3.1.1/security/tomoyo/tomoyo.c
76579 +--- linux-3.1.1/security/tomoyo/tomoyo.c 2011-11-11 15:19:27.000000000 -0500
76580 ++++ linux-3.1.1/security/tomoyo/tomoyo.c 2011-11-16 18:39:08.000000000 -0500
76581 +@@ -446,7 +446,7 @@ static int tomoyo_sb_pivotroot(struct pa
76582 + * tomoyo_security_ops is a "struct security_operations" which is used for
76583 + * registering TOMOYO.
76584 + */
76585 +-static struct security_operations tomoyo_security_ops = {
76586 ++static struct security_operations tomoyo_security_ops __read_only = {
76587 + .name = "tomoyo",
76588 + .cred_alloc_blank = tomoyo_cred_alloc_blank,
76589 + .cred_prepare = tomoyo_cred_prepare,
76590 +diff -urNp linux-3.1.1/sound/aoa/codecs/onyx.c linux-3.1.1/sound/aoa/codecs/onyx.c
76591 +--- linux-3.1.1/sound/aoa/codecs/onyx.c 2011-11-11 15:19:27.000000000 -0500
76592 ++++ linux-3.1.1/sound/aoa/codecs/onyx.c 2011-11-16 18:39:08.000000000 -0500
76593 +@@ -54,7 +54,7 @@ struct onyx {
76594 + spdif_locked:1,
76595 + analog_locked:1,
76596 + original_mute:2;
76597 +- int open_count;
76598 ++ local_t open_count;
76599 + struct codec_info *codec_info;
76600 +
76601 + /* mutex serializes concurrent access to the device
76602 +@@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
76603 + struct onyx *onyx = cii->codec_data;
76604 +
76605 + mutex_lock(&onyx->mutex);
76606 +- onyx->open_count++;
76607 ++ local_inc(&onyx->open_count);
76608 + mutex_unlock(&onyx->mutex);
76609 +
76610 + return 0;
76611 +@@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
76612 + struct onyx *onyx = cii->codec_data;
76613 +
76614 + mutex_lock(&onyx->mutex);
76615 +- onyx->open_count--;
76616 +- if (!onyx->open_count)
76617 ++ if (local_dec_and_test(&onyx->open_count))
76618 + onyx->spdif_locked = onyx->analog_locked = 0;
76619 + mutex_unlock(&onyx->mutex);
76620 +
76621 +diff -urNp linux-3.1.1/sound/aoa/codecs/onyx.h linux-3.1.1/sound/aoa/codecs/onyx.h
76622 +--- linux-3.1.1/sound/aoa/codecs/onyx.h 2011-11-11 15:19:27.000000000 -0500
76623 ++++ linux-3.1.1/sound/aoa/codecs/onyx.h 2011-11-16 18:39:08.000000000 -0500
76624 +@@ -11,6 +11,7 @@
76625 + #include <linux/i2c.h>
76626 + #include <asm/pmac_low_i2c.h>
76627 + #include <asm/prom.h>
76628 ++#include <asm/local.h>
76629 +
76630 + /* PCM3052 register definitions */
76631 +
76632 +diff -urNp linux-3.1.1/sound/core/oss/pcm_oss.c linux-3.1.1/sound/core/oss/pcm_oss.c
76633 +--- linux-3.1.1/sound/core/oss/pcm_oss.c 2011-11-11 15:19:27.000000000 -0500
76634 ++++ linux-3.1.1/sound/core/oss/pcm_oss.c 2011-11-16 18:39:08.000000000 -0500
76635 +@@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(str
76636 + if (in_kernel) {
76637 + mm_segment_t fs;
76638 + fs = snd_enter_user();
76639 +- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
76640 ++ ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
76641 + snd_leave_user(fs);
76642 + } else {
76643 +- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
76644 ++ ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
76645 + }
76646 + if (ret != -EPIPE && ret != -ESTRPIPE)
76647 + break;
76648 +@@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(stru
76649 + if (in_kernel) {
76650 + mm_segment_t fs;
76651 + fs = snd_enter_user();
76652 +- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
76653 ++ ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
76654 + snd_leave_user(fs);
76655 + } else {
76656 +- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
76657 ++ ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
76658 + }
76659 + if (ret == -EPIPE) {
76660 + if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
76661 +@@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct
76662 + struct snd_pcm_plugin_channel *channels;
76663 + size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
76664 + if (!in_kernel) {
76665 +- if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
76666 ++ if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
76667 + return -EFAULT;
76668 + buf = runtime->oss.buffer;
76669 + }
76670 +@@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct
76671 + }
76672 + } else {
76673 + tmp = snd_pcm_oss_write2(substream,
76674 +- (const char __force *)buf,
76675 ++ (const char __force_kernel *)buf,
76676 + runtime->oss.period_bytes, 0);
76677 + if (tmp <= 0)
76678 + goto err;
76679 +@@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct
76680 + struct snd_pcm_runtime *runtime = substream->runtime;
76681 + snd_pcm_sframes_t frames, frames1;
76682 + #ifdef CONFIG_SND_PCM_OSS_PLUGINS
76683 +- char __user *final_dst = (char __force __user *)buf;
76684 ++ char __user *final_dst = (char __force_user *)buf;
76685 + if (runtime->oss.plugin_first) {
76686 + struct snd_pcm_plugin_channel *channels;
76687 + size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
76688 +@@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct
76689 + xfer += tmp;
76690 + runtime->oss.buffer_used -= tmp;
76691 + } else {
76692 +- tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
76693 ++ tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
76694 + runtime->oss.period_bytes, 0);
76695 + if (tmp <= 0)
76696 + goto err;
76697 +@@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_p
76698 + size1);
76699 + size1 /= runtime->channels; /* frames */
76700 + fs = snd_enter_user();
76701 +- snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
76702 ++ snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
76703 + snd_leave_user(fs);
76704 + }
76705 + } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
76706 +diff -urNp linux-3.1.1/sound/core/pcm_compat.c linux-3.1.1/sound/core/pcm_compat.c
76707 +--- linux-3.1.1/sound/core/pcm_compat.c 2011-11-11 15:19:27.000000000 -0500
76708 ++++ linux-3.1.1/sound/core/pcm_compat.c 2011-11-16 18:39:08.000000000 -0500
76709 +@@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(st
76710 + int err;
76711 +
76712 + fs = snd_enter_user();
76713 +- err = snd_pcm_delay(substream, &delay);
76714 ++ err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
76715 + snd_leave_user(fs);
76716 + if (err < 0)
76717 + return err;
76718 +diff -urNp linux-3.1.1/sound/core/pcm_native.c linux-3.1.1/sound/core/pcm_native.c
76719 +--- linux-3.1.1/sound/core/pcm_native.c 2011-11-11 15:19:27.000000000 -0500
76720 ++++ linux-3.1.1/sound/core/pcm_native.c 2011-11-16 18:39:08.000000000 -0500
76721 +@@ -2770,11 +2770,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_
76722 + switch (substream->stream) {
76723 + case SNDRV_PCM_STREAM_PLAYBACK:
76724 + result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
76725 +- (void __user *)arg);
76726 ++ (void __force_user *)arg);
76727 + break;
76728 + case SNDRV_PCM_STREAM_CAPTURE:
76729 + result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
76730 +- (void __user *)arg);
76731 ++ (void __force_user *)arg);
76732 + break;
76733 + default:
76734 + result = -EINVAL;
76735 +diff -urNp linux-3.1.1/sound/core/seq/seq_device.c linux-3.1.1/sound/core/seq/seq_device.c
76736 +--- linux-3.1.1/sound/core/seq/seq_device.c 2011-11-11 15:19:27.000000000 -0500
76737 ++++ linux-3.1.1/sound/core/seq/seq_device.c 2011-11-16 18:39:08.000000000 -0500
76738 +@@ -63,7 +63,7 @@ struct ops_list {
76739 + int argsize; /* argument size */
76740 +
76741 + /* operators */
76742 +- struct snd_seq_dev_ops ops;
76743 ++ struct snd_seq_dev_ops *ops;
76744 +
76745 + /* registred devices */
76746 + struct list_head dev_list; /* list of devices */
76747 +@@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
76748 +
76749 + mutex_lock(&ops->reg_mutex);
76750 + /* copy driver operators */
76751 +- ops->ops = *entry;
76752 ++ ops->ops = entry;
76753 + ops->driver |= DRIVER_LOADED;
76754 + ops->argsize = argsize;
76755 +
76756 +@@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
76757 + dev->name, ops->id, ops->argsize, dev->argsize);
76758 + return -EINVAL;
76759 + }
76760 +- if (ops->ops.init_device(dev) >= 0) {
76761 ++ if (ops->ops->init_device(dev) >= 0) {
76762 + dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
76763 + ops->num_init_devices++;
76764 + } else {
76765 +@@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
76766 + dev->name, ops->id, ops->argsize, dev->argsize);
76767 + return -EINVAL;
76768 + }
76769 +- if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
76770 ++ if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
76771 + dev->status = SNDRV_SEQ_DEVICE_FREE;
76772 + dev->driver_data = NULL;
76773 + ops->num_init_devices--;
76774 +diff -urNp linux-3.1.1/sound/drivers/mts64.c linux-3.1.1/sound/drivers/mts64.c
76775 +--- linux-3.1.1/sound/drivers/mts64.c 2011-11-11 15:19:27.000000000 -0500
76776 ++++ linux-3.1.1/sound/drivers/mts64.c 2011-11-16 18:39:08.000000000 -0500
76777 +@@ -28,6 +28,7 @@
76778 + #include <sound/initval.h>
76779 + #include <sound/rawmidi.h>
76780 + #include <sound/control.h>
76781 ++#include <asm/local.h>
76782 +
76783 + #define CARD_NAME "Miditerminal 4140"
76784 + #define DRIVER_NAME "MTS64"
76785 +@@ -66,7 +67,7 @@ struct mts64 {
76786 + struct pardevice *pardev;
76787 + int pardev_claimed;
76788 +
76789 +- int open_count;
76790 ++ local_t open_count;
76791 + int current_midi_output_port;
76792 + int current_midi_input_port;
76793 + u8 mode[MTS64_NUM_INPUT_PORTS];
76794 +@@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct
76795 + {
76796 + struct mts64 *mts = substream->rmidi->private_data;
76797 +
76798 +- if (mts->open_count == 0) {
76799 ++ if (local_read(&mts->open_count) == 0) {
76800 + /* We don't need a spinlock here, because this is just called
76801 + if the device has not been opened before.
76802 + So there aren't any IRQs from the device */
76803 +@@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct
76804 +
76805 + msleep(50);
76806 + }
76807 +- ++(mts->open_count);
76808 ++ local_inc(&mts->open_count);
76809 +
76810 + return 0;
76811 + }
76812 +@@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struc
76813 + struct mts64 *mts = substream->rmidi->private_data;
76814 + unsigned long flags;
76815 +
76816 +- --(mts->open_count);
76817 +- if (mts->open_count == 0) {
76818 ++ if (local_dec_return(&mts->open_count) == 0) {
76819 + /* We need the spinlock_irqsave here because we can still
76820 + have IRQs at this point */
76821 + spin_lock_irqsave(&mts->lock, flags);
76822 +@@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struc
76823 +
76824 + msleep(500);
76825 +
76826 +- } else if (mts->open_count < 0)
76827 +- mts->open_count = 0;
76828 ++ } else if (local_read(&mts->open_count) < 0)
76829 ++ local_set(&mts->open_count, 0);
76830 +
76831 + return 0;
76832 + }
76833 +diff -urNp linux-3.1.1/sound/drivers/opl4/opl4_lib.c linux-3.1.1/sound/drivers/opl4/opl4_lib.c
76834 +--- linux-3.1.1/sound/drivers/opl4/opl4_lib.c 2011-11-11 15:19:27.000000000 -0500
76835 ++++ linux-3.1.1/sound/drivers/opl4/opl4_lib.c 2011-11-16 18:39:08.000000000 -0500
76836 +@@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
76837 + MODULE_DESCRIPTION("OPL4 driver");
76838 + MODULE_LICENSE("GPL");
76839 +
76840 +-static void inline snd_opl4_wait(struct snd_opl4 *opl4)
76841 ++static inline void snd_opl4_wait(struct snd_opl4 *opl4)
76842 + {
76843 + int timeout = 10;
76844 + while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
76845 +diff -urNp linux-3.1.1/sound/drivers/portman2x4.c linux-3.1.1/sound/drivers/portman2x4.c
76846 +--- linux-3.1.1/sound/drivers/portman2x4.c 2011-11-11 15:19:27.000000000 -0500
76847 ++++ linux-3.1.1/sound/drivers/portman2x4.c 2011-11-16 18:39:08.000000000 -0500
76848 +@@ -47,6 +47,7 @@
76849 + #include <sound/initval.h>
76850 + #include <sound/rawmidi.h>
76851 + #include <sound/control.h>
76852 ++#include <asm/local.h>
76853 +
76854 + #define CARD_NAME "Portman 2x4"
76855 + #define DRIVER_NAME "portman"
76856 +@@ -84,7 +85,7 @@ struct portman {
76857 + struct pardevice *pardev;
76858 + int pardev_claimed;
76859 +
76860 +- int open_count;
76861 ++ local_t open_count;
76862 + int mode[PORTMAN_NUM_INPUT_PORTS];
76863 + struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
76864 + };
76865 +diff -urNp linux-3.1.1/sound/firewire/amdtp.c linux-3.1.1/sound/firewire/amdtp.c
76866 +--- linux-3.1.1/sound/firewire/amdtp.c 2011-11-11 15:19:27.000000000 -0500
76867 ++++ linux-3.1.1/sound/firewire/amdtp.c 2011-11-16 18:39:08.000000000 -0500
76868 +@@ -371,7 +371,7 @@ static void queue_out_packet(struct amdt
76869 + ptr = s->pcm_buffer_pointer + data_blocks;
76870 + if (ptr >= pcm->runtime->buffer_size)
76871 + ptr -= pcm->runtime->buffer_size;
76872 +- ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
76873 ++ ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
76874 +
76875 + s->pcm_period_pointer += data_blocks;
76876 + if (s->pcm_period_pointer >= pcm->runtime->period_size) {
76877 +@@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
76878 + */
76879 + void amdtp_out_stream_update(struct amdtp_out_stream *s)
76880 + {
76881 +- ACCESS_ONCE(s->source_node_id_field) =
76882 ++ ACCESS_ONCE_RW(s->source_node_id_field) =
76883 + (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
76884 + }
76885 + EXPORT_SYMBOL(amdtp_out_stream_update);
76886 +diff -urNp linux-3.1.1/sound/firewire/amdtp.h linux-3.1.1/sound/firewire/amdtp.h
76887 +--- linux-3.1.1/sound/firewire/amdtp.h 2011-11-11 15:19:27.000000000 -0500
76888 ++++ linux-3.1.1/sound/firewire/amdtp.h 2011-11-16 18:39:08.000000000 -0500
76889 +@@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_
76890 + static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
76891 + struct snd_pcm_substream *pcm)
76892 + {
76893 +- ACCESS_ONCE(s->pcm) = pcm;
76894 ++ ACCESS_ONCE_RW(s->pcm) = pcm;
76895 + }
76896 +
76897 + /**
76898 +diff -urNp linux-3.1.1/sound/firewire/isight.c linux-3.1.1/sound/firewire/isight.c
76899 +--- linux-3.1.1/sound/firewire/isight.c 2011-11-11 15:19:27.000000000 -0500
76900 ++++ linux-3.1.1/sound/firewire/isight.c 2011-11-16 18:39:08.000000000 -0500
76901 +@@ -97,7 +97,7 @@ static void isight_update_pointers(struc
76902 + ptr += count;
76903 + if (ptr >= runtime->buffer_size)
76904 + ptr -= runtime->buffer_size;
76905 +- ACCESS_ONCE(isight->buffer_pointer) = ptr;
76906 ++ ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
76907 +
76908 + isight->period_counter += count;
76909 + if (isight->period_counter >= runtime->period_size) {
76910 +@@ -308,7 +308,7 @@ static int isight_hw_params(struct snd_p
76911 + if (err < 0)
76912 + return err;
76913 +
76914 +- ACCESS_ONCE(isight->pcm_active) = true;
76915 ++ ACCESS_ONCE_RW(isight->pcm_active) = true;
76916 +
76917 + return 0;
76918 + }
76919 +@@ -341,7 +341,7 @@ static int isight_hw_free(struct snd_pcm
76920 + {
76921 + struct isight *isight = substream->private_data;
76922 +
76923 +- ACCESS_ONCE(isight->pcm_active) = false;
76924 ++ ACCESS_ONCE_RW(isight->pcm_active) = false;
76925 +
76926 + mutex_lock(&isight->mutex);
76927 + isight_stop_streaming(isight);
76928 +@@ -434,10 +434,10 @@ static int isight_trigger(struct snd_pcm
76929 +
76930 + switch (cmd) {
76931 + case SNDRV_PCM_TRIGGER_START:
76932 +- ACCESS_ONCE(isight->pcm_running) = true;
76933 ++ ACCESS_ONCE_RW(isight->pcm_running) = true;
76934 + break;
76935 + case SNDRV_PCM_TRIGGER_STOP:
76936 +- ACCESS_ONCE(isight->pcm_running) = false;
76937 ++ ACCESS_ONCE_RW(isight->pcm_running) = false;
76938 + break;
76939 + default:
76940 + return -EINVAL;
76941 +diff -urNp linux-3.1.1/sound/isa/cmi8330.c linux-3.1.1/sound/isa/cmi8330.c
76942 +--- linux-3.1.1/sound/isa/cmi8330.c 2011-11-11 15:19:27.000000000 -0500
76943 ++++ linux-3.1.1/sound/isa/cmi8330.c 2011-11-16 18:39:08.000000000 -0500
76944 +@@ -172,7 +172,7 @@ struct snd_cmi8330 {
76945 +
76946 + struct snd_pcm *pcm;
76947 + struct snd_cmi8330_stream {
76948 +- struct snd_pcm_ops ops;
76949 ++ snd_pcm_ops_no_const ops;
76950 + snd_pcm_open_callback_t open;
76951 + void *private_data; /* sb or wss */
76952 + } streams[2];
76953 +diff -urNp linux-3.1.1/sound/oss/sb_audio.c linux-3.1.1/sound/oss/sb_audio.c
76954 +--- linux-3.1.1/sound/oss/sb_audio.c 2011-11-11 15:19:27.000000000 -0500
76955 ++++ linux-3.1.1/sound/oss/sb_audio.c 2011-11-16 18:39:08.000000000 -0500
76956 +@@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
76957 + buf16 = (signed short *)(localbuf + localoffs);
76958 + while (c)
76959 + {
76960 +- locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
76961 ++ locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
76962 + if (copy_from_user(lbuf8,
76963 + userbuf+useroffs + p,
76964 + locallen))
76965 +diff -urNp linux-3.1.1/sound/oss/swarm_cs4297a.c linux-3.1.1/sound/oss/swarm_cs4297a.c
76966 +--- linux-3.1.1/sound/oss/swarm_cs4297a.c 2011-11-11 15:19:27.000000000 -0500
76967 ++++ linux-3.1.1/sound/oss/swarm_cs4297a.c 2011-11-16 18:39:08.000000000 -0500
76968 +@@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
76969 + {
76970 + struct cs4297a_state *s;
76971 + u32 pwr, id;
76972 +- mm_segment_t fs;
76973 + int rval;
76974 + #ifndef CONFIG_BCM_CS4297A_CSWARM
76975 + u64 cfg;
76976 +@@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
76977 + if (!rval) {
76978 + char *sb1250_duart_present;
76979 +
76980 ++#if 0
76981 ++ mm_segment_t fs;
76982 + fs = get_fs();
76983 + set_fs(KERNEL_DS);
76984 +-#if 0
76985 + val = SOUND_MASK_LINE;
76986 + mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
76987 + for (i = 0; i < ARRAY_SIZE(initvol); i++) {
76988 + val = initvol[i].vol;
76989 + mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
76990 + }
76991 ++ set_fs(fs);
76992 + // cs4297a_write_ac97(s, 0x18, 0x0808);
76993 + #else
76994 + // cs4297a_write_ac97(s, 0x5e, 0x180);
76995 + cs4297a_write_ac97(s, 0x02, 0x0808);
76996 + cs4297a_write_ac97(s, 0x18, 0x0808);
76997 + #endif
76998 +- set_fs(fs);
76999 +
77000 + list_add(&s->list, &cs4297a_devs);
77001 +
77002 +diff -urNp linux-3.1.1/sound/pci/hda/hda_codec.h linux-3.1.1/sound/pci/hda/hda_codec.h
77003 +--- linux-3.1.1/sound/pci/hda/hda_codec.h 2011-11-11 15:19:27.000000000 -0500
77004 ++++ linux-3.1.1/sound/pci/hda/hda_codec.h 2011-11-16 18:39:08.000000000 -0500
77005 +@@ -611,7 +611,7 @@ struct hda_bus_ops {
77006 + /* notify power-up/down from codec to controller */
77007 + void (*pm_notify)(struct hda_bus *bus);
77008 + #endif
77009 +-};
77010 ++} __no_const;
77011 +
77012 + /* template to pass to the bus constructor */
77013 + struct hda_bus_template {
77014 +@@ -713,6 +713,7 @@ struct hda_codec_ops {
77015 + #endif
77016 + void (*reboot_notify)(struct hda_codec *codec);
77017 + };
77018 ++typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
77019 +
77020 + /* record for amp information cache */
77021 + struct hda_cache_head {
77022 +@@ -743,7 +744,7 @@ struct hda_pcm_ops {
77023 + struct snd_pcm_substream *substream);
77024 + int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
77025 + struct snd_pcm_substream *substream);
77026 +-};
77027 ++} __no_const;
77028 +
77029 + /* PCM information for each substream */
77030 + struct hda_pcm_stream {
77031 +@@ -801,7 +802,7 @@ struct hda_codec {
77032 + const char *modelname; /* model name for preset */
77033 +
77034 + /* set by patch */
77035 +- struct hda_codec_ops patch_ops;
77036 ++ hda_codec_ops_no_const patch_ops;
77037 +
77038 + /* PCM to create, set by patch_ops.build_pcms callback */
77039 + unsigned int num_pcms;
77040 +diff -urNp linux-3.1.1/sound/pci/ice1712/ice1712.h linux-3.1.1/sound/pci/ice1712/ice1712.h
77041 +--- linux-3.1.1/sound/pci/ice1712/ice1712.h 2011-11-11 15:19:27.000000000 -0500
77042 ++++ linux-3.1.1/sound/pci/ice1712/ice1712.h 2011-11-16 18:39:08.000000000 -0500
77043 +@@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
77044 + unsigned int mask_flags; /* total mask bits */
77045 + struct snd_akm4xxx_ops {
77046 + void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
77047 +- } ops;
77048 ++ } __no_const ops;
77049 + };
77050 +
77051 + struct snd_ice1712_spdif {
77052 +@@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
77053 + int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77054 + void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77055 + int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77056 +- } ops;
77057 ++ } __no_const ops;
77058 + };
77059 +
77060 +
77061 +diff -urNp linux-3.1.1/sound/pci/ymfpci/ymfpci_main.c linux-3.1.1/sound/pci/ymfpci/ymfpci_main.c
77062 +--- linux-3.1.1/sound/pci/ymfpci/ymfpci_main.c 2011-11-11 15:19:27.000000000 -0500
77063 ++++ linux-3.1.1/sound/pci/ymfpci/ymfpci_main.c 2011-11-16 18:39:08.000000000 -0500
77064 +@@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
77065 + if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
77066 + break;
77067 + }
77068 +- if (atomic_read(&chip->interrupt_sleep_count)) {
77069 +- atomic_set(&chip->interrupt_sleep_count, 0);
77070 ++ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77071 ++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77072 + wake_up(&chip->interrupt_sleep);
77073 + }
77074 + __end:
77075 +@@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
77076 + continue;
77077 + init_waitqueue_entry(&wait, current);
77078 + add_wait_queue(&chip->interrupt_sleep, &wait);
77079 +- atomic_inc(&chip->interrupt_sleep_count);
77080 ++ atomic_inc_unchecked(&chip->interrupt_sleep_count);
77081 + schedule_timeout_uninterruptible(msecs_to_jiffies(50));
77082 + remove_wait_queue(&chip->interrupt_sleep, &wait);
77083 + }
77084 +@@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
77085 + snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
77086 + spin_unlock(&chip->reg_lock);
77087 +
77088 +- if (atomic_read(&chip->interrupt_sleep_count)) {
77089 +- atomic_set(&chip->interrupt_sleep_count, 0);
77090 ++ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77091 ++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77092 + wake_up(&chip->interrupt_sleep);
77093 + }
77094 + }
77095 +@@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct s
77096 + spin_lock_init(&chip->reg_lock);
77097 + spin_lock_init(&chip->voice_lock);
77098 + init_waitqueue_head(&chip->interrupt_sleep);
77099 +- atomic_set(&chip->interrupt_sleep_count, 0);
77100 ++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77101 + chip->card = card;
77102 + chip->pci = pci;
77103 + chip->irq = -1;
77104 +diff -urNp linux-3.1.1/sound/soc/soc-pcm.c linux-3.1.1/sound/soc/soc-pcm.c
77105 +--- linux-3.1.1/sound/soc/soc-pcm.c 2011-11-11 15:19:27.000000000 -0500
77106 ++++ linux-3.1.1/sound/soc/soc-pcm.c 2011-11-16 18:39:08.000000000 -0500
77107 +@@ -568,7 +568,7 @@ static snd_pcm_uframes_t soc_pcm_pointer
77108 + }
77109 +
77110 + /* ASoC PCM operations */
77111 +-static struct snd_pcm_ops soc_pcm_ops = {
77112 ++static snd_pcm_ops_no_const soc_pcm_ops = {
77113 + .open = soc_pcm_open,
77114 + .close = soc_pcm_close,
77115 + .hw_params = soc_pcm_hw_params,
77116 +diff -urNp linux-3.1.1/sound/usb/card.h linux-3.1.1/sound/usb/card.h
77117 +--- linux-3.1.1/sound/usb/card.h 2011-11-11 15:19:27.000000000 -0500
77118 ++++ linux-3.1.1/sound/usb/card.h 2011-11-16 18:39:08.000000000 -0500
77119 +@@ -44,6 +44,7 @@ struct snd_urb_ops {
77120 + int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77121 + int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77122 + };
77123 ++typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
77124 +
77125 + struct snd_usb_substream {
77126 + struct snd_usb_stream *stream;
77127 +@@ -93,7 +94,7 @@ struct snd_usb_substream {
77128 + struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
77129 + spinlock_t lock;
77130 +
77131 +- struct snd_urb_ops ops; /* callbacks (must be filled at init) */
77132 ++ snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
77133 + };
77134 +
77135 + struct snd_usb_stream {
77136 +diff -urNp linux-3.1.1/tools/gcc/checker_plugin.c linux-3.1.1/tools/gcc/checker_plugin.c
77137 +--- linux-3.1.1/tools/gcc/checker_plugin.c 1969-12-31 19:00:00.000000000 -0500
77138 ++++ linux-3.1.1/tools/gcc/checker_plugin.c 2011-11-16 18:39:08.000000000 -0500
77139 +@@ -0,0 +1,171 @@
77140 ++/*
77141 ++ * Copyright 2011 by the PaX Team <pageexec@××××××××.hu>
77142 ++ * Licensed under the GPL v2
77143 ++ *
77144 ++ * Note: the choice of the license means that the compilation process is
77145 ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77146 ++ * but for the kernel it doesn't matter since it doesn't link against
77147 ++ * any of the gcc libraries
77148 ++ *
77149 ++ * gcc plugin to implement various sparse (source code checker) features
77150 ++ *
77151 ++ * TODO:
77152 ++ * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
77153 ++ *
77154 ++ * BUGS:
77155 ++ * - none known
77156 ++ */
77157 ++#include "gcc-plugin.h"
77158 ++#include "config.h"
77159 ++#include "system.h"
77160 ++#include "coretypes.h"
77161 ++#include "tree.h"
77162 ++#include "tree-pass.h"
77163 ++#include "flags.h"
77164 ++#include "intl.h"
77165 ++#include "toplev.h"
77166 ++#include "plugin.h"
77167 ++//#include "expr.h" where are you...
77168 ++#include "diagnostic.h"
77169 ++#include "plugin-version.h"
77170 ++#include "tm.h"
77171 ++#include "function.h"
77172 ++#include "basic-block.h"
77173 ++#include "gimple.h"
77174 ++#include "rtl.h"
77175 ++#include "emit-rtl.h"
77176 ++#include "tree-flow.h"
77177 ++#include "target.h"
77178 ++
77179 ++extern void c_register_addr_space (const char *str, addr_space_t as);
77180 ++extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
77181 ++extern enum machine_mode default_addr_space_address_mode (addr_space_t);
77182 ++extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
77183 ++extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
77184 ++extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
77185 ++
77186 ++extern void print_gimple_stmt(FILE *, gimple, int, int);
77187 ++extern rtx emit_move_insn(rtx x, rtx y);
77188 ++
77189 ++int plugin_is_GPL_compatible;
77190 ++
77191 ++static struct plugin_info checker_plugin_info = {
77192 ++ .version = "201111150100",
77193 ++};
77194 ++
77195 ++#define ADDR_SPACE_KERNEL 0
77196 ++#define ADDR_SPACE_FORCE_KERNEL 1
77197 ++#define ADDR_SPACE_USER 2
77198 ++#define ADDR_SPACE_FORCE_USER 3
77199 ++#define ADDR_SPACE_IOMEM 0
77200 ++#define ADDR_SPACE_FORCE_IOMEM 0
77201 ++#define ADDR_SPACE_PERCPU 0
77202 ++#define ADDR_SPACE_FORCE_PERCPU 0
77203 ++#define ADDR_SPACE_RCU 0
77204 ++#define ADDR_SPACE_FORCE_RCU 0
77205 ++
77206 ++static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
77207 ++{
77208 ++ return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
77209 ++}
77210 ++
77211 ++static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
77212 ++{
77213 ++ return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
77214 ++}
77215 ++
77216 ++static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
77217 ++{
77218 ++ return default_addr_space_valid_pointer_mode(mode, as);
77219 ++}
77220 ++
77221 ++static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
77222 ++{
77223 ++ return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
77224 ++}
77225 ++
77226 ++static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
77227 ++{
77228 ++ return default_addr_space_legitimize_address(x, oldx, mode, as);
77229 ++}
77230 ++
77231 ++static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
77232 ++{
77233 ++ if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
77234 ++ return true;
77235 ++
77236 ++ if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
77237 ++ return true;
77238 ++
77239 ++ if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
77240 ++ return true;
77241 ++
77242 ++ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
77243 ++ return true;
77244 ++
77245 ++ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
77246 ++ return true;
77247 ++
77248 ++ if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
77249 ++ return true;
77250 ++
77251 ++ if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
77252 ++ return true;
77253 ++
77254 ++ return subset == superset;
77255 ++}
77256 ++
77257 ++static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
77258 ++{
77259 ++// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
77260 ++// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
77261 ++
77262 ++ return op;
77263 ++}
77264 ++
77265 ++static void register_checker_address_spaces(void *event_data, void *data)
77266 ++{
77267 ++ c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
77268 ++ c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
77269 ++ c_register_addr_space("__user", ADDR_SPACE_USER);
77270 ++ c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
77271 ++// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
77272 ++// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
77273 ++// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
77274 ++// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
77275 ++// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
77276 ++// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
77277 ++
77278 ++ targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
77279 ++ targetm.addr_space.address_mode = checker_addr_space_address_mode;
77280 ++ targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
77281 ++ targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
77282 ++// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
77283 ++ targetm.addr_space.subset_p = checker_addr_space_subset_p;
77284 ++ targetm.addr_space.convert = checker_addr_space_convert;
77285 ++}
77286 ++
77287 ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77288 ++{
77289 ++ const char * const plugin_name = plugin_info->base_name;
77290 ++ const int argc = plugin_info->argc;
77291 ++ const struct plugin_argument * const argv = plugin_info->argv;
77292 ++ int i;
77293 ++
77294 ++ if (!plugin_default_version_check(version, &gcc_version)) {
77295 ++ error(G_("incompatible gcc/plugin versions"));
77296 ++ return 1;
77297 ++ }
77298 ++
77299 ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
77300 ++
77301 ++ for (i = 0; i < argc; ++i)
77302 ++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77303 ++
77304 ++ if (TARGET_64BIT == 0)
77305 ++ return 0;
77306 ++
77307 ++ register_callback (plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
77308 ++
77309 ++ return 0;
77310 ++}
77311 +diff -urNp linux-3.1.1/tools/gcc/constify_plugin.c linux-3.1.1/tools/gcc/constify_plugin.c
77312 +--- linux-3.1.1/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
77313 ++++ linux-3.1.1/tools/gcc/constify_plugin.c 2011-11-16 18:39:08.000000000 -0500
77314 +@@ -0,0 +1,303 @@
77315 ++/*
77316 ++ * Copyright 2011 by Emese Revfy <re.emese@×××××.com>
77317 ++ * Copyright 2011 by PaX Team <pageexec@××××××××.hu>
77318 ++ * Licensed under the GPL v2, or (at your option) v3
77319 ++ *
77320 ++ * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
77321 ++ *
77322 ++ * Homepage:
77323 ++ * http://www.grsecurity.net/~ephox/const_plugin/
77324 ++ *
77325 ++ * Usage:
77326 ++ * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
77327 ++ * $ gcc -fplugin=constify_plugin.so test.c -O2
77328 ++ */
77329 ++
77330 ++#include "gcc-plugin.h"
77331 ++#include "config.h"
77332 ++#include "system.h"
77333 ++#include "coretypes.h"
77334 ++#include "tree.h"
77335 ++#include "tree-pass.h"
77336 ++#include "flags.h"
77337 ++#include "intl.h"
77338 ++#include "toplev.h"
77339 ++#include "plugin.h"
77340 ++#include "diagnostic.h"
77341 ++#include "plugin-version.h"
77342 ++#include "tm.h"
77343 ++#include "function.h"
77344 ++#include "basic-block.h"
77345 ++#include "gimple.h"
77346 ++#include "rtl.h"
77347 ++#include "emit-rtl.h"
77348 ++#include "tree-flow.h"
77349 ++
77350 ++#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
77351 ++
77352 ++int plugin_is_GPL_compatible;
77353 ++
77354 ++static struct plugin_info const_plugin_info = {
77355 ++ .version = "201111150100",
77356 ++ .help = "no-constify\tturn off constification\n",
77357 ++};
77358 ++
77359 ++static void constify_type(tree type);
77360 ++static bool walk_struct(tree node);
77361 ++
77362 ++static tree deconstify_type(tree old_type)
77363 ++{
77364 ++ tree new_type, field;
77365 ++
77366 ++ new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
77367 ++ TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
77368 ++ for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
77369 ++ DECL_FIELD_CONTEXT(field) = new_type;
77370 ++ TYPE_READONLY(new_type) = 0;
77371 ++ C_TYPE_FIELDS_READONLY(new_type) = 0;
77372 ++ return new_type;
77373 ++}
77374 ++
77375 ++static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
77376 ++{
77377 ++ tree type;
77378 ++
77379 ++ *no_add_attrs = true;
77380 ++ if (TREE_CODE(*node) == FUNCTION_DECL) {
77381 ++ error("%qE attribute does not apply to functions", name);
77382 ++ return NULL_TREE;
77383 ++ }
77384 ++
77385 ++ if (TREE_CODE(*node) == VAR_DECL) {
77386 ++ error("%qE attribute does not apply to variables", name);
77387 ++ return NULL_TREE;
77388 ++ }
77389 ++
77390 ++ if (TYPE_P(*node)) {
77391 ++ if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
77392 ++ *no_add_attrs = false;
77393 ++ else
77394 ++ error("%qE attribute applies to struct and union types only", name);
77395 ++ return NULL_TREE;
77396 ++ }
77397 ++
77398 ++ type = TREE_TYPE(*node);
77399 ++
77400 ++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
77401 ++ error("%qE attribute applies to struct and union types only", name);
77402 ++ return NULL_TREE;
77403 ++ }
77404 ++
77405 ++ if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
77406 ++ error("%qE attribute is already applied to the type", name);
77407 ++ return NULL_TREE;
77408 ++ }
77409 ++
77410 ++ if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
77411 ++ error("%qE attribute used on type that is not constified", name);
77412 ++ return NULL_TREE;
77413 ++ }
77414 ++
77415 ++ if (TREE_CODE(*node) == TYPE_DECL) {
77416 ++ TREE_TYPE(*node) = deconstify_type(type);
77417 ++ TREE_READONLY(*node) = 0;
77418 ++ return NULL_TREE;
77419 ++ }
77420 ++
77421 ++ return NULL_TREE;
77422 ++}
77423 ++
77424 ++static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
77425 ++{
77426 ++ *no_add_attrs = true;
77427 ++ if (!TYPE_P(*node)) {
77428 ++ error("%qE attribute applies to types only", name);
77429 ++ return NULL_TREE;
77430 ++ }
77431 ++
77432 ++ if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
77433 ++ error("%qE attribute applies to struct and union types only", name);
77434 ++ return NULL_TREE;
77435 ++ }
77436 ++
77437 ++ *no_add_attrs = false;
77438 ++ constify_type(*node);
77439 ++ return NULL_TREE;
77440 ++}
77441 ++
77442 ++static struct attribute_spec no_const_attr = {
77443 ++ .name = "no_const",
77444 ++ .min_length = 0,
77445 ++ .max_length = 0,
77446 ++ .decl_required = false,
77447 ++ .type_required = false,
77448 ++ .function_type_required = false,
77449 ++ .handler = handle_no_const_attribute,
77450 ++#if __GNUC__ > 4 || __GNUC_MINOR__ >= 7
77451 ++ .affects_type_identity = true
77452 ++#endif
77453 ++};
77454 ++
77455 ++static struct attribute_spec do_const_attr = {
77456 ++ .name = "do_const",
77457 ++ .min_length = 0,
77458 ++ .max_length = 0,
77459 ++ .decl_required = false,
77460 ++ .type_required = false,
77461 ++ .function_type_required = false,
77462 ++ .handler = handle_do_const_attribute,
77463 ++#if __GNUC__ > 4 || __GNUC_MINOR__ >= 7
77464 ++ .affects_type_identity = true
77465 ++#endif
77466 ++};
77467 ++
77468 ++static void register_attributes(void *event_data, void *data)
77469 ++{
77470 ++ register_attribute(&no_const_attr);
77471 ++ register_attribute(&do_const_attr);
77472 ++}
77473 ++
77474 ++static void constify_type(tree type)
77475 ++{
77476 ++ TYPE_READONLY(type) = 1;
77477 ++ C_TYPE_FIELDS_READONLY(type) = 1;
77478 ++}
77479 ++
77480 ++static bool is_fptr(tree field)
77481 ++{
77482 ++ tree ptr = TREE_TYPE(field);
77483 ++
77484 ++ if (TREE_CODE(ptr) != POINTER_TYPE)
77485 ++ return false;
77486 ++
77487 ++ return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
77488 ++}
77489 ++
77490 ++static bool walk_struct(tree node)
77491 ++{
77492 ++ tree field;
77493 ++
77494 ++ if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
77495 ++ return false;
77496 ++
77497 ++ if (TYPE_FIELDS(node) == NULL_TREE)
77498 ++ return false;
77499 ++
77500 ++ for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
77501 ++ tree type = TREE_TYPE(field);
77502 ++ enum tree_code code = TREE_CODE(type);
77503 ++ if (code == RECORD_TYPE || code == UNION_TYPE) {
77504 ++ if (!(walk_struct(type)))
77505 ++ return false;
77506 ++ } else if (!is_fptr(field) && !TREE_READONLY(field))
77507 ++ return false;
77508 ++ }
77509 ++ return true;
77510 ++}
77511 ++
77512 ++static void finish_type(void *event_data, void *data)
77513 ++{
77514 ++ tree type = (tree)event_data;
77515 ++
77516 ++ if (type == NULL_TREE)
77517 ++ return;
77518 ++
77519 ++ if (TYPE_READONLY(type))
77520 ++ return;
77521 ++
77522 ++ if (walk_struct(type))
77523 ++ constify_type(type);
77524 ++}
77525 ++
77526 ++static unsigned int check_local_variables(void);
77527 ++
77528 ++struct gimple_opt_pass pass_local_variable = {
77529 ++ {
77530 ++ .type = GIMPLE_PASS,
77531 ++ .name = "check_local_variables",
77532 ++ .gate = NULL,
77533 ++ .execute = check_local_variables,
77534 ++ .sub = NULL,
77535 ++ .next = NULL,
77536 ++ .static_pass_number = 0,
77537 ++ .tv_id = TV_NONE,
77538 ++ .properties_required = 0,
77539 ++ .properties_provided = 0,
77540 ++ .properties_destroyed = 0,
77541 ++ .todo_flags_start = 0,
77542 ++ .todo_flags_finish = 0
77543 ++ }
77544 ++};
77545 ++
77546 ++static unsigned int check_local_variables(void)
77547 ++{
77548 ++ tree var;
77549 ++ referenced_var_iterator rvi;
77550 ++
77551 ++#if __GNUC__ == 4 && __GNUC_MINOR__ == 5
77552 ++ FOR_EACH_REFERENCED_VAR(var, rvi) {
77553 ++#else
77554 ++ FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
77555 ++#endif
77556 ++ tree type = TREE_TYPE(var);
77557 ++
77558 ++ if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
77559 ++ continue;
77560 ++
77561 ++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
77562 ++ continue;
77563 ++
77564 ++ if (!TYPE_READONLY(type))
77565 ++ continue;
77566 ++
77567 ++// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
77568 ++// continue;
77569 ++
77570 ++// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
77571 ++// continue;
77572 ++
77573 ++ if (walk_struct(type)) {
77574 ++ error("constified variable %qE cannot be local", var);
77575 ++ return 1;
77576 ++ }
77577 ++ }
77578 ++ return 0;
77579 ++}
77580 ++
77581 ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77582 ++{
77583 ++ const char * const plugin_name = plugin_info->base_name;
77584 ++ const int argc = plugin_info->argc;
77585 ++ const struct plugin_argument * const argv = plugin_info->argv;
77586 ++ int i;
77587 ++ bool constify = true;
77588 ++
77589 ++ struct register_pass_info local_variable_pass_info = {
77590 ++ .pass = &pass_local_variable.pass,
77591 ++ .reference_pass_name = "*referenced_vars",
77592 ++ .ref_pass_instance_number = 0,
77593 ++ .pos_op = PASS_POS_INSERT_AFTER
77594 ++ };
77595 ++
77596 ++ if (!plugin_default_version_check(version, &gcc_version)) {
77597 ++ error(G_("incompatible gcc/plugin versions"));
77598 ++ return 1;
77599 ++ }
77600 ++
77601 ++ for (i = 0; i < argc; ++i) {
77602 ++ if (!(strcmp(argv[i].key, "no-constify"))) {
77603 ++ constify = false;
77604 ++ continue;
77605 ++ }
77606 ++ error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77607 ++ }
77608 ++
77609 ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
77610 ++ if (constify) {
77611 ++ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
77612 ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
77613 ++ }
77614 ++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
77615 ++
77616 ++ return 0;
77617 ++}
77618 +diff -urNp linux-3.1.1/tools/gcc/kallocstat_plugin.c linux-3.1.1/tools/gcc/kallocstat_plugin.c
77619 +--- linux-3.1.1/tools/gcc/kallocstat_plugin.c 1969-12-31 19:00:00.000000000 -0500
77620 ++++ linux-3.1.1/tools/gcc/kallocstat_plugin.c 2011-11-16 18:39:08.000000000 -0500
77621 +@@ -0,0 +1,167 @@
77622 ++/*
77623 ++ * Copyright 2011 by the PaX Team <pageexec@××××××××.hu>
77624 ++ * Licensed under the GPL v2
77625 ++ *
77626 ++ * Note: the choice of the license means that the compilation process is
77627 ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77628 ++ * but for the kernel it doesn't matter since it doesn't link against
77629 ++ * any of the gcc libraries
77630 ++ *
77631 ++ * gcc plugin to find the distribution of k*alloc sizes
77632 ++ *
77633 ++ * TODO:
77634 ++ *
77635 ++ * BUGS:
77636 ++ * - none known
77637 ++ */
77638 ++#include "gcc-plugin.h"
77639 ++#include "config.h"
77640 ++#include "system.h"
77641 ++#include "coretypes.h"
77642 ++#include "tree.h"
77643 ++#include "tree-pass.h"
77644 ++#include "flags.h"
77645 ++#include "intl.h"
77646 ++#include "toplev.h"
77647 ++#include "plugin.h"
77648 ++//#include "expr.h" where are you...
77649 ++#include "diagnostic.h"
77650 ++#include "plugin-version.h"
77651 ++#include "tm.h"
77652 ++#include "function.h"
77653 ++#include "basic-block.h"
77654 ++#include "gimple.h"
77655 ++#include "rtl.h"
77656 ++#include "emit-rtl.h"
77657 ++
77658 ++extern void print_gimple_stmt(FILE *, gimple, int, int);
77659 ++
77660 ++int plugin_is_GPL_compatible;
77661 ++
77662 ++static const char * const kalloc_functions[] = {
77663 ++ "__kmalloc",
77664 ++ "kmalloc",
77665 ++ "kmalloc_large",
77666 ++ "kmalloc_node",
77667 ++ "kmalloc_order",
77668 ++ "kmalloc_order_trace",
77669 ++ "kmalloc_slab",
77670 ++ "kzalloc",
77671 ++ "kzalloc_node",
77672 ++};
77673 ++
77674 ++static struct plugin_info kallocstat_plugin_info = {
77675 ++ .version = "201111150100",
77676 ++};
77677 ++
77678 ++static unsigned int execute_kallocstat(void);
77679 ++
77680 ++static struct gimple_opt_pass kallocstat_pass = {
77681 ++ .pass = {
77682 ++ .type = GIMPLE_PASS,
77683 ++ .name = "kallocstat",
77684 ++ .gate = NULL,
77685 ++ .execute = execute_kallocstat,
77686 ++ .sub = NULL,
77687 ++ .next = NULL,
77688 ++ .static_pass_number = 0,
77689 ++ .tv_id = TV_NONE,
77690 ++ .properties_required = 0,
77691 ++ .properties_provided = 0,
77692 ++ .properties_destroyed = 0,
77693 ++ .todo_flags_start = 0,
77694 ++ .todo_flags_finish = 0
77695 ++ }
77696 ++};
77697 ++
77698 ++static bool is_kalloc(const char *fnname)
77699 ++{
77700 ++ size_t i;
77701 ++
77702 ++ for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
77703 ++ if (!strcmp(fnname, kalloc_functions[i]))
77704 ++ return true;
77705 ++ return false;
77706 ++}
77707 ++
77708 ++static unsigned int execute_kallocstat(void)
77709 ++{
77710 ++ basic_block bb;
77711 ++
77712 ++ // 1. loop through BBs and GIMPLE statements
77713 ++ FOR_EACH_BB(bb) {
77714 ++ gimple_stmt_iterator gsi;
77715 ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
77716 ++ // gimple match:
77717 ++ tree fndecl, size;
77718 ++ gimple call_stmt;
77719 ++ const char *fnname;
77720 ++
77721 ++ // is it a call
77722 ++ call_stmt = gsi_stmt(gsi);
77723 ++ if (!is_gimple_call(call_stmt))
77724 ++ continue;
77725 ++ fndecl = gimple_call_fndecl(call_stmt);
77726 ++ if (fndecl == NULL_TREE)
77727 ++ continue;
77728 ++ if (TREE_CODE(fndecl) != FUNCTION_DECL)
77729 ++ continue;
77730 ++
77731 ++ // is it a call to k*alloc
77732 ++ fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
77733 ++ if (!is_kalloc(fnname))
77734 ++ continue;
77735 ++
77736 ++ // is the size arg the result of a simple const assignment
77737 ++ size = gimple_call_arg(call_stmt, 0);
77738 ++ while (true) {
77739 ++ gimple def_stmt;
77740 ++ expanded_location xloc;
77741 ++ size_t size_val;
77742 ++
77743 ++ if (TREE_CODE(size) != SSA_NAME)
77744 ++ break;
77745 ++ def_stmt = SSA_NAME_DEF_STMT(size);
77746 ++ if (!def_stmt || !is_gimple_assign(def_stmt))
77747 ++ break;
77748 ++ if (gimple_num_ops(def_stmt) != 2)
77749 ++ break;
77750 ++ size = gimple_assign_rhs1(def_stmt);
77751 ++ if (!TREE_CONSTANT(size))
77752 ++ continue;
77753 ++ xloc = expand_location(gimple_location(def_stmt));
77754 ++ if (!xloc.file)
77755 ++ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
77756 ++ size_val = TREE_INT_CST_LOW(size);
77757 ++ fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
77758 ++ break;
77759 ++ }
77760 ++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
77761 ++//debug_tree(gimple_call_fn(call_stmt));
77762 ++//print_node(stderr, "pax", fndecl, 4);
77763 ++ }
77764 ++ }
77765 ++
77766 ++ return 0;
77767 ++}
77768 ++
77769 ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77770 ++{
77771 ++ const char * const plugin_name = plugin_info->base_name;
77772 ++ struct register_pass_info kallocstat_pass_info = {
77773 ++ .pass = &kallocstat_pass.pass,
77774 ++ .reference_pass_name = "ssa",
77775 ++ .ref_pass_instance_number = 0,
77776 ++ .pos_op = PASS_POS_INSERT_AFTER
77777 ++ };
77778 ++
77779 ++ if (!plugin_default_version_check(version, &gcc_version)) {
77780 ++ error(G_("incompatible gcc/plugin versions"));
77781 ++ return 1;
77782 ++ }
77783 ++
77784 ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
77785 ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
77786 ++
77787 ++ return 0;
77788 ++}
77789 +diff -urNp linux-3.1.1/tools/gcc/kernexec_plugin.c linux-3.1.1/tools/gcc/kernexec_plugin.c
77790 +--- linux-3.1.1/tools/gcc/kernexec_plugin.c 1969-12-31 19:00:00.000000000 -0500
77791 ++++ linux-3.1.1/tools/gcc/kernexec_plugin.c 2011-11-18 17:57:07.000000000 -0500
77792 +@@ -0,0 +1,275 @@
77793 ++/*
77794 ++ * Copyright 2011 by the PaX Team <pageexec@××××××××.hu>
77795 ++ * Licensed under the GPL v2
77796 ++ *
77797 ++ * Note: the choice of the license means that the compilation process is
77798 ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77799 ++ * but for the kernel it doesn't matter since it doesn't link against
77800 ++ * any of the gcc libraries
77801 ++ *
77802 ++ * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
77803 ++ *
77804 ++ * TODO:
77805 ++ *
77806 ++ * BUGS:
77807 ++ * - none known
77808 ++ */
77809 ++#include "gcc-plugin.h"
77810 ++#include "config.h"
77811 ++#include "system.h"
77812 ++#include "coretypes.h"
77813 ++#include "tree.h"
77814 ++#include "tree-pass.h"
77815 ++#include "flags.h"
77816 ++#include "intl.h"
77817 ++#include "toplev.h"
77818 ++#include "plugin.h"
77819 ++//#include "expr.h" where are you...
77820 ++#include "diagnostic.h"
77821 ++#include "plugin-version.h"
77822 ++#include "tm.h"
77823 ++#include "function.h"
77824 ++#include "basic-block.h"
77825 ++#include "gimple.h"
77826 ++#include "rtl.h"
77827 ++#include "emit-rtl.h"
77828 ++#include "tree-flow.h"
77829 ++
77830 ++extern void print_gimple_stmt(FILE *, gimple, int, int);
77831 ++extern rtx emit_move_insn(rtx x, rtx y);
77832 ++
77833 ++int plugin_is_GPL_compatible;
77834 ++
77835 ++static struct plugin_info kernexec_plugin_info = {
77836 ++ .version = "201111150100",
77837 ++};
77838 ++
77839 ++static unsigned int execute_kernexec_fptr(void);
77840 ++static unsigned int execute_kernexec_retaddr(void);
77841 ++static bool kernexec_cmodel_check(void);
77842 ++
77843 ++static struct gimple_opt_pass kernexec_fptr_pass = {
77844 ++ .pass = {
77845 ++ .type = GIMPLE_PASS,
77846 ++ .name = "kernexec_fptr",
77847 ++ .gate = kernexec_cmodel_check,
77848 ++ .execute = execute_kernexec_fptr,
77849 ++ .sub = NULL,
77850 ++ .next = NULL,
77851 ++ .static_pass_number = 0,
77852 ++ .tv_id = TV_NONE,
77853 ++ .properties_required = 0,
77854 ++ .properties_provided = 0,
77855 ++ .properties_destroyed = 0,
77856 ++ .todo_flags_start = 0,
77857 ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
77858 ++ }
77859 ++};
77860 ++
77861 ++static struct rtl_opt_pass kernexec_retaddr_pass = {
77862 ++ .pass = {
77863 ++ .type = RTL_PASS,
77864 ++ .name = "kernexec_retaddr",
77865 ++ .gate = kernexec_cmodel_check,
77866 ++ .execute = execute_kernexec_retaddr,
77867 ++ .sub = NULL,
77868 ++ .next = NULL,
77869 ++ .static_pass_number = 0,
77870 ++ .tv_id = TV_NONE,
77871 ++ .properties_required = 0,
77872 ++ .properties_provided = 0,
77873 ++ .properties_destroyed = 0,
77874 ++ .todo_flags_start = 0,
77875 ++ .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
77876 ++ }
77877 ++};
77878 ++
77879 ++static bool kernexec_cmodel_check(void)
77880 ++{
77881 ++ tree section;
77882 ++
77883 ++ if (ix86_cmodel != CM_KERNEL)
77884 ++ return false;
77885 ++
77886 ++ section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
77887 ++ if (!section || !TREE_VALUE(section))
77888 ++ return true;
77889 ++
77890 ++ section = TREE_VALUE(TREE_VALUE(section));
77891 ++ if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
77892 ++ return true;
77893 ++
77894 ++ return false;
77895 ++}
77896 ++
77897 ++/*
77898 ++ * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
77899 ++ * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
77900 ++ */
77901 ++static void kernexec_instrument_fptr(gimple_stmt_iterator gsi)
77902 ++{
77903 ++ gimple assign_intptr, assign_new_fptr, call_stmt;
77904 ++ tree intptr, old_fptr, new_fptr, kernexec_mask;
77905 ++
77906 ++ call_stmt = gsi_stmt(gsi);
77907 ++ old_fptr = gimple_call_fn(call_stmt);
77908 ++
77909 ++ // create temporary unsigned long variable used for bitops and cast fptr to it
77910 ++ intptr = create_tmp_var(long_unsigned_type_node, NULL);
77911 ++ add_referenced_var(intptr);
77912 ++ mark_sym_for_renaming(intptr);
77913 ++ assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
77914 ++ update_stmt(assign_intptr);
77915 ++ gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
77916 ++
77917 ++ // apply logical or to temporary unsigned long and bitmask
77918 ++ kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
77919 ++// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
77920 ++ assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
77921 ++ update_stmt(assign_intptr);
77922 ++ gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
77923 ++
77924 ++ // cast temporary unsigned long back to a temporary fptr variable
77925 ++ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), NULL);
77926 ++ add_referenced_var(new_fptr);
77927 ++ mark_sym_for_renaming(new_fptr);
77928 ++ assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
77929 ++ update_stmt(assign_new_fptr);
77930 ++ gsi_insert_before(&gsi, assign_new_fptr, GSI_SAME_STMT);
77931 ++
77932 ++ // replace call stmt fn with the new fptr
77933 ++ gimple_call_set_fn(call_stmt, new_fptr);
77934 ++ update_stmt(call_stmt);
77935 ++}
77936 ++
77937 ++/*
77938 ++ * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
77939 ++ */
77940 ++static unsigned int execute_kernexec_fptr(void)
77941 ++{
77942 ++ basic_block bb;
77943 ++ gimple_stmt_iterator gsi;
77944 ++
77945 ++ // 1. loop through BBs and GIMPLE statements
77946 ++ FOR_EACH_BB(bb) {
77947 ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
77948 ++ // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
77949 ++ tree fn;
77950 ++ gimple call_stmt;
77951 ++
77952 ++ // is it a call ...
77953 ++ call_stmt = gsi_stmt(gsi);
77954 ++ if (!is_gimple_call(call_stmt))
77955 ++ continue;
77956 ++ fn = gimple_call_fn(call_stmt);
77957 ++ if (TREE_CODE(fn) == ADDR_EXPR)
77958 ++ continue;
77959 ++ if (TREE_CODE(fn) != SSA_NAME)
77960 ++ gcc_unreachable();
77961 ++
77962 ++ // ... through a function pointer
77963 ++ fn = SSA_NAME_VAR(fn);
77964 ++ if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
77965 ++ continue;
77966 ++ fn = TREE_TYPE(fn);
77967 ++ if (TREE_CODE(fn) != POINTER_TYPE)
77968 ++ continue;
77969 ++ fn = TREE_TYPE(fn);
77970 ++ if (TREE_CODE(fn) != FUNCTION_TYPE)
77971 ++ continue;
77972 ++
77973 ++ kernexec_instrument_fptr(gsi);
77974 ++
77975 ++//debug_tree(gimple_call_fn(call_stmt));
77976 ++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
77977 ++ }
77978 ++ }
77979 ++
77980 ++ return 0;
77981 ++}
77982 ++
77983 ++// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
77984 ++static void kernexec_instrument_retaddr(rtx insn)
77985 ++{
77986 ++ rtx btsq;
77987 ++ rtvec argvec, constraintvec, labelvec;
77988 ++ int line;
77989 ++
77990 ++ // create asm volatile("btsq $63,(%%rsp)":::)
77991 ++ argvec = rtvec_alloc(0);
77992 ++ constraintvec = rtvec_alloc(0);
77993 ++ labelvec = rtvec_alloc(0);
77994 ++ line = expand_location(RTL_LOCATION(insn)).line;
77995 ++ btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
77996 ++ MEM_VOLATILE_P(btsq) = 1;
77997 ++// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
77998 ++ emit_insn_before(btsq, insn);
77999 ++}
78000 ++
78001 ++/*
78002 ++ * find all asm level function returns and forcibly set the highest bit of the return address
78003 ++ */
78004 ++static unsigned int execute_kernexec_retaddr(void)
78005 ++{
78006 ++ rtx insn;
78007 ++
78008 ++ // 1. find function returns
78009 ++ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
78010 ++ // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
78011 ++ // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
78012 ++ rtx body;
78013 ++
78014 ++ // is it a retn
78015 ++ if (!JUMP_P(insn))
78016 ++ continue;
78017 ++ body = PATTERN(insn);
78018 ++ if (GET_CODE(body) == PARALLEL)
78019 ++ body = XVECEXP(body, 0, 0);
78020 ++ if (GET_CODE(body) != RETURN)
78021 ++ continue;
78022 ++ kernexec_instrument_retaddr(insn);
78023 ++ }
78024 ++
78025 ++// print_simple_rtl(stderr, get_insns());
78026 ++// print_rtl(stderr, get_insns());
78027 ++
78028 ++ return 0;
78029 ++}
78030 ++
78031 ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78032 ++{
78033 ++ const char * const plugin_name = plugin_info->base_name;
78034 ++ const int argc = plugin_info->argc;
78035 ++ const struct plugin_argument * const argv = plugin_info->argv;
78036 ++ int i;
78037 ++ struct register_pass_info kernexec_fptr_pass_info = {
78038 ++ .pass = &kernexec_fptr_pass.pass,
78039 ++ .reference_pass_name = "ssa",
78040 ++ .ref_pass_instance_number = 0,
78041 ++ .pos_op = PASS_POS_INSERT_AFTER
78042 ++ };
78043 ++ struct register_pass_info kernexec_retaddr_pass_info = {
78044 ++ .pass = &kernexec_retaddr_pass.pass,
78045 ++ .reference_pass_name = "pro_and_epilogue",
78046 ++ .ref_pass_instance_number = 0,
78047 ++ .pos_op = PASS_POS_INSERT_AFTER
78048 ++ };
78049 ++
78050 ++ if (!plugin_default_version_check(version, &gcc_version)) {
78051 ++ error(G_("incompatible gcc/plugin versions"));
78052 ++ return 1;
78053 ++ }
78054 ++
78055 ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
78056 ++
78057 ++ for (i = 0; i < argc; ++i)
78058 ++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78059 ++
78060 ++ if (TARGET_64BIT == 0)
78061 ++ return 0;
78062 ++
78063 ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
78064 ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
78065 ++
78066 ++ return 0;
78067 ++}
78068 +diff -urNp linux-3.1.1/tools/gcc/Makefile linux-3.1.1/tools/gcc/Makefile
78069 +--- linux-3.1.1/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
78070 ++++ linux-3.1.1/tools/gcc/Makefile 2011-11-16 20:37:08.000000000 -0500
78071 +@@ -0,0 +1,21 @@
78072 ++#CC := gcc
78073 ++#PLUGIN_SOURCE_FILES := pax_plugin.c
78074 ++#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
78075 ++GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
78076 ++#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
78077 ++
78078 ++HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
78079 ++
78080 ++hostlibs-y := constify_plugin.so
78081 ++hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
78082 ++hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
78083 ++hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
78084 ++hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
78085 ++
78086 ++always := $(hostlibs-y)
78087 ++
78088 ++constify_plugin-objs := constify_plugin.o
78089 ++stackleak_plugin-objs := stackleak_plugin.o
78090 ++kallocstat_plugin-objs := kallocstat_plugin.o
78091 ++kernexec_plugin-objs := kernexec_plugin.o
78092 ++checker_plugin-objs := checker_plugin.o
78093 +diff -urNp linux-3.1.1/tools/gcc/stackleak_plugin.c linux-3.1.1/tools/gcc/stackleak_plugin.c
78094 +--- linux-3.1.1/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
78095 ++++ linux-3.1.1/tools/gcc/stackleak_plugin.c 2011-11-16 18:39:08.000000000 -0500
78096 +@@ -0,0 +1,291 @@
78097 ++/*
78098 ++ * Copyright 2011 by the PaX Team <pageexec@××××××××.hu>
78099 ++ * Licensed under the GPL v2
78100 ++ *
78101 ++ * Note: the choice of the license means that the compilation process is
78102 ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78103 ++ * but for the kernel it doesn't matter since it doesn't link against
78104 ++ * any of the gcc libraries
78105 ++ *
78106 ++ * gcc plugin to help implement various PaX features
78107 ++ *
78108 ++ * - track lowest stack pointer
78109 ++ *
78110 ++ * TODO:
78111 ++ * - initialize all local variables
78112 ++ *
78113 ++ * BUGS:
78114 ++ * - none known
78115 ++ */
78116 ++#include "gcc-plugin.h"
78117 ++#include "config.h"
78118 ++#include "system.h"
78119 ++#include "coretypes.h"
78120 ++#include "tree.h"
78121 ++#include "tree-pass.h"
78122 ++#include "flags.h"
78123 ++#include "intl.h"
78124 ++#include "toplev.h"
78125 ++#include "plugin.h"
78126 ++//#include "expr.h" where are you...
78127 ++#include "diagnostic.h"
78128 ++#include "plugin-version.h"
78129 ++#include "tm.h"
78130 ++#include "function.h"
78131 ++#include "basic-block.h"
78132 ++#include "gimple.h"
78133 ++#include "rtl.h"
78134 ++#include "emit-rtl.h"
78135 ++
78136 ++extern void print_gimple_stmt(FILE *, gimple, int, int);
78137 ++
78138 ++int plugin_is_GPL_compatible;
78139 ++
78140 ++static int track_frame_size = -1;
78141 ++static const char track_function[] = "pax_track_stack";
78142 ++static const char check_function[] = "pax_check_alloca";
78143 ++static bool init_locals;
78144 ++
78145 ++static struct plugin_info stackleak_plugin_info = {
78146 ++ .version = "201111150100",
78147 ++ .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
78148 ++// "initialize-locals\t\tforcibly initialize all stack frames\n"
78149 ++};
78150 ++
78151 ++static bool gate_stackleak_track_stack(void);
78152 ++static unsigned int execute_stackleak_tree_instrument(void);
78153 ++static unsigned int execute_stackleak_final(void);
78154 ++
78155 ++static struct gimple_opt_pass stackleak_tree_instrument_pass = {
78156 ++ .pass = {
78157 ++ .type = GIMPLE_PASS,
78158 ++ .name = "stackleak_tree_instrument",
78159 ++ .gate = gate_stackleak_track_stack,
78160 ++ .execute = execute_stackleak_tree_instrument,
78161 ++ .sub = NULL,
78162 ++ .next = NULL,
78163 ++ .static_pass_number = 0,
78164 ++ .tv_id = TV_NONE,
78165 ++ .properties_required = PROP_gimple_leh | PROP_cfg,
78166 ++ .properties_provided = 0,
78167 ++ .properties_destroyed = 0,
78168 ++ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
78169 ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
78170 ++ }
78171 ++};
78172 ++
78173 ++static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
78174 ++ .pass = {
78175 ++ .type = RTL_PASS,
78176 ++ .name = "stackleak_final",
78177 ++ .gate = gate_stackleak_track_stack,
78178 ++ .execute = execute_stackleak_final,
78179 ++ .sub = NULL,
78180 ++ .next = NULL,
78181 ++ .static_pass_number = 0,
78182 ++ .tv_id = TV_NONE,
78183 ++ .properties_required = 0,
78184 ++ .properties_provided = 0,
78185 ++ .properties_destroyed = 0,
78186 ++ .todo_flags_start = 0,
78187 ++ .todo_flags_finish = TODO_dump_func
78188 ++ }
78189 ++};
78190 ++
78191 ++static bool gate_stackleak_track_stack(void)
78192 ++{
78193 ++ return track_frame_size >= 0;
78194 ++}
78195 ++
78196 ++static void stackleak_check_alloca(gimple_stmt_iterator gsi)
78197 ++{
78198 ++ gimple check_alloca;
78199 ++ tree fndecl, fntype, alloca_size;
78200 ++
78201 ++ // insert call to void pax_check_alloca(unsigned long size)
78202 ++ fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
78203 ++ fndecl = build_fn_decl(check_function, fntype);
78204 ++ DECL_ASSEMBLER_NAME(fndecl); // for LTO
78205 ++ alloca_size = gimple_call_arg(gsi_stmt(gsi), 0);
78206 ++ check_alloca = gimple_build_call(fndecl, 1, alloca_size);
78207 ++ gsi_insert_before(&gsi, check_alloca, GSI_CONTINUE_LINKING);
78208 ++}
78209 ++
78210 ++static void stackleak_add_instrumentation(gimple_stmt_iterator gsi)
78211 ++{
78212 ++ gimple track_stack;
78213 ++ tree fndecl, fntype;
78214 ++
78215 ++ // insert call to void pax_track_stack(void)
78216 ++ fntype = build_function_type_list(void_type_node, NULL_TREE);
78217 ++ fndecl = build_fn_decl(track_function, fntype);
78218 ++ DECL_ASSEMBLER_NAME(fndecl); // for LTO
78219 ++ track_stack = gimple_build_call(fndecl, 0);
78220 ++ gsi_insert_after(&gsi, track_stack, GSI_CONTINUE_LINKING);
78221 ++}
78222 ++
78223 ++#if __GNUC__ == 4 && __GNUC_MINOR__ == 5
78224 ++static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
78225 ++{
78226 ++ tree fndecl;
78227 ++
78228 ++ if (!is_gimple_call(stmt))
78229 ++ return false;
78230 ++ fndecl = gimple_call_fndecl(stmt);
78231 ++ if (!fndecl)
78232 ++ return false;
78233 ++ if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
78234 ++ return false;
78235 ++// print_node(stderr, "pax", fndecl, 4);
78236 ++ return DECL_FUNCTION_CODE(fndecl) == code;
78237 ++}
78238 ++#endif
78239 ++
78240 ++static bool is_alloca(gimple stmt)
78241 ++{
78242 ++ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
78243 ++ return true;
78244 ++
78245 ++#if __GNUC__ > 4 || __GNUC_MINOR__ >= 7
78246 ++ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
78247 ++ return true;
78248 ++#endif
78249 ++
78250 ++ return false;
78251 ++}
78252 ++
78253 ++static unsigned int execute_stackleak_tree_instrument(void)
78254 ++{
78255 ++ basic_block bb, entry_bb;
78256 ++ bool prologue_instrumented = false;
78257 ++
78258 ++ entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
78259 ++
78260 ++ // 1. loop through BBs and GIMPLE statements
78261 ++ FOR_EACH_BB(bb) {
78262 ++ gimple_stmt_iterator gsi;
78263 ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78264 ++ // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
78265 ++ if (!is_alloca(gsi_stmt(gsi)))
78266 ++ continue;
78267 ++
78268 ++ // 2. insert stack overflow check before each __builtin_alloca call
78269 ++ stackleak_check_alloca(gsi);
78270 ++
78271 ++ // 3. insert track call after each __builtin_alloca call
78272 ++ stackleak_add_instrumentation(gsi);
78273 ++ if (bb == entry_bb)
78274 ++ prologue_instrumented = true;
78275 ++ }
78276 ++ }
78277 ++
78278 ++ // 4. insert track call at the beginning
78279 ++ if (!prologue_instrumented) {
78280 ++ bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
78281 ++ if (dom_info_available_p(CDI_DOMINATORS))
78282 ++ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
78283 ++ stackleak_add_instrumentation(gsi_start_bb(bb));
78284 ++ }
78285 ++
78286 ++ return 0;
78287 ++}
78288 ++
78289 ++static unsigned int execute_stackleak_final(void)
78290 ++{
78291 ++ rtx insn;
78292 ++
78293 ++ if (cfun->calls_alloca)
78294 ++ return 0;
78295 ++
78296 ++ // keep calls only if function frame is big enough
78297 ++ if (get_frame_size() >= track_frame_size)
78298 ++ return 0;
78299 ++
78300 ++ // 1. find pax_track_stack calls
78301 ++ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
78302 ++ // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
78303 ++ rtx body;
78304 ++
78305 ++ if (!CALL_P(insn))
78306 ++ continue;
78307 ++ body = PATTERN(insn);
78308 ++ if (GET_CODE(body) != CALL)
78309 ++ continue;
78310 ++ body = XEXP(body, 0);
78311 ++ if (GET_CODE(body) != MEM)
78312 ++ continue;
78313 ++ body = XEXP(body, 0);
78314 ++ if (GET_CODE(body) != SYMBOL_REF)
78315 ++ continue;
78316 ++ if (strcmp(XSTR(body, 0), track_function))
78317 ++ continue;
78318 ++// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
78319 ++ // 2. delete call
78320 ++ insn = delete_insn_and_edges(insn);
78321 ++#if __GNUC__ > 4 || __GNUC_MINOR__ >= 7
78322 ++ if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
78323 ++ insn = delete_insn_and_edges(insn);
78324 ++#endif
78325 ++ }
78326 ++
78327 ++// print_simple_rtl(stderr, get_insns());
78328 ++// print_rtl(stderr, get_insns());
78329 ++// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
78330 ++
78331 ++ return 0;
78332 ++}
78333 ++
78334 ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78335 ++{
78336 ++ const char * const plugin_name = plugin_info->base_name;
78337 ++ const int argc = plugin_info->argc;
78338 ++ const struct plugin_argument * const argv = plugin_info->argv;
78339 ++ int i;
78340 ++ struct register_pass_info stackleak_tree_instrument_pass_info = {
78341 ++ .pass = &stackleak_tree_instrument_pass.pass,
78342 ++// .reference_pass_name = "tree_profile",
78343 ++ .reference_pass_name = "optimized",
78344 ++ .ref_pass_instance_number = 0,
78345 ++ .pos_op = PASS_POS_INSERT_AFTER
78346 ++ };
78347 ++ struct register_pass_info stackleak_final_pass_info = {
78348 ++ .pass = &stackleak_final_rtl_opt_pass.pass,
78349 ++ .reference_pass_name = "final",
78350 ++ .ref_pass_instance_number = 0,
78351 ++ .pos_op = PASS_POS_INSERT_BEFORE
78352 ++ };
78353 ++
78354 ++ if (!plugin_default_version_check(version, &gcc_version)) {
78355 ++ error(G_("incompatible gcc/plugin versions"));
78356 ++ return 1;
78357 ++ }
78358 ++
78359 ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
78360 ++
78361 ++ for (i = 0; i < argc; ++i) {
78362 ++ if (!strcmp(argv[i].key, "track-lowest-sp")) {
78363 ++ if (!argv[i].value) {
78364 ++ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78365 ++ continue;
78366 ++ }
78367 ++ track_frame_size = atoi(argv[i].value);
78368 ++ if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
78369 ++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78370 ++ continue;
78371 ++ }
78372 ++ if (!strcmp(argv[i].key, "initialize-locals")) {
78373 ++ if (argv[i].value) {
78374 ++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78375 ++ continue;
78376 ++ }
78377 ++ init_locals = true;
78378 ++ continue;
78379 ++ }
78380 ++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78381 ++ }
78382 ++
78383 ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
78384 ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
78385 ++
78386 ++ return 0;
78387 ++}
78388 +diff -urNp linux-3.1.1/tools/perf/util/include/asm/alternative-asm.h linux-3.1.1/tools/perf/util/include/asm/alternative-asm.h
78389 +--- linux-3.1.1/tools/perf/util/include/asm/alternative-asm.h 2011-11-11 15:19:27.000000000 -0500
78390 ++++ linux-3.1.1/tools/perf/util/include/asm/alternative-asm.h 2011-11-16 18:39:08.000000000 -0500
78391 +@@ -5,4 +5,7 @@
78392 +
78393 + #define altinstruction_entry #
78394 +
78395 ++ .macro pax_force_retaddr rip=0
78396 ++ .endm
78397 ++
78398 + #endif
78399 +diff -urNp linux-3.1.1/usr/gen_init_cpio.c linux-3.1.1/usr/gen_init_cpio.c
78400 +--- linux-3.1.1/usr/gen_init_cpio.c 2011-11-11 15:19:27.000000000 -0500
78401 ++++ linux-3.1.1/usr/gen_init_cpio.c 2011-11-16 18:39:08.000000000 -0500
78402 +@@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name,
78403 + int retval;
78404 + int rc = -1;
78405 + int namesize;
78406 +- int i;
78407 ++ unsigned int i;
78408 +
78409 + mode |= S_IFREG;
78410 +
78411 +@@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_
78412 + *env_var = *expanded = '\0';
78413 + strncat(env_var, start + 2, end - start - 2);
78414 + strncat(expanded, new_location, start - new_location);
78415 +- strncat(expanded, getenv(env_var), PATH_MAX);
78416 +- strncat(expanded, end + 1, PATH_MAX);
78417 ++ strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
78418 ++ strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
78419 + strncpy(new_location, expanded, PATH_MAX);
78420 ++ new_location[PATH_MAX] = 0;
78421 + } else
78422 + break;
78423 + }
78424 +diff -urNp linux-3.1.1/virt/kvm/kvm_main.c linux-3.1.1/virt/kvm/kvm_main.c
78425 +--- linux-3.1.1/virt/kvm/kvm_main.c 2011-11-11 15:19:27.000000000 -0500
78426 ++++ linux-3.1.1/virt/kvm/kvm_main.c 2011-11-16 18:39:08.000000000 -0500
78427 +@@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
78428 +
78429 + static cpumask_var_t cpus_hardware_enabled;
78430 + static int kvm_usage_count = 0;
78431 +-static atomic_t hardware_enable_failed;
78432 ++static atomic_unchecked_t hardware_enable_failed;
78433 +
78434 + struct kmem_cache *kvm_vcpu_cache;
78435 + EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
78436 +@@ -2266,7 +2266,7 @@ static void hardware_enable_nolock(void
78437 +
78438 + if (r) {
78439 + cpumask_clear_cpu(cpu, cpus_hardware_enabled);
78440 +- atomic_inc(&hardware_enable_failed);
78441 ++ atomic_inc_unchecked(&hardware_enable_failed);
78442 + printk(KERN_INFO "kvm: enabling virtualization on "
78443 + "CPU%d failed\n", cpu);
78444 + }
78445 +@@ -2320,10 +2320,10 @@ static int hardware_enable_all(void)
78446 +
78447 + kvm_usage_count++;
78448 + if (kvm_usage_count == 1) {
78449 +- atomic_set(&hardware_enable_failed, 0);
78450 ++ atomic_set_unchecked(&hardware_enable_failed, 0);
78451 + on_each_cpu(hardware_enable_nolock, NULL, 1);
78452 +
78453 +- if (atomic_read(&hardware_enable_failed)) {
78454 ++ if (atomic_read_unchecked(&hardware_enable_failed)) {
78455 + hardware_disable_all_nolock();
78456 + r = -EBUSY;
78457 + }
78458 +@@ -2588,7 +2588,7 @@ static void kvm_sched_out(struct preempt
78459 + kvm_arch_vcpu_put(vcpu);
78460 + }
78461 +
78462 +-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
78463 ++int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
78464 + struct module *module)
78465 + {
78466 + int r;
78467 +@@ -2651,7 +2651,7 @@ int kvm_init(void *opaque, unsigned vcpu
78468 + if (!vcpu_align)
78469 + vcpu_align = __alignof__(struct kvm_vcpu);
78470 + kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
78471 +- 0, NULL);
78472 ++ SLAB_USERCOPY, NULL);
78473 + if (!kvm_vcpu_cache) {
78474 + r = -ENOMEM;
78475 + goto out_free_3;
78476 +@@ -2661,9 +2661,11 @@ int kvm_init(void *opaque, unsigned vcpu
78477 + if (r)
78478 + goto out_free;
78479 +
78480 +- kvm_chardev_ops.owner = module;
78481 +- kvm_vm_fops.owner = module;
78482 +- kvm_vcpu_fops.owner = module;
78483 ++ pax_open_kernel();
78484 ++ *(void **)&kvm_chardev_ops.owner = module;
78485 ++ *(void **)&kvm_vm_fops.owner = module;
78486 ++ *(void **)&kvm_vcpu_fops.owner = module;
78487 ++ pax_close_kernel();
78488 +
78489 + r = misc_register(&kvm_dev);
78490 + if (r) {
78491
78492 diff --git a/3.1.1/4430_remove-legacy-EI_PAX.patch b/3.1.1/4430_remove-legacy-EI_PAX.patch
78493 new file mode 100644
78494 index 0000000..35aff7a
78495 --- /dev/null
78496 +++ b/3.1.1/4430_remove-legacy-EI_PAX.patch
78497 @@ -0,0 +1,207 @@
78498 +diff -Naur linux-3.1.1-xtpax.orig//fs/binfmt_elf.c linux-3.1.1-xtpax/fs/binfmt_elf.c
78499 +--- linux-3.1.1-xtpax.orig//fs/binfmt_elf.c 2011-11-20 20:17:18.968732978 +0000
78500 ++++ linux-3.1.1-xtpax/fs/binfmt_elf.c 2011-11-20 20:21:07.237738723 +0000
78501 +@@ -553,7 +553,7 @@
78502 + return error;
78503 + }
78504 +
78505 +-#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
78506 ++#if (defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
78507 + static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
78508 + {
78509 + unsigned long pax_flags = 0UL;
78510 +@@ -639,50 +639,7 @@
78511 + }
78512 + #endif
78513 +
78514 +-#ifdef CONFIG_PAX_EI_PAX
78515 +-static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
78516 +-{
78517 +- unsigned long pax_flags = 0UL;
78518 +-
78519 +-#ifdef CONFIG_PAX_PAGEEXEC
78520 +- if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
78521 +- pax_flags |= MF_PAX_PAGEEXEC;
78522 +-#endif
78523 +-
78524 +-#ifdef CONFIG_PAX_SEGMEXEC
78525 +- if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
78526 +- pax_flags |= MF_PAX_SEGMEXEC;
78527 +-#endif
78528 +-
78529 +-#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
78530 +- if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
78531 +- if ((__supported_pte_mask & _PAGE_NX))
78532 +- pax_flags &= ~MF_PAX_SEGMEXEC;
78533 +- else
78534 +- pax_flags &= ~MF_PAX_PAGEEXEC;
78535 +- }
78536 +-#endif
78537 +-
78538 +-#ifdef CONFIG_PAX_EMUTRAMP
78539 +- if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
78540 +- pax_flags |= MF_PAX_EMUTRAMP;
78541 +-#endif
78542 +-
78543 +-#ifdef CONFIG_PAX_MPROTECT
78544 +- if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
78545 +- pax_flags |= MF_PAX_MPROTECT;
78546 +-#endif
78547 +-
78548 +-#ifdef CONFIG_PAX_ASLR
78549 +- if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
78550 +- pax_flags |= MF_PAX_RANDMMAP;
78551 +-#endif
78552 +-
78553 +- return pax_flags;
78554 +-}
78555 +-#endif
78556 +-
78557 +-#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
78558 ++#if defined(CONFIG_PAX_PT_PAX_FLAGS)
78559 + static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
78560 + {
78561 + unsigned long pax_flags = 0UL;
78562 +@@ -692,10 +649,6 @@
78563 + int found_flags = 0;
78564 + #endif
78565 +
78566 +-#ifdef CONFIG_PAX_EI_PAX
78567 +- pax_flags = pax_parse_ei_pax(elf_ex);
78568 +-#endif
78569 +-
78570 + #ifdef CONFIG_PAX_PT_PAX_FLAGS
78571 + for (i = 0UL; i < elf_ex->e_phnum; i++)
78572 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
78573 +@@ -718,7 +671,7 @@
78574 + }
78575 + #endif
78576 +
78577 +-#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
78578 ++#if defined(CONFIG_PAX_PT_PAX_FLAGS)
78579 + if (found_flags == 0) {
78580 + struct elf_phdr phdr;
78581 + memset(&phdr, 0, sizeof(phdr));
78582 +@@ -950,7 +903,7 @@
78583 +
78584 + current->mm->def_flags = 0;
78585 +
78586 +-#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
78587 ++#if defined(CONFIG_PAX_PT_PAX_FLAGS)
78588 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
78589 + send_sig(SIGKILL, current, 0);
78590 + goto out_free_dentry;
78591 +diff -Naur linux-3.1.1-xtpax.orig//grsecurity/Kconfig linux-3.1.1-xtpax/grsecurity/Kconfig
78592 +--- linux-3.1.1-xtpax.orig//grsecurity/Kconfig 2011-11-20 20:17:19.115732982 +0000
78593 ++++ linux-3.1.1-xtpax/grsecurity/Kconfig 2011-11-20 20:21:07.238738723 +0000
78594 +@@ -47,7 +47,6 @@
78595 + config GRKERNSEC_MEDIUM
78596 + bool "Medium"
78597 + select PAX
78598 +- select PAX_EI_PAX
78599 + select PAX_PT_PAX_FLAGS
78600 + select PAX_HAVE_ACL_FLAGS
78601 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
78602 +@@ -143,7 +142,6 @@
78603 + select PAX_RANDMMAP
78604 + select PAX_NOEXEC
78605 + select PAX_MPROTECT
78606 +- select PAX_EI_PAX
78607 + select PAX_PT_PAX_FLAGS
78608 + select PAX_HAVE_ACL_FLAGS
78609 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
78610 +diff -Naur linux-3.1.1-xtpax.orig//include/linux/elf.h linux-3.1.1-xtpax/include/linux/elf.h
78611 +--- linux-3.1.1-xtpax.orig//include/linux/elf.h 2011-11-20 20:17:19.136732982 +0000
78612 ++++ linux-3.1.1-xtpax/include/linux/elf.h 2011-11-20 20:21:07.239738723 +0000
78613 +@@ -370,8 +370,6 @@
78614 + #define EI_OSABI 7
78615 + #define EI_PAD 8
78616 +
78617 +-#define EI_PAX 14
78618 +-
78619 + #define ELFMAG0 0x7f /* EI_MAG */
78620 + #define ELFMAG1 'E'
78621 + #define ELFMAG2 'L'
78622 +diff -Naur linux-3.1.1-xtpax.orig//include/linux/grsecurity.h linux-3.1.1-xtpax/include/linux/grsecurity.h
78623 +--- linux-3.1.1-xtpax.orig//include/linux/grsecurity.h 2011-11-20 20:17:19.146732983 +0000
78624 ++++ linux-3.1.1-xtpax/include/linux/grsecurity.h 2011-11-20 20:21:07.240738723 +0000
78625 +@@ -12,11 +12,11 @@
78626 + #if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
78627 + #error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
78628 + #endif
78629 +-#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
78630 +-#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
78631 ++#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
78632 ++#error "CONFIG_PAX_NOEXEC enabled, but CONFIG_PAX_PT_PAX_FLAGS is not enabled."
78633 + #endif
78634 +-#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
78635 +-#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
78636 ++#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
78637 ++#error "CONFIG_PAX_ASLR enabled, but CONFIG_PAX_PT_PAX_FLAGS is not enabled."
78638 + #endif
78639 + #if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
78640 + #error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
78641 +diff -Naur linux-3.1.1-xtpax.orig//include/linux/mm_types.h linux-3.1.1-xtpax/include/linux/mm_types.h
78642 +--- linux-3.1.1-xtpax.orig//include/linux/mm_types.h 2011-11-20 20:17:19.159732983 +0000
78643 ++++ linux-3.1.1-xtpax/include/linux/mm_types.h 2011-11-20 20:21:07.241738723 +0000
78644 +@@ -365,7 +365,7 @@
78645 + struct cpumask cpumask_allocation;
78646 + #endif
78647 +
78648 +-#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
78649 ++#if defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
78650 + unsigned long pax_flags;
78651 + #endif
78652 +
78653 +diff -Naur linux-3.1.1-xtpax.orig//security/Kconfig linux-3.1.1-xtpax/security/Kconfig
78654 +--- linux-3.1.1-xtpax.orig//security/Kconfig 2011-11-20 20:17:19.478732991 +0000
78655 ++++ linux-3.1.1-xtpax/security/Kconfig 2011-11-20 20:21:07.242738723 +0000
78656 +@@ -51,20 +51,6 @@
78657 + line option on boot. Furthermore you can control various PaX features
78658 + at runtime via the entries in /proc/sys/kernel/pax.
78659 +
78660 +-config PAX_EI_PAX
78661 +- bool 'Use legacy ELF header marking'
78662 +- help
78663 +- Enabling this option will allow you to control PaX features on
78664 +- a per executable basis via the 'chpax' utility available at
78665 +- http://pax.grsecurity.net/. The control flags will be read from
78666 +- an otherwise reserved part of the ELF header. This marking has
78667 +- numerous drawbacks (no support for soft-mode, toolchain does not
78668 +- know about the non-standard use of the ELF header) therefore it
78669 +- has been deprecated in favour of PT_PAX_FLAGS support.
78670 +-
78671 +- Note that if you enable PT_PAX_FLAGS marking support as well,
78672 +- the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
78673 +-
78674 + config PAX_PT_PAX_FLAGS
78675 + bool 'Use ELF program header marking'
78676 + help
78677 +@@ -79,9 +65,6 @@
78678 + If your toolchain does not support PT_PAX_FLAGS markings,
78679 + you can create one in most cases with 'paxctl -C'.
78680 +
78681 +- Note that if you enable the legacy EI_PAX marking support as well,
78682 +- the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
78683 +-
78684 + choice
78685 + prompt 'MAC system integration'
78686 + default PAX_HAVE_ACL_FLAGS
78687 +@@ -113,7 +96,7 @@
78688 +
78689 + config PAX_NOEXEC
78690 + bool "Enforce non-executable pages"
78691 +- depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
78692 ++ depends on (PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
78693 + help
78694 + By design some architectures do not allow for protecting memory
78695 + pages against execution or even if they do, Linux does not make
78696 +@@ -360,7 +343,7 @@
78697 +
78698 + config PAX_ASLR
78699 + bool "Address Space Layout Randomization"
78700 +- depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
78701 ++ depends on PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
78702 + help
78703 + Many if not most exploit techniques rely on the knowledge of
78704 + certain addresses in the attacked program. The following options
78705
78706 diff --git a/3.1.1/4440_replace-PT_PAX-with-XT_PAX.patch b/3.1.1/4440_replace-PT_PAX-with-XT_PAX.patch
78707 new file mode 100644
78708 index 0000000..5b9c248
78709 --- /dev/null
78710 +++ b/3.1.1/4440_replace-PT_PAX-with-XT_PAX.patch
78711 @@ -0,0 +1,322 @@
78712 +diff -Naur linux-3.1.1-xtpax.orig//fs/binfmt_elf.c linux-3.1.1-xtpax/fs/binfmt_elf.c
78713 +--- linux-3.1.1-xtpax.orig//fs/binfmt_elf.c 2011-11-20 20:24:21.599743615 +0000
78714 ++++ linux-3.1.1-xtpax/fs/binfmt_elf.c 2011-11-20 20:33:31.546757452 +0000
78715 +@@ -32,6 +32,7 @@
78716 + #include <linux/elf.h>
78717 + #include <linux/utsname.h>
78718 + #include <linux/coredump.h>
78719 ++#include <linux/xattr.h>
78720 + #include <asm/uaccess.h>
78721 + #include <asm/param.h>
78722 + #include <asm/page.h>
78723 +@@ -553,18 +554,18 @@
78724 + return error;
78725 + }
78726 +
78727 +-#if (defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
78728 +-static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
78729 ++#if (defined(CONFIG_PAX_XT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
78730 ++static unsigned long pax_parse_softmode(unsigned long read_flags)
78731 + {
78732 + unsigned long pax_flags = 0UL;
78733 +
78734 + #ifdef CONFIG_PAX_PAGEEXEC
78735 +- if (elf_phdata->p_flags & PF_PAGEEXEC)
78736 ++ if (read_flags & PF_PAGEEXEC)
78737 + pax_flags |= MF_PAX_PAGEEXEC;
78738 + #endif
78739 +
78740 + #ifdef CONFIG_PAX_SEGMEXEC
78741 +- if (elf_phdata->p_flags & PF_SEGMEXEC)
78742 ++ if (read_flags & PF_SEGMEXEC)
78743 + pax_flags |= MF_PAX_SEGMEXEC;
78744 + #endif
78745 +
78746 +@@ -578,17 +579,17 @@
78747 + #endif
78748 +
78749 + #ifdef CONFIG_PAX_EMUTRAMP
78750 +- if (elf_phdata->p_flags & PF_EMUTRAMP)
78751 ++ if (read_flags & PF_EMUTRAMP)
78752 + pax_flags |= MF_PAX_EMUTRAMP;
78753 + #endif
78754 +
78755 + #ifdef CONFIG_PAX_MPROTECT
78756 +- if (elf_phdata->p_flags & PF_MPROTECT)
78757 ++ if (read_flags & PF_MPROTECT)
78758 + pax_flags |= MF_PAX_MPROTECT;
78759 + #endif
78760 +
78761 + #if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
78762 +- if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
78763 ++ if (randomize_va_space && (read_flags & PF_RANDMMAP))
78764 + pax_flags |= MF_PAX_RANDMMAP;
78765 + #endif
78766 +
78767 +@@ -596,18 +597,18 @@
78768 + }
78769 + #endif
78770 +
78771 +-#ifdef CONFIG_PAX_PT_PAX_FLAGS
78772 +-static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
78773 ++#ifdef CONFIG_PAX_XT_PAX_FLAGS
78774 ++static unsigned long pax_parse_hardmode(unsigned long read_flags)
78775 + {
78776 + unsigned long pax_flags = 0UL;
78777 +
78778 + #ifdef CONFIG_PAX_PAGEEXEC
78779 +- if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
78780 ++ if (!(read_flags & PF_NOPAGEEXEC))
78781 + pax_flags |= MF_PAX_PAGEEXEC;
78782 + #endif
78783 +
78784 + #ifdef CONFIG_PAX_SEGMEXEC
78785 +- if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
78786 ++ if (!(read_flags & PF_NOSEGMEXEC))
78787 + pax_flags |= MF_PAX_SEGMEXEC;
78788 + #endif
78789 +
78790 +@@ -621,17 +622,17 @@
78791 + #endif
78792 +
78793 + #ifdef CONFIG_PAX_EMUTRAMP
78794 +- if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
78795 ++ if (!(read_flags & PF_NOEMUTRAMP))
78796 + pax_flags |= MF_PAX_EMUTRAMP;
78797 + #endif
78798 +
78799 + #ifdef CONFIG_PAX_MPROTECT
78800 +- if (!(elf_phdata->p_flags & PF_NOMPROTECT))
78801 ++ if (!(read_flags & PF_NOMPROTECT))
78802 + pax_flags |= MF_PAX_MPROTECT;
78803 + #endif
78804 +
78805 + #if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
78806 +- if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
78807 ++ if (randomize_va_space && !(read_flags & PF_NORANDMMAP))
78808 + pax_flags |= MF_PAX_RANDMMAP;
78809 + #endif
78810 +
78811 +@@ -639,51 +640,30 @@
78812 + }
78813 + #endif
78814 +
78815 +-#if defined(CONFIG_PAX_PT_PAX_FLAGS)
78816 +-static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
78817 ++#ifdef CONFIG_PAX_XT_PAX_FLAGS
78818 ++static long pax_parse_xattr_flags(struct dentry * dentry)
78819 + {
78820 + unsigned long pax_flags = 0UL;
78821 ++ unsigned long read_flags = 0UL;
78822 ++ struct inode * inode = dentry->d_inode;
78823 +
78824 +-#ifdef CONFIG_PAX_PT_PAX_FLAGS
78825 +- unsigned long i;
78826 +- int found_flags = 0;
78827 +-#endif
78828 +-
78829 +-#ifdef CONFIG_PAX_PT_PAX_FLAGS
78830 +- for (i = 0UL; i < elf_ex->e_phnum; i++)
78831 +- if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
78832 +- if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
78833 +- ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
78834 +- ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
78835 +- ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
78836 +- ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
78837 +- return -EINVAL;
78838 +-
78839 +-#ifdef CONFIG_PAX_SOFTMODE
78840 +- if (pax_softmode)
78841 +- pax_flags = pax_parse_softmode(&elf_phdata[i]);
78842 +- else
78843 +-#endif
78844 +-
78845 +- pax_flags = pax_parse_hardmode(&elf_phdata[i]);
78846 +- found_flags = 1;
78847 +- break;
78848 +- }
78849 +-#endif
78850 ++ if (inode->i_op->getxattr)
78851 ++ if (inode->i_op->getxattr(dentry, XATTR_PAX, &read_flags, sizeof(long)) <= 0)
78852 ++ read_flags = PF_NOEMUTRAMP;
78853 ++
78854 ++ if (((read_flags & PF_PAGEEXEC) && (read_flags & PF_NOPAGEEXEC)) ||
78855 ++ ((read_flags & PF_SEGMEXEC) && (read_flags & PF_NOSEGMEXEC)) ||
78856 ++ ((read_flags & PF_EMUTRAMP) && (read_flags & PF_NOEMUTRAMP)) ||
78857 ++ ((read_flags & PF_MPROTECT) && (read_flags & PF_NOMPROTECT)) ||
78858 ++ ((read_flags & PF_RANDMMAP) && (read_flags & PF_NORANDMMAP)))
78859 ++ return -EINVAL;
78860 +
78861 +-#if defined(CONFIG_PAX_PT_PAX_FLAGS)
78862 +- if (found_flags == 0) {
78863 +- struct elf_phdr phdr;
78864 +- memset(&phdr, 0, sizeof(phdr));
78865 +- phdr.p_flags = PF_NOEMUTRAMP;
78866 + #ifdef CONFIG_PAX_SOFTMODE
78867 +- if (pax_softmode)
78868 +- pax_flags = pax_parse_softmode(&phdr);
78869 +- else
78870 +-#endif
78871 +- pax_flags = pax_parse_hardmode(&phdr);
78872 +- }
78873 ++ if (pax_softmode)
78874 ++ pax_flags = pax_parse_softmode(read_flags);
78875 ++ else
78876 + #endif
78877 ++ pax_flags = pax_parse_hardmode(read_flags);
78878 +
78879 + if (0 > pax_check_flags(&pax_flags))
78880 + return -EINVAL;
78881 +@@ -747,6 +727,7 @@
78882 + struct elfhdr interp_elf_ex;
78883 + } *loc;
78884 + unsigned long pax_task_size = TASK_SIZE;
78885 ++ struct dentry * dentry = bprm->file->f_path.dentry;
78886 +
78887 + loc = kmalloc(sizeof(*loc), GFP_KERNEL);
78888 + if (!loc) {
78889 +@@ -903,8 +884,8 @@
78890 +
78891 + current->mm->def_flags = 0;
78892 +
78893 +-#if defined(CONFIG_PAX_PT_PAX_FLAGS)
78894 +- if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
78895 ++#if defined(CONFIG_PAX_XT_PAX_FLAGS)
78896 ++ if (0 > pax_parse_xattr_flags(dentry)) {
78897 + send_sig(SIGKILL, current, 0);
78898 + goto out_free_dentry;
78899 + }
78900 +diff -Naur linux-3.1.1-xtpax.orig//grsecurity/Kconfig linux-3.1.1-xtpax/grsecurity/Kconfig
78901 +--- linux-3.1.1-xtpax.orig//grsecurity/Kconfig 2011-11-20 20:24:21.601743615 +0000
78902 ++++ linux-3.1.1-xtpax/grsecurity/Kconfig 2011-11-20 20:25:27.748745279 +0000
78903 +@@ -47,7 +47,7 @@
78904 + config GRKERNSEC_MEDIUM
78905 + bool "Medium"
78906 + select PAX
78907 +- select PAX_PT_PAX_FLAGS
78908 ++ select PAX_XT_PAX_FLAGS
78909 + select PAX_HAVE_ACL_FLAGS
78910 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
78911 + select GRKERNSEC_CHROOT
78912 +@@ -142,7 +142,7 @@
78913 + select PAX_RANDMMAP
78914 + select PAX_NOEXEC
78915 + select PAX_MPROTECT
78916 +- select PAX_PT_PAX_FLAGS
78917 ++ select PAX_XT_PAX_FLAGS
78918 + select PAX_HAVE_ACL_FLAGS
78919 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
78920 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
78921 +diff -Naur linux-3.1.1-xtpax.orig//include/linux/elf.h linux-3.1.1-xtpax/include/linux/elf.h
78922 +--- linux-3.1.1-xtpax.orig//include/linux/elf.h 2011-11-20 20:24:21.601743615 +0000
78923 ++++ linux-3.1.1-xtpax/include/linux/elf.h 2011-11-20 20:25:27.748745279 +0000
78924 +@@ -51,7 +51,7 @@
78925 + #define PT_GNU_STACK (PT_LOOS + 0x474e551)
78926 + #define PT_GNU_RELRO (PT_LOOS + 0x474e552)
78927 +
78928 +-#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
78929 ++#define XT_PAX_FLAGS (PT_LOOS + 0x5041580)
78930 +
78931 + /* Constants for the e_flags field */
78932 + #define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
78933 +diff -Naur linux-3.1.1-xtpax.orig//include/linux/grsecurity.h linux-3.1.1-xtpax/include/linux/grsecurity.h
78934 +--- linux-3.1.1-xtpax.orig//include/linux/grsecurity.h 2011-11-20 20:24:21.602743615 +0000
78935 ++++ linux-3.1.1-xtpax/include/linux/grsecurity.h 2011-11-20 20:25:27.749745279 +0000
78936 +@@ -12,11 +12,11 @@
78937 + #if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
78938 + #error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
78939 + #endif
78940 +-#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
78941 +-#error "CONFIG_PAX_NOEXEC enabled, but CONFIG_PAX_PT_PAX_FLAGS is not enabled."
78942 ++#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_XT_PAX_FLAGS)
78943 ++#error "CONFIG_PAX_NOEXEC enabled, but CONFIG_PAX_XT_PAX_FLAGS is not enabled."
78944 + #endif
78945 +-#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
78946 +-#error "CONFIG_PAX_ASLR enabled, but CONFIG_PAX_PT_PAX_FLAGS is not enabled."
78947 ++#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_XT_PAX_FLAGS)
78948 ++#error "CONFIG_PAX_ASLR enabled, but CONFIG_PAX_XT_PAX_FLAGS is not enabled."
78949 + #endif
78950 + #if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
78951 + #error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
78952 +diff -Naur linux-3.1.1-xtpax.orig//include/linux/mm_types.h linux-3.1.1-xtpax/include/linux/mm_types.h
78953 +--- linux-3.1.1-xtpax.orig//include/linux/mm_types.h 2011-11-20 20:24:21.603743615 +0000
78954 ++++ linux-3.1.1-xtpax/include/linux/mm_types.h 2011-11-20 20:25:27.751745279 +0000
78955 +@@ -365,7 +365,7 @@
78956 + struct cpumask cpumask_allocation;
78957 + #endif
78958 +
78959 +-#if defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
78960 ++#if defined(CONFIG_PAX_XT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
78961 + unsigned long pax_flags;
78962 + #endif
78963 +
78964 +diff -Naur linux-3.1.1-xtpax.orig//include/linux/xattr.h linux-3.1.1-xtpax/include/linux/xattr.h
78965 +--- linux-3.1.1-xtpax.orig//include/linux/xattr.h 2011-10-24 07:10:05.000000000 +0000
78966 ++++ linux-3.1.1-xtpax/include/linux/xattr.h 2011-11-20 20:33:31.547757453 +0000
78967 +@@ -49,6 +49,9 @@
78968 + #define XATTR_CAPS_SUFFIX "capability"
78969 + #define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
78970 +
78971 ++#define XATTR_PAX_SUFFIX "pax"
78972 ++#define XATTR_PAX XATTR_USER_PREFIX XATTR_PAX_SUFFIX
78973 ++
78974 + #ifdef __KERNEL__
78975 +
78976 + #include <linux/types.h>
78977 +diff -Naur linux-3.1.1-xtpax.orig//security/Kconfig linux-3.1.1-xtpax/security/Kconfig
78978 +--- linux-3.1.1-xtpax.orig//security/Kconfig 2011-11-20 20:24:21.604743615 +0000
78979 ++++ linux-3.1.1-xtpax/security/Kconfig 2011-11-20 20:25:27.752745279 +0000
78980 +@@ -40,30 +40,21 @@
78981 +
78982 + config PAX_SOFTMODE
78983 + bool 'Support soft mode'
78984 +- select PAX_PT_PAX_FLAGS
78985 ++ select PAX_XT_PAX_FLAGS
78986 + help
78987 + Enabling this option will allow you to run PaX in soft mode, that
78988 + is, PaX features will not be enforced by default, only on executables
78989 +- marked explicitly. You must also enable PT_PAX_FLAGS support as it
78990 ++ marked explicitly. You must also enable XT_PAX_FLAGS support as it
78991 + is the only way to mark executables for soft mode use.
78992 +
78993 + Soft mode can be activated by using the "pax_softmode=1" kernel command
78994 + line option on boot. Furthermore you can control various PaX features
78995 + at runtime via the entries in /proc/sys/kernel/pax.
78996 +
78997 +-config PAX_PT_PAX_FLAGS
78998 +- bool 'Use ELF program header marking'
78999 ++config PAX_XT_PAX_FLAGS
79000 ++ bool 'Use filesystem extended attribute marking'
79001 + help
79002 +- Enabling this option will allow you to control PaX features on
79003 +- a per executable basis via the 'paxctl' utility available at
79004 +- http://pax.grsecurity.net/. The control flags will be read from
79005 +- a PaX specific ELF program header (PT_PAX_FLAGS). This marking
79006 +- has the benefits of supporting both soft mode and being fully
79007 +- integrated into the toolchain (the binutils patch is available
79008 +- from http://pax.grsecurity.net).
79009 +-
79010 +- If your toolchain does not support PT_PAX_FLAGS markings,
79011 +- you can create one in most cases with 'paxctl -C'.
79019 79012 ++ Enabling this option will allow you to control PaX features on a per executable basis via a filesystem extended attribute (user.pax).
79013 +
79014 + choice
79015 + prompt 'MAC system integration'
79016 +@@ -96,7 +87,7 @@
79017 +
79018 + config PAX_NOEXEC
79019 + bool "Enforce non-executable pages"
79020 +- depends on (PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
79021 ++ depends on (PAX_XT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
79022 + help
79023 + By design some architectures do not allow for protecting memory
79024 + pages against execution or even if they do, Linux does not make
79025 +@@ -343,7 +334,7 @@
79026 +
79027 + config PAX_ASLR
79028 + bool "Address Space Layout Randomization"
79029 +- depends on PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
79030 ++ depends on PAX_XT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
79031 + help
79032 + Many if not most exploit techniques rely on the knowledge of
79033 + certain addresses in the attacked program. The following options
79034
79035 diff --git a/3.1.1/4450_grsec-remove-localversion-grsec.patch b/3.1.1/4450_grsec-remove-localversion-grsec.patch
79036 new file mode 100644
79037 index 0000000..31cf878
79038 --- /dev/null
79039 +++ b/3.1.1/4450_grsec-remove-localversion-grsec.patch
79040 @@ -0,0 +1,9 @@
79041 +From: Kerin Millar <kerframil@×××××.com>
79042 +
79043 +Remove grsecurity's localversion-grsec file as it is inconsistent with
79044 +Gentoo's kernel practices and naming scheme.
79045 +
79046 +--- a/localversion-grsec 2008-02-24 14:26:59.000000000 +0000
79047 ++++ b/localversion-grsec 1970-01-01 01:00:00.000000000 +0100
79048 +@@ -1 +0,0 @@
79049 +--grsec
79050
79051 diff --git a/3.1.1/4460_grsec-mute-warnings.patch b/3.1.1/4460_grsec-mute-warnings.patch
79052 new file mode 100644
79053 index 0000000..e85abd6
79054 --- /dev/null
79055 +++ b/3.1.1/4460_grsec-mute-warnings.patch
79056 @@ -0,0 +1,43 @@
79057 +From: Anthony G. Basile <blueness@g.o>
79058 +Updated patch for 2.6.38.6
79059 +
79060 +The credits/description from the original version of this patch remain accurate
79061 +and are included below.
79062 +
79063 +---
79064 +From: Jory A. Pratt <anarchy@g.o>
79065 +Updated patch for kernel 2.6.32
79066 +
79067 +The credits/description from the original version of this patch remain accurate
79068 +and are included below.
79069 +
79070 +---
79071 +From: Gordon Malm <gengor@g.o>
79072 +
79073 +Updated patch for kernel series 2.6.24.
79074 +
79075 +The credits/description from the original version of this patch remain accurate
79076 +and are included below.
79077 +
79078 +---
79079 +From: Alexander Gabert <gaberta@××××××××.de>
79080 +
79081 +This patch removes the warnings introduced by grsec patch 2.1.9 and later.
79082 +It removes the -W options added by the patch and restores the original
79083 +warning flags of vanilla kernel versions.
79084 +
79085 +Acked-by: Christian Heim <phreak@g.o>
79086 +---
79087 +
79088 +--- a/Makefile 2011-11-18 17:50:11.000000000 -0500
79089 ++++ b/Makefile 2011-11-18 17:50:48.000000000 -0500
79090 +@@ -245,7 +245,7 @@
79091 +
79092 + HOSTCC = gcc
79093 + HOSTCXX = g++
79094 +-HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
79095 ++HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
79096 + HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
79097 + HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
79098 +
79099 +
79100
79101 diff --git a/3.1.1/4470_grsec-remove-protected-paths.patch b/3.1.1/4470_grsec-remove-protected-paths.patch
79102 new file mode 100644
79103 index 0000000..4afb3e2
79104 --- /dev/null
79105 +++ b/3.1.1/4470_grsec-remove-protected-paths.patch
79106 @@ -0,0 +1,19 @@
79107 +From: Anthony G. Basile <blueness@g.o>
79108 +
79109 +We don't want GRSEC's Makefile to change permissions on paths in
79110 +the filesystem.
79111 +
79112 +diff -Naur a/grsecurity/Makefile b/grsecurity/Makefile
79113 +--- a/grsecurity/Makefile 2011-10-19 20:42:50.000000000 -0400
79114 ++++ b/grsecurity/Makefile 2011-10-19 20:45:08.000000000 -0400
79115 +@@ -27,10 +27,4 @@
79116 + ifdef CONFIG_GRKERNSEC_HIDESYM
79117 + extra-y := grsec_hidesym.o
79118 + $(obj)/grsec_hidesym.o:
79119 +- @-chmod -f 500 /boot
79120 +- @-chmod -f 500 /lib/modules
79121 +- @-chmod -f 500 /lib64/modules
79122 +- @-chmod -f 500 /lib32/modules
79123 +- @-chmod -f 700 .
79124 +- @echo ' grsec: protected kernel image paths'
79125 + endif
79126
79127 diff --git a/3.1.1/4480_grsec-pax-without-grsec.patch b/3.1.1/4480_grsec-pax-without-grsec.patch
79128 new file mode 100644
79129 index 0000000..97e8837
79130 --- /dev/null
79131 +++ b/3.1.1/4480_grsec-pax-without-grsec.patch
79132 @@ -0,0 +1,88 @@
79133 +From: Anthony G. Basile <blueness@g.o>
79134 +
79135 +With grsecurity-2.2.2-2.6.32.38-201104171745, the functions pax_report_leak_to_user and
79136 +pax_report_overflow_from_user in fs/exec.c were consolidated into pax_report_usercopy.
79137 +This patch has been updated to reflect that change.
79138 +--
79139 +From: Jory Pratt <anarchy@g.o>
79140 +Updated patch for kernel 2.6.32
79141 +
79142 +The credits/description from the original version of this patch remain accurate
79143 +and are included below.
79144 +--
79145 +From: Gordon Malm <gengor@g.o>
79146 +
79147 +Allow PaX options to be selected without first selecting CONFIG_GRKERNSEC.
79148 +
79149 +This patch has been updated to keep current with newer kernel versions.
79150 +The original version of this patch contained no credits/description.
79151 +
79152 +diff -Naur a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
79153 +--- a/arch/x86/mm/fault.c 2011-04-17 19:05:03.000000000 -0400
79154 ++++ a/arch/x86/mm/fault.c 2011-04-17 19:20:30.000000000 -0400
79155 +@@ -651,10 +651,12 @@
79156 +
79157 + #ifdef CONFIG_PAX_KERNEXEC
79158 + if (init_mm.start_code <= address && address < init_mm.end_code) {
79159 ++#ifdef CONFIG_GRKERNSEC
79160 + if (current->signal->curr_ip)
79161 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
79162 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
79163 + else
79164 ++#endif
79165 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
79166 + current->comm, task_pid_nr(current), current_uid(), current_euid());
79167 + }
79168 +diff -Naur a/fs/exec.c b/fs/exec.c
79169 +--- a/fs/exec.c 2011-04-17 19:05:03.000000000 -0400
79170 ++++ b/fs/exec.c 2011-04-17 19:20:30.000000000 -0400
79171 +@@ -1999,9 +1999,11 @@
79172 + }
79173 + up_read(&mm->mmap_sem);
79174 + }
79175 ++#ifdef CONFIG_GRKERNSEC
79176 + if (tsk->signal->curr_ip)
79177 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
79178 + else
79179 ++#endif
79180 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
79181 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
79182 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
79183 +@@ -2016,10 +2018,12 @@
79184 + #ifdef CONFIG_PAX_REFCOUNT
79185 + void pax_report_refcount_overflow(struct pt_regs *regs)
79186 + {
79187 ++#ifdef CONFIG_GRKERNSEC
79188 + if (current->signal->curr_ip)
79189 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
79190 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
79191 + else
79192 ++#endif
79193 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
79194 + current->comm, task_pid_nr(current), current_uid(), current_euid());
79195 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
79196 +@@ -2078,10 +2082,12 @@
79197 +
79198 + NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
79199 + {
79200 ++#ifdef CONFIG_GRKERNSEC
79201 + if (current->signal->curr_ip)
79202 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
79203 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
79204 + else
79205 ++#endif
79206 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
79207 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
79208 + dump_stack();
79209 +diff -Naur a/security/Kconfig b/security/Kconfig
79210 +--- a/security/Kconfig 2011-04-17 19:05:03.000000000 -0400
79211 ++++ b/security/Kconfig 2011-04-17 19:20:30.000000000 -0400
79212 +@@ -29,7 +29,7 @@
79213 +
79214 + config PAX
79215 + bool "Enable various PaX features"
79216 +- depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
79217 ++ depends on (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
79218 + help
79219 + This allows you to enable various PaX features. PaX adds
79220 + intrusion prevention mechanisms to the kernel that reduce
79221
79222 diff --git a/3.1.1/4490_grsec-kconfig-default-gids.patch b/3.1.1/4490_grsec-kconfig-default-gids.patch
79223 new file mode 100644
79224 index 0000000..671636e
79225 --- /dev/null
79226 +++ b/3.1.1/4490_grsec-kconfig-default-gids.patch
79227 @@ -0,0 +1,77 @@
79228 +From: Kerin Millar <kerframil@×××××.com>
79229 +
79230 +grsecurity contains a number of options which allow certain protections
79231 +to be applied to or exempted from members of a given group. However, the
79232 +default GIDs specified in the upstream patch are entirely arbitrary and
79233 +there is no telling which (if any) groups the GIDs will correlate with
79234 +on an end-user's system. Because some users don't pay a great deal of
79235 +attention to the finer points of kernel configuration, it is probably
79236 +wise to specify some reasonable defaults so as to stop careless users
79237 +from shooting themselves in the foot.
79238 +
79239 +diff -Naur linux-2.6.32-hardened-r44.orig/grsecurity/Kconfig linux-2.6.32-hardened-r44/grsecurity/Kconfig
79240 +--- linux-2.6.32-hardened-r44.orig/grsecurity/Kconfig 2011-04-17 18:15:55.000000000 -0400
79241 ++++ linux-2.6.32-hardened-r44/grsecurity/Kconfig 2011-04-17 18:37:33.000000000 -0400
79242 +@@ -430,7 +430,7 @@
79243 + config GRKERNSEC_PROC_GID
79244 + int "GID for special group"
79245 + depends on GRKERNSEC_PROC_USERGROUP
79246 +- default 1001
79247 ++ default 10
79248 +
79249 + config GRKERNSEC_PROC_ADD
79250 + bool "Additional restrictions"
79251 +@@ -654,7 +654,7 @@
79252 + config GRKERNSEC_AUDIT_GID
79253 + int "GID for auditing"
79254 + depends on GRKERNSEC_AUDIT_GROUP
79255 +- default 1007
79256 ++ default 100
79257 +
79258 + config GRKERNSEC_EXECLOG
79259 + bool "Exec logging"
79260 +@@ -832,7 +832,7 @@
79261 + config GRKERNSEC_TPE_GID
79262 + int "GID for untrusted users"
79263 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
79264 +- default 1005
79265 ++ default 100
79266 + help
79267 + Setting this GID determines what group TPE restrictions will be
79268 + *enabled* for. If the sysctl option is enabled, a sysctl option
79269 +@@ -841,7 +841,7 @@
79270 + config GRKERNSEC_TPE_GID
79271 + int "GID for trusted users"
79272 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
79273 +- default 1005
79274 ++ default 10
79275 + help
79276 + Setting this GID determines what group TPE restrictions will be
79277 + *disabled* for. If the sysctl option is enabled, a sysctl option
79278 +@@ -914,7 +914,7 @@
79279 + config GRKERNSEC_SOCKET_ALL_GID
79280 + int "GID to deny all sockets for"
79281 + depends on GRKERNSEC_SOCKET_ALL
79282 +- default 1004
79283 ++ default 65534
79284 + help
79285 + Here you can choose the GID to disable socket access for. Remember to
79286 + add the users you want socket access disabled for to the GID
79287 +@@ -935,7 +935,7 @@
79288 + config GRKERNSEC_SOCKET_CLIENT_GID
79289 + int "GID to deny client sockets for"
79290 + depends on GRKERNSEC_SOCKET_CLIENT
79291 +- default 1003
79292 ++ default 65534
79293 + help
79294 + Here you can choose the GID to disable client socket access for.
79295 + Remember to add the users you want client socket access disabled for to
79296 +@@ -953,7 +953,7 @@
79297 + config GRKERNSEC_SOCKET_SERVER_GID
79298 + int "GID to deny server sockets for"
79299 + depends on GRKERNSEC_SOCKET_SERVER
79300 +- default 1002
79301 ++ default 65534
79302 + help
79303 + Here you can choose the GID to disable server socket access for.
79304 + Remember to add the users you want server socket access disabled for to
79305
79306 diff --git a/3.1.1/4500_grsec-kconfig-gentoo.patch b/3.1.1/4500_grsec-kconfig-gentoo.patch
79307 new file mode 100644
79308 index 0000000..6d94033
79309 --- /dev/null
79310 +++ b/3.1.1/4500_grsec-kconfig-gentoo.patch
79311 @@ -0,0 +1,311 @@
79312 +From: Anthony G. Basile <blueness@g.o>
79313 +From: Gordon Malm <gengor@g.o>
79314 +From: Jory A. Pratt <anarchy@g.o>
79315 +From: Kerin Millar <kerframil@×××××.com>
79316 +
79317 +Add Hardened Gentoo [server/workstation] predefined grsecurity
79318 +levels. They're designed to provide a comparitively high level of
79319 +security while remaining generally suitable for as great a majority
79320 +of the userbase as possible (particularly new users).
79321 +
79322 +Make Hardened Gentoo [workstation] predefined grsecurity level the
79323 +default. The Hardened Gentoo [server] level is more restrictive
79324 +and conflicts with some software and thus would be less suitable.
79325 +
79326 +The original version of this patch was conceived and created by:
79327 +Ned Ludd <solar@g.o>
79328 +
79329 +diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
79330 +--- a/grsecurity/Kconfig 2011-04-17 19:25:54.000000000 -0400
79331 ++++ b/grsecurity/Kconfig 2011-04-17 19:27:46.000000000 -0400
79332 +@@ -18,7 +18,7 @@
79333 + choice
79334 + prompt "Security Level"
79335 + depends on GRKERNSEC
79336 +- default GRKERNSEC_CUSTOM
79337 ++ default GRKERNSEC_HARDENED_WORKSTATION
79338 +
79339 + config GRKERNSEC_LOW
79340 + bool "Low"
79341 +@@ -191,6 +191,255 @@
79342 + - Restricted sysfs/debugfs
79343 + - Active kernel exploit response
79344 +
79345 ++config GRKERNSEC_HARDENED_SERVER
79346 ++ bool "Hardened Gentoo [server]"
79347 ++ select GRKERNSEC_LINK
79348 ++ select GRKERNSEC_FIFO
79349 ++ select GRKERNSEC_DMESG
79350 ++ select GRKERNSEC_FORKFAIL
79351 ++ select GRKERNSEC_TIME
79352 ++ select GRKERNSEC_SIGNAL
79353 ++ select GRKERNSEC_CHROOT
79354 ++ select GRKERNSEC_CHROOT_SHMAT
79355 ++ select GRKERNSEC_CHROOT_UNIX
79356 ++ select GRKERNSEC_CHROOT_MOUNT
79357 ++ select GRKERNSEC_CHROOT_FCHDIR
79358 ++ select GRKERNSEC_CHROOT_PIVOT
79359 ++ select GRKERNSEC_CHROOT_DOUBLE
79360 ++ select GRKERNSEC_CHROOT_CHDIR
79361 ++ select GRKERNSEC_CHROOT_MKNOD
79362 ++ select GRKERNSEC_CHROOT_CAPS
79363 ++ select GRKERNSEC_CHROOT_SYSCTL
79364 ++ select GRKERNSEC_CHROOT_FINDTASK
79365 ++ select GRKERNSEC_PROC
79366 ++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
79367 ++ select GRKERNSEC_HIDESYM
79368 ++ select GRKERNSEC_BRUTE
79369 ++ select GRKERNSEC_PROC_USERGROUP
79370 ++ select GRKERNSEC_KMEM
79371 ++ select GRKERNSEC_RESLOG
79372 ++ select GRKERNSEC_RANDNET
79373 ++ select GRKERNSEC_PROC_ADD
79374 ++ select GRKERNSEC_CHROOT_CHMOD
79375 ++ select GRKERNSEC_CHROOT_NICE
79376 ++ select GRKERNSEC_AUDIT_MOUNT
79377 ++ select GRKERNSEC_MODHARDEN if (MODULES)
79378 ++ select GRKERNSEC_HARDEN_PTRACE
79379 ++ select GRKERNSEC_VM86 if (X86_32)
79380 ++ select GRKERNSEC_IO if (X86)
79381 ++ select GRKERNSEC_PROC_IPADDR
79382 ++ select GRKERNSEC_RWXMAP_LOG
79383 ++ select GRKERNSEC_SYSCTL
79384 ++ select GRKERNSEC_SYSCTL_ON
79385 ++ select PAX
79386 ++ select PAX_RANDUSTACK
79387 ++ select PAX_ASLR
79388 ++ select PAX_RANDMMAP
79389 ++ select PAX_NOEXEC
79390 ++ select PAX_MPROTECT
79391 ++ select PAX_XT_PAX_FLAGS
79392 ++ select PAX_HAVE_ACL_FLAGS
79393 ++ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
79394 ++ select PAX_MEMORY_UDEREF if (X86 && !XEN)
79395 ++ select PAX_RANDKSTACK if (X86_TSC && !X86_64)
79396 ++ select PAX_SEGMEXEC if (X86_32)
79397 ++ select PAX_PAGEEXEC
79398 ++ select PAX_EMUPLT if (ALPHA || PARISC || SPARC32 || SPARC64)
79399 ++ select PAX_EMUTRAMP if (PARISC)
79400 ++ select PAX_EMUSIGRT if (PARISC)
79401 ++ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
79402 ++ select PAX_REFCOUNT if (X86 || SPARC64)
79403 ++ select PAX_USERCOPY if ((X86 || PPC || SPARC32 || SPARC64) && (SLAB || SLUB || SLOB))
79404 ++ select PAX_MEMORY_SANITIZE
79405 ++ help
79406 ++ If you say Y here, a configuration for grsecurity/PaX features
79407 ++ will be used that is endorsed by the Hardened Gentoo project.
79408 ++ These pre-defined security levels are designed to provide a high
79409 ++ level of security while minimizing incompatibilities with a majority
79410 ++ of Gentoo's available software.
79411 ++
79412 ++ This "Hardened Gentoo [server]" level is identical to the
79413 ++ "Hardened Gentoo [workstation]" level, but with GRKERNSEC_IO,
79414 ++ and GRKERNSEC_PROC_ADD enabled. Accordingly, this is the preferred
79415 ++ security level if the system will not be utilizing software incompatible
79416 ++ with these features.
79417 ++
79418 ++ When this level is selected, some security features will be forced on,
79419 ++ while others will default to their suggested values of off or on. The
79427 79420 ++ latter can be tweaked at the user's discretion, but may cause problems
79421 ++ in some situations. You can fully customize all grsecurity/PaX features
79422 ++ by choosing "Custom" in the Security Level menu. It may be helpful to
79423 ++ inherit the options selected by this security level as a starting point.
79424 ++ To accomplish this, select this security level, then exit the menuconfig
79425 ++ interface, saving changes when prompted. Run make menuconfig again and
79426 ++ select the "Custom" level.
79427 ++
79428 ++config GRKERNSEC_HARDENED_WORKSTATION
79429 ++ bool "Hardened Gentoo [workstation]"
79430 ++ select GRKERNSEC_LINK
79431 ++ select GRKERNSEC_FIFO
79432 ++ select GRKERNSEC_DMESG
79433 ++ select GRKERNSEC_FORKFAIL
79434 ++ select GRKERNSEC_TIME
79435 ++ select GRKERNSEC_SIGNAL
79436 ++ select GRKERNSEC_CHROOT
79437 ++ select GRKERNSEC_CHROOT_SHMAT
79438 ++ select GRKERNSEC_CHROOT_UNIX
79439 ++ select GRKERNSEC_CHROOT_MOUNT
79440 ++ select GRKERNSEC_CHROOT_FCHDIR
79441 ++ select GRKERNSEC_CHROOT_PIVOT
79442 ++ select GRKERNSEC_CHROOT_DOUBLE
79443 ++ select GRKERNSEC_CHROOT_CHDIR
79444 ++ select GRKERNSEC_CHROOT_MKNOD
79445 ++ select GRKERNSEC_CHROOT_CAPS
79446 ++ select GRKERNSEC_CHROOT_SYSCTL
79447 ++ select GRKERNSEC_CHROOT_FINDTASK
79448 ++ select GRKERNSEC_PROC
79449 ++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
79450 ++ select GRKERNSEC_HIDESYM
79451 ++ select GRKERNSEC_BRUTE
79452 ++ select GRKERNSEC_PROC_USERGROUP
79453 ++ select GRKERNSEC_KMEM
79454 ++ select GRKERNSEC_RESLOG
79455 ++ select GRKERNSEC_RANDNET
79456 ++ # select GRKERNSEC_PROC_ADD
79457 ++ select GRKERNSEC_CHROOT_CHMOD
79458 ++ select GRKERNSEC_CHROOT_NICE
79459 ++ select GRKERNSEC_AUDIT_MOUNT
79460 ++ select GRKERNSEC_MODHARDEN if (MODULES)
79461 ++ select GRKERNSEC_HARDEN_PTRACE
79462 ++ select GRKERNSEC_VM86 if (X86_32)
79463 ++ # select GRKERNSEC_IO if (X86)
79464 ++ select GRKERNSEC_PROC_IPADDR
79465 ++ select GRKERNSEC_RWXMAP_LOG
79466 ++ select GRKERNSEC_SYSCTL
79467 ++ select GRKERNSEC_SYSCTL_ON
79468 ++ select PAX
79469 ++ select PAX_RANDUSTACK
79470 ++ select PAX_ASLR
79471 ++ select PAX_RANDMMAP
79472 ++ select PAX_NOEXEC
79473 ++ select PAX_MPROTECT
79474 ++ select PAX_XT_PAX_FLAGS
79475 ++ select PAX_HAVE_ACL_FLAGS
79476 ++ # select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
79477 ++ # select PAX_MEMORY_UDEREF if (X86 && !XEN)
79478 ++ select PAX_RANDKSTACK if (X86_TSC && !X86_64)
79479 ++ select PAX_SEGMEXEC if (X86_32)
79480 ++ select PAX_PAGEEXEC
79481 ++ select PAX_EMUPLT if (ALPHA || PARISC || SPARC32 || SPARC64)
79482 ++ select PAX_EMUTRAMP if (PARISC)
79483 ++ select PAX_EMUSIGRT if (PARISC)
79484 ++ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
79485 ++ select PAX_REFCOUNT if (X86 || SPARC64)
79486 ++ select PAX_USERCOPY if ((X86 || PPC || SPARC32 || SPARC64) && (SLAB || SLUB || SLOB))
79487 ++ select PAX_MEMORY_SANITIZE
79488 ++ help
79489 ++ If you say Y here, a configuration for grsecurity/PaX features
79490 ++ will be used that is endorsed by the Hardened Gentoo project.
79491 ++ These pre-defined security levels are designed to provide a high
79492 ++ level of security while minimizing incompatibilities with a majority
79493 ++ of Gentoo's available software.
79494 ++
79495 ++ This "Hardened Gentoo [workstation]" level is identical to the
79496 ++ "Hardened Gentoo [server]" level, but with GRKERNSEC_IO and
79497 ++ GRKERNSEC_PROC_ADD disabled. Accordingly, this is the preferred
79498 ++ security level if the system will be utilizing software incompatible
79499 ++ with these features.
79500 ++
79501 ++ When this level is selected, some security features will be forced on,
79502 ++ while others will default to their suggested values of off or on. The
79510 79503 ++ latter can be tweaked at the user's discretion, but may cause problems
79504 ++ in some situations. You can fully customize all grsecurity/PaX features
79505 ++ by choosing "Custom" in the Security Level menu. It may be helpful to
79506 ++ inherit the options selected by this security level as a starting point.
79507 ++ To accomplish this, select this security level, then exit the menuconfig
79508 ++ interface, saving changes when prompted. Run make menuconfig again and
79509 ++ select the "Custom" level.
79510 ++
79511 ++config GRKERNSEC_HARDENED_VIRTUALIZATION
79512 ++ bool "Hardened Gentoo [virtualization]"
79513 ++ select GRKERNSEC_LINK
79514 ++ select GRKERNSEC_FIFO
79515 ++ select GRKERNSEC_DMESG
79516 ++ select GRKERNSEC_FORKFAIL
79517 ++ select GRKERNSEC_TIME
79518 ++ select GRKERNSEC_SIGNAL
79519 ++ select GRKERNSEC_CHROOT
79520 ++ select GRKERNSEC_CHROOT_SHMAT
79521 ++ select GRKERNSEC_CHROOT_UNIX
79522 ++ select GRKERNSEC_CHROOT_MOUNT
79523 ++ select GRKERNSEC_CHROOT_FCHDIR
79524 ++ select GRKERNSEC_CHROOT_PIVOT
79525 ++ select GRKERNSEC_CHROOT_DOUBLE
79526 ++ select GRKERNSEC_CHROOT_CHDIR
79527 ++ select GRKERNSEC_CHROOT_MKNOD
79528 ++ select GRKERNSEC_CHROOT_CAPS
79529 ++ select GRKERNSEC_CHROOT_SYSCTL
79530 ++ select GRKERNSEC_CHROOT_FINDTASK
79531 ++ select GRKERNSEC_PROC
79532 ++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
79533 ++ select GRKERNSEC_HIDESYM
79534 ++ select GRKERNSEC_BRUTE
79535 ++ select GRKERNSEC_PROC_USERGROUP
79536 ++ select GRKERNSEC_KMEM
79537 ++ select GRKERNSEC_RESLOG
79538 ++ select GRKERNSEC_RANDNET
79539 ++ # select GRKERNSEC_PROC_ADD
79540 ++ select GRKERNSEC_CHROOT_CHMOD
79541 ++ select GRKERNSEC_CHROOT_NICE
79542 ++ select GRKERNSEC_AUDIT_MOUNT
79543 ++ select GRKERNSEC_MODHARDEN if (MODULES)
79544 ++ select GRKERNSEC_HARDEN_PTRACE
79545 ++ select GRKERNSEC_VM86 if (X86_32)
79546 ++ # select GRKERNSEC_IO if (X86)
79547 ++ select GRKERNSEC_PROC_IPADDR
79548 ++ select GRKERNSEC_RWXMAP_LOG
79549 ++ select GRKERNSEC_SYSCTL
79550 ++ select GRKERNSEC_SYSCTL_ON
79551 ++ select PAX
79552 ++ select PAX_RANDUSTACK
79553 ++ select PAX_ASLR
79554 ++ select PAX_RANDMMAP
79555 ++ select PAX_NOEXEC
79556 ++ select PAX_MPROTECT
79557 ++ select PAX_XT_PAX_FLAGS
79558 ++ select PAX_HAVE_ACL_FLAGS
79559 ++ # select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
79560 ++ # select PAX_MEMORY_UDEREF if (X86 && !XEN)
79561 ++ select PAX_RANDKSTACK if (X86_TSC && !X86_64)
79562 ++ select PAX_SEGMEXEC if (X86_32)
79563 ++ select PAX_PAGEEXEC
79564 ++ select PAX_EMUPLT if (ALPHA || PARISC || SPARC32 || SPARC64)
79565 ++ select PAX_EMUTRAMP if (PARISC)
79566 ++ select PAX_EMUSIGRT if (PARISC)
79567 ++ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
79568 ++ select PAX_REFCOUNT if (X86 || SPARC64)
79569 ++ select PAX_USERCOPY if ((X86 || PPC || SPARC32 || SPARC64) && (SLAB || SLUB || SLOB))
79570 ++ select PAX_MEMORY_SANITIZE
79571 ++ help
79572 ++ If you say Y here, a configuration for grsecurity/PaX features
79573 ++ will be used that is endorsed by the Hardened Gentoo project.
79574 ++ These pre-defined security levels are designed to provide a high
79575 ++ level of security while minimizing incompatibilities with a majority
79576 ++ of Gentoo's available software.
79577 ++
79578 ++ This "Hardened Gentoo [virtualization]" level is identical to the
79579 ++ "Hardened Gentoo [workstation]" level, but with the PAX_KERNEXEC and
79580 ++ PAX_MEMORY_UDEREF defaulting to off. Accordingly, this is the preferred
79581 ++ security level if the system will be utilizing virtualization software
79582 ++ incompatible with these features, like VirtualBox or kvm.
79583 ++
79584 ++ When this level is selected, some security features will be forced on,
79585 ++ while others will default to their suggested values of off or on. The
79586 ++ latter can be tweaked at the user's discretion, but may cause problems
79587 ++ in some situations. You can fully customize all grsecurity/PaX features
79588 ++ by choosing "Custom" in the Security Level menu. It may be helpful to
79589 ++ inherit the options selected by this security level as a starting point.
79590 ++ To accomplish this, select this security level, then exit the menuconfig
79591 ++ interface, saving changes when prompted. Run make menuconfig again and
79592 ++ select the "Custom" level.
79593 ++
79594 + config GRKERNSEC_CUSTOM
79595 + bool "Custom"
79596 + help
79597 +--- a/security/Kconfig 2011-09-21 07:20:02.000000000 -0400
79598 ++++ b/security/Kconfig 2011-09-21 07:25:50.000000000 -0400
79599 +@@ -322,9 +322,10 @@
79600 +
79601 + config PAX_KERNEXEC
79602 + bool "Enforce non-executable kernel pages"
79603 +- depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
79604 ++ depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN && !GRKERNSEC_HARDENED_VIRTUALIZATION
79605 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
79606 + select PAX_KERNEXEC_PLUGIN if X86_64
79607 ++ default y if GRKERNSEC_HARDENED_WORKSTATION
79608 + help
79609 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
79610 + that is, enabling this option will make it harder to inject
79611 +@@ -487,8 +488,9 @@
79612 +
79613 + config PAX_MEMORY_UDEREF
79614 + bool "Prevent invalid userland pointer dereference"
79615 +- depends on X86 && !UML_X86 && !XEN
79616 ++ depends on X86 && !UML_X86 && !XEN && !GRKERNSEC_HARDENED_VIRTUALIZATION
79617 + select PAX_PER_CPU_PGD if X86_64
79618 ++ default y if GRKERNSEC_HARDENED_WORKSTATION
79619 + help
79620 + By saying Y here the kernel will be prevented from dereferencing
79621 + userland pointers in contexts where the kernel expects only kernel
79622 +
79623
79624 diff --git a/3.1.1/4510-grsec-kconfig-proc-user.patch b/3.1.1/4510-grsec-kconfig-proc-user.patch
79625 new file mode 100644
79626 index 0000000..c588683
79627 --- /dev/null
79628 +++ b/3.1.1/4510-grsec-kconfig-proc-user.patch
79629 @@ -0,0 +1,26 @@
79630 +From: Anthony G. Basile <blueness@g.o>
79631 +
79632 +Address the mutually exclusive options GRKERNSEC_PROC_USER and GRKERNSEC_PROC_USERGROUP
79633 +in a different way to avoid bug #366019. This patch should eventually go upstream.
79634 +
79635 +diff -Naur linux-2.6.39-hardened-r4.orig//grsecurity/Kconfig linux-2.6.39-hardened-r4/grsecurity/Kconfig
79636 +--- a/grsecurity/Kconfig 2011-06-29 10:02:56.000000000 -0400
79637 ++++ b/grsecurity/Kconfig 2011-06-29 10:08:07.000000000 -0400
79638 +@@ -666,7 +666,7 @@
79639 +
79640 + config GRKERNSEC_PROC_USER
79641 + bool "Restrict /proc to user only"
79642 +- depends on GRKERNSEC_PROC
79643 ++ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USERGROUP
79644 + help
79645 + If you say Y here, non-root users will only be able to view their own
79646 + processes, and restricts them from viewing network-related information,
79647 +@@ -674,7 +674,7 @@
79648 +
79649 + config GRKERNSEC_PROC_USERGROUP
79650 + bool "Allow special group"
79651 +- depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
79652 ++ depends on GRKERNSEC_PROC
79653 + help
79654 + If you say Y here, you will be able to select a group that will be
79655 + able to view all processes and network-related information. If you've
79656
79657 diff --git a/3.1.1/4520_selinux-avc_audit-log-curr_ip.patch b/3.1.1/4520_selinux-avc_audit-log-curr_ip.patch
79658 new file mode 100644
79659 index 0000000..0fd5d2d
79660 --- /dev/null
79661 +++ b/3.1.1/4520_selinux-avc_audit-log-curr_ip.patch
79662 @@ -0,0 +1,73 @@
79663 +From: Anthony G. Basile <blueness@g.o>
79664 +
79665 +Removed deprecated NIPQUAD macro in favor of %pI4.
79666 +See bug #346333.
79667 +
79668 +---
79669 +From: Gordon Malm <gengor@g.o>
79670 +
79671 +This is a reworked version of the original
79672 +*_selinux-avc_audit-log-curr_ip.patch carried in earlier releases of
79673 +hardened-sources.
79674 +
79675 +Dropping the patch, or simply fixing the #ifdef of the original patch
79676 +could break automated logging setups so this route was necessary.
79677 +
79678 +Suggestions for improving the help text are welcome.
79679 +
79680 +The original patch's description is still accurate and included below.
79681 +
79682 +---
79683 +Provides support for a new field ipaddr within the SELinux
79684 +AVC audit log, relying on task_struct->curr_ip (IPv4 only)
79685 +provided by grSecurity patch to be applied before.
79686 +
79687 +Signed-off-by: Lorenzo Hernandez Garcia-Hierro <lorenzo@×××.org>
79688 +---
79689 +
79690 +diff -Naur linux-2.6.38-hardened-r1.orig/grsecurity/Kconfig linux-2.6.38-hardened-r1/grsecurity/Kconfig
79691 +--- linux-2.6.38-hardened-r1.orig/grsecurity/Kconfig 2011-04-17 19:25:54.000000000 -0400
79692 ++++ linux-2.6.38-hardened-r1/grsecurity/Kconfig 2011-04-17 19:32:53.000000000 -0400
79693 +@@ -1265,6 +1265,27 @@
79694 + menu "Logging Options"
79695 + depends on GRKERNSEC
79696 +
79697 ++config GRKERNSEC_SELINUX_AVC_LOG_IPADDR
79698 ++ def_bool n
79699 ++ prompt "Add source IP address to SELinux AVC log messages"
79700 ++ depends on GRKERNSEC && SECURITY_SELINUX
79701 ++ help
79702 ++ If you say Y here, a new field "ipaddr=" will be added to many SELinux
79703 ++ AVC log messages. The value of this field in any given message
79704 ++ represents the source IP address of the remote machine/user that created
79705 ++ the offending process.
79706 ++
79707 ++ This information is sourced from task_struct->curr_ip provided by
79708 ++ grsecurity's GRKERNSEC top-level configuration option. One limitation
79709 ++ is that only IPv4 is supported.
79710 ++
79711 ++ In many instances SELinux AVC log messages already log a superior level
79712 ++ of information that also includes source port and destination ip/port.
79713 ++ Additionally, SELinux's AVC log code supports IPv6.
79714 ++
79715 ++ However, grsecurity's task_struct->curr_ip will sometimes (often?)
79716 ++ provide the offender's IP address where stock SELinux logging fails to.
79717 ++
79718 + config GRKERNSEC_FLOODTIME
79719 + int "Seconds in between log messages (minimum)"
79720 + default 10
79721 +diff -Naur linux-2.6.38-hardened-r1.orig/security/selinux/avc.c linux-2.6.38-hardened-r1/security/selinux/avc.c
79722 +--- linux-2.6.38-hardened-r1.orig/security/selinux/avc.c 2011-04-17 19:04:47.000000000 -0400
79723 ++++ linux-2.6.38-hardened-r1/security/selinux/avc.c 2011-04-17 19:32:53.000000000 -0400
79724 +@@ -139,6 +139,11 @@
79725 + char *scontext;
79726 + u32 scontext_len;
79727 +
79728 ++#ifdef CONFIG_GRKERNSEC_SELINUX_AVC_LOG_IPADDR
79729 ++ if (current->signal->curr_ip)
79730 ++ audit_log_format(ab, "ipaddr=%pI4 ", &current->signal->curr_ip);
79731 ++#endif
79732 ++
79733 + rc = security_sid_to_context(ssid, &scontext, &scontext_len);
79734 + if (rc)
79735 + audit_log_format(ab, "ssid=%d", ssid);
79736
79737 diff --git a/3.1.1/4530_disable-compat_vdso.patch b/3.1.1/4530_disable-compat_vdso.patch
79738 new file mode 100644
79739 index 0000000..3b76b6c
79740 --- /dev/null
79741 +++ b/3.1.1/4530_disable-compat_vdso.patch
79742 @@ -0,0 +1,46 @@
79743 +No need to wrap vdso calls as gentoo does not use any version of
79744 +glibc <=2.3.3
79745 +---
79746 +From: Gordon Malm <gengor@g.o>
79747 +From: Kerin Millar <kerframil@×××××.com>
79748 +From: Jory A. Pratt <anarchy@g.o>
79749 +
79750 +COMPAT_VDSO is inappropriate for any modern Hardened Gentoo system. It
79751 +conflicts with various parts of PaX, crashing the system if enabled
79752 +while PaX's NOEXEC or UDEREF features are active. Moreover, it prevents
79753 +a number of important PaX options from appearing in the configuration
79754 +menu, including all PaX NOEXEC implementations. Unfortunately, the
79755 +reason for the disappearance of these PaX configuration options is
79756 +often far from obvious to inexperienced users.
79757 +
79758 +Therefore, we disable the COMPAT_VDSO menu entry entirely. However,
79759 +COMPAT_VDSO operation can still be enabled via bootparam and sysctl
79760 +interfaces. Consequently, we must also disable the ability to select
79761 +COMPAT_VDSO operation at boot or runtime. Here we patch the kernel so
79762 +that selecting COMPAT_VDSO operation at boot/runtime has no effect if
79763 +conflicting PaX options are enabled, leaving VDSO_ENABLED operation
79764 +intact.
79765 +
79766 +Closes bug: http://bugs.gentoo.org/show_bug.cgi?id=210138
79767 +
79768 +diff -urp a/arch/x86/Kconfig b/arch/x86/Kconfig
79769 +--- a/arch/x86/Kconfig 2009-07-31 01:36:57.323857684 +0100
79770 ++++ b/arch/x86/Kconfig 2009-07-31 01:51:39.395749681 +0100
79771 +@@ -1638,17 +1638,8 @@
79772 +
79773 + config COMPAT_VDSO
79774 + def_bool n
79775 +- prompt "Compat VDSO support"
79776 + depends on X86_32 || IA32_EMULATION
79777 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
79778 +- ---help---
79779 +- Map the 32-bit VDSO to the predictable old-style address too.
79780 +-
79781 +- Say N here if you are running a sufficiently recent glibc
79782 +- version (2.3.3 or later), to remove the high-mapped
79783 +- VDSO mapping and to exclusively use the randomized VDSO.
79784 +-
79785 +- If unsure, say Y.
79786 +
79787 + config CMDLINE_BOOL
79788 + bool "Built-in kernel command line"