commit: 40c6b42e97cb6aca75832752d4ad3f28c1ebcf0c
Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Sun Jun 22 16:04:26 2014 +0000
Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Sun Jun 22 16:04:26 2014 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=40c6b42e

Grsec/PaX: 3.0-{3.2.60,3.14.8}-201406220132

---
 3.14.8/0000_README | 2 +-
 ... 4420_grsecurity-3.0-3.14.8-201406220132.patch} | 413 ++++++++++++++++-----
 3.2.60/0000_README | 2 +-
 ... 4420_grsecurity-3.0-3.2.60-201406220130.patch} | 366 +++++++++++++-----
 4 files changed, 591 insertions(+), 192 deletions(-)

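[Editorial note, not part of the recorded commit: the functional core of this
revision is a rework of the GRKERNSEC_KSTACKOVERFLOW stack allocator. The old
vmalloc_stack() helper is dropped; each task now keeps its usual physically
contiguous lowmem stack (tsk->lowmem_stack) and runs on a vmap()ed alias of
those pages, so an overflow faults into a guard area instead of silently
corrupting adjacent lowmem. A condensed sketch of the new allocation path,
assuming kernel context; every identifier used here appears in the
kernel/fork.c hunks below:

    /* Allocate the conventional lowmem stack, then build a THREAD_SIZE-aligned
     * virtual alias of its pages for the task to actually run on. */
    static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
                                                                int node, void **lowmem_stack)
    {
            struct page *pages[THREAD_SIZE / PAGE_SIZE];
            void *ret;
            unsigned int i;

            *lowmem_stack = alloc_thread_info_node(tsk, node);  /* backing pages */
            if (*lowmem_stack == NULL)
                    return NULL;
            for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
                    pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
            /* VM_IOREMAP is used only to obtain THREAD_SIZE alignment */
            ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
            if (ret == NULL) {
                    free_thread_info(*lowmem_stack);  /* don't leak on failure */
                    *lowmem_stack = NULL;
            }
            return ret;
    }

Freeing goes through the new unmap_process_stacks() (mm/vmalloc.c hunks below),
which defers the vunmap() to a per-cpu workqueue when called from interrupt
context. Buffers that start on the aliased stack are translated back to their
lowmem address before page-based uses such as sg_set_buf()
(include/linux/scatterlist.h hunks below).]
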
17 |
diff --git a/3.14.8/0000_README b/3.14.8/0000_README |
18 |
index d9d0e9a..9ba5226 100644 |
19 |
--- a/3.14.8/0000_README |
20 |
+++ b/3.14.8/0000_README |
21 |
@@ -2,7 +2,7 @@ README |
22 |
----------------------------------------------------------------------------- |
23 |
Individual Patch Descriptions: |
24 |
----------------------------------------------------------------------------- |
25 |
-Patch: 4420_grsecurity-3.0-3.14.8-201406191347.patch |
26 |
+Patch: 4420_grsecurity-3.0-3.14.8-201406220132.patch |
27 |
From: http://www.grsecurity.net |
28 |
Desc: hardened-sources base patch from upstream grsecurity |
29 |
|
30 |
|
31 |
diff --git a/3.14.8/4420_grsecurity-3.0-3.14.8-201406191347.patch b/3.14.8/4420_grsecurity-3.0-3.14.8-201406220132.patch |
32 |
similarity index 99% |
33 |
rename from 3.14.8/4420_grsecurity-3.0-3.14.8-201406191347.patch |
34 |
rename to 3.14.8/4420_grsecurity-3.0-3.14.8-201406220132.patch |
35 |
index cf0e6f3..1e32908 100644 |
36 |
--- a/3.14.8/4420_grsecurity-3.0-3.14.8-201406191347.patch |
37 |
+++ b/3.14.8/4420_grsecurity-3.0-3.14.8-201406220132.patch |
38 |
@@ -17682,7 +17682,7 @@ index 86f9301..b365cda 100644 |
39 |
void unregister_nmi_handler(unsigned int, const char *); |
40 |
|
41 |
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h |
42 |
-index 775873d..de5f0304 100644 |
43 |
+index 775873d..04cd306 100644 |
44 |
--- a/arch/x86/include/asm/page.h |
45 |
+++ b/arch/x86/include/asm/page.h |
46 |
@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr, |
47 |
@@ -17693,6 +17693,29 @@ index 775873d..de5f0304 100644 |
48 |
|
49 |
#define __boot_va(x) __va(x) |
50 |
#define __boot_pa(x) __pa(x) |
51 |
+@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr, |
52 |
+ * virt_to_page(kaddr) returns a valid pointer if and only if |
53 |
+ * virt_addr_valid(kaddr) returns true. |
54 |
+ */ |
55 |
+-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
56 |
+ #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
57 |
+ extern bool __virt_addr_valid(unsigned long kaddr); |
58 |
+ #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr)) |
59 |
+ |
60 |
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
61 |
++#define virt_to_page(kaddr) \ |
62 |
++ ({ \ |
63 |
++ const void *__kaddr = (const void *)(kaddr); \ |
64 |
++ BUG_ON(!virt_addr_valid(__kaddr)); \ |
65 |
++ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \ |
66 |
++ }) |
67 |
++#else |
68 |
++#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
69 |
++#endif |
70 |
++ |
71 |
+ #endif /* __ASSEMBLY__ */ |
72 |
+ |
73 |
+ #include <asm-generic/memory_model.h> |
74 |
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h |
75 |
index 0f1ddee..e2fc3d1 100644 |
76 |
--- a/arch/x86/include/asm/page_64.h |
77 |
@@ -82226,8 +82249,33 @@ index b66c211..13d2915 100644 |
78 |
|
79 |
static inline void anon_vma_merge(struct vm_area_struct *vma, |
80 |
struct vm_area_struct *next) |
81 |
+diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h |
82 |
+index a964f72..b475afb 100644 |
83 |
+--- a/include/linux/scatterlist.h |
84 |
++++ b/include/linux/scatterlist.h |
85 |
+@@ -1,6 +1,7 @@ |
86 |
+ #ifndef _LINUX_SCATTERLIST_H |
87 |
+ #define _LINUX_SCATTERLIST_H |
88 |
+ |
89 |
++#include <linux/sched.h> |
90 |
+ #include <linux/string.h> |
91 |
+ #include <linux/bug.h> |
92 |
+ #include <linux/mm.h> |
93 |
+@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf, |
94 |
+ #ifdef CONFIG_DEBUG_SG |
95 |
+ BUG_ON(!virt_addr_valid(buf)); |
96 |
+ #endif |
97 |
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
98 |
++ if (object_starts_on_stack(buf)) { |
99 |
++ void *adjbuf = buf - current->stack + current->lowmem_stack; |
100 |
++ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf)); |
101 |
++ } else |
102 |
++#endif |
103 |
+ sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); |
104 |
+ } |
105 |
+ |
106 |
diff --git a/include/linux/sched.h b/include/linux/sched.h |
107 |
-index ccd0c6f..39c28a4 100644 |
108 |
+index ccd0c6f..84d9030 100644 |
109 |
--- a/include/linux/sched.h |
110 |
+++ b/include/linux/sched.h |
111 |
@@ -129,6 +129,7 @@ struct fs_struct; |
112 |
@@ -82318,7 +82366,17 @@ index ccd0c6f..39c28a4 100644 |
113 |
|
114 |
extern int uids_sysfs_init(void); |
115 |
|
116 |
-@@ -1286,8 +1319,8 @@ struct task_struct { |
117 |
+@@ -1164,6 +1197,9 @@ enum perf_event_task_context { |
118 |
+ struct task_struct { |
119 |
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ |
120 |
+ void *stack; |
121 |
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
122 |
++ void *lowmem_stack; |
123 |
++#endif |
124 |
+ atomic_t usage; |
125 |
+ unsigned int flags; /* per process flags, defined below */ |
126 |
+ unsigned int ptrace; |
127 |
+@@ -1286,8 +1322,8 @@ struct task_struct { |
128 |
struct list_head thread_node; |
129 |
|
130 |
struct completion *vfork_done; /* for vfork() */ |
131 |
@@ -82329,7 +82387,7 @@ index ccd0c6f..39c28a4 100644 |
132 |
|
133 |
cputime_t utime, stime, utimescaled, stimescaled; |
134 |
cputime_t gtime; |
135 |
-@@ -1312,11 +1345,6 @@ struct task_struct { |
136 |
+@@ -1312,11 +1348,6 @@ struct task_struct { |
137 |
struct task_cputime cputime_expires; |
138 |
struct list_head cpu_timers[3]; |
139 |
|
140 |
@@ -82341,7 +82399,7 @@ index ccd0c6f..39c28a4 100644 |
141 |
char comm[TASK_COMM_LEN]; /* executable name excluding path |
142 |
- access with [gs]et_task_comm (which lock |
143 |
it with task_lock()) |
144 |
-@@ -1333,6 +1361,10 @@ struct task_struct { |
145 |
+@@ -1333,6 +1364,10 @@ struct task_struct { |
146 |
#endif |
147 |
/* CPU-specific state of this task */ |
148 |
struct thread_struct thread; |
149 |
@@ -82352,7 +82410,7 @@ index ccd0c6f..39c28a4 100644 |
150 |
/* filesystem information */ |
151 |
struct fs_struct *fs; |
152 |
/* open file information */ |
153 |
-@@ -1409,6 +1441,10 @@ struct task_struct { |
154 |
+@@ -1409,6 +1444,10 @@ struct task_struct { |
155 |
gfp_t lockdep_reclaim_gfp; |
156 |
#endif |
157 |
|
158 |
@@ -82363,7 +82421,7 @@ index ccd0c6f..39c28a4 100644 |
159 |
/* journalling filesystem info */ |
160 |
void *journal_info; |
161 |
|
162 |
-@@ -1447,6 +1483,10 @@ struct task_struct { |
163 |
+@@ -1447,6 +1486,10 @@ struct task_struct { |
164 |
/* cg_list protected by css_set_lock and tsk->alloc_lock */ |
165 |
struct list_head cg_list; |
166 |
#endif |
167 |
@@ -82374,7 +82432,7 @@ index ccd0c6f..39c28a4 100644 |
168 |
#ifdef CONFIG_FUTEX |
169 |
struct robust_list_head __user *robust_list; |
170 |
#ifdef CONFIG_COMPAT |
171 |
-@@ -1581,7 +1621,78 @@ struct task_struct { |
172 |
+@@ -1581,7 +1624,78 @@ struct task_struct { |
173 |
unsigned int sequential_io; |
174 |
unsigned int sequential_io_avg; |
175 |
#endif |
176 |
@@ -82454,7 +82512,7 @@ index ccd0c6f..39c28a4 100644 |
177 |
|
178 |
/* Future-safe accessor for struct task_struct's cpus_allowed. */ |
179 |
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) |
180 |
-@@ -1658,7 +1769,7 @@ struct pid_namespace; |
181 |
+@@ -1658,7 +1772,7 @@ struct pid_namespace; |
182 |
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, |
183 |
struct pid_namespace *ns); |
184 |
|
185 |
@@ -82463,7 +82521,7 @@ index ccd0c6f..39c28a4 100644 |
186 |
{ |
187 |
return tsk->pid; |
188 |
} |
189 |
-@@ -2006,6 +2117,25 @@ extern u64 sched_clock_cpu(int cpu); |
190 |
+@@ -2006,6 +2120,25 @@ extern u64 sched_clock_cpu(int cpu); |
191 |
|
192 |
extern void sched_clock_init(void); |
193 |
|
194 |
@@ -82489,7 +82547,7 @@ index ccd0c6f..39c28a4 100644 |
195 |
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK |
196 |
static inline void sched_clock_tick(void) |
197 |
{ |
198 |
-@@ -2130,7 +2260,9 @@ void yield(void); |
199 |
+@@ -2130,7 +2263,9 @@ void yield(void); |
200 |
extern struct exec_domain default_exec_domain; |
201 |
|
202 |
union thread_union { |
203 |
@@ -82499,7 +82557,7 @@ index ccd0c6f..39c28a4 100644 |
204 |
unsigned long stack[THREAD_SIZE/sizeof(long)]; |
205 |
}; |
206 |
|
207 |
-@@ -2163,6 +2295,7 @@ extern struct pid_namespace init_pid_ns; |
208 |
+@@ -2163,6 +2298,7 @@ extern struct pid_namespace init_pid_ns; |
209 |
*/ |
210 |
|
211 |
extern struct task_struct *find_task_by_vpid(pid_t nr); |
212 |
@@ -82507,7 +82565,7 @@ index ccd0c6f..39c28a4 100644 |
213 |
extern struct task_struct *find_task_by_pid_ns(pid_t nr, |
214 |
struct pid_namespace *ns); |
215 |
|
216 |
-@@ -2325,7 +2458,7 @@ extern void __cleanup_sighand(struct sighand_struct *); |
217 |
+@@ -2325,7 +2461,7 @@ extern void __cleanup_sighand(struct sighand_struct *); |
218 |
extern void exit_itimers(struct signal_struct *); |
219 |
extern void flush_itimer_signals(void); |
220 |
|
221 |
@@ -82516,12 +82574,12 @@ index ccd0c6f..39c28a4 100644 |
222 |
|
223 |
extern int allow_signal(int); |
224 |
extern int disallow_signal(int); |
225 |
-@@ -2526,9 +2659,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) |
226 |
+@@ -2526,9 +2662,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) |
227 |
|
228 |
#endif |
229 |
|
230 |
-static inline int object_is_on_stack(void *obj) |
231 |
-+static inline int object_starts_on_stack(void *obj) |
232 |
++static inline int object_starts_on_stack(const void *obj) |
233 |
{ |
234 |
- void *stack = task_stack_page(current); |
235 |
+ const void *stack = task_stack_page(current); |
236 |
@@ -83470,7 +83528,7 @@ index 502073a..a7de024 100644 |
237 |
#endif |
238 |
#endif /* _LINUX_VGA_SWITCHEROO_H_ */ |
239 |
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h |
240 |
-index 4b8a891..05f2361 100644 |
241 |
+index 4b8a891..e9a2863 100644 |
242 |
--- a/include/linux/vmalloc.h |
243 |
+++ b/include/linux/vmalloc.h |
244 |
@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */ |
245 |
@@ -83485,15 +83543,18 @@ index 4b8a891..05f2361 100644 |
246 |
/* bits [20..32] reserved for arch specific ioremap internals */ |
247 |
|
248 |
/* |
249 |
-@@ -72,6 +77,7 @@ extern void *vzalloc_node(unsigned long size, int node); |
250 |
- extern void *vmalloc_exec(unsigned long size); |
251 |
- extern void *vmalloc_32(unsigned long size); |
252 |
- extern void *vmalloc_32_user(unsigned long size); |
253 |
-+extern void *vmalloc_stack(int node); |
254 |
- extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); |
255 |
- extern void *__vmalloc_node_range(unsigned long size, unsigned long align, |
256 |
- unsigned long start, unsigned long end, gfp_t gfp_mask, |
257 |
-@@ -142,7 +148,7 @@ extern void free_vm_area(struct vm_struct *area); |
258 |
+@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count, |
259 |
+ unsigned long flags, pgprot_t prot); |
260 |
+ extern void vunmap(const void *addr); |
261 |
+ |
262 |
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
263 |
++extern void unmap_process_stacks(struct task_struct *task); |
264 |
++#endif |
265 |
++ |
266 |
+ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, |
267 |
+ unsigned long uaddr, void *kaddr, |
268 |
+ unsigned long size); |
269 |
+@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area); |
270 |
|
271 |
/* for /dev/kmem */ |
272 |
extern long vread(char *buf, char *addr, unsigned long count); |
273 |
@@ -86582,49 +86643,112 @@ index 81b3d67..ef189a4 100644 |
274 |
{ |
275 |
struct signal_struct *sig = current->signal; |
276 |
diff --git a/kernel/fork.c b/kernel/fork.c |
277 |
-index a17621c..d9e4b37 100644 |
278 |
+index a17621c..2a89549 100644 |
279 |
--- a/kernel/fork.c |
280 |
+++ b/kernel/fork.c |
281 |
-@@ -137,6 +137,18 @@ void __weak arch_release_thread_info(struct thread_info *ti) |
282 |
- { |
283 |
- } |
284 |
+@@ -180,6 +180,48 @@ void thread_info_cache_init(void) |
285 |
+ # endif |
286 |
+ #endif |
287 |
|
288 |
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
289 |
-+static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, |
290 |
-+ int node) |
291 |
++static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk, |
292 |
++ int node, void **lowmem_stack) |
293 |
+{ |
294 |
-+ return vmalloc_stack(node); |
295 |
++ struct page *pages[THREAD_SIZE / PAGE_SIZE]; |
296 |
++ void *ret = NULL; |
297 |
++ unsigned int i; |
298 |
++ |
299 |
++ *lowmem_stack = alloc_thread_info_node(tsk, node); |
300 |
++ if (*lowmem_stack == NULL) |
301 |
++ goto out; |
302 |
++ |
303 |
++ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) |
304 |
++ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE)); |
305 |
++ |
306 |
++ /* use VM_IOREMAP to gain THREAD_SIZE alignment */ |
307 |
++ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL); |
308 |
++ if (ret == NULL) { |
309 |
++ free_thread_info(*lowmem_stack); |
310 |
++ *lowmem_stack = NULL; |
311 |
++ } |
312 |
++ |
313 |
++out: |
314 |
++ return ret; |
315 |
+} |
316 |
+ |
317 |
-+static inline void free_thread_info(struct thread_info *ti) |
318 |
++static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti) |
319 |
+{ |
320 |
-+ vfree(ti); |
321 |
++ unmap_process_stacks(tsk); |
322 |
+} |
323 |
+#else |
324 |
- #ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR |
325 |
- |
326 |
- /* |
327 |
-@@ -179,6 +191,7 @@ void thread_info_cache_init(void) |
328 |
- } |
329 |
- # endif |
330 |
- #endif |
331 |
++static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk, |
332 |
++ int node, void **lowmem_stack) |
333 |
++{ |
334 |
++ return alloc_thread_info_node(tsk, node); |
335 |
++} |
336 |
++static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti) |
337 |
++{ |
338 |
++ free_thread_info(ti); |
339 |
++} |
340 |
+#endif |
341 |
- |
342 |
++ |
343 |
/* SLAB cache for signal_struct structures (tsk->signal) */ |
344 |
static struct kmem_cache *signal_cachep; |
345 |
-@@ -200,9 +213,11 @@ static struct kmem_cache *mm_cachep; |
346 |
|
347 |
- static void account_kernel_stack(struct thread_info *ti, int account) |
348 |
+@@ -198,18 +240,22 @@ struct kmem_cache *vm_area_cachep; |
349 |
+ /* SLAB cache for mm_struct structures (tsk->mm) */ |
350 |
+ static struct kmem_cache *mm_cachep; |
351 |
+ |
352 |
+-static void account_kernel_stack(struct thread_info *ti, int account) |
353 |
++static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account) |
354 |
{ |
355 |
-+#ifndef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
356 |
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
357 |
++ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack)); |
358 |
++#else |
359 |
struct zone *zone = page_zone(virt_to_page(ti)); |
360 |
++#endif |
361 |
|
362 |
mod_zone_page_state(zone, NR_KERNEL_STACK, account); |
363 |
-+#endif |
364 |
} |
365 |
|
366 |
void free_task(struct task_struct *tsk) |
367 |
-@@ -319,7 +334,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) |
368 |
+ { |
369 |
+- account_kernel_stack(tsk->stack, -1); |
370 |
++ account_kernel_stack(tsk, tsk->stack, -1); |
371 |
+ arch_release_thread_info(tsk->stack); |
372 |
+- free_thread_info(tsk->stack); |
373 |
++ gr_free_thread_info(tsk, tsk->stack); |
374 |
+ rt_mutex_debug_task_free(tsk); |
375 |
+ ftrace_graph_exit_task(tsk); |
376 |
+ put_seccomp_filter(tsk); |
377 |
+@@ -295,6 +341,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) |
378 |
+ struct task_struct *tsk; |
379 |
+ struct thread_info *ti; |
380 |
+ unsigned long *stackend; |
381 |
++ void *lowmem_stack; |
382 |
+ int node = tsk_fork_get_node(orig); |
383 |
+ int err; |
384 |
+ |
385 |
+@@ -302,7 +349,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) |
386 |
+ if (!tsk) |
387 |
+ return NULL; |
388 |
+ |
389 |
+- ti = alloc_thread_info_node(tsk, node); |
390 |
++ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack); |
391 |
+ if (!ti) |
392 |
+ goto free_tsk; |
393 |
+ |
394 |
+@@ -311,6 +358,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) |
395 |
+ goto free_ti; |
396 |
+ |
397 |
+ tsk->stack = ti; |
398 |
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
399 |
++ tsk->lowmem_stack = lowmem_stack; |
400 |
++#endif |
401 |
+ |
402 |
+ setup_thread_stack(tsk, orig); |
403 |
+ clear_user_return_notifier(tsk); |
404 |
+@@ -319,7 +369,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) |
405 |
*stackend = STACK_END_MAGIC; /* for overflow detection */ |
406 |
|
407 |
#ifdef CONFIG_CC_STACKPROTECTOR |
408 |
@@ -86633,7 +86757,21 @@ index a17621c..d9e4b37 100644 |
409 |
#endif |
410 |
|
411 |
/* |
412 |
-@@ -345,12 +360,80 @@ free_tsk: |
413 |
+@@ -333,24 +383,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) |
414 |
+ tsk->splice_pipe = NULL; |
415 |
+ tsk->task_frag.page = NULL; |
416 |
+ |
417 |
+- account_kernel_stack(ti, 1); |
418 |
++ account_kernel_stack(tsk, ti, 1); |
419 |
+ |
420 |
+ return tsk; |
421 |
+ |
422 |
+ free_ti: |
423 |
+- free_thread_info(ti); |
424 |
++ gr_free_thread_info(tsk, ti); |
425 |
+ free_tsk: |
426 |
+ free_task_struct(tsk); |
427 |
+ return NULL; |
428 |
} |
429 |
|
430 |
#ifdef CONFIG_MMU |
431 |
@@ -86716,7 +86854,7 @@ index a17621c..d9e4b37 100644 |
432 |
|
433 |
uprobe_start_dup_mmap(); |
434 |
down_write(&oldmm->mmap_sem); |
435 |
-@@ -379,55 +462,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
436 |
+@@ -379,55 +497,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
437 |
|
438 |
prev = NULL; |
439 |
for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { |
440 |
@@ -86776,7 +86914,7 @@ index a17621c..d9e4b37 100644 |
441 |
} |
442 |
|
443 |
/* |
444 |
-@@ -459,6 +502,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
445 |
+@@ -459,6 +537,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
446 |
if (retval) |
447 |
goto out; |
448 |
} |
449 |
@@ -86808,7 +86946,7 @@ index a17621c..d9e4b37 100644 |
450 |
/* a new mm has just been created */ |
451 |
arch_dup_mmap(oldmm, mm); |
452 |
retval = 0; |
453 |
-@@ -468,14 +536,6 @@ out: |
454 |
+@@ -468,14 +571,6 @@ out: |
455 |
up_write(&oldmm->mmap_sem); |
456 |
uprobe_end_dup_mmap(); |
457 |
return retval; |
458 |
@@ -86823,7 +86961,7 @@ index a17621c..d9e4b37 100644 |
459 |
} |
460 |
|
461 |
static inline int mm_alloc_pgd(struct mm_struct *mm) |
462 |
-@@ -689,8 +749,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) |
463 |
+@@ -689,8 +784,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) |
464 |
return ERR_PTR(err); |
465 |
|
466 |
mm = get_task_mm(task); |
467 |
@@ -86834,7 +86972,7 @@ index a17621c..d9e4b37 100644 |
468 |
mmput(mm); |
469 |
mm = ERR_PTR(-EACCES); |
470 |
} |
471 |
-@@ -906,13 +966,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) |
472 |
+@@ -906,13 +1001,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) |
473 |
spin_unlock(&fs->lock); |
474 |
return -EAGAIN; |
475 |
} |
476 |
@@ -86856,7 +86994,7 @@ index a17621c..d9e4b37 100644 |
477 |
return 0; |
478 |
} |
479 |
|
480 |
-@@ -1130,7 +1197,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) |
481 |
+@@ -1130,7 +1232,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) |
482 |
* parts of the process environment (as per the clone |
483 |
* flags). The actual kick-off is left to the caller. |
484 |
*/ |
485 |
@@ -86865,7 +87003,7 @@ index a17621c..d9e4b37 100644 |
486 |
unsigned long stack_start, |
487 |
unsigned long stack_size, |
488 |
int __user *child_tidptr, |
489 |
-@@ -1202,6 +1269,9 @@ static struct task_struct *copy_process(unsigned long clone_flags, |
490 |
+@@ -1202,6 +1304,9 @@ static struct task_struct *copy_process(unsigned long clone_flags, |
491 |
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); |
492 |
#endif |
493 |
retval = -EAGAIN; |
494 |
@@ -86875,7 +87013,7 @@ index a17621c..d9e4b37 100644 |
495 |
if (atomic_read(&p->real_cred->user->processes) >= |
496 |
task_rlimit(p, RLIMIT_NPROC)) { |
497 |
if (p->real_cred->user != INIT_USER && |
498 |
-@@ -1449,6 +1519,11 @@ static struct task_struct *copy_process(unsigned long clone_flags, |
499 |
+@@ -1449,6 +1554,11 @@ static struct task_struct *copy_process(unsigned long clone_flags, |
500 |
goto bad_fork_free_pid; |
501 |
} |
502 |
|
503 |
@@ -86887,7 +87025,7 @@ index a17621c..d9e4b37 100644 |
504 |
if (likely(p->pid)) { |
505 |
ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); |
506 |
|
507 |
-@@ -1537,6 +1612,8 @@ bad_fork_cleanup_count: |
508 |
+@@ -1537,6 +1647,8 @@ bad_fork_cleanup_count: |
509 |
bad_fork_free: |
510 |
free_task(p); |
511 |
fork_out: |
512 |
@@ -86896,7 +87034,7 @@ index a17621c..d9e4b37 100644 |
513 |
return ERR_PTR(retval); |
514 |
} |
515 |
|
516 |
-@@ -1598,6 +1675,7 @@ long do_fork(unsigned long clone_flags, |
517 |
+@@ -1598,6 +1710,7 @@ long do_fork(unsigned long clone_flags, |
518 |
|
519 |
p = copy_process(clone_flags, stack_start, stack_size, |
520 |
child_tidptr, NULL, trace); |
521 |
@@ -86904,7 +87042,7 @@ index a17621c..d9e4b37 100644 |
522 |
/* |
523 |
* Do this prior waking up the new thread - the thread pointer |
524 |
* might get invalid after that point, if the thread exits quickly. |
525 |
-@@ -1612,6 +1690,8 @@ long do_fork(unsigned long clone_flags, |
526 |
+@@ -1612,6 +1725,8 @@ long do_fork(unsigned long clone_flags, |
527 |
if (clone_flags & CLONE_PARENT_SETTID) |
528 |
put_user(nr, parent_tidptr); |
529 |
|
530 |
@@ -86913,7 +87051,7 @@ index a17621c..d9e4b37 100644 |
531 |
if (clone_flags & CLONE_VFORK) { |
532 |
p->vfork_done = &vfork; |
533 |
init_completion(&vfork); |
534 |
-@@ -1728,7 +1808,7 @@ void __init proc_caches_init(void) |
535 |
+@@ -1728,7 +1843,7 @@ void __init proc_caches_init(void) |
536 |
mm_cachep = kmem_cache_create("mm_struct", |
537 |
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, |
538 |
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); |
539 |
@@ -86922,7 +87060,7 @@ index a17621c..d9e4b37 100644 |
540 |
mmap_init(); |
541 |
nsproxy_cache_init(); |
542 |
} |
543 |
-@@ -1768,7 +1848,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) |
544 |
+@@ -1768,7 +1883,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) |
545 |
return 0; |
546 |
|
547 |
/* don't need lock here; in the worst case we'll do useless copy */ |
548 |
@@ -86931,7 +87069,7 @@ index a17621c..d9e4b37 100644 |
549 |
return 0; |
550 |
|
551 |
*new_fsp = copy_fs_struct(fs); |
552 |
-@@ -1875,7 +1955,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) |
553 |
+@@ -1875,7 +1990,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) |
554 |
fs = current->fs; |
555 |
spin_lock(&fs->lock); |
556 |
current->fs = new_fs; |
557 |
@@ -97107,10 +97245,65 @@ index a24aa22..a0d41ae 100644 |
558 |
} |
559 |
#endif |
560 |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c |
561 |
-index 0fdf968..2183ba3 100644 |
562 |
+index 0fdf968..f044efb 100644 |
563 |
--- a/mm/vmalloc.c |
564 |
+++ b/mm/vmalloc.c |
565 |
-@@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) |
566 |
+@@ -38,6 +38,21 @@ struct vfree_deferred { |
567 |
+ }; |
568 |
+ static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred); |
569 |
+ |
570 |
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
571 |
++struct stack_deferred_llist { |
572 |
++ struct llist_head list; |
573 |
++ void *stack; |
574 |
++ void *lowmem_stack; |
575 |
++}; |
576 |
++ |
577 |
++struct stack_deferred { |
578 |
++ struct stack_deferred_llist list; |
579 |
++ struct work_struct wq; |
580 |
++}; |
581 |
++ |
582 |
++static DEFINE_PER_CPU(struct stack_deferred, stack_deferred); |
583 |
++#endif |
584 |
++ |
585 |
+ static void __vunmap(const void *, int); |
586 |
+ |
587 |
+ static void free_work(struct work_struct *w) |
588 |
+@@ -45,12 +60,30 @@ static void free_work(struct work_struct *w) |
589 |
+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq); |
590 |
+ struct llist_node *llnode = llist_del_all(&p->list); |
591 |
+ while (llnode) { |
592 |
+- void *p = llnode; |
593 |
++ void *x = llnode; |
594 |
+ llnode = llist_next(llnode); |
595 |
+- __vunmap(p, 1); |
596 |
++ __vunmap(x, 1); |
597 |
+ } |
598 |
+ } |
599 |
+ |
600 |
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
601 |
++static void unmap_work(struct work_struct *w) |
602 |
++{ |
603 |
++ struct stack_deferred *p = container_of(w, struct stack_deferred, wq); |
604 |
++ struct llist_node *llnode = llist_del_all(&p->list.list); |
605 |
++ while (llnode) { |
606 |
++ struct stack_deferred_llist *x = |
607 |
++ llist_entry((struct llist_head *)llnode, |
608 |
++ struct stack_deferred_llist, list); |
609 |
++ void *stack = ACCESS_ONCE(x->stack); |
610 |
++ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack); |
611 |
++ llnode = llist_next(llnode); |
612 |
++ __vunmap(stack, 0); |
613 |
++ free_memcg_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER); |
614 |
++ } |
615 |
++} |
616 |
++#endif |
617 |
++ |
618 |
+ /*** Page table manipulation functions ***/ |
619 |
+ |
620 |
+ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) |
621 |
+@@ -59,8 +92,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) |
622 |
|
623 |
pte = pte_offset_kernel(pmd, addr); |
624 |
do { |
625 |
@@ -97132,7 +97325,7 @@ index 0fdf968..2183ba3 100644 |
626 |
} while (pte++, addr += PAGE_SIZE, addr != end); |
627 |
} |
628 |
|
629 |
-@@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, |
630 |
+@@ -120,16 +164,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, |
631 |
pte = pte_alloc_kernel(pmd, addr); |
632 |
if (!pte) |
633 |
return -ENOMEM; |
634 |
@@ -97164,7 +97357,7 @@ index 0fdf968..2183ba3 100644 |
635 |
return 0; |
636 |
} |
637 |
|
638 |
-@@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, |
639 |
+@@ -139,7 +196,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, |
640 |
pmd_t *pmd; |
641 |
unsigned long next; |
642 |
|
643 |
@@ -97173,7 +97366,7 @@ index 0fdf968..2183ba3 100644 |
644 |
if (!pmd) |
645 |
return -ENOMEM; |
646 |
do { |
647 |
-@@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr, |
648 |
+@@ -156,7 +213,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr, |
649 |
pud_t *pud; |
650 |
unsigned long next; |
651 |
|
652 |
@@ -97182,7 +97375,7 @@ index 0fdf968..2183ba3 100644 |
653 |
if (!pud) |
654 |
return -ENOMEM; |
655 |
do { |
656 |
-@@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x) |
657 |
+@@ -216,6 +273,12 @@ int is_vmalloc_or_module_addr(const void *x) |
658 |
if (addr >= MODULES_VADDR && addr < MODULES_END) |
659 |
return 1; |
660 |
#endif |
661 |
@@ -97195,7 +97388,7 @@ index 0fdf968..2183ba3 100644 |
662 |
return is_vmalloc_addr(x); |
663 |
} |
664 |
|
665 |
-@@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) |
666 |
+@@ -236,8 +299,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) |
667 |
|
668 |
if (!pgd_none(*pgd)) { |
669 |
pud_t *pud = pud_offset(pgd, addr); |
670 |
@@ -97210,7 +97403,31 @@ index 0fdf968..2183ba3 100644 |
671 |
if (!pmd_none(*pmd)) { |
672 |
pte_t *ptep, pte; |
673 |
|
674 |
-@@ -1309,6 +1345,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, |
675 |
+@@ -1175,13 +1244,23 @@ void __init vmalloc_init(void) |
676 |
+ for_each_possible_cpu(i) { |
677 |
+ struct vmap_block_queue *vbq; |
678 |
+ struct vfree_deferred *p; |
679 |
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
680 |
++ struct stack_deferred *p2; |
681 |
++#endif |
682 |
+ |
683 |
+ vbq = &per_cpu(vmap_block_queue, i); |
684 |
+ spin_lock_init(&vbq->lock); |
685 |
+ INIT_LIST_HEAD(&vbq->free); |
686 |
++ |
687 |
+ p = &per_cpu(vfree_deferred, i); |
688 |
+ init_llist_head(&p->list); |
689 |
+ INIT_WORK(&p->wq, free_work); |
690 |
++ |
691 |
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
692 |
++ p2 = &per_cpu(stack_deferred, i); |
693 |
++ init_llist_head(&p2->list.list); |
694 |
++ INIT_WORK(&p2->wq, unmap_work); |
695 |
++#endif |
696 |
+ } |
697 |
+ |
698 |
+ /* Import existing vmlist entries. */ |
699 |
+@@ -1309,6 +1388,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, |
700 |
struct vm_struct *area; |
701 |
|
702 |
BUG_ON(in_interrupt()); |
703 |
@@ -97227,7 +97444,40 @@ index 0fdf968..2183ba3 100644 |
704 |
if (flags & VM_IOREMAP) |
705 |
align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER); |
706 |
|
707 |
-@@ -1534,6 +1580,11 @@ void *vmap(struct page **pages, unsigned int count, |
708 |
+@@ -1503,7 +1592,7 @@ EXPORT_SYMBOL(vfree); |
709 |
+ * Free the virtually contiguous memory area starting at @addr, |
710 |
+ * which was created from the page array passed to vmap(). |
711 |
+ * |
712 |
+- * Must not be called in interrupt context. |
713 |
++ * Must not be called in NMI context. |
714 |
+ */ |
715 |
+ void vunmap(const void *addr) |
716 |
+ { |
717 |
+@@ -1514,6 +1603,23 @@ void vunmap(const void *addr) |
718 |
+ } |
719 |
+ EXPORT_SYMBOL(vunmap); |
720 |
+ |
721 |
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
722 |
++void unmap_process_stacks(struct task_struct *task) |
723 |
++{ |
724 |
++ if (unlikely(in_interrupt())) { |
725 |
++ struct stack_deferred *p = &__get_cpu_var(stack_deferred); |
726 |
++ struct stack_deferred_llist *list = task->stack; |
727 |
++ list->stack = task->stack; |
728 |
++ list->lowmem_stack = task->lowmem_stack; |
729 |
++ if (llist_add((struct llist_node *)&list->list, &p->list.list)) |
730 |
++ schedule_work(&p->wq); |
731 |
++ } else { |
732 |
++ __vunmap(task->stack, 0); |
733 |
++ free_memcg_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER); |
734 |
++ } |
735 |
++} |
736 |
++#endif |
737 |
++ |
738 |
+ /** |
739 |
+ * vmap - map an array of pages into virtually contiguous space |
740 |
+ * @pages: array of page pointers |
741 |
+@@ -1534,6 +1640,11 @@ void *vmap(struct page **pages, unsigned int count, |
742 |
if (count > totalram_pages) |
743 |
return NULL; |
744 |
|
745 |
@@ -97239,7 +97489,7 @@ index 0fdf968..2183ba3 100644 |
746 |
area = get_vm_area_caller((count << PAGE_SHIFT), flags, |
747 |
__builtin_return_address(0)); |
748 |
if (!area) |
749 |
-@@ -1634,6 +1685,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, |
750 |
+@@ -1634,6 +1745,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, |
751 |
if (!size || (size >> PAGE_SHIFT) > totalram_pages) |
752 |
goto fail; |
753 |
|
754 |
@@ -97253,20 +97503,7 @@ index 0fdf968..2183ba3 100644 |
755 |
area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED, |
756 |
start, end, node, gfp_mask, caller); |
757 |
if (!area) |
758 |
-@@ -1701,6 +1759,12 @@ static inline void *__vmalloc_node_flags(unsigned long size, |
759 |
- node, __builtin_return_address(0)); |
760 |
- } |
761 |
- |
762 |
-+void *vmalloc_stack(int node) |
763 |
-+{ |
764 |
-+ return __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP, PAGE_KERNEL, |
765 |
-+ node, __builtin_return_address(0)); |
766 |
-+} |
767 |
-+ |
768 |
- /** |
769 |
- * vmalloc - allocate virtually contiguous memory |
770 |
- * @size: allocation size |
771 |
-@@ -1810,10 +1874,9 @@ EXPORT_SYMBOL(vzalloc_node); |
772 |
+@@ -1810,10 +1928,9 @@ EXPORT_SYMBOL(vzalloc_node); |
773 |
* For tight control over page level allocator and protection flags |
774 |
* use __vmalloc() instead. |
775 |
*/ |
776 |
@@ -97278,7 +97515,7 @@ index 0fdf968..2183ba3 100644 |
777 |
NUMA_NO_NODE, __builtin_return_address(0)); |
778 |
} |
779 |
|
780 |
-@@ -2120,6 +2183,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, |
781 |
+@@ -2120,6 +2237,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, |
782 |
{ |
783 |
struct vm_struct *area; |
784 |
|
785 |
@@ -97287,7 +97524,7 @@ index 0fdf968..2183ba3 100644 |
786 |
size = PAGE_ALIGN(size); |
787 |
|
788 |
if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) |
789 |
-@@ -2602,7 +2667,11 @@ static int s_show(struct seq_file *m, void *p) |
790 |
+@@ -2602,7 +2721,11 @@ static int s_show(struct seq_file *m, void *p) |
791 |
v->addr, v->addr + v->size, v->size); |
792 |
|
793 |
if (v->caller) |
794 |
|
795 |
diff --git a/3.2.60/0000_README b/3.2.60/0000_README |
796 |
index b5b1f29..e364d06 100644 |
797 |
--- a/3.2.60/0000_README |
798 |
+++ b/3.2.60/0000_README |
799 |
@@ -158,7 +158,7 @@ Patch: 1059_linux-3.2.60.patch |
800 |
From: http://www.kernel.org |
801 |
Desc: Linux 3.2.60 |
802 |
|
803 |
-Patch: 4420_grsecurity-3.0-3.2.60-201406191345.patch |
804 |
+Patch: 4420_grsecurity-3.0-3.2.60-201406220130.patch |
805 |
From: http://www.grsecurity.net |
806 |
Desc: hardened-sources base patch from upstream grsecurity |
807 |
|
808 |
|
809 |
diff --git a/3.2.60/4420_grsecurity-3.0-3.2.60-201406191345.patch b/3.2.60/4420_grsecurity-3.0-3.2.60-201406220130.patch |
810 |
similarity index 99% |
811 |
rename from 3.2.60/4420_grsecurity-3.0-3.2.60-201406191345.patch |
812 |
rename to 3.2.60/4420_grsecurity-3.0-3.2.60-201406220130.patch |
813 |
index 9f3ccfb..d3c1096 100644 |
814 |
--- a/3.2.60/4420_grsecurity-3.0-3.2.60-201406191345.patch |
815 |
+++ b/3.2.60/4420_grsecurity-3.0-3.2.60-201406220130.patch |
816 |
@@ -14240,6 +14240,33 @@ index 9eae775..c914fea 100644 |
817 |
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF |
818 |
+ |
819 |
#endif /* _ASM_X86_MODULE_H */ |
820 |
+diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h |
821 |
+index 8ca8283..8dc71fa 100644 |
822 |
+--- a/arch/x86/include/asm/page.h |
823 |
++++ b/arch/x86/include/asm/page.h |
824 |
+@@ -55,11 +55,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr, |
825 |
+ * virt_to_page(kaddr) returns a valid pointer if and only if |
826 |
+ * virt_addr_valid(kaddr) returns true. |
827 |
+ */ |
828 |
+-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
829 |
+ #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
830 |
+ extern bool __virt_addr_valid(unsigned long kaddr); |
831 |
+ #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr)) |
832 |
+ |
833 |
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
834 |
++#define virt_to_page(kaddr) \ |
835 |
++ ({ \ |
836 |
++ const void *__kaddr = (const void *)(kaddr); \ |
837 |
++ BUG_ON(!virt_addr_valid(__kaddr)); \ |
838 |
++ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \ |
839 |
++ }) |
840 |
++#else |
841 |
++#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
842 |
++#endif |
843 |
++ |
844 |
+ #endif /* __ASSEMBLY__ */ |
845 |
+ |
846 |
+ #include <asm-generic/memory_model.h> |
847 |
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h |
848 |
index 7639dbf..9dc5a94 100644 |
849 |
--- a/arch/x86/include/asm/page_64_types.h |
850 |
@@ -81426,8 +81453,33 @@ index 2148b12..519b820 100644 |
851 |
void __anon_vma_link(struct vm_area_struct *); |
852 |
|
853 |
static inline void anon_vma_merge(struct vm_area_struct *vma, |
854 |
+diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h |
855 |
+index 9aaf5bf..d5ee2a5 100644 |
856 |
+--- a/include/linux/scatterlist.h |
857 |
++++ b/include/linux/scatterlist.h |
858 |
+@@ -3,6 +3,7 @@ |
859 |
+ |
860 |
+ #include <asm/types.h> |
861 |
+ #include <asm/scatterlist.h> |
862 |
++#include <linux/sched.h> |
863 |
+ #include <linux/mm.h> |
864 |
+ #include <linux/string.h> |
865 |
+ #include <asm/io.h> |
866 |
+@@ -109,6 +110,12 @@ static inline struct page *sg_page(struct scatterlist *sg) |
867 |
+ static inline void sg_set_buf(struct scatterlist *sg, const void *buf, |
868 |
+ unsigned int buflen) |
869 |
+ { |
870 |
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
871 |
++ if (object_starts_on_stack(buf)) { |
872 |
++ void *adjbuf = buf - current->stack + current->lowmem_stack; |
873 |
++ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf)); |
874 |
++ } else |
875 |
++#endif |
876 |
+ sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); |
877 |
+ } |
878 |
+ |
879 |
diff --git a/include/linux/sched.h b/include/linux/sched.h |
880 |
-index cb34ff4..1d75f44 100644 |
881 |
+index cb34ff4..df196d4 100644 |
882 |
--- a/include/linux/sched.h |
883 |
+++ b/include/linux/sched.h |
884 |
@@ -101,6 +101,7 @@ struct bio_list; |
885 |
@@ -81543,7 +81595,17 @@ index cb34ff4..1d75f44 100644 |
886 |
|
887 |
struct load_weight { |
888 |
unsigned long weight, inv_weight; |
889 |
-@@ -1306,6 +1344,8 @@ struct task_struct { |
890 |
+@@ -1226,6 +1264,9 @@ enum perf_event_task_context { |
891 |
+ struct task_struct { |
892 |
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ |
893 |
+ void *stack; |
894 |
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
895 |
++ void *lowmem_stack; |
896 |
++#endif |
897 |
+ atomic_t usage; |
898 |
+ unsigned int flags; /* per process flags, defined below */ |
899 |
+ unsigned int ptrace; |
900 |
+@@ -1306,6 +1347,8 @@ struct task_struct { |
901 |
* execve */ |
902 |
unsigned in_iowait:1; |
903 |
|
904 |
@@ -81552,7 +81614,7 @@ index cb34ff4..1d75f44 100644 |
905 |
|
906 |
/* Revert to default priority/policy when forking */ |
907 |
unsigned sched_reset_on_fork:1; |
908 |
-@@ -1346,8 +1386,8 @@ struct task_struct { |
909 |
+@@ -1346,8 +1389,8 @@ struct task_struct { |
910 |
struct list_head thread_group; |
911 |
|
912 |
struct completion *vfork_done; /* for vfork() */ |
913 |
@@ -81563,7 +81625,7 @@ index cb34ff4..1d75f44 100644 |
914 |
|
915 |
cputime_t utime, stime, utimescaled, stimescaled; |
916 |
cputime_t gtime; |
917 |
-@@ -1363,13 +1403,6 @@ struct task_struct { |
918 |
+@@ -1363,13 +1406,6 @@ struct task_struct { |
919 |
struct task_cputime cputime_expires; |
920 |
struct list_head cpu_timers[3]; |
921 |
|
922 |
@@ -81577,7 +81639,7 @@ index cb34ff4..1d75f44 100644 |
923 |
char comm[TASK_COMM_LEN]; /* executable name excluding path |
924 |
- access with [gs]et_task_comm (which lock |
925 |
it with task_lock()) |
926 |
-@@ -1386,8 +1419,16 @@ struct task_struct { |
927 |
+@@ -1386,8 +1422,16 @@ struct task_struct { |
928 |
#endif |
929 |
/* CPU-specific state of this task */ |
930 |
struct thread_struct thread; |
931 |
@@ -81594,7 +81656,7 @@ index cb34ff4..1d75f44 100644 |
932 |
/* open file information */ |
933 |
struct files_struct *files; |
934 |
/* namespaces */ |
935 |
-@@ -1410,7 +1451,7 @@ struct task_struct { |
936 |
+@@ -1410,7 +1454,7 @@ struct task_struct { |
937 |
uid_t loginuid; |
938 |
unsigned int sessionid; |
939 |
#endif |
940 |
@@ -81603,7 +81665,7 @@ index cb34ff4..1d75f44 100644 |
941 |
|
942 |
/* Thread group tracking */ |
943 |
u32 parent_exec_id; |
944 |
-@@ -1434,6 +1475,11 @@ struct task_struct { |
945 |
+@@ -1434,6 +1478,11 @@ struct task_struct { |
946 |
struct rt_mutex_waiter *pi_blocked_on; |
947 |
#endif |
948 |
|
949 |
@@ -81615,7 +81677,7 @@ index cb34ff4..1d75f44 100644 |
950 |
#ifdef CONFIG_DEBUG_MUTEXES |
951 |
/* mutex deadlock detection */ |
952 |
struct mutex_waiter *blocked_on; |
953 |
-@@ -1549,6 +1595,30 @@ struct task_struct { |
954 |
+@@ -1549,6 +1598,30 @@ struct task_struct { |
955 |
unsigned long default_timer_slack_ns; |
956 |
|
957 |
struct list_head *scm_work_list; |
958 |
@@ -81646,7 +81708,7 @@ index cb34ff4..1d75f44 100644 |
959 |
#ifdef CONFIG_FUNCTION_GRAPH_TRACER |
960 |
/* Index of current stored address in ret_stack */ |
961 |
int curr_ret_stack; |
962 |
-@@ -1581,7 +1651,54 @@ struct task_struct { |
963 |
+@@ -1581,7 +1654,54 @@ struct task_struct { |
964 |
#ifdef CONFIG_HAVE_HW_BREAKPOINT |
965 |
atomic_t ptrace_bp_refcnt; |
966 |
#endif |
967 |
@@ -81702,7 +81764,7 @@ index cb34ff4..1d75f44 100644 |
968 |
|
969 |
/* Future-safe accessor for struct task_struct's cpus_allowed. */ |
970 |
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) |
971 |
-@@ -1689,8 +1806,19 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk) |
972 |
+@@ -1689,8 +1809,19 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk) |
973 |
return pid_vnr(task_tgid(tsk)); |
974 |
} |
975 |
|
976 |
@@ -81723,7 +81785,7 @@ index cb34ff4..1d75f44 100644 |
977 |
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) |
978 |
{ |
979 |
pid_t pid = 0; |
980 |
-@@ -1738,19 +1866,6 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk) |
981 |
+@@ -1738,19 +1869,6 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk) |
982 |
} |
983 |
|
984 |
/** |
985 |
@@ -81743,7 +81805,7 @@ index cb34ff4..1d75f44 100644 |
986 |
* is_global_init - check if a task structure is init |
987 |
* @tsk: Task structure to be checked. |
988 |
* |
989 |
-@@ -1953,6 +2068,25 @@ extern u64 sched_clock_cpu(int cpu); |
990 |
+@@ -1953,6 +2071,25 @@ extern u64 sched_clock_cpu(int cpu); |
991 |
|
992 |
extern void sched_clock_init(void); |
993 |
|
994 |
@@ -81769,7 +81831,7 @@ index cb34ff4..1d75f44 100644 |
995 |
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK |
996 |
static inline void sched_clock_tick(void) |
997 |
{ |
998 |
-@@ -2116,7 +2250,9 @@ void yield(void); |
999 |
+@@ -2116,7 +2253,9 @@ void yield(void); |
1000 |
extern struct exec_domain default_exec_domain; |
1001 |
|
1002 |
union thread_union { |
1003 |
@@ -81779,7 +81841,7 @@ index cb34ff4..1d75f44 100644 |
1004 |
unsigned long stack[THREAD_SIZE/sizeof(long)]; |
1005 |
}; |
1006 |
|
1007 |
-@@ -2149,6 +2285,7 @@ extern struct pid_namespace init_pid_ns; |
1008 |
+@@ -2149,6 +2288,7 @@ extern struct pid_namespace init_pid_ns; |
1009 |
*/ |
1010 |
|
1011 |
extern struct task_struct *find_task_by_vpid(pid_t nr); |
1012 |
@@ -81787,7 +81849,7 @@ index cb34ff4..1d75f44 100644 |
1013 |
extern struct task_struct *find_task_by_pid_ns(pid_t nr, |
1014 |
struct pid_namespace *ns); |
1015 |
|
1016 |
-@@ -2270,6 +2407,12 @@ static inline void mmdrop(struct mm_struct * mm) |
1017 |
+@@ -2270,6 +2410,12 @@ static inline void mmdrop(struct mm_struct * mm) |
1018 |
extern void mmput(struct mm_struct *); |
1019 |
/* Grab a reference to a task's mm, if it is not already going away */ |
1020 |
extern struct mm_struct *get_task_mm(struct task_struct *task); |
1021 |
@@ -81800,7 +81862,7 @@ index cb34ff4..1d75f44 100644 |
1022 |
/* Remove the current tasks stale references to the old mm_struct */ |
1023 |
extern void mm_release(struct task_struct *, struct mm_struct *); |
1024 |
/* Allocate a new mm structure and copy contents from tsk->mm */ |
1025 |
-@@ -2286,9 +2429,8 @@ extern void __cleanup_sighand(struct sighand_struct *); |
1026 |
+@@ -2286,9 +2432,8 @@ extern void __cleanup_sighand(struct sighand_struct *); |
1027 |
extern void exit_itimers(struct signal_struct *); |
1028 |
extern void flush_itimer_signals(void); |
1029 |
|
1030 |
@@ -81811,12 +81873,12 @@ index cb34ff4..1d75f44 100644 |
1031 |
extern int allow_signal(int); |
1032 |
extern int disallow_signal(int); |
1033 |
|
1034 |
-@@ -2451,9 +2593,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) |
1035 |
+@@ -2451,9 +2596,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) |
1036 |
|
1037 |
#endif |
1038 |
|
1039 |
-static inline int object_is_on_stack(void *obj) |
1040 |
-+static inline int object_starts_on_stack(void *obj) |
1041 |
++static inline int object_starts_on_stack(const void *obj) |
1042 |
{ |
1043 |
- void *stack = task_stack_page(current); |
1044 |
+ const void *stack = task_stack_page(current); |
1045 |
@@ -83225,7 +83287,7 @@ index 0000000..d6b4440 |
1046 |
+ |
1047 |
+#endif /* _LINUX_VIRTIO_SCSI_H */ |
1048 |
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h |
1049 |
-index 4bde182..d19c720 100644 |
1050 |
+index 4bde182..1eb2c43 100644 |
1051 |
--- a/include/linux/vmalloc.h |
1052 |
+++ b/include/linux/vmalloc.h |
1053 |
@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */ |
1054 |
@@ -83240,15 +83302,18 @@ index 4bde182..d19c720 100644 |
1055 |
/* bits [20..32] reserved for arch specific ioremap internals */ |
1056 |
|
1057 |
/* |
1058 |
-@@ -59,6 +64,7 @@ extern void *vzalloc_node(unsigned long size, int node); |
1059 |
- extern void *vmalloc_exec(unsigned long size); |
1060 |
- extern void *vmalloc_32(unsigned long size); |
1061 |
- extern void *vmalloc_32_user(unsigned long size); |
1062 |
-+extern void *vmalloc_stack(int node); |
1063 |
- extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); |
1064 |
- extern void *__vmalloc_node_range(unsigned long size, unsigned long align, |
1065 |
- unsigned long start, unsigned long end, gfp_t gfp_mask, |
1066 |
-@@ -124,7 +130,7 @@ extern void free_vm_area(struct vm_struct *area); |
1067 |
+@@ -69,6 +74,10 @@ extern void *vmap(struct page **pages, unsigned int count, |
1068 |
+ unsigned long flags, pgprot_t prot); |
1069 |
+ extern void vunmap(const void *addr); |
1070 |
+ |
1071 |
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
1072 |
++extern void unmap_process_stacks(struct task_struct *task); |
1073 |
++#endif |
1074 |
++ |
1075 |
+ extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, |
1076 |
+ unsigned long pgoff); |
1077 |
+ void vmalloc_sync_all(void); |
1078 |
+@@ -124,7 +133,7 @@ extern void free_vm_area(struct vm_struct *area); |
1079 |
|
1080 |
/* for /dev/kmem */ |
1081 |
extern long vread(char *buf, char *addr, unsigned long count); |
1082 |
@@ -86487,7 +86552,7 @@ index fde15f9..99f1b97 100644 |
1083 |
{ |
1084 |
struct signal_struct *sig = current->signal; |
1085 |
diff --git a/kernel/fork.c b/kernel/fork.c |
1086 |
-index ce0c182..b8e5b18 100644 |
1087 |
+index ce0c182..62b0c37 100644 |
1088 |
--- a/kernel/fork.c |
1089 |
+++ b/kernel/fork.c |
1090 |
@@ -34,6 +34,7 @@ |
1091 |
@@ -86498,29 +86563,48 @@ index ce0c182..b8e5b18 100644 |
1092 |
#include <linux/swap.h> |
1093 |
#include <linux/syscalls.h> |
1094 |
#include <linux/jiffies.h> |
1095 |
-@@ -137,6 +138,30 @@ static inline void free_thread_info(struct thread_info *ti) |
1096 |
+@@ -137,6 +138,49 @@ static inline void free_thread_info(struct thread_info *ti) |
1097 |
} |
1098 |
#endif |
1099 |
|
1100 |
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
1101 |
+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk, |
1102 |
-+ int node) |
1103 |
++ int node, void **lowmem_stack) |
1104 |
+{ |
1105 |
-+ return vmalloc_stack(node); |
1106 |
++ struct page *pages[THREAD_SIZE / PAGE_SIZE]; |
1107 |
++ void *ret = NULL; |
1108 |
++ unsigned int i; |
1109 |
++ |
1110 |
++ *lowmem_stack = alloc_thread_info_node(tsk, node); |
1111 |
++ if (*lowmem_stack == NULL) |
1112 |
++ goto out; |
1113 |
++ |
1114 |
++ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) |
1115 |
++ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE)); |
1116 |
++ |
1117 |
++ /* use VM_IOREMAP to gain THREAD_SIZE alignment */ |
1118 |
++ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL); |
1119 |
++ if (ret == NULL) { |
1120 |
++ free_thread_info(*lowmem_stack); |
1121 |
++ *lowmem_stack = NULL; |
1122 |
++ } |
1123 |
++ |
1124 |
++out: |
1125 |
++ return ret; |
1126 |
+} |
1127 |
+ |
1128 |
-+static inline void gr_free_thread_info(struct thread_info *ti) |
1129 |
++static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti) |
1130 |
+{ |
1131 |
-+ vfree(ti); |
1132 |
++ unmap_process_stacks(tsk); |
1133 |
+} |
1134 |
+#else |
1135 |
+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk, |
1136 |
-+ int node) |
1137 |
++ int node, void **lowmem_stack) |
1138 |
+{ |
1139 |
+ return alloc_thread_info_node(tsk, node); |
1140 |
+} |
1141 |
+ |
1142 |
-+static inline void gr_free_thread_info(struct thread_info *ti) |
1143 |
++static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti) |
1144 |
+{ |
1145 |
+ free_thread_info(ti); |
1146 |
+} |
1147 |
@@ -86529,34 +86613,48 @@ index ce0c182..b8e5b18 100644 |
1148 |
/* SLAB cache for signal_struct structures (tsk->signal) */ |
1149 |
static struct kmem_cache *signal_cachep; |
1150 |
|
1151 |
-@@ -157,17 +182,20 @@ static struct kmem_cache *mm_cachep; |
1152 |
+@@ -155,19 +199,24 @@ struct kmem_cache *vm_area_cachep; |
1153 |
+ /* SLAB cache for mm_struct structures (tsk->mm) */ |
1154 |
+ static struct kmem_cache *mm_cachep; |
1155 |
|
1156 |
- static void account_kernel_stack(struct thread_info *ti, int account) |
1157 |
+-static void account_kernel_stack(struct thread_info *ti, int account) |
1158 |
++static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account) |
1159 |
{ |
1160 |
-+#ifndef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
1161 |
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
1162 |
++ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack)); |
1163 |
++#else |
1164 |
struct zone *zone = page_zone(virt_to_page(ti)); |
1165 |
++#endif |
1166 |
|
1167 |
mod_zone_page_state(zone, NR_KERNEL_STACK, account); |
1168 |
-+#endif |
1169 |
} |
1170 |
|
1171 |
void free_task(struct task_struct *tsk) |
1172 |
{ |
1173 |
- account_kernel_stack(tsk->stack, -1); |
1174 |
+- account_kernel_stack(tsk->stack, -1); |
1175 |
- free_thread_info(tsk->stack); |
1176 |
-+ gr_free_thread_info(tsk->stack); |
1177 |
++ account_kernel_stack(tsk, tsk->stack, -1); |
1178 |
++ gr_free_thread_info(tsk, tsk->stack); |
1179 |
rt_mutex_debug_task_free(tsk); |
1180 |
ftrace_graph_exit_task(tsk); |
1181 |
+ put_seccomp_filter(tsk); |
1182 |
free_task_struct(tsk); |
1183 |
} |
1184 |
EXPORT_SYMBOL(free_task); |
1185 |
-@@ -263,26 +291,31 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) |
1186 |
+@@ -254,6 +303,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) |
1187 |
+ struct task_struct *tsk; |
1188 |
+ struct thread_info *ti; |
1189 |
+ unsigned long *stackend; |
1190 |
++ void *lowmem_stack; |
1191 |
+ int node = tsk_fork_get_node(orig); |
1192 |
+ int err; |
1193 |
+ |
1194 |
+@@ -263,26 +313,34 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) |
1195 |
if (!tsk) |
1196 |
return NULL; |
1197 |
|
1198 |
- ti = alloc_thread_info_node(tsk, node); |
1199 |
-+ ti = gr_alloc_thread_info_node(tsk, node); |
1200 |
++ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack); |
1201 |
if (!ti) { |
1202 |
free_task_struct(tsk); |
1203 |
return NULL; |
1204 |
@@ -86572,6 +86670,9 @@ index ce0c182..b8e5b18 100644 |
1205 |
+ */ |
1206 |
tsk->stack = ti; |
1207 |
- |
1208 |
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW |
1209 |
++ tsk->lowmem_stack = lowmem_stack; |
1210 |
++#endif |
1211 |
setup_thread_stack(tsk, orig); |
1212 |
+ |
1213 |
+ if (err) |
1214 |
@@ -86588,12 +86689,18 @@ index ce0c182..b8e5b18 100644 |
1215 |
#endif |
1216 |
|
1217 |
/* |
1218 |
-@@ -300,19 +333,84 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) |
1219 |
+@@ -295,24 +353,89 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) |
1220 |
+ #endif |
1221 |
+ tsk->splice_pipe = NULL; |
1222 |
+ |
1223 |
+- account_kernel_stack(ti, 1); |
1224 |
++ account_kernel_stack(tsk, ti, 1); |
1225 |
+ |
1226 |
return tsk; |
1227 |
|
1228 |
out: |
1229 |
- free_thread_info(ti); |
1230 |
-+ gr_free_thread_info(ti); |
1231 |
++ gr_free_thread_info(tsk, ti); |
1232 |
free_task_struct(tsk); |
1233 |
return NULL; |
1234 |
} |
1235 |
@@ -86678,7 +86785,7 @@ index ce0c182..b8e5b18 100644 |
1236 |
|
1237 |
down_write(&oldmm->mmap_sem); |
1238 |
flush_cache_dup_mm(oldmm); |
1239 |
-@@ -324,8 +422,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
1240 |
+@@ -324,8 +447,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
1241 |
mm->locked_vm = 0; |
1242 |
mm->mmap = NULL; |
1243 |
mm->mmap_cache = NULL; |
1244 |
@@ -86689,7 +86796,7 @@ index ce0c182..b8e5b18 100644 |
1245 |
mm->map_count = 0; |
1246 |
cpumask_clear(mm_cpumask(mm)); |
1247 |
mm->mm_rb = RB_ROOT; |
1248 |
-@@ -341,63 +439,16 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
1249 |
+@@ -341,63 +464,16 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
1250 |
|
1251 |
prev = NULL; |
1252 |
for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { |
1253 |
@@ -86758,7 +86865,7 @@ index ce0c182..b8e5b18 100644 |
1254 |
|
1255 |
/* |
1256 |
* Link in the new vma and copy the page table entries. |
1257 |
-@@ -420,6 +471,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
1258 |
+@@ -420,6 +496,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
1259 |
if (retval) |
1260 |
goto out; |
1261 |
} |
1262 |
@@ -86790,7 +86897,7 @@ index ce0c182..b8e5b18 100644 |
1263 |
/* a new mm has just been created */ |
1264 |
arch_dup_mmap(oldmm, mm); |
1265 |
retval = 0; |
1266 |
-@@ -428,14 +504,6 @@ out: |
1267 |
+@@ -428,14 +529,6 @@ out: |
1268 |
flush_tlb_mm(oldmm); |
1269 |
up_write(&oldmm->mmap_sem); |
1270 |
return retval; |
1271 |
@@ -86805,7 +86912,7 @@ index ce0c182..b8e5b18 100644 |
1272 |
} |
1273 |
|
1274 |
static inline int mm_alloc_pgd(struct mm_struct *mm) |
1275 |
-@@ -647,6 +715,26 @@ struct mm_struct *get_task_mm(struct task_struct *task) |
1276 |
+@@ -647,6 +740,26 @@ struct mm_struct *get_task_mm(struct task_struct *task) |
1277 |
} |
1278 |
EXPORT_SYMBOL_GPL(get_task_mm); |
1279 |
|
1280 |
@@ -86832,7 +86939,7 @@ index ce0c182..b8e5b18 100644 |
1281 |
/* Please note the differences between mmput and mm_release. |
1282 |
* mmput is called whenever we stop holding onto a mm_struct, |
1283 |
* error success whatever. |
1284 |
-@@ -832,13 +920,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) |
1285 |
+@@ -832,13 +945,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) |
1286 |
spin_unlock(&fs->lock); |
1287 |
return -EAGAIN; |
1288 |
} |
1289 |
@@ -86854,7 +86961,7 @@ index ce0c182..b8e5b18 100644 |
1290 |
return 0; |
1291 |
} |
1292 |
|
1293 |
-@@ -1047,7 +1142,7 @@ static void posix_cpu_timers_init(struct task_struct *tsk) |
1294 |
+@@ -1047,7 +1167,7 @@ static void posix_cpu_timers_init(struct task_struct *tsk) |
1295 |
* parts of the process environment (as per the clone |
1296 |
* flags). The actual kick-off is left to the caller. |
1297 |
*/ |
1298 |
@@ -86863,7 +86970,7 @@ index ce0c182..b8e5b18 100644 |
1299 |
unsigned long stack_start, |
1300 |
struct pt_regs *regs, |
1301 |
unsigned long stack_size, |
1302 |
-@@ -1096,6 +1191,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, |
1303 |
+@@ -1096,6 +1216,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, |
1304 |
goto fork_out; |
1305 |
|
1306 |
ftrace_graph_init_task(p); |
1307 |
@@ -86871,7 +86978,7 @@ index ce0c182..b8e5b18 100644 |
1308 |
|
1309 |
rt_mutex_init_task(p); |
1310 |
|
1311 |
-@@ -1104,10 +1200,13 @@ static struct task_struct *copy_process(unsigned long clone_flags, |
1312 |
+@@ -1104,10 +1225,13 @@ static struct task_struct *copy_process(unsigned long clone_flags, |
1313 |
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); |
1314 |
#endif |
1315 |
retval = -EAGAIN; |
1316 |
@@ -86887,7 +86994,7 @@ index ce0c182..b8e5b18 100644 |
1317 |
goto bad_fork_free; |
1318 |
} |
1319 |
current->flags &= ~PF_NPROC_EXCEEDED; |
1320 |
-@@ -1341,6 +1440,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1341,6 +1465,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_free_pid;
}

@@ -86899,7 +87006,7 @@ index ce0c182..b8e5b18 100644
if (clone_flags & CLONE_THREAD) {
current->signal->nr_threads++;
atomic_inc(&current->signal->live);
-@@ -1421,6 +1525,8 @@ bad_fork_cleanup_count:
+@@ -1421,6 +1550,8 @@ bad_fork_cleanup_count:
bad_fork_free:
free_task(p);
fork_out:
@@ -86908,7 +87015,7 @@ index ce0c182..b8e5b18 100644
return ERR_PTR(retval);
}

-@@ -1507,6 +1613,7 @@ long do_fork(unsigned long clone_flags,
+@@ -1507,6 +1638,7 @@ long do_fork(unsigned long clone_flags,

p = copy_process(clone_flags, stack_start, regs, stack_size,
child_tidptr, NULL, trace);
@@ -86916,7 +87023,7 @@ index ce0c182..b8e5b18 100644
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
-@@ -1521,6 +1628,8 @@ long do_fork(unsigned long clone_flags,
+@@ -1521,6 +1653,8 @@ long do_fork(unsigned long clone_flags,
if (clone_flags & CLONE_PARENT_SETTID)
put_user(nr, parent_tidptr);

@@ -86925,7 +87032,7 @@ index ce0c182..b8e5b18 100644
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
-@@ -1591,7 +1700,7 @@ void __init proc_caches_init(void)
+@@ -1591,7 +1725,7 @@ void __init proc_caches_init(void)
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
@@ -86934,7 +87041,7 @@ index ce0c182..b8e5b18 100644
mmap_init();
nsproxy_cache_init();
}
-@@ -1630,7 +1739,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+@@ -1630,7 +1764,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
return 0;

/* don't need lock here; in the worst case we'll do useless copy */
@@ -86943,7 +87050,7 @@ index ce0c182..b8e5b18 100644
return 0;

*new_fsp = copy_fs_struct(fs);
-@@ -1719,7 +1828,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+@@ -1719,7 +1853,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
fs = current->fs;
spin_lock(&fs->lock);
current->fs = new_fs;
@@ -98412,10 +98519,10 @@ index 136ac4f..f917fa9 100644
mm->unmap_area = arch_unmap_area;
}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index eeba3bb..0c8633f 100644
+index eeba3bb..2aaad6e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
-@@ -27,10 +27,30 @@
+@@ -27,10 +27,67 @@
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
@@ -98430,6 +98537,21 @@ index eeba3bb..0c8633f 100644
+};
+static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
+
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++struct stack_deferred_llist {
++ struct llist_head list;
++ void *stack;
++ void *lowmem_stack;
++};
++
++struct stack_deferred {
++ struct stack_deferred_llist list;
++ struct work_struct wq;
++};
++
++static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
++#endif
++
+static void __vunmap(const void *, int);
+
+static void free_work(struct work_struct *w)
@@ -98437,16 +98559,38 @@ index eeba3bb..0c8633f 100644
+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
+ struct llist_node *llnode = llist_del_all(&p->list);
+ while (llnode) {
-+ void *p = llnode;
++ void *x = llnode;
+ llnode = llist_next(llnode);
-+ __vunmap(p, 1);
++ __vunmap(x, 1);
+ }
+}
+
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++static void unmap_work(struct work_struct *w)
++{
++ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
++ struct llist_node *llnode = llist_del_all(&p->list.list);
++ while (llnode) {
++ struct stack_deferred_llist *x =
++ llist_entry((struct llist_head *)llnode,
++ struct stack_deferred_llist, list);
++ void *stack = ACCESS_ONCE(x->stack);
++ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
++ llnode = llist_next(llnode);
++ __vunmap(stack, 0);
++#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
++ free_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
++#else
++ free_thread_info(lowmem_stack);
++#endif
++ }
++}
++#endif
++
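
The free_work()/unmap_work() pair added above follows the kernel's lock-free llist discipline: producers in atomic context push nodes with llist_add(), and the workqueue handler detaches the entire chain in one atomic operation with llist_del_all() before walking it at leisure. A minimal userspace sketch of that push/detach pattern, using C11 atomics in place of the llist primitives; all names here are hypothetical, not from the patch.

/* Userspace sketch of the llist_add()/llist_del_all() pattern used by
 * free_work()/unmap_work().  "defer_free" plays the workqueue handler. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	void *payload;
};

static _Atomic(struct node *) deferred_head = NULL;

/* llist_add() analogue: lock-free push, safe from "interrupt" context.
 * Returns 1 if the list was empty, i.e. the caller should kick the worker. */
static int defer_add(struct node *n)
{
	struct node *old = atomic_load(&deferred_head);
	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&deferred_head, &old, n));
	return old == NULL;
}

/* llist_del_all() + worker body: detach the whole chain atomically,
 * then free each entry outside the hot path. */
static void defer_drain(void)
{
	struct node *n = atomic_exchange(&deferred_head, NULL);
	while (n) {
		struct node *next = n->next;
		free(n->payload);       /* stands in for __vunmap() */
		free(n);
		n = next;
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof(*n));
		n->payload = malloc(64);
		if (defer_add(n))
			printf("first entry queued; would schedule_work() here\n");
	}
	defer_drain();                  /* the scheduled worker runs later */
	return 0;
}
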
/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
-@@ -39,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
+@@ -39,8 +96,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)

pte = pte_offset_kernel(pmd, addr);
do {
@@ -98468,7 +98612,7 @@ index eeba3bb..0c8633f 100644
} while (pte++, addr += PAGE_SIZE, addr != end);
}

-@@ -100,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
+@@ -100,16 +168,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
pte = pte_alloc_kernel(pmd, addr);
if (!pte)
return -ENOMEM;
@@ -98500,7 +98644,7 @@ index eeba3bb..0c8633f 100644
return 0;
}

-@@ -119,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
+@@ -119,7 +200,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
pmd_t *pmd;
unsigned long next;

@@ -98509,7 +98653,7 @@ index eeba3bb..0c8633f 100644
if (!pmd)
return -ENOMEM;
do {
-@@ -136,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
+@@ -136,7 +217,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
pud_t *pud;
unsigned long next;

@@ -98518,7 +98662,7 @@ index eeba3bb..0c8633f 100644
if (!pud)
return -ENOMEM;
do {
-@@ -196,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
+@@ -196,6 +277,12 @@ int is_vmalloc_or_module_addr(const void *x)
if (addr >= MODULES_VADDR && addr < MODULES_END)
return 1;
#endif
@@ -98531,7 +98675,7 @@ index eeba3bb..0c8633f 100644
return is_vmalloc_addr(x);
}

-@@ -216,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
+@@ -216,8 +303,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)

if (!pgd_none(*pgd)) {
pud_t *pud = pud_offset(pgd, addr);
@@ -98546,22 +98690,32 @@ index eeba3bb..0c8633f 100644
if (!pmd_none(*pmd)) {
pte_t *ptep, pte;

-@@ -1151,10 +1207,14 @@ void __init vmalloc_init(void)
+@@ -1151,10 +1244,24 @@ void __init vmalloc_init(void)

for_each_possible_cpu(i) {
struct vmap_block_queue *vbq;
+ struct vfree_deferred *p;
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++ struct stack_deferred *p2;
++#endif

vbq = &per_cpu(vmap_block_queue, i);
spin_lock_init(&vbq->lock);
INIT_LIST_HEAD(&vbq->free);
++
+ p = &per_cpu(vfree_deferred, i);
+ init_llist_head(&p->list);
+ INIT_WORK(&p->wq, free_work);
++
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++ p2 = &per_cpu(stack_deferred, i);
++ init_llist_head(&p2->list.list);
++ INIT_WORK(&p2->wq, unmap_work);
++#endif
}

/* Import existing vmlist entries. */
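
vmalloc_init() above now also primes one vfree_deferred and, under CONFIG_GRKERNSEC_KSTACKOVERFLOW, one stack_deferred instance per possible CPU. Each CPU pushes onto its own list and only its own work item drains it, so pushes from different CPUs never contend on a single global list. A rough userspace analogue of that ownership model, with one thread standing in for each CPU; the names are illustrative, not from the patch.

/* Per-"CPU" deferred lists: each thread owns its list outright, so no
 * synchronization is needed in this single-owner sketch. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct deferred {
	struct entry { struct entry *next; void *p; } *head;
};

static _Thread_local struct deferred this_cpu_deferred;

static void defer(void *p)              /* producer side */
{
	struct entry *e = malloc(sizeof(*e));
	e->p = p;
	e->next = this_cpu_deferred.head;
	this_cpu_deferred.head = e;
}

static void drain(void)                 /* worker side */
{
	struct entry *e = this_cpu_deferred.head;
	this_cpu_deferred.head = NULL;
	while (e) {
		struct entry *next = e->next;
		free(e->p);
		free(e);
		e = next;
	}
}

static void *cpu_thread(void *arg)
{
	(void)arg;
	for (int i = 0; i < 3; i++)
		defer(malloc(32));
	drain();
	return NULL;
}

int main(void)
{
	pthread_t t[2];
	for (int i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, cpu_thread, NULL);
	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	puts("per-thread deferred lists drained");
	return 0;
}
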
-@@ -1295,6 +1355,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
+@@ -1295,6 +1402,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
struct vm_struct *area;

BUG_ON(in_interrupt());
@@ -98578,7 +98732,7 @@ index eeba3bb..0c8633f 100644
if (flags & VM_IOREMAP) {
int bit = fls(size);

-@@ -1469,7 +1539,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
+@@ -1469,7 +1586,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
kfree(area);
return;
}
@@ -98587,7 +98741,7 @@ index eeba3bb..0c8633f 100644
/**
* vfree - release memory allocated by vmalloc()
* @addr: memory base address
-@@ -1478,15 +1548,26 @@ static void __vunmap(const void *addr, int deallocate_pages)
+@@ -1478,15 +1595,26 @@ static void __vunmap(const void *addr, int deallocate_pages)
* obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
* NULL, no operation is performed.
*
@@ -98617,7 +98771,14 @@ index eeba3bb..0c8633f 100644
}
EXPORT_SYMBOL(vfree);

-@@ -1503,7 +1584,8 @@ void vunmap(const void *addr)
+@@ -1497,16 +1625,34 @@ EXPORT_SYMBOL(vfree);
+ * Free the virtually contiguous memory area starting at @addr,
+ * which was created from the page array passed to vmap().
+ *
+- * Must not be called in interrupt context.
++ * Must not be called in NMI context.
+ */
+ void vunmap(const void *addr)
{
BUG_ON(in_interrupt());
might_sleep();
@@ -98627,7 +98788,27 @@ index eeba3bb..0c8633f 100644
}
EXPORT_SYMBOL(vunmap);

-@@ -1527,6 +1609,11 @@ void *vmap(struct page **pages, unsigned int count,
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++void unmap_process_stacks(struct task_struct *task)
++{
++ if (unlikely(in_interrupt())) {
++ struct stack_deferred *p = &__get_cpu_var(stack_deferred);
++ struct stack_deferred_llist *list = task->stack;
++ list->stack = task->stack;
++ list->lowmem_stack = task->lowmem_stack;
++ if (llist_add((struct llist_node *)&list->list, &p->list.list))
++ schedule_work(&p->wq);
++ } else {
++ __vunmap(task->stack, 0);
++ free_pages((unsigned long)task->lowmem_stack, THREAD_ORDER);
++ }
++}
++#endif
++
+ /**
+ * vmap - map an array of pages into virtually contiguous space
+ * @pages: array of page pointers
+@@ -1527,6 +1673,11 @@ void *vmap(struct page **pages, unsigned int count,
if (count > totalram_pages)
return NULL;

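
unmap_process_stacks() above is the consumer-facing half of the deferral machinery: when a dead task's vmapped stack is released from interrupt context, where __vunmap() must not run because it can sleep, the stack and its lowmem alias are queued onto the per-CPU stack_deferred list and schedule_work() kicks the drain; otherwise both are freed synchronously. The same relaxation shows up in the kdoc changes above, where vunmap()'s "must not be called in interrupt context" becomes "must not be called in NMI context". A compressed userspace sketch of that two-path dispatch; in_interrupt_ctx and the free calls are stand-ins for in_interrupt(), __vunmap() and free_pages(), and all names are hypothetical.

/* Two-path teardown in the style of unmap_process_stacks(): free
 * synchronously when the context may sleep, otherwise hand off to a
 * worker.  Illustrative sketch only. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct task_stack {
	void *stack;            /* stands in for the vmapped stack */
	void *lowmem_stack;     /* stands in for the lowmem alias */
};

static bool in_interrupt_ctx;   /* stand-in for in_interrupt() */

static void queue_for_worker(struct task_stack *ts)
{
	/* the patch does llist_add() onto the per-CPU list and, if the
	 * list was empty, schedule_work(); the worker frees both later */
	printf("deferred: stack=%p lowmem=%p\n", ts->stack, ts->lowmem_stack);
	free(ts->stack);        /* done inline so this sketch doesn't leak */
	free(ts->lowmem_stack);
}

static void release_stack(struct task_stack *ts)
{
	if (in_interrupt_ctx) {
		queue_for_worker(ts);           /* must not sleep here */
	} else {
		free(ts->stack);                /* __vunmap(stack, 0) */
		free(ts->lowmem_stack);         /* free_pages(...) */
	}
}

int main(void)
{
	struct task_stack a = { malloc(64), malloc(64) };
	struct task_stack b = { malloc(64), malloc(64) };

	in_interrupt_ctx = false;
	release_stack(&a);      /* synchronous path */

	in_interrupt_ctx = true;
	release_stack(&b);      /* deferred path */
	return 0;
}
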
@@ -98639,7 +98820,7 @@ index eeba3bb..0c8633f 100644
area = get_vm_area_caller((count << PAGE_SHIFT), flags,
__builtin_return_address(0));
if (!area)
-@@ -1628,6 +1715,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
+@@ -1628,6 +1779,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
if (!size || (size >> PAGE_SHIFT) > totalram_pages)
goto fail;

@@ -98653,26 +98834,7 @@ index eeba3bb..0c8633f 100644
area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
start, end, node, gfp_mask, caller);
if (!area)
-@@ -1694,6 +1788,18 @@ static inline void *__vmalloc_node_flags(unsigned long size,
- node, __builtin_return_address(0));
- }
-
-+void *vmalloc_stack(int node)
-+{
-+#ifdef CONFIG_DEBUG_STACK_USAGE
-+ gfp_t mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
-+#else
-+ gfp_t mask = GFP_KERNEL | __GFP_NOTRACK;
-+#endif
-+
-+ return __vmalloc_node(THREAD_SIZE, THREAD_SIZE, mask, PAGE_KERNEL,
-+ node, __builtin_return_address(0));
-+}
-+
- /**
- * vmalloc - allocate virtually contiguous memory
- * @size: allocation size
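
The removed hunk above drops the previous revision's vmalloc_stack() helper from mm/vmalloc.c; presumably the 201406220132 revision allocates the vmapped stack elsewhere, alongside the lowmem_stack alias seen earlier, though that code is not visible in this excerpt. The reason KSTACKOVERFLOW vmaps kernel stacks at all is that a virtually mapped stack can be bounded by unmapped guard pages, so an overflow faults immediately instead of silently corrupting an adjacent allocation. A userspace illustration of the same guard-page idea using mmap()/mprotect(); illustrative only.

/* Place a PROT_NONE guard page below a "stack" region: writes inside the
 * stack succeed, while running off its low end faults immediately. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t size = 4 * page;

	/* Reserve guard + stack in one PROT_NONE mapping, then open up
	 * only the stack portion for read/write. */
	unsigned char *base = mmap(NULL, size + page, PROT_NONE,
	                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED) { perror("mmap"); return 1; }
	if (mprotect(base + page, size, PROT_READ | PROT_WRITE) != 0) {
		perror("mprotect");
		return 1;
	}

	memset(base + page, 0, size);   /* fine: inside the stack */
	printf("stack at %p, guard page at %p\n",
	       (void *)(base + page), (void *)base);
	/* base[0] = 1; would SIGSEGV: the guard page doing its job */
	munmap(base, size + page);
	return 0;
}
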
-@@ -1801,10 +1907,9 @@ EXPORT_SYMBOL(vzalloc_node);
+@@ -1801,10 +1959,9 @@ EXPORT_SYMBOL(vzalloc_node);
* For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*/
@@ -98684,7 +98846,7 @@ index eeba3bb..0c8633f 100644
-1, __builtin_return_address(0));
}

-@@ -2099,6 +2204,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+@@ -2099,6 +2256,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long uaddr = vma->vm_start;
unsigned long usize = vma->vm_end - vma->vm_start;

@@ -98693,7 +98855,7 @@ index eeba3bb..0c8633f 100644
if ((PAGE_SIZE-1) & (unsigned long)addr)
return -EINVAL;

-@@ -2351,8 +2458,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
+@@ -2351,8 +2510,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
return NULL;
}

@@ -98704,7 +98866,7 @@ index eeba3bb..0c8633f 100644
if (!vas || !vms)
goto err_free;

-@@ -2536,11 +2643,15 @@ static int s_show(struct seq_file *m, void *p)
+@@ -2536,11 +2695,15 @@ static int s_show(struct seq_file *m, void *p)
{
struct vm_struct *v = p;