Author: mpagano
Date: 2010-08-20 14:30:36 +0000 (Fri, 20 Aug 2010)
New Revision: 1756

Added:
genpatches-2.6/trunk/2.6.34/1800_page-table-unmap-for-stack-guard-fix.patch
Modified:
genpatches-2.6/trunk/2.6.34/0000_README
Log:
Patch to fix page table unmap for stack guard page

Modified: genpatches-2.6/trunk/2.6.34/0000_README
===================================================================
--- genpatches-2.6/trunk/2.6.34/0000_README	2010-08-20 13:17:15 UTC (rev 1755)
+++ genpatches-2.6/trunk/2.6.34/0000_README	2010-08-20 14:30:36 UTC (rev 1756)
@@ -59,6 +59,10 @@
 From: http://bugs.gentoo.org/show_bug.cgi?id=332949
 Desc: Add missing include
 
+Patch: 1800_page-table-unmap-for-stack-guard-fix.patch
+From: https://bugzilla.kernel.org/show_bug.cgi?id=16588
+Desc: Fix page table unmap for stack guard page properly
+
 Patch: 2600_synaptic-cap-ID-check-fix.patch
 From: http://bugs.gentoo.org/show_bug.cgi?id=328527
 Desc: Synaptics capability ID check fix

Added: genpatches-2.6/trunk/2.6.34/1800_page-table-unmap-for-stack-guard-fix.patch
===================================================================
--- genpatches-2.6/trunk/2.6.34/1800_page-table-unmap-for-stack-guard-fix.patch	(rev 0)
+++ genpatches-2.6/trunk/2.6.34/1800_page-table-unmap-for-stack-guard-fix.patch	2010-08-20 14:30:36 UTC (rev 1756)
@@ -0,0 +1,76 @@
+From 11ac552477e32835cb6970bf0a70c210807f5673 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@××××××××××××××××.org>
+Date: Sat, 14 Aug 2010 11:44:56 -0700
+Subject: [PATCH] mm: fix page table unmap for stack guard page properly
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+We do in fact need to unmap the page table _before_ doing the whole
+stack guard page logic, because if it is needed (mainly 32-bit x86 with
+PAE and CONFIG_HIGHPTE, but other architectures may use it too) then it
+will do a kmap_atomic/kunmap_atomic.
+
+And those kmaps will create an atomic region that we cannot do
+allocations in. However, the whole stack expand code will need to do
+anon_vma_prepare() and vma_lock_anon_vma() and they cannot do that in an
+atomic region.
+
+Now, a better model might actually be to do the anon_vma_prepare() when
+_creating_ a VM_GROWSDOWN segment, and not have to worry about any of
+this at page fault time. But in the meantime, this is the
+straightforward fix for the issue.
+
+See https://bugzilla.kernel.org/show_bug.cgi?id=16588 for details.
+
+Reported-by: Wylda <wylda@×××××.cz>
+Reported-by: Sedat Dilek <sedat.dilek@×××××.com>
+Reported-by: Mike Pagano <mpagano@g.o>
+Reported-by: François Valenduc <francois.valenduc@××××××××××.be>
+Tested-by: Ed Tomlinson <edt@×××.ca>
+Cc: Pekka Enberg <penberg@××××××.org>
+Cc: Greg KH <gregkh@××××.de>
+Cc: stable@××××××.org
+Signed-off-by: Linus Torvalds <torvalds@××××××××××××××××.org>
+---
+ mm/memory.c |   13 ++++++-------
+ 1 files changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/mm/memory.c b/mm/memory.c
+index 9b3b73f..b6e5fd2 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2792,24 +2792,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	spinlock_t *ptl;
+ 	pte_t entry;
+ 
+-	if (check_stack_guard_page(vma, address) < 0) {
+-		pte_unmap(page_table);
++	pte_unmap(page_table);
++
++	/* Check if we need to add a guard page to the stack */
++	if (check_stack_guard_page(vma, address) < 0)
+ 		return VM_FAULT_SIGBUS;
+-	}
+ 
++	/* Use the zero-page for reads */
+ 	if (!(flags & FAULT_FLAG_WRITE)) {
+ 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ 						vma->vm_page_prot));
+-		ptl = pte_lockptr(mm, pmd);
+-		spin_lock(ptl);
++		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+ 		if (!pte_none(*page_table))
+ 			goto unlock;
+ 		goto setpte;
+ 	}
+ 
+ 	/* Allocate our own private page. */
+-	pte_unmap(page_table);
+-
+ 	if (unlikely(anon_vma_prepare(vma)))
+ 		goto oom;
+ 	page = alloc_zeroed_user_highpage_movable(vma, address);
+--
+1.7.2.2
+