
From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1763 - genpatches-2.6/trunk/2.6.32
Date: Sat, 21 Aug 2010 20:58:06
Message-Id: 20100821205801.F0F1A2004E@flycatcher.gentoo.org
Author: mpagano
Date: 2010-08-21 20:58:01 +0000 (Sat, 21 Aug 2010)
New Revision: 1763

Added:
   genpatches-2.6/trunk/2.6.32/1019_linux-2.6.32.20.patch
Removed:
   genpatches-2.6/trunk/2.6.32/1800_page-table-unmap-for-stack-guard-fix.patch
Modified:
   genpatches-2.6/trunk/2.6.32/0000_README
Log:
Remove redundant patch; add Linux patch 2.6.32.20.

Modified: genpatches-2.6/trunk/2.6.32/0000_README
===================================================================
--- genpatches-2.6/trunk/2.6.32/0000_README	2010-08-20 20:45:11 UTC (rev 1762)
+++ genpatches-2.6/trunk/2.6.32/0000_README	2010-08-21 20:58:01 UTC (rev 1763)
@@ -111,13 +111,13 @@
 From:   http://www.kernel.org
 Desc:   Linux 2.6.32.18
 
-Patch:  1017_linux-2.6.32.19.patch
+Patch:  1018_linux-2.6.32.19.patch
 From:   http://www.kernel.org
 Desc:   Linux 2.6.32.19
 
-Patch:  1800_page-table-unmap-for-stack-guard-fix.patch
-From:   https://bugzilla.kernel.org/show_bug.cgi?id=16588
-Desc:   Fix page table unmap for stack guard page properly
+Patch:  1019_linux-2.6.32.20.patch
+From:   http://www.kernel.org
+Desc:   Linux 2.6.32.20
 
 Patch:  2500_libata-fix-truncated-LBA48-ret-vals.patch
 From:   http://bugs.gentoo.org/show_bug.cgi?id=303313

Added: genpatches-2.6/trunk/2.6.32/1019_linux-2.6.32.20.patch
===================================================================
--- genpatches-2.6/trunk/2.6.32/1019_linux-2.6.32.20.patch	(rev 0)
+++ genpatches-2.6/trunk/2.6.32/1019_linux-2.6.32.20.patch	2010-08-21 20:58:01 UTC (rev 1763)
@@ -0,0 +1,81 @@
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 366b101..899145d 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -206,6 +206,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
+ 	int flags = vma->vm_flags;
+ 	unsigned long ino = 0;
+ 	unsigned long long pgoff = 0;
++	unsigned long start;
+ 	dev_t dev = 0;
+ 	int len;
+ 
+@@ -216,8 +217,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
+ 		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
+ 	}
+ 
++	/* We don't show the stack guard page in /proc/maps */
++	start = vma->vm_start;
++	if (vma->vm_flags & VM_GROWSDOWN)
++		start += PAGE_SIZE;
++
+ 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
+-			vma->vm_start,
++			start,
+ 			vma->vm_end,
+ 			flags & VM_READ ? 'r' : '-',
+ 			flags & VM_WRITE ? 'w' : '-',
+diff --git a/mm/memory.c b/mm/memory.c
+index 76d1b21..babb991 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2662,24 +2662,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	spinlock_t *ptl;
+ 	pte_t entry;
+ 
+-	if (check_stack_guard_page(vma, address) < 0) {
+-		pte_unmap(page_table);
++	pte_unmap(page_table);
++
++	/* Check if we need to add a guard page to the stack */
++	if (check_stack_guard_page(vma, address) < 0)
+ 		return VM_FAULT_SIGBUS;
+-	}
+ 
++	/* Use the zero-page for reads */
+ 	if (!(flags & FAULT_FLAG_WRITE)) {
+ 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ 						vma->vm_page_prot));
+-		ptl = pte_lockptr(mm, pmd);
+-		spin_lock(ptl);
++		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+ 		if (!pte_none(*page_table))
+ 			goto unlock;
+ 		goto setpte;
+ 	}
+ 
+ 	/* Allocate our own private page. */
+-	pte_unmap(page_table);
+-
+ 	if (unlikely(anon_vma_prepare(vma)))
+ 		goto oom;
+ 	page = alloc_zeroed_user_highpage_movable(vma, address);
+diff --git a/mm/mlock.c b/mm/mlock.c
+index 2e05c97..524d2a4 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -170,6 +170,14 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
+ 	if (vma->vm_flags & VM_WRITE)
+ 		gup_flags |= FOLL_WRITE;
+ 
++	/* We don't try to access the guard page of a stack vma */
++	if (vma->vm_flags & VM_GROWSDOWN) {
++		if (start == vma->vm_start) {
++			start += PAGE_SIZE;
++			nr_pages--;
++		}
++	}
++
+ 	while (nr_pages > 0) {
+ 		int i;
+ 
Deleted: genpatches-2.6/trunk/2.6.32/1800_page-table-unmap-for-stack-guard-fix.patch
===================================================================
--- genpatches-2.6/trunk/2.6.32/1800_page-table-unmap-for-stack-guard-fix.patch	2010-08-20 20:45:11 UTC (rev 1762)
+++ genpatches-2.6/trunk/2.6.32/1800_page-table-unmap-for-stack-guard-fix.patch	2010-08-21 20:58:01 UTC (rev 1763)
@@ -1,76 +0,0 @@
-From 11ac552477e32835cb6970bf0a70c210807f5673 Mon Sep 17 00:00:00 2001
-From: Linus Torvalds <torvalds@linux-foundation.org>
-Date: Sat, 14 Aug 2010 11:44:56 -0700
-Subject: [PATCH] mm: fix page table unmap for stack guard page properly
-MIME-Version: 1.0
-Content-Type: text/plain; charset=utf8
-Content-Transfer-Encoding: 8bit
-
-We do in fact need to unmap the page table _before_ doing the whole
-stack guard page logic, because if it is needed (mainly 32-bit x86 with
-PAE and CONFIG_HIGHPTE, but other architectures may use it too) then it
-will do a kmap_atomic/kunmap_atomic.
-
-And those kmaps will create an atomic region that we cannot do
-allocations in.  However, the whole stack expand code will need to do
-anon_vma_prepare() and vma_lock_anon_vma() and they cannot do that in an
-atomic region.
-
-Now, a better model might actually be to do the anon_vma_prepare() when
-_creating_ a VM_GROWSDOWN segment, and not have to worry about any of
-this at page fault time.  But in the meantime, this is the
-straightforward fix for the issue.
-
-See https://bugzilla.kernel.org/show_bug.cgi?id=16588 for details.
-
-Reported-by: Wylda <wylda@volny.cz>
-Reported-by: Sedat Dilek <sedat.dilek@gmail.com>
-Reported-by: Mike Pagano <mpagano@gentoo.org>
-Reported-by: François Valenduc <francois.valenduc@tvcablenet.be>
-Tested-by: Ed Tomlinson <edt@aei.ca>
-Cc: Pekka Enberg <penberg@kernel.org>
-Cc: Greg KH <gregkh@suse.de>
-Cc: stable@kernel.org
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
----
- mm/memory.c |   13 ++++++-------
- 1 files changed, 6 insertions(+), 7 deletions(-)
-
-diff --git a/mm/memory.c b/mm/memory.c
-index 9b3b73f..b6e5fd2 100644
---- a/mm/memory.c
-+++ b/mm/memory.c
-@@ -2792,24 +2792,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
- 	spinlock_t *ptl;
- 	pte_t entry;
- 
--	if (check_stack_guard_page(vma, address) < 0) {
--		pte_unmap(page_table);
-+	pte_unmap(page_table);
-+
-+	/* Check if we need to add a guard page to the stack */
-+	if (check_stack_guard_page(vma, address) < 0)
- 		return VM_FAULT_SIGBUS;
--	}
- 
-+	/* Use the zero-page for reads */
- 	if (!(flags & FAULT_FLAG_WRITE)) {
- 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
- 						vma->vm_page_prot));
--		ptl = pte_lockptr(mm, pmd);
--		spin_lock(ptl);
-+		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
- 		if (!pte_none(*page_table))
- 			goto unlock;
- 		goto setpte;
- 	}
- 
- 	/* Allocate our own private page. */
--	pte_unmap(page_table);
---
- 	if (unlikely(anon_vma_prepare(vma)))
- 		goto oom;
- 	page = alloc_zeroed_user_highpage_movable(vma, address);
---
--1.7.2.2
--
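
[Editor's note] The visible effect of the fs/proc/task_mmu.c hunk in 1019_linux-2.6.32.20.patch can be checked from userspace. The sketch below is illustrative only and not part of this commit (the file name stackmap.c is hypothetical): it prints the [stack] line of /proc/self/maps. On a kernel carrying the hunk, the reported start of the VM_GROWSDOWN stack mapping has already been bumped one PAGE_SIZE past the guard page, so comparing the output on 2.6.32.19 and 2.6.32.20 shows the start address shifted up by one page.

/* stackmap.c — minimal sketch: print this process's stack mapping.
 * Build with: gcc -o stackmap stackmap.c */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/self/maps", "r");
	char line[512];

	if (!f) {
		perror("fopen /proc/self/maps");
		return 1;
	}
	/* Each line is "start-end perms offset dev inode path"; the
	 * main thread's stack vma is tagged "[stack]". */
	while (fgets(line, sizeof(line), f)) {
		if (strstr(line, "[stack]"))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}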