Gentoo Archives: gentoo-commits

From: "Anthony G. Basile" <blueness@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/hardened-patchset:master commit in: 3.14.17/, 3.16.1/
Date: Wed, 03 Sep 2014 21:55:33
Message-Id: 1409745636.68d46906563236bdfaebc58465bcc47ca7388a77.blueness@gentoo
commit: 68d46906563236bdfaebc58465bcc47ca7388a77
Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 3 12:00:36 2014 +0000
Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Wed Sep 3 12:00:36 2014 +0000
URL: http://sources.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=68d46906

Grsec/PaX: 3.0-{3.14.17,3.16.1}-201409021826

---
3.14.17/0000_README | 2 +-
...4420_grsecurity-3.0-3.14.17-201409021816.patch} | 40 ++++-
3.16.1/0000_README | 2 +-
... 4420_grsecurity-3.0-3.16.1-201409021826.patch} | 187 ++++++++++++++++-----
4 files changed, 184 insertions(+), 47 deletions(-)

diff --git a/3.14.17/0000_README b/3.14.17/0000_README
index 99b0d3a..19f254f 100644
--- a/3.14.17/0000_README
+++ b/3.14.17/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.14.17-201408312006.patch
+Patch: 4420_grsecurity-3.0-3.14.17-201409021816.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity


diff --git a/3.14.17/4420_grsecurity-3.0-3.14.17-201408312006.patch b/3.14.17/4420_grsecurity-3.0-3.14.17-201409021816.patch
similarity index 99%
rename from 3.14.17/4420_grsecurity-3.0-3.14.17-201408312006.patch
rename to 3.14.17/4420_grsecurity-3.0-3.14.17-201409021816.patch
index 5fb863b..7887ba7 100644
--- a/3.14.17/4420_grsecurity-3.0-3.14.17-201408312006.patch
+++ b/3.14.17/4420_grsecurity-3.0-3.14.17-201409021816.patch
@@ -58165,10 +58165,33 @@ index ebaff36..7e3ea26 100644
kunmap(page);
file_end_write(file);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
-index 5e0982a..b7e82bc 100644
+index 5e0982a..ca18377 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
-@@ -248,7 +248,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
+@@ -128,6 +128,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx)
+ struct dentry *dentry, *last;
+ struct ceph_dentry_info *di;
+ int err = 0;
++ char d_name[DNAME_INLINE_LEN];
++ const unsigned char *name;
+
+ /* claim ref on last dentry we returned */
+ last = fi->dentry;
+@@ -183,7 +185,12 @@ more:
+ dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
+ dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
+ ctx->pos = di->offset;
+- if (!dir_emit(ctx, dentry->d_name.name,
++ name = dentry->d_name.name;
++ if (name == dentry->d_iname) {
++ memcpy(d_name, name, dentry->d_name.len);
++ name = d_name;
++ }
++ if (!dir_emit(ctx, name,
+ dentry->d_name.len,
+ ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
+ dentry->d_inode->i_mode >> 12)) {
+@@ -248,7 +255,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
unsigned frag = fpos_frag(ctx->pos);
@@ -96341,6 +96364,19 @@ index a2a54a8..43ecb68 100644
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
+diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
+index a8b9199..dfb79e0 100644
+--- a/mm/pgtable-generic.c
++++ b/mm/pgtable-generic.c
+@@ -195,7 +195,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t entry = *pmdp;
+ if (pmd_numa(entry))
+ entry = pmd_mknonnuma(entry);
+- set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
++ set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ }
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index fd26d04..0cea1b0 100644
--- a/mm/process_vm_access.c

diff --git a/3.16.1/0000_README b/3.16.1/0000_README
index 76ef299..7a2bc49 100644
--- a/3.16.1/0000_README
+++ b/3.16.1/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.16.1-201409010104.patch
+Patch: 4420_grsecurity-3.0-3.16.1-201409021826.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity


diff --git a/3.16.1/4420_grsecurity-3.0-3.16.1-201409010104.patch b/3.16.1/4420_grsecurity-3.0-3.16.1-201409021826.patch
similarity index 99%
rename from 3.16.1/4420_grsecurity-3.0-3.16.1-201409010104.patch
rename to 3.16.1/4420_grsecurity-3.0-3.16.1-201409021826.patch
index 6753168..624c5fb 100644
--- a/3.16.1/4420_grsecurity-3.0-3.16.1-201409010104.patch
+++ b/3.16.1/4420_grsecurity-3.0-3.16.1-201409021826.patch
@@ -3775,7 +3775,7 @@ index 7bcee5c..e2f3249 100644
__data_loc = .;
#endif
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
-index 3c82b37..bd41745 100644
+index 3c82b37..69fa3d2 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
@@ -3814,6 +3814,15 @@ index 3c82b37..bd41745 100644
kvm->arch.vmid = kvm_next_vmid;
kvm_next_vmid++;

+@@ -1034,7 +1034,7 @@ static void check_kvm_target_cpu(void *ret)
+ /**
+ * Initialize Hyp-mode and memory mappings on all CPUs.
+ */
+-int kvm_arch_init(void *opaque)
++int kvm_arch_init(const void *opaque)
+ {
+ int err;
+ int ret, cpu;
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 14a0d98..7771a7d 100644
--- a/arch/arm/lib/clear_user.S
@@ -7597,6 +7606,19 @@ index 51706d6..ec1178c 100644

info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
+diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
+index f3c56a1..6a2f01c 100644
+--- a/arch/mips/kvm/kvm_mips.c
++++ b/arch/mips/kvm/kvm_mips.c
+@@ -841,7 +841,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+ return r;
+ }
+
+-int kvm_arch_init(void *opaque)
++int kvm_arch_init(const void *opaque)
+ {
+ int ret;
+
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index becc42b..9e43d4b 100644
--- a/arch/mips/mm/fault.c
@@ -18130,7 +18152,7 @@ index 81bb91b..9392125 100644

/*
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
-index 0ec0560..5dc64bd 100644
+index 0ec0560..f169e5b 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -46,6 +46,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
@@ -18193,7 +18215,23 @@ index 0ec0560..5dc64bd 100644
static inline int pte_dirty(pte_t pte)
{
return pte_flags(pte) & _PAGE_DIRTY;
-@@ -150,6 +190,11 @@ static inline unsigned long pud_pfn(pud_t pud)
+@@ -131,8 +171,13 @@ static inline int pte_exec(pte_t pte)
+
+ static inline int pte_special(pte_t pte)
+ {
+- return (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_SPECIAL)) ==
+- (_PAGE_PRESENT|_PAGE_SPECIAL);
++ /*
++ * See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h.
++ * On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 ==
++ * __PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL.
++ */
++ return (pte_flags(pte) & _PAGE_SPECIAL) &&
++ (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE));
+ }
+
+ static inline unsigned long pte_pfn(pte_t pte)
+@@ -150,6 +195,11 @@ static inline unsigned long pud_pfn(pud_t pud)
return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

@@ -18205,7 +18243,7 @@ index 0ec0560..5dc64bd 100644
#define pte_page(pte) pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
-@@ -203,9 +248,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
+@@ -203,9 +253,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
return pte_clear_flags(pte, _PAGE_RW);
}

@@ -18236,7 +18274,7 @@ index 0ec0560..5dc64bd 100644
}

static inline pte_t pte_mkdirty(pte_t pte)
-@@ -435,6 +500,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
+@@ -435,6 +505,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
#endif

#ifndef __ASSEMBLY__
@@ -18253,7 +18291,7 @@ index 0ec0560..5dc64bd 100644
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
-@@ -581,7 +656,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
+@@ -581,7 +661,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
@@ -18262,7 +18300,7 @@ index 0ec0560..5dc64bd 100644

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
-@@ -621,7 +696,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+@@ -621,7 +701,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
@@ -18271,7 +18309,7 @@ index 0ec0560..5dc64bd 100644

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
-@@ -636,7 +711,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+@@ -636,7 +716,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)

static inline int pgd_bad(pgd_t pgd)
{
@@ -18280,7 +18318,7 @@ index 0ec0560..5dc64bd 100644
}

static inline int pgd_none(pgd_t pgd)
-@@ -659,7 +734,12 @@ static inline int pgd_none(pgd_t pgd)
+@@ -659,7 +739,12 @@ static inline int pgd_none(pgd_t pgd)
* pgd_offset() returns a (pgd_t *)
* pgd_index() is used get the offset into the pgd page's array of pgd_t's;
*/
@@ -18294,7 +18332,7 @@ index 0ec0560..5dc64bd 100644
/*
* a shortcut which implies the use of the kernel's pgd, instead
* of a process's
-@@ -670,6 +750,23 @@ static inline int pgd_none(pgd_t pgd)
+@@ -670,6 +755,23 @@ static inline int pgd_none(pgd_t pgd)
#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

@@ -18318,7 +18356,7 @@ index 0ec0560..5dc64bd 100644
#ifndef __ASSEMBLY__

extern int direct_gbpages;
-@@ -836,11 +933,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+@@ -836,11 +938,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
* dst and src can be on the same page, but the range must not overlap,
* and must not cross a page boundary.
*/
@@ -40781,7 +40819,7 @@ index dd3a78c..386d49c 100644

rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
-index de6da95..a2e72c0 100644
+index de6da95..c98278b 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -21,6 +21,7 @@
@@ -40797,7 +40835,7 @@ index de6da95..a2e72c0 100644
}

-static struct clk_ops clk_pll_ops = {
-+static struct clk_ops_no_const clk_pll_ops __read_only = {
++static clk_ops_no_const clk_pll_ops __read_only = {
.recalc_rate = clk_pll_recalc_rate,
.get_parent = clk_pll_get_parent,
};
@@ -60802,10 +60840,33 @@ index 4b1fb5c..0d2a699 100644
kunmap(page);
file_end_write(file);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
-index c29d6ae..a56c4ae 100644
+index c29d6ae..719b9bb 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
-@@ -250,7 +250,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
+@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
+ struct dentry *dentry, *last;
+ struct ceph_dentry_info *di;
+ int err = 0;
++ char d_name[DNAME_INLINE_LEN];
++ const unsigned char *name;
+
+ /* claim ref on last dentry we returned */
+ last = fi->dentry;
+@@ -192,7 +194,12 @@ more:
+
+ dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
+ dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
+- if (!dir_emit(ctx, dentry->d_name.name,
++ name = dentry->d_name.name;
++ if (name == dentry->d_iname) {
++ memcpy(d_name, name, dentry->d_name.len);
++ name = d_name;
++ }
++ if (!dir_emit(ctx, name,
+ dentry->d_name.len,
+ ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
+ dentry->d_inode->i_mode >> 12)) {
+@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
unsigned frag = fpos_frag(ctx->pos);
@@ -99455,7 +99516,7 @@ index a013bc9..a897a14 100644
}
unset_migratetype_isolate(page, MIGRATE_MOVABLE);
diff --git a/mm/memory.c b/mm/memory.c
-index 8b44f76..66f1954 100644
+index 8b44f76..babeaec 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -413,6 +413,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -99492,7 +99553,34 @@ index 8b44f76..66f1954 100644
}

/*
-@@ -1501,6 +1507,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -751,7 +757,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn = pte_pfn(pte);
+
+ if (HAVE_PTE_SPECIAL) {
+- if (likely(!pte_special(pte) || pte_numa(pte)))
++ if (likely(!pte_special(pte)))
+ goto check_pfn;
+ if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+ return NULL;
+@@ -777,15 +783,14 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ }
+ }
+
++ if (is_zero_pfn(pfn))
++ return NULL;
+ check_pfn:
+ if (unlikely(pfn > highest_memmap_pfn)) {
+ print_bad_pte(vma, addr, pte, NULL);
+ return NULL;
+ }
+
+- if (is_zero_pfn(pfn))
+- return NULL;
+-
+ /*
+ * NOTE! We still have PageReserved() pages in the page tables.
+ * eg. VDSO mappings can cause them to exist.
+@@ -1501,6 +1506,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));

@@ -99503,7 +99591,7 @@ index 8b44f76..66f1954 100644
retval = 0;
pte_unmap_unlock(pte, ptl);
return retval;
-@@ -1545,9 +1555,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -1545,9 +1554,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
if (!page_count(page))
return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
@@ -99525,7 +99613,7 @@ index 8b44f76..66f1954 100644
}
return insert_page(vma, addr, page, vma->vm_page_prot);
}
-@@ -1630,6 +1652,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+@@ -1630,6 +1651,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
@@ -99533,7 +99621,7 @@ index 8b44f76..66f1954 100644

if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
-@@ -1877,7 +1900,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -1877,7 +1899,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,

BUG_ON(pud_huge(*pud));

@@ -99544,7 +99632,7 @@ index 8b44f76..66f1954 100644
if (!pmd)
return -ENOMEM;
do {
-@@ -1897,7 +1922,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+@@ -1897,7 +1921,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long next;
int err;

@@ -99555,7 +99643,7 @@ index 8b44f76..66f1954 100644
if (!pud)
return -ENOMEM;
do {
-@@ -2019,6 +2046,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
+@@ -2019,6 +2045,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
return ret;
}

@@ -99742,7 +99830,7 @@ index 8b44f76..66f1954 100644
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
-@@ -2216,6 +2423,12 @@ gotten:
+@@ -2216,6 +2422,12 @@ gotten:
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
@@ -99755,7 +99843,7 @@ index 8b44f76..66f1954 100644
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2267,6 +2480,10 @@ gotten:
+@@ -2267,6 +2479,10 @@ gotten:
page_remove_rmap(old_page);
}

@@ -99766,7 +99854,7 @@ index 8b44f76..66f1954 100644
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
-@@ -2540,6 +2757,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2540,6 +2756,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
@@ -99778,7 +99866,7 @@ index 8b44f76..66f1954 100644
unlock_page(page);
if (page != swapcache) {
/*
-@@ -2563,6 +2785,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2563,6 +2784,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,

/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -99790,7 +99878,7 @@ index 8b44f76..66f1954 100644
unlock:
pte_unmap_unlock(page_table, ptl);
out:
-@@ -2582,40 +2809,6 @@ out_release:
+@@ -2582,40 +2808,6 @@ out_release:
}

/*
@@ -99831,7 +99919,7 @@ index 8b44f76..66f1954 100644
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -2624,27 +2817,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2624,27 +2816,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags)
{
@@ -99864,7 +99952,7 @@ index 8b44f76..66f1954 100644
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -2668,6 +2857,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2668,6 +2856,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (!pte_none(*page_table))
goto release;

@@ -99876,7 +99964,7 @@ index 8b44f76..66f1954 100644
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
setpte:
-@@ -2675,6 +2869,12 @@ setpte:
+@@ -2675,6 +2868,12 @@ setpte:

/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -99889,7 +99977,7 @@ index 8b44f76..66f1954 100644
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
-@@ -2906,6 +3106,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2906,6 +3105,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return ret;
}
do_set_pte(vma, address, fault_page, pte, false, false);
@@ -99901,7 +99989,7 @@ index 8b44f76..66f1954 100644
unlock_page(fault_page);
unlock_out:
pte_unmap_unlock(pte, ptl);
-@@ -2947,7 +3152,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2947,7 +3151,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
page_cache_release(fault_page);
goto uncharge_out;
}
@@ -99920,7 +100008,7 @@ index 8b44f76..66f1954 100644
pte_unmap_unlock(pte, ptl);
unlock_page(fault_page);
page_cache_release(fault_page);
-@@ -2995,6 +3211,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2995,6 +3210,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return ret;
}
do_set_pte(vma, address, fault_page, pte, true, false);
@@ -99932,7 +100020,7 @@ index 8b44f76..66f1954 100644
pte_unmap_unlock(pte, ptl);

if (set_page_dirty(fault_page))
-@@ -3225,6 +3446,12 @@ static int handle_pte_fault(struct mm_struct *mm,
+@@ -3225,6 +3445,12 @@ static int handle_pte_fault(struct mm_struct *mm,
if (flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vma, address);
}
@@ -99945,7 +100033,7 @@ index 8b44f76..66f1954 100644
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
-@@ -3241,9 +3468,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3241,9 +3467,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd;
pte_t *pte;

@@ -99987,7 +100075,7 @@ index 8b44f76..66f1954 100644
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
-@@ -3371,6 +3630,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3371,6 +3629,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -100011,7 +100099,7 @@ index 8b44f76..66f1954 100644
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_PMD_FOLDED
-@@ -3401,6 +3677,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3401,6 +3676,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -100042,7 +100130,7 @@ index 8b44f76..66f1954 100644
#endif /* __PAGETABLE_PMD_FOLDED */

#if !defined(__HAVE_ARCH_GATE_AREA)
-@@ -3414,7 +3714,7 @@ static int __init gate_vma_init(void)
+@@ -3414,7 +3713,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -100051,7 +100139,7 @@ index 8b44f76..66f1954 100644

return 0;
}
-@@ -3548,8 +3848,8 @@ out:
+@@ -3548,8 +3847,8 @@ out:
return ret;
}

@@ -100062,7 +100150,7 @@ index 8b44f76..66f1954 100644
{
resource_size_t phys_addr;
unsigned long prot = 0;
-@@ -3575,8 +3875,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
+@@ -3575,8 +3874,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
* Access another process' address space as given in mm. If non-NULL, use the
* given task for page fault accounting.
*/
@@ -100073,7 +100161,7 @@ index 8b44f76..66f1954 100644
{
struct vm_area_struct *vma;
void *old_buf = buf;
-@@ -3584,7 +3884,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -3584,7 +3883,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
while (len) {
@@ -100082,7 +100170,7 @@ index 8b44f76..66f1954 100644
void *maddr;
struct page *page = NULL;

-@@ -3643,8 +3943,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -3643,8 +3942,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
*
* The caller must hold a reference on @mm.
*/
@@ -100093,7 +100181,7 @@ index 8b44f76..66f1954 100644
{
return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
-@@ -3654,11 +3954,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+@@ -3654,11 +3953,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
* Source/target buffer must be kernel space,
* Do not walk the page table directly, use get_user_pages
*/
@@ -102063,6 +102151,19 @@ index 2ddf9a9..f8fc075 100644
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
+diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
+index a8b9199..dfb79e0 100644
+--- a/mm/pgtable-generic.c
++++ b/mm/pgtable-generic.c
+@@ -195,7 +195,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t entry = *pmdp;
+ if (pmd_numa(entry))
+ entry = pmd_mknonnuma(entry);
+- set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
++ set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ }
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 5077afc..846c9ef 100644
--- a/mm/process_vm_access.c