From: "Anthony G. Basile" <blueness@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/hardened-patchset:master commit in: 4.6.5/
Date: Mon, 01 Aug 2016 23:28:18
Message-Id: 1470094057.f7c364dd1793328c32a80592e272018e9b98299f.blueness@gentoo
commit: f7c364dd1793328c32a80592e272018e9b98299f
Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Mon Aug 1 23:27:37 2016 +0000
Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Mon Aug 1 23:27:37 2016 +0000
URL: https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=f7c364dd

grsecurity-3.1-4.6.5-201607312210

4.6.5/0000_README | 2 +-
...> 4420_grsecurity-3.1-4.6.5-201607312210.patch} | 164 ++++++++++++++++-----
2 files changed, 125 insertions(+), 41 deletions(-)

diff --git a/4.6.5/0000_README b/4.6.5/0000_README
index 016e706..a3be0b4 100644
--- a/4.6.5/0000_README
+++ b/4.6.5/0000_README
@@ -6,7 +6,7 @@ Patch: 1004_linux-4.6.5.patch
From: http://www.kernel.org
Desc: Linux 4.6.5

-Patch: 4420_grsecurity-3.1-4.6.5-201607272152.patch
+Patch: 4420_grsecurity-3.1-4.6.5-201607312210.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity


diff --git a/4.6.5/4420_grsecurity-3.1-4.6.5-201607272152.patch b/4.6.5/4420_grsecurity-3.1-4.6.5-201607312210.patch
similarity index 99%
rename from 4.6.5/4420_grsecurity-3.1-4.6.5-201607272152.patch
rename to 4.6.5/4420_grsecurity-3.1-4.6.5-201607312210.patch
index 927b9ba..5a9676a 100644
--- a/4.6.5/4420_grsecurity-3.1-4.6.5-201607272152.patch
+++ b/4.6.5/4420_grsecurity-3.1-4.6.5-201607312210.patch
@@ -956,7 +956,7 @@ index d50430c..01cc53b 100644
# but it is being used too early to link to meaningful stack_chk logic.
nossp_flags := $(call cc-option, -fno-stack-protector)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
-index 9e10c45..24a14ce 100644
+index 9e10c45..5a423a2 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -18,17 +18,41 @@
@@ -1078,7 +1078,7 @@ index 9e10c45..24a14ce 100644
}

#define atomic_add_return_relaxed atomic_add_return_relaxed
-+#define atomic_add_return_unchecked atomic_add_return_unchecked_relaxed
++#define atomic_add_return_unchecked_relaxed atomic_add_return_unchecked_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
@@ -1190,7 +1190,7 @@ index 9e10c45..24a14ce 100644
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
-@@ -201,16 +300,38 @@ ATOMIC_OP(xor, ^=, eor)
+@@ -201,16 +300,32 @@ ATOMIC_OP(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
@@ -1216,20 +1216,14 @@ index 9e10c45..24a14ce 100644
+}

#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
-+{
-+ return atomic_add_return_unchecked(1, v) == 0;
-+}
++#define atomic_inc_and_test_unchecked(v) (atomic_add_return_unchecked(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_inc_return_relaxed(v) (atomic_add_return_relaxed(1, v))
-+static inline int atomic_inc_return_unchecked_relaxed(atomic_unchecked_t *v)
-+{
-+ return atomic_add_return_unchecked_relaxed(1, v);
-+}
++#define atomic_inc_return_unchecked_relaxed(v) (atomic_add_return_unchecked_relaxed(1, v))
#define atomic_dec_return_relaxed(v) (atomic_sub_return_relaxed(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

-@@ -221,6 +342,14 @@ typedef struct {
+@@ -221,6 +336,14 @@ typedef struct {
long long counter;
} atomic64_t;

@@ -1244,7 +1238,7 @@ index 9e10c45..24a14ce 100644
#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
-@@ -237,6 +366,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+@@ -237,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
return result;
}

@@ -1264,7 +1258,7 @@ index 9e10c45..24a14ce 100644
static inline void atomic64_set(atomic64_t *v, long long i)
{
__asm__ __volatile__("@ atomic64_set\n"
-@@ -245,6 +387,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+@@ -245,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
: "r" (&v->counter), "r" (i)
);
}
@@ -1280,7 +1274,7 @@ index 9e10c45..24a14ce 100644
#else
static inline long long atomic64_read(const atomic64_t *v)
{
-@@ -259,6 +410,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+@@ -259,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
return result;
}

@@ -1300,7 +1294,7 @@ index 9e10c45..24a14ce 100644
static inline void atomic64_set(atomic64_t *v, long long i)
{
long long tmp;
-@@ -273,43 +437,73 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+@@ -273,43 +431,73 @@ static inline void atomic64_set(atomic64_t *v, long long i)
: "r" (&v->counter), "r" (i)
: "cc");
}
@@ -1382,7 +1376,7 @@ index 9e10c45..24a14ce 100644
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
: "r" (&v->counter), "r" (i) \
: "cc"); \
-@@ -317,6 +511,9 @@ atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
+@@ -317,6 +505,9 @@ atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
return result; \
}

@@ -1392,15 +1386,15 @@ index 9e10c45..24a14ce 100644
#define ATOMIC64_OPS(op, op1, op2) \
ATOMIC64_OP(op, op1, op2) \
ATOMIC64_OP_RETURN(op, op1, op2)
-@@ -325,6 +522,7 @@ ATOMIC64_OPS(add, adds, adc)
+@@ -325,6 +516,7 @@ ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-+#define atomic64_add_return_unchecked atomic64_add_return_unchecked_relaxed
++#define atomic64_add_return_unchecked_relaxed atomic64_add_return_unchecked_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_andnot atomic64_andnot
-@@ -336,7 +534,12 @@ ATOMIC64_OP(xor, eor, eor)
+@@ -336,7 +528,12 @@ ATOMIC64_OP(xor, eor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
@@ -1413,11 +1407,11 @@ index 9e10c45..24a14ce 100644

static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
-@@ -361,6 +564,33 @@ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
+@@ -361,6 +558,31 @@ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
return oldval;
}
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
-+#define atomic64_cmpxchg_unchecked atomic64_cmpxchg_unchecked_relaxed
++#define atomic64_cmpxchg_unchecked_relaxed atomic64_cmpxchg_unchecked_relaxed
+
+static inline long long
+atomic64_cmpxchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long old,
@@ -1426,7 +1420,7 @@ index 9e10c45..24a14ce 100644
+ long long oldval;
+ unsigned long res;
+
-+ smp_mb();
++ prefetchw(&ptr->counter);
+
+ do {
+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
@@ -1440,14 +1434,37 @@ index 9e10c45..24a14ce 100644
+ : "cc");
+ } while (res);
+
-+ smp_mb();
-+
+ return oldval;
+}

static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
-@@ -385,21 +615,35 @@ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
+@@ -380,26 +602,60 @@ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
+
+ return result;
+ }
++
++static inline long long atomic64_xchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long new)
++{
++ long long result;
++ unsigned long tmp;
++
++ prefetchw(&ptr->counter);
++
++ __asm__ __volatile__("@ atomic64_xchg_unchecked\n"
++"1: ldrexd %0, %H0, [%3]\n"
++" strexd %1, %4, %H4, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++ : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
++ : "r" (&ptr->counter), "r" (new)
++ : "cc");
++
++ return result;
++}
+ #define atomic64_xchg_relaxed atomic64_xchg_relaxed
++#define atomic64_xchg_unchecked_relaxed atomic64_xchg_unchecked_relaxed
+
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
long long result;
@@ -1489,7 +1506,7 @@ index 9e10c45..24a14ce 100644
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter)
: "cc");
-@@ -423,13 +667,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+@@ -423,13 +679,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
" teq %0, %5\n"
" teqeq %H0, %H5\n"
" moveq %1, #0\n"
@@ -1518,7 +1535,7 @@ index 9e10c45..24a14ce 100644
: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (u), "r" (a)
: "cc");
-@@ -442,10 +698,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+@@ -442,10 +710,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)

#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
@@ -1615,7 +1632,7 @@ index 3848259..bee9d84 100644
struct of_cpuidle_method {
const char *method;
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
-index 99d9f63..e3e4da6 100644
+index 99d9f63..ec44cb5 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -42,7 +42,6 @@
@@ -1626,11 +1643,12 @@ index 99d9f63..e3e4da6 100644

/*
* Domain types
-@@ -51,9 +50,27 @@
+@@ -51,9 +50,28 @@
#define DOMAIN_CLIENT 1
#ifdef CONFIG_CPU_USE_DOMAINS
#define DOMAIN_MANAGER 3
+#define DOMAIN_VECTORS 3
++#define DOMAIN_USERCLIENT DOMAIN_CLIENT
#else
+
+#ifdef CONFIG_PAX_KERNEXEC
@@ -1654,7 +1672,7 @@ index 99d9f63..e3e4da6 100644

#define domain_mask(dom) ((3) << (2 * (dom)))
#define domain_val(dom,type) ((type) << (2 * (dom)))
-@@ -62,13 +79,19 @@
+@@ -62,13 +80,19 @@
#define DACR_INIT \
(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
@@ -1677,7 +1695,7 @@ index 99d9f63..e3e4da6 100644
domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
#endif

-@@ -124,6 +147,17 @@ static inline void set_domain(unsigned val)
+@@ -124,6 +148,17 @@ static inline void set_domain(unsigned val)
set_domain(domain); \
} while (0)

@@ -102160,6 +102178,18 @@ index 69b8b52..9b58c2d 100644
*p = res;
put_cpu_var(last_ino);
return res;
+diff --git a/fs/ioctl.c b/fs/ioctl.c
+index 116a333..0f56deb 100644
+--- a/fs/ioctl.c
++++ b/fs/ioctl.c
+@@ -590,6 +590,7 @@ static long ioctl_file_dedupe_range(struct file *file, void __user *arg)
+ goto out;
+ }
+
++ same->dest_count = count;
+ ret = vfs_dedupe_file_range(file, same);
+ if (ret)
+ goto out;
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 2ad98d6..00f8858 100644
--- a/fs/jbd2/commit.c
@@ -128001,7 +128031,7 @@ index 5bdab6b..9ae82fe 100644
#define pud_none(pud) 0
#define pud_bad(pud) 0
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
-index 5e1f345..e7a174a 100644
+index 5e1f345..7104090 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -22,6 +22,12 @@
@@ -128210,7 +128240,7 @@ index 5e1f345..e7a174a 100644

#undef ATOMIC_LONG_INC_DEC_OP

-@@ -187,4 +229,56 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+@@ -187,4 +229,58 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
#define atomic_long_inc_not_zero(l) \
ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))

@@ -128244,7 +128274,9 @@ index 5e1f345..e7a174a 100644
+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
+#define atomic_inc_unchecked(v) atomic_inc(v)
++#ifndef atomic_inc_and_test_unchecked
+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
++#endif
+#ifndef atomic_inc_return_unchecked
+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
+#endif
@@ -128268,7 +128300,7 @@ index 5e1f345..e7a174a 100644
+
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
-index d48e78c..d29d3a3 100644
+index d48e78c..db16df1 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -16,6 +16,8 @@ typedef struct {

#define ATOMIC64_INIT(i) { (i) }

extern long long atomic64_read(const atomic64_t *v);
-@@ -55,4 +57,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
+@@ -55,4 +57,15 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)

@@ -128293,6 +128325,7 @@ index d48e78c..d29d3a3 100644
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++#define atomic64_xchg_unchecked(v, n) atomic64_xchg((v), (n))
+
#endif /* _ASM_GENERIC_ATOMIC64_H */
diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
@@ -128946,10 +128979,24 @@ index c1da539..1dcec55 100644
struct atmphy_ops {
int (*start)(struct atm_dev *dev);
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
-index 506c353..414ddeb 100644
+index 506c353..10739bd 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
-@@ -113,6 +113,11 @@
+@@ -91,6 +91,13 @@
+ #endif
+ #endif /* atomic_add_return_relaxed */
+
++#ifndef atomic_add_return_unchecked_relaxed
++#define atomic_add_return_unchecked_relaxed atomic_add_return_unchecked
++#else
++#define atomic_add_return_unchecked(...) \
++ __atomic_op_fence(atomic_add_return_unchecked, __VA_ARGS__)
++#endif
++
+ /* atomic_inc_return_relaxed */
+ #ifndef atomic_inc_return_relaxed
+ #define atomic_inc_return_relaxed atomic_inc_return
+@@ -113,6 +120,11 @@
#define atomic_inc_return(...) \
__atomic_op_fence(atomic_inc_return, __VA_ARGS__)
#endif
@@ -128961,7 +129008,19 @@ index 506c353..414ddeb 100644
#endif /* atomic_inc_return_relaxed */

/* atomic_sub_return_relaxed */
-@@ -265,6 +270,11 @@
+@@ -241,6 +253,11 @@
+ #define atomic64_add_return(...) \
+ __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
+ #endif
++
++#ifndef atomic64_add_return_unchecked
++#define atomic64_add_return_unchecked(...) \
++ __atomic_op_fence(atomic64_add_return_unchecked, __VA_ARGS__)
++#endif
+ #endif /* atomic64_add_return_relaxed */
+
+ /* atomic64_inc_return_relaxed */
+@@ -265,6 +282,11 @@
#define atomic64_inc_return(...) \
__atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
#endif
@@ -128973,7 +129032,32 @@ index 506c353..414ddeb 100644
#endif /* atomic64_inc_return_relaxed */


-@@ -442,7 +452,7 @@
+@@ -338,6 +360,11 @@
+ #define atomic64_xchg(...) \
+ __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
+ #endif
++
++#ifndef atomic64_xchg_unchecked
++#define atomic64_xchg_unchecked(...) \
++ __atomic_op_fence(atomic64_xchg_unchecked, __VA_ARGS__)
++#endif
+ #endif /* atomic64_xchg_relaxed */
+
+ /* atomic64_cmpxchg_relaxed */
+@@ -362,6 +389,12 @@
+ #define atomic64_cmpxchg(...) \
+ __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
+ #endif
++
++#ifndef atomic64_cmpxchg_unchecked
++#define atomic64_cmpxchg_unchecked(...) \
++ __atomic_op_fence(atomic64_cmpxchg_unchecked, __VA_ARGS__)
++#endif
++
+ #endif /* atomic64_cmpxchg_relaxed */
+
+ /* cmpxchg_relaxed */
+@@ -442,7 +475,7 @@
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/