commit: 60daeb8123a7055c17ab82f08a70679460278546
Author: Anthony G. Basile <blueness@gentoo.org>
AuthorDate: Thu Jun 5 22:27:14 2014 +0000
Commit: Anthony G. Basile <blueness@gentoo.org>
CommitDate: Thu Jun 5 22:27:14 2014 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=60daeb81

Grsec/PaX: 3.0-{3.2.59,3.14.5}-201406051310, CVE-2014-3153

---
3.14.5/0000_README | 2 +-
... 4420_grsecurity-3.0-3.14.5-201406051310.patch} | 567 ++++++++++++++++++++-
3.2.59/0000_README | 2 +-
... 4420_grsecurity-3.0-3.2.59-201406051309.patch} | 480 ++++++++++++++++-
4 files changed, 1016 insertions(+), 35 deletions(-)
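For context: CVE-2014-3153 ("towelroot") is a privilege escalation in the futex subsystem, and the key hunks below reject FUTEX_CMP_REQUEUE_PI calls that requeue a futex onto itself and sanity-check pi_state ownership in lookup_pi_state(). A minimal userspace sketch of the now-rejected degenerate call follows; it is illustrative only and assumes a kernel carrying these fixes, which fails the call with EINVAL:

  /* Illustrative check, not part of the patchset: on a kernel with these
   * fixes, requeueing a PI futex onto itself must fail with EINVAL (see
   * the "Requeue PI only works on two distinct uaddrs" hunk below). */
  #include <errno.h>
  #include <linux/futex.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  int main(void)
  {
          uint32_t ftx = 0;
          /* futex(uaddr, op, val, timeout/nr_requeue, uaddr2, val3) */
          long ret = syscall(SYS_futex, &ftx, FUTEX_CMP_REQUEUE_PI,
                             1, NULL, &ftx, 0);
          if (ret == -1 && errno == EINVAL)
                  printf("kernel rejects same-uaddr requeue (patched)\n");
          return 0;
  }

On an unpatched kernel the same call was the entry point for the exploit, which is why both README files below move to the 20140605 grsecurity patches.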
diff --git a/3.14.5/0000_README b/3.14.5/0000_README
index 287174d..d423279 100644
--- a/3.14.5/0000_README
+++ b/3.14.5/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.14.5-201406021708.patch
+Patch: 4420_grsecurity-3.0-3.14.5-201406051310.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity


diff --git a/3.14.5/4420_grsecurity-3.0-3.14.5-201406021708.patch b/3.14.5/4420_grsecurity-3.0-3.14.5-201406051310.patch
similarity index 99%
rename from 3.14.5/4420_grsecurity-3.0-3.14.5-201406021708.patch
rename to 3.14.5/4420_grsecurity-3.0-3.14.5-201406051310.patch
index 400f193..311f637 100644
--- a/3.14.5/4420_grsecurity-3.0-3.14.5-201406021708.patch
+++ b/3.14.5/4420_grsecurity-3.0-3.14.5-201406051310.patch
@@ -46525,6 +46525,18 @@ index be7d7a6..a8983f8 100644
break;
default:
dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+index 7d4f549..3e46c89 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+@@ -1022,6 +1022,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
+ struct qlcnic_dcb_cee *peer;
+ int i;
+
++ memset(info, 0, sizeof(*info));
+ *app_count = 0;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 7763962..c3499a7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -58815,7 +58827,7 @@ index e4141f2..d8263e8 100644
i += packet_length_size;
if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
diff --git a/fs/exec.c b/fs/exec.c
-index 3d78fcc..75b208f 100644
+index 3d78fcc..6b2fd70 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -55,8 +55,20 @@
@@ -59561,7 +59573,7 @@ index 3d78fcc..75b208f 100644
+#endif
+
+#ifndef CONFIG_STACK_GROWSUP
-+ const void * stackstart = task_stack_page(current);
++ unsigned long stackstart = (unsigned long)task_stack_page(current);
+ if (unlikely(current_stack_pointer < stackstart + 512 ||
+ current_stack_pointer >= stackstart + THREAD_SIZE))
+ BUG();
@@ -81417,6 +81429,36 @@ index 0000000..33f4af8
+};
+
+#endif
+diff --git a/include/linux/netlink.h b/include/linux/netlink.h
+index aad8eea..034cda7 100644
+--- a/include/linux/netlink.h
++++ b/include/linux/netlink.h
+@@ -16,9 +16,10 @@ static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
+ }
+
+ enum netlink_skb_flags {
+- NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */
+- NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */
+- NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */
++ NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */
++ NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */
++ NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */
++ NETLINK_SKB_DST = 0x8, /* Dst set in sendto or sendmsg */
+ };
+
+ struct netlink_skb_parms {
+@@ -169,4 +170,11 @@ struct netlink_tap {
+ extern int netlink_add_tap(struct netlink_tap *nt);
+ extern int netlink_remove_tap(struct netlink_tap *nt);
+
++bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
++ struct user_namespace *ns, int cap);
++bool netlink_ns_capable(const struct sk_buff *skb,
++ struct user_namespace *ns, int cap);
++bool netlink_capable(const struct sk_buff *skb, int cap);
++bool netlink_net_capable(const struct sk_buff *skb, int cap);
++
+ #endif /* __LINUX_NETLINK_H */
diff --git a/include/linux/nls.h b/include/linux/nls.h
index 520681b..1d67ed2 100644
--- a/include/linux/nls.h
@@ -81508,6 +81550,37 @@ index 5f2e559..7d59314 100644

/**
* struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
+diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
+index 95961f0..0afb48f 100644
+--- a/include/linux/percpu-refcount.h
++++ b/include/linux/percpu-refcount.h
+@@ -110,7 +110,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
+ pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+
+ if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+- __this_cpu_inc(*pcpu_count);
++ this_cpu_inc(*pcpu_count);
+ else
+ atomic_inc(&ref->count);
+
+@@ -139,7 +139,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+ pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+
+ if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+- __this_cpu_inc(*pcpu_count);
++ this_cpu_inc(*pcpu_count);
+ ret = true;
+ }
+
+@@ -164,7 +164,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
+ pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+
+ if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+- __this_cpu_dec(*pcpu_count);
++ this_cpu_dec(*pcpu_count);
+ else if (unlikely(atomic_dec_and_test(&ref->count)))
+ ref->release(ref);
+
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index e56b07f..aef789b 100644
--- a/include/linux/perf_event.h
@@ -83816,7 +83889,7 @@ index a61b98c..aade1eb 100644
int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
-index 567c681..cd73ac0 100644
+index 567c681..cd73ac02 100644
--- a/include/net/llc_s_st.h
+++ b/include/net/llc_s_st.h
@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
@@ -86824,7 +86897,7 @@ index a17621c..d9e4b37 100644
else
new_fs = fs;
diff --git a/kernel/futex.c b/kernel/futex.c
-index 6801b37..bb6becca 100644
+index 6801b37..c0f67cf 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -54,6 +54,7 @@
@@ -86835,6 +86908,24 @@ index 6801b37..bb6becca 100644
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
+@@ -188,7 +189,7 @@ struct futex_pi_state {
+ atomic_t refcount;
+
+ union futex_key key;
+-};
++} __randomize_layout;
+
+ /**
+ * struct futex_q - The hashed futex queue entry, one per waiting task
+@@ -222,7 +223,7 @@ struct futex_q {
+ struct rt_mutex_waiter *rt_waiter;
+ union futex_key *requeue_pi_key;
+ u32 bitset;
+-};
++} __randomize_layout;
+
+ static const struct futex_q futex_q_init = {
+ /* list gets initialized in queue_me()*/
@@ -380,6 +381,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
struct page *page, *page_head;
int err, ro = 0;
@@ -86856,7 +86947,326 @@ index 6801b37..bb6becca 100644

pagefault_disable();
ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
-@@ -2886,6 +2892,7 @@ static void __init futex_detect_cmpxchg(void)
+@@ -729,6 +735,55 @@ void exit_pi_state_list(struct task_struct *curr)
+ raw_spin_unlock_irq(&curr->pi_lock);
+ }
+
++/*
++ * We need to check the following states:
++ *
++ * Waiter | pi_state | pi->owner | uTID | uODIED | ?
++ *
++ * [1] NULL | --- | --- | 0 | 0/1 | Valid
++ * [2] NULL | --- | --- | >0 | 0/1 | Valid
++ *
++ * [3] Found | NULL | -- | Any | 0/1 | Invalid
++ *
++ * [4] Found | Found | NULL | 0 | 1 | Valid
++ * [5] Found | Found | NULL | >0 | 1 | Invalid
++ *
++ * [6] Found | Found | task | 0 | 1 | Valid
++ *
++ * [7] Found | Found | NULL | Any | 0 | Invalid
++ *
++ * [8] Found | Found | task | ==taskTID | 0/1 | Valid
++ * [9] Found | Found | task | 0 | 0 | Invalid
++ * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
++ *
++ * [1] Indicates that the kernel can acquire the futex atomically. We
++ * came came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
++ *
++ * [2] Valid, if TID does not belong to a kernel thread. If no matching
++ * thread is found then it indicates that the owner TID has died.
++ *
++ * [3] Invalid. The waiter is queued on a non PI futex
++ *
++ * [4] Valid state after exit_robust_list(), which sets the user space
++ * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
++ *
++ * [5] The user space value got manipulated between exit_robust_list()
++ * and exit_pi_state_list()
++ *
++ * [6] Valid state after exit_pi_state_list() which sets the new owner in
++ * the pi_state but cannot access the user space value.
++ *
++ * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
++ *
++ * [8] Owner and user space value match
++ *
++ * [9] There is no transient state which sets the user space TID to 0
++ * except exit_robust_list(), but this is indicated by the
++ * FUTEX_OWNER_DIED bit. See [4]
++ *
++ * [10] There is no transient state which leaves owner and user space
++ * TID out of sync.
++ */
+ static int
+ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ union futex_key *key, struct futex_pi_state **ps)
+@@ -741,12 +796,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ plist_for_each_entry_safe(this, next, &hb->chain, list) {
+ if (match_futex(&this->key, key)) {
+ /*
+- * Another waiter already exists - bump up
+- * the refcount and return its pi_state:
++ * Sanity check the waiter before increasing
++ * the refcount and attaching to it.
+ */
+ pi_state = this->pi_state;
+ /*
+- * Userspace might have messed up non-PI and PI futexes
++ * Userspace might have messed up non-PI and
++ * PI futexes [3]
+ */
+ if (unlikely(!pi_state))
+ return -EINVAL;
+@@ -754,34 +810,70 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ WARN_ON(!atomic_read(&pi_state->refcount));
+
+ /*
+- * When pi_state->owner is NULL then the owner died
+- * and another waiter is on the fly. pi_state->owner
+- * is fixed up by the task which acquires
+- * pi_state->rt_mutex.
+- *
+- * We do not check for pid == 0 which can happen when
+- * the owner died and robust_list_exit() cleared the
+- * TID.
++ * Handle the owner died case:
+ */
+- if (pid && pi_state->owner) {
++ if (uval & FUTEX_OWNER_DIED) {
+ /*
+- * Bail out if user space manipulated the
+- * futex value.
++ * exit_pi_state_list sets owner to NULL and
++ * wakes the topmost waiter. The task which
++ * acquires the pi_state->rt_mutex will fixup
++ * owner.
+ */
+- if (pid != task_pid_vnr(pi_state->owner))
++ if (!pi_state->owner) {
++ /*
++ * No pi state owner, but the user
++ * space TID is not 0. Inconsistent
++ * state. [5]
++ */
++ if (pid)
++ return -EINVAL;
++ /*
++ * Take a ref on the state and
++ * return. [4]
++ */
++ goto out_state;
++ }
++
++ /*
++ * If TID is 0, then either the dying owner
++ * has not yet executed exit_pi_state_list()
++ * or some waiter acquired the rtmutex in the
++ * pi state, but did not yet fixup the TID in
++ * user space.
++ *
++ * Take a ref on the state and return. [6]
++ */
++ if (!pid)
++ goto out_state;
++ } else {
++ /*
++ * If the owner died bit is not set,
++ * then the pi_state must have an
++ * owner. [7]
++ */
++ if (!pi_state->owner)
+ return -EINVAL;
+ }
+
++ /*
++ * Bail out if user space manipulated the
++ * futex value. If pi state exists then the
++ * owner TID must be the same as the user
++ * space TID. [9/10]
++ */
++ if (pid != task_pid_vnr(pi_state->owner))
++ return -EINVAL;
++
++ out_state:
+ atomic_inc(&pi_state->refcount);
+ *ps = pi_state;
+-
+ return 0;
+ }
+ }
+
+ /*
+ * We are the first waiter - try to look up the real owner and attach
+- * the new pi_state to it, but bail out when TID = 0
++ * the new pi_state to it, but bail out when TID = 0 [1]
+ */
+ if (!pid)
+ return -ESRCH;
+@@ -789,6 +881,11 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ if (!p)
+ return -ESRCH;
+
++ if (!p->mm) {
++ put_task_struct(p);
++ return -EPERM;
++ }
++
+ /*
+ * We need to look at the task state flags to figure out,
+ * whether the task is exiting. To protect against the do_exit
+@@ -809,6 +906,9 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ return ret;
+ }
+
++ /*
++ * No existing pi state. First waiter. [2]
++ */
+ pi_state = alloc_pi_state();
+
+ /*
+@@ -880,10 +980,18 @@ retry:
+ return -EDEADLK;
+
+ /*
+- * Surprise - we got the lock. Just return to userspace:
++ * Surprise - we got the lock, but we do not trust user space at all.
+ */
+- if (unlikely(!curval))
+- return 1;
++ if (unlikely(!curval)) {
++ /*
++ * We verify whether there is kernel state for this
++ * futex. If not, we can safely assume, that the 0 ->
++ * TID transition is correct. If state exists, we do
++ * not bother to fixup the user space state as it was
++ * corrupted already.
++ */
++ return futex_top_waiter(hb, key) ? -EINVAL : 1;
++ }
+
+ uval = curval;
+
+@@ -1014,6 +1122,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+ struct task_struct *new_owner;
+ struct futex_pi_state *pi_state = this->pi_state;
+ u32 uninitialized_var(curval), newval;
++ int ret = 0;
+
+ if (!pi_state)
+ return -EINVAL;
+@@ -1037,23 +1146,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+ new_owner = this->task;
+
+ /*
+- * We pass it to the next owner. (The WAITERS bit is always
+- * kept enabled while there is PI state around. We must also
+- * preserve the owner died bit.)
++ * We pass it to the next owner. The WAITERS bit is always
++ * kept enabled while there is PI state around. We cleanup the
++ * owner died bit, because we are the owner.
+ */
+- if (!(uval & FUTEX_OWNER_DIED)) {
+- int ret = 0;
++ newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+
+- newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+-
+- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+- ret = -EFAULT;
+- else if (curval != uval)
+- ret = -EINVAL;
+- if (ret) {
+- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+- return ret;
+- }
++ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
++ ret = -EFAULT;
++ else if (curval != uval)
++ ret = -EINVAL;
++ if (ret) {
++ raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
++ return ret;
+ }
+
+ raw_spin_lock_irq(&pi_state->owner->pi_lock);
+@@ -1411,6 +1516,13 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+
+ if (requeue_pi) {
+ /*
++ * Requeue PI only works on two distinct uaddrs. This
++ * check is only valid for private futexes. See below.
++ */
++ if (uaddr1 == uaddr2)
++ return -EINVAL;
++
++ /*
+ * requeue_pi requires a pi_state, try to allocate it now
+ * without any locks in case it fails.
+ */
+@@ -1448,6 +1560,15 @@ retry:
+ if (unlikely(ret != 0))
+ goto out_put_key1;
+
++ /*
++ * The check above which compares uaddrs is not sufficient for
++ * shared futexes. We need to compare the keys:
++ */
++ if (requeue_pi && match_futex(&key1, &key2)) {
++ ret = -EINVAL;
++ goto out_put_keys;
++ }
++
+ hb1 = hash_futex(&key1);
+ hb2 = hash_futex(&key2);
+
+@@ -2287,9 +2408,10 @@ retry:
+ /*
+ * To avoid races, try to do the TID -> 0 atomic transition
+ * again. If it succeeds then we can return without waking
+- * anyone else up:
++ * anyone else up. We only try this if neither the waiters nor
++ * the owner died bit are set.
+ */
+- if (!(uval & FUTEX_OWNER_DIED) &&
++ if (!(uval & ~FUTEX_TID_MASK) &&
+ cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
+ goto pi_faulted;
+ /*
+@@ -2319,11 +2441,9 @@ retry:
+ /*
+ * No waiters - kernel unlocks the futex:
+ */
+- if (!(uval & FUTEX_OWNER_DIED)) {
+- ret = unlock_futex_pi(uaddr, uval);
+- if (ret == -EFAULT)
+- goto pi_faulted;
+- }
++ ret = unlock_futex_pi(uaddr, uval);
++ if (ret == -EFAULT)
++ goto pi_faulted;
+
+ out_unlock:
+ spin_unlock(&hb->lock);
+@@ -2485,6 +2605,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ if (ret)
+ goto out_key2;
+
++ /*
++ * The check above which compares uaddrs is not sufficient for
++ * shared futexes. We need to compare the keys:
++ */
++ if (match_futex(&q.key, &key2)) {
++ ret = -EINVAL;
++ goto out_put_keys;
++ }
++
+ /* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ futex_wait_queue_me(hb, &q, to);
+
+@@ -2886,6 +3015,7 @@ static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
u32 curval;
@@ -86864,7 +87274,7 @@ index 6801b37..bb6becca 100644

/*
* This will fail and we want it. Some arch implementations do
-@@ -2897,8 +2904,11 @@ static void __init futex_detect_cmpxchg(void)
+@@ -2897,8 +3027,11 @@ static void __init futex_detect_cmpxchg(void)
* implementation, the non-functional ones will return
* -ENOSYS.
*/
@@ -100950,7 +101360,7 @@ index b9ac598..f88cc56 100644
return;

diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
-index c4b7218..3e83259 100644
+index c4b7218..c7e9f14 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
@@ -100966,6 +101376,15 @@ index c4b7218..3e83259 100644
}

write_unlock_bh(&iucv_sk_list.lock);
+@@ -1829,7 +1829,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
+ spin_lock_irqsave(&list->lock, flags);
+
+ while (list_skb != (struct sk_buff *)list) {
+- if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
++ if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
+ this = list_skb;
+ break;
+ }
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index cd5b8ec..f205e6b 100644
--- a/net/iucv/iucv.c
@@ -101849,7 +102268,7 @@ index 11de55e..f25e448 100644
return 0;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
-index 04748ab6..ca8f86f 100644
+index 04748ab6..c72ef1f 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -257,7 +257,7 @@ static void netlink_overrun(struct sock *sk)
@@ -101861,7 +102280,137 @@ index 04748ab6..ca8f86f 100644
}

static void netlink_rcv_wake(struct sock *sk)
-@@ -2933,7 +2933,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
+@@ -1360,7 +1360,74 @@ retry:
+ return err;
+ }
+
+-static inline int netlink_capable(const struct socket *sock, unsigned int flag)
++/**
++ * __netlink_ns_capable - General netlink message capability test
++ * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
++ * @user_ns: The user namespace of the capability to use
++ * @cap: The capability to use
++ *
++ * Test to see if the opener of the socket we received the message
++ * from had when the netlink socket was created and the sender of the
++ * message has has the capability @cap in the user namespace @user_ns.
++ */
++bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
++ struct user_namespace *user_ns, int cap)
++{
++ return ((nsp->flags & NETLINK_SKB_DST) ||
++ file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
++ ns_capable(user_ns, cap);
++}
++EXPORT_SYMBOL(__netlink_ns_capable);
++
++/**
++ * netlink_ns_capable - General netlink message capability test
++ * @skb: socket buffer holding a netlink command from userspace
++ * @user_ns: The user namespace of the capability to use
++ * @cap: The capability to use
++ *
++ * Test to see if the opener of the socket we received the message
++ * from had when the netlink socket was created and the sender of the
++ * message has has the capability @cap in the user namespace @user_ns.
++ */
++bool netlink_ns_capable(const struct sk_buff *skb,
++ struct user_namespace *user_ns, int cap)
++{
++ return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
++}
++EXPORT_SYMBOL(netlink_ns_capable);
++
++/**
++ * netlink_capable - Netlink global message capability test
++ * @skb: socket buffer holding a netlink command from userspace
++ * @cap: The capability to use
++ *
++ * Test to see if the opener of the socket we received the message
++ * from had when the netlink socket was created and the sender of the
++ * message has has the capability @cap in all user namespaces.
++ */
++bool netlink_capable(const struct sk_buff *skb, int cap)
++{
++ return netlink_ns_capable(skb, &init_user_ns, cap);
++}
++EXPORT_SYMBOL(netlink_capable);
++
++/**
++ * netlink_net_capable - Netlink network namespace message capability test
++ * @skb: socket buffer holding a netlink command from userspace
++ * @cap: The capability to use
++ *
++ * Test to see if the opener of the socket we received the message
++ * from had when the netlink socket was created and the sender of the
++ * message has has the capability @cap over the network namespace of
++ * the socket we received the message from.
++ */
++bool netlink_net_capable(const struct sk_buff *skb, int cap)
++{
++ return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
++}
++EXPORT_SYMBOL(netlink_net_capable);
++
++static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
+ {
+ return (nl_table[sock->sk->sk_protocol].flags & flag) ||
+ ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
+@@ -1428,7 +1495,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
+
+ /* Only superuser is allowed to listen multicasts */
+ if (nladdr->nl_groups) {
+- if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
++ if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
+ return -EPERM;
+ err = netlink_realloc_groups(sk);
+ if (err)
+@@ -1490,7 +1557,7 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+ return -EINVAL;
+
+ if ((nladdr->nl_groups || nladdr->nl_pid) &&
+- !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
++ !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
+ return -EPERM;
+
+ if (!nlk->portid)
+@@ -2096,7 +2163,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ case NETLINK_ADD_MEMBERSHIP:
+ case NETLINK_DROP_MEMBERSHIP: {
+- if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
++ if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
+ return -EPERM;
+ err = netlink_realloc_groups(sk);
+ if (err)
+@@ -2228,6 +2295,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ struct sk_buff *skb;
+ int err;
+ struct scm_cookie scm;
++ u32 netlink_skb_flags = 0;
+
+ if (msg->msg_flags&MSG_OOB)
+ return -EOPNOTSUPP;
+@@ -2247,8 +2315,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ dst_group = ffs(addr->nl_groups);
+ err = -EPERM;
+ if ((dst_group || dst_portid) &&
+- !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
++ !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
+ goto out;
++ netlink_skb_flags |= NETLINK_SKB_DST;
+ } else {
+ dst_portid = nlk->dst_portid;
+ dst_group = nlk->dst_group;
+@@ -2278,6 +2347,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ NETLINK_CB(skb).portid = nlk->portid;
+ NETLINK_CB(skb).dst_group = dst_group;
+ NETLINK_CB(skb).creds = siocb->scm->creds;
++ NETLINK_CB(skb).flags = netlink_skb_flags;
+
+ err = -EFAULT;
+ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+@@ -2933,7 +3003,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(s),
nlk->cb_running,
atomic_read(&s->sk_refcnt),

diff --git a/3.2.59/0000_README b/3.2.59/0000_README
index 71c8053..d9b8617 100644
--- a/3.2.59/0000_README
+++ b/3.2.59/0000_README
@@ -154,7 +154,7 @@ Patch: 1058_linux-3.2.59.patch
From: http://www.kernel.org
Desc: Linux 3.2.59

-Patch: 4420_grsecurity-3.0-3.2.59-201406030716.patch
+Patch: 4420_grsecurity-3.0-3.2.59-201406051309.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity


diff --git a/3.2.59/4420_grsecurity-3.0-3.2.59-201406030716.patch b/3.2.59/4420_grsecurity-3.0-3.2.59-201406051309.patch
similarity index 99%
rename from 3.2.59/4420_grsecurity-3.0-3.2.59-201406030716.patch
rename to 3.2.59/4420_grsecurity-3.0-3.2.59-201406051309.patch
index fc192d4..ff8e72f 100644
--- a/3.2.59/4420_grsecurity-3.0-3.2.59-201406030716.patch
+++ b/3.2.59/4420_grsecurity-3.0-3.2.59-201406051309.patch
@@ -37650,7 +37650,7 @@ index 632ae24..244cf4a 100644
if (drm_lock_free(&master->lock, lock->context)) {
/* FIXME: Should really bail out here. */
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
-index 0f9ef9b..48bd695 100644
+index 0f9ef9b..48bd6956 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -495,7 +495,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
@@ -56707,7 +56707,7 @@ index 451b9b8..12e5a03 100644

out_free_fd:
diff --git a/fs/exec.c b/fs/exec.c
-index 78199eb..7ff0dd8 100644
+index 78199eb..38c4c00 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -55,12 +55,35 @@
@@ -57576,7 +57576,7 @@ index 78199eb..7ff0dd8 100644
+#endif
+
+#ifndef CONFIG_STACK_GROWSUP
-+ const void * stackstart = task_stack_page(current);
++ unsigned long stackstart = (unsigned long)task_stack_page(current);
+ if (unlikely(current_stack_pointer < stackstart + 512 ||
+ current_stack_pointer >= stackstart + THREAD_SIZE))
+ BUG();
@@ -64870,7 +64870,7 @@ index 8a89949..6776861 100644
xfs_init_zones(void)
diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
new file mode 100644
-index 0000000..2255157
+index 0000000..ddeec00
--- /dev/null
+++ b/grsecurity/Kconfig
@@ -0,0 +1,1160 @@
@@ -65016,7 +65016,7 @@ index 0000000..2255157
+config GRKERNSEC_KSTACKOVERFLOW
+ bool "Prevent kernel stack overflows"
+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on !IA64 && 64BIT && BROKEN
++ depends on !IA64 && 64BIT
+ help
+ If you say Y here, the kernel's process stacks will be allocated
+ with vmalloc instead of the kernel's default allocator. This
@@ -83699,7 +83699,7 @@ index 37a3bbd..55a4241 100644
extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
struct sk_buff *skb);
diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
-index 567c681..cd73ac0 100644
+index 567c681..cd73ac02 100644
--- a/include/net/llc_s_st.h
+++ b/include/net/llc_s_st.h
@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
@@ -86873,7 +86873,7 @@ index ce0c182..b8e5b18 100644
else
new_fs = fs;
diff --git a/kernel/futex.c b/kernel/futex.c
-index 8888815..9a6f6fb 100644
+index 8888815..36459d8 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -54,6 +54,7 @@
@@ -86884,6 +86884,24 @@ index 8888815..9a6f6fb 100644
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
+@@ -97,7 +98,7 @@ struct futex_pi_state {
+ atomic_t refcount;
+
+ union futex_key key;
+-};
++} __randomize_layout;
+
+ /**
+ * struct futex_q - The hashed futex queue entry, one per waiting task
+@@ -131,7 +132,7 @@ struct futex_q {
+ struct rt_mutex_waiter *rt_waiter;
+ union futex_key *requeue_pi_key;
+ u32 bitset;
+-};
++} __randomize_layout;
+
+ static const struct futex_q futex_q_init = {
+ /* list gets initialized in queue_me()*/
@@ -240,6 +241,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
struct page *page, *page_head;
int err, ro = 0;
@@ -86905,7 +86923,326 @@ index 8888815..9a6f6fb 100644

pagefault_disable();
ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
-@@ -2744,6 +2750,7 @@ static int __init futex_init(void)
+@@ -588,6 +594,55 @@ void exit_pi_state_list(struct task_struct *curr)
+ raw_spin_unlock_irq(&curr->pi_lock);
+ }
+
++/*
++ * We need to check the following states:
++ *
++ * Waiter | pi_state | pi->owner | uTID | uODIED | ?
++ *
++ * [1] NULL | --- | --- | 0 | 0/1 | Valid
++ * [2] NULL | --- | --- | >0 | 0/1 | Valid
++ *
++ * [3] Found | NULL | -- | Any | 0/1 | Invalid
++ *
++ * [4] Found | Found | NULL | 0 | 1 | Valid
++ * [5] Found | Found | NULL | >0 | 1 | Invalid
++ *
++ * [6] Found | Found | task | 0 | 1 | Valid
++ *
++ * [7] Found | Found | NULL | Any | 0 | Invalid
++ *
++ * [8] Found | Found | task | ==taskTID | 0/1 | Valid
++ * [9] Found | Found | task | 0 | 0 | Invalid
++ * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
++ *
++ * [1] Indicates that the kernel can acquire the futex atomically. We
++ * came came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
++ *
++ * [2] Valid, if TID does not belong to a kernel thread. If no matching
++ * thread is found then it indicates that the owner TID has died.
++ *
++ * [3] Invalid. The waiter is queued on a non PI futex
++ *
++ * [4] Valid state after exit_robust_list(), which sets the user space
++ * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
++ *
++ * [5] The user space value got manipulated between exit_robust_list()
++ * and exit_pi_state_list()
++ *
++ * [6] Valid state after exit_pi_state_list() which sets the new owner in
++ * the pi_state but cannot access the user space value.
++ *
++ * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
++ *
++ * [8] Owner and user space value match
++ *
++ * [9] There is no transient state which sets the user space TID to 0
++ * except exit_robust_list(), but this is indicated by the
++ * FUTEX_OWNER_DIED bit. See [4]
++ *
++ * [10] There is no transient state which leaves owner and user space
++ * TID out of sync.
++ */
+ static int
+ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ union futex_key *key, struct futex_pi_state **ps)
+@@ -603,12 +658,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ plist_for_each_entry_safe(this, next, head, list) {
+ if (match_futex(&this->key, key)) {
+ /*
+- * Another waiter already exists - bump up
+- * the refcount and return its pi_state:
++ * Sanity check the waiter before increasing
++ * the refcount and attaching to it.
+ */
+ pi_state = this->pi_state;
+ /*
+- * Userspace might have messed up non-PI and PI futexes
++ * Userspace might have messed up non-PI and
++ * PI futexes [3]
+ */
+ if (unlikely(!pi_state))
+ return -EINVAL;
+@@ -616,34 +672,70 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ WARN_ON(!atomic_read(&pi_state->refcount));
+
+ /*
+- * When pi_state->owner is NULL then the owner died
+- * and another waiter is on the fly. pi_state->owner
+- * is fixed up by the task which acquires
+- * pi_state->rt_mutex.
+- *
+- * We do not check for pid == 0 which can happen when
+- * the owner died and robust_list_exit() cleared the
+- * TID.
++ * Handle the owner died case:
+ */
+- if (pid && pi_state->owner) {
++ if (uval & FUTEX_OWNER_DIED) {
+ /*
+- * Bail out if user space manipulated the
+- * futex value.
++ * exit_pi_state_list sets owner to NULL and
++ * wakes the topmost waiter. The task which
++ * acquires the pi_state->rt_mutex will fixup
++ * owner.
+ */
+- if (pid != task_pid_vnr(pi_state->owner))
++ if (!pi_state->owner) {
++ /*
++ * No pi state owner, but the user
++ * space TID is not 0. Inconsistent
++ * state. [5]
++ */
++ if (pid)
++ return -EINVAL;
++ /*
++ * Take a ref on the state and
++ * return. [4]
++ */
++ goto out_state;
++ }
++
++ /*
++ * If TID is 0, then either the dying owner
++ * has not yet executed exit_pi_state_list()
++ * or some waiter acquired the rtmutex in the
++ * pi state, but did not yet fixup the TID in
++ * user space.
++ *
++ * Take a ref on the state and return. [6]
++ */
++ if (!pid)
++ goto out_state;
++ } else {
++ /*
++ * If the owner died bit is not set,
++ * then the pi_state must have an
++ * owner. [7]
++ */
++ if (!pi_state->owner)
+ return -EINVAL;
+ }
+
++ /*
++ * Bail out if user space manipulated the
++ * futex value. If pi state exists then the
++ * owner TID must be the same as the user
++ * space TID. [9/10]
++ */
++ if (pid != task_pid_vnr(pi_state->owner))
++ return -EINVAL;
++
++ out_state:
+ atomic_inc(&pi_state->refcount);
+ *ps = pi_state;
+-
+ return 0;
+ }
+ }
+
+ /*
+ * We are the first waiter - try to look up the real owner and attach
+- * the new pi_state to it, but bail out when TID = 0
++ * the new pi_state to it, but bail out when TID = 0 [1]
+ */
+ if (!pid)
+ return -ESRCH;
+@@ -651,6 +743,11 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ if (!p)
+ return -ESRCH;
+
++ if (!p->mm) {
++ put_task_struct(p);
++ return -EPERM;
++ }
++
+ /*
+ * We need to look at the task state flags to figure out,
+ * whether the task is exiting. To protect against the do_exit
+@@ -671,6 +768,9 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ return ret;
+ }
+
++ /*
++ * No existing pi state. First waiter. [2]
++ */
+ pi_state = alloc_pi_state();
+
+ /*
+@@ -742,10 +842,18 @@ retry:
+ return -EDEADLK;
+
+ /*
+- * Surprise - we got the lock. Just return to userspace:
++ * Surprise - we got the lock, but we do not trust user space at all.
+ */
+- if (unlikely(!curval))
+- return 1;
++ if (unlikely(!curval)) {
++ /*
++ * We verify whether there is kernel state for this
++ * futex. If not, we can safely assume, that the 0 ->
++ * TID transition is correct. If state exists, we do
++ * not bother to fixup the user space state as it was
++ * corrupted already.
++ */
++ return futex_top_waiter(hb, key) ? -EINVAL : 1;
++ }
+
+ uval = curval;
+
+@@ -875,6 +983,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+ struct task_struct *new_owner;
+ struct futex_pi_state *pi_state = this->pi_state;
+ u32 uninitialized_var(curval), newval;
++ int ret = 0;
+
+ if (!pi_state)
+ return -EINVAL;
+@@ -898,23 +1007,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+ new_owner = this->task;
+
+ /*
+- * We pass it to the next owner. (The WAITERS bit is always
+- * kept enabled while there is PI state around. We must also
+- * preserve the owner died bit.)
++ * We pass it to the next owner. The WAITERS bit is always
++ * kept enabled while there is PI state around. We cleanup the
++ * owner died bit, because we are the owner.
+ */
+- if (!(uval & FUTEX_OWNER_DIED)) {
+- int ret = 0;
++ newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+
+- newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+-
+- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+- ret = -EFAULT;
+- else if (curval != uval)
+- ret = -EINVAL;
+- if (ret) {
+- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+- return ret;
+- }
++ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
++ ret = -EFAULT;
++ else if (curval != uval)
++ ret = -EINVAL;
++ if (ret) {
++ raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
++ return ret;
+ }
+
+ raw_spin_lock_irq(&pi_state->owner->pi_lock);
+@@ -1272,6 +1377,13 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+
+ if (requeue_pi) {
+ /*
++ * Requeue PI only works on two distinct uaddrs. This
++ * check is only valid for private futexes. See below.
++ */
++ if (uaddr1 == uaddr2)
++ return -EINVAL;
++
++ /*
+ * requeue_pi requires a pi_state, try to allocate it now
+ * without any locks in case it fails.
+ */
+@@ -1309,6 +1421,15 @@ retry:
+ if (unlikely(ret != 0))
+ goto out_put_key1;
+
++ /*
++ * The check above which compares uaddrs is not sufficient for
++ * shared futexes. We need to compare the keys:
++ */
++ if (requeue_pi && match_futex(&key1, &key2)) {
++ ret = -EINVAL;
++ goto out_put_keys;
++ }
++
+ hb1 = hash_futex(&key1);
+ hb2 = hash_futex(&key2);
+
+@@ -2133,9 +2254,10 @@ retry:
+ /*
+ * To avoid races, try to do the TID -> 0 atomic transition
+ * again. If it succeeds then we can return without waking
+- * anyone else up:
++ * anyone else up. We only try this if neither the waiters nor
++ * the owner died bit are set.
+ */
+- if (!(uval & FUTEX_OWNER_DIED) &&
++ if (!(uval & ~FUTEX_TID_MASK) &&
+ cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
+ goto pi_faulted;
+ /*
+@@ -2167,11 +2289,9 @@ retry:
+ /*
+ * No waiters - kernel unlocks the futex:
+ */
+- if (!(uval & FUTEX_OWNER_DIED)) {
+- ret = unlock_futex_pi(uaddr, uval);
+- if (ret == -EFAULT)
+- goto pi_faulted;
+- }
++ ret = unlock_futex_pi(uaddr, uval);
++ if (ret == -EFAULT)
++ goto pi_faulted;
+
+ out_unlock:
+ spin_unlock(&hb->lock);
+@@ -2331,6 +2451,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ if (ret)
+ goto out_key2;
+
++ /*
++ * The check above which compares uaddrs is not sufficient for
++ * shared futexes. We need to compare the keys:
++ */
++ if (match_futex(&q.key, &key2)) {
++ ret = -EINVAL;
++ goto out_put_keys;
++ }
++
+ /* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ futex_wait_queue_me(hb, &q, to);
+
+@@ -2744,6 +2873,7 @@ static int __init futex_init(void)
{
u32 curval;
int i;
@@ -86913,7 +87250,7 @@ index 8888815..9a6f6fb 100644

/*
* This will fail and we want it. Some arch implementations do
-@@ -2755,8 +2762,11 @@ static int __init futex_init(void)
+@@ -2755,8 +2885,11 @@ static int __init futex_init(void)
* implementation, the non-functional ones will return
* -ENOSYS.
*/
@@ -98293,10 +98630,41 @@ index 136ac4f..f917fa9 100644
mm->unmap_area = arch_unmap_area;
}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index eeba3bb..5fc3323 100644
+index eeba3bb..0c8633f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
-@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
+@@ -27,10 +27,30 @@
+ #include <linux/pfn.h>
+ #include <linux/kmemleak.h>
+ #include <linux/atomic.h>
++#include <linux/llist.h>
+ #include <asm/uaccess.h>
+ #include <asm/tlbflush.h>
+ #include <asm/shmparam.h>
+
++struct vfree_deferred {
++ struct llist_head list;
++ struct work_struct wq;
++};
++static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
++
++static void __vunmap(const void *, int);
++
++static void free_work(struct work_struct *w)
++{
++ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
++ struct llist_node *llnode = llist_del_all(&p->list);
++ while (llnode) {
++ void *p = llnode;
++ llnode = llist_next(llnode);
++ __vunmap(p, 1);
++ }
++}
++
+ /*** Page table manipulation functions ***/
+
+ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
+@@ -39,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)

pte = pte_offset_kernel(pmd, addr);
do {
@@ -98318,7 +98686,7 @@ index eeba3bb..5fc3323 100644
} while (pte++, addr += PAGE_SIZE, addr != end);
}

-@@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
+@@ -100,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
pte = pte_alloc_kernel(pmd, addr);
if (!pte)
return -ENOMEM;
@@ -98350,7 +98718,7 @@ index eeba3bb..5fc3323 100644
return 0;
}

-@@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
+@@ -119,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
pmd_t *pmd;
unsigned long next;

@@ -98359,7 +98727,7 @@ index eeba3bb..5fc3323 100644
if (!pmd)
return -ENOMEM;
do {
-@@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
+@@ -136,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
pud_t *pud;
unsigned long next;

@@ -98368,7 +98736,7 @@ index eeba3bb..5fc3323 100644
if (!pud)
return -ENOMEM;
do {
-@@ -196,6 +220,12 @@ int is_vmalloc_or_module_addr(const void *x)
+@@ -196,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
if (addr >= MODULES_VADDR && addr < MODULES_END)
return 1;
#endif
@@ -98381,7 +98749,7 @@ index eeba3bb..5fc3323 100644
return is_vmalloc_addr(x);
}

-@@ -216,8 +246,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
+@@ -216,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)

if (!pgd_none(*pgd)) {
pud_t *pud = pud_offset(pgd, addr);
@@ -98396,7 +98764,22 @@ index eeba3bb..5fc3323 100644
if (!pmd_none(*pmd)) {
pte_t *ptep, pte;

-@@ -1295,6 +1331,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
+@@ -1151,10 +1207,14 @@ void __init vmalloc_init(void)
+
+ for_each_possible_cpu(i) {
+ struct vmap_block_queue *vbq;
++ struct vfree_deferred *p;
+
+ vbq = &per_cpu(vmap_block_queue, i);
+ spin_lock_init(&vbq->lock);
+ INIT_LIST_HEAD(&vbq->free);
++ p = &per_cpu(vfree_deferred, i);
++ init_llist_head(&p->list);
++ INIT_WORK(&p->wq, free_work);
+ }
+
+ /* Import existing vmlist entries. */
+@@ -1295,6 +1355,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
struct vm_struct *area;

BUG_ON(in_interrupt());
@@ -98413,7 +98796,56 @@ index eeba3bb..5fc3323 100644
if (flags & VM_IOREMAP) {
int bit = fls(size);

-@@ -1527,6 +1573,11 @@ void *vmap(struct page **pages, unsigned int count,
+@@ -1469,7 +1539,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
+ kfree(area);
+ return;
+ }
+-
++
+ /**
+ * vfree - release memory allocated by vmalloc()
+ * @addr: memory base address
+@@ -1478,15 +1548,26 @@ static void __vunmap(const void *addr, int deallocate_pages)
+ * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
+ * NULL, no operation is performed.
+ *
+- * Must not be called in interrupt context.
++ * Must not be called in NMI context (strictly speaking, only if we don't
++ * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
++ * conventions for vfree() arch-depenedent would be a really bad idea)
++ *
++ * NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
+ */
+ void vfree(const void *addr)
+ {
+- BUG_ON(in_interrupt());
++ BUG_ON(in_nmi());
+
+ kmemleak_free(addr);
+
+- __vunmap(addr, 1);
++ if (!addr)
++ return;
++ if (unlikely(in_interrupt())) {
++ struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
++ if (llist_add((struct llist_node *)addr, &p->list))
++ schedule_work(&p->wq);
++ } else
++ __vunmap(addr, 1);
+ }
+ EXPORT_SYMBOL(vfree);
+
+@@ -1503,7 +1584,8 @@ void vunmap(const void *addr)
+ {
+ BUG_ON(in_interrupt());
+ might_sleep();
+- __vunmap(addr, 0);
++ if (addr)
++ __vunmap(addr, 0);
+ }
+ EXPORT_SYMBOL(vunmap);
+
+@@ -1527,6 +1609,11 @@ void *vmap(struct page **pages, unsigned int count,
if (count > totalram_pages)
return NULL;

@@ -98425,7 +98857,7 @@ index eeba3bb..5fc3323 100644
area = get_vm_area_caller((count << PAGE_SHIFT), flags,
__builtin_return_address(0));
if (!area)
-@@ -1628,6 +1679,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
+@@ -1628,6 +1715,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
if (!size || (size >> PAGE_SHIFT) > totalram_pages)
goto fail;

@@ -98439,7 +98871,7 @@ index eeba3bb..5fc3323 100644
area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
start, end, node, gfp_mask, caller);
if (!area)
-@@ -1694,6 +1752,18 @@ static inline void *__vmalloc_node_flags(unsigned long size,
+@@ -1694,6 +1788,18 @@ static inline void *__vmalloc_node_flags(unsigned long size,
node, __builtin_return_address(0));
}

@@ -98458,7 +98890,7 @@ index eeba3bb..5fc3323 100644
/**
* vmalloc - allocate virtually contiguous memory
* @size: allocation size
-@@ -1801,10 +1871,9 @@ EXPORT_SYMBOL(vzalloc_node);
+@@ -1801,10 +1907,9 @@ EXPORT_SYMBOL(vzalloc_node);
* For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*/
@@ -98470,7 +98902,7 @@ index eeba3bb..5fc3323 100644
-1, __builtin_return_address(0));
}

-@@ -2099,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+@@ -2099,6 +2204,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long uaddr = vma->vm_start;
unsigned long usize = vma->vm_end - vma->vm_start;

@@ -98479,7 +98911,7 @@ index eeba3bb..5fc3323 100644
if ((PAGE_SIZE-1) & (unsigned long)addr)
return -EINVAL;

-@@ -2351,8 +2422,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
+@@ -2351,8 +2458,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
return NULL;
}

@@ -98490,7 +98922,7 @@ index eeba3bb..5fc3323 100644
if (!vas || !vms)
goto err_free;

-@@ -2536,11 +2607,15 @@ static int s_show(struct seq_file *m, void *p)
+@@ -2536,11 +2643,15 @@ static int s_show(struct seq_file *m, void *p)
{
struct vm_struct *v = p;