From: "Anthony G. Basile" <blueness@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/hardened-patchset:master commit in: 3.2.51/, 3.11.6/
Date: Sun, 27 Oct 2013 15:03:26
Message-Id: 1382886191.1ec392f3e79af1daa039ffd7fd5245ec48584a3b.blueness@gentoo
1 commit: 1ec392f3e79af1daa039ffd7fd5245ec48584a3b
2 Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
3 AuthorDate: Sun Oct 27 15:03:11 2013 +0000
4 Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
5 CommitDate: Sun Oct 27 15:03:11 2013 +0000
6 URL: http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=1ec392f3
7
8 Grsec/PaX: 2.9.1-{3.2.51,3.11.6}-201310260850
9
10 ---
11 3.11.6/0000_README | 6 +-
12 3.11.6/1005_linux-3.11.6.patch | 2260 --------------------
13 ...420_grsecurity-2.9.1-3.11.6-201310260850.patch} | 23 +-
14 3.2.51/0000_README | 2 +-
15 ...420_grsecurity-2.9.1-3.2.51-201310260849.patch} | 17 +-
16 5 files changed, 18 insertions(+), 2290 deletions(-)
17
18 diff --git a/3.11.6/0000_README b/3.11.6/0000_README
19 index db9995c..2d9249d 100644
20 --- a/3.11.6/0000_README
21 +++ b/3.11.6/0000_README
22 @@ -2,11 +2,7 @@ README
23 -----------------------------------------------------------------------------
24 Individual Patch Descriptions:
25 -----------------------------------------------------------------------------
26 -Patch: 1005_linux-3.11.6.patch
27 -From: http://www.kernel.org
28 -Desc: Linux 3.11.6
29 -
30 -Patch: 4420_grsecurity-2.9.1-3.11.6-201310191259.patch
31 +Patch: 4420_grsecurity-2.9.1-3.11.6-201310260850.patch
32 From: http://www.grsecurity.net
33 Desc: hardened-sources base patch from upstream grsecurity
34
35
36 diff --git a/3.11.6/1005_linux-3.11.6.patch b/3.11.6/1005_linux-3.11.6.patch
37 deleted file mode 100644
38 index ad3cb53..0000000
39 --- a/3.11.6/1005_linux-3.11.6.patch
40 +++ /dev/null
41 @@ -1,2260 +0,0 @@
42 -diff --git a/Makefile b/Makefile
43 -index 83121b7..e87ba83 100644
44 ---- a/Makefile
45 -+++ b/Makefile
46 -@@ -1,6 +1,6 @@
47 - VERSION = 3
48 - PATCHLEVEL = 11
49 --SUBLEVEL = 5
50 -+SUBLEVEL = 6
51 - EXTRAVERSION =
52 - NAME = Linux for Workgroups
53 -
54 -diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
55 -index 442ce5d..43de302 100644
56 ---- a/arch/arc/include/asm/delay.h
57 -+++ b/arch/arc/include/asm/delay.h
58 -@@ -53,11 +53,10 @@ static inline void __udelay(unsigned long usecs)
59 - {
60 - unsigned long loops;
61 -
62 -- /* (long long) cast ensures 64 bit MPY - real or emulated
63 -+ /* (u64) cast ensures 64 bit MPY - real or emulated
64 - * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
65 - */
66 -- loops = ((long long)(usecs * 4295 * HZ) *
67 -- (long long)(loops_per_jiffy)) >> 32;
68 -+ loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32;
69 -
70 - __delay(loops);
71 - }
72 -diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
73 -index f158197..b6a8c2d 100644
74 ---- a/arch/arc/include/asm/spinlock.h
75 -+++ b/arch/arc/include/asm/spinlock.h
76 -@@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
77 -
78 - static inline void arch_spin_unlock(arch_spinlock_t *lock)
79 - {
80 -- lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
81 -+ unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
82 -+
83 -+ __asm__ __volatile__(
84 -+ " ex %0, [%1] \n"
85 -+ : "+r" (tmp)
86 -+ : "r"(&(lock->slock))
87 -+ : "memory");
88 -+
89 - smp_mb();
90 - }
91 -
92 -diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
93 -index 3242082..30c9baf 100644
94 ---- a/arch/arc/include/asm/uaccess.h
95 -+++ b/arch/arc/include/asm/uaccess.h
96 -@@ -43,7 +43,7 @@
97 - * Because it essentially checks if buffer end is within limit and @len is
98 - * non-ngeative, which implies that buffer start will be within limit too.
99 - *
100 -- * The reason for rewriting being, for majorit yof cases, @len is generally
101 -+ * The reason for rewriting being, for majority of cases, @len is generally
102 - * compile time constant, causing first sub-expression to be compile time
103 - * subsumed.
104 - *
105 -@@ -53,7 +53,7 @@
106 - *
107 - */
108 - #define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
109 -- (((addr)+(sz)) <= get_fs()))
110 -+ ((addr) <= (get_fs() - (sz))))
111 - #define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
112 - likely(__user_ok((addr), (sz))))
113 -
114 -diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
115 -index 3332385..5d76706 100644
116 ---- a/arch/arc/kernel/ptrace.c
117 -+++ b/arch/arc/kernel/ptrace.c
118 -@@ -102,7 +102,7 @@ static int genregs_set(struct task_struct *target,
119 - REG_IGNORE_ONE(pad2);
120 - REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */
121 - REG_IGNORE_ONE(efa); /* efa update invalid */
122 -- REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */
123 -+ REG_IGNORE_ONE(stop_pc); /* PC updated via @ret */
124 -
125 - return ret;
126 - }
127 -diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
128 -index ee6ef2f..7e95e1a 100644
129 ---- a/arch/arc/kernel/signal.c
130 -+++ b/arch/arc/kernel/signal.c
131 -@@ -101,7 +101,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
132 - {
133 - struct rt_sigframe __user *sf;
134 - unsigned int magic;
135 -- int err;
136 - struct pt_regs *regs = current_pt_regs();
137 -
138 - /* Always make any pending restarted system calls return -EINTR */
139 -@@ -119,15 +118,16 @@ SYSCALL_DEFINE0(rt_sigreturn)
140 - if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
141 - goto badframe;
142 -
143 -- err = restore_usr_regs(regs, sf);
144 -- err |= __get_user(magic, &sf->sigret_magic);
145 -- if (err)
146 -+ if (__get_user(magic, &sf->sigret_magic))
147 - goto badframe;
148 -
149 - if (unlikely(is_do_ss_needed(magic)))
150 - if (restore_altstack(&sf->uc.uc_stack))
151 - goto badframe;
152 -
153 -+ if (restore_usr_regs(regs, sf))
154 -+ goto badframe;
155 -+
156 - /* Don't restart from sigreturn */
157 - syscall_wont_restart(regs);
158 -
159 -@@ -191,6 +191,15 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
160 - return 1;
161 -
162 - /*
163 -+ * w/o SA_SIGINFO, struct ucontext is partially populated (only
164 -+ * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
165 -+ * during signal handler execution. This works for SA_SIGINFO as well
166 -+ * although the semantics are now overloaded (the same reg state can be
167 -+ * inspected by userland: but are they allowed to fiddle with it ?
168 -+ */
169 -+ err |= stash_usr_regs(sf, regs, set);
170 -+
171 -+ /*
172 - * SA_SIGINFO requires 3 args to signal handler:
173 - * #1: sig-no (common to any handler)
174 - * #2: struct siginfo
175 -@@ -213,14 +222,6 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
176 - magic = MAGIC_SIGALTSTK;
177 - }
178 -
179 -- /*
180 -- * w/o SA_SIGINFO, struct ucontext is partially populated (only
181 -- * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
182 -- * during signal handler execution. This works for SA_SIGINFO as well
183 -- * although the semantics are now overloaded (the same reg state can be
184 -- * inspected by userland: but are they allowed to fiddle with it ?
185 -- */
186 -- err |= stash_usr_regs(sf, regs, set);
187 - err |= __put_user(magic, &sf->sigret_magic);
188 - if (err)
189 - return err;
190 -diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
191 -index c0f832f..00ad070 100644
192 ---- a/arch/arc/kernel/unaligned.c
193 -+++ b/arch/arc/kernel/unaligned.c
194 -@@ -233,6 +233,12 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
195 - regs->status32 &= ~STATUS_DE_MASK;
196 - } else {
197 - regs->ret += state.instr_len;
198 -+
199 -+ /* handle zero-overhead-loop */
200 -+ if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
201 -+ regs->ret = regs->lp_start;
202 -+ regs->lp_count--;
203 -+ }
204 - }
205 -
206 - return 0;
207 -diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
208 -index bfc198c..863c892 100644
209 ---- a/arch/arm/include/asm/jump_label.h
210 -+++ b/arch/arm/include/asm/jump_label.h
211 -@@ -16,7 +16,7 @@
212 -
213 - static __always_inline bool arch_static_branch(struct static_key *key)
214 - {
215 -- asm goto("1:\n\t"
216 -+ asm_volatile_goto("1:\n\t"
217 - JUMP_LABEL_NOP "\n\t"
218 - ".pushsection __jump_table, \"aw\"\n\t"
219 - ".word 1b, %l[l_yes], %c0\n\t"
220 -diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
221 -index 4d6d77e..e194f95 100644
222 ---- a/arch/mips/include/asm/jump_label.h
223 -+++ b/arch/mips/include/asm/jump_label.h
224 -@@ -22,7 +22,7 @@
225 -
226 - static __always_inline bool arch_static_branch(struct static_key *key)
227 - {
228 -- asm goto("1:\tnop\n\t"
229 -+ asm_volatile_goto("1:\tnop\n\t"
230 - "nop\n\t"
231 - ".pushsection __jump_table, \"aw\"\n\t"
232 - WORD_INSN " 1b, %l[l_yes], %0\n\t"
233 -diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
234 -index 4204d76..029e002 100644
235 ---- a/arch/mips/kernel/octeon_switch.S
236 -+++ b/arch/mips/kernel/octeon_switch.S
237 -@@ -73,7 +73,7 @@
238 - 3:
239 -
240 - #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
241 -- PTR_L t8, __stack_chk_guard
242 -+ PTR_LA t8, __stack_chk_guard
243 - LONG_L t9, TASK_STACK_CANARY(a1)
244 - LONG_S t9, 0(t8)
245 - #endif
246 -diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
247 -index 38af83f..20b7b04 100644
248 ---- a/arch/mips/kernel/r2300_switch.S
249 -+++ b/arch/mips/kernel/r2300_switch.S
250 -@@ -67,7 +67,7 @@ LEAF(resume)
251 - 1:
252 -
253 - #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
254 -- PTR_L t8, __stack_chk_guard
255 -+ PTR_LA t8, __stack_chk_guard
256 - LONG_L t9, TASK_STACK_CANARY(a1)
257 - LONG_S t9, 0(t8)
258 - #endif
259 -diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
260 -index 921238a..078de5e 100644
261 ---- a/arch/mips/kernel/r4k_switch.S
262 -+++ b/arch/mips/kernel/r4k_switch.S
263 -@@ -69,7 +69,7 @@
264 - 1:
265 -
266 - #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
267 -- PTR_L t8, __stack_chk_guard
268 -+ PTR_LA t8, __stack_chk_guard
269 - LONG_L t9, TASK_STACK_CANARY(a1)
270 - LONG_S t9, 0(t8)
271 - #endif
272 -diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
273 -index 04e47c6..b3f87a3 100644
274 ---- a/arch/parisc/kernel/traps.c
275 -+++ b/arch/parisc/kernel/traps.c
276 -@@ -805,14 +805,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
277 - else {
278 -
279 - /*
280 -- * The kernel should never fault on its own address space.
281 -+ * The kernel should never fault on its own address space,
282 -+ * unless pagefault_disable() was called before.
283 - */
284 -
285 -- if (fault_space == 0)
286 -+ if (fault_space == 0 && !in_atomic())
287 - {
288 - pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
289 - parisc_terminate("Kernel Fault", regs, code, fault_address);
290 --
291 - }
292 - }
293 -
294 -diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
295 -index ae098c4..f016bb6 100644
296 ---- a/arch/powerpc/include/asm/jump_label.h
297 -+++ b/arch/powerpc/include/asm/jump_label.h
298 -@@ -19,7 +19,7 @@
299 -
300 - static __always_inline bool arch_static_branch(struct static_key *key)
301 - {
302 -- asm goto("1:\n\t"
303 -+ asm_volatile_goto("1:\n\t"
304 - "nop\n\t"
305 - ".pushsection __jump_table, \"aw\"\n\t"
306 - JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
307 -diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
308 -index b02f91e..7bcd4d6 100644
309 ---- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
310 -+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
311 -@@ -1054,7 +1054,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
312 - BEGIN_FTR_SECTION
313 - mfspr r8, SPRN_DSCR
314 - ld r7, HSTATE_DSCR(r13)
315 -- std r8, VCPU_DSCR(r7)
316 -+ std r8, VCPU_DSCR(r9)
317 - mtspr SPRN_DSCR, r7
318 - END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
319 -
320 -diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
321 -index 6c32190..346b1c8 100644
322 ---- a/arch/s390/include/asm/jump_label.h
323 -+++ b/arch/s390/include/asm/jump_label.h
324 -@@ -15,7 +15,7 @@
325 -
326 - static __always_inline bool arch_static_branch(struct static_key *key)
327 - {
328 -- asm goto("0: brcl 0,0\n"
329 -+ asm_volatile_goto("0: brcl 0,0\n"
330 - ".pushsection __jump_table, \"aw\"\n"
331 - ASM_ALIGN "\n"
332 - ASM_PTR " 0b, %l[label], %0\n"
333 -diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
334 -index 5080d16..ec2e2e2 100644
335 ---- a/arch/sparc/include/asm/jump_label.h
336 -+++ b/arch/sparc/include/asm/jump_label.h
337 -@@ -9,7 +9,7 @@
338 -
339 - static __always_inline bool arch_static_branch(struct static_key *key)
340 - {
341 -- asm goto("1:\n\t"
342 -+ asm_volatile_goto("1:\n\t"
343 - "nop\n\t"
344 - "nop\n\t"
345 - ".pushsection __jump_table, \"aw\"\n\t"
346 -diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
347 -index 47538a6..7290585 100644
348 ---- a/arch/x86/include/asm/cpufeature.h
349 -+++ b/arch/x86/include/asm/cpufeature.h
350 -@@ -373,7 +373,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
351 - * Catch too early usage of this before alternatives
352 - * have run.
353 - */
354 -- asm goto("1: jmp %l[t_warn]\n"
355 -+ asm_volatile_goto("1: jmp %l[t_warn]\n"
356 - "2:\n"
357 - ".section .altinstructions,\"a\"\n"
358 - " .long 1b - .\n"
359 -@@ -386,7 +386,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
360 - : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
361 - #endif
362 -
363 -- asm goto("1: jmp %l[t_no]\n"
364 -+ asm_volatile_goto("1: jmp %l[t_no]\n"
365 - "2:\n"
366 - ".section .altinstructions,\"a\"\n"
367 - " .long 1b - .\n"
368 -@@ -448,7 +448,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
369 - * have. Thus, we force the jump to the widest, 4-byte, signed relative
370 - * offset even though the last would often fit in less bytes.
371 - */
372 -- asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
373 -+ asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
374 - "2:\n"
375 - ".section .altinstructions,\"a\"\n"
376 - " .long 1b - .\n" /* src offset */
377 -diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
378 -index cccd07f..779c2ef 100644
379 ---- a/arch/x86/include/asm/e820.h
380 -+++ b/arch/x86/include/asm/e820.h
381 -@@ -29,7 +29,7 @@ extern void e820_setup_gap(void);
382 - extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
383 - unsigned long start_addr, unsigned long long end_addr);
384 - struct setup_data;
385 --extern void parse_e820_ext(struct setup_data *data);
386 -+extern void parse_e820_ext(u64 phys_addr, u32 data_len);
387 -
388 - #if defined(CONFIG_X86_64) || \
389 - (defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
390 -diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
391 -index 3a16c14..0297669 100644
392 ---- a/arch/x86/include/asm/jump_label.h
393 -+++ b/arch/x86/include/asm/jump_label.h
394 -@@ -13,7 +13,7 @@
395 -
396 - static __always_inline bool arch_static_branch(struct static_key *key)
397 - {
398 -- asm goto("1:"
399 -+ asm_volatile_goto("1:"
400 - STATIC_KEY_INITIAL_NOP
401 - ".pushsection __jump_table, \"aw\" \n\t"
402 - _ASM_ALIGN "\n\t"
403 -diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
404 -index d32abea..174da5f 100644
405 ---- a/arch/x86/kernel/e820.c
406 -+++ b/arch/x86/kernel/e820.c
407 -@@ -658,15 +658,18 @@ __init void e820_setup_gap(void)
408 - * boot_params.e820_map, others are passed via SETUP_E820_EXT node of
409 - * linked list of struct setup_data, which is parsed here.
410 - */
411 --void __init parse_e820_ext(struct setup_data *sdata)
412 -+void __init parse_e820_ext(u64 phys_addr, u32 data_len)
413 - {
414 - int entries;
415 - struct e820entry *extmap;
416 -+ struct setup_data *sdata;
417 -
418 -+ sdata = early_memremap(phys_addr, data_len);
419 - entries = sdata->len / sizeof(struct e820entry);
420 - extmap = (struct e820entry *)(sdata->data);
421 - __append_e820_map(extmap, entries);
422 - sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
423 -+ early_iounmap(sdata, data_len);
424 - printk(KERN_INFO "e820: extended physical RAM map:\n");
425 - e820_print_map("extended");
426 - }
427 -diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
428 -index f8ec578..234e1e3 100644
429 ---- a/arch/x86/kernel/setup.c
430 -+++ b/arch/x86/kernel/setup.c
431 -@@ -426,25 +426,23 @@ static void __init reserve_initrd(void)
432 - static void __init parse_setup_data(void)
433 - {
434 - struct setup_data *data;
435 -- u64 pa_data;
436 -+ u64 pa_data, pa_next;
437 -
438 - pa_data = boot_params.hdr.setup_data;
439 - while (pa_data) {
440 -- u32 data_len, map_len;
441 -+ u32 data_len, map_len, data_type;
442 -
443 - map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
444 - (u64)sizeof(struct setup_data));
445 - data = early_memremap(pa_data, map_len);
446 - data_len = data->len + sizeof(struct setup_data);
447 -- if (data_len > map_len) {
448 -- early_iounmap(data, map_len);
449 -- data = early_memremap(pa_data, data_len);
450 -- map_len = data_len;
451 -- }
452 -+ data_type = data->type;
453 -+ pa_next = data->next;
454 -+ early_iounmap(data, map_len);
455 -
456 -- switch (data->type) {
457 -+ switch (data_type) {
458 - case SETUP_E820_EXT:
459 -- parse_e820_ext(data);
460 -+ parse_e820_ext(pa_data, data_len);
461 - break;
462 - case SETUP_DTB:
463 - add_dtb(pa_data);
464 -@@ -452,8 +450,7 @@ static void __init parse_setup_data(void)
465 - default:
466 - break;
467 - }
468 -- pa_data = data->next;
469 -- early_iounmap(data, map_len);
470 -+ pa_data = pa_next;
471 - }
472 - }
473 -
474 -diff --git a/drivers/char/random.c b/drivers/char/random.c
475 -index 0d91fe5..92e6c67 100644
476 ---- a/drivers/char/random.c
477 -+++ b/drivers/char/random.c
478 -@@ -1462,12 +1462,11 @@ struct ctl_table random_table[] = {
479 -
480 - static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
481 -
482 --static int __init random_int_secret_init(void)
483 -+int random_int_secret_init(void)
484 - {
485 - get_random_bytes(random_int_secret, sizeof(random_int_secret));
486 - return 0;
487 - }
488 --late_initcall(random_int_secret_init);
489 -
490 - /*
491 - * Get a random word for internal kernel use only. Similar to urandom but
492 -diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
493 -index 342f1f3..c42d31c 100644
494 ---- a/drivers/gpu/drm/i915/i915_reg.h
495 -+++ b/drivers/gpu/drm/i915/i915_reg.h
496 -@@ -3791,6 +3791,9 @@
497 - #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
498 - #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
499 -
500 -+#define HSW_SCRATCH1 0xb038
501 -+#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
502 -+
503 - #define HSW_FUSE_STRAP 0x42014
504 - #define HSW_CDCLK_LIMIT (1 << 24)
505 -
506 -@@ -4624,6 +4627,9 @@
507 - #define GEN7_ROW_CHICKEN2_GT2 0xf4f4
508 - #define DOP_CLOCK_GATING_DISABLE (1<<0)
509 -
510 -+#define HSW_ROW_CHICKEN3 0xe49c
511 -+#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
512 -+
513 - #define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
514 - #define INTEL_AUDIO_DEVCL 0x808629FB
515 - #define INTEL_AUDIO_DEVBLC 0x80862801
516 -diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
517 -index 7fc8a76..90a7c17 100644
518 ---- a/drivers/gpu/drm/i915/intel_display.c
519 -+++ b/drivers/gpu/drm/i915/intel_display.c
520 -@@ -3890,8 +3890,6 @@ static void intel_connector_check_state(struct intel_connector *connector)
521 - * consider. */
522 - void intel_connector_dpms(struct drm_connector *connector, int mode)
523 - {
524 -- struct intel_encoder *encoder = intel_attached_encoder(connector);
525 --
526 - /* All the simple cases only support two dpms states. */
527 - if (mode != DRM_MODE_DPMS_ON)
528 - mode = DRM_MODE_DPMS_OFF;
529 -@@ -3902,10 +3900,8 @@ void intel_connector_dpms(struct drm_connector *connector, int mode)
530 - connector->dpms = mode;
531 -
532 - /* Only need to change hw state when actually enabled */
533 -- if (encoder->base.crtc)
534 -- intel_encoder_dpms(encoder, mode);
535 -- else
536 -- WARN_ON(encoder->connectors_active != false);
537 -+ if (connector->encoder)
538 -+ intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
539 -
540 - intel_modeset_check_state(connector->dev);
541 - }
542 -diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
543 -index b0e4a0b..cad0482 100644
544 ---- a/drivers/gpu/drm/i915/intel_pm.c
545 -+++ b/drivers/gpu/drm/i915/intel_pm.c
546 -@@ -3603,8 +3603,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
547 - dev_priv->rps.rpe_delay),
548 - dev_priv->rps.rpe_delay);
549 -
550 -- INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
551 --
552 - valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
553 -
554 - /* requires MSI enabled */
555 -@@ -4699,6 +4697,11 @@ static void haswell_init_clock_gating(struct drm_device *dev)
556 - I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
557 - GEN7_WA_L3_CHICKEN_MODE);
558 -
559 -+ /* L3 caching of data atomics doesn't work -- disable it. */
560 -+ I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
561 -+ I915_WRITE(HSW_ROW_CHICKEN3,
562 -+ _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
563 -+
564 - /* This is required by WaCatErrorRejectionIssue:hsw */
565 - I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
566 - I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
567 -@@ -5562,6 +5565,8 @@ void intel_pm_init(struct drm_device *dev)
568 -
569 - INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
570 - intel_gen6_powersave_work);
571 -+
572 -+ INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
573 - }
574 -
575 - int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
576 -diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
577 -index 084e694..639b9aa 100644
578 ---- a/drivers/gpu/drm/radeon/btc_dpm.c
579 -+++ b/drivers/gpu/drm/radeon/btc_dpm.c
580 -@@ -1913,7 +1913,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
581 - }
582 - j++;
583 -
584 -- if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
585 -+ if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
586 - return -EINVAL;
587 -
588 - tmp = RREG32(MC_PMG_CMD_MRS);
589 -@@ -1928,7 +1928,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
590 - }
591 - j++;
592 -
593 -- if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
594 -+ if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
595 - return -EINVAL;
596 - break;
597 - case MC_SEQ_RESERVE_M >> 2:
598 -@@ -1942,7 +1942,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
599 - }
600 - j++;
601 -
602 -- if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
603 -+ if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
604 - return -EINVAL;
605 - break;
606 - default:
607 -diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
608 -index 94dab1e..8307883 100644
609 ---- a/drivers/gpu/drm/radeon/evergreen.c
610 -+++ b/drivers/gpu/drm/radeon/evergreen.c
611 -@@ -3126,7 +3126,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
612 - rdev->config.evergreen.sx_max_export_size = 256;
613 - rdev->config.evergreen.sx_max_export_pos_size = 64;
614 - rdev->config.evergreen.sx_max_export_smx_size = 192;
615 -- rdev->config.evergreen.max_hw_contexts = 8;
616 -+ rdev->config.evergreen.max_hw_contexts = 4;
617 - rdev->config.evergreen.sq_num_cf_insts = 2;
618 -
619 - rdev->config.evergreen.sc_prim_fifo_size = 0x40;
620 -diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
621 -index 20fd17c..6be00c9 100644
622 ---- a/drivers/gpu/drm/radeon/evergreend.h
623 -+++ b/drivers/gpu/drm/radeon/evergreend.h
624 -@@ -1494,7 +1494,7 @@
625 - * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
626 - */
627 - # define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
628 -- /* 0 - SRC_ADDR
629 -+ /* 0 - DST_ADDR
630 - * 1 - GDS
631 - */
632 - # define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
633 -@@ -1509,7 +1509,7 @@
634 - # define PACKET3_CP_DMA_CP_SYNC (1 << 31)
635 - /* COMMAND */
636 - # define PACKET3_CP_DMA_DIS_WC (1 << 21)
637 --# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
638 -+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
639 - /* 0 - none
640 - * 1 - 8 in 16
641 - * 2 - 8 in 32
642 -diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
643 -index 7c78083..d079cb1 100644
644 ---- a/drivers/gpu/drm/radeon/r600d.h
645 -+++ b/drivers/gpu/drm/radeon/r600d.h
646 -@@ -1487,7 +1487,7 @@
647 - */
648 - # define PACKET3_CP_DMA_CP_SYNC (1 << 31)
649 - /* COMMAND */
650 --# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
651 -+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
652 - /* 0 - none
653 - * 1 - 8 in 16
654 - * 2 - 8 in 32
655 -diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
656 -index f4d6bce..12e8099 100644
657 ---- a/drivers/gpu/drm/radeon/radeon_test.c
658 -+++ b/drivers/gpu/drm/radeon/radeon_test.c
659 -@@ -36,8 +36,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
660 - struct radeon_bo *vram_obj = NULL;
661 - struct radeon_bo **gtt_obj = NULL;
662 - uint64_t gtt_addr, vram_addr;
663 -- unsigned i, n, size;
664 -- int r, ring;
665 -+ unsigned n, size;
666 -+ int i, r, ring;
667 -
668 - switch (flag) {
669 - case RADEON_TEST_COPY_DMA:
670 -diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
671 -index 1cfba39..1c23b61 100644
672 ---- a/drivers/gpu/drm/radeon/si_dpm.c
673 -+++ b/drivers/gpu/drm/radeon/si_dpm.c
674 -@@ -5174,7 +5174,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
675 - table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
676 - }
677 - j++;
678 -- if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
679 -+ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
680 - return -EINVAL;
681 -
682 - if (!pi->mem_gddr5) {
683 -@@ -5184,7 +5184,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
684 - table->mc_reg_table_entry[k].mc_data[j] =
685 - (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
686 - j++;
687 -- if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
688 -+ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
689 - return -EINVAL;
690 - }
691 - break;
692 -@@ -5197,7 +5197,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
693 - (temp_reg & 0xffff0000) |
694 - (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
695 - j++;
696 -- if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
697 -+ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
698 - return -EINVAL;
699 - break;
700 - default:
701 -diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
702 -index 2010d6b..a75d25a 100644
703 ---- a/drivers/gpu/drm/radeon/sid.h
704 -+++ b/drivers/gpu/drm/radeon/sid.h
705 -@@ -1490,7 +1490,7 @@
706 - * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
707 - */
708 - # define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
709 -- /* 0 - SRC_ADDR
710 -+ /* 0 - DST_ADDR
711 - * 1 - GDS
712 - */
713 - # define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
714 -@@ -1505,7 +1505,7 @@
715 - # define PACKET3_CP_DMA_CP_SYNC (1 << 31)
716 - /* COMMAND */
717 - # define PACKET3_CP_DMA_DIS_WC (1 << 21)
718 --# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
719 -+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
720 - /* 0 - none
721 - * 1 - 8 in 16
722 - * 2 - 8 in 32
723 -diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
724 -index 98814d1..3288f13 100644
725 ---- a/drivers/hwmon/applesmc.c
726 -+++ b/drivers/hwmon/applesmc.c
727 -@@ -230,6 +230,7 @@ static int send_argument(const char *key)
728 -
729 - static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
730 - {
731 -+ u8 status, data = 0;
732 - int i;
733 -
734 - if (send_command(cmd) || send_argument(key)) {
735 -@@ -237,6 +238,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
736 - return -EIO;
737 - }
738 -
739 -+ /* This has no effect on newer (2012) SMCs */
740 - if (send_byte(len, APPLESMC_DATA_PORT)) {
741 - pr_warn("%.4s: read len fail\n", key);
742 - return -EIO;
743 -@@ -250,6 +252,17 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
744 - buffer[i] = inb(APPLESMC_DATA_PORT);
745 - }
746 -
747 -+ /* Read the data port until bit0 is cleared */
748 -+ for (i = 0; i < 16; i++) {
749 -+ udelay(APPLESMC_MIN_WAIT);
750 -+ status = inb(APPLESMC_CMD_PORT);
751 -+ if (!(status & 0x01))
752 -+ break;
753 -+ data = inb(APPLESMC_DATA_PORT);
754 -+ }
755 -+ if (i)
756 -+ pr_warn("flushed %d bytes, last value is: %d\n", i, data);
757 -+
758 - return 0;
759 - }
760 -
761 -diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
762 -index 142b694d..e6b8dcd 100644
763 ---- a/drivers/i2c/busses/i2c-omap.c
764 -+++ b/drivers/i2c/busses/i2c-omap.c
765 -@@ -944,6 +944,9 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
766 - /*
767 - * ProDB0017052: Clear ARDY bit twice
768 - */
769 -+ if (stat & OMAP_I2C_STAT_ARDY)
770 -+ omap_i2c_ack_stat(dev, OMAP_I2C_STAT_ARDY);
771 -+
772 - if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
773 - OMAP_I2C_STAT_AL)) {
774 - omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY |
775 -diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
776 -index 491419e..5c3d4df 100644
777 ---- a/drivers/watchdog/kempld_wdt.c
778 -+++ b/drivers/watchdog/kempld_wdt.c
779 -@@ -35,7 +35,7 @@
780 - #define KEMPLD_WDT_STAGE_TIMEOUT(x) (0x1b + (x) * 4)
781 - #define KEMPLD_WDT_STAGE_CFG(x) (0x18 + (x))
782 - #define STAGE_CFG_GET_PRESCALER(x) (((x) & 0x30) >> 4)
783 --#define STAGE_CFG_SET_PRESCALER(x) (((x) & 0x30) << 4)
784 -+#define STAGE_CFG_SET_PRESCALER(x) (((x) & 0x3) << 4)
785 - #define STAGE_CFG_PRESCALER_MASK 0x30
786 - #define STAGE_CFG_ACTION_MASK 0x7
787 - #define STAGE_CFG_ASSERT (1 << 3)
788 -diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
789 -index 4da59b4..381999c 100644
790 ---- a/drivers/watchdog/ts72xx_wdt.c
791 -+++ b/drivers/watchdog/ts72xx_wdt.c
792 -@@ -310,7 +310,8 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
793 -
794 - case WDIOC_GETSTATUS:
795 - case WDIOC_GETBOOTSTATUS:
796 -- return put_user(0, p);
797 -+ error = put_user(0, p);
798 -+ break;
799 -
800 - case WDIOC_KEEPALIVE:
801 - ts72xx_wdt_kick(wdt);
802 -diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
803 -index d3280b2..8220491 100644
804 ---- a/fs/btrfs/inode.c
805 -+++ b/fs/btrfs/inode.c
806 -@@ -8036,7 +8036,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
807 -
808 -
809 - /* check for collisions, even if the name isn't there */
810 -- ret = btrfs_check_dir_item_collision(root, new_dir->i_ino,
811 -+ ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
812 - new_dentry->d_name.name,
813 - new_dentry->d_name.len);
814 -
815 -diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
816 -index c081e34..03e9beb 100644
817 ---- a/fs/ext4/xattr.c
818 -+++ b/fs/ext4/xattr.c
819 -@@ -1350,6 +1350,8 @@ retry:
820 - s_min_extra_isize) {
821 - tried_min_extra_isize++;
822 - new_extra_isize = s_min_extra_isize;
823 -+ kfree(is); is = NULL;
824 -+ kfree(bs); bs = NULL;
825 - goto retry;
826 - }
827 - error = -1;
828 -diff --git a/fs/statfs.c b/fs/statfs.c
829 -index c219e733..083dc0a 100644
830 ---- a/fs/statfs.c
831 -+++ b/fs/statfs.c
832 -@@ -94,7 +94,7 @@ retry:
833 -
834 - int fd_statfs(int fd, struct kstatfs *st)
835 - {
836 -- struct fd f = fdget(fd);
837 -+ struct fd f = fdget_raw(fd);
838 - int error = -EBADF;
839 - if (f.file) {
840 - error = vfs_statfs(&f.file->f_path, st);
841 -diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
842 -index 842de22..ded4299 100644
843 ---- a/include/linux/compiler-gcc4.h
844 -+++ b/include/linux/compiler-gcc4.h
845 -@@ -65,6 +65,21 @@
846 - #define __visible __attribute__((externally_visible))
847 - #endif
848 -
849 -+/*
850 -+ * GCC 'asm goto' miscompiles certain code sequences:
851 -+ *
852 -+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
853 -+ *
854 -+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
855 -+ * Fixed in GCC 4.8.2 and later versions.
856 -+ *
857 -+ * (asm goto is automatically volatile - the naming reflects this.)
858 -+ */
859 -+#if GCC_VERSION <= 40801
860 -+# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
861 -+#else
862 -+# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
863 -+#endif
864 -
865 - #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
866 - #if GCC_VERSION >= 40400
867 -diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
868 -index c4d870b..19c19a5 100644
869 ---- a/include/linux/ipc_namespace.h
870 -+++ b/include/linux/ipc_namespace.h
871 -@@ -22,7 +22,7 @@ struct ipc_ids {
872 - int in_use;
873 - unsigned short seq;
874 - unsigned short seq_max;
875 -- struct rw_semaphore rw_mutex;
876 -+ struct rw_semaphore rwsem;
877 - struct idr ipcs_idr;
878 - int next_id;
879 - };
880 -diff --git a/include/linux/random.h b/include/linux/random.h
881 -index 3b9377d..6312dd9 100644
882 ---- a/include/linux/random.h
883 -+++ b/include/linux/random.h
884 -@@ -17,6 +17,7 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
885 - extern void get_random_bytes(void *buf, int nbytes);
886 - extern void get_random_bytes_arch(void *buf, int nbytes);
887 - void generate_random_uuid(unsigned char uuid_out[16]);
888 -+extern int random_int_secret_init(void);
889 -
890 - #ifndef MODULE
891 - extern const struct file_operations random_fops, urandom_fops;
892 -diff --git a/init/main.c b/init/main.c
893 -index d03d2ec..586cd33 100644
894 ---- a/init/main.c
895 -+++ b/init/main.c
896 -@@ -75,6 +75,7 @@
897 - #include <linux/blkdev.h>
898 - #include <linux/elevator.h>
899 - #include <linux/sched_clock.h>
900 -+#include <linux/random.h>
901 -
902 - #include <asm/io.h>
903 - #include <asm/bugs.h>
904 -@@ -778,6 +779,7 @@ static void __init do_basic_setup(void)
905 - do_ctors();
906 - usermodehelper_enable();
907 - do_initcalls();
908 -+ random_int_secret_init();
909 - }
910 -
911 - static void __init do_pre_smp_initcalls(void)
912 -diff --git a/ipc/msg.c b/ipc/msg.c
913 -index a877c16..558aa91 100644
914 ---- a/ipc/msg.c
915 -+++ b/ipc/msg.c
916 -@@ -70,8 +70,6 @@ struct msg_sender {
917 -
918 - #define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS])
919 -
920 --#define msg_unlock(msq) ipc_unlock(&(msq)->q_perm)
921 --
922 - static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
923 - static int newque(struct ipc_namespace *, struct ipc_params *);
924 - #ifdef CONFIG_PROC_FS
925 -@@ -181,7 +179,7 @@ static void msg_rcu_free(struct rcu_head *head)
926 - * @ns: namespace
927 - * @params: ptr to the structure that contains the key and msgflg
928 - *
929 -- * Called with msg_ids.rw_mutex held (writer)
930 -+ * Called with msg_ids.rwsem held (writer)
931 - */
932 - static int newque(struct ipc_namespace *ns, struct ipc_params *params)
933 - {
934 -@@ -267,8 +265,8 @@ static void expunge_all(struct msg_queue *msq, int res)
935 - * removes the message queue from message queue ID IDR, and cleans up all the
936 - * messages associated with this queue.
937 - *
938 -- * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
939 -- * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
940 -+ * msg_ids.rwsem (writer) and the spinlock for this message queue are held
941 -+ * before freeque() is called. msg_ids.rwsem remains locked on exit.
942 - */
943 - static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
944 - {
945 -@@ -278,7 +276,8 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
946 - expunge_all(msq, -EIDRM);
947 - ss_wakeup(&msq->q_senders, 1);
948 - msg_rmid(ns, msq);
949 -- msg_unlock(msq);
950 -+ ipc_unlock_object(&msq->q_perm);
951 -+ rcu_read_unlock();
952 -
953 - list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
954 - atomic_dec(&ns->msg_hdrs);
955 -@@ -289,7 +288,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
956 - }
957 -
958 - /*
959 -- * Called with msg_ids.rw_mutex and ipcp locked.
960 -+ * Called with msg_ids.rwsem and ipcp locked.
961 - */
962 - static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
963 - {
964 -@@ -393,9 +392,9 @@ copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
965 - }
966 -
967 - /*
968 -- * This function handles some msgctl commands which require the rw_mutex
969 -+ * This function handles some msgctl commands which require the rwsem
970 - * to be held in write mode.
971 -- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
972 -+ * NOTE: no locks must be held, the rwsem is taken inside this function.
973 - */
974 - static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
975 - struct msqid_ds __user *buf, int version)
976 -@@ -410,7 +409,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
977 - return -EFAULT;
978 - }
979 -
980 -- down_write(&msg_ids(ns).rw_mutex);
981 -+ down_write(&msg_ids(ns).rwsem);
982 - rcu_read_lock();
983 -
984 - ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd,
985 -@@ -466,7 +465,7 @@ out_unlock0:
986 - out_unlock1:
987 - rcu_read_unlock();
988 - out_up:
989 -- up_write(&msg_ids(ns).rw_mutex);
990 -+ up_write(&msg_ids(ns).rwsem);
991 - return err;
992 - }
993 -
994 -@@ -501,7 +500,7 @@ static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
995 - msginfo.msgmnb = ns->msg_ctlmnb;
996 - msginfo.msgssz = MSGSSZ;
997 - msginfo.msgseg = MSGSEG;
998 -- down_read(&msg_ids(ns).rw_mutex);
999 -+ down_read(&msg_ids(ns).rwsem);
1000 - if (cmd == MSG_INFO) {
1001 - msginfo.msgpool = msg_ids(ns).in_use;
1002 - msginfo.msgmap = atomic_read(&ns->msg_hdrs);
1003 -@@ -512,7 +511,7 @@ static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
1004 - msginfo.msgtql = MSGTQL;
1005 - }
1006 - max_id = ipc_get_maxid(&msg_ids(ns));
1007 -- up_read(&msg_ids(ns).rw_mutex);
1008 -+ up_read(&msg_ids(ns).rwsem);
1009 - if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
1010 - return -EFAULT;
1011 - return (max_id < 0) ? 0 : max_id;
1012 -diff --git a/ipc/namespace.c b/ipc/namespace.c
1013 -index 7ee61bf..aba9a58 100644
1014 ---- a/ipc/namespace.c
1015 -+++ b/ipc/namespace.c
1016 -@@ -81,7 +81,7 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
1017 - int next_id;
1018 - int total, in_use;
1019 -
1020 -- down_write(&ids->rw_mutex);
1021 -+ down_write(&ids->rwsem);
1022 -
1023 - in_use = ids->in_use;
1024 -
1025 -@@ -89,11 +89,12 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
1026 - perm = idr_find(&ids->ipcs_idr, next_id);
1027 - if (perm == NULL)
1028 - continue;
1029 -- ipc_lock_by_ptr(perm);
1030 -+ rcu_read_lock();
1031 -+ ipc_lock_object(perm);
1032 - free(ns, perm);
1033 - total++;
1034 - }
1035 -- up_write(&ids->rw_mutex);
1036 -+ up_write(&ids->rwsem);
1037 - }
1038 -
1039 - static void free_ipc_ns(struct ipc_namespace *ns)
1040 -diff --git a/ipc/sem.c b/ipc/sem.c
1041 -index 87614511..8e2bf30 100644
1042 ---- a/ipc/sem.c
1043 -+++ b/ipc/sem.c
1044 -@@ -248,12 +248,20 @@ static void merge_queues(struct sem_array *sma)
1045 - * Caller must own sem_perm.lock.
1046 - * New simple ops cannot start, because simple ops first check
1047 - * that sem_perm.lock is free.
1048 -+ * that a) sem_perm.lock is free and b) complex_count is 0.
1049 - */
1050 - static void sem_wait_array(struct sem_array *sma)
1051 - {
1052 - int i;
1053 - struct sem *sem;
1054 -
1055 -+ if (sma->complex_count) {
1056 -+ /* The thread that increased sma->complex_count waited on
1057 -+ * all sem->lock locks. Thus we don't need to wait again.
1058 -+ */
1059 -+ return;
1060 -+ }
1061 -+
1062 - for (i = 0; i < sma->sem_nsems; i++) {
1063 - sem = sma->sem_base + i;
1064 - spin_unlock_wait(&sem->lock);
1065 -@@ -365,7 +373,7 @@ static inline void sem_unlock(struct sem_array *sma, int locknum)
1066 - }
1067 -
1068 - /*
1069 -- * sem_lock_(check_) routines are called in the paths where the rw_mutex
1070 -+ * sem_lock_(check_) routines are called in the paths where the rwsem
1071 - * is not held.
1072 - *
1073 - * The caller holds the RCU read lock.
1074 -@@ -464,7 +472,7 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
1075 - * @ns: namespace
1076 - * @params: ptr to the structure that contains key, semflg and nsems
1077 - *
1078 -- * Called with sem_ids.rw_mutex held (as a writer)
1079 -+ * Called with sem_ids.rwsem held (as a writer)
1080 - */
1081 -
1082 - static int newary(struct ipc_namespace *ns, struct ipc_params *params)
1083 -@@ -529,7 +537,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
1084 -
1085 -
1086 - /*
1087 -- * Called with sem_ids.rw_mutex and ipcp locked.
1088 -+ * Called with sem_ids.rwsem and ipcp locked.
1089 - */
1090 - static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
1091 - {
1092 -@@ -540,7 +548,7 @@ static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
1093 - }
1094 -
1095 - /*
1096 -- * Called with sem_ids.rw_mutex and ipcp locked.
1097 -+ * Called with sem_ids.rwsem and ipcp locked.
1098 - */
1099 - static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
1100 - struct ipc_params *params)
1101 -@@ -910,6 +918,24 @@ again:
1102 - }
1103 -
1104 - /**
1105 -+ * set_semotime(sma, sops) - set sem_otime
1106 -+ * @sma: semaphore array
1107 -+ * @sops: operations that modified the array, may be NULL
1108 -+ *
1109 -+ * sem_otime is replicated to avoid cache line trashing.
1110 -+ * This function sets one instance to the current time.
1111 -+ */
1112 -+static void set_semotime(struct sem_array *sma, struct sembuf *sops)
1113 -+{
1114 -+ if (sops == NULL) {
1115 -+ sma->sem_base[0].sem_otime = get_seconds();
1116 -+ } else {
1117 -+ sma->sem_base[sops[0].sem_num].sem_otime =
1118 -+ get_seconds();
1119 -+ }
1120 -+}
1121 -+
1122 -+/**
1123 - * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
1124 - * @sma: semaphore array
1125 - * @sops: operations that were performed
1126 -@@ -959,17 +985,10 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
1127 - }
1128 - }
1129 - }
1130 -- if (otime) {
1131 -- if (sops == NULL) {
1132 -- sma->sem_base[0].sem_otime = get_seconds();
1133 -- } else {
1134 -- sma->sem_base[sops[0].sem_num].sem_otime =
1135 -- get_seconds();
1136 -- }
1137 -- }
1138 -+ if (otime)
1139 -+ set_semotime(sma, sops);
1140 - }
1141 -
1142 --
1143 - /* The following counts are associated to each semaphore:
1144 - * semncnt number of tasks waiting on semval being nonzero
1145 - * semzcnt number of tasks waiting on semval being zero
1146 -@@ -1031,8 +1050,8 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
1147 - return semzcnt;
1148 - }
1149 -
1150 --/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
1151 -- * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex
1152 -+/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
1153 -+ * as a writer and the spinlock for this semaphore set hold. sem_ids.rwsem
1154 - * remains locked on exit.
1155 - */
1156 - static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1157 -@@ -1152,7 +1171,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
1158 - seminfo.semmnu = SEMMNU;
1159 - seminfo.semmap = SEMMAP;
1160 - seminfo.semume = SEMUME;
1161 -- down_read(&sem_ids(ns).rw_mutex);
1162 -+ down_read(&sem_ids(ns).rwsem);
1163 - if (cmd == SEM_INFO) {
1164 - seminfo.semusz = sem_ids(ns).in_use;
1165 - seminfo.semaem = ns->used_sems;
1166 -@@ -1161,7 +1180,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
1167 - seminfo.semaem = SEMAEM;
1168 - }
1169 - max_id = ipc_get_maxid(&sem_ids(ns));
1170 -- up_read(&sem_ids(ns).rw_mutex);
1171 -+ up_read(&sem_ids(ns).rwsem);
1172 - if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
1173 - return -EFAULT;
1174 - return (max_id < 0) ? 0: max_id;
1175 -@@ -1467,9 +1486,9 @@ copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1176 - }
1177 -
1178 - /*
1179 -- * This function handles some semctl commands which require the rw_mutex
1180 -+ * This function handles some semctl commands which require the rwsem
1181 - * to be held in write mode.
1182 -- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
1183 -+ * NOTE: no locks must be held, the rwsem is taken inside this function.
1184 - */
1185 - static int semctl_down(struct ipc_namespace *ns, int semid,
1186 - int cmd, int version, void __user *p)
1187 -@@ -1484,7 +1503,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
1188 - return -EFAULT;
1189 - }
1190 -
1191 -- down_write(&sem_ids(ns).rw_mutex);
1192 -+ down_write(&sem_ids(ns).rwsem);
1193 - rcu_read_lock();
1194 -
1195 - ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
1196 -@@ -1523,7 +1542,7 @@ out_unlock0:
1197 - out_unlock1:
1198 - rcu_read_unlock();
1199 - out_up:
1200 -- up_write(&sem_ids(ns).rw_mutex);
1201 -+ up_write(&sem_ids(ns).rwsem);
1202 - return err;
1203 - }
1204 -
1205 -@@ -1831,12 +1850,17 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1206 -
1207 - error = perform_atomic_semop(sma, sops, nsops, un,
1208 - task_tgid_vnr(current));
1209 -- if (error <= 0) {
1210 -- if (alter && error == 0)
1211 -+ if (error == 0) {
1212 -+ /* If the operation was successful, then do
1213 -+ * the required updates.
1214 -+ */
1215 -+ if (alter)
1216 - do_smart_update(sma, sops, nsops, 1, &tasks);
1217 --
1218 -- goto out_unlock_free;
1219 -+ else
1220 -+ set_semotime(sma, sops);
1221 - }
1222 -+ if (error <= 0)
1223 -+ goto out_unlock_free;
1224 -
1225 - /* We need to sleep on this operation, so we put the current
1226 - * task into the pending queue and go to sleep.
1227 -@@ -2095,6 +2119,14 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
1228 - struct sem_array *sma = it;
1229 - time_t sem_otime;
1230 -
1231 -+ /*
1232 -+ * The proc interface isn't aware of sem_lock(), it calls
1233 -+ * ipc_lock_object() directly (in sysvipc_find_ipc).
1234 -+ * In order to stay compatible with sem_lock(), we must wait until
1235 -+ * all simple semop() calls have left their critical regions.
1236 -+ */
1237 -+ sem_wait_array(sma);
1238 -+
1239 - sem_otime = get_semotime(sma);
1240 -
1241 - return seq_printf(s,
1242 -diff --git a/ipc/shm.c b/ipc/shm.c
1243 -index 2d6833d..d697396 100644
1244 ---- a/ipc/shm.c
1245 -+++ b/ipc/shm.c
1246 -@@ -19,6 +19,9 @@
1247 - * namespaces support
1248 - * OpenVZ, SWsoft Inc.
1249 - * Pavel Emelianov <xemul@××××××.org>
1250 -+ *
1251 -+ * Better ipc lock (kern_ipc_perm.lock) handling
1252 -+ * Davidlohr Bueso <davidlohr.bueso@××.com>, June 2013.
1253 - */
1254 -
1255 - #include <linux/slab.h>
1256 -@@ -80,8 +83,8 @@ void shm_init_ns(struct ipc_namespace *ns)
1257 - }
1258 -
1259 - /*
1260 -- * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
1261 -- * Only shm_ids.rw_mutex remains locked on exit.
1262 -+ * Called with shm_ids.rwsem (writer) and the shp structure locked.
1263 -+ * Only shm_ids.rwsem remains locked on exit.
1264 - */
1265 - static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1266 - {
1267 -@@ -124,8 +127,28 @@ void __init shm_init (void)
1268 - IPC_SHM_IDS, sysvipc_shm_proc_show);
1269 - }
1270 -
1271 -+static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
1272 -+{
1273 -+ struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);
1274 -+
1275 -+ if (IS_ERR(ipcp))
1276 -+ return ERR_CAST(ipcp);
1277 -+
1278 -+ return container_of(ipcp, struct shmid_kernel, shm_perm);
1279 -+}
1280 -+
1281 -+static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
1282 -+{
1283 -+ struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
1284 -+
1285 -+ if (IS_ERR(ipcp))
1286 -+ return ERR_CAST(ipcp);
1287 -+
1288 -+ return container_of(ipcp, struct shmid_kernel, shm_perm);
1289 -+}
1290 -+
1291 - /*
1292 -- * shm_lock_(check_) routines are called in the paths where the rw_mutex
1293 -+ * shm_lock_(check_) routines are called in the paths where the rwsem
1294 - * is not necessarily held.
1295 - */
1296 - static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
1297 -@@ -144,17 +167,6 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
1298 - ipc_lock_object(&ipcp->shm_perm);
1299 - }
1300 -
1301 --static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
1302 -- int id)
1303 --{
1304 -- struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);
1305 --
1306 -- if (IS_ERR(ipcp))
1307 -- return (struct shmid_kernel *)ipcp;
1308 --
1309 -- return container_of(ipcp, struct shmid_kernel, shm_perm);
1310 --}
1311 --
1312 - static void shm_rcu_free(struct rcu_head *head)
1313 - {
1314 - struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
1315 -@@ -191,7 +203,7 @@ static void shm_open(struct vm_area_struct *vma)
1316 - * @ns: namespace
1317 - * @shp: struct to free
1318 - *
1319 -- * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
1320 -+ * It has to be called with shp and shm_ids.rwsem (writer) locked,
1321 - * but returns with shp unlocked and freed.
1322 - */
1323 - static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
1324 -@@ -238,7 +250,7 @@ static void shm_close(struct vm_area_struct *vma)
1325 - struct shmid_kernel *shp;
1326 - struct ipc_namespace *ns = sfd->ns;
1327 -
1328 -- down_write(&shm_ids(ns).rw_mutex);
1329 -+ down_write(&shm_ids(ns).rwsem);
1330 - /* remove from the list of attaches of the shm segment */
1331 - shp = shm_lock(ns, sfd->id);
1332 - BUG_ON(IS_ERR(shp));
1333 -@@ -249,10 +261,10 @@ static void shm_close(struct vm_area_struct *vma)
1334 - shm_destroy(ns, shp);
1335 - else
1336 - shm_unlock(shp);
1337 -- up_write(&shm_ids(ns).rw_mutex);
1338 -+ up_write(&shm_ids(ns).rwsem);
1339 - }
1340 -
1341 --/* Called with ns->shm_ids(ns).rw_mutex locked */
1342 -+/* Called with ns->shm_ids(ns).rwsem locked */
1343 - static int shm_try_destroy_current(int id, void *p, void *data)
1344 - {
1345 - struct ipc_namespace *ns = data;
1346 -@@ -283,7 +295,7 @@ static int shm_try_destroy_current(int id, void *p, void *data)
1347 - return 0;
1348 - }
1349 -
1350 --/* Called with ns->shm_ids(ns).rw_mutex locked */
1351 -+/* Called with ns->shm_ids(ns).rwsem locked */
1352 - static int shm_try_destroy_orphaned(int id, void *p, void *data)
1353 - {
1354 - struct ipc_namespace *ns = data;
1355 -@@ -294,7 +306,7 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
1356 - * We want to destroy segments without users and with already
1357 - * exit'ed originating process.
1358 - *
1359 -- * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
1360 -+ * As shp->* are changed under rwsem, it's safe to skip shp locking.
1361 - */
1362 - if (shp->shm_creator != NULL)
1363 - return 0;
1364 -@@ -308,10 +320,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
1365 -
1366 - void shm_destroy_orphaned(struct ipc_namespace *ns)
1367 - {
1368 -- down_write(&shm_ids(ns).rw_mutex);
1369 -+ down_write(&shm_ids(ns).rwsem);
1370 - if (shm_ids(ns).in_use)
1371 - idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
1372 -- up_write(&shm_ids(ns).rw_mutex);
1373 -+ up_write(&shm_ids(ns).rwsem);
1374 - }
1375 -
1376 -
1377 -@@ -323,10 +335,10 @@ void exit_shm(struct task_struct *task)
1378 - return;
1379 -
1380 - /* Destroy all already created segments, but not mapped yet */
1381 -- down_write(&shm_ids(ns).rw_mutex);
1382 -+ down_write(&shm_ids(ns).rwsem);
1383 - if (shm_ids(ns).in_use)
1384 - idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
1385 -- up_write(&shm_ids(ns).rw_mutex);
1386 -+ up_write(&shm_ids(ns).rwsem);
1387 - }
1388 -
1389 - static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1390 -@@ -460,7 +472,7 @@ static const struct vm_operations_struct shm_vm_ops = {
1391 - * @ns: namespace
1392 - * @params: ptr to the structure that contains key, size and shmflg
1393 - *
1394 -- * Called with shm_ids.rw_mutex held as a writer.
1395 -+ * Called with shm_ids.rwsem held as a writer.
1396 - */
1397 -
1398 - static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
1399 -@@ -567,7 +579,7 @@ no_file:
1400 - }
1401 -
1402 - /*
1403 -- * Called with shm_ids.rw_mutex and ipcp locked.
1404 -+ * Called with shm_ids.rwsem and ipcp locked.
1405 - */
1406 - static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
1407 - {
1408 -@@ -578,7 +590,7 @@ static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
1409 - }
1410 -
1411 - /*
1412 -- * Called with shm_ids.rw_mutex and ipcp locked.
1413 -+ * Called with shm_ids.rwsem and ipcp locked.
1414 - */
1415 - static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
1416 - struct ipc_params *params)
1417 -@@ -691,7 +703,7 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf
1418 -
1419 - /*
1420 - * Calculate and add used RSS and swap pages of a shm.
1421 -- * Called with shm_ids.rw_mutex held as a reader
1422 -+ * Called with shm_ids.rwsem held as a reader
1423 - */
1424 - static void shm_add_rss_swap(struct shmid_kernel *shp,
1425 - unsigned long *rss_add, unsigned long *swp_add)
1426 -@@ -718,7 +730,7 @@ static void shm_add_rss_swap(struct shmid_kernel *shp,
1427 - }
1428 -
1429 - /*
1430 -- * Called with shm_ids.rw_mutex held as a reader
1431 -+ * Called with shm_ids.rwsem held as a reader
1432 - */
1433 - static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
1434 - unsigned long *swp)
1435 -@@ -747,9 +759,9 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
1436 - }
1437 -
1438 - /*
1439 -- * This function handles some shmctl commands which require the rw_mutex
1440 -+ * This function handles some shmctl commands which require the rwsem
1441 - * to be held in write mode.
1442 -- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
1443 -+ * NOTE: no locks must be held, the rwsem is taken inside this function.
1444 - */
1445 - static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
1446 - struct shmid_ds __user *buf, int version)
1447 -@@ -764,14 +776,13 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
1448 - return -EFAULT;
1449 - }
1450 -
1451 -- down_write(&shm_ids(ns).rw_mutex);
1452 -+ down_write(&shm_ids(ns).rwsem);
1453 - rcu_read_lock();
1454 -
1455 -- ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
1456 -- &shmid64.shm_perm, 0);
1457 -+ ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
1458 -+ &shmid64.shm_perm, 0);
1459 - if (IS_ERR(ipcp)) {
1460 - err = PTR_ERR(ipcp);
1461 -- /* the ipc lock is not held upon failure */
1462 - goto out_unlock1;
1463 - }
1464 -
1465 -@@ -779,14 +790,16 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
1466 -
1467 - err = security_shm_shmctl(shp, cmd);
1468 - if (err)
1469 -- goto out_unlock0;
1470 -+ goto out_unlock1;
1471 -
1472 - switch (cmd) {
1473 - case IPC_RMID:
1474 -+ ipc_lock_object(&shp->shm_perm);
1475 - /* do_shm_rmid unlocks the ipc object and rcu */
1476 - do_shm_rmid(ns, ipcp);
1477 - goto out_up;
1478 - case IPC_SET:
1479 -+ ipc_lock_object(&shp->shm_perm);
1480 - err = ipc_update_perm(&shmid64.shm_perm, ipcp);
1481 - if (err)
1482 - goto out_unlock0;
1483 -@@ -794,6 +807,7 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
1484 - break;
1485 - default:
1486 - err = -EINVAL;
1487 -+ goto out_unlock1;
1488 - }
1489 -
1490 - out_unlock0:
1491 -@@ -801,33 +815,28 @@ out_unlock0:
1492 - out_unlock1:
1493 - rcu_read_unlock();
1494 - out_up:
1495 -- up_write(&shm_ids(ns).rw_mutex);
1496 -+ up_write(&shm_ids(ns).rwsem);
1497 - return err;
1498 - }
1499 -
1500 --SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1501 -+static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
1502 -+ int cmd, int version, void __user *buf)
1503 - {
1504 -+ int err;
1505 - struct shmid_kernel *shp;
1506 -- int err, version;
1507 -- struct ipc_namespace *ns;
1508 -
1509 -- if (cmd < 0 || shmid < 0) {
1510 -- err = -EINVAL;
1511 -- goto out;
1512 -+ /* preliminary security checks for *_INFO */
1513 -+ if (cmd == IPC_INFO || cmd == SHM_INFO) {
1514 -+ err = security_shm_shmctl(NULL, cmd);
1515 -+ if (err)
1516 -+ return err;
1517 - }
1518 -
1519 -- version = ipc_parse_version(&cmd);
1520 -- ns = current->nsproxy->ipc_ns;
1521 --
1522 -- switch (cmd) { /* replace with proc interface ? */
1523 -+ switch (cmd) {
1524 - case IPC_INFO:
1525 - {
1526 - struct shminfo64 shminfo;
1527 -
1528 -- err = security_shm_shmctl(NULL, cmd);
1529 -- if (err)
1530 -- return err;
1531 --
1532 - memset(&shminfo, 0, sizeof(shminfo));
1533 - shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
1534 - shminfo.shmmax = ns->shm_ctlmax;
1535 -@@ -837,9 +846,9 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1536 - if(copy_shminfo_to_user (buf, &shminfo, version))
1537 - return -EFAULT;
1538 -
1539 -- down_read(&shm_ids(ns).rw_mutex);
1540 -+ down_read(&shm_ids(ns).rwsem);
1541 - err = ipc_get_maxid(&shm_ids(ns));
1542 -- up_read(&shm_ids(ns).rw_mutex);
1543 -+ up_read(&shm_ids(ns).rwsem);
1544 -
1545 - if(err<0)
1546 - err = 0;
1547 -@@ -849,19 +858,15 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1548 - {
1549 - struct shm_info shm_info;
1550 -
1551 -- err = security_shm_shmctl(NULL, cmd);
1552 -- if (err)
1553 -- return err;
1554 --
1555 - memset(&shm_info, 0, sizeof(shm_info));
1556 -- down_read(&shm_ids(ns).rw_mutex);
1557 -+ down_read(&shm_ids(ns).rwsem);
1558 - shm_info.used_ids = shm_ids(ns).in_use;
1559 - shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
1560 - shm_info.shm_tot = ns->shm_tot;
1561 - shm_info.swap_attempts = 0;
1562 - shm_info.swap_successes = 0;
1563 - err = ipc_get_maxid(&shm_ids(ns));
1564 -- up_read(&shm_ids(ns).rw_mutex);
1565 -+ up_read(&shm_ids(ns).rwsem);
1566 - if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
1567 - err = -EFAULT;
1568 - goto out;
1569 -@@ -876,27 +881,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1570 - struct shmid64_ds tbuf;
1571 - int result;
1572 -
1573 -+ rcu_read_lock();
1574 - if (cmd == SHM_STAT) {
1575 -- shp = shm_lock(ns, shmid);
1576 -+ shp = shm_obtain_object(ns, shmid);
1577 - if (IS_ERR(shp)) {
1578 - err = PTR_ERR(shp);
1579 -- goto out;
1580 -+ goto out_unlock;
1581 - }
1582 - result = shp->shm_perm.id;
1583 - } else {
1584 -- shp = shm_lock_check(ns, shmid);
1585 -+ shp = shm_obtain_object_check(ns, shmid);
1586 - if (IS_ERR(shp)) {
1587 - err = PTR_ERR(shp);
1588 -- goto out;
1589 -+ goto out_unlock;
1590 - }
1591 - result = 0;
1592 - }
1593 -+
1594 - err = -EACCES;
1595 - if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
1596 - goto out_unlock;
1597 -+
1598 - err = security_shm_shmctl(shp, cmd);
1599 - if (err)
1600 - goto out_unlock;
1601 -+
1602 - memset(&tbuf, 0, sizeof(tbuf));
1603 - kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
1604 - tbuf.shm_segsz = shp->shm_segsz;
1605 -@@ -906,43 +915,76 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1606 - tbuf.shm_cpid = shp->shm_cprid;
1607 - tbuf.shm_lpid = shp->shm_lprid;
1608 - tbuf.shm_nattch = shp->shm_nattch;
1609 -- shm_unlock(shp);
1610 -- if(copy_shmid_to_user (buf, &tbuf, version))
1611 -+ rcu_read_unlock();
1612 -+
1613 -+ if (copy_shmid_to_user(buf, &tbuf, version))
1614 - err = -EFAULT;
1615 - else
1616 - err = result;
1617 - goto out;
1618 - }
1619 -+ default:
1620 -+ return -EINVAL;
1621 -+ }
1622 -+
1623 -+out_unlock:
1624 -+ rcu_read_unlock();
1625 -+out:
1626 -+ return err;
1627 -+}
1628 -+
1629 -+SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1630 -+{
1631 -+ struct shmid_kernel *shp;
1632 -+ int err, version;
1633 -+ struct ipc_namespace *ns;
1634 -+
1635 -+ if (cmd < 0 || shmid < 0)
1636 -+ return -EINVAL;
1637 -+
1638 -+ version = ipc_parse_version(&cmd);
1639 -+ ns = current->nsproxy->ipc_ns;
1640 -+
1641 -+ switch (cmd) {
1642 -+ case IPC_INFO:
1643 -+ case SHM_INFO:
1644 -+ case SHM_STAT:
1645 -+ case IPC_STAT:
1646 -+ return shmctl_nolock(ns, shmid, cmd, version, buf);
1647 -+ case IPC_RMID:
1648 -+ case IPC_SET:
1649 -+ return shmctl_down(ns, shmid, cmd, buf, version);
1650 - case SHM_LOCK:
1651 - case SHM_UNLOCK:
1652 - {
1653 - struct file *shm_file;
1654 -
1655 -- shp = shm_lock_check(ns, shmid);
1656 -+ rcu_read_lock();
1657 -+ shp = shm_obtain_object_check(ns, shmid);
1658 - if (IS_ERR(shp)) {
1659 - err = PTR_ERR(shp);
1660 -- goto out;
1661 -+ goto out_unlock1;
1662 - }
1663 -
1664 - audit_ipc_obj(&(shp->shm_perm));
1665 -+ err = security_shm_shmctl(shp, cmd);
1666 -+ if (err)
1667 -+ goto out_unlock1;
1668 -
1669 -+ ipc_lock_object(&shp->shm_perm);
1670 - if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1671 - kuid_t euid = current_euid();
1672 - err = -EPERM;
1673 - if (!uid_eq(euid, shp->shm_perm.uid) &&
1674 - !uid_eq(euid, shp->shm_perm.cuid))
1675 -- goto out_unlock;
1676 -+ goto out_unlock0;
1677 - if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
1678 -- goto out_unlock;
1679 -+ goto out_unlock0;
1680 - }
1681 -
1682 -- err = security_shm_shmctl(shp, cmd);
1683 -- if (err)
1684 -- goto out_unlock;
1685 --
1686 - shm_file = shp->shm_file;
1687 - if (is_file_hugepages(shm_file))
1688 -- goto out_unlock;
1689 -+ goto out_unlock0;
1690 -
1691 - if (cmd == SHM_LOCK) {
1692 - struct user_struct *user = current_user();
1693 -@@ -951,32 +993,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1694 - shp->shm_perm.mode |= SHM_LOCKED;
1695 - shp->mlock_user = user;
1696 - }
1697 -- goto out_unlock;
1698 -+ goto out_unlock0;
1699 - }
1700 -
1701 - /* SHM_UNLOCK */
1702 - if (!(shp->shm_perm.mode & SHM_LOCKED))
1703 -- goto out_unlock;
1704 -+ goto out_unlock0;
1705 - shmem_lock(shm_file, 0, shp->mlock_user);
1706 - shp->shm_perm.mode &= ~SHM_LOCKED;
1707 - shp->mlock_user = NULL;
1708 - get_file(shm_file);
1709 -- shm_unlock(shp);
1710 -+ ipc_unlock_object(&shp->shm_perm);
1711 -+ rcu_read_unlock();
1712 - shmem_unlock_mapping(shm_file->f_mapping);
1713 -+
1714 - fput(shm_file);
1715 -- goto out;
1716 -- }
1717 -- case IPC_RMID:
1718 -- case IPC_SET:
1719 -- err = shmctl_down(ns, shmid, cmd, buf, version);
1720 - return err;
1721 -+ }
1722 - default:
1723 - return -EINVAL;
1724 - }
1725 -
1726 --out_unlock:
1727 -- shm_unlock(shp);
1728 --out:
1729 -+out_unlock0:
1730 -+ ipc_unlock_object(&shp->shm_perm);
1731 -+out_unlock1:
1732 -+ rcu_read_unlock();
1733 - return err;
1734 - }
1735 -
1736 -@@ -1044,10 +1085,11 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
1737 - * additional creator id...
1738 - */
1739 - ns = current->nsproxy->ipc_ns;
1740 -- shp = shm_lock_check(ns, shmid);
1741 -+ rcu_read_lock();
1742 -+ shp = shm_obtain_object_check(ns, shmid);
1743 - if (IS_ERR(shp)) {
1744 - err = PTR_ERR(shp);
1745 -- goto out;
1746 -+ goto out_unlock;
1747 - }
1748 -
1749 - err = -EACCES;
1750 -@@ -1058,24 +1100,31 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
1751 - if (err)
1752 - goto out_unlock;
1753 -
1754 -+ ipc_lock_object(&shp->shm_perm);
1755 - path = shp->shm_file->f_path;
1756 - path_get(&path);
1757 - shp->shm_nattch++;
1758 - size = i_size_read(path.dentry->d_inode);
1759 -- shm_unlock(shp);
1760 -+ ipc_unlock_object(&shp->shm_perm);
1761 -+ rcu_read_unlock();
1762 -
1763 - err = -ENOMEM;
1764 - sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1765 -- if (!sfd)
1766 -- goto out_put_dentry;
1767 -+ if (!sfd) {
1768 -+ path_put(&path);
1769 -+ goto out_nattch;
1770 -+ }
1771 -
1772 - file = alloc_file(&path, f_mode,
1773 - is_file_hugepages(shp->shm_file) ?
1774 - &shm_file_operations_huge :
1775 - &shm_file_operations);
1776 - err = PTR_ERR(file);
1777 -- if (IS_ERR(file))
1778 -- goto out_free;
1779 -+ if (IS_ERR(file)) {
1780 -+ kfree(sfd);
1781 -+ path_put(&path);
1782 -+ goto out_nattch;
1783 -+ }
1784 -
1785 - file->private_data = sfd;
1786 - file->f_mapping = shp->shm_file->f_mapping;
1787 -@@ -1101,7 +1150,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
1788 - addr > current->mm->start_stack - size - PAGE_SIZE * 5)
1789 - goto invalid;
1790 - }
1791 --
1792 -+
1793 - addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
1794 - *raddr = addr;
1795 - err = 0;
1796 -@@ -1116,7 +1165,7 @@ out_fput:
1797 - fput(file);
1798 -
1799 - out_nattch:
1800 -- down_write(&shm_ids(ns).rw_mutex);
1801 -+ down_write(&shm_ids(ns).rwsem);
1802 - shp = shm_lock(ns, shmid);
1803 - BUG_ON(IS_ERR(shp));
1804 - shp->shm_nattch--;
1805 -@@ -1124,20 +1173,13 @@ out_nattch:
1806 - shm_destroy(ns, shp);
1807 - else
1808 - shm_unlock(shp);
1809 -- up_write(&shm_ids(ns).rw_mutex);
1810 --
1811 --out:
1812 -+ up_write(&shm_ids(ns).rwsem);
1813 - return err;
1814 -
1815 - out_unlock:
1816 -- shm_unlock(shp);
1817 -- goto out;
1818 --
1819 --out_free:
1820 -- kfree(sfd);
1821 --out_put_dentry:
1822 -- path_put(&path);
1823 -- goto out_nattch;
1824 -+ rcu_read_unlock();
1825 -+out:
1826 -+ return err;
1827 - }
1828 -
1829 - SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1830 -@@ -1242,8 +1284,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1831 - #else /* CONFIG_MMU */
1832 - /* under NOMMU conditions, the exact address to be destroyed must be
1833 - * given */
1834 -- retval = -EINVAL;
1835 -- if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1836 -+ if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1837 - do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1838 - retval = 0;
1839 - }
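The ipc/shm.c hunks above converge on a two-phase pattern: look the segment up under RCU with shm_obtain_object()/shm_obtain_object_check(), run the permission, security and audit checks without the per-object lock, and take ipc_lock_object() only around the actual update. A minimal sketch of that pattern, reusing the helpers named in this diff (error handling trimmed; this is illustrative, not code from the patch):

static int shm_update_sketch(struct ipc_namespace *ns, int shmid)
{
	struct shmid_kernel *shp;
	int err;

	rcu_read_lock();				/* phase 1: lock-free lookup */
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock1;
	}

	err = security_shm_shmctl(shp, IPC_SET);	/* checks done under RCU only */
	if (err)
		goto out_unlock1;

	ipc_lock_object(&shp->shm_perm);		/* phase 2: lock just for the update */
	shp->shm_ctim = get_seconds();
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}

The point of the split, visible in the shmctl_down()/SHM_LOCK hunks above, is to keep security_shm_shmctl() and the other checks out of the spinlocked region and hold kern_ipc_perm.lock only for the update itself.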
1840 -diff --git a/ipc/util.c b/ipc/util.c
1841 -index 0c6566b..fdb8ae7 100644
1842 ---- a/ipc/util.c
1843 -+++ b/ipc/util.c
1844 -@@ -15,6 +15,14 @@
1845 - * Jun 2006 - namespaces support
1846 - * OpenVZ, SWsoft Inc.
1847 - * Pavel Emelianov <xemul@××××××.org>
1848 -+ *
1849 -+ * General sysv ipc locking scheme:
1850 -+ * when doing ipc id lookups, take the ids->rwsem
1851 -+ * rcu_read_lock()
1852 -+ * obtain the ipc object (kern_ipc_perm)
1853 -+ * perform security, capabilities, auditing and permission checks, etc.
1854 -+ * acquire the ipc lock (kern_ipc_perm.lock) through ipc_lock_object()
1855 -+ * perform data updates (ie: SET, RMID, LOCK/UNLOCK commands)
1856 - */
1857 -
1858 - #include <linux/mm.h>
1859 -@@ -119,7 +127,7 @@ __initcall(ipc_init);
1860 -
1861 - void ipc_init_ids(struct ipc_ids *ids)
1862 - {
1863 -- init_rwsem(&ids->rw_mutex);
1864 -+ init_rwsem(&ids->rwsem);
1865 -
1866 - ids->in_use = 0;
1867 - ids->seq = 0;
1868 -@@ -174,7 +182,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
1869 - * @ids: Identifier set
1870 - * @key: The key to find
1871 - *
1872 -- * Requires ipc_ids.rw_mutex locked.
1873 -+ * Requires ipc_ids.rwsem locked.
1874 - * Returns the LOCKED pointer to the ipc structure if found or NULL
1875 - * if not.
1876 - * If key is found ipc points to the owning ipc structure
1877 -@@ -197,7 +205,8 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
1878 - continue;
1879 - }
1880 -
1881 -- ipc_lock_by_ptr(ipc);
1882 -+ rcu_read_lock();
1883 -+ ipc_lock_object(ipc);
1884 - return ipc;
1885 - }
1886 -
1887 -@@ -208,7 +217,7 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
1888 - * ipc_get_maxid - get the last assigned id
1889 - * @ids: IPC identifier set
1890 - *
1891 -- * Called with ipc_ids.rw_mutex held.
1892 -+ * Called with ipc_ids.rwsem held.
1893 - */
1894 -
1895 - int ipc_get_maxid(struct ipc_ids *ids)
1896 -@@ -246,7 +255,7 @@ int ipc_get_maxid(struct ipc_ids *ids)
1897 - * is returned. The 'new' entry is returned in a locked state on success.
1898 - * On failure the entry is not locked and a negative err-code is returned.
1899 - *
1900 -- * Called with writer ipc_ids.rw_mutex held.
1901 -+ * Called with writer ipc_ids.rwsem held.
1902 - */
1903 - int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
1904 - {
1905 -@@ -312,9 +321,9 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
1906 - {
1907 - int err;
1908 -
1909 -- down_write(&ids->rw_mutex);
1910 -+ down_write(&ids->rwsem);
1911 - err = ops->getnew(ns, params);
1912 -- up_write(&ids->rw_mutex);
1913 -+ up_write(&ids->rwsem);
1914 - return err;
1915 - }
1916 -
1917 -@@ -331,7 +340,7 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
1918 - *
1919 - * On success, the IPC id is returned.
1920 - *
1921 -- * It is called with ipc_ids.rw_mutex and ipcp->lock held.
1922 -+ * It is called with ipc_ids.rwsem and ipcp->lock held.
1923 - */
1924 - static int ipc_check_perms(struct ipc_namespace *ns,
1925 - struct kern_ipc_perm *ipcp,
1926 -@@ -376,7 +385,7 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
1927 - * Take the lock as a writer since we are potentially going to add
1928 - * a new entry + read locks are not "upgradable"
1929 - */
1930 -- down_write(&ids->rw_mutex);
1931 -+ down_write(&ids->rwsem);
1932 - ipcp = ipc_findkey(ids, params->key);
1933 - if (ipcp == NULL) {
1934 - /* key not used */
1935 -@@ -402,7 +411,7 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
1936 - }
1937 - ipc_unlock(ipcp);
1938 - }
1939 -- up_write(&ids->rw_mutex);
1940 -+ up_write(&ids->rwsem);
1941 -
1942 - return err;
1943 - }
1944 -@@ -413,7 +422,7 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
1945 - * @ids: IPC identifier set
1946 - * @ipcp: ipc perm structure containing the identifier to remove
1947 - *
1948 -- * ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
1949 -+ * ipc_ids.rwsem (as a writer) and the spinlock for this ID are held
1950 - * before this function is called, and remain locked on the exit.
1951 - */
1952 -
1953 -@@ -613,7 +622,7 @@ struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id)
1954 - }
1955 -
1956 - /**
1957 -- * ipc_lock - Lock an ipc structure without rw_mutex held
1958 -+ * ipc_lock - Lock an ipc structure without rwsem held
1959 - * @ids: IPC identifier set
1960 - * @id: ipc id to look for
1961 - *
1962 -@@ -669,22 +678,6 @@ out:
1963 - return out;
1964 - }
1965 -
1966 --struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
1967 --{
1968 -- struct kern_ipc_perm *out;
1969 --
1970 -- out = ipc_lock(ids, id);
1971 -- if (IS_ERR(out))
1972 -- return out;
1973 --
1974 -- if (ipc_checkid(out, id)) {
1975 -- ipc_unlock(out);
1976 -- return ERR_PTR(-EIDRM);
1977 -- }
1978 --
1979 -- return out;
1980 --}
1981 --
1982 - /**
1983 - * ipcget - Common sys_*get() code
1984 - * @ns : namespace
1985 -@@ -725,7 +718,7 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
1986 - }
1987 -
1988 - /**
1989 -- * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
1990 -+ * ipcctl_pre_down_nolock - retrieve an ipc and check permissions for some IPC_XXX cmd
1991 - * @ns: the ipc namespace
1992 - * @ids: the table of ids where to look for the ipc
1993 - * @id: the id of the ipc to retrieve
1994 -@@ -738,29 +731,13 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
1995 - * It must be called without any lock held and
1996 - * - retrieves the ipc with the given id in the given table.
1997 - * - performs some audit and permission check, depending on the given cmd
1998 -- * - returns the ipc with the ipc lock held in case of success
1999 -- * or an err-code without any lock held otherwise.
2000 -+ * - returns a pointer to the ipc object or otherwise, the corresponding error.
2001 - *
2002 -- * Call holding the both the rw_mutex and the rcu read lock.
2003 -+ * Call holding both the rwsem and the rcu read lock.
2004 - */
2005 --struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
2006 -- struct ipc_ids *ids, int id, int cmd,
2007 -- struct ipc64_perm *perm, int extra_perm)
2008 --{
2009 -- struct kern_ipc_perm *ipcp;
2010 --
2011 -- ipcp = ipcctl_pre_down_nolock(ns, ids, id, cmd, perm, extra_perm);
2012 -- if (IS_ERR(ipcp))
2013 -- goto out;
2014 --
2015 -- spin_lock(&ipcp->lock);
2016 --out:
2017 -- return ipcp;
2018 --}
2019 --
2020 - struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
2021 -- struct ipc_ids *ids, int id, int cmd,
2022 -- struct ipc64_perm *perm, int extra_perm)
2023 -+ struct ipc_ids *ids, int id, int cmd,
2024 -+ struct ipc64_perm *perm, int extra_perm)
2025 - {
2026 - kuid_t euid;
2027 - int err = -EPERM;
2028 -@@ -838,7 +815,8 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
2029 - ipc = idr_find(&ids->ipcs_idr, pos);
2030 - if (ipc != NULL) {
2031 - *new_pos = pos + 1;
2032 -- ipc_lock_by_ptr(ipc);
2033 -+ rcu_read_lock();
2034 -+ ipc_lock_object(ipc);
2035 - return ipc;
2036 - }
2037 - }
2038 -@@ -876,7 +854,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
2039 - * Take the lock - this will be released by the corresponding
2040 - * call to stop().
2041 - */
2042 -- down_read(&ids->rw_mutex);
2043 -+ down_read(&ids->rwsem);
2044 -
2045 - /* pos < 0 is invalid */
2046 - if (*pos < 0)
2047 -@@ -903,7 +881,7 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it)
2048 -
2049 - ids = &iter->ns->ids[iface->ids];
2050 - /* Release the lock we took in start() */
2051 -- up_read(&ids->rw_mutex);
2052 -+ up_read(&ids->rwsem);
2053 - }
2054 -
2055 - static int sysvipc_proc_show(struct seq_file *s, void *it)
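The new comment at the top of ipc/util.c spells out the locking order that the rest of these hunks implement: ids->rwsem for id management, then the RCU read lock, then the checks, and only then kern_ipc_perm.lock via ipc_lock_object(). Condensed into one write-mode helper, the order looks roughly like this (a sketch built from the helpers shown above, not text from the patch):

static int ipc_set_sketch(struct ipc_namespace *ns, struct ipc_ids *ids,
			  int id, int cmd, struct ipc64_perm *perm)
{
	struct kern_ipc_perm *ipcp;
	int err = 0;

	down_write(&ids->rwsem);		/* 1. ids->rwsem, write mode */
	rcu_read_lock();			/* 2. RCU read side */

	ipcp = ipcctl_pre_down_nolock(ns, ids, id, cmd, perm, 0);
	if (IS_ERR(ipcp)) {			/* 3. lookup + audit/permission checks */
		err = PTR_ERR(ipcp);
		goto out_unlock;
	}

	ipc_lock_object(ipcp);			/* 4. kern_ipc_perm.lock taken last */
	err = ipc_update_perm(perm, ipcp);	/* 5. the data update itself */
	ipc_unlock_object(ipcp);
out_unlock:
	rcu_read_unlock();
	up_write(&ids->rwsem);
	return err;
}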
2056 -diff --git a/ipc/util.h b/ipc/util.h
2057 -index 25299e7..f2f5036 100644
2058 ---- a/ipc/util.h
2059 -+++ b/ipc/util.h
2060 -@@ -101,10 +101,10 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
2061 - #define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER)
2062 - #define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER)
2063 -
2064 --/* must be called with ids->rw_mutex acquired for writing */
2065 -+/* must be called with ids->rwsem acquired for writing */
2066 - int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int);
2067 -
2068 --/* must be called with ids->rw_mutex acquired for reading */
2069 -+/* must be called with ids->rwsem acquired for reading */
2070 - int ipc_get_maxid(struct ipc_ids *);
2071 -
2072 - /* must be called with both locks acquired. */
2073 -@@ -139,9 +139,6 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out);
2074 - struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
2075 - struct ipc_ids *ids, int id, int cmd,
2076 - struct ipc64_perm *perm, int extra_perm);
2077 --struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
2078 -- struct ipc_ids *ids, int id, int cmd,
2079 -- struct ipc64_perm *perm, int extra_perm);
2080 -
2081 - #ifndef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
2082 - /* On IA-64, we always use the "64-bit version" of the IPC structures. */
2083 -@@ -182,19 +179,12 @@ static inline void ipc_assert_locked_object(struct kern_ipc_perm *perm)
2084 - assert_spin_locked(&perm->lock);
2085 - }
2086 -
2087 --static inline void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
2088 --{
2089 -- rcu_read_lock();
2090 -- ipc_lock_object(perm);
2091 --}
2092 --
2093 - static inline void ipc_unlock(struct kern_ipc_perm *perm)
2094 - {
2095 - ipc_unlock_object(perm);
2096 - rcu_read_unlock();
2097 - }
2098 -
2099 --struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id);
2100 - struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id);
2101 - int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
2102 - struct ipc_ops *ops, struct ipc_params *params);
2103 -diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
2104 -index 45850f6..4865756 100644
2105 ---- a/sound/pci/hda/patch_hdmi.c
2106 -+++ b/sound/pci/hda/patch_hdmi.c
2107 -@@ -930,6 +930,14 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
2108 - }
2109 -
2110 - /*
2111 -+ * always configure channel mapping, it may have been changed by the
2112 -+ * user in the meantime
2113 -+ */
2114 -+ hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
2115 -+ channels, per_pin->chmap,
2116 -+ per_pin->chmap_set);
2117 -+
2118 -+ /*
2119 - * sizeof(ai) is used instead of sizeof(*hdmi_ai) or
2120 - * sizeof(*dp_ai) to avoid partial match/update problems when
2121 - * the user switches between HDMI/DP monitors.
2122 -@@ -940,20 +948,10 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
2123 - "pin=%d channels=%d\n",
2124 - pin_nid,
2125 - channels);
2126 -- hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
2127 -- channels, per_pin->chmap,
2128 -- per_pin->chmap_set);
2129 - hdmi_stop_infoframe_trans(codec, pin_nid);
2130 - hdmi_fill_audio_infoframe(codec, pin_nid,
2131 - ai.bytes, sizeof(ai));
2132 - hdmi_start_infoframe_trans(codec, pin_nid);
2133 -- } else {
2134 -- /* For non-pcm audio switch, setup new channel mapping
2135 -- * accordingly */
2136 -- if (per_pin->non_pcm != non_pcm)
2137 -- hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
2138 -- channels, per_pin->chmap,
2139 -- per_pin->chmap_set);
2140 - }
2141 -
2142 - per_pin->non_pcm = non_pcm;
2143 -diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2144 -index 389db4c..1383f38 100644
2145 ---- a/sound/pci/hda/patch_realtek.c
2146 -+++ b/sound/pci/hda/patch_realtek.c
2147 -@@ -3308,6 +3308,15 @@ static void alc269_fixup_limit_int_mic_boost(struct hda_codec *codec,
2148 - }
2149 - }
2150 -
2151 -+static void alc290_fixup_mono_speakers(struct hda_codec *codec,
2152 -+ const struct hda_fixup *fix, int action)
2153 -+{
2154 -+ if (action == HDA_FIXUP_ACT_PRE_PROBE)
2155 -+ /* Remove DAC node 0x03, as it seems to be
2156 -+ giving mono output */
2157 -+ snd_hda_override_wcaps(codec, 0x03, 0);
2158 -+}
2159 -+
2160 - enum {
2161 - ALC269_FIXUP_SONY_VAIO,
2162 - ALC275_FIXUP_SONY_VAIO_GPIO2,
2163 -@@ -3331,9 +3340,12 @@ enum {
2164 - ALC269_FIXUP_HP_GPIO_LED,
2165 - ALC269_FIXUP_INV_DMIC,
2166 - ALC269_FIXUP_LENOVO_DOCK,
2167 -+ ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
2168 - ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
2169 - ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
2170 - ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
2171 -+ ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
2172 -+ ALC290_FIXUP_MONO_SPEAKERS,
2173 - ALC269_FIXUP_HEADSET_MODE,
2174 - ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
2175 - ALC269_FIXUP_ASUS_X101_FUNC,
2176 -@@ -3521,6 +3533,15 @@ static const struct hda_fixup alc269_fixups[] = {
2177 - .chained = true,
2178 - .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
2179 - },
2180 -+ [ALC269_FIXUP_DELL3_MIC_NO_PRESENCE] = {
2181 -+ .type = HDA_FIXUP_PINS,
2182 -+ .v.pins = (const struct hda_pintbl[]) {
2183 -+ { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */
2184 -+ { }
2185 -+ },
2186 -+ .chained = true,
2187 -+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
2188 -+ },
2189 - [ALC269_FIXUP_HEADSET_MODE] = {
2190 - .type = HDA_FIXUP_FUNC,
2191 - .v.func = alc_fixup_headset_mode,
2192 -@@ -3529,6 +3550,13 @@ static const struct hda_fixup alc269_fixups[] = {
2193 - .type = HDA_FIXUP_FUNC,
2194 - .v.func = alc_fixup_headset_mode_no_hp_mic,
2195 - },
2196 -+ [ALC286_FIXUP_SONY_MIC_NO_PRESENCE] = {
2197 -+ .type = HDA_FIXUP_PINS,
2198 -+ .v.pins = (const struct hda_pintbl[]) {
2199 -+ { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
2200 -+ { }
2201 -+ },
2202 -+ },
2203 - [ALC269_FIXUP_ASUS_X101_FUNC] = {
2204 - .type = HDA_FIXUP_FUNC,
2205 - .v.func = alc269_fixup_x101_headset_mic,
2206 -@@ -3595,6 +3623,12 @@ static const struct hda_fixup alc269_fixups[] = {
2207 - { }
2208 - },
2209 - },
2210 -+ [ALC290_FIXUP_MONO_SPEAKERS] = {
2211 -+ .type = HDA_FIXUP_FUNC,
2212 -+ .v.func = alc290_fixup_mono_speakers,
2213 -+ .chained = true,
2214 -+ .chain_id = ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
2215 -+ },
2216 - };
2217 -
2218 - static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2219 -@@ -3631,6 +3665,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2220 - SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
2221 - SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
2222 - SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
2223 -+ SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
2224 - SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
2225 - SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
2226 - SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
2227 -@@ -3651,6 +3686,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2228 - SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
2229 - SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
2230 - SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
2231 -+ SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
2232 - SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
2233 - SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
2234 - SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
2235 -@@ -4345,6 +4381,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
2236 - SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
2237 - SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
2238 - SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
2239 -+ SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
2240 - SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
2241 - SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
2242 - SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
2243 -diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
2244 -index 63fb521..6234a51 100644
2245 ---- a/sound/usb/usx2y/usbusx2yaudio.c
2246 -+++ b/sound/usb/usx2y/usbusx2yaudio.c
2247 -@@ -299,19 +299,6 @@ static void usX2Y_error_urb_status(struct usX2Ydev *usX2Y,
2248 - usX2Y_clients_stop(usX2Y);
2249 - }
2250 -
2251 --static void usX2Y_error_sequence(struct usX2Ydev *usX2Y,
2252 -- struct snd_usX2Y_substream *subs, struct urb *urb)
2253 --{
2254 -- snd_printk(KERN_ERR
2255 --"Sequence Error!(hcd_frame=%i ep=%i%s;wait=%i,frame=%i).\n"
2256 --"Most probably some urb of usb-frame %i is still missing.\n"
2257 --"Cause could be too long delays in usb-hcd interrupt handling.\n",
2258 -- usb_get_current_frame_number(usX2Y->dev),
2259 -- subs->endpoint, usb_pipein(urb->pipe) ? "in" : "out",
2260 -- usX2Y->wait_iso_frame, urb->start_frame, usX2Y->wait_iso_frame);
2261 -- usX2Y_clients_stop(usX2Y);
2262 --}
2263 --
2264 - static void i_usX2Y_urb_complete(struct urb *urb)
2265 - {
2266 - struct snd_usX2Y_substream *subs = urb->context;
2267 -@@ -328,12 +315,9 @@ static void i_usX2Y_urb_complete(struct urb *urb)
2268 - usX2Y_error_urb_status(usX2Y, subs, urb);
2269 - return;
2270 - }
2271 -- if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
2272 -- subs->completed_urb = urb;
2273 -- else {
2274 -- usX2Y_error_sequence(usX2Y, subs, urb);
2275 -- return;
2276 -- }
2277 -+
2278 -+ subs->completed_urb = urb;
2279 -+
2280 - {
2281 - struct snd_usX2Y_substream *capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE],
2282 - *playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
2283 -diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
2284 -index f2a1acd..814d0e8 100644
2285 ---- a/sound/usb/usx2y/usx2yhwdeppcm.c
2286 -+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
2287 -@@ -244,13 +244,8 @@ static void i_usX2Y_usbpcm_urb_complete(struct urb *urb)
2288 - usX2Y_error_urb_status(usX2Y, subs, urb);
2289 - return;
2290 - }
2291 -- if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
2292 -- subs->completed_urb = urb;
2293 -- else {
2294 -- usX2Y_error_sequence(usX2Y, subs, urb);
2295 -- return;
2296 -- }
2297 -
2298 -+ subs->completed_urb = urb;
2299 - capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
2300 - capsubs2 = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
2301 - playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
2302
2303 diff --git a/3.11.6/4420_grsecurity-2.9.1-3.11.6-201310191259.patch b/3.11.6/4420_grsecurity-2.9.1-3.11.6-201310260850.patch
2304 similarity index 99%
2305 rename from 3.11.6/4420_grsecurity-2.9.1-3.11.6-201310191259.patch
2306 rename to 3.11.6/4420_grsecurity-2.9.1-3.11.6-201310260850.patch
2307 index 46b1e15..584d2ee 100644
2308 --- a/3.11.6/4420_grsecurity-2.9.1-3.11.6-201310191259.patch
2309 +++ b/3.11.6/4420_grsecurity-2.9.1-3.11.6-201310260850.patch
2310 @@ -18357,7 +18357,7 @@ index 7f760a9..04b1c65 100644
2311 }
2312
2313 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
2314 -index 4f7923d..20cb24e 100644
2315 +index 4f7923d..d3526c1 100644
2316 --- a/arch/x86/include/asm/uaccess_64.h
2317 +++ b/arch/x86/include/asm/uaccess_64.h
2318 @@ -10,6 +10,9 @@
2319 @@ -18370,7 +18370,7 @@ index 4f7923d..20cb24e 100644
2320
2321 /*
2322 * Copy To/From Userspace
2323 -@@ -17,13 +20,13 @@
2324 +@@ -17,14 +20,14 @@
2325
2326 /* Handles exceptions in both to and from, but doesn't do access_ok */
2327 __must_check unsigned long
2328 @@ -18384,16 +18384,13 @@ index 4f7923d..20cb24e 100644
2329 +copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
2330
2331 -static __always_inline __must_check unsigned long
2332 +-copy_user_generic(void *to, const void *from, unsigned len)
2333 +static __always_inline __must_check __size_overflow(3) unsigned long
2334 - copy_user_generic(void *to, const void *from, unsigned len)
2335 ++copy_user_generic(void *to, const void *from, unsigned long len)
2336 {
2337 unsigned ret;
2338 -@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
2339 - ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
2340 - "=d" (len)),
2341 - "1" (to), "2" (from), "3" (len)
2342 -- : "memory", "rcx", "r8", "r9", "r10", "r11");
2343 -+ : "memory", "rcx", "r8", "r9", "r11");
2344 +
2345 +@@ -45,138 +48,200 @@ copy_user_generic(void *to, const void *from, unsigned len)
2346 return ret;
2347 }
2348
2349 @@ -97256,7 +97253,7 @@ index f5eb43d..1814de8 100644
2350 shdr = (Elf_Shdr *)((char *)ehdr + _r(&ehdr->e_shoff));
2351 shstrtab_sec = shdr + r2(&ehdr->e_shstrndx);
2352 diff --git a/security/Kconfig b/security/Kconfig
2353 -index e9c6ac7..c5d45c8 100644
2354 +index e9c6ac7..5b9d82e 100644
2355 --- a/security/Kconfig
2356 +++ b/security/Kconfig
2357 @@ -4,6 +4,959 @@
2358 @@ -98185,14 +98182,14 @@ index e9c6ac7..c5d45c8 100644
2359 + headers explicitly in addition to the normal gcc package.
2360 +
2361 +config PAX_LATENT_ENTROPY
2362 -+ bool "Generate some entropy during boot"
2363 ++ bool "Generate some entropy during boot and runtime"
2364 + default y if GRKERNSEC_CONFIG_AUTO
2365 + help
2366 -+ By saying Y here the kernel will instrument early boot code to
2367 ++ By saying Y here the kernel will instrument some kernel code to
2368 + extract some entropy from both original and artificially created
2369 + program state. This will help especially embedded systems where
2370 + there is little 'natural' source of entropy normally. The cost
2371 -+ is some slowdown of the boot process.
2372 ++ is some slowdown of the boot process and fork and irq processing.
2373 +
2374 + When pax_extra_latent_entropy is passed on the kernel command line,
2375 + entropy will be extracted from up to the first 4GB of RAM while the
2376
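The regenerated uaccess_64.h hunk above (the 3.2.51 patch below carries the same change) tags the user-copy primitives with __size_overflow(3), the attribute consumed by the PaX/grsecurity size_overflow gcc plugin to mark which argument is a length, and switches copy_user_generic()'s length to unsigned long. Applied to an ordinary declaration the annotation looks like this (illustrative only; example_copy_to_user() is a made-up name, not something the patch adds):

/* argument 3 (len) is the length the size_overflow plugin instruments */
__must_check unsigned long
example_copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);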
2377 diff --git a/3.2.51/0000_README b/3.2.51/0000_README
2378 index 7299d26..8ba65d1 100644
2379 --- a/3.2.51/0000_README
2380 +++ b/3.2.51/0000_README
2381 @@ -122,7 +122,7 @@ Patch: 1050_linux-3.2.51.patch
2382 From: http://www.kernel.org
2383 Desc: Linux 3.2.51
2384
2385 -Patch: 4420_grsecurity-2.9.1-3.2.51-201310191257.patch
2386 +Patch: 4420_grsecurity-2.9.1-3.2.51-201310260849.patch
2387 From: http://www.grsecurity.net
2388 Desc: hardened-sources base patch from upstream grsecurity
2389
2390
2391 diff --git a/3.2.51/4420_grsecurity-2.9.1-3.2.51-201310191257.patch b/3.2.51/4420_grsecurity-2.9.1-3.2.51-201310260849.patch
2392 similarity index 99%
2393 rename from 3.2.51/4420_grsecurity-2.9.1-3.2.51-201310191257.patch
2394 rename to 3.2.51/4420_grsecurity-2.9.1-3.2.51-201310260849.patch
2395 index 4e9a590..0ea9ee0 100644
2396 --- a/3.2.51/4420_grsecurity-2.9.1-3.2.51-201310191257.patch
2397 +++ b/3.2.51/4420_grsecurity-2.9.1-3.2.51-201310260849.patch
2398 @@ -14469,7 +14469,7 @@ index 566e803..86f1302 100644
2399 }
2400
2401 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
2402 -index 1c66d30..6c7b4d8 100644
2403 +index 1c66d30..c815e61 100644
2404 --- a/arch/x86/include/asm/uaccess_64.h
2405 +++ b/arch/x86/include/asm/uaccess_64.h
2406 @@ -10,6 +10,9 @@
2407 @@ -14499,12 +14499,7 @@ index 1c66d30..6c7b4d8 100644
2408 {
2409 unsigned ret;
2410
2411 -@@ -32,142 +35,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
2412 - ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
2413 - "=d" (len)),
2414 - "1" (to), "2" (from), "3" (len)
2415 -- : "memory", "rcx", "r8", "r9", "r10", "r11");
2416 -+ : "memory", "rcx", "r8", "r9", "r11");
2417 +@@ -36,138 +39,200 @@ copy_user_generic(void *to, const void *from, unsigned len)
2418 return ret;
2419 }
2420
2421 @@ -97045,7 +97040,7 @@ index 38f6617..e70b72b 100755
2422
2423 exuberant()
2424 diff --git a/security/Kconfig b/security/Kconfig
2425 -index 51bd5a0..433ef3c 100644
2426 +index 51bd5a0..e4faa00 100644
2427 --- a/security/Kconfig
2428 +++ b/security/Kconfig
2429 @@ -4,6 +4,954 @@
2430 @@ -97969,14 +97964,14 @@ index 51bd5a0..433ef3c 100644
2431 + headers explicitly in addition to the normal gcc package.
2432 +
2433 +config PAX_LATENT_ENTROPY
2434 -+ bool "Generate some entropy during boot"
2435 ++ bool "Generate some entropy during boot and runtime"
2436 + default y if GRKERNSEC_CONFIG_AUTO
2437 + help
2438 -+ By saying Y here the kernel will instrument early boot code to
2439 ++ By saying Y here the kernel will instrument some kernel code to
2440 + extract some entropy from both original and artificially created
2441 + program state. This will help especially embedded systems where
2442 + there is little 'natural' source of entropy normally. The cost
2443 -+ is some slowdown of the boot process.
2444 ++ is some slowdown of the boot process and fork and irq processing.
2445 +
2446 + When pax_extra_latent_entropy is passed on the kernel command line,
2447 + entropy will be extracted from up to the first 4GB of RAM while the